code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python3
# ---
# This notebook is designed to run in an IBM Watson Studio default runtime (NOT the Watson Studio Apache Spark Runtime, as the default runtime with 1 vCPU is free of charge). Therefore, we install Apache Spark in local mode for test purposes only. Please don't use it in production.
#
# In case you are facing issues, please read the following two documents first:
#
# https://github.com/IBM/skillsnetwork/wiki/Environment-Setup
#
# https://github.com/IBM/skillsnetwork/wiki/FAQ
#
# Then, please feel free to ask:
#
# https://coursera.org/learn/machine-learning-big-data-apache-spark/discussions/all
#
# Please make sure to follow the guidelines before asking a question:
#
# https://github.com/IBM/skillsnetwork/wiki/FAQ#im-feeling-lost-and-confused-please-help-me
#
#
# If running outside Watson Studio, this should work as well. In case you are running in an Apache Spark context outside Watson Studio, please remove the Apache Spark setup in the first notebook cells.
# +
from IPython.display import Markdown, display


def printmd(string):
    """Render *string* as a large red Markdown heading in the notebook output."""
    display(Markdown(f'# <span style="color:red">{string}</span>'))


# A pre-existing `sc` means a SparkContext was already injected, i.e. this is
# running in a Watson Studio *Spark* runtime instead of the default runtime.
if 'sc' in locals() or 'sc' in globals():
    printmd('<<<<<!!!!! It seems that you are running in a IBM Watson Studio Apache Spark Notebook. Please run it in an IBM Watson Studio Default Runtime (without Apache Spark) !!!!!>>>>>')
# -
# !pip install pyspark==2.4.5
try:
    from pyspark import SparkContext, SparkConf
    from pyspark.sql import SparkSession
except ImportError:
    # Dropped the unused `as e` binding. pyspark only becomes importable after
    # the kernel is restarted following the `pip install` above, so just tell
    # the user to restart rather than crashing the notebook.
    printmd('<<<<<!!!!! Please restart your kernel after installing Apache Spark !!!!!>>>>>')
# +
# Create (or reuse) a local-mode SparkContext that uses all available cores,
# then obtain the SparkSession built on top of it.
conf = SparkConf().setMaster("local[*]")
sc = SparkContext.getOrCreate(conf)
spark = SparkSession.builder.getOrCreate()
# -
# Welcome to exercise three of “Apache Spark for Scalable Machine Learning on BigData”. In this exercise you’ll create a DataFrame, register a temporary query table and issue SQL commands against it.
#
# Let’s create a little data frame:
# +
from pyspark.sql import Row

# Build a tiny two-row DataFrame, then inspect its contents and its schema.
rows = [Row(id=1, value='value1'), Row(id=2, value='value2')]
df = spark.createDataFrame(rows)
df.show()
df.printSchema()
# -
# Now we register this DataFrame as query table and issue an SQL statement against it. Please note that the result of the SQL execution returns a new DataFrame we can work with.
# +
# Expose the DataFrame to Spark SQL under the view name 'df_view'.
df.createOrReplaceTempView('df_view')

# Running SQL against the view yields a brand-new DataFrame.
df_result = spark.sql('select value from df_view where id=2')
df_result.show()

# Extract the 'value' column of the first (and only) result row as a string.
df_result.first().value
# -
# Although we’ll learn more about DataFrames next week, please try to find a way to count the rows in this DataFrame by looking at the API documentation. No worries, we’ll cover DataFrames in more detail next week.
#
# https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.DataFrame
# Count the rows in the DataFrame (expected: 2 for the example above).
df.count()
| scalable-machine-learning-on-big-data-using-apache-spark/Week 1/Exercise 3 - working with DataFrames.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
import os
import sys
sys.path.append("../src/")
import matplotsoccer as mps
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
# +
# Path to the SPADL-format HDF5 store holding games, actions, players, teams.
data = "../data/spadl-v2.hdf"
games = pd.read_hdf(data,key="games")
# competition_id 8 / season_id 2016: presumably the 2016/17 English Premier
# League season -- TODO confirm against the data provider's ID tables.
epl16 = games[(games.competition_id == 8) & (games.season_id == 2016)]
epl16[:5]
def get_actions(games, hdf_url):
    """Load the action table of every game in *games* from the HDF store.

    Adds a boolean ``left_to_right`` column marking actions performed by the
    home team, then returns all per-game tables concatenated into a single
    DataFrame.
    """
    per_game = []
    for game in tqdm(list(games.itertuples())):
        game_actions = pd.read_hdf(hdf_url, key="actions/" + str(game.id))
        game_actions["left_to_right"] = game_actions["team_id"] == game.home_team_id
        per_game.append(game_actions)
    return pd.concat(per_game)
def always_ltr(actions):
    """Mirror away-team actions in place so every action reads left-to-right.

    Rows where ``left_to_right`` is False get their start/end coordinates
    reflected on a 105 x 68 pitch. Returns the same (mutated) DataFrame.
    """
    flipped = ~actions.left_to_right
    for column, extent in (("start_x", 105), ("start_y", 68),
                           ("end_x", 105), ("end_y", 68)):
        actions.loc[flipped, column] = extent - actions[flipped][column].values
    return actions
actions = get_actions(epl16,data)
# Attach human-readable action type names to each action via its type_id.
actiontypes = pd.read_hdf(data, key="actiontypes")
actiontypes.columns = ["type_id","type_name"]
actions = actions.merge(actiontypes, on="type_id")
# Join in player metadata. NOTE(review): these are default inner merges, so
# actions whose player_id/team_id have no match are dropped -- confirm intended.
players = pd.read_hdf(data,key="players")
actions = actions.merge(players,left_on="player_id",right_on="id")
# Join in team metadata.
teams = pd.read_hdf(data,key="teams")
actions = actions.merge(teams,left_on="team_id",right_on="id")
# Restore chronological order, which the merges do not preserve.
actions = actions.sort_values(["game_id","period_id","time_seconds","timestamp"])
actions.columns
# -
# Select every action by a player whose last name contains "Kompany",
# and list the distinct matching player names as a sanity check.
player_actions = actions[actions.last_name.str.contains("Kompany")].copy()
set(player_actions.soccer_name)
# Mirror away-game actions so everything reads left-to-right, then pull out
# the action start coordinates for plotting.
player_actions = always_ltr(player_actions)
x,y = player_actions.start_x, player_actions.start_y
# # Field
# Draw an empty pitch: first with default styling, then green at figsize 10.
f = mps.field()
f = mps.field(color="green",figsize=10)
# # Heatmap
# +
# Scatter the raw action start locations on a pitch...
ax = mps.field(show=False)
ax.scatter(x,y,s=2); plt.show()
# ...then bin them into a 20x20 grid and render the counts as a heatmap.
matrix = mps.count(x,y,n=20,m=20)
hm = mps.heatmap(matrix)
# -
# Take a window of 40 consecutive actions starting at row 499605 and keep
# only the columns needed for display.
start = 499605
delta = 40
phase = actions[start:start+delta].copy()
phase["team"] = phase.full_name
phase["player"] = phase.soccer_name
phase = phase[["team","player","time_seconds","type_name","result","start_x","start_y","end_x","end_y"]]
# NOTE(review): phase[0-5:0+1] is phase[-5:1], an empty slice for this 40-row
# frame -- looks like a leftover from a parameterized phase[i-5:i+1]; confirm.
phase[0-5:0+1]
# # Actions
# +
# %matplotlib nbagg
#import matplotlib.animation as animation
from matplotlib import animation
#from IPython.display import HTML
def animate(i):
    # Redraw a sliding window of up to 8 actions around frame i.
    # NOTE(review): blit=True expects animate() to return a sequence of
    # artists; mps.actions presumably returns a figure/axes instead, which may
    # be why this notebook is titled "does not work yet" -- confirm.
    return mps.actions(phase[max(i-5,0):i+3],figsize = 8),
fig = plt.figure()
anim = animation.FuncAnimation(fig, animate,
    frames=2, interval=20, blit=True)
# +
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

TWOPI = 2 * np.pi

fig, ax = plt.subplots()

# Static sine curve for the dot to travel along.
t = np.arange(0.0, TWOPI, 0.001)
s = np.sin(t)
l = plt.plot(t, s)
ax = plt.axis([0, TWOPI, -1, 1])

redDot, = plt.plot([0], [np.sin(0)], 'ro')

def animate(i):
    """Move the red dot to angle *i* along the sine curve."""
    # Line2D.set_data requires sequences; passing bare scalars (as the
    # original code did) is rejected by Matplotlib >= 3.7.
    redDot.set_data([i], [np.sin(i)])
    return redDot,

# create animation using the animate() function
myAnimation = animation.FuncAnimation(fig, animate, frames=np.arange(0.0, TWOPI, 0.1),
                                      interval=10, blit=True, repeat=True)
plt.show()
| experimental-notebooks/animate-actions (does not work yet).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Calculate the Circumference of a Circle
# ## Please Define the radius of the Circle
import math

# Read the circle's radius from the user and report its circumference.
# Fixed typos in the user-facing strings ("Circule" -> "Circle",
# "Redius" -> "Radius").
rd = float(input('Please Enter the radius of Circle :'))
Circumference = 2 * math.pi * rd
print('Circumference of Circle of Radius {} is {}'.format(rd, Circumference))
| Circumference of circule.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demos: Lecture 8
# ## Demo 1: Grover revisited
# <img src="fig/grover_full.png" width="500px">
# ## Demo 2: `qml.specs`
| demos/Lecture08-Demos-Blank.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=["remove_cell"]
# # An Introduction to Linear Algebra for Quantum Computing
#
# -
from matplotlib import pyplot as plt
import numpy as np
from qiskit import *
from qiskit.visualization import plot_bloch_vector
# **Introduction**
#
# Linear algebra is the language of quantum computing. It is therefore crucial to develop a good understanding of the basic mathematical concepts that linear algebra is built upon, in order to arrive at many of the amazing and interesting constructions seen in quantum computation. The goal of this section is to create a foundation of introductory linear algebra knowledge, upon which the reader can build during their study of quantum computing.
#
# **Vectors and Vector Spaces**
#
# We will start our investigation into introductory linear algebra by first discussing one of the most important mathematical quantities in quantum computation: the vector.
#
# Formally, a **vector** $|v\rangle$ is defined as elements of a set known as a vector space. A more intuitive and geometric definition is that a vector "is a mathematical quantity with both direction and magnitude". For instance, consider a vector with $x$ and $y$ components of the form $\begin{pmatrix} 3 \\ 5 \end{pmatrix}$. This vector can be visualized as an arrow pointing in the direction of $3$ units down the $x$ axis and $5$ units up the $y$ axis:
# Draw the vector (3, 5) as an arrow.
# NOTE(review): with a single (U, V) pair and no explicit X, Y, quiver places
# the arrow tail at the origin by default; angles='xy' with scale_units='xy'
# and scale=1 keeps the arrow in data coordinates so it really spans 3 units
# along x and 5 along y -- confirm against the Matplotlib quiver docs.
plt.figure()
ax = plt.gca()
ax.quiver([3], [5], angles='xy', scale_units='xy', scale=1)
ax.set_xlim([-1, 10])
ax.set_ylim([-1, 10])
plt.draw()
plt.show()
# Note that "tail" of the vector doesn't have to be positioned at the origin; it only needs to point in the correct direction.
#
# In quantum computing, we often deal with **state vectors**, which are simply vectors that point to a specific point in space that corresponds to a particular quantum state. This can be visualized using a Bloch sphere. For instance, a vector representing the state of a quantum system could look something like this arrow, enclosed inside the Bloch sphere, which is the so-called "state space" of all possible points to which our state vectors can "point":
# State vector along +x on the Bloch sphere: an equal superposition of
# |0> and |1>, halfway between the poles.
plot_bloch_vector([1, 0, 0])
# This particular state corresponds to an even superposition between $|0\rangle$ and $|1\rangle$ (the arrow is halfway between $|0\rangle$ at the top and $|1\rangle$ at the bottom of the sphere). Our vectors are allowed to rotate anywhere on the surface of the sphere, and each of these points represents a different quantum state.
#
# Let's revisit our more formal definition of a vector, which is that a vector is an element of a vector space. We must now define a vector space. A **vector space** $V$ over a **field** $F$ is a set of objects (vectors), where two conditions hold. Firstly, **vector addition** of two vectors $|a\rangle, \ |b\rangle \ \in \ V$ will yield a third vector $|a\rangle \ + \ |b\rangle \ = \ |c\rangle$, also contained in $V$. The second condition is that **scalar multiplication** between some $|a\rangle \ \in \ V$ and some $n \ \in \ F$, denoted by $n|a\rangle$, is also contained within $V$.
#
# We will now clarify this previous definition by working through a basic example. Let us demonstrate that the set $\mathbb{R}^2$ over the field $\mathbb{R}$ is a vector space. We assert that
#
# <br>
#
#
# $$\begin{pmatrix} x_1 \\ y_1 \end{pmatrix} \ + \ \begin{pmatrix} x_2 \\ y_2 \end{pmatrix} \ = \ \begin{pmatrix} x_1 \ + \ x_2 \\ y_1 \ + \ y_2 \end{pmatrix}$$
#
#
# <br>
#
# is contained within $\mathbb{R}^2$. This is evidently the case, as the sum of two real numbers is a real number, making both components of the newly-formed vector real numbers; thus, the vector is contained in $\mathbb{R}^2$ by definition. We also assert that:
#
# <br>
#
#
# $$n |v\rangle \ = \ \begin{pmatrix} nx \\ ny \end{pmatrix} \ \in \ V \ \ \ \ \forall n \ \in \ \mathbb{R}$$
#
#
# <br>
#
# This is true as well, as the product of a real number and a real number is a real number, making the entire new vector real, and thus proving this statement.
#
# **Matrices and Matrix Operations**
#
# Let's turn our attention to another fundamental concept: a **matrix**. Matrices are mathematical objects that transform vectors to other vectors:
#
# <br>
#
#
# $$|v\rangle \ \rightarrow \ |v'\rangle \ = \ M |v\rangle$$
#
#
# <br>
#
# Generally, matrices are written as "arrays" of numbers, looking something like this:
#
# <br>
#
#
# $$M \ = \ \begin{pmatrix} 1 & -2 & 3 \\ 1 & 5i & 0 \\ 1 \ + \ i & 7 & -4 \end{pmatrix}$$
#
#
# <br>
#
# We can "apply" a matrix to a vector by performing matrix multiplication. In general, matrix multiplication between two matrices involves taking the first row of the first matrix, and multiplying each element by its "partner" in the first column of the second matrix (the first number of the row is multiplied by the first number of the column, second number of the row and second number of column, etc.). The sum of these new numbers becomes the first element of the first row of the new matrix. To fill in the rest of the first row, we repeat this process for the second, third, etc. columns of the second matrix. Then we take the second row of the first matrix, and repeat the process for each column of the second matrix, to produce the second row. We perform this process until we have used all rows of the first matrix. The resulting matrix is our new matrix. Here is an example:
#
# <br>
#
#
# $$\begin{pmatrix} 2 & 0 \\ 5 & -1 \end{pmatrix} \begin{pmatrix} -3 & 1 \\ 2 & 1 \end{pmatrix} \ = \ \begin{pmatrix} (2)(-3) + (0)(2) & (2)(1) \ + \ (0)(1) \\ (5)(-3) + (-1)(2) & (5)(1) \ + \ (-1)(1) \end{pmatrix} \ = \ \begin{pmatrix} -6 & 2 \\ -17 & 4 \end{pmatrix}$$
#
#
# <br>
#
# To perform a quantum computation, we have some quantum state vector we manipulate by applying a matrix to that vector. A vector is simply a matrix with one column. To apply a matrix to a vector, therefore, we follow the same matrix multiplication procedure described above. We manipulate qubits in our quantum computer by applying sequences of **quantum gates**. Each quantum gate can be expressed as a matrix that can be applied to state vectors, thus changing the state. For instance, a commonly seen quantum gate is the Pauli-X gate, which is represented by the following matrix:
#
# <br>
#
#
# $$\sigma_x \ = \ \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix}$$
#
#
# <br>
#
# This gate acts similarly to the classical NOT logic gate. It maps the computational basis state $|0\rangle$ to $|1\rangle$ and $|1\rangle$ to $|0\rangle$ (it "flips" the state). We write the two basis states as column vectors:
#
# <br>
#
#
# $$|0\rangle \ = \ \begin{pmatrix} 1 \\ 0 \end{pmatrix} \ \ \ \ \ \ \ |1\rangle \ = \ \begin{pmatrix} 0 \\ 1 \end{pmatrix}$$
#
#
# <br>
#
# When we apply this matrix to each of the vectors:
#
# <br>
#
#
# $$\sigma_x |0\rangle \ = \ \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix} \begin{pmatrix} 1 \\ 0 \end{pmatrix} \ = \ \begin{pmatrix} (0)(1) \ + \ (1)(0) \\ (1)(1) \ + \ (0)(0) \end{pmatrix} \ = \ \begin{pmatrix} 0 \\ 1 \end{pmatrix} \ = \ |1\rangle$$
#
#
# <br>
#
#
# $$\sigma_x |1\rangle \ = \ \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix} \begin{pmatrix} 0 \\ 1 \end{pmatrix} \ = \ \begin{pmatrix} (0)(0) \ + \ (1)(1) \\ (1)(0) \ + \ (0)(1) \end{pmatrix} \ = \ \begin{pmatrix} 1 \\ 0 \end{pmatrix} \ = \ |0\rangle$$
#
#
# <br>
#
# The matrix acts on the state vectors as expected.
#
# Within quantum computation, we often encounter two important types of matrices: **Hermitian** and **Unitary** matrices. The former is more important in the study of quantum mechanics, but is still necessary to discuss in a study of quantum computation. The latter is of unparalleled importance in both quantum mechanics and quantum computation. If you take away only one concept from this section on linear algebra, it should be the concept of a unitary matrix.
#
# A Hermitian matrix is simply a matrix that is equal to its **conjugate transpose** (denoted with a $\dagger$ symbol). This means that flipping the sign of a Hermitian matrix's imaginary components, then reflecting its entries along its main diagonal (from the top left to bottom right corners), produces an equal matrix. For instance, the Pauli-Y matrix, commonly used in quantum computation, is Hermitian:
#
# <br>
#
#
# $$\sigma_y \ = \ \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix} \ \Rightarrow \ \sigma_y^{\dagger} \ = \ \begin{pmatrix} 0 & -(i) \\ -(-i) & 0 \end{pmatrix} \ = \ \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix} \ = \ \sigma_y$$
#
#
# <br>
#
# Notice how we switched the places of the $i$ and the $-i$ (as we reflect across the main diagonal, the zeroes remain unchanged), and then flipped the sign.
#
# A unitary matrix is very similar. Specifically, it is a matrix such that the **inverse matrix** is equal to the conjugate transpose of the original matrix.
#
# The inverse of some matrix $A$, denoted as $A^{-1}$, is a matrix such that:
#
# <br>
#
#
# $$A^{-1} A \ = \ A A^{-1} \ = \ \mathbb{I}$$
#
#
# <br>
#
# where $\mathbb{I}$ is the identity matrix. The identity matrix has $1$s along the main diagonal (top left to bottom right), and $0$s in all other places. It is called the identity matrix because it acts trivially on any other matrix (it has no effect). You can prove this on your own by multiplying an identity matrix by any other matrix.
#
# When matrices get larger than $2 \ \times \ 2$, calculating the inverse becomes sufficiently complicated that it is usually left to computers to calculate. For a $2 \ \times \ 2$ matrix, the inverse is defined as:
#
# <br>
# $$A \ = \ \begin{pmatrix} a & b \\ c & d \end{pmatrix} \ \Rightarrow \ A^{-1} \ = \ \frac{1}{\text{det} \ A} \begin{pmatrix} d & -b \\ -c & a \end{pmatrix},$$
# <br>
#
# where $\text{det} \ A$ is the **determinant** of the matrix. In the $2 \ \times \ 2$ case, $\text{det} \ A \ = \ ad \ - \ bc$.
#
# Calculating inverse matrices is rarely important in quantum computing. Since most of the matrices we encounter are unitary, we can assume that the inverse is simply given by taking the conjugate transpose.
#
# Let's look at a basic example. The Pauli-Y matrix, in addition to being Hermitian, is also unitary (it is equal to its conjugate transpose, which is also equal to its inverse; therefore, the Pauli-Y matrix is its own inverse!). We can verify that this matrix is in fact unitary:
#
# <br>
#
#
# $$\sigma_y \ = \ \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix} \ \ \ \ \ \sigma_y^{\dagger} \ = \ \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix} \ \Rightarrow \ \sigma_y^{\dagger} \sigma_y \ = \ \begin{pmatrix} (0)(0) + (-i)(i) & (0)(-i) \ + \ (-i)(0) \\ (i)(0) \ + \ (0)(i) & (i)(-i) \ + \ (0)(0) \end{pmatrix} \ = \ \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix} \ = \ \mathbb{I}$$
#
#
# <br>
#
# The reason unitary matrices are important will become more apparent in the section on Hilbert spaces, and more so in the quantum mechanics subtopic of this textbook. The basic idea is that evolution of a quantum state by application of a unitary matrix "preserves" the quantum state.
#
# **Spanning Sets, Linear Dependence, and Bases**
#
# We are now in a position to discuss the construction of vector spaces. Consider some vector space $V$. We say that some set of vectors $S$ spans a subspace $V_S \ \subset \ V$ (subset closed under vector space operations) of the vector space, if we can write any vector in the subspace as a **linear combination** of vectors contained within the spanning set.
#
# A linear combination of some collection of vectors $|v_1\rangle, \ ..., \ |v_n\rangle$ in some vector space over a field $F$ is defined as an arbitrary sum of these vectors (which of course will be another vector that we will call $|v\rangle$):
#
# <br>
#
#
# $$|v\rangle \ = \ f_1 |v_1\rangle \ + \ f_2 |v_2\rangle \ + \ ... \ + \ f_n |v_n\rangle \ = \ \displaystyle\sum_{i} \ f_i |v_i\rangle$$
#
#
# <br>
#
# where each $f_i$ is some element of $F$. If we have a set of vectors that spans a space, we are saying that **any other vector** in the vector space can be written as a linear combination of these vectors.
#
# A set of vectors $|v_1\rangle, \ ..., \ |v_n\rangle$ is said to be **linearly dependent** if there exist corresponding coefficients for each vector, $b_i \ \in \ F$, such that:
#
# <br>
#
# $$b_1 |v_1\rangle \ + \ b_2 |v_2\rangle \ + \ ... \ + \ b_n |v_n\rangle \ = \ \displaystyle\sum_{i} \ b_i |v_i\rangle \ = \ 0,$$
#
# <br>
#
# where at least one of the $b_i$ coefficients is non-zero. This is equivalent to the more intuitive statement that "the set of vectors can be expressed as linear combinations of each other". For example, let us have the set $\{|v_1\rangle, \ ..., \ |v_n\rangle \}$ along with the corresponding coefficients $\{b_1, \ ..., \ b_n \}$, such that the linear combination is equal to $0$. Since there is at least one vector with a non-zero coefficient, we choose a term in the linear combination $b_a |v_a\rangle$:
#
# <br>
#
#
# $$\displaystyle\sum_{i} \ b_i |v_i\rangle \ = \ b_a |v_a\rangle \ + \ \displaystyle\sum_{i, \ i \ \neq \ a} \ b_i |v_i\rangle \ = \ 0 \ \Rightarrow \ |v_a\rangle \ = \ - \displaystyle\sum_{i, \ i \ \neq \ a} \ \frac{b_i}{b_a} |v_i\rangle \ = \ \displaystyle\sum_{i, \ i \ \neq \ a} \ c_i |v_i\rangle$$
#
#
# <br>
#
# In the case that $b_a$ is the only non-zero coefficient, it is necessarily true that $|v_a\rangle$ is the null vector, automatically making the set linearly dependent. If this is not the case, $|v_a\rangle$ has been written as a linear combination of non-zero vectors, as was shown above. To prove the converse, we assume that there exists some vector $|v_a\rangle$ in the subspace $|v_1\rangle, ..., \ |v_n\rangle$ that can be written as a linear combination of other vectors in the subspace. This means that:
#
# <br>
#
# $$|v_a\rangle \ = \ \displaystyle\sum_{s} b_s |v_s\rangle$$
#
# <br>
#
# where $s$ is an index that runs over a subset of the subspace. It follows that:
#
# <br>
#
#
# $$|v_a\rangle \ - \ \displaystyle\sum_{s} b_s |v_s\rangle \ = \ |v_a\rangle \ - \ (|v_{s_1}\rangle \ + \ ... \ + \ |v_{s_r}\rangle) \ = \ 0$$
#
#
# <br>
#
# For all vectors in the subspace that are not included in the subset indexed by $s$, we set their coefficients, indexed by $q$, equal to $0$. Thus,
#
# <br>
#
# $$|v_a\rangle \ - \ (|v_{s_1}\rangle \ + \ ... \ + \ |v_{s_r}\rangle) \ + \ (0)(|v_{q_1}\rangle \ + \ ... \ + \ |v_{q_t}\rangle) \ = \ 0$$
#
# <br>
#
# which is a linear combination of all elements in the subspace $|v_1\rangle, \ ..., \ |v_n\rangle$. This is equal to $0$, thus completing the proof that the two definitions of linear dependence imply each other.
#
# Let's now consider a basic example. Consider the set of two vectors in $\mathbb{R}^2$, consisting of $|a\rangle \ = \ \begin{pmatrix} 1 \\ 0 \end{pmatrix}$ and $|b\rangle \ = \ \begin{pmatrix} 2 \\ 0 \end{pmatrix}$. If we choose the field over our vector space to be $\mathbb{R}$, then we can create a linear combination of these vectors that equates to $0$. For example:
#
# <br>
#
#
# $$2|a\rangle \ - \ |b\rangle \ = \ 0$$
#
#
# <br>
#
# A set of vectors is said to be **linearly independent** if there is no vector in the set that can be expressed as a linear combination of all the others.
#
# The notion of a **basis** is simply a **linearly independent spanning set**. In this sense, the basis of a vector space is the minimal possible set that spans the entire space. We call the size of the basis set the **dimension** of the vector space.
#
# Bases and spanning sets are important because they allow us to "shrink down" vector spaces and express them in terms of only a few vectors. We can come to certain conclusions about our basis set that we can generalize to the entire vector space, simply because we know every vector in the space is just a linear combination of the basis vectors.
#
# In quantum computation, one of the bases that we often encounter is $|0\rangle, \ |1\rangle$. We can write any other qubit state as a linear combination of these basis vectors. For instance, the linear combination
#
# <br>
#
#
# $$\frac{|0\rangle \ + \ |1\rangle}{\sqrt{2}}$$
#
#
# <br>
#
# represents a superposition between the $|0\rangle$ and $|1\rangle$ basis state, with equal probability of measuring the state to be in either one of the basis vector states (this is intuitive, as the "weight" or the "amount of each basis vector" in the linear combination is equal, both being scaled by $1/\sqrt{2}$).
#
# **Hilbert Spaces, Orthonormality, and the Inner Product**
#
# Hilbert Spaces are one of the most important mathematical constructs in quantum mechanics and quantum computation. A Hilbert space can be thought of as the state space in which all quantum state vectors "live". The main difference between a Hilbert space and any random vector space is that a Hilbert space is equipped with an **inner product**, which is an operation that can be performed between two vectors, returning a scalar.
#
# In the context of quantum mechanics and quantum computation, the inner product between two state vectors returns a scalar quantity representing the amount to which the first vector lies along the second vector. From this, the probabilities of measurement in different quantum states (among other things) can be calculated (this will be discussed more in the quantum mechanics subtopic).
#
# For two vectors $|a\rangle$ and $|b\rangle$ in a Hilbert space, we denote the inner product as $\langle a | b \rangle$, where $\langle a |$ is equal to the conjugate transpose of $|a\rangle$, denoted $|a\rangle^{\dagger}$. Thus, the inner product between two vectors of the Hilbert space looks something like:
#
# <br>
#
# $$\langle a | b \rangle \ = \ \begin{pmatrix} a_1^{*} & a_2^{*} & ... & a_n^{*} \end{pmatrix} \begin{pmatrix} b_1 \\ b_2 \\ . \\ . \\ . \\ b_n \end{pmatrix} \ = \ a_1^{*} b_1 \ + \ a_2^{*} b_2 \ + \ ... \ + \ a_n^{*} b_n$$
#
# <br>
#
# where $*$ denotes the complex conjugate of the vector.
#
# One of the most important conditions for a Hilbert space representing a quantum system is that the inner product of a vector with itself is equal to one: $\langle \psi | \psi \rangle \ = \ 1$. This is the so-called normalization condition, which states that the length of the vector squared (each component of the vector is squared and summed together, by definition of the inner product) must be equal to one. The physical significance of this is that the length of a vector in a particular direction is representative of the "probability amplitude" of the quantum system with regards to measurement in that particular state. Obviously, the probability of the quantum system being measured in the state that it is in must be $1$ (after all, the sum of the probabilities of finding the quantum system in any particular state must equal $1$).
#
# Let's consider the Bloch sphere:
# Zero-length vector: presumably intended to render an empty Bloch sphere so
# the state space itself can be examined -- confirm this is deliberate.
plot_bloch_vector([0, 0, 0])
# The surface of this sphere, along with the inner product between qubit state vectors, is a valid Hilbert space. In addition, the normalization condition holds true, as the radius of the Bloch sphere is $1$, and thus the length squared of each vector must also equal $1$.
#
# A final note regarding Hilbert spaces and the inner product is their relationship to **unitary matrices**. Unitary matrices are important in quantum computation because they **preserve the inner product**, meaning that no matter how you transform a vector under a sequence of unitary matrices, the normalization condition still holds true. This can be demonstrated in the following short proof:
#
# <br>
#
#
# $$\langle \psi | \psi \rangle \ = \ 1 \ \Rightarrow \ |\psi\rangle \ \rightarrow \ U |\psi\rangle \ = \ |\psi'\rangle \ \Rightarrow \ \langle \psi' | \psi' \rangle \ = \ (U |\psi\rangle)^{\dagger} U|\psi\rangle \ = \ \langle \psi | U^{\dagger} U |\psi\rangle \ = \ \langle \psi | \psi \rangle \ = \ 1$$
#
#
# <br>
#
# This means that unitary evolution sends quantum states to other valid quantum states. For a single-qubit Hilbert space, represented by the Bloch sphere, unitary transformations correspond to rotations of state vectors to different points on the sphere, not changing the length of the state vector in any way.
#
# **Eigenvectors and Eigenvalues**
#
# Consider the relationship of the form:
#
# $$A |v\rangle \ = \ \lambda |v\rangle,$$
#
# where $A$ is a matrix, and $\lambda$ is some number. If we are given some matrix $A$, and need to find the vectors $|v\rangle$ and numbers $\lambda$ that satisfy this relationship, we call these vectors **eigenvectors**, and their corresponding number multipliers **eigenvalues**. Eigenvectors and eigenvalues have very important physical significance in the context of quantum mechanics, and therefore quantum computation. Given some $A$, we exploit an interesting trick in order to find the set of eigenvectors and corresponding eigenvalues. Let us rearrange our equation as:
#
# <br>
#
#
# $$A |v\rangle \ - \ \lambda |v\rangle \ = 0 \ \Rightarrow \ (A \ - \ \lambda \mathbb{I}) |v\rangle \ = \ 0$$
#
#
# <br>
#
# If we multiply both sides of this equation by the inverse matrix $(A \ - \ \lambda \mathbb{I})^{-1}$, we get $|v\rangle \ = \ 0$. This is an extraneous solution (we don't allow eigenvectors to be the null vector, or else any eigenvalue/matrix combination would satisfy the eigenvector-eigenvalue relationship). Thus, in order to find the allowed eigenvectors and eigenvalues, we have to assume that the matrix $(A \ - \ \lambda \mathbb{I})$ is **non-invertible**. Recall from earlier that the inverse of a matrix is of the form:
#
# <br>
# $$M^{-1} \ = \ \frac{1}{\text{det} (M)} \ F(M),$$
# <br>
#
# where $F(M)$ is some new matrix (the particulars of which do not matter in this context) that depends on $M$. The part of this equation in which we are interested is the inverse of the determinant. If the determinant of the matrix $M$ is $0$, its reciprocal is undefined, and thus so is the inverse matrix $M^{-1}$, making the matrix $M$ non-invertible. We therefore require that:
#
# <br>
#
#
# $$\text{det} (A \ - \ \lambda \mathbb{I}) \ = \ 0$$
#
#
# <br>
#
# From this, we can determine $\lambda$, then we plug each value of $\lambda$ back into the original equation to get the eigenvectors. Let's do an example, and find the eigenvectors/eigenvalues of the Pauli-Z matrix, $\sigma_z$. We start with:
#
# <br>
#
#
# $$\text{det} (\sigma_z \ - \ \lambda \mathbb{I}) \ = \ \text{det} \begin{pmatrix} 1 \ - \ \lambda & 0 \\ 0 & -1 \ - \ \lambda \end{pmatrix} \ = \ (-1 \ - \ \lambda)(1 \ - \ \lambda) \ = \ 1 \ - \ \lambda^2 \ = \ 0 \ \Rightarrow \ \lambda \ = \ \pm 1$$
#
#
# <br>
#
# The equation, in terms of $\lambda$, that results when solving the determinant is called the **characteristic polynomial**. We can then plug each of these values back into the original equation. We'll start with $\lambda \ = \ 1$:
#
# <br>
#
#
# $$\begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix} |v\rangle \ = \ |v\rangle \ \Rightarrow \ \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix} \begin{pmatrix} a \\ b \end{pmatrix} \ = \ \begin{pmatrix} a \\ b \end{pmatrix} \ \Rightarrow \begin{pmatrix} a \\ -b \end{pmatrix} \ = \ \begin{pmatrix} a \\ b \end{pmatrix}$$
#
#
# <br>
#
# $a$ can be any number, and $b$ is $0$; thus, the vector $\begin{pmatrix} 1 \\ 0 \end{pmatrix}$ forms a basis for all vectors that satisfy our relationship, and is therefore the eigenvector that corresponds to the eigenvalue of $1$. We do the same thing for $\lambda \ = \ -1$:
#
# <br>
#
#
# $$\begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix} |v\rangle \ = \ -|v\rangle \ \Rightarrow \ \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix} \begin{pmatrix} a \\ b \end{pmatrix} \ = \ \begin{pmatrix} -a \\ -b \end{pmatrix} \ \Rightarrow \begin{pmatrix} a \\ -b \end{pmatrix} \ = \ \begin{pmatrix} -a \\ -b \end{pmatrix}$$
#
#
# <br>
#
# This time, $b$ can be any number, and $a$ is $0$; thus, our basis vector (and our eigenvector corresponding to $-1$) is $\begin{pmatrix} 0 \\ 1 \end{pmatrix}$. Notice how the eigenvectors of the Pauli-Z matrix are the quantum computational basis states $|0\rangle$ and $|1\rangle$. This is no coincidence. For instance, when we measure a qubit in the $Z$-basis, we are referring to a measurement that collapses the qubit's state into one of the eigenvectors of the Z matrix, either $|0\rangle$ or $|1\rangle$.
#
# **Matrix Exponentials**
#
#
# The notion of a matrix exponential is a very specific yet extremely important concept. We often see unitary transformations in the form:
#
# <br>
# $$U \ = \ e^{i\gamma H},$$
# <br>
#
# where $H$ is some Hermitian matrix and $\gamma$ is some real number. It is fairly simple to prove that all matrices of this form are unitary. Taking the conjugate transpose of $U$, we get:
#
# <br>
#
#
# $$U^{\dagger} \ = \ \Big( e^{i\gamma H} \Big)^{\dagger} \ = \ e^{-i \gamma H^{\dagger}}$$
#
#
# <br>
#
# But since $H$ is Hermitian, we know that $H^{\dagger} \ = \ H$, thus:
#
# <br>
#
#
# $$e^{-i \gamma H^{\dagger}} \ = \ e^{-i \gamma H} \ \Rightarrow \ U^{\dagger} U \ = \ e^{-i \gamma H} e^{i\gamma H} \ = \ \mathbb{I}$$
#
#
# <br>
#
# You may wonder why a matrix inside of an exponential can still be considered a matrix. The answer becomes clearer when we expand our exponential function as a Taylor series. Recall from calculus that a Taylor series is essentially a way to write any function as an infinite-degree polynomial, and the main idea is to choose the terms of the polynomial and center it at some point $x_0$ lying on the function we are trying to transform into the polynomial, such that the zeroth, first, second, third, etc. derivative is the same for both the original function and the polynomial. Thus, we write our Taylor series in the form:
#
# <br>
# $$g(x) \ = \ \displaystyle\sum_{n \ = \ 0}^{\infty} \ f^{(n)}(x_0) \ \frac{(x \ - \ x_0)^n}{n!},$$
# <br>
#
# where $g(x)$ is the polynomial, $f(x)$ is the original function, $f^{(n)}$ is the $n$-th derivative of $f$, and $x_0$ is the point at which we center the function. Since we are not approximating, $x_0$ doesn't matter, so for simplicity, we choose $x_0 \ = \ 0$, and the Taylor series becomes a Maclaurin series:
#
# <br>
#
#
# $$g(x) \ = \ \displaystyle\sum_{n \ = \ 0}^{\infty} \ f^{(n)}(0) \ \frac{x^n}{n!}$$
#
#
# <br>
#
# If we choose $f(x) \ = \ e^x$, we can create an equivalent polynomial using the Maclaurin series. Since the derivative of $e^x$ is simply $e^x$, and evidently, $e^0 \ = \ 1$, we get:
#
# <br>
#
#
# $$g(x) \ = \ \displaystyle\sum_{n \ = \ 0}^{\infty} \ \frac{x^n}{n!} \ = \ e^x$$
#
#
# <br>
#
# Thus, for some matrix, $i \gamma H$, we get:
#
# <br>
#
#
# $$e^{i \gamma H} \ = \ \displaystyle\sum_{n \ = \ 0}^{\infty} \ \frac{(i \gamma H)^n}{n!}$$
#
#
# <br>
#
# Therefore, the exponential of a matrix is a matrix. It is an infinite sum of powers of matrices, which admittedly looks overly complex...but the point here is that the matrix exponential is indeed a matrix.
#
# We are now in a position to demonstrate a very important fact: if we have some matrix $B$ such that $B^2 \ = \ \mathbb{I}$ (this is called an **involutory matrix**), then:
#
# <br>
#
#
# $$e^{i \gamma B} \ = \ \cos(\gamma) \mathbb{I} \ + \ i \sin(\gamma) B$$
#
#
# <br>
#
# We start with the Maclaurin series:
#
# <br>
#
#
# $$e^{i \gamma B} \ = \ \displaystyle\sum_{n \ = \ 0}^{\infty} \ \frac{(i \gamma B)^n}{n!}$$
#
#
# <br>
#
# Notice that we can split the summation into an imaginary part and a real part, based on whether $n$ is even or odd in each term of the sum:
#
# <br>
#
#
# $$\displaystyle\sum_{n \ = \ 0}^{\infty} \ \frac{(i \gamma B)^n}{n!} \ = \ \displaystyle\sum_{n \ = \ 0}^{\infty} \ \frac{(-1)^n \gamma^{2n} B^{2n}}{(2n)!} \ + \ i \displaystyle\sum_{n \ = \ 0}^{\infty} \frac{(-1)^n \gamma^{2n + 1} B^{2n + 1}}{(2n + 1)!}$$
#
#
# <br>
#
# Now, let us find the Maclaurin series for both $\sin x$ and $\cos x$. We'll start with $f(x) \ = \ \sin x$:
#
# <br>
#
#
# $$\sin x \ = \ \displaystyle\sum_{n \ = \ 0}^{\infty} \ f^{n}(0) \frac{x^n}{n!}$$
#
#
# <br>
#
# The derivative of $\sin x$ is **cyclical** in a sense (each arrow represents taking the derivative of the previous function):
#
# <br>
#
#
# $$\sin x \ \rightarrow \ \cos x \ \rightarrow \ -\sin x \ \rightarrow \ -\cos x \ \rightarrow \ \sin x$$
#
#
# <br>
#
# Since $\sin (0) \ = \ 0$ and $\cos (0) \ = \ 1$, all terms with even $n$ become $0$, and we get:
#
# <br>
#
#
# $$\displaystyle\sum_{n \ = \ 0}^{\infty} \ f^{n}(0) \frac{x^n}{n!} \ = \ \displaystyle\sum_{n \ = \ 0}^{\infty} \ \frac{(-1)^n x^{2n \ + \ 1}}{(2n \ + \ 1)!}$$
#
#
# <br>
#
# This looks similar to the odd term of our original equation. In fact, if we let $x \ = \ \gamma B$, they are exactly the same. We follow a process that is almost identical to show that the even terms are the same as the Maclaurin series for $f(x) \ = \ \cos x$:
#
# <br>
#
#
# $$\cos x \ = \ \displaystyle\sum_{n \ = \ 0}^{\infty} \ f^{n}(0) \frac{x^n}{n!}$$
#
#
# <br>
#
#
# $$\Rightarrow \ \cos x \ \rightarrow \ -\sin x \ \rightarrow \ -\cos x \ \rightarrow \ \sin x \ \rightarrow \ \cos x$$
#
#
# <br>
#
#
# $$\Rightarrow \ \displaystyle\sum_{n \ = \ 0}^{\infty} \ f^{n}(0) \frac{x^n}{n!} \ = \ \displaystyle\sum_{n \ = \ 0}^{\infty} \ \frac{(-1)^n x^{2n}}{(2n)!}$$
#
#
# <br>
#
# Let us go back to the original equation. Recall that $B^2 \ = \ \mathbb{I}$. For any $n$, we have:
#
# <br>
#
#
# $$B^{2n} \ = \ \big( B^2 \big)^n \ = \ \mathbb{I}^n \ = \ \mathbb{I}$$
#
#
# <br>
#
#
# $$B^{2n \ + \ 1} \ = \ B \ \big( B^2 \big)^n \ = \ B \ \mathbb{I}^n \ = \ B \ \mathbb{I} \ = \ B$$
#
#
# <br>
#
# Substituting in this new information, we get:
#
# <br>
#
#
# $$\displaystyle\sum_{n \ = \ 0}^{\infty} \ \frac{(-1)^n \gamma^{2n} B^{2n}}{(2n)!} \ + \ i \displaystyle\sum_{n \ = \ 0}^{\infty} \frac{(-1)^n \gamma^{2n + 1} B^{2n + 1}}{(2n + 1)!} \ = \ \mathbb{I} \displaystyle\sum_{n \ = \ 0}^{\infty} \ \frac{(-1)^n \gamma^{2n}}{(2n)!} \ + \ i B \displaystyle\sum_{n \ = \ 0}^{\infty} \frac{(-1)^n \gamma^{2n + 1}}{(2n + 1)!} \ = \ \cos (\gamma) \mathbb{I} \ + \ i \sin (\gamma) B$$
#
#
# <br>
#
# This fact is extremely useful in quantum computation. Consider the Pauli matrices:
#
# <br>
#
#
# $$\sigma_x \ = \ \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix}$$
#
#
# <br>
#
#
# $$\sigma_y \ = \ \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix}$$
#
#
# <br>
#
#
# $$\sigma_z \ = \ \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix}$$
#
#
# <br>
#
# These matrices are among the fundamental "quantum gates" used to manipulate qubits. These operations are not only unitary, they are also **Hermitian** and **Involutory**. This means that a matrix of the form $e^{i \gamma \sigma_k} \ k \ \in \ \{x, \ y, \ z\}$ is not only a valid unitary matrix that can act upon a quantum state vector (a qubit), but it can be expressed using the sine-cosine relationship that we just proved. This is very powerful, and is seen throughout quantum computational theory, as gates of this type are used all the time.
#
# One last important fact about matrix exponentials: if we have some matrix $M$, with eigenvectors $|v\rangle$ and corresponding eigenvalues $v$, then:
#
# <br>
#
#
# $$e^{M} |v\rangle \ = \ e^v |v\rangle$$
#
#
# <br>
#
# This one is much more straightforward to prove:
#
# <br>
#
#
# $$e^M |v\rangle \ = \ \displaystyle\sum_{n \ = \ 0}^{\infty} \ \frac{M^n |v\rangle}{n!} \ = \ \displaystyle\sum_{n \ = \ 0}^{\infty} \ \frac{v^n |v\rangle}{n!} \ = \ e^v |v\rangle$$
#
#
# <br>
#
# This fact is also very useful. When creating quantum circuits that simulate a certain Hamiltonian (especially for variational circuits), we frequently use gates of the form $e^{i \gamma \sigma_z}$. Since $|0\rangle$ and $|1\rangle$ are eigenvectors of $\sigma_z$, we can easily determine mathematically that $e^{i \gamma \sigma_z}$ will add a phase of $e^{i \gamma}$ to $|0\rangle$, and will add a phase of $e^{-i\gamma}$ to $|1\rangle$. We can then construct this gate in terms of $CNOT$ and phase/rotation gates fairly easily, as we know the mathematical outcome of the gate on each of the computational basis states.
#
# This fact doesn't only apply to exponentials of the $\sigma_z$ gate. For example, we can determine the outcome of a gate of the form $e^{i \gamma \sigma_x}$ on the eigenvectors of $\sigma_x$, $(|0\rangle \ + \ |1\rangle)/\sqrt{2}$ and $(|0\rangle \ - \ |1\rangle)/\sqrt{2}$. The same applies to exponentials of the $\sigma_y$ matrix.
# ## References
# [1] <NAME>. “A Memoir on the Theory of Matrices.” Philosophical Transactions of the Royal Society of London, vol. 148, 1858, pp. 17–37. JSTOR.
#
# [2] A New Branch of Mathematics: The Ausdehnungslehre of 1844 and Other Works: <NAME>, <NAME>: 9780812692761
| content/ch-appendix/linear_algebra.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Coronavirus progression dashboard - Overview
# - Objective is to build a tool to track the progression of the Covid-19 pandemic, with up to date and reliable data
# - Data comes from Johns Hopkins University, the source usually used in media outlets (e.g. ABC news, Le Monde...)
#
# The reason for this dashboard to exist is that the data usually available is either:
# - incomplete (e.g. limited number of countries, limited time period)
# - poorly presented (e.g. different scales for different countries, lack of context)
# - not granular enough (e.g. no breakdown by state for the US or Australia)
#
# This dashboard uses all data available from Johns Hopkins and offers the following functionalities:
# - Easy to use: the output is an .html file that can simply be opened in a web browser. No installation of any sort is required\
# - Easy to share: the .html file can simply be sent by email
# - Free: no licence is required to use and manipulate the dashboard
# - Easy to update with new data: the update process takes ~15s
# - Comprehensive: all time series available from Johns Hopkins are included
# - Flexible: the user can decide what to display and how to display it
#
# The dashboard is a very flexible tool and the user can play with the following:
# List of available metrics:
# - Number of cumulated Confirmed cases, by day or week
# - Number of cumulated Deaths, by day or week
# - Number of Confirmed cases, by day or week
# - Number of Deaths, by day or week
# - 7-days Rolling averages of Confirmed cases, by day or week
# - 7-days Rolling averages of Deaths, by day or week
#
# Data can be displayed by Country, State, or filtered on a selection of countries or states. For instance, it is possible to display the evolution of the total number of cases by State in the US, or by Country in Europe, or the totals worldwide.
# Data can also be filtered by date (e.g. display only the latest date to see the total number of currently confirmed cases)
#
# Users can also choose how to visualize the data:
# - Line chart of the 7-day rolling average of confirmed cases to see the trend of a selection of countries
# - Bar chart of the death rate by country
# - Many more visualizations are available (tables, heatmaps, area charts...)
# # Import and display options
import pandas as pd
import numpy as np
from pivottablejs import pivot_ui
# Pandas options
pd.options.display.max_rows = 999  # show up to 999 rows before truncating output
pd.set_option('expand_frame_repr', False)  # keep wide frames on one line instead of wrapping
# jupyter options
from IPython.core.interactiveshell import InteractiveShell
# echo every expression in a cell, not just the last one
InteractiveShell.ast_node_interactivity = "all"
# %matplotlib inline
# # Define functions
# - string_to_date turns a string column into datetimes in a pandas DataFrame
# - read_and_stack reads the global data and stacks it for further manipulation
# - read_and_stack_US reads the US data and stacks it for further manipulation
# +
def _string_to_date(s, format):
"""
This is an extremely fast approach to datetime parsing.
For large data, the same dates are often repeated. Rather than
re-parse these, we store all unique dates, parse them, and
use a lookup to convert all dates.
"""
dates = {date:pd.to_datetime(date, format = format) for date in s.unique()}
return s.map(dates)
def _read_and_stack(file):
    """Read one global Johns Hopkins time series and return it in long format.

    The wide CSV (one column per date) is stacked so that each row is one
    (Province/State, Country/Region, Date, Week) observation — the shape
    expected when building a dashboard with pivottablejs.

    Parameters
    ----------
    file: String
        the file to import, from the COVID-19 repository on github:
        https://github.com/CSSEGISandData/COVID-19.git

    Returns
    -------
    pandas Series named after ``file``, indexed by
    ['Province/State', 'Country/Region', 'Date', 'Week']

    Examples
    --------
    confirmed = _read_and_stack("Confirmed")
    """
    path = rf"csse_covid_19_data\csse_covid_19_time_series\time_series_covid19_{file}_global.csv"
    stacked = (
        pd.read_csv(path)
        .reset_index()
        .drop(["index", "Lat", "Long"], axis=1)
        .set_index(['Province/State', 'Country/Region'])
        .stack()
        .reset_index()
    )
    # "level_2" holds the original date-column labels after stacking.
    stacked["Date"] = _string_to_date(stacked["level_2"], "%m/%d/%y")
    # Week key in YYYYWW form, e.g. 202013 for week 13 of 2020.
    stacked["Week"] = stacked.Date.dt.year * 100 + stacked.Date.dt.week
    stacked = (
        stacked
        .set_index(['Province/State', 'Country/Region', 'Date', 'Week'])
        .drop("level_2", axis=1)
    )
    stacked.columns = [file]
    return stacked[file]
def _read_and_stack_US(file):
    """Read one US Johns Hopkins time series and return it in long format.

    Same idea as ``_read_and_stack`` but for the US files, which carry extra
    geographic metadata and an additional county level ("Admin2"); each row
    of the result is one (Province/State, Country/Region, County, Date,
    Week) observation.

    Parameters
    ----------
    file: String
        the file to import, from the COVID-19 repository on github:
        https://github.com/CSSEGISandData/COVID-19.git

    Returns
    -------
    pandas Series named after ``file``, indexed by
    ['Province/State', 'Country/Region', 'County', 'Date', 'Week']

    Examples
    --------
    confirmed = _read_and_stack_US("Confirmed")
    """
    path = rf"csse_covid_19_data\csse_covid_19_time_series\time_series_covid19_{file}_US.csv"
    renames = {"Province_State": "Province/State",
               "Country_Region": "Country/Region",
               "Admin2": "County"}
    # Metadata columns that appear only in some US files; errors="ignore"
    # lets one drop list serve them all.
    unused = ["index", "Lat", "Long", "Long_", "UID", "iso2", "iso3",
              "code3", "FIPS", "Combined_Key", "Population"]
    stacked = (
        pd.read_csv(path)
        .reset_index()
        .rename(columns=renames)
        .drop(unused, axis=1, errors="ignore")
        .set_index(['Province/State', 'Country/Region', 'County'])
        .stack()
        .reset_index()
    )
    # "level_3" holds the original date-column labels after stacking.
    stacked["Date"] = _string_to_date(stacked["level_3"], "%m/%d/%y")
    stacked["Week"] = stacked.Date.dt.year * 100 + stacked.Date.dt.week
    stacked = (
        stacked
        .set_index(['Province/State', 'Country/Region', "County", 'Date', 'Week'])
        .drop("level_3", axis=1)
    )
    stacked.columns = [file]
    return stacked[file]
# df = _read_and_stack("Confirmed")
# # df.columns
# df.head()
# -
# # Covid-19 progression dashboard
# Per country, province and date: confirmed, death, new confirmed and new deaths
# ## Prepare the data
# +
# 1. Build a single DataFrame with the count of Confirmed cases and the
#    count of Deaths by country, province and date
# 1.1 Global data
files=["Confirmed", "Deaths"]
df_global = pd.DataFrame()
for file in files:
    df_global[file] = _read_and_stack(file)
df_global = df_global.reset_index()
# 1.2 US data
# a. Read raw data - county level
df_US = pd.DataFrame()
for file in files:
    df_US[file] = _read_and_stack_US(file)
df_US = df_US.reset_index()
# b. aggregate at State level to be consistent with the global data
df_US = (df_US
         .groupby(['Country/Region', 'Province/State', 'Date', 'Week'])
         .sum()
         .reset_index()
        )
# 2. Clean up the data
# 2.1 Compile Global and US data: drop the US rows from the global file and
#     replace them with the state-level US data
df = pd.concat([ df_global[df_global["Country/Region"] != "US"]
               , df_US]
              , sort=True
              )
df = df[["Country/Region", "Province/State", "Date", "Week", "Confirmed", "Deaths"]]
# 2.2 Replace missing Province/State by the name of the country
""" When Province/State is mussing , it refers to the main bulk of
the country. e.g for France, when Province/State is missing the data refers
to metropolitan France, as oppose to some specific Province
"""
df['Province/State'] = df['Province/State'].fillna(df['Country/Region'])
# 2.3 sort by Country, Province and date
df = (df
      .sort_values(['Country/Region', "Province/State", "Date"])
      .reset_index()
      .drop("index", axis=1)
      )
# 3. Add the mortality rate, number of new confirmed and number of new deaths
"""
rate = death / confirmed
new_confirmed = daily count of confirmed cases
new_deaths = dailty count of deaths
"""
# 3 Add the mortality rate, number of new confirmed and number of new deaths
df["rate"] = (df.Deaths / df.Confirmed).fillna(0)
# diff() within each country/state series turns cumulative totals into
# daily counts; the first day of each series becomes 0 via fillna.
df["New Confirmed"] = df.groupby(['Country/Region', "Province/State"]).Confirmed.diff().fillna(0)
df["New Deaths"] = df.groupby(['Country/Region', "Province/State"]).Deaths.diff().fillna(0)
# 4. Add rolling averages for New cases and new deaths
# NOTE(review): assigning rolling.reset_index() back by position relies on
# df being sorted by Country/Province/Date (done in 2.3) so that row order
# lines up with the groupby output — verify if the sort above ever changes.
rolling = df.groupby(['Country/Region', "Province/State"])["New Confirmed", "New Deaths"].rolling(7).mean()
rolling.columns = ["New Confirmed Rolling", "New Deaths Rolling"]
# df = df.set_index(['Country/Region', "Province/State"])
df["New Confirmed Rolling"] = rolling.reset_index()["New Confirmed Rolling"]
df["New Deaths Rolling"] = rolling.reset_index()["New Deaths Rolling"]
# 5. Add continent
df_pop = pd.read_csv("population.csv")
df = pd.merge(df, df_pop[["Country/Region", "Continent"]], on="Country/Region")
# df.loc[df["Country/Region"] == "US"]
# -
# ## Export the dashboard with pivottablejs.pivot_ui
# Export the interactive table / chart as a standalone HTML file that can be
# opened in any browser (no server required)
pivot_ui(df,outfile_path="State_level_data.html")
# # Rescaled dashboard to compare progression per country
# ## Rescale to log scale and start when the number of confirmed cases reaches 30
# +
# Get country-level totals, rescaled for cross-country comparison:
# - keep only days from the 30th confirmed case onward, so every country's
#   curve starts at a comparable point
# - add log-scale columns so exponential growth shows as a straight line
# - replace "Date" by the number of days since the 30-case threshold
df2 = df.groupby(["Country/Region", "Date"]).sum()
df2 = df2.loc[df2.Confirmed >= 30]
df2["log_Confirmed"] = np.log(df2["Confirmed"])
df2["log_Deaths"] = np.log(df2["Deaths"])
# np.log(0) yields -inf (countries with no deaths yet); zero those out
df2 = df2.replace([np.inf, -np.inf], 0)
df2 = df2.reset_index()
df2["Date_ini"] = df2["Date"]  # keep the calendar date for reference
df2["Date"] = df2.groupby(["Country/Region"]).cumcount()  # days since threshold
df2 = (df2
       .set_index(["Country/Region", "Date", "Date_ini"])
       .loc[:, ["Confirmed", "log_Confirmed", "Deaths", "log_Deaths"]]
      )
# The mortality rate must be recomputed from the country totals: the
# per-state "rate" column summed by the groupby above is meaningless.
# (The original also computed a "Rate" column that was immediately dropped
# by the column selection — dead code, removed.)
df2["rate"] = df2.Deaths / df2.Confirmed
# -
# ## Export the dashboard with pivottablejs.pivot_ui
# Export the interactive table / chart (rescaled country comparison)
pivot_ui(df2,outfile_path="country_data_comparison_rescaled.html")
# # Cases per Million
# +
# Sum per country
df3 = df.groupby(["Country/Region", "Date"]).sum().reset_index()
# Add population and continent
df_pop = pd.read_csv("population.csv")
df3 = pd.merge(df3, df_pop, on="Country/Region")
# Nb cases per Million
list_var = ["Confirmed", "Deaths", "New Confirmed", "New Deaths", "New Confirmed Rolling", "New Deaths Rolling"]
for x in list_var:
    # NOTE(review): the factor is 10000, not 1,000,000 — this matches the
    # "per Million" label only if population.csv stores population in units
    # of 100 people; otherwise the figures are off by x100. Verify against
    # population.csv before trusting these columns.
    df3[f"{x} per Million"] = (df3[x] / df3["Population"]) * 10000
# Export the interactive table / chart
pivot_ui(df3,outfile_path="country_data_comparison_per_million.html")
# -
| interactive_chart.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def names(letter):
    """Print the sample names that start with *letter*."""
    sample = ['Anne', 'Amy', 'Bob', 'David', 'Carrie', 'Barbara', 'Zach']
    matching = list(filter(lambda name: name.startswith(letter), sample))
    print(matching)
names('A')
# Collect the multiples of 4 from a sample list using a comprehension.
number = [1, 2, 3, 5, 4, 6, 8, 9, 11, 12]
multiples_of_4 = [value for value in number if value % 4 == 0]
print(multiples_of_4)
def multiples():
    """Print the multiples of 4 found in a fixed sample list."""
    sample = [1, 2, 3, 5, 4, 6, 8, 9, 11, 12]
    found = [value for value in sample if value % 4 == 0]
    print(found)
multiples()
import pandas as pd
import numpy as np
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import nltk.classify
from nltk import NaiveBayesClassifier
import os
import re
from bs4 import BeautifulSoup
import sys
import time
# VADER sentiment analyzer shared by the cells below
analyzer = SentimentIntensityAnalyzer()
def getData(name):
    """Read the CSV file *name* (first row as header) into a DataFrame."""
    return pd.read_csv(name, header=0)
def list_dir_files(relevant_path):
    """Print *relevant_path* followed by the names of its .csv files.

    Note: paths passed in should use forward slashes to avoid Windows
    unicode-escape issues (see
    https://clay-atlas.com/us/blog/2019/10/27/python-english-tutorial-solved-unicodeescape-error-escape-syntaxerror/).
    """
    import os
    csv_files = [entry for entry in os.listdir(relevant_path)
                 if entry.endswith('csv')]
    print('Path: ', relevant_path)
    for filename in csv_files:
        print(filename)
def lemmatize1(df, stop_words):
    """Download WordNet and print the stop word list.

    NOTE(review): the line that would lemmatize df['body'] is the
    '# %time ...' cell magic below; in this .py (jupytext) form it is a
    plain comment and never runs, so *df* is returned unchanged. The
    *stop_words* parameter is also immediately shadowed by a locally
    rebuilt list — confirm which behavior is intended.
    """
    print('\nLemmatizing ...')
    import nltk #not in original code
    from nltk.stem import WordNetLemmatizer
    from nltk.corpus import stopwords
    stop_words = stopwords.words('english')  # shadows the parameter
    ''' import nltk #not in original code
    from nltk.corpus import stopwords
    stop_words = stopwords.words('english')'''
    nltk.download('wordnet') #not in original code
    #print('stop_words = \n', stop_words)
    # Lemmatize the text
    lemmer = WordNetLemmatizer()
    # %time df['body'] = df['body'].map(lambda x : ' '.join([lemmer.lemmatize(w) for w in x.split() if w not in stop_words]))
    print('stop_words = \n', stop_words)
    return df
import re
from bs4 import BeautifulSoup
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
# +
# Remove stopwords
stop_words = stopwords.words('english')
# adds new stopwords to the list: stock tickers and numeric tokens that are
# noise in this corpus
new_stop_words = ['intc', 'nvda', 'tsla', 'mu', 'msft', 'tsm', 'adbe', 'unh', '39', ' 270',
                  '270000', '4033477', '244', '16', '399', '800', '270', '000', '60', '74',
                  '1600', '993', '392', '98', '00', '1601', 'amd', 'aapl']
for w in new_stop_words:
    stop_words.append(w)
print(stop_words)
# removes the stopwords from the column 'body'
# NOTE(review): the '# %time ...' line below is a cell magic; in this .py
# form it is a comment and does not run.
# %time df['body'] = df['body'].map(lambda x : ' '.join([w for w in x.split() if w not in stop_words]))
df.head()
# +
# Lemmatize the text
lemmer = WordNetLemmatizer()
import nltk #not in original code
nltk.download('wordnet') #not in original code
# NOTE(review): the '# %time ...' line below is a cell magic; in this .py
# form it is a comment, so no lemmatization actually happens here.
# %time df['body'] = df['body'].map(lambda x : ' '.join([lemmer.lemmatize(w) for w in x.split() if w not in stop_words]))
df.head()
# -
def lemmatize(df):
    """Download WordNet and print the stop word list.

    NOTE(review): near-duplicate of ``lemmatize1`` without the (unused)
    stop_words parameter. The '# %time ...' lemmatization line below is a
    cell magic that is a plain comment in this .py form, so *df* is
    returned unchanged — confirm whether lemmatization was intended here.
    """
    print('\nLemmatizing ...')
    import nltk #not in original code
    from nltk.stem import WordNetLemmatizer
    from nltk.corpus import stopwords
    stop_words = stopwords.words('english')
    nltk.download('wordnet') #not in original code
    #print('stop_words = \n', stop_words)
    # Lemmatize the text
    lemmer = WordNetLemmatizer()
    # %time df['body'] = df['body'].map(lambda x : ' '.join([lemmer.lemmatize(w) for w in x.split() if w not in stop_words]))
    print('stop_words = \n', stop_words)
    return df
# +
# Interactively pick one of the scraped CSV files, load it, then run the
# (currently inert) lemmatize step on it.
relevant_path = 'C:/Users/pstri/OneDrive/Documents/Personal/Kokoro/NLTK/Code Project/Scraped Files'
print('Here is a list of the csv files to choose from: \n')
list_dir_files(relevant_path) # gives all of the file options in the relevant path.
time.sleep(2)
name = input('\nWhat file do you want to use? \n')
df = getData(relevant_path + '/' + name) #returns df; reads csv file into df
print('Imported the csv file.')
import nltk #not in original code
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
#df = lemmatize1(df, stop_words)
df = lemmatize(df)
# -
# 120 Allows the user to manually input a value when the stocktwits sentiment value is "None".
# It counts every 20 edits and gives the user the option to quit. If the user chooses to quit,
# it breaks from the while loop so the df can be written to a csv file and all work saved up to that point.
# Upon start-up, the caller asks if this is the first time processing the raw data. If not, it loads the csv file
# into the dataframe and starts where the previous session left off. If "modified?" is "Yes" and "sentiment" is "None",
# the record is skipped, so the loop re-starts at the first record where "modified?" is "No" and "sentiment" is "None".
def edit(df):
    """Fill 'modified_rating' for rows of *df* whose sentiment is missing.

    Rows with sentiment 'None' and modified? 'No' prompt the user for a
    manual rating (1, 0 or -1) via input(); 'Bearish'/'Bullish' rows copy
    the stocktwits 'sentiment_number'. Every 20 manual edits the user may
    quit, which breaks the loop so the caller can save progress. Returns
    the (mutated) df.
    """
    import copy
    i = 0
    counter = 0 # counter to see if user want to stop
    while i < len(df):
    #while i < 6:
        if df.loc[i,('sentiment')] == 'None' and df.loc[i,('modified?')] == 'No': # Column 9 is 'modified?'
            print('\nindex number:', i, '\n', df.loc[i, ('body')])
            #print('This is the body of the tweet:\n', df..log[i,('body')])
            rating = int(input('Enter your rating (1, 0 or -1.):'))
            df.loc[i,('modified_rating')] = copy.deepcopy(rating) # writes inputed number to the 'modified_rating'
            df.loc[i,('modified?')] = 'Yes' # sets "modified?" equal to 'Yes' to identify which records have been modified; so that it can start at the next record at start up
            counter += 1
        elif df.loc[i,('sentiment')] == 'Bearish':
            df.loc[i,('modified_rating')] = df.loc[i,('sentiment_number')] #copies the stocktwits 'sentiment_number' (7) to the 'modified_rating(8)
        elif df.loc[i,('sentiment')] == 'Bullish':
            df.loc[i,('modified_rating')] = df.loc[i,('sentiment_number')] #copies the stocktwits 'sentiment_number' (7) to the 'modified_rating(8)
        if counter == 20: # represents 20 edits
            quit = input('Do you want to quit? (Enter either a "y" or "Y") ')
            if quit == 'y' or quit == 'Y':
                print('You are exiting.')
                break
            else:
                counter = 0 # resets the counter to 0 so there must be another 20 records reviewed and modified
        i += 1
    #df.to_csv(filename, index = False)
    #print('The csv file was written. File name: ', filename)
    return df
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"is_executing": false}
# psycopg2 is the Postgres database adapter for Python, used here to
# integrate Postgres with Python.
import psycopg2
# Connection to the local schedule database.
# NOTE(review): the password is a redacted placeholder — supply a real
# credential (ideally from an environment variable, not source code)
# before running.
con = psycopg2.connect(
    database="Nenashev_schedule",
    user="postgres",
    password="<PASSWORD>",
    host="127.0.0.1",
    port="5432"
)
print("Database opened successfully")
# + [markdown] pycharm={"name": "#%% md\n"}
# # Creation of database
# + pycharm={"name": "#%%\n", "is_executing": false}
cur = con.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS form (
id_ int8 PRIMARY KEY,
name_ varchar not null
) WITHOUT OIDS;
CREATE TABLE IF NOT EXISTS day_of_the_week (
id_ int8 PRIMARY KEY,
name_ varchar not null
) WITHOUT OIDS;
CREATE TABLE IF NOT EXISTS form_st (
id_ int8 PRIMARY KEY,
name_ varchar not null
) WITHOUT OIDS;
CREATE TABLE IF NOT EXISTS st_group (
id_ int8 PRIMARY KEY,
number_ varchar not null
) WITHOUT OIDS;
CREATE TABLE IF NOT EXISTS audience (
id_ int8 PRIMARY KEY,
number_ varchar not null
) WITHOUT OIDS;
CREATE TABLE IF NOT EXISTS lesson (
id_ int8 PRIMARY KEY,
name_ varchar not null
) WITHOUT OIDS;
CREATE TABLE IF NOT EXISTS teacher (
id_ int8 PRIMARY KEY,
name_ varchar not null
) WITHOUT OIDS;
CREATE TABLE IF NOT EXISTS class_ (
id_ serial8 PRIMARY KEY,
number_ int8,
time_start time not null,
time_end time not null,
lesson_id int8 not null,
teacher_id int8 not null,
group_id int8 not null,
form_id int8 not null,
day_of_the_week_id int8 not null,
audience_id int8 not null,
FOREIGN KEY (lesson_id) REFERENCES lesson (id_),
FOREIGN KEY (teacher_id) REFERENCES teacher (id_),
FOREIGN KEY (group_id) REFERENCES st_group (id_),
FOREIGN KEY (form_id) REFERENCES form (id_),
FOREIGN KEY (day_of_the_week_id) REFERENCES day_of_the_week (id_),
FOREIGN KEY (audience_id) REFERENCES audience (id_)
) WITHOUT OIDS; ''')
# + [markdown] pycharm={"name": "#%% md\n"}
# # Insert template data
# + pycharm={"name": "#%%\n", "is_executing": false}
# Populate the days-of-the-week lookup table.
# Fix: use parameterized queries instead of string-concatenated SQL, so the
# driver handles quoting/escaping (the original would break on any value
# containing a quote and is injection-prone as a pattern).
days_of_the_week = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday',
                    'sunday']
for i, day in enumerate(days_of_the_week, start=1):
    cur.execute("INSERT INTO day_of_the_week VALUES (%s, %s);", (i, day))
con.commit()
# To update existing rows instead of inserting, use:
# cur.execute("UPDATE day_of_the_week SET name_ = %s WHERE id_ = %s;", (day, i))
# To delete the row with id_ = 0:
# cur.execute('''DELETE FROM day_of_the_week WHERE id_ = 0''')
# + [markdown] pycharm={"name": "#%% md\n"}
# ### checking days of the week
# + pycharm={"name": "#%%\n", "is_executing": false}
# Sanity check: print every row of day_of_the_week
cur.execute('''SELECT * FROM day_of_the_week''')
for row in cur:
    print(row)
# + pycharm={"name": "#%%\n", "is_executing": false}
# Study-form lookup table (full-time / part-time), using parameterized
# queries instead of hand-built SQL strings.
form_st = ['full-time', 'part-time']
for i, name in enumerate(form_st, start=1):
    cur.execute("INSERT INTO form_st VALUES (%s, %s);", (i, name))
con.commit()
# To update existing rows instead of inserting:
# cur.execute("UPDATE form_st SET name_ = %s WHERE id_ = %s;", (name, i))
# + pycharm={"name": "#%%\n", "is_executing": false}
# Class-form lookup table: standard or optional (elective).
# Bug fix: the original loop used range(1, len(form)), which skipped the
# last element — 'optional' was never inserted. Iterate the whole list,
# and use parameterized queries instead of string-concatenated SQL.
form = ['standard', 'optional']
for i, name in enumerate(form, start=1):
    cur.execute("INSERT INTO form VALUES (%s, %s);", (i, name))
con.commit()
cur.execute('''SELECT * FROM form''')
for row in cur:
    print(row)
# + pycharm={"name": "#%%\n"}
# + pycharm={"name": "#%%\n", "is_executing": false}
# Populate the remaining lookup tables: teachers, lessons (subjects),
# audiences (rooms) and academic groups.
# The original was four copy-pasted cells, each building INSERT statements
# by string concatenation (quoting-bug/injection-prone); they are now driven
# by one loop over (table, values) pairs with parameterized queries, in the
# same order, with the same post-insert SELECT-and-print check.
teachers = ['<NAME>.', '<NAME>.', 'Бутырский Е. Ю.', '<NAME>.',
            '<NAME>.', '<NAME>.']
lessons = ['Основы программирования', 'Проектирование баз данных', 'Алгоритмы',
           'Теория устойчивости', 'Математический анализ', 'Теория управления']
audiences = ['225Д', '403Е', '447Д', '123Д', '480Д', '111А', '808Е']
st_group = ['19.11', '19.12', '19.13', '19.14', '19.15', '19.16', '19.17', '19.18']
for table, values in [("teacher", teachers), ("lesson", lessons),
                      ("audience", audiences), ("st_group", st_group)]:
    # table names come from the literal list above, never from user input,
    # so interpolating them into the SQL text is safe; values go through
    # driver parameters.
    for i, value in enumerate(values, start=1):
        cur.execute(f"INSERT INTO {table} VALUES (%s, %s);", (i, value))
    con.commit()
    cur.execute(f"SELECT * FROM {table}")
    for row in cur:
        print(row)
# + pycharm={"name": "#%%\n", "is_executing": false}
# helper that inserts one class (lesson slot) into the DB
def add_class(time_start, time_end, lesson_id, teacher_id, group_id, audience_id, number_=1, form_id=1, day_of_the_week_id=1):
    """Insert one class into the class_ table and commit.

    Bug fix: the original built the INSERT as a single triple-quoted string
    that *contained* the concatenation code as literal text, so the
    statement sent to Postgres was syntactically invalid and every call
    failed. A parameterized query fixes this and handles quoting safely.
    """
    query = ("INSERT INTO class_ (time_start, time_end, lesson_id, teacher_id, "
             "group_id, audience_id, number_, form_id, day_of_the_week_id) "
             "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);")
    cur.execute(query, (time_start, time_end, lesson_id, teacher_id, group_id,
                        audience_id, number_, form_id, day_of_the_week_id))
    con.commit()
# + pycharm={"name": "#%%\n", "is_executing": false}
from datetime import datetime, date, time
import pandas as pd
def print_res():
    """Print every remaining row of the shared cursor."""
    for row in cur:
        print(row)
def view_schedule():
    """Print the full schedule, one fully-joined class per line.

    Bug fixes versus the original:
    - the WHERE clause referenced ``group_.id_``; ``group_`` is only a
      column alias in the SELECT list, not a table alias, so the query
      failed — the join condition now uses ``st_group.id_``;
    - ``res`` was accumulated across rows, so each iteration re-printed all
      previous rows and only the first row's times were ever formatted —
      it is now rebuilt per row.
    """
    query = '''SELECT class_.id_, lesson.name_ as lesson_, class_.time_start, class_.time_end, teacher.name_ as teacher_, st_group.number_ as group_,
    form.name_ as form_, day_of_the_week.name_ as day_, audience.number_ as audience_
    FROM class_, lesson, teacher, st_group, form, day_of_the_week, audience WHERE class_.lesson_id = lesson.id_ AND
    class_.teacher_id = teacher.id_ AND class_.group_id = st_group.id_ AND class_.form_id = form.id_
    AND class_.day_of_the_week_id = day_of_the_week.id_ AND class_.audience_id = audience.id_'''
    cur.execute(query)
    for row in cur:
        res = list(row)
        # columns 2 and 3 are time_start / time_end; show them as HH:MM
        res[2] = res[2].strftime("%H:%M")
        res[3] = res[3].strftime("%H:%M")
        print(*res)
    con.commit()
# + pycharm={"name": "#%%\n", "is_executing": false}
# Show the schedule as currently stored
view_schedule()
# + pycharm={"name": "#%%\n", "is_executing": false}
def add_class(time_start, time_end, lesson_id, teacher_id, group_id, audience_id, number_=1, form_id=1, day_of_the_week_id=1):
    """Insert one class into the class_ table and commit.

    NOTE(review): this is a verbatim re-definition of ``add_class`` from an
    earlier cell (it shadows that definition) and carried the same bug: the
    INSERT was one triple-quoted literal containing the concatenation code
    as text, so the SQL sent to Postgres was invalid. Fixed the same way
    (parameterized query); consider deleting this duplicate cell.
    """
    query = ("INSERT INTO class_ (time_start, time_end, lesson_id, teacher_id, "
             "group_id, audience_id, number_, form_id, day_of_the_week_id) "
             "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);")
    cur.execute(query, (time_start, time_end, lesson_id, teacher_id, group_id,
                        audience_id, number_, form_id, day_of_the_week_id))
    con.commit()
# + pycharm={"name": "#%%\n", "is_executing": false}
# Re-display the schedule.
view_schedule()
# + pycharm={"name": "#%%\n", "is_executing": false}
# Flush any pending transaction and release the database connection.
con.commit()
con.close()
| Database.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
sns.set()
# Load Tesla daily price history; expects a local TSLA.csv with a Close column.
tesla = pd.read_csv('TSLA.csv')
tesla.head()
def anchor(signal, weight):
    """Exponentially smooth `signal`.

    Each output value is `weight` parts the previous smoothed value plus
    `(1 - weight)` parts the current sample, seeded with the first sample.

    Args:
        signal: indexable sequence of numbers (must be non-empty).
        weight: smoothing factor in [0, 1]; higher keeps more history.

    Returns:
        list of smoothed values, same length as `signal`.
    """
    smoothed = []
    current = signal[0]
    for sample in signal:
        current = current * weight + (1 - weight) * sample
        smoothed.append(current)
    return smoothed
# Smooth the closing price with three anchor weights; per `anchor`, a higher
# weight keeps more of the previous smoothed value (stronger smoothing).
signal = np.copy(tesla.Close.values)
anchor_3 = anchor(signal, 0.3)
anchor_5 = anchor(signal, 0.5)
anchor_8 = anchor(signal, 0.8)
# Overlay the raw close price and the three smoothed curves.
plt.figure(figsize=(15, 7))
plt.plot(np.arange(len(tesla.Close)), tesla.Close, label ='close TSLA')
plt.plot(np.arange(len(tesla.Close)), anchor_3, label = 'anchor 0.3 TSLA')
plt.plot(np.arange(len(tesla.Close)), anchor_5, label = 'anchor 0.5 TSLA')
plt.plot(np.arange(len(tesla.Close)), anchor_8, label = 'anchor 0.8 TSLA')
plt.legend()
plt.show()
| timeseries/anchor-smooth.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# Source: https://hackernoon.com/named-entity-recognition-applications-and-use-cases-c2ef0904e9fe
# Path to a local Stanford NER installation.
# NOTE(review): `ner_dir` is never used below — the tagger paths are relative;
# confirm whether it should prefix them.
ner_dir = '/stanford/ner/'
# +
# Copy from https://en.wikipedia.org/wiki/Stanford_University
article = "The university was founded in 1885 by Leland and <NAME> in memory of \
their only child, <NAME> Jr., who had died of typhoid fever at age 15 the previous \
year. Stanford was a former Governor of California and U.S. Senator; he made his fortune as a railroad tycoon. \
The school admitted its first students on October 1, 1891,[2][3] as a coeducational and non-denominational institution."
# -
# Short test string with several spellings/cases of the same location.
article2 = 'New York, New York , NY N.Y. new york'
# # Stanford NER
# +
import nltk
import os
# Stanford NER is a Java tool; point JAVAHOME at a local JRE so nltk can launch it.
java_path = "C:/Program Files/Java/jdk1.8.0_261/jre/bin/java.exe"
os.environ['JAVAHOME'] = java_path
print('NTLK Version: %s' % nltk.__version__)
from nltk.tag import StanfordNERTagger
# Model and jar paths are relative to the current working directory.
stanford_ner_tagger = StanfordNERTagger('english.all.3class.distsim.crf.ser.gz', 'stanford-ner.jar')
# -
# Tag each whitespace-split token and print only entity tokens ('O' = outside any entity).
results = stanford_ner_tagger.tag(article.split())
print('Original Sentence: %s' % (article))
print()
for result in results:
    tag_value = result[0]
    tag_type = result[1]
    if tag_type != 'O':
        print('Type: %s, Value: %s' % (tag_type, tag_value))
# Same tagging on the location-variants string.
results = stanford_ner_tagger.tag(article2.split())
print('Original Sentence: %s' % (article2))
print()
for result in results:
    tag_value = result[0]
    tag_type = result[1]
    if tag_type != 'O':
        print('Type: %s, Value: %s' % (tag_type, tag_value))
# # NLTK NE
# +
import nltk
print('NTLK version: %s' % (nltk.__version__))
from nltk import word_tokenize, pos_tag, ne_chunk
nltk.download('words')
nltk.download('averaged_perceptron_tagger')
nltk.download('punkt')
nltk.download('maxent_ne_chunker')
# -
# Chunk POS-tagged tokens into named entities, then print lines containing
# proper nouns (/NNP) from the tree's string form.
results = ne_chunk(pos_tag(word_tokenize(article)))
print('Original Sentence: %s' % (article))
print()
for x in str(results).split('\n'):
    if '/NNP' in x:
        print(x)
# Same pipeline on the location-variants string.
results = ne_chunk(pos_tag(word_tokenize(article2)))
print('Original Sentence: %s' % (article2))
print()
for x in str(results).split('\n'):
    if '/NNP' in x:
        print(x)
# # Spacy
# +
import spacy
print('spaCy: %s' % (spacy.__version__))
# -
#spacy_nlp = spacy.load('en')
# Load the small English pipeline (the `en_core_web_sm` model must be installed).
spacy_nlp = spacy.load('en_core_web_sm')
# +
document = spacy_nlp(article)
print('Original Sentence: %s' % (article))
print()
# spaCy exposes recognized entities directly on the parsed document.
for element in document.ents:
    print('Type: %s, Value: %s' % (element.label_, element))
# +
document = spacy_nlp(article2)
print('Original Sentence: %s' % (article2))
print()
for element in document.ents:
    print('Type: %s, Value: %s' % (element.label_, element))
# -
| SCG/nlp-named_entity_recognition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook count all keywords (ex: buyer, seller) on template library.
import json
import re
# Load the scraped contract templates (one entry per contract).
with open("./all_contract.json") as f:
    data = json.load(f)
out = []
for key in data.keys():
    print(key)
    # Each contract stores its grammar template as a list of text chunks.
    text = "".join(data[key]["/text/grammar.tem.md"])
    # Find `{...}` template keywords (e.g. buyer, seller) in the template text.
    result = re.finditer("({[^{]*?)\w(?=\})}", text)
    # x.string[x.start():x.end()] is the full matched text (same as x.group(0)).
    out+=list(map(lambda x: x.string[x.start(): x.end()], result))
len(out)
import pandas as pd
# Tally occurrences of each keyword across all templates.
df = pd.DataFrame()
df["text"] = out
df["count"] = 1
count = df.groupby("text").sum()
count = count.sort_values("count", ascending=False)
count
# Print as "count<TAB>keyword", most frequent first.
for _, row in count.iterrows():
    print(f"{row.values[0]}\t{row.name}")
| crawler_data/count_keyword.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# -
# # Tutorial 2: Hidden Markov Model
# **Week 3, Day 2: Hidden Dynamics**
#
# **By Neuromatch Academy**
#
# __Content creators:__ <NAME> with help from <NAME>
#
# __Content reviewers:__ <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# ---
# + [markdown] colab_type="text"
# **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**
#
# <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>
# -
# # Tutorial objectives
#
# The world around us is often changing state over time, but we may only have access to these states through noisy sensory measurements. Similarly, organisms and neural systems often are thought to transition between a set of discrete states (up/down states, sleep/wake, etc.) which may only be indirectly observable through their impact on neural activity. Hidden Markov Models are a class of models that allow us to reason about the dynamics of a set of unobserved states that lead to the changing sensory inputs or data we observe.
#
# In this notebook, we'll first simulate a Hidden Markov Model and observe how changing the transition probability and observation noise impact what the samples look like. Then we'll look at how uncertainty increases as we make future predictions without evidence (from observations) and how to gain information from the observations.
# The HMM model we use in the first part of the tutorial will have a binary latent variable $s_t \in \{0,1\}$ that switches randomly between the two states, and a 1D Gaussian emission model $m_t|s_t \sim \mathcal{N}(\mu_{s_t},\sigma^2_{s_t})$ that provides evidence about the current state. You will learn how to:
#
# * Build an HMM in Python and generate sample data.
# * Calculate how predictive probabilities propagates in a Markov Chain with no evidence.
# * Combine new evidence and prediction from past evidence to estimate latent states.
#
# ---
#
# There is an an **optional** part for you to get a sense of how to perform parameter estimation of an HMM using the EM algorithm. **We encourage you to do these bonus exercises only _after_ you complete the core material in Tutorials 3 and 4.**
#
# In the optional part, you will implement an HMM of a network of Poisson spiking neurons mentioned in today's intro and:
#
# * Implement the forward-backward algorithm
# * Complete the E-step and M-step
# * Learn parameters for the example problem using the EM algorithm
# * Get an intuition of how the EM algorithm monotonically increases data likelihood
# + cellView="form"
# @title Video 1: Introduction
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV1Ph411Z7Xc", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="ceQXN0OUaFo", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# + cellView="both"
# !pip install hmmlearn --quiet
import numpy as np
from scipy import stats
from scipy.optimize import linear_sum_assignment
from hmmlearn import hmm
import matplotlib.pyplot as plt
from matplotlib import patches
# + cellView="form"
#@title Figure Settings
import ipywidgets as widgets # interactive display
# %config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/NMA2020/nma.mplstyle")
# + cellView="form"
#@title Helper functions
def plot_hmm1(model, states, observations):
    """Plots HMM states and observations for 1d states and observations.

    Args:
        model (hmmlearn model): hmmlearn model used to get state means.
        states (numpy array of ints): Samples of the latent states.
        observations (numpy array of floats): Samples of the observations.
    """
    nsteps = states.size
    fig, ax1 = plt.subplots()
    # Map each discrete state to its emission mean so the step curve sits on
    # the same scale as the observations.
    states_forplot = list(map(lambda s: model.means_[s], states))
    # FIX: use the local `nsteps` (the original referenced the *global*
    # `nstep`, which only worked when a matching global happened to exist).
    ax1.step(np.arange(nsteps), states_forplot, "--", where="mid", alpha=1.0, c="green")
    ax1.set_xlabel("Time")
    ax1.set_ylabel("Latent State", c="green")
    ax1.set_yticks([-1, 1])
    ax1.set_yticklabels(["State 1", "State 0"])
    # Second y-axis for the noisy observations, sharing the time axis.
    ax2 = ax1.twinx()
    ax2.plot(np.arange(nsteps), observations.flatten(), c="blue")
    ax2.set_ylabel("Observations", c="blue")
    # Align the two y-axes so the state curve overlays the observations.
    ax1.set_ylim(ax2.get_ylim())
    plt.show(fig)
def plot_marginal_seq(predictive_probs, switch_prob):
    """Plot the sequence of marginal predictive distributions.

    Args:
        predictive_probs (list of numpy vectors): sequence of predictive
            probability vectors.
        switch_prob (float): probability of switching states (shown in the
            annotation box).
    """
    T = len(predictive_probs)
    fig, ax = plt.subplots()
    # Trace each state's marginal probability over time.
    ax.plot(np.arange(T), [vec[0] for vec in predictive_probs], color="orange")
    ax.plot(np.arange(T), [vec[1] for vec in predictive_probs], color="blue")
    ax.legend([
      "prob in state 0", "prob in state 1"
    ])
    # Annotate the switching probability near the bottom center.
    ax.text(T/2, 0.05, "switching probability={}".format(switch_prob), fontsize=12,
            bbox=dict(boxstyle="round", facecolor="wheat", alpha=0.6))
    ax.set_xlabel("Time")
    ax.set_ylabel("Probability")
    ax.set_title("Forgetting curve in a changing world")
    plt.show(fig)
def plot_evidence_vs_noevidence(posterior_matrix, predictive_probs):
    """Plots the average posterior probabilities with evidence vs. no evidence

    Args:
        posterior_matrix (2d numpy array of floats): The posterior probabilities from evidence, shape (samples, time)
        predictive_probs (numpy array of floats): Predictive probabilities without evidence
    """
    nsample, T = posterior_matrix.shape
    # Average across sample trajectories at each time point.
    posterior_mean = posterior_matrix.mean(axis=0)
    fig, ax = plt.subplots(1)
    # Chance-level (0.5) reference line.
    ax.plot([0.0, T],[0.5, 0.5], color="red", linestyle="dashed")
    ax.plot(np.arange(T), predictive_probs, c="orange", linewidth=2, label="No evidence")
    # One dot per sample per time step; low alpha keeps the cloud readable.
    ax.scatter(np.tile(np.arange(T), (nsample, 1)), posterior_matrix, s=0.8, c="green", alpha=0.3, label="With evidence(Sample)")
    ax.plot(np.arange(T), posterior_mean, c='green', linewidth=2, label="With evidence(Average)")
    ax.legend()
    ax.set_yticks([0.0, 0.25, 0.5, 0.75, 1.0])
    ax.set_xlabel("Time")
    ax.set_ylabel("Probability in State 0")
    ax.set_title("Gain confidence with evidence")
    plt.show(fig)
def simulate_forward_inference(model, T, data=None):
    """
    Given HMM `model`, calculate posterior marginal predictions of x_t for T-1 time steps ahead based on
    evidence `data`. If `data` is not given, generate a sequence of observations from the first component.

    Args:
        model (GaussianHMM instance): the HMM
        T (int): length of returned array
        data (numpy array, optional): observation sequence; if omitted, T samples
            are drawn from the model's first (index 0) state

    Returns:
        predictive_state1: predictive probabilities in first state w.r.t no evidence
        posterior_state1: posterior probabilities in first state w.r.t evidence
    """
    # First re-calculate the predictive probabilities without evidence
    predictive_probs = simulate_prediction_only(model, T)
    # Generate an observation trajectory conditioned on the latent state always being 0
    if data is not None:
        Y = data
    else:
        Y = np.asarray([model._generate_sample_from_state(0) for _ in range(T)])
    # Initial posterior: likelihood of the first observation times the start prior, normalized
    pt = np.exp(model._compute_log_likelihood(Y[[0]])) * model.startprob_
    pt /= np.sum(pt)
    posterior_probs = np.zeros((T, pt.size))
    posterior_probs[0] = pt
    # Fold in each new observation recursively (forward algorithm)
    for t in range(1, T):
        posterior = one_step_update(model, posterior_probs[t-1], Y[[t]])
        # normalize and store
        posterior /= np.sum(posterior)
        posterior_probs[t] = posterior
    # Extract the first-state (index 0) marginal from each distribution
    posterior_state1 = np.asarray([p[0] for p in posterior_probs])
    predictive_state1 = np.asarray([p[0] for p in predictive_probs])
    return predictive_state1, posterior_state1
def plot_forward_inference(model, states, observations, states_inferred):
    """Plot ground truth state sequence with noisy observations, and ground truth states vs. inferred ones

    Args:
        model (instance of hmmlearn.GaussianHMM): an instance of HMM
        states (numpy vector): vector of 0 or 1 (int or Bool), the sequence of true latent states
        observations (numpy vector of numpy vector): the un-flattened Gaussian observations at each time point, element has size (1,)
        states_inferred (numpy vector): vector of 0 or 1 (int or Bool), the sequence of inferred latent states
    """
    plot_hmm1(model, states, observations)
    # FIX: derive the time axis from the data length instead of relying on the
    # global `nstep`, which broke whenever the caller used a different length.
    nsteps = len(states)
    fig, ax = plt.subplots()
    # state 0 has larger mean, so plot 1-state to put state 0 on top
    ax.step(np.arange(nsteps), 1-states, color="green", label="Ground Truth")
    ax.step(np.arange(nsteps), 1-states_inferred, linestyle="dashed", color="orange", label="Inferred")
    ax.legend()
    ax.set_title("Infer latent states from data")
    ax.set_xlabel("Time")
    ax.set_ylabel("Latent State")
    ax.set_yticks([0, 1])
    ax.set_yticklabels(["State 1", "State 0"])
    plt.show(fig)
# -
# ---
# # Section 1: Binary HMM with Gaussian observations
# + cellView="form"
# @title Video 2: Simulating a binary HMM with Gaussian observations
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV1XZ4y1u7So", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="7cTnoe6Xt80", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
# In contrast with the Sequential Probability Ratio Test, the latent state in an HMM is no longer fixed over time. Instead, it can probabilistically switch or jump to a different state at each time step. However, the time dependence of states at different times is simple: the probability of the state at time $t$ is wholly determined by the state at time $t-1$. This is called the **Markov property** and the dependency of the whole state sequence $\{s_1,...,s_t\}$ can be described by a chain structure called a Markov Chain:
#
# <img src=" https://github.com/NeuromatchAcademy/course-content/blob/master/tutorials/W2D3_DecisionMaking/static/W2D3_Tutorial2_markov_chain_diagram.png?raw=true" alt="Markov chain drawing" width="400"/>
#
# (Please note that this should be s in diagram above, it will be changed)
#
# **Markov model for latent dynamics**
#
# Here we will reuse the switching process or telegraph process you saw in a previous tutorial. Quantitatively, the probability of switching to state $s_t=j$ from the previous state $s_{t-1}=i$ is a conditional probability distribution $p(s_t=j|s_{t-1}=i)$.
#
# Since the states are binary, we can represent the probability of the current state as a 2-dimensional vector $p(s=i)=p_{i}$ (or, including time, as $p(s_t=i)=p_{ti}$), and can represent the transition probability as a 2$\times$2 matrix $A_{ij}$. This is a convenient representation for coding. We can then use this representation to update the probabilities over time following the Markov process.
# $$p(s_t=j) = \sum_{i} p(s_t=j|s_{t-1}=i)p(s_{t-1}=i)$$
# or equivalently
# $$p_{tj}=\sum_i A_{ij} p_{(t-1)i} \tag{1}$$
# or, using vectors, $p_t=Ap_{t-1}$. Note that here $A_{ij}$ represents the transition probability to switch **FROM state $i$ TO state $j$** at next time step.
#
# **Measurements**
#
# In a _Hidden_ Markov model, we cannot directly observe the latent states $s_t$. What we can observe instead is a noisy measurement $m_t$ generated from $s_t$.
#
#
# ## Coding Exercise 1: Simulate a binary HMM with Gaussian observations
#
# In this exercise, you will use the package `hmmlearn` to implement a two-state HMM with Gaussian measurements (sometimes called emissions). Your HMM will start in State 0 and transition between states (both $0 \rightarrow 1$ and $1 \rightarrow 0$) with probability `switch_prob`. Each state emits observations drawn from a Gaussian with $\mu = 1$ for State 0 and $\mu = -1$ for State 1. The variance of both states is fixed at `noise_level`.
#
# Please familiarize yourself with the code and complete the following exercises in the next cell. You will need to:
#
# 1. To implement the state transitions, complete the transition matrix `transmat_` (i.e., $A_{ij}$) in the code below.
# \begin{equation*}
# A_{i,j} =
# \begin{pmatrix}
# p_{\rm stay} & p_{\rm switch} \\
# p_{\rm switch} & p_{\rm stay} \\
# \end{pmatrix}
# \end{equation*}
# with $p_{\rm stay} = 1 - p_{\rm switch}$.
#
# 2. The *hidden* part of HMM means that we do not directly output the current state $s_t$, but instead observe a noisy emission $m_t | s_t$, here generated by a Gaussian. The means have already been filled in for you, but you must complete the covariance matrix `covars_`. Set each state's observation variance to `noise_level`. In the code, the required shape given below is $2\times 1\times 1$, for two $1\times 1$ covariances which are really scalar variances. This seems like a weird shape for storing two numbers, but it makes things easier for the rest of the code.
#
#
#
# +
def create_HMM(switch_prob=0.1, noise_level=1e-8, startprob=[1.0, 0.0]):
    """Create an HMM with binary state variable and 1D Gaussian observations

    The probability to switch to the other state is `switch_prob`. Two
    observation models have mean 1.0 and -1.0 respectively. `noise_level`
    specifies the standard deviation of the observation models.

    Args:
        switch_prob (float): probability to jump to the other state
        noise_level (float): standard deviation of observation models. Same for
            two components
        startprob (list of float): initial probabilities over the two states
            (default: start in state 0 with certainty)

    Returns:
        model (hmm.GaussianHMM instance): the described HMM
    """
    ############################################################################
    # Insert your code here to:
    #      * Create the transition matrix, `transmat_` so that the odds of
    #        switching is `switch_prob`
    #      * Set the observation model variances, `covars_`, to `noise_level`
    raise NotImplementedError("`create_HMM` is incomplete")
    ############################################################################
    n_components = 2
    # Initialize model
    model = hmm.GaussianHMM(n_components=n_components, covariance_type="full")
    model.startprob_ = np.asarray(startprob)
    # Make transition matrix, should be shape (2, 2), i.e., a transition matrix for 2 states
    model.transmat_ = ...
    # Create means
    model.means_ = np.array([[1.0], [-1.0]])
    # Create covariance matrices, should be shape (2, 1, 1), i.e., 2 1x1 covariance matrices
    model.covars_ = ...
    # NOTE(review): drawing one throwaway sample presumably forces hmmlearn to
    # validate/finalize the hand-set parameters — confirm before removing.
    model.sample(1)
    return model
# Set random seed
np.random.seed(101)
# Number of steps
nstep = 50
# Create HMM
model = create_HMM()
# Sample from HMM
observations, states = model.sample(nstep)
# Visualize
plot_hmm1(model, states, observations)
# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial2_Solution_9e86d4ae.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D2_HiddenDynamics/static/W3D2_Tutorial2_Solution_9e86d4ae_0.png>
#
#
# -
# ## Interactive Demo 1: Binary HMM
#
# In the demo below, we simulate a similar HMM and plot. You can change the probability of switching states and the noise level.
#
# First, think and discuss these questions.
#
#
# 1. What will happen if the switching probability is zero? What about if it's one?
# 2. What will happen with high noise? Low?
#
#
#
# Then, play with the demo to see if you were correct or not.
# + cellView="form"
#@title
#@markdown Make sure you execute this cell to enable the widget!
np.random.seed(101)
nstep = 100
@widgets.interact
def plot(switch_prob=(0., 1, .01), log10_noise_level=(-8., 1., .01)):
model = create_HMM(switch_prob=switch_prob,
noise_level=10.**log10_noise_level)
observations, states = model.sample(nstep)
observations = observations.flatten()
plot_hmm1(model, states, observations)
# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial2_Solution_41ccf32d.py)
#
#
# -
#
# **Harkening** back to our fishing example, you can imagine that the time series you measure is related to the number of fish caught at different times as the school of fish move from left to right. Or you could envision it as the voltage across a membrane affected by an ion channel in two states, open and closed. Or it could represent EEG frequency measurements as the brain moves between sleep states. What phenomena can you imagine modeling with these HMMs?
# ---
#
# # Section 2: Forgetting information and gaining confidence with a known initial state
# ## Section 2.1: Forgetting information
# + cellView="form"
# @title Video 3: Forgetting in a changing world
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV1NK4y1x7fo", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="pRRo_L-n8nc", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
# ### Coding Exercise 2: Forgetting in a changing world
#
# Since the world (latent state) is changing over time, even if we know for sure that we are in state 0 at some time, we will be more and more uncertain that we'll remain in state 0 as time goes. In other words, when we try to make predictions of future states in a Markov Chain based on our current knowledge without future evidence, the influence of current state will decay over time.
#
# In this exercise, we'll inspect how we "forget" the current state information when predicting future states without any observation.
#
# Using the model you just defined, let's now make some predictions about $s_t$ given that we know $s_0=0$ for sure. We've already imposed this assumption by setting prior probabilities of $p(s_0)$ to $[1,0]$ earlier.
#
# 1. Complete the code in function `markov_forward` to calculate the predictive marginal distribution at next time step using `p_next = A.T @ p_current`
#
# 3. Take a look at function `simulate_prediction_only` and understand how the predictive distribution propagates along the Markov chain
#
# 4. Using our provided code, plot the predictive probabilities as a function of time
#
#
#
# + cellView="form"
# @markdown Execute this cell to enable the function simulate_prediction_only
def simulate_prediction_only(model, nstep):
    """
    Simulate the diffusion of HMM with no observations

    Args:
        model (hmm.GaussianHMM instance): the HMM instance
        nstep (int): total number of time steps to simulate (includes initial time)

    Returns:
        predictive_probs (list of numpy vector): the list of marginal probabilities,
            one per time step, starting from the model's start distribution
    """
    # FIX: removed the unused `entropy_list` local and its misleading
    # "calculate entropy" comment — no entropy was ever computed.
    predictive_probs = []
    prob = model.startprob_
    for _ in range(nstep):
        # Record the current marginal, then diffuse it one step forward
        # through the transition matrix.
        predictive_probs.append(prob)
        prob = markov_forward(prob, model.transmat_)
    return predictive_probs
# +
def markov_forward(p0, A):
    """Calculate the forward predictive distribution in a discrete Markov chain

    Args:
        p0 (numpy vector): a discrete probability vector
        A (numpy matrix): the transition matrix, A[i,j] means the prob. to
            switch FROM i TO j

    Returns:
        p1 (numpy vector): the predictive probabilities in next time step
    """
    ############################################################################
    # Insert your code here to:
    #      Compute the marginal distribution of Markov chain in next time step
    #      Hint: use matrix multiply and be careful about the index orders
    raise NotImplementedError("function `markov_forward` incomplete")
    ############################################################################
    # With A[i, j] = P(next=j | current=i), the update is
    # p1[j] = sum_i A[i, j] * p0[i].
    p1 = ...
    return p1
# Set random seed
np.random.seed(101)
# Set parameters of HMM
T = 100
switch_prob = 0.1
noise_level = 2.0
# Create HMM
model = create_HMM(switch_prob=switch_prob, noise_level=noise_level)
# Get predictive probabilities
predictive_probs = simulate_prediction_only(model, T)
# Visualize
plot_marginal_seq(predictive_probs, switch_prob)
# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial2_Solution_61403d8f.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D2_HiddenDynamics/static/W3D2_Tutorial2_Solution_61403d8f_0.png>
#
#
# -
# ### Interactive Demo 2: Forgetting
#
# In the following demo, we look at the same visualization but you can play with the probability of switching states, using the slider.
#
# 1. Do you forget more quickly with low or high switching probability?
#
# 2. How does the curve look when `prob_switch` $> 0.5$?
#
# + cellView="form"
#@markdown Make sure you execute this cell to enable the widget!
np.random.seed(101)
T = 100
noise_level = 0.5
@widgets.interact
def plot(switch_prob=(0.01, .99, .01)):
model = create_HMM(switch_prob=switch_prob, noise_level=noise_level)
predictive_probs = simulate_prediction_only(model, T)
plot_marginal_seq(predictive_probs, switch_prob)
# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial2_Solution_d90e17f2.py)
#
#
# -
# ## Section 2.2: Gaining confidence
# + cellView="form"
# @title Video 4: Gain confidence from evidence
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="BV1Az4y1Q7VR", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="dDjoxUxMgC0", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
# As shown in Exercise 2, you lose information and increase uncertainty exponentially when predicting into future in the absence of further evidence, because the state randomly diffuses from its last known value. However the HMM also generates a measurement $m_t$ at each time step, and we can use this evidence to improve our state estimate.
#
# <img src="https://github.com/NeuromatchAcademy/course-content/blob/master/tutorials/W2D3_DecisionMaking/static/W2D3_Tutorial2_HMM_diagram.png?raw=true" alt="HMM drawing" width="400"/>
#
# (We will UPDATE FIGURE with $x,y\to s,m$**)
#
# Now let's incorporate evidence into our inference. In this exercise we will calculate the **posterior marginal** distribution $p(s_t|m_{1:t})$, ie the marginal probability of the current state given the entire history of measurements. This is a crucial computation, and it is tractable because of the simple structure of the HMM.
#
# We compute this probability recursively. Suppose we know the posterior marginal probability for the previous time step, $p(s_{t-1}|m_{1:t-1})$. Now we receive a new measurement $m_t$. From Bayes' rule and the Markov property, we can calculate $p(s_{t}|m_{1:t})$. We do this in two steps.
#
# First, we make a prediction for $s_t$ given our previous knowledge. We can say "yesterday's posterior becomes today's prior," where the Markov transition matrix accounts for the change from $t$ to $t-1$, as in the last exercise. This gives us the prior probability
# $$p(s_t|m_{1:t-1})=\sum_{s_{t-1}}p(s_t|s_{t-1})p(s_{t-1}|m_{1:t-1}) \tag{2}$$
# Observe that the history $m_{1:t-1}$ does not yet include the new measurement at time $t$.
#
# Second, we use the usual Bayesian inference to incorporate this new evidence, multiplying our prior $p(s_t|m_{1:t-1})$ times our likelihood $p(m_t|s_t)$, to obtain
# $$p(s_t|m_{1:t})\propto p(s_t|m_{1:t-1})p(m_t|s_t). \tag{3}$$
#
# Putting (2) and (3) together, we obtain the forward recursion equation for a Hidden Markov Model,
# $$p(s_t|m_{1:t})\propto p(m_t|s_t)\sum_{s_{t-1}}p(s_t|s_{t-1})p(s_{t-1}|m_{1:t-1}) \tag{4}$$
#
#
#
# ### Coding Exercise 3: Gain confidence from evidence
#
# 1. Create a model with switching probability $0.1$ and noise level $0.5$ using function `create_HMM(switch_prob, noise_level)`
#
# 2. Complete the code to calculate marginal posterior distribution $p(s_t|m_{1:t-1})$ at time $t$ from last posterior $p(s_{t-1}|m_{1:t-1})$ at time $t-1$
# - Calculate the predictive distribution $p(s_t =j|m_{1:t-1})=\sum_i A_{ij} p(s_{t-1}=i|m_{1:t-1})$
# - Calculate the likelihood of new data under each component using `exp(model._compute_log_likelihood(yt))`
# - Multiply likelihood and prediction element-wise and normalize over two components to get the new posterior probabilities
#
# 3. Using provided code, plot the average posterior probabilities over time due to evidence together with predictive probabilities without evidence
#
#
# +
def one_step_update(model, posterior_tm1, Y_t):
  """Given a HMM model, calculate the one-time-step updates to the posterior.

  Implements one step of forward inference: combine the one-step prediction
  (the prior, obtained by pushing the previous posterior through the
  transition matrix) with the likelihood of the new observation.

  Args:
    model (GaussianHMM instance): the HMM
    posterior_tm1 (numpy array): Posterior at `t-1`
    Y_t (numpy array): Observation at `t`

  Returns:
    posterior_t (numpy array): Posterior at `t` (note: as returned here it is
      the product prior * likelihood, i.e. unnormalized)
  """
  ##############################################################################
  # Insert your code here to:
  # 1. Calculate the predicted state given the previous
  #     estimate (`posterior_tm1`). Note that `model.transmat_` is equivalent
  #     to `A.T`, not `A`.
  # 2. Using `model._compute_log_likelihood()`, calculate the likelihood
  #     given `Y_t`.
  raise NotImplementedError("`one_step_update` is incomplete")
  ##############################################################################
  prediction = ... @ posterior_tm1  # one-step prediction (today's prior)
  likelihood = np.exp(...)  # likelihood of Y_t under each hidden state
  posterior_t = prediction * likelihood  # Bayes rule, up to normalization
  return posterior_t
np.random.seed(101)

# simulation settings
switch_prob = 0.1
noise_level = 0.5
nsample = 50
T = 160

# Build a single model, then repeat forward inference for many sample runs.
model = create_HMM(switch_prob, noise_level)

posterior_list = []
for _ in range(nsample):
  predictive_probs, posterior_probs = simulate_forward_inference(model, T)
  posterior_list.append(posterior_probs)
posterior_matrix = np.asarray(posterior_list)

# Compare posteriors (with evidence) against predictive probs (no evidence).
plot_evidence_vs_noevidence(posterior_matrix, predictive_probs)
# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial2_Solution_0601fea5.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D2_HiddenDynamics/static/W3D2_Tutorial2_Solution_0601fea5_0.png>
#
#
# -
# Now you've got a plot of information loss due to diffusion together with the information recovered/uncertainty reduced due to evidence. The difference between the former and the latter is the amount of uncertainty that still remains because of observation noise, as we'll see in the next demo.
# ### Interactive Demo 2.2: Noise in confidence
# In this demo, you can adjust the switch probability and noise level and observe how information gain changes with signal-to-noise ratio and/or switch probability.
#
# + cellView="form"
#@title
#@markdown Make sure you execute this cell to enable the widget!
np.random.seed(101)

@widgets.interact
def plot(switch_prob=(0.01, .99, .01), noise_level=(.1, 3, .05),
         nsample=(5, 200, 5), T=(20, 300, 5)):
  # Rebuild the model at the current slider settings and rerun inference.
  hmm = create_HMM(switch_prob, noise_level)
  posteriors = []
  for _ in range(nsample):
    predictive, posterior = simulate_forward_inference(hmm, T)
    posteriors.append(posterior)
  plot_evidence_vs_noevidence(np.asarray(posteriors), predictive)
# -
# ---
#
# # Section 3: Inference in a dynamic world
# ### Coding Exercise 4: Forward inference of HMM
#
# If you set `switch_prob` or `noise_level` to be large in the last exercise, you will observe that some sample inference dots fall below 0.5. This means we are making false inferences about which latent state we are in.
#
# In this exercise, let's make a forward inference of a random state sequence rather than a constant one by observing its noisy Gaussian outputs. Different from Exercise 1, here we assume we know the switching probability but don't know the prior (`startprob_`).
#
# 1. Build a HMM with prior probabilities= $(0.5,0.5)$, switching probability=$0.1$, and noise level=$1.0$ by calling `create_HMM(switch_prob, noise_level, startprob)`
# 2. Generate a sample sequence along with observations by calling `model.sample(nstep)`, and use our provided code to visualize the latent trajectory and observations
# 3. Calculate posterior probabilities given data by calling `simulate_forward_inference(model, nstep, observations)`, and make inference of latent states by picking the component with larger posterior probability
# 4. Use our provided code to visualize the inferred state sequence together with the ground truth
#
#
#
#
# +
np.random.seed(101)

# simulation settings
nstep = 100
switch_prob = 0.1
log10_noise_level = -1

# Build model (uniform prior over the two latent states)
model = create_HMM(switch_prob=switch_prob,
                   noise_level=10.**log10_noise_level,
                   startprob=[0.5, 0.5])

# Sample a latent state trajectory together with its noisy observations
observations, states = model.sample(nstep)

# Infer state sequence
predictive_probs, posterior_probs = simulate_forward_inference(model, nstep,
                                                               observations)

############################################################################
# Insert your code here to:
#    Calculate inferred states from posterior probabilities at state 0
#    Hint: Compare the probabilities with 0.5 and note that you should
#          return 0 if prob > 0.5
############################################################################
# states_inferred = ...

################################################################################
# After finishing the above exercises, please un-comment the following lines
################################################################################
#plot_forward_inference(model, states, observations, states_inferred)
# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial2_Solution_c36f28a6.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D2_HiddenDynamics/static/W3D2_Tutorial2_Solution_c36f28a6_0.png>
#
# <img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D2_HiddenDynamics/static/W3D2_Tutorial2_Solution_c36f28a6_1.png>
#
#
# -
# ## Interactive Demo 4: Forward inference
#
# Try different values of switching probability (`prob_switch`) and noise level (`noise_level`) either by hand or the widget in section **Interactive Cell**. When do we start to make false inferences?
#
# + cellView="form"
#@title
#@markdown Make sure you execute this cell to enable the widget!
np.random.seed(101)
nstep = 100

@widgets.interact
def plot(switch_prob=(0.01, .99, .01), log10_noise_level=(-8, 1, .01)):
  # Build an HMM with a uniform prior over the two latent states.
  hmm = create_HMM(switch_prob=switch_prob,
                   noise_level=10.**log10_noise_level,
                   startprob=[0.5, 0.5])
  obs, true_states = hmm.sample(nstep)
  # observations = observations.flatten()
  # Forward inference, then threshold the state-0 posterior at 0.5.
  _, posterior = simulate_forward_inference(hmm, nstep, obs)
  inferred = posterior <= 0.5
  plot_forward_inference(hmm, true_states, obs, inferred)
# -
# ---
# # Summary
#
# # Bonus
# We, the organizers, know that the next sections are much longer and more challenging than most other tutorial content. **We do not expect you to finish it—or even start it—right now**. In fact, we strongly suggest saving your time and energy for the Kalman Filtering introduced in Tutorials 3 and 4, because it will play an important role in tomorrow's material too.
#
# That said, the EM algorithm can be a very useful and powerful optimization tool. Since it is typically taught in the context of Hidden Markov Models, we have included it here for your reference.
#
# To reiterate, the remainder of this notebook is *completely* and *absolutely* optional. It is not essential to understand the rest of the NMA content. By this point in Tutorial 2, we believe that you will have seen enough about HMMs to know when/if they might be relevant for your own research. When that day comes, or you are just feeling curious, this material will be here waiting!
# ---
#
# ## Bonus Section 1: HMM for Poisson spiking neuronal network
# + cellView="form"
# @title Video 5: HMM for Poisson spiking neurons case study
# Display the lecture video with YouTube and Bilibili mirrors in two tabs.
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  class BiliVideo(IFrame):
    # Minimal IFrame subclass that embeds the Bilibili player for `id`.
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)
  video = BiliVideo(id="BV1uT4y1j7nZ", width=854, height=480, fs=1)
  print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
  display(video)

out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id="Wb8mf5chmyI", width=854, height=480, fs=1, rel=0)
  print('Video available at https://youtube.com/watch?v=' + video.id)
  display(video)

# Tab widget holding the two mirrors of the same video.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
# Given noisy neural or behavioral measurements, we as neuroscientists often want to infer the unobserved latent variables as they change over time. Thalamic relay neurons fire in two distinct modes: a tonic mode where spikes are produced one at a time, and a 'burst mode' where several action potentials are produced in rapid succession. These modes are thought to differentially encode how the neurons relay information from sensory receptors to cortex. A distinct molecular mechanism, T-type calcium channels, switches neurons between modes, but it is very challenging to measure in the brain of a living monkey. However, statistical approaches let us recover the hidden state of those calcium channels purely from their spiking activity, which can be measured in a behaving monkey.
#
# Here, we're going to tackle a simplified version of that problem.
#
#
# Let's consider the formulation mentioned in the intro lecture.
# We have a network of $C$ neurons switching between $K$ states. Neuron $c$ has firing rate $\lambda_i^c$ in state $i$. The transition between states are represented by the $K\times K$ transition matrix $A_{ij}$ and initial probability vector $\psi$ with length $K$ at time $t=1$.
#
# Let $y_t^c$ be the number of spikes for cell $c$ in time bin $t$.
#
# In the following exercises (4 and 5) and tutorials, you will
#
# * Define an instance of such model with $C=5$ and $K=3$
# * (**Exercise 4**) Generate a dataset from this model
# * (**Exercise 5**) Implement the M-step for this HMM
# * Run EM to estimate all parameters $A,\psi,\lambda_i^c$
# * Plot the learning likelihood curve
# * Plot expected complete log likelihood versus data log likelihood
# * Compare learnt parameters versus true parameters
# ---
#
#
# ## **Optional** Section: Define model and generate data
#
# Let's first generate a random state sequence from the hidden Markov Chain, and generate `n_frozen_trials` different trials of spike trains for each cell assuming they all use the same underlying sequence we just generated.
#
# **Suggestions**
#
# 1. Run the following two sections **Model and simulation parameters** and **Initialize true model** to define a true model and parameters that will be used in our following exercises. Please take a look at the parameters and come back to these two cells if you encounter a variable you don't know in the future.
#
# 2. Complete the code to convert a given state sequence to corresponding spike rates for all cells at all times, and use provided code to visualize all spike trains.
#
#
# + cellView="form"
#@title Helper functions
def plot_spike_train(X, Y, dt):
  """Plots the spike train for cells across trials and overlay the state.

  Each contiguous run of a single latent state is shaded with that state's
  color; spike rasters for all trials of each cell are stacked above it.

  Args:
    X: (2d numpy array of binary values): The state sequence in a one-hot
                                          representation. (T, states)
    Y: (3d numpy array of floats): The spike sequence.
        (trials, T, C)
    dt (float): Interval for a bin.
  """
  n_trials, T, C = Y.shape
  trial_T = T * dt
  fig = plt.figure(figsize=(.7 * (12.8 + 6.4), .7 * 9.6))
  # plot state sequence
  # X.nonzero()[1] is the state index at each time bin; nonzero diffs mark
  # the bins where the state changes (segment boundaries).
  starts = [0] + list(np.diff(X.nonzero()[1]).nonzero()[0])
  stops = list(np.diff(X.nonzero()[1]).nonzero()[0]) + [T]
  # NOTE(review): state of each segment is read at index i + 1; this assumes
  # the state at bin i + 1 belongs to the segment starting at boundary i --
  # confirm behavior when the state flips at the very first bin.
  states = [X[i + 1].nonzero()[0][0] for i in starts]
  for a, b, i in zip(starts, stops, states):
    # shaded background rectangle spanning one constant-state segment
    rect = patches.Rectangle((a * dt, 0), (b - a) * dt, n_trials * C,
                             facecolor=plt.get_cmap('tab10').colors[i],
                             alpha=0.15)
    plt.gca().add_patch(rect)
  # plot rasters
  for c in range(C):
    if c > 0:
      # horizontal separator between consecutive cells' raster bands
      plt.plot([0, trial_T], [c * n_trials, c * n_trials],
               color=plt.get_cmap('tab10').colors[0])
    for r in range(n_trials):
      # spike times (bin indices) for trial r of cell c; draw a tick per spike
      tmp = Y[r, :, c].nonzero()[0]
      if len(tmp) > 0: plt.plot(np.stack((tmp, tmp)) * dt,
                                (c * n_trials + r + 0.1,
                                 c * n_trials + r + .9),
                                'k')
  ax = plt.gca()
  plt.yticks(np.arange(0, n_trials * C, n_trials),
             labels=np.arange(C, dtype=int))
  plt.xlabel('time (s)', fontsize=16)
  plt.ylabel('Cell number', fontsize=16)
def run_em(epochs, Y, psi, A, L, dt):
  """Run EM for the HMM spiking model.

  NOTE(review): besides its arguments, this function reads the module-level
  globals `print_every`, `plot_epochs`, `num_plot_vals`, `b_lims` and `T`,
  and calls `e_step` / `m_step`.

  Args:
    epochs (int): Number of epochs of EM to run
    Y (numpy 3d array): Tensor of recordings, has shape (n_trials, T, C)
    psi (numpy vector): Initial probabilities for each state
    A (numpy matrix): Transition matrix, A[i,j] represents the prob to switch
        from j to i. Has shape (K,K)
    L (numpy matrix): Poisson rate parameter for different cells.
        Has shape (C,K)
    dt (float): Duration of a time bin
  Returns:
    save_vals (lists of floats): Data for later plotting
    lls (list of floats): ll before each EM step
    psi (numpy vector): Estimated initial probabilities for each state
    A (numpy matrix): Estimated transition matrix, A[i,j] represents
        the prob to switch from j to i. Has shape (K,K)
    L (numpy matrix): Estimated Poisson rate parameter for different
        cells. Has shape (C,K)
  """
  save_vals = []
  lls = []
  for e in range(epochs):
    # Run E-step
    ll, gamma, xi = e_step(Y, psi, A, L, dt)
    lls.append(ll)  # log the data log likelihood for current cycle
    if e % print_every == 0: print(f'epoch: {e:3d}, ll = {ll}')  # log progress
    # Run M-step
    psi_new, A_new, L_new = m_step(gamma, xi, dt)
    """Booking keeping for later plotting
    Calculate the difference of parameters for later
    interpolation/extrapolation
    """
    dp, dA, dL = psi_new - psi, A_new - A, L_new - L
    # Calculate LLs and ECLLs for later plotting
    if e in plot_epochs:
      # Interpolation range for coefficient b: the bounds below keep every
      # interpolated parameter positive (so the np.log calls stay valid),
      # then are clipped to b_lims.
      b_min = -min([np.min(psi[dp > 0] / dp[dp > 0]),
                    np.min(A[dA > 0] / dA[dA > 0]),
                    np.min(L[dL > 0] / dL[dL > 0])])
      b_max = -max([np.max(psi[dp < 0] / dp[dp < 0]),
                    np.max(A[dA < 0] / dA[dA < 0]),
                    np.max(L[dL < 0] / dL[dL < 0])])
      b_min = np.max([.99 * b_min, b_lims[0]])
      b_max = np.min([.99 * b_max, b_lims[1]])
      bs = np.linspace(b_min, b_max, num_plot_vals)
      # guarantee b = 0 (pre-M-step params) and b = 1 (post-M-step params)
      bs = sorted(list(set(np.hstack((bs, [0, 1])))))
      bs = np.array(bs)
      lls_for_plot = []
      eclls_for_plot = []
      for i, b in enumerate(bs):
        # data log likelihood at the interpolated parameters
        ll = e_step(Y, psi + b * dp, A + b * dA, L + b * dL, dt)[0]
        lls_for_plot.append(ll)
        rate = (L + b * dL) * dt
        # expected complete log likelihood at the interpolated parameters
        ecll = ((gamma[:, 0] @ np.log(psi + b * dp) +
                 (xi * np.log(A + b * dA)).sum(axis=(-1, -2, -3)) +
                 (gamma * stats.poisson(rate).logpmf(Y[..., np.newaxis]).sum(-2)
                  ).sum(axis=(-1, -2))).mean() / T / dt)
        eclls_for_plot.append(ecll)
        if b == 0:
          # offset so the LL and ECLL curves touch at b = 0
          diff_ll = ll - ecll
      lls_for_plot = np.array(lls_for_plot)
      eclls_for_plot = np.array(eclls_for_plot) + diff_ll
      save_vals.append((bs, lls_for_plot, eclls_for_plot))
    # return new parameter
    psi, A, L = psi_new, A_new, L_new
  ll = e_step(Y, psi, A, L, dt)[0]
  lls.append(ll)
  print(f'epoch: {epochs:3d}, ll = {ll}')
  return save_vals, lls, psi, A, L
def plot_lls(lls):
  """Plot the data log likelihood recorded at each EM epoch.

  Args:
    lls (list of floats): log likelihoods at each epoch.
  """
  fig, ax = plt.subplots()
  n_epochs = len(lls)
  ax.plot(range(n_epochs), lls, linewidth=3)
  # pad the y-limits by 5% of the total range on each side
  lo, hi = min(lls), max(lls)
  pad = (hi - lo) * 0.05
  ax.set_ylim(lo - pad, hi + pad)
  plt.xlabel('iteration')
  plt.ylabel('log likelihood')
  plt.show(fig)
def plot_lls_eclls(plot_epochs, save_vals):
  """Plots LL vs. ECLL along the segment between consecutive EM iterates.

  One panel per saved epoch, three panels per row. The x-axis is the
  interpolation coefficient b; b=0 is the pre-M-step parameters and b=1
  the post-M-step parameters (see `run_em`).

  Args:
    plot_epochs (list of ints): Which epochs were saved to plot.
    save_vals (lists of floats): Different likelihoods from EM for plotting.
  """
  rows = int(np.ceil(min(len(plot_epochs), len(save_vals)) / 3))  # 3 per row
  fig, axes = plt.subplots(rows, 3, figsize=(.7 * 6.4 * 3, .7 * 4.8 * rows))
  axes = axes.flatten()
  minll, maxll = np.inf, -np.inf
  for i, (ax, (bs, lls_for_plot, eclls_for_plot)) in enumerate(zip(axes, save_vals)):
    ax.set_xlim([-1.15, 2.15])
    min_val = np.stack((lls_for_plot, eclls_for_plot)).min()
    max_val = np.stack((lls_for_plot, eclls_for_plot)).max()
    # vertical guides at b = 0 and b = 1 (the two parameter iterates)
    ax.plot([0, 0], [min_val, lls_for_plot[bs == 0]], '--b')
    ax.plot([1, 1], [min_val, lls_for_plot[bs == 1]], '--b')
    ax.set_xticks([0, 1])
    ax.set_xticklabels([f'$\\theta^{plot_epochs[i]}$',
                        f'$\\theta^{plot_epochs[i] + 1}$'])
    ax.tick_params(axis='y')
    ax.tick_params(axis='x')
    ax.plot(bs, lls_for_plot)
    ax.plot(bs, eclls_for_plot)
    # track the global y-range across all panels
    if min_val < minll: minll = min_val
    if max_val > maxll: maxll = max_val
    if i % 3 == 0: ax.set_ylabel('log likelihood')
    if i == 4:
      # legend shown on a single (middle) panel only
      l = ax.legend(ax.lines[-2:], ['LL', 'ECLL'], framealpha=1)
  plt.show(fig)
def plot_learnt_vs_true(L_true, L, A_true, A, dt):
  """Plot and compare the true and learnt parameters.

  Left panel: inferred vs. true firing rates (Hz), one marker per
  (cell, state) pair; the dashed line marks perfect recovery.
  Right panel: inferred vs. true off-diagonal transition rates (Hz).

  Args:
    L_true (numpy array): True L. Has shape (C,K).
    L (numpy array): Estimated L. Has shape (C,K).
    A_true (numpy array): True A. Has shape (K,K).
    A (numpy array): Estimated A. Has shape (K,K).
    dt (float): Bin length.
  """
  C, K = L.shape
  fig = plt.figure(figsize=(8, 4))
  plt.subplot(121)
  # identity line: points on it correspond to perfectly recovered rates
  plt.plot([0, L_true.max() * 1.05], [0, L_true.max() * 1.05], '--b')
  for i in range(K):
    for c in range(C):
      plt.plot(L_true[c, i], L[c, i], color='C{}'.format(c),
               marker=['o', '*', 'd'][i])  # this line will fail for K > 3
  ax = plt.gca()
  ax.axis('equal')
  plt.xlabel('True firing rate (Hz)')
  plt.ylabel('Inferred firing rate (Hz)')
  xlim, ylim = ax.get_xlim(), ax.get_ylim()
  # Off-screen dummy points, used only to generate legend handles.
  # BUG FIX: the original used `-10^6`, but `^` is bitwise XOR in Python
  # (-10 ^ 6 == -16), not exponentiation; `-10**6` gives the intended
  # far-off-screen coordinate.
  for c in range(C):
    plt.plot([-10**6], [-10**6], 'o', color='C{}'.format(c))
  for i in range(K):
    plt.plot([-10**6], [-10**6], '.', marker=['o', '*', 'd'][i], c="black")
  l = plt.legend(ax.lines[-C - K:],
                 [f'cell {c + 1}' for c in range(C)] + [f'state {i + 1}' for i in range(K)])
  # restore data limits clobbered by the dummy legend points
  ax.set_xlim(xlim), ax.set_ylim(ylim)
  plt.subplot(122)
  # largest off-diagonal transition rate sets the identity-line extent
  ymax = np.max(A_true - np.diag(np.diag(A_true))) / dt * 1.05
  plt.plot([0, ymax], [0, ymax], '--b')
  for j in range(K):
    for i in range(K):
      if i == j: continue  # self-transitions are not plotted
      plt.plot(A_true[i, j] / dt, A[i, j] / dt, 'o')
  ax = plt.gca()
  ax.axis('equal')
  plt.xlabel('True transition rate (Hz)')
  plt.ylabel('Inferred transition rate (Hz)')
  l = plt.legend(ax.lines[1:], ['state 1 -> 2',
                                'state 1 -> 3',
                                'state 2 -> 1',
                                'state 2 -> 3',
                                'state 3 -> 1',
                                'state 3 -> 2'
                                ])
  plt.show(fig)
# -
# #### Model and simulation parameters
# +
# model and data parameters
C = 5  # number of cells
K = 3  # number of states
dt = 0.002  # seconds (bin width)
trial_T = 2.0  # seconds (trial duration)
n_frozen_trials = 20  # used to plot multiple trials with the same state sequence
n_trials = 300  # number of trials (each has its own state sequence)

# for random data
max_firing_rate = 50  # Hz
max_transition_rate = 3  # Hz

# needed to plot LL and ECLL for every M-step
# **This substantially slows things down!!**
num_plot_vals = 10  # resolution of the plot (this is the expensive part)
b_lims = (-1, 2)  # lower limit on graph (b = 0 is start-of-M-step LL; b = 1 is end-of-M-step LL)
plot_epochs = list(range(9))  # list of epochs to plot
# -
# #### Initialize true model
# +
np.random.seed(101)

T = round(trial_T / dt)  # number of time bins per trial
ts = np.arange(T)

# initial state distribution (proportional to 1..K, normalized)
psi = np.arange(1, K + 1)
psi = psi / psi.sum()

# off-diagonal transition rates sampled uniformly
A = np.random.rand(K, K) * max_transition_rate * dt
A = (1. - np.eye(K)) * A  # zero out the diagonal
A = A + np.diag(1 - A.sum(1))  # set diagonal so each row sums to 1

# hand-crafted firing rates make good plots
L = np.array([
  [.02, .8, .37],
  [1., .7, .1],
  [.92, .07, .5],
  [.25, .42, .75],
  [.15, .2, .85]
]) * max_firing_rate  # (C,K)

# Save true parameters for comparison later
psi_true = psi
A_true = A
L_true = L
# -
# #### Generate data with frozen sequence and plot
# Given a state sequence `[0,1,1,3,2,...]`, we'll first convert each state in the sequence into the so-called "one-hot" coding. For example, with 5 total states, the one-hot coding of state `0` is `[1,0,0,0,0]` and the coding for state `3` is `[0,0,0,1,0]`. Suppose we now have a sequence of length `T`, the one-hot coding of this sequence `Xf` will have shape `(T,K)`
# +
np.random.seed(101)

# sample ONE state sequence, shared by all n_frozen_trials trials (tiled below)
Xf = np.zeros(T, dtype=int)
Xf[0] = (psi.cumsum() > np.random.rand()).argmax()  # draw initial state from psi
for t in range(1, T):
  # inverse-CDF sampling of the next state from row Xf[t-1] of A
  Xf[t] = (A[Xf[t - 1],:].cumsum() > np.random.rand()).argmax()

# switch to one-hot encoding of the state
Xf = np.eye(K, dtype=int)[Xf]  # (T,K)

# get the Y values
Rates = np.squeeze(L @ Xf[..., None]) * dt  # (T,C) expected spikes per bin
Rates = np.tile(Rates, [n_frozen_trials, 1, 1])  # (n_trials, T, C)
Yf = stats.poisson(Rates).rvs()  # Poisson spike counts

with plt.xkcd():
  plot_spike_train(Xf, Yf, dt)
# -
# #### Generate data for EM learning
#
# The previous dataset is generated with the same state sequence for visualization. Now let's generate `n_trials` trials of observations, each one with its own randomly generated sequence
# +
np.random.seed(101)

# sample n_trials state sequences (one per trial, all trials in parallel)
X = np.zeros((n_trials, T), dtype=int)
X[:, 0] = (psi_true.cumsum(0)[:, None] > np.random.rand(n_trials)).argmax(0)  # initial states
for t in range(1, T):
  # vectorized inverse-CDF sampling of the next state for every trial
  X[:, t] = (A_true[X[:, t - 1], :].T.cumsum(0) > np.random.rand(n_trials)).argmax(0)

# switch to one-hot encoding of the state
one_hot = np.eye(K)[np.array(X).reshape(-1)]
X = one_hot.reshape(list(X.shape) + [K])

# get the Y values: Poisson counts with per-bin rate set by the active state
Y = stats.poisson(np.squeeze(L_true @ X[..., None]) * dt).rvs()  # (n_trials, T, C)

print("Y has shape: (n_trial={},T={},C={})".format(*Y.shape))
# -
# ---
#
# ## **Optional** Section: EM algorithm for HMM
# + cellView="form"
# @title Video 6: EM Tutorial
# Display the lecture video with YouTube and Bilibili mirrors in two tabs.
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  class BiliVideo(IFrame):
    # Minimal IFrame subclass that embeds the Bilibili player for `id`.
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)
  video = BiliVideo(id="BV1T5411a7F4", width=854, height=480, fs=1)
  print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
  display(video)

out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id="umU4wUWlKvg", width=854, height=480, fs=1, rel=0)
  print('Video available at https://youtube.com/watch?v=' + video.id)
  display(video)

# Tab widget holding the two mirrors of the same video.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
# Finding the optimal values of parameters that maximize the data likelihood is practically infeasible, since we would need to integrate out all latent variables $x_{1:T}$, which takes time exponential in $T$. Thus, as an alternative approach, we use the Expectation-Maximization algorithm, which iteratively performs an E-step followed by an M-step and is guaranteed not to decrease (and usually increases) the data likelihood after each EM cycle.
#
#
# In this section we will briefly review the EM algorithm for HMM and list
#
# * Recursive equations for forward and backward probabilities $a_i(t)$ and $b_i(t)$
# * Expressions for singleton and pairwise marginal distributions after seeing data: $\gamma_{i}(t):=p_{\theta}\left(x_{t}=i | Y_{1: T}\right)$ and $\xi_{i j}(t) = p_{\theta}(x_t=i,x_{t+1}=j|Y_{1:T})$
# * Closed-form solutions for updated values of $A,\psi,\lambda$ which increases data likelihood
#
#
# ---
# #### E-step: Forward-backward algorithm
# In the forward pass, we calculate the **forward probabilities**, or the joint probability of $x_t$ and current and past data $Y_{1:t}$: $a_i(t):=p(x_t=i,Y_{1:t})$ recursively by
#
# $$a_i(t) = p_{\theta}(y_t|x_t=i)\sum_j A_{ji} a_j(t-1)$$
#
# In contrast to the intro, now $A_{ji}$ means **the transition probability from state $j$ to state $i$.**
#
# The backward pass calculates the **backward probabilities** $b_i(t):=p_{\theta}(Y_{t+1:T}|x_t=i)$, which is the likelihood of observing all future data points given the current state $x_t$. The recursion of $b_i(t)$ is given by
#
# $$ b_i(t) = \sum_j p_{\theta}(y_{t+1}|x_{t+1}=j)b_j(t+1)A_{ij} $$
#
# Combining all past and future information, the **singleton and pairwise marginal distributions** are given by
#
# $$ \gamma_{i}(t):=p_{\theta}\left(x_{t}=i | Y_{1: T}\right)=\frac{a_{i}(t) b_{i}(t)}{p_{\theta}\left(Y_{1: T}\right)} $$
#
# $$ \xi_{i j}(t) = p_{\theta}(x_t=i,x_{t+1}=j|Y_{1:T}) =\frac{b_{j}(t+1)p_{\theta}\left(y_{t+1} | x_{t+1}=j\right) A_{i j} a_{i}(t)}{p_{\theta}\left(Y_{1: T}\right)} $$
#
# where $p_{\theta}(Y_{1:T})=\sum_i a_i(T)$.
#
# ---
# #### M-step
#
# The M-step for HMM has a closed-form solution. First the new transition matrix is given by
# $$
# A_{ij} =\frac{\sum_{t=1}^{T-1} \xi_{i j}(t)}{\sum_{t=1}^{T-1} \gamma_{i}(t)}
# $$
#
# which is the expected empirical transition probabilities.
# New initial probabilities and parameters of the emission models are also given by their empirical values given single and pairwise marginal distributions:
#
# $$ \psi_i = \frac{1}{N}\sum_{trials}\gamma_i(1) $$
#
# $$ \lambda_{i}^{c}=\frac{\sum_{t} \gamma_{i}(t) y_{t}^{c}}{\sum_{t} \gamma_{i}(t) d t}$$
# ---
#
#
# ### E-step: forward and backward algorithm
#
# **(Optional)**
#
# In this section you will read through the code for the forward-backward algorithm and understand how to implement the computation efficiently in `numpy` by calculating the recursion for all trials at once.
#
# ---
#
# Let's re-write the forward and backward recursions in a more compact form:
#
# $$ a_i^t = \sum_j A_{ji}o_j^t a_j^{t-1} $$
#
#
# $$b^t_i = \sum_j A_{ij} o_j^{t+1}b_j^{t+1} $$ where $o_j^{t}=p(y_{t}|x_{t}=j)$.
#
#
# Let's take the backward recursion for example. In practice we will handle all trials together since they are independent of each other. After adding a trial index $l$ to the recursion equations, the backward recursion becomes:
#
# $$b^t_{li} = \sum_j A_{ij} o_{lj}^{t+1}b_{lj}^{t+1} $$
#
# What we have in hand are:
# * `A`: matrix of size `(K,K)`
# * `o^{t+1}`: array of size `(N,K)` is the log data likelihood for all trials at a given time
# * `b^{t+1}`: array of size `(N,K)` is the backward probability for all trials at a given time
#
# where `N` stands for the number of trials.
#
# The index size and meaning doesn't match for these three arrays: the index is $i$ for $A$ in the first dimension and is $l$ for $o$ and $b$, so we can't just multiply them together. However, we can do this by viewing vectors $o^{t+1}_{l\cdot}$ and $b^{t+1}_{l\cdot}$ as a matrix with 1 row and re-write the backward equation as:
#
# $$b^t_{li} = \sum_j A_{ij} o_{l1j}^{t+1}b_{l1j}^{t+1} $$
#
# Now we can just multiply these three arrays element-wise and sum over the last dimension.
#
# In `numpy`, we can achieve this by indexing the array with `None` at the location we want to insert a dimension. Take `b` with size `(N,T,K)` for example,`b[:,t,:]` will have shape `(N,K)`, `b[:,t,None,:]` will have shape `(N,1,K)` and `b[:,t,:,None]` will have shape `(N,K,1)`.
#
# So the backward recursion computation can be implemented as
#
# ```python
# b[:,t,:] = (A * o[:,t+1,None,:] * b[:,t+1,None,:]).sum(-1)
# ```
#
# ---
#
# In addition to the trick introduced above, in this exercise we will work in the **log scale** for numerical stability.
#
#
# **Suggestions**
#
# 1. Take a look at the code for the forward recursion and backward recursion.
#
#
#
#
#
def e_step(Y, psi, A, L, dt):
  """Calculate the E-step for the HMM spiking model.

  Runs the forward-backward algorithm in the log domain (using the
  max-trick log-sum-exp for numerical stability), for all trials at once.

  Args:
    Y (numpy 3d array): tensor of recordings, has shape (n_trials, T, C)
    psi (numpy vector): initial probabilities for each state
    A (numpy matrix): transition matrix, A[i,j] represents the prob to
        switch from i to j. Has shape (K,K)
    L (numpy matrix): Poisson rate parameter for different cells.
        Has shape (C,K)
    dt (float): Bin length
  Returns:
    ll (float): data log likelihood, averaged over trials and normalized
        by T and dt (see final return expression)
    gamma (numpy 3d array): singleton marginal distribution.
        Has shape (n_trials, T, K)
    xi (numpy 4d array): pairwise marginal distribution for adjacent
        nodes. Has shape (n_trials, T-1, K, K)
  """
  n_trials = Y.shape[0]
  T = Y.shape[1]
  K = psi.size
  log_a = np.zeros((n_trials, T, K))  # log forward probabilities
  log_b = np.zeros((n_trials, T, K))  # log backward probabilities (b(T) = 1)
  log_A = np.log(A)
  # log likelihood of each bin's spike counts under each state, summed over
  # cells: shape (n_trials, T, K)
  log_obs = stats.poisson(L * dt).logpmf(Y[..., None]).sum(-2)  # n_trials, T, K
  # forward pass
  log_a[:, 0] = log_obs[:, 0] + np.log(psi)
  for t in range(1, T):
    tmp = log_A + log_a[:, t - 1, : ,None]  # (n_trials, K,K)
    maxtmp = tmp.max(-2)  # (n_trials,K)
    # log-sum-exp over the previous state (axis -2), max-trick stabilized
    log_a[:, t] = (log_obs[:, t] + maxtmp +
                   np.log(np.exp(tmp - maxtmp[:, None]).sum(-2)))
  # backward pass
  for t in range(T - 2, -1, -1):
    tmp = log_A + log_b[:, t + 1, None] + log_obs[:, t + 1, None]
    maxtmp = tmp.max(-1)
    # log-sum-exp over the next state (axis -1)
    log_b[:, t] = maxtmp + np.log(np.exp(tmp - maxtmp[..., None]).sum(-1))
  # data log likelihood: log-sum-exp of final forward probabilities per trial
  maxtmp = log_a[:, -1].max(-1)
  ll = np.log(np.exp(log_a[:, -1] - maxtmp[:, None]).sum(-1)) + maxtmp
  # singleton and pairwise marginal distributions
  gamma = np.exp(log_a + log_b - ll[:, None, None])
  xi = np.exp(log_a[:, :-1, :, None] + (log_obs + log_b)[:, 1:, None] +
              log_A - ll[:, None, None, None])
  return ll.mean() / T / dt, gamma, xi
# + cellView="form"
# @title EXERCISE 7: Implement the M-step Video
# Display the lecture video with YouTube and Bilibili mirrors in two tabs.
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  class BiliVideo(IFrame):
    # Minimal IFrame subclass that embeds the Bilibili player for `id`.
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)
  video = BiliVideo(id="BV1kK4y1e7c4", width=854, height=480, fs=1)
  print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
  display(video)

out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id="H4GGTg_9BaE", width=854, height=480, fs=1, rel=0)
  print('Video available at https://youtube.com/watch?v=' + video.id)
  display(video)

# Tab widget holding the two mirrors of the same video.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
# ---
#
# ### EXERCISE 5: Implement the M-step
#
# In this exercise you will complete the M-step for this HMM using closed form solutions mentioned before.
#
# **Suggestions**
#
# 1. Calculate new initial probabilities as empirical counts of singleton marginals
#
# $$ \psi_i = \frac{1}{N}\sum_{trials}\gamma_i(1) $$
#
# 2. Remember the extra trial dimension and average over all trials
#
#
# **For reference:**
#
# New transition matrix is calculated as empirical counts of transition events from marginals
#
# $$ A_{ij} =\frac{\sum_{t=1}^{T-1} \xi_{i j}(t)}{\sum_{t=1}^{T-1} \gamma_{i}(t)}$$
#
#
# New spiking rates for each cell and each state are given by
#
# $$ \lambda_{i}^{c}=\frac{\sum_{t} \gamma_{i}(t) y_{t}^{c}}{\sum_{t} \gamma_{i}(t) d t} $$
#
def m_step(gamma, xi, dt):
  """Calculate the M-step updates for the HMM spiking model.

  Args:
    gamma (numpy 3d array): Singleton marginal distributions from the
        E-step. Has shape (n_trials, T, K).
    xi (numpy 4d array): Pairwise marginal distributions for adjacent
        time bins from the E-step. Has shape (n_trials, T-1, K, K).
    dt (float): Duration of a time bin
  Returns:
    psi_new (numpy vector): Updated initial probabilities for each state
    A_new (numpy matrix): Updated transition matrix, A[i,j] represents the
        prob. to switch from j to i. Has shape (K,K)
    L_new (numpy matrix): Updated Poisson rate parameter for different
        cells. Has shape (C,K)
  """
  # Exercise skeleton: remove this raise once `psi_new` is implemented below.
  raise NotImplementedError("`m_step` need to be implemented")
  ############################################################################
  # Insert your code here to:
  #    Calculate the new prior probabilities in each state at time 0
  #    Hint: Take the first time step and average over all trials
  ###########################################################################
  psi_new = ...
  # Make sure the probabilities are normalized
  psi_new /= psi_new.sum()
  # Calculate new transition matrix
  A_new = xi.sum(axis=(0, 1)) / gamma[:, :-1].sum(axis=(0, 1))[:, np.newaxis]
  # Calculate new firing rates
  # NOTE(review): `Y` is read from the enclosing (module) scope here, not
  # passed as an argument -- confirm this is intentional.
  L_new = (np.swapaxes(Y, -1, -2) @ gamma).sum(axis=0) / gamma.sum(axis=(0, 1)) / dt
  return psi_new, A_new, L_new
# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial2_Solution_ab737584.py)
#
#
# + cellView="form"
# @title Video 8: Running and plotting EM
# Display the lecture video with YouTube and Bilibili mirrors in two tabs.
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  class BiliVideo(IFrame):
    # Minimal IFrame subclass that embeds the Bilibili player for `id`.
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)
  video = BiliVideo(id="BV15k4y1m77q", width=854, height=480, fs=1)
  print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
  display(video)

out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id="6UTsXxE3hG0", width=854, height=480, fs=1, rel=0)
  print('Video available at https://youtube.com/watch?v=' + video.id)
  display(video)

# Tab widget holding the two mirrors of the same video.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
# ---
#
# ### Run EM
#
# ####Initialization for parameters
#
# +
np.random.seed(101)  # reproducible initialization

# number of EM steps
epochs = 9
print_every = 1

# initial state distribution: ramp 1..K, then normalized to sum to 1
psi = np.arange(1, K + 1)
psi = psi / psi.sum()

# off-diagonal transition rates sampled uniformly
A = np.ones((K, K)) * max_transition_rate * dt / 2
A = (1 - np.eye(K)) * A
# set the diagonal so each row sums to 1 (valid stochastic matrix)
A = A + np.diag(1 - A.sum(1))

# firing rates sampled uniformly
L = np.random.rand(C, K) * max_firing_rate
# +
# LL for true vs. initial parameters (e_step returns the log likelihood first)
print(f'LL for true 𝜃: {e_step(Y, psi_true, A_true, L_true, dt)[0]}')
print(f'LL for initial 𝜃: {e_step(Y, psi, A, L, dt)[0]}\n')

# Run EM
save_vals, lls, psi, A, L = run_em(epochs, Y, psi, A, L, dt)
# +
# EM doesn't guarantee that the order of learnt latent states is the same as
# in the true model, so we need to sort the learnt parameters.
# cost_mat[i, j]: squared distance between true state i's and learnt state j's
# firing-rate vectors, summed over cells
cost_mat = np.sum((L_true[..., np.newaxis] - L[:, np.newaxis])**2, axis=0)
# Hungarian algorithm: optimal one-to-one matching of learnt to true states
true_ind, est_ind = linear_sum_assignment(cost_mat)

# Re-order the learnt parameters to align with the true states
psi = psi[est_ind]
A = A[est_ind]       # permute rows
A = A[:, est_ind]    # permute columns
L = L[:, est_ind]
# -
# ---
# ## **Optional** Section: Plotting the training process and learnt model
# ### Plotting progress during EM!
#
# Now you can
#
# * Plot the likelihood during training
# * Plot the M-step log likelihood versus expected complete log likelihood(ECLL) to get an intuition of how EM works and the convexity of ECLL
# * Plot learnt parameters versus true parameters
# Plot the log likelihood after each epoch of EM
# NOTE(review): plot_lls / plot_lls_eclls / plot_learnt_vs_true and
# plot_epochs are notebook helpers defined in earlier cells.
with plt.xkcd():
    plot_lls(lls)

# For each saved epoch, plot the log likelihood and expected complete log likelihood
# for the initial and final parameter values
with plt.xkcd():
    plot_lls_eclls(plot_epochs, save_vals)

# ### Plot learnt parameters vs. true parameters
#
# Now we will plot the (sorted) learnt parameters with true parameters to see if we successfully recovered all the parameters
#
# Compare true and learnt parameters
with plt.xkcd():
    plot_learnt_vs_true(L_true, L, A_true, A, dt)
| tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 4. előadás
# _Tartalom_: Fájlok, flagek (r, w, b, +), szöveges formátumok, csv, matplotlib
# A mai előadás során a `matplotlib` néhány funkcióját vesszük sorra. A `matplotlib` egy python modul, ami egyszerűbb (és összetettebb) grafikonok megjelenítését segítő függvények, osztályok definícióit tartalmazza. Ennek telepítése után a
# ``` python
# import matplotlib.pyplot as plt
# ```
# parancsával importálhatjuk. Ha (mint jelen esetben is) jupyter notebookban és nem felugró ablakként szeretnénk a grafikonokat megjeleníteni akkor szükség lesz a
# ``` python
# # %matplotlib inline
# ```
# parancsra is. Nézzünk pár egyszerű példát.
# +
import matplotlib.pyplot as plt
# render figures inline in the notebook instead of opening a window
# %matplotlib inline

# sample data: shared x values and two series of y values
x0 = [1, 2, 3, 4, 5, 6, 7]
y0 = [9, 8, 2, 6, 5, 4, 3]
y1 = [3, 4, 0, 9, 9, 1, 0]
# -
# simple line plot of y1 over x0
plt.plot(x0, y1)
plt.show()

# same data with circle markers and a dashed line ("o--" format string)
plt.plot(x0, y1, "o--")
plt.show()

# bar chart and red-x line combined on the same axes, with a legend
plt.bar(x0, y0, alpha=0.8, label="Y0 értékei")
plt.plot(x0, y1, "rx-", label="Y1 értékei")
plt.legend()
plt.show()
# Windowsban visszaperrel (backslash) jelöljük az elérési út összefűzését, míg linuxon perrel (slash).
#
# Linux verzió:
# ``` python
# /herno/Desktop/textfile.txt
# ```
#
# Windows verzió:
# ``` python
# \\herno\\Desktop\\textfile.txt
# ```
#
# Hogy ezt az ellenmondást feloldjuk a Pythonban használhatjuk az `os.path.join` függvényt. Például, próbáljuk ki a következő kódot:
# ``` python
# import os
# print(os.path.join("herno", "Desktop", "textfile.txt"))
# ```
#
# ekkor az eredmény linuxon
# ```
# /herno/Desktop/textfile.txt
# ```
# lesz, míg windowson
# ```
# herno\Desktop\textfile.txt
# ```
# vagy
# ```
# "\\herno\\Desktop\\textfile.txt"
# ```
#
# Példa feladatunk telemetriás adatokból indul ki.
# A csatolt `.csv` fájlok táblázatos formában tartalmazzák egyetemünk Szenergy hallgatói csapatának telemetriás adatait többek közt spanyol (Murcia), holland (Rotterdam) és angol (London) versenyekről. A csapat belső szabályzatának értelmében ezek érzékeny adatok, így nincs benne minden mért adat, illetve kisebb mintavételezéssel vannak bent adatok. Ettől függetlenül a `.csv`-k valós versenyeken mért sebességeket és egy egyéb adatokat tartalmaznak. A mért adatok nem ekvidisztánsak, nem tökéletesen tartják a 2 Hz-es (0,5 másodperc) mintavételezést, ennek oka, hogy egy mobil internettel a verseny közben feltöltött adatbázisból származnak.
#
# A `.csv` fájlok nagyjából így néznek ki:
#
# ```
# Time;SpeedRpm;SpeedKmph;GasSignal;LapNumber;MotorCommError; ... AuxiliaryBattery
# 38,45;0;0;0,019043;0;false;false;false;14;0,80563;51,5603;false;1;12,9713
# 41,75;0;0;0,0193481;0;false;false;false;14;0,80563;51,5603;false;1;12,9713
# 42,3;0;0;0,0190735;0;false;false;false;14;0,80563;51,5603;false;1;12,9687
# ...
# ```
#
# |Time|SpeedRpm|SpeedKmph|GasSignal|LapNumber|MotorCommError|...|AuxiliaryBattery|
# |-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|
# |38,45|0|0|0,019043|0|false|...|12,9713|
# |41,75|0|0|0,0193481|0|false|...|12,9713|
# |42,3|0|0|0,0190735|0|false|...|12,9687|
#
# Fontosabb mezők:
# - Time: az idő másodpercben `(float)`
# - SpeedKmph: a sebesség km/h-ban `(float)`
# - LapNumber: az aktuális kör `(int)`
# - BmsVoltage: akkumulátorfeszültség `(float)`
#
# #### Feladat
#
# Írjuk ki 1010 és 1015 másodperc között az idő értékekhez tartozó akkumulátorfeszültséget és sebességet.
# +
import csv
import os
import matplotlib.pyplot as plt
# %matplotlib inline

# OS-independent path to the telemetry CSV
file = os.path.join("data", "szenergy_2014_10_17_16_06_rotterdam.csv")
with open(file, 'rt') as csvfile:
    # the telemetry file is semicolon-separated
    spamreader = csv.DictReader(csvfile, delimiter=';', quotechar='"')
    for row in spamreader:
        # values use a decimal comma, so swap it for a dot before parsing
        if 1010 < float(row['Time'].replace(',','.')) < 1015:
            print("%s s\t%s km/h\t%s v" % (row['Time'], row['SpeedKmph'], row['BmsVoltage']))
# -
# #### Feladat
#
# Rajzoltassuk ki a 2-4. kör sebességegeit km/h-ban.
# Plot the speeds (km/h) for laps 2-4.
file = os.path.join("data", "szenergy_2017_05_25_17_35_london.csv")
#file = os.path.join("data", "szenergy_2014_10_17_16_06_rotterdam.csv")
with open(file, 'rt') as csvfile:
    spamreader = csv.DictReader(csvfile, delimiter=';', quotechar='"')
    xt = []  # time stamps [s]
    ys = []  # speeds [km/h]
    for row in spamreader:
        # Values use a decimal comma. Fix: parse through float() first --
        # int('2.0') raises ValueError, so the previous direct int(...) call
        # would crash on any fractional LapNumber value (which the comma
        # replacement clearly anticipates).
        if 2 <= int(float(row['LapNumber'].replace(',', '.'))) <= 4:
            xt.append(float(row['Time'].replace(',', '.')))
            ys.append(float(row['SpeedKmph'].replace(',', '.')))
plt.plot(xt, ys)
plt.show()
# Ahogy látszik a jármű 20 és 30 km/h közötti sebességgel haladt legtöbbször, lévén a verseny célja az üzemanyag takarékosság, ahol a nem a leggyorsabb, hanem a legkevesebb felhasznált energiával megtett kör a
# feladat.
# ## _Used sources_ / Felhasznált források
# - [Shannon Turner: Python lessons repository](https://github.com/shannonturner/python-lessons) MIT license (c) <NAME> 2013-2014
# - [Siki Zoltán: Python mogyoróhéjban](http://www.agt.bme.hu/gis/python/python_oktato.pdf) GNU FDL license (c) Siki Zoltán
# - [BME AUT](https://github.com/bmeaut) MIT License Copyright (c) BME AUT 2016-2018
| eload/.ipynb_checkpoints/ea04-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="7tUfdOqx4E59" outputId="235d9030-25ba-470e-f127-8585d6512ae9"
#import the dataset
# num_words=10000 keeps only the 10k most frequent words, so the multi-hot
# vectors built below have a fixed dimension of 10000.
from keras.datasets import imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(
    num_words=10000)
# + colab={} colab_type="code" id="XGaw0Yi34kk2"
# Encoding the integer sequences into a binary matrix
import numpy as np
def vectorize_sequences(sequences, dimension=10000):
    """Multi-hot encode each integer sequence into a row of length `dimension`."""
    encoded = np.zeros((len(sequences), dimension))
    for row_idx, word_indices in enumerate(sequences):
        # set every word index that appears in this review to 1.0
        encoded[row_idx, word_indices] = 1.
    return encoded
# Multi-hot encode the reviews: shape (n_samples, 10000)
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
# Vectorize the labels (0/1 as float32 for binary crossentropy)
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
# + colab={} colab_type="code" id="7c64lL9Z6IcB"
from keras import models
from keras import layers

# Define the structure of the model:
# a funnel of dense ReLU layers (64 -> 64 -> 32 -> 16) over the 10000-dim
# multi-hot input, ending in a single unit.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
# use sigmoid for the last layer as we need a binary output.
# + colab={} colab_type="code" id="QT-Pq9XQ6MLO"
# Compile the model, configure the optimizer
# binary_crossentropy matches the single sigmoid output unit
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# + colab={} colab_type="code" id="zOI6NlXf7VSw"
# Split the training set for hyperparameter tuning.
# Here we try to tune the ideal number of epochs to get the best result
# First 10k reviews become the validation set; the rest are used for training.
x_val = x_train[:10000]
partial_x_train = x_train[10000:]
y_val = y_train[:10000]
partial_y_train = y_train[10000:]
# + colab={"base_uri": "https://localhost:8080/", "height": 714} colab_type="code" id="vdBVsV5H7ajJ" outputId="85d867eb-93c5-4a68-8fd7-e3b07a1b1a07"
# Training Phase. Record the accuracy and error/loss for tuning later on
# (20 epochs is deliberately too many -- the curves below reveal overfitting)
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=20,
                    batch_size=512,
                    validation_data=(x_val, y_val))

# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="JMpf6BfM7cG5" outputId="75a39106-7b21-4b8d-ba30-8abbaead268f"
# get the loss and accuracy from history
history_dict = history.history
history_dict.keys()
# + colab={"base_uri": "https://localhost:8080/", "height": 376} colab_type="code" id="-wSRpOfe7pJZ" outputId="47c72509-48f3-4d80-fb46-682d462a1797"
# Plotting the training and validation loss for tuning the number of epochs
import matplotlib.pyplot as plt

history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
# NOTE(review): the 'acc' key is produced by older Keras versions; newer ones
# use 'accuracy' -- confirm against the installed Keras version.
acc = history_dict['acc']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, loss_values, 'bo', label='Training loss')
plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 376} colab_type="code" id="TSuLs3OX704x" outputId="60acd7f3-cc9c-4e01-938b-dc302016ee12"
# Plotting the accuracy curves
plt.clf()  # clear the previous (loss) figure
acc_values = history_dict['acc']
val_acc = history_dict['val_acc']
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
# Fix: this is the accuracy plot, so the y-axis label is 'Accuracy'
# (it previously said 'Loss' -- a copy-paste slip from the loss plot above).
plt.ylabel('Accuracy')
plt.legend()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="w33_PPRg8MNC" outputId="9302c49c-949e-4eb7-9e52-9dd617de8ba7"
# The ideal number of epochs looks to be 3. So retrain the model using all the
# training examples (all of 25k)
# Rebuild the same architecture from scratch so no weights carry over.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=3, batch_size=512)
results = model.evaluate(x_test, y_test)

# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="GuLqu7UL8Uwy" outputId="922da04a-750e-460e-ae05-e81d7f295ff5"
# check the final accuracy and loss
results
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="EJHuJ58K8Y0R" outputId="a2356507-7f1d-4ebc-e1a6-0e3f27f30130"
# make prediction on unseen data now - test data
predictions = model.predict(x_test)
# threshold the sigmoid outputs at 0.5 to get boolean class labels
predictions = predictions >= 0.5
# change the datatype to float from bool
predictions = np.asarray(predictions).astype('float32')
# convert it to array (flatten the (n, 1) column into shape (n,))
predictions = np.reshape(predictions, -1)
# size of test
print(len(y_test))
# size of prediction - should be equal to test
print(len(predictions))
# manual accuracy: number of matching labels over the total
correct_predictions = np.sum(y_test == predictions)
print(correct_predictions)
print(float(correct_predictions) / len(y_test))

# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="2yyJkpyO8cfz" outputId="58ea4c46-851d-45c6-da58-05e023197e96"
model.evaluate(x_test, y_test)
# + colab={} colab_type="code" id="QghVidCzEb8t"
# -
| IMDB.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Automating the creation of a Wardley Map
#
# ## Improvements
#
# - Need to re-scale
# - Need to be able to modify y-axis
#
# ## Original Map (created in draw.io)
#
# The map below took along time to create...it's not actually a map yet - it's just a value chain. I'm totally curious to try out the automated generation of Wardley Maps that Dinis has created so let's see how that goes.
#
# <img width="800" alt="Capture" src="https://user-images.githubusercontent.com/50445147/59312494-36e54b80-8ca5-11e9-9574-565df824a40f.PNG">
#
#
#
# + language="javascript"
# $('.container').width('95%')
# +
from IPython.display import display_html, HTML, Javascript,display
from osbot_aws.apis.Lambda import Lambda

def maps_create(code):
    """Render a Wardley map by sending `code` to the osbot browser Lambda
    and displaying the PNG screenshot it returns."""
    aws_lambda = Lambda('osbot_browser.lambdas.lambda_browser')
    # The lambda expects: ["maps", "exec_js", <code tokens...>]
    # NOTE(review): splitting on single spaces assumes the map DSL tokenizes
    # on whitespace -- confirm against the lambda's parameter handling.
    params = ["maps", "exec_js"]
    params.extend(code.split(' '))
    payload = {"params": params,
               'data': {}}
    png_data = aws_lambda.invoke(payload)
    show_png(png_data)

#maps_create(map)
def show_png(png_data, height=200):
    """Display base64-encoded PNG data as an inline HTML <img>.

    Args:
        png_data: base64-encoded PNG content (string).
        height: image height in pixels.
    """
    # Fix: `height` was silently ignored before -- the style hard-coded
    # 1000px and .format() only consumed png_data as {1}. Callers that
    # relied on the old 1000px rendering should pass height=1000.
    html = '<img style="width:200%;height:{0}px;border:1px solid black" align="left" src="data:image/png;base64,{1}"/>'.format(height, png_data)
    display_html(html, raw=True)
# +
# Wardley map source: add(label, x, y) places a node, link(a, b) connects two
# previously added nodes.
# NOTE(review): the last three link(...) calls reference "knows spell
# encantation" / "formal education" / etc., which are never add()-ed above --
# they look like leftovers from a tutorial example; verify whether they
# should be removed.
map_code = """
add("User need: Impress PBX Group so much so that they offer me a job", 1, 0.1)
add("Security Knowledge", 1, 2)
add("Soft Skills", 3, 2)
add("Technical Skills", 1, 4)
add("Non-technical Skills", 2.5, 4)
add("Qualifications", 4, 4)
add("Creating Technical Documentation", 1, 7)
add("Risk Identification", 2, 6)
add("App Security", 3, 7)
add("Consulting", 4, 7)
link("User need: Impress PBX Group so much so that they offer me a job", "Security Knowledge")
link("User need: Impress PBX Group so much so that they offer me a job", "Soft Skills")
link("Security Knowledge", "Qualifications")
link("Security Knowledge","Non-technical Skills")
link("Security Knowledge","Technical Skills")
link("Technical Skills","Creating Technical Documentation")
link("Technical Skills","App Security")
link("Technical Skills","Risk Identification")
link("Technical Skills","Consulting")
link("knows spell encantation","formal education")
link("knows spell encantation","word of mouth spell")
link("knows spell encantation","dark magic")
"""
maps_create(map_code)
# -
| notebooks/users/Lauren/Automating the creation of a Wardley Map.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# # How To: Provisioning Data Science Virtual Machine (DSVM)
#
# __Notebook Version:__ 1.0<br>
# __Python Version:__ Python 3.6<br>
# __Platforms Supported:__<br>
# - Azure Notebooks Free Compute
# __Data Source Required:__<br>
# - no
#
# ### Description
# The sample notebook shows how to provision a Azure DSVM as an alternate computing resource for hosting Azure Notebooks.
#
# Azure Notebooks provides Free Compute as the default computing resource, which is free of charge. However, sometimes you do want to have a powerful computing environment, and you don't want to go through Direct Compute route which requires JupyterHub installation on Linux machines, then Data Science Virtual Machine (DSVM) becomes a vital choice.
#
# You may reference <a href='https://docs.microsoft.com/en-us/azure/notebooks/configure-manage-azure-notebooks-projects' target='_blank'>this article</a> for details. In a nutshell, you need to select Linux VM with Ubuntu flavor. And keep in mind that on Azure DSVM, if you want to use Python 3.6 which is required by Azure Sentinel notebooks, you need to <font color=red> select Python 3.6 - AzureML.</font>
# ## Table of Contents
#
# 1. How to create a new DSVM
# 2. How to use DSVM
# 3. Things to know about using DSVM
# ## 1. How to create a new DSVM
#
# 0. First, please read <a href='https://docs.microsoft.com/en-us/azure/machine-learning/data-science-virtual-machine/dsvm-ubuntu-intro' target='_blank'>this article</a> for details
# 1. Go to Azure portal
# 2. Search for Data Science Virtual Machine under All Services<br>
# <br>
# 3. Select DSVM for Linux (Ubuntu), read the introduction, click Create button. On the following page shown below, following the instruction to complete the form. You need to use the same Azure subscription that you are using for your Azure Sentinel and Azure Log Analytics<br>
# <br>
# 4. Once a DSVM created, make sure you keep SSH public key and password in a safe place.
# 5. If you want to remote into the VM using SSH, you can add inbound port rule for port 22.
# ## 2. How to use DSVM
#
# 1. Now that you have a DSVM, when you login to https://notebooks.azure.com, you can see your DSVM on the drop down list under Free Compute and Direct Compute.<br>
# <br>
# 2. Of course you will select DSVM, it will ask you to validate your JIT credentials.<br>
# <br>
# 3. Once you pick a notebook to run, you may encounter the following warning:<br>
# <br>
# As you may see, [Python 3.6 - AzureML] is the correct answer.
#
# ## 3. Things to know about using DSVM
#
# 1. The most important thing to know about Azure Notebooks on DSVM is that: Azure Notebooks project home directory is not mounted on the DSVM. So any references to Azure Notebooks folders / files will incur File/folder not found exception. In other words, each ipynb notebook need to be independent of other files.
# 2. There are work-around solutions:<br>
# a. Data files can be stored on Azure Blob storage and <a href='https://github.com/Azure/azure-storage-fuse' target='_blank'>blobfuse</a><br>
# b. Python files can be added to the notebook by using the Jupyter magic, you can find an example here: <a href='https://github.com/Microsoft/connect-petdetector/blob/master/setup.ipynb' target='_blank'>%%writefile</a><br>
# c. Configuration files are a bit more complicated. Using our Azure Sentinel config.json as an example, it is generated when you import Azure Sentinel Jupyter project from GitHub repo through Azure portal. The configuration JSON is Azure Log Analytics workspace specific file, so you clone one project for one Log Analytics workspace. You can find the config.json file at the root of the project home directory. <a href='https://orion-zhaozp.notebooks.azure.com/j/notebooks/Notebooks/Get%20Start.ipynb' target='_blank'>Get Start.ipynb</a> section 1 demonstrates how to set the configuration settings manually.
| Notebooks/HowTos/ProvisioningDSVM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 314} colab_type="code" id="RkdPOxEdFFjG" outputId="d917e328-db37-4dc9-f8d0-80b54a6adec1"
# !pip install basilica
# -
# # Recommend Five strains based on strain, effects, and flavors proposed by app user.
#
# Documents: are constructed by string representations of **Strain**, **Effects**, and **Flavors**.
#
# - Extract usable features from high-dimensional data via Basilica
# - Train our embeddings on data points
# - The docs are vectorized
# - A function is defined to take a text, searches the space, and returns three closest matches.
#
# + colab={} colab_type="code" id="_lZkWPV7Fj5_"
""" Import Statements:
"""
# Classics
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import json
import os
import random
import re
# Base
import basilica
# Pipeline
import time
import sklearn.decomposition
import sklearn.neighbors
import sklearn.preprocessing
from sklearn.pipeline import Pipeline
from sklearn.neighbors import NearestNeighbors
# -
# ## Connection.
# + colab={"base_uri": "https://localhost:8080/", "height": 55} colab_type="code" id="B5Kt6M9OFTLb" outputId="7057377d-622d-4486-9857-bea9166a5cb3"
# Smoke-test the Basilica API: embed three sentences and print the vectors.
sentences = [
    "This is a sentence!",
    "This is a similar sentence!",
    "I don't think this sentence is very similar at all...",
]
# NOTE(review): 'Hidden Key' is a redacted placeholder -- a real API key is
# required for this cell to run.
with basilica.Connection('Hidden Key') as c:
    embeddings = list(c.embed_sentences(sentences))
print(embeddings)
# -
# ## Import Data.
# + colab={"base_uri": "https://localhost:8080/", "height": 72, "resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY> "headers": [["content-type", "application/javascript"]], "ok": true, "status": 200, "status_text": ""}}} colab_type="code" id="IXSsdaU2FWqC" outputId="63fe40df-87d6-4f25-921b-ec2b5e88853d"
from google.colab import files
# interactive file-upload widget (only works inside Google Colab)
uploaded = files.upload()
# -
# ## Exploratory Data Analysis.
# + colab={"base_uri": "https://localhost:8080/", "height": 302} colab_type="code" id="3btXBFo6FeOB" outputId="08d42e73-4bc0-482c-8373-61f4e3a9574e"
# Load the strain dataset and fill missing values with the 'none' placeholder.
# NOTE(review): this absolute local path contradicts the Colab upload cell
# above -- confirm which data source is actually intended.
df = pd.read_csv('/Users/jorge/Med-Cabinet-2/Data/cannabis.csv')
df = df.fillna('none')
print(df.shape)
df.head()
# -
# ## Limit Search and Results to Quality Strains.
# + colab={} colab_type="code" id="JwmXx4tyFvZB"
# Keep only well-rated strains (rating >= 4.0), then blank out missing values.
# Fix: the filtered frame was previously overwritten by a second assignment
# built from the unfiltered `df`, which silently discarded the rating filter.
good_stuff = df[df['Rating'] >= 4.0].replace(np.nan, '', regex=True)
# -
# ## A function to Standardize our sting input:
# + colab={} colab_type="code" id="kLXR0JM6F4TW"
def clean_string(strng):
    """Normalize a label string for matching.

    Replaces commas and both parentheses with spaces, then lowercases.
    (The original inline comments mislabeled the ')' replacement as '(',
    and the function had no docstring.)

    Args:
        strng (str): raw label text.

    Returns:
        str: cleaned, lowercased text.
    """
    # single C-level pass instead of three chained .replace() calls
    return strng.translate(str.maketrans(",()", "   ")).lower()
# -
# ## Combine labels into one string
# + colab={"base_uri": "https://localhost:8080/", "height": 285} colab_type="code" id="tCWv5lt6F_KF" outputId="ffca8905-a768-40f4-f157-8bddce6e8758"
# replace blank flavor with ""
good_stuff = df.replace(np.nan, '', regex=True)
# cols = good_stuff.columns
# Normalize the text columns, then concatenate three of them into one
# searchable document string per strain.
cols = ['Type', 'Effects', 'Flavor', 'Description']
for col in cols:
    good_stuff[col] = good_stuff[col].apply(clean_string)
good_stuff['text'] = good_stuff['Type'] + " " + good_stuff['Effects'] + " " + good_stuff['Flavor']
good_stuff.head()
# -
# ## Build a list of docs to use fitting our model.
# + colab={} colab_type="code" id="8o0w6TOOGBEP"
def gather_docs(df):
    """Produce the list of documents from a dataframe.

    Args:
        df: a Pandas dataframe that has the column 'text'.

    Returns:
        A list of strings, one per row.
    """
    return df['text'].tolist()
# + colab={"base_uri": "https://localhost:8080/", "height": 104} colab_type="code" id="Ujc6yrIFGC47" outputId="16be5648-f5cd-4c5d-ae39-e259cf273dc5"
# Build the document list and peek at the first few entries
docs = gather_docs(good_stuff)
docs[:5]
# -
# ## Testing Basilica
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="MpzUkwuqGEkl" outputId="d47046e9-4082-4306-9e65-345c4f33c8d2"
# Embed every document, then cache the vectors on disk so the (slow, paid)
# API call does not need to be repeated.
with basilica.Connection('Hidden Key') as c:
    embeddings = list(c.embed_sentences(docs))
type(embeddings)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="-ddMOCcrGLkT" outputId="5714a248-a605-4cc3-fe71-4022d57fee7f"
len(embeddings)
# + colab={} colab_type="code" id="RCT0S9nlHVKk"
with open('embeddings.txt', 'wb') as f:
    np.save(f, embeddings, allow_pickle=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="iBsSCoerHYxa" outputId="1151484e-1a45-4b21-a465-96c8ba985529"
# reload to verify the round-trip
with open('embeddings.txt', 'rb') as f:
    embeddings2 = np.load(f)
len(embeddings2)
# -
# ## Instandiate and fit nearest neighbors learner.
# + colab={} colab_type="code" id="cL_R9RL9Ha0y"
# Fit a 3-NN index over the document embeddings.
nbrs = NearestNeighbors(n_neighbors=3, algorithm='ball_tree').fit(embeddings)
# + colab={} colab_type="code" id="Zmtno7p0Hdb8"
# example query document: type + effects + flavors in one string
random_text = ['hybrid creative energetic tingly euphoric relaxed earthy sweet citrus']
# + colab={} colab_type="code" id="JTYQ9L4KHhmf"
#### having problems here!!!
# NOTE(review): `random_text` is a list, but embed_sentence takes a single
# string -- the reported problem is likely fixed by passing random_text[0]
# or by using embed_sentences; confirm against the Basilica client docs.
with basilica.Connection('Hidden Key') as c:
    random_text_coords = list(c.embed_sentence(random_text))
# random_text_coords
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="Cu3TTv7PJMCv" outputId="6995c9b1-4a06-4ed3-e951-b3c72054902e"
df.columns
# + colab={} colab_type="code" id="R7SOT1K9Hl6p"
### Test Part ii using basilica
# combine all text features into one string:
df['combined_text'] = df.Strain + " " + df.Type + " " + df.Effects + " " + df.Flavor + " " + df.Description
# Removing punctuations from our string.
# Fix: pass regex=True explicitly (and use a raw string) -- pandas >= 2.0
# treats str.replace patterns literally by default, which would leave the
# punctuation untouched.
df["combined_text"] = df['combined_text'].str.replace(r'[^\w\s]', ' ', regex=True)
# 'none' placeholders are plain substrings, not patterns
df["combined_text"] = df['combined_text'].str.replace('none', '', regex=False)
# + colab={} colab_type="code" id="CMa-qEn2Iqqa"
API_KEY = 'Hidden Key'
# Embed every strain's combined text -- one API call per row (slow but simple)
with basilica.Connection(API_KEY) as c:
    embedded = []
    for row in df['combined_text']:
        sentence = row
        embedding = list(c.embed_sentence(sentence))
        embedded.append(embedding)
# attach the embedding vectors as a new column
df['embedded'] = embedded
# + colab={"base_uri": "https://localhost:8080/", "height": 544} colab_type="code" id="JrG_AoFyKL4L" outputId="1e69ea20-b7db-4832-cb1d-fb5a71f81f7c"
df.head()
# -
# ## Saving new embedded dataframe .csv
# + colab={} colab_type="code" id="2wIU8tEtKHvR"
# persist the embeddings alongside the strain data
df.to_csv('embedded_df.csv', index=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="C0R41-TCKK1P" outputId="a17f1ae5-5798-497b-d7db-3c3ada0b40c5"
# Stack the per-row embedding lists into one matrix, then
# center -> reduce to 75 whitened PCA components -> L2-normalize.
data_input = np.stack(df['embedded'].values, axis=0)
scaler = sklearn.preprocessing.StandardScaler(with_std=False)
pca = sklearn.decomposition.PCA(n_components=75, whiten=True)
data_input = scaler.fit_transform(data_input)
data_input = pca.fit_transform(data_input)
data_input = sklearn.preprocessing.normalize(data_input)
print(data_input.shape)
dtm = pd.DataFrame(data_input)
# Fit on DTM: 5 nearest neighbours in the reduced embedding space
nn3 = NearestNeighbors(n_neighbors=5, algorithm='ball_tree').fit(dtm)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="peExlL8YKEPU" outputId="19849e0a-7e8f-446d-b348-d3c43bce1546"
# Embed a free-text user request and run it through the SAME
# scaler -> PCA -> normalize transform that was fit on the training data.
user_input = "I need something to help with my back pain and has a sweet flavor"
with basilica.Connection(API_KEY) as c:
    embedded = c.embed_sentence(user_input)
embedded = np.stack([embedded], axis=0)
user_input = scaler.transform(embedded)
user_input = pca.transform(user_input)
user_input = sklearn.preprocessing.normalize(user_input)
# distances and row indices of the 5 nearest strains
score, strain_index = nn3.kneighbors(user_input)
print(score, strain_index)
# + colab={"base_uri": "https://localhost:8080/", "height": 155} colab_type="code" id="e2R4CjekKkB-" outputId="e51d7c3c-0068-4f38-b383-3239220f2c76"
# look up the matched strains (strain_index is a 1 x k array, hence the loop)
strains = [df[['Strain', 'Flavor', 'Effects']].loc[n] for n in strain_index]
print(strains)
# + colab={} colab_type="code" id="BsukBi10O8Co"
| Med-Cabinet-2/DS-ML-Engineering-/Notebooks/Basilica_test_1.2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# ## Section 4 Homework - Fill in the Blanks
# Import the packages needed to perform the analysis
import numpy as np
import matplotlib.pyplot as plt
# Load the data provided for the exercise
# +
# Raw per-player data; each list holds one value per season, aligned with
# the Seasons list below (2005..2014).
#Seasons
Seasons = ["2005","2006","2007","2008","2009","2010","2011","2012","2013","2014"]
#Players
Players = ["KobeBryant","JoeJohnson","LeBronJames","CarmeloAnthony","DwightHoward","ChrisBosh","ChrisPaul","KevinDurant","DerrickRose","DwayneWade"]
#Free Throws (made)
KobeBryant_FT = [696,667,623,483,439,483,381,525,18,196]
JoeJohnson_FT = [261,235,316,299,220,195,158,132,159,141]
LeBronJames_FT = [601,489,549,594,593,503,387,403,439,375]
CarmeloAnthony_FT = [573,459,464,371,508,507,295,425,459,189]
DwightHoward_FT = [356,390,529,504,483,546,281,355,349,143]
ChrisBosh_FT = [474,463,472,504,470,384,229,241,223,179]
ChrisPaul_FT = [394,292,332,455,161,337,260,286,295,289]
KevinDurant_FT = [209,209,391,452,756,594,431,679,703,146]
DerrickRose_FT = [146,146,146,197,259,476,194,0,27,152]
DwayneWade_FT = [629,432,354,590,534,494,235,308,189,284]
#Free Throw Attempts
KobeBryant_FTA = [819,768,742,564,541,583,451,626,21,241]
JoeJohnson_FTA = [330,314,379,362,269,243,186,161,195,176]
LeBronJames_FTA = [814,701,771,762,773,663,502,535,585,528]
CarmeloAnthony_FTA = [709,568,590,468,612,605,367,512,541,237]
DwightHoward_FTA = [598,666,897,849,816,916,572,721,638,271]
ChrisBosh_FTA = [581,590,559,617,590,471,279,302,272,232]
ChrisPaul_FTA = [465,357,390,524,190,384,302,323,345,321]
KevinDurant_FTA = [256,256,448,524,840,675,501,750,805,171]
DerrickRose_FTA = [205,205,205,250,338,555,239,0,32,187]
DwayneWade_FTA = [803,535,467,771,702,652,297,425,258,370]
# +
# Create the dicts representing Seasons and Players
# (label -> row/column index into the matrices built below)
# Seasons
Sdict = {"2005":0,"2006":1,"2007":2,"2008":3,"2009":4,"2010":5,"2011":6,"2012":7,"2013":8,"2014":9}
# Players
Pdict = {"KobeBryant":0,"JoeJohnson":1,"LeBronJames":2,"CarmeloAnthony":3,"DwightHoward":4,"ChrisBosh":5,"ChrisPaul":6,"KevinDurant":7,"DerrickRose":8,"DwayneWade":9}
# +
# Matrix for Free Throws
# Bind the given vectors to form the matrix.
# Fill-in: np.array stacks the per-player lists into a players x seasons
# matrix (same pattern as the Games matrix later in this notebook).
FreeThrows = np.array([KobeBryant_FT, JoeJohnson_FT, LeBronJames_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, ChrisPaul_FT, KevinDurant_FT, DerrickRose_FT, DwayneWade_FT])

# Remove vectors - we don't need them anymore
del (KobeBryant_FT, JoeJohnson_FT, LeBronJames_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, ChrisPaul_FT, KevinDurant_FT, DerrickRose_FT, DwayneWade_FT)

# Matrix for Free Throws Attempts
FreeThrowAttempts = np.array([KobeBryant_FTA, JoeJohnson_FTA, LeBronJames_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, ChrisPaul_FTA, KevinDurant_FTA, DerrickRose_FTA, DwayneWade_FTA])

# Remove vectors - we don't need them anymore
del (KobeBryant_FTA, JoeJohnson_FTA, LeBronJames_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, ChrisPaul_FTA, KevinDurant_FTA, DerrickRose_FTA, DwayneWade_FTA)
# -
# Check the matrixes
# (bare expression -- Jupyter echoes the value below the cell)
FreeThrows
# Check the matrixes
FreeThrowAttempts
# Create the plotting function
def myplot(data, playerlist = Players):
    """Plot one line per player across the ten seasons.

    Args:
        data: players x seasons matrix; rows are looked up through Pdict.
        playerlist: subset of player names to draw (defaults to everyone).
    """
    # fixed color/marker per player so the charts stay comparable across cells
    Col = {"KobeBryant":"Black","JoeJohnson":"Red","LeBronJames":"Green","CarmeloAnthony":"Blue","DwightHoward":"Magenta","ChrisBosh":"Black","ChrisPaul":"Red","KevinDurant":"Green","DerrickRose":"Blue","DwayneWade":"Magenta"}
    Mrk = {"KobeBryant":"s","JoeJohnson":"o","LeBronJames":"^","CarmeloAnthony":"D","DwightHoward":"s","ChrisBosh":"o","ChrisPaul":"^","KevinDurant":"D","DerrickRose":"s","DwayneWade":"o"}
    for name in playerlist:
        plt.plot(data[Pdict[name]],c=Col[name],ls='--',marker=Mrk[name],ms='7',label=name)
    plt.legend(loc='upper left',bbox_to_anchor=(1,1))
    plt.xticks(list(range(0,10)),Seasons,rotation='vertical')
    plt.show()
#Visualize the new matrices
# Fill-in: the blank is the plotting helper defined above.
myplot(FreeThrows)
#Visualize the new matrices
myplot(FreeThrowAttempts)
# ### Part 1
# +
#Free Throw Attempts Per Game
#(You will need the Games matrix)
#Games played per player per season, aligned with Seasons
KobeBryant_G = [80,77,82,82,73,82,58,78,6,35]
JoeJohnson_G = [82,57,82,79,76,72,60,72,79,80]
LeBronJames_G = [79,78,75,81,76,79,62,76,77,69]
CarmeloAnthony_G = [80,65,77,66,69,77,55,67,77,40]
DwightHoward_G = [82,82,82,79,82,78,54,76,71,41]
ChrisBosh_G = [70,69,67,77,70,77,57,74,79,44]
ChrisPaul_G = [78,64,80,78,45,80,60,70,62,82]
KevinDurant_G = [35,35,80,74,82,78,66,81,81,27]
DerrickRose_G = [40,40,40,81,78,81,39,0,10,51]
DwayneWade_G = [75,51,51,79,77,76,49,69,54,62]
#Matrix (players x seasons)
Games = np.array([KobeBryant_G, JoeJohnson_G, LeBronJames_G, CarmeloAnthony_G, DwightHoward_G, ChrisBosh_G, ChrisPaul_G, KevinDurant_G, DerrickRose_G, DwayneWade_G])
# -
# FILLED BLANK: free throw attempts per game = attempts divided (element-wise)
# by games played.
myplot(FreeThrowAttempts/Games)
# Notice how <NAME> gets few attempts per game
# ### Part 2
# Free Throw Accuracy
# FILLED BLANK: accuracy = free throws made divided by free throws attempted.
myplot(FreeThrows/FreeThrowAttempts)
# And yet <NAME>'s accuracy is one of the highest. Chances are his team would get more points if he had more FTAs.
#
# Also notice that Dwight Howard's FT Accuracy is extremely poor compared to other players. If you recall, Dwight Howard's Field Goal Accuracy was exceptional:
# +
#Field Goals
# Field goals made per season, one vector per player (row order matches Pdict).
KobeBryant_FG = [978,813,775,800,716,740,574,738,31,266]
JoeJohnson_FG = [632,536,647,620,635,514,423,445,462,446]
LeBronJames_FG = [875,772,794,789,768,758,621,765,767,624]
CarmeloAnthony_FG = [756,691,728,535,688,684,441,669,743,358]
DwightHoward_FG = [468,526,583,560,510,619,416,470,473,251]
ChrisBosh_FG = [549,543,507,615,600,524,393,485,492,343]
ChrisPaul_FG = [407,381,630,631,314,430,425,412,406,568]
KevinDurant_FG = [306,306,587,661,794,711,643,731,849,238]
DerrickRose_FG = [208,208,208,574,672,711,302,0,58,338]
DwayneWade_FG = [699,472,439,854,719,692,416,569,415,509]
#Matrix
FieldGoals = np.array([KobeBryant_FG, JoeJohnson_FG, LeBronJames_FG, CarmeloAnthony_FG, DwightHoward_FG, ChrisBosh_FG, ChrisPaul_FG, KevinDurant_FG, DerrickRose_FG, DwayneWade_FG])
#Field Goal Attempts
KobeBryant_FGA = [2173,1757,1690,1712,1569,1639,1336,1595,73,713]
JoeJohnson_FGA = [1395,1139,1497,1420,1386,1161,931,1052,1018,1025]
LeBronJames_FGA = [1823,1621,1642,1613,1528,1485,1169,1354,1353,1279]
CarmeloAnthony_FGA = [1572,1453,1481,1207,1502,1503,1025,1489,1643,806]
DwightHoward_FGA = [881,873,974,979,834,1044,726,813,800,423]
ChrisBosh_FGA = [1087,1094,1027,1263,1158,1056,807,907,953,745]
ChrisPaul_FGA = [947,871,1291,1255,637,928,890,856,870,1170]
KevinDurant_FGA = [647,647,1366,1390,1668,1538,1297,1433,1688,467]
DerrickRose_FGA = [436,436,436,1208,1373,1597,695,0,164,835]
DwayneWade_FGA = [1413,962,937,1739,1511,1384,837,1093,761,1084]
#Matrix
FieldGoalAttempts = np.array([KobeBryant_FGA, JoeJohnson_FGA, LeBronJames_FGA, CarmeloAnthony_FGA, DwightHoward_FGA, ChrisBosh_FGA, ChrisPaul_FGA, KevinDurant_FGA, DerrickRose_FGA, DwayneWade_FGA])
# Field goal accuracy = makes / attempts, element-wise.
myplot(FieldGoals/FieldGoalAttempts)
# -
# How could this be? Why is there such a drastic difference?
#
# We will see just now...
# ### Part 3
# +
#Points
# Total points scored per season, one vector per player (row order matches Pdict).
KobeBryant_PTS = [2832,2430,2323,2201,1970,2078,1616,2133,83,782]
JoeJohnson_PTS = [1653,1426,1779,1688,1619,1312,1129,1170,1245,1154]
LeBronJames_PTS = [2478,2132,2250,2304,2258,2111,1683,2036,2089,1743]
CarmeloAnthony_PTS = [2122,1881,1978,1504,1943,1970,1245,1920,2112,966]
DwightHoward_PTS = [1292,1443,1695,1624,1503,1784,1113,1296,1297,646]
ChrisBosh_PTS = [1572,1561,1496,1746,1678,1438,1025,1232,1281,928]
ChrisPaul_PTS = [1258,1104,1684,1781,841,1268,1189,1186,1185,1564]
KevinDurant_PTS = [903,903,1624,1871,2472,2161,1850,2280,2593,686]
DerrickRose_PTS = [597,597,597,1361,1619,2026,852,0,159,904]
DwayneWade_PTS = [2040,1397,1254,2386,2045,1941,1082,1463,1028,1331]
#Matrix
Points = np.array([KobeBryant_PTS, JoeJohnson_PTS, LeBronJames_PTS, CarmeloAnthony_PTS, DwightHoward_PTS, ChrisBosh_PTS, ChrisPaul_PTS, KevinDurant_PTS, DerrickRose_PTS, DwayneWade_PTS])
# -
# Player Style Patterns Excluding Free Throws
# FILLED BLANK: (points - free-throw points) / field goals made gives the
# average value of a field goal (always between 2 and 3 points), i.e. the
# player's 2-point vs 3-point shooting style — see the explanation below.
myplot((Points-FreeThrows)/FieldGoals)
# Because we have excluded free throws, this plot now shows us
# the true representation of player style change. We can verify
# that this is the case because all the marks without exception
# on this plot are between 2 and 3. That is because Field Goals
# can only be for either 2 points or 3 points.
#
# Insights:
# 1. You can see how players' preference for 2 or 3 point shots
# changes throughout their career. We can see that almost all
# players in this dataset experiment with their style throughout
# their careers. Perhaps, the most drastic change in style has
# been experienced by <NAME>.
#
# 2. There is one exception. You can see that one player has not
# changed his style at all - almost always scoring only 2-pointers.
# Who is this mystery player? It's <NAME>!
# Now that explains a lot. The reason that Dwight Howard's
# Field Goal accuracy is so good is because he almost always
# scores 2-pointers only. That means he can be close to the basket
# or even in contact with it. Free throws, on the other hand require
# the player to stand 15ft (4.57m) away from the hoop. That's
# probably why Dwight Howard's Free Throw Accuracy is poor.
| section4-basketball/Section_4_Homework_Fill_in_the_Blanks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/google/applied-machine-learning-intensive/blob/master/content/00_prerequisites/01_intermediate_python/01-exceptions.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="copyright"
# #### Copyright 2019 Google LLC.
# + colab={} colab_type="code" id="h8rAl_sPizbx"
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="zXUyqihI0cQ8"
# # Intermediate Python - Exceptions
# + [markdown] colab_type="text" id="u_ff6_8Yi6yV"
# In this colab, we will move into a more advanced concept called exceptions. You'll learn how to handle pre-built exceptions and how to build your own exceptions.
# + [markdown] colab_type="text" id="kRCwiJVGO8SW"
# ## Exceptions
# + [markdown] colab_type="text" id="mjefp6Xi7JGY"
# Inevitably in any coding language, things will go wrong. Data might be of the wrong type, memory might run out, an object that you try to iterate on might be non-iterable, the list goes on and on.
#
# Exceptions are a way to handle these cases, and tell you where you went wrong. Below is an example of an exception when you try to divide by zero.
# + colab={} colab_type="code" id="zNM0AH847Iu1"
# Division by zero is undefined: this line intentionally raises ZeroDivisionError.
1 / 0
# + [markdown] colab_type="text" id="AcPX9OEG7_Hc"
# Dividing by zero is undefined in mathematics. Whenever you try to divide by zero in Python, you will get the `ZeroDivisionError` exception.
#
# In practice, you'd likely never hard-code a zero as a denominator. However, you might have two computed variables that you want to calculate the ratio of.
# + colab={} colab_type="code" id="3_ivi-549K3T"
my_array = [2, 3, 4]
your_array = []
# your_array is empty, so this division raises ZeroDivisionError (intentional demo).
ratio = len(my_array) / len(your_array)
# + [markdown] colab_type="text" id="Ax4rAeYt9jQi"
# There are a few ways to handle this scenario. One is defensive programming, where you check if the denominator is zero using an `if` statement. When you change the number of entries in `your_array`, you will see the output of the cell change.
# + colab={} colab_type="code" id="nFw0-hIJ9ohH"
# Defensive ("look before you leap") version: check the denominator first.
my_array = [2, 3, 4]
your_array = []
ratio = 0
if len(your_array) != 0:
    ratio = len(my_array) / len(your_array)
else:
    print("Couldn't calculate ratio, denominator is zero")
# + [markdown] colab_type="text" id="kTYwCr7I956S"
# Another option is to allow an exception to be thrown, but then catch the exception. You can do this using the `try` keyword, which tries to complete any code within the block, unless an exception matching the `except` keyword is thrown.
# + colab={} colab_type="code" id="IFXloMvr95kI"
# EAFP version: attempt the division and catch the specific failure.
my_array = [2, 3, 4]
your_array = []
ratio = 0
try:
    ratio = len(my_array) / len(your_array)
except ZeroDivisionError:
    print("Couldn't calculate ratio, denominator is zero")
# + [markdown] colab_type="text" id="OzKm4yf9-Zt2"
# In the example above we caught the `ZeroDivisionError`. This code block could have been written to catch any exception by leaving out the error name.
# + colab={} colab_type="code" id="PsPsjywS-qZC"
# Same as before, but a bare `except` catches *every* exception type
# (intentionally broad here — the drawbacks are discussed just below).
my_array = [2, 3, 4]
your_array = []
ratio = 0
try:
    ratio = len(my_array) / len(your_array)
except:
    print("Couldn't calculate ratio, some error occurred")
# + [markdown] colab_type="text" id="jUyR0kkM-wy0"
# Catching every possible exception in the `except` block is easy, but can be problematic because you can hide bigger problems in your program. Typically it is best to catch and handle specific errors only.
#
# If an exception is thrown and not handled with an `except`, it terminates your program. In some cases, this is what you want to happen. For instance, if the program is out of memory, there isn't much you can do at the moment to handle the problem.
#
# There are varying opinions on whether it is better practice to prevent or handle exceptions. In the example above, is it best to check if a value is zero before dividing by it, or is it best to wrap division in a `try`/`except` block?
#
# In general, using exceptions for control flow is probably not a good idea. As the name suggests, exceptions should be used for "exceptional" cases - things that you don't expect.
#
# Let's look at some other common exceptions you'll see.
# + [markdown] colab_type="text" id="NDaQ6OMLBMqF"
# You'll get a `KeyError` if you try to access an element in a dictionary with square braces and the key doesn't exist.
# + colab={} colab_type="code" id="Dky7cLlXBEwQ"
my_dict = {
    "a": 1234
}
# Key "b" was never inserted, so this lookup raises KeyError (intentional demo).
my_dict["b"]
# + [markdown] colab_type="text" id="CH6fqWSYBdkL"
# You'll get an `IndexError` if you try to access an index in a string, list, or tuple and that index doesn't exist.
# + colab={} colab_type="code" id="HVEG7IbCBY-4"
my_array = [1, 2, 3, 4]
# Index 56 is out of bounds for a 4-element list: raises IndexError (intentional demo).
my_array[56]
# + [markdown] colab_type="text" id="ZstGPJOjCAVg"
# The comprehensive list of built-in exceptions can be found in the [official Python documentation](https://docs.python.org/3/library/exceptions.html). Built-in exceptions are core exceptions provided by Python.
# + [markdown] colab_type="text" id="BDqieWwCCOER"
# #### Creating Your Own Exceptions
#
# To create your own error, you simply need to create a class that inherits from the built-in `Exception` class and then `raise` an instance of that class.
# + colab={} colab_type="code" id="iFCWl9KfDCEL"
# A custom exception only needs to inherit from Exception; raising it here
# (uncaught) is the demonstration.
class MyVeryOwnError(Exception):
    pass
raise MyVeryOwnError
# + [markdown] colab_type="text" id="uNmC8ligDnAF"
# You can then use your error just like any system error. The custom exception is raised in `my_func` if the input is zero. When you change the value of the input to `my_func` in the `try` block, it changes whether the exception is thrown.
# + colab={} colab_type="code" id="Y7RVjfO_DrQP"
# Custom exception used below to signal an invalid (zero) input.
class MyVeryOwnError(Exception):
    pass
def my_func(x):
    # Raise for zero input; otherwise echo the value back.
    if x == 0:
        raise MyVeryOwnError
    else:
        return x
try:
    # my_func(0) raises, so control jumps to the except branch below.
    print(my_func(0))
except MyVeryOwnError:
    print("Handling my custom exception")
# + [markdown] colab_type="text" id="GnavZP1tFWEZ"
# # Exercises
# + [markdown] colab_type="text" id="l6FbU_oSPKwk"
# ## Exercise 1
# + [markdown] colab_type="text" id="0gBLFnUXFZBS"
# What are some reasons that you might want to create your own exception?
#
# + [markdown] colab_type="text" id="J45CYZQE7l3Q"
# ### Student Solution
# + [markdown] colab_type="text" id="_IyHuI-dHWKM"
# *Your answer here*
# + [markdown] colab_type="text" id="5_x1oionanus"
# ---
# + [markdown] colab_type="text" id="exercise-1-solution-1"
# **Solution**
# + [markdown] colab_type="text" id="pBF0zPgPIIus"
# To provide more readable and specific information when the code throws an error, which is helpful for debugging.
#
# This article has a nice explanation: https://dbader.org/blog/python-custom-exceptions
# + [markdown] colab_type="text" id="S3qFdLwtPNig"
# ## Exercise 2
# + [markdown] colab_type="text" id="crpuwPeQFm3g"
# Handle the exception in the code block below using `try`/`except`. If the addition can't be done, print "Unable to add".
# + [markdown] colab_type="text" id="yZgWoufL8ACG"
# ### Student Solution
# + colab={} colab_type="code" id="eo18xFbzFuIT"
left = 1
right = "2"

### YOUR CODE HERE ###
# int + str is unsupported, so catch the TypeError and report it as asked.
try:
    print(left + right)
except TypeError:
    print("Unable to add")
# + [markdown] colab_type="text" id="gEKp-R2ga2o_"
# ---
# + [markdown] colab_type="text" id="rBXxC_IEPU5c"
# ## Exercise 3
# + [markdown] colab_type="text" id="A2ypEESJGsDu"
# Using `if`/`else` or some other flow control, prevent the exception in the code below from being thrown.
# + [markdown] colab_type="text" id="QaJNZW8H8JxL"
# ### Student Solution
# + colab={} colab_type="code" id="Cb2DNg3kG5dh"
array_one = [1, 2, 3]
array_two = [4, 5]

### YOUR CODE HERE ###
# Guard the index so we never read past the end of the shorter list,
# preventing the IndexError instead of catching it.
for i in range(len(array_one)):
    if i < len(array_two):
        print(array_one[i] + array_two[i])
# + [markdown] colab_type="text" id="tOdueeOWa7PE"
# ---
| content/00_prerequisites/01_intermediate_python/01-exceptions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# #!/usr/bin/env python
# coding: utf-8
# import the necessary packages
from pyimagesearch.transform import four_point_transform
from skimage.filters import threshold_local
import numpy as np
import argparse
import cv2
import imutils
import pytesseract
from PIL import Image, ImageEnhance, ImageFilter
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True,
	help = "Path to the image to be scanned")
args = vars(ap.parse_args())
# args = vars('~/Documents/Jupyter/OCR/images/example_01.png')
# load the image and compute the ratio of the old height
# to the new height, clone it, and resize it
image = cv2.imread(args["image"])
# work at a 500px height; `ratio` maps contour coords back to full resolution
ratio = image.shape[0] / 500.0
orig = image.copy()
image = imutils.resize(image, height = 500)
# convert the image to grayscale, blur it, and find edges
# in the image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# 5x5 Gaussian blur suppresses high-frequency noise before edge detection
gray = cv2.GaussianBlur(gray, (5, 5), 0)
# Canny hysteresis thresholds: 75 (weak) / 200 (strong)
edged = cv2.Canny(gray, 75, 200)
# show the original image and the edge detected image
# print("STEP 1: Edge Detection")
cv2.imshow("Image", image)
cv2.imshow("Edged", edged)
cv2.waitKey(0)
cv2.destroyAllWindows()
# find the contours in the edged image, keeping only the
# largest ones, and initialize the screen contour
cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

# loop over the five largest contours looking for a quadrilateral outline
screenCnt = None
for c in cnts:
    # approximate the contour
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    # if our approximated contour has four points, then we
    # can assume that we have found our screen
    if len(approx) == 4:
        screenCnt = approx
        break

# BUG FIX: the original code left `screenCnt` undefined (NameError below)
# when no 4-point contour was found; fail fast with a clear message instead.
if screenCnt is None:
    raise RuntimeError("Could not find a 4-point document outline in the image")

# show the contour (outline) of the piece of paper
print("STEP 2: Find contours of paper")
cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
cv2.imshow("Outline", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# apply the four point transform to obtain a top-down
# view of the original image
# (the contour is scaled back to the original resolution with `ratio`)
warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
# convert the warped image to grayscale, then threshold it
# to give it that 'black and white' paper effect
warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
T = threshold_local(warped, 11, offset = 10, method = "gaussian")
warped = (warped > T).astype("uint8") * 255
# show the original and scanned images
print("STEP 3: Apply perspective transform")
cv2.imshow("Scanned", imutils.resize(warped, height = 650))
cv2.waitKey(0)
#use tessearct to read text
# im = warped.filter(ImageFilter.MedianFilter())
# enhancer = ImageEnhance.Contrast(im)
# im = enhancer.enhance(2)
# im = im.convert('1')
# --psm 12: sparse text with orientation detection; --oem 1: LSTM engine only
text = pytesseract.image_to_string(warped, config='--psm 12 --oem 1')
print("STEP 4: Use Tesseract's LSTM to read texts")
print(text)
# -
| OCR2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import scraping_class
logfile = 'log.txt'## name your log file.
# Connector presumably wraps requests with call-logging to `logfile` —
# see the scraping_class module for details (not visible here).
connector = scraping_class.Connector(logfile)
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm_notebook
import pandas as pd
import numpy as np
import html5lib
import sys
import pickle
from tqdm import tqdm_notebook
# Deserialize the previously saved DataFrame.
# NOTE(review): pickle.load can execute arbitrary code — only load trusted files.
with open('df_final.pkl', 'rb') as f:
    df = pickle.load(f)
df
| Exam project/Load final data set (ONLY DUPLICATE).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# # Configuration
#
# NOTES: The warnings after the import occur because Tensorflow 2.x builds look for a GPU in the system by default. These warnings can be ignored if you are not going to use the GPU.
# -
# Target number of audio samples per clip (~2.9 s assuming 44.1 kHz — TODO confirm).
# NOTE(review): not used in the visible preprocessing cells; presumably consumed later.
LENGTH_CHOSEN = 126520
# !pip install fsspec
# !virtualenv myenv
# !python3 -m venv myenv
# !source myenv/bin/activate
# !pip install seaborn
# !pip install ipywidgets
# + tags=[]
import os
import librosa
import numpy as np
from tqdm.notebook import tqdm
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
import seaborn as sns
sns.set_style('whitegrid')
import IPython.display as ipd
import librosa.display
import numpy as np
import pickle
import scipy
import ipywidgets
import math
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.metrics import accuracy_score, confusion_matrix
from scipy.cluster.hierarchy import dendrogram
from sklearn.cluster import AgglomerativeClustering
from tqdm import tqdm
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout, Conv2D, AveragePooling1D, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras import regularizers
# from livelossplot import PlotLossesKeras
tf.config.list_physical_devices('GPU')
# -
# # Get data from datasets
# +
main_path = '/media/helemanc/OS/Users/i2CAT/Desktop/Datasets SER/'
# Root folders of the four emotional-speech corpora used below.
TESS = os.path.join(main_path, "tess/TESS Toronto emotional speech set data/")
RAV = os.path.join(main_path, "ravdess-emotional-speech-audio/audio_speech_actors_01-24")
SAVEE = os.path.join(main_path, "savee/ALL/")
CREMA = os.path.join(main_path, "creamd/AudioWAV/")
dir_list = os.listdir(RAV)
# + [markdown] tags=[]
# ## RADVESS
# +
# Parse every RAVDESS file name into its metadata fields.
# NOTE(review): the fixed character offsets below assume RAVDESS's
# 'MM-VC-EM-IN-ST-RP-AA.wav' dash-separated naming — confirm against the data.
lst = []
emotion = []
voc_channel = []
full_path = []
modality = []
intensity = []
actors = []
phrase =[]
for root, dirs, files in tqdm(os.walk(RAV)):
    for file in files:
        try:
            #Load librosa array, obtain mfcss, store the file and the mfcss information in a new array
            # X, sample_rate = librosa.load(os.path.join(root,file), res_type='kaiser_fast')
            # mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T,axis=0)
            # The instruction below converts the labels (from 1 to 8) to a series from 0 to 7
            # This is because our predictor needs to start from 0 otherwise it will try to predict also 0.
            modal = int(file[1:2])
            vchan = int(file[4:5])
            lab = int(file[7:8])
            ints = int(file[10:11])
            phr = int(file[13:14])
            act = int(file[18:20])
            # arr = mfccs, lab
            # lst.append(arr)
            modality.append(modal)
            voc_channel.append(vchan)
            emotion.append(lab) #only labels
            intensity.append(ints)
            phrase.append(phr)
            actors.append(act)
            full_path.append((root, file)) # only files
        # If the file is not valid, skip it
        except ValueError:
            continue
# +
# 01 = neutral, 02 = calm, 03 = happy, 04 = sad, 05 = angry, 06 = fearful, 07 = disgust, 08 = surprised
emotions_list = ['neutral', 'calm', 'happy', 'sadness', 'angry', 'fear', 'disgust', 'surprise']
# Map the 1-based RAVDESS emotion codes to their string labels.
emotion_dict = {em[0]+1:em[1] for em in enumerate(emotions_list)}
# `actors` appears twice on purpose: the second copy becomes the 'gender' column below.
df = pd.DataFrame([emotion, voc_channel, modality, intensity, actors, actors,phrase, full_path]).T
df.columns = ['emotion', 'voc_channel', 'modality', 'intensity', 'actors', 'gender', 'phrase', 'path']
df['emotion'] = df['emotion'].map(emotion_dict)
df['voc_channel'] = df['voc_channel'].map({1: 'speech', 2:'song'})
df['modality'] = df['modality'].map({1: 'full AV', 2:'video only', 3:'audio only'})
df['intensity'] = df['intensity'].map({1: 'normal', 2:'strong'})
# NOTE(review): the next line is a no-op (assigns the column to itself).
df['actors'] = df['actors']
# even-numbered actor ids are mapped to female, odd to male
df['gender'] = df['actors'].apply(lambda x: 'female' if x%2 == 0 else 'male')
df['phrase'] = df['phrase'].map({1: 'Kids are talking by the door', 2:'Dogs are sitting by the door'})
df['path'] = df['path'].apply(lambda x: x[0] + '/' + x[1])
# -
# remove files with noise to apply the same noise to all files for data augmentation
df = df[~df.path.str.contains('noise')]
df.head()
# only speech: keep spoken statements, drop sung ones
RAV_df = df
RAV_df = RAV_df.loc[RAV_df.voc_channel == 'speech']
RAV_df.insert(0, "emotion_label", RAV_df.emotion, True)
# drop columns no longer needed; `columns=` replaces the deprecated
# positional axis argument (removed in pandas 2.0)
RAV_df = RAV_df.drop(columns=['emotion', 'voc_channel', 'modality', 'intensity', 'phrase'])
RAV_df
# + tags=[]
# Speaker-independent split: actors 1-20 -> train, 21-22 -> val, 23-24 -> test.
RAV_train = []
RAV_val = []
RAV_test = []
# -
for index, row in RAV_df.iterrows():
    if row['actors'] in range(1,21):
        RAV_train.append(row)
    elif row['actors'] in range(21,23):
        RAV_val.append(row)
    elif row['actors'] in range(23,25):
        RAV_test.append(row)
len(RAV_train), len(RAV_val), len(RAV_test)
RAV_train = pd.DataFrame(RAV_train)
RAV_val = pd.DataFrame(RAV_val)
RAV_test = pd.DataFrame(RAV_test)
# + tags=[]
# drop the speaker id now that the split is fixed; `columns=` replaces the
# deprecated positional axis argument (removed in pandas 2.0)
RAV_train = RAV_train.drop(columns=['actors'])
RAV_val = RAV_val.drop(columns=['actors'])
RAV_test = RAV_test.drop(columns=['actors'])
# -
RAV_train.head()
RAV_val.head()
# + [markdown] tags=[]
# ## SAVEE
# +
# Get the data location for SAVEE
dir_list = os.listdir(SAVEE)
# parse the filename to get the emotions
# chars [:2] give the actor id; chars [-8:-6] give the emotion code
# (_a anger, _d disgust, _f fear, _h happiness, _n neutral, sa sadness,
# su surprise); every SAVEE speaker is labelled male here.
emotion=[]
path = []
actors = []
gender = []
for i in dir_list:
    actors.append(i[:2])
    if i[-8:-6]=='_a':
        emotion.append('angry')
        gender.append('male')
    elif i[-8:-6]=='_d':
        emotion.append('disgust')
        gender.append('male')
    elif i[-8:-6]=='_f':
        emotion.append('fear')
        gender.append('male')
    elif i[-8:-6]=='_h':
        emotion.append('happy')
        gender.append('male')
    elif i[-8:-6]=='_n':
        emotion.append('neutral')
        gender.append('male')
    elif i[-8:-6]=='sa':
        emotion.append('sadness')
        gender.append('male')
    elif i[-8:-6]=='su':
        emotion.append('surprise')
        gender.append('male')
    else:
        # NOTE(review): this branch appends no gender, so an unexpected file
        # would desynchronise the emotion and gender lists — verify inputs.
        emotion.append('Unknown')
    path.append(SAVEE + i)
# Now check out the label count distribution
SAVEE_df = pd.DataFrame(emotion, columns = ['emotion_label'])
SAVEE_df = pd.concat([SAVEE_df,
                      pd.DataFrame(actors, columns = ['actors']),
                      pd.DataFrame(gender, columns = ['gender']),
                      pd.DataFrame(path, columns = ['path'])], axis = 1)
SAVEE_df.emotion_label.value_counts()
# -
SAVEE_df.head()
SAVEE_train = []
SAVEE_val = []
SAVEE_test = []
#DC, JE, JK, KL
# Speaker-independent split over SAVEE's four actors:
# DC + JE -> train, JK -> val, KL (the remaining actor) -> test.
for index, row in SAVEE_df.iterrows():
    if row['actors'] == 'DC' or row ['actors'] == 'JE':
        SAVEE_train.append(row)
    elif row['actors'] == 'JK':
        SAVEE_val.append(row)
    else:
        SAVEE_test.append(row)
len(SAVEE_train), len(SAVEE_val), len(SAVEE_test)
SAVEE_train = pd.DataFrame(SAVEE_train)
SAVEE_val = pd.DataFrame(SAVEE_val)
SAVEE_test = pd.DataFrame(SAVEE_test)
# drop the speaker id; `columns=` replaces the deprecated positional axis
# argument (removed in pandas 2.0)
SAVEE_train = SAVEE_train.drop(columns=['actors'])
SAVEE_val = SAVEE_val.drop(columns=['actors'])
SAVEE_test = SAVEE_test.drop(columns=['actors'])
# ## TESS
# +
dir_list = os.listdir(TESS)
dir_list.sort()
dir_list

path = []
emotion = []
gender = []
actors = []

# Folder name -> emotion label for the TESS corpus.  All TESS speakers are
# female; the 3-letter folder prefix ('OAF'/'YAF') identifies the actress.
# BUG FIX: the original if/elif chain labelled 'YAF_happy' files as 'angry'
# (copy-paste error); the mapping below gives them the correct 'happy' label.
_folder_emotion = {
    'OAF_angry': 'angry', 'YAF_angry': 'angry',
    'OAF_disgust': 'disgust', 'YAF_disgust': 'disgust',
    'OAF_Fear': 'fear', 'YAF_fear': 'fear',
    'OAF_happy': 'happy', 'YAF_happy': 'happy',
    'OAF_neutral': 'neutral', 'YAF_neutral': 'neutral',
    'OAF_Pleasant_surprise': 'surprise', 'YAF_pleasant_surprised': 'surprise',
    'OAF_Sad': 'sadness', 'YAF_sad': 'sadness',
}

for i in dir_list:
    fname = os.listdir(TESS + i)
    for f in fname:
        if i in _folder_emotion:
            emotion.append(_folder_emotion[i])
            gender.append('female')
            actors.append(i[:3])  # 'OAF' or 'YAF'
        else:
            # unknown folder: keep the original behaviour (label only,
            # no gender/actor entry)
            emotion.append('Unknown')
        path.append(TESS + i + "/" + f)

TESS_df = pd.DataFrame(emotion, columns = ['emotion_label'])
TESS_df = pd.concat([TESS_df, pd.DataFrame(gender, columns = ['gender']),
                     pd.DataFrame(actors, columns= ['actors']),
                     pd.DataFrame(path, columns = ['path'])],axis=1)
TESS_df.emotion_label.value_counts()
# + tags=[]
# Drop augmented/noise copies so only clean recordings remain.
TESS_df= TESS_df[~TESS_df.path.str.contains('noise')]
# -
# Speaker split: young actress (YAF) -> train, older actress (OAF) -> test;
# note TESS contributes no validation slice.
TESS_train = []
TESS_test = []
for index, row in TESS_df.iterrows():
    if row['actors'] == 'YAF':
        TESS_train.append(row)
    else:
        TESS_test.append(row)
len(TESS_train), len(TESS_test)
# + tags=[]
TESS_train = pd.DataFrame(TESS_train)
TESS_test = pd.DataFrame(TESS_test)
# -
# ## CREMA-D
# + tags=[]
# CREMA-D actor ids that are male speakers (47 of the 91 actors).
males = [1, 5, 11, 14, 15, 16, 17, 19, 22, 23, 26, 27, 31, 32, 33, 34, 35, 36,
         38, 39, 41, 42, 44, 45, 48, 50, 51, 57, 59, 62, 64, 65, 66, 67, 68,
         69, 70, 71, 77, 80, 81, 83, 85, 86, 87, 88, 90]
# -
# CREMA-D actor ids that are female speakers (44 of the 91 actors).
females = [2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 18, 20, 21, 24, 25, 28, 29, 30,
           37, 40, 43, 46, 47, 49, 52, 53, 54, 55, 56, 58, 60, 61, 63, 72,
           73, 74, 75, 76, 78, 79, 82, 84, 89, 91]
# +
# CREMA-D filenames split into '_'-separated fields: field 0 is the actor id
# (chars [2:] keep the digits compared against the 1..91 ids in `males` —
# NOTE(review): assumes ids like '10XX', confirm), field 2 the emotion code,
# field 3 the intensity tag.
crema_directory_list = os.listdir(CREMA)
file_emotion = []
file_path = []
actors = []
gender = []
for file in crema_directory_list:
    # storing file emotions
    part=file.split('_')
    # use only high intensity files
    if "HI" in part[3] :
        actor = part[0][2:]
        actors.append(actor)
        if int(actor) in males:
            gender.append('male')
        else:
            gender.append('female')
        # storing file paths
        file_path.append(CREMA + file)
        if part[2] == 'SAD':
            file_emotion.append('sadness')
        elif part[2] == 'ANG':
            file_emotion.append('angry')
        elif part[2] == 'DIS':
            file_emotion.append('disgust')
        elif part[2] == 'FEA':
            file_emotion.append('fear')
        elif part[2] == 'HAP':
            file_emotion.append('happy')
        elif part[2] == 'NEU':
            file_emotion.append('neutral')
        else:
            file_emotion.append('Unknown')
# dataframe for emotion of files
emotion_df = pd.DataFrame(file_emotion, columns=['emotion_label'])
# dataframe for path of files.
path_df = pd.DataFrame(file_path, columns=['path'])
actors_df = pd.DataFrame(actors, columns=['actors'])
gender_df = pd.DataFrame(gender, columns=['gender'])
Crema_df = pd.concat([emotion_df, actors_df, gender_df, path_df], axis=1)
Crema_df.head()
# -
Crema_df.shape
# +
# Number of (high-intensity) clips contributed by each CREMA-D actor.
actor_files = {}
for index, row in Crema_df.iterrows():
    key = row['actors']
    actor_files[key] = actor_files.get(key, 0) + 1
# -
actor_files
count_males = 0
count_females = 0
male_list = []
# Tally clips by gender and collect the distinct male actor ids.
for index, row in Crema_df.iterrows():
    gender = row['gender']
    actor = row['actors']
    if gender == 'male':
        count_males +=1
        if actor not in male_list:
            male_list.append(actor)
    else:
        count_females +=1
count_males, count_females
# Since there are more males than females we will remove randomly 3 male actors (since there are exactly 5 audio files per actor)
import random
random.seed(42)
males_to_remove = random.sample(male_list, 3)
males_to_remove
new_df = []
# Keep every row whose actor survived the removal.
for index, row in Crema_df.iterrows():
    if row['actors'] not in males_to_remove:
        new_df.append(row)
CREMA_df = pd.DataFrame(new_df)
# Sanity check — NOTE(review): this only verifies actor '17', which presumably
# was one of the sampled removals under seed 42; the check breaks silently if
# the seed or sampling ever changes. Confirm.
for index, row in CREMA_df.iterrows():
    if row['actors'] == '17':
        print("Elements not removed")
count_males = 0
count_females = 0
male_list = []
female_list = []
# Re-count after the removal and collect the distinct actor ids per gender.
for index, row in CREMA_df.iterrows():
    gender = row['gender']
    actor = row['actors']
    if gender == 'male':
        count_males +=1
        if actor not in male_list:
            male_list.append(actor)
    else:
        count_females +=1
        if actor not in female_list:
            female_list.append(actor)
count_males, count_females
len(female_list)
len(male_list)
CREMA_train = []
CREMA_val = []
CREMA_test = []
# +
# Actor-level split per gender (32 train / 6 val / 6 test actors each),
# so no speaker appears in more than one split.
females_train = random.sample(female_list, 32)
males_train = random.sample(male_list, 32)
# remove the elements assigned to train
for element in females_train:
    if element in female_list:
        female_list.remove(element)
for element in males_train:
    if element in male_list:
        male_list.remove(element)
females_val = random.sample(female_list, 6)
males_val = random.sample(male_list, 6)
# remove the elements assigned to val
for element in females_val:
    if element in female_list:
        female_list.remove(element)
for element in males_val:
    if element in male_list:
        male_list.remove(element)
females_test = random.sample(female_list, 6)
males_test = random.sample(male_list, 6)
# -
females_train, males_train, females_val, males_val, females_test, males_test
train = females_train + males_train
val = females_val + males_val
test = females_test + males_test
# + tags=[]
# Assign each clip to the split its actor belongs to.
for index, row in CREMA_df.iterrows():
    gender = row['gender']
    actor = row['actors']
    if actor in train:
        CREMA_train.append(row)
    elif actor in val:
        CREMA_val.append(row)
    else:
        CREMA_test.append(row)
# -
CREMA_train = pd.DataFrame(CREMA_train)
CREMA_val = pd.DataFrame(CREMA_val)
CREMA_test = pd.DataFrame(CREMA_test)
# + tags=[]
CREMA_train.shape, CREMA_val.shape, CREMA_test.shape
# + tags=[]
CREMA_train.head()
# + tags=[]
# drop the speaker id; `columns=` replaces the deprecated positional axis
# argument (removed in pandas 2.0)
CREMA_train = CREMA_train.drop(columns=['actors'])
CREMA_val = CREMA_val.drop(columns=['actors'])
CREMA_test = CREMA_test.drop(columns=['actors'])
# -
# ## Combine datasets
df_train = pd.concat([RAV_train, SAVEE_train, TESS_train, CREMA_train])
# NOTE: TESS contributes no validation slice (it was split train/test only),
# so df_val is built from the other three corpora.
df_val = pd.concat([RAV_val, SAVEE_val, CREMA_val])
df_test = pd.concat([RAV_test, SAVEE_test, TESS_test, CREMA_test])
# + jupyter={"outputs_hidden": true} tags=[]
df_train.shape, df_val.shape, df_test.shape
# -
# ## Save dataframes to retrieve paths for Training, Val and Test
preprocess_path = "/home/helemanc/Desktop/Binary_Model/pre-processed"
df_train.to_csv(os.path.join(preprocess_path,"df_train.csv"), index=False)
df_val.to_csv(os.path.join(preprocess_path,"df_val.csv"), index=False)
df_test.to_csv(os.path.join(preprocess_path,"df_test.csv"), index=False)
# + [markdown] tags=[]
# # Load Dataframes
# + tags=[]
preprocess_path = "/home/helemanc/Desktop/Binary_Model/pre-processed"
# Reload the split metadata saved by the preprocessing cells above.
df_train = pd.read_csv(os.path.join(preprocess_path,"df_train.csv"))
df_val = pd.read_csv(os.path.join(preprocess_path,"df_val.csv"))
df_test = pd.read_csv(os.path.join(preprocess_path,"df_test.csv"))
# -
# # Check Bit-Depth of wav files
from soundfile import SoundFile
# +
# Collect the PCM subtype of every training file, bucketed by source corpus
# via a substring match on the file path.
ravdess_bd = []
savee_bd = []
tess_bd = []
crema_bd = []
for index, row in df_train.iterrows():
    path = row['path']
    file = SoundFile(path)
    if 'ravdess' in path:
        ravdess_bd.append(file.subtype)
    elif 'savee' in path:
        savee_bd.append(file.subtype)
    elif 'creamd' in path:  # NOTE(review): 'creamd' looks like a typo for 'cremad' — verify against the actual folder name
        crema_bd.append(file.subtype)
    else:
        tess_bd.append(file.subtype)
# -
# check that all the audio files of a dataset have the same bit depth (PCM_16)
for el in ravdess_bd:
    if el != 'PCM_16':
        print('Diff')
for el in savee_bd:
    if el != 'PCM_16':
        print('Diff')
for el in tess_bd:
    if el != 'PCM_16':
        print('Diff')
for el in crema_bd:
    if el != 'PCM_16':
        print('Diff')
# + [markdown] tags=[]
# # Check the volume of wav files - PENDING
#
# To check the volume we are going to compute the rms for each audio files. Later we will plot the distribution of the volume for each database.
# https://docs.python.org/3/library/audioop.html
# -
ravdess_volume = []
savee_volume = []
tess_volume = []
crema_volume = []
# +
# Exploratory cell, still incomplete (most branches are commented out).
# NOTE(review): audioop is deprecated and removed in Python 3.13; and
# librosa.feature.rms requires the keyword form rms(y=data) from librosa 0.10
# on — confirm the pinned versions before reviving this cell.
import audioop
import wave
for index, row in df_train.iterrows():
    path = row['path']
    print(path)
    data, samplerate = librosa.load(path)
    if 'ravdess' in path:
        #print(audioop.rms(data,2))
        #print(path)
        print(np.mean(librosa.feature.rms(data)))
        #ravdess_bd.append(audioop.rms(file,2))
    '''
    elif 'savee' in path:
        savee_bd.append(file.subtype)
    elif 'creamd' in path:
        crema_bd.append(file.subtype)
    else:
        tess_bd.append(file.subtype)
    '''
# -
# + [markdown] tags=[]
# # Feature extraction
# -
df_train['emotion_label'].unique()
# Class-distribution histograms for the three splits.
plt.title('Emotions distribution')
plt.hist(df_train['emotion_label'])
# plt.hist(y)
plt.show()
plt.title('Emotions distribution')
plt.hist(df_val['emotion_label'])
# plt.hist(y)
plt.show()
plt.title('Emotions distribution')
plt.hist(df_test['emotion_label'])
# plt.hist(y)
plt.show()
df_train.shape
# ## Utils
# +
def load_files(df):
    """Load every audio file listed in df['path'] at 44.1 kHz.

    Returns a list of (waveform, sample_rate) tuples, one per row.
    """
    return [librosa.load(path, res_type='kaiser_fast', sr=44100)
            for path in tqdm(df['path'])]
def extract_samples(X):
    """Return only the waveform (first element) of each (waveform, sr) pair."""
    return [pair[0] for pair in X]
def extract_labels(df):
    """Return an independent copy of the emotion_label column."""
    labels = df['emotion_label']
    return labels.copy()
def compute_lengths(samples):
    """Length (number of samples) of every clip."""
    lengths = []
    for clip in samples:
        lengths.append(len(clip))
    return lengths
def check_outliers(lengths):
    """Print the number of lengths above 300000 and return those below it.

    Note: a length of exactly 300000 is neither counted nor returned —
    quirk kept for behavioural parity.
    """
    # outliers
    arr = np.array(lengths)
    print((arr > 300000).sum())
    return arr[arr < 300000]
def compute_mean_length(lengths):
    """Arithmetic mean of the clip lengths."""
    return np.mean(lengths)
def cut_and_pad(samples, labels, length_chosen=None):
    """Truncate or median-pad each sample to exactly `length_chosen` values.

    Samples with 300000 values or more are treated as outliers and dropped
    together with their labels. Shorter samples are median-padded, longer
    ones truncated.

    Parameters
    ----------
    samples : sequence of 1-D np.ndarray
    labels : sequence aligned index-by-index with `samples`
    length_chosen : int, optional
        Target length; defaults to the notebook-level LENGTH_CHOSEN
        (resolved at call time, so the global may be defined later).

    Returns
    -------
    (list of np.ndarray, list)
        Fixed-length samples and their still-aligned labels.
    """
    if length_chosen is None:
        length_chosen = LENGTH_CHOSEN
    X_new = []
    y_new = []
    labels_seq = list(labels)  # positional indexing regardless of pandas index
    for ind, sample in enumerate(samples):
        if sample.shape[0] >= 300000:
            continue  # outlier: skip the sample and its label together
        if sample.shape[0] > length_chosen:
            X_new.append(sample[:length_chosen])
        elif sample.shape[0] < length_chosen:
            deficit = length_chosen - sample.shape[0]
            # Pad exactly `deficit` values split across both sides. The
            # original ceil((deficit)/2)-on-both-sides padding produced
            # arrays of length_chosen + 1 whenever the deficit was odd,
            # breaking the later np.array() stacking assumption.
            X_new.append(np.pad(sample, (deficit // 2, deficit - deficit // 2),
                                mode='median'))
        else:
            X_new.append(sample)
        # Index labels with the enumerate index so that skipped samples skip
        # their labels too (a separate counter can drift out of alignment).
        y_new.append(labels_seq[ind])
    return X_new, y_new
# Data Augmentation
def noise(data):
    """Additive white-noise augmentation scaled to at most 3.5% of the peak."""
    amplitude = 0.035 * np.random.uniform() * np.amax(data)
    perturbation = amplitude * np.random.normal(size=data.shape[0])
    return data + perturbation
# Data Augmentation
def pitch(data, sampling_rate, pitch_factor=0.7):
    """Pitch-shift `data` by `pitch_factor` steps via librosa.

    NOTE(review): positional (sr, n_steps) arguments were removed in
    librosa 0.10 — newer versions require keyword args; confirm the pinned
    librosa version before upgrading.
    """
    return librosa.effects.pitch_shift(data, sampling_rate, pitch_factor)
def compute_mfccs(samples):
    """Compute a (n_clips, frames, 40) array of MFCC features at 44.1 kHz.

    Each clip's MFCC matrix is transposed so time is the first axis.
    """
    features = []
    for clip in tqdm(samples):
        features.append(librosa.feature.mfcc(y=clip, sr=44100, n_mfcc=40).T)
    return np.array(features)
def compute_mfccs_augmentation(samples, labels):
    """Compute MFCCs for each sample plus four augmented variants.

    For every kept sample, five feature matrices are produced: the original,
    its Wiener-filtered version, noise-augmented, pitch-shifted, and
    noise+pitch. Samples whose Wiener filtering yields NaN/inf are skipped
    entirely.

    Bug fixed: the original code appended five labels for *every* sample but
    five feature rows only for finite ones, so a single skipped sample
    desynchronised X and y for the rest of the dataset. Labels are now
    emitted in the same loop, only for kept samples. (Augmented variants are
    also only computed for kept samples now, which changes the order in
    which the global RNG is consumed.)

    Returns
    -------
    (np.ndarray, np.ndarray) : features of shape (5 * n_kept, frames, 40)
        and the aligned labels.
    """
    mfccs = []
    y_new = []
    labels_seq = list(labels)  # positional indexing regardless of pandas index
    for idx, sample in enumerate(tqdm(samples)):
        # Wiener filtering on the original signal
        samples_weiner = scipy.signal.wiener(sample)
        if not np.isfinite(samples_weiner).all():
            continue  # degenerate filter output: skip sample AND label
        # Data augmentation: noise, pitch, and noise+pitch variants
        noise_audio = noise(samples_weiner)
        pitch_audio = pitch(samples_weiner, sampling_rate=44100)
        pn = pitch(noise_audio, sampling_rate=44100)
        for variant in (sample, samples_weiner, noise_audio, pitch_audio, pn):
            mfcc = librosa.feature.mfcc(y=variant, sr=44100, n_mfcc=40).T
            mfccs.append(mfcc)
            y_new.append(labels_seq[idx])  # one label per emitted feature row
    return np.array(mfccs), np.asarray(y_new)
# -
# ## Train
# ### Load samples and labels
load_train = load_files(df_train)
samples_train = extract_samples(load_train)
labels_train = extract_labels(df_train)
# ### Decide length
# Mean length of the non-outlier clips drives the fixed input length.
lengths = compute_lengths(samples_train)
new_lengths = check_outliers(lengths)
mean_length = compute_mean_length(new_lengths)
mean_length
# ### Cut and Pad
# After cut_and_pad all clips share one length, so np.array stacks them 2-D.
samples_train, labels_train = cut_and_pad(samples_train, labels_train)
samples_train = np.array(samples_train)
labels_train = np.array(labels_train)
print(samples_train.shape, labels_train.shape)
# ### Feature Extraction - Without Data Augmentation
mfccs_train = compute_mfccs(samples_train)
mfccs_train.shape
# ### Save features and labels
# +
preprocess_path = "/home/helemanc/Desktop/Binary_Model/pre-processed"
mfccs_train_path = os.path.join(preprocess_path,"mfccs_train.pkl")
labels_train_path = os.path.join(preprocess_path,"labels_train.pkl")
with open(mfccs_train_path, 'wb') as f:
    pickle.dump(mfccs_train , f)
with open(labels_train_path, 'wb') as f:
    pickle.dump(labels_train , f)
# -
# ### Feature Extraction - With Data Augmentation
mfccs_train_aug, labels_train_aug = compute_mfccs_augmentation(samples_train, labels_train)
# + tags=[]
mfccs_train_aug.shape, labels_train_aug.shape
# -
# ### Save features and labels
# +
preprocess_path = "/home/helemanc/Desktop/Binary_Model/pre-processed"
mfccs_aug_train_path = os.path.join(preprocess_path,"mfccs_train_aug.pkl")
labels_aug_train_path = os.path.join(preprocess_path,"labels_train_aug.pkl")
with open(mfccs_aug_train_path, 'wb') as f:
    pickle.dump(mfccs_train_aug , f)
with open(labels_aug_train_path, 'wb') as f:
    pickle.dump(labels_train_aug , f)
# + [markdown] tags=[]
# ## Val
# + [markdown] tags=[]
# ### Load samples and labels
# -
# Same pipeline as for train: load, strip sample rates, cut/pad, MFCC, pickle.
load_val = load_files(df_val)
samples_val = extract_samples(load_val)
# + tags=[]
labels_val = extract_labels(df_val)
# -
# ### Cut and Pad
samples_val, labels_val = cut_and_pad(samples_val, labels_val)
samples_val = np.array(samples_val)
labels_val = np.array(labels_val)
print(samples_val.shape, labels_val.shape)
# ### Feature Extraction
mfccs_val = compute_mfccs(samples_val)
mfccs_val.shape
# ### Save features and labels
# +
preprocess_path = "/home/helemanc/Desktop/Binary_Model/pre-processed"
mfccs_val_path = os.path.join(preprocess_path,"mfccs_val.pkl")
labels_val_path = os.path.join(preprocess_path,"labels_val.pkl")
with open(mfccs_val_path, 'wb') as f:
    pickle.dump(mfccs_val , f)
with open(labels_val_path, 'wb') as f:
    pickle.dump(labels_val , f)
# -
# ## Test
# + [markdown] tags=[]
# ### Load samples and labels
# -
load_test = load_files(df_test)
samples_test = extract_samples(load_test)
# + tags=[]
labels_test = extract_labels(df_test)
# -
# ### Cut and Pad
samples_test, labels_test = cut_and_pad(samples_test, labels_test)
samples_test = np.array(samples_test)
labels_test = np.array(labels_test)
print(samples_test.shape, labels_test.shape)
# ### Feature Extraction
mfccs_test = compute_mfccs(samples_test)
mfccs_test.shape
# ### Save features and labels
# + tags=[]
preprocess_path = "/home/helemanc/Desktop/Binary_Model/pre-processed"
mfccs_test_path = os.path.join(preprocess_path,"mfccs_test.pkl")
labels_test_path = os.path.join(preprocess_path,"labels_test.pkl")
with open(mfccs_test_path, 'wb') as f:
    pickle.dump(mfccs_test , f)
with open(labels_test_path, 'wb') as f:
    pickle.dump(labels_test , f)
# # Load features and Labels
# +
preprocess_path = "/home/helemanc/Desktop/Binary_Model/pre-processed"
mfccs_train_path = os.path.join(preprocess_path,"mfccs_train.pkl")
labels_train_path = os.path.join(preprocess_path,"labels_train.pkl")
mfccs_aug_train_path = os.path.join(preprocess_path,"mfccs_train_aug.pkl")
labels_aug_train_path = os.path.join(preprocess_path,"labels_train_aug.pkl")
mfccs_val_path = os.path.join(preprocess_path,"mfccs_val.pkl")
labels_val_path = os.path.join(preprocess_path,"labels_val.pkl")
mfccs_test_path = os.path.join(preprocess_path,"mfccs_test.pkl")
labels_test_path = os.path.join(preprocess_path,"labels_test.pkl")
# +
mfccs_train = pickle.load(open(mfccs_train_path, 'rb'))
labels_train = pickle.load(open(labels_train_path, 'rb'))
mfccs_train_aug = pickle.load(open(mfccs_aug_train_path, 'rb'))
labels_train_aug = pickle.load(open(labels_aug_train_path, 'rb'))
mfccs_val = pickle.load(open(mfccs_val_path, 'rb'))
labels_val = pickle.load(open(labels_val_path, 'rb'))
mfccs_test = pickle.load(open(mfccs_test_path, 'rb'))
labels_test = pickle.load(open(labels_test_path, 'rb'))
# + [markdown] tags=[]
# # Encode Labels - Binary
# + tags=[]
# Binary target: 1 = negative emotions (fear/disgust/sadness/angry),
# 0 = neutral/positive (neutral/calm/happy/surprise).
emotion_enc = {'fear':1, 'disgust':1, 'neutral':0, 'calm':0, 'happy':0, 'sadness':1, 'surprise':0, 'angry':1}
# -
# Use .map for all four splits (the original used .replace for y_train only;
# .map yields NaN for unmapped labels instead of silently passing them
# through, so encoding mistakes surface immediately and the splits are
# encoded consistently).
y_train = pd.Series(labels_train).map(emotion_enc)
y_train_aug = pd.Series(labels_train_aug).map(emotion_enc)
y_val = pd.Series(labels_val).map(emotion_enc)
y_test = pd.Series(labels_test).map(emotion_enc)
# # Train, Val, Test (X)
X_train = mfccs_train
X_train_aug = mfccs_train_aug
X_val = mfccs_val
X_test = mfccs_test
# # Standard Scaling
# ## Without Augmentation
# + tags=[]
# Fit the scaler on the training features only, then apply the same
# transform to val/test (no leakage). The 3-D (clips, frames, mfcc) arrays
# are flattened to 2-D for sklearn and reshaped back afterwards.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train.reshape(-1, X_train.shape[-1])).reshape(X_train.shape)
X_test = scaler.transform(X_test.reshape(-1, X_test.shape[-1])).reshape(X_test.shape)
X_val = scaler.transform(X_val.reshape(-1, X_val.shape[-1])).reshape(X_val.shape)
# -
# ## Save Scaler
data_model_path = "/home/helemanc/Desktop/Binary_Model/data_model"
scaler_path = os.path.join(data_model_path,"scaler.pkl")
with open(scaler_path, 'wb') as f:
    pickle.dump(scaler,f)
# ## Save scaled data
# +
X_train_path = os.path.join(data_model_path,"X_train.pkl")
X_test_path = os.path.join(data_model_path,"X_test.pkl")
X_val_path = os.path.join(data_model_path,"X_val.pkl")
y_train_path = os.path.join(data_model_path,"y_train.pkl")
y_test_path = os.path.join(data_model_path,"y_test.pkl")
y_val_path = os.path.join(data_model_path,"y_val.pkl")
# NOTE(review): the scaler was already dumped to the same path just above —
# this second dump is redundant.
with open(scaler_path, 'wb') as f:
    pickle.dump(scaler,f)
with open(X_train_path, 'wb') as f:
    pickle.dump(X_train,f)
with open(X_test_path, 'wb') as f:
    pickle.dump(X_test,f)
with open(X_val_path, 'wb') as f:
    pickle.dump(X_val,f)
with open(y_train_path, 'wb') as f:
    pickle.dump(y_train,f)
with open(y_test_path, 'wb') as f:
    pickle.dump(y_test,f)
with open(y_val_path, 'wb') as f:
    pickle.dump(y_val,f)
# -
# ## With Augmentation
# A separate scaler fitted on the augmented training features.
# NOTE(review): X_test / X_val were already scaled by the non-augmented
# scaler in the cell above, so they are being scaled twice here — confirm
# whether the intent was to transform the raw mfccs instead.
from sklearn.preprocessing import StandardScaler
scaler_aug = StandardScaler()
X_train_aug = scaler_aug.fit_transform(X_train_aug.reshape(-1, X_train_aug.shape[-1])).reshape(X_train_aug.shape)
X_test_aug = scaler_aug.transform(X_test.reshape(-1, X_test.shape[-1])).reshape(X_test.shape)
X_val_aug = scaler_aug.transform(X_val.reshape(-1, X_val.shape[-1])).reshape(X_val.shape)
# ## Save Scaler
data_model_path = "/home/helemanc/Desktop/Binary_Model/data_model_augmented"
scaler_aug_path = os.path.join(data_model_path,"scaler_aug.pkl")
with open(scaler_aug_path, 'wb') as f:
    pickle.dump(scaler_aug,f)
# +
X_train_path = os.path.join(data_model_path,"X_train_aug.pkl")
X_test_path = os.path.join(data_model_path,"X_test.pkl")
X_val_path = os.path.join(data_model_path,"X_val.pkl")
y_train_path = os.path.join(data_model_path,"y_train_aug.pkl")
y_test_path = os.path.join(data_model_path,"y_test.pkl")
y_val_path = os.path.join(data_model_path,"y_val.pkl")
# Persist the augmented pipeline's arrays. The original cell (copy-pasted
# from the non-augmented section) re-dumped the non-augmented scaler into
# scaler_path and saved X_test / X_val instead of the scaler_aug-scaled
# X_test_aug / X_val_aug, so the augmented training section later loaded
# data scaled with the wrong scaler.
with open(X_train_path, 'wb') as f:
    pickle.dump(X_train_aug,f)
with open(X_test_path, 'wb') as f:
    pickle.dump(X_test_aug,f)
with open(X_val_path, 'wb') as f:
    pickle.dump(X_val_aug,f)
with open(y_train_path, 'wb') as f:
    pickle.dump(y_train_aug,f)
with open(y_test_path, 'wb') as f:
    pickle.dump(y_test,f)
with open(y_val_path, 'wb') as f:
    pickle.dump(y_val,f)
# -
# + [markdown] tags=[]
# # Model
# +
# 1-D CNN over (248 frames, 40 MFCCs): three Conv1D/ReLU/MaxPool/Dropout
# blocks with He-normal init, then a dense head with a sigmoid output for
# the binary target.
model = Sequential()
model.add(layers.Conv1D(128, 5,padding='same',
                 input_shape=(248,40), kernel_initializer=tf.keras.initializers.HeNormal(seed=0)))
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling1D(pool_size=(4)))
model.add(layers.Dropout(0.2))
model.add(layers.Conv1D(64, 5,padding='same', kernel_initializer=tf.keras.initializers.HeNormal(seed=0)))
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling1D(pool_size=(4)))
model.add(layers.Dropout(0.1))
model.add(layers.Conv1D(32, 5,padding='same', kernel_initializer=tf.keras.initializers.HeNormal(seed=0)))
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling1D(pool_size=(4)))
model.add(layers.Dropout(0.1))
model.add(layers.Flatten())
model.add(layers.Dense(32, kernel_initializer=tf.keras.initializers.HeNormal(seed=0)))
model.add(layers.Dense(1))
model.add(layers.Activation('sigmoid'))
model.summary()
# -
# # Training
# + [markdown] tags=[]
# ## Without Augmentation
# + [markdown] tags=[]
# ### Load Data
# -
# Reload the scaled, pickled splits produced by the preprocessing sections.
X_train = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/X_train.pkl", 'rb'))
X_val = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/X_val.pkl", 'rb'))
X_test = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/X_test.pkl", 'rb'))
y_train = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/y_train.pkl", 'rb'))
y_val = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/y_val.pkl", 'rb'))
y_test = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/y_test.pkl", 'rb'))
# +
# Callbacks: halve the LR on val-accuracy plateaus, stop early on val-loss,
# checkpoint the best weights by val-accuracy.
weight_path = "/home/helemanc/Desktop/Binary_Model/weights/binary_model_l1l2.hdf5"
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy',
                                                 factor=0.5, patience=4,
                                                 verbose=1, mode='max',
                                                 min_lr=0.00001)
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=45,
                                              verbose=1)
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=weight_path,
                                                      save_weights_only=True,
                                                      monitor='val_accuracy',
                                                      mode='max',
                                                      save_best_only=True)
# classweight: balanced per-class weights for the loss.
# compute_class_weight takes keyword-only arguments from scikit-learn 0.24
# on; the old positional form raises a TypeError there.
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight(class_weight='balanced',
                                                  classes=np.unique(y_train),
                                                  y=y_train)
class_weights = {l: c for l, c in zip(np.unique(y_train), class_weights)}
# -
model.compile(loss='binary_crossentropy', optimizer='adam', metrics='accuracy')
# ### Train
history = model.fit(X_train, y_train, batch_size=16, epochs=500, validation_data=(X_val, y_val),
                    callbacks=[reduce_lr, early_stop, model_checkpoint], class_weight = class_weights)
# ### Plot Training Graphs
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('acc')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# ### Save Model
model.save("/home/helemanc/Desktop/Binary_Model/models/binary_model")
# ### Evaluate Model
model_loaded = tf.keras.models.load_model("/home/helemanc/Desktop/Binary_Model/models/binary_model")
model_loaded.evaluate(X_test, y_test, batch_size=16)
# + jupyter={"outputs_hidden": true} tags=[]
from sklearn.metrics import classification_report
predictions = model_loaded.predict(X_test)
# Threshold the sigmoid outputs at 0.5 for hard class labels.
pred = [1 * (x[0]>=0.50) for x in predictions] #0.5 o 0.52?
print(classification_report(y_test, pred))
# + [markdown] tags=[]
# ## Without Augmentation - Shuffle Input Data
# + [markdown] tags=[]
# ### Load Data
# -
X_train = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/X_train.pkl", 'rb'))
X_val = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/X_val.pkl", 'rb'))
X_test = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/X_test.pkl", 'rb'))
y_train = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/y_train.pkl", 'rb'))
y_val = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/y_val.pkl", 'rb'))
y_test = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/y_test.pkl", 'rb'))
from sklearn.utils import shuffle
# NOTE(review): no random_state is passed, so this shuffle (and hence the
# run) is not reproducible.
X_train, y_train = shuffle(X_train, y_train)
# +
# Callbacks for the shuffled-input run (same scheme as above, different
# checkpoint file).
weight_path = "/home/helemanc/Desktop/Binary_Model/weights/binary_model_shuffle.hdf5"
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy',
                                                 factor=0.5, patience=4,
                                                 verbose=1, mode='max',
                                                 min_lr=0.00001)
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=45,
                                              verbose=1)
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=weight_path,
                                                      save_weights_only=True,
                                                      monitor='val_accuracy',
                                                      mode='max',
                                                      save_best_only=True)
# classweight — keyword form required by scikit-learn >= 0.24 (the old
# positional call raises TypeError there).
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight(class_weight='balanced',
                                                  classes=np.unique(y_train),
                                                  y=y_train)
class_weights = {l: c for l, c in zip(np.unique(y_train), class_weights)}
# -
model.compile(loss='binary_crossentropy', optimizer='adam', metrics='accuracy')
# ### Train
# NOTE(review): `model` still carries the weights from the previous run —
# it is not re-initialised before this second fit.
history = model.fit(X_train, y_train, batch_size=16, epochs=500, validation_data=(X_val, y_val),
                    callbacks=[reduce_lr, early_stop, model_checkpoint], class_weight = class_weights)
# ### Plot Training Graphs
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('acc')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# ### Save Model
model.save("/home/helemanc/Desktop/Binary_Model/models/binary_model_shuffle")
# ### Evaluate Model
model_loaded = tf.keras.models.load_model("/home/helemanc/Desktop/Binary_Model/models/binary_model_shuffle")
model_loaded.evaluate(X_test, y_test, batch_size=16)
# + tags=[]
from sklearn.metrics import classification_report
predictions = model_loaded.predict(X_test)
pred = [1 * (x[0]>=0.50) for x in predictions] #0.5 o 0.52?
print(classification_report(y_test, pred))
# + [markdown] tags=[]
# ## Without Augmentation - CrossValidation
# -
# ### Load Data
X_train = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/X_train.pkl", 'rb'))
X_val = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/X_val.pkl", 'rb'))
X_test = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/X_test.pkl", 'rb'))
y_train = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/y_train.pkl", 'rb'))
y_val = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/y_val.pkl", 'rb'))
y_test = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model/y_test.pkl", 'rb'))
# +
# Callbacks for the cross-validation run (checkpointing disabled).
#weight_path = "/home/helemanc/Desktop/Binary_Model/weights/binary_model.hdf5"
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy',
                                                 factor=0.5, patience=4,
                                                 verbose=1, mode='max',
                                                 min_lr=0.00001)
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=45,
                                              verbose=1)
#model_checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=weight_path,
#                               save_weights_only=True,
#                               monitor='val_accuracy',
#                               mode='max',
#                               save_best_only=True)
# classweight — keyword form required by scikit-learn >= 0.24 (the old
# positional call raises TypeError there).
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight(class_weight='balanced',
                                                  classes=np.unique(y_train),
                                                  y=y_train)
class_weights = {l: c for l, c in zip(np.unique(y_train), class_weights)}
# -
def create_model():
    """Build and compile the 1-D CNN used for the cross-validation runs.

    Two l2-regularised Conv1D blocks with max-pooling and dropout, a small
    dense head, and a sigmoid output for the binary target. Compiled with
    binary cross-entropy / Adam; prints a summary before returning.
    """
    model = Sequential([
        layers.Conv1D(256, 5, padding='same', input_shape=(248, 40),
                      kernel_regularizer=regularizers.l2(0.001)),
        layers.Activation('relu'),
        layers.MaxPooling1D(pool_size=8),
        layers.Dropout(0.2),
        layers.Conv1D(128, 5, padding='same',
                      kernel_regularizer=regularizers.l2(0.001)),
        layers.Activation('relu'),
        layers.MaxPooling1D(pool_size=4),
        layers.Dropout(0.1),
        layers.Flatten(),
        layers.Dense(64),
        layers.Dense(1),
        layers.Activation('sigmoid'),
    ])
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics='accuracy')
    model.summary()
    return model
# ### Train
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import RepeatedKFold, cross_val_score
# build_fn must be the model-building *callable*, not an already-built model:
# the scikit-learn wrapper calls build_fn() to get a fresh model per CV fold.
# NOTE(review): this is a binary classifier, so KerasClassifier would be the
# more appropriate wrapper than KerasRegressor — confirm before trusting the
# cross_val_score metric.
estimator = KerasRegressor(build_fn=create_model, epochs=500, batch_size=16, verbose=0)
kfold = RepeatedKFold(n_splits=5, n_repeats=100)
results = cross_val_score(estimator, X_train, y_train, cv=kfold, n_jobs=1) # 2 cpus
results.mean()
history = model.fit(X_train, y_train, batch_size=16, epochs=500, validation_data=(X_val, y_val),
                    callbacks=[reduce_lr, early_stop], class_weight = class_weights)
# ### Plot Training Graphs
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('acc')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# ### Save Model
model.save("/home/helemanc/Desktop/Binary_Model/models/binary_model")
# ### Evaluate Model
model_loaded = tf.keras.models.load_model("/home/helemanc/Desktop/Binary_Model/models/binary_model")
model_loaded.evaluate(X_test, y_test, batch_size=16)
# + jupyter={"outputs_hidden": true} tags=[]
from sklearn.metrics import classification_report
predictions = model_loaded.predict(X_test)
pred = [1 * (x[0]>=0.50) for x in predictions] #0.5 o 0.52?
print(classification_report(y_test, pred))
# + [markdown] tags=[]
# ## With Augmentation
# + [markdown] tags=[]
# ### Load Data
# -
X_train_aug = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model_augmented/X_train_aug.pkl", 'rb'))
X_val = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model_augmented/X_val.pkl", 'rb'))
X_test = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model_augmented/X_test.pkl", 'rb'))
y_train_aug = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model_augmented/y_train_aug.pkl", 'rb'))
y_val = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model_augmented/y_val.pkl", 'rb'))
y_test = pickle.load(open( "/home/helemanc/Desktop/Binary_Model/data_model_augmented/y_test.pkl", 'rb'))
# Class frequencies of the *augmented* training labels. The original
# counted y_train — a stale variable left over from the non-augmented
# section — instead of the y_train_aug just loaded above.
(unique, counts) = np.unique(y_train_aug, return_counts=True)
frequencies = np.asarray((unique, counts)).T
print(frequencies)
# +
# Callbacks for the augmented-data run.
weight_path = "/home/helemanc/Desktop/Binary_Model/weights/binary_model_augmented.hdf5"
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy',
                                                 factor=0.5, patience=4,
                                                 verbose=1, mode='max',
                                                 min_lr=0.00001)
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=45,
                                              verbose=1)
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=weight_path,
                                                      save_weights_only=True,
                                                      monitor='val_accuracy',
                                                      mode='max',
                                                      save_best_only=True)
# classweight — keyword form required by scikit-learn >= 0.24 (the old
# positional call raises TypeError there).
from sklearn.utils import class_weight
class_weights = class_weight.compute_class_weight(class_weight='balanced',
                                                  classes=np.unique(y_train_aug),
                                                  y=y_train_aug)
class_weights = {l: c for l, c in zip(np.unique(y_train_aug), class_weights)}
# -
model.compile(loss='binary_crossentropy', optimizer='adam', metrics='accuracy')
# ### Train
# NOTE(review): X_val_aug is not defined in this section — it comes from the
# earlier scaling cell, not from the pickles loaded above; running this
# section standalone will fail. Confirm whether X_val was meant here.
history = model.fit(X_train_aug, y_train_aug, batch_size=16, epochs=500, validation_data=(X_val_aug, y_val),
                    callbacks=[reduce_lr, early_stop, model_checkpoint], class_weight = class_weights)
# ### Plot Training Graphs
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('acc')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# ### Save Model
model.save("/home/helemanc/Desktop/Binary_Model/models/binary_model_aug")
# ### Evaluate Model
model_loaded = tf.keras.models.load_model("/home/helemanc/Desktop/Binary_Model/models/binary_model_aug")
model_loaded.evaluate(X_test, y_test, batch_size=16)
# + tags=[]
from sklearn.metrics import classification_report
predictions = model_loaded.predict(X_test)
pred = [1 * (x[0]>=0.50) for x in predictions] #0.5 o 0.52?
print(classification_report(y_test, pred))
# -
| notebooks_binary_model/Build_Audio_Vectors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# export
from local.core.imports import *
from local.utils import compose
from local.test import *
from local.notebook.export import *
from local.notebook.showdoc import *
import nbformat
from notebook import notebookapp
from nbconvert.preprocessors import ExecutePreprocessor, Preprocessor
from nbconvert import HTMLExporter,MarkdownExporter
from nbformat.sign import NotebookNotary
from traitlets.config import Config
# +
# default_exp notebook.export2html
# default_cls_lvl 3
# -
# # Converting notebooks to html
#
# > The functions that transform the dev notebooks in the documentation of the library
# ## Preprocessing notebook
# ### Cell processors
#export
def remove_widget_state(cell):
    "Remove widgets in the output of `cells`"
    def _keep(out):
        # Drop outputs that carry jupyter-widget view data.
        return not ('data' in out and 'application/vnd.jupyter.widget-view+json' in out.data)
    if cell['cell_type'] == 'code' and 'outputs' in cell:
        cell['outputs'] = list(filter(_keep, cell['outputs']))
    return cell
#export
# Matches any cell that has a `show_doc` or an `#export` in it
_re_cell_to_hide = r's*show_doc\(|^\s*#\s*export\s+'
#export
def hide_cells(cell):
    "Hide `cell` that need to be hidden"
    should_hide = check_re(cell, _re_cell_to_hide)
    if should_hide:
        cell['metadata'] = {'hide_input': True}
    return cell
# +
# Exercise hide_cells on both a show_doc cell and an #export cell. The
# original loop never used its `source` variable (the cell source was
# hard-coded), so the #export case was never actually tested.
for source in ['show_doc(read_nb)', '# export\nfrom local.core import *']:
    cell = {'cell_type': 'code', 'source': source}
    cell1 = hide_cells(cell.copy())
    assert 'metadata' in cell1
    assert 'hide_input' in cell1['metadata']
    assert cell1['metadata']['hide_input']
cell = {'cell_type': 'code', 'source': '# exports\nfrom local.core import *'}
test_eq(hide_cells(cell.copy()), cell)
# -
#export
# Matches a leading `#exports` flag line (anchored to the start of the source)
_re_exports = re.compile(r'^#\s*exports[^\n]*\n')
#export
def clean_exports(cell):
    "Remove exports flag from `cell`"
    source = cell['source']
    cell['source'] = _re_exports.sub('', source)
    return cell
# Flag lines are stripped whether or not they carry a module name.
cell = {'cell_type': 'code', 'source': '# exports\nfrom local.core import *'}
test_eq(clean_exports(cell.copy()), {'cell_type': 'code', 'source': 'from local.core import *'})
cell = {'cell_type': 'code', 'source': '# exports core\nfrom local.core import *'}
test_eq(clean_exports(cell.copy()), {'cell_type': 'code', 'source': 'from local.core import *'})
#export
def treat_backticks(cell):
    "Add links to backticks words in `cell`"
    if cell['cell_type'] != 'markdown':
        return cell
    cell['source'] = add_doc_links(cell['source'])
    return cell
# Backticked known names become documentation links.
cell = {'cell_type': 'markdown', 'source': 'This is a `Tensor`'}
test_eq(treat_backticks(cell), {'cell_type': 'markdown',
    'source': 'This is a [`Tensor`](https://pytorch.org/docs/stable/tensors.html#torch-tensor)'})
#export
_re_nb_link = re.compile(r"""
# Catches any link to a local notebook and keeps the title in group 1, the link without .ipynb in group 2
\[          # Opening [
([^\]]*)    # Catching group for any character except ]
\]\(        # Closing ], opening (
((?!http)   # Negative lookahead: skip external links. The original used the
            # character class [^http], which wrongly rejected ANY local link
            # whose target starts with h, t or p (e.g. tutorial.ipynb).
[^\)]*)     # Catching group containing anything but )
\.ipynb\)   # .ipynb and closing )
""", re.VERBOSE)

# export
def convert_links(cell):
    "Convert the .ipynb links to .html"
    if cell['cell_type'] == 'markdown':
        cell['source'] = _re_nb_link.sub(r'[\1](\2.html)', cell['source'])
    return cell
# Local notebook links are rewritten; already-html and external links are not.
cell = {'cell_type': 'markdown', 'source': "This is a link to a [notebook](01_core.ipynb)."}
test_eq(convert_links(cell), {'cell_type': 'markdown',
    'source': "This is a link to a [notebook](01_core.html)."})
cell = {'cell_type': 'markdown', 'source': "This is a link to a [page](01_core.html)."}
test_eq(convert_links(cell.copy()), cell)
cell = {'cell_type': 'markdown', 'source': "This is a link to an [external nb](http://01_core.ipynb)."}
test_eq(convert_links(cell.copy()), cell)
#export
_re_block_notes = re.compile(r"""
# Catches any pattern > Title: content with title in group 1 and content in group 2
^>\s*      # > followed by any number of whitespace
([^:]*)    # Catching group for any character but :
:\s*       # : then any number of whitespace
([^\n]*)   # Catching group for anything but a new line character
(?:\n|$)   # Non-catching group for either a new line or the end of the text
""", re.VERBOSE)

#export
def add_jekyll_notes(cell):
    "Convert block quotes to jekyll notes in `cell`"
    t2style = {'Note': 'info', 'Warning': 'danger', 'Important': 'warning'}
    def _inner(m):
        title, text = m.groups()
        if title not in t2style:
            return f"> {title}: {text}"  # unknown title: leave the quote as-is
        style = t2style[title]
        return (f'<div markdown="span" class="alert alert-{style}" role="alert">'
                f'<i class="fa fa-{style}-circle"></i> <b>{title}: </b>{text}</div>')
    if cell['cell_type'] == 'markdown':
        cell['source'] = _re_block_notes.sub(_inner, cell['source'])
    return cell
# Supported styles are `Warning`, `Note` and `Important`:
#
# > Warning: There will be no second warning!
#
# > Important: Pay attention! This is important.
#
# > Note: Take note of this.
# Check each supported style plus an unknown title ('Bla') left untouched.
for w,s in zip(['Warning', 'Note', 'Important', 'Bla'], ['danger', 'info', 'warning', 'info']):
    cell = {'cell_type': 'markdown', 'source': f"> {w}: This is my final {w.lower()}!"}
    res = f'<div markdown="span" class="alert alert-{s}" role="alert">'
    res += f'<i class="fa fa-{s}-circle"></i> <b>{w}: </b>This is my final {w.lower()}!</div>'
    if w != 'Bla': test_eq(add_jekyll_notes(cell), {'cell_type': 'markdown', 'source': res})
    else: test_eq(add_jekyll_notes(cell), cell)
#export
_re_image = re.compile(r"""
# Catches any image file used, either with `` or `<img src="image_file">`
^!\[ # Beginning of line (since re.MULTILINE is passed) followed by ![
[^\]]* # Anything but ]
\]\( # Closing ] and opening (
([^\)]*) # Catching block with any character but )
\) # Closing )
| # OR
<img\ src=" # <img src="
([^"]*) # Catching block with any character except "
" # Closing
""", re.MULTILINE | re.VERBOSE)
#export
def copy_images(cell, fname, dest):
if cell['cell_type'] == 'markdown' and _re_image.search(cell['source']):
grps = _re_image.search(cell['source']).groups()
src = grps[0] or grps[1]
os.makedirs((Path(dest)/src).parent, exist_ok=True)
shutil.copy(Path(fname).parent/src, Path(dest)/src)
return cell
# Back up the real docs image, run copy_images against a live notebook path,
# then restore the original file.
dest_img = Path('docs')/'images'/'pixelshuffle.png'
dest_bak = Path('docs')/'images'/'pixelshuffle.bak'
if dest_img.exists(): shutil.move(dest_img, dest_bak)
for text in ['Text\n',
             'Text\n<img src="images/pixelshuffle.png" alt="Pixelshuffle" style="width: 100%; height: auto;"/>']:
    cell = {'cell_type': 'markdown', 'source': text}
    cell1 = copy_images(cell, Path('10_layers.ipynb'), Path('docs'))
    #Function doesn't touch cell
    test_eq(cell, cell1)
    #Image has been copied
    assert dest_img.exists()
    os.remove(dest_img)
if dest_bak.exists(): shutil.move(dest_bak, dest_img)
#export
#Matches any cell with #hide or #default_exp or #default_cls_lvl
_re_cell_to_remove = re.compile(r'^\s*#\s*(hide|default_exp|default_cls_lvl)\s+')
#export
def remove_hidden(cells):
    "Remove in `cells` the ones with a flag `#hide` or `#default_exp`"
    kept = []
    for cell in cells:
        if not _re_cell_to_remove.search(cell['source']):
            kept.append(cell)
    return kept
# +
# Build a mix of flagged and unflagged cells; only the four without a
# hide/default_exp/default_cls_lvl flag must survive.
cells = [{'cell_type': 'code', 'source': source} for source in [
    '# export\nfrom local.core import *',
    '# hide\nfrom local.core import *',
    '#exports\nsuper code',
    '#default_exp notebook.export',
    'show_doc(read_nb)',
    '#default_cls_lvl 3']] + [{'cell_type': 'markdown', 'source': source} for source in [
    'nice', '#hide\n\nto hide']]
cells1 = remove_hidden(cells)
test_eq(len(cells1), 4)
test_eq(cells1[0], cells[0])
test_eq(cells1[1], cells[2])
test_eq(cells1[2], cells[4])
test_eq(cells1[3], cells[6])
# -
#export
#Catches a `# default_cls_lvl <n>` flag line; the digits end up in group 1.
_re_default_cls_lvl = re.compile(r"""
^ # Beginning of line (since re.MULTILINE is passed)
\s*\#\s* # Any number of whitespace, #, any number of whitespace
default_cls_lvl # default_cls_lvl
\s* # Any number of whitespace
(\d*) # Catching group for any number of digits
\s*$ # Any number of whitespace and end of line (since re.MULTILINE is passed)
""", re.IGNORECASE | re.MULTILINE | re.VERBOSE)
# export
def find_default_level(cells):
    "Scan `cells` for a `#default_cls_lvl` flag and return its level (2 when absent)."
    for cell in cells:
        hit = check_re(cell, _re_default_cls_lvl)
        if hit is None: continue
        return int(hit.groups()[0])
    return 2
# The export notebook declares `#default_cls_lvl 3` in one of its cells.
tst_nb = read_nb('91_notebook_export.ipynb')
test_eq(find_default_level(tst_nb['cells']), 3)
#export
#Find a cell with #export(s)
_re_export = re.compile(r'^\s*#\s*exports?\s*', re.IGNORECASE | re.MULTILINE)
#Catches a `show_doc(...)` call and puts its first argument in group 1.
_re_show_doc = re.compile(r"""
# First one catches any cell with a #export or #exports, second one catches any show_doc and get the first argument in group 1
show_doc # show_doc
\s*\(\s* # Any number of whitespace, opening (, any number of whitespace
([^,\)\s]*) # Catching group for any character but a comma, a closing ) or a whitespace
[,\)\s] # A comma, a closing ) or a whitespace
""", re.MULTILINE | re.VERBOSE)
# +
#export
def _show_doc_cell(name, cls_lvl=None):
return {'cell_type': 'code',
'execution_count': None,
'metadata': {},
'outputs': [],
'source': f"show_doc({name}{'' if cls_lvl is None else f', default_cls_level={cls_lvl}'})"}
def add_show_docs(cells, cls_lvl=None):
    "Insert a `show_doc` cell after each exported function or class that lacks one."
    # Names already documented by an explicit show_doc(...) call somewhere.
    documented = set()
    for cell in cells:
        if cell['cell_type'] != 'code': continue
        hit = _re_show_doc.search(cell['source'])
        if hit is not None: documented.add(hit.groups()[0])
    out = []
    for cell in cells:
        out.append(cell)
        if not check_re(cell, _re_export): continue
        for name in export_names(cell['source'], func_only=True):
            if name not in documented:
                out.append(_show_doc_cell(name, cls_lvl=cls_lvl))
    return out
# +
# Locate the `read_nb` export cell in the test notebook, then check a show_doc
# cell is appended right after it with the requested class level.
for i,cell in enumerate(tst_nb['cells']):
    if cell['source'].startswith('#export\ndef read_nb'): break
tst_cells = [c.copy() for c in tst_nb['cells'][i-1:i+1]]
added_cells = add_show_docs(tst_cells, cls_lvl=3)
test_eq(len(added_cells), 3)
test_eq(added_cells[0], tst_nb['cells'][i-1])
test_eq(added_cells[1], tst_nb['cells'][i])
test_eq(added_cells[2], _show_doc_cell('read_nb', cls_lvl=3))
test_eq(added_cells[2]['source'], 'show_doc(read_nb, default_cls_level=3)')
#Check show_doc isn't added if it was already there.
tst_cells1 = [{'cell_type':'code', 'source': '#export\ndef my_func(x):\n return x'},
              {'cell_type':'code', 'source': 'show_doc(my_func)'}]
test_eq(add_show_docs(tst_cells1), tst_cells1)
tst_cells1 = [{'cell_type':'code', 'source': '#export\ndef my_func(x):\n return x'},
              {'cell_type':'markdown', 'source': 'Some text'},
              {'cell_type':'code', 'source': 'show_doc(my_func, title_level=3)'}]
test_eq(add_show_docs(tst_cells1), tst_cells1)
# -
#export
_re_fake_header = re.compile(r"""
# Matches any fake header (one that ends with -)
\#+ # One or more #
\s+ # One or more of whitespace
.* # Any char
-\s* # A dash followed by any number of white space
$ # End of text
""", re.VERBOSE)
# export
def remove_fake_headers(cells):
    "Drop markdown cells whose source is only a fake header (a title ending in `-`)."
    def _is_fake(c):
        # Code cells are always kept; only markdown is tested against the regex.
        return c['cell_type'] != 'code' and _re_fake_header.search(c['source']) is not None
    return [c for c in cells if not _is_fake(c)]
# Prepend one fake header to real cells and check only it is removed.
cells = [{'cell_type': 'markdown',
          'metadata': {},
          'source': '### Fake-'}] + tst_nb['cells'][:10]
cells1 = remove_fake_headers(cells)
test_eq(len(cells1), len(cells)-1)
test_eq(cells1[0], cells[1])
# export
def remove_empty(cells):
    "Filter out of `cells` every cell whose `source` is empty."
    return [cell for cell in cells if cell['source']]
# ### Grabbing metadata
# +
#export
_re_title_summary = re.compile(r"""
# Catches the title and summary of the notebook, presented as # Title > summary, with title in group 1 and summary in group 2
^\s* # Beginning of text followe by any number of whitespace
\#\s+ # # followed by one or more of whitespace
([^\n]*) # Catching group for any character except a new line
\n+ # One or more new lines
>\s* # > followed by any number of whitespace
([^\n]*) # Catching group for any character except a new line
""", re.VERBOSE)
_re_properties = re.compile(r"""
^-\s+ # Beginnig of a line followed by - and at least one space
(.*?) # Any pattern (shortest possible)
\s*:\s* # Any number of whitespace, :, any number of whitespace
(.*?)$ # Any pattern (shortest possible) then end of line
""", re.MULTILINE | re.VERBOSE)
# -
# export
def get_metadata(cells):
    "Find the cell with title and summary in `cells`."
    for idx, cell in enumerate(cells):
        if cell['cell_type'] != 'markdown': continue
        found = _re_title_summary.match(cell['source'])
        if found is None: continue
        # Consume the metadata cell so it never shows up in the rendered page.
        cells.pop(idx)
        extra = dict(_re_properties.findall(cell['source']))
        return {'keywords': 'fastai',
                'summary' : found.groups()[1],
                'title'   : found.groups()[0],
                **extra}
    # No metadata cell found: fall back to placeholder values.
    return {'keywords': 'fastai',
            'summary' : 'summary',
            'title'   : 'Title'}
tst_nb = read_nb('91_notebook_export.ipynb')
test_eq(get_metadata(tst_nb['cells']), {
    'keywords': 'fastai',
    'summary': 'The functions that transform the dev notebooks in the fastai library',
    'title': 'Converting notebooks to modules',
    'author': '"<NAME>"'})
#The cell with the metadata is popped out, so if we do it a second time we get the default.
test_eq(get_metadata(tst_nb['cells']), {'keywords': 'fastai',
                                        'summary' : 'summary',
                                        'title' : 'Title'})
# ## Executing show_doc cells
#export
#Catches any cell with a show_doc or an import from local
#(group 1 holds the show_doc argument when the first alternative matches).
_re_cell_to_execute = re.compile(r"^\s*show_doc\(([^\)]*)\)|^from local\.", re.MULTILINE)
# export
class ExecuteShowDocPreprocessor(ExecutePreprocessor):
    "An `ExecutePreprocessor` that only executes `show_doc` and `import` cells"
    def preprocess_cell(self, cell, resources, index):
        # Only genuine code cells containing a show_doc call or a
        # `from local.` import are handed to the real preprocessor.
        is_code = 'source' in cell and cell['cell_type'] == "code"
        if is_code and _re_cell_to_execute.search(cell['source']):
            return super().preprocess_cell(cell, resources, index)
        return cell, resources
# +
# export
def _import_show_doc_cell(mod=None, name=None):
"Add an import show_doc cell + deal with the _file_ hack if necessary."
source = f"#export\nfrom local.notebook.showdoc import show_doc"
if mod: source += f"\nfrom local.{mod} import *"
if name: source += f"\nfrom pathlib import Path\n_file_ = {name}"
return {'cell_type': 'code',
'execution_count': None,
'metadata': {'hide_input': True},
'outputs': [],
'source': source}
def execute_nb(nb, mod=None, metadata=None, show_doc_only=True, name=None):
    "Execute `nb` (or only the `show_doc` cells) with `metadata`"
    # NOTE: mutates the caller's notebook by prepending the bootstrap cell.
    nb['cells'].insert(0, _import_show_doc_cell(mod, name))
    if show_doc_only:
        processor = ExecuteShowDocPreprocessor(timeout=600, kernel_name='python3')
    else:
        processor = ExecutePreprocessor(timeout=600, kernel_name='python3')
    pnb = nbformat.from_dict(nb)
    processor.preprocess(pnb, metadata or {})
    return pnb
# -
# Execute a copy of the test notebook and check the appended show_doc cell
# actually produced some output.
fake_nb = {k:v for k,v in tst_nb.items() if k != 'cells'}
fake_nb['cells'] = [tst_nb['cells'][0].copy()] + added_cells
fake_nb = execute_nb(fake_nb, mod='notebook.export')
assert len(fake_nb['cells'][-1]['outputs']) > 0
# ## Conversion
#hide
#Tricking jupyter notebook to have a __file__ attribute. All _file_ will be replaced by __file__
_file_ = Path('local').absolute()/'notebook'/'export.py'
# export
def _exporter(markdown=False):
    "Build the Jekyll-templated nbconvert exporter: HTML by default, markdown on request."
    cfg = Config()
    if markdown:
        exporter = MarkdownExporter(cfg)
        exporter.template_file = 'jekyll-md.tpl'
    else:
        exporter = HTMLExporter(cfg)
        exporter.template_file = 'jekyll.tpl'
    exporter.exclude_input_prompt = True
    exporter.exclude_output_prompt = True
    # Templates live next to this module (via the `_file_` hack above).
    exporter.template_path.append(str(Path(_file_).parent))
    return exporter
# export
#Transforms applied to the list of cells as a whole.
process_cells = [remove_fake_headers, remove_hidden, remove_empty]
#Transforms applied to each individual cell.
process_cell = [hide_cells, remove_widget_state, add_jekyll_notes, convert_links]
#export
_re_file = re.compile(r"""
^_file_ # _file_ at the beginning of a line (since re.MULTILINE is passed)
\s*=\s* # Any number of whitespace, =, any number of whitespace
(\S*) # Catching group for any non-whitespace characters
\s*$ # Any number of whitespace then the end of line
""", re.MULTILINE | re.VERBOSE)
# export
def _find_file(cells):
"Find in `cells` if a _file_ is defined."
for cell in cells:
if cell['cell_type']=='code' and _re_file.search(cell['source']):
return _re_file.search(cell['source']).groups()[0]
#hide
# The test notebook defines `_file_` in one of its cells; _find_file must grab it.
tst_nb = read_nb('91_notebook_export.ipynb')
test_eq(_find_file(tst_nb['cells']), "Path('local').absolute()/'notebook'/'export.py'")
#export
def notebook_path():
    "Returns the absolute path of the Notebook or None if it cannot be determined"
    #NOTE: works only when the security is token-based or there is no password
    kernel_id = Path(ipykernel.get_connection_file()).stem.split('-', 1)[1]
    for srv in notebookapp.list_running_servers():
        try:
            # The Jupyter REST API expects the token as a query parameter;
            # appending it raw to the path (previous behavior) built an
            # invalid URL and the request always failed.
            sessions = json.load(urlopen(f"{srv['url']}api/sessions?token={srv['token']}"))
            return next(Path(srv['notebook_dir'])/sess['notebook']['path']
                        for sess in sessions if sess['kernel']['id']==kernel_id)
        except Exception:
            continue  # stale entries in the runtime directory / dead servers
# +
#test_eq(notebook_path().name, '93_notebook_export2html.ipynb')
#test_eq(notebook_path().parent, Path().absolute())
# -
# export
def convert_nb(fname, dest_path='docs'):
    "Convert a notebook `fname` to html file in `dest_path`."
    fname = Path(fname).absolute()
    nb = read_nb(fname)
    cls_lvl = find_default_level(nb['cells'])
    _name = _find_file(nb['cells'])
    mod = find_default_export(nb['cells'])
    # List-level cleanups first, then per-cell transforms (image copy included).
    nb['cells'] = compose(*process_cells, partial(add_show_docs, cls_lvl=cls_lvl))(nb['cells'])
    nb['cells'] = [compose(partial(copy_images, fname=fname, dest=dest_path), *process_cell, treat_backticks)(c)
                   for c in nb['cells']]
    # Strip the numeric prefix: 93_notebook_export2html.ipynb -> notebook.export2html.html
    dest_name = '.'.join(fname.with_suffix('.html').name.split('_')[1:])
    meta_jekyll = get_metadata(nb['cells'])
    meta_jekyll['nb_path'] = f'{fname.parent.name}/{fname.name}'
    nb = execute_nb(nb, mod=mod, name=_name)
    nb['cells'] = [clean_exports(c) for c in nb['cells']]
    with open(f'{dest_path}/{dest_name}', 'w') as f:
        f.write(_exporter().from_notebook_node(nb, resources=meta_jekyll)[0])
convert_nb('40_tabular_core.ipynb', '../docs')
# export
def convert_all(path='.', dest_path='../docs', force_all=False):
    "Convert all notebooks in `path` to html files in `dest_path`."
    path = Path(path)
    converted = 0
    for fname in path.glob("[0-9]*.ipynb"):
        if fname.name.startswith('_'): continue
        out_name = '.'.join(fname.with_suffix('.html').name.split('_')[1:])
        fname_out = Path(dest_path)/out_name
        # Skip notebooks that are older than their generated page, unless forced.
        up_to_date = fname_out.exists() and os.path.getmtime(fname) < os.path.getmtime(fname_out)
        if up_to_date and not force_all: continue
        print(f"converting: {fname} => {fname_out}")
        converted += 1
        try: convert_nb(fname, dest_path=dest_path)
        except Exception as e: print(e)
    if converted == 0: print("No notebooks were modified")
#hide
#Rebuild the whole documentation site, ignoring modification times.
convert_all(force_all=True)
# export
def convert_post(fname, dest_path='posts'):
    "Convert a notebook `fname` to blog post markdown in `dest_path`."
    fname = Path(fname).absolute()
    nb = read_nb(fname)
    # Metadata is grabbed first because get_metadata pops the metadata cell.
    meta_jekyll = get_metadata(nb['cells'])
    nb['cells'] = compose(*process_cells)(nb['cells'])
    nb['cells'] = [compose(*process_cell)(c) for c in nb['cells']]
    out_file = Path(dest_path)/fname.with_suffix('.md').name
    md_exporter = _exporter(markdown=True)
    with out_file.open('w') as f:
        f.write(md_exporter.from_notebook_node(nb, resources=meta_jekyll)[0])
convert_post('posts/2019-08-06-delegation.ipynb')
# ## Export-
#hide
#Regenerate all library modules from the notebooks.
notebook2script(all_fs=True)
#hide
def debug_nb(fname, dest=None):
    "Run the doc pipeline on `fname` (no export) and dump the executed notebook for inspection."
    fname = Path(fname).absolute()
    nb = read_nb(fname)
    cls_lvl = find_default_level(nb['cells'])
    _name = _find_file(nb['cells'])
    nb['cells'] = compose(*process_cells, partial(add_show_docs, cls_lvl=cls_lvl))(nb['cells'])
    nb['cells'] = [compose(*process_cell)(c) for c in nb['cells']]
    nb = execute_nb(nb, name=_name)
    if dest is None: dest = fname.with_suffix('.dbg.ipynb')
    # `with` ensures the output handle is closed (the old code leaked it).
    with open(dest, 'w') as f:
        nbformat.write(nbformat.from_dict(nb), f, version=4)
# +
#hide
# debug_nb('93_notebook_export2html.ipynb')
# -
| dev/93_notebook_export2html.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/codeforhk/python_practitioner/blob/master/py_practitioner_3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="ejMS_tRueq8A"
#
#
# <img src="https://www.codefor.hk/wp-content/themes/DC_CUSTOM_THEME/img/logo-code-for-hk-logo.svg" height="150" width="150" align="center"/>
# <h1><center>Code For Hong Kong - Python Practitioner class 3</center></h1>
# <h6><center>Written by <NAME></center></h6>
#
# + [markdown] colab_type="text" id="nLzB2n5JE1AA"
# # Coding Test
# + [markdown] colab_type="text" id="TMG24tU8A8ZU"
#
#
# 
#
# Reaching this stage of the course, I want to test you on some interesting concepts. By now, you should understand a bit of how computers work, and what you can do with them.
#
#
#
# + [markdown] colab_type="text" id="O402_lQKEoWg"
# One thing that matters in programming is data security - why are computer viruses harmful?
#
# Understanding how to harm your computer is actually a great way to know the consequence.
# + [markdown] colab_type="text" id="9Gqy-3nBEmAF"
# Using your knowledge, can you try the test below?
# + [markdown] colab_type="text" id="z0YHO5usBCvA"
# ## Test 1
# I have a 4 digit password with lower case letter and number. Can you write a code to crack it?
# + colab={} colab_type="code" id="HyeL46FBBe-s"
# Your code here
# + [markdown] colab_type="text" id="D_S0VKDeBRJG"
# ## Test 2
#
# Write the most dangerous program you can think of, and run it on the colab cloud. (Don't, don't, don't run it on your local laptop!)
# + colab={} colab_type="code" id="wmlILUsc_Ght"
# Your code here
# Example "dangerous" program: a busy loop that pins a CPU core forever.
# It is left commented out because the bare `while True:` with no body was a
# SyntaxError that prevented this whole file from even parsing — and because
# running it would hang the notebook.
# while True:
#     pass
# + [markdown] colab_type="text" id="f6zg5RvFDZBV"
# ## Test 3
# + [markdown] colab_type="text" id="jhKmaeTND1zu"
# Remember our student picker?
# 1) Try to write a function with a student picker
# 2) Think of a way to write a function so that, if a student fails to answer a question, the chance of that student being picked increases.
# +
# Your code here
# (If you can't answer question 2, we will show you later in class :)
import random
# Map of student name -> score used to bias the picker.
students = {
    "Denis": -1,
    "Jerry": 0,
    "Nick": 0,
    "Phoebe": 0,
    "Ryan": 1
}
def pick():
    # sort the students
    # NOTE(review): items() yields (name, score) tuples, so `random.choice`
    # returns a tuple here, not a name — `students[student]` below then
    # raises KeyError. Presumably only the name should be returned; confirm.
    sorted_students = list(sorted(students.items()))
    # get a slice of the list
    # NOTE(review): [0:3] keeps only the alphabetically-first three students,
    # so "Phoebe" and "Ryan" can never be picked — verify this is intended.
    picky_students = sorted_students[0:3]
    # randomly pick from the picky_students
    return random.choice(picky_students)
def answer_teacher(student):
    # input() always returns a str, so the `== 0` comparison is never true;
    # only the `== "0"` branch can fire.
    student_answer = input("How do you answer?")
    if student_answer == 0 or student_answer == "0":
        students[student] -= 1
    # NOTE(review): `students[student] == 1` looks like a typo for
    # `student_answer == 1` (mirroring the branch above) — confirm.
    elif students[student] == 1 or student_answer == "1":
        students[student] += 1
def student_picker():
    # Loops forever, prompting the teacher after every pick.
    while True:
        student = pick()
        print(type(student))
        answer_teacher(student)
student_picker()
# + [markdown] colab_type="text" id="CoEwMdGloFit"
# # 4.0.0 Practical Python Packages & Skills
# + [markdown] colab_type="text" id="nbRxeRf7HvTp"
# ## 4.2.0 Pandas - Spreadsheet related
# + [markdown] colab_type="text" id="QngPW2JbONF4"
# 
# + [markdown] colab_type="text" id="faYQDHldPLTV"
# 
# + [markdown] colab_type="text" id="LHlq_KkuQcpf"
# 
# + colab={} colab_type="code" id="kxQHde8BIUic"
import pandas as pd
# + [markdown] colab_type="text" id="LYuWdGAYQPjv"
# ## 4.3.0 Summary of basic pandas function
# + [markdown] colab_type="text" id="sNZSxGdSNSfE"
# ### 4.3.1 To Create a dataframe
# + colab={} colab_type="code" id="ouCGT40gNSfG"
import pandas as pd
# Single-column DataFrame from a dict of lists; index defaults to 0..2.
df = pd.DataFrame({'A':[1,2,3]})
# + colab={} colab_type="code" id="iuJleWD7NSfI" outputId="727985bb-1efe-4de1-ab41-a14196c632d8"
df
# + colab={} colab_type="code" id="9CkqxVE8NSfQ"
# Replace the default integer index with string labels.
df.index = ['a','b','c']
# + colab={} colab_type="code" id="i1p3vUmhNSfU" outputId="1bbe9e4b-69c3-471b-83b8-039ef200d2b4"
df
# + colab={} colab_type="code" id="Pu-LrBLyNSfZ" outputId="9bd50c61-efea-4b6d-a9f2-ab317060a9fa"
# Rebuild df with two columns of different dtypes.
import pandas as pd
df = pd.DataFrame({'A':[1,2,3],'B':['a','b','c']})
df
# + [markdown] colab_type="text" id="ykV0Cfx6NSfc"
# ...or read from some other source
# + colab={} colab_type="code" id="wZTF6t9JNSfe"
# Load a sample CSV straight from a URL into a second DataFrame.
url = 'https://raw.githubusercontent.com/miga101/course-DSML-101/master/pandas_class/diamonds.csv'
df2 = pd.read_csv(url)
# + [markdown] colab_type="text" id="vCp80o6jQDPY"
# ### 4.3.2 To select a subset of dataframe
# + [markdown] colab_type="text" id="bWawQ-6BQDPa"
# You can select a column by square bracket, column names
# + colab={} colab_type="code" id="8Nat2xUeQDPa" outputId="d04c2f45-1bbb-4ae0-a885-d150f75bd43c"
df
# + colab={} colab_type="code" id="q4aqDaK-QDPd" outputId="f85e2fff-3b8e-4c6f-b388-06acaa0a10d1"
# Bracket access returns the column as a Series.
df['B']
# + colab={} colab_type="code" id="qq3iecvQQDPh" outputId="abb6d3be-b3c2-4c8a-8ef2-6589fed58f80"
df['A']
# + colab={} colab_type="code" id="SrSOGehwQDPk" outputId="ca1ea952-6ca4-4205-e62c-880ec4331f15"
#e.g to select a column called A
# Attribute access and bracket access are equivalent for simple names;
# only the last expression is displayed in a notebook.
df.A
df['A']
# + [markdown] colab_type="text" id="zTju9wZdQDPm"
# You can use "columns" list all the columns
# + colab={} colab_type="code" id="9ZvvpiqpQDPn"
# Rename both columns in place...
df.columns = ['patrick','data']
# + colab={} colab_type="code" id="f-HiD3W-QDPt" outputId="9d03c8a3-51c4-436f-fd98-2689390e3e1a"
# ...inspect them, then rename back to the originals.
df.columns
df.columns = ['A','B']
df
# + [markdown] colab_type="text" id="I3CPmtseQDPy"
# Or, you can use "loc" to select a subset of the dataframe
# + colab={} colab_type="code" id="6WVCIOrvQDP1" outputId="56ea7052-7136-4ff3-cb53-a6f358568951"
# `df.loc[]` with empty brackets is a SyntaxError; demonstrate label-based
# selection with an explicit row/column slice instead (all rows, all columns).
df.loc[:, :]
# + colab={} colab_type="code" id="qK4K07RTQDP5"
# loc also assigns: set row 2, column 'B' by label.
df.loc[2,'B'] = 'patrick'
# + colab={} colab_type="code" id="NVF53Na7QDP8" outputId="89d20ec6-6a59-4f33-e958-31b053b144a4"
df
# + [markdown] colab_type="text" id="JAEHcmHnQDQA"
# Or, you can use "iloc" to use index to select a subset of the dataframe
# + colab={} colab_type="code" id="0r4rm23FQDQB" outputId="452b3ad0-fee7-4d79-ae15-ad20021adc59"
# iloc uses positional (integer) indexing: third row, first column.
df.iloc[2,0]
# + [markdown] colab_type="text" id="0Hm0tjizQDQC"
# ### 4.3.3 Manipulating the dataset
# + [markdown] colab_type="text" id="bwKi1yGBQDQD"
# ##### simply use a new column name to add a column
# + colab={} colab_type="code" id="xK_3lJ34QDQF" outputId="bb15eb76-9d99-439b-d873-23f5a0c91456"
# Assigning to a new column name adds the column.
df['C'] = [0.1,0.2,0.3]
df
# + colab={} colab_type="code" id="HAah4tkLQDQJ"
# Reassigning overwrites the whole column.
df['C'] = [0.1,0.2,0.4]
# + colab={} colab_type="code" id="z2h9sjyBQDQP" outputId="a2b14b79-a821-4b62-8cb4-c377b5f16ade"
# IMPORTANT
# apply returns a NEW Series; the result is displayed but not stored,
# so df itself is unchanged by this cell.
df['C'].apply(lambda x: x*4)
# + colab={} colab_type="code" id="3URjakEQQDQT" outputId="0ef796d5-efbd-4ca2-d48c-2a771b595451"
df
# + colab={} colab_type="code" id="qHJA15zyQDQU" outputId="16de4d93-291a-4dce-e5b4-a5ef592174f4"
df['C'] = [0.1,0.2,0.3]
df
# + [markdown] colab_type="text" id="nh5p4kKeQDQX"
# ##### Use apply to manipulate a column
# + colab={} colab_type="code" id="mtWyZtlLQDQY"
# Doubling via a list comprehension (rebuilds the column from Python objects).
df['C'] = [i*2 for i in df['C']]
# + colab={} colab_type="code" id="gyNiyjGxQDQa" outputId="00c26dae-4d55-4e36-a0c5-57d819a45be8"
df
# + colab={} colab_type="code" id="RmDOXGWzQDQc"
# Same doubling, but with apply and the result assigned back.
df['C'] = df['C'].apply(lambda x: x*2)
# + colab={} colab_type="code" id="_HMoHrw3QDQe" outputId="a788e9d9-20c8-4b21-d618-bfa998aba9d5"
df
# + colab={} colab_type="code" id="e6vG3K8jQDQg" outputId="c04b1611-1b04-4b60-b3b0-8604bdcf0857"
df['C'] = df['C'].apply(lambda x: x*2)
df
# + [markdown] colab_type="text" id="rqV5jRvoQDQi"
# ##### Use 'drop' to drop a column
# + colab={} colab_type="code" id="cECvaBu1QDQi"
# drop returns a new frame; reassign to actually remove column 'C'.
df = df.drop(['C'], axis = 1)
# + colab={} colab_type="code" id="6OjLTaUzQDQk" outputId="13447729-18d3-40f2-d271-0cd07370bc12"
df
# + colab={} colab_type="code" id="6hm8yDGlQDQl"
# NOTE(review): 'C' was already dropped in the cell above, so this raises
# KeyError on a straight top-to-bottom run; presumably only ['B'] was meant.
df = df.drop(['B', 'C'], axis=1)
# + colab={} colab_type="code" id="zpu1D-LLQDQn" outputId="d934a675-724b-40a8-e8f0-e03b08e754cd"
df
# + [markdown] colab_type="text" id="8IjOROoeQDQt"
# ##### Use 'drop' to drop a row
# + colab={} colab_type="code" id="9BRwA_FkQDQu"
# Scalar assignment broadcasts 1 into every row of a new 'Flora' column.
df['Flora'] = 1
# + colab={} colab_type="code" id="rNnJYEK_QDQx" outputId="91ce7c06-9330-4ee7-e32d-9eb37b027ac1"
# axis=0 drops rows by index label; the result is displayed, df is unchanged.
df.drop([1,2], axis = 0)
# + colab={} colab_type="code" id="8vg3O4gbQDQz" outputId="27f657f9-579c-409d-825e-4401adbc095a"
df.drop([1,2], axis=0)
# + [markdown] colab_type="text" id="6JoDBReZQDQ2"
# ### 4.3.4 Exploring the dataset
# + [markdown] colab_type="text" id="SgcznxvVQDQ2"
# ###### Use head and tail to quickly peek the dataset
# + [markdown] colab_type="text" id="7LtUqF2GT2dD"
#
# + colab={} colab_type="code" id="Gt4jVqkfQDQ3"
df = pd.DataFrame({'A':[1,2,3,4,5], 'B':[1,2,3,4,5]})
# + colab={} colab_type="code" id="rgPqjUyZQDQ4" outputId="b5604abf-9ef8-4f6e-dabb-eeced477467d"
df.tail(2)
# + colab={} colab_type="code" id="uiqlDqCOQDQ7" outputId="880c1461-5eef-4320-bdcb-942818197408"
# Only the last expression of a cell is displayed.
df.head(2) #shows the top 2 rows
df.tail(2) #shows the last 2 rows
# + [markdown] colab_type="text" id="dd-z-79fQDQ9"
# ###### Use shape, info & describe to quickly understand the size/data type of the dataset
# + colab={} colab_type="code" id="vL--Xg7VQDQ9" outputId="053be1cd-8a57-4a83-9583-9695151e8427"
df.shape
# + colab={} colab_type="code" id="8FtZanSXQDRA" outputId="0d07e9a3-f379-4145-d571-6b540400ad1d"
df.info()
# + colab={} colab_type="code" id="n8VcIrsYQDRD" outputId="32483026-82a8-49c1-95da-ad9dab786b7a"
# shape is evaluated but not shown; info() prints; describe() is displayed.
df.shape
df.info()
df.describe()
# + [markdown] colab_type="text" id="g5FQU663SwDE"
# ## 4.4.0 Advance pandas technique
# + [markdown] colab_type="text" id="2zR8CHNqNSjo"
# ### 4.4.1 Reading Data
#
#
#
# - To create a DataFrame out of common Python data structures, we can pass a dictionary of lists to the DataFrame constructor.
# - Using the columns parameter allows us to tell the constructor how we'd like the columns ordered. By default, the DataFrame constructor will order the columns alphabetically (though this isn't the case when reading from a file - more on that next).
# + [markdown] colab_type="text" id="ieM14ryhNSjp"
# ### 4.4.2 CSV file extraction - Reading data online with a valid url link:
# + [markdown] colab_type="text" id="dUx32oUYNSjq"
# - With this example we will learn how to access online real data (e.g. stock market)
# - Let's get this dataset from a website location, providing a valid link. In this example we are using real Tesla's prices.
# + colab={} colab_type="code" id="PxpDCyT_NSjx" outputId="60d917bc-3470-4c1c-e27a-17b44d3d1bee"
url = "https://raw.githubusercontent.com/miga101/course-DSML-101/master/pandas_class/TSLA.csv"
tesla = pd.read_csv(url, index_col=0, parse_dates=True) # index_col = 0, means that we want date to be our index
# parse_dates=True lets pandas parse the date format for us
tesla
# + [markdown] colab_type="text" id="MOLe0E7YNSj0"
# Plotting using pandas
# + colab={} colab_type="code" id="2j4ce0_INSj0" outputId="8645200d-d554-44cf-a925-9006f08e6c9e"
tesla.shape
# + colab={} colab_type="code" id="kS-TPrrVNSj1" outputId="a87ea990-6666-4a3b-f5a6-9deb71fe0ad2"
import matplotlib.pyplot as plt
# DataFrame.plot draws one line per listed column against the date index.
tesla.plot(y = ['Adj Close','High','Low'])
plt.show()
# + colab={} colab_type="code" id="7108hQHVNSj3" outputId="96ad9280-8b59-4fe9-c2b0-1c55ef1ef7d8"
import matplotlib.pyplot as plt
tesla.plot(y=['Adj Close','Volume']) # plotting by indicating which column we want the values from...
plt.show()
# #pd.plot?
# + colab={} colab_type="code" id="xe30koJNNSj4" outputId="6a5b94f7-55eb-4d30-9456-c86a8346540f"
import matplotlib.pyplot as plt
tesla.plot(y=['Adj Close','Open']) # plotting by indicating which column we want the values from...
plt.show()
# + colab={} colab_type="code" id="mJ_F4ZUqNSj5" outputId="cecc46d6-be5c-4372-aae9-ee84ace17fd3"
# Boolean indexing on the DatetimeIndex restricts the plot to recent rows.
tesla[tesla.index>'2017-08-30'].plot(y=['Adj Close','Open'])
plt.show()
# + colab={} colab_type="code" id="-6HN_ocqNSj6" outputId="751b9018-12e8-46f0-8a00-b5f96172dacd"
tesla['Close'].mean() # getting the mean by just calling the mean function
# + colab={} colab_type="code" id="t2dY88-JNSj7" outputId="ad10e668-47df-49af-973b-4155380621da"
tesla.describe() # the describe function will give us a statistical summary
# + [markdown] colab_type="text" id="Erw4VAK2NSj8"
# #### Exercise 4.4.3 - read the csv from the link
# + [markdown] colab_type="text" id="rGrale6nNSj8"
# - Read the csv using pandas
# - Peek into the data by only viewing the first few lines. How would you do that?
# + colab={} colab_type="code" id="CYy7hMjoNSj8" outputId="53fe6d35-2496-4285-c33f-b03b3e57146d"
import pandas as pd
# another example, list of countries
url_c = "https://raw.githubusercontent.com/cs109/2014_data/master/countries.csv"
# Your code here
# Sample solution: load the CSV and peek at the first five rows.
df = pd.read_csv(url_c)
df.head(5)
# + [markdown] colab_type="text" id="ruEsKkhCNSj9"
# ### 4.4.4 - Build your own Pandas dataframe
# + [markdown] colab_type="text" id="XEGexPZINSj-"
# - Below is how to build the dataframe structure using a dictionary - which is just a combination of lists
# + colab={} colab_type="code" id="Gq-RQTLWNSj-" outputId="f2ffa4a3-b4a6-4283-89b7-c5b7d101080f"
# Dict of equal-length lists -> DataFrame; `columns` fixes the column order.
data = {'year': [2010, 2011, 2012, 2011, 2012, 2010, 2011, 2012],
        'team': ['Bears', 'Bears', 'Bears', 'Packers', 'Packers', 'Lions', 'Lions', 'Lions'],
        'wins': [11, 8, 10, 15, 11, 6, 10, 4],
        'losses': [5, 8, 6, 1, 5, 10, 6, 12]}
football = pd.DataFrame(data, columns=['year', 'team', 'wins', 'losses'])
football
# + [markdown] colab_type="text" id="EztC9iTwNSj_"
# ### 4.4.5 - Build your pandas dataframe using list
# + [markdown] colab_type="text" id="8DuwReH5NSj_"
# Back to our hangman example. Let's say I want to build a dataframe, with all the words used in hangman game, and an extra column counting all the len of words. How would I do it?
# + colab={} colab_type="code" id="I0_p_qQ5NSkA" outputId="3c6bc6ba-fe23-40a2-e201-84e01f8845a3"
# Hangman word list turned into a one-column DataFrame, plus a derived
# column holding each word's length.
word_list = ["stick", "john", "pencil", "rubber", "glove", "quick", "brown",
             "fox", "jumps", "over", "lazy", "dog", "manner", "house", "food",
             "brain", "history", "love", "peace", "object"]
df = pd.DataFrame({'words': word_list})
df['word_count'] = df['words'].apply(len)
df
# + [markdown] colab_type="text" id="hwIswl-WNSkB"
# ### 4.4.6 Dataset from a local CSV file.
#
# Reading a CSV is as simple as calling the read_csv function. By default, the read_csv function expects the column separator to be a comma, but you can change that using the sep parameter.
# + colab={} colab_type="code" id="j0vEY33ANSkB" outputId="f5ad8730-6c31-464e-950e-00f499563e91"
import os
# Show the working directory and its contents (only the last value displays).
os.getcwd()
os.listdir()
# + colab={} colab_type="code" id="8XjFez-_NSkC"
# NOTE(review): 'football.csv' is only written a few cells BELOW, so on a
# fresh runtime this raises FileNotFoundError — the cells were evidently
# executed out of order; confirm intended ordering.
df = pd.read_csv('football.csv')
# + colab={} colab_type="code" id="-HOIUuLuNSkC"
df.to_csv('df.csv')
# + colab={} colab_type="code" id="UgXL3Vg8NSkD"
football.to_csv('df.csv')
# + colab={} colab_type="code" id="TcjRUvntNSkE"
# First, write the dataframe to a local hard drive
football.to_csv('football.csv')
# + colab={} colab_type="code" id="ZQD9b4ToNSkE" outputId="f6ab1ea7-9de7-44cb-e18e-c5b17bff6350"
pd.read_csv('df.csv')
# + colab={} colab_type="code" id="E074U2Q-NSkF" outputId="8986e873-0964-463d-a301-44d3d6d8bad5"
from_csv = pd.read_csv('football.csv')
from_csv.head(2)
# + colab={} colab_type="code" id="ApTGKEJkNSkI" outputId="e23a37ee-92b2-42f9-b50c-1be3b95c0f51"
# this is the DataFrame we created from a dictionary earlier
football.head()
# + [markdown] colab_type="text" id="8HuCQ0JxNSkL"
# ### 4.4.7 Exporting our data to excel
# + colab={} colab_type="code" id="_4SZIGbmNSkL"
# since our index on the football DataFrame is meaningless, let's not write it
#import openpyxl
#football.to_excel('football.xlsx', index=False)
# + colab={} colab_type="code" id="n3v5ukxkNSkM"
# delete the old DataFrame
del football
# + colab={} colab_type="code" id="BJtKeHfqNSkO"
# read from Excel
# NOTE(review): the to_excel call above is commented out, so 'football.xlsx'
# may not exist and this read would fail — confirm the file is provided.
football = pd.read_excel('football.xlsx', 'Sheet1')
football
# + colab={} colab_type="code" id="1OeL5Y_cSZba"
# + [markdown] colab_type="text" id="STd1VSJUNSkQ"
# ## 4.5.0 Web scraping using pandas
#
# Use read_html & read_table to get information off the internet
# + colab={} colab_type="code" id="mGBblvYmNSkR"
# Shell commands needed once per environment (kept as comments):
#pip install pandas
#pip install lxml
#sudo pip3 install html5lib
#pip install BeautifulSoup4
# + colab={"base_uri": "https://localhost:8080/", "height": 669} colab_type="code" id="3SPK9CsGNSkS" outputId="dd12b24e-2fb8-47cb-e692-25496d0274b8"
import pandas as pd
import html5lib
# read_html returns a LIST of DataFrames, one per <table> found on the page.
url='http://www.skysports.com/premier-league-table'
epltable = pd.read_html(url)
epltable[0]
# + colab={} colab_type="code" id="xSJUhzPeNSke" outputId="17769e89-64c6-481d-a437-500cc8f93e2c"
url = 'https://raw.github.com/gjreda/best-sandwiches/master/data/best-sandwiches-geocode.tsv'
# fetch the text from the URL and read it into a DataFrame
from_url = pd.read_table(url, sep='\t')
from_url.head(3)
# + [markdown] colab_type="text" id="9EXfFkWbNSkh"
# #### Exercise 4.5.1 Download from internet
# - Try to download all US states using pandas, and save it do an object called "us"
# + colab={} colab_type="code" id="cu4eS1dtNSkh"
import pandas as pd
url = 'https://simple.wikipedia.org/wiki/List_of_U.S._states'
# + colab={} colab_type="code" id="4Lq0kkntNSkj" outputId="92cfbb49-d8c0-48a6-b6b8-8e1092a691fb"
# NOTE(review): `states` is never defined above — this is the exercise
# placeholder (students are expected to build it, e.g. from pd.read_html(url)).
states
# + [markdown] colab_type="text" id="7PtyVLz_NSkk"
# - Try to make the below table a 'human readable' pandas table
# + colab={} colab_type="code" id="GDu18FKzNSkm"
url = 'https://redirect.viglink.com/?format=go&jsonp=vglnk_150580104072412&key=949efb41171ac6ec1bf7f206d57e90b8&libId=j7r70ik301021u9s000DAfhsh97p2&loc=https%3A%2F%2Fwww.r-bloggers.com%2Fgetting-data-from-an-online-source%2F&v=1&out=https%3A%2F%2Fsakai.unc.edu%2Faccess%2Fcontent%2Fgroup%2F3d1eb92e-7848-4f55-90c3-7c72a54e7e43%2Fpublic%2Fdata%2Fbycatch.csv&ref=https%3A%2F%2Fwww.google.com.hk%2F&title=Getting%20Data%20From%20An%20Online%20Source%20%7C%20R-bloggers&txt=https%3A%2F%2Fsakai.unc.edu%2Faccess%2Fcontent%2Fgroup%2F3d1eb92e-7848-4f55-90c3-7c72a54e7e43%2Fpublic%2Fdata%2Fbycatch.csv'
# fetch the text from the URL and read it into a DataFrame
# (left commented out — the redirect URL above returns HTML, not a clean CSV)
#fom_url = pd.read_table(url, sep = ',')
#from_url
# + colab={} colab_type="code" id="0yXqcGW0NSko" outputId="4ae6f38d-b3c5-495c-b621-6e1bdedf618b"
# another, list of countries
url="https://raw.githubusercontent.com/cs109/2014_data/master/countries.csv"
countries = pd.read_csv(url)
# value_counts tallies how many countries fall in each region.
countries.Region.value_counts()
# + [markdown] colab_type="text" id="0F9igBiHRxop"
# ## 4.6.0 Send Email using python (TO BE UPDATED)
# + [markdown] colab_type="text" id="qJmSYio5YJrR"
# https://blog.mailtrap.io/send-emails-with-gmail-api/
# + colab={} colab_type="code" id="10v8mRNBR2_1"
#import hashlib, binascii
import smtplib
def send_email(user, pwd, recipient, subject, body):
    """Send a plain-text email through Gmail's SMTP server.

    Parameters
    ----------
    user : str
        Gmail address used both to authenticate and as the sender.
    pwd : str
        Password (or Gmail app password) for ``user``.
    recipient : str or list of str
        One address, or a list of addresses, to deliver to.
    subject : str
        Subject line of the message.
    body : str
        Plain-text body of the message.

    Prints a success/failure message; returns None.
    """
    FROM = user
    # Normalise a single address into a one-element list.
    TO = recipient if isinstance(recipient, list) else [recipient]
    SUBJECT = subject
    TEXT = body
    # Prepare actual message (minimal RFC-822-style headers).
    message = """From: %s\nTo: %s\nSubject: %s\n\n%s
    """ % (FROM, ", ".join(TO), SUBJECT, TEXT)
    try:
        server = smtplib.SMTP("smtp.gmail.com", 587)
        server.ehlo()
        server.starttls()  # upgrade the connection to TLS before logging in
        server.login(user, pwd)
        server.sendmail(FROM, TO, message)
        # quit() ends the session with a proper SMTP QUIT; the original
        # close() dropped the socket without saying goodbye.
        server.quit()
        print('successfully sent the mail')
    except (smtplib.SMTPException, OSError):
        # Narrowed from a bare ``except:``, which also swallowed
        # KeyboardInterrupt and hid genuine programming errors.
        print("failed to send mail")
# + colab={} colab_type="code" id="lvNRkeGKR2sy"
send_email('<EMAIL>',pwd,['<EMAIL>','<EMAIL>'],'hello','nonosense')
# + [markdown] colab_type="text" id="poLS_eOdsNPi"
#
# + [markdown] colab_type="text" id="iS-eYYHlsHW6"
# # 5.0.0 Drafting your Python Project
# + [markdown] colab_type="text" id="Dh4dhthx-R0_"
# 
# + [markdown] colab_type="text" id="fIM-wXdq3tJj"
# Let's continue drafting our python project on the "common project" sheets
# + [markdown] colab_type="text" id="Oe3EHtEt33Er"
# https://github.com/codeforhk/python_practitioner/blob/master/common_project.ipynb
# + [markdown] colab_type="text" id="v1qDqjaGroE2"
# # 6.0.0 Structuring Python Project
# + [markdown] colab_type="text" id="CXoJY4rC2OYA"
# 
# + [markdown] colab_type="text" id="-9wcHujb1Zuk"
# Here we will be starting to take our hands off jupyter notebook, and try to make our hands dirty and work with command lines / folders.
#
# I have attached below a finished copy of the program. You can download the program and run it locally.
#
# https://github.com/codeforhk/project/tree/master/crypto_arb
#
# Ideally, I would want you to do 2 things:
#
# * Try to re-create our trading program.
# * Try to create a Github account, and upload your project.
#
#
#
# + [markdown] colab_type="text" id="P7y7xjm6sQ0z"
# ## 6.1.0 Python Classes and Methods
# + [markdown] colab_type="text" id="y4AHpQ3H-vmM"
# 
# + [markdown] colab_type="text" id="-KBLxsRb-dVL"
# 
# + [markdown] colab_type="text" id="xRYgcPAN-qPV"
# 
# + [markdown] colab_type="text" id="aA75bNNrsrPI"
# Before we start structuring, there is a very important concept. We will be structuring our code to a "class"
#
# [What is a class?](https://www.hackerearth.com/practice/python/object-oriented-programming/classes-and-objects-i/tutorial/)
#
# Python is an “object-oriented programming language.” This means that almost all the code is implemented using a special construct called classes. Programmers use classes to keep related things together. This is done using the keyword “class,” which is a grouping of object-oriented constructs.
#
# By the end of this tutorial you will be able to:
#
# * Define what is a class
# * Describe how to create a class
# * Define what is a method
# * Describe how to do object instantiation
# * Describe how to create instance attributes in Python
# + [markdown] colab_type="text" id="gYG6oFMUsjKE"
# ## 6.2.0 What is a class?
# + [markdown] colab_type="text" id="j1NzkucStBRU"
# A class is a code template for creating objects. Objects have member variables and have behaviour associated with them. In python a class is created by the keyword class.
#
# An object is created using the constructor of the class. This object will then be called the instance of the class. In Python we create instances in the following manner
# + [markdown] colab_type="text" id="yheCHivgIgjA"
# ## 6.3.0 How to create a class?
# + [markdown] colab_type="text" id="950k8aEYI_DC"
# The simplest class can be created using the class keyword. For example, let's create a simple, empty class with no functionalities.
# + colab={} colab_type="code" id="W8ryHFsHJIRn"
class Snake:
    """Minimal example: an empty class with no attributes or methods."""
    pass
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="ZW_UhSMtJL6x" outputId="b93b55b8-f17c-45fa-90b4-51bc5fa4f411"
# Instantiate the empty class; the cell output shows the object's repr.
snake = Snake()
snake
# + [markdown] colab_type="text" id="R3Ga77cqJLqr"
# Try to create a class called "PriceCrawler"
# + colab={} colab_type="code" id="tg2ULnhwJU80"
# Your code here
# + [markdown] colab_type="text" id="Ff4D8qYBIkF1"
# ## 6.4.0 Attributes in class
# + [markdown] colab_type="text" id="1iSu7xqlJe-5"
#
# A class by itself is of no use unless there is some functionality associated with it. Functionalities are defined by setting attributes, which act as containers for data and functions related to those attributes. Those functions are called methods.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="AcDcbfLzJeXN" outputId="2267b79c-03c0-4b5d-9794-75f5aba69c8d"
class Snake:
    """Snake carrying a single class-level attribute."""
    name = 'python'
# Class attributes can be read from the class itself, without an instance.
Snake.name
# + [markdown] colab_type="text" id="zyMA3MGaIrIm"
# ## 6.5.0 Methods in class
# + [markdown] colab_type="text" id="CYjUNosYJ1aW"
# Once there are attributes that “belong” to the class, you can define functions that will access the class attribute. These functions are called methods. When you define methods, you will need to always provide the first argument to the method with a self keyword.
#
# For example, you can define a class Snake, which has one attribute name and one method change_name. The method change name will take in an argument new_name along with the keyword self.
# + colab={} colab_type="code" id="JKYCBP9trqEC"
class Snake:
    """Snake whose name can be changed through a method."""
    name = "python"  # class-level default, shared until shadowed per instance
    def change_name(self, new_name): # note that the first argument is self
        # Assigning via ``self`` creates an *instance* attribute that
        # shadows the class-level default above.
        self.name = new_name
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="O8YLchbsJ6OU" outputId="c8a8bbc3-97dd-478a-d5bf-d0e428bdddfe"
# instantiate the class
snake = Snake()
# Call the method; this sets an instance attribute named ``name``.
snake.change_name('anaconda')
# Displays 'anaconda' — the instance attribute shadows the class default.
snake.name
# + [markdown] colab_type="text" id="DGG6JXu9KaUa"
# ### Example 6.5.0
#
# 1) Try to instantiate a class called PriceCrawler
#
# 2) Create an attribute called url
#
# 3) Create a method to get the price from the exchange using the url
# + colab={} colab_type="code" id="EpzjhGPxKcvF"
class PriceCrawler:
    """Fetch the ETH/BTC order-book depth from the Huobi REST API."""

    # Huobi market-depth endpoint (step1 aggregation level).
    url = 'https://api.huobi.pro/market/depth?symbol=ethbtc&type=step1'

    def get_hb(self, url):
        """Download the order book at *url* and return it as a dict."""
        # Bug fix: the original called requests.get() with no arguments,
        # which raises TypeError — the ``url`` parameter is now used.
        r = requests.get(url)
        # Security fix: eval() on a network response can execute arbitrary
        # code; the API returns JSON, so parse it as JSON instead.
        return r.json()
# + [markdown] colab_type="text" id="rpUMT3B2KFc6"
# ### Exercise 6.5.0
#
# 1) Try to instantiate a class called PriceCrawler
#
# 2) Create an attribute called url
#
# 3) Create a method to get the price from the exchange using the url
# + colab={} colab_type="code" id="5dNbwG40Kskr"
class PriceCrawler:
    """Exercise skeleton: crawl the Poloniex order book (reader's task)."""
    # Bug fix: the original URL contained '¤cyPair' — the substring
    # '&curren' of '&currencyPair' had been decoded as the HTML entity
    # '&curren;' ('¤'), corrupting the query string.
    url = 'https://poloniex.com/public?command=returnOrderBook&currencyPair=BTC_ETH'
    #Your code here
# + [markdown] colab_type="text" id="pu04oLFdIvrC"
# ## 6.6.0 Instance attributes in python and the init method
# + [markdown] colab_type="text" id="qYxC_9wqKD22"
# You can also provide the values for the attributes at runtime. This is done by defining the attributes inside the init method. The following example illustrates this.
# + colab={} colab_type="code" id="HAoRklu3r98m"
class Snake:
    """A snake whose name is supplied at construction time."""

    def __init__(self, name):
        # Delegate to change_name so the naming logic lives in one place.
        self.change_name(name)

    def change_name(self, new_name):
        """Rebind this snake's name to *new_name*."""
        self.name = new_name
# + [markdown] colab_type="text" id="iDUYU_pMK0rM"
# ### Example 6.6.0
#
# 1) Create a class called PriceCrawler
#
# 2) Instantiate the value of the url at start
#
# 3) use the url to create the method
# + colab={} colab_type="code" id="E5JQPEpvLACV"
class PriceCrawler:
    """Fetch the ETH/BTC order-book depth from the Huobi REST API."""

    def __init__(self):
        # Endpoint set at instantiation time, so it can differ per instance.
        self.url = 'https://api.huobi.pro/market/depth?symbol=ethbtc&type=step1'

    def get_hb(self, url):
        """Download the order book at *url* and return it as a dict."""
        # Bug fix: the original called requests.get() with no arguments,
        # which raises TypeError — the ``url`` parameter is now used.
        r = requests.get(url)
        # Security fix: eval() on a network response can execute arbitrary
        # code; the API returns JSON, so parse it as JSON instead.
        return r.json()
# + [markdown] colab_type="text" id="LVWl4o6TLGEC"
# ### Exercise 6.6.0
#
# 1) Create a class called PriceCrawler
#
# 2) Instantiate the value of the url at start
#
# 3) use the url to create the method
# + colab={} colab_type="code" id="l1kiAIbtLMSX"
class PriceCrawler:
    """Exercise skeleton: crawl the Poloniex order book (reader's task)."""
    # Bug fix: the original URL contained '¤cyPair' — the substring
    # '&curren' of '&currencyPair' had been decoded as the HTML entity
    # '&curren;' ('¤'), corrupting the query string.
    url = 'https://poloniex.com/public?command=returnOrderBook&currencyPair=BTC_ETH'
    #Your code here
# + [markdown] colab_type="text" id="nFgT9nxsLQCh"
# ## Challenge 6.6.0
#
# + [markdown] colab_type="text" id="4bJ58REMLa6i"
#
# Remember our student picker?
# 1) Try to write a function with a student picker
# 2) Think of a way to write a function, if the student fail to answer a question, increase the chance of the students getting picked.
# + [markdown] colab_type="text" id="De6XmgJPLdtW"
# 1) What do you want to instantiate?
#
# 2) What should be in the init?
#
# 3) What should be the attribute?
#
# 4) What should be in the method?
# + colab={} colab_type="code" id="tZLDBdQ_Lb8B"
# Your code here
# + [markdown] colab_type="text" id="g7eble1avp1c"
# ## 6.7.0 Designing our trading project into class
# + [markdown] colab_type="text" id="X0gt3zDawBRQ"
# Think of it this way
#
# * How many functions do we have?
# * What are the components?
# * What do we want to instantiate?
# * What methods do we want to have?
#
#
# + [markdown] colab_type="text" id="aAalqR9DL8uP"
# You should now switch to the project
# + [markdown] colab_type="text" id="95dveepxvvXk"
# ## 6.8.0 Uploading to Github
#
#
#
# * Create a Github account
# * Create a repository
# * Upload the program on your repo
# * Write a readme
#
#
# + [markdown] colab_type="text" id="TmgOi8cIskjr"
# # 7.0.0 Advance web scraping - Selenium
# + [markdown] colab_type="text" id="1iuIFeoiPfPH"
# ## 7.1.0 How to run it on colab?
# + [markdown] colab_type="text" id="U-eIYgZAO9FS"
# The code below helps to install selenium on colab
# + colab={"base_uri": "https://localhost:8080/", "height": 731} colab_type="code" id="ALTwfpQRslpg" outputId="19868deb-1ce6-4adf-fbb2-d2d8b09a43a7"
# !pip install selenium
# !apt-get update # to update ubuntu to correctly run apt install
# !apt install chromium-chromedriver
# !cp /usr/lib/chromium-browser/chromedriver /usr/bin
import sys
# Make the chromedriver binary discoverable by Selenium inside Colab.
sys.path.insert(0,'/usr/lib/chromium-browser/chromedriver')
# + [markdown] colab_type="text" id="INxGW75XO8ag"
# You can start selenium by running the code below. This also helps you to run it in a "headless" mode
# + colab={} colab_type="code" id="icmfElK6O6nW"
from selenium import webdriver
# Headless Chrome: no visible window; the sandbox/shm flags are needed to
# run Chrome inside a container such as Colab.
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
# This code launch your browser
# NOTE(review): the positional driver path and ``chrome_options=`` keyword
# were removed in Selenium 4 (use ``options=``) — this targets Selenium 3.
driver = webdriver.Chrome('chromedriver',chrome_options=chrome_options)
# + [markdown] colab_type="text" id="OqOxHLJAPL_T"
# You can use the code to navigate to a website
# + colab={} colab_type="code" id="zVxfTN3yOflz"
driver.get("https://www.skyscanner.com")
# + [markdown] colab_type="text" id="0qc1-LF1PTqW"
# Because we are in a headless mode, we can't really see our browser. You can make use of save screenshot to help you
# + colab={"base_uri": "https://localhost:8080/", "height": 617} colab_type="code" id="H4aJLAgqPQwL" outputId="62fafd1e-4646-48db-f6c2-1bbee87088cc"
from IPython.display import Image
# A headless browser has no visible window, so dump a screenshot to disk
# and render it inline in the notebook instead.
driver.save_screenshot('screenie.png')
Image("screenie.png")
# + [markdown] colab_type="text" id="yBt1sOoZPiP7"
# ## 7.2.0 How to do web scraping using Selenium?
# + colab={"base_uri": "https://localhost:8080/", "height": 617} colab_type="code" id="T9UHm7B1PnnE" outputId="a29efe50-9744-4292-c706-9ccd7948d4d3"
# Run a Google search and screenshot the results page.
driver.get("https://www.google.com/search?q=DJI+mavic+mini")
driver.save_screenshot('screenie.png')
Image("screenie.png")
# + [markdown] colab_type="text" id="QOxKwoztQ5G1"
# Let's say I want to navigate to page two
# + colab={} colab_type="code" id="cfEVtdpAPsmW"
# I can click a page
# CSS selector targeting the "page 2" link in Google's pagination bar.
# NOTE(review): find_element_by_css_selector was removed in Selenium 4
# (use find_element(By.CSS_SELECTOR, ...)) — this targets Selenium 3.
driver.find_element_by_css_selector('#nav > tbody > tr > td:nth-child(3) > a').click()
# + colab={"base_uri": "https://localhost:8080/", "height": 617} colab_type="code" id="U4AmhegWQYcM" outputId="25982cbd-2e99-4300-86e5-e4b22a717d14"
driver.save_screenshot('screenie.png')
Image("screenie.png")
# + [markdown] colab_type="text" id="e1-vbIfdRcSW"
# Parse it via BeautifulSoup
# + colab={} colab_type="code" id="fyYqv9wARCgd"
from bs4 import BeautifulSoup as bs
# Parse the JS-rendered page source with the lxml parser.
soup = bs(driver.page_source,'lxml')
# + [markdown] colab_type="text" id="-Sp9i7uFRenl"
# Get all the link
# + colab={"base_uri": "https://localhost:8080/", "height": 360} colab_type="code" id="EyKBg_YARLLE" outputId="6b820ed1-6eeb-4fcb-987a-98ef2a2eeccd"
# Print the href of the last 20 anchors on the page.
for i in soup.find_all('a')[-20:]:
    # Use .get() instead of the original bare ``except: ''``: anchors
    # without an href return None and are simply skipped, while real
    # errors are no longer silently swallowed.
    href = i.get('href')
    if href is not None:
        print(href)
# + [markdown] colab_type="text" id="sfWv0CJG9qQW"
# ## 7.3.0 How to run it on Mac?
# + colab={} colab_type="code" id="en_W_fgqRtAw"
# try to run the below on terminal, it should work
# NOTE: these are *shell* commands, not Python. They are commented out so
# that executing this cell as Python no longer raises a SyntaxError.
# ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" < /dev/null 2> /dev/null
# brew cask install chromedriver
# pip3 install selenium
# + [markdown] colab_type="text" id="CQgPyvQN9so6"
# ## 7.4.0 How to run on Windows?
#
# Unfortunately, I can't figure out a good way to install on Windows. If you have Windows 10, it is recommended to install the Windows Subsystem for Linux, or Bash on Windows
#
# https://itsfoss.com/install-bash-on-windows/
# + [markdown] colab_type="text" id="pKgPmGHQv3vU"
# # 8.0.0 Starting with your own project
# + [markdown] colab_type="text" id="ZqR5z9GIOHV6"
# Please start working on your own project.
#
# I have given it a try on the skyscanner project - this should give you an idea of how to structure your project.
# + colab={} colab_type="code" id="7uQG77Yjv6kL"
from selenium import webdriver
from bs4 import BeautifulSoup as bs
import time
import pandas as pd
class skyscanner:
    """Scrape round-trip flight tickets (HKG -> Osaka) from Skyscanner.

    Skyscanner obfuscates its CSS class names, so the scraper discovers the
    full class names at runtime by matching the stable prefixes below.
    """

    def __init__(self):
        # Stable prefixes of the (obfuscated) CSS class names we need.
        self.TICKET = 'EcoTicketWrapper_ecoContainer'
        self.LEG = 'LegDetails_container'
        self.PRICE = 'Price_mainPriceContainer'
        self.driver = webdriver.Chrome()
        # Initial page load; execute() navigates again with the real dates.
        url = 'https://www.skyscanner.com.hk/transport/flights/hkg/osaa/191208/200212/'
        self.driver.get(url)

    def quit_browser(self):
        """Shut down the Selenium-controlled browser."""
        self.driver.quit()

    def flatten(self, l):
        """Flatten a list of lists into a single flat list."""
        return [a for b in l for a in b]

    def search_class_name(self, key_word):
        """Return one page class name containing *key_word*.

        NOTE(review): ``set`` ordering is arbitrary, so if several distinct
        class names match, which one is returned is non-deterministic —
        confirm the prefixes above are unique enough.
        """
        target = [i for i in self.all_class if key_word in i]
        return list(set(target))[0]

    def search_all_class(self):
        """Collect the ``class`` attribute of every <div> on the page."""
        all_class = []
        for i in self.soup.find_all('div'):
            try:
                all_class.append(i['class'])
            except KeyError:
                # Divs without a class attribute are skipped; the original
                # bare ``except`` also hid genuine errors.
                pass
        return all_class

    def execute(self, start, end):
        """Scrape all tickets departing *start* and returning *end*.

        Dates use Skyscanner's yymmdd URL format (e.g. '191208').
        Returns a DataFrame with one row per ticket: both legs and price.
        """
        url = 'https://www.skyscanner.com.hk/transport/flights/hkg/osaa/{start}/{end}/'.format(start=start, end=end)
        self.driver.get(url)
        time.sleep(30)  # crude wait for the JS-rendered results to appear
        self.driver.execute_script("window.scrollTo(0, 100)")
        self.soup = bs(self.driver.page_source)
        self.all_class = self.flatten(self.search_all_class())
        ticket_details = self.search_class_name(self.TICKET)
        leg_details = self.search_class_name(self.LEG)
        price_details = self.search_class_name(self.PRICE)
        ticket_body_list = self.soup.find_all('div', class_=ticket_details)
        all_tickets = []
        for i in ticket_body_list:
            legs = []
            for leg in i.find_all('div', class_=leg_details):
                # 's' instead of the original 'i', which shadowed the
                # outer ticket variable inside the comprehension.
                legs.append([s.text for s in leg.find_all('span')])
            ticket_price = i.find('div', class_=price_details).text
            # assumes every ticket has exactly two legs (round trip) — TODO confirm
            all_tickets.append({'leg_1': legs[0], 'leg_2': legs[1], 'price': ticket_price})
        return pd.DataFrame(all_tickets)
| 2020-02-practioneer/course-material/py_practitioner_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial 1 of 3: Getting Started with OpenPNM
#
# > This tutorial is intended to show the basic outline of how OpenPNM works, and necessarily skips many of the more useful and powerful features of the package. So if you find yourself asking "why is this step so labor intensive" it's probably because this tutorial deliberately simplifies some features to provide a more smooth introduction. The second and third tutorials dive into the package more deeply, but those features are best appreciated once the basics are understood.
# **Learning Objectives**
#
# * Introduce the main OpenPNM objects and their roles
# * Explore the way OpenPNM stores data, including network topology
# * Learn some handy tools for working with objects
# * Generate a standard cubic **Network** topology
# * Calculate geometrical properties and assign them to a **Geometry** object
# * Calculate thermophysical properties and assign to a **Phase** object
# * Define pore-scale physics and assign transport parameters to a **Physics** object
# * Run a permeability simulation using the pre-defined **Algorithm**
# * Use the package to calculate the permeability coefficient of a porous media
# > **Python and Numpy Tutorials**
# >
# > Before diving into OpenPNM it is probably a good idea to become familar with Python and Numpy. The following resources should be helpful.
# > * OpenPNM is written in Python. One of the best guides to learning Python is the set of Tutorials available on the [official Python website](https://docs.python.org/3.5/tutorial). The web is literally overrun with excellent Python tutorials owing to the popularity and importance of the language. The official Python website also provides [a long list of resources](https://www.python.org/about/gettingstarted/)
# > * For information on using Numpy, Scipy and generally doing scientific computing in Python checkout the [Scipy lecture notes](http://www.scipy-lectures.org/). The Scipy website also offers as solid introduction to [using Numpy arrays](https://docs.scipy.org/doc/numpy-dev/user/quickstart.html).
# > * The [Stackoverflow](http://www.stackoverflow.com) website is an incredible resource for all computing related questions, including simple usage of Python, Scipy and Numpy functions.
# > * For users more familiar with Matlab, there is a [Matlab-Numpy cheat sheet](http://mathesaurus.sourceforge.net/matlab-numpy.html) that explains how to translate familiar Matlab commands to Numpy.
# ## Overview of Data Storage in OpenPNM
#
# Before creating an OpenPNM simulation it is necessary to give a quick description of how data is stored in OpenPNM; after all, a significant part of OpenPNM is dedicated to data storage and handling.
#
#
# ### Python Dictionaries or *dicts*
#
# OpenPNM employs 5 main objects which each store and manage a different type of information or data:
#
# * **Network**: Manages topological data such as pore spatial locations and pore-to-pore connections
# * **Geometry**: Manages geometrical properties such as pore diameter and throat length
# * **Phase**: Manages thermophysical properties such as temperature and viscosity
# * **Physics**: Manages pore-scale transport parameters such as hydraulic conductance
# * **Algorithm**: Contains algorithms that use the data from other objects to perform simulations, such as diffusion or drainage
#
# We will encounter each of these objects in action before the end of this tutorial.
#
# Each of the above objects is a *subclass* of the Python *dictionary* or *dict*, which is a very general storage container that allows values to be accessed by a name using syntax like:
foo = {}            # Create an empty dict (literal syntax)
foo['bar'] = 1      # Store an integer under the key 'bar'
print(foo['bar'])   # Retrieve the integer stored in 'bar'
# A detailed tutorial on dictionaries [can be found here](http://learnpythonthehardway.org/book/ex39.html). The *dict* does not offer much functionality aside from basic storage of arbitrary objects, and it is meant to be extended. OpenPNM extends the *dict* to have functionality specifically suited for dealing with OpenPNM data.
# ### *Numpy* Arrays of Pore and Throat Data
#
# All data are stored in arrays which can accessed using standard array syntax.
#
# - All pore and throat properties are stored in [Numpy arrays](https://docs.scipy.org/doc/numpy-dev/user/quickstart.html). All data will be automatically converted to a *Numpy* array if necessary.
# - The data for pore *i* (or throat *i*) can be found in element of *i* of an array. This means that pores and throat have indices which are implied by their position in arrays. When we speak of retrieving pore locations, it refers to the indices in the *Numpy* arrays.
# - Each property is stored in it's own array, meaning that 'pore diameter' and 'throat volume' are each stored in a separate array.
# - Arrays that store pore data are *Np*-long, while arrays that store throat data are *Nt*-long, where *Np* is the number of pores and *Nt* is the number of throats in the network.
# - Arrays can be any size in the other dimensions. For instance, triplets of pore coordinates (i.e. [x, y, z]) can be stored for each pore creating an *Np-by-3* array.
# - The storage of topological connections is also very nicely accomplished with this 'list-based' format, by creating an array (``'throat.conns'``) that stores which pore indices are found on either end of a throat. This leads to an *Nt-by-2* array.
# ### OpenPNM Objects: Combining *dicts* and *Numpy* Arrays
#
# OpenPNM objects combine the above two levels of data storage, meaning they are *dicts* that are filled with *Numpy* arrays. OpenPNM enforces several rules to help maintain data consistency:
#
# * When storing arrays in an OpenPNM object, their name (or *dictionary key*) must be prefixed with ``'pore.'`` or ``'throat.'``.
# * OpenPNM uses the prefix of the *dictionary key* to infer how long the array must be.
# * The specific property that is stored in each array is indicated by the suffix such as ``'pore.diameter'`` or ``'throat.length'``.
# * Writing scalar values to OpenPNM objects automatically results in conversion to a full length array filled with the scalar value.
# * Arrays containing *Boolean* data are treated as *labels*, which are explained later in this tutorial.
#
# The following code snippets give examples of how all these pieces fit together using an empty network as an example:
import openpnm as op
import scipy as sp
# NOTE(review): ``sp.ones`` is the legacy scipy alias of numpy.ones and has
# been removed from modern SciPy — use numpy directly on newer versions.
net = op.network.GenericNetwork(Np=10, Nt=10) # Instantiate an empty network object with 10 pores and 10 throats
net['pore.foo'] = sp.ones([net.Np, ]) # Assign an Np-long array of ones
net['pore.bar'] = range(0, net.Np) # Assign an Np-long array of increasing ints
print(type(net['pore.bar'])) # The Python range iterator is converted to a proper Numpy array
net['pore.foo'][4] = 44.0 # Overwrite values in the array
print(net['pore.foo'][4]) # Retrieve values from the array
print(net['pore.foo'][2:6]) # Extract a slice of the array
print(net['pore.foo'][[2, 4, 6]]) # Extract specific locations
net['throat.foo'] = 2 # Assign a scalar
print(len(net['throat.foo'])) # The scalar values is converted to an Nt-long array
print(net['throat.foo'][4]) # The scalar value was placed into all locations
# ## Generate a Cubic Network
#
# Now that we have seen the rough outline of how OpenPNM objects store data, we can begin building a simulation. Start by importing OpenPNM and the Scipy package:
import openpnm as op
import scipy as sp
# Next, generate a **Network** by choosing the **Cubic** class, then create an *instance* with the desired parameters:
# 4 x 3 x 1 pores, 100 um (0.0001 m) center-to-center spacing — a flat 2D grid.
pn = op.network.Cubic(shape=[4, 3, 1], spacing=0.0001)
# The **Network** object stored in ``pn`` contains pores at the correct spatial positions and connections between the pores according the cubic topology.
#
# * The ``shape`` argument specifies the number of pores in the [X, Y, Z] directions of the cube. Networks in OpenPNM are always 3D dimensional, meaning that a 2D or "flat" network is still 1 layer of pores "thick" so [X, Y, Z] = [20, 10, 1], thus ``pn`` in this tutorial is 2D which is easier for visualization.
# * The ``spacing`` argument controls the center-to-center distance between pores and it can be a scalar or vector (i.e. [0.0001, 0.0002, 0.0003]).
#
# The resulting network looks like:
#
# 
#
# This image was creating using [Paraview](http://www.paraview.org), using the instructions given here: [Example in the OpenPNM-Example collection](https://github.com/PMEAL/OpenPNM-Examples/blob/master/IO_and_Visualization/paraview.md)
# ### Inspecting Object Properties
#
# OpenPNM objects have additional methods for querying their relevant properties, like the number of pores or throats, which properties have been defined, and so on:
# Query helper methods and their short-cut properties (Np / Nt).
print('The total number of pores on the network is:', pn.num_pores())
print('A short-cut to the total number of pores is:', pn.Np)
print('The total number of throats on the network is:', pn.num_throats())
print('A short-cut to the total number of throats is:', pn.Nt)
print('A list of all calculated properties is availble with:\n', pn.props())
# ### Accessing Pores and Throats via Labels
#
# One simple but important feature of OpenPNM is the ability to *label* pores and throats. When a **Cubic** network is created, several labels are automatically created: the pores on each face are labeled 'left', 'right', etc. These labels can be used as follows:
# Indices of all pores carrying the 'left' label (one face of the cube).
print(pn.pores('left'))
# The ability to retrieve pore indices is handy for querying pore properties, such as retrieving the pore coordinates of all pores on the 'left' face:
print(pn['pore.coords'][pn.pores('left')])
# A list of all labels currently assigned to the network can be obtained with:
print(pn.labels())
# ## Create a Geometry Object and Assign Geometric Properties to Pores and Throats
#
# The **Network** ``pn`` does not contain any information about pore and throat sizes at this point. The next step is to create a **Geometry** object to manage the geometrical properties.
geom = op.geometry.GenericGeometry(network=pn, pores=pn.Ps, throats=pn.Ts)
# This statement contains three arguments:
#
# * ``network`` tells the **Geometry** object which **Network** it is associated with. There can be multiple networks defined in a given session, so all objects must be associated with a single network.
# * ``pores`` and ``throats`` indicate the locations in the **Network** where this **Geometry** object will apply. In this tutorial ``geom`` applies to *all* pores and throats, but there are many cases where different regions of the network have different geometrical properties, so OpenPNM allows multiple **Geometry** objects to be created for managing the data in each region, but this will not be used in this tutorial.
# ### Add Pore and Throat Size Information
#
# This freshly instantiated **Geometry** object (``geom``) contains no geometric properties as yet. For this tutorial we'll use the direct assignment of manually calculated values.
#
# We'll start by assigning diameters to each pore from a random distribution, spanning 0 um to 100 um. The upper limit matches the ``spacing`` of the **Network** which was set to 0.0001 m (i.e. 100 um), so pore diameters exceeding 100 um might overlap with their neighbors. Using the Scipy ``rand`` function creates an array of random numbers between 0 and 0.0001 that is *Np*-long, meaning each pore is assigned a unique random number
geom['pore.diameter'] = sp.rand(pn.Np)*0.0001 # Units of meters
# We usually want the throat diameters to always be smaller than the two pores which it connects to maintain physical consistency. This requires understanding a little bit about how OpenPNM stores network topology. Consider the following:
P12 = pn['throat.conns'] # An Nt x 2 list of pores on the end of each throat
D12 = geom['pore.diameter'][P12] # An Nt x 2 list of pore diameters
Dt = sp.amin(D12, axis=1) # An Nt x 1 list of the smaller pore from each pair
# Each throat takes the smaller of its two neighbouring pore diameters,
# keeping the geometry physically consistent.
geom['throat.diameter'] = Dt
# Let's dissect the above lines.
#
# * Firstly, ``P12`` is a direct copy of the *Network's* ``'throat.conns'`` array, which contains the indices of the pore-pair connected by each throat.
# * Next, this *Nt-by-2* array is used to index into the ``'pore.diameter'`` array, resulting in another *Nt-by-2* array containing the diameters of the pores on each end of a throat.
# * Finally, the Scipy function ``amin`` is used to find the minimum diameter of each pore-pair by specifying the ``axis`` argument as 1, and the resulting *Nt-by-1* array is assigned to ``geom['throat.diameter']``.
# * This trick of using ``'throat.conns'`` to index into a pore property array is commonly used in OpenPNM and you should have a second look at the above code to understand it fully.
# We must still specify the remaining geometrical properties of the pores and throats. Since we're creating a "Stick-and-Ball" geometry, the sizes are calculated from the geometrical equations for spheres and cylinders.
# For pore volumes, assume a sphere:
# Pore volume from the sphere formula (4/3)*pi*r^3 (pi hard-coded to 5 digits).
Rp = geom['pore.diameter']/2
geom['pore.volume'] = (4/3)*3.14159*(Rp)**3
# The length of each throat is the center-to-center distance between pores, minus the radius of each of two neighboring pores.
C2C = 0.0001 # The center-to-center distance between pores
Rp12 = Rp[pn['throat.conns']]
geom['throat.length'] = C2C - sp.sum(Rp12, axis=1)
# The volume of each throat is found assuming a cylinder:
Rt = geom['throat.diameter']/2
Lt = geom['throat.length']
geom['throat.volume'] = 3.14159*(Rt)**2*Lt
# The basic geometrical properties of the network are now defined. The **Geometry** class possesses a method called ``plot_histograms`` that produces a plot of the most pertinent geometrical properties. The following figure doesn't look very good since the network in this example has only 12 pores, but the utility of the plot for quick inspection is apparent.
#
# 
# ## Create a Phase Object
#
# The simulation is now topologically and geometrically defined. It has pore coordinates, pore and throat sizes and so on. In order to perform any simulations it is necessary to define a **Phase** object to manage all the thermophysical properties of the fluids in the simulation:
water = op.phases.GenericPhase(network=pn)
# Some notes on this line:
# * ``pn`` is passed as an argument because **Phases** must know to which **Network** they belong.
# * Note that ``pores`` and ``throats`` are *NOT* specified; this is because **Phases** are mobile and can exist anywhere or everywhere in the domain, so providing specific locations does not make sense. Algorithms for dynamically determining actual phase distributions are discussed later.
# ### Add Thermophysical Properties
#
# Now it is necessary to fill this **Phase** object with the desired thermophysical properties. OpenPNM includes a framework for calculating thermophysical properties from models and correlations, but this is covered in :ref:`intermediate_usage`. For this tutorial, we'll use the basic approach of simply assigning static values as follows:
water['pore.temperature'] = 298.0  # K (scalar broadcast to all pores)
water['pore.viscosity'] = 0.001    # water viscosity (presumably Pa*s)
# * The above lines utilize the fact that OpenPNM converts scalars to full length arrays, essentially setting the temperature in each pore to 298.0 K.
# ## Create a Physics Object
#
# We are still not ready to perform any simulations. The last step is to define the desired pore-scale physics models, which dictate how the phase and geometrical properties interact to give the *transport parameters*. A classic example of this is the Hagen-Poiseuille equation for fluid flow through a throat to predict the flow rate as a function of the pressure drop. The flow rate is proportional to the geometrical size of the throat (radius and length) as well as properties of the fluid (viscosity) and thus combines geometrical and thermophysical properties:
phys_water = op.physics.GenericPhysics(network=pn, phase=water, geometry=geom)
# * As with all objects, the ``Network`` must be specified
# * **Physics** objects combine information from a **Phase** (i.e. viscosity) and a **Geometry** (i.e. throat diameter), so each of these must be specified.
# * **Physics** objects do not require the specification of which ``pores`` and ``throats`` where they apply, since this information is implied by the ``geometry`` argument which was already assigned to specific locations.
# ### Specify Desired Pore-Scale Transport Parameters
#
# We need to calculate the numerical values representing our chosen pore-scale physics. To continue with the Hagen-Poiseuille example lets calculate the hydraulic conductance of each throat in the network. The throat radius and length are easily accessed as:
R = geom['throat.diameter']/2  # throat radii
L = geom['throat.length']      # throat lengths
# The viscosity of the **Phases** was only defined in the pores; however, the hydraulic conductance must be calculated for each throat. There are several options, but to keep this tutorial simple we'll create a scalar value:
mu_w = 0.001
# Hagen-Poiseuille conductance g = pi*R^4 / (8*mu*L), vectorized over throats.
phys_water['throat.hydraulic_conductance'] = 3.14159*R**4/(8*mu_w*L)
# Numpy arrays support *vectorization*, so since both ``L`` and ``R`` are arrays of *Nt*-length, their multiplication in this way results in another array that is also *Nt*-long.
# ## Create an Algorithm Object for Performing a Permeability Simulation
#
# Finally, it is now possible to run some useful simulations. The code below estimates the permeability through the network by applying a pressure gradient across and calculating the flux. This starts by creating a **StokesFlow** algorithm, which is pre-defined in OpenPNM:
# StokesFlow algorithm for viscous flow; associate it with the water phase.
alg = op.algorithms.StokesFlow(network=pn)
alg.setup(phase=water)
# * Like all the above objects, **Algorithms** must be assigned to a **Network** via the ``network`` argument.
# * This algorithm is also associated with a **Phase** object, in this case ``water``, which dictates which pore-scale **Physics** properties to use (recall that ``phys_water`` was associated with ``water``). This can be passed as an argument to the instantiation or to the ``setup`` function.
#
# Next the boundary conditions are applied using the ``set_boundary_conditions`` method on the **Algorithm** object. Let's apply a 1 atm pressure gradient between the left and right sides of the domain:
# 202650 Pa (2 atm) on the 'front' face, 101325 Pa (1 atm) on the 'back'
# face: a 1 atm pressure drop across the network.
BC1_pores = pn.pores('front')
alg.set_value_BC(values=202650, pores=BC1_pores)
BC2_pores = pn.pores('back')
alg.set_value_BC(values=101325, pores=BC2_pores)
# To actually run the algorithm use the ``run`` method:
alg.run()
# This builds the coefficient matrix from the existing values of hydraulic conductance, and inverts the matrix to solve for pressure in each pore, and stores the results within the **Algorithm's** dictionary under ``'pore.pressure'``.
#
# To determine the permeability coefficient, we must invoke Darcy's law: Q = KA/uL(Pin - Pout). Everything in this equation is known except for the volumetric flow rate Q. The **StokesFlow** algorithm possesses a ``rate`` method that calculates the rate of a quantity leaving a specified set of pores:
# Darcy's law: Q = K*A/(mu*L) * dP  ->  K = Q*mu*L / (A*dP)
Q = alg.rate(pores=pn.pores('front'))
A = 0.0001*3*1 # Cross-sectional area for flow
L = 0.0001*4 # Length of flow path
del_P = 101325 # Specified pressure gradient
K = Q*mu_w*L/(A*del_P)
print(K)
# The **StokesFlow** class was developed with permeability simulations in mind, so a specific method is available for determining the permeability coefficient that essentially applies the recipe from above. This method could struggle with non-uniform geometries though, so use with caution:
K = alg.calc_effective_permeability(domain_area=A, domain_length=L)
print(K)
# The results (``'pore.pressure'``) are held within the ``alg`` object and must be explicitly returned to the *Phase* object by the user if they wish to use these values in a subsequent calculation. The point of this data containment is to prevent unintentional overwriting of data. Each algorithm has a method called ``results`` which returns a dictionary of the pertinent simulation results, which can be added to the phase of interest using the ``update`` method.
water.update(alg.results())
# Using Paraview for Visualization, the resulting pressure gradient across the network can be seen:
#
# 
| examples/tutorials/01 - Intro to OpenPNM - Basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Suicides in Canada
# This is my first project in data science, analyzing suicides in Canada from 1987 to 2013.
#
# Dataset: https://www.kaggle.com/russellyates88/suicide-rates-overview-1985-to-2016
#
# Here are the questions I aim to answer:
# 1. What is the sex distribution of suicides (male-female)?
# 2. On average, which age group has the most suicides?
# 3. In which period of years had the most suicides?
# 4. Are the numbers of suicides increasing per year?
# First lets import the libraries we will be using.
#Importing Libraries
import pandas as pd
import numpy as np
import seaborn as sn
import matplotlib.pyplot as plt
# And then let's open the tabular data file, and see how it looks like.
# Load the full global dataset; rows are keyed by country/year/sex/age group
# (columns used below: 'country', 'year', 'sex', 'age', 'suicides_no').
suicide = pd.read_csv('Suicide_Rates.csv')
suicide
# Since this document is only concerned with the suicides in Canada, lets make a new DataFrame that only contains Canadian suicides. Maybe later I will revisit to view global trends in suicides.
#
# The loc function from the Pandas library is perfect, as it will allow us to get a specific row from the suicide DataFrame.
suicide_can = suicide.loc[suicide['country'] == 'Canada']
# ## What is the sex distribution of suicides in Canada?
#
# Let's start off with looking at the sex distribution of suicides in Canada.
#
# First, let's sum the amount of suicides per sex, so it can be visualized in a pie chart. A pie chart is visualization of choice as we are concerned with what percentage of suicides come from each sex.
# Total number of suicides in Canada, broken down by sex.
_suicides_by_sex = suicide_can.groupby('sex')['suicides_no'].sum()
total_suicide_can_m = _suicides_by_sex['male']
total_suicide_can_f = _suicides_by_sex['female']
# Next, let's plot the data with matplotlib's pie function. We'll create some labels, add some colors, and separate one part of the chart to make it look nicer. We'll also create an array that holds the size for each sex. More than 2 values can be added, but for this purpose, 2 is sufficient. Lastly, we'll use the pie function to make our pie chart.
# +
#Labels, colours, and a slight "explode" offset that pulls the Female wedge out
labels = 'Male', 'Female'
sizes = [total_suicide_can_m, total_suicide_can_f]
colors = ['lightskyblue', 'lightcoral']
explode = (0, 0.1)
#Plotting -- keyword arguments make the pie() call self-documenting
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
        autopct='%1.1f%%', shadow=True, startangle=270)
plt.axis('equal')  # equal aspect ratio so the pie is drawn as a circle
plt.show()
# -
# This pie chart demonstrates that the sex distribution of suicides in Canada is overwhelmingly male, with more than 3 times the suicides than females from 1983-2013.
# ## On average, which age group has the most suicides?
#
# Next, let's look at which age group has the most suicides. The suicide_can DataFrame has classified each suicide per age group, so we can use that!
#
# The age groups are (in years):
# 1. 5-14
# 2. 15-24
# 3. 25-34
# 4. 35-54
# 5. 55-74
# 6. 75+
#
# Note that age groups 1 and 2 only represent 10 years, 4 and 5 represent 20 years, and age group 6 represents more than 20 years. As the progression is not completely linear, we should be expecting a larger number of suicides from age group 4 onwards.
#
# Let's sort the suicide_can DataFrame by year, it'll help us later. We'll also sum the suicides per age group to get a rough estimate of how our graph will look.
# +
#Visualizing age distributions of suicides in a Bar Chart in Canada
# NOTE(review): sort_values is NOT in-place and its result is discarded here,
# so suicide_can keeps its original row order -- confirm whether an assignment
# (or inplace=True) was intended before relying on year ordering downstream.
suicide_can.sort_values(by=['year'])
#Getting the sums of the suicide per age distribution
# Each line filters the Canadian rows to one age bracket and totals its suicides.
suicide_can_0 = suicide_can.loc[suicide_can['age'] == '5-14 years']['suicides_no'].sum()
suicide_can_1 = suicide_can.loc[suicide_can['age'] == '15-24 years']['suicides_no'].sum()
suicide_can_2 = suicide_can.loc[suicide_can['age'] == '25-34 years']['suicides_no'].sum()
suicide_can_3 = suicide_can.loc[suicide_can['age'] == '35-54 years']['suicides_no'].sum()
suicide_can_4 = suicide_can.loc[suicide_can['age'] == '55-74 years']['suicides_no'].sum()
suicide_can_5 = suicide_can.loc[suicide_can['age'] == '75+ years']['suicides_no'].sum()
print('Age group suicides')
print('5-14:', suicide_can_0)
print('15-24:', suicide_can_1)
print('25-34:', suicide_can_2)
print('35-54:', suicide_can_3)
print('55-74:', suicide_can_4)
print('75+:', suicide_can_5)
# -
# Now that we have all of our values, let's put them into a DataFrame, and then use Seaborn's barplot function to generate a bar plot. I chose to use a bar plot as it is the clearest way to visualize the dataset, and come to conclusions based on it.
# +
#Entering the values into a dataframe
# (Label fixed from '54-74' to '55-74' so it matches the dataset's age brackets.)
suicide_can_age = pd.DataFrame({'Age Group (Years)': ['5-14', '15-24', '25-34', '35-54', '55-74', '75+'],
                                'Suicides': [suicide_can_0, suicide_can_1, suicide_can_2, suicide_can_3, suicide_can_4, suicide_can_5]})
#Visualizing the data
plt.title('Suicides Per Age Group in Canada')
bar_plot = sn.barplot(x = 'Age Group (Years)', y = 'Suicides', data = suicide_can_age)
plt.show()
# -
# Wow! It looks like the age group 35-54 has the most suicides in comparison to everything else. Keep in mind that age group represents 20 years of age, while everything to the left represents only 10 years. Does this mean that if you are 35-54 years old, you are most likely to commit suicide? Well, I'm not here to say that, but the data does show that there are a lot of suicides in that age group.
#
# In fact, why not visualize it in a pie chart to check out the distribution? Let's try using Pandas' pie chart instead of matplotlib like we did earlier.
#
# Unfortunately, Pandas does not show percentages (hence the earlier use of matplotlib), so we'll have to compute those ourselves too.
# +
#Creating a new DataFrame to hold values
suicides = [suicide_can_0, suicide_can_1, suicide_can_2, suicide_can_3, suicide_can_4, suicide_can_5]
# (Label fixed from '54-75' to '55-74' so it matches the dataset's age brackets.)
age_groups = ['5-14', '15-24', '25-34', '35-54', '55-74', '75+']
age_pie = pd.DataFrame({'Suicides': suicides},
                       index = age_groups)
plot = age_pie.plot.pie(y = 'Suicides', subplots = True, figsize = (10,7))
#Total suicides in Canada -- use the builtin sum() instead of shadowing it with a variable
total_suicides = sum(suicides)
#Outputting each age group's share of the total
print('Percentages')
for age_group, count in zip(age_groups, suicides):
    print(age_group, ':', round((100 * count / total_suicides), 2), '%')
# -
# From this pie chart, we can see that more than a third of suicides come from people in the age group of 35-54.
#
# But what about the number of suicides per age group per year? This is a great opportunity to use a stacked bar graph, and will flow nicely into our next question! For simplicity's sake, I will be categorizing each year into groups by the quarter. It makes for a nicer graph :)
#
# I'll define a function that will create each bar per year group. The function will add up the first 14 suicide numbers. 14 Represents all 7 years in the group, and multiplied by 2 for each sex. This is just how the dataset is organized, and more work can be done to change it, but I thought it was intuitive to do it all in one step.
# +
#Further Visualizing the age distributions of suicides in a Bar Chart Per Year in Canada
#This function sums up the age group's suicides per 7 years (a quarter of the dataset)
def create_bars(suicide, age_group):
    """Sum 'suicides_no' over one quarter of the 1985-2013 span.

    suicide   : DataFrame for a single age bracket, ordered by year
                (per the notebook text: 2 rows per year, one per sex).
    age_group : quarter index 0-3; quarter q covers rows [14*q, 14*q + 14)
                (7 years x 2 sexes = 14 rows).

    Bug fix: the original looped ``for i in range(i + 13)``, which always
    started at row 0 -- so quarters 1-3 double-counted all earlier rows,
    and quarter 0 covered only 13 of its 14 rows.
    """
    # int() keeps the original's tolerance for string-typed group indices.
    start = {0: 0, 1: 14, 2: 28, 3: 42}[int(age_group)]
    total = 0
    for row in range(start, start + 14):
        total += suicide.iloc[row]['suicides_no']
    return total
# -
# Cool! Now that we have our function, lets create the bars. Each bar will represent the amount of suicides per age group, per year group.
#
# I'll start by classifying the suicides per age group, then creating each bar as an array to hold the amount of suicides per age group per year group.
# +
#Classifying suicides per age group
# Re-bind suicide_can_0..5 as per-age-group DataFrames (above they held scalar
# sums); each frame contains every Canadian row for that single age bracket.
suicide_can_0 = suicide_can.loc[suicide_can['age'] == '5-14 years']
suicide_can_1 = suicide_can.loc[suicide_can['age'] == '15-24 years']
suicide_can_2 = suicide_can.loc[suicide_can['age'] == '25-34 years']
suicide_can_3 = suicide_can.loc[suicide_can['age'] == '35-54 years']
suicide_can_4 = suicide_can.loc[suicide_can['age'] == '55-74 years']
suicide_can_5 = suicide_can.loc[suicide_can['age'] == '75+ years']
#Creating each individual bar for every age group per 7 years (quarter) using the defined function
# bar_q[g] = suicides in age group g during quarter q; assumes each per-age
# frame is ordered by year (see the sorting note earlier) -- verify.
bar_0 = [create_bars(suicide_can_0,0), create_bars(suicide_can_1,0),
         create_bars(suicide_can_2,0), create_bars(suicide_can_3,0),
         create_bars(suicide_can_4,0), create_bars(suicide_can_5,0)]
bar_1 = [create_bars(suicide_can_0,1), create_bars(suicide_can_1,1),
         create_bars(suicide_can_2,1), create_bars(suicide_can_3,1),
         create_bars(suicide_can_4,1), create_bars(suicide_can_5,1)]
bar_2 = [create_bars(suicide_can_0,2), create_bars(suicide_can_1,2),
         create_bars(suicide_can_2,2), create_bars(suicide_can_3,2),
         create_bars(suicide_can_4,2), create_bars(suicide_can_5,2)]
bar_3 = [create_bars(suicide_can_0,3), create_bars(suicide_can_1,3),
         create_bars(suicide_can_2,3), create_bars(suicide_can_3,3),
         create_bars(suicide_can_4,3), create_bars(suicide_can_5,3)]
# -
# All that's left is to format the stacked bar chart, and plot it. Note how I'm stacking each bar on top of each other to create the visualization.
# +
#Creating index array and intializing the width of the bars
ind = [0,1,2,3,4,5]
width = 0.75
#Each segment of a stacked bar must sit on the CUMULATIVE height of all
#segments below it. (Bug fix: the original passed only the previous single
#bar as `bottom`, which mis-stacks every segment after the second.)
bottom_1 = bar_0
bottom_2 = [a + b for a, b in zip(bottom_1, bar_1)]
bottom_3 = [a + b for a, b in zip(bottom_2, bar_2)]
p0 = plt.bar(ind, bar_0, width)
p1 = plt.bar(ind, bar_1, width, bottom = bottom_1)
p2 = plt.bar(ind, bar_2, width, bottom = bottom_2)
p3 = plt.bar(ind, bar_3, width, bottom = bottom_3)
#Labelling the Graph
plt.ylabel('Suicides')
plt.xlabel('Age Group')
plt.title('Suicides in Canada per Age Group and Year')
plt.xticks(ind, ('5-14','15-24','25-34','35-54','55-74','75+'))
# (Bug fix: np.arange(0, 5, 10) produced a single y tick at 0; matplotlib's
# automatic ticks are used instead.)
# NOTE(review): the last legend label overlaps the previous one ('2001-2008'
# vs '2008-2014') and 4 quarters of 14 rows cover only 28 of 29 years -- confirm.
plt.legend((p0[0], p1[0], p2[0], p3[0]), ('1985-1992', '1993-2000', '2001-2008','2008-2014'))
#displaying Graph
plt.show()
# -
# Looks good! It seems like there is an awful amount of suicides in the last quadrant, from the years 2008-2014. Let's see if it has been going up, i.e.: Are the numbers of suicides increasing per year?
#
# ## Are the numbers of suicides increasing per year?
#
# Let's check this out by making a new DataFrame holding the years and the total suicides per year. The for loop sums the total suicides, and then appends them to our new DataFrame.
# +
#Canadian suicides per year, 1985-2013 inclusive (29 years).
# (DataFrame.append was removed in pandas 2.0; build the frame in one pass.)
years = list(range(1985, 2014))
suicide_year_can = pd.DataFrame({
    'Year': years,
    'Suicides': [suicide_can.loc[suicide_can['year'] == y]['suicides_no'].sum() for y in years],
})
suicide_year_can
# -
# Now that we have our suicides per year, let's make a scatter plot with Matplotlib. I'm using the numpy polyfit function to calculate the slope and line of best fit for our regression line.
# +
# Degree-1 least-squares fit: m = slope (suicides per year), b = intercept.
m,b = np.polyfit(np.array(suicide_year_can.Year,dtype=float),np.array(suicide_year_can.Suicides,dtype=float),1)
# Raw yearly totals as points, plus the fitted trend line.
plt.plot(suicide_year_can.Year, suicide_year_can.Suicides,'o')
plt.plot(suicide_year_can.Year,m*suicide_year_can.Year + b )
plt.title('Canadian Suicides per year')
plt.xlabel('Year')
plt.ylabel('Suicides')
plt.show()
# -
# Well that doesn't look too convincing. Yes, the regression line is increasing, but not by much, let's compare this to the world suicides per year and see if they're alike! Let's just follow the process we had before.
# +
#Global suicides per year, 1985-2013 inclusive (29 years).
# (DataFrame.append was removed in pandas 2.0; build the frame in one pass.)
years = list(range(1985, 2014))
suicide_year = pd.DataFrame({
    'Year': years,
    'Suicides': [suicide.loc[suicide['year'] == y]['suicides_no'].sum() for y in years],
})
suicide_year
# -
# Now let's plot it!
# +
# Degree-1 least-squares fit on the global totals: m = slope, b = intercept.
m,b = np.polyfit(np.array(suicide_year.Year,dtype=float),np.array(suicide_year.Suicides,dtype=float),1)
# Raw yearly totals as points, plus the fitted trend line.
plt.plot(suicide_year.Year, suicide_year.Suicides,'o')
plt.plot(suicide_year.Year,m*suicide_year.Year + b )
plt.title('Global Suicides per year')
plt.xlabel('Year')
plt.ylabel('Suicides')
plt.show()
# -
# Although there is a big jump in suicides from 1988 to 1990, there still isn't a clear enough relationship for us to determine that the amount of suicides have gone up with every year.
# # Conclusion
#
# If you made it down here, I would first like to thank you for reading! This is my first project in data science, and I'm excited to keep analyzing data, and come to appropriate conclusions.
#
# Reminder that the question posed are all Canada-based.
#
# Here were the questions I aim to answer:
# 1. What is the sex distribution of suicides (male-female)?
# 2. On average, which age group has the most suicides?
# 3. In which period of years had the most suicides?
# 4. Are the numbers of suicides increasing per year?
#
# Here are my conclusions
# 1. The sex distribution of suicides is overwhelmingly male-dominated, with males making up three times more suicides than women (77.5% : 22.5%)
# 2. On average, the age group of 35-54 had the most suicides, being responsible for 40.92% of total suicides in Canada for the time period.
# 3. 2008-2014 period had the most suicides.
# 4. Interestingly, no! There isn't a strong enough correlation to confidently say that there is a positive correlation between the years and suicide numbers.
#
# Once again, thank you for taking the time to read all of this! This notebook has given me a entry into the world of data science, and I can't wait to see what's next :)
# Feel free to reach out to me if you have any questions/comments/suggestions on how I can improve for the future.
#
# Contact info:
#
# LinkedIn: https://www.linkedin.com/in/quinn-ha/
#
# Email: <EMAIL>
#
#
| 1 - Suicide Rates Overview/.ipynb_checkpoints/Suicides in Canada-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.2
# language: julia
# name: julia-1.6
# ---
# # `Asap.jl`
# Another structural analysis package. Developed for ease-of-information extraction after matrix analysis of structural systems. Stores internal forces, displacements, and connectivities of constituent nodes and elements for downstream analysis and processing. Let's make a structure!
using Asap
# There are 4 main data types with the following key fields:
#
# 1. `Node`: position, degrees of freedom
# 2. `Element`: node connectivity, material properties
# 3. `Load`: position, force magnitudes
# 4. `Structure`: nodes, elements, and loads
#
# We will walk through the same structure in `example3d.jl`. First, defining the nodes. We can create a new `Node` by inputting the global position and degrees of freedom for the given node. 2D/3D, truss/frame is parsed based on the length of the position and DOF vectors:
#
# `n = Node([position1, position2 (, position3)], [dof1, dof2 (,dof3,...)])`
# Nodes defined with explicit per-DOF Boolean vectors (see the symbol
# shorthand below for the same four nodes).
node1 = Node([72.0, 0.0, 0.0], [true, false, true])
node2 = Node([0.0, 36.0, 0.0], [false, false, false])
node3 = Node([0.0, 36.0, 72.0], [false, false, false])
node4 = Node([0.0, 0.0, -48.0], [false, false, false])
# Since there are common DOF restrictions for most analysis types (pinned, free, fixed, etc.), you can also use predefined symbol combinations of structure type (truss/frame) and node fixity. Node fixities include `:fixed, pinned, xfixed, xfree, pinfixed`, where `xfree` and `xfixed` can be replaced with `y` and `z` respectively. The following is an equivalent definition of nodes:
# Equivalent definitions using the (structure type, fixity) symbol shorthand.
node1 = Node([72.0, 0.0, 0.0], :truss, :yfixed)
node2 = Node([0.0, 36.0, 0.0], :truss, :fixed)
node3 = Node([0.0, 36.0, 72.0], :truss, :fixed)
node4 = Node([0.0, 0.0, -48.0], :truss, :fixed)
# We create a vector of nodes:
nodes = [node1, node2, node3, node4];
# Next, we define the elements. They are created by `Element(nodes, [connectivity], materialProperties...)`
#
# 1. `[connectivity]` is a length 2 vector of start and end node positions with respect to the `nodes` vector.
# 2. `materialProperties` is a varied length of relevant material property values. They are in order: `E, A, G, Iz, Iy, J` or the modulus of elasticity, cross section area, shear constant, strong-axis moment of inertia, weak-axis moment of inertia, torsional constant. `E` and `A` are always needed for analysis. The rest depend on the dimensionality and type of structure you are analyzing. Defining only the material properties needed for your analysis will create the proper `Element` type.
#
# We are analyzing a truss, so it suffices to define `E` and `A` only:
# +
# Material properties
# E = modulus of elasticity (shared); A1..A3 = cross-sectional areas, one per bar.
E = 1.2e6
A1 = 0.302
A2 = 0.729
A3 = 0.187
# Defining elements
# Element(nodes, [i, j], E, A): truss bar connecting nodes i and j.
element1 = Element(nodes, [1,2], E, A1)
element2 = Element(nodes, [1,3], E, A2)
element3 = Element(nodes, [1,4], E, A3)
# -
# Creating a vector of elements:
elements = [element1, element2, element3];
# Next we define the loads. A `Load` structure has the same DOFs as the nodes under analysis, but with the relevant force/moment values instead of Boolean fixity values. They are defined by `Load(nodes, position, [forces])`.
#
# If we want to apply loads at a specific node, we can take advantage of the `.position` field for all nodes. Let's place a downwards (-Z) load on node 1:
# 1000-unit load in the -Z direction, applied at node 1's position.
load1 = Load(nodes, node1.position, [0., 0., -1000.])
# As always, we create a vector of loads:
loads = [load1]
# Now we can assemble the structure using `Structure`:
structure = Structure(nodes, elements, loads);
# We can then perform a linear elastic direct stiffness analysis of the structure:
analyze(structure)
# When we analyze a structure, not only does the global displacement vector get calculated, the connectivity between nodes/elements are stored, internal force value fields are populated for elements, nodal displacements and reactions (if applicable) are stored, and a scaled deformation of the structure is stored. Let's look at the displacements:
displacements = structure.U
# And the reactions:
structure.reactions
# If we want to know the internal axial force for element 2:
e2_axialforce = element2.axialForce
# Or the reaction forces at node 3:
n3_reactions = node3.reaction
# If we want to visualize the structure, we first convert it into a `Geometry` type:
geo = Geometry(structure);
# This converts custom structures into easily plottable types. We can use `structurePlot` to visualize:
fig = structurePlot(geo);
display(fig)
# Using `CairoMakie` will get nicer figures that are publication ready! This is a very simple structure (so less impressive), but we can do:
# +
using CairoMakie
CairoMakie.activate!()
fig2 = axo(geo; mode = :displaced, lw = 4)
# -
# And save it!
save("testfig.svg", fig2)
| examples/walkthrough.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
target = "2t.grib"
print (target)
# I get the data
# +
import cfgrib
# -
# ### The data have been retrieved and saved in a grib file called 2t.grib
# We will use gribapi to read the grib in a numpy array, and to extract the necessary information to georeference the plot.
import Magics.macro as magics
import eccodes as grib
import numpy as numpy
import json
# +
target = "2t.grib"
# NOTE(review): `file` shadows the Python 2 builtin and is never closed;
# consider renaming it and closing it once the message has been read.
file = open(target)
#Getting the first message from the file
field = grib.grib_new_from_file(file)
# Grid dimensions: Nj points along latitude, Ni along longitude.
nj = grib.grib_get(field,"Nj")
ni = grib.grib_get(field,"Ni")
print ni, nj
# Metadata forwarded to Magics (via input_mars_metadata) for titling.
metadata = { "paramId" : grib.grib_get(field,"paramId"),
    "units" : grib.grib_get(field,"units"),
    "typeOfLevel": grib.grib_get(field,"typeOfLevel"),
    "marsType": grib.grib_get(field,"marsType"),
    "marsClass": grib.grib_get(field,"marsClass"),
    "marsStream": grib.grib_get_string(field,"marsStream"),
    "level": grib.grib_get(field,"level") }
print metadata
# +
# Geometry of the regular lat/lon grid: first point and per-step increments
# (latitude step is negated because rows run north to south here -- verify).
firstlat = grib.grib_get(field, "latitudeOfFirstGridPointInDegrees")
steplat = -grib.grib_get(field, "jDirectionIncrementInDegrees")
firstlon = grib.grib_get(field, "longitudeOfFirstGridPointInDegrees")
steplon = grib.grib_get(field, "iDirectionIncrementInDegrees")
print "Latitude", firstlat, "Step", steplat
print "Longitude", firstlon, "Step", steplon
# +
#Getting the field values, reshaped to the (nj, ni) grid
values = grib.grib_get_values(field).reshape(nj, ni)
data = magics.minput( input_field = values,
    input_field_initial_latitude = firstlat,
    input_field_latitude_step = steplat,
    input_field_initial_longitude = firstlon,
    input_field_longitude_step = steplon,
    input_mars_metadata = json.dumps(metadata),
    )
#data = magics.mgrib(grib_input_file_name=target)
#Setting the field contour
# NOTE(review): this detailed contour definition is overwritten by the second
# `contour = magics.mcont(...)` below, so these shading settings are unused.
contour = magics.mcont(
    contour_shade= "on",
    legend= "on",
    contour_highlight = "off",
    contour_shade_method = "area_fill",
    contour_shade_colour_direction = "clockwise",
    contour_shade_colour_method = "calculate",
    contour_shade_max_level_colour= "red",
    contour_shade_min_level_colour= " blue")
#Setting the title
title = magics.mtext(text_lines=["<magics_title/>", "Using Grib API and arrays..."],
    text_colour="charcoal",
    text_font_size='0.8',
    text_justification='left')
# Global cylindrical projection (despite the variable name "europe").
europe = magics.mmap(
    subpage_map_projection = "cylindrical",
    subpage_lower_left_latitude = -90.00,
    subpage_lower_left_longitude = 0.00,
    subpage_upper_right_latitude = 90.00,
    subpage_upper_right_longitude = 360.00
    )
# This second definition replaces the one above: web-style automatic contours.
contour = magics.mcont(
    contour_automatic_setting="web",
    legend="on"
    )
legend = magics.mlegend( legend_display_type = "continuous")
#Plot the map
magics.plot( europe, data, contour, magics.mcoast(), title)
# -
| notebook/array-mars.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn import datasets
from sklearn import tree
clf=tree.DecisionTreeClassifier()
from sklearn.model_selection import train_test_split
# List the dataset loaders whose name mentions 'diabetes'.
for i in dir(datasets):
    if 'diabetes' in i:
        print(i)
from sklearn.datasets import load_diabetes
dir(load_diabetes
)
# Load the bundled diabetes dataset and inspect its attributes.
dia=datasets.load_diabetes()
dia
dir(dia)
dia.feature_names
# Feature matrix: one row per sample, one column per entry of feature_names.
features=dia.data
features
dia.target_filename
dia.target
dia.target.shape
dia.DESCR
dia.data.shape
import matplotlib.pyplot as plt
dia.feature_names
# Individual feature columns (order given by dia.feature_names).
age = dia.data[:, 0]
sex = dia.data[:, 1]
bmi = dia.data[:, 2]
bp = dia.data[:, 3]
# Remaining serum-measurement columns.
# NOTE(review): [:, 4:-1] drops the last column -- confirm whether [:, 4:] was intended.
samples = dia.data[:, 4:-1]
# Bug fix: `dia.target[]` was a SyntaxError; the full target vector was intended.
result = dia.target
plt.scatter(age, result)
# Split features/targets, fit the decision tree, and score on the held-out set.
data_split = train_test_split(features, result)
train_fea, test_fea, train_res, test_res = data_split
trained = clf.fit(train_fea, train_res)
predicted = trained.predict(test_fea)
from sklearn.metrics import accuracy_score
accuracy_score(test_res, predicted)
result
| diabetes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import xarray as xr
import pandas as pd
import numpy as np
from cast_to_xarray import *
# +
# extract temperature and salinity at the nearest points to the bottle pH and apply MLR there
# +
# Load each station's processed CTD cast (.cnv file) into an xarray Dataset.
data_dir = 'data_cruise2'
fileend = '_loop_filter_teos10_bin.cnv'
cast6 = cast_to_xarray(data_dir+'/station6'+fileend, 'S6')
cast7 = cast_to_xarray(data_dir+'/station7'+fileend, 'S7')
cast8 = cast_to_xarray(data_dir+'/station8'+fileend, 'S8')
cast9 = cast_to_xarray(data_dir+'/slope'+fileend, 'S9')
# +
# load the spec pH data
pH_specpd = pd.read_csv('specpH.csv')
# conver to xarray
pH_spec= pH_specpd.set_index('Station').to_xarray()
# -
pH_spec
# +
# Extract CTD data at spec pH depths
# -
# +
# add the MLR within the function
# add the O2 MLR as well
def extract_CTD_pred(specstn, castarray):
    """Estimate pH at one station's spectrophotometric sample depths.

    specstn   : station number used to index the module-level ``pH_spec``.
    castarray : xarray Dataset for that station's CTD cast (indexed by depth).

    Returns two lists of pH estimates, one per multiple linear regression:
      * pHests  -- predictors: temperature and salinity
      * pHests2 -- predictors: temperature, salinity and oxygen
    """
    specdepths = pH_spec.sel(Station=specstn).Pressure
    # Normalization constants for the MLR predictors.
    Tm, Ts = 13.20, 5.92
    Sm, Ss = 34.40, 1.49
    Om, Os = 214.5, 44.7
    oxygen = pH_spec.sel(Station=specstn).Oxygen.values
    pHests = []
    pHests2 = []
    for i in range(len(specdepths)):
        # Nearest CTD bin (within 1 unit of depth) to the bottle sample;
        # one .sel() per sample instead of the original's two.
        nearest = castarray.sel(depth=specdepths[i], method='nearest', tolerance=1)
        Tnorm = (nearest.temperature.values.item() - Tm) / Ts
        Snorm = (nearest.salinity.values.item() - Sm) / Ss
        Onorm = (oxygen[i] - Om) / Os
        # MLR 1: temperature/salinity predictors only.
        pHests.append(8.00 + (3.59E-2)*Tnorm + (-5.51E-3)*Snorm
                      + (6.84E-3)*Tnorm*Snorm)
        # MLR 2: adds dissolved oxygen as a predictor.
        pHests2.append(8.01 + (2.86E-2)*Tnorm + (6.06E-2)*Snorm + (7.82E-2)*Onorm
                       + (-2.09E-2)*Tnorm*Snorm + (-8.24E-3)*Snorm*Onorm)
    return pHests, pHests2
# -
pHest6, pHest6_2 = extract_CTD_pred(6, cast6)
pHest7, pHest7_2 = extract_CTD_pred(7, cast7)
pHest8, pHest8_2 = extract_CTD_pred(8, cast8)
pHest9, pHest9_2 = extract_CTD_pred(9, cast9)
# Concatenate the per-station estimates into single flat lists.
# (Fix: the original's surrounding [...] wrapped each concatenation in a
# one-element list; np.corrcoef/plt.scatter tolerated it, but a flat list
# is what "combine" intends.)
pHestall = pHest6 + pHest7 + pHest8 + pHest9
pHest2all = pHest6_2 + pHest7_2 + pHest8_2 + pHest9_2
# +
# NOTE(review): plt is not imported in this notebook's own import cell;
# presumably it arrives via `from cast_to_xarray import *` -- confirm.
# Measured vs MLR-1 (T, S) estimated pH.
plt.figure(figsize=(13,10))
plt.rcParams.update({'font.size': 16})
plt.scatter(pH_spec.pH.values, pHestall)
plt.xlabel('measured pH')
plt.ylabel('MLR pH estimate')
plt.savefig('pHeval.png')
# +
# R^2 between measured pH and the MLR-1 estimates.
correlation_matrix = np.corrcoef(pH_spec.pH.values, pHestall)
correlation_xy = correlation_matrix[0,1]
r_squared = correlation_xy**2
print(r_squared)
# +
# Same evaluation for MLR-2 (T, S, O2), annotated with its R^2.
correlation_matrix = np.corrcoef(pH_spec.pH.values, pHest2all)
correlation_xy = correlation_matrix[0,1]
r_squared = correlation_xy**2
#print(r_squared)
plt.figure(figsize=(13,10))
plt.rcParams.update({'font.size': 16})
plt.scatter(pH_spec.pH.values, pHest2all)
plt.xlabel('measured pH')
plt.ylabel('MLR pH estimate')
plt.title('predictors: T, S, O2')
c=round(r_squared,2)
s=str(c)
ss = 'R$^2$='+ s
plt.text(8.25, 7.9, ss)
plt.savefig('pHeval2.png')
# -
| extract_pH.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
from qiskit import *
import numpy as np
from qiskit.providers.ibmq import least_busy
from qiskit.visualization import plot_histogram
from qiskit.visualization import plot_bloch_vector
from cmath import rect
from math import pi
sz=(1.5,1.5)
# The following code shows the Bloch plot for few of the states.
# $$ |q\rangle = cos \frac{\theta}{2} |0\rangle + e^{i~\phi} sin\frac{\theta}{2}|1\rangle $$
# The global phase of a qubit is not represented on the Bloch sphere
# $$|1\rangle = e^{i~0}sin\frac{\pi}{2}|1\rangle$$
plot_bloch_vector([1,pi,0],coord_type="spherical",figsize=sz)
# $$|1\rangle = e^{i~\pi/2}sin\frac{\pi}{2}|1\rangle$$
plot_bloch_vector([1,pi,pi/2],coord_type="spherical",figsize=sz)
# $$|+\rangle = cos\frac{\pi}{4}|0\rangle+e^{i~0}sin\frac{\pi}{4}|1\rangle$$
plot_bloch_vector([1,pi/2,0],coord_type="spherical",figsize=sz)
# $$|-\rangle = cos\frac{\pi}{4}|0\rangle+e^{i~\pi}sin\frac{\pi}{4}|1\rangle$$
# Hadamard + with a phase difference of pi
plot_bloch_vector([1,pi/2,pi],coord_type="spherical",figsize=sz)
# $$|i\rangle = cos\frac{\pi}{4}|0\rangle+e^{i~\pi/2}sin\frac{\pi}{4}|1\rangle$$
plot_bloch_vector([1,pi/2,pi/2],coord_type="spherical",figsize=sz)
# $$|-i\rangle = cos\frac{\pi}{4}|0\rangle+e^{-i~\pi/2}sin\frac{\pi}{4}|1\rangle$$
plot_bloch_vector([1,pi/2,-pi/2],coord_type="spherical",figsize=sz)
| Bloch_sphere.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Simple model for tabular data
# + hide_input=true
from fastai.gen_doc.nbdoc import *
from fastai.tabular.models import TabularModel
# + hide_input=true
show_doc(TabularModel)
# -
# `emb_szs` match each categorical variable size with an embedding size, `n_cont` is the number of continuous variables. The model consists of `Embedding` layers for the categorical variables, followed by a `Dropout` of `emb_drop`, and a `BatchNorm` for the continuous variables. The results are concatenated and followed by blocks of `BatchNorm`, `Dropout`, `Linear` and `ReLU` (the first block skips `BatchNorm` and `Dropout`, the last block skips the `ReLU`).
#
# The sizes of the blocks are given in [`layers`](/layers.html#layers) and the probabilities of the `Dropout` in `ps`. The last size is `out_sz`, and we add a last activation that is a sigmoid rescaled to cover `y_range` (if it's not `None`). Lastly, if `use_bn` is set to False, all `BatchNorm` layers are skipped except the one applied to the continuous variables.
#
# Generally it's easiest to just create a learner with [`tabular_learner`](/tabular.data.html#tabular_learner), which will automatically create a [`TabularModel`](/tabular.models.html#TabularModel) for you.
# ## Undocumented Methods - Methods moved below this line will intentionally be hidden
show_doc(TabularModel.forward)
show_doc(TabularModel.get_sizes)
# ## New Methods - Please document or move to the undocumented section
| docs_src/tabular.models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="xpj6twWatL1e"
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="JAPoU8Sm5E6e"
# <table align="left">
#
# <td>
# <a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/neo4j/graph_paysim.ipynb" target="_blank">
# <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
# </a>
# </td>
# <td>
# <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/community/neo4j/graph_paysim.ipynb" target="_blank">
# <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
# View on GitHub
# </a>
# </td>
# </table>
# + [markdown] id="YasYy1BqRHY8"
# # Overview
# In this notebook, you will learn how to use Neo4j AuraDS to create graph features. You'll then use those new features to solve a classification problem with Vertex AI.
#
# + [markdown] id="ze4-nDLfK4pw"
# ## Dataset
# This notebook uses a version of the PaySim dataset that has been modified to work with Neo4j's graph database. PaySim is a synthetic fraud dataset. The goal is to identify whether or not a given transaction constitutes fraud. The [original version of the dataset](https://github.com/EdgarLopezPhD/PaySim) has tabular data.
#
# Neo4j has worked on a modified version that generates a graph dataset [here](https://github.com/voutilad/PaySim). We've pregenerated a copy of that dataset that you can grab [here](https://storage.googleapis.com/neo4j-datasets/paysim.dump). You'll want to download that dataset and then upload it to Neo4j AuraDS. AuraDS is a graph data science tool that is offered as a service on GCP. Instructions on signing up and uploading the dataset are available [here](https://github.com/neo4j-partners/aurads-paysim).
# + [markdown] id="pD0fZLPdsAYf"
# ##Costs
# This tutorial uses billable components of Google Cloud:
#
# * Cloud Storage
# * Vertex AI
#
# Learn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage.
# + [markdown] id="m51HUN1aHNid"
# # Setup
# + [markdown] id="mbIYWyMksbpC"
# ## Set up your development environment
# We suggest you use Colab for this notebook.
# + [markdown] id="zLocKiyCwtR7"
# ## Set up your Google Cloud project
#
# **The following steps are required, regardless of your notebook environment.**
#
# 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
#
# 1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).
#
# 1. [Enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).
#
# 1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).
#
# 1. Enter your project ID in the cell below. Then run the cell to make sure the
# Cloud SDK uses the right project for all the commands in this notebook.
#
# **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
# + [markdown] id="BKipBL0kWY7w"
# ## Install additional Packages
# First off, you'll also need to install a few packages.
# + id="qwKogqD_He_e"
# !pip install --quiet --upgrade neo4j
# + id="tDipS8p-27qg"
# !pip install --quiet google-cloud-storage
# + id="Ix0KpBl-hnxF"
# !pip install --quiet google.cloud.aiplatform
# + [markdown] id="JBXAh7fVt9Ou"
# ## (Colab only) Restart the kernel
# After you install the additional packages, you need to restart the notebook kernel so it can find the packages. When you run this, you may get a notification that the kernel crashed. You can disregard that.
# + id="ySSyV4T_3dQB"
import IPython

# Restart the notebook kernel so the freshly installed packages become
# importable. The "kernel crashed" notification this triggers is expected.
IPython.Application.instance().kernel.do_shutdown(True)
# + [markdown] id="4ldFrUMIHVHP"
# # Working with Neo4j
# + [markdown] id="jMwl92_1HoIl"
# ## Define Neo4J related variables
#
# You'll need to enter the credentials from your AuraDS instance below. You can get your credentials by following this [walkthrough](https://github.com/neo4j-partners/aurads-paysim).
#
# The "DB_NAME" is always neo4j for AuraDS. It is different from the name you gave your database tenant in the AuraDS console.
# + id="96IVMcJeH3N4"
# AuraDS connection settings. The neo4j+s:// scheme enforces TLS; replace
# XXXXX with your instance id and <PASSWORD> with the generated password.
DB_URL = "neo4j+s://XXXXX.databases.neo4j.io"
DB_USER = "neo4j"
DB_PASS = "<PASSWORD>"
# AuraDS always exposes a single database named "neo4j".
DB_NAME = "neo4j"
# + [markdown] id="tpNk1MvcWY7x"
# In this section we're going to connect to Neo4j and look around the database. We're going to generate some new features in the dataset using Neo4j's Graph Data Science library. Finally, we'll load the data into a Pandas dataframe so that it's all ready to put into GCP Feature Store.
# + [markdown] id="RJptLsHUHgCV"
# ## Exploring the database
# + id="QiFDi4uLWY7x"
import pandas as pd
from neo4j import GraphDatabase
# + id="sgEy4q7iWY7y"
# A single Driver instance is thread-safe and reused for every session below.
driver = GraphDatabase.driver(DB_URL, auth=(DB_USER, DB_PASS))
# + [markdown] id="yBpL6dY3HEMD"
# Now, let's explore the data in the database a bit to understand what we have to work with.
# + id="z4a0_CqVWY7y"
# node labels
# Count nodes per label. apoc.cypher.run is used so the label name can be
# interpolated into a per-label MATCH (labels cannot be parameterized in Cypher).
with driver.session(database=DB_NAME) as session:
    result = session.read_transaction(
        lambda tx: tx.run(
            """
            CALL db.labels() YIELD label
            CALL apoc.cypher.run('MATCH (:`'+label+'`) RETURN count(*) as freq', {})
            YIELD value
            RETURN label, value.freq AS freq
            """
        ).data()
    )
df = pd.DataFrame(result)
display(df)
# + id="SrETUiWdFDoy"
# relationship types
# Same technique as above, but counting relationships per type.
with driver.session(database=DB_NAME) as session:
    result = session.read_transaction(
        lambda tx: tx.run(
            """
            CALL db.relationshipTypes() YIELD relationshipType as type
            CALL apoc.cypher.run('MATCH ()-[:`'+type+'`]->() RETURN count(*) as freq', {})
            YIELD value
            RETURN type AS relationshipType, value.freq AS freq
            ORDER by freq DESC
            """
        ).data()
    )
df = pd.DataFrame(result)
display(df)
# + id="Lsfbg8rpJcXo"
# transaction types
# Total transacted amount per transaction subtype.
# NOTE(review): globalSum, globalCnt, scaleFactor and txCnt are computed but
# never returned by this query — presumably leftovers from an earlier version.
with driver.session(database=DB_NAME) as session:
    result = session.read_transaction(
        lambda tx: tx.run(
            """
            MATCH (t:Transaction)
            WITH sum(t.amount) AS globalSum, count(t) AS globalCnt
            WITH *, 10^3 AS scaleFactor
            UNWIND ['CashIn', 'CashOut', 'Payment', 'Debit', 'Transfer'] AS txType
            CALL apoc.cypher.run('MATCH (t:' + txType + ')
                RETURN sum(t.amount) as txAmount, count(t) AS txCnt', {})
            YIELD value
            RETURN txType,value.txAmount AS TotalMarketValue
            """
        ).data()
    )
df = pd.DataFrame(result)
display(df)
# + [markdown] id="AKo4m-A4J9F8"
# ## Create a New Feature with a Graph Embedding using Neo4j
# First we're going to create an in-memory graph representation of the data in Neo4j Graph Data Science (GDS).
#
# Note, if you get an error saying the graph already exists, that's probably because you ran this code before. You can destroy it using the command in the cleanup section of this notebook.
# + id="vdBkeDV7J8Ke"
# Project an in-memory GDS graph of Clients connected by aggregated
# transaction amounts. NOTE(review): this mutates the GDS graph catalog but is
# issued via read_transaction — it works because catalog procedures are not
# database writes, but write_transaction would be the safer choice; confirm
# against the GDS version in use.
with driver.session(database=DB_NAME) as session:
    result = session.read_transaction(
        lambda tx: tx.run(
            """
            CALL gds.graph.create.cypher('client_graph',
              'MATCH (c:Client) RETURN id(c) as id, c.num_transactions as num_transactions, c.total_transaction_amnt as total_transaction_amnt, c.is_fraudster as is_fraudster',
              'MATCH (c:Client)-[:PERFORMED]->(t:Transaction)-[:TO]->(c2:Client) return id(c) as source, id(c2) as target, sum(t.amount) as amount, "TRANSACTED_WITH" as type ')
            """
        ).data()
    )
df = pd.DataFrame(result)
display(df)
# + [markdown] id="WewKw5g4NKVo"
# Now we can generate an embedding from that graph. This is a new feature we can use in our predictions. We're using FastRP, which is a more fully featured, higher-performance successor to Node2Vec. You can learn more about that [here](https://neo4j.com/docs/graph-data-science/current/algorithms/fastrp/).
# + id="fBa8ofijEtHn"
# Run FastRP and store the 16-dimensional embedding on each node of the
# in-memory projection (mutate does not write back to the database).
with driver.session(database=DB_NAME) as session:
    result = session.read_transaction(
        lambda tx: tx.run(
            """
            CALL gds.fastRP.mutate('client_graph',{
                relationshipWeightProperty:'amount',
                iterationWeights: [0.0, 1.00, 1.00, 0.80, 0.60],
                featureProperties: ['num_transactions', 'total_transaction_amnt'],
                propertyRatio: 0.25,
                nodeSelfInfluence: 0.15,
                embeddingDimension: 16,
                randomSeed: 1,
                mutateProperty:'embedding'
            })
            """
        ).data()
    )
df = pd.DataFrame(result)
display(df)
# + [markdown] id="7PCI0yiUNpLZ"
# Finally we dump that out to a dataframe
# + id="gkALAMl5NtDg"
# Stream node properties out of the projection into a long-format dataframe:
# one row per (nodeId, property) pair.
with driver.session(database=DB_NAME) as session:
    result = session.read_transaction(
        lambda tx: tx.run(
            """
            CALL gds.graph.streamNodeProperties
            ('client_graph', ['embedding', 'num_transactions', 'total_transaction_amnt', 'is_fraudster'])
            YIELD nodeId, nodeProperty, propertyValue
            RETURN nodeId, nodeProperty, propertyValue
            """
        ).data()
    )
df = pd.DataFrame(result)
df.head()
# + [markdown] id="gzDiV7Efv40X"
# Now we need to take that dataframe and shape it into something that better represents our classification problem.
# + id="JkzFxCgdqeTt"
# Reshape long-format (nodeId, nodeProperty, propertyValue) rows into one
# wide row per node.
x = (
    df.pivot(index="nodeId", columns="nodeProperty", values="propertyValue")
    .reset_index()
)
x.columns.name = None
x.head()
# + [markdown] id="uPim4AGB8w3Q"
# is_fraudster will have a value of 0 or 1 if populated. If the value is -9223372036854775808 then it's unlabeled, so we're going to drop it.
# + id="2jZZg6ln8wW_"
# Neo4j represents a missing long as the smallest int64 value; such rows are
# unlabeled and removed.
UNLABELED = -9223372036854775808
x = x[x["is_fraudster"] != UNLABELED]
x.head()
# + [markdown] id="gOt_DjoPVirz"
# Note that the embedding row is an array. To make this dataset more consumable, we should flatten that out into multiple individual features: embedding_0, embedding_1, ... embedding_n.
# + id="h9j0PPn9H4RD"
FEATURES_FILENAME = "features.csv"
# Expand the per-node embedding vector into embedding_0 ... embedding_n columns.
embedding_cols = pd.DataFrame(x["embedding"].values.tolist()).add_prefix("embedding_")
merged = x.drop(columns=["embedding"]).merge(
    embedding_cols, left_index=True, right_index=True
)
# Feature-store upload keeps nodeId as the entity id but drops the label and
# raw aggregates; the training set keeps everything except nodeId.
features_df = merged.drop(
    columns=["is_fraudster", "num_transactions", "total_transaction_amnt"]
)
train_df = merged.drop(columns=["nodeId"])
features_df.to_csv(FEATURES_FILENAME, index=False)
# + [markdown] id="cWBOxHrusEXk"
# This dataset is too small to use with Vertex AI for AutoML tabular data. For sake of demonstration, we're going to repeat it a few times. Don't do this in the real world.
# + id="JMTvbcsvdVfb"
TRAINING_FILENAME = "train.csv"
pd.concat([train_df] * 10).to_csv(TRAINING_FILENAME, index=False)
# + [markdown] id="hpNFaHfKK6jK"
# And that's it! The dataframe now has a nice dataset that we can use with GCP Vertex AI.
# + [markdown] id="GTIKVdXJIOaF"
# # Using Vertex AI with Neo4j data
# + [markdown] id="QrJyXGJAHr_M"
# ## Define Google Cloud variables
# You'll need to set a few variables for your GCP environment. PROJECT_ID and STORAGE_BUCKET are most critical. The others will probably work with the defaults given.
# + id="K-FC4GI1H3jx"
# Edit these variables!
PROJECT_ID = "YOUR-PROJECT-ID"
STORAGE_BUCKET = "YOUR-BUCKET-NAME"
# You can leave these defaults
REGION = "us-central1"
STORAGE_PATH = "paysim"
# Must match the embeddingDimension used in the FastRP call above.
EMBEDDING_DIMENSION = 16
FEATURESTORE_ID = "paysim"
ENTITY_NAME = "payer"
# + id="1XoT1nT_JlYx"
import os
# Make gcloud/client libraries pick up the project without an explicit config.
os.environ["GCLOUD_PROJECT"] = PROJECT_ID
# + [markdown] id="Id6tjQDbgf2S"
# ## Authenticate your Google Cloud account
#
# + id="HucMnpmVgfmX"
try:
    # google.colab only exists inside a Colab runtime; importing it elsewhere
    # raises ImportError, in which case we skip interactive authentication and
    # rely on gcloud SDK / application-default credentials instead.
    from google.colab import auth as google_auth

    google_auth.authenticate_user()
except ImportError:
    # Not running on Colab — nothing to do. (Narrowed from a bare `except:`
    # so that genuine authentication failures on Colab are not swallowed.)
    pass
# + [markdown] id="FUU7z4FjJS90"
# ## Upload to a GCP Cloud Storage Bucket
#
# To get the data into Vertex AI, we must first put it in a bucket as a CSV.
# + id="e3nbLg1cKJpJ"
from google.cloud import storage
client = storage.Client()
# + id="4dAkAU5ALnUo"
# Create the bucket. NOTE(review): create_bucket raises Conflict if the bucket
# already exists — rerunning this cell will fail; verify before re-running.
bucket = client.bucket(STORAGE_BUCKET)
client.create_bucket(bucket)
# + id="UTo7-_oJL_dZ"
# Upload our files to that bucket
for filename in [FEATURES_FILENAME, TRAINING_FILENAME]:
    upload_path = os.path.join(STORAGE_PATH, filename)
    blob = bucket.blob(upload_path)
    blob.upload_from_filename(filename)
# + [markdown] id="ArK3cfKsdT1x"
# ## Train and deploy a model on GCP
# We'll use the engineered features to train an AutoML Tables model, then deploy it to an endpoint
# + id="KGjrD-k3dsCN"
from google.cloud import aiplatform
aiplatform.init(project=PROJECT_ID, location=REGION)
# Register the uploaded CSV as a Vertex AI tabular dataset.
dataset = aiplatform.TabularDataset.create(
    display_name="paysim",
    gcs_source=os.path.join("gs://", STORAGE_BUCKET, STORAGE_PATH, TRAINING_FILENAME),
)
dataset.wait()
print(f'\tDataset: "{dataset.display_name}"')
print(f'\tname: "{dataset.resource_name}"')
# + id="oaSPuk31N2xS"
# Declare every input column (embeddings + raw aggregates) as numeric so
# AutoML does not try to infer categorical types.
embedding_column_names = ["embedding_{}".format(i) for i in range(EMBEDDING_DIMENSION)]
other_column_names = ["num_transactions", "total_transaction_amnt"]
all_columns = other_column_names + embedding_column_names
column_specs = {column: "numeric" for column in all_columns}
job = aiplatform.AutoMLTabularTrainingJob(
    display_name="train-paysim-automl-1",
    optimization_prediction_type="classification",
    column_specs=column_specs,
)
# + id="Fqf44y_G8vi1"
# Launch training (blocking). budget_milli_node_hours=1000 == 1 node hour.
model = job.run(
    dataset=dataset,
    target_column="is_fraudster",
    training_fraction_split=0.8,
    validation_fraction_split=0.1,
    test_fraction_split=0.1,
    model_display_name="paysim-prediction-model",
    disable_early_stopping=False,
    budget_milli_node_hours=1000,
)
# + id="IoVThi28VO_R"
# Deploy to an online endpoint for the prediction call later in the notebook.
endpoint = model.deploy(machine_type="n1-standard-4")
# + [markdown] id="-NnDaATyWY7z"
# ## Loading Data into GCP Feature Store
# In this section, we'll take our dataframe with newly engineered features and load that into GCP feature store.
# + id="r0DcYzPkRrzj"
from google.cloud.aiplatform_v1 import FeaturestoreServiceClient

# The Feature Store API is regional, so the endpoint embeds the region name.
api_endpoint = f"{REGION}-aiplatform.googleapis.com"
fs_client = FeaturestoreServiceClient(client_options={"api_endpoint": api_endpoint})

# Pre-build the resource names used by the create/get calls below.
resource_path = fs_client.common_location_path(PROJECT_ID, REGION)
fs_path = fs_client.featurestore_path(PROJECT_ID, REGION, FEATURESTORE_ID)
entity_path = fs_client.entity_type_path(PROJECT_ID, REGION, FEATURESTORE_ID, ENTITY_NAME)
# + [markdown] id="wMN4Ue2hjdL3"
# First, let's check if the Feature Store already exists
# + id="tYQknjQFsVNC"
from grpc import StatusCode


def check_has_resource(fetch):
    """Return True if *fetch* succeeds, False if the service reports NOT_FOUND.

    Args:
        fetch: Zero-argument callable that requests the resource, e.g.
            ``lambda: fs_client.get_featurestore(name=fs_path)``.
            (Renamed from ``callable``, which shadowed the builtin.)

    Returns:
        bool: True when the resource exists, False when the API raised a
        gRPC NOT_FOUND error.

    Raises:
        Exception: Any failure other than gRPC NOT_FOUND (auth, quota,
        network, ...) is re-raised unchanged.
    """
    try:
        fetch()
        return True
    except Exception as e:
        # Only swallow "resource does not exist"; everything else is a real
        # error and must surface to the caller.
        if (
            not hasattr(e, "grpc_status_code")
            or e.grpc_status_code != StatusCode.NOT_FOUND
        ):
            raise
        return False
# + id="qTVIsom6eejQ"
# Probe for an existing feature store so the cells below are idempotent.
feature_store_exists = check_has_resource(
    lambda: fs_client.get_featurestore(name=fs_path)
)
# + id="caTWbgeChd_x"
from google.cloud.aiplatform_v1.types import entity_type as entity_type_pb2
from google.cloud.aiplatform_v1.types import feature as feature_pb2
from google.cloud.aiplatform_v1.types import featurestore as featurestore_pb2
from google.cloud.aiplatform_v1.types import \
    featurestore_service as featurestore_service_pb2
from google.cloud.aiplatform_v1.types import io as io_pb2
# Create the feature store if needed; .result() blocks on the long-running op.
if not feature_store_exists:
    create_lro = fs_client.create_featurestore(
        featurestore_service_pb2.CreateFeaturestoreRequest(
            parent=resource_path,
            featurestore_id=FEATURESTORE_ID,
            featurestore=featurestore_pb2.Featurestore(
                online_serving_config=featurestore_pb2.Featurestore.OnlineServingConfig(
                    # One serving node is enough for this demo workload.
                    fixed_node_count=1
                ),
            ),
        )
    )
    print(create_lro.result())
# + id="Q1JRwvvYJMBy"
# Create the entity type plus one DOUBLE feature per embedding dimension,
# again skipping creation when it already exists.
entity_type_exists = check_has_resource(
    lambda: fs_client.get_entity_type(name=entity_path)
)
if not entity_type_exists:
    users_entity_type_lro = fs_client.create_entity_type(
        featurestore_service_pb2.CreateEntityTypeRequest(
            parent=fs_path,
            entity_type_id=ENTITY_NAME,
            entity_type=entity_type_pb2.EntityType(
                description="Main entity type",
            ),
        )
    )
    print(users_entity_type_lro.result())
    feature_requests = [
        featurestore_service_pb2.CreateFeatureRequest(
            feature=feature_pb2.Feature(
                value_type=feature_pb2.Feature.ValueType.DOUBLE,
                description="Embedding {} from Neo4j".format(i),
            ),
            feature_id="embedding_{}".format(i),
        )
        for i in range(EMBEDDING_DIMENSION)
    ]
    create_features_lro = fs_client.batch_create_features(
        parent=entity_path,
        requests=feature_requests,
    )
    print(create_features_lro.result())
# + id="Uz78rmNrwK0V"
# Bulk-import the features.csv written earlier. The CSV's nodeId column is
# used as the entity id; every row is stamped with the same ingestion time.
feature_specs = [
    featurestore_service_pb2.ImportFeatureValuesRequest.FeatureSpec(
        id="embedding_{}".format(i)
    )
    for i in range(EMBEDDING_DIMENSION)
]
from google.protobuf.timestamp_pb2 import Timestamp
feature_time = Timestamp()
feature_time.GetCurrentTime()
# The API requires whole-second timestamps for feature_time.
feature_time.nanos = 0
import_request = fs_client.import_feature_values(
    featurestore_service_pb2.ImportFeatureValuesRequest(
        entity_type=entity_path,
        csv_source=io_pb2.CsvSource(
            gcs_source=io_pb2.GcsSource(
                uris=[
                    os.path.join(
                        "gs://", STORAGE_BUCKET, STORAGE_PATH, FEATURES_FILENAME
                    )
                ]
            )
        ),
        entity_id_field="nodeId",
        feature_specs=feature_specs,
        worker_count=1,
        feature_time=feature_time,
    )
)
print(import_request.result())
# + [markdown] id="mOeih_WxWhSx"
# ## Sending a prediction using features from the feature store
# + id="HFr8zWyiWxOa"
from google.cloud.aiplatform_v1 import FeaturestoreOnlineServingServiceClient
# Online serving uses a separate client from the admin API above.
data_client = FeaturestoreOnlineServingServiceClient(
    client_options={"api_endpoint": api_endpoint}
)
# + id="CnVC3BHmWylQ"
# Retrieve Neo4j embeddings from feature store
from google.cloud.aiplatform_v1.types import FeatureSelector, IdMatcher
from google.cloud.aiplatform_v1.types import \
    featurestore_online_service as featurestore_online_service_pb2
# Select all embedding_* features for a single entity.
feature_selector = FeatureSelector(
    id_matcher=IdMatcher(
        ids=["embedding_{}".format(i) for i in range(EMBEDDING_DIMENSION)]
    )
)
fs_features = data_client.read_feature_values(
    featurestore_online_service_pb2.ReadFeatureValuesRequest(
        entity_type=entity_path,
        # entity_id "5" is an arbitrary example node from the imported CSV;
        # it must exist in features.csv or the read returns empty values.
        entity_id="5",
        feature_selector=feature_selector,
    )
)
# Map feature id -> value. Values are stringified because the AutoML
# prediction endpoint expects string-typed instance fields.
saved_embeddings = dict(
    zip(
        (fd.id for fd in fs_features.header.feature_descriptors),
        (str(d.value.double_value) for d in fs_features.entity_view.data),
    )
)
# + id="jgW3Ks0SihdN"
# Combine with other features. These might be sourced per transaction.
# FIX: the model was trained on columns "num_transactions" and
# "total_transaction_amnt" (see column_specs above); the original key
# "total_dollar_amnt" did not match any trained column, so the value would
# have been ignored by the endpoint.
all_features = {
    "num_transactions": "80",
    "total_transaction_amnt": "7484459.618641878",
}
all_features.update(saved_embeddings)
# Vertex AI expects a list of instances, each a dict of feature name -> string.
instances = [{key: str(value) for key, value in all_features.items()}]
# + id="DnK_FJeIi--4"
# Send a prediction
endpoint.predict(instances=instances)
# + [markdown] id="DU79nGz2gv_M"
# # Cleanup
#
# + [markdown] id="cBqqQEW_Kggf"
# ## Neo4j cleanup
#
# To delete the Graph Data Science representation of the graph, run this:
# + id="ICSNRLM5YQ5N"
# Drop the in-memory GDS projection; rerun the create cell afterwards if
# needed. NOTE(review): issued via read_transaction like the create call —
# works for catalog procedures, but write_transaction would be safer.
with driver.session(database=DB_NAME) as session:
    result = session.read_transaction(
        lambda tx: tx.run(
            """
            CALL gds.graph.drop('client_graph')
            """
        ).data()
    )
# + [markdown] id="NAM7PXfmKikz"
# ## Google Cloud cleanup
#
# Delete the feature store and turn down the endpoint
# + id="Es9wPH3UVbP-"
# force=True also deletes nested entity types and features.
fs_client.delete_featurestore(
    request=featurestore_service_pb2.DeleteFeaturestoreRequest(
        name=fs_client.featurestore_path(PROJECT_ID, REGION, FEATURESTORE_ID),
        force=True,
    )
).result()
endpoint.delete()
| notebooks/community/neo4j/graph_paysim.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Load the TensorBoard notebook extension
# %load_ext tensorboard
# +
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout, Bidirectional
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from yahoo_fin import stock_info as si
from collections import deque
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
import os
import random
import multiprocessing

# Enable on-demand GPU memory allocation, but only when a GPU is actually
# present: list_physical_devices returns an empty list on CPU-only machines,
# and the previous unconditional gpus[0] access raised IndexError there.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    tf.config.experimental.set_memory_growth(gpus[0], True)

# Opt in to automatic mixed precision (float16 compute, float32 master
# weights) for faster training on tensor-core GPUs.
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
tf.keras.mixed_precision.experimental.set_policy(policy)
# -
def create_model(sequence_length, units=256, cell=LSTM, n_layers=2, dropout=0.3,
                 loss="mean_absolute_error", optimizer="rmsprop", bidirectional=False, layer_activation="linear"):
    """Build and compile a stacked (optionally bidirectional) RNN regressor.

    Args:
        sequence_length: Number of features per timestep (last input dim).
        units: RNN units per layer.
        cell: Keras recurrent layer class, e.g. LSTM or GRU.
        n_layers: Number of recurrent layers.
        dropout: Dropout rate applied after every recurrent layer.
        loss: Loss name passed to model.compile.
        optimizer: Optimizer name passed to model.compile.
        bidirectional: Wrap each recurrent layer in Bidirectional.
        layer_activation: Activation for the final 4-unit Dense output
            (predicts close/high/low/open).

    Returns:
        A compiled tf.keras Sequential model.
    """
    model = Sequential()
    for i in range(n_layers):
        # Only non-final layers emit the full sequence; the last layer must
        # return a single vector for the Dense head. (The original code set
        # return_sequences=True for the first layer unconditionally, which
        # produced a 3-D output into Dense when n_layers == 1.)
        return_sequences = i < n_layers - 1
        if i == 0:
            # First layer carries the input_shape specification.
            if bidirectional:
                model.add(Bidirectional(cell(units, return_sequences=return_sequences),
                                        input_shape=(None, sequence_length)))
            else:
                model.add(cell(units, return_sequences=return_sequences,
                               input_shape=(None, sequence_length)))
        else:
            layer = cell(units, return_sequences=return_sequences)
            model.add(Bidirectional(layer) if bidirectional else layer)
        # add dropout after each layer
        model.add(Dropout(dropout))
    model.add(Dense(4, activation=layer_activation))
    model.compile(loss=loss, metrics=["mean_absolute_error"], optimizer=optimizer)
    return model
# +
#def run_tensorflow():
# Number of past trading days per training example.
window_size = 320
# create these folders if they do not exist
# Window size or the sequence length: 8 features per day for the first
# window_size - 1 days ((window_size * 8) - 8 inputs per example).
N_STEPS = (window_size * 8) - 8
# Lookup step, 1 is the next day
#LOOKUP_STEP = int(run_dict[run]["LOOKUP_STEP"])
# test ratio size, 0.3 is 30%
TEST_SIZE = 0.3
# features to use
items = ["close", "ema", "high", "low", "open", "rsi", "sma", "volume"]
day_counts = [f"_{i}" for i in range(0, window_size -1)]
# Input columns: every feature for days 0 .. window_size - 2.
FEATURE_COLUMNS = []
for day_count in day_counts:
    for item in items:
        FEATURE_COLUMNS.append(f"{item}{day_count}")
# Target columns: the four prices of the final day (window_size - 1).
TARGET_COLUMNS = []
for item in ["close", "high", "low", "open"]:
    TARGET_COLUMNS.append(f"{item}_{window_size - 1}")
# date now
date_now = time.strftime("%Y-%m-%d")
### model parameters
N_LAYERS = 3
# LSTM cell
CELL = LSTM
# 1000 LSTM units per layer
UNITS = 1000
# 25% dropout
DROPOUT = 0.25
# whether to use bidirectional RNNs
BIDIRECTIONAL = True
### training parameters
# mean absolute error loss
# LOSS = "mae"
# huber loss
LOSS = "huber_loss"
OPTIMIZER = "adam"
BATCH_SIZE = 64
EPOCHS = 25
LAYER_ACTIVATION = "linear"
# Stock market: "MIXED" means the preprocessed file combines tickers.
ticker = "MIXED"
# NOTE(review): ticker_data_filename is defined but never used below.
ticker_data_filename = os.path.join("data", f"{ticker}_{date_now}.csv")
# model name to save, making it as unique as possible based on parameters
model_name = f"{date_now}_{ticker}-{window_size}-{LOSS}-{OPTIMIZER}-{CELL.__name__}-{LAYER_ACTIVATION}-layers-{N_LAYERS}-units-{UNITS}"
if BIDIRECTIONAL:
    model_name += "-b"
#----------------------------------------------------------------------------------------------------------#
#----------------------------------------------------------------------------------------------------------#
#----------------------------------------------------------------------------------------------------------#
#try:
# Create output folders on first run.
if not os.path.isdir("results"):
    os.mkdir("results")
if not os.path.isdir("logs"):
    os.mkdir("logs")
if not os.path.isdir("data"):
    os.mkdir("data")
# load the data
data = pd.read_csv(f"../data/processed/all_processed_{window_size}.csv")
# construct the model
model = create_model(N_STEPS, loss=LOSS, units=UNITS, cell=CELL, n_layers=N_LAYERS,
                     dropout=DROPOUT, optimizer=OPTIMIZER, bidirectional=BIDIRECTIONAL, layer_activation=LAYER_ACTIVATION)
# some tensorflow callbacks: keep only the best weights, log for TensorBoard
checkpointer = ModelCheckpoint(os.path.join("results", model_name + ".h5"), save_weights_only=True, save_best_only=True, verbose=1)
tensorboard = TensorBoard(log_dir=os.path.join("logs", model_name))
X = data[FEATURE_COLUMNS]
y = data[TARGET_COLUMNS]
# convert to numpy arrays
X = np.array(X)
y = np.array(y)
# reshape X to fit the neural network: (samples, timesteps=1, features)
X = X.reshape((X.shape[0], 1, X.shape[1]))
# split the dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, shuffle=True)
history = model.fit(X_train, y_train,
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_data=(X_test, y_test),
                    callbacks=[checkpointer, tensorboard],
                    verbose=1)
# Save full model (the checkpoint above saved best weights only).
model.save(os.path.join("results", model_name) + ".h5")
#except:
#    print("There was an attempt.")
# Free graph/GPU memory between runs.
tf.keras.backend.clear_session()
# +
import lazypredict
# NOTE(review): this cell duplicates the column setup from the training cell
# above and relies on window_size defined there; lazypredict is imported but
# not yet used.
# test ratio size, 0.3 is 30%
TEST_SIZE = 0.3
# features to use
items = ["close", "ema", "high", "low", "open", "rsi", "sma", "volume"]
day_counts = [f"_{i}" for i in range(0, window_size -1)]
FEATURE_COLUMNS = []
for day_count in day_counts:
    for item in items:
        FEATURE_COLUMNS.append(f"{item}{day_count}")
TARGET_COLUMNS = []
for item in ["close", "high", "low", "open"]:
    TARGET_COLUMNS.append(f"{item}_{window_size - 1}")
data = pd.read_csv(f"../data/processed/all_processed_{window_size}.csv")
# -
# -
| notebooks/model_trainer_320_linear_3_1000.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="xupONShN6OJi" outputId="69d27665-0902-48af-809b-e99f0676bd74"
# !pip install feast[aws]==0.19.3
# + colab={"base_uri": "https://localhost:8080/"} id="DR2im1Pe8IEZ" outputId="bf5e2daf-5035-43f5-bf6c-fa5344a5ef52"
# !feast init -t aws customer_segmentation
# + colab={"base_uri": "https://localhost:8080/"} id="KNWPXqaDln12" outputId="faa3b044-3333-4b02-beea-7bc847e12bb3"
# !ls -R /content/customer_segmentation/
# + id="K_R7U0hSNues" colab={"base_uri": "https://localhost:8080/"} outputId="c5890907-472d-4124-b9b2-dd92b1b8977e"
# %cd customer_segmentation/
# !rm -rf driver_repo.py test.py
# + colab={"base_uri": "https://localhost:8080/"} id="qv2RGoXY8cPW" outputId="8656c26b-4205-44fb-b339-ded32cde7381"
# !cat /content/customer_segmentation/feature_store.yaml
# + colab={"base_uri": "https://localhost:8080/"} id="hreQlxriOGzY" outputId="41a74304-a5fa-463f-92ea-5475431d1e94"
# !feast apply
# + id="UtKq4HpsOrhw" colab={"base_uri": "https://localhost:8080/"} outputId="71be812f-f4cc-40e9-955e-0ace8469bb4d"
from feast import FeatureStore

# Load the repo initialized by `feast init`/`feast apply` above; "." works
# because the previous cell cd'd into customer_segmentation/.
store = FeatureStore(repo_path=".")
# + colab={"base_uri": "https://localhost:8080/"} id="-6trLQKoPa-U" outputId="f5ede051-957b-499c-8b16-a73439cbbd45"
print(f"List of entities: {store.list_entities()}")
print(f"List of FeatureViews: {store.list_feature_views()}")
| Chapter04/ch4_Feast_aws_initialization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import seaborn as sns

# Toy housing dataset: two numeric features, one categorical column, one target.
df = pd.DataFrame(
    {
        'square_feet': [1000, 2000],
        'num_bedrooms': [2, 4],
        'proximity_to_ocean': ['<1 HOUR', 'NO'],
        'price': [490000, 690000],
    }
)
df
# +
target = 'price'
# Non-numeric columns cannot be scatter-plotted against the target.
non_numeric = list(df.select_dtypes(exclude='number').columns)
# Keep the numeric columns only (difference() also sorts them alphabetically).
data = df.loc[:, df.columns.difference(non_numeric)]
# One scatterplot per remaining feature (x) against the target (y).
sns.pairplot(data=data, x_vars=data.drop(columns=target), y_vars=[target]);
| multiple_scatterplots_features_vs_target.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Tutorial: Model Comparison and Assessment for Microtubule Catastrophe
# #### Written by : <NAME>, <NAME>, and <NAME>
# This notebook is a guide to using the code provided in our package. This example illustrates how we have used our package to analyze our microtubule catastrophe data.
#
# The first thing we would need to do is to clone the package from git and install using ```pip install -e MCAT_pkg```
# +
# Reload edited modules automatically — convenient while developing MCAT_pkg.
# %load_ext autoreload
# %autoreload 2
import os, sys
# Using alias for packages can reduce having to write out the entire name every single time you call them
import bokeh
import pandas as pd
import holoviews as hv
import MCAT_pkg as mc
import warnings
warnings.simplefilter('ignore')
# -
# You can find out more information about a package or function by adding "?" or "??" behind its name:
# +
# mc?
# -
# After we import our package, we will need to load in our data.
# +
# mc??
# +
# Identify the location that the data file is in
data_path = "data/gardner_mt_catastrophe_only_tubulin.csv"
# Read in the file as df; comment="#" skips the header block in the CSV.
df = pd.read_csv(data_path, comment = "#")
# -
# Let's take a look at the first 5 rows of the data using the command:
df.head()
# We would like to parse the dataframe into a tidy format. For more information about tidy data, go to the link [here.](https://bebi103a.github.io/lessons/06/tidy_data.html)
# Use mc.parse_df to parse the data into working format from the file path
df_parsed = mc.parse_df("data/gardner_mt_catastrophe_only_tubulin.csv")
# Let's take a look at the dataframe.
df_parsed.head()
# Next, we would like to define a categorical color palette to use for different tubulin concentrations
# Define a color palette to use throughout plots, one color per experiment trial
colors = bokeh.palettes.YlGn[6]
print(colors)
# We would now perform exploratory analysis on the dataset using an ECDF, since they visualize the cumulative density function (CDF). For more information, visit the link [here.](https://bebi103a.github.io/lessons/06/visualizing_distributions.html)
p = mc.categorical_plot(df_parsed, variable = "Time to catastrophe (s)", cats = "Concentration (uM)",
                        palette = colors, order = ['14 uM', '12 uM', '10 uM', '9 uM', '7 uM'])
p.title.text = "ECDF of Catastrophe times by Concentration"
mc.bokeh.io.show(p)
# From the ECDF, we can see that the tubulin concentration 14uM has the longest overall time to catastrophe, followed by 12 uM, 10uM, and 7uM/9uM. We can see a general trend that higher tubulin concentrations tend to have longer catastrophe times.
# We now want to see what the individual tubulin concentrations would look like as stripbox plots.
p_strip = mc.categorical_plot(df_parsed, variable = "Time to catastrophe (s)", cats = "Concentration (uM)",
                              format = "stripbox",
                              order=['14 uM', '12 uM', '10 uM', '9 uM', '7 uM'],
                              palette = colors)
p_strip.title.text = "Stripbox Plot of Catastrophe Times, separated by Concentration"
mc.bokeh.io.show(p_strip)
# Here, we can see a similar trend that higher tubulin concentrations tend to have longer catastrophe times. One thing to note, however, is that catastrophe times of tubulin concentration 12uM has more outliers, which can influence/bias the mean.
# Now, we would like to compare whether a Gamma distribution or an exponential distribution is a better model for this data. For simplicity, we will first perform the tests on tubulin concentration 12uM and then the rest of the concentrations once we determine which model is better.
# We need to pull out the data for catastrophe times for a tubulin concentration of 12 uM.
# Generate array of times to catastrophe for 12 uM concentration of tubulin
conc_12_times = df_parsed.loc[df_parsed["Concentration (uM)"] == '12 uM', 'Time to catastrophe (s)'].values
# #### Gamma Distribution Model
# We will first generate parameter estimates according to the Gamma distribution. You can look at the functions within the MCAT_pgk folder.
mc.mle_iid_gamma(conc_12_times)
# Perform maximum likelihood estimates for parameters for i.i.d. gamma measurements, parametrized by alpha, b=1/beta
mc.log_like_iid_gamma_log_params(mc.mle_iid_gamma(conc_12_times), conc_12_times)
# We will draw bootstrap replicates of our MLE.
# size=100 keeps the tutorial fast; the full analysis used more replicates.
gamma_bs_reps = mc.draw_bs_reps(conc_12_times, mc.mle_iid_gamma, size=100)
# This will give us the MLE estimate (mean of the bootstrap replicates).
gamma_mle = mc.np.mean(gamma_bs_reps, axis = 0)
p = mc.overlay_models(conc_12_times, "Time to catastrophe (s)", gamma_mle,
                      exp_color = "#addd8e",
                      theor_color = "dimgray")
p.title.text = "Gamma Theoretical Distribution"
bokeh.io.show(p)
# We will move on to generate parameter estimates according to the exponential distribution.
# To obtain MLEs for both beta values, we begin with the formula for the PDF : $f(t; \beta_1, \beta_2) = \frac{\beta_1 \beta_2}{\beta_2 - \beta_1}(e^{-\beta_1t} - e^{-\beta_2t})$. We want to find the MLE, so we will consider the problem of maximizing the log likelihood rather than attempting to maximize this expression and avoid errors with dividing by zero when $\beta_1 = \beta_2$. However, we will first rewrite this expression by setting a new variable, $\Delta \beta = \beta_2 - \beta_1$ to simplify some of the algebraic manipulation. We also do this to avoid the potential of underflowing errors in the code when we get extremely small values of $\beta$. This gives us:
#
# $L(t; \beta_1, \Delta \beta) = \prod_{i = 1}^n\frac{(\beta_1)(\beta_1 + \Delta\beta)}{\Delta\beta}(e^{-\beta_1 t})(1 - e^{-\Delta \beta})$
#
# Taking the natural logarithm of this expression for likelihood, the product will turn into a summation, which is much easier to work with. We get:
#
# $l(t; \beta_1, \Delta \beta) = \sum_{i = 1}^n \mbox{ln}\beta_1 +\mbox{ln}(\beta_1 + \Delta \beta) - \mbox{ln}\Delta \beta - \beta t_i + \mbox{ln}(1 - e^{-\Delta \beta t_i})$
#
# This is the form for log likelihood that we will use below.
# For example sake, we will only be bootstrapping 100 replicates
# instead of 10000 that we used in our analysis
exp_bs_reps = mc.draw_bs_reps(conc_12_times, mc.mle_iid_exp, size=100)
exp_mle = mc.np.mean(exp_bs_reps, axis = 0)
exp_mle
p = mc.overlay_models(conc_12_times, "Time to catastrophe (s)", exp_mle,
cdf_fun = mc.model_cdf,
exp_color = "#addd8e",
theor_color = "dimgray")
p.title.text = "Exponential Theoretical Distribution"
bokeh.io.show(p)
# We want to get the AIC values for each distribution
# Exponential distribution
mc.AIC(exp_mle, mc.log_like_iid_exp_log_params, conc_12_times)
# Gamma distribution
mc.AIC(gamma_mle, mc.MLE_analysis.log_like_iid_gamma_log_params, conc_12_times)
# We can now plot the QQ plot for the exponential model.
p = mc.QQ_plot(conc_12_times, mc.gen_exponential, exp_mle, size = 10000,
axis_label = "Time to catastrophe (s)", title = "Exponential QQ-Plot", color = "#78c679")
p.background_fill_color = '#fafafa'
mc.bokeh.io.show(p)
# The QQ plot for the gamma model.
p2 = mc.QQ_plot(conc_12_times, mc.gen_gamma, gamma_mle, size = 10000,
axis_label = "Time to catastrophe (s)", title = "Gamma QQ-Plot",
color = "#78c679")
p2.background_fill_color = '#fafafa'
mc.bokeh.io.show(p2)
# We would also like to compare the ECDF of theoretical distribution to experimental for each model.
# Exponential distribution
p3 = mc.predictive_ecdf(conc_12_times, mc.gen_exponential, exp_mle, title = 'Exponential Distribution ECDF',
color = "green")
p3.background_fill_color = '#fafafa'
mc.bokeh.io.show(p3)
# Gamma distribution
p4 = mc.predictive_ecdf(conc_12_times, mc.gen_gamma, gamma_mle, title = 'Gamma Distribution ECDF',
color = "green")
p4.background_fill_color = '#fafafa'
mc.bokeh.io.show(p4)
# Since the AIC of the exponential distribution (9327.391952721562) is higher than that of the gamma distribution (9278.358353623804), the gamma model is preferred. Based on the plots for predictive ECDF, we see that the gamma distribution is more closely aligned with the experimental results. Similarly, from the QQ-plots, we can see that there is more separation of the generative quantiles from the observed quantiles in the exponential distribution model than the gamma distribution model. As a result, based on AIC (Akaike information criterion) and the QQ plots, the gamma distribution model is the better model.
# ### Parameter Estimates for the Other Tubulin Concentrations
# Now that we have a preferred model, we will obtain parameter estimates for the other tubulin concentrations using the gamma model.
# Get unique concentrations and take out 12uM (index 0), which was analyzed above.
all_concentrations = df_parsed["Concentration (uM)"].unique()
concentrations = mc.np.delete(all_concentrations, 0)
all_other_bs_reps = []
all_other_conf_int = []
all_other_mean = []
# Generate bootstrap replicates for the other tubulin concentrations using the
# gamma model (mle_iid_gamma), the model preferred by the AIC comparison above.
# Generate array of times to catastrophe for other concentrations of tubulin
for conc in concentrations:
    conc_times = df_parsed.loc[df_parsed["Concentration (uM)"] == conc,
                               'Time to catastrophe (s)'].values
    bs_reps = mc.draw_bs_reps(conc_times, mc.mle_iid_gamma, size=100)
    all_other_bs_reps.append(bs_reps)
    mean = mc.np.mean(bs_reps, axis = 0)
    all_other_mean.append(mean)
    # 95% bootstrap confidence interval for each parameter.
    conf_int = mc.np.percentile(bs_reps, [2.5, 97.5], axis=0)
    all_other_conf_int.append(conf_int)
    # NOTE(review): this message names the exponential-model parameters
    # (b_1, delta_b) but the replicates above are gamma MLEs — confirm
    # which parameterization is intended.
    print('The MLE for number of arrivals for a catastrophe to occur (b_1) and difference in b_1 and b_2 (delta_b) is respectively {} with a 95% confidence interval of \n{}\n'
          .format(mean, conf_int))
# From the plot of $\beta_1$ and $\Delta\beta$ of the different tubulin concentrations, we can see that higher tubulin concentrations seem to have lower $\beta_1$ values (1/(time to first catastrophe)). All the mean $\beta_1$values for the different tubulin concentrations, however, are all on the order of 1e-3. In general, by looking at the values of the parameters, we see that the $\Delta\beta$ values are on the order of 0 to 1e-4 or 1e-5, which means that $\beta_1$ and $\beta_2$ are often the same or are very similar in value, indicating that catastrophe times one after another are often very similar and may be dependent.
# %load_ext watermark
# %watermark -v -p MCAT_pkg,numpy,scipy,pandas,bokeh,iqplot,tqdm,jupyterlab
# Acknowledgements!
#
# We thank the publishers of Gardner et. al. for sharing their data, the BeBi103 TA’s for their guidance, the makers of Poole for this website template, <NAME> and <NAME> for design inspiration, and of course, <NAME> for his assistance, insight, and useful code!
| code/MCAT_Tutorial_Part2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.6.0
# language: julia
# name: julia-0.6
# ---
# # Quadratic Stiffness
#
# In this notebook we will explore the quadratic stiffness problem. References:
#
# The composite Euler method for stiff stochastic
# differential equations
#
# <NAME>, <NAME>
#
# And
#
# S-ROCK: CHEBYSHEV METHODS FOR STIFF STOCHASTIC
# DIFFERENTIAL EQUATIONS
#
# ASSYR ABDULLE AND <NAME>
#
# This is a scalar SDE with two arguments. The first controls the deterministic stiffness and the later controls the diffusion stiffness.
using DiffEqProblemLibrary, StochasticDiffEq, DiffEqDevTools
using Plots; gr()
prob = DiffEqProblemLibrary.generate_stiff_quad(50.0,1.0)
sol = solve(prob,SRIW1())
plot(sol)
prob = DiffEqProblemLibrary.generate_stiff_quad(500.0,1.0)
sol = solve(prob,SRIW1())
plot(sol)
# ## Top dts
#
# Let's first determine the maximum dts which are allowed. Anything higher is mostly unstable.
# ### Deterministic Stiffness Mild
prob = DiffEqProblemLibrary.generate_stiff_quad(50.0,1.0)
@time sol = solve(prob,SRIW1())
@time sol = solve(prob,SRIW1(),adaptive=false,dt=0.01)
@time sol = solve(prob,ImplicitRKMil(),dt=0.005)
@time sol = solve(prob,EM(),dt=0.01);
# ### Deterministic Stiffness High
prob = DiffEqProblemLibrary.generate_stiff_quad(500.0,1.0)
@time sol = solve(prob,SRIW1())
@time sol = solve(prob,SRIW1(),adaptive=false,dt=0.002)
@time sol = solve(prob,ImplicitRKMil(),dt=0.001)
@time sol = solve(prob,EM(),dt=0.002);
# ### Mixed Stiffness
prob = DiffEqProblemLibrary.generate_stiff_quad(5000.0,70.0)
@time sol = solve(prob,SRIW1(),dt=0.0001)
@time sol = solve(prob,SRIW1(),adaptive=false,dt=0.00001)
@time sol = solve(prob,ImplicitRKMil(),dt=0.00001)
@time sol = solve(prob,EM(),dt=0.00001);
# Notice that in this problem, the stiffness in the noise term still prevents the semi-implicit integrator to do well. In that case, the advantage of implicitness does not take effect, and thus explicit methods do well. When we don't care about the error, Euler-Maruyama is fastest. When there's mixed stiffness, the adaptive algorithm is unstable.
# ## Work-Precision Diagrams
# +
# Work-precision sweep on the mildly stiff problem (deterministic stiffness 50).
prob = DiffEqProblemLibrary.generate_stiff_quad(50.0,1.0)
# Relative tolerances 1e-3 .. 1e-5; absolute tolerances tied to them.
reltols = 1.0./10.0.^(3:5)
abstols = reltols#[0.0 for i in eachindex(reltols)]
# One adaptive SRIW1 run plus fixed-dt EM/RKMil/SRIW1 with dt = 8^-(k+1).
setups = [Dict(:alg=>SRIW1())
          Dict(:alg=>EM(),:dts=>1.0./8.0.^((1:length(reltols)) + 1))
          Dict(:alg=>RKMil(),:dts=>1.0./8.0.^((1:length(reltols)) + 1))
          Dict(:alg=>SRIW1(),:dts=>1.0./8.0.^((1:length(reltols)) + 1),:adaptive=>false)
          ]
names = ["SRIW1","EM","RKMil","SRIW1 Fixed"]
# Average over 10 runs; error measured in the l2 sense.
wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns=10,names=names,error_estimate=:l2)
plot(wp)
# +
prob = DiffEqProblemLibrary.generate_stiff_quad(500.0,1.0)
reltols = 1.0./10.0.^(3:5)
abstols = reltols#[0.0 for i in eachindex(reltols)]
setups = [Dict(:alg=>SRIW1())
Dict(:alg=>EM(),:dts=>1.0./8.0.^((1:length(reltols)) + 2))
Dict(:alg=>RKMil(),:dts=>1.0./8.0.^((1:length(reltols)) + 2))
Dict(:alg=>SRIW1(),:dts=>1.0./8.0.^((1:length(reltols)) + 2),:adaptive=>false)
]
names = ["SRIW1","EM","RKMil","SRIW1 Fixed"]
wp = WorkPrecisionSet(prob,abstols,reltols,setups;numruns=10,names=names,error_estimate=:l2,print_names=true)
plot(wp)
# -
# ## Conclusion
#
# Noise stiffness is tough. Right now the best solution is to run an explicit integrator with a low enough dt. Adaptivity does have a cost in this case, likely due to memory management.
| StiffSDE/QuadraticStiffness.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## pad and resize image
# +
import os
import cv2
from glob import glob
WHITE = [255,255,255]
BLACK = [0,0,0]
def padding_img(img, hw_ratio=2.0):
    """Pad an image with black borders so that height:width == hw_ratio.

    The image is first trimmed by one row/column if a dimension is odd, so
    the padding can be split evenly between the two sides.

    Args:
        img: image as a numpy array (as returned by cv2.imread).
        hw_ratio: target height/width ratio (default 2.0).

    Returns:
        The padded (or trimmed-only) image.
    """
    # Trim one row/column when a dimension is odd so padding splits evenly.
    if img.shape[0] % 2 == 1:
        img = img[:-1, :]
    if img.shape[1] % 2 == 1:
        img = img[:, :-1]
    # Re-read the shape AFTER trimming (the original captured it before,
    # so the pad computation could use a stale size).
    height, width = img.shape[:2]
    padded_image = img
    if height > width * hw_ratio:
        # Too tall: widen the image so that the new width is height/hw_ratio.
        # (The original padded (height - width*hw_ratio)/2 per side, which
        # overshoots the target width by a factor of hw_ratio.)
        pad_size = int((height / hw_ratio - width) / 2)
        padded_image = cv2.copyMakeBorder(img, 0, 0, pad_size, pad_size,
                                          cv2.BORDER_CONSTANT, value=BLACK)
    elif height < width * hw_ratio:
        # Too wide: grow the height to width*hw_ratio.
        pad_size = int((width * hw_ratio - height) / 2)
        padded_image = cv2.copyMakeBorder(img, pad_size, pad_size, 0, 0,
                                          cv2.BORDER_CONSTANT, value=BLACK)
    return padded_image
def padding_imgs(input_dir, hw_ratio=2.0, output_dir=None):
    """Pad every readable image in input_dir and write the results out.

    Args:
        input_dir: directory whose files are processed.
        hw_ratio: target height/width ratio, forwarded to padding_img.
        output_dir: destination directory; defaults to input_dir, in which
            case the originals are overwritten.
    """
    # Match every file, not only JPEGs.
    image_paths = glob(os.path.join(input_dir, "*"))
    if output_dir is None:
        output_dir = input_dir
    for image_path in image_paths:
        img = cv2.imread(image_path)
        if img is None:
            # Not a readable image (e.g. a stray non-image file); skip it
            # instead of crashing inside padding_img.
            continue
        padded_image = padding_img(img, hw_ratio)
        filename = os.path.basename(image_path)
        output_path = os.path.join(output_dir, filename)
        cv2.imwrite(output_path, padded_image)
# -
images_dir = "/home/liquid/src/Person_reID_baseline_pytorch/data/test0/"
output_dir = "/Users/ogura/Downloads/skinhead_test/padded/"
| resize.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Now You Code 3: Syracuse Historical Weather
#
# Write a program to prompt a user to input a date and time string, and then it should **report the temperature and weather conditions on that day and time for Syracuse, NY.**
#
# To accomplish this, use the Dark Sky Time Machine API, here: https://darksky.net/dev/docs/time-machine
#
# You're going to have to read about the API and understand how it works before you can write this program, which is the point of the exercise.
#
# The date and time that the user inputs must be in the following format: `YYYY-MM-DDThh:mm:ss`
#
# For example January 7, 2016 at 4:30pm would be: `2016-01-07T16:30:00`
#
# Be sure to use the GPS coordinates for Syracuse, NY which are (lat=43.048122,lng=-76.147424)
#
# Example Run (based on historical weather data for the requested date):
#
# ```
# Syracuse, NY Historical Weather
# Enter a date and time in the following format: YYYY-MM-DDThh:mm:ss => 2016-07-23T14:30:00
# On 2016-07-23T14:30:00 Syracuse, NY was Partly Cloudy with a temperature of 85
# ```
#
# ## Step 1: Problem Analysis
#
# Inputs: Date, year
#
# Outputs: weather on date
#
# Algorithm (Steps in Program):
#
# ```
# todo write algorithm here
# ```
#
import requests
import json
# NOTE(review): this URL is the Dark Sky *documentation page*, not the API
# endpoint. A Time Machine request should go to
# https://api.darksky.net/forecast/[key]/43.048122,-76.147424,[time]
# (Syracuse, NY coordinates, time in the URL path) — TODO fix.
url = 'https://darksky.net/dev/docs/time-machine'
# Expected input format: YYYY-MM-DDThh:mm:ss (e.g. 2016-07-23T14:30:00).
search = input('Enter a date and time in the following format: YYYY-MM-DDThh:mm:ss')
# NOTE(review): 'q'/'format' look copied from a different API; Dark Sky does
# not accept these query parameters — verify against the API docs.
options = { 'q' : search, 'format' : 'json'}
response = requests.get(url, params = options)
# Parse the HTTP response body as JSON.
geodata = response.json()
# ## Step 3: Questions
#
# 1. What happens when you enter `1/1/2017` as date input? Which error to you get? Fix the program in step 2 so that it handles this error.
#
# Answer:
#
#
# 2. Put your laptop in Airplane mode (disable the wifi) and then run the program. What happens? Fix the program in step 4 so that it handles this error.
#
# Answer:
#
#
# ## Step 4: Reflection
#
# Reflect upon your experience completing this assignment. This should be a personal narrative, in your own voice, and cite specifics relevant to the activity as to help the grader understand how you arrived at the code you submitted. Things to consider touching upon: Elaborate on the process itself. Did your original problem analysis work as designed? How many iterations did you go through before you arrived at the solution? Where did you struggle along the way and how did you overcome it? What did you learn from completing the assignment? What do you need to work on to get better? What was most valuable and least valuable about this exercise? Do you have any suggestions for improvements?
#
# To make a good reflection, you should journal your thoughts, questions and comments while you complete the exercise.
#
# Keep your response to between 100 and 250 words.
#
# `--== Write Your Reflection Below Here ==--`
#
#
| content/lessons/10/Now-You-Code/NYC3-Historical-Weather.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="g_nWetWWd_ns"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="2pHVBk_seED1"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + cellView="form" colab={} colab_type="code" id="N_fMsQ-N8I7j"
#@title MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# + [markdown] colab_type="text" id="pZJ3uY9O17VN"
# # Salvataggio e caricamento di modelli
# + [markdown] colab_type="text" id="M4Ata7_wMul1"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/tutorials/keras/save_and_load"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />Visualizza su TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/it/tutorials/keras/save_and_load.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Esegui in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/it/tutorials/keras/save_and_load.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />Visualizza il sorgente su GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/it/tutorials/keras/save_and_load.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Scarica il notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="BYzaKBe8YXg0"
# Note: Questi documenti sono stati tradotti dalla nostra comunità di TensorFlow. Poichè queste traduzioni sono *best-effort*, non è garantito che rispecchino in maniera precisa e aggiornata la [documentazione ufficiale in inglese](https://www.tensorflow.org/?hl=en).
# Se avete suggerimenti per migliorare questa traduzione, mandate per favore una pull request al repository Github [tensorflow/docs](https://github.com/tensorflow/docs).
# Per proporsi come volontari alla scrittura o alla review delle traduzioni della comunità contattate la
# [mailing list <EMAIL>](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs).
# + [markdown] colab_type="text" id="mBdde4YJeJKF"
# I cambiamenti dei modelli durante-edopo-l'addestramento possono essere salvati. Ciò significa che un modello può riprendere da dove aveva lasciato, ed evitare lunghi tempi di addestramento. Il salvataggio significa anche che potete condividere il vostro modello ed altri possono ricreare il vostro lavoro. La maggioranza di chi lavora nel machine learning, quando pubblica modelli e tecniche di ricerca, condivide:
#
# * il codice per creare il modello, and
# * i trained weights, o i parametri, per il modello
#
# Condividere questi dati aiuta gli altri a capire come lavora il modello e provarlo in autonomia con nuovi dati.
#
# Attenzione: Siate cauti con il codice non fidato—i modelli TensorFlow sono codice. Per i dettagli vedere [Uso sicuro di TensorFlow](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md).
#
# ### Opzioni
#
# Ci sono diversi modi per salvare i modelli TensorFlow—a seconda delle API che state usando. Questa guida usa [tf.keras](https://www.tensorflow.org/guide/keras), un'API di alto livello per costruire e addestrare modelli in TensorFlow. Per altri approcci, vedere la guida [Salvataggio e Ripristino](https://www.tensorflow.org/guide/saved_model) di TensorFlow o [Salvataggio come eager](https://www.tensorflow.org/guide/eager#object-based_saving).
# + [markdown] colab_type="text" id="xCUREq7WXgvg"
# ## Setup
#
# ### Installazioni e importazioni
# + [markdown] colab_type="text" id="7l0MiTOrXtNv"
# Installare e importare TensorFlow e dipendenze:
# + colab={} colab_type="code" id="RzIOVSdnMYyO"
try:
# # %tensorflow_version only exists in Colab.
# %tensorflow_version 2.x
except Exception:
pass
# !pip install pyyaml h5py # Required to save models in HDF5 format
# + colab={} colab_type="code" id="7Nm7Tyb-gRt-"
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import tensorflow as tf
from tensorflow import keras
print(tf.version.VERSION)
# + [markdown] colab_type="text" id="SbGsznErXWt6"
# ### Procuriamoci un dataset di esempio
#
# Per dimostrare come salvare e caricare weights, usiamo il [dataset MNIST](http://yann.lecun.com/exdb/mnist/). Per velocizzare il processo, usiamo i primi 1000 esempi:
# + colab={} colab_type="code" id="9rGfFwE9XVwz"
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_labels = train_labels[:1000]
test_labels = test_labels[:1000]
train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0
test_images = test_images[:1000].reshape(-1, 28 * 28) / 255.0
# + [markdown] colab_type="text" id="anG3iVoXyZGI"
# ### Definiamo un modello
# + [markdown] colab_type="text" id="wynsOBfby0Pa"
# Cominciamo costruendo un semplice modello sequenziale:
# + colab={} colab_type="code" id="0HZbJIjxyX1S"
# Define a simple sequential model
def create_model():
    """Build and compile a small dense classifier for flattened 28x28 MNIST digits.

    Architecture: a 512-unit ReLU layer, 20% dropout, and a 10-way logits
    layer. Compiled with Adam and sparse categorical cross-entropy on logits.
    """
    layers = [
        keras.layers.Dense(512, activation='relu', input_shape=(784,)),
        keras.layers.Dropout(0.2),
        keras.layers.Dense(10),
    ]
    model = tf.keras.models.Sequential(layers)
    model.compile(
        optimizer='adam',
        loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'],
    )
    return model
# Create a basic model instance
model = create_model()
# Display the model's architecture
model.summary()
# + [markdown] colab_type="text" id="soDE0W_KH8rG"
# ## Salvare i punti di controllo durante l'addestramento
# + [markdown] colab_type="text" id="mRyd5qQQIXZm"
# Potete usare un modello addestrato senza doverlo addestrare di nuovo, o riprendere l'addestramento da dove avevate lasciato, nel caso il processo di addestramento sia stato interrotto. La callback `tf.keras.callbacks.ModelCheckpoint` permette di salvare il modello continuamente sia *durante* sia *alla fine* dell'addestramento.
#
# ### Utilizzo della callback Checkpoint
#
# Creiamo una callback `tf.keras.callbacks.ModelCheckpoint` che salvi i weights solo durante l'addestramento:
# + colab={} colab_type="code" id="IFPuhwntH8VH"
checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Create a callback that saves the model's weights
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True,
verbose=1)
# Train the model with the new callback
model.fit(train_images,
train_labels,
epochs=10,
validation_data=(test_images,test_labels),
callbacks=[cp_callback]) # Pass callback to training
# This may generate warnings related to saving the state of the optimizer.
# These warnings (and similar warnings throughout this notebook)
# are in place to discourage outdated usage, and can be ignored.
# + [markdown] colab_type="text" id="rlM-sgyJO084"
# Ciò crea un'unica collezione di file con i punti di controllo di TensorFlow che vengono salvati alla fine di ogni epoca:
# + colab={} colab_type="code" id="gXG5FVKFOVQ3"
# !ls {checkpoint_dir}
# + [markdown] colab_type="text" id="wlRN_f56Pqa9"
# Creiamo un nuovo modello, non addestrato. Quando ripristinate un modello dai soli weight, dovete avere un modello con la stessa architettura dell'originale. Dato che l'architettura del modello è la stessa, potete condividere gli weight anche se si tratta di una diversa *istanza* del modello.
#
# Ora ricostruiamo una nuova istanza, non addestrata, del modello e valutiamola sull'insieme di test. Un modello non addestrato avrà prestazioni di livello basso (~10% di accuratezza):
# + colab={} colab_type="code" id="Fp5gbuiaPqCT"
# Create a basic model instance
model = create_model()
# Evaluate the model
loss, acc = model.evaluate(test_images, test_labels, verbose=2)
print("Untrained model, accuracy: {:5.2f}%".format(100*acc))
# + [markdown] colab_type="text" id="1DTKpZssRSo3"
# Ora carichiamo i pesi dal punto di controllo e valutiamo di nuovo:
# + colab={} colab_type="code" id="2IZxbwiRRSD2"
# Loads the weights
model.load_weights(checkpoint_path)
# Re-evaluate the model
loss,acc = model.evaluate(test_images, test_labels, verbose=2)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
# + [markdown] colab_type="text" id="bpAbKkAyVPV8"
# ### Opzioni di callback del punto di controllo
#
# La callback mette a disposizione diverse opzioni per dare un nome univoco ai punti di controllo e regolare la loro frequenza.
#
# Addestriamo un nuovo modello, e salviamo i checkpoint con un nome univoco ogni cinque epoche:
# + colab={} colab_type="code" id="mQF_dlgIVOvq"
# Include the epoch in the file name (uses `str.format`)
checkpoint_path = "training_2/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Create a callback that saves the model's weights every 5 epochs
cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path,
verbose=1,
save_weights_only=True,
period=5)
# Create a new model instance
model = create_model()
# Save the weights using the `checkpoint_path` format
model.save_weights(checkpoint_path.format(epoch=0))
# Train the model with the new callback
model.fit(train_images,
train_labels,
epochs=50,
callbacks=[cp_callback],
validation_data=(test_images,test_labels),
verbose=0)
# + [markdown] colab_type="text" id="1zFrKTjjavWI"
# Ora, guardiamo i punti di controllo che abbiamo ottenuto e prendiamo l'ultimo:
# + colab={} colab_type="code" id="p64q3-V4sXt0"
# !ls {checkpoint_dir}
# + colab={} colab_type="code" id="1AN_fnuyR41H"
latest = tf.train.latest_checkpoint(checkpoint_dir)
latest
# + [markdown] colab_type="text" id="Zk2ciGbKg561"
# Notare: che il formato standard di tensorflow salva solo i 5 punti di controllo più recenti.
#
# Per verificare, riavviamo il modello e carichiamo il checkpoint più recente:
# + colab={} colab_type="code" id="3M04jyK-H3QK"
# Create a new model instance
model = create_model()
# Load the previously saved weights
model.load_weights(latest)
# Re-evaluate the model
loss, acc = model.evaluate(test_images, test_labels, verbose=2)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
# + [markdown] colab_type="text" id="c2OxsJOTHxia"
# ## Cosa sono questi file?
# + [markdown] colab_type="text" id="JtdYhvWnH2ib"
# Il codice di cui sopra memorizza i pesi in una collezione di file formattati come [checkpoint](https://www.tensorflow.org/guide/saved_model#save_and_restore_variables)-che contengono solo i pesi addestrati in un formato binario. I checkpoint contengono:
# * Uno o più blocchi che contengono i pesi del nostro modello.
# * Un file indice che indica quali pesi sono memorizzati in un determinato blocco.
#
# Se state solo addestrando un modello su una singola macchina, avrete un blocco con il suffisso: `.data-00000-of-00001`
# + [markdown] colab_type="text" id="S_FA-ZvxuXQV"
# ## Salvare i pesi manualmente
#
# Avete visto come caricare i pesi in un modello. E' semplice salvarli manualmente con il metodo `Model.save_weights`. Per default, in particolare-`tf.keras`—and `save_weights` —usa il formato TensorFlow [checkpoint](../../guide/checkpoint.ipynb) con estensione `.ckpt` (il salvataggio in [HDF5](https://js.tensorflow.org/tutorials/import-keras.html) con estensione `.h5` è trattato nella guida[Salvare e serializzare i modelli](../../guide/keras/save_and_serialize#weights-only_saving_in_savedmodel_format)):
# + colab={} colab_type="code" id="R7W5plyZ-u9X"
# Save the weights
model.save_weights('./checkpoints/my_checkpoint')
# Create a new model instance
model = create_model()
# Restore the weights
model.load_weights('./checkpoints/my_checkpoint')
# Evaluate the model
loss,acc = model.evaluate(test_images, test_labels, verbose=2)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
# + [markdown] colab_type="text" id="kOGlxPRBEvV1"
# ## Salvare l'intero modello
#
# Per salvare l'architettura di un modello, i pesi, e la configurazione di addestramento in un singolo file/cartella, occorre chiamare [`model.save`](https://www.tensorflow.org/api_docs/python/tf/keras/Model#save). Ciò vi permette di esportare un modello in modo che possa essere usato senza accedere al codice* Python originale. Dato che viene ripristinato lo stato dell'ottimizzatore, potrete riprendere l'addestramento esattamente da dove l'avevate lasciato.
#
# Un intero modello può essere salvato in due formati di file diversi (SavedModel e HDF5). Occorre notare che il formato TensorFlow SavedModel è il default in TF2.x. Comunque, un modello può essere salvato anche in formato HDF5. Maggiori dettagli sul salvataggio dei modelli nei due formati sono dati in seguito.
#
# Il salvataggio di un modello completamente funzionante è molto utile—lo potete caricare in un TensorFlow.js ([HDF5](https://js.tensorflow.org/tutorials/import-keras.html), [Modello Salvato](https://js.tensorflow.org/tutorials/import-saved-model.html)) e poi addestrarlo ed eseguirlo in un browser web, o convertirlo per eseguirlo su un dispositivo mobile usando TensorFlow Lite ([HDF5](https://www.tensorflow.org/lite/convert/python_api#exporting_a_tfkeras_file_), [Modello Salvato](https://www.tensorflow.org/lite/convert/python_api#exporting_a_savedmodel_))
#
# \*Oggetti personalizzati (es. modelli specializzati o livelli) richiedono particolare attenzione durante il salvataggio e il caricamento. Vedere la sezione **Salvataggio di oggetti personalizzati** più sotto
# + [markdown] colab_type="text" id="SkGwf-50zLNn"
# ### formato HDF5
#
# Keras fornisce un formato base di salvataggio che usa lo standard [HDF5](https://en.wikipedia.org/wiki/Hierarchical_Data_Format).
# + colab={} colab_type="code" id="m2dkmJVCGUia"
# Create and train a new model instance.
model = create_model()
model.fit(train_images, train_labels, epochs=5)
# Save the entire model to a HDF5 file.
# The '.h5' extension indicates that the model should be saved to HDF5.
model.save('my_model.h5')
# + [markdown] colab_type="text" id="GWmttMOqS68S"
# Ora, ri-creiamo il modello dal file:
# + colab={} colab_type="code" id="5NDMO_7kS6Do"
# Recreate the exact same model, including its weights and the optimizer
new_model = tf.keras.models.load_model('my_model.h5')
# Show the model architecture
new_model.summary()
# + [markdown] colab_type="text" id="JXQpbTicTBwt"
# E controlliamo la sua accuratezza:
# + colab={} colab_type="code" id="jwEaj9DnTCVA"
loss, acc = new_model.evaluate(test_images, test_labels, verbose=2)
print('Restored model, accuracy: {:5.2f}%'.format(100*acc))
# + [markdown] colab_type="text" id="dGXqd4wWJl8O"
# Questa tecnica salva ogni cosa:
#
# * I valori dei pesi
# * L'architettura del modello
# * La configurazione di addestramento del modello (ciò che avete passato al compilatore)
# * L'ottimizzatore ed il suo stato, se ce n'è uno (questo è ciò che vi permette di riprendere l'addestramento da dove l'avete lasciato)
#
# Keras salva i modelli ispezionando l'architettura. Al momento, esso non è in grado di salvare gli ottimizzatori 'v1.x' (da `tf.compat.v1.train`) in quanto essi non sono compatibili con i punti di controllo. Quando li doveste usare, avrete bisogno di ri-compilare il modello dopo il caricamento, e perderete lo stato dell'ottimizzatore.
#
# + [markdown] colab_type="text" id="kPyhgcoVzqUB"
# ### formato SavedModel
# + [markdown] colab_type="text" id="LtcN4VIb7JkK"
# Il formato SavedModel è un'altro modo di serializzare i modelli. I modelli salvati in questo formato possono essere ripristinati usando `tf.keras.models.load_model` e sono compatibili con TensorFlow Serving. La [guida SavedModel](https://www.tensorflow.org/guide/saved_model) scende nei dettagli su come utilizzare/ispezionare il SavedModel. La sezione che segue illustra i passi di salvataggio e recupero del modello.
# + colab={} colab_type="code" id="sI1YvCDFzpl3"
# Create and train a new model instance.
model = create_model()
model.fit(train_images, train_labels, epochs=5)
# Save the entire model as a SavedModel.
# !mkdir -p saved_model
model.save('saved_model/my_model')
# + [markdown] colab_type="text" id="iUvT_3qE8hV5"
# Il formato SavedModel è una directory che contiene un binario protobuf ed un checkpoint Tensorflow. Osserviamo la directory del modello salvato:
# + colab={} colab_type="code" id="sq8fPglI1RWA"
# my_model directory
# !ls saved_model
# Contains an assets folder, saved_model.pb, and variables folder.
# !ls saved_model/my_model
# + [markdown] colab_type="text" id="B7qfpvpY9HCe"
# Carichiamo un nuovo modello Keras dal modello salvato:
# + colab={} colab_type="code" id="0YofwHdN0pxa"
new_model = tf.keras.models.load_model('saved_model/my_model')
# Check its architecture
new_model.summary()
# + [markdown] colab_type="text" id="uWwgNaz19TH2"
# Il modello ripristinato viene compilato con gli stessi argomenti del modello originale. Proviamo ad eseguire, valutare e predire con il modello caricato:
# + colab={} colab_type="code" id="Yh5Mu0yOgE5J"
# Evaluate the restored model
loss, acc = new_model.evaluate(test_images, test_labels, verbose=2)
print('Restored model, accuracy: {:5.2f}%'.format(100*acc))
print(new_model.predict(test_images).shape)
# + [markdown] colab_type="text" id="kAUKJQyGqTNH"
# ### Salvataggio di oggetti personalizzati
#
# Se usate il formato SavedModel potete saltare questa sezione. La differenza chiave tra HDF5 e SavedModel è che HDF5 usa gli oggetti config per salvare l'architettura del modello, mentre SavedModel salva il grafo di esecuzione. Così, i SavedModel sono in grado di salvare oggetti personalizzati come modelli derivati per specializzazione e livelli personalizzati senza bisogno del codice originale.
#
# Per salvare oggetti personalizzati in HDF5, dovete:
#
# 1. Definire un metodo `get_config` nel vostro oggetto, e facoltativamente un classmethod `from_config`.
# * `get_config(self)` ritorna un dizionario JSON-serializzabile dei parametri necessari per ri-creare l'oggetto.
# * `from_config(cls, config)` usa il config restituito da `get_config` per creare un nuovo oggetto. Per default, questa funzione userà il config come kwargs di inizializzazione (`return cls(**config)`).
# 2. Passare l'oggetto come argomento `custom_objects` al caricamento del modello. L'argomento deve essere un dizionario che mappi la stringa del nome della classe name nella classe Python. Es. `tf.keras.models.load_model(path, custom_objects={'CustomLayer': CustomLayer})`
#
# Vedere il tutorial [Scrivere livelli e modelli da zero](https://www.tensorflow.org/guide/keras/custom_layers_and_models) per esempi di oggetti personalizzati e `get_config`.
#
| site/it/tutorials/keras/save_and_load.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="FNUeXnA1JpH0"
##GENERAL
import time
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import random
from collections import Counter
import csv
import re
import os
##SPACY
import spacy
nlp = spacy.load('en_core_web_sm')
#nlp = spacy.load('en_core_web_md')
#nlp = spacy.load('en_core_web_lg')
from spacy.matcher import Matcher
from spacy.matcher import PhraseMatcher
from spacy.tokens import Span
from spacy.pipeline import SentenceSegmenter
from spacy import displacy
import tensorflow as tf
from tensorflow.keras.models import Model
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten, Embedding, LSTM, Bidirectional, GlobalMaxPool1D, Input,BatchNormalization, Dropout, SpatialDropout1D, Conv1D, concatenate
from tensorflow.keras.callbacks import EarlyStopping,ModelCheckpoint
##SKLEARN
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.decomposition import NMF
# + id="Jaa0W1hkNcyk" outputId="b7feef87-0270-4249-85de-e016f55b1fff" colab={"base_uri": "https://localhost:8080/", "height": 51}
from google.colab import drive
drive.mount('/gdrive')
# %cd /gdrive
# + [markdown] id="YWMb3KP7dPx3"
# ##Loading in the data
# + id="z0sXPj5gNDe2" outputId="b6e7d0f6-d025-4de5-c4e7-27765ea309bc" colab={"base_uri": "https://localhost:8080/", "height": 221}
data = pd.read_csv('/gdrive/My Drive/data/amazon_products.csv')
data = data.drop(columns=['Unnamed: 0'])
data.info()
# + id="e2uzSx1-N3b1" outputId="d79d47f9-522a-4f72-8a86-2389268e462d" colab={"base_uri": "https://localhost:8080/", "height": 85}
data['review_category'].value_counts()
# + [markdown] id="KA0JP30idTUg"
# ##Creating the Text to Categorical Columns
# + id="EP22QKREbuBq" outputId="183ce344-940f-4434-b5cf-d81fd385979a" colab={"base_uri": "https://localhost:8080/", "height": 204}
data['topic_name']=data['topic_name'].astype('category')
data['topic_name'].cat.categories
data['topic_category_codes']=data['topic_name'].cat.codes.values
data = data.drop(columns=['review','review_category','topic_category','topic_name'])
data.head()
# + id="H6QRgZWblcsr" outputId="fe02ce88-0c08-45bb-f549-f7bb4a1a8544" colab={"base_uri": "https://localhost:8080/", "height": 136}
data['topic_category_codes'].value_counts()
# + id="4OB3XdkrfNBW"
sentences = data['clean_review'].values
labels = data['topic_category_codes'].values
# + [markdown] id="rZkfPvUOdWG-"
# ##Declaring Variables
# + id="diDFczpuWqme"
# some configuration
MAX_SEQUENCE_LENGTH = 75
MAX_VOCAB_SIZE = 20000
EMBEDDING_DIM = 50
VALIDATION_SPLIT = 0.2
BATCH_SIZE = 128
EPOCHS = 80
output_dir = '/gdrive/My Drive/amazon_review_full_csv'
# + [markdown] id="OYDSz0S0dZKc"
# ##Creating Train and Test Dataset
# + id="yGBRKkyezy0d"
df_train, df_test, Ytrain, Ytest = train_test_split(sentences, labels, test_size=0.20)
# + [markdown] id="SePnUXDpdcIC"
# ##Tokenization and Padding
# + id="bmRdN2kOWSe6"
# convert the sentences (strings) into integers
# filters='' disables Keras' default punctuation stripping -- the text is
# assumed to be pre-cleaned (the 'clean_review' column).
tokenizer = Tokenizer(num_words=MAX_VOCAB_SIZE, filters='')
tokenizer.fit_on_texts(df_train)  # vocabulary is built from the training split only
sequences_train = tokenizer.texts_to_sequences(df_train)
sequences_test = tokenizer.texts_to_sequences(df_test)
# + id="82rDMqR9Ww3I" outputId="975fda4f-0e04-4be4-d5c9-bbde63bf98bd" colab={"base_uri": "https://localhost:8080/", "height": 68}
# Sanity checks on the id<->word mappings (ids start at 1; 0 is reserved for padding).
idx2word = tokenizer.index_word
#word2idx = {k:v for v,k in idx2word.items()}
word2idx = tokenizer.word_index
print('Found %s unique tokens.' % len(word2idx))
print(word2idx['instead'])
print(idx2word[157])
# + id="gRbC1vyFXDgz" outputId="cffc0876-c42a-45f5-c38c-de502122945e" colab={"base_uri": "https://localhost:8080/", "height": 68}
seq_len = [len(sent) for sent in sequences_train]
max_seq_len = max(seq_len)
print(f'maximum sequencse length {max_seq_len}')
# Pre-pad/truncate every sequence to MAX_SEQUENCE_LENGTH (Keras default is 'pre').
padded_data_train = pad_sequences(sequences_train, maxlen=MAX_SEQUENCE_LENGTH)
print('Shape of data tensor:', padded_data_train.shape)
padded_data_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
print('Shape of data test tensor:', padded_data_test.shape)
# + [markdown] id="TZ2sZJCQdfZR"
# ##Creating the Embedding Matrix using Glove() pretrained vectors
# + id="EWV5A5oBXWrN" outputId="0a8fbbf8-75e1-46b8-a50d-7273cf4038cd" colab={"base_uri": "https://localhost:8080/", "height": 51}
# load in pre-trained word vectors
print('Loading word vectors...')
word2vec = {}
# Each GloVe line is "<word> <d1> ... <d50>"; build word -> 50-d float32 vector.
with open(os.path.join('/gdrive/My Drive/data/glove.6B.%sd.txt' % EMBEDDING_DIM)) as f:
    for line in f:
        values = line.split()
        word = values[0]
        value = np.asarray(values[1:] , dtype='float32')
        word2vec[word]=value
print(len(word2vec))
# + id="edEO2OAtuCgf" outputId="74af8e66-fdf0-4f68-c35b-b6bb9d15ab90" colab={"base_uri": "https://localhost:8080/", "height": 34}
# prepare embedding matrix
#print('Filling pre-trained embeddings...')
# Row i holds the GloVe vector for token id i. Row 0 (padding) and any
# vocabulary word missing from GloVe stay all-zero.
num_words = min(MAX_VOCAB_SIZE, len(word2idx) + 1)
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
for word,i in word2idx.items():
    #print(word)
    #print(i)
    if i < MAX_VOCAB_SIZE:
        embedding_vector= word2vec.get(word)
        if embedding_vector is not None:
            embedding_matrix[i]=embedding_vector
print(embedding_matrix.shape)
# + id="P1dvTQX2uS5e"
# Frozen embedding layer initialised with the GloVe matrix
# (trainable=False keeps the pre-trained vectors fixed during training).
embedding_layer = Embedding(
    num_words,
    EMBEDDING_DIM,
    weights=[embedding_matrix],
    input_length=MAX_SEQUENCE_LENGTH,
    trainable=False
)
# + id="NhPg_xk_jjep" outputId="9be008c5-afe1-48ec-8748-5205dae426aa" colab={"base_uri": "https://localhost:8080/", "height": 34}
type(df_train)
# + [markdown] id="v5R5iNy-djj6"
# ##Model Architecture
# + id="goJpb5LevYZI"
# Multi-kernel text CNN: three parallel Conv1D branches (kernel sizes 20/10/5)
# over dropout-regularised frozen GloVe embeddings, each reduced with global
# max pooling, concatenated, then a regularised dense layer and a 6-way
# logit output (the loss below uses from_logits=True).
input_ = Input(shape=(MAX_SEQUENCE_LENGTH,))
embedded = embedding_layer(input_)
regularised = SpatialDropout1D(.2, name='drop_embed')(embedded)
pooled = []
for branch, kernel_size in enumerate((20, 10, 5), start=1):
    features = Conv1D(256, kernel_size, strides=1, activation='relu')(regularised)
    pooled.append(GlobalMaxPool1D(name='maxp_%d' % branch)(features))
merged = concatenate(pooled)
hidden = Dense(64,
               kernel_initializer=tf.keras.initializers.glorot_normal(seed=None),
               kernel_regularizer=tf.keras.regularizers.l1(0.01),
               activity_regularizer=tf.keras.regularizers.l2(0.05),
               activation='relu')(merged)
# Plain dropout (the original variable name "batch_norm" was misleading --
# there is no batch normalisation in this model).
hidden_dropped = tf.keras.layers.Dropout(0.2)(hidden)
output = Dense(6)(hidden_dropped)  # raw logits for the 6 topic classes
model = Model(input_, output)
# + id="hWZb1TTSvd0l" outputId="997360e8-3c75-4895-fe72-d6a896962a72" colab={"base_uri": "https://localhost:8080/", "height": 646}
model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer=tf.keras.optimizers.Adam(lr=0.0001),
    metrics=['accuracy']
)
print(model.summary())
# + id="d_ZExn30GD8j"
#early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=25)
# Save the best (lowest val_loss) model seen so far to Drive.
checkpoint = ModelCheckpoint("/content/drive/My Drive/models/amazon_covnet.h5",
                             monitor="val_loss",
                             mode="min",
                             save_best_only = True,
                             verbose=1)
# Stop when val_loss has not improved for 20 epochs and roll back to the best
# weights. NOTE(review): patience=20 can never trigger with the 10-epoch fit below.
earlystop = EarlyStopping(monitor = 'val_loss',
                          min_delta = 0,
                          patience = 20,
                          verbose = 1,
                          restore_best_weights = True)
# we put our call backs into a callback list
callbacks = [earlystop, checkpoint]
# + [markdown] id="yIbwV6fddqfI"
# ##Model Training
# + id="ZaKVQlKTyAlm" outputId="0cea89cb-67b7-4760-e66c-b8c3c2d742b4" colab={"base_uri": "https://localhost:8080/", "height": 717}
# Train for (at most) 10 epochs; the held-out split doubles as validation data.
# `r.history` (per-epoch metric lists) is used for the plots below.
r = model.fit(padded_data_train,
              Ytrain,
              batch_size=64,
              epochs=10,
              callbacks=callbacks,
              validation_data=(padded_data_test, Ytest)
              )
# + [markdown] id="iiueW318dudK"
# ##Plotting
# + id="m5_wTRUMTnnZ"
# All four curves are plotted against the same x-axis: one point per epoch
# actually run (each History list has one entry per completed epoch). The
# original built four identical list comprehensions; build the axis once.
epoch_axis = list(range(len(r.history['loss'])))
x1 = epoch_axis
x2 = epoch_axis
x3 = epoch_axis
x4 = epoch_axis
y1 = r.history['loss']
y2 = r.history['val_loss']
y3 = r.history['accuracy']
y4 = r.history['val_accuracy']
# + id="oSvjzyysXBpv"
def configure_plotly_browser_state():
    """Inject the require.js configuration Colab needs to render Plotly inline."""
    import IPython
    display(IPython.core.display.HTML('''
        <script src="/static/components/requirejs/require.js"></script>
        <script>
          requirejs.config({
            paths: {
              base: '/static/base',
              plotly: 'https://cdn.plot.ly/plotly-latest.min.js?noext',
            },
          });
        </script>
        '''))
# Re-inject the config before every cell execution so later plots still render.
get_ipython().events.register('pre_run_cell', configure_plotly_browser_state)
# + [markdown] id="xZulD4BNdxwl"
# ##Training and Validation Loss
# + id="46rxL8XGTf3-" outputId="ba5b06a1-d771-4953-f87e-0053e9c161b0" colab={"base_uri": "https://localhost:8080/", "height": 617}
import plotly.graph_objects as go
# Interactive train-vs-validation loss curves, one marker per epoch.
fig = go.Figure()
#fig.add_trace(go.Scatter(x=[x for x in range(len(r.history['loss']))], y=r.history['loss'] , mode='lines', name='VOLUME',line=dict(color='#00b8b8', width=2)))
fig.add_trace(go.Scatter(x=x1, y=y1, mode='lines+markers', name='train_loss', line=dict(color='#00b8b8', width=4)))
fig.add_trace(go.Scatter(x=x2, y=y2, mode='lines+markers', name='val_loss', line=dict(color='#e4bd0b', width=4)))
fig.update_layout(width=800,
                  height=600, legend=dict(x=.70, y=0.85, traceorder='reversed', font_size=16),
                  yaxis=dict(
                      title="Training and Validation Loss",
                      titlefont=dict(
                          color="#1f77b4"
                      ),
                      tickfont=dict(
                          color="#1f77b4"
                      )
                  ),)
fig.show()
# + [markdown] id="VkobuJaBd1xi"
# ##Training and Validation Accuracy
# + id="9SwP0n7iXIuK" outputId="9af33adb-1fe1-436f-8172-2580d17f6cd9" colab={"base_uri": "https://localhost:8080/", "height": 617}
import plotly.graph_objects as go
# Interactive train-vs-validation accuracy curves, one marker per epoch.
fig = go.Figure()
#fig.add_trace(go.Scatter(x=[x for x in range(len(r.history['loss']))], y=r.history['loss'] , mode='lines', name='VOLUME',line=dict(color='#00b8b8', width=2)))
fig.add_trace(go.Scatter(x=x3, y=y3, mode='lines+markers', name='train_accuracy', line=dict(color='#00b8b8', width=4)))
fig.add_trace(go.Scatter(x=x4, y=y4, mode='lines+markers', name='val_accuracy', line=dict(color='#e4bd0b', width=4)))
fig.update_layout(width=800,
                  height=600, legend=dict(x=.70, y=0.85, traceorder='reversed', font_size=16),
                  yaxis=dict(
                      title="Training and Validation Accuracy",
                      titlefont=dict(
                          color="#1f77b4"
                      ),
                      tickfont=dict(
                          color="#1f77b4"
                      )
                  ),)
fig.show()
# + [markdown] id="WeDSdYUfd5Oc"
# ##Saving the Model
# + id="pPdlRX3dgvKk" outputId="5a2f56f2-0b41-4332-d5f1-5bba8843e132" colab={"base_uri": "https://localhost:8080/", "height": 17}
# Persist the full model (architecture + weights + optimizer state) to Drive.
model.save('/gdrive/My Drive/models/amazon_covnet.h5')
# + [markdown] id="-XKKLUdcd8Ni"
# ##Model Predictions
# + id="25BGd5TKgFmM" outputId="c623ed57-18e0-4c69-be27-d721e8156279" colab={"base_uri": "https://localhost:8080/", "height": 17}
#from tensorflow.keras.models import load_model
#bilstm = load_model('/content/drive/My Drive/models/amazon_bilstm.h5')
# argmax over the 6 logits turns the (n_samples, 6) score matrix into class ids.
y_hat = model.predict(padded_data_test)
y_hat = np.argmax(y_hat, axis=1)
# + id="8LBNPKXdixCa" outputId="19e5b08b-72bb-460b-a957-1fc9b7f483c0" colab={"base_uri": "https://localhost:8080/", "height": 17}
from sklearn.metrics import confusion_matrix
import itertools
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    # Optionally convert raw counts into per-true-class fractions.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    positions = np.arange(len(classes))
    plt.xticks(positions, classes, rotation=45)
    plt.yticks(positions, classes)
    cell_fmt = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    # Annotate every cell; white text on dark (high-count) cells for contrast.
    for (row, col), value in np.ndenumerate(cm):
        plt.text(col, row, format(value, cell_fmt),
                 horizontalalignment="center",
                 color="white" if value > cutoff else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
# + [markdown] id="0CXwtF-jeAuT"
# ##Model Performance
# + id="MHe827GYi7sn" outputId="26155dab-a038-412b-ee24-30e9afda59da" colab={"base_uri": "https://localhost:8080/", "height": 430}
# Confusion matrix on the test split: rows = true topic code, cols = predicted.
cm = confusion_matrix(Ytest, y_hat)
plot_confusion_matrix(cm, [0, 1, 2,3,4,5])
| Natural-Language-Processing/TF2-Classification/NLP_Part5_Multi_Class_Text_Classification_Covnet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/VirtualGoat/Twitter-Data-Mining/blob/master/ci.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="l2UQxGsTL3EI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 92} outputId="5de0aa59-d98e-41b9-cdfc-c2ece28bfd55"
#Accessing the data that has been stored.
from google.colab import drive
import pickle
# import GetOldTweets3 as got
drive.mount('/content/drive')
DATA_PATH = "/content/drive/My Drive/Colab Notebooks/Internship/Tweet Data/New/Thane/Passive"
# Unpickle the previously crawled accounts -- presumably a dict keyed by
# username (the crawl loop below iterates .items()); verify against the
# notebook that produced passiveupdated.pickle.
users4=open(DATA_PATH+'/passiveupdated.pickle','rb')
real_tweets3=pickle.load(users4)
users4.close()
print(len(real_tweets3))
print(real_tweets3)
lisi=list()  # NOTE(review): never used afterwards
# + id="QXvT0R0uMeii" colab_type="code" colab={}
import tweepy
# Twitter API credentials -- blanked out; fill in your own before running.
# NOTE(review): never commit real keys to source control.
consumer_key=''
consumer_secret= ''
access_token=''
access_token_secret=''
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# wait_on_rate_limit=True blocks until the window resets instead of raising.
api = tweepy.API(auth,wait_on_rate_limit=True)
# + id="SuOlr9G_Mfqe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="fd8ab413-01a4-4657-b5cb-a8e4e4e87638"
import re
# For every crawled account, fetch its profile and scan the bio (description)
# for contact details: e-mail addresses and Indian 10-digit mobile numbers
# (leading 7/8/9). Results are collected per username in `d` (an account that
# was fetched but yielded nothing gets an empty list).
j=0
d=dict()
for i,k in real_tweets3.items():
    print(j)  # progress counter
    ci=list()
    j=j+1
    try:
        u=api.get_user(i)
        # print("Crawling account:", u.screen_name)
        stat=u.description
        # Raw strings so '\S' / '\d' are regex escapes, not (deprecated)
        # Python string escapes.
        lst = re.findall(r'\S+@\S+', stat)
        ph = re.findall(r'[789]\d{9}', stat)
        if len(lst)>0:
            print(lst)
            ci.append(lst)
        if len(ph)>0:
            print(ph)
            ci.append(ph)
        d[i]=ci
    # Skip accounts the API refuses (suspended/private users, tweepy errors).
    # Catching Exception instead of a bare `except:` keeps that behaviour
    # without also swallowing KeyboardInterrupt/SystemExit.
    except Exception:
        continue
# + id="6i7YTRXTMigd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} outputId="b46c119c-043a-41a4-a3b0-2102950e69a8"
print(d)
# + id="PqzOEvALMi8O" colab_type="code" colab={}
# Wrap each username's contact list in a pandas Series so lists of
# different lengths can become DataFrame columns.
c=dict()
import pandas as pd
# + id="VstOO-UtMk6D" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 166} outputId="f5683dc3-de68-48e4-bd40-b199ed00fe1b"
for k,v in d.items():
    c[k]=pd.Series(v)
print(c)
# + id="NkclRPeNMmCB" colab_type="code" colab={}
finaldata=pd.DataFrame(c)  # one column per username, rows = found contacts
# + id="YtMByfrCMnSA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f319565a-d94a-4a73-d761-0ee881189e48"
# Transpose so each row is one username. The original iterated range(113) --
# the row count of one particular crawl -- which breaks (IndexError or
# truncated output) on any other dataset; iterate over the actual length.
df=finaldata.T
for i in range(len(df)):
    print(df.iloc[i,:])
# + id="2ZpGdYPqMoiY" colab_type="code" colab={}
df.columns=['Email']  # NOTE(review): the column actually mixes e-mails and phone numbers
# + id="AGBKFsUxMo_N" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 276} outputId="7f9bc0d0-ffe0-4cc0-df1e-c874453d8d8b"
# Promote the index (usernames) into a regular column for the CSV export.
df['usernames']=df.index
print(df)
# + id="TqzWv7ODO-Y7" colab_type="code" colab={}
cols = ['usernames', 'Email']
df = df[cols]  # reorder so the username column comes first
# + id="FF_0Bua1PHRB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 276} outputId="28c4decd-10e0-47ea-b186-f614e4062f0a"
print(df)
# + id="lMn_umgbMrb9" colab_type="code" colab={}
df.to_csv(DATA_PATH+"/contact.csv",index=False,encoding='UTF-8')
| ci.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Vectors in Python
#
# In the following exercises, you will work on coding vectors in Python.
#
# Assume that you have a state vector
# $$\mathbf{x_0}$$
#
# representing the x position, y position, velocity in the x direction, and velocity in the y direction of a car that is driving in front of your vehicle. You are tracking the other vehicle.
#
# Currently, the other vehicle is 5 meters ahead of you along your x-axis, 2 meters to your left along your y-axis, driving 10 m/s in the x direction and 0 m/s in the y-direction. How would you represent this in a Python list where the vector contains `<x, y, vx, vy>` in exactly that order?
#
#
# ### Vector Assignment: Example 1
# +
## Practice working with Python vectors
## TODO: Assume the state vector contains values for <x, y, vx, vy>
## Currently, x = 5, y = 2, vx = 10, vy = 0
## Represent this information in a list
x0 = [5, 2, 10, 0]
# -
# ### Test your code
#
# Run the cell below to test your code.
#
# The test code uses a Python assert statement. If you have a code statement that resolves to either True or False, an assert statement will either:
# * do nothing if the statement is True
# * throw an error if the statement is False
#
#
# A Python assert statement
# will output an error if the answer was not as expected. If the
# answer was as expected, then nothing will be outputted.
# +
### Test Cases
### Run these test cases to see if your results are as expected
### Running this cell should produce no output if all assertions are True
assert x0 == [5, 2, 10, 0]
# -
# ### Vector Assignment: Example 2
#
# The vehicle ahead of you has now moved farther away from you. You know that the vehicle has moved 3 meters forward in the x-direction, 5 meters forward in the y-direction, has increased its x velocity by 2 m/s and has increased its y velocity by 5 m/s.
#
# Store the change in position and velocity in a list variable called xdelta
# +
## TODO: Assign the change in position and velocity to the variable
## xdelta. Remember that the order of the vector is x, y, vx, vy
xdelta = [3, 5, 2, 5]
# +
### Test Case
### Run this test case to see if your results are as expected
### Running this cell should produce no output if all assertions are True
assert xdelta == [3, 5, 2, 5]
# -
# ### Vector Math: Addition
#
# Calculate the tracked vehicle's new position and velocity. Here are the steps you need to carry this out:
#
# * initialize an empty list called x1
# * add xdelta to x0 using a for loop
# * store your results in x1 as you iterate through the for loop using the append method
# +
## TODO: Add the vectors together element-wise. For example,
## element-wise addition of [2, 6] and [10, 3] is [12, 9].
## Place the answer in the x1 variable.
##
## Hint: You can use a for loop. The append method might also
## be helpful.
# Element-wise sum of the previous state and the delta; zip pairs the
# components so no index bookkeeping is needed.
x1 = []
for state_component, delta_component in zip(x0, xdelta):
    x1.append(state_component + delta_component)
# -
### Test Case
### Run this test case to see if your results are as expected
### Running this cell should produce no output if all assertions are True
assert x1 == [8, 7, 12, 5]
# ### Vector Math: Scalar Multiplication
#
# You have your current position in meters and current velocity in meters per second. But you need to report your results at a company meeting where most people will only be familiar with working in feet rather than meters. Convert your position vector x1 to feet and feet/second.
#
# This will involve scalar multiplication. The process for coding scalar multiplication is very similar to vector addition. You will need to:
# * initialize an empty list
# * use a for loop to access each element in the vector
# * multiply each element by the scalar
# * append the result to the empty list
# +
## TODO: Multiply each element in the x1 vector by the conversion
## factor shown below and store the results in the variable x1feet.
## Use a for loop
# Convert every component of x1 from meters(/s) to feet(/s).
meters_to_feet = 1.0 / 0.3048
x1feet = [component * meters_to_feet for component in x1]
# +
### Test Cases
### Run this test case to see if your results are as expected
### Running this cell should produce no output if all assertions are True
# Compare within a small tolerance -- float multiplication makes exact
# equality unreliable.
x1feet_sol = [8/.3048, 7/.3048, 12/.3048, 5/.3048]
assert(len(x1feet) == len(x1feet_sol))
for response, expected in zip(x1feet, x1feet_sol):
    assert(abs(response-expected) < 0.001)
# -
# ### Vector Math: Dot Product
#
# The tracked vehicle is currently at the state represented by
# $$\mathbf{x_1} = [8, 7, 12, 5] $$.
#
# Where will the vehicle be in two seconds?
#
# You could actually solve this problem very quickly using Matrix multiplication, but we have not covered that yet. Instead, think about the x-direction and y-direction separately and how you could do this with the dot product.
#
# #### Solving with the Dot Product
# You know that the tracked vehicle at x1 is 8m ahead of you in the x-direction and traveling at 12m/s. Assuming constant velocity, the new x-position after 2 seconds would be
#
# $$8 + 12*2 = 32$$
#
# The new y-position would be
# $$7 + 5*2 = 17$$
#
# You could actually solve each of these equations using the dot product:
#
# $$x_2 = [8, 7, 12, 5]\cdot[1, 0, 2, 0] \\\
# = 8\times1 + 7\times0 + 12\times2 + 5\times0 \\\
# = 32$$
#
# $$y_2 = [8, 7, 12, 5]\cdot[0, 1, 0, 2] \\\
# = 8\times0 + 7\times1 + 12\times0 + 5\times2 \\\
# = 17$$
#
# Since you are assuming constant velocity, the final state vector would be
#
# $$\mathbf{x_2} = [32, 17, 12, 5]$$
#
# #### Coding the Dot Product
#
# Now, calculate the state vector $$\mathbf{x_2}$$ but with code. You will need to calculate the dot product of two vectors. Rather than writing the dot product code for the x-direction and then copying the code for the y-direction, write a function that calculates the dot product of two Python lists.
#
# Here is an outline of the steps:
# * initialize an empty list
# * initialize a variable with value zero to accumulate the sum
# * use a for loop to iterate through the vectors. Assume the two vectors have the same length
# * accumulate the sum as you multiply elements together
#
# You will see in the starter code that x2 is already being calculated for you based on the results of your dotproduct function
# +
## TODO: Fill in the dotproduct() function to calculate the
## dot product of two vectors.
##
## Here are the inputs and outputs of the dotproduct() function:
## INPUTS: vector, vector
## OUTPUT: dot product of the two vectors
##
##
## The dot product involves mutliplying the vectors element
## by element and then taking the sum of the results
##
## For example, the dot product of [9, 7, 5] and [2, 3, 4] is
## 9*2+7*3 +5*4 = 59
##
## Hint: You can use a for loop. You will also need to accumulate
## the sum as you iterate through the vectors. In Python, you can accumulate
## sums with syntax like w = w + 1
x2 = []
def dotproduct(vectora, vectorb):
    """Return the dot product of two equal-length vectors."""
    total = 0
    # zip pairs corresponding components; accumulate their products.
    for component_a, component_b in zip(vectora, vectorb):
        total += component_a * component_b
    return total
# New state after 2 s of constant velocity: x and y each advance by
# 2 * (their velocity); the velocities themselves are unchanged.
x2 = [dotproduct([8, 7, 12, 5], [1, 0, 2, 0]),
      dotproduct([8, 7, 12, 5], [0, 1, 0, 2]),
      12,
      5]
# -
### Test Case
### Run this test case to see if your results are as expected
### Running this cell should produce no output if all assertions are True
assert x2 == [32, 17, 12, 5]
| CVND_Exercises/3_6_Matrices_and_transformation_of_state/1_vector_coding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="t09eeeR5prIJ"
# ##### Copyright 2018 The TensorFlow Authors.
# + cellView="form" colab_type="code" id="GCCk8_dHpuNf" colab={}
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="xh8WkEwWpnm7"
# # Automatic differentiation and gradient tape
# + [markdown] colab_type="text" id="idv0bPeCp325"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/tutorials/eager/automatic_differentiation"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/eager/automatic_differentiation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/eager/automatic_differentiation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="vDJ4XzMqodTy"
# In the previous tutorial we introduced `Tensor`s and operations on them. In this tutorial we will cover [automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation), a key technique for optimizing machine learning models.
# + [markdown] colab_type="text" id="GQJysDM__Qb0"
# ## Setup
#
# + colab_type="code" id="OiMPZStlibBv" colab={}
import tensorflow as tf
# TF 1.x: switch on eager execution so ops run immediately (no Session/graph).
tf.enable_eager_execution()
tfe = tf.contrib.eager # Shorthand for some symbols
# + [markdown] colab_type="text" id="1CLWJl0QliB0"
# ## Derivatives of a function
#
# TensorFlow provides APIs for automatic differentiation - computing the derivative of a function. The way that more closely mimics the math is to encapsulate the computation in a Python function, say `f`, and use `tfe.gradients_function` to create a function that computes the derivatives of `f` with respect to its arguments. If you're familiar with [autograd](https://github.com/HIPS/autograd) for differentiating numpy functions, this will be familiar. For example:
# + colab_type="code" id="9FViq92UX7P8" colab={}
from math import pi
def f(x):
    """sin^2(x) -- the scalar function differentiated below."""
    return tf.square(tf.sin(x))
assert f(pi/2).numpy() == 1.0  # sin(pi/2)^2 == 1
# grad_f will return a list of derivatives of f
# with respect to its arguments. Since f() has a single argument,
# grad_f will return a list with a single element.
grad_f = tfe.gradients_function(f)
assert tf.abs(grad_f(pi/2)[0]).numpy() < 1e-7  # d/dx sin^2(x) = sin(2x), which is 0 at pi/2
# + [markdown] colab_type="text" id="v9fPs8RyopCf"
# ### Higher-order gradients
#
# The same API can be used to differentiate as many times as you like:
#
# + colab_type="code" id="3D0ZvnGYo0rW" colab={}
def f(x):
    """sin^2(x) again, for repeated differentiation."""
    return tf.square(tf.sin(x))
def grad(f):
    # Wrap f so calling the result yields df/dx directly (f has one
    # argument, so take the single element of the gradients list).
    return lambda x: tfe.gradients_function(f)(x)[0]
x = tf.lin_space(-2*pi, 2*pi, 100) # 100 points between -2π and +2π
import matplotlib.pyplot as plt
# Nesting grad() differentiates repeatedly: first/second/third derivatives.
plt.plot(x, f(x), label="f")
plt.plot(x, grad(f)(x), label="first derivative")
plt.plot(x, grad(grad(f))(x), label="second derivative")
plt.plot(x, grad(grad(grad(f)))(x), label="third derivative")
plt.legend()
plt.show()
# + [markdown] colab_type="text" id="-39gouo7mtgu"
# ## Gradient tapes
#
# Every differentiable TensorFlow operation has an associated gradient function. For example, the gradient function of `tf.square(x)` would be a function that returns `2.0 * x`. To compute the gradient of a user-defined function (like `f(x)` in the example above), TensorFlow first "records" all the operations applied to compute the output of the function. We call this record a "tape". It then uses that tape and the gradients functions associated with each primitive operation to compute the gradients of the user-defined function using [reverse mode differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation).
#
# Since operations are recorded as they are executed, Python control flow (using `if`s and `while`s for example) is naturally handled:
#
#
# + colab_type="code" id="MH0UfjympWf7" colab={}
def f(x, y):
    """x**y computed with a Python loop -- the control flow is recorded too."""
    output = 1
    # Must use range(int(y)) instead of range(y) in Python 3 when
    # using TensorFlow 1.10 and earlier. Can use range(y) in 1.11+
    for i in range(int(y)):
        output = tf.multiply(output, x)
    return output
def g(x, y):
    # Return the gradient of `f` with respect to its first parameter
    return tfe.gradients_function(f)(x, y)[0]
assert f(3.0, 2).numpy() == 9.0   # f(x, 2) is essentially x * x
assert g(3.0, 2).numpy() == 6.0   # And its gradient will be 2 * x
assert f(4.0, 3).numpy() == 64.0  # f(x, 3) is essentially x * x * x
assert g(4.0, 3).numpy() == 48.0  # And its gradient will be 3 * x * x
# + [markdown] colab_type="text" id="aNmR5-jhpX2t"
# At times it may be inconvenient to encapsulate computation of interest into a function. For example, if you want the gradient of the output with respect to intermediate values computed in the function. In such cases, the slightly more verbose but explicit [tf.GradientTape](https://www.tensorflow.org/api_docs/python/tf/GradientTape) context is useful. All computation inside the context of a `tf.GradientTape` is "recorded".
#
# For example:
# + colab_type="code" id="bAFeIE8EuVIq" colab={}
x = tf.ones((2, 2))
# persistent=True keeps the tape alive so t.gradient() can be called twice
# (once for dz/dy, once for dz/dx); a non-persistent tape is released after
# the first gradient call.
with tf.GradientTape(persistent=True) as t:
    t.watch(x)  # x is a constant tensor, so it must be watched explicitly
    y = tf.reduce_sum(x)   # y = 4
    z = tf.multiply(y, y)  # z = y^2 = 16
# Use the same tape to compute the derivative of z with respect to the
# intermediate value y.
dz_dy = t.gradient(z, y)
assert dz_dy.numpy() == 8.0  # dz/dy = 2y = 8
# Derivative of z with respect to the original input tensor x
dz_dx = t.gradient(z, x)
for i in [0, 1]:
    for j in [0, 1]:
        assert dz_dx[i][j].numpy() == 8.0  # dz/dx = 2y * dy/dx = 8 per element
# + [markdown] colab_type="text" id="DK05KXrAAld3"
# ### Higher-order gradients
#
# Operations inside of the `GradientTape` context manager are recorded for automatic differentiation. If gradients are computed in that context, then the gradient computation is recorded as well. As a result, the exact same API works for higher-order gradients as well. For example:
# + colab_type="code" id="cPQgthZ7ugRJ" colab={}
x = tf.Variable(1.0)  # Convert the Python 1.0 to a Tensor object
with tf.GradientTape() as t:
    with tf.GradientTape() as t2:
        y = x * x * x
    # Compute the gradient inside the 't' context manager
    # which means the gradient computation is differentiable as well.
    dy_dx = t2.gradient(y, x)
d2y_dx2 = t.gradient(dy_dx, x)
assert dy_dx.numpy() == 3.0    # dy/dx = 3x^2, which is 3 at x = 1
assert d2y_dx2.numpy() == 6.0  # d2y/dx2 = 6x, which is 6 at x = 1
# + [markdown] colab_type="text" id="4U1KKzUpNl58"
# ## Next Steps
#
# In this tutorial we covered gradient computation in TensorFlow. With that we have enough of the primitives required to build and train neural networks.
| site/en/tutorials/eager/automatic_differentiation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 파이토치로 구현하는 신경망
#
# 파이토치를 이용하여 가장 기본적인 신경망을 만들어봅니다.
# * [개념] 텐서와 Autograd
# * [프로젝트 1] 텐서와 Autograd
# * [프로젝트 2] 신경망 모델 구현하기
# * [프로젝트 2] 토치비전과 토치텍스트로 데이터셋 다루기
#
# 파이토치는 기본적인 수학 계산용 라이브러리를 바탕으로 그 위에 머신러닝에 필요한 그래프 형태의 계산방식을 추가 시킨 라이브러리 입니다. 물론 파이토치의 바탕이 되는 계산 라이브러리에 대한 깊은 지식이 없더라도 파이토치를 이용해 머신러닝 모델을 구현하는데 그리 큰 문제는 없습니다.
# 하지만 파이썬 개발자들에게 편리하도록 설계 되었더라도 수리적 계산이 많이 들어가는 머신러닝의 특성 때문에 파이토치의 자료구조는 기존 파이썬의 자료구조와는 사뭇 다릅니다.
# 파이토치의 가장 기본적인 자료구조인 텐서(Tensor) 가 그 대표적인 예 인데요,이번 장에선 이 텐서와 텐서를 이용한 연산, 그리고 Autograd 등의 기능을 배워 보겠습니다. 더불어 이들을 이용해 기본적인 신경망 모델을 구현 해 보고 저장, 재사용 하는 방법까지 배워 보겠습니다.
#
# ## 프로젝트 1. 텐서와 Autograd
#
# 프로그래밍 언어를 배울 때와 마찬가지로, 파이토치 또한 직접 코딩을 하면서 배우는 것이 가장 효율적인 방법이라고 생각합니다. 간단한 파이토치 코드 예제를 같이 코딩하면서 파이토치에 대해 공부 해 보겠습니다.
#
# ### 텐서 다루기 기본: 차원(Rank)과 모양(Shape)
#
# 가장 먼저 파이토치를 임포트 합니다.
#
# ```python
# import torch
# ```
#
# 텐서(Tensor)는 파이토치에서 다양한 수식을 계산하기 위한 가장 기본적인 자료구조 입니다. 흔히 수학에서 말하는 벡터나 행렬 과 같은 개념이며, 숫자들을 특정한 모양으로 배열 한 것입니다. 그럼 간단한 텐서를 만들어 보겠습니다.
#
# ```python
# x = torch.tensor([[1,2,3], [4,5,6], [7,8,9]])
# print(x)
# ```
#
# 위 코드는 다음과 같은 결과를 출력합니다.
#
# ```
# tensor([[1, 2, 3], [ 4, 5, 6], [7, 8, 9]])
# ```
#
# 즉 x는 1부터 9까지의 숫자를 가로 3줄, 세로 3줄의 모양을 지니도록 배열한 텐서입니다. 그리고 가로와 세로 두 차원으로만 이루어져 있는 2차원 텐서라고 할 수 있습니다.
# 이처럼 텐서는 랭크(Rank) 과 모양(Shape) 이라는 개념을 갖고 있습니다. 텐서의 랭크가 0이면 스칼라(Scalar), 1이면 벡터(Vector), 2면 행렬(Matrix), 3이상이면 n 랭크 텐서 라고 부릅니다.
#
# ```python
# 1 -> 스케일러, 모양은 []
# [1,2,3] -> 벡터, 모양은 [3]
# [[1,2,3]] -> 행렬, 모양은 [1,3]
# ```
#
# 텐서의 랭크과 모양은 size() 함수 혹은 shape 키워드를 통해 확인 할 수 있습니다.
#
# ```python
# print(x.size())
# print(x.shape)
# ```
#
# ```python
# torch.Size([3, 3])
# torch.Size([3, 3])
# ```
#
# unsqueeze(), squeeze(), 그리고 view() 함수를 통해 우리는 인위적으로 텐서의 랭크와 모양을 바꿔 줄 수도 있습니다.
# 먼저 unsqueeze() 함수를 통해 텐서 x의 랭크를 늘려 보겠습니다.
#
# ```python
# x = torch.unsqueeze(x, 0)
# print(x)
# print(x.shape)
# ```
#
# 위 코드는 텐서 모양의 첫번째(0 번째) 자리에 1 이라는 차원값을 인위적으로 추가 시켜 [3,3] 모양의 랭크 2 텐서를 [1,3,3] 모양의 랭크 3 텐서로 변경시킵니다. 랭크는 늘어나도, 텐서 속 원소의 수는 유지됩니다.
#
# ```python
# tensor([[[ 1, 2, 3], [ 4, 5, 6], [ 7, 8, 9]]])
# torch.Size([1, 3, 3])
# ```
#
# squeeze() 함수를 이용하면 텐서의 랭크 중 크기가 1인 랭크를 삭제하여 다시 랭크 2 텐서로 되돌릴 수 있습니다. [1, 3, 3] 모양을 가진 텐서 x 를 다시 [3,3] 모양으로 되돌려 보겠습니다.
#
# ```python
# x = torch.squeeze(x)
# print(x)
# print(x.shape)
# ```
#
# ```python
# tensor([[1, 2, 3], [ 4, 5, 6], [ 7, 8, 9]])
# torch.Size([3, 3]) #[3,3] 모양의 랭크 2 텐서
# ```
#
# x 는 이제 랭크 2의 텐서가 되었지만 이번에도 역시 텐서 속의 총 숫자 수는 계속 9로 영향을 받지 않았습니다.
# view()함수를 이용하면 위와 같은 작업을 더 쉽게 할 수 있을 뿐만 아니라, 직접 텐서의 모양을 바꿔 줄 수도 있습니다. 랭크 2의 [3,3] 모양을 한 x 를 랭크 1의 [1,9] 모양으로 바꿔 보겠습니다.
#
# ```python
# x = x.view(9)
# print(x)
# print(x.shape)
# ```
#
# ```python
# tensor([ 1, 2, 3, 4, 5, 6, 7, 8, 9])
# torch.Size([9])
# ```
#
# 이제 텐서 x는 [9] 모양을 한 랭크 1 텐서가 되었습니다.
# 이처럼 squeeze(), unsqueeze(), view() 함수는 텐서 속 원소의 수를 그대로 유지하면서 텐서의 모양과 차원을 조절합니다. 말인즉슨, view() 함수에 잘못된 모양을 입력하면 함수는 실행 될 수 없습니다.
# 예를 들어 view 함수를 이용해 x 의 모양을 [2, 4] 가 되도록 만들어 보겠습니다.
#
# ```python
# x = x.view(2,4)
# print(x)
# ```
#
# 코드를 실행시키면 다음과 같은 에러 메시지를 보게 됩니다.
#
# ```python
# Traceback (most recent call last):
# File "tensor_autograd.py", line 12, in <module>
# x = x.view(2,4)
# RuntimeError: invalid argument 2: size '[2 x 4]' is invalid for input with 9 elements at /Users/soumith/minicondabuild3/conda-bld/pytorch_1524590658547/work/aten/src/TH/THStorage.c:41
# ```
#
# 이처럼 원소가 9 개인 텐서를 2 X 4, 즉 8 개 의 원소를 가진 텐서로 바꿔주는것은 불가능합니다.
#
# ### 전체 코드
# ```python
# import torch
#
# x = torch.tensor([[1,2,3], [4,5,6], [7,8,9]])
#
# print(x)
# print(x.size())
# print(x.shape)
#
# x = torch.unsqueeze(x, 0)
# print(x)
# print(x.shape)
#
# x = torch.squeeze(x)
# print(x)
# print(x.shape)
#
# x = x.view(9)
# print(x)
# print(x.shape)
#
# x = x.view(2,4)
# print(x)
# ```
#
# ### 텐서를 이용한 연산과 행렬곱
#
# 딥러닝을 하는데 수준 높은 수학적 지식이 필요하지는 않습니다. 하지만 기본적으로 행렬과 행렬곱은 모든 딥러닝 알고리즘에 사용되므로 꼭 짚고 넘어가면 좋습니다. 앞서 말씀드렸듯이 행렬은 2차원 텐서와 같은 개념입니다. 숫자들을 네모꼴로 배열한 것으로, 네모꼴의 높이를 행, 넓이를 열 이라고 합니다. 만약 A, B 라는 두 행렬을 가지고 행렬곱을 할 시 다음과 같은 조건이 성립해야 합니다.
#
# ```
# A 의 열 수와 B 의 행 수는 같아야 한다.
# 행렬곱 A X B 를 계산한 행렬은 A의 행 개수, 그리고 B 의 열 개수를 가지게 된다.
# ```
#
# <img src="./images/mm.png" width="200">
#
# 그러면 직접 파이토치를 이용해 행렬곱을 구현해 보겠습니다. 우선 행렬곱에 사용될 두 행렬을 정의합니다.
#
# ```python
# w = torch.randn(5,3, dtype = torch.float)
# x = torch.tensor([[1.0,2.0], [3.0,4.0], [5.0,6.0]])
# ```
#
# randn() 함수는 정규분포(Normal Distribution)에서 무작위하게 float32 형의 숫자들을 선택해 w 라는 텐서를 채워넣습니다. 그리고 텐서 x에는 직접 float 형의 원소들을 집어넣어 주었습니다.
# 행렬곱 외에도 다른 행렬 연산에 쓰일 b 라는 텐서도 추가로 정의해 보겠습니다.
#
# ```python
# b = torch.randn(5,2, dtype = torch.float)
# ```
#
# 행렬곱을 하려면 torch.mm() 함수를 사용하면 됩니다.
#
# ```python
# wx = torch.mm(w,x) # w의 행은 5, x의 열은 2 즉 [5,2] 의 형태
# ```
#
# 이 wx 행렬의 원소들에 b 행렬의 원소들을 더해 보겠습니다.
#
# result = wx + b
#
# 위의 텐서들을 출력시켜 보면, x 는 [5, 3], w 는 [3, 2], 그리고 나머지 텐서는 [5, 2] 형태를 띄고 있음을 확인 할 수 있습니다.
#
# #### 전체 코드
#
# ```python
# import torch
#
# w = torch.randn(5,3, dtype = torch.float)
# x = torch.tensor([[1.0,2.0], [3.0,4.0], [5.0,6.0]])
# b = torch.randn(5,2, dtype = torch.float)
# wx = torch.mm(w,x)
# result = wx + b
#
# print(x)
# print(w)
# print(b)
# print(wx)
# print(result)
# ```
#
# ### Autograd
#
# Autograd 는 머신러닝에 필수적인 최적화 알고리즘인 ***경사 하강법(Gradient Descent)*** 에 관련된 기능을 제공합니다. 처음 머신러닝을 접하시는 분들은 이 알고리즘이 무엇인지, 그리고 어떻게 머신러닝에 관련되 있는지 몰라 고개를 갸웃거리실 수도 있습니다. 그런 분들을 위해 이번에는 직접 코드를 짜보기에 앞서 머신러닝의 학습 원리에 대하여 조금 더 깊게 배워보고 이 알고리즘이 어떻게 머신러닝에 사용되는지 알아보겠습니다.
#
# 앞 장에서 배웠듯 머신러닝 모델은 입력된 데이터를 기반으로 학습합니다. 다시말해 아직 충분한 데이터를 입력받지 못하거나 학습을 아직 끝내지 않은 모델은 입력된 데이터에 대해 잘못된 결과를 출력하게 됩니다.
# 이처럼 입력 데이터에 대해 정해진 답(Ground Truth) 과 머신러닝 모델이 낸 답의 차이를 산술적으로 표현한 것을 ***거리(Distance)*** 라고 합니다. 그리고 학습에 이용되는 데이터들을 가지고 계산된 거리들의 평균을 ***오차(loss)*** 라고 일컫습니다. 즉, 오차 값이 작은 머신러닝 모델일수록 주어진 데이터에 대해 더 정확한 답을 낸다고 볼수 있습니다.
#
# 오차값을 최소화 하는데는 여러 알고리즘이 쓰이고 있지만, 가장 유명하고 많이 쓰이는 알고리즘은 바로 전 언급한 경사하강법 이라는 알고리즘입니다. 오차를 수학적 함수로 표현한 후, 오차 함수의 기울기를 구해 오차의 최소값이 있는 곳의 방향을 찾아내는 알고리즘이죠. 간단한 경사하강법은 Numpy와 같은 라이브러리 만으로도 직접 구현이 가능합니다만 복잡한 인공신경망 모델에선 어렵고 머리아픈 미분식의 구현과 계산을 여러번 해 주어야 합니다. 다행히도 파이토치의 Autograd는 이름 그대로 파이토치 라이브러리 내에서 미분과 같은 수학 계산들을 자동화 시켜 우리로부터 직접 경사하강법을 구현하는 수고를 덜어줍니다.
# 그럼 Autograd를 어떻게 사용하는지 같이 공부해 보겠습니다.
#
# 우선 값이 1인 w 라는 0차원 스케일러 텐서를 만들어 보겠습니다. 방금 전 설명에서 Autograd가 미분 계산을 자동화 해준다고 설명했는데요, 쉽게 말하면 w 가 변수로 들어가는 수식을 w로 미분하고 기울기를 계산해 준다고 이해하면 됩니다. 이를 위해선 텐서 w의 requires_grad 키워드를 True로 설정해야 합니다.
# 아주 쉬운 예를 통해 간단한 미분식을 계산 해 보겠습니다.
#
# ```python
# w = torch.tensor(1.0, requires_grad=True)
# a = w*3
# ```
#
# a 라는 수식을 w 곱하기 3이라고 정의했습니다. 즉 이 식의 w에 대한 기울기는 3 입니다. backward() 함수를 이용하면 이 수식의 기울기를 구할 수 있습니다.
#
# ```python
# a.backward()
# print(w.grad)
# ```
#
# 예상대로, 위 코드는 다음과 같이 3 이라는 결과를 출력합니다.
#
# ```python
# tensor(3.)
# ```
#
# 간단한 미분식과 기울기 계산을 해 봤으니, 이번엔 조금 더 복잡한 미분식 계산을 해 보겠습니다.
#
# ```python
# w = torch.tensor(1.0, requires_grad=True)
# a = w*3
# l = a*2
# ```
#
# 위의 l은 텐서 a의 모든 값에 2를 곱한 텐서 입니다.
# 텐서 w에 3을 곱해 a를 만들었고, 또 a에 2를 곱해 l을 만들었습니다.
# 이를 수식으로 표현하면 다음과 같습니다.
#
# ```python
# l = 2*a
# a = 3*w
# 그러므로
# l = 2*(3*w) = 6w
# ```
#
# 이러한 l을 w로 미분하려면 연쇄법칙(Chain Rule)을 이용하여 l을 a와 w로 차례대로 미분해 줘야합니다.
#
# ```python
# l.backward()
# print('l을 w로 미분한 값은 ', w.grad)
# ```
#
# 위의 코드를 실행하면 다음과 같은 결과를 확인 하실 수 있습니다.
#
# ```python
# l을 w로 미분한 값은  tensor(6.)
# ```
#
# backward() 함수는 l을 a로 미분한 후, 그 값을 a를 w로 미분한 값에 곱해줘 w.grad 를 계산했습니다.
# 여러 겹의 행렬곱을 하는 인공신경망이 경사 하강법을 할때는 위처럼 여러 겹의 미분식을 해야합니다.
# 이렇게 연쇄법칙을 사용하여 경사 하강법을 하는 딥러닝 특유의 알고리즘이 바로 그 유명한
# ***역전파 알고리즘(Backpropagation Algorithm)*** 입니다.
#
# 역전파 알고리즘은 딥러닝에 있어 가장 자주 쓰이는 알고리즘 이지만 직접 구현하는데에는 복잡한 코드와 수학적 지식이 필요합니다.
# 다행히 파이토치는 역전파 알고리즘 기법을 제공해주기 때문에 우리가 직접 역전파 알고리즘을 구현할 일은 없습니다만, 아주 중요한 알고리즘이므로
# 딥러닝을 좀 더 깊게 공부하고자 하신다면 꼭 자세히 공부하는걸 권하고 싶습니다.
#
# #### 전체 코드
#
# ```python
# import torch
#
# w = torch.tensor(1.0, requires_grad=True)
# a = w*3
# l = a*2
#
# l.backward()
# print('l을 w로 미분한 값은 ', w.grad)
# ```
#
# ## 프로젝트 2. 신경망 모델 구현하기
#
# 이번 장에서는 지금까지 배워 온 개념들을 토대로 간단한 신경망을 함께 구현해 보겠습니다. 지금까지 내용과는 달리, 이번 장에는 딥러닝에 핵심적인 내용을 조금 더 깊게, 그리고 이론적으로 설명하여 처음 딥러닝을 접하는 분들에게는 다소 어려울 수도 있습니다. 하지만 설명을 읽어가며 함께 코딩을 해 보면 어느새 딥러닝을 코딩하는데 익숙해 질 것입니다.
#
# ### 딥러닝과 인공신경망
#
# 이름에서부터 알 수 있듯이 인공신경망은 인간의 뇌, 혹은 신경계의 작동 방식에서 그 영감을 받았습니다. 신경계가 작동을 하기 위해선 가장 먼저 눈이나 혀 같은 감각 기관을 통해 자극을 입력 받아야 합니다. 이런 자극이 첫번째 신경세포로 전달되고, 이 신경세포는 자극을 처리해 다른 신경세포로 전달합니다. 이러한 자극 처리와 전달 과정을 여러번 반복하다 보면 인간의 신경계는 수많은 자극을 인지하고 그에 따라 다른 반응을 하게됩니다. 그러다 언젠가는 맛을 판별하거나 손가락을 움직이는 등 다양하고 복잡한 작업을 할수 있게 됩니다.
#
# 자극을 텐서의 형태로 입력받는 인공신경망에선 이러한 자극의 입력과 전달과정이 행렬곱 과 활성화 함수 라는 수학적 연산으로 표현됩니다.
# 실제 인간의 신경세포가 자극을 전달하기 전에 입력받은 자극에 여러 화학적 가공처리를 가하듯 인공신경망도 입력된 텐서에 특정한 수학적 연산을 실행합니다. 바로 ***가중치(Weight)*** 라고 하는 랜덤한 텐서를 행렬곱 시켜주는 것이죠.
# 그리고 이 행렬곱의 결과는 ***활성화 함수(Activation Function)*** 를 거쳐 결과값을 산출하게 됩니다. 이 결과값이 곧 인접한 다른 신경세포로 전달되는 자극이라고 보시면 됩니다.
# 자극의 처리와 전달, 이러한 과정을 몇겹에 싸여 반복한 후 마지막 결과값 만들어 내는 것이 인공신경망의 기본적인 작동원리입니다.
#
# ### 간단한 분류 모델 구현하기
#
# 이번 장에서는 인공신경망을 이용해 간단한 분류 모델을 함께 구현해 보겠습니다. 하지만 처음 인공신경망과 머신러닝을 접하는 분들을 위해 이미지 같은 고차원의 복잡한 데이터가 아닌 간단한 2차원의 데이터를 이용하겠습니다. 첫번째 인공신경망을 구현하는 만큼, 이번 프로젝트의 코드는 조금 새롭고 생소한 개념을 다소 포함하고 있습니다. 그러므로 꼭 설명을 자세하게 읽어 보고 코딩해 보시기 바랍니다.
#
# 우선 파이토치와 그 외 다른 라이브러리들을 임포트합니다. Numpy는 유명한 수치 해석용 라이브러리 입니다. 행렬과 벡터를 이용한 연산을 하는데 아주 유용한 라이브러리며, 파이토치도 이 넘파이를 기반으로 개발되었을 정도로 긴밀하게 이용됩니다. 이번 프로젝트에서는 인공신경망 학습을 위한 데이터를 만드는데 넘파이와 sklearn 라이브러리를 이용하여 생성하겠습니다. 마지막으로 임포트 되어지는 matplotlib 라이브러리는 데이터를 시각화 하는데 있어 유용한 툴 입니다. 학습데이터가 어떠한 패턴을 보이며 분포되어 있는지 확인하기 위해 matplotlib 을 이용하겠습니다.
#
# ```python
# import torch
# import numpy
# from sklearn.datasets import make_blobs
# import matplotlib.pyplot as plot
# import torch.nn.functional as F
# ```
#
# 인공신경망을 구현하기 전 인공신경망의 학습과 평가를 위한 데이터셋을 만들어 줍니다.
# 밑의 코드에서 x_tra 와 y_tra 라고 정의된 실험데이터는 직접 인공신경망을 학습시키는데 쓰이는 데이터 입니다. 반대로 x_tes 와 y_tes 라고 정의된 데이터는 직접 신경망을 학습시키는데는 쓰이지 않지만 학습이 끝난 신경망의 성능을 평가하고 실험하는데 쓰일 데이터 셋입니다.
#
# ```python
# n_dim = 2
# x_tra, y_tra = make_blobs(n_samples=80, n_features=n_dim, centers=[[1,1],[-1,-1],[1,-1],[-1,1]], shuffle=True, cluster_std=0.3)
# x_tes, y_tes = make_blobs(n_samples=20, n_features=n_dim, centers=[[1,1],[-1,-1],[1,-1],[-1,1]], shuffle=True, cluster_std=0.3)
# ```
#
# make_blobs() 함수를 이용하여 데이터를 2차원 벡터의 형태로 만들어 주었습니다.
# 학습데이터(Training Data Set)에는 80개, 실험데이터(Test Data Set)에는 20개의 2차원 벡터 형태의 데이터가 있는 것을 확인하실 수 있습니다.
# 데이터를 만든 후, 데이터에 해당하는 정답인 ‘레이블’ 을 달아줍니다. label_map 이라는 간단한 함수를 구현해 데이터가 [-1, -1] 혹은 [1, 1] 주위에 있으면 0 이라는 레이블을 달아 줬습니다. 반대로 [1, -1] 혹은 [-1, 1] 주위에 위치해 있으면 1 이라는 레이블을 달아 줬습니다.
#
# ```python
# def label_map(y_, from_, to_):
# y = numpy.copy(y_)
# for f in from_:
# y[y_ == f] = to_
# return y
#
# y_tra = label_map(y_tra, [0, 1], 0)
# y_tra = label_map(y_tra, [2, 3], 1)
# y_tes = label_map(y_tes, [0, 1], 0)
# y_tes = label_map(y_tes, [2, 3], 1)
# ```
#
# 데이터가 제대로 만들어 졌는지, 그리고 제대로 레이블링이 되었는지 확인하기 위해 matplotlib 을 이용해 데이터를 시각화 해 보겠습니다.
#
# ```python
# def vis_data(x,y = None, c = 'r'):
# if y is None:
# y = [None] * len(x)
# for x_, y_ in zip(x,y):
# if y_ is None:
# plot.plot(x_[0], x_[1], '*',markerfacecolor='none', markeredgecolor=c)
# else:
# plot.plot(x_[0], x_[1], c+'o' if y_ == 0 else c+'+')
#
# plot.figure()
# vis_data(x_tra, y_tra, c='r')
# plot.show()
# ```
#
# 레이블이 0 인 학습 데이터는 점으로, 1인 데이터는 십자가로 표시했습니다.
#
# <img src="./images/data_distribution.png" width="200">
#
# 마지막으로 신경망을 구현 하기 전, 위에서 정의한 데이터들을 넘파이 리스트가 아닌 파이토치 텐서로 바꿔줍니다.
#
# ```python
# x_tra = torch.FloatTensor(x_tra)
# x_tes = torch.FloatTensor(x_tes)
# y_tra = torch.FloatTensor(y_tra)
# y_tes = torch.FloatTensor(y_tes)
# ```
#
# 이제 데이터를 준비했으니 본격적으로 신경망 모델을 구현해 보겠습니다.
# 파이토치에서 인공신경망은 아래와 같이 신경망 모듈(Neural Network Module)을 상속받는 파이썬 객체 로 나타낼 수 있습니다.
#
# ```python
# class Feed_forward_nn(torch.nn.Module):
# ```
#
# 인공신경망의 구조와 동작을 정의하는 컨스트럭터/이니셜라이져(Constructor/Initializer) 를 모델 클래스 안에 정의해 보겠습니다.
#
# ```python
# def __init__(self, input_size, hidden_size):
# ```
#
# __init()__ 함수는 파이썬 객체지향 프로그래밍에서 객체가 생성될 때 객체에 내포된 값을 설정 해 주는 함수이며, 객체가 생성 될 때 자동적으로 호출됩니다. 이번 예제에서는 학습/실험 데이터의 차원인 input_size 라는 변수와 hidden_size 라는 변수를 __init()__ 함수를 통해 설정하도록 구현했습니다.
# input_size 는 신경망에 입력되는 데이터들의 차원입니다.
# 2차원 데이터를 입력받는 모델을 구현할 것이므로 input_size는 2라고 정의됩니다.
# [1,2] 사이즈의 입력데이터가 [2,5] 모양을 가진 가중치 텐서와 행렬곱 해 [1,5] 모양의 텐서가 만들어지듯이, 신경망에 입력된 데이터는 신경망 속의 가중치와 활성화 함수를 거치며 차원을 변화시킵니다. 이렇게 중간에 변화된 차원값을 hidden_size 라고 부르겠습니다.
#
# ```python
# super(Feed_forward_nn, self).__init__()
# self.input_size = input_size
# self.hidden_size = hidden_size
# ```
#
# 다음은 입력된 데이터가 인공신경망을 통과하면서 거치는 연산들을 정의해 주겠습니다.
#
# ```python
# self.linear_1 = torch.nn.Linear(self.input_size, self.hidden_size)
# self.relu = torch.nn.ReLU()
# self.linear_2 = torch.nn.Linear(self.hidden_size, 1)
# self.sigmoid = torch.nn.Sigmoid()
# ```
#
# linear_1 함수는 앞서 여러번 반복해 설명드렸던 행렬곱을 하는 함수입니다. [input_size, hidden_size] 사이즈의 가중치를 입력 데이터에 행렬곱 시켜 [1,hidden_size] 꼴의 텐서를 리턴합니다. 이 때 리턴된 값은 torch.nn.ReLU() 라는 활성화 함수를 거치게 됩니다. ReLU 는 입력값이 0보다 작으면 0을, 0보다 크면 입력값을 그대로 출력합니다. 예를 들어 텐서 [-1, 1, 3, -5]가 ReLU 를 거치면 텐서 [0, 1, 3, 0]가 리턴됩니다.
#
# <img src="./images/ReLU.png" width="200">
#
# ReLU 를 통과한 텐서는 다시 한번 linear_2 로 정의된 행렬곱을 거쳐 [1,1] 꼴을 지니게 됩니다. 마지막으로 이 텐서는 sigmoid 활성화 함수에 입력됩니다. Sigmoid 는 입력된 학습데이터가 레이블 1에 해당할 확률값을 리턴하는 함수로써, 머신러닝과 딥러닝에서 가장 중요한 활성화 함수 입니다.
#
# <img src="./images/sigmoid.png" width="200">
#
# 위의 그림처럼 sigmoid 함수는 0과 1 사이의 값을 리턴합니다.
# 다음으로 __init__() 함수에서 정의된 동작들을 차례대로 실행하는 forward() 함수를 구현합니다.
#
# ```python
# def forward(self, input_tensor):
# linear1 = self.linear_1(input_tensor)
# relu = self.relu(linear1)
# linear2 = self.linear_2(relu)
# output = self.sigmoid(linear2)
# return output
# ```
#
# 이로써 인공신경망 구현이 끝났습니다. 이제 실제로 신경망 객체를 생성하고 학습에 필요한 여러 변수와 알고리즘을 정의하겠습니다.
#
# ```python
# model = Feed_forward_nn(2, 5)
# learning_rate = 0.03
# criterion = torch.nn.BCELoss()
# ```
#
# input_size 를 2로, hidden_size 를 5 로 설정한 신경망 객체를 만들었습니다. learning_rate 은 ‘얼마나 급하게 학습하는가’ 를 설정하는 값입니다. 값이 너무 크면 오차함수의 최소점을 찾지 못하고 지나치게 되고, 값이 너무 작으면 학습속도가 느려집니다.
# 러닝레이트를 설정했으면 그 다음으로는 오차함수를 만들어야 합니다. 물론 직접 오차함수를 코딩 할 수도 있지만 이는 매우 까다롭고 귀찮은 일입니다. 다행히도 파이토치는 여러 오차함수를 미리 구현해서 바로 사용 할 수 있도록 해놓았습니다. 이번에 우리는 파이토치가 제공해 주는 이진교차 엔트로피(Binary Cross Entropy) 라는 오차함수를 사용하겠습니다.
#
# epochs는 학습데이터를 총 몇번 반복
# 동안 오차를 구하고 그 최소점으로 이동 할지 결정해줍니다.
# 마지막 변수 optimizer 는 최적화 알고리즘입니다. 최적화 알고리즘 에는 여러 종류가 있고 상황에 따라 다른 알고리즘을 사용합니다. 이번 예제를 통해 처음으로 인공신경망을 구현하는 분들을 위해 그중에서도 가장 기본적인 알고리즘인 스토카스틱 경사 하강법(Stochastic Gradient Descent)을 사용하겠습니다.
#
# ```python
# epochs = 1000
# optimizer = torch.optim.SGD(model.parameters(), lr = learning_rate)
# ```
#
# 학습을 시작하기 전 정말 마지막으로 아무 학습도 하지 않은 모델의 성능을 시험해 보겠습니다.
#
# ```python
# model.eval()
# test_loss_before = criterion(model(x_tes).squeeze(), y_tes)
# print('Before Training, test loss is ', test_loss_before.item())
# ```
#
# 위 코드는 아래와 같은 결과를 출력합니다.
#
# ```
# Before Training, test loss is 0.7301096916198730
# ```
#
# 오차값이 0.73 이 나왔습니다. 이정도의 오차를 가진 모델은 사실상 분류하는 능력이 없다고 봐도 무방합니다.
# 자, 이제 드디어 인공신경망을 학습시켜 퍼포먼스를 향상시켜 보겠습니다.
#
# 우선 epoch을 반복해주는 for loop 을 만들어 줍니다.
#
# ```python
# for epoch in range(epochs):
# ```
#
# 모델에 train()함수를 호출시켜 학습 모드로 바꿔 줍니다.
# '경사'라고도 할 수 있는 그레디언트(Gradient)는 오차 함수가 최소점을 가진 곳의 방향 입니다.
# 매 epoch 마다 우리는 새로운 그레디언트 값을 계산할 것이기 때문에 zero_grad()함수를 통해 그레디언트 값을 0으로 정의해 주겠습니다.
#
# ```python
# model.train()
# optimizer.zero_grad()
# ```
#
# 이미 생성한 모델에 학습데이터를 입력시켜 결과값을 계산합니다.
# 여기서 잠깐, 신경망 객체 속에 정의된 forward() 함수가 곧 신경망의 결과값을 내는 함수인 것은 맞지만, torch.nn.module이 forward() 함수 호출을 대신해줘 우리가 직접 호출할 필요는 없습니다.
#
# ```python
# train_output = model(x_tra) #torch.nn.module 을 통해서 forward()호출
# ```
#
# 신경망의 결과값의 차원을 레이블의 차원과 같도록 만들어 주고 오차를 계산합니다.
#
# ```python
# train_loss = criterion(train_output.squeeze(), y_tra)
# ```
#
# 학습이 잘 되는지 확인하기 위해 100 epoch마다 오차를 출력하도록 설정하겠습니다.
#
# ```python
# if epoch % 100 == 0:
# print('Train loss at ', epoch, 'is ', train_loss.item())
# ```
#
# 그 다음단계는 오차함수를 가중치 값들로 미분하여 오차함수의 최소점의 방향, 즉 그레디언트(Gradient)를 구하고 그 방향으로 모델을 러닝레이트 만큼 이동시키는 것입니다.
#
# ```python
# train_loss.backward()
# optimizer.step()
# ```
#
# 위 코드를 실행시켜 보면 오차값이 점점 줄어드는 것을 보실 수 있습니다.
#
# ```python
# Train loss at 0 is 0.7301096916198730
# Train loss at 100 is 0.6517783403396606
# Train loss at 200 is 0.5854113101959229
# Train loss at 300 is 0.519926130771637
# Train loss at 400 is 0.4684883952140808
# Train loss at 500 is 0.42419689893722534
# Train loss at 600 is 0.3720306158065796
# Train loss at 700 is 0.3115468919277191
# Train loss at 800 is 0.25684845447540283
# Train loss at 900 is 0.2133386880159378
# ```
#
# 바야흐로 우리의 첫 인공신경망 학습이 끝났습니다. 이제 학습된 신경망의 퍼포먼스를 시험할 차례입니다.
# 모델을 평가 모드(evaluation mode)로 바꿔 주고 실험데이터인 x_tes, y_tes를 이용해 오차값을 구해보겠습니다.
#
# ```python
# model.eval()
# test_loss = criterion(torch.squeeze(model(x_tes) ), y_tes)
# print('After Training, test loss is ', test_loss.item())
# ```
#
# 학습을 하기 전과 비교했을때 현저하게 줄어든 오차값을 확인 하실 수 있습니다.
#
# ```python
# After Training, test loss is 0.20166122913360596
# ```
#
# 지금까지 인공신경망을 구현하고 학습시켜 보았습니다.
# 이제 학습된 모델을 .pt 파일로 저장해 보겠습니다.
#
# ```python
# torch.save(model.state_dict(), './model.pt')
# ```
#
# 위 코드를 실행하고 나면 학습된 신경망의 가중치를 내포하는 model.pt 라는 파일이 생성됩니다. 아래 코드처럼 새로운 신경망 객체에 model.pt 속의 가중치값을 입력시키는 것 또한 가능합니다.
#
# ```python
# new_model = Feed_forward_nn(2, 5)
# new_model.load_state_dict(torch.load('./model.pt'))
# new_model.eval()
# print(new_model(torch.FloatTensor([-1,1])).item() )
# ```
#
# 여담으로 벡터 [-1,1]을 학습하고 저장된 모델에 입력시켰을 때 레이블이 1일 확률은 90% 이상이 나왔습니다.
# 우리의 첫번째 신경망 모델은 이제 꽤 믿을만한 분류 작업이 가능하게 된 것입니다.
#
# ```python
# 벡터 [-1,1]이 레이블 1 을 가질 확률은 0.9407910108566284
# ```
# ### 전체 코드
#
# ```python
# import torch
# import numpy
# from sklearn.datasets import make_blobs
# import matplotlib.pyplot as plot
# import torch.nn.functional as F
#
# def label_map(y_, from_, to_):
# y = numpy.copy(y_)
# for f in from_:
# y[y_ == f] = to_
# return y
#
# n_dim = 2
# x_tra, y_tra = make_blobs(n_samples=80, n_features=n_dim, centers=[[1,1],[-1,-1],[1,-1],[-1,1]], shuffle=True, cluster_std=0.3)
# x_tes, y_tes = make_blobs(n_samples=20, n_features=n_dim, centers=[[1,1],[-1,-1],[1,-1],[-1,1]], shuffle=True, cluster_std=0.3)
# y_tra = label_map(y_tra, [0, 1], 0)
# y_tra = label_map(y_tra, [2, 3], 1)
# y_tes = label_map(y_tes, [0, 1], 0)
# y_tes = label_map(y_tes, [2, 3], 1)
#
# def vis_data(x,y = None, c = 'r'):
# if y is None:
# y = [None] * len(x)
# for x_, y_ in zip(x,y):
# if y_ is None:
# plot.plot(x_[0], x_[1], '*',markerfacecolor='none', markeredgecolor=c)
# else:
# plot.plot(x_[0], x_[1], c+'o' if y_ == 0 else c+'+')
#
# plot.figure()
# vis_data(x_tra, y_tra, c='r')
# plot.show()
#
# x_tra = torch.FloatTensor(x_tra)
# x_tes = torch.FloatTensor(x_tes)
# y_tra = torch.FloatTensor(y_tra)
# y_tes = torch.FloatTensor(y_tes)
#
# class Feed_forward_nn(torch.nn.Module):
# def __init__(self, input_size, hidden_size):
# super(Feed_forward_nn, self).__init__()
# self.input_size = input_size
# self.hidden_size = hidden_size
# self.linear_1 = torch.nn.Linear(self.input_size, self.hidden_size)
# self.relu = torch.nn.ReLU()
# self.linear_2 = torch.nn.Linear(self.hidden_size, 1)
# self.sigmoid = torch.nn.Sigmoid()
# def forward(self, input_tensor):
# linear1 = self.linear_1(input_tensor)
# relu = self.relu(linear1)
# linear2 = self.linear_2(relu)
# output = self.sigmoid(linear2)
# return output
#
# model = Feed_forward_nn(2, 5)
# learning_rate = 0.03
# criterion = torch.nn.BCELoss()
# epochs = 1000
# optimizer = torch.optim.SGD(model.parameters(), lr = learning_rate)
#
# model.eval()
# test_loss_before = criterion(model(x_tes).squeeze(), y_tes)
# print('Before Training, test loss is ', test_loss_before.item())
#
# for epoch in range(epochs):
# model.train()
# optimizer.zero_grad()
# train_output = model(x_tra)
# train_loss = criterion(train_output.squeeze(), y_tra)
# if epoch % 100 == 0:
# print('Train loss at ', epoch, 'is ', train_loss.item())
# train_loss.backward()
# optimizer.step()
#
# model.eval()
# test_loss = criterion(model(x_tes).squeeze(), y_tes)
# print('After Training, test loss is ', test_loss.item())
#
# torch.save(model.state_dict(), './model.pt')
# new_model = Feed_forward_nn(2, 5)
# new_model.load_state_dict(torch.load('./model.pt'))
# new_model.eval()
# print(new_model(torch.FloatTensor([-1,1])).item() )
# ```
#
| 03-Coding-Neural-Networks-In-PyTorch/basic-feed-forward-nn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Método da bisseção
# ***
# Iniciamos a nossa discussão com o método da bisseção pois este se trata de uma versão mais sofisticada do raciocínio que fizemos anteriormente.
#
# * Começamos mudando ligeiramente o nosso problema.
#
#
# * Ao invés de encontrar o valor de x para o qual $f(x) = y_0$, passamos $y_0$ para o lado esquerdo da igualdade e buscamos o valor de $x$ para o qual $g(x) = f(x) - y_0 = 0$.
#
#
# * Em matemática este problema é conhecido como encontrar o zero de uma função.
#
#
# * É lógico que é trivial transformar um problema em que buscamos $x$ que produz um valor arbitrário (ex.: 42) em um problema de zero de funções. Utilizar o zero, no entanto, simplifica alguns cálculos.
# ***
# ### Exemplos
# ***
# Importa as bibliotecas
import numpy
import matplotlib.pyplot as matplot
# %matplotlib inline
# ***
# Definimos a nossa função $f(x) = x^2 + 2x + 1$ e o valor de $y_0$
#
# Queremos encontrar o valor de $x$ para o qual $f(x) = y_0$
# +
def f(x):
    """Quadratic f(x) = x^2 + 2x + 1 = (x + 1)^2; works on scalars and numpy arrays."""
    return x*x + 2*x + 1
# Target value: we look for the x such that f(x) = y0
y0 = 42
# -
# ***
# Cria uma sequência de números começando em $0$ e terminando em $10$ e inserir os resultados de $f(x)$ em y
# Sample the search interval: 50 evenly spaced points in [0, 10] (linspace default)
x = numpy.linspace(0, 10)
y = f(x)
# ***
# Definimos $g(x)$, a função que queremos encontrar os zeros
def g(x):
    """Shifted function: its zero is the x for which f(x) = y0."""
    return f(x) - y0
# ***
# O método da bisseção, assim como o método de força bruta mostrado anteriormente, exige um intervalo de busca inicial. Este intervalo deve ser escolhido para conter o zero da função e deve conter um valor para o qual $g(x)$ seja positivo e outro valor para o qual $g(x)$ seja negativo. Chamamos estes valores de $x_a$ e $x_b$. Pois pelo **teorema de Bolzano** $f(a)\times f(b) < 0$ então existe pelo menos uma raiz entre $a$ e $b$
#
# Nosso intervalo de busca inicial é entre $0$ e $10$. No método de força bruta, usamos entre $5$ e $6$. Vamos utilizar um intervalo maior só por segurança.
# Initial search interval for the bisection
x0 = 0
x1 = 10
# ***
# Verificamos os valores de $g(x)$ para descobrir quem é $x_a$ e $x_b$. No nosso caso sabemos que $x_a = x_0$ e $x_b = x_1$, mas caso a função seja decrescente no intervalo, esta relação se inverte.
# +
if g(x0) < g(x1):
    print("Função é crescente.")
    # xa = endpoint where g(x) is negative
    # xb = endpoint where g(x) is positive
    xa = x0
    xb = x1
    ya = g(x0)
    yb = g(x1)
else:
    print("Função é decrescente.")
    # xa = endpoint where g(x) is negative
    # xb = endpoint where g(x) is positive
    xa = x1
    xb = x0
    ya = g(x1)
    yb = g(x0)
print("g(x0) =", g(x0))
print("g(x1) =", g(x1))
# -
# ***
# Fazemos um gráfico dos resultados, vamos plotar o gráfico para cada valor de $x$ um respectivo valor de $y - y_0$ na qual $y_0$ é 42, isso irá permitir que o eixo $y$ fica um pouco acima do inicio do gráfico, além disso temos também uma linha horizontal na qual $y = 0$
# Plot g(x) = y - y0 together with the current bracketing interval [xa, xb] on y = 0
matplot.plot(x, y - y0)
matplot.plot(
    [xa, xb], [0, 0],
    color='black',
    linestyle='-',
    marker='o'
)
matplot.show()
# ***
# O próximo passo consiste em avaliar o valor de g(x) no ponto central do intervalo e investigar qual será o novo intervalo
# Evaluate g at the midpoint of the current bracketing interval
x_average = (xa + xb) / 2
g_average = g(x_average)
print(g_average)
# ***
# Vimos que o valor no ponto médio é igual à -6.0, e portanto negativo. Deste modo, sabemos que o zero de g(x) deve estar entre este valor e xb. Atualizamos nossas variáveis e mostramos o resultado em um gráfico.
# +
# The midpoint value was negative, so the zero lies between x_average and xb:
# move the negative endpoint (xa) up to the midpoint.
xa = x_average
ga = g_average
matplot.plot(x, y - y0)
matplot.plot([xa, xb], [0, 0], 'ko-')
matplot.show()
# -
# ***
# Vemos que o intervalo reduziu pela metade. Agora repetimos outra vez o mesmo raciocínio.
# Repeat: evaluate g at the midpoint of the halved interval
x_average = (xa + xb) / 2
g_average = g(x_average)
print(g_average)
# ***
# Observe que desta vez o valor de g(x_average) ficou positivo. Isto significa que devemos substituir $x_b$ e não $x_a$. Novamente, o intervalo de valores aceitáveis para o zero da função reduziu pela metade.
# +
# The midpoint value was positive this time, so replace the positive endpoint xb
xb = x_average
yb = g_average
matplot.plot(x, y - y0)
matplot.plot([xa, xb], [0, 0], 'ko-')
matplot.show()
# -
# ***
# ### Juntar tudo em um único passo
# ***
# Agora juntamos o raciocínio realizado nas duas etapas anteriores em um único passo em que o computador decide automaticamente qual dos dois valores ($x_a$ ou $x_b$) deve ser atualizado:
# +
# One full bisection step: evaluate the midpoint and let the sign of
# g(x_average) decide which endpoint (xa or xb) gets replaced.
x_average = (xa + xb) / 2
g_average = g(x_average)
if g_average <= 0:
    xa = x_average
    ya = g_average
else:
    xb = x_average
    yb = g_average
print("Intervalo:", xa, "e", xb)
matplot.plot(x, y - y0)
matplot.plot([xa, xb], [0, 0], 'ko-')
matplot.grid(True)
# -
# Observe que o intervalo aceitável diminui, mas sempre contêm o valor do zero da função. Podemos verificar isto executando várias vezes a célula acima.
#
# É lógico que quanto mais repetições realizarmos, melhor será a estimativa do intervalo da função. Cada repetição reduz o intervalo pela metade. Deste modo, após 10 repetições teríamos um intervalo $2^{10} = 1024$ vezes menor que o intervalo inicial. Nada mal!
# ***
# Não queremos executar a célula acima várias vezes manualmente. Vamos então programar o computador para fazer isto automaticamente.
#
# Vemos que o valor de $x$ converge para $5.4807$... e o valor de $g(x)$ se aproxima de zero. vamos fazer o calculo acompanhando o valor do erro, para depois mostrarmos em um gráfico.
# +
# Reset the initial interval
x0, x1 = 0, 10
# Order the endpoints: xa/ya = negative side of g, xb/yb = positive side
if g(x0) < g(x1):
    print("Função é crescente.")
    xa, xb = x0, x1
    ya, yb = g(x0), g(x1)
else:
    print("Função é decrescente.")
    xa, xb = x1, x0
    ya, yb = g(x1), g(x0)
# Update the interval 20 times, recording |g(midpoint)| for the error plot
g_result = []
for i in range(20):
    x_average = (xa + xb) / 2
    g_average = g(x_average)
    if g_average <= 0:
        xa, ya = x_average, g_average
    else:
        xb, yb = x_average, g_average
    g_result.append(abs(g_average))
    print("%2d) centro: %.6f, g(x): %9.6f" % (i, x_average, g_average))
matplot.plot(g_result)
matplot.xlabel('Número de iterações')
matplot.ylabel('|g(x)|')
matplot.show()
# -
# Percebe-se que o erro começou a tender a zero a partir da oitava iteração
# ***
# ### Critério de parada
# ***
# Vimos no gráfico anterior que o erro claramente reduz com o número de iterações. Mas quantas iterações devemos realizar? A resposta é sempre "depende".
#
# O método da bisseção, assim como vários outros métodos numéricos atinge a resposta correta apenas após um número infinito de iterações. Se por um lado é impossível esperar estas infinitas iterações, por outro, raramente precisamos do valor "completo" da solução com todas suas infinitas casas decimais. Na prática precisamos apenas de um valor "próximo o suficiente" do correto e é lógico que o que é "suficiente" depende muito da aplicação.
#
# No método da bisseção escolhemos tipicamente dois critérios para definir o que é "bom o suficiente". Eles se traduzem em uma margem de tolerância para $y$ ou para $x$.
#
# * $|g(x)| < y_{tol}$
# * $|x_b - x_a| < x_{tol}$
#
# Tipicamente, paramos quando um dos dois critérios for atingido. É lógico que podemos também adotar apenas um dos dois critérios ou, se quisermos ser mais rigorosos, paramos apenas quando os dois critérios forem atingidos. O código abaixo implementa a bisseção com o critério de parada.
#
# No nosso caso colocarmos o critério de parada como $1 \times 10^\left(-6\right)$
# +
# Stopping tolerances = 1e-6 for both x and y criteria
x_tolerance = 1e-6
y_tolerance = 1e-6
# Reset the initial interval
x0, x1 = 0, 10
# Order the endpoints: xa/ya = negative side of g, xb/yb = positive side
if g(x0) < g(x1):
    xa, xb = x0, x1
    ya, yb = g(x0), g(x1)
else:
    xa, xb = x1, x0
    ya, yb = g(x1), g(x0)
# Bisect until either stopping criterion is met
iterations = 0
while True:
    iterations += 1
    x_average = (xa + xb) / 2
    g_average = g(x_average)
    if g_average <= 0:
        xa, ya = x_average, g_average
    else:
        xb, yb = x_average, g_average
    # Stop when the interval is narrow enough or |g(midpoint)| is small enough
    if abs(xb - xa) < x_tolerance or abs(g_average) < y_tolerance:
        break
# -
# ***
# Verificamos o resultado
# Report the iteration count, the final bracket, and the residual at its midpoint
x_average = (xa + xb) / 2
print("Número de iterações: %s" % iterations)
print("xa = %.7f, xb = %.7f" % (xa, xb))
print("Meio do intervalo: %.7f" % x_average)
print("g(x) no meio do intervalo: %.7f" % g(x_average))
# ***
# ### Criando uma função
# ***
# Seria bom reutilizarmos a lógica do método da bisseção com qualquer função arbitrária, não? Isto é fácil fazer em Python. Vamos criar uma função que recebe uma funçao $g$ e um intervalo $x_0$, $x_1$ e a partir disto calcula o zero de $g(x)$ contido neste intervalo.
def bissect(g, x0, x1, x_tolerance=1e-6, y_tolerance=1e-6):
    """Find a zero of g(x) inside the interval (x0, x1) by bisection.

    Arguments:
        g: a function of a single variable
        x0, x1: initial search interval; g is expected to change sign on it
        x_tolerance: stop when the bracketing interval is narrower than this
        y_tolerance: stop when |g(midpoint)| is smaller than this

    Returns:
        An approximation of x such that g(x) = 0.
    """
    # Order the endpoints so that (fixes the original comments, which had it
    # inverted):
    #   xa = endpoint where g(x) is negative -> g(xa) = ya
    #   xb = endpoint where g(x) is positive -> g(xb) = yb
    if g(x0) < g(x1):
        # g is increasing over the interval
        xa, xb = x0, x1
        ya, yb = g(x0), g(x1)
    else:
        # g is decreasing over the interval
        xa, xb = x1, x0
        ya, yb = g(x1), g(x0)
    # Halve the bracketing interval until a stopping criterion is met
    while True:
        x_average = (xa + xb) / 2
        g_average = g(x_average)
        # A negative midpoint value replaces xa/ya, a positive one xb/yb, so
        # the zero of g(x) always stays bracketed between xa and xb.
        if g_average < 0:
            xa, ya = x_average, g_average
        elif g_average > 0:
            xb, yb = x_average, g_average
        else:
            # Landed exactly on the zero
            return x_average
        # Stopping criterion: |xb - xa| < x tolerance or |g(x)| < y tolerance
        if abs(xb - xa) < x_tolerance or abs(g_average) < y_tolerance:
            break
    # Return the endpoint where g(x) is closest to zero
    if abs(ya) < abs(yb):
        return xa
    else:
        return xb
# ***
# Agora vamos usar a função
# ***
# Find the zero of g(x) inside [0, 10]
print("x =", bissect(g, 0, 10))
# ***
# Find the zero of cos(x) inside [0, 4] (expected: pi/2)
print("x =", bissect(numpy.cos, 0, 4))
# ***
# +
# Find the point where cos(x) equals 0.5 in the interval [0, 4]
def g2(x):
    """cos(x) shifted by -0.5: its zero is where cos(x) = 0.5."""
    return numpy.cos(x) - 0.5
print("x =", bissect(g2, 0, 4))
# -
# ***
# +
# Find the point where cos(x) equals sin(x) in the interval [0, 4]
# (the expected answer is pi/4)
def g3(x):
    """Difference cos(x) - sin(x): zero where cos(x) = sin(x)."""
    return numpy.cos(x) - numpy.sin(x)
print("x =", bissect(g3, 0, 4))
| metodos_numericos/zero_de_funcoes/teoria/02_metodo_da_bissecao.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.3 64-bit ('env')
# name: python383jvsc74a57bd02515a424d8ea5f36008574fe87c8c85c2d4833ae03e0fa827ee523de5fea16f1
# ---
# +
import argparse
import tensorflow as tf
from tensorflow.keras.applications import MobileNetV2
import tensorflow_datasets as tfds
# -
# ## Loading dataset
# +
batch_size = 32
# MNIST as (image, label) pairs via tensorflow_datasets
train_dataset, test_dataset = tfds.load("mnist",
                               split=["train", "test"],
                               as_supervised=True)
AUTOTUNE = tf.data.experimental.AUTOTUNE
# Upscale 28x28 MNIST digits to 32x32 for the MobileNetV2 input below
size = (32, 32)
# Resize image, transform to one-hot encoding, convert from grayscale to rgb
train_dataset = train_dataset.map(lambda x, y: (tf.image.grayscale_to_rgb(tf.image.resize(x, size)), tf.one_hot(y, depth=10)))
test_dataset = test_dataset.map(lambda x, y: (tf.image.grayscale_to_rgb(tf.image.resize(x, size)), tf.one_hot(y, depth=10)))
# Cache decoded examples, batch, and prefetch to overlap preprocessing with training
train_dataset = train_dataset.cache().batch(batch_size).prefetch(buffer_size=AUTOTUNE)
test_dataset = test_dataset.cache().batch(batch_size).prefetch(buffer_size=AUTOTUNE)
# -
# ## Loading Model
# +
lr_rate = 0.0001

# Pre-trained MobileNetV2 backbone (ImageNet weights), used as a frozen
# feature extractor; only the new classification head is trained.
base_model = tf.keras.applications.MobileNetV2(input_shape=(32, 32, 3),
                                               include_top=False,
                                               weights='imagenet')
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input
base_model.trainable = False

global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
prediction_layer = tf.keras.layers.Dense(10) # 10 classes (MNIST digits), raw logits

inputs = tf.keras.Input(shape=(32, 32, 3))
x = preprocess_input(inputs)
x = base_model(x, training=False)  # training=False keeps BatchNorm in inference mode
x = global_average_layer(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = prediction_layer(x)
model = tf.keras.Model(inputs, outputs)

# Fix: the labels are 10-class one-hot vectors, so the loss must be
# categorical (softmax) cross-entropy — BinaryCrossentropy treats each of the
# 10 logits as an independent binary problem. Also use the non-deprecated
# `learning_rate` keyword instead of `lr`.
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr_rate),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# -
# ## Train model
model.fit(train_dataset, epochs=2)
| notebooks/trainer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import spacy
nlp = spacy.load('en_core_web_sm')
from spacy import displacy
# Fix: the adjacent string literals previously concatenated into
# "...$6 millionBy contrast..." with no sentence boundary, which breaks the
# per-sentence rendering below (doc.sents). A ". " separator restores the
# two intended sentences.
doc = nlp(u"Over the last quarter Apple sold nearly 20 thousand iPods for a profit of $6 million. "
          u"By contrast, Sony only sold half of a thousand Walkman music players.")
displacy.render(doc,style='ent',jupyter=True)
# Render entities one sentence at a time; each sentence text is re-parsed so
# the highlight offsets are relative to that sentence alone
for sent in doc.sents:
    displacy.render(nlp(sent.text), style='ent', jupyter=True)
# Custom colours per entity label; CSS gradients are valid background values
colors = {'ORG':'red' , 'PRODUCT':'radial-gradient(blue,cyan)', 'CARDINAL': 'linear-gradient(90deg, #aa9cfc, #fc9ce7 )'}
options = {'ents':['PRODUCT','ORG','CARDINAL'],'colors':colors} ## restricts rendering to these entity labels
displacy.render(nlp(sent.text), style='ent', jupyter=True, options=options)
# NOTE(review): displacy.serve starts a blocking web server (default port 5000)
# and does not return until interrupted — unusual inside a notebook; confirm intended
displacy.serve(doc, style='ent', options=options)
| Visualizing - Named Entity Recognition/.ipynb_checkpoints/Visualizing Named Entity Recognition-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Solution to puzzle number 5
import pandas as pd
import numpy as np
data = pd.read_csv('../inputs/puzzle5_input.csv')
# The puzzle input is a single comma-separated line, so read_csv parses every
# value into the header row; recover the values from the column names.
data = [val for val in data.columns]
data[:10]
# ## Part 5.1
# ### After providing 1 to the only input instruction and passing all the tests, what diagnostic code does the program produce?
# More Rules:
# - Opcode 3 takes a single integer as input and saves it to the position given by its only parameter.
# - Opcode 4 outputs the value of its only parameter.
#
# Functions now need to support the parameter mode 1 (Immediate mode):
# - Immediate mode
# - In immediate mode, a parameter is interpreted as a value - if the parameter is 50, its value is 50.
# Input value fed to opcode 3 for part 1 of the puzzle
user_ID = 1
# Sample program from the puzzle description (note: a tuple, not a list)
numbers = 1002,4,3,4,33
def opcode_instructions(intcode):
    """Return the two-digit opcode from the rightmost digits of *intcode*.

    E.g. 1002 -> 2, 11101 -> 1, 99 -> 99. Uses modular arithmetic instead of
    the original str()/slice/int() round-trip; identical for the non-negative
    instruction values Intcode programs use.
    """
    return intcode % 100
def extract_p_modes(intcode):
    """Extract the parameter modes of an Intcode instruction as a dict.

    Per the Intcode spec (Advent of Code 2019, day 5) the hundreds digit is
    the mode of the FIRST parameter, the thousands digit the second, and so
    on — i.e. the prefix is read right-to-left. The original enumerated it
    left-to-right, swapping the modes (1002 yielded p_mode_1='1' instead of
    the correct '0').

    Returns {'p_mode_1': '0'|'1', ...}; leading-zero modes are absent from
    the dict, as before.
    """
    prefix = str(intcode)[:-2]
    p_modes_dic = {}
    # Rightmost prefix digit first: that is the first parameter's mode
    for n, val in enumerate(reversed(prefix)):
        p_modes_dic[f'p_mode_{n+1}'] = val
    return p_modes_dic
def opcode_1(i, new_numbers, p_modes):
    """Addition instruction: store param1 + param2 at the address in param3.

    p_modes[0] / p_modes[1] pick position (0) or immediate (1) mode for the
    two operands; the destination is always treated as an address. Mutates
    and returns new_numbers.
    """
    param_a = new_numbers[i+1]
    param_b = new_numbers[i+2]
    dest = new_numbers[i+3]
    mode_a, mode_b = p_modes[0], p_modes[1]
    if (mode_a == 0) & (mode_b == 0):
        total = new_numbers[param_a] + new_numbers[param_b]
    elif (mode_a == 1) & (mode_b == 0):
        total = param_a + new_numbers[param_b]
    elif (mode_a == 0) & (mode_b == 1):
        total = new_numbers[param_a] + param_b
    else:
        total = param_a + param_b
    new_numbers[dest] = total
    return new_numbers
def opcode_2(i, new_numbers, p_modes):
    """Multiplication instruction: store param1 * param2 at the address in param3.

    p_modes[0] / p_modes[1] pick position (0) or immediate (1) mode for the
    two operands; the destination is always treated as an address. Mutates
    and returns new_numbers.
    """
    param_a = new_numbers[i+1]
    param_b = new_numbers[i+2]
    dest = new_numbers[i+3]
    mode_a, mode_b = p_modes[0], p_modes[1]
    if (mode_a == 0) & (mode_b == 0):
        product = new_numbers[param_a] * new_numbers[param_b]
    elif (mode_a == 1) & (mode_b == 0):
        product = param_a * new_numbers[param_b]
    elif (mode_a == 0) & (mode_b == 1):
        product = new_numbers[param_a] * param_b
    else:
        product = param_a * param_b
    new_numbers[dest] = product
    return new_numbers
def opcode_3(i, new_numbers, inpt):
    """Input instruction: store the supplied value *inpt* at the address given
    by the instruction's single parameter.

    Bug fix: the original body read an undefined global `input_value` instead
    of the `inpt` parameter, raising NameError whenever it executed.
    """
    dest = new_numbers[i+1]
    new_numbers[dest] = inpt
    return new_numbers
# +
# from puzzle n2 copy the intcode function

def modifiedintcodefunction(numbers, input_value):
    """Run an Intcode program supporting opcodes 1, 2, 3, 4 and 99.

    Bug fixes relative to the original:
      * dispatch on the decoded two-digit `opcode`, not on the raw
        instruction value (parameter-mode prefixes made e.g. 1002 match no
        branch at all);
      * parameter modes are converted to ints, first parameter first, so the
        opcode_1/opcode_2 helpers can index them as p_modes[0]/p_modes[1]
        (the original passed the raw dict, whose integer indexing raised
        KeyError);
      * opcode 3 receives `input_value` (the original passed the undefined
        name `inpt`);
      * an unknown opcode raises ValueError instead of `continue`-ing
        forever on the same instruction.

    Returns the first memory cell after the program halts, as before.
    NOTE(review): the part-1 diagnostic code is the last element of
    output_values, which this function still discards — confirm callers.
    """
    new_numbers = [num for num in numbers]
    i = 0
    output_values = []
    while i < len(new_numbers):
        opcode = opcode_instructions(new_numbers[i])
        p_modes_dic = extract_p_modes(new_numbers[i])
        # Modes ordered first parameter first; missing digits default to 0
        p_modes = [int(p_modes_dic.get(f'p_mode_{n}', '0')) for n in (1, 2, 3)]
        if opcode == 1:
            new_numbers = opcode_1(i, new_numbers, p_modes)
            i = i + 4
        elif opcode == 2:
            new_numbers = opcode_2(i, new_numbers, p_modes)
            i = i + 4
        elif opcode == 3:
            new_numbers = opcode_3(i, new_numbers, input_value)
            i = i + 2
        elif opcode == 4:
            output_values.append(new_numbers[i+1])
            i = i + 2
        elif opcode == 99:
            break
        else:
            raise ValueError(f"unknown opcode {opcode} at position {i}")
    #Return the first item after the code has run.
    first_item = new_numbers[0]
    return first_item
| puzzle_notebooks/puzzle5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PySpark (Spark 2.1.0)
# language: python
# name: pyspark
# ---
# # RDD basics
#
# This notebook will introduce **three basic but essential Spark operations**. Two of them are the transformations map and filter. The other is the action collect. At the same time we will introduce the concept of persistence in Spark.
# ## Getting the data and creating the RDD
#
# We will use the reduced dataset (10 percent) provided for the KDD Cup 1999, containing nearly half million network interactions. The file is provided as a Gzip file that we will download locally.
import urllib
# NOTE(review): Python 2 style — `urllib.urlretrieve` moved to
# `urllib.request.urlretrieve` in Python 3; this notebook targets the
# Python 2 / Spark 2.1 kernel declared in its header.
f = urllib.urlretrieve ("http://kdd.ics.uci.edu/databases/kddcup99/kddcup.data_10_percent.gz", "kddcup.data_10_percent.gz")
# Now we can use this file to create our RDD.
data_file = "./kddcup.data_10_percent.gz"
# `sc` is the SparkContext assumed to be provided by the PySpark kernel;
# textFile handles the gzip transparently.
raw_data = sc.textFile(data_file)
# ## The filter transformation
#
# This transformation can be applied to RDDs in order to keep just elements that satisfy a certain condition. More concretely, a function is evaluated on every element in the original RDD. The new resulting RDD will contain just those elements that make the function return True.
# For example, imagine we want to count how many normal. interactions we have in our dataset. We can filter our raw_data RDD as follows.
# Keep only rows tagged 'normal.' — filter is lazy, nothing runs yet.
normal_raw_data = raw_data.filter(lambda x: 'normal.' in x)
# Now we can count how many elements we have in the new RDD.
from time import time
t0 = time()
# count() is an action, so it triggers the actual distributed computation.
normal_count = normal_raw_data.count()
tt = time() - t0
print "There are {} 'normal' interactions".format(normal_count)
print "Count completed in {} seconds".format(round(tt,3))
# The **real calculations** (distributed) in Spark **occur when we execute actions and not transformations.** In this case counting is the action that we execute in the RDD. We can apply as many transformations as we would like in a RDD and no computation will take place until we call the first action which, in this case, takes a few seconds to complete.
#
# ## The map transformation
#
#
# By using the map transformation in Spark, we can apply a function to every element in our RDD. **Python's lambdas are especially expressive for this particular case.**
#
# In this case we want to read our data file as a CSV formatted one. We can do this by applying a lambda function to each element in the RDD as follows.
from pprint import pprint
csv_data = raw_data.map(lambda x: x.split(","))
t0 = time()
head_rows = csv_data.take(5)
tt = time() - t0
print "Parse completed in {} seconds".format(round(tt,3))
pprint(head_rows[0])
# Again, **all action happens once we call the first Spark action** (i.e. take in this case). What if we take a lot of elements instead of just the first few?
t0 = time()
head_rows = csv_data.take(100000)
tt = time() - t0
print "Parse completed in {} seconds".format(round(tt,3))
# We can see that it takes longer. The map function is applied now in a distributed way to a lot of elements on the RDD, hence the longer execution time.
# ## Using map and predefined functions
#
#
# Of course we can use predefined functions with map. Imagine we want to have each element in the RDD as a key-value pair where the key is the tag (e.g. normal) and the value is the whole list of elements that represents the row in the CSV formatted file. We could proceed as follows.
# +
def parse_interaction(line):
    """Turn one CSV line into a (tag, fields) pair.

    The interaction tag (e.g. 'normal.') lives in column 41 of the KDD
    Cup 1999 data; the value is the full list of split fields for the row.
    """
    fields = line.split(",")
    return (fields[41], fields)
key_csv_data = raw_data.map(parse_interaction)
head_rows = key_csv_data.take(5)
pprint(head_rows[0])
# -
# ## The collect action
#
# **Basically it will get all the elements in the RDD into memory for us to work with them.** For this reason it has to be used with care, especially when working with large RDDs.
#
# An example using our raw data.
t0 = time()
all_raw_data = raw_data.collect()
tt = time() - t0
print "Data collected in {} seconds".format(round(tt,3))
# Every Spark worker node that has a fragment of the RDD has to be coordinated in order to retrieve its part, and then reduce everything together.
# As a last example combining all the previous, we want to collect all the normal interactions as key-value pairs.
# +
# get data from file
data_file = "./kddcup.data_10_percent.gz"
raw_data = sc.textFile(data_file)
# parse into key-value pairs
key_csv_data = raw_data.map(parse_interaction)
# filter normal key interactions
normal_key_interactions = key_csv_data.filter(lambda x: x[0] == "normal.")
# collect all
t0 = time()
# collect() pulls the whole filtered RDD into driver memory — only safe
# here because the 'normal.' subset of this reduced dataset is small.
all_normal = normal_key_interactions.collect()
tt = time() - t0
normal_count = len(all_normal)
print "Data collected in {} seconds".format(round(tt,3))
print "There are {} 'normal' interactions".format(normal_count)
# -
# This count matches with the previous count for normal interactions. The new procedure is more time consuming. This is because we retrieve all the data with collect and then use Python's len on the resulting list. Before we were just counting the total number of elements in the RDD by using count.
| rdd-basics/rdd-basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:root] *
# language: python
# name: conda-root-py
# ---
# ## Simulating (pseudo-) random numbers in Python
# ### Setup
# +
# numpy is the 'Numerical Python' package
import numpy as np
# Numpy's methods for pseudorandom number generation
import numpy.random as rnd
# scipy is the 'Scientific Python' package
# We'll use this to get the gamma function
from scipy.special import gamma
# -
# Print out the versions of software I'm running
import sys
print("Python version:", sys.version)
print("Numpy version:", np.__version__)
# Reminder that we need a relatively new version of numpy to make
# use of the latest pseudorandom number generation algorithms.
# Bug fix: comparing only the minor version breaks for numpy >= 2.0
# (minor 0 < 17 would wrongly raise); compare (major, minor) instead.
np_major, np_minor = (int(part) for part in np.__version__.split('.')[:2])
if (np_major, np_minor) < (1, 17):
    raise RuntimeError("Need Numpy version >= 1.17")
# ### Random numbers, seeds, accessing docs
# Create a _random number generator_ and call it `rng`.
rng = rnd.default_rng()
# What kind of things can `rng` do? Let's look at the methods available to it using `dir`
print(dir(rng))
# So it can simulate from a bunch of common distributions. That's nice. Let's try to simulate a standard uniform random variable:
print(rng.uniform())
rng.uniform()
# Run that cell a few times, and you'll see it get different numbers.
# Sometimes nice to have _same_ random numbers. To do that, we set the 'seed' to be any fixed number.
# +
print("First run")
print(18*"-")
rng = rnd.default_rng(seed=1)
print(rng.uniform())
print(rng.uniform())
print(rng.uniform())
# +
print("Second run")
print(18*"-")
rng = rnd.default_rng(seed=1)
print(rng.uniform())
print(rng.uniform())
print(rng.uniform())
# -
# Using `rng = rnd.default_rng()` and `rng.uniform()` is a pretty modern ($\ge$ July 26, 2019). The old way was to run `np.random.uniform()` and the RNG was hidden away. Proper nerds (or those with trouble sleeping) can [take a look](https://numpy.org/neps/nep-0019-rng-policy.html) at why the numpy developers moved away from this.
# +
np.random.seed(1)
print(np.random.uniform())
np.random.seed(1)
print(np.random.uniform())
# -
# Say we want to generate some other uniform variable, like $\mathsf{Unif}(a,b)$ with p.d.f.
#
# $$ f(x) = \begin{cases}
# \frac{1}{b-a} & \text{if } a \le x < b \\
# 0 & \text{otherwise.}
# \end{cases}
# $$
#
# Let's use `help` to look at the documentation for the `uniform` method to see if this is builtin.
help(rng.uniform)
# So, let's simulate from $\mathsf{Unif}(-10,10)$.
rng.uniform(low=-10, high=10)
# The `uniform` method has the optional arguments `low`, `high`, and `size`. If we simply use them in this order, we don't need to write `low=` and `high=`, but can directly write:
rng.uniform(-10, 10)
# You can use your own judgement on whether to include the names of the arguments or omit them.
# We can simulate many uniforms at the same time and the result will be an array filled with i.i.d. variables.
rng.uniform(-10, 10, size=5)
# Let's simulate a large number of uniforms, and compare some of empirical quantities against the theoretical quantities.
# The number of random variables to simulate.
R = 10^6
print(R)
# The number of random variables to simulate.
R = 10**6
print(R)
# Simulate a bunch of i.i.d. uniform variables
uniforms = rng.uniform(-10, 10, R)
# Print the sample mean of these observations and the theoretical mean for this distribution
print("Sample mean:", uniforms.mean())
print("Theoretical mean:", (-10 + 10) / 2)
# Print the sample variance of these observations and the theoretical variance for this distribution
print("Sample variance:", uniforms.var())
print("Theoretical variance:", (10 - -10)**2 / 12)
# It certainly looks like we correctly simulated from the desired/target distribution; well, at the very least, we simulated from some distribution which has the same mean and variance as our target distribution.
# Let's try to simulate from a slightly more complicated distribution, the exponential distribution. I'll define $\mathsf{Exp}(\lambda)$ to have p.d.f.
#
# $$ f(x) = \begin{cases}
# \lambda \mathrm{e}^{-\lambda x} & \text{if } x > 0 \\
# 0 & \text{otherwise.}
# \end{cases} $$
#
# There is a `rng.exponential` method, though we should call `help` on it to find out how to give it a specific rate $\lambda$.
help(rng.exponential)
# So, this one only takes a scale parameter, so we'll have to set the scale to be $1/\lambda$. Let's try simulate from the $\mathsf{Exp}(5)$ distribution.
λ = 5
rng.exponential(scale=1/λ)
# _Fun fact: To get $\lambda$ as a variable name, just type `\lambda` then hit TAB. Try `\sigma` and some others._
#
# _Fun fact 2: Can use `sigma` as a variable name, but not `lambda` as it has special significance in Python (for "lambda functions" = "anonymous functions")._
# Let's simulate a bunch of i.i.d. exponentials and check that their sample mean matches the theoretical value of $0.2=1/5$.
exponentials = rng.exponential(1/λ, R)
print("Sample mean:", exponentials.mean())
print("Theoretical mean:", 1/λ)
# The `exponential` function just specifies a `scale` argument, but sometimes the distribution we want to simulate from won't offer us this. There's an easy workaround though: just simulate from the default/unscaled distribution and multiply all the variables by our scale value. E.g. to simulate from the same exponential distribution this way:
exponentials = (1/λ) * rng.exponential(size=R)
print("Sample mean:", exponentials.mean())
print("Theoretical mean:", 1/λ)
# ### Basic plotting
# Import the plotting library matplotlib
import matplotlib.pyplot as plt
plt.hist(exponentials)
plt.hist(exponentials, bins=100, density=True);
# %config InlineBackend.figure_format = 'retina'
plt.hist(exponentials, bins=100, density=True);
x = np.linspace(0, 2.5, 500)
pdf = λ * np.exp(-λ*x)
plt.plot(x, pdf);
plt.hist(exponentials, bins=100, density=True);
plt.plot(x, pdf); # Or plt.plot(x, pdf, 'r');
# Try again with $R = 10^5$ $\mathsf{Gamma}(2,3)$ random variables, where our definition of $\mathsf{Gamma}(r,m)$ has the p.d.f.
#
#
# $$ f(x) = \begin{cases}
# \frac{x^{r-1} \mathrm{e}^{-\frac{x}{m}}}{\Gamma(r) m^r} & \text{if } x > 0 \\
# 0 & \text{otherwise.}
# \end{cases} $$
#
#
R = 10**5
r = 2
m = 3
gammas = rng.gamma(r, m, R)
# +
x = np.linspace(0, 40, 500)
pdf = (x**(r-1) * np.exp(-x/m)) / (gamma(r) * m**r)
plt.hist(gammas, bins=100, density=True)
plt.plot(x, pdf);
# +
R = 10**4
# Mean vector and covariance matrix
n = 2
μ = (1, 2)
σ2 = 2
ρ = 0.8
Σ = σ2 * ( (1-ρ) * np.eye(n) + ρ * np.ones(n) )
# Simulating the index value at T
rng = rnd.default_rng()
normals = rng.multivariate_normal(μ, Σ, size=R)
# +
import pandas as pd
df = pd.DataFrame(normals, columns=["x", "y"])
df
# +
import seaborn as sns
sns.jointplot(x="x", y="y", data=df);
# -
sns.jointplot(x="x", y="y", data=df);
sns.jointplot(x="x", y="y", data=df, kind="kde");
# ### Crude Monte Carlo
# Say that an insurer has $n = 10$ claims each month, and each claim size is $X_i \overset{\mathrm{i.i.d.}}{\sim} \mathsf{Pareto}(\alpha=\frac32)$. The reinsurer will cover the excess of $S_n = \sum_{i=1}^n X_i$ over the threshold $\gamma = 10^2$.
# What is the probability of reinsurer having to payout?
n = 10
α = 3/2
γ = 10**2
R = 10**6
# +
# %%time
rng = rnd.default_rng(1)  # seeded so the estimate is reproducible
numPayouts = 0
# Deliberately naive scalar loop (the notebook contrasts it with the
# vectorised version below): one simulation of n claim sizes per iteration,
# counting the replicates whose total S_n exceeds the threshold γ.
for r in range(R):
    S_n = rng.pareto(α, size=n).sum()
    if S_n > γ:
        numPayouts += 1
print("Probability of reinsurer paying out:", numPayouts / R)
# -
# This last cell is quite clunky and slow; please never write code like that. The preferred way is the vectorised code below. Firstly note that if we give a list to the `size=` parameter (actually we use an immutable list called a 'tuple') it returns a matrix of i.i.d. Paretos:
rng.pareto(α, size=(2,2))
# Now if we generate all the Pareto variable we need at once, everything is more efficient.
# +
# %%time
rng = rnd.default_rng(1)  # same seed as the loop version, for comparison
# Simulate all R x n claim sizes at once; summing across claims (axis=1)
# gives R i.i.d. replicates of the monthly total S_n.
losses = rng.pareto(α, size=(R,n)).sum(axis=1)
ests = losses > γ
ellHat = ests.mean()  # fraction of replicates exceeding γ = MC estimate
print("Probability of reinsurer paying out:", ellHat)
# -
# Truncate the histogram at 1.5γ so the heavy Pareto tail doesn't
# flatten the plot; dashed line marks the payout threshold.
plt.hist(losses[losses < 1.5*γ], bins=100, density=True)
plt.axvline(γ, color="black", linestyle="dashed");
# 95% normal-approximation confidence interval for the MC estimate.
sigmaHat = ests.std()
widthCI = 1.96 * sigmaHat / np.sqrt(R)
CIs = (ellHat - widthCI, ellHat + widthCI)
print(f"Probability of reinsurer paying out: {ellHat} (+- {widthCI:f}))")
# _Bonus question: Can compare to [series expansion](http://docs.isfa.fr/labo/2012.16.pdf) by ISFA's Quang Huy Nguyen and co-authors._
# How much is the reinsurer paying out on average?
# +
rng = rnd.default_rng(1)
losses = rng.pareto(α, size=(R,n)).sum(axis=1)
payouts = np.maximum(losses - γ, 0)
np.mean(payouts)
# -
# __Note__: We can't calculate confidence intervals here using the normal approach. We're in the unlucky case that our variables have infinite variance and the CLT doesn't apply.
# What is the expected payout for the of reinsurer conditioned on the event of a payout?
# +
rng = rnd.default_rng(1)
losses = rng.pareto(α, size=(R,n)).sum(axis=1)
bigLosses = losses[losses > γ]
payouts = bigLosses - γ
np.mean(payouts)
# -
print(f"We had {len(bigLosses)} reinsurer payouts out of 10^{int(np.log10(R))} simulations.")
plt.hist(bigLosses, bins=100);
# What about the 99.9% Value-at-Risk for the reinsurer?
# +
rng = rnd.default_rng(1)
losses = rng.pareto(α, size=(R,n)).sum(axis=1)
payouts = np.maximum(losses - γ, 0)
np.quantile(payouts, 0.999)
# -
# Let's consider a financial example. Say that $X_i$ is the future stock price for company $i$ at expiry time $T$.
# We assume the Black-Scholes model, so $X_i \sim \mathsf{Lognormal}(\mu_i, \sigma^2)$, and assume a constant correlation $\rho$ between each pair of stocks.
#
# Let's imagine we have a simple index which tracks $n$ of these stocks, so at time $T$ is will have the value
# $$ S_T = \sum_{i=1}^n X_i . $$
#
#
# What would be the value of a call option on this index, i.e., what is
# $$ \mathbb{E}[ \mathrm{e}^{-r T} ( S_T - K )_+ ] \, ?$$
# (Let's ignore the $\mathbb{Q}$ measure here.)
#
# Set $n = 2$, $r = 0.05$, $T = 1$, $\mu_i = \frac{i}{10}$, $\sigma^2 = \frac{1}{10}$, $\rho = 0.25$, $K = 2$.
# +
# Problem constants
n = 2
r = 0.05
T = 1
K = 3
ρ = -0.5
σ2 = 1/10
R = 10**6
# Mean vector and covariance matrix
μ = np.arange(1, n+1) / 10
Σ = σ2 * ( (1-ρ) * np.eye(n) + ρ * np.ones(n) )
# Simulating the index value at T
rng = rnd.default_rng()
normals = rng.multivariate_normal(μ, Σ, size=R)
Xs = np.exp(normals)
Ss = Xs.sum(axis=1)
# Calculating the MC estimate and CIs
ests = np.exp(-r*T) * np.maximum(Ss - K, 0)
ellHat = ests.mean()
sigmaHat = ests.std()
widthCI = 1.96 * sigmaHat / np.sqrt(R)
print(f"Option value: {ellHat} (+- {widthCI:f}))")
ests = (Ss > K)
ellHat = ests.mean()
sigmaHat = ests.std()
widthCI = 1.96 * sigmaHat / np.sqrt(R)
print(f"Probability of payout: {ellHat} (+- {widthCI:f}))")
# -
Ss.mean()
| codedemos/CodeL1i.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="1_1EIF_zc_IX"
import warnings
warnings.filterwarnings("ignore")
# Basics - Data Handling
import numpy as np
import pandas as pd
# sklearn Classification and Model Building
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import StackingClassifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# Cross Validation
from sklearn.model_selection import GridSearchCV
# Scoring functions
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
# Plotting
import matplotlib.pyplot as plt
# -
# ## Read Data
# Load the SMOTE-balanced training set; first CSV column is the index.
X_train = pd.read_csv("datasets/train_smote.csv", index_col=0)
# Split the target column "y" off; X_train keeps only the features.
y_train = X_train["y"]
X_train.drop(["y"], axis=1, inplace=True)
X_train.head(2)
# ## Grid Search with 5-fold CV
# ## [MLP](https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html)
# +
# GridSearch for MLP
parameters = {'hidden_layer_sizes':[(50,),(100,),(200,)], 'learning_rate':['constant', 'adaptive'], 'early_stopping':[True, False]}
model = MLPClassifier(random_state=42)
clf = GridSearchCV(model, parameters, scoring="f1")
clf.fit(X_train, y_train)
df = pd.DataFrame(clf.cv_results_)
df.to_csv("parameter_search/mlp_param_tuning.csv")
df
# -
# ## MLP with Pipeline
# +
# GridSearch for MLP, Standard Scaling
parameters = {'mlp__hidden_layer_sizes':[(50,),(100,),(200,)], 'mlp__learning_rate':['constant', 'adaptive'], 'mlp__early_stopping':[True, False]}
model = Pipeline([('scaler', StandardScaler()), ('mlp', MLPClassifier(max_iter=1000, random_state=42))])
clf = GridSearchCV(model, parameters, scoring="f1")
clf.fit(X_train, y_train)
df = pd.DataFrame(clf.cv_results_)
df.to_csv("parameter_search/mlp_stdscaler_param_tuning.csv")
df
# -
# ## [Random Forest](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)
# +
# GridSearch for Random Forest
parameters = {'max_depth':[None, 10, 7, 5], 'min_samples_split':[2, 3, 4]}
model = RandomForestClassifier(random_state=42)
clf = GridSearchCV(model, parameters, scoring="f1")
clf.fit(X_train, y_train)
df = pd.DataFrame(clf.cv_results_)
df.to_csv("parameter_search/rf_param_tuning.csv")
df
# -
# ## [AdaBoost](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostClassifier.html)
# +
# GridSearch for AdaBoost
parameters = {'algorithm':['SAMME', 'SAMME.R']}
model = AdaBoostClassifier(random_state=42)
clf = GridSearchCV(model, parameters, scoring="f1")
clf.fit(X_train, y_train)
df = pd.DataFrame(clf.cv_results_)
df.to_csv("parameter_search/adaboost_param_tuning.csv")
df
# + [markdown] id="BmeF4nFfip6H"
# ## [KNN](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html)
# + colab={"base_uri": "https://localhost:8080/", "height": 264} id="pLQt-lU6c_Iv" outputId="c6d97b1c-2532-4651-ba14-b934fc607ec1"
# GridSearch for KNN
parameters = {'n_neighbors':[3, 5, 10, 15]}
model = KNeighborsClassifier()
clf = GridSearchCV(model, parameters, scoring="f1")
clf.fit(X_train, y_train)
df = pd.DataFrame(clf.cv_results_)
df.to_csv("parameter_search/knn_param_tuning.csv")
df
# -
# ## [KNN with Pipeline](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html)
# +
# GridSearch for KNN with StandardScaler
parameters = {'knn__n_neighbors':[3, 5, 10, 15]}
model = Pipeline([('scaler', StandardScaler()), ('knn', KNeighborsClassifier())])
clf = GridSearchCV(model, parameters, scoring="f1")
clf.fit(X_train, y_train)
df = pd.DataFrame(clf.cv_results_)
df.to_csv("parameter_search/knn_stdscaler_param_tuning.csv")
df
# + [markdown] id="-SzwHCy8izks"
# ## [DecisionTreeClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="LLKBdxh1fUNv" outputId="09aaab55-a9ff-42ce-f436-4c39691488af"
# GridSearch for DecisionTreeClassifier
parameters = {'criterion':['gini', 'entropy'], 'splitter':['best', 'random'], 'max_depth':[None, 10, 7, 5], 'min_samples_split':[2, 3, 4]}
model = DecisionTreeClassifier(random_state=42)
clf = GridSearchCV(model, parameters, scoring="f1")
clf.fit(X_train, y_train)
df = pd.DataFrame(clf.cv_results_)
df.to_csv("parameter_search/decisiontree_param_tuning.csv")
df
# + [markdown] id="e_m7XHHKjpTh"
# ## [SGDClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html)
# + colab={"base_uri": "https://localhost:8080/", "height": 878} id="5Z474hdHjQFg" outputId="360b295a-3b6c-4589-8c67-e94b4f63813f"
# GridSearch for SGDClassifier
parameters = {'penalty':['l2','l1','elasticnet'], 'learning_rate':['constant','optimal','invscaling','adaptive'], 'eta0':[0.001, 0.05, 0.01, 0.1, 0.5, 1]}
model = SGDClassifier(random_state=42)
clf = GridSearchCV(model, parameters, scoring="f1")
clf.fit(X_train, y_train)
df = pd.DataFrame(clf.cv_results_)
df.to_csv("parameter_search/sgd_param_tuning.csv")
df
# + [markdown] id="55jF432Yk0As"
# ## [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html)
# + colab={"base_uri": "https://localhost:8080/", "height": 982} id="wmVOiu2RkSxJ" outputId="eae964f9-2b96-4b57-8628-c5b132e277cc"
# GridSearch for LogisticRegression
parameters = {'penalty':['l2','l1','elasticnet'], 'C':[1, 10, 50, 100, 200, 500], 'solver':['newton-cg','lbfgs','liblinear','sag','saga']}
model = LogisticRegression(random_state=42)
clf = GridSearchCV(model, parameters, scoring="f1")
clf.fit(X_train, y_train)
df = pd.DataFrame(clf.cv_results_)
df.to_csv("parameter_search/logistic_param_tuning.csv")
df
# -
# ## [SVM with Pipeline](https://scikit-learn.org/stable/tutorial/statistical_inference/putting_together.html)
# + colab={"base_uri": "https://localhost:8080/", "height": 932} id="zeUDosPqPgBa" outputId="e3184e64-eb3e-4a11-849e-9947d34746f1"
# GridSearch for SVM, StandardScaler - Run on Google Colab
parameters = {'svc__C':[1, 10, 50, 100, 200, 500], 'svc__gamma':['scale', 'auto']}
model = Pipeline([('scaler', StandardScaler()), ('svc', SVC(random_state=42))])
clf = GridSearchCV(model, parameters, scoring="f1", verbose=4, n_jobs=-1)
clf.fit(X_train, y_train)
df = pd.DataFrame(clf.cv_results_)
df.to_csv("/content/drive/MyDrive/ms4610_project/parameter_search/svm_stdscaler_param_tuning.csv")
df
# -
# ## [SVM](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html)
# +
# GridSearch for SVM
parameters = {'C':[1, 10, 50, 100, 200, 500], 'gamma':['scale', 'auto']}
model = SVC(random_state=42)
clf = GridSearchCV(model, parameters, scoring="f1")
# Bug fix: the search was never fitted before reading cv_results_, which
# raises NotFittedError. Fit on the training data like every other model.
clf.fit(X_train, y_train)
df = pd.DataFrame(clf.cv_results_)
df.to_csv("parameter_search/svm_param_tuning.csv")
df
# -
# ## Consolidating the best set of parameters
# +
import os
files = os.listdir("parameter_search/")
for file in files:
df = pd.read_csv("parameter_search/"+file, index_col=0).sort_values(by=["rank_test_score"])
print(file.split(".")[0], *df[df["rank_test_score"]==1]["params"], "\n")
# +
models = []
mean_score = []
std_score = []
for file in files:
print(file)
df = pd.read_csv("parameter_search/"+file, index_col=0)
display(df[df["rank_test_score"]==1][["mean_test_score", "std_test_score"]])
models.append(file.split("_param_tuning.csv")[0])
mean_score.append(df[df["rank_test_score"]==1]["mean_test_score"].unique()[0])
std_score.append(df[df["rank_test_score"]==1]["std_test_score"].unique()[0])
# +
# Build display names: "<model>_SCALED" when the file name carried a
# scaler suffix (e.g. "mlp_stdscaler"), otherwise the upper-cased name.
x_old = [i.upper() for i in models]
x = []
for i in x_old:
    if len(i.split("_")) == 2:
        x.append(i.split("_")[0]+"_SCALED")
    else:
        x.append(i)
# Scores were collected as fractions; plot them as percentages.
y = [i*100 for i in mean_score]
yerr = [i*100 for i in std_score]
plt.figure(figsize=[15,8])
plt.bar(x, y, yerr=yerr, color="dodgerblue", alpha=0.5, edgecolor="darkslategray")
plt.xlabel("Models")
# Bug fix: every grid search above uses scoring="f1" and the companion
# plot below is labelled "F1 Score" — label this chart consistently.
plt.ylabel("F1 Score")
plt.title("F1 Score of the models (best parameter set) on Training data")
plt.show()
# -
df = pd.DataFrame({'Models':x, 'Mean Accuracy':y, 'Std Accuracy':yerr})
df.sort_values(by=["Mean Accuracy"], inplace=True)
df.plot.bar(x="Models", y="Mean Accuracy", yerr="Std Accuracy", color="dodgerblue", alpha=0.5,
edgecolor="darkslategray", title="F1 Score of the models (best parameter set) on Training data", legend=False, figsize=[15,6], rot=0, grid=True, ylim=[0,100])
plt.ylabel("F1 Score")
plt.show()
df
| 3_gridsearch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Check unuploaded files
#
# Three possible checks:
#
# - `no_matching_records`: Files that should have a matching import record, but don't
# - `matching_records_blank`: Files that have a matching import record, but no data on the corresponding form (happens when another processed file has been uploaded to the record, but not this file)
# - `orphaned_records`: Records with an ID that isn't matched by any file. (Sanity check.)
# + tags=["parameters"]
form = None
target = None
output_dir = None
# event = None
# + tags=["parameters-eval"]
if target is not None:
assert target in ["no_matching_records", "matching_records_blank", "orphaned_records"]
# -
import pandas as pd
import os
import redcap as rc
import numpy as np
import os
import sys
sys.path.append('/sibis-software/python-packages/')
import sibispy
from sibispy import sibislogger as slog
from IPython.display import display
pd.set_option("display.max_rows", 999)
pd.set_option("display.max_columns", 500)
# # 1. Load data
# +
session = sibispy.Session()
if not session.configure():
sys.exit()
slog.init_log(None, None,
'QC: Check all harvester-prepared CSVs are uploaded',
'check_unuploaded_files', None)
slog.startTimer1()
# Setting specific constants for this run of QC
api = session.connect_server('import_laptops', True)
primary_key = api.def_field
# -
meta = api.export_metadata(format='df')
form_names = meta.form_name.unique().tolist()
if form is not None:
# FIXME: This is incorrect - needs to reflect the short_to_long, etc.
if not form in form_names:
raise KeyError("{} not among Import Project forms".format(form))
form_names_subset = [form]
else:
form_names_subset = form_names
# +
# # Taken from http://pycap.readthedocs.io/en/latest/deep.html#dealing-with-large-exports
# # and adapted to scope down to forms
# def chunked_export(project, form, chunk_size=100, verbose=True):
# def chunks(l, n):
# """Yield successive n-sized chunks from list l"""
# for i in range(0, len(l), n):
# yield l[i:i+n]
# record_list = project.export_records(fields=[project.def_field])
# records = [r[project.def_field] for r in record_list]
# #print "Total records: %d" % len(records)
# try:
# response = None
# record_count = 0
# for record_chunk in chunks(records, chunk_size):
# record_count = record_count + chunk_size
# #print record_count
# chunked_response = project.export_records(records=record_chunk,
# fields=[project.def_field],
# forms=[form],
# format='df',
# df_kwargs={'low_memory': False})
# if response is not None:
# response = pd.concat([response, chunked_response], axis=0)
# else:
# response = chunked_response
# except rc.RedcapError:
# msg = "Chunked export failed for chunk_size={:d}".format(chunk_size)
# raise ValueError(msg)
# else:
# return response
# +
# def load_form(api, form_name, verbose=True):
# if verbose:
# print(form_name)
# # 1. Standard load attempt
# # try:
# # print "Trying standard export"
# # return api.export_records(fields=[api.def_field],
# # forms=[form_name],
# # format='df',
# # df_kwargs={'low_memory': False})
# # except (ValueError, rc.RedcapError, pd.io.common.EmptyDataError):
# # pass
# try:
# print("Trying chunked export, 5000 records at a time")
# return chunked_export(api, form_name, 5000)
# except (ValueError, rc.RedcapError, pd.io.common.EmptyDataError):
# pass
# # 2. Chunked load with chunk size of 1000
# try:
# print("Trying chunked export, 1000 records at a time")
# return chunked_export(api, form_name, 1000)
# except (ValueError, rc.RedcapError, pd.io.common.EmptyDataError):
# pass
# # 2. Chunked load with default chunk size
# try:
# print("Trying chunked export, default chunk size (100)")
# return chunked_export(api, form_name, 100)
# except (ValueError, rc.RedcapError, pd.io.common.EmptyDataError):
# pass
# # 3. Chunked load with tiny chunk
# try:
# print("Trying chunked export with tiny chunks (10)")
# return chunked_export(api, form_name, 10)
# except (ValueError, rc.RedcapError, pd.io.common.EmptyDataError):
# print("Giving up")
# return None
# def load_form_with_primary_key(api, form_name, verbose=True):
# df = load_form(api, form_name, verbose)
# if df is not None:
# return df.set_index(api.def_field)
# -
from load_utils import load_form_with_primary_key
all_data = {form_name: load_form_with_primary_key(api, form_name) for form_name in form_names_subset}
# # 2. Extract emptiness statistic from Import records
def count_non_nan_rowwise(df, form_name=None, drop_columns=None):
    """Count non-NaN values in each row of ``df``, excluding bookkeeping columns.

    Parameters
    ----------
    df : pd.DataFrame
        Records to summarise.
    form_name : str, optional
        If given, the REDCap "<form_name>_complete" column is excluded
        from the count.
    drop_columns : list of str, optional
        Additional columns to exclude. (Bug fix: the parameter was
        previously named ``drop_column`` while the body referenced
        ``drop_columns``, so every call raised NameError.)

    Returns
    -------
    pd.Series
        Per-row count of non-NaN values.
    """
    # Copy so we never mutate the caller's list.
    drop_columns = [] if drop_columns is None else list(drop_columns)
    if form_name:
        drop_columns.append(form_name + '_complete')
    # 2. count up NaNs
    return df.drop(drop_columns, axis=1).notnull().sum(axis=1)
# Apply to DF to get all empty records
def set_emptiness_flags(row, form_name, drop_columns=None):
    """Summarise one record's form: completion status plus filled-field count.

    Returns a Series with 'completion_status' (value of the REDCap
    "<form_name>_complete" field) and 'non_nan_count' (number of non-NaN
    entries outside the dropped columns).
    """
    complete_field = form_name + '_complete'
    # The completion flag itself never counts as data.
    if drop_columns:
        drop_columns.append(complete_field)
    else:
        drop_columns = [complete_field]
    # NOTE: Series-only; for a whole DataFrame use
    # df.drop(drop_columns, axis=1).notnull().sum(axis=1) instead.
    filled = row.drop(drop_columns).notnull().sum()
    return pd.Series({'completion_status': row[complete_field],
                      'non_nan_count': filled})
# Build one emptiness summary frame per form, skipping forms whose
# export failed (None entries in all_data).
emptiness_df = {form_name: all_data[form_name].apply(lambda x: set_emptiness_flags(x, form_name), axis=1)
for form_name in all_data.keys()
if all_data[form_name] is not None}
#all_data['recovery_questionnaire'].apply(lambda x: set_emptiness_flags(x, 'recovery_questionnaire'), axis=1)
# Tag each summary with its form name, then stack them into one frame.
for form_name in emptiness_df.keys():
    emptiness_df[form_name]['form'] = form_name
all_forms_emptiness = pd.concat(emptiness_df.values())
all_forms_emptiness.shape
# # 3. Load files
# Map the short folder names used on disk to their full REDCap form names.
short_to_long = {
    # Forms for Arm 1: Standard Protocol
    'dd100': 'delayed_discounting_100',
    'dd1000': 'delayed_discounting_1000',
    'pasat': 'paced_auditory_serial_addition_test_pasat',
    'stroop': 'stroop',
    'ssaga_youth': 'ssaga_youth',
    'ssaga_parent': 'ssaga_parent',
    'youthreport1': 'youth_report_1',
    'youthreport1b': 'youth_report_1b',
    'youthreport2': 'youth_report_2',
    'parentreport': 'parent_report',
    'mrireport': 'mri_report',
    'plus': 'participant_last_use_summary',
    'myy': 'midyear_youth_interview',
    'lssaga1_youth': 'limesurvey_ssaga_part_1_youth',
    'lssaga2_youth': 'limesurvey_ssaga_part_2_youth',
    'lssaga3_youth': 'limesurvey_ssaga_part_3_youth',
    'lssaga4_youth': 'limesurvey_ssaga_part_4_youth',
    'lssaga1_parent': 'limesurvey_ssaga_part_1_parent',
    'lssaga2_parent': 'limesurvey_ssaga_part_2_parent',
    'lssaga3_parent': 'limesurvey_ssaga_part_3_parent',
    'lssaga4_parent': 'limesurvey_ssaga_part_4_parent',
    # Forms for Arm 3: Sleep Studies
    'sleepeve': 'sleep_study_evening_questionnaire',
    'sleeppre': 'sleep_study_presleep_questionnaire',
    'sleepmor': 'sleep_study_morning_questionnaire',
    # Forms for Recovery project
    'recq': 'recovery_questionnaire',
    # Forms for UCSD
    'parent': 'ssaga_parent',
    'youth': 'ssaga_youth',
    'deldisc': 'delayed_discounting'
}
# Harvest candidate CSV files from the import tree and tag each with its
# REDCap form name (via short_to_long), keeping only forms of interest.
files_df = pd.DataFrame(columns=["file", "path", "form"])
records = []
record_paths = []
folder_frames = []  # collect per-folder frames; concatenate once at the end
for root, subdirs, files in os.walk('/fs/storage/laptops/imported'):
    # Data files only; "-fields.csv" files are metadata exports.
    csv_files = [f for f in files if (f.endswith(".csv") and not f.endswith("-fields.csv"))]
    if not csv_files:
        continue
    # The containing folder's short name identifies the form.
    current_folder = root.split('/')[-1]
    try:
        form = short_to_long[current_folder]
    except KeyError:
        # Folder does not correspond to a known form - skip it.
        continue
    if form not in form_names_subset:
        continue
    folder_df = pd.DataFrame(columns=["file", "path", "form"])
    folder_df['file'] = csv_files
    folder_df['path'] = [root + "/" + f for f in csv_files]
    folder_df['form'] = form
    folder_frames.append(folder_df)
if folder_frames:
    # Single concat instead of concatenating inside the loop (which is
    # quadratic in the number of folders).
    files_df = pd.concat([files_df] + folder_frames)
files_df.set_index("path", inplace=True)
def getRecordIDFromFile(row):
    """Derive the REDCap record ID from a harvested CSV file name.

    Strips the ".csv" extension and surrounding whitespace; for the
    delayed-discounting form it also removes the trailing "-100"/"-1000"
    task suffix, since both task variants belong to the same record.
    """
    import re
    # str.strip() replaces the previous re.sub(r"^\s+|\s+$", ...) idiom.
    bare_file = re.sub(r"\.csv$", "", row["file"]).strip()
    if row["form"] == "delayed_discounting":
        # Raw string (was a plain string literal); same pattern.
        bare_file = re.sub(r"-1000?$", "", bare_file)
    return bare_file
# Record ID for each harvested file, derived from its file name.
files_df["record_id"] = files_df.apply(getRecordIDFromFile, axis=1)
files_df.head()
def fixFormName(row):
    """Return the disambiguated form name for a harvested file.

    Generic "delayed_discounting" entries are split into the -100 and
    -1000 task variants based on the file-name suffix; every other form
    name is returned unchanged.
    """
    import re
    form = row["form"]
    if form != "delayed_discounting":
        return form
    filename = row["file"]
    if re.search(r"-100\.csv$", filename):
        return "delayed_discounting_100"
    if re.search(r"-1000\.csv$", filename):
        return "delayed_discounting_1000"
    return "delayed_discounting"
# Disambiguate delayed-discounting variants, then outer-join harvested files
# with the REDCap emptiness summary on (record_id, form). A null side after
# the join flags files without records, or records without files.
files_df["form"] = files_df.apply(fixFormName, axis=1)
files_in_redcap = pd.merge(files_df.reset_index(),
                           all_forms_emptiness.reset_index(),
                           on=["record_id", "form"],
                           how="outer")
files_in_redcap.head()
if output_dir is not None:
    files_in_redcap.to_csv(os.path.join(output_dir, "all_files_upload_status.csv"), index=False)
# # 4. Get results
# ## Processed files that weren't matched at all
if (target is None) or (target == "no_matching_records"):
    # Files whose (record_id, form) never matched a REDCap record:
    # completion_status comes from the REDCap side of the outer join,
    # so a null there means no record was found.
    unmatched_files = files_in_redcap.loc[files_in_redcap.completion_status.isnull()]
    if output_dir is not None:
        unmatched_files.to_csv(os.path.join(output_dir, "no_matching_records.csv"), index=False)
    display(unmatched_files)
# ## Files that were matched but have blank forms
def check_if_file_empty(row):
    """Return the number of fully-populated (no-NaN) columns in the CSV at
    ``row['path']``; 0 means the file carries no usable values."""
    csv_contents = pd.read_csv(row['path'])
    populated = csv_contents.dropna(axis="columns")
    return populated.shape[1]
if (target is None) or (target == "matching_records_blank"):
    # Records that matched a harvested file but contain no values in REDCap.
    matched_blank_index = files_in_redcap['path'].notnull() & (files_in_redcap['non_nan_count'] == 0)
    # For those rows, count the usable columns in the source CSV; a positive
    # count means the file has data that never made it into REDCap.
    files_in_redcap.loc[matched_blank_index, 'file_value_count'] = (
        files_in_redcap
        .loc[matched_blank_index]
        .apply(check_if_file_empty, axis=1))
    matched_blank = files_in_redcap.loc[matched_blank_index & (files_in_redcap['file_value_count'] > 0)]
    if output_dir is not None:
        matched_blank.to_csv(os.path.join(output_dir, "matched_blank.csv"), index=False)
    display(matched_blank)
# ## Records that don't match harvested CSV files
if (target is None) or (target == "orphaned_records"):
    # Non-empty REDCap records with no corresponding harvested file.
    orphaned_records = files_in_redcap.loc[files_in_redcap['path'].isnull() & (files_in_redcap['non_nan_count'] > 0)]
    if (output_dir is not None) and (orphaned_records.shape[0] > 0):
        orphaned_records.to_csv(os.path.join(output_dir, "orphaned_records.csv"), index=False)
    display(orphaned_records)
| scripts/qc/check_unuploaded_files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:metis] *
# language: python
# name: conda-env-metis-py
# ---
import pandas as pd
import numpy as np
from collections import defaultdict
import pickle
# # Load
# Function to import the MAP assessments from csv files which contain the RIT Scores for K-12.
# Function to import the SBAC assessments from its csv file.
# +
def load_map_df_from_csv(start_year, end_year):
    """Load and stack the yearly MAP assessment CSV exports.

    Parameters
    ----------
    start_year, end_year : int
        Inclusive range of starting school years to load.

    Returns
    -------
    pd.DataFrame or None
        All years' rows concatenated with a fresh integer index, or None
        when the year range is empty (matching the original behavior).
    """
    data_folder = './data/raw/'
    filename_common = '_ADR-301-MAP-Assessment-Scores.csv'
    # Collect the per-year frames and concatenate once: DataFrame.append was
    # deprecated and removed in pandas 2.0, and repeated appends are quadratic.
    frames = [pd.read_csv(data_folder + str(yr) + filename_common, encoding="ISO-8859-1")
              for yr in range(start_year, end_year + 1)]
    if not frames:
        return None
    return pd.concat(frames, ignore_index=True)
def load_sbac_df_from_csv(start_year, end_year):
    """Load and stack the yearly SBAC state-assessment CSV exports.

    Parameters
    ----------
    start_year, end_year : int
        Inclusive range of starting school years to load.

    Returns
    -------
    pd.DataFrame or None
        All years' rows concatenated with a fresh integer index, or None
        when the year range is empty (matching the original behavior).
    """
    data_folder = './data/raw/'
    filename_common = '_ADR-311-State-Assessment-Scores.csv'
    # Concatenate once: DataFrame.append was deprecated and removed in
    # pandas 2.0, and repeated appends are quadratic.
    frames = [pd.read_csv(data_folder + str(yr) + filename_common, encoding="ISO-8859-1")
              for yr in range(start_year, end_year + 1)]
    if not frames:
        return None
    return pd.concat(frames, ignore_index=True)
# -
# Load the testing data:
# Load the MAP (2015-2018) and SBAC (2018) testing data.
map_df = load_map_df_from_csv(2015,2018)
print("{:,}".format(map_df.StudentID.nunique()),"K-12 students of MAP data loaded.")
sbac_df = load_sbac_df_from_csv(2018,2018)
print("{:,}".format(sbac_df.StudentID.nunique()),"K-12 students of SBAC data loaded.")
# # Clean
# Function to clean the K-2 data:
# * Drop columns originally related to personally identifiable information: LastName, FirstName, PhoneNumber
# * Drop discontinued data column: OnTrackToGraduate
# * Remove all rows that contain SubjectArea='Mathematics'
# * Remove all rows for kids in grades 3-12.
# * Correct column data types.
# +
def clean_map_df(map_df):
    """Clean the MAP assessment data down to the K-2 reading cohort.

    - Drops personally identifiable columns (LastName, FirstName,
      PhoneNumber) and the discontinued OnTrackToGraduate column.
    - Removes Mathematics rows and keeps only grades K-2.
    - Fixes column dtypes (nullable Int32 where NaNs occur), parses dates,
      and normalizes the Windows-1252 en-dash (0x96) in school-year
      strings to a plain hyphen.

    Returns the cleaned DataFrame. The PII columns are dropped from the
    input frame in place (original behavior preserved).
    """
    map_df.drop(columns=['LastName', 'FirstName', 'PhoneNumber', 'OnTrackToGraduate'],
                inplace=True)
    map_df = map_df[map_df.SubjectArea != 'Mathematics']
    map_df = map_df[map_df.CurrentGrade.isin(['K', '1', '2'])]
    map_df = map_df.astype({'StudentID': 'int32',
                            'CurrentEnrollmentSchoolID': 'int32',
                            'TestSchoolID': 'int32',
                            'RITScore': 'int32',
                            'PercentileRank': pd.Int32Dtype(),
                            'MetGrowthLastFallToThisFall': pd.Int32Dtype(),
                            'MetGrowthLastSpringToThisSpring': pd.Int32Dtype(),
                            'MetGrowthLastFallToThisSpring': pd.Int32Dtype()})
    # regex=False: plain substring replacement (and silences the pandas
    # FutureWarning about the changing default of str.replace).
    map_df.TestSchoolYear = map_df.TestSchoolYear.str.replace('\x96', '-', regex=False)
    map_df.ExtractSchoolYear = map_df.ExtractSchoolYear.str.replace('\x96', '-', regex=False)
    map_df.BirthDate = pd.to_datetime(map_df.BirthDate)
    map_df.USAEntryDate = pd.to_datetime(map_df.USAEntryDate, errors='coerce')
    return map_df
# I've commented this out for now while I was troubleshooting whey I can't find the
# 2015 Kindergartener's in the SBAC file.
# def clean_sbac_df(SBAC_df):
# SBAC_df.drop(columns=['LastName','FirstName','PhoneNumber','OnTrackToGraduate'],\
# inplace=True)
# SBAC_df = SBAC_df[SBAC_df.SubjectArea == 'Reading/ELA'].copy()
# SBAC_df = SBAC_df[SBAC_df.TestGrade == 3]
# SBAC_df.BirthDate = pd.to_datetime(SBAC_df.BirthDate)
# SBAC_df.USAEntryDate = pd.to_datetime(SBAC_df.USAEntryDate,errors='coerce')
# SBAC_df.TestSchoolYear = SBAC_df.TestSchoolYear.str.replace('\x96','-')
# SBAC_df.ExtractSchoolYear = SBAC_df.ExtractSchoolYear.str.replace('\x96','-')
# return SBAC_df
# -
# Clean the MAP (K-2) data:
map_df = clean_map_df(map_df)
map_df.info()
# I may want to open this dataframe in Numbers:
# +
#map_df.to_csv('./data/interim/map_df.csv')
# -
# Kindergarten RIT-score counts by testing season and school year.
map_df[map_df.CurrentGrade=='K'].groupby(['CurrentGrade','TestSeason','TestSchoolYear']).RITScore.count()
# Interesting that there are a lot more Kindergarteners tested in Winter and few in the Fall. What is that all about?
map_df[(map_df.CurrentGrade=='K') & (map_df.TestSchoolYear=='2015-16')].groupby(['TestSchoolID','TestSeason']).RITScore.count()[:25]
# Schools definitely test kids with different frequencies! Are they spreading the testing out over multiple testing seasons or are they retesting kids at each testing season?
map_df[(map_df.CurrentGrade=='K') & (map_df.TestSchoolYear=='2015-16') & (map_df.TestSchoolID==204)].iloc[:15,:12]
# Well at least <NAME> tests each kid in each season.
map_df.groupby(['CurrentGrade','TestSeason','TestSchoolYear']).StudentID.nunique()
map_df.groupby(['CurrentGrade','TestSeason','TestSchoolYear']).RITScore.mean()
# From this it is highly likely that in the MAP score data fall comes before winter which comes before spring.
map_df[(map_df.TestSchoolYear=='2015-16') & (map_df.CurrentGrade=='K')].groupby(['StudentID','TestSeason']).RITScore.mean()[:50]
# Individual kids definitely get tested in multiple seasons in some schools!
# For the MAP data files:
# (typo fixed in the printed text: "were" -> "where")
print('#Rows where TestGrade!=CurrentGrade:',len(map_df[map_df.TestGrade != map_df.CurrentGrade]))
print('#Rows where TestGrade==CurrentGrade:',len(map_df[map_df.TestGrade == map_df.CurrentGrade]))
# So, in the MAP files TestGrade equals CurrentGrade, except on rare occasions.
# For the SBAC data file:
# Compares TestGrade against CurrentGrade-1 (presumably the extract is made
# the school year after testing - confirm against the export definition).
print('#Rows where TestGrade!=CurrentGrade-1:',len(sbac_df[sbac_df.TestGrade!=sbac_df.CurrentGrade-1]))
print('#Rows where TestGrade==CurrentGrade-1:',len(sbac_df[sbac_df.TestGrade==sbac_df.CurrentGrade-1]))
def get_student_data(studentID):
    """Return all MAP rows for a single student (reads the global map_df)."""
    return map_df[map_df.StudentID == studentID]
# Let's look at some samples of students' data:
get_student_data(map_df.StudentID.sample().values[0])
# Students whose recorded LivingWith situation varies across their rows.
living_with_situations = map_df.groupby(['StudentID']).LivingWith.nunique()
living_with_situations[living_with_situations>1].sample().index[0]
get_student_data(living_with_situations[living_with_situations>1].sample().index[0]).LivingWith
# This work grabs all the unique school ID, school Name combinations in the MAP df. I'll verify there aren't data entry issues.
schools_df = map_df.groupby(['CurrentEnrollmentSchoolID','CurrentEnrollmentSchoolName']).size().reset_index(name='Freq').drop(columns=['Freq'])
# Lookup tables in both directions: ID -> name and name -> ID.
school_names = schools_df.set_index('CurrentEnrollmentSchoolID')
school_IDs = schools_df.set_index('CurrentEnrollmentSchoolName')
# Confirming that there are no duplicate school numbers in the collection:
uniqueIDs = schools_df.groupby(['CurrentEnrollmentSchoolID']).CurrentEnrollmentSchoolID.nunique()
uniqueIDs[uniqueIDs>1]
# This is how to refer to a school name from its ID #:
school_names.loc[201][0]
# Taking the list of school names from the Seattle Onboarding History.xlsx workbook, I'll figure out what school ID's these schools are:
schoolNames = ['<NAME> Elementary',
               'Concord International',
               'Dearborn Park International School',
               'Dunlap Elementary School',
               'Emerson Elementary',
               '<NAME> Elementary',
               'Hawthorne Elementary School',
               'Highland Park Elementary',
               '<NAME> Elementary',
               'Maple Elementary',
               '<NAME>. Elementary',
               'Northgate Elementary School',
               'Rainier View Elementary',
               'Sanislo Elementary',
               'Van Asselt Elementary',
               'West Seattle Elementary',
               'Wing Luke Elementary',
               'Roxhill Elementary School']
# Fuzzy match: for each target name, print district schools containing the
# first word of the name that matches anything in the district list.
for name in schoolNames:
    for part in name.split():
        if school_names.CurrentEnrollmentSchoolName.str.contains(part).any():
            print(name,'\n', school_names[school_names.CurrentEnrollmentSchoolName.str.contains(part)].CurrentEnrollmentSchoolName)
            break
# Believing dictionaries are faster to access I'll convert the dataframes to dictionaries and show examples of how to use them:
school_names_dct = school_names.rename(columns={'CurrentEnrollmentSchoolName':'Name'}).to_dict()
school_names_dct['Name'][201]
school_IDs_dct = school_IDs.rename(columns={'CurrentEnrollmentSchoolID':'ID'}).to_dict()
school_IDs_dct['ID']['Adams Elementary']
# Persist the cleaned MAP data for downstream notebooks.
map_df.to_pickle('./data/interim/map_df.pkl')
# Clean the SBAC data:
# This cell is for making reloading the SBAC data easy:
# (re-runs the raw load so cleaning below starts from scratch)
sbac_df = load_sbac_df_from_csv(2018,2018)
print(sbac_df.StudentID.nunique()," K-12 students of SBAC data loaded.")
# Notice that I'm __*not*__ filtering to only include 3rd graders:
def clean_sbac_df(SBAC_df):
    """Clean the SBAC state-assessment data.

    - Drops PII columns and the discontinued OnTrackToGraduate column.
    - Keeps only Reading/ELA rows (deliberately NOT filtered to 3rd
      graders, so students tested in other grades remain available).
    - Parses dates, normalizes the Windows-1252 en-dash (0x96) in
      school-year strings, and converts the '-' placeholder in
      MetStandard to NaN.

    Returns the cleaned DataFrame. The PII columns are dropped from the
    input frame in place (original behavior preserved).
    """
    SBAC_df.drop(columns=['LastName', 'FirstName', 'PhoneNumber', 'OnTrackToGraduate'],
                 inplace=True)
    SBAC_df = SBAC_df[SBAC_df.SubjectArea == 'Reading/ELA'].copy()
    #SBAC_df = SBAC_df[SBAC_df.TestGrade == 3]
    SBAC_df.BirthDate = pd.to_datetime(SBAC_df.BirthDate)
    SBAC_df.USAEntryDate = pd.to_datetime(SBAC_df.USAEntryDate, errors='coerce')
    # regex=False: plain substring replacement of the en-dash.
    SBAC_df.TestSchoolYear = SBAC_df.TestSchoolYear.str.replace('\x96', '-', regex=False)
    SBAC_df.ExtractSchoolYear = SBAC_df.ExtractSchoolYear.str.replace('\x96', '-', regex=False)
    SBAC_df.replace({'MetStandard': {'-': np.nan}}, inplace=True)
    return SBAC_df
sbac_df = clean_sbac_df(sbac_df)
#sbac_df.info()
map_df.TestSchoolYear.iloc[0]
sbac_df.StudentID.nunique()
map_df[(map_df.TestSchoolYear=='2015-16') & (map_df.CurrentGrade=='1')].StudentID.nunique()
# Overlap between each 2015-16 MAP cohort (K, then 1st grade) and the SBAC
# test grades, to identify which MAP cohort feeds the SBAC 3rd graders.
MAP2015 = map_df[(map_df.TestSchoolYear=='2015-16') & (map_df.CurrentGrade=='K')]
sbac_df[sbac_df.StudentID.isin(MAP2015.StudentID)].TestGrade.value_counts()
MAP2015 = map_df[(map_df.TestSchoolYear=='2015-16') & (map_df.CurrentGrade=='1')]
sbac_df[sbac_df.StudentID.isin(MAP2015.StudentID)].TestGrade.value_counts()
sbac_df.TestGrade.value_counts().sort_index()
# The last 4 cells are a key result! The 3rd-grade students in the SBAC file were 1st graders in the 2015-16 school year. Of the 4,614 1st graders in the 2015-16 school year, 3,611 are represented among the 4,436 3rd graders in the 2017-18 SBAC file.
sbac_df[sbac_df.TestGrade==3].StudentID.nunique()
# Is there 1 duplicate student ID in the 3rd graders in the SBAC data? Why doesn't value_counts and nunique give the exact same result?
sbac_df.TestSchoolYear.value_counts()
sbac_df
sbac_df.TestSeason.value_counts()
# Target variable: all 3rd graders plus the 4th graders who belong to the
# 2015-16 1st-grade MAP cohort (MAP2015 holds that cohort at this point).
targets = sbac_df[sbac_df.TestGrade==3]
# pd.concat replaces DataFrame.append (deprecated, removed in pandas 2.0).
targets = pd.concat([targets,
                     sbac_df[(sbac_df.StudentID.isin(MAP2015.StudentID)) & (sbac_df.TestGrade==4)]])
targets.to_pickle('./data/interim/targets.pkl')
targets.shape
targets.to_csv('./data/interim/targets.csv')
# NOTE to self: There are a bunch of rows with radically different Score values. Notice the TestName is 'Access to Instrution & Measurement' rather than 'Smarter Balanced' I'll leave these values in the dataframe since there are still LevelCode and MetStandard values.
# Peek at the two score regimes (columns: ID, grade, score-related fields).
sbac_df[sbac_df.Score>500].iloc[:5,[0,10,13,14,15]]
sbac_df[sbac_df.Score<500].iloc[:5,[0,10,13,14,15]]
# I needed the list of schools for a communication to Susan:
Schools=sbac_df.CurrentEnrollmentSchoolName.unique()
Schools.sort()
for school in Schools:
    print(school)
| ETL.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import os, sys
import numpy as np
import pickle
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import RepeatedStratifiedKFold
sys.path.insert(0, '..')
from utils import UnityScaler
# +
reads_threshold = 6000
data_directory = '../data/'
data_filename = 'metaanalysis_data.pickle'
metadata_filename = 'metaanalysis_metadata.pickle'
# Context managers close the pickle files promptly: the original
# pickle.load(open(...)) left the file handles dangling.
with open(os.path.join(data_directory, data_filename), 'rb') as fh:
    data = pickle.load(fh)
with open(os.path.join(data_directory, metadata_filename), 'rb') as fh:
    metadata = pickle.load(fh)
# remove samples with fewer than reads_threshold
data = data.loc[data.sum(axis = 1) >= reads_threshold]
metadata = metadata.loc[data.index]
# label encoder
label_encoder = LabelEncoder()
metadata['Status'] = label_encoder.fit_transform(metadata['Status'])
# normalize each sample
data = UnityScaler().fit_transform(data)
# fill in metadata
metadata['Variable_Region'] = [v.replace('-','') for v in metadata['Variable_Region']]
metadata['Country'].replace(np.nan, 'USA', inplace = True)
metadata['Sex'].replace(np.nan, 'Unknown', inplace = True)
# Invert the 0/1 encoding (presumably so 1 marks the positive class -
# confirm against the label encoder's class order).
metadata['Status'] = 1 - metadata['Status']
# +
from sklearn.metrics import roc_auc_score, f1_score, accuracy_score
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RepeatedKFold
from sklearn.metrics import roc_curve, roc_auc_score
model = GradientBoostingClassifier()
repeats = 50
colors = ['#CE3534', '#741B47', '#1F78B4']
# Accumulates (aucs, accuracy, f1) arrays keyed by (metadata column, group).
scores = {}
def get_tpr(x, y, repeats=5):
    """Cross-validated ROC statistics for the global `model` on (x, y).

    Runs repeated 5-fold CV; for each fold, interpolates the ROC curve
    onto a common 100-point FPR grid and records AUC, accuracy and F1.

    Parameters
    ----------
    x : pd.DataFrame
        Feature matrix, row-aligned with y.
    y : pd.Series
        Binary labels.
    repeats : int
        Number of CV repetitions.

    Returns
    -------
    tuple of np.ndarray
        (tprs, aucs, accuracy, f1), one entry per successful fold.
    """
    fpr_mean = np.linspace(0, 1, 100)
    tprs, aucs, accuracy, f1 = [], [], [], []
    for train_index, test_index in RepeatedKFold(n_splits=5, n_repeats=repeats, random_state=1).split(y):
        try:
            xtrain, xtest = x.iloc[train_index], x.iloc[test_index]
            ytrain, ytest = y.iloc[train_index], y.iloc[test_index]
            model.fit(xtrain, ytrain)
            fpr, tpr, threshold = roc_curve(ytest, model.predict_proba(xtest)[:, 1])
            interp_tpr = np.interp(fpr_mean, fpr, tpr)
            interp_tpr[0] = 0
            tprs.append(interp_tpr)
            # NOTE(review): AUC is computed from hard predictions while the
            # ROC curve uses predict_proba; probabilities would be more
            # conventional for AUC - kept as-is to preserve results.
            aucs.append(roc_auc_score(ytest, model.predict(xtest)))
            accuracy.append(accuracy_score(ytest, model.predict(xtest)))
            f1.append(f1_score(ytest, model.predict(xtest)))
        except Exception:
            # Skip folds that fail (e.g. a single-class test fold makes
            # roc_curve raise ValueError). The previous bare `except:`
            # swallowed even KeyboardInterrupt/SystemExit.
            continue
    return np.array(tprs), np.array(aucs), np.array(accuracy), np.array(f1)
def plot(mean, sem, label, color):
    """Plot a mean ROC curve with a +/- SEM band on the current axes."""
    grid = np.linspace(0, 1, 100)
    lower, upper = mean - sem, mean + sem
    plt.fill_between(grid, lower, upper, alpha = 0.5, color = color)
    plt.plot(grid, mean, label = label, color = color)
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.legend(bbox_to_anchor = (1, 1))
# +
# One figure per metadata covariate: evaluate the classifier within each
# group and overlay the mean ROC curves (with SEM bands) for comparison.
fig, ax = plt.subplots()
for i, (group, df) in enumerate(metadata.groupby('Country')):
    tprs, aucs, accuracy, f1 = get_tpr(data.loc[df.index], df['Status'], repeats)
    scores['Country', group] = (aucs, accuracy, f1)
    plot(np.mean(tprs,axis = 0), np.std(tprs, axis = 0) / np.sqrt(repeats), f'{group} (AUC = {np.mean(aucs):.2f})', colors[i])
plt.savefig('Split_by_Country.pdf', dpi = 1200)
# +
fig, ax = plt.subplots()
for i, (group, df) in enumerate(metadata.groupby('Variable_Region')):
    tprs, aucs, accuracy, f1 = get_tpr(data.loc[df.index], df['Status'], repeats)
    scores['Variable_Region', group] = (aucs, accuracy, f1)
    plot(np.mean(tprs,axis = 0), np.std(tprs, axis = 0) / np.sqrt(repeats), f'{group} (AUC = {np.mean(aucs):.2f})', colors[i])
plt.savefig('Split_by_Variable_Region.pdf', dpi = 1200)
# +
fig, ax = plt.subplots()
for i, (group, df) in enumerate(metadata.groupby('Sex')):
    tprs, aucs, accuracy, f1 = get_tpr(data.loc[df.index], df['Status'], repeats)
    scores['Sex', group] = (aucs, accuracy, f1)
    plot(np.mean(tprs,axis = 0), np.std(tprs, axis = 0) / np.sqrt(repeats), f'{group} (AUC = {np.mean(aucs):.2f})', colors[i])
plt.savefig('Split_by_Sex.pdf', dpi = 1200)
# +
fig, ax = plt.subplots()
for i, (group, df) in enumerate(metadata.groupby('Control_relation')):
    tprs, aucs, accuracy, f1 = get_tpr(data.loc[df.index], df['Status'], repeats)
    scores['Control_relation', group] = (aucs, accuracy, f1)
    plot(np.mean(tprs,axis = 0), np.std(tprs, axis = 0) / np.sqrt(repeats), f'{group} (AUC = {np.mean(aucs):.2f})', colors[i])
plt.savefig('Split_by_relationship.pdf', dpi = 1200)
# +
fig, ax = plt.subplots()
for i, (group, df) in enumerate(metadata.groupby('seq_depth_range')):
    tprs, aucs, accuracy, f1 = get_tpr(data.loc[df.index], df['Status'], repeats)
    scores['seq_depth_range', group] = (aucs, accuracy, f1)
    plot(np.mean(tprs,axis = 0), np.std(tprs, axis = 0) / np.sqrt(repeats), f'{group} (AUC = {np.mean(aucs):.2f})', colors[i])
plt.savefig('Split_by_seq_depth_range.pdf', dpi = 1200)
# -
metrics = ['AUC', 'Accuracy', 'F1 Score']
# Flatten scores into (covariate, group, metric) -> mean and SEM tables.
means = pd.concat([pd.Series({(k[0], k[1], metrics[i]) : a.mean() for i, a in enumerate(v)}) for k, v in scores.items()])
sems = pd.concat([pd.Series({(k[0], k[1], metrics[i]) : a.std() /np.sqrt(repeats) for i, a in enumerate(v)}) for k, v in scores.items()])
output = pd.concat([means, sems], axis = 1)
output.columns = ['average', 'sem']
# NOTE(review): written tab-separated despite the .csv extension.
output.to_csv('metadata metrics.csv', sep = '\t')
| Figure 5/.ipynb_checkpoints/metadata_separation-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Homework 02
#
# ### Instructions:
# If you are able to see this successfully, it means you have downloaded this notebook file (<code>**Homework_02.ipynb**</code>) on your local machine, started up your Jupyter Notebook server, and opened the notebook file from the running server.<br />
#
# In the following, you will be prompted with a sequence of coding exercises.
# Each (code) cell contains a single exercise; <u>unless otherwise specified</u>, each exercise is **self-contained**, i.e., there is no dependency with any other exercise contained in the above cells. In other words, you don't need to solve exercises in any particular order and/or execute the notebook's cells in the same order as they appear.<br />
#
# A typical code solution cell contains the skeleton of a function; the function has a signature (name and input arguments) which has already been defined (**PLEASE DO NOT CHANGE IT!**). You are asked to implement the function according to the description provided above. To do so, you must replace the <code>**# YOUR CODE HERE**</code> with your own code. <br />
#
# Once you have done it and you are confident that your solution works as you expect, just run the corresponding test code cell right below the specific solution code cell.
# <br />
#
# Please, remember that to execute a cell you need to do the following:
# - Be sure the cell is selected (you can verify this by looking at the cell border: if this is green the cell is selected);
# - Go to the *Main Menu* bar, click on *Cell --> Run Cells*. Alternatively, you can use a keyboard shortcut (e.g., <code>**Ctrl + Enter**</code>).
#
# Finally, I **strongly** recommend you not to delete nor modify this notebook in any of its parts. As this is not a read-only file, anyone could make changes to it. Should you do it by mistake, just download this notebook file again from our [Moodle web page](https://elearning.unipd.it/math/course/view.php?id=321).
# ## Exercise 1
#
# Consider the skeleton of the function below called <code>**mix_up_chars**</code>, which takes as input two strings <code>**a**</code> and <code>**b**</code>, and returns a **new** string which results from the concatenation of <code>**a**</code> and <code>**b**</code> with a whitespace, where their first **two** characters of <code>**a**</code> and <code>**b**</code> are swapped.<br />
# For example:<br />
# <code>**mix_up_chars("swap", "this") = "thap swis"**</code>
#
# (__REQUIREMENT__: *Use sequence slicing *<code>**s[i:j]**</code>* and concatenation *'<code>**+**</code>'* to manipulate the input strings...*)
# ## Solution 1
# + jupyter={"outputs_hidden": true}
def mix_up_chars(a, b):
    """
    Swap the first two characters of a and b, then concatenate the two
    results with a single whitespace: b's first two chars + rest of a,
    followed by a's first two chars + rest of b.
    """
    # Slicing is safe for strings shorter than two characters:
    # s[:2]/s[2:] simply return whatever is available.
    return b[:2] + a[2:] + " " + a[:2] + b[2:]
# -
# ## Test 1
# + jupyter={"outputs_hidden": true}
"""
Please, run this cell to test your solution to Exercise 1 above, by means of some unit tests.
Be sure all the unit tests below are passed correctly.
"""
assert(mix_up_chars("fairy", "tale")) == "tairy fale"
assert(mix_up_chars("x", "y")) == "y x"
assert(mix_up_chars("un", "ai")) == "ai un"
assert(mix_up_chars("I am legend", "Lorem ipsum")) == "Loam legend I rem ipsum"
assert(mix_up_chars("I am legend", " Lorem ipsum")) == " Lam legend I orem ipsum"
# -
# ## Exercise 2
#
# Consider the skeleton of the function below called <code>**from_not_poor_to_good**</code>, which takes as input a string <code>**s**</code>, find the first occurrence of the substring <code>**"not"**</code> and <code>**"poor"**</code> in it (*case insensitive*), and if <code>**"poor"**</code> follows <code>**"not"**</code> it will replace the whole substring <code>**"not...poor"**</code> with the string <code>**"good"**</code> and return this **new** string, otherwise it just returns the original input string as it is.<br />
# For example: <br />
# <code>**from_not_poor_to_good("The lyrics is not that poor!") = "The lyrics is good!"**</code><br />
# <code>**from_not_poor_to_good("Poor boy! It's not your fault") = "Poor boy! It's not your fault"**</code><br />
# <code>**from_not_poor_to_good("This is poor") = "This is poor"**</code><br />
# <code>**from_not_poor_to_good("This sounds really bad") = "This sounds really bad"**</code>
#
# (__SUGGESTIONS__: *Use *<code>**str.lower()**</code>* to lowercase a string; Use *<code>**str.find("substr")**</code>* to get the index of the first character of the first occurrence of *"<code>**substr**</code>"* in *<code>**str**</code>* or *-1* if *"<code>**substr**</code>"* does not occur in *<code>**str**</code>*. Instead, if you use *<code>**str.index("substr")**</code>* this will raise a *<code>**ValueError**</code>* exception if *"<code>**substr**</code>"* does not occur in *<code>**str**</code>*.*)
#
# **NOTE**: This problem can be easily solved using **regular expressions** and the Python's built-in <code>**re**</code> module but you are required to use only the tools indicated above.
# ## Solution 2
# + jupyter={"outputs_hidden": true}
def from_not_poor_to_good(s):
    """
    Find the first occurrence of the substring "not" and "poor" in s (case insensitive).
    If "poor" follows "not", replace the whole substring "not...poor" with "good"
    and return this new string.
    In any other case (either "poor" does not follow "not" or one of the two is
    not in the string) just return the input string as it is.
    """
    lowered = s.lower()
    i_not = lowered.find("not")
    i_poor = lowered.find("poor")
    # str.find returns -1 when absent; require both present and "poor"
    # strictly after "not".
    if i_not != -1 and i_poor > i_not:
        # Splice: text before "not" + "good" + text after "poor".
        return s[:i_not] + "good" + s[i_poor + len("poor"):]
    return s
# -
# ## Test 2
# + jupyter={"outputs_hidden": true}
"""
Please, run this cell to test your solution to Exercise 2 above, by means of some unit tests.
Be sure all the unit tests below are passed correctly.
"""
assert(from_not_poor_to_good("The lyrics is not that poor!") == "The lyrics is good!")
assert(from_not_poor_to_good("The lyrics is NotThatPoor!") == "The lyrics is good!")
assert(from_not_poor_to_good("Poor boy! It's not your fault") == "Poor boy! It's not your fault")
assert(from_not_poor_to_good("This is poor") == "This is poor")
assert(from_not_poor_to_good("This is not good") == "This is not good")
assert(from_not_poor_to_good("This sounds really bad") == "This sounds really bad")
# -
# ## Exercise 3
#
# Consider the skeleton of the function below called <code>**count_vowels**</code>, which takes as input a string <code>**text**</code> and returns the number of (_case insensitive_) vowels in the string.<br />
# For example:<br />
# <code>**count_vowels("Another one bites the dust") = 9**</code><br />
# <code>**count_vowels("Bohemian Rhapsody") = 6**</code><br />
# <code>**count_vowels("FFWD") = 0**</code>
#
# (__SUGGESTIONS__: *Use *<code>**str.lower()**</code>* to lowercase a string. Also, try to organize your data so as to make vowel lookup efficient...*)
# ## Solution 3
# + jupyter={"outputs_hidden": true}
def count_vowels(text):
    """
    Count the number of (case-insensitive) vowels in the input text.
    """
    vowels = frozenset("aeiou")  # set membership makes each lookup O(1)
    return sum(1 for c in text.lower() if c in vowels)
# -
# ## Test 3
# + jupyter={"outputs_hidden": true}
"""
Please, run this cell to test your solution to Exercise 3 above, by means of some unit tests.
Be sure all the unit tests below are passed correctly.
"""
assert(count_vowels("Bohemian Rhapsody") == 6)
assert(count_vowels("Another one bites the dust") == 9)
assert(count_vowels("FFWD") == 0)
# -
# ## Exercise 4
#
# Consider the skeleton of the function below called <code>**count_odds**</code>, which takes as input a list of integers <code>**numbers**</code> and returns how many **odd** numbers are in the list.<br />
# For example:<br />
# <code>**count_odds([11, 6, 5, 5, 10, 8, 7]) = 4**</code><br />
# <code>**count_odds([42, 16]) = 0**</code>
#
# (__SUGGESTION__: *Test if a number is __odd__ using the modulo operator *'<code>**%**</code>'*...*)
# ## Solution 4
# + jupyter={"outputs_hidden": true}
def count_odds(numbers):
    """
    Count the number of odd numbers in the input list of integers.
    """
    # n % 2 != 0 is correct for negative values too (Python's modulo
    # always yields a non-negative result for a positive divisor).
    return sum(1 for n in numbers if n % 2 != 0)
# -
# ## Test 4
# + jupyter={"outputs_hidden": true}
"""
Please, run this cell to test your solution to Exercise 4 above, by means of some unit tests.
Be sure all the unit tests below are passed correctly.
"""
assert(count_odds([11, 6, 5, 5, 10, 8, 7]) == 4)
assert(count_odds([42, 16]) == 0)
assert(count_odds([13, 42, 16]) == 1)
assert(count_odds([13, 11, 21]) == 3)
# -
# ## Exercise 5
#
# Consider the skeleton of the function below called <code>**centered_average**</code>, which takes as input a list of integers <code>**numbers**</code> and returns its "centered average". The "centered average" is defined as the average of all the values in the list, except the smallest and the largest one. If there are multiple copies of the smallest/largest value, only **one** copy must be ignored before computing the average. If the input list contains less than 3 elements, you should return -1, instead.<br />
# For example:<br />
# <code>**centered_average([1, 1, 5, 5, 10, 8, 7]) = 5**</code><br />
# <code>**centered_average([7, 3]) = -1**</code>
# ## Solution 5
# + jupyter={"outputs_hidden": true}
def centered_average(numbers):
    """
    Return -1 if the input list contains fewer than 3 elements; otherwise
    return the "centered average": the mean after removing ONE instance of
    the smallest and ONE instance of the largest value.
    """
    if len(numbers) < 3:
        return -1
    # Subtracting min and max removes exactly one copy of each (even when
    # duplicated) without mutating the input list.
    return (sum(numbers) - min(numbers) - max(numbers)) / (len(numbers) - 2)
# -
# ## Test 5
# + jupyter={"outputs_hidden": true}
"""
Please, run this cell to test your solution to Exercise 5 above, by means of some unit tests.
Be sure all the unit tests below are passed correctly.
"""
assert(centered_average([1, 2, 3, 4, 100]) == 3.0)
assert(centered_average([1, 1, 5, 5, 10, 8, 7]) == 5.2)
assert(centered_average([-10, -4, -2, -4, -2, 0]) == -3)
assert(centered_average([7, 3]) == -1)
assert(centered_average([42]) == -1)
assert(centered_average([]) == -1)
assert(centered_average([10, -4, 2, 1, -2, 0]) == 0.25)
# -
# ## Exercise 6
#
# Consider the dictionary <code>**shopping_cart**</code> as defined in the code cell below.
# Try to perform the following steps:
# 1. Add a key to inventory called '<code>**fruit**</code>', and associate to this new key a value which is a list consisting of the following items <code>**'apple'**</code>', <code>**'orange'**</code>, and <code>**'lemon'**</code>
# 2. Sort the items indexed by the key <code>**'vegetables**'</code>.
# 3. Remove the item <code>**'coke'**</code> from the items stored under the <code>**'drinks'**</code> key.
# 4. Double the value stored under the <code>**'budget'**</code> key.
# + jupyter={"outputs_hidden": true}
"""
Please, run this cell first in order to initialize the dictionary 'shopping_cart'.
"""
# Initial cart: 'budget' holds the money available; the other keys map a
# category to its list of items.
shopping_cart = {
    'budget' : 40,
    'meat' : ['chicken', 'turkey', 'pork', 'beef'],
    'vegetables' : ['courgette', 'aubergine', 'spinach', 'broccoli'],
    'drinks' : ['beer', 'coke', 'wine', 'sparkling water']
}
# -
# ## Solution 6.1
# + jupyter={"outputs_hidden": true}
# Add a 'fruit' key mapping to the requested list of items.
shopping_cart['fruit'] = ['apple', 'orange', 'lemon']
# -
# ## Test 6.1
# + jupyter={"outputs_hidden": true}
"""
Please, run this cell to test your solution to Exercise 6.1 above, by means of some unit tests.
Be sure all the unit tests below are passed correctly.
"""
assert(shopping_cart['fruit'] == ['apple', 'orange', 'lemon'])
# -
# ## Solution 6.2
# + jupyter={"outputs_hidden": true}
# Sort the vegetables list in place (alphabetically).
shopping_cart['vegetables'].sort()
# -
# ## Test 6.2
# + jupyter={"outputs_hidden": true}
"""
Please, run this cell to test your solution to Exercise 6.2 above, by means of some unit tests.
Be sure all the unit tests below are passed correctly.
"""
assert(shopping_cart['vegetables'] == ['aubergine', 'broccoli', 'courgette', 'spinach'])
# -
# ## Solution 6.3
# + jupyter={"outputs_hidden": true}
# Remove the first occurrence of 'coke' from the drinks list.
shopping_cart['drinks'].remove('coke')
# -
# ## Test 6.3
# + jupyter={"outputs_hidden": true}
"""
Please, run this cell to test your solution to Exercise 6.3 above, by means of some unit tests.
Be sure all the unit tests below are passed correctly.
"""
assert(shopping_cart['drinks'] == ['beer', 'wine', 'sparkling water'])
# -
# ## Solution 6.4
# + jupyter={"outputs_hidden": true}
# Double the value stored under the 'budget' key.
shopping_cart['budget'] *= 2
# -
# ## Test 6.4
# + jupyter={"outputs_hidden": true}
"""
Please, run this cell to test your solution to Exercise 6.3 above, by means of some unit tests.
Be sure all the unit tests below are passed correctly.
"""
assert(shopping_cart['budget'] == 80)
# -
# ## Exercise 7
#
# Suppose you are given with the two following dictionaries: <code>**stock**</code> and <code>**prices**</code>. The former contains the available quantity of a certain item, whilst the latter indicates the price of **each** item.<br />
# Implement the function <code>**compute_order_bill**</code> which takes as input the <code>**stock**</code> and <code>**prices**</code> dictionaries along with an <code>**order**</code>. The <code>**order**</code> is a list of tuples, where each tuple contains the name of an item and the ordered quantity. Within an order there might be duplicate elements (e.g., <code>**order = [("apple", 1), ("apple", 1)]**</code> or repeated items with different quantity requested (e.g., <code>**order = [("apple", 1), ("apple", 2)]**</code>).<br/>
# The function is supposed to return the total amount of money for the specific order. Note that an item contributes to the total if it is listed in the stock and until the requested quantity of that item is greater than what is left in the stock. Once the quantity of an item reaches the value <code>**0**</code>, the corresponding entry in the <code>**stock**</code> dictionary must be deleted (*conversely, you can assume *<code>**prices**</code>* will keep track of all the prices forever, i.e., even those of deleted items*).
# + jupyter={"outputs_hidden": true}
"""
Please, run this cell first in order to initialize the dictionary 'stock' and 'prices'.
"""
def init_dicts():
stock = {
"banana": 6,
"apple": 2,
"orange": 32,
"pear": 15,
"aubergine": 9,
"courgette": 1,
"onion": 27
}
prices = {
"banana": 0.49,
"apple": 0.78,
"orange": 0.65,
"pear": 1.12,
"aubergine": 0.51,
"courgette": 0.24,
"onion": 0.03,
"tomato": 0.18,
"carrot": 0.23,
"lemon": 0.05
}
return stock, prices
# -
# ## Solution 7
# + jupyter={"outputs_hidden": true}
def compute_order_bill(stock, prices, order):
    """
    Compute the total amount of the bill corresponding to the order.
    Be sure to update the quantity of each available item in the stock.
    If an item is exhausted (i.e., its quantity goes to 0) you should also remove it from the stock.

    Parameters
    ----------
    stock : dict
        item name -> quantity available. Mutated in place: quantities are
        decreased by what is actually served and exhausted items are removed.
    prices : dict
        item name -> unit price. Never mutated; may also list items that are
        not (or no longer) in stock.
    order : list of (str, int)
        (item, requested quantity) pairs; the same item may appear more than
        once, possibly with different quantities.

    Returns
    -------
    float
        Total bill. An item contributes only while it is present in the
        stock, and at most for the quantity that is left.
    """
    total = 0.0
    for item, requested in order:
        # Items never stocked, or already exhausted, contribute nothing.
        if item not in stock:
            continue
        # Serve at most what is left on the shelf.
        served = min(requested, stock[item])
        total += served * prices[item]
        stock[item] -= served
        # Exhausted items must disappear from the stock.
        if stock[item] == 0:
            del stock[item]
    return total
# -
# ## Test 7
# + jupyter={"outputs_hidden": true}
"""
Please, run this cell to test your solution to Exercise 7 above, by means of some unit tests.
Be sure all the unit tests below are passed correctly.
"""
stock, prices = init_dicts() # re-initialize dictionaries every time
order = [("apple",1)] # test order
assert(compute_order_bill(stock, prices, order) == 0.78)
assert(stock["apple"] == 1)
stock, prices = init_dicts()
order = [("apple", 1), ("apple", 1)]
assert(compute_order_bill(stock, prices, order) == 1.56)
assert("apple" not in stock)
stock, prices = init_dicts()
order = [("apple", 1), ("mango", 3), ("aubergine", 5)]
assert(compute_order_bill(stock, prices, order) == 3.33)
assert(stock["apple"] == 1)
assert(stock["aubergine"] == 4)
stock, prices = init_dicts()
epsilon = 0.0001
order = [("apple", 1), ("mango", 3), ("banana", 8), ("orange", 10), ("apple", 3)]
assert(abs(compute_order_bill(stock, prices, order) - 11) < epsilon)
assert("apple" not in stock)
assert("banana" not in stock)
assert(stock["orange"] == 22)
# -
# ## Exercise 8
#
# Consider the following object called <code>**documents**</code>, which is a list of strings with each string representing a text document. <br />
# Implement the function called <code>**create_inverted_index**</code> which takes as input the list <code>**documents**</code> and returns an **inverted index**. An inverted index is a data structure that associates to each word in the collection of documents a so called **posting list**. At least, each posting list associated to a word contains the unique ID of the document where the word appears.<br />
# For example, suppose <code>**documents = ['This is the first document', 'A second document is this one although it is very short', 'I cannot type too many documents but I guess you now where this is going']**</code>. Then, the inverted index associated with the above list of documents is as follows:<br />
# <code>**'i'** --> [2]</code><br />
# <code>**'this'** --> [0, 1, 2]</code><br />
# <code>**'is'** --> [0, 1, 2]</code><br />
# <code>**'document'** --> [0, 1]</code><br />
# <code>**'documents'** --> [2]</code><br />
# <code>...</code><br />
# (__REQUIREMENTS__: *We want the index to be **case insensitive** and the document ID to be the position of the document in the original list.*)
# + jupyter={"outputs_hidden": true}
"""
Please, run this cell first in order to initialize the list of text documents.
"""
documents = ["Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS user interface management system",
"System and human system engineering testing of EPS",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"]
# -
# ## Solution 8
# + jupyter={"outputs_hidden": true}
def create_inverted_index(documents):
    """
    This function returns the inverted index from the input list of text documents.
    At this stage, the inverted index will only contain an entry for each word (case insensitive).
    Each word entry, in turn, will contain the list of document ID where the word appears.

    Parameters
    ----------
    documents : list of str
        Text documents; a document's ID is its position in the list.

    Returns
    -------
    dict
        word (lowercased) -> sorted list of document IDs containing the word.
    """
    inverted_index = {}
    for doc_id, document in enumerate(documents):
        # Case-insensitive: normalise every token to lowercase.
        for word in document.lower().split():
            posting_list = inverted_index.setdefault(word, [])
            # Document IDs are visited in increasing order, so the posting
            # list stays sorted; skip duplicates when a word repeats
            # within the same document.
            if not posting_list or posting_list[-1] != doc_id:
                posting_list.append(doc_id)
    return inverted_index
# -
# ## Test 8
# + jupyter={"outputs_hidden": true}
"""
Please, run this cell to test your solution to Exercise 8 above, by means of some unit tests.
Be sure all the unit tests below are passed correctly.
"""
assert(create_inverted_index(documents)['machine'] == [0])
assert(create_inverted_index(documents)['graph'] == [6, 7, 8])
assert('unexisting' not in create_inverted_index(documents))
# -
# ## Exercise 9
#
# Consider again **Exercise 8** but this time we want to store in the posting list not only the document ID but also the frequency of the word in that document ID.<br />
# For example, suppose again <code>**documents = ['This is the first document', 'A second document is this one although it is very short', 'I cannot type too many documents but I guess you now where this is going']**</code>. Then, the inverted index associated with the above list of documents should look like the following:<br />
# <code>**'i'** --> {2: 2}</code><br />
# <code>**'this'** --> {0: 1, 1: 1, 2: 1}</code><br />
# <code>**'is'** --> {0: 1, 1: 2, 2: 1}</code><br />
# <code>**'document'** --> {0: 1, 1: 1}</code><br />
# <code>**'documents'** --> {2: 1}</code><br />
# <code>...</code><br />
# (__SUGGESTION__: *Instead of representing the posting list as a list, use another dictionary whose keys are the document IDs...*)
# + jupyter={"outputs_hidden": true}
def create_inverted_index_and_freq(documents):
    """
    This function returns the inverted index from the input list of text documents.
    At this stage, the inverted index will only contain an entry for each word (case insensitive).
    Each word entry, in turn, will contain another dictionary whose keys are the document IDs where the word appears,
    and values are the frequencies of that word in each document ID.

    Parameters
    ----------
    documents : list of str
        Text documents; a document's ID is its position in the list.

    Returns
    -------
    dict
        word (lowercased) -> {document ID: occurrences of the word in it}.
    """
    inverted_index = {}
    for doc_id, document in enumerate(documents):
        # Case-insensitive: normalise every token to lowercase.
        for word in document.lower().split():
            posting = inverted_index.setdefault(word, {})
            # Count one more occurrence of this word in the current document.
            posting[doc_id] = posting.get(doc_id, 0) + 1
    return inverted_index
# -
# ## Test 9
# + jupyter={"outputs_hidden": true}
"""
Please, run this cell to test your solution to Exercise 9 above, by means of some unit tests.
Be sure all the unit tests below are passed correctly.
"""
assert(create_inverted_index_and_freq(documents)['human'] == {0: 1, 3: 1})
assert(create_inverted_index_and_freq(documents)['system'] == {1: 1, 2: 1, 3: 2})
assert('unexisting' not in create_inverted_index_and_freq(documents))
| Homeworks/Homework_02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"is_executing": false}
import json
import os
import math
import librosa
# + [markdown] pycharm={"name": "#%% md\n"}
# # MFCC Extractor
# + pycharm={"name": "#%%\n", "is_executing": false}
dataset_path = "data/archive/Data/genres_original"  # GTZAN root: one sub-folder per genre
json_path = "data/gtzan_mfcc_json.json"  # output file for the extracted features
sr = 22050  # sampling rate used to (re)load every track
duration = 30  # seconds per track (nominal GTZAN clip length)
total_samples = sr * duration  # samples per full track at this rate
num_mfcc = 13  # MFCC coefficients kept per frame
n_fft = 2048  # FFT window size (samples)
hop_length = 512  # hop between successive FFT windows (samples)
segments_per_track = 10  # each track is split into this many equal segments
# + [markdown] pycharm={"name": "#%% md\n"}
# We create a dictionary to store labels of all the songs' MFCCs.
# * `mapping` consists of all the 10 genres.
# * `labels` consists of the label for each of the 1000 songs.
# 0 corresponds to blues, 1 for classical, 2 for country and so on.
# Since each songs has a label, there will be 100 zeroes, 100 ones, 100 twos, and so on.
# * `mfcc` consists of individual mfcc values (grouped by 13 to be called an MFCC) for every song.
# Every song consists of `22050 * 30 = 661500` total number of samples,
# which are divided into 10 segments. So each segment has 66150 samples.
# The number of MFCCs in each segment would be determined by `hop_length` (`=512`),
# which would be `ceil(66150 / 512) = 130` MFCCs in each segment.
# + pycharm={"name": "#%% \n", "is_executing": false}
# dictionary to store mapping, labels, and MFCCs
data = {
    "mapping": [],  # genre names; a genre's index here is its numeric label
    "labels": [],   # numeric genre label for every stored segment
    "mfcc": []      # one (frames x num_mfcc) matrix per segment
}
print("No. of segments: ", segments_per_track)
# Samples in each of the 10 equal segments of a track.
samples_per_segment = int(total_samples / segments_per_track)
print("No. of samples per segment: ", samples_per_segment)
# Frames (MFCC vectors) expected per segment, given the hop length.
num_mfcc_vectors_per_segment = math.ceil(samples_per_segment / hop_length)
print("No. of MFCCs per segment: ", num_mfcc_vectors_per_segment)
# + pycharm={"name": "#%%\n", "is_executing": false}
# loop through all genre sub-folders
for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dataset_path)):
    # ensure we're processing a genre sub-folder level (skip the root).
    # BUGFIX: the original used "is not", an identity comparison on strings
    # whose outcome is implementation-dependent; compare by value instead.
    if dirpath != dataset_path:
        # save genre label (i.e., sub-folder name) in the mapping.
        # BUGFIX: os.path.basename is portable, unlike splitting on a
        # hard-coded '\\' (Windows) or '/' (Linux) separator.
        semantic_label = os.path.basename(os.path.normpath(dirpath))
        data["mapping"].append(semantic_label)
        print("Processing:", semantic_label)
        # process all audio files in this genre sub-directory
        for f in filenames:
            # load audio file at the configured sampling rate
            file_path = os.path.join(dirpath, f)
            signal, sample_rate = librosa.load(file_path, sr=sr)
            # process all segments of the audio file
            for d in range(segments_per_track):
                # calculate start and finish sample for current segment
                start = samples_per_segment * d
                finish = start + samples_per_segment
                # extract MFCCs. Keyword arguments: librosa >= 0.10 no
                # longer accepts positional y/sr (and keywords also work
                # on older versions).
                mfcc = librosa.feature.mfcc(y=signal[start:finish], sr=sample_rate,
                                            n_mfcc=num_mfcc, n_fft=n_fft,
                                            hop_length=hop_length)
                mfcc = mfcc.T  # -> (frames, num_mfcc)
                # store only segments with the expected number of MFCC
                # vectors (tracks slightly shorter than 30 s produce a
                # short final segment, which is dropped)
                if len(mfcc) == num_mfcc_vectors_per_segment:
                    data["mfcc"].append(mfcc.tolist())
                    # i enumerates os.walk starting at the root, so the
                    # genre label is i - 1
                    data["labels"].append(i - 1)
print("\nMFCCs extracted. Saving to JSON file...")
# save MFCCs to json file
with open(json_path, "w") as fp:
    json.dump(data, fp, indent=4)
print("Done")
# + [markdown] pycharm={"name": "#%% md\n"}
# * There are total 1000-1 = 999 songs (one song removed as the file was corrupted)
# So there should ideally be 9990 total number of segments, which would serve
# as the input to the training part.
# The dimensions would be (9990, 130, 13)
# * The above dimensions are under the assumption that every song is __exactly__ 30 seconds in duration.
# + pycharm={"name": "#%%\n", "is_executing": false}
print("Labels:", len(data["labels"]))
print("MFCCs:", len(data["mfcc"]))
# + [markdown] pycharm={"name": "#%% md\n"}
# * We see that there are slightly fewer segments than expected —
# 4 segments fewer, to be precise. The likely reason is that not
# every song is exactly 30 seconds long: each song may be a few
# milliseconds longer or shorter.
| 2-mfcc-extractor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ADN
# Implemente un programa que identifique a una persona en función de su ADN, según se indica a continuación.
# <code>$ python dna.py databases/large.csv sequences/5.txt
# Lavender</code>
# ## Empezando
# - Dentro de la carpeta data/adn se encuentra la información necesaria para resolver este ejercicio la cual incluye un archivo de base de datos y archivos txt con las cadenas adn
# ## Antecedentes
# El ADN, el portador de información genética en los seres vivos, se ha utilizado en la justicia penal durante décadas. Pero, ¿cómo funciona exactamente el perfil de ADN? Dada una secuencia de ADN, ¿cómo pueden los investigadores forenses identificar a quién pertenece?
#
# Bueno, el ADN es en realidad solo una secuencia de moléculas llamadas nucleótidos, dispuestas en una forma particular (una doble hélice). Cada nucleótido de ADN contiene una de cuatro bases diferentes: adenina (A), citosina (C), guanina (G) o timina (T). Cada célula humana tiene miles de millones de estos nucleótidos ordenados en secuencia. Algunas porciones de esta secuencia (es decir, el genoma) son iguales, o al menos muy similares, en casi todos los seres humanos, pero otras porciones de la secuencia tienen una mayor diversidad genética y, por tanto, varían más en la población.
#
# Un lugar donde el ADN tiende a tener una alta diversidad genética es en las repeticiones cortas en tándem (STR). Un STR es una secuencia corta de bases de ADN que tiende a repetirse consecutivamente numerosas veces en lugares específicos dentro del ADN de una persona. El número de veces que se repite un STR en particular varía mucho entre los individuos. En las siguientes muestras de ADN, por ejemplo, Alice tiene el STR <code>AGAT</code> repetido cuatro veces en su ADN, mientras que Bob tiene el mismo STR repetido cinco veces.
# <img src="./img/adn.PNG">
# El uso de varios STR, en lugar de solo uno, puede mejorar la precisión del perfil de ADN. Si la probabilidad de que dos personas tengan el mismo número de repeticiones para un solo STR es del 5%, y el analista observa 10 STR diferentes, entonces la probabilidad de que dos muestras de ADN coincidan puramente por casualidad es de aproximadamente 1 en 1 billón (asumiendo que todos los STR son independientes entre sí). Entonces, si dos muestras de ADN coinciden en el número de repeticiones para cada uno de los STR, el analista puede estar bastante seguro de que provienen de la misma persona. CODIS, la base de datos de ADN del FBI , utiliza 20 STR diferentes como parte de su proceso de elaboración de perfiles de ADN.
#
# ¿Cómo sería una base de datos de ADN de este tipo? Bueno, en su forma más simple, podría imaginarse formateando una base de datos de ADN como un archivo CSV, donde cada fila corresponde a un individuo y cada columna corresponde a un STR particular.
# <code>name,AGAT,AATG,TATC
# Alice,28,42,14
# Bob,17,22,19
# Charlie,36,18,25</code>
# Los datos del archivo anterior sugerirían que Alice tiene la secuencia <code>AGAT</code> repetida 28 veces consecutivamente en algún lugar de su ADN, la secuencia <code>AATG</code> repetida 42 veces y <code>TATC</code> repetida 14 veces. Bob, mientras tanto, tiene esos mismos tres STR repetidos 17, 22 y 19 veces, respectivamente. Y Charlie tiene esos mismos tres STR repetidos 36, 18 y 25 veces, respectivamente.
#
# Entonces, dada una secuencia de ADN, ¿cómo podría identificar a quién pertenece? Bueno, imagina que buscas en la secuencia de ADN la secuencia consecutiva más larga de <code>AGAT</code>s repetidos y descubres que la secuencia más larga tiene 17 repeticiones. Si luego encontrara que la secuencia más larga de <code>AATG</code> tiene 22 repeticiones y la secuencia más larga de <code>TATC</code> 19 repeticiones, eso proporcionaría una evidencia bastante buena de que el ADN era de Bob. Por supuesto, también es posible que una vez que tome los recuentos de cada uno de los STR, no coincida con nadie en su base de datos de ADN, en cuyo caso no tendrá ninguna coincidencia.
#
# En la práctica, dado que los analistas saben en qué cromosoma y en qué lugar del ADN se encontrará un STR, pueden localizar su búsqueda en una sección limitada del ADN. Pero ignoraremos ese detalle para este problema.
#
# Su tarea es escribir un programa que tomará una secuencia de ADN y un archivo CSV que contiene recuentos de STR para una lista de individuos y luego generará a quién pertenece el ADN (lo más probable).
# ## Especificaciones
# En un archivo llamado <code>dna.py</code>, implementar un programa que identifica a la que pertenece una secuencia de ADN.
#
# - El programa debe requerir como primer argumento de línea de comando el nombre de un archivo CSV que contiene los recuentos de STR para una lista de individuos y debe requerir como segundo argumento de línea de comando el nombre de un archivo de texto que contiene la secuencia de ADN para identificar.
#
# - Si su programa se ejecuta con el número incorrecto de argumentos en la línea de comandos, su programa debería imprimir un mensaje de error de su elección (con <code>print</code>). Si se proporciona el número correcto de argumentos, puede suponer que el primer argumento es de hecho el nombre de archivo de un archivo CSV válido y que el segundo argumento es el nombre de archivo de un archivo de texto válido.
#
# - Su programa debería abrir el archivo CSV y leer su contenido en la memoria.
# - Puede suponer que la primera fila del archivo CSV serán los nombres de las columnas. La primera columna será la palabra <code>name</code> y las columnas restantes serán las propias secuencias STR.
#
# - Su programa debería abrir la secuencia de ADN y leer su contenido en la memoria.
# - Para cada uno de los STR (de la primera línea del archivo CSV), su programa debe calcular la ejecución más larga de repeticiones consecutivas del STR en la secuencia de ADN para identificar.
# - Si los conteos de STR coinciden exactamente con cualquiera de las personas en el archivo CSV, su programa debe imprimir el nombre de la persona que coincide.
# - Puede suponer que los recuentos de STR no coincidirán con más de un individuo.
# - Si los recuentos de STR no coinciden exactamente con ninguno de los individuos en el archivo CSV, su programa debería imprimir <code>"No match"</code>.
# ## Uso
# Su programa debería comportarse según los siguientes ejemplos.
# <code>$ python dna.py databases/large.csv sequences/5.txt
# Lavender</code>
#
#
# <code>$ python dna.py
# Usage: python dna.py data.csv sequence.txt </code>
#
# <code>$ python dna.py data.csv
# Usage: python dna.py data.csv sequence.txt</code>
# ## Sugerencia
# - Puede encontrar <a href='https://docs.python.org/3/library/csv.html'><code>csv</code></a> útil el módulo de Python para leer archivos CSV en la memoria. Es posible que desee aprovechar <a href='https://docs.python.org/3/library/csv.html#csv.reader'><code>csv.reader</code></a> o <a href='https://docs.python.org/3/library/csv.html#csv.DictReader'><code>csv.DictReader</code></a>.
#
# - Las funciones <a href='https://docs.python.org/3.3/tutorial/inputoutput.html#reading-and-writing-files'><code>open</code></a> y <a href='https://docs.python.org/3.3/tutorial/inputoutput.html#methods-of-file-objects'><code>read</code></a> pueden resultar útiles para leer archivos de texto en la memoria.
# - Considere qué estructuras de datos podrían ser útiles para realizar un seguimiento de la información en su programa. A <code>list</code> o a <code>dict</code> pueden resultar útiles.
# ## Pruebas
# Asegúrese de probar su código para cada uno de los siguientes.
#
# - Ejecute su programa como <code>python dna.py databases/small.csv sequences/1.txt.</code> Su programa debería generar <code>Bob</code>.
# - Ejecute su programa como <code>python dna.py databases/small.csv sequences/2.txt.</code> Su programa debería generar <code>No</code> match.
# - Ejecute su programa como <code>python dna.py databases/small.csv sequences/3.txt.</code> Su programa debería generar <code>No</code> match.
# - Ejecute su programa como <code>python dna.py databases/small.csv sequences/4.txt.</code> Su programa debería generar <code>Alice</code>.
# - Ejecute su programa como <code>python dna.py databases/large.csv sequences/5.txt.</code> Su programa debería generar <code>Lavender</code>.
# - Ejecute su programa como <code>python dna.py databases/large.csv sequences/6.txt.</code> Su programa debería generar <code>Luna</code>.
# - Ejecute su programa como <code>python dna.py databases/large.csv sequences/7.txt.</code> Su programa debería generar <code>Ron</code>.
# - Ejecute su programa como <code>python dna.py databases/large.csv sequences/8.txt.</code> Su programa debería generar <code>Ginny</code>.
# - Ejecute su programa como <code>python dna.py databases/large.csv sequences/9.txt.</code> Su programa debería generar <code>Draco</code>.
# - Ejecute su programa como <code>python dna.py databases/large.csv sequences/10.txt.</code> Su programa debería generar <code>Albus</code>.
# - Ejecute su programa como <code>python dna.py databases/large.csv sequences/11.txt.</code> Su programa debería generar <code>Hermione</code>.
# - Ejecute su programa como <code>python dna.py databases/large.csv sequences/12.txt.</code> Su programa debería generar <code>Lily</code>.
# - Ejecute su programa como <code>python dna.py databases/large.csv sequences/13.txt.</code> Su programa debería generar <code>No</code> match.
# - Ejecute su programa como <code>python dna.py databases/large.csv sequences/14.txt.</code> Su programa debería generar <code>Severus</code>.
# - Ejecute su programa como <code>python dna.py databases/large.csv sequences/15.txt.</code> Su programa debería generar <code>Sirius</code>.
# - Ejecute su programa como <code>python dna.py databases/large.csv sequences/16.txt.</code> Su programa debería generar <code>No</code> match.
# - Ejecute su programa como <code>python dna.py databases/large.csv sequences/17.txt.</code> Su programa debería generar <code>Harry</code>.
# - Ejecute su programa como <code>python dna.py databases/large.csv sequences/18.txt.</code> Su programa debería generar <code>No</code> match.
# - Ejecute su programa como <code>python dna.py databases/large.csv sequences/19.txt.</code> Su programa debería generar <code>Fred</code>.
# - Ejecute su programa como <code>python dna.py databases/large.csv sequences/20.txt.</code> Su programa debería generar <code>No</code> match.
# +
import csv
from sys import argv
import re
class DnaTest(object):
    """Identify the owner of a DNA sequence from an STR-count CSV database.

    Command line: python dna.py databases/<db>.csv sequences/<seq>.txt
    The files are resolved under data/dna/databases and data/dna/sequences.
    Prints the matching person's name, or "No match".
    """

    def __init__(self):
        # Strip the leading "databases/" / "sequences/" directory names
        # (10 characters each) from the command-line arguments.
        self.sequence_argv = str(argv[2][10:])
        self.database_argv = str(argv[1][10:])
        # Read the whole CSV database into memory (list of lines).
        with open(f"data/dna/databases/{self.database_argv}", 'r') as database_file:
            self.database_file = database_file.readlines()
        # The sequence file holds a single line of A/C/G/T characters.
        with open(f"data/dna/sequences/{self.sequence_argv}", 'r') as sequence_file:
            self.sequence_file = sequence_file.readline()
        # DictReader view used by compare_database_with_sequence().
        self.csv_database_dictionary = csv.DictReader(self.database_file)
        # Plain reader used by get_str_list() to fetch the header row.
        self.reader = csv.reader(self.database_file)
        # STR name -> longest consecutive repeat count found in the sequence.
        self.dict_from_sequence = {}
        # Scratch dict retained for backward compatibility with the
        # original implementation (no longer needed by the fixed logic).
        self.select_max = {}

    def get_str_list(self):
        """Return the STR names, i.e. the CSV header row minus 'name'."""
        keys = next(self.reader)
        keys.remove("name")
        return keys

    def get_str_count_from_sequence(self):
        """Fill dict_from_sequence with the longest consecutive run of each STR.

        BUGFIX: the original called max() on an empty dict (ValueError) and
        read an unbound variable whenever an STR did not occur in the
        sequence at all; such STRs now simply get a count of 0.
        """
        for str_key in self.get_str_list():
            key_len = len(str_key)
            longest_run = 0
            # Each regex match is a maximal run of consecutive repetitions
            # of the STR; keep the longest one.
            for match in re.finditer(rf"(?:{str_key})+", self.sequence_file):
                longest_run = max(longest_run, len(match.group()))
            self.dict_from_sequence[str_key] = longest_run // key_len

    def compare_database_with_sequence(self):
        """Print the name whose STR counts equal the sequence's, else 'No match'."""
        # Normalise to int so CSV strings ('28') compare equal to counts (28).
        target = {k: int(v) for k, v in self.dict_from_sequence.items()}
        for row in self.csv_database_dictionary:
            person = dict(row)
            name = person.pop('name')
            if {k: int(v) for k, v in person.items()} == target:
                print(name)
                return
        print("No match")
# Program entry point: build the tester, count the STR runs in the
# sequence, then look for a matching individual in the database.
if __name__ == '__main__':
    RunTest = DnaTest()
    RunTest.get_str_count_from_sequence()
    RunTest.compare_database_with_sequence()
| Modulo4/Ejercicios/Problema1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import csv
import torch
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
# DeepMoD stuff
from deepymod import DeepMoD
from deepymod.model.func_approx import NN, Siren
from deepymod.model.library import Library1D
from deepymod.model.constraint import LeastSquares
from deepymod.model.sparse_estimators import PDEFIND, Threshold
from deepymod.training import train
from deepymod.training.sparsity_scheduler import TrainTestPeriodic
from scipy.io import loadmat
# Setting cuda
#if torch.cuda.is_available():
#    torch.set_default_tensor_type('torch.cuda.FloatTensor')
# Settings for reproducibility: fix the NumPy and torch RNGs and force
# deterministic cuDNN kernels so repeated runs give identical results.
np.random.seed(42)
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# -
# Load the experimental cable dataset. 'Expression1' presumably stores
# (x, t, u) triplets on a 20 x 500 grid — TODO confirm against the export.
data = loadmat('cable_exp_500.mat')
usol = np.real(data['Expression1'])
usol= usol.reshape((20,500,3))
# Split the last axis into coordinate grids and the measured field.
x_v= usol[:,:,0]
t_v = usol[:,:,1]
u_v = usol[:,:,2]
u_v_full = u_v
# Keep every second spatial row (downsample the first axis 20 -> 10).
u_v = np.take(u_v,np.arange(0,u_v.shape[0],2),axis=0)
u_v.shape
plt.figure(figsize=(6, 6))
plt.imshow(u_v,aspect=30)
# Add white Gaussian noise at 10% of the signal's standard deviation.
noise_level = 0.1
u_v_noise = u_v + noise_level * np.std(u_v) * np.random.randn(u_v.shape[0],u_v.shape[1])
u_v_noise.shape
plt.plot(u_v_noise[4,:])
plt.plot(u_v[4,:])
plt.figure(figsize=(6, 6))
plt.imshow(u_v_noise,aspect=10)
# Step 1 keeps every row; kept in this form for easy re-subsampling.
output_data = np.take(u_v_noise,np.arange(0,u_v_noise.shape[0],1),axis=0)
output_data.shape
# Build the (t, x) coordinate matrix X and the normalised response y
# in the flattened column layout DeepMoD expects.
x = np.linspace(-1, 1, output_data.shape[0])
t = np.linspace(0, 5, output_data.shape[1])
x_grid, t_grid = np.meshgrid(x, t, indexing='ij')
X = np.transpose((t_grid.flatten(), x_grid.flatten()))
y = np.real(output_data).reshape((output_data.size, 1))
y = y/np.max(y)
# +
# Randomly sub-sample 5000 (t, x) -> u training points from the full grid;
# requires_grad on X_train enables the automatic differentiation DeepMoD
# uses to build the derivative library.
number_of_samples = 5000
idx = np.random.permutation(y.shape[0])
X_train = torch.tensor(X[idx, :][:number_of_samples], dtype=torch.float32, requires_grad=True)
y_train = torch.tensor(y[idx, :][:number_of_samples], dtype=torch.float32)
# -
# Function approximator: fully connected net, 2 inputs (t, x) -> 1 output u.
network = NN(2, [30, 30, 30, 30], 1)
# Configuration of the library function: We select athe library with a 2D spatial input. Note that that the max differential order has been pre-determined here out of convinience. So, for poly_order 1 the library contains the following 12 terms:
# * [$1, u_x, u_{xx}, u_{xxx}, u, u u_{x}, u u_{xx}, u u_{xxx}, u^2, u^2 u_{x}, u^2 u_{xx}, u^2 u_{xxx}$]
library = Library1D(poly_order=1, diff_order=2)
# Configuration of the sparsity estimator and sparsity scheduler used. In this case we use the most basic threshold-based Lasso estimator and a scheduler that asseses the validation loss after a given patience. If that value is smaller than 1e-5, the algorithm is converged.
estimator = Threshold(0.2)
sparsity_scheduler = TrainTestPeriodic(periodicity=50, patience=200, delta=1e-5)
# Configuration of the sparsity estimator
constraint = LeastSquares()
# Configuration of the sparsity scheduler
# Now we instantiate the model and select the optimizer
model = DeepMoD(network, library, estimator, constraint)
# Defining optimizer
optimizer = torch.optim.Adam(model.parameters(), betas=(0.99, 0.99), amsgrad=True, lr=2e-3)
# ## Run DeepMoD
# We can now run DeepMoD using all the options we have set and the training data:
# * The directory where the tensorboard file is written (log_dir)
# * The ratio of train/test set used (split)
# * The maximum number of iterations performed (max_iterations)
# * The absolute change in L1 norm considered converged (delta)
# * The amount of epochs over which the absolute change in L1 norm is calculated (patience)
train(model, X_train, y_train, optimizer,sparsity_scheduler, log_dir='runs/theory_new', split=0.8, max_iterations=200000, delta=1e-7, patience=200)
print(model.estimator_coeffs())
# Reconstruct the learned solution and its derivatives on the full grid.
# The index layout of the model outputs (prediction, time derivative,
# spatial-derivative columns) follows the deepymod API — confirm there.
sol = model(torch.tensor(X, dtype=torch.float32))[0].reshape(output_data.shape).detach().numpy()
ux = model(torch.tensor(X, dtype=torch.float32))[2][0][:,1].reshape(output_data.shape).detach().numpy()
uxx = model(torch.tensor(X, dtype=torch.float32))[2][0][:,2].reshape(output_data.shape).detach().numpy()
ut = model(torch.tensor(X, dtype=torch.float32))[1][0].reshape(output_data.shape).detach().numpy()
# Numerical-differentiation baselines from pysindy for comparison.
import pysindy as ps
fd_spline = ps.SINDyDerivative(kind='spline', s=1e-2)
fd_spectral = ps.SINDyDerivative(kind='spectral')
fd_sg = ps.SINDyDerivative(kind='savitzky_golay', left=0.5, right=0.5, order=3)
ground_truth = u_v
u_v.shape
u_v_noise.shape
sol.shape
# Compare noisy data, clean data and network reconstruction at one x-row.
x0 = 2
plt.plot(u_v_noise[x0,:])
plt.plot(u_v[x0,:])
plt.plot(sol[x0,:]*np.max(output_data))
x0 = 2
plt.plot(u_v_noise[x0,:])
plt.plot(u_v[x0,:])
plt.plot(sol[x0,:]*np.max(output_data))
# Same comparison along x at a fixed time index.
t0 = 200
plt.plot(u_v_noise[:,t0],'go--')
plt.plot(u_v[:,t0],'ro')
plt.plot(sol[:,t0]*np.max(output_data),'r')
t0=100
plt.plot(fd_spline(u_v_noise[:,t0],x),'go--')
plt.plot(fd_spline(u_v[:,t0],x),'ro')
plt.plot(ux[:,t0]*np.max(output_data),'r')
np.max(output_data)
# Second derivatives: spline-of-spline baselines vs the network's uxx.
# NOTE(review): the rescaling of uxx is inconsistent across the next
# three cells (max**2 here, a single max below) — confirm which
# normalisation is intended.
plt.plot(fd_spline(fd_spline(u_v_noise[:,t0],x),x),'go')
plt.plot(fd_spline(fd_spline(u_v[:,t0],x),x),'ro')
plt.plot(uxx[:,t0]*np.max(output_data)*np.max(output_data),'r')
t0=200
plt.plot(fd_spline(fd_spline(u_v_noise[:,t0],x),x),'go')
plt.plot(fd_spline(fd_spline(u_v[:,t0],x),x),'ro')
plt.plot(uxx[:,t0]*np.max(output_data),'k')
t0=100
plt.plot(fd_spline(fd_spline(u_v_noise[:,t0],x),x),'go')
plt.plot(fd_sg(fd_sg(u_v[:,t0],x),x),'ro')
plt.plot(uxx[:,t0]*np.max(output_data),'k')
# ## SVD
u_v_noise.shape
steps = u_v_noise.shape[0]
# Rank-'dim_w' truncated-SVD reconstruction as a denoiser.
dim_w = 2
denoised_sol = []
for i in np.arange(steps):
    # NOTE(review): u_v_noise[:, i] is a 1-D column, but np.linalg.svd
    # requires an input of at least 2 dimensions — this likely needs a
    # windowed/Hankel matrix per index; confirm against the maintained
    # version of this notebook.
    uwn,sigmawn,vwn= np.linalg.svd(u_v_noise[:,i])
    vwn = vwn.T
    denoised_sol.append(uwn[:,0:dim_w].dot(np.diag(sigmawn[0:dim_w]).dot(vwn[:,0:dim_w].T)))
denoised_sol = np.array(denoised_sol).T
# BUGFIX: this plot originally ran *before* denoised_sol was computed,
# raising a NameError on a fresh kernel; moved after the reconstruction.
plt.imshow(denoised_sol)
| paper/Cable_equation/old/Theoretical/Theoretical_10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import torch
import matplotlib
import matplotlib.pyplot as plt
import torchvision
from torchvision import transforms
# -
# ## Grab the test data and visualise
# Path to the pre-exported velocity-model images.
data_path = "/home/sravula/experiments/datasets/8047_vel_imgs.npy"
# +
from datasets.velocity_fine import Velocity
# Resize every image to 256x256 on load.
tran_transform = transforms.Compose([
    transforms.Resize([256, 256])
])
dataset = Velocity(path=data_path, transform=tran_transform)
# -
# Deterministic 90/10 train/test split: seed a temporary RNG state for the
# shuffle, then restore the previous global NumPy state so later random
# draws are unaffected.
N = len(dataset)
indices = list(range(N))
random_state = np.random.get_state()
np.random.seed(2240)
np.random.shuffle(indices)
np.random.set_state(random_state)
train_indices, test_indices = indices[:int(N * 0.9)], indices[int(N * 0.9):]
test_dataset = torch.utils.data.Subset(dataset, test_indices)
# +
# Grab one fixed batch of 15 test images (shuffle=False keeps it reproducible).
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=15, shuffle=False,
                                          num_workers=8, drop_last=True)
test_iter = iter(test_loader)
test_sample = next(test_iter)[0]
# +
# Show the batch as a 5-wide grid at roughly native resolution.
grid_img = torchvision.utils.make_grid(test_sample, nrow=5)
dpi = matplotlib.rcParams['figure.dpi']
height = width = 6*256
figsize = width / float(dpi), height / float(dpi)
plt.figure(figsize=figsize)
plt.imshow(grid_img.permute(1, 2, 0))
plt.show()
# -
# -
# ## Noise the test data and visualise
# +
#We want y = Ax where A is a random Gaussian
#y = [N, m, 1], A = [N, m, C x W x H], x = [N, C, H, W]
N, C, H, W = test_sample.shape
m = 10000
# +
#A = (1 / np.sqrt(m)) * torch.randn(m, C*H*W)
# Measurement operator actually used: random *subsampling* — m distinct
# rows of the identity, so y keeps m of the C*H*W pixels of each image
# (the Gaussian A above is the commented-out alternative).
A = torch.eye(C*H*W)[np.random.choice(a=C*H*W, size=m, replace=False).tolist()]
y = torch.matmul(A, test_sample.view(N, -1, 1))
print(A.shape)
print(y.shape)
print(test_sample.shape)
# -
# ## Load the network and perform Langevin dynamics on the noisy images
ckpt_path = "/home/sravula/experiments/logs/run_2/checkpoint.pth"
config_path = "/home/sravula/ncsnv2/configs/velocity.yml"
# +
import argparse
from main import dict2namespace
import yaml
# Parse the model/sampling configuration into a namespace object.
with open(config_path, 'r') as f:
    # BUGFIX: yaml.load() without an explicit Loader is deprecated since
    # PyYAML 5.1 and a TypeError in PyYAML >= 6; FullLoader matches the
    # old default behaviour for this trusted local config file.
    config = yaml.load(f, Loader=yaml.FullLoader)
new_config = dict2namespace(config)
# Run on GPU when available.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
new_config.device = device
print(device)
# +
from models.ncsnv2 import NCSNv2Deepest
from models.ema import EMAHelper
from models import get_sigmas
new_config.input_dim = new_config.data.image_size ** 2 * new_config.data.channels
score = NCSNv2Deepest(new_config).to(new_config.device)
score = torch.nn.DataParallel(score)
#Set up the exponential moving average
if new_config.model.ema:
ema_helper = EMAHelper(mu=new_config.model.ema_rate)
ema_helper.register(score)
states = torch.load(ckpt_path)
score.load_state_dict(states[0])
### Make sure we can resume with different eps
states[1]['param_groups'][0]['eps'] = new_config.optim.eps
if new_config.model.ema:
ema_helper.load_state_dict(states[4])
#grab all L noise levels
sigmas = get_sigmas(new_config)
# +
test_score = ema_helper.ema_copy(score)
test_score.eval()
# +
from models import langevin_Inverse
import time
N, C, H, W = test_sample.shape
print(N, C, H, W)
# Move measurements and operator to the compute device; start from uniform noise.
y = y.to(new_config.device)
A = A.to(new_config.device)
x0 = torch.rand(N, C, H, W, device=new_config.device)
mse = torch.nn.MSELoss()
mse_start = mse(x0, test_sample.to(new_config.device))
print("Start MSE: ", mse_start.item())
start = time.time()
# decimate_sigma subsamples the ~1100 noise levels down to ~num_iters steps.
num_iters = 100
all_samples = langevin_Inverse(x_mod = x0,
                               y = y,
                               A = A,
                               scorenet = test_score,
                               sigmas = sigmas.cpu().numpy(),
                               n_steps_each=1,
                               step_lr=new_config.sampling.step_lr,
                               final_only=False,
                               verbose=True,
                               denoise=False,
                               add_noise=False,
                               decimate_sigma=1100//num_iters,
                               mode=None,
                               true_x=test_sample.to(new_config.device))
print("\nTOTAL TIME: ", time.time() - start)
# +
# NOTE(review): this first grid is titled "INITIALISATION" but is drawn from x0
# *after* langevin_Inverse ran — presumably x_mod is updated in place, in which
# case this shows the final iterate, not the start point. Confirm.
sample = x0.cpu()
sample = torch.clamp(sample, 0.0, 1.0)
grid_img = torchvision.utils.make_grid(sample, nrow=5)
dpi = matplotlib.rcParams['figure.dpi']
height = width = 6*256
figsize = width / float(dpi), height / float(dpi)
plt.figure(figsize=figsize)
plt.title("INITIALISATION")
plt.imshow(grid_img.permute(1, 2, 0))
plt.show()
# Plot every 10th intermediate sample of the Langevin trajectory.
for i in range(len(all_samples)//10):
    sample = all_samples[i*10].view(all_samples[i*10].shape[0], new_config.data.channels,
                                    new_config.data.image_size,
                                    new_config.data.image_size)
    sample = torch.clamp(sample, 0.0, 1.0)
    grid_img = torchvision.utils.make_grid(sample, nrow=5)
    dpi = matplotlib.rcParams['figure.dpi']
    height = width = 6*256
    figsize = width / float(dpi), height / float(dpi)
    plt.figure(figsize=figsize)
    plt.title("ITERATION" + str(i))
    plt.imshow(grid_img.permute(1, 2, 0))
    plt.show()
# -
| .ipynb_checkpoints/Inverse_Problem_Test-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import requests
import bs4
from bs4 import BeautifulSoup
# +
# Scrape artist names from the Wikipedia list of musicians who died of overdose.
wiki = "https://en.wikipedia.org/wiki/List_of_pop_musicians_who_died_of_drug_overdose"
page = requests.get(wiki)
soup = BeautifulSoup(page.content,'html.parser')
names = set()
tables = soup.findAll(name='table', class_='wikitable sortable')
for table in tables:
    body = table.findChild("tbody" , recursive=False)
    for row in body.findAll('tr'):
        # The first link in each data row is the artist; header rows have no
        # links and are skipped.
        cells = row.findAll('a')
        if(len(cells) == 0) :continue
        names.add(cells[0].text.strip())
# -
names
len(names)
# Add artists from a second source; entries are <h2> headings like "12. Artist".
page = requests.get("https://drugabuse.com/30-famous-musicians-who-have-battled-drug-addiction-and-alcoholism/")
soup = BeautifulSoup(page.content,'html.parser')
for entry in soup.findAll("h2"):
    try:
        # Keep the text after the leading "N." numbering.
        names.add(entry.text.split(".",1)[1].strip())
    except IndexError:
        # FIX: was a bare `except:`, which silently swallowed *every* error
        # (including KeyboardInterrupt and real bugs). Only the expected
        # failure — an <h2> without a "N." prefix — should end the list.
        break
print(len(names))
# Filter the lyrics tables down to the scraped artist names, two equivalent ways.
namesPD = pd.DataFrame(data={"Band": list(names)})
songsTable1 = pd.read_csv("lyrics/lyrics1.csv")
songsTable1.head()
pd1= songsTable1.merge(namesPD, how="inner", on="Band")
pd2= songsTable1[songsTable1['Band'].isin(names)]
# +
print(len(pd1))
print(len(pd2))
#This is just to show that these are the same frames, that the two methods in the above cells do the same thing
pd1.sort_values(by=["Band",'Lyrics','Song'],inplace=True)
pd2.sort_values(by=["Band",'Lyrics','Song'],inplace=True)
pd2.reset_index(drop=True,inplace=True)
pd1.reset_index(drop=True,inplace=True)
print(pd2.equals(pd1))
# +
pd2[pd2.duplicated(keep=False)]
pd1[pd1.duplicated(keep=False)]
#there are duplicates, need to drop later
# +
# Repeat for the second lyrics file.
songsTable2 = pd.read_csv("lyrics/lyrics2.csv")
pd3= songsTable2.merge(namesPD, how="inner", on="Band")
pd4= songsTable2[songsTable2['Band'].isin(names)]
pd3.sort_values(by=["Band",'Lyrics','Song'],inplace=True)
pd4.sort_values(by=["Band",'Lyrics','Song'],inplace=True)
pd3 = pd3.reset_index(drop=True)
pd4 = pd4.reset_index(drop=True)
print(pd3.equals(pd4))
pd3[pd3.duplicated(keep=False)]
pd4[pd4.duplicated(keep=False)]
# -
len(pd3)
len(pd4)
pd3.head()
pd2.head()
# NOTE(review): pd1 and pd2 were just shown to be equal, so this concat
# duplicates every row — presumably pd1 and pd3 (the two files) were intended.
pd5 = pd.concat([pd1,pd2])
pd5
| gettingNames.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
import itertools
import glob
import pickle
import matplotlib.pyplot as plt
# +
def getTrainData(path_files):
    """Load every mineral CSV under *path_files* into one frame.

    Returns (features, full): `full` is the row-wise concatenation of all
    CSVs; `features` is the same data with identifier/metadata columns and
    the misnamed 'H20' column removed.
    """
    frames = [pd.read_csv(fname, index_col=None, header=0, encoding="ISO-8859-1")
              for fname in glob.glob(path_files + "*.csv")]
    df_all = pd.concat(frames, axis=0, ignore_index=True)
    # Non-feature columns carried over from the source files.
    meta_cols = ['Unnamed: 0', 'X1', 'id', 'SAMPLE', 'GROUP', 'MINERAL', 'ROCK', 'X1_1']
    df = df_all.drop(columns=meta_cols)
    df = df.drop(columns=["H20"])
    return df, df_all
def test_acc(X,y, model,plot=True,modelName='rf'):
    # Function to test the accuracy
    """Report ensemble statistics and accuracy of *model* on (X, y).

    Prints average tree size/depth, sorted feature importances, the
    classification report and accuracy; optionally plots a normalised
    confusion matrix. Returns the accuracy score.
    """
    n_nodes = []
    max_depths = []
    # Average complexity of the individual trees in the forest.
    for ind_tree in model.estimators_:
        n_nodes.append(ind_tree.tree_.node_count)
        max_depths.append(ind_tree.tree_.max_depth)
    print(f'Average number of nodes {int(np.mean(n_nodes))}')
    print(f'Average maximum depth {int(np.mean(max_depths))}')
    feature_list = list(X.columns)
    # Get numerical feature importances
    importances = list(model.feature_importances_)
    # List of tuples with variable and importance
    feature_importances = [(feature, round(importance, 2)) for feature, importance in zip(feature_list, importances)]
    # Sort the feature importances by most important first
    feature_importances = sorted(feature_importances, key = lambda x: x[1], reverse = True)
    # Print out the feature and importances
    [print('Variable: {:20} Importance: {}'.format(*pair)) for pair in feature_importances];
    y_pred = model.predict(X)
    cm = confusion_matrix(y, y_pred)
    if plot:
        # NOTE(review): `qmin` is not among this file's visible imports —
        # presumably a project plotting helper; if it is missing, plot=True
        # raises NameError. Confirm the import.
        if modelName == 'rf':
            qmin.plot_confusion_matrix(cm, classes = np.unique(y),
                                       title = 'Confusion Matrix',normalize=True)
        else:
            # Non-default model names also save the figure to ../figures/.
            qmin.plot_confusion_matrix(cm, classes = np.unique(y),
                                       title = 'Confusion Matrix',normalize=True,
                                       output='../figures/RF_'+modelName+'_confusion_matrix.png')
    print(classification_report(y,y_pred))
    print(accuracy_score(y, y_pred))
    return accuracy_score(y, y_pred)
def save_Model(model, model_name):
    """Pickle *model* to ../model_py/<model_name> and print where it went."""
    path = '../model_py/'
    # FIX: the file handle was previously opened inline and never closed;
    # the context manager guarantees it is flushed and released.
    with open(path + model_name, 'wb') as f:
        pickle.dump(model, f)
    print("Model Saved ...\n"+path+model_name)
def randomForestBuilder(trainData,labels, acc_test=True, saveModel=True, nameModel='RF'):
    """Train a RandomForest on 70% of the data and evaluate on the held-out 30%.

    acc_test: also plot the confusion matrix during evaluation.
    saveModel: pickle the fitted model to ../model_py/<nameModel>.pkl.
    Returns (model, held-out accuracy).
    """
    # Separate 30% for test, training in 70% of Data!
    train, test, train_labels, test_labels = train_test_split(trainData, labels,
                                                              stratify = labels,
                                                              test_size = 0.3)
    model = RandomForestClassifier(n_estimators=50,
                                   max_features = 'sqrt',
                                   n_jobs=-1, verbose = 1,
                                   oob_score=True)
    # Fit on training data
    model.fit(train, train_labels)
    # FIX: the original evaluated twice when acc_test was True (once for the
    # plot, then again in the return statement); evaluate exactly once.
    if acc_test:
        acc = test_acc(test, test_labels, model, modelName=nameModel)
    else:
        acc = test_acc(test, test_labels, model, plot=False)
    if saveModel:
        save_Model(model, nameModel+'.pkl')
    return model, acc
def _cleanDataFrame(df):
df = df.drop(columns = ['Unnamed: 0', 'X1', 'id', 'SAMPLE', 'GROUP', 'ROCK','X1_1'])
df = df.drop(columns = ["H20"])
return df
def createMineralModel(data,namegroup):
    """Train the per-group mineral classifier for *namegroup*.

    Pops 'MINERAL' off *data* (mutating the caller's frame) to use as labels.
    Returns (fitted model, held-out accuracy).
    """
    #get Dataframe to create multiple mineral models
    # FIX: was `labels = labels = np.array(...)` — duplicated assignment.
    labels = np.array(data.pop('MINERAL'))
    trainData = _cleanDataFrame(data)
    mineralModel, acc = randomForestBuilder(trainData,labels,acc_test=False,
                                            saveModel=False, nameModel=namegroup)
    return mineralModel,acc
def modelCreate():
    """Train the two-stage classifier: one GROUP model plus one MINERAL model per group.

    Returns (models, trainFeatures): dict mapping 'GROUP' / each group name to
    its fitted RandomForest, and the feature column index used for training.
    """
    models = {}
    trainData, allData = getTrainData('../data_train/')
    trainFeatures = trainData.columns
    # pop removes GROUP from allData; re-attach it so the per-group
    # filtering below still works.
    labels = np.array(allData.pop('GROUP'))
    allData['GROUP'] = labels
    #Group Model Classify
    groupModel, acc = randomForestBuilder(trainData,labels,acc_test=False,
                                          saveModel=True, nameModel='RF_GROUP')
    models['GROUP'] = groupModel
    groups = allData['GROUP'].unique() # Get all groups for mineral classification
    # Mineral Model Classify
    accs = []
    for group in groups:
        print('Training .... '+group)
        mineralData = allData[allData['GROUP']== group]
        models[group], acc = createMineralModel(mineralData,group)
        accs.append([group,acc])
    print(accs)
    return models,trainFeatures
# Train everything and persist the whole model dictionary in one pickle.
models, trainFeatures = modelCreate()
# Store the training feature order alongside the models so prediction code
# can align its input columns.
models['Train Features'] = trainFeatures.values.tolist()
#models['Train Features'] = ['SIO2','TIO2','AL2O3','CR2O3','FEOT','CAO','MGO','MNO','K2O',\
#                            'NA2O','P2O5','F','CL','NIO','CUO','COO','ZNO','PBO','S','ZRO2', 'AS']
path = '../model_py/'
model_name = 'allmodels'
with open(path+model_name+'.pkl', 'wb') as f:
    pickle.dump(models, f)
#    pickle.dump(models, open(path+model_name+'.pkl', 'wb'))
print("Model Saved ...\n"+path+model_name)
# -
# Per-group held-out accuracies from a previous run, reprinted nicely.
accs = [['AMPHIBOLES', 0.8045977011494253], ['CARBONATE', 0.8888888888888888], ['CLAY', 0.8271604938271605], ['FELDSPAR', 0.8888888888888888], ['FELDSPATHOID', 0.8787878787878788], ['GARNET', 0.8611111111111112], ['MICA', 0.9281045751633987], ['OLIVINE', 0.9333333333333333], ['PYROXENE', 0.85], ['SPINEL', 0.7373737373737373], ['SULFIDE', 0.9354838709677419], ['APATITE', 1.0], ['ILMENITE', 1.0], ['PEROVSKITE', 1.0], ['QUARTZ', 1.0], ['TITANITE', 1.0], ['ZIRCON', 1.0]]
for i in accs:
    print('%s, %.3f'%(i[0],i[1]))
| Code_Python/Model_builder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Recurrenty Neural Networks to predict electricity usage
#
# This notebook takes raw observational data on electrical usage and creates a daily prediction for the next seven days of usage. The dataset is the household_power_consumption dataset hosted by the UCI Machine Learning Repository. The target value to predict is 'Global_active_power'.
#
# #### Steps
# ##### Data Loading and Cleaning
# + Download data
# + unzip data, create pandas data frame from the 'household_power_consumption.txt' in the zip file
# + combine date and time to a datatime index for the data frame
# + aggregate the data to daily from the observational level
# + reshape data so that an entire week of data is used to predict the next week
# + RNN Data input needs to be 3d , (observations, timesteps, n_cols)
# + split the data into training and test sets
#
# ##### Modeling
#
# + Design a neural network architecture
# + compile
# + fit the model
#
# ##### Deployment
# + write a function that combines cleaning steps to create a predictions
#
# +
import requests, zipfile
import pandas as pd
from io import BytesIO
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip'
# get a zip file from UCI machine learning repository
request = requests.get(url)
file = zipfile.ZipFile(BytesIO(request.content))
# reads txt file from zip, as byte code
with file.open('household_power_consumption.txt') as f:
    txt = f.readlines()
# decodes bytes to string, replacing Windows line endings ('\r\n') with ''
txt_decoded = [row.decode("utf-8").replace('\r\n', '') for row in txt]
# extracts column names
cols = txt_decoded[0].split(';')
# create a data frame — note only the first ~100k observation rows are kept
df = pd.DataFrame(columns=cols, data = [row.split(';') for row in txt_decoded[1:100000]])
# combines date and time col to a date time col
df['Datetime'] = pd.to_datetime(df['Date'] + ' ' + df['Time'])
# sets date time as index
df.set_index('Datetime', inplace=True)
# drops the separate Date and Time Columns
df.drop(['Date', 'Time'], axis=1, inplace=True)
# coerce all the string columns to float ('?' missing markers become NaN)
for col in df.columns:
    df[col] = pd.to_numeric(df[col], errors='coerce')
# -
# Basic information about the data
# FIX: this cell previously read `daily_data.index`, but daily_data is only
# created a few cells below (df.resample('D').sum()), so the first run raised
# NameError. The minute-level frame `df` exists here and spans the same dates.
# (The unused `horrizon = min_date` assignment was also dropped.)
dates = df.index
min_date = min(dates)
max_date = max(dates)
print('min date', min_date)
print('max date', max_date)
print('data shape', df.shape)
df.head()
# ##### About the data
# As is clear from the index, the data is one minute time stamp observations of power usage
# + global_active_power: The total active power consumed by the household (kilowatts). (This is the target)
# + global_reactive_power: The total reactive power consumed by the household (kilowatts).
# + voltage: Average voltage (volts).
# + global_intensity: Average current intensity (amps).
# + sub_metering_1: Active energy for kitchen (watt-hours of active energy).
# + sub_metering_2: Active energy for laundry (watt-hours of active energy).
# + sub_metering_3: Active energy for climate control systems (watt-hours of active energy).
#
# groups by daily
daily_groups = df.resample('D')
# aggregates each day by summing the minute-level readings
daily_data = daily_groups.sum()
daily_data.head()
# +
# Generator of (input_dates, target_dates) pairs: each is a d-day
# pd.DatetimeIndex, with the target window immediately following the input
# window, advancing d days per step.
def get_data_ranges(dates, d=7):
    start, stop = min(dates), max(dates)
    step = pd.Timedelta(days=d)
    cursor = start
    # Continue only while both a full input window and a full target
    # window fit before the last available date.
    while cursor + step * 2 <= stop:
        yield (pd.date_range(cursor, periods=d),
               pd.date_range(cursor + step, periods=d))
        cursor = cursor + step
# Materialise all weekly (input, target) range pairs and peek at the first one.
date_ranges = list(get_data_ranges(daily_data.index, d=7))
date_ranges[0]
# +
# Add Fill witn Previous !!
# +
import numpy as np
# function that yields 2d arrays (timestep, x_cols) for training and testing
# function that yields 2d arrays (timestep, x_cols) for training and testing
def array_gen(df, d=7, targetCol='Global_active_power'):
    """Yield (x, y) 2-D arrays built from d consecutive window pairs.

    x stacks the full feature rows of d input windows, reshaped to (d, -1);
    y stacks the target column of the d following windows, reshaped to (d, -1).
    Window groups whose dates are missing from df are skipped.

    FIX: the original pulled items with `next(gen)` inside a list
    comprehension and relied on `except StopIteration` — but since PEP 479
    (Python 3.7) that StopIteration surfaces as RuntimeError, so exhaustion
    crashed instead of ending cleanly. Collect explicitly and return instead.
    """
    # generates the date-range pairs to sample from
    gen = get_data_ranges(df.index, d=d)
    while True:
        data_list = []
        try:
            for _ in range(d):
                data_list.append(next(gen))
        except StopIteration:
            # Fewer than d window pairs remain: done (partial groups are
            # dropped, matching the original intent).
            print('array gen completed')
            return
        x_ranges = [v[0] for v in data_list]
        y_ranges = [v[1] for v in data_list]
        try: # there is a case where date ranges are not in df, that causes key error
            # uses index loc to get date ranges from df
            x = np.reshape([df.loc[r].values for r in x_ranges], (d, -1))
            y = np.reshape([df.loc[r, targetCol].values for r in y_ranges], (d, -1))
            yield x, y
        except KeyError:
            pass
# creates the data generator
days = 7
x_cols = daily_data.shape[1]
g = array_gen(daily_data, d=days)
# creates list of (x, y) tuples
data_list = list(g)
# reshapes list of (x, y) tuples into 3d x, y arrays for training
# (observations, timesteps, n_cols) for x; (observations, horizon) for y
x = np.reshape([v[0] for v in data_list], (-1, days, x_cols))
y = np.reshape([v[1] for v in data_list], (-1, days))
print('x_shape', x.shape)
print('y_shape', y.shape)
# +
# data splitting
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)
# -
# #### Data Shape for Machine Recurrent Neural Networks
# Input into LSTM RNN or GRU needs to be three dimensional, with shape
#
# (observations, timesteps, n_cols)
#
# +
from keras.models import Sequential
from keras.layers import LSTM, Dense, Flatten
import numpy as np
batch_size = 5
# Expected input batch shape: (batch_size, timesteps, data_dim)
# Note that we have to provide the full batch_input_shape since the network is stateful.
# the sample of index i in batch k is the follow-up for the sample i in batch k-1.
# NOTE(review): the comment above describes a stateful net, but both LSTM
# layers use stateful=False and the batch dimension is left as None — and
# fit() below uses batch_size=10, not the `batch_size = 5` set here. Confirm
# which configuration is intended.
model = Sequential()
# Two stacked LSTMs; the first returns full sequences to feed the second.
model.add(LSTM(4, return_sequences=True, stateful=False,
               batch_input_shape=(None, days, x_cols)))
model.add(LSTM(4, stateful=False, return_sequences=False))
# One linear output per day of the 7-day forecast horizon.
model.add(Dense(7, activation='linear'))
model.compile(loss='mean_squared_logarithmic_error',
              optimizer='adam',
              metrics=['mae'])
model.summary()
# -
model.fit(X_train, y_train, batch_size=10, epochs=10)
score = model.evaluate(X_test, y_test, batch_size=16)
| DeepLearning/KerasLstm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="uc_sSbDHqU5s"
import tensorflow as tf
import numpy as np
from tensorflow import keras
import matplotlib.pyplot as plt
from tensorflow.keras.losses import sparse_categorical_crossentropy
from keras.layers import Dense, Flatten, Activation, Dropout
from tensorflow.keras.utils import to_categorical
# + id="utXrUqZequpb"
# Fashion-MNIST: 60k/10k greyscale 28x28 images in 10 classes.
(x_train,y_train),(x_test,y_test)= keras.datasets.fashion_mnist.load_data()
# + colab={"base_uri": "https://localhost:8080/"} id="TXX7fBxurLAM" outputId="f4a54633-a4ce-4895-d8f6-424dbbfeb435"
x_train.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="M4_Cj6edrQyx" outputId="b208cbc6-2360-4798-a193-b18114776889"
plt.imshow(x_train[0], cmap = plt.cm.binary)
plt.show()
# + id="QbOh9ogdrpX6"
#Normalize the Data
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
#Normalize the data to [0, 1]
x_train = x_train/255.0
x_test = x_test/255.0
# + id="e8__bxdpwnTl"
# NOTE(review): these one-hot labels are never used below — training uses the
# integer labels with sparse_categorical_crossentropy. Presumably leftover.
train_Y = to_categorical(y_train)
test_Y = to_categorical(y_test)
# + id="3wRwouu7sB55"
# Three conv/pool stages (32 -> 64 -> 128 filters) followed by a dense head.
# NOTE(review): input_shape is (28,28,1) but x_train above is (N,28,28) with
# no channel axis — Keras Conv2D normally requires the explicit reshape to
# (N,28,28,1) before fit(); confirm this runs as-is.
model = keras.models.Sequential([
    keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(28,28,1)),
    keras.layers.MaxPooling2D((2,2)),
    keras.layers.Conv2D(64, (3,3), activation='relu', padding='same'),
    #keras.layers.Dropout(0.3),
    #keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(28,28,1)),
    keras.layers.MaxPooling2D((2,2)),
    keras.layers.Conv2D(128, (3,3), activation='relu', padding='same'),
    #keras.layers.Dropout(0.3),
    #keras.layers.Conv2D(128, (3,3), activation='relu', input_shape=(28,28,1)),
    keras.layers.MaxPooling2D((2,2)),
    keras.layers.Flatten(),
    keras.layers.Dropout(0.4),
    keras.layers.Dense(256, activation='relu'),
    #keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
# + colab={"base_uri": "https://localhost:8080/"} id="WAsQinRmshk7" outputId="a74e032e-085c-46bb-cb78-dc18f990d691"
model.summary()
# + id="hEaq8-1Gsru4"
# Sparse loss: expects integer class labels (y_train), not one-hot vectors.
model.compile(
    loss='sparse_categorical_crossentropy',
    optimizer='sgd',
    metrics=['accuracy']
)
# + colab={"base_uri": "https://localhost:8080/"} id="F5XDv_j7stFp" outputId="477ff29b-ba2b-4492-c32e-ac2f4b0aa1d0"
# NOTE(review): the test set is used as validation data here, so the
# "validation" curves below are really test-set curves.
history = model.fit(
    x_train,
    y_train,
    epochs=50,
    batch_size=64,
    validation_data=(x_test,y_test)
)
# + colab={"base_uri": "https://localhost:8080/"} id="c9ndcqLax83E" outputId="9a491f61-7dc7-49a0-ddc5-505f2725e626"
test_eval = model.evaluate(x_test, y_test)
# + colab={"base_uri": "https://localhost:8080/"} id="XnWJb3_938g7" outputId="f574bb1e-4565-4d5b-fa4f-4b937157fdf7"
# NOTE(review): multiplying the loss by 100 and printing '%' mislabels it —
# loss is not a percentage (accuracy below is fine).
print(f'Test Loss:{round(test_eval[0]*100,2)}%')
print(f'Test Accuracy:{round(test_eval[1]*100,2)}%')
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="3AjRM37-yvSS" outputId="6de91df2-3667-4e08-8033-40398c3479f6"
# Loss curves over epochs.
plt.plot(history.history["loss"], label="train_loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Train and Validation Losses Over Epochs", fontsize=14)
plt.legend()
plt.grid()
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="235qabsozy-7" outputId="69332d6b-d7e2-4940-c2ae-8f49f18c8b45"
# NOTE(review): unusual extension — '.h5py' is neither '.h5' nor a directory
# SavedModel path; confirm intended format.
model.save('CNNforFMNIST.h5py')
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="HHwwLXMly9yM" outputId="41b43d01-c5fc-4d0f-af80-214d3bb7f36f"
# Accuracy curves over epochs.
plt.plot(history.history["accuracy"], label="train_accuracy")
plt.plot(history.history["val_accuracy"], label="val_accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.title("Train and Validation Accuracy Over Epochs", fontsize=14)
plt.legend()
plt.grid()
plt.show()
# + id="nlYbVT7y6dU6"
import pandas as pd
# + colab={"base_uri": "https://localhost:8080/", "height": 487} id="9pxYO9-M6iIZ" outputId="b99b65e6-d7c3-438e-8b93-b0ef80abb758"
# All four curves at once, clipped to [0, 1], saved as 'CNNfmnist'.
pd.DataFrame(history.history).plot(figsize=(12,8))
plt.grid()
plt.gca().set_ylim(0,1)
plt.savefig('CNNfmnist')
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="TVrOGvhJ3mse" outputId="1628d69e-edd3-47c0-d6f7-752e3769a1a8"
# Combined accuracy/loss plot.
plt.plot(history.history["accuracy"], label="train_accuracy")
plt.plot(history.history["val_accuracy"], label="val_accuracy")
plt.plot(history.history["loss"], label="train_loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.xlabel("Epochs")
plt.ylabel("Accuracy/Loss")
plt.title("Train and Validation Accuracy/Loss Over Epochs", fontsize=12)
plt.legend()
plt.grid()
plt.show()
| CNNFMnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="v6QTLOT_sRhb" outputId="2a7b689a-6c70-4ee0-d4ae-6472501cb1d4"
# Mount Google Drive so the notebook can read the project data/checkpoints.
from google.colab import drive
drive.mount('/content/drive')
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="YJlSuVo-sX4I" outputId="5ec6f4c8-99e6-4e6b-f17f-02152b009b63"
# Work from the chatbot project directory (relative paths below depend on it).
import os
os.chdir('/content/drive/MyDrive/DataCollection/tensorflow-ml-nlp-tf2/6.CHATBOT')
os.getcwd()
# + [markdown] id="OS6XpHcmsJqh"
# # 패키지 불러오기
# + colab={"base_uri": "https://localhost:8080/"} id="2_lz_QO9sqVK" outputId="0c624514-29ec-482e-eba9-807a8346a4ed"
# !pip install konlpy
# + id="14_iMQHxsJql"
import tensorflow as tf
import numpy as np
import os
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import matplotlib.pyplot as plt
from preprocess import *
# + [markdown] id="GLeBU2eKsJqm"
# # 시각화 함수
# + id="O4ar_IsXsJqm"
def plot_graphs(history, string):
    """Plot a training metric and its validation counterpart over epochs."""
    train_curve = history.history[string]
    val_curve = history.history['val_' + string]
    plt.plot(train_curve)
    plt.plot(val_curve, '')
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.legend([string, 'val_' + string])
    plt.show()
# + [markdown] id="JbB6yBC0sJqn"
# # 학습 데이터 경로 정의
# + id="KJvg1oyDsJqn"
# Paths to the preprocessed training arrays and vocabulary config.
DATA_IN_PATH = './data_in/'
DATA_OUT_PATH = './data_out/'
TRAIN_INPUTS = 'train_inputs.npy'
TRAIN_OUTPUTS = 'train_outputs.npy'
TRAIN_TARGETS = 'train_targets.npy'
DATA_CONFIGS = 'data_configs.json'
# + [markdown] id="D4-TJnztsJqn"
# # Fix the random seed
# + id="uUv23AegsJqn"
SEED_NUM = 1234
tf.random.set_seed(SEED_NUM)
# + [markdown] id="t8b504C3sJqn"
# # Load the files
# + id="uQxBwy_tsJqo"
# FIX: np.load/json.load were fed bare open(...) calls whose handles were
# never closed. np.load accepts a path directly; json.load gets a context
# manager. (`json` is presumably provided by `from preprocess import *` —
# confirm.)
index_inputs = np.load(DATA_IN_PATH + TRAIN_INPUTS)
index_outputs = np.load(DATA_IN_PATH + TRAIN_OUTPUTS)
index_targets = np.load(DATA_IN_PATH + TRAIN_TARGETS)
with open(DATA_IN_PATH + DATA_CONFIGS, 'r') as f:
    prepro_configs = json.load(f)
# + colab={"base_uri": "https://localhost:8080/"} id="E2AZ6i5ZsJqo" outputId="3748cbe8-0b82-4134-b6ee-818746424e23"
# Show number of examples (this is the dataset size, not sequence length)
print(len(index_inputs), len(index_outputs), len(index_targets))
# + [markdown] id="ytvLRgdssJqp"
# ## Declare the values needed to build the model
# + id="tjR9ztsGsJqp"
MODEL_NAME = 'seq2seq_kor'
BATCH_SIZE = 2
MAX_SEQUENCE = 25 # empirically chosen, not derived from EDA
EPOCH = 30
UNITS = 1024
EMBEDDING_DIM = 256
VALIDATION_SPLIT = 0.1
# Vocabulary lookups and special symbols produced by preprocessing.
char2idx = prepro_configs['char2idx']
idx2char = prepro_configs['idx2char']
std_index = prepro_configs['std_symbol']
end_index = prepro_configs['end_symbol']
vocab_size = prepro_configs['vocab_size']
# + [markdown] id="m5ftPeOCsJqp"
# # 모델
# + [markdown] id="QYYki9ehsJqp"
# ## 인코더
# + id="QvTn2_V2sJqp"
class Encoder(tf.keras.layers.Layer):
    """GRU encoder: embeds token ids and returns per-step outputs plus the
    final hidden state."""
    def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
        super(Encoder, self).__init__()
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.enc_units = enc_units
        self.batch_sz = batch_sz
        self.embedding = tf.keras.layers.Embedding(self.vocab_size, self.embedding_dim)
        # glorot_uniform == Xavier initialisation for the recurrent kernel.
        self.gru = tf.keras.layers.GRU(self.enc_units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
    def call(self, x, hidden):
        embedded = self.embedding(x)
        output, state = self.gru(embedded, initial_state=hidden)
        # initial_state: list of state tensors passed to the first cell call.
        return output, state
    def initialize_hidden_state(self, inp):
        # Zero state sized to the actual (possibly partial) batch.
        return tf.zeros((tf.shape(inp)[0], self.enc_units))
# + [markdown] id="OlM83buYsJqq"
# ## 어텐션
# + id="4LnkSPTwsJqq"
class BahdanauAttention(tf.keras.layers.Layer):
    """Additive (Bahdanau) attention: score = V · tanh(W1·values + W2·query)."""
    def __init__(self, units):
        super(BahdanauAttention, self).__init__()
        self.W1 = tf.keras.layers.Dense(units)
        self.W2 = tf.keras.layers.Dense(units)
        self.V = tf.keras.layers.Dense(1)
        # W1, W2 and V are the learned weights.
    def call(self, query, values):
        """query: decoder hidden state; values: encoder outputs.
        Returns (context_vector, attention_weights)."""
        # Broadcast the query across the time axis of the encoder outputs.
        hidden_with_time_axis = tf.expand_dims(query, 1)
        score = self.V(tf.nn.tanh(
            self.W1(values) + self.W2(hidden_with_time_axis)))
        # Softmax over the time axis gives one weight per encoder step.
        attention_weights = tf.nn.softmax(score, axis=1)
        # Weighted sum of encoder outputs -> fixed-size context vector.
        context_vector = attention_weights * values
        context_vector = tf.reduce_sum(context_vector, axis=1)
        return context_vector, attention_weights
# + [markdown] id="-ykCV-ifsJqq"
# ## 디코더
# + id="_45X3jdFsJqq"
class Decoder(tf.keras.layers.Layer):
    """GRU decoder with Bahdanau attention over the encoder outputs.

    Each call decodes ONE time step: it attends over enc_output, concatenates
    the context vector to the embedded input token, runs the GRU, and projects
    to vocabulary logits.
    """
    def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
        super(Decoder, self).__init__()
        self.batch_sz = batch_sz
        self.dec_units = dec_units
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.embedding = tf.keras.layers.Embedding(self.vocab_size, self.embedding_dim)
        self.gru = tf.keras.layers.GRU(self.dec_units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
        # Projection from GRU output to vocabulary logits.
        self.fc = tf.keras.layers.Dense(self.vocab_size)
        self.attention = BahdanauAttention(self.dec_units)
    def call(self, x, hidden, enc_output):
        """x: current input token ids; hidden: previous decoder state;
        enc_output: all encoder outputs. Returns (logits, state, attn_weights)."""
        context_vector, attention_weights = self.attention(hidden, enc_output)
        x = self.embedding(x)
        # Prepend the context vector to the embedded token along features.
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
        output, state = self.gru(x)
        # Collapse the (length-1) time axis before the projection.
        output = tf.reshape(output, (-1, output.shape[2]))
        x = self.fc(output)
        return x, state, attention_weights
# + id="8NfNGaUmsJqq"
optimizer = tf.keras.optimizers.Adam()
# reduction='none' keeps per-token losses so padding can be masked out below.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy')
def loss(real, pred):
    """Masked cross-entropy: <PAD> (token id 0) positions contribute nothing."""
    mask = tf.math.logical_not(tf.math.equal(real, 0)) # positions where real == 0 (<PAD>) become False
    loss_ = loss_object(real, pred)
    mask = tf.cast(mask, dtype=loss_.dtype) # True -> 1.0, False -> 0.0
    loss_ *= mask # so <PAD> positions are excluded from the loss
    return tf.reduce_mean(loss_)
def accuracy(real, pred):
    """Padding-masked accuracy metric.

    NOTE(review): the mask zeroes the *logits* at <PAD> positions rather than
    excluding them from the metric, so padded steps may still count as
    (in)correct predictions — confirm this is the intended masking.
    """
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    mask = tf.expand_dims(tf.cast(mask, dtype=pred.dtype), axis=-1)
    pred *= mask
    acc = train_accuracy(real, pred)
    return tf.reduce_mean(acc)
# + [markdown] id="hum9XiqXsJqr"
# ## 시퀀스 투 스퀀스 모델
# + id="U7ZSP2MVsJqr"
class seq2seq(tf.keras.Model):
    """Encoder-decoder model with attention.

    Training (`call`) uses teacher forcing: the ground-truth decoder input
    `tar` is fed step by step. `inference` decodes greedily from the start
    symbol until the end token or MAX_SEQUENCE steps.
    """
    def __init__(self, vocab_size, embedding_dim, enc_units, dec_units, batch_sz, end_token_idx=2):
        super(seq2seq, self).__init__()
        self.end_token_idx = end_token_idx
        self.encoder = Encoder(vocab_size, embedding_dim, enc_units, batch_sz)
        self.decoder = Decoder(vocab_size, embedding_dim, dec_units, batch_sz)
    def call(self, x):
        inp, tar = x # inp: encoder input, tar: decoder input (teacher forcing)
        enc_hidden = self.encoder.initialize_hidden_state(inp)
        enc_output, enc_hidden = self.encoder(inp, enc_hidden)
        dec_hidden = enc_hidden
        predict_tokens = list()
        # Decode one step per target position, feeding the TRUE previous token.
        for t in range(0, tar.shape[1]):
            dec_input = tf.dtypes.cast(tf.expand_dims(tar[:, t], 1), tf.float32)
            predictions, dec_hidden, _ = self.decoder(dec_input, dec_hidden, enc_output)
            predict_tokens.append(tf.dtypes.cast(predictions, tf.float32))
        # (batch, tar_len, vocab) logits.
        return tf.stack(predict_tokens, axis=1)
    def inference(self, x): # test-time helper: returns output tokens for a user input (single batch only)
        inp = x
        enc_hidden = self.encoder.initialize_hidden_state(inp)
        enc_output, enc_hidden = self.encoder(inp, enc_hidden)
        dec_hidden = enc_hidden # seed the decoder with the encoder's final hidden state
        # Start decoding from the start-of-sequence symbol.
        dec_input = tf.expand_dims([char2idx[std_index]], 1)
        predict_tokens = list()
        for t in range(0, MAX_SEQUENCE):
            predictions, dec_hidden, _ = self.decoder(dec_input, dec_hidden, enc_output)
            # Greedy decoding: take the argmax token each step.
            predict_token = tf.argmax(predictions[0])
            if predict_token == self.end_token_idx:
                break
            predict_tokens.append(predict_token)
            # Feed the predicted token back in as the next decoder input.
            dec_input = tf.dtypes.cast(tf.expand_dims([predict_token], 0), tf.float32)
        return tf.stack(predict_tokens, axis=0).numpy()
# + id="nVBm_MVlsJqr"
# Build and compile with the masked loss/accuracy defined above; the end
# symbol's index tells inference when to stop.
model = seq2seq(vocab_size, EMBEDDING_DIM, UNITS, UNITS, BATCH_SIZE, char2idx[end_index])
model.compile(loss=loss, optimizer=tf.keras.optimizers.Adam(1e-3), metrics=[accuracy])
#model.run_eagerly = True
# + [markdown] id="4Mnc5IggsJqs"
# ## 학습 진행
# + id="jc32ltFwsJqs"
# Ensure the output directory exists before the checkpoint callback writes to it.
PATH = DATA_OUT_PATH + MODEL_NAME
if not(os.path.isdir(PATH)):
    os.makedirs(os.path.join(PATH))
checkpoint_path = DATA_OUT_PATH + MODEL_NAME + '/weights.h5'
# Keep only the best weights (by validation accuracy).
cp_callback = ModelCheckpoint(
    checkpoint_path, monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=True)
# Stop if validation accuracy fails to improve by 1e-4 for 10 epochs.
earlystop_callback = EarlyStopping(monitor='val_accuracy', min_delta=0.0001, patience=10)
# Inputs are [encoder input, decoder (teacher-forcing) input]; targets are the
# decoder outputs shifted by one.
history = model.fit([index_inputs, index_outputs], index_targets,
                    batch_size=BATCH_SIZE, epochs=EPOCH,
                    validation_split=VALIDATION_SPLIT, callbacks=[earlystop_callback, cp_callback])
# + [markdown] id="JjBAzIZ2sJqt"
# ## 결과 플롯
# + id="-EII1dzfsJqt" outputId="bd1033f5-7159-4456-a117-81077f6183df"
plot_graphs(history, 'accuracy')
# + id="7zJj6LmNsJqu" outputId="b2711dff-aa30-4667-c9cb-90723b9db55b"
plot_graphs(history, 'loss')
# + [markdown] id="rMksH4X5sJqu"
# ### Check the results
# + id="q5IKTp2HsJqu"
# Reload the best checkpointed weights before running inference.
SAVE_FILE_NM = "weights.h5"
model.load_weights(os.path.join(DATA_OUT_PATH, MODEL_NAME, SAVE_FILE_NM))
# + id="N5dioXLUsJqv" outputId="2071a0d5-3548-4a70-d353-62afaa1e6edf"
# Encode a sample query, decode greedily, and print the answer tokens.
query = "남자친구 승진 선물로 뭐가 좋을까?"
test_index_inputs, _ = enc_processing([query], char2idx)
predict_tokens = model.inference(test_index_inputs)
print(predict_tokens)
# idx2char keys are strings (loaded from JSON), hence str(t).
print(' '.join([idx2char[str(t)] for t in predict_tokens]))
# + id="r3EBJ3fosJqv"
| 6.CHATBOT/6.4.seq2seq.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from tensorflow import keras
# +
import pandas as pd
from sklearn.model_selection import train_test_split
# download mnist dataset from https://www.kaggle.com/c/digit-recognizer/data into mnist dir
mnist_df = pd.read_csv('mnist/train.csv')
# shuffle=False makes the 75/25 split deterministic (first rows -> train).
train_df, test_df = train_test_split(mnist_df, shuffle=False)
# -
train_df.head()
# One column per pixel of the flattened 28x28 images, plus the digit label.
pixel_columns = ['pixel' + str(i) for i in range(0, 784)]
label_column = 'label'
# +
# Kept because a later cell reuses it for the test split.
vectorized_normalization_fn = np.vectorize(lambda x: x / 255.0)
# FIX: DataFrame.as_matrix() was removed in pandas 1.0 — use to_numpy().
# Dividing the whole array at once gives the same values as the per-element
# np.vectorize lambda, much faster.
normalized_matrix = train_df[pixel_columns].to_numpy() / 255.0
# -
normalized_matrix.shape
labels = train_df[label_column].to_numpy()
labels.shape
# Simple MLP: 784 -> 256 ReLU -> 10 softmax.
# NOTE(review): InputLayer's batch_size=5 fixes the static batch dimension,
# while fit() below uses its default batch size of 32 — confirm these are
# compatible in the Keras version in use.
model = keras.Sequential([
    keras.layers.InputLayer(input_shape=(784,), batch_size=5),
    keras.layers.Dense(256, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])
# Sparse loss: expects integer class labels, which `labels` provides.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(normalized_matrix, labels, epochs=3)
# +
# FIX: DataFrame.as_matrix() was removed in pandas 1.0 — use to_numpy().
normalized_test_matrix = vectorized_normalization_fn(test_df[pixel_columns].to_numpy())
test_labels = test_df[label_column].to_numpy()
_, test_acc = model.evaluate(normalized_test_matrix, test_labels)
print('Accuracy on test dataset:', test_acc)
# +
from IPython.display import display
from PIL import Image

def predict_digit(index):
    """Return the model's predicted digit for test image `index`.

    Relies on the notebook-level `model` and `normalized_test_matrix`
    defined in earlier cells.
    """
    # Slicing keeps a leading batch dimension of 1, as model.predict expects.
    predictions = model.predict(normalized_test_matrix[index:index + 1])
    return np.argmax(predictions, axis=1)[0]

def show_image(index):
    """Print the predicted digit for test image `index` and render the image."""
    print("predicted digit: %d" % predict_digit(index))
    print("digit image:")
    img_matrix = normalized_test_matrix[index].reshape(28, 28)
    # Denormalize [0, 1] floats back to uint8 pixel values. Plain ndarray
    # arithmetic replaces the original per-element np.vectorize call; the
    # uint8 truncation semantics are identical.
    img_matrix = (img_matrix * 255.0).astype(np.uint8)
    img = Image.fromarray(img_matrix, mode='L')
    display(img)
# -
# Spot-check a few test images against their predicted digits.
show_image(0)
show_image(3)
show_image(19)
show_image(429)
# Wrap the trained Keras model as a tf.estimator.Estimator (Estimator API,
# e.g. for distributed training/serving in later chapters).
estimator = tf.keras.estimator.model_to_estimator(model)
| Chapter08/train_mnist_local.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Combining Health and County Climate Data and Exploring
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#health data
# County-level mortality tables; parse year_id as datetime and keep FIPS as a
# string so leading zeros in county codes are preserved.
inf_df = pd.read_csv('../../data/02_cleaned/inf_mortality.zip', parse_dates=['year_id'], dtype={'FIPS': object})
resp_df = pd.read_csv('../../data/02_cleaned/resp_mortality.zip', parse_dates=['year_id'], dtype={'FIPS': object})
cvd_df = pd.read_csv('../../data/02_cleaned/cvd_mortality.zip', parse_dates=['year_id'], dtype={'FIPS': object})
# Mean infectious-disease mortality per year, one line per cause.
inf_df.groupby(['year_id', 'cause_name'])['mx'].mean().unstack().plot()
# BUG: `climate_df` is never defined anywhere in this notebook, so this line
# raised NameError. Commented out until the climate-data load cell is restored.
# climate_df.groupby(['month_year_long'])['min_dailyMaxAirTemp_F'].max().plot()
inf_df['year'] = inf_df['year_id'].dt.year
inf_df['mx'].plot()
# Restrict each table to the both-sex aggregate rows for comparable plots.
inf_df_both = inf_df[inf_df['sex'] == 'Both']
resp_df_both = resp_df[resp_df['sex'] == 'Both']
cvd_df_both = cvd_df[cvd_df['sex'] == 'Both']
# Mean mortality rate by cause: one horizontal-bar panel per disease group.
fig, ax = plt.subplots(3, 1, figsize=(12, 8))
inf_df.groupby(['cause_name'])['mx'].mean().plot(kind='barh', ax=ax[0])
resp_df.groupby(['cause_name'])['mx'].mean().plot(kind='barh', ax=ax[1])
cvd_df.groupby(['cause_name'])['mx'].mean().plot(kind='barh', ax=ax[2])
# + jupyter={"outputs_hidden": true} tags=[]
# One time-series plot per infectious-disease cause.
for title, group in inf_df.groupby('cause_name'):
    group.plot(x='year_id', y='mx', title=title)
# + jupyter={"outputs_hidden": true} tags=[]
# One time-series plot per cardiovascular-disease cause.
for title, group in cvd_df.groupby('cause_name'):
    group.plot(x='year_id', y='mx', title=title)
# +
# for title, group in resp_df.groupby('cause_name'):
#     group.plot(x='year_id', y='mx', title=title)
# -
# Sex-stratified views of the infectious-disease table.
# NOTE: inf_df_both duplicates the assignment made a few cells above.
inf_df_both = inf_df[inf_df['sex'] == 'Both']
inf_df_male = inf_df[inf_df['sex'] == 'Male']
inf_df_female = inf_df[inf_df['sex'] == 'Female']
# Trend of mx over time across all both-sex rows.
sns.lineplot(data=inf_df_both, x='year_id', y='mx')
# +
# 3x3 grid: rows = Both/Male/Female, columns = min/mean/max of mx by cause.
fig, ax = plt.subplots(3, 3, figsize=(35, 20))
inf_df_both.groupby('cause_name')['mx'].min().plot(kind='barh', ax=ax[0][0])
inf_df_both.groupby('cause_name')['mx'].mean().plot(kind='barh', ax=ax[0][1])
inf_df_both.groupby('cause_name')['mx'].max().plot(kind='barh', ax=ax[0][2])
inf_df_male.groupby('cause_name')['mx'].min().plot(kind='barh', ax=ax[1][0])
inf_df_male.groupby('cause_name')['mx'].mean().plot(kind='barh', ax=ax[1][1])
inf_df_male.groupby('cause_name')['mx'].max().plot(kind='barh', ax=ax[1][2])
inf_df_female.groupby('cause_name')['mx'].min().plot(kind='barh', ax=ax[2][0])
inf_df_female.groupby('cause_name')['mx'].mean().plot(kind='barh', ax=ax[2][1])
inf_df_female.groupby('cause_name')['mx'].max().plot(kind='barh', ax=ax[2][2])
plt.tight_layout();
# Seaborn alternatives kept for reference:
# sns.barplot(data=inf_df_both, y='cause_name', x='mx', ax=ax[0])
# sns.barplot(data=inf_df_male, y='cause_name', x='mx', ax=ax[1])
# sns.barplot(data=inf_df_female, y='cause_name', x='mx', ax=ax[2]);
# -
# BUG FIX: the original read `inf_demo_join_inf =` with the right-hand side in
# the next cell, which is a SyntaxError. Presumably this was a stub for joining
# mortality data with demographic data — TODO complete the actual join.
inf_demo_join_inf = inf_df
| code/jupyter_notebooks/02_EDA_DataViz.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .scala
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Apache Toree - Scala
// language: scala
// name: apache_toree_scala
// ---
// # Quick Recap of Scala
//
// Let us quickly recap some of the core programming concepts of Scala before we get into Spark.
// ## Data Engineering Life Cycle
//
// Let us first understand the Data Engineering Life Cycle. We typically read the data, process it by applying business rules and write the data back to different targets
// * Read the data from different sources.
// * Files
// * Databases
// * Mainframes
// * APIs
// * Processing the data
// * Row Level Transformations
// * Aggregations
// * Sorting
// * Ranking
// * Joining multiple data sets
// * Write data to different targets.
// * Files
// * Databases
// * Mainframes
// * APIs
// ## Python CLI or Jupyter Notebook
//
// We can use Python CLI or Jupyter Notebook to explore APIs.
//
// * We can launch Python CLI using `python` command.
// * We can launch the Jupyter Notebook using the `jupyter notebook` command.
// * A web service will be started on port number 8888 by default.
// * We can go to the browser and connect to the web server using IP address and port number.
// * We should be able to explore code in interactive fashion.
// * We can issue magic commands such as %%sh to run shell commands, %%md to document using markdown etc.
// ### Tasks
//
// Let us perform these tasks to just recollect how to use Python CLI or Jupyter Notebook.
// * Create variables i and j assigning 10 and 20.5 respectively.
// `val` declares immutable bindings; Int and Double are inferred.
val i = 10
val j = 20.5
// * Add the values and assign it to res.
// Int + Double widens to Double.
val res = i + j
println(res)
// * Get the type of i, j and res.
// ## Basic Programming Constructs
//
// Let us recollect some of the basic programming constructs of Python.
// * Comparison Operations (==, !=, <, >, <=, >=, etc)
// * All the comparison operators return a True or False (Boolean value)
// * Conditionals (if)
// * We typically use comparison operators as part of conditionals.
// * Loops (for)
// * We can iterate through collection using `for i in l` where l is a standard collection such as list or set.
// * Python provides special function called as `range` which will return a collection of integers between the given range. It excludes the upper bound value.
// * In Python, scope is defined by indentation.
// ### Tasks
//
// Let us perform few tasks to quickly recap basic programming constructs of Python.
// * Get all the odd numbers between 1 and 15.
//
// Inclusive range of odd numbers: 1, 3, 5, ..., 15.
(1 to 15 by 2)
// * Print all those numbers which are divisible by 3 from the above list.
for (i <- (1 to 15 by 2))
  if(i%3 == 0) println(i)
// ## Developing Functions
//
// Let us understand how to develop functions using Python as programming language.
// * Function starts with `def` followed by function name.
// * Parameters can be of different types.
// * Required
// * Keyword
// * Variable Number
// * Functions
// * Functions which take another function as an argument is called higher order functions.
//
// ### Tasks
//
// Let us perform few tasks to understand how to develop functions in Python.
//
// * Sum of integers between lower bound and upper bound using formula.
//
//
// Gauss formula: sum of the integers 1..n.
def sumOfN(n: Int) = {
  n * (n + 1) / 2
}
sumOfN(10)
// Sum of lb..ub expressed as a difference of two prefix sums.
def sumOfIntegers(lb: Int, ub: Int) = {
  sumOfN(ub) - sumOfN(lb - 1)
}
sumOfIntegers(5, 10)
// * Sum of integers between lower bound and upper bound using loops.
// Imperative variant: accumulate every integer in the inclusive range.
def sumOfIntegers(lb: Int, ub: Int) = {
  var acc = 0
  (lb to ub).foreach { n =>
    acc += n
  }
  acc
}
// +
sumOfIntegers(1, 10)
// -
// * Sum of squares of integers between lower bound and upper bound using loops.
// Sum of squares of the integers in the inclusive range [lb, ub].
def sumOfSquares(lb: Int, ub: Int) = {
  var acc = 0
  (lb to ub).foreach { n =>
    acc += n * n
  }
  acc
}
sumOfSquares(2, 4)
// * Sum of the even numbers between lower bound and upper bound using loops.
// Sum of the even numbers in the inclusive range [lb, ub].
// BUG FIX: the original body was copy-pasted from sumOfSquares and summed
// e * e for EVERY integer; it now adds only the even values themselves.
def sumOfEvens(lb: Int, ub: Int) = {
  var total = 0
  for (e <- (lb to ub))
    if (e % 2 == 0) total += e
  total
}
sumOfEvens(2, 4)
// ## Lambda Functions
//
// Let us recap details related to lambda functions.
//
// * We can develop functions without names. They are called Lambda Functions and are also known as Anonymous Functions.
// * We typically use them to pass as arguments to higher order functions which takes functions as arguments
//
// ### Tasks
//
// Let us perform few tasks related to lambda functions.
//
// * Create a generic function mySum which is supposed to perform arithmetic using integers within a range.
// * It takes 3 arguments - lb, ub and f.
// * Function f should be invoked inside the function on each element within the range.
//
//
// Generic range aggregator: applies f to every integer in [lb, ub] and sums
// the results. Passing different f values specializes it (identity, square, ...).
def mySum(lb: Int, ub: Int, f: Int => Int) = {
  var acc = 0
  (lb to ub).foreach { e =>
    acc += f(e)
  }
  acc
}
// * Sum of integers between lower bound and upper bound using mySum.
mySum(2, 4, i => i)
// * Sum of squares of integers between lower bound and upper bound using mySum.
mySum(2, 4, i => i * i)
// * Sum of the even numbers between lower bound and upper bound using mySum.
mySum(2, 4, i => if (i % 2 == 0) i else 0)
// ## Overview of Collections and Tuples
//
// Let's quickly recap Collections and Tuples in Python. We will primarily talk about collections and tuples that come as part of the Python standard library, such as `list`, `set`, `dict` and `tuple`.
//
// * Group of elements with length and index - `list`
// * Group of unique elements - `set`
// * Group of key value pairs - `dict`
// * While list, set and dict contain group of homogeneous elements, tuple contains group of heterogeneous elements.
// * We can consider list, set and dict as a table in a database and tuple as a row or record in a given table.
// * Typically we create list of tuples or set of tuples and dict is nothing but collection of tuples with 2 elements and key is unique.
// * We typically use Map Reduce APIs to process the data in collections. There are also some pre-defined functions such as `len`, `sum`,` min`,` max` etc for aggregating data in collections.
// ### Tasks
//
// Let us perform few tasks to quickly recap details about Collections and Tuples in Python. We will also quickly recap about Map Reduce APIs.
//
// * Create a collection of orders by reading data from a file.
import sys.process._
// Shell out (Toree's postfix `!` runs the command) to verify the file exists.
"ls -ltr /data/retail_db/orders/part-00000"!
val ordersPath = "/data/retail_db/orders/part-00000"
import scala.io.Source
// getLines returns a lazy Iterator[String]; it can be consumed only once.
val orders = Source.fromFile(ordersPath).
    getLines
// * Get all unique order statuses. Make sure data is sorted in alphabetical order.
// +
val ordersPath = "/data/retail_db/orders/part-00000"
import scala.io.Source
// Re-open the file: the iterator from the previous cell is single-use.
val orders = Source.fromFile(ordersPath).
    getLines
// Field 3 is order_status; toSet dedupes, then sort alphabetically and print.
orders.
    map(order => order.split(",")(3)).
    toSet.
    toList.
    sorted.
    foreach(println)
// -
// * Get count of all unique dates.
// +
val ordersPath = "/data/retail_db/orders/part-00000"
import scala.io.Source
val orders = Source.fromFile(ordersPath).
    getLines
// Field 1 is order_date; the resulting sorted list's size is the unique count.
orders.
    map(order => order.split(",")(1)).
    toSet.
    toList.
    sorted
// -
// * Sort the data in orders in ascending order by order_customer_id and then order_date.
// +
val ordersPath = "/data/retail_db/orders/part-00000"
import scala.io.Source
val orders = Source.fromFile(ordersPath).
    getLines
// Sort by (order_customer_id as Int, order_date as String); show the first 20.
orders.
    toList.
    sortBy(k => {
        val a = k.split(",")
        (a(2).toInt, a(1))
    }).
    take(20).
    foreach(println)
// -
// * Create a collection of order_items by reading data from a file.
// +
val orderItemsPath = "/data/retail_db/order_items/part-00000"
import scala.io.Source
// Materialize all records with toList so they can be iterated repeatedly.
val orderItems = Source.fromFile(orderItemsPath).
    getLines.
    toList
orderItems.take(10).foreach(println)
// -
// * Get revenue for a given order_item_order_id.
// Total revenue for one order: sum of order_item_subtotal (CSV field 4) over
// all records whose order_item_order_id (CSV field 1) matches `orderId`.
def getOrderRevenue(orderItems: List[String], orderId: Int) = {
  orderItems.
    filter { rec => rec.split(",")(1).toInt == orderId }.
    map { rec => rec.split(",")(4).toFloat }.
    sum
}
// +
val orderItemsPath = "/data/retail_db/order_items/part-00000"
import scala.io.Source
// Reload the records; toList makes the data reusable across calls.
val orderItems = Source.fromFile(orderItemsPath).
    getLines.
    toList
// -
// Revenue for order id 2.
print(getOrderRevenue(orderItems, 2))
// ## Development Life Cycle
//
// Let us understand the development life cycle. We typically use IDEs such as PyCharm to develop Python based applications.
//
// * Create Project - retail
// * Choose the interpreter 3.x
// * Make sure plugins such as pandas are installed.
// * Create config.py script for externalizing run time parameters such as input path, output path etc.
// * Create app folder for the source code.
// ### Tasks
//
// Let us develop a simple application to understand end to end development life cycle.
//
// * Read the data from order_items
// * Get revenue for each order id
// * Save the output which contain order id and revenue to a file.
//
// Click [here](https://github.com/dgadiraju/python-retail/tree/v1.0) for the complete code for the above tasks.
// ## Exercises
//
// Let us perform few exercises to understand how to process the data. We will use LinkedIn data to perform some basic data processing using Python.
//
// * Get LinkedIn archive.
// * Go to https://linkedin.com
// * Me on top -> Settings & Privacy
// * Then go to "How LinkedIn uses your data" -> Getting a copy of your data
// * Register and download. You will get a link as part of the email.
// * Data contain multiple CSV files. We will limit the analysis to **Contacts.csv** and **Connections.csv**.
// * Get the number of **contacts** without email ids.
// * Get the number of **contacts** from each source.
// * Get the number of **connections** with each title.
// * Get the number of **connections** from each company.
// * Get the number of **contacts** for each month in the year 2018.
// * Use Postgres or MySQL as databases (you can setup in your laptop) and write connections data to the database
| spark-scala/02_quick_recap_of_scala.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "d0a02eff-ba56-4caa-bd7a-7dc3263a9055", "showTitle": false, "title": ""}
# # Running experiments in Azure Machine Learning
#
# In this lab, you will learn to run experiments in Azure Machine Learning from Azure Databricks. This lab will cover following exercises:
#
# - Exercise 1: Running an Azure ML experiment on Databricks
# - Exercise 2: Reviewing experiment metrics in Azure ML Studio
#
# To install the required libraries please follow the instructions in the lab guide.
#
# **Required Libraries**:
# * `azureml-sdk[databricks]` via PyPI
# * `sklearn-pandas==2.1.0` via PyPI
# * `azureml-mlflow` via PyPI
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "b73f3be6-1e5e-4895-9208-c322686e3820", "showTitle": false, "title": ""}
# Run the following cell to load common libraries.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2a97557e-e199-4aab-a4d1-0095a91b1ac6", "showTitle": false, "title": ""}
import os
import numpy as np
import pandas as pd
import pickle
import sklearn
import joblib
import math
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn_pandas import DataFrameMapper
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import matplotlib
import matplotlib.pyplot as plt
import azureml
from azureml.core import Workspace, Experiment, Run
from azureml.core.model import Model
print('The azureml.core version is {}'.format(azureml.core.VERSION))
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "4787d4a6-92db-469b-b427-95c41dc26855", "showTitle": false, "title": ""}
# ## Connect to the AML workspace
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "080e5e04-10dd-418b-be96-79f048a2e7d9", "showTitle": false, "title": ""}
# In the following cell, be sure to set the values for `subscription_id`, `resource_group`, and `workspace_name` as directed by the comments. Please note, you can copy the `subscription ID` and `resource group` name from the **Overview** page on the blade for the Azure ML workspace in the Azure portal.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "925c1b9c-0c80-4771-8171-a41cbf3b0498", "showTitle": false, "title": ""}
#Provide the Subscription ID of your existing Azure subscription
subscription_id = "XXX-XXXX-XXXX-XXXX-XXXX"
#Replace the name below with the name of your resource group
resource_group = "XXX"
#Replace the name below with the name of your Azure Machine Learning workspace
workspace_name = "aml-ws"
# Echo the configuration so a wrong value is caught before connecting.
print("subscription_id:", subscription_id)
print("resource_group:", resource_group)
print("workspace_name:", workspace_name)
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "ea4e1c7e-3561-4bb1-9e27-f6b8343b136f", "showTitle": false, "title": ""}
# **Important Note**: You will be prompted to login in the text that is output below the cell. Be sure to navigate to the URL displayed and enter the code that is provided. Once you have entered the code, return to this notebook and wait for the output to read `Workspace configuration succeeded`.
#
# *Also note that the sign-on link and code only appear the first time in a session. If an authenticated session is already established, you won't be prompted to enter the code and authenticate when creating an instance of the Workspace.*
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "dfd43933-a2bf-44a7-85ae-9dc0e9543982", "showTitle": false, "title": ""}
# Connect to the Azure ML workspace; this may trigger an interactive
# device-code login (see the note in the cell above).
ws = Workspace(subscription_id, resource_group, workspace_name)
print(ws)
print('Workspace region:', ws.location)
print('Workspace configuration succeeded')
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "1f0748e6-6339-4ad1-b1ee-6b3b6fdd7af4", "showTitle": false, "title": ""}
# ## Load the training data
#
# In this notebook, we will be using a subset of NYC Taxi & Limousine Commission - green taxi trip records available from [Azure Open Datasets]( https://azure.microsoft.com/en-us/services/open-datasets/). The data is enriched with holiday and weather data. Each row of the table represents a taxi ride that includes columns such as number of passengers, trip distance, datetime information, holiday and weather information, and the taxi fare for the trip.
#
# Run the following cell to load the table into a Spark dataframe and review the dataframe.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "5864441e-83ed-4635-a8c5-04582b0c8f42", "showTitle": false, "title": ""}
# `spark` and `display` are Databricks notebook globals. Pull the whole
# nyc_taxi table into the driver as a pandas DataFrame for sklearn training.
dataset = spark.sql("select * from nyc_taxi").toPandas()
display(dataset)
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "c4bc4e7f-f07e-4fb3-afcc-0bc41ed81115", "showTitle": false, "title": ""}
# ## Exercise 1: Running an Azure ML experiment on Databricks
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "927480ac-0bb6-47b4-a73c-32fa1f4767c9", "showTitle": false, "title": ""}
# ### Use MLflow with Azure Machine Learning for Model Training
#
# In the subsequent cells you will learn to do the following:
# - Set up MLflow tracking URI so as to use Azure ML
# - Create MLflow experiment – this will create a corresponding experiment in Azure ML Workspace
# - Train a model on Azure Databricks cluster while logging metrics and artifacts using MLflow
#
# After this notebook, you should return to the **lab guide** and follow instructions to review the model performance metrics and training artifacts in the Azure Machine Learning workspace.
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "d084826f-4a0f-4e95-9019-06239533e0fa", "showTitle": false, "title": ""}
# #### Set MLflow tracking URI
#
# Set the MLflow tracking URI to point to your Azure ML Workspace. The subsequent logging calls from MLflow APIs will go to Azure ML services and will be tracked under your Workspace.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "765a2d26-3878-4dc7-9c41-3492aa320611", "showTitle": false, "title": ""}
import mlflow
# Point MLflow tracking at the Azure ML workspace so all subsequent logging
# calls are recorded there instead of the local Databricks tracking store.
mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())
print("MLflow tracking URI to point to your Azure ML Workspace setup complete.")
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "9a75e71c-0b52-4f87-a460-6a94511f70ee", "showTitle": false, "title": ""}
# #### Configure experiment
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "97350f00-a7a8-4903-8f21-ad831cfd7ff6", "showTitle": false, "title": ""}
experiment_name = 'MLflow-AML-Exercise'
# Creates the experiment in the AML workspace if it does not already exist.
mlflow.set_experiment(experiment_name)
print("Experiment setup complete.")
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "21e73871-736a-4ed3-8a55-7294d7dbfb3f", "showTitle": false, "title": ""}
# #### Train Model and Log Metrics and Artifacts
#
# Now you are ready to train the model. Run the cell below to do the following:
# - Train model
# - Evaluate model
# - Log evaluation metrics
# - Log artifact: Evaluation graph
# - Save model
# - Log artifact: Trained model
#
# Note that the metrics and artifacts will be saved in your `AML Experiment Run`.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "17329841-a05a-4099-afff-7f6c95ecfaba", "showTitle": false, "title": ""}
print("Training model...")

# Where to persist training artifacts; /dbfs mirrors the DBFS mount locally.
output_folder = 'outputs'
model_file_name = 'nyc-taxi.pkl'
dbutils.fs.mkdirs(output_folder)
model_file_path = os.path.join('/dbfs', output_folder, model_file_name)

with mlflow.start_run() as run:
    # Drop rows without a label, then split features/target.
    df = dataset.dropna(subset=['totalAmount'])
    x_df = df.drop(['totalAmount'], axis=1)
    y_df = df['totalAmount']

    X_train, X_test, y_train, y_test = train_test_split(x_df, y_df, test_size=0.2, random_state=0)

    numerical = ['passengerCount', 'tripDistance', 'snowDepth', 'precipTime', 'precipDepth', 'temperature']
    categorical = ['hour_of_day', 'day_of_week', 'month_num', 'normalizeHolidayName', 'isPaidTimeOff']

    # Median-impute and scale each numeric column; one-hot encode categoricals,
    # ignoring categories unseen at fit time.
    numeric_transformations = [([f], Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='median')),
        ('scaler', StandardScaler())])) for f in numerical]
    categorical_transformations = [([f], OneHotEncoder(handle_unknown='ignore', sparse=False)) for f in categorical]
    transformations = numeric_transformations + categorical_transformations

    clf = Pipeline(steps=[('preprocessor', DataFrameMapper(transformations, df_out=True)),
                          ('regressor', GradientBoostingRegressor())])

    clf.fit(X_train, y_train)

    # Log regression metrics against the held-out split.
    y_predict = clf.predict(X_test)
    y_actual = y_test.values.flatten().tolist()
    rmse = math.sqrt(mean_squared_error(y_actual, y_predict))
    mlflow.log_metric('rmse', rmse)
    mae = mean_absolute_error(y_actual, y_predict)
    mlflow.log_metric('mae', mae)
    r2 = r2_score(y_actual, y_predict)
    mlflow.log_metric('R2 score', r2)

    # Log a log-log predicted-vs-actual scatter with the identity line.
    plt.figure(figsize=(10, 10))
    plt.scatter(y_actual, y_predict, c='crimson')
    plt.yscale('log')
    plt.xscale('log')
    p1 = max(max(y_predict), max(y_actual))
    p2 = min(min(y_predict), min(y_actual))
    plt.plot([p1, p2], [p1, p2], 'b-')
    plt.xlabel('True Values', fontsize=15)
    plt.ylabel('Predictions', fontsize=15)
    plt.axis('equal')
    results_graph = os.path.join('/dbfs', output_folder, 'results.png')
    plt.savefig(results_graph)
    mlflow.log_artifact(results_graph)

    # BUG FIX: the original passed an open, never-closed file handle to
    # joblib.dump; joblib accepts a path directly and manages the file itself.
    joblib.dump(clf, model_file_path)
    mlflow.log_artifact(model_file_path)
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "12556aec-6ef5-43b8-8c18-df15ef12597d", "showTitle": false, "title": ""}
# #### View the Experiment Run in Azure Machine Learning Workspace
#
# Run the cell below and then **right-click** on **Link to Azure Machine Learning studio** link below to open the `AML Experiment Run Details` page in a **new browser tab**.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "4531c990-6e47-40ac-b902-679014bdb0cf", "showTitle": false, "title": ""}
list(ws.experiments[experiment_name].get_runs())[0]
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "4fd9da3d-0deb-48d7-bdbe-2a0a03e834fc", "showTitle": false, "title": ""}
# ## Exercise 2: Reviewing experiment metrics in Azure ML Studio
#
# Return to the `lab guide` and follow instructions to review the model performance metrics and training artifacts in the Azure Machine Learning workspace.
| 04 Build and operate machine learning solutions with Azure Databricks/01 Get started with Azure Databricks/1.0 Running experiments in Azure Machine Learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# # Concolic Fuzzing
#
# We have previously seen how one can use dynamic taints to produce more intelligent test cases than simply looking for program crashes. We have also seen how one can use the taints to update the grammar, and hence focus more on the dangerous methods.
#
# While taints are helpful, uninterpreted strings is only one of the attack vectors. Can we say anything more about the properties of variables at any point in the execution? For example, can we say for sure that a function will always receive the buffers with the correct length? Concolic execution offers a solution.
#
# The idea of _concolic execution_ over a function is as follows: We start with a sample input for the function, and execute the function under trace. At each point the execution passes through a conditional, we save the conditional encountered in the form of relations between symbolic variables.
# (A _symbolic variable_ can be thought of as a sort of placeholder for the real variable, sort of like the x in solving for x in Algebra. The symbolic variables can be used to specify relations without actually solving them.)
#
# With concolic execution, one can collect the constraints that an execution path encounters, and use it to answer questions about the program behavior at any point we prefer along the program execution path. We can further use concolic execution to enhance fuzzing.
#
# In this chapter, we explore in depth how to execute a Python function concolically, and how concolic execution can be used to enhance fuzzing.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"}
# **Prerequisites**
#
# * You should have read the [chapter on coverage](Coverage.ipynb).
# * You should have read the [chapter on information flow](InformationFlow.ipynb).
# * A familiarity with the basic idea of [SMT solvers](https://en.wikipedia.org/wiki/Satisfiability_modulo_theories) would be useful.
# + [markdown] slideshow={"slide_type": "fragment"}
# We first set up our infrastructure so that we can make use of previously defined functions.
# + slideshow={"slide_type": "skip"}
import bookutils
# + [markdown] slideshow={"slide_type": "slide"}
# ## Synopsis
# <!-- Automatically generated. Do not edit. -->
#
# To [use the code provided in this chapter](Importing.ipynb), write
#
# ```python
# >>> from fuzzingbook.ConcolicFuzzer import <identifier>
# ```
#
# and then make use of the following features.
#
#
# This chapter defines two main classes: `SimpleConcolicFuzzer` and `ConcolicGrammarFuzzer`. The `SimpleConcolicFuzzer` first uses a sample input to collect predicates encountered. The fuzzer then negates random predicates to generate new input constraints. These, when solved, produce inputs that explore paths that are close to the original path. It can be used as follows.
#
# We first obtain the constraints using `ConcolicTracer`.
#
# ```python
# >>> with ConcolicTracer() as _:
# >>> _[cgi_decode]('a%20d')
# ```
# These constraints are added to the concolic fuzzer as follows:
#
# ```python
# >>> scf = SimpleConcolicFuzzer()
# >>> scf.add_trace(_, 'a%20d')
# ```
# The concolic fuzzer then uses the constraints added to guide its fuzzing as follows:
#
# ```python
# >>> scf = SimpleConcolicFuzzer()
# >>> for i in range(10):
# >>> v = scf.fuzz()
# >>> if v is None:
# >>> break
# >>> print(repr(v))
# >>> with ExpectError():
# >>> with ConcolicTracer() as _:
# >>> _[cgi_decode](v)
# >>> scf.add_trace(_, v)
# ' '
# '%\\x00'
# '%A\\x00'
# '%Ad\\x00'
# Traceback (most recent call last):
# File "<ipython-input-287-2a3454213b54>", line 9, in <module>
# _[cgi_decode](v)
# File "<ipython-input-33-635d2b1b13c2>", line 3, in __call__
# self.result = self.fn(*self.concolic(args))
# File "<ipython-input-241-630ee123bed8>", line 42, in cgi_decode
# raise ValueError("Invalid encoding")
# ValueError: Invalid encoding (expected)
# Traceback (most recent call last):
# File "<ipython-input-287-2a3454213b54>", line 9, in <module>
# _[cgi_decode](v)
# File "<ipython-input-33-635d2b1b13c2>", line 3, in __call__
# self.result = self.fn(*self.concolic(args))
# File "<ipython-input-241-630ee123bed8>", line 42, in cgi_decode
# raise ValueError("Invalid encoding")
# ValueError: Invalid encoding (expected)
# Traceback (most recent call last):
# File "<ipython-input-287-2a3454213b54>", line 9, in <module>
# _[cgi_decode](v)
# File "<ipython-input-33-635d2b1b13c2>", line 3, in __call__
# self.result = self.fn(*self.concolic(args))
# File "<ipython-input-241-630ee123bed8>", line 39, in cgi_decode
# raise ValueError("Invalid encoding")
# ValueError: Invalid encoding (expected)
# '%\\x00C\\x00'
# '%\\x004\\x00'
# '%\\x004\\x00'
# Traceback (most recent call last):
# File "<ipython-input-287-2a3454213b54>", line 9, in <module>
# _[cgi_decode](v)
# File "<ipython-input-33-635d2b1b13c2>", line 3, in __call__
# self.result = self.fn(*self.concolic(args))
# File "<ipython-input-241-630ee123bed8>", line 42, in cgi_decode
# raise ValueError("Invalid encoding")
# ValueError: Invalid encoding (expected)
# Traceback (most recent call last):
# File "<ipython-input-287-2a3454213b54>", line 9, in <module>
# _[cgi_decode](v)
# File "<ipython-input-33-635d2b1b13c2>", line 3, in __call__
# self.result = self.fn(*self.concolic(args))
# File "<ipython-input-241-630ee123bed8>", line 42, in cgi_decode
# raise ValueError("Invalid encoding")
# ValueError: Invalid encoding (expected)
# Traceback (most recent call last):
# File "<ipython-input-287-2a3454213b54>", line 9, in <module>
# _[cgi_decode](v)
# File "<ipython-input-33-635d2b1b13c2>", line 3, in __call__
# self.result = self.fn(*self.concolic(args))
# File "<ipython-input-241-630ee123bed8>", line 42, in cgi_decode
# raise ValueError("Invalid encoding")
# ValueError: Invalid encoding (expected)
# '%E\\x00'
# '%\\x00d\\x00'
# '%\\x00F\\x00'
# Traceback (most recent call last):
# File "<ipython-input-287-2a3454213b54>", line 9, in <module>
# _[cgi_decode](v)
# File "<ipython-input-33-635d2b1b13c2>", line 3, in __call__
# self.result = self.fn(*self.concolic(args))
# File "<ipython-input-241-630ee123bed8>", line 42, in cgi_decode
# raise ValueError("Invalid encoding")
# ValueError: Invalid encoding (expected)
# Traceback (most recent call last):
# File "<ipython-input-287-2a3454213b54>", line 9, in <module>
# _[cgi_decode](v)
# File "<ipython-input-33-635d2b1b13c2>", line 3, in __call__
# self.result = self.fn(*self.concolic(args))
# File "<ipython-input-241-630ee123bed8>", line 42, in cgi_decode
# raise ValueError("Invalid encoding")
# ValueError: Invalid encoding (expected)
# Traceback (most recent call last):
# File "<ipython-input-287-2a3454213b54>", line 9, in <module>
# _[cgi_decode](v)
# File "<ipython-input-33-635d2b1b13c2>", line 3, in __call__
# self.result = self.fn(*self.concolic(args))
# File "<ipython-input-241-630ee123bed8>", line 42, in cgi_decode
# raise ValueError("Invalid encoding")
# ValueError: Invalid encoding (expected)
# ```
# The `SimpleConcolicFuzzer` simply explores all paths near the original path traversed by the sample input. It uses a simple mechanism to explore the paths that are near the paths that it knows about, and other than code paths, knows nothing about the input.
# The `ConcolicGrammarFuzzer` on the other hand, knows about the input grammar, and can collect feedback from the subject under fuzzing. It can lift some of the constraints encountered to the grammar, enabling deeper fuzzing. It is used as follows:
#
# ```python
# >>> from InformationFlow import INVENTORY_GRAMMAR, SQLException
# >>> cgf = ConcolicGrammarFuzzer(INVENTORY_GRAMMAR)
# >>> cgf.prune_tokens(prune_tokens)
# >>> for i in range(10):
# >>> query = cgf.fuzz()
# >>> print(query)
# >>> with ConcolicTracer() as _:
# >>> with ExpectError():
# >>> try:
# >>> res = _[db_select](query)
# >>> print(repr(res))
# >>> except SQLException as e:
# >>> print(e)
# >>> cgf.update_grammar(_)
# >>> print()
# delete from Mi6 where m1(F,t)>l(V)-F6(C,B)
# Table ('Mi6') was not found
#
# delete from wd1l11iPlX68W where 5796.7==(((((h+e-Q-.)))))==384514
# Table ('wd1l11iPlX68W') was not found
#
# select H,w,S,h from m1 where d/t<S
# Table ('m1') was not found
#
# select 4.8 from months where o/r+u+k*O!=y9(Q)>o*j
# Invalid WHERE ('(o/r+u+k*O!=y9(Q)>o*j)')
#
# select m/h+v+P+E,L+j*W+o+k from vehicles
# Invalid WHERE ('(m/h+v+P+E,L+j*W+o+k)')
#
# select (O/S) from months where o/y-:*M==D<pC(a,z)
# Invalid WHERE ('(o/y-:*M==D<pC(a,z))')
#
# select v<M(z) from vehicles where _(E)/r/S>C*K(Y)
# Invalid WHERE ('(_(E)/r/S>C*K(Y))')
#
# insert into months (u3l7ua,p,H1) values ('Rk','Y')
# Column ('u3l7ua') was not found
#
# select 3.7 from I37
# Table ('I37') was not found
#
# update months set xK=S4 where M+r*r-w<X-k+g
# Column ('xK') was not found
#
# ```
#
# + [markdown] slideshow={"slide_type": "slide"} toc-hr-collapsed=false
# ## Tracking Constraints
# + [markdown] slideshow={"slide_type": "fragment"}
# In the chapter on [information flow](InformationFlow), we have seen how dynamic taints can be used to direct fuzzing by indicating which part of input reached interesting places. However, dynamic taint tracking is limited in the information that it can propagate. For example, we might want to explore what happens when certain properties of the input changes.
# + [markdown] slideshow={"slide_type": "fragment"}
# For example, say we have a function `factorial()` that returns the *factorial value* of its input.
# + slideshow={"slide_type": "subslide"}
def factorial(n):  # returns n! for n >= 0, None for negative n
    if n < 0:  # factorial is undefined for negative inputs
        return None
    if n == 0:  # 0! == 1 by definition
        return 1
    if n == 1:  # 1! == 1; separate branch kept so it shows up in coverage
        return 1
    v = 1  # running product
    while n != 0:  # multiply down: n * (n-1) * ... * 1
        v = v * n
        n = n - 1
    return v  # NOTE: comments are trailing-only so source line numbers stay stable for src[] below
# + [markdown] slideshow={"slide_type": "fragment"}
# We exercise the function with a value of `5`.
# + slideshow={"slide_type": "fragment"}
factorial(5)
# + [markdown] slideshow={"slide_type": "subslide"}
# Is this sufficient to explore all the features of the function? How do we know? One way to verify that we have explored all features is to look at the coverage obtained. First we need to extend the `Coverage` class from the [chapter on coverage](Coverage.ipynb) to provide us with coverage arcs.
# + slideshow={"slide_type": "skip"}
from Coverage import Coverage
# + slideshow={"slide_type": "skip"}
import inspect
# + slideshow={"slide_type": "fragment"}
class ArcCoverage(Coverage):
    # Coverage subclass that records executed (function, line) events so
    # consecutive entries can be paired into coverage arcs.
    def traceit(self, frame, event, args):
        # Record every trace event except 'return', so the trace is a
        # sequence of executed source lines in order.
        if event != 'return':
            f = inspect.getframeinfo(frame)
            self._trace.append((f.function, f.lineno))
        return self.traceit
    def arcs(self):
        # Pair each executed line with its successor:
        # [l1, l2, l3, ...] -> [(l1, l2), (l2, l3), ...]
        t = [i for f, i in self._trace]
        return list(zip(t, t[1:]))
# + [markdown] slideshow={"slide_type": "subslide"}
# Next, we use the `Tracer` to obtain the coverage arcs.
# + slideshow={"slide_type": "fragment"}
with ArcCoverage() as cov:
factorial(5)
# + [markdown] slideshow={"slide_type": "fragment"}
# We can now use the coverage arcs to visualize the coverage obtained.
# + slideshow={"slide_type": "skip"}
from ControlFlow import PyCFG, CFGNode, to_graph, gen_cfg
# + slideshow={"slide_type": "skip"}
from graphviz import Source, Graph
# + slideshow={"slide_type": "fragment"}
Source(to_graph(gen_cfg(inspect.getsource(factorial)), arcs=cov.arcs()))
# + [markdown] slideshow={"slide_type": "fragment"}
# We see that the path `[1, 2, 4, 6, 8, 9, 10, 11, 12]` is covered (green) but sub-paths such as `[2, 3]`, `[4, 5]` and `[6, 7]` are unexplored (red). What we need is the ability to generate inputs such that the `True` branch is taken at `2`. How do we do that?
# + [markdown] slideshow={"slide_type": "slide"} toc-hr-collapsed=false
# ## Concolic Execution
#
# One way is to look at the execution path being taken, and collect the conditional constraints that the path encounters. Then we can try to produce inputs that lead us to taking the non-traversed path.
# + [markdown] slideshow={"slide_type": "fragment"}
# First, let us step through the function.
# + slideshow={"slide_type": "fragment"}
lines = [i[1] for i in cov._trace if i[0] == 'factorial']
src = {i + 1: s for i, s in enumerate(
inspect.getsource(factorial).split('\n'))}
# + [markdown] slideshow={"slide_type": "fragment"}
# * The line (1) is simply the entry point of the function. We know that the input is `n`, which is an integer.
# + slideshow={"slide_type": "fragment"}
src[1]
# + [markdown] slideshow={"slide_type": "subslide"}
# * The line (2) is a predicate `n < 0`. Since the next line taken is line (4), we know that at this point in the execution path, the predicate was `false`.
# + slideshow={"slide_type": "fragment"}
src[2], src[3], src[4]
# + [markdown] slideshow={"slide_type": "fragment"}
# We notice that this is one of the predicates where the `true` branch was not taken. How do we generate a value that takes the `true` branch here? One way is to use symbolic variables to represent the input, encode the constraint, and use an *SMT Solver* to solve the negation of the constraint.
# + [markdown] slideshow={"slide_type": "subslide"}
# As we mentioned in the introduction to the chapter, a symbolic variable can be thought of as a sort of placeholder for the real variable, sort of like the `x` in solving for `x` in Algebra. These variables can be used to encode constraints placed on the variables in the program. We identify what constraints the variable is supposed to obey, and finally produce a value that obeys all constraints imposed.
# + [markdown] slideshow={"slide_type": "slide"}
# ## SMT Solvers
# + [markdown] slideshow={"slide_type": "subslide"}
# To solve these constraints, one can use a _Satisfiability Modulo Theories_ (SMT) solver. An SMT solver is built on top of a _satisfiability_ (SAT) solver. A SAT solver checks whether a propositional boolean formula (e.g. `(a | b) & (~a | ~b)`) can be satisfied by some assignment to its variables (e.g. `a = true, b = false`). An SMT solver extends SAT solvers with specific background theories -- for example, the _theory of integers_, or the _theory of strings_. That is, given a string constraint expressed as a formula with string variables (e.g. `h + t == 'hello,world'`), an SMT solver that understands the _theory of strings_ can check whether that constraint is satisfiable, and if it is, provide concrete values for the variables in the formula (e.g. `h = 'hello,', t = 'world'`).
#
# We use the SMT solver, `Z3` in this chapter.
# + slideshow={"slide_type": "skip"}
import z3
# + [markdown] slideshow={"slide_type": "subslide"}
# To ensure that the string constraints we use in this chapter are successfully evaluated, we need to specify the `z3str3` solver. Further, we set the timeout for Z3 computations to 30 seconds.
# + slideshow={"slide_type": "fragment"}
assert z3.get_version() >= (4, 8, 6, 0)
z3.set_option('smt.string_solver', 'z3str3')
z3.set_option('timeout', 30 * 1000) # milliseconds
# + [markdown] slideshow={"slide_type": "fragment"}
# Encoding the constraint requires declaring a corresponding symbolic variable to the input `n`.
# + slideshow={"slide_type": "fragment"}
zn = z3.Int('n')
# + [markdown] slideshow={"slide_type": "fragment"}
# Remember the constraint `(n < 0)` from line 2 in `factorial()`? We can now encode the constraint as follows.
# + slideshow={"slide_type": "fragment"}
zn < 0
# + [markdown] slideshow={"slide_type": "subslide"}
# We previously traced `factorial(5)`. We saw that with input `5`, the execution took the `else` branch on the predicate `n < 0`. We can express this observation as follows.
# + slideshow={"slide_type": "fragment"}
z3.Not(zn < 0)
# + [markdown] slideshow={"slide_type": "fragment"}
# The `z3.solve()` method can also be used to check if the constraints are satisfiable, and if they are, provide values for variables such that the constraints are satisfied. For example, we can ask z3 for an input that will take the `else` branch as follows:
# + slideshow={"slide_type": "fragment"}
z3.solve(z3.Not(zn < 0))
# + [markdown] slideshow={"slide_type": "fragment"}
# This is *a solution* (albeit a trivial one). SMT solvers can be used to solve much harder problems. For example, here is how one can solve a quadratic equation.
# + slideshow={"slide_type": "subslide"}
x = z3.Real('x')
eqn = (2 * x**2 - 11 * x + 5 == 0)
z3.solve(eqn)
# + [markdown] slideshow={"slide_type": "fragment"}
# Again, this is _one solution_. We can ask z3 to give us another solution as follows.
# + slideshow={"slide_type": "fragment"}
z3.solve(x != 5, eqn)
# + [markdown] slideshow={"slide_type": "fragment"}
# Indeed, both `x = 5` and `x = 1/2` are solutions to the quadratic equation $ 2x^2 -11x + 5 = 0 $
# + [markdown] slideshow={"slide_type": "fragment"}
# Similarly, we can ask *Z3* for an input that satisfies the constraint encoded in line 2 of `factorial()` so that we take the `if` branch.
# + slideshow={"slide_type": "subslide"}
z3.solve(zn < 0)
# + [markdown] slideshow={"slide_type": "fragment"}
# That is, if one uses `-1` as an input to `factorial()`, it is guaranteed to take the `if` branch in line 2 during execution.
# + [markdown] slideshow={"slide_type": "fragment"}
# Let us try using that with our coverage. Here, the `-1` is the solution from above.
# + slideshow={"slide_type": "fragment"}
with cov as cov:
factorial(-1)
# + slideshow={"slide_type": "fragment"}
Source(to_graph(gen_cfg(inspect.getsource(factorial)), arcs=cov.arcs()))
# + [markdown] slideshow={"slide_type": "subslide"}
# Ok, so we have managed to cover a little more of the graph. Let us continue with our original input of `factorial(5)`:
# * In line (4) we encounter a new predicate `n == 0`, for which we again took the false branch.
# + slideshow={"slide_type": "fragment"}
src[4]
# + [markdown] slideshow={"slide_type": "fragment"}
# The predicates required to follow the path until this point are as follows.
# + slideshow={"slide_type": "fragment"}
predicates = [z3.Not(zn < 0), z3.Not(zn == 0)]
# + [markdown] slideshow={"slide_type": "fragment"}
# * If we continue to line (6), we encounter another predicate, for which again, we took the `false` branch
# + slideshow={"slide_type": "fragment"}
src[6]
# + [markdown] slideshow={"slide_type": "fragment"}
# The predicates encountered so far are as follows
# + slideshow={"slide_type": "subslide"}
predicates = [z3.Not(zn < 0), z3.Not(zn == 0), z3.Not(zn == 1)]
# + [markdown] slideshow={"slide_type": "fragment"}
# To take the branch at (6), we essentially have to obey the predicates until that point, but invert the last predicate.
# + slideshow={"slide_type": "fragment"}
# Keep all predicates up to the last one, but negate the final predicate,
# steering the solver toward the branch not taken.  (The unused helper
# variable `last` was dropped; negative indexing already selects the
# final predicate.)
z3.solve(predicates[0:-1] + [z3.Not(predicates[-1])])
# + [markdown] slideshow={"slide_type": "subslide"}
# What we are doing here is tracing the execution corresponding to a particular input `factorial(5)`, using concrete values, and along with it, keeping *symbolic shadow variables* that enable us to capture the constraints. As we mentioned in the introduction, this particular method of execution where one tracks concrete execution using symbolic variables is called *Concolic Execution*.
#
# How do we automate this process? One method is to use a similar infrastructure as that of the chapter on [information flow](InformationFlow.ipynb), and use the Python inheritance to create symbolic proxy objects that can track the concrete execution.
# + [markdown] slideshow={"slide_type": "slide"} toc-hr-collapsed=true
# ## A Concolic Tracer
#
# Given that there is a symbolic context under which the program is executed (that is the symbolic variables that are used in the program execution) we define a context manager called `ConcolicTracer` that keeps track of the context.
# + [markdown] slideshow={"slide_type": "fragment"}
# The `ConcolicTracer` accepts a single argument which contains the declarations for the symbolic variables seen so far, and the pre-conditions if any.
# + slideshow={"slide_type": "fragment"}
class ConcolicTracer:
    """Context manager carrying the concolic execution context:
    a pair of (symbolic variable declarations, path conditions)."""

    def __init__(self, context=None):
        """Start from the given (decls, path) pair, or a fresh empty one."""
        if context is None:
            context = ({}, [])
        self.context = context
        self.decls, self.path = self.context
# + [markdown] slideshow={"slide_type": "fragment"}
# We add the `enter` and `exit` methods for the context manager.
# + slideshow={"slide_type": "subslide"}
class ConcolicTracer(ConcolicTracer):
    def __enter__(self):
        # No setup needed for this basic tracer; just expose it to `with`.
        return self
    def __exit__(self, exc_type, exc_value, tb):
        # Returning None (falsy) lets any exception from the body propagate.
        return
# + [markdown] slideshow={"slide_type": "fragment"}
# We use introspection to determine the arguments to the function, which is hooked into the `getitem` method.
# + slideshow={"slide_type": "fragment"}
class ConcolicTracer(ConcolicTracer):
    def __getitem__(self, fn):
        # `tracer[fn]` selects the function to trace; the actual call
        # happens later through __call__.
        self.fn = fn
        # Map each parameter name to the symbolic variable name that will
        # stand for it (filled in by concolic() at call time).
        self.fn_args = {i: None for i in inspect.signature(fn).parameters}
        return self
# + [markdown] slideshow={"slide_type": "fragment"}
# Finally, the function itself is invoked using the `call` method.
# + slideshow={"slide_type": "subslide"}
class ConcolicTracer(ConcolicTracer):
    def __call__(self, *args):
        # Wrap the concrete arguments in concolic proxies (see concolic())
        # and invoke the previously selected function with them.
        self.result = self.fn(*self.concolic(args))
        return self.result
# + [markdown] slideshow={"slide_type": "fragment"}
# For now, we define `concolic()` as a transparent function. It will be modified to produce symbolic variables later.
# + slideshow={"slide_type": "fragment"}
class ConcolicTracer(ConcolicTracer):
    def concolic(self, args):
        # Placeholder: pass arguments through unchanged.  A later cell
        # replaces this to wrap arguments in symbolic proxy objects.
        return args
# + [markdown] slideshow={"slide_type": "fragment"}
# It can be used as follows
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
_[factorial](1)
# + slideshow={"slide_type": "fragment"}
_.context
# + [markdown] slideshow={"slide_type": "subslide"}
# The `context` is empty as we are yet to hook up the necessary infrastructure to `ConcolicTracer`.
# + [markdown] slideshow={"slide_type": "subslide"} toc-hr-collapsed=true
# ### Concolic Proxy Objects
#
# We now define the concolic proxy objects that can be used for concolic tracing. First, we define the `zproxy_create()` method that, given a class name, correctly creates an instance of that class and the corresponding symbolic variable, and registers the symbolic variable in the context information `context`.
# + slideshow={"slide_type": "fragment"}
def zproxy_create(cls, sname, z3var, context, zn, v=None):
    """Create a concolic proxy of class `cls` for a fresh symbolic variable.

    `z3var` is the z3 constructor (e.g. z3.Int) applied to the name `zn`;
    `sname` is the sort name recorded in the declarations half of
    `context`; `v` is the concrete value.  Returns the proxy instance.
    """
    symbolic = z3var(zn)
    proxy = cls(context, symbolic, v)
    context[0][zn] = sname
    return proxy
# + [markdown] slideshow={"slide_type": "subslide"} toc-hr-collapsed=false
# #### A Proxy Class for Booleans
#
# First, we define the `zbool` class which is used to track the predicates encountered. It is a wrapper class that contains both symbolic (`z`) as well as concrete (`v`) values. The concrete value is used to determine which path to take, and the symbolic value is used to collect the predicates encountered.
#
# The initialization is done in two parts. The first one is using `zproxy_create()` to correctly initialize and register the shadow symbolic variable corresponding to the passed argument. This is used exclusively when the symbolic variable needs to be initialized first. In all other cases, the constructor is called with the preexisting symbolic value.
# + slideshow={"slide_type": "subslide"}
class zbool:
    """Proxy for a boolean: carries the symbolic formula `z` alongside the
    concrete value `v`, sharing the tracer's `context`."""

    @classmethod
    def create(cls, context, zn, v):
        """Register a fresh symbolic Bool named `zn` and wrap it."""
        return zproxy_create(cls, 'Bool', z3.Bool, context, zn, v)

    def __init__(self, context, z, v=None):
        self.context = context
        self.z = z
        self.v = v
        self.decl, self.path = self.context
# + [markdown] slideshow={"slide_type": "fragment"}
# Here is how it can be used.
# + slideshow={"slide_type": "subslide"}
with ConcolicTracer() as _:
za, zb = z3.Ints('a b')
val = zbool.create(_.context, 'my_bool_arg', True)
print(val.z, val.v)
_.context
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Negation of Encoded formula
#
# The `zbool` class allows negation of its concrete and symbolic values.
# + slideshow={"slide_type": "fragment"}
class zbool(zbool):
    def __not__(self):
        # Negate both the symbolic formula and the concrete value.
        # NOTE(review): Python's `not` operator does not dispatch to
        # __not__; this method is invoked explicitly (e.g. val.__not__()).
        return zbool(self.context, z3.Not(self.z), not self.v)
# + [markdown] slideshow={"slide_type": "fragment"}
# Here is how it can be used.
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
val = zbool.create(_.context, 'my_bool_arg', True).__not__()
print(val.z, val.v)
_.context
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Registering Predicates on Conditionals
#
# The `zbool` class is being used to track boolean conditions that arise during program execution. It tracks such conditions by registering the corresponding symbolic expressions in the context.
# + slideshow={"slide_type": "fragment"}
class zbool(zbool):
    def __bool__(self):
        # When used in a conditional, record the predicate that actually
        # held: the formula itself if the concrete value is true,
        # otherwise its negation.
        r, pred = (True, self.z) if self.v else (False, z3.Not(self.z))
        self.path.append(pred)  # register the branch condition taken
        return r
# + [markdown] slideshow={"slide_type": "fragment"}
# The `zbool` class can be used to keep track of boolean values and conditions encountered during the execution. For example, we can encode the conditions encountered by line 6 in `factorial()` as follows:
# + [markdown] slideshow={"slide_type": "subslide"}
# First, we define the concrete value (`ca`), and its shadow symbolic variable (`za`).
# + slideshow={"slide_type": "fragment"}
ca, za = 5, z3.Int('a')
# + [markdown] slideshow={"slide_type": "fragment"}
# Then, we wrap it in `zbool`, and use it in a conditional, forcing the conditional to be registered in the context.
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
if zbool(_.context, za == z3.IntVal(5), ca == 5):
print('success')
# + [markdown] slideshow={"slide_type": "fragment"}
# We can retrieve the registered conditional as follows.
# + slideshow={"slide_type": "fragment"}
_.path
# + [markdown] slideshow={"slide_type": "subslide"} toc-hr-collapsed=false
# #### A Proxy Class for Integers
# + [markdown] slideshow={"slide_type": "fragment"}
# Next, we define a symbolic wrapper `zint` for `int`.
# This class keeps track of the `int` variables used and the predicates encountered in `context`. Finally, it also keeps the concrete value so that it can be used to determine the path to take. As the `zint` extends the primitive `int` class, we have to define a _new_ method to open it for extension.
# + slideshow={"slide_type": "fragment"}
class zint(int):
    # zint extends the immutable built-in int, so the concrete value must
    # be supplied at object creation time through __new__.
    def __new__(cls, context, zn, v, *args, **kw):
        return int.__new__(cls, v, *args, **kw)
# + [markdown] slideshow={"slide_type": "fragment"}
# As in the case of `zbool`, the initialization takes place in two parts. The first using `create()` if a new symbolic argument is being registered, and then the usual initialization.
# + slideshow={"slide_type": "subslide"}
class zint(zint):
    @classmethod
    def create(cls, context, zn, v=None):
        # Register a fresh symbolic Int named `zn` in `context`, wrap it.
        return zproxy_create(cls, 'Int', z3.Int, context, zn, v)
    def __init__(self, context, z, v=None):
        # Keep the symbolic expression `z` and concrete value `v` together.
        self.z, self.v = z, v
        self.context = context
# + [markdown] slideshow={"slide_type": "fragment"}
# The `int` value of a `zint` object is its concrete value.
# + slideshow={"slide_type": "fragment"}
class zint(zint):
    def __int__(self):
        # Conversion to int yields the concrete value.
        return self.v
    def __pos__(self):
        # Unary plus also yields the concrete value (no constraint
        # recorded); init_concolic_2 later installs a tracking version.
        return self.v
# + [markdown] slideshow={"slide_type": "subslide"}
# Using these proxies is as follows.
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
val = zint.create(_.context, 'int_arg', 0)
print(val.z, val.v)
_.context
# + [markdown] slideshow={"slide_type": "fragment"}
# The `zint` class is often used to do arithmetic with, or compare to other `int`s. These `int`s can be either a variable or a constant value. We define a helper method `_zv()` that checks what kind of `int` a given value is, and produces the correct symbolic equivalent.
# + slideshow={"slide_type": "fragment"}
class zint(zint):
    def _zv(self, o):
        # Return the (symbolic, concrete) pair for `o`: unwrap another
        # zint proxy, or lift a plain int into a z3 integer constant.
        return (o.z, o.v) if isinstance(o, zint) else (z3.IntVal(o), o)
# + [markdown] slideshow={"slide_type": "subslide"}
# It can be used as follows
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
val = zint.create(_.context, 'int_arg', 0)
print(val._zv(0))
print(val._zv(val))
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Equality between Integers
#
# Two integers can be compared for equality using `__ne__` and `__eq__`.
# + slideshow={"slide_type": "fragment"}
class zint(zint):
    def __ne__(self, other):
        # Compare both symbolically and concretely; the zbool result
        # registers the branch taken when used in a conditional.
        z, v = self._zv(other)
        return zbool(self.context, self.z != z, self.v != v)
    def __eq__(self, other):
        z, v = self._zv(other)
        return zbool(self.context, self.z == z, self.v == v)
# + [markdown] slideshow={"slide_type": "fragment"}
# We also define `__req__` using `__eq__` in case the int being compared is on the left hand side.
# + slideshow={"slide_type": "subslide"}
class zint(zint):
    def __req__(self, other):
        # Reflected equality, delegating to __eq__.
        # NOTE(review): __req__ is not part of Python's data model; for
        # `int == zint` Python already retries zint.__eq__ on the right
        # operand.  Kept as in the original.
        return self.__eq__(other)
# + [markdown] slideshow={"slide_type": "fragment"}
# It can be used as follows.
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
ia = zint.create(_.context, 'int_a', 0)
ib = zint.create(_.context, 'int_b', 0)
v1 = ia == ib
v2 = ia != ib
v3 = 0 != ib
print(v1.z, v2.z, v3.z)
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Comparisons between Integers
#
# Integers can also be compared for ordering, and the methods for this are defined below.
# + slideshow={"slide_type": "fragment"}
class zint(zint):
    def __lt__(self, other):
        # Track the ordering comparison both symbolically and concretely.
        z, v = self._zv(other)
        return zbool(self.context, self.z < z, self.v < v)
    def __gt__(self, other):
        z, v = self._zv(other)
        return zbool(self.context, self.z > z, self.v > v)
# + [markdown] slideshow={"slide_type": "fragment"}
# We use the comparisons and equality operators to provide the other missing operators.
# + slideshow={"slide_type": "subslide"}
class zint(zint):
    def __le__(self, other):
        # a <= b is encoded as (a < b) or (a == b), mirrored concretely.
        z, v = self._zv(other)
        return zbool(self.context, z3.Or(self.z < z, self.z == z),
                     self.v < v or self.v == v)
    def __ge__(self, other):
        # a >= b is encoded as (a > b) or (a == b), mirrored concretely.
        z, v = self._zv(other)
        return zbool(self.context, z3.Or(self.z > z, self.z == z),
                     self.v > v or self.v == v)
# + [markdown] slideshow={"slide_type": "fragment"}
# These functions can be used as follows.
# + slideshow={"slide_type": "subslide"}
with ConcolicTracer() as _:
ia = zint.create(_.context, 'int_a', 0)
ib = zint.create(_.context, 'int_b', 1)
v1 = ia > ib
v2 = ia < ib
print(v1.z, v2.z)
v3 = ia >= ib
v4 = ia <= ib
print(v3.z, v4.z)
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Binary Operators for Integers
#
# We implement relevant arithmetic operators for integers as described in the [Python documentation](https://docs.python.org/3/reference/datamodel.html#object.__add__). (The commented out operators are not directly available for `z3.ArithRef`. They need to be implemented separately if needed. See the exercises for how it can be done.)
# + slideshow={"slide_type": "subslide"}
# Names of int binary operators forwarded to both the concrete int and
# the z3.ArithRef operation of the same name.  Commented-out entries
# have no direct z3.ArithRef counterpart (see the exercises).
INT_BINARY_OPS = [
    '__add__',
    '__sub__',
    '__mul__',
    '__truediv__',
    # '__div__',
    '__mod__',
    # '__divmod__',
    '__pow__',
    # '__lshift__',
    # '__rshift__',
    # '__and__',
    # '__xor__',
    # '__or__',
    '__radd__',
    '__rsub__',
    '__rmul__',
    '__rtruediv__',
    # '__rdiv__',
    '__rmod__',
    # '__rdivmod__',
    '__rpow__',
    # '__rlshift__',
    # '__rrshift__',
    # '__rand__',
    # '__rxor__',
    # '__ror__',
]
# + slideshow={"slide_type": "subslide"}
def make_int_binary_wrapper(fname, fun, zfun):
    """Return a zint method applying `fun` to the concrete values and
    `zfun` to the symbolic values, wrapping both results in a new zint."""
    def proxy(self, other):
        z, v = self._zv(other)
        z_ = zfun(self.z, z)
        v_ = fun(self.v, v)
        if isinstance(v_, float):
            # we do not implement float results yet.
            # The assertion rejects divisions with a fractional result;
            # whole-number floats (e.g. 4/2) are coerced back to int.
            assert round(v_) == v_
            v_ = round(v_)
        return zint(self.context, z_, v_)
    return proxy
# + slideshow={"slide_type": "fragment"}
# Registry of setup functions to (re)run when module state must be rebuilt.
INITIALIZER_LIST = []
# + slideshow={"slide_type": "subslide"}
def initialize():
    # Run all registered initializers (e.g. re-install the zint wrappers).
    for fn in INITIALIZER_LIST:
        fn()
# + slideshow={"slide_type": "fragment"}
def init_concolic_1():
    # Install the binary arithmetic operators on zint, pairing each int
    # method with the z3.ArithRef method of the same name.
    for fname in INT_BINARY_OPS:
        fun = getattr(int, fname)
        zfun = getattr(z3.ArithRef, fname)
        setattr(zint, fname, make_int_binary_wrapper(fname, fun, zfun))
# + slideshow={"slide_type": "fragment"}
INITIALIZER_LIST.append(init_concolic_1)
# + slideshow={"slide_type": "fragment"}
init_concolic_1()
# + slideshow={"slide_type": "subslide"}
with ConcolicTracer() as _:
ia = zint.create(_.context, 'int_a', 0)
ib = zint.create(_.context, 'int_b', 1)
print((ia + ib).z)
print((ia + 10).z)
print((11 + ib).z)
print((ia - ib).z)
print((ia * ib).z)
print((ia / ib).z)
print((ia ** ib).z)
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Integer Unary Operators
#
# We also implement the relevant unary operators as below.
# + slideshow={"slide_type": "fragment"}
# Names of int unary operators forwarded to z3.ArithRef; commented-out
# entries have no direct z3.ArithRef counterpart.
INT_UNARY_OPS = [
    '__neg__',
    '__pos__',
    # '__abs__',
    # '__invert__',
    # '__round__',
    # '__ceil__',
    # '__floor__',
    # '__trunc__',
]
# + slideshow={"slide_type": "subslide"}
def make_int_unary_wrapper(fname, fun, zfun):
    """Return a zint method applying the unary `fun`/`zfun` pair to the
    concrete and symbolic values respectively."""
    def proxy(self):
        return zint(self.context, zfun(self.z), fun(self.v))
    return proxy
# + slideshow={"slide_type": "fragment"}
def init_concolic_2():
    # Install the unary operators on zint, pairing each int method with
    # the z3.ArithRef method of the same name.
    for fname in INT_UNARY_OPS:
        fun = getattr(int, fname)
        zfun = getattr(z3.ArithRef, fname)
        setattr(zint, fname, make_int_unary_wrapper(fname, fun, zfun))
# + slideshow={"slide_type": "fragment"}
INITIALIZER_LIST.append(init_concolic_2)
# + slideshow={"slide_type": "fragment"}
init_concolic_2()
# + [markdown] slideshow={"slide_type": "fragment"}
# We can use the unary operators we defined above as follows:
# + slideshow={"slide_type": "subslide"}
with ConcolicTracer() as _:
ia = zint.create(_.context, 'int_a', 0)
print((-ia).z)
print((+ia).z)
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Using an Integer in a Boolean Context
#
# An integer may be converted to a boolean context in conditionals or as part of boolean predicates such as `or`, `and` and `not`. In these cases, the `__bool__()` method gets called. Unfortunately, this method requires a primitive boolean value. Hence, we force the current integer formula to a boolean predicate and register it in the current context.
# + slideshow={"slide_type": "fragment"}
class zint(zint):
    def __bool__(self):
        # return zbool(self.context, self.z, self.v) <-- not allowed
        # force registering boolean condition
        # `self != 0` yields a zbool whose own __bool__ appends the
        # (in)equality predicate to the path before returning a plain bool.
        if self != 0:
            return True
        return False
# + [markdown] slideshow={"slide_type": "subslide"}
# It is used as follows
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
za = zint.create(_.context, 'int_a', 1)
zb = zint.create(_.context, 'int_b', 0)
if za and zb:
print(1)
# + slideshow={"slide_type": "fragment"}
_.context
# + [markdown] slideshow={"slide_type": "subslide"} toc-hr-collapsed=true
# #### Remaining Methods of the ConcolicTracer
#
# We now complete some of the methods of the `ConcolicTracer`.
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Translating to the SMT Expression Format
#
# Given that we are using an SMT Solver z3, it is often useful to retrieve the corresponding SMT expression for a symbolic expression. This can be used as an argument to `z3` or other SMT solvers.
#
# The format of the SMT expression ([SMT-LIB](http://smtlib.github.io/jSMTLIB/SMTLIBTutorial.pdf)) is as follows:
#
# * Variables declarations in [S-EXP](https://en.wikipedia.org/wiki/S-expression) format.
# E.g. The following declares a symbolic integer variable `x`
# ```
# (declare-const x Int)
# ```
# This declares a `bit vector` `b` of length `8`
# ```
# (declare-const b (_ BitVec 8))
# ```
# This declares a symbolic real variable `r`
# ```
# (declare-const x Real)
# ```
# This declares a symbolic string variable `s`
# ```
# (declare-const s String)
# ```
#
# The declared variables can be used in logical formulas that are encoded in *S-EXP* format. For example, here is a logical formula.
#
# ```
# (assert
# (and
# (= a b)
# (= a c)
# (! b c)))
# ```
# Here is another example, using string variables.
#
# ```
# (or (< 0 (str.indexof (str.substr my_str1 7 19) " where " 0))
# (= (str.indexof (str.substr my_str1 7 19) " where " 0) 0))
# ```
#
# + slideshow={"slide_type": "subslide"}
class ConcolicTracer(ConcolicTracer):
    def smt_expr(self, show_decl=False, simplify=False, path=None):
        """Render the given `path` (default: the traced path conditions)
        in SMT-LIB format.

        If `show_decl` is true, prefix `declare-const` declarations for
        all registered symbolic variables and wrap the conjunction in an
        `(assert ...)`.  If `simplify` is true, let z3 simplify the
        formula first (honored only together with `show_decl`, as in the
        original).  Returns '' when there are no path conditions.

        FIX: the default `path=[]` was a mutable default argument; `None`
        is used as the sentinel instead.  Both defaults are falsy and
        fall back to `self.path`, so callers see identical behavior.
        """
        r = []
        if show_decl:
            for decl in self.decls:
                v = self.decls[decl]
                # 'BitVec' is stored without its width; we use 8-bit vectors.
                v = '(_ BitVec 8)' if v == 'BitVec' else v
                r.append("(declare-const %s %s)" % (decl, v))
        path = path if path else self.path
        if path:
            path = z3.And(path)
            if show_decl:
                if simplify:
                    return '\n'.join([
                        *r,
                        "(assert %s)" % z3.simplify(path).sexpr()
                    ])
                else:
                    return '\n'.join(
                        [*r, "(assert %s)" % path.sexpr()])
            else:
                return z3.simplify(path).sexpr()
        else:
            return ''
# + [markdown] slideshow={"slide_type": "subslide"}
# To see how to use `smt_expr()`, let us consider an example. The `triangle()` function is used to determine if the given sides to a triangle result in an `equilateral` triangle, an `isosceles` triangle, or a `scalene` triangle. It is implemented as follows.
# + slideshow={"slide_type": "subslide"}
def triangle(a, b, c):
    # Classify a triangle by comparing side lengths pairwise:
    # all equal -> 'equilateral'; two equal -> 'isosceles';
    # none equal -> 'scalene'.  The nested branch structure is
    # deliberate: each comparison is one traceable predicate under
    # ConcolicTracer, so do not simplify the control flow.
    if a == b:
        if b == c:
            return 'equilateral'
        else:
            return 'isosceles'
    else:
        if b == c:
            return 'isosceles'
        else:
            if a == c:
                return 'isosceles'
            else:
                return 'scalene'
# + slideshow={"slide_type": "subslide"}
triangle(1, 2, 1)
# + [markdown] slideshow={"slide_type": "fragment"}
# To make it run under `ConcolicTracer`, we first define the arguments. The triangle being defined has sides `1, 1, 1`, i.e. it is an `equilateral` triangle.
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
za = zint.create(_.context, 'int_a', 1)
zb = zint.create(_.context, 'int_b', 1)
zc = zint.create(_.context, 'int_c', 1)
triangle(za, zb, zc)
print(_.context)
# + [markdown] slideshow={"slide_type": "fragment"}
# We can now call `smt_expr()` to retrieve the SMT expression as below.
# + slideshow={"slide_type": "subslide"}
print(_.smt_expr(show_decl=True))
# + [markdown] slideshow={"slide_type": "fragment"}
# The collected predicates can also be solved directly using the Python z3 API.
# + slideshow={"slide_type": "fragment"}
z3.solve(_.path)
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Generating Fresh Names
# While using the proxy classes, we often will have to generate new symbolic variables, with names that have not been used before. For this, we define `fresh_name()` that always generates unique integers for names.
# + slideshow={"slide_type": "fragment"}
COUNTER = 0  # global counter backing fresh_name()
# + slideshow={"slide_type": "fragment"}
def fresh_name():
    # Return a new unique integer, used to build fresh symbolic
    # variable names that have not been used before.
    global COUNTER
    COUNTER += 1
    return COUNTER
# + [markdown] slideshow={"slide_type": "fragment"}
# It can be used as follows
# + slideshow={"slide_type": "fragment"}
fresh_name()
# + slideshow={"slide_type": "subslide"}
def reset_counter():
    # Reset the fresh-name counter so each traced run starts numbering
    # its symbolic variables from 1 (see ConcolicTracer.__enter__).
    global COUNTER
    COUNTER = 0
# + slideshow={"slide_type": "fragment"}
class ConcolicTracer(ConcolicTracer):
    def __enter__(self):
        # Start each traced run with a fresh name counter so symbolic
        # variable names are reproducible across runs.
        reset_counter()
        return self
    def __exit__(self, exc_type, exc_value, tb):
        # Returning None (falsy) lets any exception from the body propagate.
        return
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Translating Arguments to Concolic Proxies
#
# We had previously defined `concolic()` as a transparent function. We now provide the full implementation of this function. It inspects a given function's parameters, and infers the parameter types from the concrete arguments passed in. It then uses this information to instantiate the correct proxy classes for each argument.
# + slideshow={"slide_type": "subslide"}
class ConcolicTracer(ConcolicTracer):
    def concolic(self, args):
        # Wrap each concrete argument in its concolic proxy, selected by
        # type name: an `int` argument gets `zint`, etc.  Assumes a proxy
        # class named 'z' + <type name> exists at module level for every
        # argument type used.
        my_args = []
        for name, arg in zip(self.fn_args, args):
            t = type(arg).__name__
            zwrap = globals()['z' + t]
            # Unique symbolic name: function, parameter, type, counter.
            vname = "%s_%s_%s_%s" % (self.fn.__name__, name, t, fresh_name())
            my_args.append(zwrap.create(self.context, vname, arg))
            self.fn_args[name] = vname  # remember which symbol stands for it
        return my_args
# + [markdown] slideshow={"slide_type": "fragment"}
# This is how it gets used:
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
_[factorial](5)
# + [markdown] slideshow={"slide_type": "subslide"}
# With the new `concolic()` method, the arguments to the factorial are correctly associated with symbolic variables, which allows us to retrieve the predicates encountered.
# + slideshow={"slide_type": "fragment"}
_.context
# + [markdown] slideshow={"slide_type": "subslide"}
# As before, we can also print out the SMT expression which can be passed directly to command line SMT solvers.
# + slideshow={"slide_type": "subslide"}
print(_.smt_expr(show_decl=True))
# + [markdown] slideshow={"slide_type": "subslide"}
# We next define methods to evaluate the SMT expression both in Python and from command line.
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Evaluating the Concolic Expressions
#
# We define `zeval()` to solve the predicates in a context, and return results. It has two modes. The `python` mode uses `z3` Python API to solve and return the results. If the `python` mode is false, it writes the SMT expression to a file, and invokes the command line `z3` for a solution.
# + slideshow={"slide_type": "fragment"}
class ConcolicTracer(ConcolicTracer):
    def zeval(self, python=False, log=False):
        # Solve the collected path conditions, either with the z3 Python
        # API (python=True) or by invoking the z3 command line.
        r, sol = (zeval_py if python else zeval_smt)(self.path, self, log)
        if r == 'sat':
            # Map each function parameter back to its solved value
            # (None if the model did not assign it).
            return r, {k: sol.get(self.fn_args[k], None) for k in self.fn_args}
        else:
            return r, None
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Using the Python API
# Given a set of predicates that the function encountered, and the tracer under which the function was executed, the `zeval_py()` function first declares the relevant symbolic variables, and uses the `z3.Solver()`to provide a set of inputs that would trace the same path through the function.
# + slideshow={"slide_type": "subslide"}
def zeval_py(path, cc, log):
    """Solve the path conditions `path` using the z3 Python API.

    `cc` is the ConcolicTracer holding the variable declarations; `log`
    is accepted for interface parity with zeval_smt() but unused here.
    Returns a (status, assignment) pair where, on 'sat', the assignment
    maps variable names to their z3 model values.
    """
    for decl in cc.decls:
        # Instantiate each declared symbolic variable.  NOTE(review): the
        # exec'd bindings live only in exec()'s local namespace and are
        # never read again -- kept for parity with the original notebook,
        # but effectively dead code.
        if cc.decls[decl] == 'BitVec':
            v = "z3.%s('%s', 8)" % (cc.decls[decl], decl)
        else:
            v = "z3.%s('%s')" % (cc.decls[decl], decl)
        exec(v)
    s = z3.Solver()
    s.add(z3.And(path))
    # FIX: call check() exactly once and branch on the stored result;
    # each check() may be an expensive solver invocation, and the
    # original called it up to three times.
    r = s.check()
    if r == z3.unsat:
        return 'No Solutions', {}
    if r == z3.unknown:
        return 'Gave up', None
    assert r == z3.sat
    m = s.model()
    return 'sat', {d.name(): m[d] for d in m.decls()}
# + [markdown] slideshow={"slide_type": "subslide"}
# It can be used as follows:
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
_[factorial](5)
# + slideshow={"slide_type": "fragment"}
_.zeval(python=True)
# + [markdown] slideshow={"slide_type": "fragment"}
# That is, given the set of constraints, the assignment `n == 5` conforms to all constraints.
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Using the Command Line
#
# The `zeval_smt()` function writes the SMT expression to the file system, and calls the `z3` SMT solver command line to solve it. The result of SMT expression is again an `sexpr`. Hence, we first define `parse_sexp()` to parse and return the correct values.
# + slideshow={"slide_type": "skip"}
import re
# + slideshow={"slide_type": "skip"}
import subprocess
# + slideshow={"slide_type": "fragment"}
SEXPR_TOKEN = r'''(?mx)
 \s*(?:
  (?P<bra>\()|
  (?P<ket>\))|
  (?P<token>[^"()\s]+)|
  (?P<string>"[^"]*")
 )'''
# + slideshow={"slide_type": "subslide"}
def parse_sexp(sexp):
    """Parse the S-expression `sexp` into a nested list of strings.

    Parentheses open and close nested lists; bare tokens are kept
    verbatim, and double-quoted strings are kept without their quotes.
    """
    stack, current = [], []
    for match in re.finditer(SEXPR_TOKEN, sexp):
        # Exactly one named alternative matches per token.
        kind = match.lastgroup
        value = match.group(kind)
        if kind == 'bra':
            # Open a nested list; remember where to resume.
            stack.append(current)
            current = []
        elif kind == 'ket':
            # Close the current list and append it to its parent.
            finished = current
            current = stack.pop()
            current.append(finished)
        elif kind == 'token':
            current.append(value)
        elif kind == 'string':
            current.append(value[1:-1])  # drop the surrounding quotes
        else:
            assert False
    return current
# + [markdown] slideshow={"slide_type": "subslide"}
# The `parse_sexp()` function can be used as follows
# + slideshow={"slide_type": "fragment"}
parse_sexp('abcd (hello 123 (world "hello world"))')
# + [markdown] slideshow={"slide_type": "fragment"}
# We now define `zeval_smt()` which uses the `z3` command line directly, and uses `parse_sexp()` to parse and return the solutions to function arguments if any.
# + slideshow={"slide_type": "skip"}
import tempfile
# + slideshow={"slide_type": "subslide"}
def zeval_smt(path, cc, log):
    """Solve the path conditions `path` via the z3 command-line solver.

    Serializes the SMT expression for `path` (obtained from the tracer
    `cc`) into a temporary `.smt` file, appends `(check-sat)` and
    `(get-model)` directives, and parses the solver's s-expression output.

    Returns ('sat', {name: (value, sort)}) on success,
    ('No Solutions', {}) when unsatisfiable, and ('Gave up', None) when
    the solver is inconclusive or produced no parsable output.
    """
    smt_source = cc.smt_expr(True, True, path)
    with tempfile.NamedTemporaryFile(mode='w', suffix='.smt') as fp:
        fp.write(smt_source)
        fp.write("\n(check-sat)")
        fp.write("\n(get-model)")
        fp.flush()  # make sure z3 sees the complete file
        if log:
            print(smt_source, '(check-sat)', '(get-model)', sep='\n')
        # NOTE(review): `-t` is z3's soft timeout; confirm the intended
        # unit (milliseconds vs. seconds) for the value 60.
        output = subprocess.getoutput("z3 -t:60 " + fp.name)
    if log:
        print(output)
    parsed = parse_sexp(output)
    if not parsed:
        return 'Gave up', None
    kind = parsed[0]
    if kind == 'unknown':
        return 'Gave up', None
    if kind == 'unsat':
        return 'No Solutions', {}
    assert kind == 'sat'
    assert parsed[1][0] == 'model'
    # Each model entry looks like (define-fun name () sort value);
    # map each name to its (value, sort) pair.
    return 'sat', {entry[1]: (entry[-1], entry[-2]) for entry in parsed[1][1:]}
# + [markdown] slideshow={"slide_type": "subslide"}
# We can now use `zeval()` as follows.
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
_[factorial](5)
# + slideshow={"slide_type": "fragment"}
_.zeval()
# + [markdown] slideshow={"slide_type": "fragment"}
# Indeed, we get similar results (`n == 5`) from using the command line as from using the Python API.
# + [markdown] slideshow={"slide_type": "subslide"} toc-hr-collapsed=true
# #### A Proxy Class for Strings
#
# Here, we define the proxy string class `zstr`. First we define our initialization routines. Since `str` is a primitive type, we define `new` to extend it.
# + slideshow={"slide_type": "fragment"}
class zstr(str):
    """A concolic proxy for `str`: pairs a concrete value with a symbolic
    z3 representation.  Since `str` is immutable, the concrete value must
    be installed in `__new__`."""

    def __new__(cls, context, zn, v):
        # Only the concrete value `v` participates in str construction;
        # `context` and `zn` are consumed by `__init__` in later refinements.
        return super().__new__(cls, v)
# + [markdown] slideshow={"slide_type": "fragment"}
# As before, initialization proceeds with `create()` and the constructor.
# + slideshow={"slide_type": "subslide"}
class zstr(zstr):
    @classmethod
    def create(cls, context, zn, v=None):
        """Register (or look up) a symbolic string named `zn` in `context`
        and return a `zstr` proxy wrapping the concrete value `v`."""
        return zproxy_create(cls, 'String', z3.String, context, zn, v)

    def __init__(self, context, z, v=None):
        self.context = context  # shared (declarations, path) pair
        self.z = z              # symbolic z3 string
        self.v = v              # concrete Python string
        # Track the length symbolically too, so predicates over len()
        # can be collected.
        self._len = zint(context, z3.Length(z), len(v))
# + [markdown] slideshow={"slide_type": "fragment"}
# We also define `_zv()` helper to help us with methods that accept another string
# + slideshow={"slide_type": "fragment"}
class zstr(zstr):
    def _zv(self, o):
        """Return `(symbolic, concrete)` for `o`, wrapping a plain string
        in a z3 literal when `o` is not already a `zstr`."""
        if isinstance(o, zstr):
            return o.z, o.v
        return z3.StringVal(o), o
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Retrieving Ordinal Value
# We define `zord` that given a symbolic one character long string, obtains the `ord()` for that. It returns two values. The first one is the variable that corresponds to `ord()`, and second is the predicate that links the variable to the passed in single character string.
# + slideshow={"slide_type": "fragment"}
def zord(context, c):
    """Return a fresh 8-bit symbolic variable representing `ord(c)` for the
    single-character symbolic string `c`.

    The variable is registered in the declarations, and the constraint
    linking it to `c` is appended to the path conditions in `context`."""
    name = "bitvec_%d" % fresh_name()
    bv = z3.BitVec(name, 8)
    context[0][name] = 'BitVec'
    # z3.Unit turns the bitvector (a character code) back into a
    # one-character sequence, which must equal `c`.
    context[1].append(z3.Unit(bv) == c)
    return bv
# + [markdown] slideshow={"slide_type": "fragment"}
# We use it as follows
# + slideshow={"slide_type": "fragment"}
zc = z3.String('arg_%d' % fresh_name())
# + slideshow={"slide_type": "subslide"}
with ConcolicTracer() as _:
zi = zord(_.context, zc)
# + [markdown] slideshow={"slide_type": "fragment"}
# The symbolic bitvector is in `zi`. It is linked to the passed in argument in `context`
# + slideshow={"slide_type": "fragment"}
_.context
# + [markdown] slideshow={"slide_type": "fragment"}
# We can specify what the result of `ord()` should be, and call `z3.solve()` to provide us with a solution that will provide the required result.
# + slideshow={"slide_type": "fragment"}
z3.solve(_.path + [zi == 65])
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Translating an Ordinal Value to ASCII
# Similarly, we can convert the ASCII value back to a single character string using `zchr()`
# + slideshow={"slide_type": "fragment"}
def zchr(context, i):
    """Return a fresh symbolic one-character string representing `chr(i)`
    for the 8-bit symbolic bitvector `i`.

    The string variable is registered in the declarations, and constraints
    tying it to `i` (and fixing its length to 1) are appended to the path
    conditions."""
    name = 'string_%d' % fresh_name()
    ch = z3.String(name)
    context[0][name] = 'String'
    constraint = z3.And([ch == z3.Unit(i), z3.Length(ch) == 1])
    context[1].append(constraint)
    return ch
# + [markdown] slideshow={"slide_type": "fragment"}
# For using it, we first define a bitvector that is 8 bits long.
# + slideshow={"slide_type": "fragment"}
i = z3.BitVec('bv_%d' % fresh_name(), 8)
# + [markdown] slideshow={"slide_type": "fragment"}
# We can now retrieve the `chr()` representation as below.
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
zc = zchr(_.context, i)
# + slideshow={"slide_type": "subslide"}
_.context
# + [markdown] slideshow={"slide_type": "fragment"}
# As before, we can specify what the end result of calling `chr()` should be to get the original argument.
# + slideshow={"slide_type": "fragment"}
z3.solve(_.path + [zc == z3.StringVal('a')])
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Equality between Strings
#
# The equality of `zstr` is defined similar to that of `zint`
# + slideshow={"slide_type": "fragment"}
class zstr(zstr):
    def __eq__(self, other):
        """Equality comparison returning a `zbool` that pairs the symbolic
        and the concrete comparison results."""
        other_z, other_v = self._zv(other)
        return zbool(self.context, self.z == other_z, self.v == other_v)

    def __req__(self, other):
        # Reflected form simply delegates; equality is symmetric.
        return self.__eq__(other)
# + [markdown] slideshow={"slide_type": "fragment"}
# The `zstr` class is used as follows.
# + slideshow={"slide_type": "subslide"}
def tstr1(s):
    """Return True iff `s` is exactly 'h' (one traceable comparison)."""
    if s == 'h':
        return True
    return False
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
r = _[tstr1]('h')
# + slideshow={"slide_type": "fragment"}
_.zeval()
# + [markdown] slideshow={"slide_type": "fragment"}
# It works even if we have more than one character.
# + slideshow={"slide_type": "fragment"}
def tstr1(s):
    """Return True iff `s` equals 'hello world' (multi-character equality)."""
    if s == 'hello world':
        return True
    return False
# + slideshow={"slide_type": "subslide"}
with ConcolicTracer() as _:
r = _[tstr1]('hello world')
# + slideshow={"slide_type": "fragment"}
_.context
# + slideshow={"slide_type": "fragment"}
_.zeval()
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Concatenation of Strings
# What if we need to concatenate two strings? We need additional helpers to accomplish that.
# + slideshow={"slide_type": "fragment"}
class zstr(zstr):
    def __add__(self, other):
        """Concatenate `self + other`, tracking both the symbolic and the
        concrete result."""
        z, v = self._zv(other)
        return zstr(self.context, self.z + z, self.v + v)

    def __radd__(self, other):
        """Concatenate `other + self` (invoked when the left operand is a
        plain `str`).

        BUG FIX: previously delegated to `__add__`, which produced
        `self + other`; string concatenation is not commutative, so the
        operands must be prepended in the caller's order.
        """
        z, v = self._zv(other)
        return zstr(self.context, z + self.z, v + self.v)
# + [markdown] slideshow={"slide_type": "fragment"}
# Here is how it can be used. First, we create the wrapped arguments
# + slideshow={"slide_type": "subslide"}
with ConcolicTracer() as _:
v1, v2 = [zstr.create(_.context, 'arg_%d' % fresh_name(), s)
for s in ['hello', 'world']]
if (v1 + ' ' + v2) == 'hello world':
print('hello world')
# + [markdown] slideshow={"slide_type": "fragment"}
# The addition of symbolic variables is preserved in `context`
# + slideshow={"slide_type": "fragment"}
_.context
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Producing Substrings
# Similarly, accessing substrings also require extra help.
# + slideshow={"slide_type": "subslide"}
class zstr(zstr):
    def __getitem__(self, idx):
        """Return a `zstr` for the character or slice at `idx`, keeping the
        symbolic substring in sync with the concrete one."""
        if isinstance(idx, slice):
            start, stop, step = idx.indices(len(self.v))
            assert step == 1  # for now
            assert stop >= start  # for now
            rz = z3.SubString(self.z, start, stop - start)
            rv = self.v[idx]
        elif isinstance(idx, int):
            # BUG FIX: normalize negative indexes. Python indexing counts
            # from the end for idx < 0, but z3.SubString needs the actual
            # non-negative offset; without this the symbolic and concrete
            # sides diverge.
            if idx < 0:
                idx += len(self.v)
            rz = z3.SubString(self.z, idx, 1)
            rv = self.v[idx]
        else:
            assert False  # for now
        return zstr(self.context, rz, rv)

    def __iter__(self):
        return zstr_iterator(self.context, self)
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### An Iterator Class for Strings
#
# We define the iterator as follows.
# + slideshow={"slide_type": "subslide"}
class zstr_iterator():
    """Iterator over a `zstr` that yields one-character `zstr` values,
    registering a path predicate for each loop-termination check."""

    def __init__(self, context, zstr):
        self.context = context
        self._zstr = zstr
        self._str_idx = 0
        # A zint, not a plain int: comparing against it in __next__
        # records the "index == length" predicate on every iteration.
        self._str_max = zstr._len

    def __next__(self):
        if self._str_idx == self._str_max:  # zint equality -> predicate
            raise StopIteration
        c = self._zstr[self._str_idx]
        self._str_idx += 1
        return c

    def __len__(self):
        # BUG FIX: previously returned the nonexistent `self._len`
        # (AttributeError); the tracked length lives in `_str_max`.
        return self._str_max
# + [markdown] slideshow={"slide_type": "subslide"}
# Here is how it can be used.
# + slideshow={"slide_type": "fragment"}
def tstr2(s):
    """Return True iff `s` has 'h', 'e', 'l' at positions 0, 1 and 3
    (position 2 is deliberately unconstrained)."""
    if s[0] == 'h' and s[1] == 'e' and s[3] == 'l':
        return True
    return False
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
r = _[tstr2]('hello')
# + [markdown] slideshow={"slide_type": "fragment"}
# Again, the context shows predicates encountered.
# + slideshow={"slide_type": "fragment"}
_.context
# + [markdown] slideshow={"slide_type": "subslide"}
# The function `zeval()` returns a solution for the predicate. Note that the value returned is not exactly the argument that we passed in. This is a consequence of the predicates we have. That is, we have no constraints on what the character value on `s[2]` should be.
# + slideshow={"slide_type": "fragment"}
_.zeval()
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Translating to Upper and Lower Equivalents
#
# A major complication is supporting `upper()` and `lower()` methods. We use the previously defined `zchr()` and `zord()` functions to accomplish this.
# + slideshow={"slide_type": "subslide"}
class zstr(zstr):
    def upper(self):
        """Concolic `str.upper`: build the uppercased string character by
        character, registering an "is this a lowercase letter?" predicate
        for each position via the `zbool` coercion below."""
        empty = ''
        ne = 'empty_%d' % fresh_name()
        # Start from a fresh symbolic string pinned to the empty string.
        result = zstr.create(self.context, ne, empty)
        self.context[1].append(z3.StringVal(empty) == result.z)
        cdiff = (ord('a') - ord('A'))  # case distance in ASCII
        for i in self:
            # Symbolic side: ordinal, uppercased candidate, range check.
            oz = zord(self.context, i.z)
            uz = zchr(self.context, oz - cdiff)
            rz = z3.And([oz >= ord('a'), oz <= ord('z')])
            # Concrete side: the same computation on the actual character.
            ov = ord(i.v)
            uv = chr(ov - cdiff)
            rv = ov >= ord('a') and ov <= ord('z')
            # Truth-testing the zbool records the case-check predicate.
            if zbool(self.context, rz, rv):
                i = zstr(self.context, uz, uv)
            else:
                i = zstr(self.context, i.z, i.v)
            result += i
        return result
# + [markdown] slideshow={"slide_type": "subslide"}
# The `lower()` function is similar to `upper()` except that the character ranges are switched: since lowercase code points lie above the uppercase ones, we add the difference to the ordinal to make a character lowercase.
# + slideshow={"slide_type": "subslide"}
class zstr(zstr):
    def lower(self):
        """Concolic `str.lower`: mirror image of `upper()` — adds the case
        distance instead of subtracting it, and checks for uppercase
        letters instead of lowercase ones."""
        empty = ''
        ne = 'empty_%d' % fresh_name()
        # Start from a fresh symbolic string pinned to the empty string.
        result = zstr.create(self.context, ne, empty)
        self.context[1].append(z3.StringVal(empty) == result.z)
        cdiff = (ord('a') - ord('A'))  # case distance in ASCII
        for i in self:
            # Symbolic side: ordinal, lowercased candidate, range check.
            oz = zord(self.context, i.z)
            uz = zchr(self.context, oz + cdiff)
            rz = z3.And([oz >= ord('A'), oz <= ord('Z')])
            # Concrete side: the same computation on the actual character.
            ov = ord(i.v)
            uv = chr(ov + cdiff)
            rv = ov >= ord('A') and ov <= ord('Z')
            # Truth-testing the zbool records the case-check predicate.
            if zbool(self.context, rz, rv):
                i = zstr(self.context, uz, uv)
            else:
                i = zstr(self.context, i.z, i.v)
            result += i
        return result
# + [markdown] slideshow={"slide_type": "subslide"}
# Here is how it is used.
# + slideshow={"slide_type": "fragment"}
def tstr3(s):
    """Return True iff `s` uppercases to 'H'."""
    if s.upper() == 'H':
        return True
    return False
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
r = _[tstr3]('h')
# + [markdown] slideshow={"slide_type": "fragment"}
# Again, we use `zeval()` to solve the collected constraints, and verify that our constraints are correct.
# + slideshow={"slide_type": "fragment"}
_.zeval()
# + [markdown] slideshow={"slide_type": "fragment"}
# Here is a larger example using `lower()`
# + slideshow={"slide_type": "subslide"}
def tstr4(s):
    """Return True iff `s` lowercases to 'hello world'."""
    if s.lower() == 'hello world':
        return True
    return False
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
r = _[tstr4]('Hello World')
# + slideshow={"slide_type": "fragment"}
_.zeval()
# + [markdown] slideshow={"slide_type": "fragment"}
# Again, we obtain the right input value.
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Checking for String Prefixes
# We define `startswith()`.
# + slideshow={"slide_type": "subslide"}
class zstr(zstr):
    def startswith(self, other, beg=0, end=None):
        """Concolic `str.startswith`: `other` may be a single prefix or a
        tuple of candidate prefixes.

        Symbolically, `self` starts with a prefix at `beg` iff the first
        occurrence of that prefix at or after `beg` is exactly at `beg`."""
        assert end is None  # for now
        assert isinstance(beg, int)  # for now
        zb = z3.IntVal(beg)
        others = other if isinstance(other, tuple) else (other, )
        last = False
        for o in others:
            z, v = self._zv(o)
            r = z3.IndexOf(self.z, z, zb)
            # BUG FIX: pass `beg` to the concrete check as well, so the
            # concrete and symbolic sides agree for beg != 0.
            last = zbool(self.context, r == zb, self.v.startswith(v, beg))
            if last:
                return last
        return last
# + [markdown] slideshow={"slide_type": "subslide"}
# An example.
# + slideshow={"slide_type": "fragment"}
def tstr5(s):
    """Return True iff `s` starts with 'hello'."""
    if s.startswith('hello'):
        return True
    return False
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
r = _[tstr5]('hello world')
# + slideshow={"slide_type": "fragment"}
_.zeval()
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
r = _[tstr5]('my world')
# + slideshow={"slide_type": "fragment"}
_.zeval()
# + [markdown] slideshow={"slide_type": "subslide"}
# As before, the predicates only ensure that the `startswith()` returned a true value. Hence, our solution reflects that.
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Finding Substrings
# We also define `find()`
# + slideshow={"slide_type": "fragment"}
class zstr(zstr):
    def find(self, other, beg=0, end=None):
        """Concolic `str.find`: return a `zint` holding the index of
        `other` (or -1), mirrored symbolically by `z3.IndexOf`."""
        assert end is None  # for now
        assert isinstance(beg, int)  # for now
        sub_z, sub_v = self._zv(other)
        symbolic_idx = z3.IndexOf(self.z, sub_z, z3.IntVal(beg))
        concrete_idx = self.v.find(sub_v, beg, end)
        return zint(self.context, symbolic_idx, concrete_idx)
# + [markdown] slideshow={"slide_type": "fragment"}
# An example.
# + slideshow={"slide_type": "subslide"}
def tstr6(s):
    """Return True iff `s` contains 'world'."""
    if s.find('world') != -1:
        return True
    return False
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
r = _[tstr6]('hello world')
# + slideshow={"slide_type": "fragment"}
_.zeval()
# + [markdown] slideshow={"slide_type": "fragment"}
# As before, the predicates only ensure that the `find()` returned a value greater than -1. Hence, our solution reflects that.
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Remove Space from Ends
# + [markdown] slideshow={"slide_type": "fragment"}
# We next implement `strip()`.
# + slideshow={"slide_type": "skip"}
import string
# + slideshow={"slide_type": "subslide"}
class zstr(zstr):
    def rstrip(self, chars=None):
        """Concolic `str.rstrip`: recursively drop trailing characters that
        occur in `chars` (default: whitespace), registering one "is the
        last character strippable?" predicate per dropped character."""
        if chars is None:
            chars = string.whitespace
        if self._len == 0:
            return self
        else:
            last_idx = self._len - 1  # zint arithmetic: stays symbolic
            cz = z3.SubString(self.z, last_idx.z, 1)
            cv = self.v[-1]
            # Membership test expressed as a disjunction over `chars`.
            zcheck_space = z3.Or([cz == z3.StringVal(char) for char in chars])
            vcheck_space = any(cv == char for char in chars)
            if zbool(self.context, zcheck_space, vcheck_space):
                # Drop the last character and recurse on the remainder.
                return zstr(self.context, z3.SubString(self.z, 0, last_idx.z),
                    self.v[0:-1]).rstrip(chars)
            else:
                return self
# + slideshow={"slide_type": "subslide"}
def tstr7(s):
    """Return True iff `s` equals 'a b' after stripping trailing spaces."""
    if s.rstrip(' ') == 'a b':
        return True
    return False
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
r = _[tstr7]('a b ')
print(r)
# + slideshow={"slide_type": "fragment"}
_.zeval()
# + slideshow={"slide_type": "subslide"}
class zstr(zstr):
    def lstrip(self, chars=None):
        """Concolic `str.lstrip`: recursively drop leading characters that
        occur in `chars` (default: whitespace), registering one "is the
        first character strippable?" predicate per dropped character.

        (The unused local `first_idx = 0` from the original was removed.)
        """
        if chars is None:
            chars = string.whitespace
        if self._len == 0:
            return self
        else:
            cz = z3.SubString(self.z, 0, 1)
            cv = self.v[0]
            # Membership test expressed as a disjunction over `chars`.
            zcheck_space = z3.Or([cz == z3.StringVal(char) for char in chars])
            vcheck_space = any(cv == char for char in chars)
            if zbool(self.context, zcheck_space, vcheck_space):
                # Drop the first character and recurse on the remainder.
                return zstr(self.context, z3.SubString(
                    self.z, 1, self._len.z), self.v[1:]).lstrip(chars)
            else:
                return self
# + slideshow={"slide_type": "subslide"}
def tstr8(s):
    """Return True iff `s` equals 'a b' after stripping leading spaces."""
    if s.lstrip(' ') == 'a b':
        return True
    return False
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
r = _[tstr8](' a b')
print(r)
# + slideshow={"slide_type": "fragment"}
_.zeval()
# + slideshow={"slide_type": "fragment"}
class zstr(zstr):
    def strip(self, chars=None):
        """Concolic `str.strip`: strip `chars` (default: whitespace) from
        both ends, collecting the predicates of both passes."""
        left_stripped = self.lstrip(chars)
        return left_stripped.rstrip(chars)
# + [markdown] slideshow={"slide_type": "subslide"}
# Example usage.
# + slideshow={"slide_type": "fragment"}
def tstr9(s):
    """Return True iff `s` equals 'a b' after stripping both ends."""
    if s.strip() == 'a b':
        return True
    return False
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
r = _[tstr9](' a b ')
print(r)
# + slideshow={"slide_type": "fragment"}
_.zeval()
# + [markdown] slideshow={"slide_type": "fragment"}
# The `strip()` has generated the right constraints.
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Splitting Strings
#
# We implement string `split()` as follows.
# + slideshow={"slide_type": "subslide"}
class zstr(zstr):
    def split(self, sep=None, maxsplit=-1):
        """Concolic `str.split` on an explicit separator string.

        Recursively peels off the prefix before the first occurrence of
        `sep`, registering a "does `sep` occur?" predicate at each step.

        (An unused local `zmid` from the original was removed; it built a
        z3 expression that was never referenced and had no side effects.)
        """
        assert sep is not None  # default space based split is complicated
        assert maxsplit == -1  # for now.
        zsep = z3.StringVal(sep)
        zl = z3.Length(zsep)
        # zi would be the length of prefix
        zi = z3.IndexOf(self.z, zsep, z3.IntVal(0))
        # Z3Bug: There is a bug in the `z3.IndexOf` method which returns
        # `z3.SeqRef` instead of `z3.ArithRef`. So we need to fix it.
        zi = z3.ArithRef(zi.ast, zi.ctx)
        vi = self.v.find(sep)
        if zbool(self.context, zi >= z3.IntVal(0), vi >= 0):
            zprefix = z3.SubString(self.z, z3.IntVal(0), zi)
            # NOTE(review): the suffix length below overshoots (it is the
            # full string length); this presumes SubString clamps the
            # extraction to the end of the string — confirm against z3.
            zsuffix = z3.SubString(self.z, zi + zl,
                                   z3.Length(self.z))
            return [zstr(self.context, zprefix, self.v[0:vi])] + zstr(
                self.context, zsuffix, self.v[vi + len(sep):]).split(
                sep, maxsplit)
        else:
            return [self]
# + slideshow={"slide_type": "subslide"}
def tstr10(s):
    """Return True iff `s` splits on ',' into exactly ['a', 'b', 'c']."""
    if s.split(',') == ['a', 'b', 'c']:
        return True
    return False
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
r = _[tstr10]('a,b,c')
print(r)
# + slideshow={"slide_type": "fragment"}
_.zeval()
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### Trip Wire
#
# For easier debugging, we abort any calls to methods in `str` that are not overridden by `zstr`.
# + slideshow={"slide_type": "fragment"}
def make_str_abort_wrapper(fun):
    """Return a stand-in for the `str` method `fun` that raises when
    called, instead of silently falling back to concrete (non-concolic)
    string behavior."""
    name = fun.__name__

    def aborting_proxy(*args, **kwargs):
        raise Exception('%s Not implemented in `zstr`' % name)

    return aborting_proxy
# + slideshow={"slide_type": "subslide"}
def init_concolic_3():
    """Install abort wrappers on every `str` method that `zstr` does not
    override, so accidentally falling back to concrete string behavior
    fails loudly instead of silently losing constraints.

    Methods in the exclusion list below are left untouched: they are
    either harmless introspection dunders or needed by IPython/Jupyter.
    """
    # Collect the method names that zstr itself (re)defines.
    strmembers = inspect.getmembers(zstr, callable)
    zstrmembers = {m[0] for m in strmembers if len(
        m) == 2 and 'zstr' in m[1].__qualname__}
    for name, fn in inspect.getmembers(str, callable):
        # Omitted 'splitlines' as this is needed for formatting output in
        # IPython/Jupyter
        if name not in zstrmembers and name not in [
                'splitlines',
                '__class__',
                '__contains__',
                '__delattr__',
                '__dir__',
                '__format__',
                '__ge__',
                '__getattribute__',
                '__getnewargs__',
                '__gt__',
                '__hash__',
                '__le__',
                '__len__',
                '__lt__',
                '__mod__',
                '__mul__',
                '__ne__',
                '__reduce__',
                '__reduce_ex__',
                '__repr__',
                '__rmod__',
                '__rmul__',
                '__setattr__',
                '__sizeof__',
                '__str__']:
            setattr(zstr, name, make_str_abort_wrapper(fn))
# + slideshow={"slide_type": "subslide"}
INITIALIZER_LIST.append(init_concolic_3)
# + slideshow={"slide_type": "fragment"}
init_concolic_3()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Examples
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Triangle
#
# We previously showed how to run `triangle()` under `ConcolicTracer`.
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
print(_[triangle](1, 2, 3))
# + [markdown] slideshow={"slide_type": "fragment"}
# The predicates are as follows:
# + slideshow={"slide_type": "fragment"}
_.path
# + slideshow={"slide_type": "subslide"}
_.zeval()
# + [markdown] slideshow={"slide_type": "fragment"}
# We can modify the predicates if necessary. First, we retrieve the symbolic variables.
# + slideshow={"slide_type": "fragment"}
za, zb, zc = [z3.Int(s) for s in _.context[0].keys()]
# + [markdown] slideshow={"slide_type": "fragment"}
# Then, we pass a modified predicate to `zeval()`. The key determines which predicate the new predicate will replace.
# + slideshow={"slide_type": "fragment"}
_.zeval({1: zb == zc})
# + slideshow={"slide_type": "fragment"}
triangle(1, 0, 1)
# + [markdown] slideshow={"slide_type": "fragment"}
# The updated predicate returns `isosceles` as expected.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Round
#
# Here is a function that gives you the nearest ten's multiplier
# + slideshow={"slide_type": "fragment"}
def round10(r):
    """Round `r` up to the next multiple of 10 by repeated increments
    (kept as a loop so each `% 10` test contributes a path predicate)."""
    while r % 10 != 0:
        r = r + 1
    return r
# + [markdown] slideshow={"slide_type": "fragment"}
# As before, we execute the function under the `ConcolicTracer` context.
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
r = _[round10](1)
# + [markdown] slideshow={"slide_type": "fragment"}
# We verify that we were able to capture all the predicates
# + slideshow={"slide_type": "subslide"}
_.context
# + [markdown] slideshow={"slide_type": "fragment"}
# We use `zeval()` to obtain results.
# + slideshow={"slide_type": "subslide"}
_.zeval()
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Absolute Maximum
#
# Do our concolic proxies work across functions? Say we have a function `abs_value()` as below.
# + slideshow={"slide_type": "fragment"}
def abs_value(a):
    """Return the absolute value of `a` (branching explicitly so the sign
    test is recorded as a path predicate)."""
    if a > 0:
        return a
    return -a
# + [markdown] slideshow={"slide_type": "fragment"}
# It is called by another function `abs_max()`
# + slideshow={"slide_type": "subslide"}
def abs_max(a, b):
    """Return max(|a|, |b|), computed via `abs_value()`."""
    abs_a = abs_value(a)
    abs_b = abs_value(b)
    if abs_a > abs_b:
        larger = abs_a
    else:
        larger = abs_b
    return larger
# + [markdown] slideshow={"slide_type": "fragment"}
# Using the `Concolic()` context on `abs_max()`.
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
_[abs_max](2, 1)
# + [markdown] slideshow={"slide_type": "fragment"}
# As expected, we have the predicates across functions.
# + slideshow={"slide_type": "fragment"}
_.context
# + slideshow={"slide_type": "subslide"}
_.zeval()
# + [markdown] slideshow={"slide_type": "fragment"}
# Solving the predicates works as expected.
# + [markdown] slideshow={"slide_type": "fragment"}
# Using negative numbers as arguments so that a different branch is taken in `abs_value()`
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
_[abs_max](-2, -1)
# + slideshow={"slide_type": "fragment"}
_.context
# + slideshow={"slide_type": "fragment"}
_.zeval()
# + [markdown] slideshow={"slide_type": "subslide"}
# The solution reflects our predicates. (We used `a > 0` in `abs_value()`).
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Binomial Coefficient
#
# For a larger example that uses different kinds of variables, say we want to compute the binomial coefficient by the following formulas
#
# $$
# ^nP_k=\frac{n!}{(n-k)!}
# $$
#
# $$
# \binom nk=\,^nC_k=\frac{^nP_k}{k!}
# $$
#
# + [markdown] slideshow={"slide_type": "subslide"}
# we define the functions as follows.
# + slideshow={"slide_type": "fragment"}
def factorial(n):
    """Return n! iteratively (the `n != 0` test yields one predicate per
    loop iteration; a negative `n` would not terminate)."""
    product = 1
    while n != 0:
        product = product * n
        n = n - 1
    return product
# + slideshow={"slide_type": "fragment"}
def permutation(n, k):
    """Return nPk = n! / (n - k)! (a float, due to true division)."""
    return factorial(n) / factorial(n - k)
# + slideshow={"slide_type": "fragment"}
def combination(n, k):
    """Return nCk = nPk / k! (a float, due to true division)."""
    return permutation(n, k) / factorial(k)
# + slideshow={"slide_type": "fragment"}
def binomial(n, k):
    """Return the binomial coefficient C(n, k), validating the arguments."""
    if n < 0 or k < 0 or n < k:
        raise Exception('Invalid values')
    return combination(n, k)
# + [markdown] slideshow={"slide_type": "subslide"}
# As before, we run the function under `ConcolicTracer`.
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
v = _[binomial](4, 2)
# + [markdown] slideshow={"slide_type": "fragment"}
# Then call `zeval()` to evaluate.
# + slideshow={"slide_type": "fragment"}
_.zeval()
# + slideshow={"slide_type": "skip"} active=""
# The values returned are the same as the input values, as expected.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Database
# + [markdown] slideshow={"slide_type": "fragment"} toc-hr-collapsed=true
# For a larger example using the Concolic String class `zstr`, we use the DB class from the [chapter on information flow](InformationFlow.ipynb).
# + slideshow={"slide_type": "skip"}
from InformationFlow import DB, sample_db, update_inventory
# + [markdown] slideshow={"slide_type": "fragment"}
# We first populate our database.
# + slideshow={"slide_type": "skip"}
from GrammarMiner import VEHICLES # minor dependency
# + slideshow={"slide_type": "fragment"}
db = sample_db()
for V in VEHICLES:
update_inventory(db, V)
# + slideshow={"slide_type": "fragment"}
db.db
# + [markdown] slideshow={"slide_type": "subslide"}
# We are now ready to fuzz our `DB` class. Hash functions are difficult to handle directly (because they rely on internal C functions). Hence we modify `table()` slightly.
# + slideshow={"slide_type": "fragment"}
class ConcolicDB(DB):
    """A `DB` variant whose table and column lookups use explicit string
    comparison loops (rather than hash-based dict lookups), so concolic
    string proxies can trace the comparisons."""

    def table(self, t_name):
        for name, decl in self.db:
            if t_name == name:
                return decl
        raise SQLException('Table (%s) was not found' % repr(t_name))

    def column(self, decl, c_name):
        for name in decl:
            if c_name == name:
                return decl[name]
        raise SQLException('Column (%s) was not found' % repr(c_name))
# + [markdown] slideshow={"slide_type": "subslide"}
# To make it easy, we define a single function `db_select()` that directly invokes `db.sql()`.
# + slideshow={"slide_type": "fragment"}
def db_select(s):
    """Run the SQL query `s` against a fresh `ConcolicDB` populated from
    the global sample database `db`, returning the query result."""
    concolic_db = ConcolicDB()
    concolic_db.db = [(name, table) for (name, table) in db.db.items()]
    return concolic_db.sql(s)
# + [markdown] slideshow={"slide_type": "fragment"}
# We now want to run SQL statements under our `ConcolicTracer`, and collect predicates obtained.
# + slideshow={"slide_type": "fragment"}
with ConcolicTracer() as _:
_[db_select]('select kind from inventory')
# + [markdown] slideshow={"slide_type": "fragment"}
# The predicates encountered during the execution are as follows:
# + slideshow={"slide_type": "subslide"}
_.path
# + [markdown] slideshow={"slide_type": "subslide"}
# We can use `zeval()` as before to solve the constraints.
# + slideshow={"slide_type": "fragment"}
_.zeval()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Fuzzing with Constraints
#
# In this section, we show how to use the infrastructure we built for concolic execution for guiding fuzzing.
#
#
# ### SimpleConcolicFuzzer
#
# The `SimpleConcolicFuzzer` starts with a sample input generated by some other fuzzer. It then runs the function being tested under `ConcolicTracer`, and collects the path predicates. It then negates random predicates within the path and solves it with *z3* to produce a new output that is guaranteed to take a different path than the original.
# + [markdown] slideshow={"slide_type": "subslide"}
# First, we import the `Fuzzer` interface, and an example program `hang_if_no_space()`
# + slideshow={"slide_type": "skip"}
from Fuzzer import Fuzzer, hang_if_no_space
# + slideshow={"slide_type": "skip"}
from ExpectError import ExpectTimeout, ExpectError
# + slideshow={"slide_type": "skip"}
import random
# + [markdown] slideshow={"slide_type": "fragment"}
# To make the fuzzer work, we need a way to represent decisions made during trace. We keep this in a binary tree where each node represents a decision made, and each leaf represents a complete path. A node in the binary tree is represented by the `TraceNode` class.
#
# When a new node is added, it represents a decision taken by the parent on some predicate. This predicate is supplied as `smt_val`, which is `True` for this child to be reached. Since the predicate is actually present in the parent node, we also carry a member `smt` which will be updated by the first child to be added.
# + slideshow={"slide_type": "subslide"}
class TraceNode:
    """One decision point in the trace tree.

    `_smt_val` is the (simplified) predicate whose outcome led *to* this
    node; `smt` is the predicate this node itself branches on, filled in
    by the first child that gets added."""

    def __init__(self, smt_val, parent, info):
        # The predicate that led to this node (None for the sentinel root).
        self._smt_val = z3.simplify(smt_val) if smt_val is not None else None
        # The predicate this node may branch on later; set by its first child.
        self.smt = None
        self.info = info
        self.parent = parent
        self.children = {}
        self.path = None
        self.tree = None
        self._pattern = None
        self.log = True

    def no(self):
        return self.children.get(self.tree.no_bit)

    def yes(self):
        return self.children.get(self.tree.yes_bit)

    def get_children(self):
        return (self.no(), self.yes())

    def __str__(self):
        return 'TraceNode[%s]' % ','.join(self.children.keys())
# + [markdown] slideshow={"slide_type": "subslide"}
# We add a `PlausibleChild` class to track the leaf nodes.
# + slideshow={"slide_type": "fragment"}
class PlausibleChild:
    """A leaf placeholder for a path that has not been explored yet: it
    hangs off `parent` on the `cond` branch ('0' or '1') of `tree`."""

    def __init__(self, parent, cond, tree):
        self.parent = parent
        self.cond = cond
        self.tree = tree
        self._smt_val = None  # computed lazily by smt_val()

    def __repr__(self):
        return 'PlausibleChild[%s]' % (self.parent.pattern() + ':' + self.cond)
# + [markdown] slideshow={"slide_type": "fragment"}
# When the leaf nodes are used to generate new paths, we expect its sibling `TraceNode` to have been already explored. Hence, we make use of the sibling's values for context `cc`, and the `smt_val` from the parent.
# + slideshow={"slide_type": "subslide"}
class PlausibleChild(PlausibleChild):
    def smt_val(self):
        """Return (and cache) the predicate that would steer execution
        into this as-yet-unexplored child."""
        if self._smt_val is not None:
            return self._smt_val
        # If the parent has another (explored) child, that child has
        # already filled in the parent's `smt`; our branch condition is
        # either that predicate or its negation.
        assert self.parent.smt is not None
        if self.cond == self.tree.no_bit:
            self._smt_val = z3.Not(self.parent.smt)
        else:
            self._smt_val = self.parent.smt
        return self._smt_val

    def cc(self):
        """Return the ConcolicTracer associated with this position, taken
        from the parent or from its single explored sibling."""
        if self.parent.info.get('cc') is not None:
            return self.parent.info['cc']
        # A plausible child implies the parent has at most one real child;
        # that sibling carries the tracer we need.
        siblings = list(self.parent.children.values())
        assert len(siblings) == 1
        return siblings[0].info['cc']
# + [markdown] slideshow={"slide_type": "subslide"}
# The `PlausibleChild` instance is used to generate new paths to explore using `path_expression()`.
# + slideshow={"slide_type": "fragment"}
class PlausibleChild(PlausibleChild):
    def path_expression(self):
        """Return the list of predicates from the root down to (and
        including) this plausible child."""
        nodes = self.parent.get_path_to_root()
        assert nodes[0]._smt_val is None  # the root is a sentinel
        return [node._smt_val for node in nodes[1:]] + [self.smt_val()]
# + [markdown] slideshow={"slide_type": "fragment"}
# The `TraceTree` class helps us keep track of the binary tree. In the beginning, the root is a sentinel `TraceNode` instance, and simply have two plausible children as leaves. As soon as the first trace is added, one of the plausible children will become a true child.
# + slideshow={"slide_type": "subslide"}
class TraceTree:
    """Binary tree of branch decisions observed across traced runs.

    The root is a sentinel `TraceNode`; unexplored branches are tracked
    in `leaves` (keyed by 'pattern:bit'), fully explored paths end up in
    `completed_paths`."""

    def __init__(self):
        self.root = TraceNode(smt_val=None, parent=None, info={'num': 0})
        self.root.tree = self
        self.leaves = {}
        self.no_bit, self.yes_bit = '0', '1'
        # Both branches of the (still decision-free) root start out as
        # merely plausible.
        root_prefix = ':'
        for bit in (self.no_bit, self.yes_bit):
            self.leaves[root_prefix + bit] = PlausibleChild(self.root, bit, self)
        self.completed_paths = {}
# + [markdown] slideshow={"slide_type": "fragment"}
# The `add_trace()` method of the `TraceTree` provides a way for new traces to be added. It is kept separate from the initialization as we might want to add more than one trace from the same function.
# + slideshow={"slide_type": "subslide"}
class TraceTree(TraceTree):
    def add_trace(self, tracer, string):
        """Merge one traced run into the tree.

        `tracer` supplies the path predicates; `string` is the input that
        produced them.  A sentinel `True` child is appended at the end to
        mark the path as completed.
        """
        last = self.root
        i = 0  # stays 0 when the path is empty
        for i, elt in enumerate(tracer.path):
            last = last.add_child(elt=elt, i=i + 1, cc=tracer, string=string)
        last.add_child(elt=z3.BoolVal(True), i=i + 1, cc=tracer, string=string)
# + [markdown] slideshow={"slide_type": "fragment"}
# To make `add_trace()` work, we need a little more infrastructure, that we define below.
#
# The `bit()` method translates a predicate to a bit that corresponds to the decision taken at each predicate. If the `if` branch is taken, the result is `1`, while `else` branch is indicated by `0`. The pattern indicates the bit-pattern of decisions required to reach the leaf from the root.
# + slideshow={"slide_type": "subslide"}
class TraceNode(TraceNode):
    def bit(self):
        """Return '0' if this node was reached via the negated ('not')
        branch of its parent's predicate, '1' otherwise (None at the
        sentinel root)."""
        if self._smt_val is None:
            return None
        return self.tree.no_bit if self._smt_val.decl(
        ).name() == 'not' else self.tree.yes_bit

    def pattern(self):
        """Return (and cache) the bit-string of decisions leading from
        the root down to this node."""
        if self._pattern is not None:
            return self._pattern
        path = self.get_path_to_root()
        # The sentinel root carries no predicate and has no parent.
        assert path[0]._smt_val is None
        assert path[0].parent is None
        self._pattern = ''.join([p.bit() for p in path[1:]])
        return self._pattern
# + [markdown] slideshow={"slide_type": "subslide"}
# Each node knows how to add a new child, and get the path to root, which is cached.
#
# When we add a child to the root node, it means that there was a decision in the current node, and the child is the result of the decision. Hence, to get the decision being made, we simplify the `smt` expression, and check if it starts with `not`. If it does not start with a `not`, we interpret that as the current decision in the node. If it starts with `not`, then we interpret that `not(smt)` was the expression being evaluated in the current node.
#
# We know the first decision made only after going through the program at least once. As soon as the program is traversed, we update the parent with the decision that resulted in the current child.
# + slideshow={"slide_type": "subslide"}
class TraceNode(TraceNode):
    def add_child(self, elt, i, cc, string):
        """Add (or merge) a child for predicate `elt` at trace step `i`.

        `cc` is the ConcolicTracer the predicate came from; `string` is the
        input that produced it.  A `z3.BoolVal(True)` sentinel marks the end
        of a trace: it completes the current path instead of adding a node.
        Returns the (possibly pre-existing) child node, or None for the
        sentinel.
        """
        if elt == z3.BoolVal(True):
            # No more exploration here. Simply unregister the leaves of *this*
            # node and possibly register them in completed nodes, and exit
            for bit in [self.tree.no_bit, self.tree.yes_bit]:
                child_leaf = self.pattern() + ':' + bit
                if child_leaf in self.tree.leaves:
                    del self.tree.leaves[child_leaf]
            self.tree.completed_paths[self.pattern()] = self
            return None
        child_node = TraceNode(smt_val=elt,
                               parent=self,
                               info={'num': i, 'cc': cc, 'string': string})
        child_node.tree = self.tree
        # bit represents the path that child took from this node.
        bit = child_node.bit()
        # First we update our own smt decision: the predicate evaluated *at*
        # this node is the un-negated form of what the child carries.
        if bit == self.tree.yes_bit:  # yes, which means the smt can be used as is
            if self.smt is not None:
                assert self.smt == child_node._smt_val
            else:
                self.smt = child_node._smt_val
        # no, which means we have to negate it to get the decision.
        elif bit == self.tree.no_bit:
            smt_ = z3.simplify(z3.Not(child_node._smt_val))
            if self.smt is not None:
                assert smt_ == self.smt
            else:
                self.smt = smt_
        else:
            assert False
        if bit in self.children:
            # A previous trace already took this branch; reuse the existing
            # child so that shared path prefixes collapse into one node.
            child_node = self.children[bit]
        else:
            self.children[bit] = child_node
        # At this point, we have to unregister any leaves that correspond to this child from tree,
        # and add the plausible children of this child as leaves to be explored. Note that
        # if it is the end (z3.True), we do not have any more children.
        child_leaf = self.pattern() + ':' + bit
        if child_leaf in self.tree.leaves:
            del self.tree.leaves[child_leaf]
        pprefix = child_node.pattern() + ':'
        # Plausible children.
        for bit in [self.tree.no_bit, self.tree.yes_bit]:
            self.tree.leaves[pprefix +
                             bit] = PlausibleChild(child_node, bit, self.tree)
        return child_node
# + [markdown] slideshow={"slide_type": "subslide"}
# The path to root from any node is computed once and cached.
# + slideshow={"slide_type": "fragment"}
class TraceNode(TraceNode):
    def get_path_to_root(self):
        """Return the node list from the root down to self (inclusive), cached."""
        if self.path is None:
            if self.parent is None:
                self.path = [self]
            else:
                self.path = self.parent.get_path_to_root() + [self]
        return self.path
# + [markdown] slideshow={"slide_type": "fragment"}
# The `SimpleConcolicFuzzer` is defined with the `Fuzzer` interface.
# + slideshow={"slide_type": "subslide"}
class SimpleConcolicFuzzer(Fuzzer):
    """Concolic fuzzer that flips path predicates near previously seen traces."""

    def __init__(self):
        """Start with an empty trace tree and a bound on solver retries."""
        self.last = None
        self.last_idx = None
        self.max_tries = 1000  # solver attempts per fuzz() call
        self.ct = TraceTree()
# + [markdown] slideshow={"slide_type": "fragment"}
# The `add_trace()` method we defined earlier is used as follows. First, we use a random string to generate the concolic trace.
# + slideshow={"slide_type": "fragment"}
# Run the instrumented subject once to collect an initial concolic trace;
# ExpectTimeout bounds the run to 2 seconds in case the subject hangs.
with ExpectTimeout(2):
    with ConcolicTracer() as _:
        _[hang_if_no_space]('ab d')
# + [markdown] slideshow={"slide_type": "fragment"}
# Next, we initialize and add this trace to the fuzzer.
# + slideshow={"slide_type": "subslide"}
_.path
# + slideshow={"slide_type": "fragment"}
scf = SimpleConcolicFuzzer()
scf.ct.add_trace(_, 'ab d')
# + [markdown] slideshow={"slide_type": "fragment"}
# The path we added above can be obtained from the `TraceTree` as below.
# + slideshow={"slide_type": "fragment"}
[i._smt_val for i in scf.ct.root.get_children()[0].get_children()[
    0].get_children()[1].get_path_to_root()]
# + [markdown] slideshow={"slide_type": "fragment"}
# Below are the registered leaves that we can explore at this moment.
# + slideshow={"slide_type": "subslide"}
for key in scf.ct.leaves:
    print(key, '\t', scf.ct.leaves[key])
# + [markdown] slideshow={"slide_type": "fragment"}
# Next, we need a way to visualize the constructed tree.
# + slideshow={"slide_type": "skip"}
from GrammarFuzzer import display_tree
# + slideshow={"slide_type": "fragment"}
# Maps display node ids to an edge counter; shared by the callbacks below.
TREE_NODES = {}
# + slideshow={"slide_type": "subslide"}
def my_extract_node(tnode, id):
    """Convert a (key, node, parent) triple into the (label, children,
    annotation) form expected by display_tree()."""
    key, node, parent = tnode
    if node is None:
        # An unexplored (plausible) child.
        return '?', [], ''
    if node.smt is None:
        # A completed path; show the input string that reached it.
        return '* %s' % node.info.get('string', ''), [], ''
    left, right = node.get_children()
    TREE_NODES[id] = 0  # reset the edge counter consumed by my_edge_attr()
    label = "(%s) %s" % (node.info.get('num'), node.smt)
    return label, [('0', left, node), ('1', right, node)], ''
# + slideshow={"slide_type": "subslide"}
def my_edge_attr(dot, start_node, stop_node):
    """Draw the next outgoing edge of `start_node`.

    Edges are emitted in order: the first ('no') edge is red and labeled '0',
    the second ('yes') edge is blue and labeled '1'.
    """
    first_edge = TREE_NODES[start_node] == 0
    color = 'red' if first_edge else 'blue'
    label = '0' if first_edge else '1'
    TREE_NODES[start_node] = 1 if first_edge else 2
    dot.edge(repr(start_node), repr(stop_node), color=color, label=label)
# + slideshow={"slide_type": "fragment"}
def display_trace_tree(root):
    """Render the trace tree rooted at `root` via display_tree()."""
    TREE_NODES.clear()
    root_tnode = ('', root, None)
    return display_tree(root_tnode,
                        extract_node=my_extract_node,
                        edge_attr=my_edge_attr)
# + slideshow={"slide_type": "fragment"}
display_trace_tree(scf.ct.root)
# + [markdown] slideshow={"slide_type": "subslide"}
# For example, the pattern `00:0` corresponds to the following predicates.
# + slideshow={"slide_type": "fragment"}
# Leaf keys have the form '<decision pattern>:<next bit>'.
scf.ct.leaves['00:0']
# + slideshow={"slide_type": "fragment"}
scf.ct.leaves['00:0'].path_expression()
# + [markdown] slideshow={"slide_type": "fragment"}
# Similarly the pattern `:1` corresponds to the following predicates.
# + slideshow={"slide_type": "fragment"}
scf.ct.leaves[':1']
# + slideshow={"slide_type": "fragment"}
scf.ct.leaves[':1'].path_expression()
# + [markdown] slideshow={"slide_type": "subslide"}
# We can now generate the next input by looking for a leaf that is incompletely explored. The idea is to collect all leaf nodes, and choose one at random.
# + slideshow={"slide_type": "fragment"}
class SimpleConcolicFuzzer(SimpleConcolicFuzzer):
    def add_trace(self, trace, s):
        """Record the concolic trace obtained for input string `s`."""
        self.ct.add_trace(trace, s)

    def next_choice(self):
        """Pick an unexplored leaf of the trace tree uniformly at random."""
        key = random.choice(list(self.ct.leaves))
        return self.ct.leaves[key]
# + [markdown] slideshow={"slide_type": "fragment"}
# We use the `next_choice()` as follows.
# + slideshow={"slide_type": "subslide"}
# Seed a fresh fuzzer with the trace and pick a random unexplored leaf.
scf = SimpleConcolicFuzzer()
scf.add_trace(_, 'ab d')
node = scf.next_choice()
# + slideshow={"slide_type": "fragment"}
node
# + slideshow={"slide_type": "fragment"}
node.path_expression()
# + [markdown] slideshow={"slide_type": "fragment"}
# We get the next choice for exploration, and expand the path expression, and return it together with a context using `get_newpath()`
# + slideshow={"slide_type": "subslide"}
class SimpleConcolicFuzzer(SimpleConcolicFuzzer):
    def get_newpath(self):
        """Return (path predicates, tracer context) for a random unexplored leaf."""
        leaf = self.next_choice()
        return leaf.path_expression(), leaf.cc()
# + slideshow={"slide_type": "fragment"}
# get_newpath() yields both the predicate list and the originating tracer.
scf = SimpleConcolicFuzzer()
scf.add_trace(_, 'abcd')
path, cc = scf.get_newpath()
path
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Fuzz
# The `fuzz()` method simply generates new lists of predicates, and solves them to produce new inputs.
# + slideshow={"slide_type": "subslide"}
class SimpleConcolicFuzzer(SimpleConcolicFuzzer):
    def fuzz(self):
        """Return the next input to try, or None if none could be generated.

        Picks a random unexplored leaf, asks the SMT solver for an input
        reaching it, and decodes the solver model into a Python value.
        """
        if self.ct.root.children == {}:
            # a random value to generate comparisons. This would be
            # the initial value around which we explore with concolic
            # fuzzing.
            return ' '
        for i in range(self.max_tries):
            path, last = self.get_newpath()
            s, v = zeval_smt(path, last, log=False)
            if s != 'sat':
                # Unsatisfiable leaf; leave it registered and try another.
                continue
            # Take the (single) variable assignment from the model.
            val = list(v.values())[0]
            elt, typ = val
            if len(elt) == 2 and elt[0] == '-':  # negative numbers are [-, x]
                elt = '-%s' % elt[1]
            # make sure that we do not retry the tried paths
            # The tracer we add here is incomplete. This gets updated when
            # the add_trace is called from the concolic fuzzer context.
            if typ == 'Int':
                return int(elt)
            elif typ == 'String':
                return elt
            return elt
        return None
# + [markdown] slideshow={"slide_type": "subslide"}
# We now fuzz.
# + slideshow={"slide_type": "fragment"}
# With no traces added yet, fuzz() returns the seed input ' '.
scf = SimpleConcolicFuzzer()
scf.fuzz()
# + [markdown] slideshow={"slide_type": "fragment"}
# Here is an example program `cgi_decode()`. Note that we will not be able to use the `cgi_decode()` from the `Coverage` chapter directly as the hash lookups in `hex_values` can not be used for transferring constraints yet.
# + slideshow={"slide_type": "subslide"}
def cgi_decode(s):
    """Decode the CGI-encoded string `s`:
    * replace "+" by " "
    * replace "%xx" by the character with hex number xx.
    Return the decoded string. Raise `ValueError` for invalid inputs."""
    # Mapping of hex digits to their integer values
    hex_values = {
        '0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
        '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,
        'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15,
        'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15,
    }
    t = ''
    i = 0
    while i < len(s):
        c = s[i]
        if c == '+':
            t += ' '
        elif c == '%':
            # A '%' escape needs two hex digits after it; a truncated escape
            # is an invalid encoding.  (Previously this raised IndexError,
            # contradicting the documented ValueError contract.)
            if i + 2 >= len(s):
                raise ValueError("Invalid encoding")
            digit_high, digit_low = s[i + 1], s[i + 2]
            i = i + 2
            # Linear scans instead of dict lookups: hash lookups cannot
            # (yet) transfer constraints to the concolic tracer.
            found = 0
            v = 0
            for key in hex_values:
                if key == digit_high:
                    found = found + 1
                    v = hex_values[key] * 16
                    break
            for key in hex_values:
                if key == digit_low:
                    found = found + 1
                    v = v + hex_values[key]
                    break
            if found == 2:
                if v >= 128:
                    # z3.StringVal(urllib.parse.unquote('%80')) <-- bug in z3
                    raise ValueError("Invalid encoding")
                t = t + chr(v)
            else:
                raise ValueError("Invalid encoding")
        else:
            t = t + c
        i = i + 1
    return t
# + slideshow={"slide_type": "subslide"}
# Trace one concrete run of cgi_decode to seed the concolic fuzzer.
with ConcolicTracer() as _:
    _[cgi_decode]('a+c')
# + slideshow={"slide_type": "fragment"}
scf = SimpleConcolicFuzzer()
scf.add_trace(_, 'a+c')
# + slideshow={"slide_type": "fragment"}
display_trace_tree(scf.ct.root)
# + [markdown] slideshow={"slide_type": "fragment"}
# So, we fuzz to get a new path.
# + slideshow={"slide_type": "fragment"}
v = scf.fuzz()
v
# + [markdown] slideshow={"slide_type": "fragment"}
# We can now obtain the new trace as before.
# + slideshow={"slide_type": "fragment"}
with ExpectError():
    with ConcolicTracer() as _:
        _[cgi_decode](v)
# + [markdown] slideshow={"slide_type": "fragment"}
# The new trace is added to our fuzzer using `add_trace()`
# + slideshow={"slide_type": "subslide"}
scf.add_trace(_, v)
# + [markdown] slideshow={"slide_type": "fragment"}
# The updated binary tree is as follows. Note the difference between the child nodes of `Root` node.
# + slideshow={"slide_type": "fragment"}
display_trace_tree(scf.ct.root)
# + [markdown] slideshow={"slide_type": "fragment"}
# A complete fuzzer run is as follows
# + slideshow={"slide_type": "subslide"}
# Each iteration solves for a new input, runs it under the tracer,
# and feeds the resulting trace back into the tree.
scf = SimpleConcolicFuzzer()
for i in range(10):
    v = scf.fuzz()
    print(repr(v))
    if v is None:
        continue
    with ConcolicTracer() as _:
        with ExpectError():
            # z3.StringVal(urllib.parse.unquote('%80')) <-- bug in z3
            _[cgi_decode](v)
    scf.add_trace(_, v)
# + slideshow={"slide_type": "subslide"}
display_trace_tree(scf.ct.root)
# + [markdown] slideshow={"slide_type": "subslide"}
# **Note.** Our concolic tracer is limited in that it does not track changes in the string length. This leads it to treat every string with same prefix as the same string.
# + [markdown] slideshow={"slide_type": "fragment"}
# The `SimpleConcolicFuzzer` is reasonably efficient at exploring paths near the path followed by a given sample input. However, it is not very intelligent when it comes to choosing which paths to follow. We look at another fuzzer that lifts the predicates obtained to the grammar and achieves better fuzzing.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### ConcolicGrammarFuzzer
#
# The concolic framework can be used directly in fuzzing. First, we extend our `GrammarFuzzer` with a helper method `tree_to_string()` such that we can retrieve the derivation tree of the fuzz output. We also define `prune_tree()` and `coalesce()` to reduce the depth of sub trees. These methods accept a list of tokens types such that a node belonging to the token type gets converted from a tree to a leaf node by calling `tree_to_string()`.
# + slideshow={"slide_type": "skip"}
from InformationFlow import INVENTORY_GRAMMAR, SQLException
# + slideshow={"slide_type": "skip"}
from GrammarFuzzer import GrammarFuzzer
# + slideshow={"slide_type": "subslide"}
class ConcolicGrammarFuzzer(GrammarFuzzer):
    """GrammarFuzzer that exposes its derivation tree and can prune it."""

    def tree_to_string(self, tree):
        """Concatenate the terminal symbols of `tree` into the derived string."""
        symbol, children, *_ = tree
        if children:
            return ''.join(self.tree_to_string(child) for child in children)
        # A nonterminal leaf contributes nothing; a terminal contributes itself.
        return '' if symbol in self.grammar else symbol

    def prune_tree(self, tree, tokens):
        """Collapse every subtree whose root is in `tokens` into a single leaf."""
        name, children = tree
        children = self.coalesce(children)
        if name in tokens:
            return (name, [(self.tree_to_string(tree), [])])
        return (name, [self.prune_tree(child, tokens) for child in children])

    def coalesce(self, children):
        """Merge runs of adjacent terminal children into single leaf nodes."""
        merged, pending = [], ''
        for cname, cchildren in children:
            if cname not in self.grammar:
                pending += cname
                continue
            if pending:
                merged.append((pending, []))
                pending = ''
            merged.append((cname, cchildren))
        if pending:
            merged.append((pending, []))
        return merged
# + [markdown] slideshow={"slide_type": "subslide"}
# We can now use the fuzzer to produce inputs for our DB.
# + slideshow={"slide_type": "fragment"}
# Keep fuzzing until we get a 'select' query to trace.
tgf = ConcolicGrammarFuzzer(INVENTORY_GRAMMAR)
while True:
    qtree = tgf.fuzz_tree()
    query = str(tgf.tree_to_string(qtree))
    if query.startswith('select'):
        break
# + slideshow={"slide_type": "skip"}
from ExpectError import ExpectError
# + slideshow={"slide_type": "subslide"}
with ExpectError():
    print(repr(query))
    with ConcolicTracer() as _:
        res = _[db_select](str(query))
    print(repr(res))
# + [markdown] slideshow={"slide_type": "subslide"}
# Our fuzzer returns with an exception. It is unable to find the specified table. Let us examine the predicates it encountered.
# + slideshow={"slide_type": "subslide"}
for i, p in enumerate(_.path):
    print(i, p)
# + [markdown] slideshow={"slide_type": "subslide"}
# Note that we can obtain constraints that are not present in the grammar from using the `ConcolicTracer`. In particular, see how we are able to obtain the condition that the table needs to be `inventory` (Predicate 11) for the fuzzing to succeed.
# + [markdown] slideshow={"slide_type": "fragment"}
# How do we lift these to the grammar? and in particular how do we do it automatically? One option we have is to simply switch the last predicate obtained. In our case, the last predicate is (11). Can we simply invert the predicate and solve it again?
# + slideshow={"slide_type": "fragment"}
# Negate only the last predicate, keeping the rest of the path intact.
new_path = _.path[0:-1] + [z3.Not(_.path[-1])]
# + slideshow={"slide_type": "fragment"}
new_ = ConcolicTracer((_.decls, new_path))
new_.fn = _.fn
new_.fn_args = _.fn_args
# + slideshow={"slide_type": "fragment"}
new_.zeval()
# + [markdown] slideshow={"slide_type": "subslide"}
# Indeed, this will not work as the string lengths being compared to are different.
# + slideshow={"slide_type": "fragment"}
print(_.path[-1])
z3.solve(z3.Not(_.path[-1]))
# + [markdown] slideshow={"slide_type": "fragment"}
# A better idea is to investigate what _string_ comparisons are being made, and associate that with the corresponding nodes in the grammar. Let us examine our derivation tree (pruned to avoid recursive structures, and to focus on important parts).
# + slideshow={"slide_type": "skip"}
from GrammarFuzzer import display_tree
# + slideshow={"slide_type": "subslide"}
prune_tokens = [
    '<value>', '<table>', '<column>', '<literals>', '<exprs>', '<bexpr>'
]
dt = tgf.prune_tree(qtree, prune_tokens)
display_tree(dt)
# + [markdown] slideshow={"slide_type": "fragment"}
# Can we identify which part of the input was supplied by which part of the grammar? We define `span()` that can recover this information from the derivation tree. For a given node, let us assume that the start point is known. Then, for processing the children, we proceed as follows: We choose one child at a time from left to right, and compute the length of the child. The length of the children before the current child in addition to our starting point gives the starting point of the current child. The end point for each node is simply the end point of its last children (or the length of its node if it is a leaf).
# + slideshow={"slide_type": "skip"}
from GrammarFuzzer import START_SYMBOL
# + slideshow={"slide_type": "subslide"}
def span(node, g, node_start=0):
    """Annotate `node` with [start, end) character positions in the derived string.

    Returns (spans, annotated_node), where spans maps each nonterminal of
    grammar `g` (except the start symbol) to (start, length), and
    annotated_node is (key, start, end, annotated_children).
    """
    key, children = node
    spans = {}
    annotated = []
    cursor = node_start
    for child in children:
        child_spans, child_node = span(child, g, cursor)
        annotated.append(child_node)
        cursor = child_node[2]  # the child's end position
        spans.update(child_spans)
    # A leaf's extent is the length of its own (terminal) text.
    node_end = cursor if children else node_start + len(key)
    if key in g and key != START_SYMBOL:
        spans[key] = (node_start, node_end - node_start)
    return spans, (key, node_start, node_end, annotated)
# + [markdown] slideshow={"slide_type": "fragment"}
# We use it as follows:
# + slideshow={"slide_type": "subslide"}
span_hm, _n = span(dt, INVENTORY_GRAMMAR)
# + slideshow={"slide_type": "fragment"}
span_hm
# + [markdown] slideshow={"slide_type": "fragment"}
# We can check if we got the right values as follows.
# + slideshow={"slide_type": "fragment"}
# Each nonterminal's span should slice out exactly its derived text.
print("query:", query)
for k in span_hm:
    start, l = span_hm[k]
    print(k, query[start:start + l])
# + [markdown] slideshow={"slide_type": "subslide"}
# Next, we need to obtain all the comparisons made in each predicate. For that, we define two helper functions. The first is `unwrap_substrings()` that translates multiple calls to `z3.SubString` and returns the start, and length of the given z3 string expression.
# + slideshow={"slide_type": "fragment"}
def unwrap_substrings(s):
    """Resolve nested z3 `str.substr` applications into one (offset, length)
    pair relative to the original string."""
    assert s.decl().name() == 'str.substr'
    base, start, length = s.children()
    offset = start.as_long()
    span_len = length.as_long()
    if base.decl().name() == 'str.substr':
        # A nested substring's offset is relative to its parent's start.
        parent_offset, _parent_len = unwrap_substrings(base)
        offset += parent_offset
    return (offset, span_len)
# + [markdown] slideshow={"slide_type": "subslide"}
# We define `traverse_z3()` that traverses a given z3 string expression, and collects all direct string comparisons to a substring of the original argument.
# + slideshow={"slide_type": "subslide"}
def traverse_z3(p, hm):
    """Walk predicate `p` and record, in `hm`, every comparison of a string
    literal against a substring of the original input.

    `hm` maps the compared literal to the (offset, length) of the substring,
    as recovered by unwrap_substrings().  Returns `p` unchanged.
    """
    def z3_as_string(v):
        # Extract the Python string from a z3 string literal.
        return v.as_string()
    n = p.decl().name()
    if n == 'not':
        # Negation does not change which substring is compared; recurse.
        return traverse_z3(p.children()[0], hm)
    elif n == '=':
        i, j = p.children()
        if isinstance(i, (int, z3.IntNumRef)):
            return traverse_z3(j, hm)
        elif isinstance(j, (int, z3.IntNumRef)):
            return traverse_z3(i, hm)
        else:
            if i.is_string() and j.is_string():
                # Record literal-vs-substring comparisons in either order;
                # only substrings with constant offset/length are usable.
                if i.is_string_value():
                    cs, frm, l = j.children()
                    if (isinstance(frm, z3.IntNumRef)
                            and isinstance(l, z3.IntNumRef)):
                        hm[z3_as_string(i)] = unwrap_substrings(j)
                elif j.is_string_value():
                    cs, frm, l = i.children()
                    if (isinstance(frm, z3.IntNumRef)
                            and isinstance(l, z3.IntNumRef)):
                        hm[z3_as_string(j)] = unwrap_substrings(i)
            else:
                assert False  # for now
    elif n == '<' or n == '>':
        i, j = p.children()
        if isinstance(i, (int, z3.IntNumRef)):
            return traverse_z3(j, hm)
        elif isinstance(j, (int, z3.IntNumRef)):
            return traverse_z3(i, hm)
        else:
            # NOTE(review): two non-constant operands are not handled yet.
            assert False
    return p
# + slideshow={"slide_type": "subslide"}
# Collect literal-vs-substring comparisons from every path predicate.
comparisons = {}
for p in _.path:
    traverse_z3(p, comparisons)
comparisons
# + [markdown] slideshow={"slide_type": "fragment"}
# All that we need now is to declare string variables that match the substrings in `comparisons`, and solve for them for each item in the path. For that, we define `find_alternatives()`.
# + slideshow={"slide_type": "subslide"}
def find_alternatives(spans, cmp):
    """Map each grammar key in `spans` to the set of compared literals whose
    character range covers exactly that key's span."""
    alts = {}
    for key, (start, length) in spans.items():
        covered = set(range(start, start + length))
        for candidate, (cstart, clength) in cmp.items():
            # if covered.issubset(...): <- ignoring subsets for now.
            if covered == set(range(cstart, cstart + clength)):
                alts.setdefault(key, set()).add(candidate)
    return alts
# + [markdown] slideshow={"slide_type": "fragment"}
# We use it as follows.
# + slideshow={"slide_type": "subslide"}
alternatives = find_alternatives(span_hm, comparisons)
alternatives
# + [markdown] slideshow={"slide_type": "fragment"}
# So, we have our alternatives for each key in the grammar. We can now update our grammar as follows.
# + slideshow={"slide_type": "fragment"}
INVENTORY_GRAMMAR_NEW = dict(INVENTORY_GRAMMAR)
# + slideshow={"slide_type": "fragment"}
# Append the discovered literals as extra expansions (keeping the originals).
for k in alternatives:
    INVENTORY_GRAMMAR_NEW[k] = INVENTORY_GRAMMAR_NEW[k] + list(alternatives[k])
# + [markdown] slideshow={"slide_type": "fragment"}
# We made a choice here. We could have completely overwritten the definition of `<table>` . Instead, we added our new alternatives to the existing definition. This way, our fuzzer will also attempt other values for `<table>` once in a while.
# + slideshow={"slide_type": "fragment"}
INVENTORY_GRAMMAR_NEW['<table>']
# + [markdown] slideshow={"slide_type": "fragment"}
# Let us try fuzzing with our new grammar.
# + slideshow={"slide_type": "subslide"}
cgf = ConcolicGrammarFuzzer(INVENTORY_GRAMMAR_NEW)
# + slideshow={"slide_type": "subslide"}
# Fuzz with the augmented grammar; ExpectError/try keep the loop running
# past failing queries.
for i in range(10):
    qtree = cgf.fuzz_tree()
    query = cgf.tree_to_string(qtree)
    print(query)
    with ExpectError():
        try:
            with ConcolicTracer() as _:
                res = _[db_select](query)
                print(repr(res))
        except SQLException as e:
            print(e)
    print()
# + [markdown] slideshow={"slide_type": "subslide"}
# That is, we were able to reach the dangerous method `my_eval()`.
# In effect, what we have done is to lift parts of predicates to the grammar. The new grammar can generate inputs that reach deeper into the program than before. Note that we have only handled the equality predicate. One can also lift the '<' and '>' comparison operators to the grammar if required.
#
# Compare the output of our fuzzer to the original `GrammarFuzzer` below.
# + slideshow={"slide_type": "subslide"}
gf = GrammarFuzzer(INVENTORY_GRAMMAR)
for i in range(10):
    query = gf.fuzz()
    print(query)
    with ExpectError():
        try:
            res = db_select(query)
            print(repr(res))
        except SQLException as e:
            print(e)
    print()
# + [markdown] slideshow={"slide_type": "subslide"}
# As can be seen, the original grammar fuzzer is unable to proceed beyond the table verification.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### All together
#
# We implement these methods in `ConcolicGrammarFuzzer`. The method `update_grammar()` allows `ConcolicGrammarFuzzer` to collect feedback from concolic fuzzing, and update the grammar used for fuzzing accordingly.
# + slideshow={"slide_type": "subslide"}
class ConcolicGrammarFuzzer(ConcolicGrammarFuzzer):
    def prune_tokens(self, tokens):
        """Set the token types whose subtrees prune_tree() collapses.

        NOTE(review): this assigns an instance attribute with the same name
        as the method, shadowing the method on the instance after the first
        call.  It works because the method is only called once per fuzzer,
        but renaming the attribute would be safer.
        """
        self.prune_tokens = tokens
    def update_grammar(self, trace):
        """Lift string comparisons observed in `trace` into the grammar.

        Collects literal-vs-substring comparisons from the trace's path
        predicates, matches them against the span table computed by fuzz(),
        and adds each matching literal as an alternative expansion.
        """
        self.comparisons = {}
        for p in trace.path:
            traverse_z3(p, self.comparisons)
        alternatives = find_alternatives(self.span_range, self.comparisons)
        if self.log:
            print('Alternatives:', alternatives, 'Span:', self.span_range)
        new_grammar = dict(self.grammar)
        for k in alternatives:
            # set() deduplicates expansions already present in the grammar.
            new_grammar[k] = list(set(new_grammar[k] + list(alternatives[k])))
        self.grammar = new_grammar
# + [markdown] slideshow={"slide_type": "subslide"}
# The `fuzz()` method simply generates the derivation tree, computes the span range, and returns the string generated from the derivation tree.
# + slideshow={"slide_type": "fragment"}
class ConcolicGrammarFuzzer(ConcolicGrammarFuzzer):
    def fuzz(self):
        """Produce one input string; also cache the pruned derivation tree
        and the span table consumed by update_grammar()."""
        tree = self.fuzz_tree()
        self.pruned_tree = self.prune_tree(tree, self.prune_tokens)
        produced = self.tree_to_string(tree)
        self.span_range, _n = span(self.pruned_tree, self.grammar)
        return produced
# + [markdown] slideshow={"slide_type": "fragment"}
# To ensure that our approach works, let us update our tables slightly.
# + slideshow={"slide_type": "fragment"}
# Rename the 'inventory' table and add more tables, so the fuzzer has to
# rediscover valid table names from concolic feedback.
inventory = db.db.pop('inventory', None)
# + slideshow={"slide_type": "subslide"}
db.db['vehicles'] = inventory
db.db['months'] = ({
    'month': int,
    'name': str
}, [{
    'month': i + 1,
    'name': m
} for i, m in enumerate([
    'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct',
    'nov', 'dec'
])])
db.db
# + [markdown] slideshow={"slide_type": "subslide"}
# The `ConcolicGrammarFuzzer` is used as follows.
# + slideshow={"slide_type": "subslide"}
cgf = ConcolicGrammarFuzzer(INVENTORY_GRAMMAR)
cgf.prune_tokens(prune_tokens)
# Each round feeds the trace back via update_grammar(), so later rounds
# can use table names discovered in earlier runs.
for i in range(10):
    query = cgf.fuzz()
    print(query)
    with ConcolicTracer() as _:
        with ExpectError():
            try:
                res = _[db_select](query)
                print(repr(res))
            except SQLException as e:
                print(e)
    cgf.update_grammar(_)
    print()
# + [markdown] slideshow={"slide_type": "subslide"}
# As can be seen, the fuzzer starts with no knowledge of the tables `vehicles`, `months` and `years`, but identifies it from the concolic execution, and lifts it to the grammar. This allows us to improve the effectiveness of fuzzing.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Limitations
#
# As with dynamic taint analysis, implicit control flow can obscure the predicates encountered during concolic execution. However, this limitation could be overcome to some extent by wrapping any constants in the source with their respective proxy objects. Similarly, calls to internal C functions can cause the symbolic information to be discarded, and only partial information may be obtained.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Synopsis
# + [markdown] slideshow={"slide_type": "fragment"}
# This chapter defines two main classes: `SimpleConcolicFuzzer` and `ConcolicGrammarFuzzer`. The `SimpleConcolicFuzzer` first uses a sample input to collect predicates encountered. The fuzzer then negates random predicates to generate new input constraints. These, when solved, produce inputs that explore paths that are close to the original path. It can be used as follows.
# + [markdown] slideshow={"slide_type": "fragment"}
# We first obtain the constraints using `ConcolicTracer`.
# + slideshow={"slide_type": "fragment"}
# Obtain constraints from a sample run of cgi_decode.
with ConcolicTracer() as _:
    _[cgi_decode]('a%20d')
# + [markdown] slideshow={"slide_type": "fragment"}
# These constraints are added to the concolic fuzzer as follows:
# + slideshow={"slide_type": "fragment"}
scf = SimpleConcolicFuzzer()
scf.add_trace(_, 'a%20d')
# + [markdown] slideshow={"slide_type": "fragment"}
# The concolic fuzzer then uses the constraints added to guide its fuzzing as follows:
# + slideshow={"slide_type": "subslide"}
scf = SimpleConcolicFuzzer()
for i in range(10):
    v = scf.fuzz()
    if v is None:
        break
    print(repr(v))
    with ExpectError():
        with ConcolicTracer() as _:
            _[cgi_decode](v)
    scf.add_trace(_, v)
# + [markdown] slideshow={"slide_type": "subslide"}
# The `SimpleConcolicFuzzer` simply explores all paths near the original path traversed by the sample input. It uses a simple mechanism to explore the paths that are near the paths that it knows about, and other than code paths, knows nothing about the input.
# The `ConcolicGrammarFuzzer` on the other hand, knows about the input grammar, and can collect feedback from the subject under fuzzing. It can lift some of the constraints encountered to the grammar, enabling deeper fuzzing. It is used as follows:
# + slideshow={"slide_type": "skip"}
from InformationFlow import INVENTORY_GRAMMAR, SQLException
# + slideshow={"slide_type": "subslide"}
# Grammar-based concolic fuzzing with per-round grammar updates.
cgf = ConcolicGrammarFuzzer(INVENTORY_GRAMMAR)
cgf.prune_tokens(prune_tokens)
for i in range(10):
    query = cgf.fuzz()
    print(query)
    with ConcolicTracer() as _:
        with ExpectError():
            try:
                res = _[db_select](query)
                print(repr(res))
            except SQLException as e:
                print(e)
    cgf.update_grammar(_)
    print()
# + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ## Lessons Learned
#
# * Concolic execution can often provide more information than taint analysis with respect to the program behavior. However, this comes at a much larger runtime cost. Hence, unlike taint analysis, real-time analysis is often not possible.
#
# * Similar to taint analysis, concolic execution also suffers from limitations such as indirect control flow and internal function calls.
#
# * Predicates from concolic execution can be used in conjunction with fuzzing to provide an even more robust indication of incorrect behavior than taints, and can be used to create grammars that are better at producing valid inputs.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ## Next Steps
#
# A costlier but stronger alternative to concolic fuzzing is [symbolic fuzzing](SymbolicFuzzer.ipynb). Similarly, [search based fuzzing](SearchBasedFuzzer.ipynb) can often provide a cheaper exploration strategy than relying on SMT solvers to provide inputs slightly different from the current path.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Background
#
# The technique of concolic execution was originally used to inform and expand the scope of _symbolic execution_ \cite{king1976symbolic}, a static analysis technique for program analysis. Larson et al. \cite{Larson2003} were the first to use the concolic execution technique.
#
# The idea of using proxy objects for collecting constraints was pioneered by Cadar et al. \cite{cadar2005execution}. The concolic execution technique for Python programs used in this chapter was pioneered by PeerCheck \cite{PeerCheck}, and Python Error Finder \cite{Barsotti2018}.
# + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"}
# ## Exercises
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Exercise 1: Implement a Concolic Float Proxy Class
#
# + [markdown] slideshow={"slide_type": "fragment"}
# While implementing the `zint` binary operators, we asserted that the results were `int`. However, that need not be the case. For example, division can result in `float`. Hence, we need proxy objects for `float`. Can you implement a similar proxy object for `float` and fix the `zint` binary operator definition?
# + [markdown] slideshow={"slide_type": "fragment"}
# __Solution.__ The solution is as follows.
# + [markdown] slideshow={"slide_type": "fragment"}
# As in the case of `zint`, we first open up `zfloat` for extension.
# + slideshow={"slide_type": "fragment"}
class zfloat(float):
    """Concolic proxy for float: a float subclass that will also carry a
    z3 expression (attached by the extensions below)."""

    def __new__(cls, context, zn, v, *args, **kw):
        # Only the concrete value takes part in float construction;
        # `context` and `zn` are consumed by __init__ in a later extension.
        return super().__new__(cls, v, *args, **kw)
# + [markdown] slideshow={"slide_type": "fragment"}
# We then implement the initialization methods.
# + slideshow={"slide_type": "subslide"}
class zfloat(zfloat):
    @classmethod
    def create(cls, context, zn, v=None):
        """Register a named z3 Real in `context` and return its proxy."""
        return zproxy_create(cls, 'Real', z3.Real, context, zn, v)

    def __init__(self, context, z, v=None):
        """Attach the symbolic expression `z` and concrete value `v`."""
        self.context = context
        self.z = z
        self.v = v
# + [markdown] slideshow={"slide_type": "fragment"}
# The helper for when one of the arguments in a binary operation is not `float`.
# + slideshow={"slide_type": "fragment"}
class zfloat(zfloat):
    def _zv(self, o):
        """Return (symbolic, concrete) for `o`, wrapping plain numbers."""
        if isinstance(o, zfloat):
            return (o.z, o.v)
        return (z3.RealVal(o), o)
# + [markdown] slideshow={"slide_type": "fragment"}
# Coerce `float` into bool value for use in conditionals.
# + slideshow={"slide_type": "subslide"}
class zfloat(zfloat):
    def __bool__(self):
        # force registering boolean condition: `self != 0.0` goes through the
        # proxy comparison installed below, and evaluating it in `if` records
        # the branch in the concolic path condition.
        if self != 0.0:
            return True
        return False
# + [markdown] slideshow={"slide_type": "fragment"}
# Define the common proxy method for comparison methods
# + slideshow={"slide_type": "fragment"}
def make_float_bool_wrapper(fname, fun, zfun):
    """Build a proxy comparison method: apply `fun` to the concrete values
    and `zfun` to the symbolic ones, returning a zbool proxy."""
    def proxy(self, other):
        other_z, other_v = self._zv(other)
        sym = zfun(self.z, other_z)
        conc = fun(self.v, other_v)
        return zbool(self.context, sym, conc)
    return proxy
# + [markdown] slideshow={"slide_type": "subslide"}
# We apply the comparison methods on the defined `zfloat` class.
# + slideshow={"slide_type": "fragment"}
# Comparison dunders to lift onto zfloat.  The reflected variants are not
# real float methods and are therefore left commented out.
FLOAT_BOOL_OPS = [
    '__eq__',
    # '__req__',
    '__ne__',
    # '__rne__',
    '__gt__',
    '__lt__',
    '__le__',
    '__ge__',
]
# + slideshow={"slide_type": "fragment"}
# Install each comparison, pairing float's concrete implementation with
# z3's symbolic one.
for fname in FLOAT_BOOL_OPS:
    fun = getattr(float, fname)
    zfun = getattr(z3.ArithRef, fname)
    setattr(zfloat, fname, make_float_bool_wrapper(fname, fun, zfun))
# + [markdown] slideshow={"slide_type": "subslide"}
# Similarly, we define the common proxy method for binary operators.
# + slideshow={"slide_type": "fragment"}
def make_float_binary_wrapper(fname, fun, zfun):
    """Build an arithmetic method for zfloat.

    The returned proxy runs `fun` concretely and `zfun` symbolically and
    wraps both results in a new zfloat.
    """
    def proxy(self, other):
        other_z, other_v = self._zv(other)
        return zfloat(self.context, zfun(self.z, other_z), fun(self.v, other_v))
    return proxy
# + [markdown] slideshow={"slide_type": "fragment"}
# And apply them on `zfloat`
# + slideshow={"slide_type": "subslide"}
# Arithmetic dunders to lift onto zfloat.  Bit-level and integral-only
# operators do not exist on float and remain commented out.
FLOAT_BINARY_OPS = [
    '__add__',
    '__sub__',
    '__mul__',
    '__truediv__',
    # '__div__',
    '__mod__',
    # '__divmod__',
    '__pow__',
    # '__lshift__',
    # '__rshift__',
    # '__and__',
    # '__xor__',
    # '__or__',
    '__radd__',
    '__rsub__',
    '__rmul__',
    '__rtruediv__',
    # '__rdiv__',
    '__rmod__',
    # '__rdivmod__',
    '__rpow__',
    # '__rlshift__',
    # '__rrshift__',
    # '__rand__',
    # '__rxor__',
    # '__ror__',
]
# + slideshow={"slide_type": "subslide"}
# Install each operator, pairing float's concrete implementation with
# z3's symbolic one.
for fname in FLOAT_BINARY_OPS:
    fun = getattr(float, fname)
    zfun = getattr(z3.ArithRef, fname)
    setattr(zfloat, fname, make_float_binary_wrapper(fname, fun, zfun))
# + [markdown] slideshow={"slide_type": "fragment"}
# These are used as follows.
# + slideshow={"slide_type": "fragment"}
# Trace a float computation: za * zb is 0.0, so the `if` takes the false
# branch and nothing is printed; the comparison is still recorded.
with ConcolicTracer() as _:
    za = zfloat.create(_.context, 'float_a', 1.0)
    zb = zfloat.create(_.context, 'float_b', 0.0)
    if za * zb:
        print(1)
# + slideshow={"slide_type": "fragment"}
# Inspect the collected (declarations, path conditions) context.
_.context
# + [markdown] slideshow={"slide_type": "fragment"}
# Finally, we fix the `zint` binary wrapper to correctly create `zfloat` when needed.
# + slideshow={"slide_type": "subslide"}
def make_int_binary_wrapper(fname, fun, zfun):
    """Build an int arithmetic method whose result proxy type follows the
    concrete result: true division of ints yields float, so the proxy
    returns zfloat in that case and zint otherwise.
    """
    def proxy(self, other):
        other_z, other_v = self._zv(other)
        result_z = zfun(self.z, other_z)
        result_v = fun(self.v, other_v)
        if isinstance(result_v, float):
            return zfloat(self.context, result_z, result_v)
        elif isinstance(result_v, int):
            return zint(self.context, result_z, result_v)
        else:
            # No other result type is expected from int operators.
            assert False
    return proxy
# + slideshow={"slide_type": "subslide"}
# Re-install the int binary operators so that division now produces zfloat.
for fname in INT_BINARY_OPS:
    fun = getattr(int, fname)
    zfun = getattr(z3.ArithRef, fname)
    setattr(zint, fname, make_int_binary_wrapper(fname, fun, zfun))
# + [markdown] slideshow={"slide_type": "fragment"}
# Checking whether it worked as expected.
# + slideshow={"slide_type": "fragment"}
# binomial() exercises true division internally.
with ConcolicTracer() as _:
    v = _[binomial](4, 2)
# + slideshow={"slide_type": "fragment"}
# Solve the collected path conditions for alternative inputs.
_.zeval()
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Exercise 2: Bit Manipulation
# + [markdown] slideshow={"slide_type": "fragment"}
# Similar to floats, implementing the bit manipulation functions such as `xor` involves converting `int` to its bit vector equivalents, performing operations on them, and converting it back to the original type. Can you implement the bit manipulation operations for `zint`?
# + [markdown] slideshow={"slide_type": "fragment"}
# __Solution.__ The solution is as follows.
# + [markdown] slideshow={"slide_type": "fragment"}
# We first define the proxy method as before.
# + slideshow={"slide_type": "subslide"}
def make_int_bit_wrapper(fname, fun, zfun):
    """Build a bitwise method for zint.

    z3 integers have no bit operators, so the symbolic side converts to a
    64-bit vector, applies the operation, and converts back to an int.
    """
    def proxy(self, other):
        other_z, other_v = self._zv(other)
        left = z3.Int2BV(self.z, num_bits=64)
        right = z3.Int2BV(other_z, num_bits=64)
        result_z = z3.BV2Int(zfun(left, right))
        return zint(self.context, result_z, fun(self.v, other_v))
    return proxy
# + [markdown] slideshow={"slide_type": "fragment"}
# It is then applied to the `zint` class.
# + slideshow={"slide_type": "subslide"}
# Bitwise dunders (and their reflected forms) to lift onto zint.
BIT_OPS = [
    '__lshift__',
    '__rshift__',
    '__and__',
    '__xor__',
    '__or__',
    '__rlshift__',
    '__rrshift__',
    '__rand__',
    '__rxor__',
    '__ror__',
]
# + slideshow={"slide_type": "subslide"}
def init_concolic_4():
    """Install the bitwise operators on zint.

    The symbolic side comes from z3.BitVecRef via make_int_bit_wrapper.
    """
    for fname in BIT_OPS:
        fun = getattr(int, fname)
        zfun = getattr(z3.BitVecRef, fname)
        setattr(zint, fname, make_int_bit_wrapper(fname, fun, zfun))
# + slideshow={"slide_type": "fragment"}
# Register the initializer so chapter re-imports run it as well.
INITIALIZER_LIST.append(init_concolic_4)
# + slideshow={"slide_type": "fragment"}
init_concolic_4()
# + [markdown] slideshow={"slide_type": "fragment"}
# Invert is the only unary bit manipulation method.
# + slideshow={"slide_type": "fragment"}
class zint(zint):
    def __invert__(self):
        """Bitwise NOT, performed symbolically on a 64-bit vector view."""
        bits = z3.Int2BV(self.z, num_bits=64)
        return zint(self.context, z3.BV2Int(~bits), ~self.v)
# + [markdown] slideshow={"slide_type": "fragment"}
# The `my_fn()` computes `xor` and returns `True` if the `xor` results in a non zero value.
# + slideshow={"slide_type": "subslide"}
def my_fn(a, b):
    """Return True iff `a` and `b` differ in at least one bit.

    `(a | b) & ~(a & b)` equals `a ^ b`, so this is the xor-is-nonzero test.
    """
    union = (a | b)
    common = (a & b)
    if union & ~common:
        return True
    return False
# + [markdown] slideshow={"slide_type": "fragment"}
# Using that under `ConcolicTracer`
# + slideshow={"slide_type": "fragment"}
# Trace my_fn concolically: 2 | 1 = 3 and 2 & 1 = 0, so it prints True.
with ConcolicTracer() as _:
    print(_[my_fn](2, 1))
# + [markdown] slideshow={"slide_type": "fragment"}
# We log the computed SMT expression to verify that everything went well.
# + slideshow={"slide_type": "subslide"}
_.zeval(log=True)
# + [markdown] slideshow={"slide_type": "subslide"}
# We can confirm from the formulas generated that the bit manipulation functions worked correctly.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Exercise 3: String Translation Functions
# + [markdown] slideshow={"slide_type": "fragment"}
# We have seen how to define `upper()` and `lower()`. Can you define the `capitalize()`, `title()`, and `swapcase()` methods?
# + [markdown] slideshow={"slide_type": "fragment"}
# __Solution.__ Solution not yet available.
| docs/beta/notebooks/ConcolicFuzzer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Decorator Pattern
# 1. Help reduce a profusion of subclasses by adding additional responsibilities at runtime
# 1. Is a structural pattern, as it offers a new way to put the program together
# 1. Adds new abilities to an object, dynamically at runtime
# 1. Flexible approach to subclassing
# 1. Also known as **Wrapper Pattern**
#
# **Advantage**
# 1. More Flexible than static inheritance
# 1. Keeps things simple
# 1. No practical limits
# 1. Transparent to clients
# 1. A decorator has a different type - (May be problematic)
# 1. Many little objects
# 1. Factory and Builder can help
# 1. Helps to add new functionality to existing objects
# 1. Better than many subclasses
# 1. Better than many properties
#
# **Decorator Pattern**
# 1. Class defs
# 1. Wraps class instances
# 1. Run time decoration
# 1. Adds functionality to instances
# 1. Has very specific purpose
#
# **@ Decorator**
# 1. Function defs or class definitions, not instance
# 1. @ syntax
# 1. Compile time
# 1. Add functionality to functions and classes
# 1. General purpose
# # Car Dealership
# Three Models
# - Economy
# - Luxury
# - Sport
#
# Options
# - Engine - 4 or 6 Cylinders
# - Paint - White, Red or Black
# - Upholstery - Leather or Vinyl
from abc import ABCMeta, abstractmethod, abstractproperty
class ICar(metaclass=ABCMeta):
    """Abstract car interface: every car exposes a description and a cost.

    Uses the stacked `@property` + `@abstractmethod` idiom instead of
    `abstractproperty`, which has been deprecated since Python 3.3.
    """

    @property
    @abstractmethod
    def description(self):
        """Human-readable description of the car and its options."""

    @property
    @abstractmethod
    def cost(self):
        """Total price of the car including options."""
class Economy(ICar):
    """Entry-level model: the base component onto which options stack."""

    _NAME = 'Economy'
    _BASE_PRICE = 12000.00

    @property
    def description(self):
        return self._NAME

    @property
    def cost(self):
        return self._BASE_PRICE
# # Decorators
class IDecorator(ICar):
    """Base class for car options (the Decorator role).

    Wraps another ICar -- possibly itself already decorated -- and lets
    subclasses add their option's text and price on top of it.
    """
    def __init__(self, car):
        # The wrapped component this option decorates.
        self._car = car
    @property
    def car(self):
        """The wrapped car this decorator delegates to."""
        return self._car
class V6(IDecorator):
    """Option: 6-cylinder engine, adds $1200 to the wrapped car."""

    @property
    def description(self):
        return f'{self.car.description}, V6'

    @property
    def cost(self):
        return self.car.cost + 1200.00
class BlackPaint(IDecorator):
    """Option: black paint, adds $2000 to the wrapped car."""

    @property
    def description(self):
        return f'{self.car.description}, Black Paint'

    @property
    def cost(self):
        return self.car.cost + 2000.00
class Vinyl(IDecorator):
    """Option: vinyl interior, adds $4000 to the wrapped car."""

    @property
    def description(self):
        return f'{self.car.description}, Vinyl Interior'

    @property
    def cost(self):
        return self.car.cost + 4000.00
# # Driver Program
# Build up a car at runtime by stacking option decorators on the base
# model, printing the description and running total after each step.
car = Economy()
print(f'{car.description}: {car.cost}$')
for option in (BlackPaint, V6, Vinyl):
    car = option(car)
    print(f'{car.description}: {car.cost}$')
| structural/decorator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <center>Частина І</center>
# $$ f(t, N, \alpha, \beta, \gamma) = \frac{\alpha N^{2}}{N + 1} - \beta N - \gamma N^{2} $$
# +
from scipy.integrate import ode
# Model parameters for the population equation (alpha, beta, gamma).
birth_rate = 128
death_rate = 90
intraspecific_competition = 2
ps = [birth_rate, death_rate, intraspecific_competition]
def f(t, N, ps):
    """Right-hand side of dN/dt = a*N^2/(N+1) - b*N - g*N^2."""
    alpha, beta, gamma = ps
    return alpha * (N ** 2) / (N + 1) - beta * N - gamma * (N ** 2)
def solve(N0, t0=0, t1=1, h=0.05):
    """Integrate dN/dt = f(t, N, ps) from t0 to t1 with step h.

    Uses the explicit Runge-Kutta 'dopri5' integrator and the
    module-level parameter vector `ps`.  Returns (N, t).
    """
    integrator = ode(f).set_integrator('dopri5')
    integrator.set_initial_value(N0, t0).set_f_params(ps)
    N, t = [N0], [t0]
    while integrator.successful() and integrator.t < t1:
        t.append(integrator.t + h)
        N.append(integrator.integrate(integrator.t + h))
    return N, t
# -
# $$ L = \frac{-\sqrt{(\alpha - \beta - \gamma)^2 - 4\beta\gamma} - \alpha + \beta + \gamma}{-2\gamma} $$
# $$ K = \frac{\sqrt{(\alpha - \beta - \gamma)^2 - 4\beta\gamma} - \alpha + \beta + \gamma}{-2\gamma} $$
# +
# Equilibria of f: the closed-form roots L (lower) and K (upper, carrying
# capacity) of a*N/(N+1) - b - g*N = 0.
num_part = ((ps[0] - ps[1] - ps[2]) ** 2 - 4*ps[1]*ps[2]) ** 0.5
L = (-num_part - ps[0] + ps[1] + ps[2]) / (-2 * ps[2])
K = (num_part - ps[0] + ps[1] + ps[2]) / (-2 * ps[2])
if K < L:
    # Ensure L is the lower and K the upper equilibrium.
    L, K = K, L
print("Нижня межа: {}, верхня межа: {}".format(L, K))
# -
L, K
# Initial conditions placed around the two equilibria, with plot labels.
options = [
    [1./4. * L, "< L/2"],
    [3./4. * L, "> L/2"],
    [L, "L"],
    [1./4. * (K + L), "< (K + L)/2"],
    [3./4. * (K + L), "> (K + L)/2"],
    [K, "K"],
    [1.25 * K, "> K"]
]
options
# +
import matplotlib.pyplot as plt
t0 = 0
t1 = 0.5
# Solve and plot one trajectory per initial condition on a shared axis.
fig, ax = plt.subplots()
lines=[]
for ind, opt in enumerate(options):
    N0 = opt[0]
    def_text = opt[1]
    N, t = solve(N0, h=0.01)
    lines.append(ax.plot(t, N, label=def_text)[0])
ax.legend(handles=lines)
plt.show()
# -
# # <center>Частина ІI</center>
# $$ \frac{dN}{dt} = -0.056 * N + 0.0004 * N^2 $$
# +
# Part II: initial population sizes to compare, with their plot labels.
options = [
    [100, "N(0) = 100"],
    [140, "N(0) = 140"],
    [180, "N(0) = 180"]
]
# Integration horizon.
t1 = 24
def f(t, N):
    """Right-hand side of dN/dt = -0.056*N + 0.0004*N^2."""
    return 0.0004 * (N ** 2) - 0.056 * N
def solve(N0, t0=0, t1=1, h=0.05):
    """Integrate dN/dt = f(t, N) from t0 to t1 with step h.

    Uses the implicit BDF method of the 'vode' integrator.
    Returns (N, t).
    """
    stepper = ode(f).set_integrator('vode', method='bdf')
    stepper.set_initial_value(N0, t0)
    N, t = [N0], [t0]
    while stepper.successful() and stepper.t < t1:
        t.append(stepper.t + h)
        N.append(stepper.integrate(stepper.t + h))
    return N, t
# +
# Solve and plot each trajectory on a shared axis.
plt.gcf().clear()
fig, ax = plt.subplots()
lines = []
for ind, opt in enumerate(options):
    N0 = opt[0]
    def_text = opt[1]
    N, t = solve(N0, t0=0, t1=t1, h=0.01)
    lines.append(ax.plot(t, N, label=def_text)[0])
ax.legend(handles=lines)
plt.show()
# -
| eco_systems/dima4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from numpy.random import randint, choice, normal,shuffle
from scipy.special import factorial
from sklearn.model_selection import learning_curve, TimeSeriesSplit, PredefinedSplit
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
plt.style.use('fivethirtyeight')
plt.rc('figure', figsize=(14, 8))
# ## Generate Sample Data
max_degree = 10
def f(x):
    """Evaluate the Taylor polynomial of sin(x) truncated below `max_degree`.

    Works element-wise on NumPy arrays as well as on scalars.
    """
    exponents = range(1, max_degree, 2)
    terms = [(-1) ** k * x ** p / factorial(p)
             for k, p in enumerate(exponents)]
    return np.sum(terms, axis=0)
# Evaluate the truncated series on [-5, 5] and overlay the true sine for
# comparison -- the truncation diverges near the interval ends.
x = np.linspace(-5, 5, 1000)
data = pd.DataFrame({'y': f(x), 'x': x})
ax = data.plot(x='x', y='y', legend=False)
pd.Series(np.sin(x), index=x).plot(ax=ax, ls='--', lw=2, label='sine')
plt.legend();
# ### Train Model
# +
X = {}
datasets = ['Train', 'Test']
# Train on [-3, 3]; test strictly out of sample on (3, 5].
X['Train'] = np.linspace(-3, 3, 2500)
X['Test'] = np.linspace(3, 5, 500)
# Polynomial degrees for the under-, right-, and over-fitting models.
models = {'Underfit': 3, 'Right Fit': 9, 'Overfit': 15}
sample, noise = 100, .2

# Collect per-iteration frames and concatenate once at the end:
# DataFrame.append was deprecated and removed in pandas 2.0, and repeated
# appends are quadratic anyway.
frames = []
for i in range(100):
    # Draw a fresh random sample of x-values for each dataset.
    x_ = {d: choice(X[d], size=sample, replace=False) for d in datasets}
    y_ = {d: f(x_[d]) for d in datasets}
    # Add Gaussian noise to the training targets only.
    y_['Train'] += normal(loc=0,
                          scale=np.std(y_['Train']) * noise,
                          size=sample)
    # Fit one polynomial per model on the noisy training sample.
    trained_models = {fit: np.poly1d(np.polyfit(x=x_['Train'],
                                                y=y_['Train'],
                                                deg=deg))
                      for fit, deg in models.items()}
    # Record predictions and errors for both datasets.
    for fit, model in trained_models.items():
        for dataset in datasets:
            pred = model(x_[dataset])
            frames.append(pd.DataFrame(dict(x=x_[dataset],
                                            Model=fit,
                                            Data=dataset,
                                            y=pred,
                                            Error=pred - y_[dataset])))
result = pd.concat(frames)
# -
# Noise-free targets for both ranges, plus a noisy training variant for
# the scatter plot below.
y = {d: f(X[d]) for d in datasets}
y['Train_noise'] = y['Train'] + normal(loc=0,
                                       scale=np.std(y['Train']) * noise,
                                       size=len(y['Train']))
colors = {'Underfit': 'darkblue', 'Right Fit': 'darkgreen', 'Overfit': 'darkred'}
test_data = result[result.Data == 'Test']
# ### Plot result
# +
fig, axes = plt.subplots(ncols=2, figsize=(18, 9), sharey=True)
# Left panel: error distributions in- vs out-of-sample per model.
sns.boxplot(x='Model', y='Error', hue='Data',
            data=result, ax=axes[0], linewidth=2)
axes[0].set_title('In- vs Out-of-Sample Errors')
axes[0].axhline(0, ls='--', lw=1, color='k')
axes[0].set_ylabel('Symmetric Log Scale')
# Right panel: out-of-sample predictions against the true function.
for model in colors.keys():
    (test_data[(test_data['Model'] == model)]
     .plot.scatter(x='x',
                   y='y',
                   ax=axes[1],
                   s=2,
                   color=colors[model],
                   alpha=.5,
                   label=model))
pd.Series(y['Train'], index=X['Train']).sort_index().plot(
    ax=axes[1], title='Out-of-sample Predictions')
pd.DataFrame(dict(x=X['Train'], y=y['Train_noise'])
             ).plot.scatter(x='x', y='y', ax=axes[1])
pd.Series(y['Test'], index=X['Test']).plot(
    color='black', lw=5, ls='--', ax=axes[1], label='Actuals')
plt.yscale('symlog')
fig.tight_layout()
fig.suptitle('Bias - Variance Tradeoff: Under vs. Overfitting', fontsize=24)
fig.savefig('bias_variance', dpi=600)
fig.subplots_adjust(top=0.85)
# -
def folds(train, test, nfolds):
    """Shuffle `train` and `test` in place, then yield `nfolds` pairs of
    equally sized, non-overlapping chunks (one chunk from each set).

    Any remainder elements beyond nfolds * chunk size are dropped.
    """
    shuffle(train)
    shuffle(test)
    train_step = len(train) // nfolds
    test_step = len(test) // nfolds
    for k in range(nfolds):
        yield (train[k * train_step:(k + 1) * train_step],
               test[k * test_step:(k + 1) * test_step])
def rmse(y, x, model):
    """Root-mean-squared error of `model`'s predictions on `x` against `y`."""
    predictions = model.predict(x)
    return np.sqrt(mean_squared_error(y_true=y, y_pred=predictions))
def create_poly_data(data, degree):
    """Return an (n, degree+1) design matrix with columns data**0 .. data**degree.

    The power columns are materialized in a list: passing a generator to
    np.hstack was deprecated in NumPy 1.16 and raises TypeError on modern
    NumPy.
    """
    columns = [data.reshape(-1, 1) ** i for i in range(degree + 1)]
    return np.hstack(columns)
# +
# Learning-curve experiment: mean/std RMSE per model and training size,
# cross-validated over 5 folds.
train_set = X['Train'] + normal(scale=np.std(f(X['Train']))) * .2
test_set = X['Test'].copy()
# Fractions of the data to use as training-set sizes.
sample_sizes = np.arange(.1, 1.0, .01)
indices = ([len(train_set), len(test_set)] *
           sample_sizes.reshape(-1, 1)).astype(int)
# Rows are accumulated in a plain list and converted to a frame once.
result = []
lr = LinearRegression()
for label, degree in models.items():
    model_train = create_poly_data(train_set, degree)
    model_test = create_poly_data(test_set, degree)
    for train_idx, test_idx in indices:
        train = model_train[:train_idx]
        test = model_test[:test_idx]
        train_rmse, test_rmse = [], []
        for x_train, x_test in folds(train, test, 5):
            # Column 1 of the design matrix is the raw x value.
            y_train, y_test = f(x_train[:, 1]), f(x_test[:, 1])
            lr.fit(X=x_train, y=y_train)
            train_rmse.append(rmse(y=y_train, x=x_train, model=lr))
            test_rmse.append(rmse(y=y_test, x=x_test, model=lr))
        result.append([label, train_idx,
                       np.mean(train_rmse), np.std(train_rmse),
                       np.mean(test_rmse), np.std(test_rmse)])
result = (pd.DataFrame(result,
                       columns=['Model', 'Train Size',
                                'Train RMSE', 'Train RMSE STD',
                                'Test RMSE', 'Test RMSE STD'])
          .set_index(['Model', 'Train Size']))
# -
# One learning-curve panel per model, on a log RMSE scale.
fig, axes = plt.subplots(nrows=3, sharey=True, figsize=(16, 9))
for i, model in enumerate(models.keys()):
    result.loc[model, ['Train RMSE', 'Test RMSE']].plot(ax=axes[i], title=model, logy=True, lw=2)
    axes[i].set_ylabel('Log RMSE')
plt.tight_layout();
# +
# Same experiment with 10 folds, keeping the individual per-fold RMSEs in
# long form for seaborn.  Frames are collected and concatenated once:
# DataFrame.append was deprecated and removed in pandas 2.0, and repeated
# appends are quadratic anyway.
train_set = X['Train'] + normal(scale=np.std(f(X['Train']))) * .2
test_set = X['Test'].copy()
sample_sizes = np.arange(.1, 1.0, .01)
indices = ([len(train_set), len(test_set)] *
           sample_sizes.reshape(-1, 1)).astype(int)
frames = []
lr = LinearRegression()
for label, degree in models.items():
    model_train = create_poly_data(train_set, degree)
    model_test = create_poly_data(test_set, degree)
    for train_idx, test_idx in indices:
        train = model_train[:train_idx]
        test = model_test[:test_idx]
        train_rmse, test_rmse = [], []
        for x_train, x_test in folds(train, test, 10):
            # Column 1 of the design matrix is the raw x value.
            y_train, y_test = f(x_train[:, 1]), f(x_test[:, 1])
            lr.fit(X=x_train, y=y_train)
            train_rmse.append(rmse(y=y_train, x=x_train, model=lr))
            test_rmse.append(rmse(y=y_test, x=x_test, model=lr))
        frames.append(pd.DataFrame({'Model': label,
                                    'Train Size': train_idx,
                                    'Data': 'Train',
                                    'RMSE': train_rmse}))
        frames.append(pd.DataFrame({'Model': label,
                                    'Train Size': train_idx,
                                    'Data': 'Test',
                                    'RMSE': test_rmse}))
result = pd.concat(frames)
# -
# One panel per model; seaborn aggregates the per-fold RMSEs into a mean
# line with a confidence band.
fig, axes = plt.subplots(nrows=3, sharey=True, sharex=True, figsize=(18, 10))
for i, model in enumerate(models.keys()):
    sns.lineplot(x='Train Size', y='RMSE', hue='Data', data=result[result.Model==model], ax=axes[i], lw=2)
    axes[i].set_title(model)
plt.yscale('log')
plt.tight_layout();
| Chapter06/03_bias_variance/bias_variance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from textblob import TextBlob
import sys
import tweepy
import json
import requests
import pandas as pd
from IPython.display import display, HTML
# Twitter API credentials -- intentionally blank; fill in before running.
# NOTE(review): prefer loading these from environment variables rather
# than hard-coding secrets in the notebook.
consumerkey=""
consumersecret=""
accesstoken=""
accesstokensecret=""
# OAuth handshake with the Twitter API.
auth=tweepy.OAuthHandler(consumerkey,consumersecret)
auth.set_access_token(accesstoken,accesstokensecret)
api=tweepy.API(auth)
# +
#Fetching Tweets about Ferrari (Only 200 tweets were allowed to fetch)
noofsearchterm = int(input("enter the no of tweets to analyse:"))
print("Analysing "+ str(noofsearchterm) +" tweets for Ferrari")
searchterm = "ferrari"
# Cursor pages through the search API until the requested count is reached.
tweets=tweepy.Cursor(api.search,q=searchterm,lang="en").items(noofsearchterm)
# NOTE(review): these counters are never updated below -- only the
# per-tweet 'Sentiment' label is stored.  Either tally them in the
# polarity branches or remove them.
positive=0
negative=0
neutral=0
polarity=0
tweet_list=[]
for tweet in tweets:
    # One metadata dict per tweet, later dumped to JSON.
    tweet_info=dict()
    imgs=[]
    tweet_info['Twitter_Text']=tweet.text
    tweet_info['Date/Time']=str(tweet.created_at)
    tweet_info['No._of_Likes']=tweet.favorite_count
    tweet_info['No._of_Retweets']=tweet.retweet_count
    #checking for images in tweets
    for i in tweet.entities.get("media",[{}]):
        if i.get("type",None) == "photo":
            image_content=requests.get(i["media_url"])
            imgs.append(image_content)
            tweet_info['No._of_Images']=len(imgs)
        else:
            # No media (the [{}] default) or non-photo media: note that
            # "None" is stored as a string here, not a count.
            tweet_info['No._of_Images']="None"
    #sentiment analysis
    analysis=TextBlob(tweet.text)
    polarity=analysis.sentiment.polarity
    if(polarity==0):
        tweet_info['Sentiment']="neutral"
    elif(polarity<0):
        tweet_info['Sentiment']="negative"
    elif(polarity>0):
        tweet_info['Sentiment']="positive"
    #dumping tweets information into json file
    tweet_list.append(tweet_info)
file_info=open('tweets.json','w')
json.dump(tweet_list,file_info,indent=4,sort_keys=True)
file_info.close()
#reading json file and displaying in tabular form
rd = pd.read_json(r'tweets.json')
df=pd.DataFrame(rd)
display(HTML(df.to_html()))
| twitter-analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: sicss2021
# language: python
# name: sicss2021
# ---
# # Using Reddit API via Pushshift (psaw)
# +
from psaw import PushshiftAPI
import pandas as pd
import datetime as dt
from os.path import join
api = PushshiftAPI()
# -
src = "../data"
# ## Get data from API
# +
def get_pushshift_data(query, subreddit, api, after, limit = 500):
    """Fetch comments matching `query` from `subreddit` via Pushshift.

    Parameters
    ----------
    query : str
        Pushshift search query (terms may be OR-joined with '|').
    subreddit : str
        Subreddit to restrict the search to.
    api : PushshiftAPI
        Client whose `search_comments` generator yields result objects.
    after : int
        Epoch timestamp; only comments newer than this are returned.
    limit : int or None
        Stop after this many comments; None fetches everything the
        generator yields (which can take a while).

    Returns
    -------
    pandas.DataFrame with one row per comment (Pushshift's `d_` dict).
    """
    gen = api.search_comments(q = query, subreddit = subreddit, after = after)
    cache = []
    for comment in gen:
        cache.append(comment)
        # None means "no cap": exhaust the generator.
        if limit is not None and len(cache) >= limit:
            break
    return pd.DataFrame([thing.d_ for thing in cache])
# +
# Search phrases indicating a (self-reported) diabetes diagnosis; exact
# phrases are quoted for Pushshift.
diabetes_terms = ["diabetes",
                  "Diabetes",
                  '"I was just diagnosed with diabetes"',
                  '"today I was diagnosed with diabetes"',
                  '"I just learned I have diabetes"',
                  '"learned I got diabetes"',
                  '"heard I got diabetes"',
                  '"learned I have diabetes"',
                  '"heard I have diabetes"',
                  '"I was recently diagnosed with diabetes"',
                  '"I recently learned I have diabetes"',
                  '"I recently learned that I have diabetes"',
                  '"new diabetic"',
                  '"New diabetic"',
                  '"NEW DIABETIC"']
# Pushshift treats '|' as OR between terms.
diabetes_query = '|'.join(diabetes_terms)
print(diabetes_query)
# +
start_epoch=int(dt.datetime(2020, 1, 1).timestamp()) # Set filtering date to 1st of January 2020
df = get_pushshift_data(query = diabetes_query,
                        subreddit = "diabetes",
                        api = api,
                        after = start_epoch,
                        limit = None)
print("Found", len(df), "posts.")
print(df.head())
# -
# ## Identify and remove anniversaries
# +
# many users post about a diagnosis anniversary, remove these tweets as well
df['recent'] = df['body'].apply(lambda x: ('years ago' not in x) and \
                                ('yrs ago' not in x) and \
                                ('year ago' not in x) and \
                                ('YEARS AGO' not in x) and \
                                ('years today' not in x) and \
                                ('flashback') not in x)
past = df[df['recent'] == False].copy()
recent = df[df['recent'] == True].copy()
print('{} comments from past diabetes diagnoses'.format(len(past)))
print('{} comments from recent diabetes diagnoses'.format(len(recent)))
# -
# ## Export data
# +
recent.to_csv(join(src, 'reddit_diagnosed_diabetes_clean.csv'), index=False)
# One username per line for downstream user-level collection.
user_list = list(recent['author'].unique())
print('Saving {} usernames.'.format(len(user_list)))
with open(join(src, "reddit_diagnosed_user_IDs.txt"), "w") as outfile:
    for username in user_list:
        outfile.write("%s\n" % username)
# +
# Keep each author's earliest matching comment as their diagnosis date.
diagnosis_dates = df[['author', 'created_utc', 'id']]\
    .sort_values(by=['author', 'created_utc'])\
    .reset_index(drop=True)\
    .drop_duplicates(subset=['author'])
diagnosis_dates['created_dt'] = diagnosis_dates['created_utc'].apply(lambda x: dt.datetime.fromtimestamp(x).strftime('%Y-%m-%d %H:%M:%S')
)
diagnosis_dates.to_csv(join(src, 'reddit_user_diagnosis_dates.csv'), index=False)
# -
| code/reddit_pushshift.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Dimensionality Reduction Exercises
# 
# # Learning Objectives
#
# - Explain and Apply Principal Component Analysis (PCA)
# - Explain Multidimensional Scaling (MDS)
# - Apply Intel® Extension for Scikit-learn* to leverage underlying compute capabilities of hardware
#
# # scikit-learn*
#
# Frameworks provide structure that Data Scientists use to build code. Frameworks are more than just libraries, because in addition to callable code, frameworks influence how code is written.
#
# A main virtue of using an optimized framework is that code runs faster. Code that runs faster is just generally more convenient but when we begin looking at applied data science and AI models, we can see more material benefits. Here you will see how optimization, particularly hyperparameter optimization can benefit more than just speed.
#
# These exercises will demonstrate how to apply **the Intel® Extension for Scikit-learn*,** a seamless way to speed up your Scikit-learn application. The acceleration is achieved through the use of the Intel® oneAPI Data Analytics Library (oneDAL). Patching is the term used to extend scikit-learn with Intel optimizations and makes it a well-suited machine learning framework for dealing with real-life problems.
#
# To get optimized versions of many Scikit-learn algorithms using a patch() approach consisting of adding these lines of code PRIOR to importing sklearn:
#
# - **from sklearnex import patch_sklearn**
# - **patch_sklearn()**
#
# ## This exercise relies on installation of Intel® Extension for Scikit-learn*
#
# If you have not already done so, follow the instructions from Week 1 for instructions
# ## Introduction
#
# We will be using customer data from a [Portuguese wholesale distributor](https://archive.ics.uci.edu/ml/datasets/Wholesale+customers) for clustering. This data file is called `Wholesale_Customers_Data`.
#
# It contains the following features:
#
# * Fresh: annual spending (m.u.) on fresh products
# * Milk: annual spending (m.u.) on milk products
# * Grocery: annual spending (m.u.) on grocery products
# * Frozen: annual spending (m.u.) on frozen products
# * Detergents_Paper: annual spending (m.u.) on detergents and paper products
# * Delicatessen: annual spending (m.u.) on delicatessen products
# * Channel: customer channel (1: hotel/restaurant/cafe or 2: retail)
# * Region: customer region (1: Lisbon, 2: Porto, 3: Other)
#
# In this data, the values for all spending are given in an arbitrary unit (m.u. = monetary unit).
# +
from __future__ import print_function
import os
data_path = ['data']
from sklearnex import patch_sklearn
patch_sklearn()
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import FunctionTransformer
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# -
# ## Question 1
#
# * Import the data and check the data types.
# * Drop the channel and region columns as they won't be used.
# * Convert the remaining columns to floats if necessary.
# * Copy this version of the data (using the `copy` method) to a variable to preserve it. We will be using it later.
# +
import pandas as pd
import numpy as np
# Load the wholesale-customers spending data.
filepath = os.sep.join(data_path + ['Wholesale_Customers_Data.csv'])
data = pd.read_csv(filepath, sep=',')
# -
data.shape
data.head()
# Channel and Region are categorical identifiers, not spending; drop them.
data = data.drop(['Channel', 'Region'], axis=1)
data.dtypes
# Convert to floats
for col in data.columns:
    data[col] = data[col].astype(float)
# Preserve the original data.
data_orig = data.copy()
# ## Question 2
#
# As with the previous lesson, we need to ensure the data is scaled and (relatively) normally distributed.
#
# * Examine the correlation and skew.
# * Perform any transformations and scale data using your favorite scaling method.
# * View the pairwise correlation plots of the new data.
# +
# Pairwise correlations between the spending columns.
corr_mat = data.corr()
# Strip the diagonal for future examination
for x in range(corr_mat.shape[0]):
    corr_mat.iloc[x,x] = 0.0
corr_mat
# -
# As before, the two categories with their respective most strongly correlated variable.
corr_mat.abs().idxmax()
# Examine the skew values and log transform. Looks like all of them need it.
# +
# Columns with skew above 0.75 get a log1p transform.
log_columns = data.skew().sort_values(ascending=False)
log_columns = log_columns.loc[log_columns > 0.75]
log_columns
# -
# The log transformations
for col in log_columns.index:
    data[col] = np.log1p(data[col])
# Scale the data again. Let's use `MinMaxScaler` this time just to mix things up.
# +
mms = MinMaxScaler()
for col in data.columns:
    data[col] = mms.fit_transform(data[[col]]).squeeze()
# -
# Visualize the relationship between the variables.
# +
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# Pairwise scatter plots of the transformed, scaled features.
sns.set_context('notebook')
sns.set_palette('dark')
sns.set_style('white')
sns.pairplot(data);
# -
# ## Question 3
#
# * Using Scikit-learn's [pipeline function](http://scikit-learn.org/stable/modules/pipeline.html), recreate the data pre-processing scheme above (transformation and scaling) using a pipeline. If you used a non-Scikit learn function to transform the data (e.g. NumPy's log function), checkout the custom transformer class called [`FunctionTransformer`](http://scikit-learn.org/stable/modules/preprocessing.html#custom-transformers).
# * Use the pipeline to transform the original data that was stored at the end of question 1.
# * Compare the results to the original data to verify that everything worked.
#
# *Hint:* Scikit-learn has a more flexible `Pipeline` function and a shortcut version called `make_pipeline`. Either can be used. Also, if different transformations need to be performed on the data, a [`FeatureUnion`](http://scikit-learn.org/stable/modules/pipeline.html#featureunion-composite-feature-spaces) can be used.
# +
# The custom NumPy log transformer (wraps np.log1p for use in a Pipeline).
log_transformer = FunctionTransformer(np.log1p)
# The pipeline: log transform, then min-max scaling -- the same scheme
# applied manually above.
estimators = [('log1p', log_transformer), ('minmaxscale', MinMaxScaler())]
pipeline = Pipeline(estimators)
# Convert the original data
data_pipe = pipeline.fit_transform(data_orig)
# -
# The results are identical. Note that machine learning models and grid searches can also be added to the pipeline (and in fact, usually are.)
np.allclose(data_pipe, data)
# ## Question 4
#
# * Perform PCA with `n_components` ranging from 1 to 5.
# * Store the amount of explained variance for each number of dimensions.
# * Also store the feature importance for each number of dimensions. *Hint:* PCA doesn't explicitly provide this after a model is fit, but the `components_` properties can be used to determine something that approximates importance. How you decided to do so is entirely up to you.
# * Plot the explained variance and feature importances.
# +
# Sweep PCA over 1..5 components, recording explained variance and a
# feature-importance proxy for each fit.
pca_list = list()
feature_weight_list = list()
# Fit a range of PCA models
for n in range(1, 6):
    # Create and fit the model
    PCAmod = PCA(n_components=n)
    PCAmod.fit(data)
    # Store the model and variance
    pca_list.append(pd.Series({'n':n, 'model':PCAmod,
                               'var': PCAmod.explained_variance_ratio_.sum()}))
    # Calculate and store feature importances: normalized column sums of
    # the absolute component loadings.
    abs_feature_values = np.abs(PCAmod.components_).sum(axis=0)
    feature_weight_list.append(pd.DataFrame({'n':n,
                                             'features': data.columns,
                                             'values':abs_feature_values/abs_feature_values.sum()}))
pca_df = pd.concat(pca_list, axis=1).T.set_index('n')
pca_df
# -
# Create a table of feature importances for each data column.
# +
features_df = (pd.concat(feature_weight_list)
               .pivot(index='n', columns='features', values='values'))
features_df
# -
# Create a plot of explained variances.
# +
sns.set_context('talk')
ax = pca_df['var'].plot(kind='bar')
ax.set(xlabel='Number of dimensions',
       ylabel='Percent explained variance',
       title='Explained Variance vs Dimensions');
# -
# And here's a plot of feature importances.
# +
ax = features_df.plot(kind='bar')
ax.set(xlabel='Number of dimensions',
       ylabel='Relative importance',
       title='Feature importance vs Dimensions');
# -
# ## Question 5
#
# * Fit a `KernelPCA` model with `kernel='rbf'`. You can choose how many components and what values to use for the other parameters.
# * If you want to tinker some more, use `GridSearchCV` to tune the parameters of the `KernelPCA` model.
#
# The second step is tricky since grid searches are generally used for supervised machine learning methods and rely on scoring metrics, such as accuracy, to determine the best model. However, a custom scoring function can be written for `GridSearchCV`, where larger is better for the outcome of the scoring function.
#
# What would such a metric involve for PCA? What about percent of explained variance? Or perhaps the negative mean squared error on the data once it has been transformed and then inversely transformed?
#
#
# +
# Custom scorer--use negative mse of inverse transform
def scorer(pcamodel, X, y=None):
    """Score a (Kernel)PCA model by reconstruction error.

    Fits ``pcamodel`` on ``X``, projects the data and maps it back to the
    original space, and returns the *negative* mean squared reconstruction
    error (GridSearchCV maximizes scores, so larger is better).

    Parameters
    ----------
    pcamodel : estimator with fit/transform/inverse_transform
    X : DataFrame or ndarray of shape (n_samples, n_features)
    y : ignored (present only for the scorer signature)
    """
    # Accept either a DataFrame or a plain ndarray; the original used a
    # bare `except:` here, which would also swallow KeyboardInterrupt etc.
    X_val = X.values if hasattr(X, 'values') else X
    # Calculate and inverse transform the data
    data_inv = pcamodel.fit(X_val).transform(X_val)
    data_inv = pcamodel.inverse_transform(data_inv)
    # The error calculation -- conventional (y_true, y_pred) order;
    # MSE is symmetric so the value is unchanged.
    mse = mean_squared_error(X_val.ravel(), data_inv.ravel())
    # Larger values are better for scorers, so take negative value
    return -1.0 * mse

# The grid search parameters
param_grid = {'gamma':[0.001, 0.01, 0.05, 0.1, 0.5, 1.0],
              'n_components': [2, 3, 4]}
# The grid search; fit_inverse_transform=True is required so the scorer
# can call inverse_transform on the KernelPCA model.
kernelPCA = GridSearchCV(KernelPCA(kernel='rbf', fit_inverse_transform=True),
                         param_grid=param_grid,
                         scoring=scorer,
                         n_jobs=-1)
kernelPCA = kernelPCA.fit(data)
kernelPCA.best_estimator_
# -
# ## Question 6
#
# Let's explore how our model accuracy may change if we include a `PCA` in our model building pipeline. Let's plan to use sklearn's `Pipeline` class and create a pipeline that has the following steps:
# <ol>
# <li>A scaler</li>
# <li>`PCA(n_components=n)`</li>
# <li>`LogisticRegression`</li>
# </ol>
#
# * Load the Human Activity data from the datasets.
# * Write a function that takes in a value of `n` and makes the above pipeline, then predicts the "Activity" column over a 5-fold StratifiedShuffleSplit, and returns the average test accuracy
# * For various values of n, call the above function and store the average accuracies.
# * Plot the average accuracy by number of dimensions.
filepath = os.sep.join(data_path + ['Human_Activity_Recognition_Using_Smartphones_Data.csv'])
data2 = pd.read_csv(filepath, sep=',')
# +
X = data2.drop('Activity', axis=1)
y = data2.Activity
sss = StratifiedShuffleSplit(n_splits=5, random_state=42)

# From the previous question build the code for this question
def get_avg_score(n):
    """Mean test accuracy of a scaler -> PCA(n) -> LogisticRegression
    pipeline over the 5 StratifiedShuffleSplit folds defined above."""
    pipe = [
        ('scaler', MinMaxScaler()),
        ('pca', PCA(n_components=n)),
        ('estimator', LogisticRegression(max_iter=295, C=.001, penalty='l2'))
    ]
    pipe = Pipeline(pipe)
    scores = []
    for train_index, test_index in sss.split(X, y):
        # StratifiedShuffleSplit yields *positional* indices, so use .iloc.
        # The original .loc only worked because the default RangeIndex makes
        # labels coincide with positions.
        X_train, X_test = X.iloc[train_index], X.iloc[test_index]
        y_train, y_test = y.iloc[train_index], y.iloc[test_index]
        pipe.fit(X_train, y_train)
        scores.append(accuracy_score(y_test, pipe.predict(X_test)))
    return np.mean(scores)

ns = [10, 20, 50, 100, 150, 200, 300, 400]
#ns = [5, 10, 15, 20, 30]
score_list = [get_avg_score(n) for n in ns]
# +
# Plot average accuracy against the number of retained PCA dimensions.
sns.set_context('talk')
ax = plt.axes()
ax.plot(ns, score_list)
ax.set(xlabel='Number of Dimensions',
       ylabel='Average Accuracy',
       title='LogisticRegression Accuracy vs Number of dimensions on the Human Activity Dataset')
ax.grid(True)
# -
| AI-and-Analytics/Jupyter/Introduction_to_Machine_Learning/11-Dimensionality_Reduction_and_Advanced_Topics/Dimensionality_Reduction_Exercises-ANSWERS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Supervised Learning Algorithms: Ridge Regression
# *In this template, only **data input** and **input/target variables** need to be specified (see "Data Input & Variables" section for further instructions). None of the other sections needs to be adjusted. As a data input example, .csv file from IBM Box web repository is used.*
# ## 1. Libraries
# *Run to import the required libraries.*
# %matplotlib notebook
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# ## 2. Data Input and Variables
# *Define the data input as well as the input (X) and target (y) variables and run the code. Do not change the data & variable names **['df', 'X', 'y']** as they are used in further sections.*
# +
### Data Input
# df =
### Defining Variables
# X =
# y =
### Data Input Example
df = pd.read_csv('https://ibm.box.com/shared/static/q6iiqb1pd7wo8r3q28jvgsrprzezjqk3.csv')
X = df[['horsepower']]
y = df['price']
# -
# ## 3. The Model
# *Run to build the model.*
# +
from sklearn.linear_model import Ridge
# train_test_split (sklearn's default 75/25 split; fixed seed for repeatability)
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    random_state = 0)
# feature normalization -- fit the scaler on the training split only,
# then apply it to the test split, avoiding test-set leakage.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# ridge regression def
linridge = Ridge(alpha=20.0).fit(X_train_scaled, y_train)
### intercept & coefficient, # of non-zero features & weights, R-squared for training & test data set
print('Ridge regression linear model intercept: {}'
      .format(linridge.intercept_))
print('Ridge regression linear model coeff: {}\n'
      .format(linridge.coef_))
print('R-squared score (training): {:.3f}'
      .format(linridge.score(X_train_scaled, y_train)))
print('R-squared score (test): {:.3f}\n'
      .format(linridge.score(X_test_scaled, y_test)))
print('Number of non-zero features: {}'
      .format(np.sum(linridge.coef_ != 0)))
# -
# ### 3.1. Regularization parameter alpha on R-squared
# *Run to check how alpha affects the model score.*
# Sweep alpha to show the regularization trade-off: larger alpha shrinks
# the coefficients (fewer with |coef| > 1) and typically lowers training R^2.
print('Ridge regression: effect of alpha regularization parameter\n')
for this_alpha in [0, 1, 10, 20, 50, 100, 1000]:
    # NOTE(review): alpha=0 reduces Ridge to ordinary least squares;
    # recent sklearn versions warn about alpha=0 with some solvers -- confirm.
    linridge = Ridge(alpha = this_alpha).fit(X_train_scaled, y_train)
    r2_train = linridge.score(X_train_scaled, y_train)
    r2_test = linridge.score(X_test_scaled, y_test)
    # Count of coefficients whose magnitude exceeds 1.0
    num_coeff_bigger = np.sum(abs(linridge.coef_) > 1.0)
    print('Alpha = {:.2f}\nnum abs(coeff) > 1.0: {}, \
r-squared training: {:.2f}, r-squared test: {:.2f}\n'
          .format(this_alpha, num_coeff_bigger, r2_train, r2_test))
| MLProjects/Regression/Ridge Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Encoder-Decoder Architecture
#
# The encoder-decoder architecture is a neural network design pattern. In this architecture, the network is partitioned into two parts, the encoder and the decoder. The encoder's role is encoding the inputs into state, which often contains several tensors. Then the state is passed into the decoder to generate the outputs. In machine translation, the encoder transforms a source sentence, e.g. "Hello world.", into state, e.g. a vector, that captures its semantic information. The decoder then uses this state to generate the translated target sentence, e.g. "Bonjour le monde.".
#
# 
#
# In this section, we will show an interface to implement this encoder-decoder architecture.
# + attributes={"classes": [], "id": "", "n": "1"}
from mxnet.gluon import nn
# -
# ## Encoder
#
# The encoder is a normal neural network that takes inputs, e.g. a source sentence, to return outputs.
# + attributes={"classes": [], "id": "", "n": "2"}
# Save to the d2l package.
class Encoder(nn.Block):
    """Abstract base class for the encoder half of an encoder-decoder model.

    Concrete encoders override ``forward`` to map raw inputs (e.g. a source
    sentence) to an intermediate representation.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def forward(self, X):
        # Subclasses supply the actual encoding computation.
        raise NotImplementedError
# -
# ## Decoder
#
# The decoder has an additional method `init_state` to parse the outputs of the encoder with possible additional information, e.g. the valid lengths of inputs, to return the state it needs. In the forward method, the decoder takes both inputs, e.g. a target sentence, and the state. It returns outputs, with potentially modified state if the encoder contains RNN layers.
# + attributes={"classes": [], "id": "", "n": "3"}
# Save to the d2l package.
class Decoder(nn.Block):
    """Abstract base class for the decoder half of an encoder-decoder model.

    Subclasses must implement ``init_state``, which converts the encoder's
    outputs (plus any extra information, e.g. valid input lengths) into the
    decoder's initial state, and ``forward``, which consumes target inputs
    together with that state.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def init_state(self, enc_outputs, *args):
        # To be overridden: build the decoder state from encoder outputs.
        raise NotImplementedError

    def forward(self, X, state):
        # To be overridden: compute decoder outputs from inputs and state.
        raise NotImplementedError
# -
# ## Model
#
# The encoder-decoder model contains both an encoder and a decoder. We implement its forward method for training. It takes both encoder inputs and decoder inputs, with optional additional information. During computation, it first computes the encoder outputs to initialize the decoder state, and then returns the decoder outputs.
# + attributes={"classes": [], "id": "", "n": "4"}
# Save to the d2l package.
class EncoderDecoder(nn.Block):
    """Glue class wiring an Encoder and a Decoder together.

    The forward pass encodes ``enc_X``, derives the decoder's initial state
    from the encoder outputs, then decodes ``dec_X`` with that state.
    """

    def __init__(self, encoder, decoder, **kwargs):
        super().__init__(**kwargs)
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, enc_X, dec_X, *args):
        # Encode first, then seed the decoder's state with the result.
        enc_outputs = self.encoder(enc_X, *args)
        initial_state = self.decoder.init_state(enc_outputs, *args)
        return self.decoder(dec_X, initial_state)
# -
# ## Summary
#
# * An encoder-decoder architecture is a neural network design pattern mainly used in natural language processing.
# * An encoder is a network (FC, CNN, RNN, etc.) that takes the input and outputs a feature map, a vector or a tensor.
# * A decoder is a network (usually with the same structure as the encoder) that takes the feature vector from the encoder and produces the closest match to the actual input or intended output.
#
#
# ## Scan the QR Code to [Discuss](https://discuss.mxnet.io/t/encoder-decoder/2396)
#
# 
| 8 recurrent-neural-networks/encoder-decoder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Quantum Phase Estimation, Iterative
# $\newcommand{\bra}[1]{\left\langle{#1}\right|}$
# $\newcommand{\ket}[1]{\left|{#1}\right\rangle}$
#
# To economize in qubits, the References below advocate
# using the so called iterative Quantum Phase Estimation (iterative qPE).
# Whereas the usual qPE uses multiple pointer qubits and gives the
# answer in one shot (passage through a single circuit), the iterative
# qPE uses only a single pointer qubit but requires
# passage through multiple circuits, with the parameters
# of each circuit depending on the final pointer measurement of the previous circuit.
# This works because the kickback phases which each power of U
# sends to the pointers in the normal qPE are cumulative: the k'th
# pointer gets a
# kickback phase which includes the
# kickback phases accrued by all previous pointer qubits.
#
# In this example, we use
#
# $U = e^{i*rads*\sigma_Z}$
#
# for some Real number $rads$
# and we use initial state $\ket{0}$, so $e^{i*rads}$ is the
# eigenvalue we seek.
#
# Here are some of the equations used in the code below
#
# ```
# for k in range(num_reps):
#
# | H
# | exp(i*alpha(k)*sigz)
# U^(2^k)-----@
# | H
# | measure n(k) here
# ```
#
# $\alpha(0) = n(0) =0$
#
# $\alpha(k+1) = 2\alpha(k) + \frac{\pi}{2} n(k)$
#
# $\alpha(k) = \pi 2^{k-2}\sum_{b=0}^{k-1} \frac{n(b)}{2^{b}}$
#
# $rads = \frac{\alpha(num\_reps-1)}{2^{num\_reps-2}}$
#
#
# References
# ----------
#
# 1. https://arxiv.org/abs/1512.06860 by Google team
#
# 2. https://arxiv.org/abs/1605.03590 by Microsoft team
#
#
# First change your working directory to the qubiter directory in your computer, and add its path to the path environment variable.
import os
import sys
print(os.getcwd())
os.chdir('../../')
print(os.getcwd())
sys.path.insert(0,os.getcwd())
from qubiter.SEO_writer import *
from qubiter.SEO_simulator import *
from qubiter.StateVec import *
import numpy as np
import random as ran
# +
# The true phase angle to be estimated; e^{i*rads} is the eigenvalue sought.
rads = 2*np.pi*(1/16 + 1/8 + 1e-8)
z_axis = 3          # axis code for sigma_Z rotations
num_bits = 2        # one pointer qubit (bit 0) + one target qubit (bit 1)
num_reps = 15       # number of circuit passes (iterations k)
file_prefix = 'ph_est_iterative'
emb = CktEmbedder(num_bits, num_bits)
alpha = 0           # accumulated feedback angle alpha(k); alpha(0) = 0
ptr_state = 0       # latest pointer measurement n(k); n(0) = 0
ptr_st_list = []    # timeline of pointer measurements
for k in range(num_reps):
    print('--------k=', k)
    # refresh angle alpha to twice its previous value plus
    # \pi/2 times latest measurement of pointer qubit
    alpha = 2*alpha + np.pi*ptr_state/2
    # NOTE(review): the label says alpha/2^(num_reps) but the code divides by
    # 2^(num_reps-2), which matches the markdown formula -- the label is off.
    print('rads, alpha/2^(num_reps)=', rads, alpha/(1 << num_reps-2))
    # write circuit: H, feedback rotation exp(i*alpha*sigz), controlled
    # U^(2^k), then H again on the pointer qubit (bit 0)
    wr = SEO_writer(file_prefix, emb)
    wr.write_one_bit_gate(0, OneBitGates.had2)
    wr.write_one_bit_gate(0, OneBitGates.rot_ax, [alpha, z_axis])
    control_pos = 0
    target_pos = 1
    trols = Controls.new_knob(num_bits, control_pos, kind=True)
    # Controlled U^(2^k): the kickback phase doubles with each repetition
    wr.write_controlled_one_bit_gate(
        target_pos, trols, OneBitGates.rot_ax, [(1 << k)*rads, z_axis])
    wr.write_one_bit_gate(0, OneBitGates.had2)
    wr.close_files()
    # simulate circuit starting from |00>
    init_st_vec = StateVec.get_standard_basis_st_vec([0, 0])
    sim = SEO_simulator(file_prefix, num_bits, init_st_vec)
    StateVec.describe_st_vec_dict(sim.cur_st_vec_dict,
            print_st_vec=True, do_pp=True, omit_zero_amps=True, show_pp_probs=True)
    # find final state of pointer qubit
    fin_st_vec = sim.cur_st_vec_dict["pure"]
    # dictionary with key=qubit, value=final (P(0), P(1))
    bit_to_probs = StateVec.get_bit_probs(num_bits, fin_st_vec.get_pd())
    p0, p1 = bit_to_probs[0]
    # random float between 0 and 1 -- sample the pointer measurement
    # from its probability distribution
    x = ran.random()
    if x < p0:
        ptr_state = 0
    else:
        ptr_state = 1
    ptr_st_list.append(ptr_state)
    print('ptr_state=', ptr_state)
print('---------------------')
print('timeline of bit 0 measurements', ptr_st_list)
# Final estimate: rads ~ alpha(num_reps-1)/2^(num_reps-2)
print("rads, alpha(num_reps-1)/2^(num_reps-2)", rads, alpha/(1 << num_reps-2))
# -
| qubiter/jupyter_notebooks/phase_est_iterative.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <font size="+5">#03. Decision Tree. A Supervised Classification Model</font>
# <ul>
# <li>Doubts? → Ask me in <img src="https://emoji.gg/assets/emoji/3970-discord.png" style="height: 1em; vertical-align: middle;"> <a href="https://discord.gg/cmB3KGsqMy">Discord</a></li>
# <li>Tutorials → <img src="https://openmoji.org/php/download_asset.php?type=emoji&emoji_hexcode=E044&emoji_variant=color" style="height: 1em; vertical-align: middle;"> <a href="https://www.youtube.com/channel/UCovCte2I3loteQE_kRsfQcw">YouTube</a></li>
# <li>Book Private Lessons → <span style="color: orange">@</span> <a href="https://sotastica.com/reservar">sotastica</a></li>
# </ul>
# # Load the Data
# Load the dataset from [CIS](https://www.cis.es/cis/opencms/ES/index.html) executing the following lines of code:
# +
import pandas as pd
url = 'https://raw.githubusercontent.com/py-thocrates/data/main/internet_usage_spain.csv'
df = pd.read_csv(url)
df.head()
# -
# > - The goal of this dataset is
# > - To predict `internet_usage` of **people** (rows)
# > - Based on their **socio-demographical characteristics** (columns)
# # Data Preprocessing
# > - Categorical variables contains `text`
# > - If you pass the data to the model
# > - It won't know how to interpret the text
# > - Therefore, you need a way to convert text values
# > - To numbers such as `0` or `1` **Dummy Variables**
# > - Which `function()` do you use to achieve this?
# # `DecisionTreeClassifier()` Model in Python
# ## Build the Model
# > 1. **Necesity**: Build Model
# > 2. **Google**: How do you search for the solution?
# > 3. **Solution**: Find the `function()` that makes it happen
# ## Code Thinking
# > - By the time you get to the actual `function()` to compute the model
# > - You'll notice they're asking you for two parameters:
# > 1. `X`: **explanatory variables**
# > 2. `y`: **target variable**
# ### Finally `fit()` the Model
# > - Pass the `objects` that **contains the sequence of numbers** of the two variables
# > - To the `parameters` of the function that computes the Linear Regression Model
# ## Make a Prediction
# > You should already know that ML models are
# > mathematical equations that you optimize to
# > calculate predictions
# >
# > As you `fit()` the model, the mathematical function
# > is built. Therefore, could you make a prediction?
# >
# > - Select a random person:
person = df.sample()
# > - Compute the probability of this person for `internet_usage`:
# ## Visualize the Model
# > 1. Use `plot_tree()`, contained in `tree` module from `sklearn` library
# > 2. Then, you may use `[shift] + [tab]` to see what **parameters** the function is asking for
# > - Could you justify how we got to the prediction in previous exercise?
# # Decision Tree Algorithm Explanation
# > _You may watch the following video to understand how the Decision Tree works:_
# +
# %%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/7VeUPuFGJHk" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
# -
# # Model Interpretation
# > - Which is the variable in the **root node**?
# > - Is it the most **important feature**? Why?
# + [markdown] tags=[]
# # Real vs. Predicted Data
# -
# > - Compute all predictions:
# >
# > - `model.predict()`
# > How good is our model?
# >
# > 1. Create a new `DataFrame` column to assign the predictions.
# >
# > - `df['pred'] = predictions`
# > - `df.sample(10)` to check if predictions are equal to reality:
# > 2. How to measure the **model's error**?
# > - How good is our model to predict reality?
# > - `model.score()`
# # Confusion Matrix
# > 1. Use function `confusion_matrix()`, or `plot_confusion_matrix()`
# > 2. What represents the first number in the matrix?
# # Other Metrics
# ## Sensitivity
# ## Specificity
# ## Classification Report
# > 1. Use `classification_report()`
# > 2. Save the resulting object in `report`
# > 3. `print(report)`
# > 4. Are some numbers equal to the ones we calculated before (Sensitivity & Specificity?
# ## ROC Curve
# > - Use `plot_roc_curve()`
# >
# > _You may watch the following video to understand the metric:_
# %%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/4jRBRDbJemM" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
# # Achieved Goals
# _Haz doble click sobre esta celda y pon una `X` dentro de las casillas [X] si crees que has superado los objetivos:_
#
# - [ ] Entender cómo se usan los **Algoritmos de Árboles**.
# - [ ] Entender otra forma de **comparar los datos reales con las predicciones** del modelo.
# - [ ] No todas las visualizaciones de modelos son iguales. En este caso también podemos **visualizar un árbol** para interpretar el modelo.
# - [ ] Distinguir el papel de la **probabilidad** a la hora de optimizar este tipo de modelos.
# - [ ] Saber determinar **por qué una variable es importante** en el modelo. Es decir, por qué aporta diferencias significativas.
# - [ ] Entender la necesidad de **normalizar** los datos.
| #04. Decision Tree. A Supervised Classification Model/.ipynb_checkpoints/04practice-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.6 64-bit (''alexl'': virtualenv)'
# name: python36664bitalexlvirtualenvb9f0b0a3af2a4e06a89ee778b9503914
# ---
# ## Pandas exercises
# #### The first part of the tutorial is intended for Windows users who failed or were unable to install the VM in the course "big-data-integration-processing" on Coursera. This part will focus on how to load data with PySpark on Windows.
# ####
# +
from pyspark import SparkConf, SparkContext, SQLContext
# Create a spark configuration with 20 threads.
# This code will run locally on master
conf = (SparkConf ()
. setMaster("local[20]")
. setAppName("sample app for reading streaming sources")
. set("spark.executor.memory", "2g"))
sc = SparkContext(conf=conf)
# +
import os
# absolute path to this file
cwd = os.getcwd()
sqlContext = SQLContext(sc)
#df = sqlContext.read.format('com.databricks.spark.csv').options(header='true', inferschema='true').load('final-project/country-list.csv')
country_lines = sc.textFile(os.path.join (cwd, "big-data-3/final-project/country-list.csv"))
# -
# Convert each line into a pair of words
words = country_lines.map(lambda line: line.split(","))
#words = country_lines.flatMap(lambda line: line.split(","))
# +
# Convert each pair of words into a tuple
#Assign initial count value to each word. Next, we will create tuples for each word with an initial count of 1
#country_tuples = words.map(lambda word : (word, 1))
# -
# Create the DataFrame, look at schema and contents
countryDF = sqlContext.createDataFrame(words, ["country", "code"])
countryDF.printSchema()
countryDF.take(3)
# Read tweets CSV file into RDD of lines
tweet_lines = sc.textFile(os.path.join (cwd, "big-data-3/final-project/tweets.csv"))
# Clean the data: some tweets are empty. Remove the empty tweets using filter()
#Method 1
tweet_lines_clean = tweet_lines.filter(lambda row: row != '')
tweet_lines_clean.count()
# Perform WordCount on the cleaned tweet texts. (note: this is several lines.)
tweet_words = tweet_lines_clean.flatMap(lambda line: line.split(" "))
tweet_tuples = tweet_words.map(lambda word : (word, 1))
tweet_counts = tweet_tuples.reduceByKey(lambda a,b : (a+b)).sortBy(lambda a: a[1], False)
# Create the DataFrame of tweet word count
import pandas as pd
tweetDF = sqlContext.createDataFrame(tweet_counts, ["word", "count"])
#tweetDF["count"] = pd.to_numeric(tweetDF["count"])
tweetDF.printSchema()
tweetDF.take(5)
# Join the country and tweet data frames (on the appropriate column)
# BUG FIX: `desc` was used here but only imported in a *later* cell, so
# running the notebook top-to-bottom raised NameError. Import it here.
from pyspark.sql.functions import desc

# Inner join keeps only tweet words that are country names,
# sorted by tweet word count, descending.
join_table = tweetDF.join(countryDF, tweetDF.word == countryDF.country, how='inner').sort(desc("count"))
join_table.printSchema()
join_table.show()
# Question 1: number of distinct countries mentioned
join_table.count()
# Question 2: number of countries mentioned in tweets.
from pyspark.sql.functions import sum
#join_table.rdd.map(lambda x: (1,x[1])).reduceByKey(lambda x,y: x + y).collect()[0][1]
join_table.describe()
# Table 1: top three countries and their counts.
from pyspark.sql.functions import desc
join_table.show()
# Table 2: counts for Wales, Iceland, and Japan.
| big-data-integration-processing/week6-2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # EXAMPLE 1: Correct and display
# This example covers the basic usage of the dual-PRF velocity correction function:
#
# - [Load raw data with Py-ART](#load_data_pyart)
# - [Apply the correction function](#apply_vcor)
# - [Display the results](#display)
#
# **EVENT**: A tornado associated to a rotating cell that took place near Cardona town (41º54'51'' N, 1º40'52'' E) on the 7th of January 2018.
# **DATA**: It works with data from the weather radar network (XRAD) of the Meteorological Service of Catalonia. This data is in IRIS RAW format. In this particular case, the data used is from Creu del Vent (CDV) radar.
# **LIBRARIES/FUNCTIONS:**
# +
import pyart
import vcor_dual_prf as vcor
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# -
# **RADAR SETTINGS:**
#
# Input file, elevation/sweep number
file_in = '../sample_data/CDV180107_tornado.RAW'
sw = 3
# **DISPLAY SETTINGS**
#
# Site coordinates (decimal lat/lon), display region limits (decimal lat/lon), tick locations
ylim = (-40, 60)
xlim = (-25, 75)
range_rings = range(25, 150, 25)
# Set a custom colormap for the velocity
cmaplst_l = [plt.get_cmap('ocean', 22)(i) for i in list(range(1, 22))] + [plt.get_cmap('gnuplot_r', 30)(i) for i in list(range(1, 21))]
cmap_vel = mpl.colors.ListedColormap(cmaplst_l)
cmap_vel.set_bad('lightgrey', 1.)
# <a id='load_data_pyart'></a>
# ### Load raw data with Py-ART
# Py-ART reads the data and metadata in the input file into a dictionary structure (radar object instance):
rad = pyart.io.read(file_in)
# Let's check the data fields that have been loaded:
rad.fields.keys()
# For plotting purposes only, we retrieve the Nyquist velocity stored in the radar instance metadata :
# Nyquist velocity
v_ny = rad.instrument_parameters['nyquist_velocity']['data'][0]
v_ny
# <a id='apply_vcor'></a>
# ### Apply the correction function
# As a first example, we apply the correction function for all 4 the methods available.
#
# The function parameters specified in each case correspond (as precisely as possible) to the ones detailed in the original publications. When a parameter is not explicitly mentioned in the publication, we leave the one set by default in the function.
#
# Application of the function includes the addition of a new data field in the radar instance.
#
# **(a) 'mean' method**:
#
# <NAME>. and <NAME>., 2003: Correction of dual PRF velocity errors for operational Doppler weather radars. *J. Atmos. Oceanic Technol.*, 20(4), 429-442
vcor.correct_dualprf(radar=rad, two_step=False,
method_det='mean', kernel_det=np.ones((3, 3)),
vel_field='velocity', new_field='vcor_mean')
# **(b) 'median' method**:
#
# <NAME>. and <NAME>., 2003: Analysis and correction of dual PRF velocity data. *J. Atmos. Oceanic Technol.*, 20(4), 443-453
vcor.correct_dualprf(radar=rad, two_step=False,
method_det='median', kernel_det=np.ones((3, 3)),
vel_field='velocity', new_field='vcor_median')
# **(c) 'cmean_sc' method**:
#
# <NAME>., et al., 2017: Correction of dual-PRF Doppler velocity outliers in the presence of aliasing. *J. Atmos. Oceanic Technol.*, 34(7), 1529-1543
vcor.correct_dualprf(radar=rad, two_step=True,
method_det='cmean_sc', kernel_det=np.ones((7, 7)),
method_cor='median', kernel_cor=np.ones((7, 7)),
vel_field='velocity', new_field='vcor_cmean_sc')
# **(d) 'cmean' method**: <NAME>., et al., 2018: Radar network–based detection of mesocyclones at the German
# Weather Service. *J. Atmos. Oceanic Technol.*, 35(2), 299-321
vcor.correct_dualprf(radar=rad, two_step=True,
method_det='cmean', kernel_det=np.ones((11, 11)),
method_cor='cmean', kernel_cor=np.ones((5, 5)),
vel_field='velocity', new_field='vcor_cmean')
# Let's check that all the corrections have been stored in the radar instance:
rad.fields.keys()
# <a id='display'></a>
# ### Display the results
# +
display = pyart.graph.RadarDisplay(rad)
fig = plt.figure(figsize=(8,7))
ax = fig.add_subplot(111)
display.plot('velocity', sw, ax=ax, vmin=-v_ny, vmax=v_ny, mask_outside=False,
cmap=cmap_vel, colorbar_flag=True, title_flag=False)
display.plot_range_rings(range_rings, lw=0.8, ls=':', ax=ax)
display.plot_cross_hair(0.5, ax=ax)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
# +
# Compare the four dual-PRF correction methods side by side.
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(18,15))
# The same display code was copy-pasted four times in the original cell;
# loop over (field, axis) pairs instead. ax1..ax4 are kept as names so any
# later cell referring to an individual axis still works.
panels = [('vcor_mean', ax1), ('vcor_median', ax2),
          ('vcor_cmean_sc', ax3), ('vcor_cmean', ax4)]
for field, panel_ax in panels:
    display.plot(field, sw, ax=panel_ax, vmin=-v_ny, vmax=v_ny, mask_outside=False,
                 cmap=cmap_vel, colorbar_flag=True, title_flag=False)
    display.plot_range_rings(range_rings, lw=0.8, ls=':', ax=panel_ax)
    display.plot_cross_hair(0.5, ax=panel_ax)
    panel_ax.set_xlim(xlim)
    panel_ax.set_ylim(ylim)
fig.tight_layout()
plt.show()
| examples/01_correct_and_display.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#importing some useful packages
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
# +
#reading in an image
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
ysize = image.shape[0]
xsize = image.shape[1]
color_select = np.copy(image)
plt.imshow(color_select, cmap="gray") # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
# -
red_threshold = 200
green_threshold = 160
blue_threshold = 160
rgb_threshold = [red_threshold, green_threshold, blue_threshold]
print(image[:,:,0],"\n")
print(image[:,:,0]< rgb_threshold[0],"\n")
# print(image[:,:,1],"\n")
# print(image[:,:,2],"\n")
plt.imshow(image[:,:,0])
I_red = np.copy(image)
I_red[:,:,1] = 0
I_red[:,:,2] = 0
plt.imshow(I_red)
plt.imshow(image[:,:,1], cmap="gray")
plt.imshow(image[:,:,2])
# +
# Identify pixels below the threshold
thresholds = (image[:,:,0] < rgb_threshold[0]) \
| (image[:,:,1] < rgb_threshold[1]) \
| (image[:,:,2] < rgb_threshold[2])
print(thresholds)
color_select[thresholds] = [0,0,0]
# Display the image
plt.imshow(color_select)
plt.show()
# +
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
# Read in and grayscale the image
image = mpimg.imread('exit-ramp.jpg')
gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
# Define a kernel size and apply Gaussian smoothing
kernel_size = 5
blur_gray = cv2.GaussianBlur(gray,(kernel_size, kernel_size),0)
# Define our parameters for Canny and apply
low_threshold = 50
high_threshold = 150
edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
# Next we'll create a masked edges image using cv2.fillPoly()
mask = np.zeros_like(edges)
ignore_mask_color = 255
# This time we are defining a four sided polygon to mask
imshape = image.shape
vertices = np.array([[(0,imshape[0]),(450, 290), (490,290), (imshape[1],imshape[0])]], dtype=np.int32)
cpy = np.copy(image)
cv2.line(cpy, (0,imshape[0]),(450, 290), (0, 255, 0), 10)
cv2.line(cpy, (490,290), (imshape[1],imshape[0]), (0, 255, 0), 10)
plt.imshow(cpy)
# cv2.fillPoly(mask, vertices, ignore_mask_color)
# masked_edges = cv2.bitwise_and(edges, mask)
# Define the Hough transform parameters
# Make a blank the same size as our image to draw on
# rho = 2 # distance resolution in pixels of the Hough grid
# theta = np.pi/180 # angular resolution in radians of the Hough grid
# threshold = 15 # minimum number of votes (intersections in Hough grid cell)
# min_line_length = 40 #minimum number of pixels making up a line
# max_line_gap = 20 # maximum gap in pixels between connectable line segments
# line_image = np.copy(image)*0 # creating a blank to draw lines on
# # Run Hough on edge detected image
# # Output "lines" is an array containing endpoints of detected line segments
# lines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]),
# min_line_length, max_line_gap)
# # Iterate over the output "lines" and draw lines on a blank image
# for line in lines:
# for x1,y1,x2,y2 in line:
# cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)
# # Create a "color" binary image to combine with line image
# color_edges = np.dstack((edges, edges, edges))
# # Draw the lines on the edge image
# lines_edges = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0)
#plt.imshow(lines_edges)
# -
| test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/pgmpy/pgmpy_notebook/blob/master/notebooks/3.%20Causal%20Bayesian%20Networks.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="qR-vG2CF4jaz"
# # Causal Bayesian Networks
#
# Causal Inference is a new feature for pgmpy, so I wanted to develop a few examples which show off the features that we're developing!
#
# This particular notebook walks through the 5 games that used as examples for building intuition about backdoor paths in *The Book of Why* by Judea Peal. I have consistently been using them to test different implementations of backdoor adjustment from different libraries and include them as unit tests in pgmpy, so I wanted to walk through them and a few other related games as a potential resource to both understand the implementation of CausalInference in pgmpy, as well as develope some useful intuitions about backdoor paths.
#
# ## Objective of the Games
#
# For each game we get a causal graph, and our goal is to identify the set of deconfounders (often denoted $Z$) which will close all backdoor paths from nodes $X$ to $Y$. For the time being, I'll assume that you're familiar with the concept of backdoor paths, though I may expand this portion to explain it.
# + cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 258} colab_type="code" id="p1uBjhCQgaaG" outputId="25270025-2cd2-4ce6-a8e6-7d16e2855492"
#@title Clone the Development Repo & Install Requirements
#@markdown Because the Causal Inference class is currently in dev, we will actually need to pull the code from GitHub. This cell will give us a pretty good development environment for interactively developing and testing the CausalModel class and its methods.
#@markdown You only need to run this the first time you've started the kernel.
# %%sh
git clone https://github.com/mrklees/pgmpy.git
# mv /content/pgmpy /content/pgmpydev
# cd pgmpydev/
git checkout feature/causalmodel
git pull
#@markdown In testing the CausalModel and Bayesian Network portion of pgmpy we've actually been able to use up to date version of Networkx and other packages, but we may be forced to downgrade to networkx 1.11 if errors arise.
#pip install -U -r requirements-dev.txt
# + cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="EkFVGD3Tv7ma" outputId="a3fd160d-dcdf-4495-e8ea-e5b3aedadaf5"
#@title Imports
# %load_ext autoreload
# %autoreload 2
import sys
sys.path.append('/content/pgmpydev')
# !pip3 install -q daft
import matplotlib.pyplot as plt
# %matplotlib inline
import daft
from daft import PGM
# We can now import the development version of pgmpy
from pgmpy.models.BayesianModel import BayesianModel
from pgmpy.inference.causal_inference import CausalInference
def convert_pgm_to_pgmpy(pgm):
    """Build a pgmpy BayesianModel from the edges of a Daft PGM.

    Every Daft edge (node1 -> node2) becomes a directed edge of the
    BayesianModel; node names are carried over unchanged.
    """
    edge_pairs = []
    for edge in pgm._edges:  # daft keeps edges in insertion order
        edge_pairs.append((edge.node1.name, edge.node2.name))
    return BayesianModel(edge_pairs)
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 201} colab_type="code" id="P90trQAQ7Clc" outputId="2e62d5b8-de0e-4e28-b14d-5c03bcd1ab61"
#@title # Game 1
#@markdown While this is a "trivial" example, many statisticians would consider including either or both A and B in their models "just for good measure". Notice though how controlling for A would close off the path of causal information from X to Y, actually *impeding* your effort to measure that effect.
# Graph: X -> A -> Y with an extra child A -> B; no arrows point into X.
pgm = PGM(shape=[4, 3])
pgm.add_node(daft.Node('X', r"X", 1, 2))
pgm.add_node(daft.Node('Y', r"Y", 3, 2))
pgm.add_node(daft.Node('A', r"A", 2, 2))
pgm.add_node(daft.Node('B', r"B", 2, 1))
pgm.add_edge('X', 'A')
pgm.add_edge('A', 'Y')
pgm.add_edge('A', 'B')
pgm.render()
plt.show()
# + cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="yQyYJEC83ODX" outputId="07597081-bbec-4209-db8f-b731de34ae3a"
#@markdown Notice how there are no nodes with arrows pointing into X. Said another way, X has no parents. Therefore, there can't be any backdoor paths confounding X and Y. pgmpy will confirm this in the following way:
game1 = convert_pgm_to_pgmpy(pgm)
inference1 = CausalInference(game1)
# Fixed grammar in the user-facing prompt strings ("Are there are" -> "Are there any").
print(f"Are there any active backdoor paths? {inference1._has_active_backdoors('X', 'Y')}")
adj_sets = inference1.get_all_backdoor_adjustment_sets("X", "Y")
print(f"If so, what are the possible backdoor adjustment sets? {adj_sets}")
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 258} colab_type="code" id="b5RJ0UsH_kQ4" outputId="3e4f9430-fca7-40d4-f8b3-ffd241c6a8f8"
#@title # Game 2
#@markdown This graph looks harder, but actually is also trivial to solve. The key is noticing the one backdoor path, which goes from X <- A -> B <- D -> E -> Y, has a collider at B (or a 'V structure'), and therefore the backdoor path is closed.
pgm = PGM(shape=[4, 4])
pgm.add_node(daft.Node('X', r"X", 1, 1))
pgm.add_node(daft.Node('Y', r"Y", 3, 1))
pgm.add_node(daft.Node('A', r"A", 1, 3))
pgm.add_node(daft.Node('B', r"B", 2, 3))
pgm.add_node(daft.Node('C', r"C", 3, 3))
pgm.add_node(daft.Node('D', r"D", 2, 2))
pgm.add_node(daft.Node('E', r"E", 2, 1))
pgm.add_edge('X', 'E')
pgm.add_edge('A', 'X')
pgm.add_edge('A', 'B')
pgm.add_edge('B', 'C')
pgm.add_edge('D', 'B')
pgm.add_edge('D', 'E')
pgm.add_edge('E', 'Y')
pgm.render()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="2d6Ezs6PDDON" outputId="c597c0df-3271-4e8e-9a68-3200c7f105a5"
graph = convert_pgm_to_pgmpy(pgm)
inference = CausalInference(graph)
# Fixed grammar in the user-facing prompt strings ("Are there are" -> "Are there any").
print(f"Are there any active backdoor paths? {inference._has_active_backdoors('X', 'Y')}")
adj_sets = inference.get_all_backdoor_adjustment_sets("X", "Y")
print(f"If so, what are the possible backdoor adjustment sets? {adj_sets}")
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 258} colab_type="code" id="Pg6T2WA3DZ8n" outputId="5d79efd3-b8f7-46a8-dcf0-da2b7997be3c"
#@title # Game 3
#@markdown This game actually requires some action. Notice the backdoor path X <- B -> Y. This confounding pattern is one of the clearest signs that we'll need to control for something, in this case B.
pgm = PGM(shape=[4, 4])
pgm.add_node(daft.Node('X', r"X", 1, 1))
pgm.add_node(daft.Node('Y', r"Y", 3, 1))
pgm.add_node(daft.Node('A', r"A", 2, 1.75))
pgm.add_node(daft.Node('B', r"B", 2, 3))
pgm.add_edge('X', 'Y')
pgm.add_edge('X', 'A')
pgm.add_edge('B', 'A')
pgm.add_edge('B', 'X')
pgm.add_edge('B', 'Y')
pgm.render()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="l0GI2mM3WQeI" outputId="7ca879e8-e678-42d9-9d03-404ffb67c51c"
graph = convert_pgm_to_pgmpy(pgm)
inference = CausalInference(graph)
# Fixed grammar in the user-facing prompt strings ("Are there are" -> "Are there any").
print(f"Are there any active backdoor paths? {inference._has_active_backdoors('X', 'Y')}")
adj_sets = inference.get_all_backdoor_adjustment_sets("X", "Y")
print(f"If so, what are the possible backdoor adjustment sets? {adj_sets}")
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 258} colab_type="code" id="XP2ORZw8EtyZ" outputId="9f267191-b408-483b-87d9-754c949a7d72"
#@title # Game 4
#@markdown Pearl named this particular configuration "M Bias", not only because of its shape, but also because of the common practice of statisticians to want to control for B in many situations. However, notice how in this configuration X and Y start out as *not confounded* and how by controlling for B we would actually introduce confounding by opening the path at the collider, B.
pgm = PGM(shape=[4, 4])
pgm.add_node(daft.Node('X', r"X", 1, 1))
pgm.add_node(daft.Node('Y', r"Y", 3, 1))
pgm.add_node(daft.Node('A', r"A", 1, 3))
pgm.add_node(daft.Node('B', r"B", 2, 2))
pgm.add_node(daft.Node('C', r"C", 3, 3))
pgm.add_edge('A', 'X')
pgm.add_edge('A', 'B')
pgm.add_edge('C', 'B')
pgm.add_edge('C', 'Y')
pgm.render()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="CBaGzLKSFmnQ" outputId="95c1c6a5-dbe3-4768-8f37-94d9547e9db6"
graph = convert_pgm_to_pgmpy(pgm)
inference = CausalInference(graph)
# Fixed grammar in the user-facing prompt strings ("Are there are" -> "Are there any").
print(f"Are there any active backdoor paths? {inference._has_active_backdoors('X', 'Y')}")
adj_sets = inference.get_all_backdoor_adjustment_sets("X", "Y")
print(f"If so, what are the possible backdoor adjustment sets? {adj_sets}")
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 258} colab_type="code" id="ZAbSVPvZFxZH" outputId="88a7fff6-0744-4dda-b5e9-8d93dcea84d7"
#@title # Game 5
#@markdown This, the last game in The Book of Why, is the most complex. In this case we have two backdoor paths, one going through A and the other through B, and it's important to notice that if we only control for B then the path: X <- A -> B <- C -> Y (which starts out as closed because B is a collider) actually is opened. Therefore we have to either close both A and B or, as astute observers will notice, we can also just close C and completely close both backdoor paths. pgmpy will nicely confirm these results for us.
pgm = PGM(shape=[4, 4])
pgm.add_node(daft.Node('X', r"X", 1, 1))
pgm.add_node(daft.Node('Y', r"Y", 3, 1))
pgm.add_node(daft.Node('A', r"A", 1, 3))
pgm.add_node(daft.Node('B', r"B", 2, 2))
pgm.add_node(daft.Node('C', r"C", 3, 3))
pgm.add_edge('A', 'X')
pgm.add_edge('A', 'B')
pgm.add_edge('C', 'B')
pgm.add_edge('C', 'Y')
pgm.add_edge("X", "Y")
pgm.add_edge("B", "X")
pgm.render()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="IF1jYMq_eNHd" outputId="68bcc15d-c1aa-40c4-fb61-b400e7c31abe"
graph = convert_pgm_to_pgmpy(pgm)
inference = CausalInference(graph)
# Fixed grammar in the user-facing prompt strings ("Are there are" -> "Are there any").
print(f"Are there any active backdoor paths? {inference._has_active_backdoors('X', 'Y')}")
adj_sets = inference.get_all_backdoor_adjustment_sets("X", "Y")
print(f"If so, what are the possible backdoor adjustment sets? {adj_sets}")
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 258} colab_type="code" id="dSjZqd5fHF06" outputId="05857131-230d-4dd9-88d0-25d4b800eb68"
#@title # Game 6
#@markdown So these are no longer drawn from The Book of Why, but were either drawn from another source (which I will reference) or developed to try to induce a specific bug.
#@markdown This example is drawn from Causality by Pearl on p. 80. This example is kind of interesting because there are many possible combinations of nodes which will close the two backdoor paths which exist in this graph. It turns out that D plus any other node in {A, B, C, E} will deconfound X and Y.
pgm = PGM(shape=[4, 4])
pgm.add_node(daft.Node('X', r"X", 1, 1))
pgm.add_node(daft.Node('Y', r"Y", 3, 1))
pgm.add_node(daft.Node('A', r"A", 1, 3))
pgm.add_node(daft.Node('B', r"B", 3, 3))
pgm.add_node(daft.Node('C', r"C", 1, 2))
pgm.add_node(daft.Node('D', r"D", 2, 2))
pgm.add_node(daft.Node('E', r"E", 3, 2))
pgm.add_node(daft.Node('F', r"F", 2, 1))
pgm.add_edge('X', 'F')
pgm.add_edge('F', 'Y')
pgm.add_edge('C', 'X')
pgm.add_edge('A', 'C')
pgm.add_edge('A', 'D')
pgm.add_edge('D', 'X')
pgm.add_edge('D', 'Y')
pgm.add_edge('B', 'D')
pgm.add_edge('B', 'E')
pgm.add_edge('E', 'Y')
pgm.render()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="30OIiRt7raN2" outputId="4c285443-724c-4988-b9a6-2ad2fcf2654e"
graph = convert_pgm_to_pgmpy(pgm)
inference = CausalInference(graph)
# Fixed typos in the user-facing prompt strings ("Are there are", "Ehat's").
print(f"Are there any active backdoor paths? {inference._has_active_backdoors('X', 'Y')}")
bd_adj_sets = inference.get_all_backdoor_adjustment_sets("X", "Y")
print(f"If so, what are the possible backdoor adjustment sets? {bd_adj_sets}")
fd_adj_sets = inference.get_all_frontdoor_adjustment_sets("X", "Y")
print(f"What are the possible frontdoor adjustment sets? {fd_adj_sets}")
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 201} colab_type="code" id="Z4pkuyOwM9xq" outputId="82e581df-b378-423c-95bd-d16b901c5296"
#@title # Game 7
#@markdown This game tests the front door adjustment. B is taken to be unobserved, and therefore we cannot close the backdoor path X <- B -> Y.
pgm = PGM(shape=[4, 3])
pgm.add_node(daft.Node('X', r"X", 1, 1))
pgm.add_node(daft.Node('Y', r"Y", 3, 1))
pgm.add_node(daft.Node('A', r"A", 2, 1))
pgm.add_node(daft.Node('B', r"B", 2, 2))
pgm.add_edge('X', 'A')
pgm.add_edge('A', 'Y')
pgm.add_edge('B', 'X')
pgm.add_edge('B', 'Y')
pgm.render()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="m8DZd_FQ4uLV" outputId="b73751d4-8aa9-4ec1-ba10-d509fe13c1a1"
graph = convert_pgm_to_pgmpy(pgm)
inference = CausalInference(graph)
# Fixed typos in the user-facing prompt strings ("Are there are", "Ehat's").
print(f"Are there any active backdoor paths? {inference._has_active_backdoors('X', 'Y')}")
bd_adj_sets = inference.get_all_backdoor_adjustment_sets("X", "Y")
print(f"If so, what are the possible backdoor adjustment sets? {bd_adj_sets}")
fd_adj_sets = inference.get_all_frontdoor_adjustment_sets("X", "Y")
print(f"What are the possible frontdoor adjustment sets? {fd_adj_sets}")
# + colab={} colab_type="code" id="8zeU4DX0Bbxl"
| notebooks/3. Causal Bayesian Networks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Build per-dump summary tables of detection results for two baselines.
import glob
import os
from evaldets.results import DetectionResults
import pandas as pd
# Load one evaluator dump and sanity-check that every detection falls into
# exactly one of the three counted categories (TP / FP / EX).
dr = DetectionResults('~/reval_50/baseline_50/evaluator_dump_R50_101/')
assert dr.count_TP() + dr.count_FP() + dr.count_EX() == len(dr)
dr.count_TP(), dr.count_FP(), dr.count_EX()
# + active=""
# dr.AP_score()
# + active=""
# dr.finish_cocoeval()
# -
# One summary row per dump under reval_50, indexed by dump directory name.
dumps = glob.glob(os.path.expanduser('~/reval_50/baseline_50/*'))
print(pd.DataFrame([DetectionResults(d).summary() for d in dumps], index=map(os.path.basename, dumps)).sort_index())
# Same table for reval_05 with t_score=0.5 (presumably the score threshold
# used by summary() -- confirm against evaldets).
dumps = glob.glob(os.path.expanduser('~/reval_05/baseline_05/*'))
print(pd.DataFrame([DetectionResults(d).summary(t_score=0.5) for d in dumps], index=map(os.path.basename, dumps)).sort_index())
# And reval_05 again with summary() defaults, for comparison with the above.
dumps = glob.glob(os.path.expanduser('~/reval_05/baseline_05/*'))
print(pd.DataFrame([DetectionResults(d).summary() for d in dumps], index=map(os.path.basename, dumps)).sort_index())
| 2021-07-02-baseline-tables.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/UlyssesAlcantara/OOP_1-1/blob/main/Classes_and_Objects.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="ta0U8kzD1rZ0"
# Application 2 - Write a Python program that displays the full name of a student, student number, age, course and school. Create a class named OOP_1_1 and create an info() method to display the information of a student, with fullname, student_no, age, course, and school as attributes.
# + colab={"base_uri": "https://localhost:8080/"} id="3CMK2tbE3Ev-" outputId="64169ed7-8078-4727-bcdf-5f8f092cbeb2"
class OOP_1_1:
    """A simple student record.

    Holds a student's full name, student number, age, course, and
    school, and can display them all on one line via info().
    """

    def __init__(self, fullname, student_no, age, course, school):
        """Store every constructor argument as a same-named attribute."""
        details = {
            "fullname": fullname,
            "student_no": student_no,
            "age": age,
            "course": course,
            "school": school,
        }
        for attr, value in details.items():
            setattr(self, attr, value)

    def info(self):
        """Print the student's details, space-separated, on a single line."""
        fields = (self.fullname, self.student_no, self.age, self.course, self.school)
        print(*fields)
# Create one student record and display its details.
student = OOP_1_1("<NAME>",202102226,18,"BSCpE 1-1","CvSU")
student.info()
# + [markdown] id="bYWOEo1C9CzS"
#
#
#
#
#
# + id="Tc_fZOIv9K0-"
| Classes_and_Objects.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=false editable=false nbgrader={"checksum": "142ec8a68aa5e785bb22f9b6bfbdef3a", "grade": false, "grade_id": "cell-05e844b835aa5580", "locked": true, "schema_version": 1, "solution": false}
# # SLU11 - Advanced Validation: Exercises notebook
# + deletable=false editable=false nbgrader={"checksum": "f2ea5b78ef6f66c7d1b38d9285aa2cb0", "grade": false, "grade_id": "cell-e42bb037a88a592b", "locked": true, "schema_version": 1, "solution": false}
import pandas as pd
import numpy as np
# + [markdown] deletable=false editable=false nbgrader={"checksum": "ae9af53623e8465c2115f6a418e439ba", "grade": false, "grade_id": "cell-90cf0bd12ed05e21", "locked": true, "schema_version": 1, "solution": false}
# ## 1 Bias-variance trade-off
#
# ### Exercise 1: Detecting bias and variance in a simple model (not graded)
#
# Imagine you are measuring voting intentions, namely the percentage of people that will vote in a given political party A, as opposed to political party B.
#
# A way to build this model would be to randomly choose 50 numbers from the phone book, call each one and ask the responder who they planned to vote.
#
# Now, consider we got the following results:
#
# | Party A | Party B | Non-Respondents | Total |
# |---------|---------|-----------------|-------|
# | 13 | 16 | 21 | 50 |
#
# From the data, we estimate the probability of voting A as:
# + deletable=false editable=false nbgrader={"checksum": "2deda826a8f7a2fbcd9914412b084c05", "grade": false, "grade_id": "cell-5217b9fc496aa669", "locked": true, "schema_version": 1, "solution": false}
# Estimated P(vote for A) among the 29 decided respondents (13 A vs 16 B);
# the 21 non-respondents are ignored.
13 / (13 + 16)
# + [markdown] deletable=false editable=false nbgrader={"checksum": "8eca7e947d90399867b3c94ac25a31a0", "grade": false, "grade_id": "cell-2102e9daa24530a2", "locked": true, "schema_version": 1, "solution": false}
# Using our (flawed, as we will see) model, we predict a victory for the party B. But can we expect our model to generalize, coming the elections?
#
# In order to understand that, we need to idenfify sources of bias and variance.
#
# Below you will find a list of issues undermining the model. You need to identify which ones are sources of bias and which ones are sources of variance:
#
# 1. Only sampling people from the phone book (bias/~~variance~~)
# 2. Not following-up with non-respondents (bias/variance)
# 3. Not weighting responses by likeliness to vote (bias/variance)
# 4. Small sample size (bias/variance)
# + [markdown] deletable=false editable=false nbgrader={"checksum": "0a8fa1407dad370c90e9feff83c57829", "grade": false, "grade_id": "cell-1f15b4c639c45f70", "locked": true, "schema_version": 1, "solution": false}
# ### Exercise 2: Detecting bias and variance in the real world (not graded)
#
# For each of the following, identify if they are more likely to be sources of bias or variance:
#
# 1. Using very flexible models (e.g., non-parametric, non-linear), such as K-nearest neighbors or decision trees (bias/variance)
# 2. Using models with simplistic assumptions, such as linear or logistic regressions (bias/variance)
# 3. Increasing the polynomial degree of our hypothesis function (bias/variance)
# 4. Ignoring important features (bias/variance)
# + [markdown] deletable=false editable=false nbgrader={"checksum": "22945f1e74e8a4fc916f19cadea790a7", "grade": false, "grade_id": "cell-58a365f406a228fe", "locked": true, "schema_version": 1, "solution": false}
# ## 2 Train-test split
#
# ### Exercise 3: Create training and test datasets (graded)
# + deletable=false nbgrader={"checksum": "34b97fc95b6c3cca20a3a18739fdf981", "grade": false, "grade_id": "cell-34f35a43586a0e85", "locked": false, "schema_version": 1, "solution": true}
from sklearn.model_selection import train_test_split
def implement_hold_out_method(X, y, test_size=.4, random_state=0):
    """
    Implementing the holdout method, using sklearn.
    Args:
        X (pd.DataFrame): a pandas dataframe containing the features
        y (pd.Series): a pandas series containing the target variable
        test_size (float): proportion of the dataset to include in the test set
        random_state (int): the seed used by the random number generator
    Returns:
        X_train (pd.DataFrame): the features for the training examples
        X_test (pd.DataFrame): the features for the test examples
        y_train (pd.Series): target for the training set
        y_test (pd.Series): target for the test set
    """
    # NOTE: graded nbgrader exercise stub -- the student replaces the
    # NotImplementedError below with the actual train_test_split call.
    # use train_test_split to create the training and test datasets
    # X_train, X_test, y_train, y_test = ...
    # YOUR CODE HERE
    raise NotImplementedError()
    return X_train, X_test, y_train, y_test
# + deletable=false editable=false nbgrader={"checksum": "1fc5fe958d5f0c53633b35d6eb8fe896", "grade": true, "grade_id": "cell-d0784c116c6f5dda", "locked": true, "points": 5, "schema_version": 1, "solution": false}
"""Check that the solution is correct."""
from random import randint
def generate_test_data(m, n):
    """Generate a random (m x n) integer table and split off a target.

    Values are drawn uniformly from [0, m). Column 0 is removed and
    returned as the target series, leaving n - 1 feature columns.
    """
    features = pd.DataFrame(np.random.randint(0, m, size=(m, n)))
    target = features.pop(0)  # drops column 0 from `features` in place
    return features, target
# Grading check: with 100 rows and the default test_size of .4, expect a
# 60/40 train/test split over the 3 remaining feature columns.
X, y = generate_test_data(m=100, n=4)
X_train, X_test, y_train, y_test = implement_hold_out_method(X, y)
assert X_train.shape == (60, 3)
assert X_test.shape == (40, 3)
assert y_train.shape == (60,)
assert y_test.shape == (40,)
# + [markdown] deletable=false editable=false nbgrader={"checksum": "cb2d27a125a36b36149bd63c961a9c96", "grade": false, "grade_id": "cell-af6e4ef9cfe15659", "locked": true, "schema_version": 1, "solution": false}
# ### Exercise 4: Creating a validation dataset (graded)
# + deletable=false nbgrader={"checksum": "749a9ef6faac0a5d761ceda8b053cf52", "grade": false, "grade_id": "cell-8a287c4f6e7371cf", "locked": false, "schema_version": 1, "solution": true}
def implement_validation_dataset(X, y, test_size=.25, val_size=.25, random_state=0):
    """
    Implementing the holdout method with validation, using sklearn.
    Args:
        X (pd.DataFrame): a pandas dataframe containing the features
        y (pd.Series): a pandas series containing the target variable
        test_size (float): proportion of the dataset to include in the test set
        val_size (float): proportion of the dataset to include in the validation set
        random_state (int): the seed used by the random number generator
    Returns:
        X_train (pd.DataFrame): the features for the training examples
        X_test (pd.DataFrame): the features for the test examples
        X_val (pd.DataFrame): the features of the validation examples
        y_train (pd.Series): target for the training set
        y_test (pd.Series): target for the test set
        y_val (pd.Series): target for the validation set
    """
    # NOTE: graded nbgrader exercise stub -- students fill in each of the
    # three steps below in place of the NotImplementedError calls.
    # use train_test_split to create the test dataset
    # X_temp, X_test, y_temp, y_test = ... (1 line)
    # YOUR CODE HERE
    raise NotImplementedError()
    # compute the size of the validation dataset relative to the temp dataset
    # so that the final validation dataset corresponds to the validation_size
    # val_on_temp_size = ... (1 line)
    # YOUR CODE HERE
    raise NotImplementedError()
    # use train_test_split to create the train and validation datasets
    # X_train, X_val, y_train, y_val = ... (1 line)
    # YOUR CODE HERE
    raise NotImplementedError()
    return X_train, X_test, X_val, y_train, y_test, y_val
# + deletable=false editable=false nbgrader={"checksum": "a0f592ec8136db26319a22cf4c4ae09f", "grade": true, "grade_id": "cell-4cd9ffbea2630f66", "locked": true, "points": 5, "schema_version": 1, "solution": false}
"""Check that the solution is correct."""
# Grading check: with test_size=.25 and val_size=.25, the 1000 rows must
# split 500 train / 250 test / 250 validation over 4 feature columns.
X, y = generate_test_data(m=1000, n=5)
X_train, X_test, X_val, y_train, y_test, y_val = implement_validation_dataset(X, y)
assert X_train.shape == (500, 4)
assert X_test.shape == (250, 4)
assert X_val.shape == (250, 4)
assert y_train.shape == (500,)
assert y_test.shape == (250,)
assert y_val.shape == (250,)
# + [markdown] deletable=false editable=false nbgrader={"checksum": "1e117347fe671507c78b29eb08dbc0a4", "grade": false, "grade_id": "cell-970ade4434f10e80", "locked": true, "schema_version": 1, "solution": false}
# ## 3 Cross-validation
#
# ### Exercise 5: Implementing K-fold cross-validation (graded)
# + deletable=false nbgrader={"checksum": "027f703f4a23ede22694888eac039907", "grade": false, "grade_id": "cell-d345ee872dc32ad0", "locked": false, "schema_version": 1, "solution": true}
from sklearn.model_selection import KFold
def implement_cross_validation(X, y, n_splits, random_state=0):
    """
    Implementing the cross-validation split, to create multiple train
    and test set splits.
    Args:
        X (pd.DataFrame): a pandas dataframe containing the features
        y (pd.Series): a pandas series containing the target variable
        n_splits (int): number of folds, must be at least 2
        random_state (int): the seed used by the random number generator
    Returns:
        folds (dict): dictionary containing the multiple train, test splits
    """
    # NOTE: graded nbgrader exercise stub -- students fill in each of the
    # steps below in place of the NotImplementedError calls.
    # initialize the KFold cross-validator from sklearn, using n_splits and
    # random_sate
    # kf = ... (1 line)
    # YOUR CODE HERE
    raise NotImplementedError()
    # initialize empty dictionary 'folds'
    # folds = ... (1 line)
    # YOUR CODE HERE
    raise NotImplementedError()
    for train_index, test_index in kf.split(X):
        # use train_index and test_index to create X_train, X_test, y_train,
        # and y_test (for reference, check the sklearn documentation for kf,
        # and remember that X and y are both dataframes)
        # X_train, X_test = ...
        # y_train, y_test = ...
        # YOUR CODE HERE
        raise NotImplementedError()
        # create a 'fold' dictionary with keys 'X_train', 'X_test', 'y_train',
        # and 'y_test' and use the respective datasets as values (please make
        # sure you use the correct keys)
        # YOUR CODE HERE
        raise NotImplementedError()
        # create a variable k (int) with the number of the fold (each of the
        # iterations of the loop), to be used as key in the dict 'folds'
        # k = ...
        # YOUR CODE HERE
        raise NotImplementedError()
        # add the fold to the folds dictionary, using k as key and fold as
        # value (hint: check dict.update())
        # YOUR CODE HERE
        raise NotImplementedError()
    return folds
# + deletable=false editable=false nbgrader={"checksum": "27e9fcbf9cbd2084e9d0b270a55d1781", "grade": true, "grade_id": "cell-3c4d6569fa14994f", "locked": true, "points": 10, "schema_version": 1, "solution": false}
"""Check that the solution is correct."""
# Grading check: 5 folds over 500 rows gives 400 train / 100 test rows per
# fold, each with 4 feature columns.
X, y = generate_test_data(m=500, n=5)
folds = implement_cross_validation(X, y, 5)
assert len(folds) == 5
for fold in folds.values():
    assert fold['X_train'].shape == (400, 4)
    assert fold['X_test'].shape == (100, 4)
    assert fold['y_train'].shape == (400,)
    assert fold['y_test'].shape == (100,)
| units/SLU11_Model_Validation/Exercise notebook - SLU11 (Model Validation).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# # Health Exploration and Analytics
# ## What makes us sick?
# ## Author: <NAME> & the Data Science Course Students
# ## The analysis uses datasets from Gapminder (http://www.gapminder.org/data/):
# ### I. How do economical factors influence health across countries?
# ### Project idea:
# In this part of the project, I will try to analyze whether the weaker economic development of a country is important for the health of its citizens, as compared to the more developed ones. For this purpose, i will analyze three countries in the Balkan peninsula - Bulgaria, Romania and Macedonia - with three of Europe's leading countries known for their good lifestyle, namely Germany, Austria and Switzerland.
# ### 1. Read data.
# After reviewing of the data, I notice that they are in a convenient format for me, and all the tables have the same structure. Perfect.
# Peek at the raw Gapminder layout: one row per country, one column per year.
life_expectancy_at_birth = pd.read_excel("EconomicsAndDemographics/life_expectancy_at_birth.xlsx")
life_expectancy_at_birth.head(3)
# To be able to analyze the data, I decide to melt the per-year columns into rows under each country, keeping one survey indicator per column. That will make my job easier and clearer. I will write a function that I will reuse.
factors_data = None
def add_to_factors_data(file):
    """Read one wide-format factor spreadsheet, melt it to long format and
    outer-merge it into the module-level ``factors_data`` accumulator.

    The first column of the sheet is the country column; every other column
    is a year. The melted value column is named ``<factor>_v``.
    Returns the updated ``factors_data`` DataFrame.
    """
    global factors_data
    raw = pd.read_excel(file)
    # The sheet's first column header is the factor name (e.g. "GDP per capita").
    factor = raw.columns[0]
    melted = pd.melt(raw, id_vars=factor, var_name="Year", value_name=factor + "_v")
    melted.rename(columns={factor: "Country"}, inplace=True)
    # Year headers arrive as strings/objects; normalize to integers for merging.
    melted["Year"] = melted.Year.astype(np.int64)
    if factors_data is None:
        # First factor seeds the accumulator.
        factors_data = melted
    else:
        # Outer merge keeps every (Country, Year) pair seen in any factor file.
        factors_data = pd.merge(factors_data, melted, how="outer", on=("Country", "Year"))
    return factors_data
add_to_factors_data("EconomicsAndDemographics/agriculture_gdp_pct.xlsx")
add_to_factors_data("EconomicsAndDemographics/urban_population_pct.xlsx")
add_to_factors_data("EconomicsAndDemographics/gdp_per_capita.xlsx")
add_to_factors_data("EconomicsAndDemographics/industry_gdp_pct.xlsx")
add_to_factors_data("EconomicsAndDemographics/services_gdp_pct.xlsx")
add_to_factors_data("EconomicsAndDemographics/total_health_expenditure_gdp_pct.xlsx")
add_to_factors_data("EconomicsAndDemographics/coal_consumption_per_capita.xlsx")
add_to_factors_data("EconomicsAndDemographics/carbon_dioxide_emissions_per_capita.xlsx")
add_to_factors_data("EconomicsAndDemographics/electricity_consumption_per_capita.xlsx")
add_to_factors_data("EconomicsAndDemographics/under_five_mortality_rate.xlsx")
add_to_factors_data("EconomicsAndDemographics/life_expectancy_at_birth.xlsx")
# Short description of what each column means:
# * Country - name of country
# * Year - years with data for exploration
# * Agriculture - agriculture, value added (% of GDP)
# * Urban_population - urban population (% of total)
# * GDP - GDP per capita
# * Industry - industry, value added (% of GDP)
# * Services - services, etc., value added (% of GDP)
# * Healthy_costs - total expenditure on health as percentage of GDP (gross domestic product)
# * Coal_consumation - coal Consumption per person (tonnes oil equivalent)
# * CO2 - CO2 per capita
# * Electricity - electricity consumption, per capita (kWh)
# * Under_five_mortality - under-five mortality rate (per 1,000 live births)
# * Life_expectancy - Life expectancy at birth (years)
#
# Give the merged columns short readable names; order must match the merge order above.
factors_data.columns = ["Country", "Year", "Agriculture","Urban_population", "GDP", "Industry","Services","Healthy_costs",
                        "Coal_consumation","CO2", "Electricity", "Under_five_mortality", "Life_expectancy"]
# ### 2. Data analyzing.
# I notice that the data after 2010 has missing values, so I decide to use the 10-year range between 2000 and 2010 for my analysis.
# Six countries: three Balkan economies vs. three wealthy Western European ones.
country_list = ["Bulgaria", "Romania", "Macedonia, FYR", "Germany", "Austria", "Switzerland"]
# Restrict to the 2000-2010 decade, where the data has no large gaps.
economics_data = factors_data[(factors_data.Country.isin(country_list))
                              & ((factors_data.Year >=2000) & (factors_data.Year <= 2010))]
economics_data = economics_data.sort_values(["Country","Year"])
economics_data = economics_data.reset_index(drop=True)
economics_data.head()
# Inspect the resulting frame for dtypes and missing values.
economics_data.info()
# The missing coal-consumption values all belong to one country (Macedonia).
economics_data[economics_data.Coal_consumation.isnull()]
# Correlate coal consumption with life expectancy on the non-null rows only.
coal_correlation = economics_data[economics_data.Coal_consumation.notnull()]
coal_correlation["Coal_consumation"].corr(coal_correlation["Life_expectancy"])
# The correlation is weak (~0.31), so the column is dropped from the analysis.
economics_data = economics_data.drop(["Coal_consumation"], axis=1)
economics_data.info()
# След обследване на данните в колоната Coal_consumation установих, че липсващите данни са на държавата Македония. След като ги изключих и направих коефициент на корелация и продължителността на живота, установих че има ниска корелация 0.31, което ми дава право да я изключа и изтрия от анализа. След промяна на типа на датата, вече данните изглеждат чисти и пълни.
economics_data[["Country","Year", "Life_expectancy"]]
life_by_years = economics_data.groupby(by="Year").mean()
life_by_years
plt.bar(life_by_years.index,life_by_years.Life_expectancy)
plt.title("Life expectancy in years")
plt.xlabel("Years")
plt.ylabel("Count")
plt.show()
# И шесте дъражави за разглеждания период имат сходен ръст на продължителността на живота с около 2 години. Анализа, който смятам да направя е базиран на осредняването на съответните фактори по години.
# Average each factor per country over 2000-2010 and order by life expectancy.
mean_factors =economics_data.groupby(by="Country").mean()
mean_factors = mean_factors.sort_values(["Life_expectancy"])
mean_factors = mean_factors.drop("Year",axis=1)
mean_factors
# The histogram separates the countries into two clear life-expectancy groups.
plt.hist(mean_factors.Life_expectancy)
plt.title("Life expectancy in years")
plt.xlabel("Years")
plt.ylabel("Count")
plt.show()
# Горната графика много ясно показва две групи държави с разлика в продължителността на живота от около 8 години. В първата група попадат България, Румъния и Македония, а в другата Германия, Австрия и Швейцария.
plt.figure(figsize = (7, 4))
plt.title("Life expectancy by years")
plt.xlabel("Years")
# Horizontal bars, one per country, in the mean_factors (sorted) order.
plt.barh(range(len(mean_factors.Life_expectancy)), mean_factors.Life_expectancy)
plt.yticks(list(range(len(mean_factors.Life_expectancy))), mean_factors.index)
plt.show()
mean_factors
# Pairwise correlations between the averaged factors and life expectancy.
mean_factors.corr()
# Summary: Резултата от анализа ясно показва, че по-ниските разходи отделяни за здравеопазване от страна на държавата, съчетани с по-висок дял на индустриализация и селско стопанство от БВП, води до намаляване на продължителността на живота.
# ## II. How do our choices in food and drinks affect our overall health?
# ### 1. Read data.
# Reset the accumulator and rebuild it from the food & drink indicators.
factors_data = None
add_to_factors_data("FoodAndDrink/food_consumption.xlsx")
add_to_factors_data("FoodAndDrink/sugar_consumption.xlsx")
add_to_factors_data("FoodAndDrink/blood_pressure_female.xlsx")
add_to_factors_data("FoodAndDrink/blood_pressure_male.xlsx")
add_to_factors_data("FoodAndDrink/bmi_female.xlsx")
add_to_factors_data("FoodAndDrink/bmi_male.xlsx")
add_to_factors_data("FoodAndDrink/cholesterol_female.xlsx")
add_to_factors_data("FoodAndDrink/cholesterol_male.xlsx")
# Keep the same six countries, from the year 2000 onwards.
factors_data = factors_data[(factors_data.Year >= 2000) & (factors_data.Country.isin(country_list))]
factors_data
# Short names; "Shugar" [sic] is the spelling used consistently by later cells.
factors_data.columns = ["Country", "Year", "Food","Shugar", "Blood_pressure_f","Blood_pressure_m","BMI_f",
                        "BMI_m","Cholesterol_f", "Cholesterol_m"]
# Short description of what each column means:
#
# * Country - name of country
# * Year - years with data for exploration
# * Food - The total supply of food available in a country, divided by the population and 365 (the number of days in the year).
# * Shugar - Sugar per person (g per day)
# * Blood_pressure_f - SBP female (mm Hg), age standardized mean
# * Blood_pressure_m - SBP male (mm Hg), age standardized mean
# * BMI_f - BMI (Body Mass Index) of the female population
# * BMI_m - BMI (Body Mass Index) of the male population
# * Cholesterol_f - Total Cholesterol female (mmol/L), age standardized mean
# * Cholesterol_m - Total Cholesterol male (mmol/L), age standardized mean
#
factors_data.info()
# Column dtypes look fine; the few missing values are harmless because only
# per-country means are used below.  (translated from Bulgarian)
mean_factors_foods_and_drinks = factors_data.groupby(by="Country").mean()
mean_factors_foods_and_drinks = mean_factors_foods_and_drinks.drop("Year",axis=1)
mean_factors_foods_and_drinks
# Attach the average life expectancy computed in part I; rows align by the
# shared Country index.
mean_factors_foods_and_drinks["Life_expectancy"] = mean_factors["Life_expectancy"]
mean_factors_foods_and_drinks
mean_factors_foods_and_drinks.corr()
# Overlaid histograms of female vs. male mean cholesterol levels.
plt.hist(mean_factors_foods_and_drinks.Cholesterol_f,bins=6, alpha = 0.7, label= "female")
plt.hist(mean_factors_foods_and_drinks.Cholesterol_m, bins=6, alpha = 0.7, label = "male")
plt.title("Cholesterol referents")
plt.legend()
plt.show()
# Съгласно [Wikipedia](https://bg.wikipedia.org/wiki/%D0%A5%D0%BE%D0%BB%D0%B5%D1%81%D1%82%D0%B5%D1%80%D0%BE%D0%BB), границите които трябва да бъдат поддържани за нивата на холестерола, трябва да са под 5.2, а както виждаме, стойностите на западните държави са над тази стойност. Както казват старите хора, този врат не е от туршия. :) Както се вижда от графиките по-долу същите три развити западни държави консумират повече храни и захар, което е свързано с по-високите нива на холестерола.
# +
plt.figure(figsize = (7, 4))
plt.title("Food consumation")
# One horizontal bar per country: daily food supply per person.
plt.barh(range(len(mean_factors_foods_and_drinks.Food)), mean_factors_foods_and_drinks.Food)
plt.yticks(list(range(len(mean_factors_foods_and_drinks.Food))), mean_factors_foods_and_drinks.index)
plt.show()
# -
# Sugar consumption per country (grams per person per day).
plt.barh(mean_factors_foods_and_drinks.index,mean_factors_foods_and_drinks.Shugar)
plt.title("Shugar consumation")
plt.show()
# От всичко казано дотук, излиза че за продължителността на живота на една нация, не са най-важни стойностите на холестерола, а това хората и да се хранят добре, да си доставят наслади и да имат добро здравеопазване.
# ## III. How do eating habits of US people influence their health?
# The survey results are located [here](https://www.kaggle.com/bls/eating-health-module-dataset/data). Here I want to understand whether weight affects a person's health status.
# ### 1. Reading data.
# Eating & Health Module microdata; one row per survey respondent.
healt_data = pd.read_csv("ehresp_2014.csv")
healt_data.head()
healt_data.euwgt.unique()
# ### Short description of what each column means:
#
# * TUCASEID - identifies each household
# * EUFINLWGT - statistical weight
# * EUGENHTH - general health status
# * EUEAT - Were there any times you were eating any meals or snacks yesterday, for example while you were doing something else? 1-Yes, 2-No
# * EUWGT - How much do you weigh without shoes? (in pounds)
#
# Keep only the columns used in this analysis (described above).
healt_data = healt_data[['tucaseid', 'eufinlwgt', 'eueat', 'euwgt','eugenhth']]
healt_data.head()
# ### 2. Data tidying and cleaning.
healt_data.groupby("eugenhth").size()
# To conduct the survey, I use the categories of the "eugenhth" indicator. Its valid values are from 1 to 5; since the invalid entries are insignificant in number, I decide to remove those rows.
# Keep only valid general-health answers (codes 1-5; negative codes are
# "refused"/"don't know" and are rare enough to drop).
healt_data = healt_data[healt_data.eugenhth >= 1]
# Decode the numeric health-status codes into readable category labels.
healt_data["eugenhth"] = healt_data["eugenhth"].replace(1,"Excellent")
healt_data["eugenhth"] = healt_data["eugenhth"].replace(2,"Very good")
healt_data["eugenhth"] = healt_data["eugenhth"].replace(3,"Good")
healt_data["eugenhth"] = healt_data["eugenhth"].replace(4,"Fair")
healt_data["eugenhth"] = healt_data["eugenhth"].replace(5,"Poor")
healt_data["eugenhth"] = healt_data["eugenhth"].astype("category")
healt_data.groupby("eueat").size()
# Validate the "eueat" answer (1 = yes, 2 = no): drop invalid codes and recode
# the column as a boolean flag.
healt_data = healt_data[healt_data["eueat"] > 0]
healt_data["eueat"] = healt_data["eueat"].replace(2,0)
# BUGFIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24, so
# astype(np.bool) raises AttributeError on modern NumPy; the builtin bool is
# the documented replacement and produces the same dtype.
healt_data["eueat"] = healt_data.eueat.astype(bool)
# Validate the "euwgt" weight column: keep the plausible 98-340 lb range; the
# special codes (refusals, pregnancy) fall outside it and are excluded.
healt_data = healt_data[(healt_data["euwgt"] >= 98) & (healt_data["euwgt"] <= 340)]
healt_data.head()
healt_data.info()
# Mean body weight within each self-reported health-status category.
weigh_data = healt_data.groupby(by="eugenhth").euwgt.mean()
weigh_data
plt.figure(figsize = (7, 4))
# BUGFIX: the title was "Food consumation", copy-pasted from an earlier food
# chart; this plot actually shows mean weight per health-status category.
plt.title("Mean weight by general health status")
plt.barh(range(len(weigh_data)), weigh_data)
plt.yticks(list(range(len(weigh_data))), weigh_data.index)
plt.show()
# Cross-tabulate distracted eating (eueat) against health status.
pd.crosstab(healt_data.eueat, healt_data.eugenhth)
# Compare mean statistical weights of the two eating groups.
eufinlwgt_coeficients = healt_data.groupby(by="eueat").eufinlwgt.mean()
eufinlwgt_coeficients
# Even after weighing, it appears that doing something else has no effect on the status of a person.
| Exam/.ipynb_checkpoints/Exam-checkpoint.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Q#
# language: qsharp
# name: iqsharp
# ---
# # Measurement Kata Workbook
#
# **What is this workbook?**
# A workbook is a collection of problems, accompanied by solutions to them.
# The explanations focus on the logical steps required to solve a problem; they illustrate the concepts that need to be applied to come up with a solution to the problem, explaining the mathematical steps required.
#
# Note that a workbook should not be the primary source of knowledge on the subject matter; it assumes that you've already read a tutorial or a textbook and that you are now seeking to improve your problem-solving skills. You should attempt solving the tasks of the respective kata first, and turn to the workbook only if stuck. While a textbook emphasizes knowledge acquisition, a workbook emphasizes skill acquisition.
#
# This workbook describes the solutions to the problems offered in the [Measurement kata](./Measurements.ipynb). Since the tasks are offered as programming problems, the explanations also cover some elements of Q# that might be non-obvious for a first-time user.
#
# **What you should know for this workbook**
#
# You should be familiar with the following concepts before tackling the Measurements kata (and this workbook):
#
# 1. Basic linear algebra
# 2. The concept of qubit and multi-qubit systems
# 3. Single-qubit and multi-qubit quantum gates and using them to manipulate the state of the system
# To begin, first prepare this notebook for execution (if you skip the first step, you'll get "Syntax does not match any known patterns" error when you try to execute Q# code in the next cells; if you skip the second step, you'll get "Invalid test name" error):
%package Microsoft.Quantum.Katas::0.11.2006.403
%workspace reload
# ## Part I. Discriminating Orthogonal States
# ### Task 1.1. $|0\rangle$ or $|1\rangle$?
#
# **Input:** A qubit which is guaranteed to be in either the $|0\rangle$ or the $|1\rangle$ state.
#
# **Output:** `true` if the qubit was in the $|1\rangle$ state, or `false` if it was in the $|0\rangle$ state. The state of the qubit at the end of the operation does not matter.
# ### Solution
#
# The input qubit is guaranteed to be either in basis state $|0\rangle$ or $|1\rangle$. This means that when measuring the qubit in the Pauli $Z$ basis (the computational basis), the measurement will report the input state without any doubt.
#
# In Q# the operation [`M()`](https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.intrinsic.m) can be used to measure a single qubit in the Pauli $Z$ basis. The measurement result is a value of type `Result`: the operation `M` will return `One` if the input qubit was in the $|1\rangle$ state and `Zero` if the input qubit was in the $|0\rangle$ state. Since we need to encode the first case as `true` and the second one as `false`, we can return the result of equality comparison between measurement result and `One`.
# +
%kata T101_IsQubitOne_Test
operation IsQubitOne (q : Qubit) : Bool {
    // Measure in the computational (Pauli Z) basis; |1⟩ yields One.
    let outcome = M(q);
    return outcome == One;
}
# -
# [Return to task 1.1 of the Measurements kata.](./Measurements.ipynb#Task-1.1.-$|0\rangle$-or-$|1\rangle$?)
# ### Task 1.2. Set the qubit to the $|0\rangle$ state.
#
# **Input:** A qubit in an arbitrary state.
#
# **Goal:** Change the state of the qubit to $|0\rangle$.
# ### Solution
#
# A fundamental postulate of quantum computing says that when we measure a qubit in a possible superposition state, it will collapse to the state that corresponds to the outcome of the measurement. This means that regardless of the original qubit state, after we measure the qubit in the Pauli $Z$ basis, it will end up in the $|0\rangle$ or $|1\rangle$ state.
#
# After we use the operation `M()` to measure the input qubit in the Pauli $Z$ basis, there are two possibilities:
# 1. The qubit collapses to the state $|0\rangle$ (measurement outcome `Zero`), and we don’t need to change anything.
# 2. The qubit collapses to the state $|1\rangle$ (measurement outcome `One`), and we need to flip the state of the qubit. This can be done with the [**X** gate](../tutorials/SingleQubitGates/SingleQubitGates.ipynb#Pauli-Gates).
# +
%kata T102_InitializeQubit_Test
operation InitializeQubit (q : Qubit) : Unit {
    // Collapse the qubit with a Z-basis measurement, then flip it back to |0⟩
    // with an X gate if it landed in |1⟩.
    let outcome = M(q);
    if (outcome == One) {
        X(q);
    }
}
# -
# [Return to task 1.2 of the Measurements kata.](./Measurements.ipynb#Task-1.2.-Set-the-qubit-to-the-$|0\rangle$-state.)
# ### Task 1.3. $|+\rangle$ or $|-\rangle$?
#
# **Input:** A qubit which is guaranteed to be in either the $|+\rangle$ or the $|-\rangle$ state. As a reminder, $|+\rangle = \frac{1}{\sqrt{2}} \big(|0\rangle + |1\rangle\big)$, $|-\rangle = \frac{1}{\sqrt{2}} \big(|0\rangle - |1\rangle\big)$.
#
# **Output:** `true` if the qubit was in the $|+\rangle$ state, or `false` if it was in the $|-\rangle$ state. The state of the qubit at the end of the operation does not matter.
# ### Solution
#
# Both input states are superposition states, with equal absolute values of the amplitudes of both basis states. This means that if the state is measured in the Pauli $Z$ basis, like we did in the previous task, there is a 50-50 chance of measuring `One` or `Zero`, which won't give us the necessary information.
#
# To determine in which state the input qubit is with certainty, we want to transform the qubit into a state where there is no superposition with respect to the basis in which we perform the measurement.
#
# Consider how we can prepare the input states, starting with basis states: $H|0\rangle = |+\rangle$ and $H|1\rangle = |-\rangle$.
# This transformation can also be undone by applying the **H** gate again (remember that the **H** gate is self-adjoint, i.e., it equals its own inverse): $H|+\rangle = |0\rangle$ and $H|-\rangle = |1\rangle$.
#
# Once we have the $|0\rangle$ or $|1\rangle$ state, we can use the same principle as in task 1.1 to measure the state and report the outcome. Note that in this task return value `true` corresponds to input state $|+\rangle$, so we compare the measurement result with `Zero`.
# +
%kata T103_IsQubitPlus_Test
operation IsQubitPlus (q : Qubit) : Bool {
    // Hadamard maps |+⟩ -> |0⟩ and |−⟩ -> |1⟩, so a Z-basis measurement
    // distinguishes the two input states with certainty.
    H(q);
    let outcome = M(q);
    return outcome == Zero;
}
# -
# Another possible solution could be to measure in the Pauli $X$ basis (${|+\rangle, |-\rangle}$ basis), this means a transformation with the **H** gate before measurement is not needed. Again, measurement result `Zero` would correspond to state $|+\rangle$.
#
# In Q#, measuring in another Pauli basis can be done with the [`Measure()`](https://docs.microsoft.com/en-us/qsharp/api/qsharp/microsoft.quantum.intrinsic.measure) operation.
# +
%kata T103_IsQubitPlus_Test
operation IsQubitPlus (q : Qubit) : Bool {
    // Measure directly in the Pauli X basis; |+⟩ gives Zero, |−⟩ gives One.
    let outcome = Measure([PauliX], [q]);
    return outcome == Zero;
}
# -
# [Return to task 1.3 of the Measurements kata.](./Measurements.ipynb#Task-1.3.-$|+\rangle$-or-$|-\rangle$?)
# ### Task 1.4. $|A\rangle$ or $|B\rangle$?
#
# **Inputs:**
#
# 1. Angle $\alpha$, in radians, represented as a `Double`.
# 2. A qubit which is guaranteed to be in either the $|A\rangle$ or the $|B\rangle$ state, where $|A\rangle = \cos \alpha |0\rangle + \sin \alpha |1\rangle$ and $|B\rangle = - \sin \alpha |0\rangle + \cos \alpha |1\rangle$.
#
# **Output:** `true` if the qubit was in the $|A\rangle$ state, or `false` if it was in the $|B\rangle$ state. The state of the qubit at the end of the operation does not matter.
# ### Solution
#
# We take a similar approach to the previous task: figure out a way to prepare the input states from the basis states and apply adjoint of that preparation before measuring the qubit.
#
# To create the input states $|A\rangle$ and $|B\rangle$, a [**Ry**](../tutorials/SingleQubitGates/SingleQubitGates.ipynb#Rotation-Gates) gate with $\theta= 2\alpha$ was applied to the basis states $|0\rangle$ and $|1\rangle$. As a reminder,
#
# $$R_y = \begin{bmatrix} \cos\frac{\theta}{2} & - \sin\frac{\theta}{2} \\ \sin\frac{\theta}{2} & \cos\frac{\theta}{2} \end{bmatrix}$$
#
# We can return the inputs state to the basis sates by applying **Ry** gate with $-2 \alpha$ as the rotation angle parameter to the input qubit.
#
# The measurement in Pauli $Z$ basis gives two possibilities:
# 1. The qubit is measured as $|1\rangle$, the input state was $|B\rangle$, we return `false`.
# 2. The qubit is measured as $|0\rangle$, the input state was $|A\rangle$, we return `true`.
# +
%kata T104_IsQubitA_Test
operation IsQubitA (alpha : Double, q : Qubit) : Bool {
    // Undo the Ry(2α) preparation: |A⟩ -> |0⟩ and |B⟩ -> |1⟩.
    Ry(-2.0 * alpha, q);
    let outcome = M(q);
    return outcome == Zero;
}
# -
# [Return to task 1.4 of the Measurements kata.](./Measurements.ipynb#Task-1.4.-$|A\rangle$-or-$|B\rangle$?)
# ### Task 1.5. $|00\rangle$ or $|11\rangle$?
#
# **Input:** Two qubits (stored in an array of length 2) which are guaranteed to be in either the $|00\rangle$ or the $|11\rangle$ state.
#
# **Output:** 0 if the qubits were in the $|00\rangle$ state, or 1 if they were in the $|11\rangle$ state. The state of the qubits at the end of the operation does not matter.
# ### Solution
#
# Both qubits in the input array are in the same state: for $|00\rangle$ each individual qubit is in state $|0\rangle$, for $|11\rangle$ each individual qubit is in state $|1\rangle$. Therefore, if we measure one qubit we will know the state of the other qubit.
# In other words, if the first qubit measures as `One`, we know that the qubits in the input array are in state $|11\rangle$, and if it measures as `Zero`, we know they are in state $|00\rangle$.
#
# > `condition ? trueValue | falseValue` is Q#'s ternary operator: it returns `trueValue` if `condition` is true and `falseValue` otherwise.
# +
%kata T105_ZeroZeroOrOneOne_Test
operation ZeroZeroOrOneOne (qs : Qubit[]) : Int {
    // Both qubits always agree, so measuring the first one is enough.
    let outcome = M(qs[0]);
    return outcome == One ? 1 | 0;
}
# -
# [Return to task 1.5 of the Measurements kata.](./Measurements.ipynb#Task-1.5.-$|00\rangle$-or-$|11\rangle$?)
# ### Task 1.6. Distinguish four basis states.
#
# **Input:** Two qubits (stored in an array of length 2) which are guaranteed to be in one of the four basis states ($|00\rangle$, $|01\rangle$, $|10\rangle$, or $|11\rangle$).
#
# **Output:**
#
# * 0 if the qubits were in the $|00\rangle$ state,
# * 1 if they were in the $|01\rangle$ state,
# * 2 if they were in the $|10\rangle$ state,
# * 3 if they were in the $|11\rangle$ state.
#
# In this task and the subsequent ones the order of qubit states in task description matches the order of qubits in the array (i.e., $|10\rangle$ state corresponds to `qs[0]` in state $|1\rangle$ and `qs[1]` in state $|0\rangle$).
#
# The state of the qubits at the end of the operation does not matter.
# ### Solution
#
# Unlike in the previous task, this time measuring the first qubit won't give us any information on the second qubit, so we need to measure both qubits.
#
# First, we measure both qubits in the input array and store the result in `m1` and `m2`. We can decode these results like this:
# - `m1` is $|0\rangle$ and `m2` is $|0\rangle$: we return $0\cdot2+0 = 0$
# - `m1` is $|0\rangle$ and `m2` is $|1\rangle$: we return $0\cdot2+1 = 1$
# - `m1` is $|1\rangle$ and `m2` is $|0\rangle$: we return $1\cdot2+0 = 2$
# - `m1` is $|1\rangle$ and `m2` is $|1\rangle$: we return $1\cdot2+1 = 3$
#
# In other words, we treat the measurement results as the binary notation of the return value in [big endian notation](../tutorials/MultiQubitSystems/MultiQubitSystems.ipynb#Endianness).
# +
%kata T106_BasisStateMeasurement_Test
operation BasisStateMeasurement (qs : Qubit[]) : Int {
    // Read the qubits as big-endian bits: qs[0] is the high bit, qs[1] the low bit.
    let highBit = M(qs[0]) == One ? 1 | 0;
    let lowBit = M(qs[1]) == One ? 1 | 0;
    return 2 * highBit + lowBit;
}
# -
# [Return to task 1.6 of the Measurements kata.](./Measurements.ipynb#Task-1.6.-Distinguish-four-basis-states.)
# ### Task 1.7. Distinguish two basis states given by bit strings
#
# **Inputs:**
#
# 1. $N$ qubits (stored in an array of length $N$) which are guaranteed to be in one of the two basis states described by the given bit strings.
# 2. Two bit strings represented as `Bool[]`s.
#
# **Output:**
#
# * 0 if the qubits were in the basis state described by the first bit string,
# * 1 if they were in the basis state described by the second bit string.
#
# Bit values `false` and `true` correspond to $|0\rangle$ and $|1\rangle$ states. You are guaranteed that both bit strings have the same length as the qubit array, and that the bit strings differ in at least one bit.
#
# **You can use exactly one measurement.** The state of the qubits at the end of the operation does not matter.
#
# > Example: for bit strings `[false, true, false]` and `[false, false, true]` return 0 corresponds to state $|010\rangle$, and return 1 corresponds to state $|001\rangle$.
# ### Solution
#
# To solve this task we will use two steps. Like many other programming languages, Q# allows you to write functions to make code more readable and reusable.
#
# The first step is to find first bit that differs between bit strings `bit1` and `bit2`. For that we define a function `FindFirstDiff()` which loops through both `Bool[]`s and returns the first index where the bit strings differ.
function FindFirstDiff (bits1 : Bool[], bits2 : Bool[]) : Int {
    // Scan the two bit strings in parallel and report the first mismatching index.
    for (idx in 0 .. Length(bits1) - 1) {
        if (bits1[idx] != bits2[idx]) {
            return idx;
        }
    }
    // Identical strings: no differing position exists.
    return -1;
}
# The second step is implementing the main operation: once we have found the first different bit, we measure the qubit in the corresponding position to see whether it is in state $|0\rangle$ or $|1\rangle$. If it is in state $|0\rangle$, `res` takes the value `false`, if it is in state $|1\rangle$ it takes the value `true`.
#
# `res == bits1[firstDiff]` compares the measurement result with the bit of `bits1` in the differing position. This effectively checks if the qubits are in the basis state described by the first or by the second bit string.
# The two possible outcomes are:
# 1. The qubits are in the state described by the first bit string; then `res` will be equal to `bits1[firstDiff]` and the method will return `0`.
# 2. The qubits are in the state described by the second bit string; then `res` will be not equal to `bits1[firstDiff]` (we know it has to be equal to `bits2[firstDiff]` which does not equal `bits1[firstDiff]`), and the method will return `1`.
# +
%kata T107_TwoBitstringsMeasurement_Test
operation TwoBitstringsMeasurement (qs : Qubit[], bits1 : Bool[], bits2 : Bool[]) : Int {
    // Measure only the qubit at the first position where the candidate
    // basis states must differ; that single result decides the answer.
    let pos = FindFirstDiff(bits1, bits2);
    let measuredOne = M(qs[pos]) == One;
    return measuredOne == bits1[pos] ? 0 | 1;
}
# -
# [Return to task 1.7 of the Measurements kata.](./Measurements.ipynb#Task-1.7.-Distinguish-two-basis-states-given-by-bit-strings)
# ### Task 1.8. Distinguish two superposition states given by two arrays of bit strings - 1 measurement
#
# **Inputs:**
#
# 1. $N$ qubits (stored in an array of length $N$) which are guaranteed to be in one of the two superposition states described by the given arrays of bit strings.
# 2. Two arrays of bit strings represented as `Bool[][]`s.
# The arrays have dimensions $M_1 \times N$ and $M_2 \times N$ respectively, where $N$ is the number of qubits and $M_1$ and $M_2$ are the numbers of bit strings in each array. Note that in general $M_1 \neq M_2$.
# An array of bit strings `[b₁, ..., bₘ]` defines a state that is an equal superposition of all basis states defined by bit strings $b_1, ..., b_m$.
# For example, an array of bit strings `[[false, true, false], [false, true, true]]` defines a superposition state $\frac{1}{\sqrt2}\big(|010\rangle + |011\rangle\big)$.
#
# You are guaranteed that there exists an index of a qubit Q for which:
# - all the bit strings in the first array have the same value in this position (all `bits1[j][Q]` are the same),
# - all the bit strings in the second array have the same value in this position (all `bits2[j][Q]` are the same),
# - these values are different for the first and the second arrays.
#
# > For example, for arrays `[[false, true, false], [false, true, true]]` and `[[true, false, true], [false, false, true]]` return 0 corresponds to state $\frac{1}{\sqrt2}\big(|010\rangle + |011\rangle\big)$, return 1 - to state $\frac{1}{\sqrt2}\big(|101\rangle + |001\rangle\big)$, and you can distinguish these states perfectly by measuring the second qubit.
#
# **Output:**
#
# * 0 if qubits were in the superposition state described by the first array,
# * 1 if they were in the superposition state described by the second array.
#
# **You are allowed to use exactly one measurement.**
# The state of the qubits at the end of the operation does not matter.
# ### Solution
#
# Like in the previous solution, we are looking for the index Q where the two bit strings differ.
# Let's define a function `FindFirstSuperpositionDiff()` which searches for an index Q which has 2 properties:
#
# 1. The value of all arrays in `bits1` at the index Q is either `true` or `false`, and the same for all arrays in `bits2`.
# If this is not the case, you cannot be sure that measuring the corresponding qubit will always return the same result.
# > For example, if you are given the state $\frac{1}{\sqrt2}\big(|010\rangle + |011\rangle\big)$, and if you measure the third qubit, you will get $0$ $50\%$ of the time and $1$ $50\%$ of the time; therefore, to get reliable information you want to measure one of the first two qubits.
# 2. This value is different for `bits1` and `bits2`.
# > Indeed, if you want to distinguish states $\frac{1}{\sqrt2}\big(|010\rangle + |011\rangle\big)$ and $\frac{1}{\sqrt2}\big(|000\rangle + |001\rangle\big)$, there are two qubits that will produce a fixed measurement result for each state - the first one and the second one. However, measuring the first qubit will give $0$ for both states, while measuring the second qubit will give $1$ for the first state and $0$ for the second state, allowing to distinguish the states.
#
# To do this, we will iterate over all qubit indices, and for each of them we'll calculate the number of 1s in that position in `bits1` and `bits2`.
# 1. The first condition means that this count should equal 0 (if all bit strings have 0 bit in this position) or the length of the array of bit strings (if all bit strings have 1 bit in this position).
# 2. The second condition means that this count is different for `bits1` and `bits2`, i.e., one of the counts should equal 0 and another one - the length of the corresponding array of bit strings.
// Returns the index of a qubit position whose bit is constant within each array
// of bit strings (all strings agree on it) but differs between bits1 and bits2;
// returns -1 if no such position exists.
function FindFirstSuperpositionDiff (bits1 : Bool[][], bits2 : Bool[][], Nqubits : Int) : Int {
    for (i in 0 .. Nqubits - 1) {
        // count the number of 1s in i-th position in bit strings of both arrays
        mutable val1 = 0;
        mutable val2 = 0;
        for (j in 0 .. Length(bits1) - 1) {
            if (bits1[j][i]) {
                set val1 += 1;
            }
        }
        for (k in 0 .. Length(bits2) - 1) {
            if (bits2[k][i]) {
                set val2 += 1;
            }
        }
        // Valid position: every string in one array has 1 here (count equals the
        // array length) and every string in the other array has 0 (count is zero).
        if ((val1 == Length(bits1) and val2 == 0) or (val1 == 0 and val2 == Length(bits2))) {
            return i;
        }
    }
    return -1;
}
# The second step is very similar to the previous exercise: given the index we just found, we measure the qubit on that position.
# Here we use the library function `ResultAsBool(M(qs[diff]))` that returns `true` if the measurement result is `One` and `false` if the result is `Zero`; a call to this library function is equivalent to comparison `M(qs[diff]) == One` that was used in the previous task.
# +
%kata T108_SuperpositionOneMeasurement_Test
open Microsoft.Quantum.Convert;
operation SuperpositionOneMeasurement (qs : Qubit[], bits1 : Bool[][], bits2 : Bool[][]) : Int {
    // Measure the single qubit position that distinguishes the two superpositions.
    let diffIndex = FindFirstSuperpositionDiff(bits1, bits2, Length(qs));
    let sawOne = ResultAsBool(M(qs[diffIndex]));
    // All strings in bits1 share the same bit at diffIndex, so comparing with
    // the first string is sufficient.
    return sawOne == bits1[0][diffIndex] ? 0 | 1;
}
# -
# [Return to task 1.8 of the Measurements kata.](./Measurements.ipynb#Task-1.8.-Distinguish-two-superposition-states-given-by-two-arrays-of-bit-strings---1-measurement)
# ### Task 1.9. Distinguish two superposition states given by two arrays of bit strings
#
# **Inputs:**
#
# 1. $N$ qubits (stored in an array of length $N$) which are guaranteed to be in one of the two superposition states described by the given arrays of bit strings.
# 2. Two arrays of bit strings represented as `Bool[][]`s.
# The arrays describe the superposition states in the same way as in the previous task,
# i.e. they have dimensions $M_1 \times N$ and $M_2 \times N$ respectively, $N$ being the number of qubits.
#
# The only constraint on the bit strings is that **all bit strings in the two arrays are distinct**.
#
# > Example: for bit strings `[[false, true, false], [false, false, true]]` and `[[true, true, true], [false, true, true]]` return 0 corresponds to state $\frac{1}{\sqrt2}\big(|010\rangle + |001\rangle\big)$, return 1 to state $\frac{1}{\sqrt2}\big(|111\rangle + |011\rangle\big)$.
#
# **Output:**
#
# * 0 if qubits were in the superposition state described by the first array,
# * 1 if they were in the superposition state described by the second array.
#
# **You can use as many measurements as you wish.**
# The state of the qubits at the end of the operation does not matter.
# ### Solution
#
# Because all the bit strings are guaranteed to be different and we are not limited in the number of measurements we can do, we can use a simpler solution than before.
#
# When we measure all qubits of a certain superposition state, it collapses to one of the basis vectors that comprised the superposition. We can do exactly that and compare the resulting state to the given bit strings to see which array it belongs to.
#
# We use three built-in library primitives. First, we measure all the qubits in the array using [`MultiM()`](https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.measurement.multim) operation that returns an array of `Result`.
#
# To make this array easier to compare to the input bit strings, we convert this array into an integer using the [`ResultArrayAsInt()`](https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.convert.resultarrayasint) function, and we convert each of the input bit strings to an integer using the [`BoolArrayAsInt()`](https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.convert.boolarrayasint) function. Both functions use little-endian encoding when converting bits to integers.
#
# Now that we have two integers, we can easily compare the measurement results to each of the bit strings in the first array to check whether they belong to it; if they do, we know we were given the first state, otherwise it was the second state.
# +
%kata T109_SuperpositionMeasurement_Test
open Microsoft.Quantum.Convert;
open Microsoft.Quantum.Measurement;
/// Distinguishes the two superposition states, relying on all bit strings across
/// both arrays being distinct. Measuring every qubit collapses the state to one
/// of its basis vectors; that vector belongs to exactly one of the arrays.
operation SuperpositionMeasurement (qs : Qubit[], bits1 : Bool[][], bits2 : Bool[][]) : Int {
    // Little-endian integer encoding of the measured basis state.
    let measuredState = ResultArrayAsInt(MultiM(qs));
    for (s in bits1) {
        if (BoolArrayAsInt(s) == measuredState) {
            // The collapsed basis state appears in the first array.
            return 0;
        }
    }
    // All bit strings are distinct, so the state must belong to the second array.
    return 1;
}
# -
# [Return to task 1.9 of the Measurements kata.](./Measurements.ipynb#Task-1.9.-Distinguish-two-superposition-states-given-by-two-arrays-of-bit-strings)
# ### Task 1.10. $|0...0\rangle$ state or W state ?
#
# **Input:** $N$ qubits (stored in an array of length $N$) which are guaranteed to be either in the $|0...0\rangle$ state or in the [W state](https://en.wikipedia.org/wiki/W_state) (an equal superposition of all basis states that have exactly one $|1\rangle$ in them).
#
# **Output:**
#
# * 0 if the qubits were in the $|0...0\rangle$ state,
# * 1 if they were in the W state.
#
# The state of the qubits at the end of the operation does not matter.
# ### Solution
#
# Here is an example of the W state for $N = 3$: $\frac{1}{\sqrt{3}}(|001\rangle+|010\rangle+|100\rangle)$. We can see that each basis state in this expression always has exactly one qubit in the $|1\rangle$ state, while in the $|0...0\rangle$ state all qubits are in the $|0\rangle$ state.
#
# We can use this to arrive to the solution: we will count the number of qubits that were measured in `One` state; if this number equals 1, we had a W state, if it equals 0 we know it was the $|0..0\rangle$ state.
#
# > Note the use of a mutable variable `countOnes` to store the number of qubits measured in `One` state, and the use of a ternary operator `condition ? trueValue | falseValue` to express the return value.
# +
%kata T110_AllZerosOrWState_Test
/// Distinguishes the |0...0> state (return 0) from the W state (return 1)
/// by counting qubits measured as One: |0...0> yields zero Ones, while every
/// basis state of the W state has exactly one One.
operation AllZerosOrWState (qs : Qubit[]) : Int {
    // Number of qubits measured in the One state.
    mutable countOnes = 0;
    for (q in qs) {
        if (M(q) == One) {
            set countOnes += 1;
        }
    }
    // No Ones => all-zeros state; otherwise it was the W state.
    return countOnes == 0 ? 0 | 1;
}
# -
# In the previous task we used `MultiM()` and `ResultArrayAsInt()`; these can also be used in this task to make the solution shorter.
# +
%kata T110_AllZerosOrWState_Test
open Microsoft.Quantum.Convert;
open Microsoft.Quantum.Measurement;
/// Shorter variant: measure all qubits at once and interpret the results as a
/// little-endian integer; only the |0...0> state produces the integer 0.
operation AllZerosOrWState (qs : Qubit[]) : Int {
    return ResultArrayAsInt(MultiM(qs)) == 0 ? 0 | 1;
}
# -
# [Return to task 1.10 of the Measurements kata.](./Measurements.ipynb#Task-1.10.-$|0...0\rangle$-state-or-W-state-?)
# ### Task 1.11. GHZ state or W state ?
#
# **Input:** $N \ge 2$ qubits (stored in an array of length $N$) which are guaranteed to be either in the [GHZ state](https://en.wikipedia.org/wiki/Greenberger%E2%80%93Horne%E2%80%93Zeilinger_state) or in the [W state](https://en.wikipedia.org/wiki/W_state).
#
# **Output:**
#
# * 0 if the qubits were in the GHZ state,
# * 1 if they were in the W state.
#
# The state of the qubits at the end of the operation does not matter.
# ### Solution
#
# Here is an example of the GHZ state for $N = 3$: $\frac{1}{\sqrt2}(|000\rangle + |111\rangle)$.
#
# As we've seen in the previous task, each of the basis states that form the W state will have exactly one qubit in the $|1\rangle$ state. Basis states that form the GHZ state will either have all qubits in the $|1\rangle$ state or all qubits in the $|0\rangle$ state.
#
# This means that if we count the number of qubits that were measured in the `One` state, we'll get 1 for the W state and 0 or $N$ for the GHZ state. The code ends up almost the same as in the previous task (in fact, you can use this exact code to solve the previous task).
# +
%kata T111_GHZOrWState_Test
/// Distinguishes the GHZ state (return 0) from the W state (return 1) by
/// counting qubits measured as One: the W state always yields exactly one One,
/// while the GHZ state yields either zero or N Ones (N >= 2, so never one).
operation GHZOrWState (qs : Qubit[]) : Int {
    // Number of qubits measured in the One state.
    mutable countOnes = 0;
    for (q in qs) {
        if (M(q) == One) {
            set countOnes += 1;
        }
    }
    // Exactly one One => W state; 0 or N Ones => GHZ state.
    return countOnes == 1 ? 1 | 0;
}
# -
# [Return to task 1.11 of the Measurements kata.](./Measurements.ipynb#Task-1.11.-GHZ-state-or-W-state-?)
# ### Task 1.12. Distinguish four Bell states.
#
# **Input:** Two qubits (stored in an array of length 2) which are guaranteed to be in one of the four Bell states.
#
# **Output:**
#
# * 0 if they were in the state $|\Phi^{+}\rangle = \frac{1}{\sqrt{2}} \big(|00\rangle + |11\rangle\big)$,
# * 1 if they were in the state $|\Phi^{-}\rangle = \frac{1}{\sqrt{2}} \big(|00\rangle - |11\rangle\big)$,
# * 2 if they were in the state $|\Psi^{+}\rangle = \frac{1}{\sqrt{2}} \big(|01\rangle + |10\rangle\big)$,
# * 3 if they were in the state $|\Psi^{-}\rangle = \frac{1}{\sqrt{2}} \big(|01\rangle - |10\rangle\big)$.
#
# The state of the qubits at the end of the operation does not matter.
# ### Solution
#
# If the qubits are entangled in one of the Bell states, you can't simply measure individual qubits to distinguish the states: if you do, the first two states will both give you 00 or 11, and the last two - 01 or 10. We need to come up with a way to transform the original states to states that are easy to distinguish before measuring them.
#
# First, let's take a look at the preparation of the Bell states starting with the $|00\rangle$ basis state.
#
# > A more detailed discussion of preparing all the Bell states can be found in the [task 7 of the Superposition kata](./../Superposition/Workbook_Superposition.ipynb#all-bell-states).
#
# The unitary transformation $\text{CNOT}\cdot(H \otimes I)$ (which corresponds to applying the **H** gate to the first qubit, followed by applying the **CNOT** gate with the first qubit as control and the second qubit as target) transforms the 4 basis vectors of the computational basis into the 4 Bell states.
#
# $$\text{CNOT}\cdot(H \otimes I) = \frac{1}{\sqrt2} \begin{bmatrix} 1 & 0 & 1 & 0 \\ 0 & 1 & 0 & 1 \\ 0 & 1 & 0 & -1 \\ \underset{|\Phi^{+}\rangle}{\underbrace{1}} & \underset{|\Psi^{+}\rangle}{\underbrace{0}} & \underset{|\Phi^{-}\rangle}{\underbrace{-1}} & \underset{|\Psi^{-}\rangle}{\underbrace{0}} \end{bmatrix}$$
#
# To transform the Bell states back to the basis states, you can apply [adjoint](./../tutorials/SingleQubitGates/SingleQubitGates.ipynb#Matrix-Representation) of this transformation, which will undo its effects.
# In this case both gates used are self-adjoint, so the adjoint transformation will require applying the same gates in reverse order (first **CNOT**, then **H**).
#
# After this the original states will be transformed as follows:
#
# <table>
# <col width="50"/>
# <col width="200"/>
# <col width="100"/>
# <tr>
# <th style="text-align:center">Return value</th>
# <th style="text-align:center">Original state</th>
# <th style="text-align:center">Maps to basis state</th>
# </tr>
# <tr>
# <td style="text-align:center">0</td>
# <td style="text-align:center">$|\Phi^{+}\rangle = \frac{1}{\sqrt{2}} \big (|00\rangle + |11\rangle\big)$</td>
# <td style="text-align:center">$|00\rangle$</td>
# </tr>
# <tr>
# <td style="text-align:center">1</td>
# <td style="text-align:center">$|\Phi^{-}\rangle = \frac{1}{\sqrt{2}} \big (|00\rangle - |11\rangle\big)$</td>
# <td style="text-align:center">$|10\rangle$</td>
# </tr>
# <tr>
# <td style="text-align:center">2</td>
# <td style="text-align:center">$|\Psi^{+}\rangle = \frac{1}{\sqrt{2}} \big (|01\rangle + |10\rangle\big)$</td>
# <td style="text-align:center">$|01\rangle$</td>
# </tr>
# <tr>
# <td style="text-align:center">3</td>
# <td style="text-align:center">$|\Psi^{-}\rangle = \frac{1}{\sqrt{2}} \big (|01\rangle - |10\rangle\big)$</td>
# <td style="text-align:center">$|11\rangle$</td>
# </tr>
# </table>
#
# These are the same four 2-qubit basis states we've seen in task 1.6, though in different order compared to that task, so mapping the measurement results to the return values will differ slightly.
# +
%kata T112_BellState_Test
/// Identifies which of the four Bell states the pair is in.
/// Applies the adjoint of the Bell-state preparation (CNOT then H — both gates
/// are self-adjoint, so the same gates in reverse order) to map each Bell state
/// to a distinct basis state, then measures both qubits.
operation BellState (qs : Qubit[]) : Int {
    CNOT(qs[0], qs[1]);
    H(qs[0]);
    // After the transformation: |Phi+> -> |00>, |Phi-> -> |10>, |Psi+> -> |01>, |Psi-> -> |11>.
    let m1 = M(qs[0]) == Zero ? 0 | 1;
    let m2 = M(qs[1]) == Zero ? 0 | 1;
    // The second qubit is the most significant bit of the return value.
    return m2 * 2 + m1;
}
# -
# [Return to task 1.12 of the Measurements kata.](./Measurements.ipynb#Task-1.12.-Distinguish-four-Bell-states.)
# ### Task 1.13. Distinguish four orthogonal 2-qubit states.
#
# **Input:** Two qubits (stored in an array of length 2) which are guaranteed to be in one of the four orthogonal states.
#
# **Output:**
#
# * 0 if they were in the state $|S_0\rangle = \frac{1}{2} \big(|00\rangle + |01\rangle + |10\rangle + |11\rangle\big)$,
# * 1 if they were in the state $|S_1\rangle = \frac{1}{2} \big(|00\rangle - |01\rangle + |10\rangle - |11\rangle\big)$,
# * 2 if they were in the state $|S_2\rangle = \frac{1}{2} \big(|00\rangle + |01\rangle - |10\rangle - |11\rangle\big)$,
# * 3 if they were in the state $|S_3\rangle = \frac{1}{2} \big(|00\rangle - |01\rangle - |10\rangle + |11\rangle\big)$.
#
# The state of the qubits at the end of the operation does not matter.
# ### Solution
#
# Similarly to the previous task, let's see whether these states can be converted back to the basis states from the [task 1.6](#Task-1.6.-Distinguish-four-basis-states.).
#
# To find a transformation that would convert the basis states to $|S_0\rangle, ... |S_3\rangle$, let's write out the coefficients of these states as column vectors side by side, so that they form a matrix.
#
# $$\frac12 \begin{bmatrix}
# 1 & 1 & 1 & 1 \\ 1 & -1 & 1 & -1 \\ 1 & 1 & -1 & -1 \\
# \underset{|S_0\rangle}{\underbrace{1}} & \underset{|S_1\rangle}{\underbrace{-1}} & \underset{|S_2\rangle}{\underbrace{-1}} & \underset{|S_3\rangle}{\underbrace{1}} \end{bmatrix}$$
#
# Applying this matrix to each of the basis states will produce the given states.
# You can check explicitly that applying this transformation to the basis state $|00\rangle$ gives:
# $$\frac{1}{2} \begin{bmatrix} 1 & 1 & 1 & 1 \\ 1 & -1 & 1 & -1 \\ 1 & 1 & -1 & -1 \\ 1 & -1 & -1 & 1 \end{bmatrix} \cdot \begin{bmatrix}1 \\ 0 \\ 0 \\ 0 \end{bmatrix} = \frac{1}{2}\begin{bmatrix}1 \\ 1 \\ 1 \\ 1 \end{bmatrix} = \frac{1}{2} \big(|00\rangle + |01\rangle + |10\rangle + |11\rangle\big) = |S_0\rangle$$
#
# and similarly for the rest of the states.
#
# Notice that the top left $2 \times 2$ block of this matrix is the same as the top right and the bottom left, and the same as the bottom right block multiplied by $-1$. This means that we can represent this transformation as a tensor product of two **H** gates (for background on this check the [Multi-qubit tutorial workbook](../tutorials/MultiQubitGates/Workbook_MultiQubitGates.ipynb#Exercise-1:-Compound-Gate)):
# $$ H \otimes H = \frac{1}{\sqrt{2}} \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix} \otimes \frac{1}{\sqrt{2}} \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix} = \frac{1}{2} \begin{bmatrix} 1 & 1 & 1 & 1 \\ 1 & -1 & 1 & -1 \\ 1 & 1 & -1 & -1 \\ 1 & -1 & -1 & 1 \end{bmatrix} $$
#
# Knowing how to prepare the given states, we can convert the input state back to the corresponding basis state, like we've done in the previous task, and measure both qubits to get the answer.
#
# > We are leveraging the `BasisStateMeasurement()` operation which we have written in task 1.6, to reduce code duplication.
# > If you get the error `No identifier with the name "BasisStateMeasurement" exists.`, remember to execute the cell with the solution to [task 1.6](#Task-1.6.-Distinguish-four-basis-states.), otherwise this operation will be undefined indeed.
# +
%kata T113_TwoQubitState_Test
/// Identifies which of the four states |S0>..|S3> the pair is in.
/// The given states equal (H⊗H) applied to the basis states, and H is
/// self-adjoint, so applying H to each qubit maps every input state back to a
/// distinct basis state.
operation TwoQubitState (qs : Qubit[]) : Int {
    H(qs[0]);
    H(qs[1]);
    // Reuse the basis-state measurement defined in task 1.6.
    return BasisStateMeasurement(qs);
}
# -
# [Return to task 1.13 of the Measurements kata.](./Measurements.ipynb#Task-1.13.-Distinguish-four-orthogonal-2-qubit-states.)
# ### Task 1.14*. Distinguish four orthogonal 2-qubit states, part 2.
#
# **Input:** Two qubits (stored in an array of length 2) which are guaranteed to be in one of the four orthogonal states.
#
# **Output:**
#
# * 0 if they were in the state $|S_0\rangle = \frac{1}{2} \big(+ |00\rangle - |01\rangle - |10\rangle - |11\rangle\big)$,
# * 1 if they were in the state $|S_1\rangle = \frac{1}{2} \big(- |00\rangle + |01\rangle - |10\rangle - |11\rangle\big)$,
# * 2 if they were in the state $|S_2\rangle = \frac{1}{2} \big(- |00\rangle - |01\rangle + |10\rangle - |11\rangle\big)$,
# * 3 if they were in the state $|S_3\rangle = \frac{1}{2} \big(- |00\rangle - |01\rangle - |10\rangle + |11\rangle\big)$.
#
# The state of the qubits at the end of the operation does not matter.
# ### Solution
#
# Here we can leverage the same method as used in the previous exercise: find a transformation that maps the basis states to the given states and apply its adjoint to the input state before measuring.
#
# It is a lot harder to recognize the necessary transformation, though. The coefficient $\frac12$ hints that there are still two **H** gates involved, but this transformation is not a tensor product. After some experimentation you can find that the given states can be prepared by applying the **H** gate to the second qubit of the matching Bell states:
#
# <table>
# <col width="100"/>
# <col width="200"/>
# <col width="300"/>
# <col width="50"/>
# <tr>
# <th style="text-align:center">Basis state</th>
# <th style="text-align:center">Bell state</th>
# <th style="text-align:center">Input state (after applying H gate <br/> to the second qubit)</th>
# <th style="text-align:center">Return value</th>
# </tr>
# <tr>
# <td style="text-align:center">$|00\rangle$</td>
# <td style="text-align:center">$|\Phi^{+}\rangle = \frac{1}{\sqrt{2}} \big (|00\rangle + |11\rangle\big)$</td>
# <td style="text-align:center">$\frac12 \big (|00\rangle + |01\rangle + |10\rangle - |11\rangle\big) = -|S_3\rangle$</td>
# <td style="text-align:center">3</td>
# </tr>
# <tr>
# <td style="text-align:center">$|10\rangle$</td>
# <td style="text-align:center">$|\Phi^{-}\rangle = \frac{1}{\sqrt{2}} \big (|00\rangle - |11\rangle\big)$</td>
# <td style="text-align:center">$\frac12 \big (|00\rangle + |01\rangle - |10\rangle + |11\rangle\big) = -|S_2\rangle$</td>
# <td style="text-align:center">2</td>
# </tr>
# <tr>
# <td style="text-align:center">$|01\rangle$</td>
# <td style="text-align:center">$|\Psi^{+}\rangle = \frac{1}{\sqrt{2}} \big (|01\rangle + |10\rangle\big)$</td>
# <td style="text-align:center">$\frac12 \big (|00\rangle - |01\rangle + |10\rangle + |11\rangle\big) = -|S_1\rangle$</td>
# <td style="text-align:center">1</td>
# </tr>
# <tr>
# <td style="text-align:center">$|11\rangle$</td>
# <td style="text-align:center">$|\Psi^{-}\rangle = \frac{1}{\sqrt{2}} \big (|01\rangle - |10\rangle\big)$</td>
# <td style="text-align:center">$\frac12 \big (|00\rangle - |01\rangle - |10\rangle - |11\rangle\big) = |S_0\rangle$</td>
# <td style="text-align:center">0</td>
# </tr>
# </table>
# +
%kata T114_TwoQubitStatePartTwo_Test
/// Identifies which of the four given orthogonal states the pair is in.
/// The states are prepared by applying H to the second qubit of the Bell states,
/// so the adjoint sequence (H on qubit 1, then CNOT, then H on qubit 0) maps
/// each input to a distinct basis state (up to a global phase).
operation TwoQubitStatePartTwo (qs : Qubit[]) : Int {
    H(qs[1]);
    CNOT(qs[0], qs[1]);
    H(qs[0]);
    // Per the table above, the mapping is inverted relative to task 1.12:
    // |S0> ends up as |11>, ..., |S3> as |00>, hence "== One ? 0 | 1".
    let m1 = M(qs[0]) == One ? 0 | 1;
    let m2 = M(qs[1]) == One ? 0 | 1;
    return m2 * 2 + m1;
}
# -
# [Return to task 1.14 of the Measurements kata.](./Measurements.ipynb#Task-1.14*.-Distinguish-four-orthogonal-2-qubit-states,-part-2.)
# ### Task 1.15**. Distinguish two orthogonal states on three qubits.
#
# **Input:** Three qubits (stored in an array of length 3) which are guaranteed to be in one of the two orthogonal states.
#
# **Output:**
#
# * 0 if they were in the state $|S_0\rangle = \frac{1}{\sqrt{3}} \big(|100\rangle + \omega |010\rangle + \omega^2 |001\rangle \big)$,
# * 1 if they were in the state $|S_1\rangle = \frac{1}{\sqrt{3}} \big(|100\rangle + \omega^2 |010\rangle + \omega |001\rangle \big)$.
#
# Here $\omega = e^{2i \pi/ 3}$.
#
# The state of the qubits at the end of the operation does not matter.
# ### Solution
#
# Let's find a unitary transformation that converts the state $|S_0\rangle$ to the basis state $|000\rangle$. To do this, we first apply a unitary operation that maps the first state to the W state $\frac{1}{\sqrt{3}} \big( |100\rangle + |010\rangle + |001\rangle \big ) $.
#
# > We will use a convenient [rotation gate](./../tutorials/SingleQubitGates/SingleQubitGates.ipynb#Rotation-Gates) $R_1$ which applies a relative phase to the $|1\rangle$ state and doesn't change the $|0\rangle$ state. In matrix form
# > $$R_1(\theta) = \begin{bmatrix} 1 & 0 \\ 0 & e^{i\theta} \end{bmatrix} $$
#
# This can be accomplished by a tensor product $I \otimes R_1(-\frac{2\pi}{3}) \otimes R_1(-\frac{4\pi}{3})$, where
# - $I$ is the identity gate applied to qubit 0,
# - $R_1(-\frac{2\pi}{3}) = \begin{bmatrix} 1 & 0 \\ 0 & \omega^{-1} \end{bmatrix} $, applied to qubit 1,
# - $R_1(-\frac{4\pi}{3}) = \begin{bmatrix} 1 & 0 \\ 0 & \omega^{-2} \end{bmatrix} $, applied to qubit 2.
#
# > Note that applying this operation to the state $|S_1\rangle$ converts it to $ \frac{1}{\sqrt{3}} \big ( |100\rangle + \omega |010\rangle + \omega^2 |001\rangle \big) $.
#
# Now we can use adjoint of the state preparation routine for W state (from [task 17](./../Superposition/Workbook_Superposition_Part2.ipynb#Task-17**.-W-state-on-an-arbitrary-number-of-qubits.) of the Superposition kata),
# which will map the W state to the state $|000\rangle$ and the second state to some other state $|S'_1\rangle$.
#
# We don't need to do the math to figure out the exact state $|S'_1\rangle$ in which $|S_1\rangle$ will end up after those two transformations.
# Remember that our transformations are unitary, i.e., they preserve the inner products of vectors.
# Since the states $|S_0\rangle$ and $|S_1\rangle$ were orthogonal, their inner product $\langle S_0|S_1\rangle = 0$ is preserved when applying unitary transformations, and the states after the transformation will remain orthogonal.
#
# The state $|S'_1\rangle$ is guaranteed to be orthogonal to the state $|000\rangle$, i.e., $|S_1\rangle$ gets mapped to a superposition that does not include basis state $|000\rangle$.
# To distinguish the states $|000\rangle$ and $|S'_1\rangle$, we measure all qubits; if all measurement results were 0, the state was $|000\rangle$ and we return 0, otherwise we return 1.
# +
open Microsoft.Quantum.Math;
open Microsoft.Quantum.Convert;
/// Prepares the W state (equal superposition of all basis states with exactly
/// one |1⟩) on qubits that start in |0...0⟩, for any number of qubits.
/// Declared Adj + Ctl so the preparation can be inverted and controlled.
operation WState_Arbitrary (qs : Qubit[]) : Unit is Adj + Ctl {
    let N = Length(qs);
    if (N == 1) {
        // base case of recursion: |1⟩
        X(qs[0]);
    } else {
        // |W_N⟩ = |0⟩|W_(N-1)⟩ + |1⟩|0...0⟩
        // do a rotation on the first qubit to split it into |0⟩ and |1⟩ with proper weights
        // |0⟩ -> sqrt((N-1)/N) |0⟩ + 1/sqrt(N) |1⟩
        let theta = ArcSin(1.0 / Sqrt(IntAsDouble(N)));
        Ry(2.0 * theta, qs[0]);
        // do a zero-controlled W-state generation for qubits 1..N-1
        X(qs[0]);
        Controlled WState_Arbitrary(qs[0 .. 0], qs[1 .. N - 1]);
        X(qs[0]);
    }
}
# +
%kata T115_ThreeQubitMeasurement_Test
open Microsoft.Quantum.Math;
open Microsoft.Quantum.Measurement;
open Microsoft.Quantum.Arithmetic;
/// Distinguishes |S0⟩ = (|100⟩ + ω|010⟩ + ω²|001⟩)/√3 (return 0) from
/// |S1⟩ = (|100⟩ + ω²|010⟩ + ω|001⟩)/√3 (return 1), where ω = e^{2iπ/3}.
operation ThreeQubitMeasurement (qs : Qubit[]) : Int {
    // Undo the relative phases of |S0⟩ (multiply qubit 1's |1⟩ by ω⁻¹ and
    // qubit 2's |1⟩ by ω⁻²), mapping |S0⟩ to the W state and |S1⟩ to a state
    // that remains orthogonal to it.
    R1(-2.0 * PI() / 3.0, qs[1]);
    R1(-4.0 * PI() / 3.0, qs[2]);
    // Apply inverse state prep of 1/sqrt(3) ( |100⟩ + |010⟩ + |001⟩ )
    Adjoint WState_Arbitrary(qs);
    // measure all qubits: if all of them are 0, we have the first state,
    // if at least one of them is 1, we have the second state
    return MeasureInteger(LittleEndian(qs)) == 0 ? 0 | 1;
}
# -
# The solutions to the rest of the tasks are included in the [Measurements Workbook, Part 2](./Workbook_Measurements_Part2.ipynb).
| Measurements/Workbook_Measurements.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # In-Class Coding Lab: Files
#
# The goals of this lab are to help you to understand:
#
# - Reading data from a file all at once or one line at a time.
# - Searching for data in files
# - Parsing text data to numerical data.
# - How to build complex programs incrementally.
#
# ## Average Spam Confidence
#
# For this lab, we will write a program to read spam confidence headers from a mailbox file like `CCL-mbox-tiny.txt` or `CCL-mbox-small.txt`. These files contain raw email data, and in that data is a SPAM confidence number for each message:
#
# `X-DSPAM-Confidence:0.8475`
#
# Our goal will be to find each of these lines in the file, and extract the confidence number (In this case `0.8475`), with the end-goal of calculating the average SPAM Confidence of all the emails in the file.
#
# ### Reading from the file
#
# Let's start with some code to read the lines of text from `CCL-mbox-tiny.txt`
# Count how many lines the mailbox file contains and print the total.
filename = "CCL-mbox-tiny.txt"
with open(filename, 'r') as f:
    # Iterating the file object yields one line at a time; summing 1 per line counts them.
    count = sum(1 for _ in f)
print(count)
# ### Now Try It
#
# Now modify the code above to print the number of lines in the file, instead of printing the lines themselves. You'll need to increment a variable each time through the loop and then print it out afterwards.
#
# There should be **332** lines.
#
# ### Finding the SPAM Confidence
#
# Next, we'll focus on only getting lines addressing `X-DSPAM-Confidence:`. We do this by including an `if` statement inside the `for` loop.
#
# You need to edit line 4 of the code below to only print lines which begin with `X-DSPAM-Confidence:` There should be **5**
# Print only the SPAM-confidence header lines from the mailbox file.
filename = "CCL-mbox-tiny.txt"
with open(filename, 'r') as f:
    # Keep the lines carrying a spam score, trimming the trailing newline.
    matching = [raw.strip() for raw in f.readlines() if raw.startswith('X-DSPAM-Confidence')]
for header in matching:
    print(header)
# ### Parsing out the confidence value
#
# The final step is to figure out how to parse out the confidence value from the string.
# For example for the given line: `X-DSPAM-Confidence: 0.8475` we need to get the value `0.8475` as a float.
#
# The strategy here is to replace `X-DSPAM-Confidence:` with an empty string, then calling the `float()` function to convert the results to a float.
#
# ### Now Try It
#
# Extract the numeric confidence value from a single SPAM header line.
line = 'X-DSPAM-Confidence: 0.8475'
# Drop the header label, leaving ' 0.8475'; float() ignores the surrounding whitespace.
number = float(line.replace('X-DSPAM-Confidence:', ''))
print(number)
# ### Putting it all together
#
# Now that we have all the working parts, let's put it all together.
#
# ```
# 1. line count is 0
# 2. total confidence is 0
# 3. open mailbox file
# 4. for each line in file
# 5. if line starts with `X-DSPAM-Confidence:`
# 6. remove `X-DSPAM-Confidence:` from line and convert to float
# 7. increment line count
# 8. add spam confidence to total confidence
# 9. print average confidence (total confidence/line count)
# ```
## TODO: Write program here:
# Compute the average SPAM confidence over all messages in the mailbox file.
line_count = 0          # number of X-DSPAM-Confidence headers found
total_confidence = 0.0  # running sum of the confidence values
filename = "CCL-mbox-tiny.txt"
with open(filename, "r") as f:
    for line in f.readlines():
        # Only header lines like "X-DSPAM-Confidence: 0.8475" carry a score.
        # Fixed typo: the prefix was misspelled 'X-DSPM-Confidence', so no line ever matched.
        if line.startswith('X-DSPAM-Confidence'):
            dspam, number = line.split(':')
            number = float(number)  # float() tolerates the leading space and trailing newline
            # Count only matching lines (pseudocode step 7 is inside the if),
            # so the average is per spam header, not per file line.
            line_count = line_count + 1
            total_confidence = total_confidence + number
if line_count > 0:
    print(float(total_confidence / line_count))
else:
    print("No X-DSPAM-Confidence headers found.")
# When you have the program working, try it with the `CCL-mbox-short.txt` mailbox file, too.
| content/lessons/08/Class-Coding-Lab/CCL-Files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# name: python2
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/shubhamchoudharyiiitdmj/ALA/blob/master/spectral_temp.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="lIYdn1woOS1n"
# + [markdown] id="dFwkZjqqUZKB"
# # Spectral Clustering
# ---
# Spectral clustering for link prediction:
# 1. Compute eigendecomposition of graph Laplacian
# 2. Take top K eigenvectors --> node embedding vectors (Z)
# 3. Edge scores = sigmoid(Z * Z^T)
# 3. Test scores these scores against actual edge values (ROC AUC, AP)
#
# Scikit-learn documentation: http://scikit-learn.org/stable/modules/generated/sklearn.manifold.spectral_embedding.html
# + [markdown] id="CHKUMP6EUZKE"
# ## 1. Read in Graph Data
# + colab={"base_uri": "https://localhost:8080/"} id="IQn0FoAJUxXk" outputId="811f645d-6d66-4a73-9298-337766779cc6"
# Mount Google Drive so the preprocessed network files used below are readable.
from google.colab import drive
drive.mount('/content/drive')
# + id="B3FQCajLUZKF"
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
import scipy.sparse as sp
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.metrics import average_precision_score
import pickle
# + id="gS7uWrtjUZKG" colab={"base_uri": "https://localhost:8080/"} outputId="08e6485a-d5cd-4ea6-c617-0bccffb31765"
EGO_USER = 0 # which ego network to look at
# Load pickled (adj, feat) tuple
network_dir = '/content/drive/MyDrive/fb-processed/{}-adj-feat.pkl'.format(EGO_USER)
print(network_dir)
# NOTE(review): unpickling runs arbitrary code; this file is assumed trusted.
with open(network_dir, 'rb') as f:
    adj, features = pickle.load(f)
g = nx.Graph(adj) # re-create graph using node indices (0 to num_nodes-1)
# + id="GFPtM0KtUZKG" colab={"base_uri": "https://localhost:8080/", "height": 268} outputId="bef1675e-a89d-4dc6-bb6b-ae05d56926a2"
# draw network
# Quick visual sanity check of the loaded ego network (no labels, small red nodes).
nx.draw_networkx(g, with_labels=False, node_size=50, node_color='r')
plt.show()
# + [markdown] id="V8wXyGzZUZKH"
# ## 2. Preprocessing/Train-Test Split
# + id="76ljaRbuUZKH"
import sys
sys.path.append('/content/drive/MyDrive')  # make the gae package importable from Drive
from gae.preprocessing import mask_test_edges
np.random.seed(0) # make sure train-test split is consistent between notebooks
adj_sparse = nx.to_scipy_sparse_matrix(g)
# Perform train-test split
# mask_test_edges returns a reduced training adjacency plus positive/negative
# edge sets for training, validation, and testing.
adj_train, train_edges, train_edges_false, val_edges, val_edges_false, \
    test_edges, test_edges_false = mask_test_edges(adj_sparse)
g_train = nx.from_scipy_sparse_matrix(adj_train) # new graph object with only non-hidden edges
# + id="mtfw3lQrUZKI" outputId="1b2f3953-ff42-4a69-fcf2-884fcd8fec5f"
# Inspect train/test split
# NOTE: Python 2 print statements — this notebook's kernel is Python 2 (see header).
print "Total nodes:", adj_sparse.shape[0]
print "Total edges:", int(adj_sparse.nnz/2) # adj is symmetric, so nnz (num non-zero) = 2*num_edges
print "Training edges (positive):", len(train_edges)
print "Training edges (negative):", len(train_edges_false)
print "Validation edges (positive):", len(val_edges)
print "Validation edges (negative):", len(val_edges_false)
print "Test edges (positive):", len(test_edges)
print "Test edges (negative):", len(test_edges_false)
# + id="YHMCyK9VUZKI"
def get_roc_score(edges_pos, edges_neg, embeddings):
    """Evaluate link-prediction quality of node embeddings.

    Edge scores are the sigmoid of entries of Z * Z^T, where Z is the
    embedding matrix. Positive edges are labeled 1 and negative (absent)
    edges 0, and the predicted scores are compared against those labels.

    edges_pos -- iterable of (i, j) node-index pairs that are real edges
    edges_neg -- iterable of (i, j) node-index pairs that are not edges
    embeddings -- (num_nodes x d) array of node embedding vectors
    Returns a (roc_auc, average_precision) tuple.
    """
    # Pairwise dot products of embeddings: raw score for every node pair.
    score_matrix = np.dot(embeddings, embeddings.T)

    def sigmoid(x):
        # Map a raw score to a probability in (0, 1).
        return 1 / (1 + np.exp(-x))

    # Predicted probability for each positive (true) edge.
    preds_pos = [sigmoid(score_matrix[edge[0], edge[1]]) for edge in edges_pos]
    # Predicted probability for each negative (absent) edge.
    preds_neg = [sigmoid(score_matrix[edge[0], edge[1]]) for edge in edges_neg]

    # Labels are known by construction (1 for positives, 0 for negatives);
    # the original lookups of actual values in the global adj_sparse were
    # never used, so that dead code (and hidden global dependency) is removed.
    preds_all = np.hstack([preds_pos, preds_neg])
    labels_all = np.hstack([np.ones(len(preds_pos)), np.zeros(len(preds_neg))])
    roc_score = roc_auc_score(labels_all, preds_all)
    ap_score = average_precision_score(labels_all, preds_all)
    return roc_score, ap_score
# + [markdown] id="FWT3iwn_UZKJ"
# ## 3. Spectral Clustering
# + id="pJarbhAxUZKJ"
from sklearn.manifold import spectral_embedding
# Get spectral embeddings (16-dim)
# Each node is represented by 16 components derived from the graph Laplacian's
# eigendecomposition; random_state fixes the sign/ordering for reproducibility.
emb = spectral_embedding(adj_train, n_components=16, random_state=0)
# + id="wC6ywK28UZKK" colab={"base_uri": "https://localhost:8080/"} outputId="a76ff382-2553-4d71-b0cc-cb71d673c9dd"
# Calculate ROC AUC and Average Precision
# Score the embeddings on the held-out test edges/non-edges.
sc_roc, sc_ap = get_roc_score(test_edges, test_edges_false, emb)
# Python 2 print statements — this notebook's kernel is Python 2.
print 'Spectral Clustering Test ROC score: ', str(sc_roc)
print 'Spectral Clustering Test AP score: ', str(sc_ap)
| spectral_temp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.10 64-bit (''mlep-w1-lab'': conda)'
# name: python3
# ---
# + [markdown] colab_type="text" id="mw2VBrBcgvGa"
# In this exercise you'll try to build a neural network that predicts the price of a house according to a simple formula.
#
# So, imagine if house pricing was as easy as a house costs 50k + 50k per bedroom, so that a 1 bedroom house costs 100k, a 2 bedroom house costs 150k etc.
#
# How would you create a neural network that learns this relationship so that it would predict a 7 bedroom house as costing close to 400k etc.
#
# Hint: Your network might work better if you scale the house price down. You don't have to give the answer 400...it might be better to create something that predicts the number 4, and then your answer is in the 'hundreds of thousands' etc.
# -
import tensorflow as tf
import numpy as np
from tensorflow import keras
# + colab={} colab_type="code" id="PUNO2E6SeURH"
# GRADED FUNCTION: house_model
def house_model(y_new):
    """Fit a one-neuron linear model to the pricing rule 'price = 50k + 50k
    per bedroom' (scaled to hundreds of thousands) and predict for y_new."""
    bedrooms = np.arange(1, 10)
    # scaled prices: 1 bedroom -> 1.0 (100k), 2 bedrooms -> 1.5 (150k), ...
    scaled_prices = 0.5 + 0.5 * bedrooms
    model = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
    model.compile(optimizer="sgd", loss="mean_squared_error")
    model.fit(bedrooms, scaled_prices, epochs=100)
    return model.predict(y_new)[0]
# -
# Predict the scaled price of a 7-bedroom house; expect roughly 4.0 (~400k)
prediction = house_model([7.0])
print(prediction)
# +
# Now click the 'Submit Assignment' button above.
# Once that is complete, please run the following two cells to save your work and close the notebook
# + language="javascript"
# <!-- Save the notebook -->
# IPython.notebook.save_checkpoint();
# + language="javascript"
# IPython.notebook.session.delete();
# window.onbeforeunload = null
# setTimeout(function() { window.close(); }, 1000);
| Course1/Week 1/Code/Exercise_1_House_Prices_Question.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# metadata:
# interpreter:
# hash: dca0ade3e726a953b501b15e8e990130d2b7799f14cfd9f4271676035ebe5511
# name: 'Python 3.8.5 64-bit (''base'': conda)'
# ---
import pandas as pd
import numpy as np
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
import h2o
from h2o.frame import H2OFrame
from h2o.estimators.random_forest import H2ORandomForestEstimator
from sklearn.model_selection import train_test_split
from sklearn.metrics import auc, roc_curve, classification_report
# <h3>Load Data</h3>
# transactions with a fraud label ('class') and an IP-range-to-country lookup
fraud_df = pd.read_csv("../Collection of DS take home challenges/data collection-Product dataset数据挑战数据集/ML Identifying Fraudulent Activities with solution/Fraud_Data.csv")
ipaddress_df = pd.read_csv("../Collection of DS take home challenges/data collection-Product dataset数据挑战数据集/ML Identifying Fraudulent Activities with solution/IpAddress_to_Country.csv")
fraud_df.head()
ipaddress_df.head()
fraud_df.info()
ipaddress_df.info()
# map each transaction's ip_address to a country by scanning the lookup table
# for the containing [lower_bound, upper_bound] range
# NOTE(review): this is an O(rows * ranges) linear scan per transaction; a
# sorted interval join (e.g. pd.merge_asof on lower_bound) would be much
# faster -- confirm the ranges are non-overlapping first
countries = []
for i in range(len(fraud_df)):
    country = ipaddress_df[(ipaddress_df["lower_bound_ip_address"] <= fraud_df["ip_address"][i]) & (ipaddress_df["upper_bound_ip_address"] >= fraud_df["ip_address"][i])]["country"].values
    if len(country) == 1:
        countries.append(country[0])
    else:
        # no unique matching range -> mark as unknown with the string "NA"
        countries.append("NA")
fraud_df["country"] = countries
fraud_df.describe()
fraud_df.info()
# parse timestamp strings into datetimes for later date arithmetic
fraud_df["signup_time"] = pd.to_datetime(fraud_df["signup_time"])
fraud_df["purchase_time"] = pd.to_datetime(fraud_df["purchase_time"])
fraud_df.isnull().sum()
# cardinality and a sample of values for each categorical column
columns = ["source", "browser", "country"]
for i in columns:
    uniques = sorted(fraud_df[i].unique())
    print("{0:10s} {1:10d}\t".format(i, len(uniques)), uniques[:5])
# <h3>Feature Engineering</h3>
fraud_df.head()
# time interval
def time_interval(x):
    """Map a timestamp to a coarse time-of-day bucket based on its hour."""
    hour = x.hour
    if 6 <= hour <= 12:
        return "Morning"
    if 12 < hour <= 16:
        return "Afternoon"
    if 16 < hour <= 23:
        return "Evening/Night"
    if 0 <= hour < 6:
        return "Midnight"
# bucket signup/purchase timestamps into time-of-day categories
fraud_df["signup_interval"] = fraud_df["signup_time"].apply(time_interval)
fraud_df["purchase_interval"] = fraud_df["purchase_time"].apply(time_interval)
# signup and purchase diff
fraud_df["difference"] = fraud_df["purchase_time"]-fraud_df["signup_time"]
# NOTE(review): Timedelta.seconds is only the seconds *within* the last day
# (0..86399) -- the days component is silently dropped; .total_seconds() is
# presumably what was intended. Confirm before retraining.
fraud_df["difference"] = fraud_df["difference"].apply(lambda x: x.seconds)
# how many user_id associated with the device_id
fraud_df["num_user_id"] = fraud_df["device_id"].apply(lambda x: len(fraud_df[fraud_df["device_id"] == x]))
# lambda function is really slow, try to use merge next time
# how many user_id associated with the ip_address
ip_count = fraud_df.groupby("ip_address").size().reset_index().rename(columns = {0:"num_ip_address"})
fraud_df = fraud_df.merge(ip_count, how = "left", on = "ip_address")
# day of week
fraud_df["signup_day"] = fraud_df["signup_time"].apply(lambda x: x.strftime('%A'))
fraud_df["purchase_day"] = fraud_df["purchase_time"].apply(lambda x: x.strftime('%A'))
# NOTE(review): this reload discards everything computed above; presumably
# "fraud_df.csv" is a cached copy of the engineered features -- verify it is
# up to date with the code above
fraud_df = pd.read_csv("fraud_df.csv")
fraud_df.head()
# <h3>Model Building</h3>
# select features and target
df = fraud_df[["purchase_value", "source", "browser", "sex", "age", "country", "difference", "num_user_id", "num_ip_address", "signup_day", "purchase_day", "class"]]
# start a local H2O cluster and clear any leftover objects
h2o.init()
h2o.remove_all()
# +
h2o_df = H2OFrame(df)
# declare categorical columns (and the target) as factors for H2O
for i in ["source", "browser", "sex", "country", "signup_day", "purchase_day", "class"]:
    h2o_df[i] = h2o_df[i].asfactor()
# +
# train test split
strat_split = h2o_df["class"].stratified_split(test_frac= 0.3)
train = h2o_df[strat_split == "train"]
test = h2o_df[strat_split == "test"]
features = ["purchase_value", "source", "browser", "sex", "age", "country", "difference", "num_user_id", "num_ip_address", "signup_day", "purchase_day"]
target = "class"
# -
# random forest with class balancing and AUC-based early stopping
clf = H2ORandomForestEstimator(balance_classes = True, stopping_rounds=5, stopping_metric='auc', score_each_iteration=True)
clf.train(x = features, y=target, training_frame=train, validation_frame=test)
clf.varimp_plot()
# +
# predict
train_true = train.as_data_frame()['class'].values
test_true = test.as_data_frame()['class'].values
# 'p1' is the predicted probability of the positive (fraud) class
train_pred = clf.predict(train).as_data_frame()['p1'].values
test_pred = clf.predict(test).as_data_frame()['p1'].values
train_fpr, train_tpr, _ = roc_curve(train_true, train_pred)
test_fpr, test_tpr, _ = roc_curve(test_true, test_pred)
train_auc = np.round(auc(train_fpr, train_tpr), 3)
test_auc = np.round(auc(test_fpr, test_tpr), 3)
# -
# Classification report
# hard labels via a fixed 0.5 probability cutoff
print(classification_report(y_true=test_true, y_pred=(test_pred > 0.5).astype(int)))
# ROC curves for train and test; the dashed y=x line is the no-skill baseline
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(train_fpr, train_tpr, label='Train AUC: ' + str(train_auc))
ax.plot(test_fpr, test_tpr, label='Test AUC: ' + str(test_auc))
ax.plot(train_fpr, train_fpr, 'k--', label='Chance Curve')
ax.set_xlabel('False Positive Rate', fontsize=12)
ax.set_ylabel('True Positive Rate', fontsize=12)
ax.grid(True)
ax.legend(fontsize=12)
plt.show()
# partial dependence for the most important features
cols = ['num_user_id', 'difference', 'country', 'num_ip_address']
_ = clf.partial_plot(data=train, cols=cols, nbins=200, figsize=(18, 20))
# release the H2O cluster
h2o.cluster().shutdown()
| Identifying Fraudulent Activities.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
import pandas as pd
# exploratory scratch cells: load a local training file and poke at it
q = pd.read_csv("/Users/sowcar/Downloads/train.csv")
q.head(4)
q.plot.area("cat1","loss")
q
# NOTE(review): DataFrame.select() was deprecated in pandas 0.21 and removed
# in 1.0 -- this likely raises AttributeError; q["id"] or q.filter(["id"])
# is presumably what was intended
q.select("id")
print(q.tail(3))
import matplotlib
q.plot()
import matplotlib as m
# NOTE(review): subtracting a plot result from a module raises TypeError;
# looks like a typo for `m = q.plot()` (fixed on the next line)
m - q.plot()
m = q.plot.area("cat1","loss")
import matplotlib as M
# attribute access without calling -- returns the bound method, does not run it
q.describe
q.dtypes
import sklearn as s
s.utils
s.base
from sklearn import svm
clf = svm.SVC(gamma=0.001, C=100.)
# NOTE(review): `digits` is never defined in this notebook -- this raises
# NameError; it needs `digits = datasets.load_digits()` first (the datasets
# import below also comes too late)
clf.fit(digits.data[:-1], digits.target[:-1])
from sklearn import datasets
| Untitled1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp utils
# -
# %load_ext autoreload
# %autoreload 2
# # Notations
# # general utility functions
#
# +
#export
import re
import numpy as np
import pandas as pd
from functools import singledispatch
# -
# ## make codes
#export
def make_codes(n=100, letters=26, numbers=100, seed=False):
    """
    Generate a dataframe with a column of random codes

    Args:
        n (int): number of rows (events) to generate
        letters (int): The number of different letters to use
        numbers (int): The number of different numbers to use
        seed (bool): if True, seed numpy's RNG (with 0) for reproducibility

    Returns:
        A dataframe with a column 'code' holding one or more comma-separated
        codes per row, each code a letter followed by a number (e.g. 'B7')
    """
    # each code is assumed to consist of a letter and a number
    # bug fix: the hand-typed alphabet contained a duplicate 'g' (and so a
    # wrong letter order), and the slice took letters+1 entries instead of
    # letters; use the correct alphabet and an exact slice
    alphabet = list('abcdefghijklmnopqrstuvwxyz')
    letters = alphabet[:letters]
    # make random numbers same if seed is specified
    if seed:
        np.random.seed(0)
    # determine the number of codes to be drawn for each event
    n_codes = np.random.negative_binomial(1, p=0.3, size=n)
    # avoid zero (all events have to have at least one code)
    n_codes = n_codes + 1
    # for each event, randomly generate the number of codes given by n_codes
    codes = []
    for i in n_codes:
        diag = [np.random.choice(letters).upper() +
                str(int(np.random.uniform(low=1, high=numbers)))
                for num in range(i)]
        codes.append(','.join(diag))
    # create a dataframe based on the list
    df = pd.DataFrame(codes)
    df.columns = ['code']
    return df
# ## make data
#export
def make_data(n=100, letters=26, numbers=100, seed=False, expand=False,
              columns=['pid', 'gender', 'birth_date', 'date', 'region', 'codes']):
    """
    Generate a dataframe of simulated persons and coded events

    Args:
        n (int): number of persons to simulate
        letters (int): The number of different letters to use
        numbers (int): The number of different numbers to use
        seed (bool/int): passed to np.random.seed for reproducibility
        expand (bool): split comma-separated codes into code_0, code_1, ... columns
        columns (list): columns (and order) of the returned dataframe
            NOTE(review): mutable default argument -- harmless as written
            since it is never mutated, but consider columns=None + internal default
    Returns
        A dataframe with one row per simulated event
    Examples
        >>>df = make_data(n=100, letters=5, numbers=5, seed=True)
    """
    if seed:
        np.random.seed(seed=seed)
    # person-level attributes
    pid = range(n)
    df_person=pd.DataFrame(index = pid)
    #female = np.random.binomial(1, 0.5, size =n)
    gender = np.random.choice(['male', 'female'], size=n)
    region = np.random.choice(['north', 'south', 'east', 'west'], size=n)
    birth_year = np.random.randint(1920, 2019, size=n)
    birth_month = np.random.randint(1,12, size=n)
    birth_day = np.random.randint(1,28, size=n) # ok, I know!
    # number of events: years alive (to 2020) times a Poisson rate per year
    events_per_year = np.random.poisson(1, size=n)
    years = 2020 - birth_year
    events = years * events_per_year
    # everyone gets at least one event
    events = np.where(events==0,1,events)
    events = events.astype(int)
    # draw a code string for every event of every person
    # NOTE: the comprehension variable n shadows the parameter n here
    all_codes=[]
    codes = [all_codes.extend(make_codes(n=n, letters=letters,
                                         numbers=numbers,
                                         seed=seed)['code'].tolist())
             for n in events]
    # event dates: a random day offset between birth and 2020
    days_alive = (2020 - birth_year) *365
    days_and_events = zip(days_alive.tolist(), events.tolist())
    all_days=[]
    days_after_birth = [all_days.extend(np.random.randint(0, max_day, size=n)) for max_day, n in days_and_events]
    # repeat each pid once per event
    # NOTE(review): p+1 offsets the event pids by one relative to df_person's
    # 0-based index, so the merge below pairs events with the *next* person --
    # confirm whether this offset is intended
    pid_and_events = zip(list(pid), events.tolist())
    all_pids=[]
    pids = [all_pids.extend([p+1]*e) for p, e in pid_and_events]
    df_events = pd.DataFrame(index=all_pids)
    df_events['codes'] = all_codes
    df_events['days_after'] = all_days
    #df_person['female'] = female
    df_person['gender'] = gender
    df_person['region'] = region
    df_person['year'] = birth_year
    df_person['month'] = birth_month
    df_person['day'] = birth_day
    # join events to person attributes on the (pid) index
    df = df_events.merge(df_person, left_index=True, right_index=True)
    df['birth_date'] = pd.to_datetime(df[['year', 'month', 'day']])
    df['date'] = df['birth_date'] + pd.to_timedelta(df.days_after, unit='d')
    del df['month']
    del df['day']
    del df['days_after']
    df['pid'] = df.index
    df.index_name = 'pid_index'
    df=df.sort_values(['pid', 'date'])
    df=df[columns]
    if expand:
        # one column per code position (code_0, code_1, ...)
        splitted = df.codes.str.split(',', expand=True).add_prefix('code_').fillna(np.nan)
        df = pd.concat([df,splitted], axis=1)
        del df['codes']
    # include deaths too?
    return df
# ## get rows
#export
# mark rows that contain certain codes in one or more colums
def get_rows(df, codes, cols=None, sep=None, pid='pid', all_codes=None, fix=True, info=None):
    """
    Make a boolean series that is true for all rows that contain the codes

    Args:
        df (dataframe or series): The dataframe with codes
        codes (str, list, set, dict): codes to be counted
        cols (str or list): list of columns to search in
        sep (str): The symbol that seperates the codes if there are multiple codes in a cell
        pid (str): The name of the column with the personal identifier
        all_codes (list): optional codebook of all existing codes
        fix (bool): expand star/hyphen/colon notation in codes and cols first
        info (Info): optional cache of previously computed results

    Returns:
        pd.Series of bool aligned with df.index

    Example:
        >>> get_rows(df=df, codes='F3', cols='codes', sep=',')
    """
    # check if evaluated previously
    info, rows = memory(info=info, func='get_rows', expr=codes)
    # bug fix: `if rows:` raises "truth value is ambiguous" when the cached
    # result is a pandas Series; test for a cache hit explicitly
    if rows is not None:
        return rows
    # check if codes and columns need to be expanded (needed if they use notation)
    if fix:
        cols = expand_columns(cols, all_columns=list(df.columns), info=info)
        all_codes = sorted(unique(df=df, cols=cols, sep=sep))
        codes = expand_code(codes, all_codes=all_codes)
    # codes and cols should be lists
    codes = listify(codes)
    cols = listify(cols)
    # approach depends on whether we have multi-value cells or not
    # if sep exists, we have multi-value cells
    if sep:
        # note: this assumes the sep is a regex word delimiter
        codes = [rf'\b{code}\b' for code in codes]
        codes_regex = '|'.join(codes)
        # starting point: no codes have been found
        # (needed since otherwise the function might return None if no codes exist)
        # bug fix: `False*len(df)` is the integer 0, which made this an int
        # Series of zeros; build an actual boolean mask instead
        rows = pd.Series(False, index=df.index)
        # loop over all columns and mark when a code exists
        for col in cols:
            rows = rows | df[col].str.contains(codes_regex, na=False)
    # if not multi valued cells
    else:
        mask = df[cols].isin(codes)
        rows = mask.any(axis=1)
    return rows
# ## extract codes
#export
def extract_codes(df, codes, cols=None, sep=None, new_sep=',', na_rep='',
                  prefix=None, merge=False, out='bool', fix=True,
                  series=True, group=False, all_codes=None, info=None):
    """
    Produce one or more columns with only selected codes

    Args:
        df (dataframe): Dataframe with events
        codes (string, list or dict): The codes for the disease
        cols (string, list): Name of columns where codes are located
        sep (string, default: None): Separator between codes in same cell (if exist)
            (If None, the function will infer the separator)
        pid (str, default: 'pid'): Name of column with the personal identification number
        codebook (list): User specified list of all possible or allowed codes
        merge (bool): Content of all columns is merged to one series # only if out='text'?
        group (bool): Star and other notation remain a single group, not split into individual codes
        out (string, ['text', 'category', 'bool' or 'int']): Datatype of output column(s)

    Notes:
        Can produce a set of dummy columns for codes and code groups.
        Can also produce a merged column with only extracted codes.
        Accepts star notation.
        Also accepts both single value columns and columns with compound codes and separators.
        Repeat events in same rows are only extracted once.

    Example:
        to create three dummy columns, based on codes in icdmain column:
        >>> extract_codes(df=df,
        >>>               codes={'fracture': 'S72*', 'cd': 'K50*', 'uc': 'K51*'},
        >>>               cols=['icdmain', 'icdbi'],
        >>>               merge=False,
        >>>               out='text')

        extract_codes(df=df, codes={'b':['A1','F3'], 'c':'c*'}, cols='codes', sep=',', merge = False)
        extract_codes(df=df, codes={'b':['A1','F3'], 'c':'C*'}, cols='codes', sep=',', merge = False)
        extract_codes(df=df, codes=['A1','F3', 'C*'], cols='codes', sep=',', merge = False)
        extract_codes(df=df, codes='C*', cols='codes', sep=',', merge = False)

        nb: problem with extract rows if dataframe is empty (none of the requested codes)
    """
    # a bare Series is wrapped in a frame so the column logic below applies
    if isinstance(df, pd.Series):
        df=df.to_frame()
        # NOTE(review): this makes cols a list containing an Index object,
        # not a list of column names -- confirm downstream handling is correct
        cols=[df.columns]
    if not cols:
        cols=[df.columns]
    if fix:
        # expand notation in column names and build the codebook of all codes
        cols=expand_columns(cols, all_columns=list(df.columns))
        all_codes = unique(df=df, cols=cols, sep=sep)
        if isinstance(codes, str):
            codes=listify(codes)
        # a plain list of codes: expand each and map every code to itself
        if (isinstance(codes, list)) and (not merge):
            codes = expand_code(codes, all_codes=all_codes, info=info)
            codes = {code:code for code in codes}
        # merged list: one name (the stringified tuple) for the whole group
        if (isinstance(codes, list)) and (merge):
            codes = {str(tuple(codes)):codes}
            codes = expand_code(codes, all_codes=all_codes, info=info)
        # NOTE(review): leftover debug print -- consider removing
        print('after fix', cols, codes)
    # build one output column per name in codes, filled according to `out`
    subset = pd.DataFrame(index=df.index)
    for k, v in codes.items():
        if v:
            rows = get_rows(df=df, codes=v, cols=cols, sep=sep, all_codes=all_codes, fix=False)
        else:
            # empty code list -> no rows can match
            rows=False
        if out == 'bool':
            subset[k] = rows
        elif out == 'int':
            subset[k] = rows.astype(int)
        elif out == 'category':
            subset.loc[rows, k] = k
            subset[k] = subset[k].astype('category')
        else:
            # 'text': cell holds the group name where matched, na_rep elsewhere
            subset[k] = na_rep
            subset.loc[rows, k] = k
    if (merge) and (out == 'bool'):
        subset = subset.astype(int).astype(str)
    new_codes = list(subset.columns)
    # collapse the per-group columns into one comma-separated text column
    if (merge) and (len(codes) > 1):
        headline = ', '.join(new_codes)
        merged = subset.iloc[:, 0].str.cat(subset.iloc[:, 1:].values, sep=new_sep,
                                           na_rep=na_rep)  # strange .T.values seemed to work previously but it should not have
        merged = merged.str.strip(',')
        subset = merged
        subset.name = headline
    if out == 'category':
        subset = subset.astype('category')
    # return a series if only one code is asked for (and also if merged?)
    if series and (len(codes) == 1):
        subset = subset.squeeze()
    return subset
# # General helper functions
# ## Info
#export
class Info():
    """Container for metadata and cached results from previous analyses."""

    def __init__(self):
        # maps function name -> {expression: cached result}
        self.evaluated = dict()
# ## memory
#export
def memory(info, func, expr):
    """
    Return a cached result if *func* was previously evaluated with *expr*.

    Args:
        info: an Info object holding the cache (created here when falsy)
        func (str): name of the calling function
        expr: the argument the calling function was invoked with

    Returns:
        (info, rows): the (possibly new) cache object and the cached result,
        or None when there is no cached entry
    """
    rows = None
    if not info:
        # no cache yet: create one with an empty slot for this function
        info = Info()
        info.evaluated[func] = {}
        return info, rows
    cache = info.evaluated
    if func not in cache:
        cache[func] = {}
    elif expr in cache[func]:
        rows = cache[func][expr]
    return info, rows
# ## listify
#export
def listify(string_or_list):
    """
    Wrap a bare string in a single-element list; pass anything else through.

    Args:
        string_or_list (str or any)

    Returns:
        [string_or_list] if the input is a string, otherwise the input unchanged

    Note:
        Lets callers write cols='icd10' instead of cols=['icd10'] -- the
        string form is normalised to a list here.
    """
    return [string_or_list] if isinstance(string_or_list, str) else string_or_list
#export
def reverse_dict(dikt):
    """
    Invert a {name: code(s)} mapping to {code: name}.

    Args:
        dikt (dict): values may be a single code (str) or a list of codes

    Returns:
        dict mapping each individual code to its group name
    """
    new_dict = {}
    for name, codelist in dikt.items():
        # bug fix: the original called the undefined name _listify (NameError);
        # normalise a bare string to a one-element list inline
        if isinstance(codelist, str):
            codelist = [codelist]
        new_dict.update({code: name for code in codelist})
    return new_dict
# # Notation
# ## del dot and zero
# +
#export
def del_dot(code):
    """Remove dots from a code, or from every code in a list of codes."""
    if isinstance(code, str):
        return code.replace('.', '')
    return [c.replace('.', '') for c in code]
def del_zero(code, left=True, right=False):
    """
    Strip leading and/or trailing zeros from a code or a list of codes.

    Args:
        code (str or list of str): code(s) to clean
        left (bool): strip leading zeros
        right (bool): strip trailing zeros

    Returns:
        A string if the input was a string, otherwise a list of strings.
    """
    # bug fix: the original tested the undefined name `codes` (NameError) and
    # then iterated over the raw input, which walked a bare string char by char
    codes = [code] if isinstance(code, str) else list(code)
    if left:
        codes = [c.lstrip('0') for c in codes]
    if right:
        codes = [c.rstrip('0') for c in codes]
    if isinstance(code, str):
        return codes[0]
    return codes
# -
# ## expand hyphen
# +
#export
# function to expand a string like 'K51.2-K53.8' to a list of codes
# Need regex to extract the number component of the input string
# The singledispach decorator enables us to have the same name, but use
# different functions depending on the datatype of the first argument.
#
# In our case we want one function to deal with a single string input, and
# another to handle a list of strings. It could all be handled in a single
# function using nested if, but singledispatch makes it less messy and more fun!
# Here is the main function, it is just the name and an error message if the
# argument does not fit any of the inputs that wil be allowed
@singledispatch
def expand_hyphen(expr):
    """
    Expands codes expression(s) that have hyphens to list of all codes

    Args:
        expr (str or list of str): String or list of strings to be expanded

    Returns:
        List of strings

    Examples:
        expand_hyphen('C00-C26')
        expand_hyphen('b01.1*-b09.9*')
        expand_hyphen('n02.2-n02.7')
        expand_hyphen('c00*-c260')
        expand_hyphen('b01-b09')
        expand_hyphen('b001.1*-b009.9*')
        expand_hyphen(['b001.1*-b009.9*', 'c11-c15'])

    Note:
        Unequal number of decimals in start and end code is problematic.
        Example: C26.0-C27.11 will not work since the meaning is not obvious:
        Is the step size 0.01? In which case C27.1 will not be included, while
        C27.10 will be (and trailing zeros can be important in codes)
    """
    raise ValueError('The argument must be a string or a list')


# register the function to be used if the input is a string
@expand_hyphen.register(str)
def _(expr):
    # return immediately if nothing to expand
    if '-' not in expr:
        return [expr]
    lower, upper = expr.split('-')
    lower = lower.strip()
    # identify the numeric component of the code
    # bug fix: raw strings so \d and \. are genuine regex escapes instead of
    # deprecated string-literal escapes that only work by accident
    lower_str = re.search(r"\d*\.\d+|\d+", lower).group()
    upper_str = re.search(r"\d*\.\d+|\d+", upper).group()
    # note: what about european decimal notation?
    # also note: what if multiple groups K50.1J8.4-etc
    lower_num = int(lower_str.replace('.', ''))
    upper_num = int(upper_str.replace('.', '')) + 1
    if upper_num < lower_num:
        raise ValueError('The start code cannot have a higher number than the end code')
    # remember length in case of leading zeros
    length = len(lower_str)
    nums = range(lower_num, upper_num)
    # must use integers in a loop, not floats
    # which also means that we must multiply and divide to get decimals back
    # and take care of leading and trailing zeros that may disappear
    if '.' in lower_str:
        lower_decimals = len(lower_str.split('.')[1])
        upper_decimals = len(upper_str.split('.')[1])
        if lower_decimals == upper_decimals:
            multiplier = 10**lower_decimals
            codes = [lower.replace(lower_str, format(num / multiplier, f'.{lower_decimals}f').zfill(length)) for num in nums]
        # special case: allow k1.1-k1.123, but not k.1-k2.123; the last is ambiguous: should it list k2.0 or only 2.00?
        elif (lower_decimals < upper_decimals) & (upper_str.split('.')[0] == lower_str.split('.')[0]):
            from_decimal = int(lower_str.split('.')[1])
            to_decimal = int(upper_str.split('.')[1]) + 1
            nums = range(from_decimal, to_decimal)
            decimal_str = '.' + lower.split('.')[1]
            codes = [lower.replace(decimal_str, '.' + str(num)) for num in nums]
        else:
            raise ValueError('The start code and the end code do not have the same number of decimals')
    else:
        codes = [lower.replace(lower_str, str(num).zfill(length)) for num in nums]
    return codes


# register the function to be used if the input is a list of strings
@expand_hyphen.register(list)
def _(expr):
    extended = []
    for word in expr:
        extended.extend(expand_hyphen(word))
    return extended
# -
# ## expand star
# +
#export
# A function to expand a string with star notation (K50*)
# to list of all codes starting with K50
@singledispatch
def expand_star(code, all_codes=None):
    """
    Expand expressions with star notation to a list of all values with the specified pattern

    Args:
        code (str or list): Expression (or list of expressions) to be expanded
        all_codes (list): A list of all codes

    Examples:
        expand_star('K50*', all_codes=icd9)
        expand_star('K*5', all_codes=icd9)
        expand_star('*5', all_codes=icd9)
    """
    raise ValueError('The argument must be a string or a list')


@expand_star.register(str)
def _(code, all_codes=None):
    # return immediately if there is nothing to expand
    if '*' not in code:
        return [code]
    start_str, end_str = code.split('*')
    # bug fix: these used to be three independent `if`s, so a pattern with
    # both a prefix and a suffix ('K*5') was overwritten by the prefix-only
    # branch and returned too many codes; a bare '*' raised NameError
    if start_str and end_str:
        matches = {c for c in all_codes if c.startswith(start_str) and c.endswith(end_str)}
    elif start_str:
        matches = {c for c in all_codes if c.startswith(start_str)}
    else:
        # suffix only ('*5'); a bare '*' matches everything (endswith(''))
        matches = {c for c in all_codes if c.endswith(end_str)}
    return sorted(matches)


@expand_star.register(list)
def _(code, all_codes=None):
    expanded = []
    for star_code in code:
        expanded.extend(expand_star(star_code, all_codes=all_codes))
    # uniqify in case some patterns overlap
    return sorted(set(expanded))
# -
# ## expand colon
# +
#export
# function to get all codes in a list between the specified start and end code
# Example: Get all codes between K40:L52
@singledispatch
def expand_colon(code, all_codes=None):
    raise ValueError('The argument must be a string or a list')


@expand_colon.register(str)
def _(code, all_codes=None):
    """
    Expand expressions with colon notation to a list of complete code names

    Args:
        code (str or list): Expression (or list of expressions) to be expanded
        all_codes (list or array): The list to slice from

    Examples:
        K50:K52
        K50.5:K52.19
        A3.0:A9.3

    Note: This is different from hyphen and star notation because it can handle
    different code lengths and different numbers of decimals
    """
    if ':' not in code:
        return [code]
    startstr, endstr = code.split(':')
    # remove spaces
    startstr = startstr.strip()
    endstr = endstr.strip()
    # find start and end position; +1 makes the end code inclusive
    startpos = all_codes.index(startstr)
    endpos = all_codes.index(endstr) + 1
    # bug fix: slicing to endpos+1 included one code *past* the end code;
    # endpos already accounts for the inclusive upper bound
    expanded = all_codes[startpos:endpos]
    return expanded


@expand_colon.register(list)
def _(code, all_codes=None, regex=False):
    expanded = []
    for cod in code:
        expanded.extend(expand_colon(cod, all_codes=all_codes))
    return expanded
# -
# ## expand regex
# +
#export
# Return all elements in a list that fits a regex pattern
@singledispatch
def expand_regex(code, all_codes):
    raise ValueError('The argument must be a string or a list of strings')


@expand_regex.register(str)
def _(code, all_codes=None):
    """Return the values in all_codes matched (re.match) by the pattern."""
    pattern = re.compile(code)
    matches = {c for c in all_codes if pattern.match(c)}
    # order is arbitrary (set-derived), as in the original
    return list(matches)


@expand_regex.register(list)
def _(code, all_codes):
    """Expand every pattern and return the sorted union of all matches."""
    hits = []
    for pattern in code:
        hits.extend(expand_regex(pattern, all_codes=all_codes))
    # uniqify in case some patterns overlap
    return sorted(set(hits))
# -
# ## expand code
# +
#export
@singledispatch
def expand_code(code, all_codes=None,
                hyphen=True, star=True, colon=True, regex=False,
                drop_dot=False, drop_leading_zero=False,
                sort_unique=True, info=None):
    """
    Expand a code expression (str, list or dict) using hyphen, star, colon or
    regex notation into an explicit list of codes drawn from all_codes.
    """
    raise ValueError('The argument must be a string or a list of strings')


@expand_code.register(str)
def _(code, all_codes=None,
      hyphen=True, star=True, colon=True, regex=False,
      drop_dot=False, drop_leading_zero=False,
      sort_unique=True, info=None):
    # validating input: colon ranges must run between two concrete codes,
    # so they cannot be combined with star or hyphen in the same expression
    if (not regex) and (':' in code) and (('-' in code) or ('*' in code)):
        raise ValueError('Notation using colon must start from and end in specific codes, not codes using star or hyphen')
    # regex mode bypasses all other notation handling
    if regex:
        codes = expand_regex(code, all_codes=all_codes)
        return codes
    if drop_dot:
        code = del_dot(code)
    codes=[code]
    # each expansion feeds the next: hyphen ranges first, then star
    # wildcards, then colon slices against the codebook
    if hyphen:
        codes=expand_hyphen(code)
    if star:
        codes=expand_star(codes, all_codes=all_codes)
    if colon:
        codes=expand_colon(codes, all_codes=all_codes)
    if sort_unique:
        codes = sorted(list(set(codes)))
    return codes


@expand_code.register(list)
def _(code, all_codes=None, hyphen=True, star=True, colon=True, regex=False,
      drop_dot=False, drop_leading_zero=False,
      sort_unique=True, info=None):
    # expand every element and return the sorted union
    expanded=[]
    for cod in code:
        new_codes = expand_code(cod, all_codes=all_codes, hyphen=hyphen, star=star, colon=colon, regex=regex, drop_dot=drop_dot, drop_leading_zero=drop_leading_zero)
        expanded.extend(new_codes)
    # uniqify in case some overlap
    expanded = list(set(expanded))
    return sorted(expanded)


# a dict of names and codes (in a string or a list)
@expand_code.register(dict)
def _(code, all_codes=None, hyphen=True, star=True, colon=True, regex=False,
      drop_dot=False, drop_leading_zero=False,
      sort_unique=True, info=None):
    # expand the codes for each name separately, keeping the dict shape
    expanded={}
    for name, cod in code.items():
        if isinstance(cod,str):
            cod = [cod]
        expanded_codes=[]
        for co in cod:
            new_codes = expand_code(co, all_codes=all_codes, hyphen=hyphen, star=star, colon=colon, regex=regex, drop_dot=drop_dot, drop_leading_zero=drop_leading_zero)
            expanded_codes.extend(new_codes)
        expanded[name] = list(set(expanded_codes))
    return expanded
# -
# manual smoke test left in the notebook: expand a dict of codes
codes={'F3':'F3'}
all_codes=['G3', 'F3']
expand_code(codes, all_codes=all_codes)
# scratch: a string is iterable, so extend('H3') appends 'H' and '3' separately
cod=[]
cod.extend('H3')
cod
# expand a single code against the codebook
expand_code('F3', all_codes=all_codes)
# ## expand columns
# +
#export
@singledispatch
def expand_columns(expr, all_columns=None, df=None, star=True,
                   hyphen=True, colon=True, regex=None, info=None):
    """
    Expand column names with special notation (star, hyphen, colon or regex)
    to the full column names found in all_columns.
    """
    raise ValueError('Must be str or list of str')


@expand_columns.register(str)
def _(expr, all_columns=None, df=None, star=True,
      hyphen=True, colon=True, regex=None, info=None):
    notations = '* - :'.split()
    # return immediately if no notation is used
    if not any(symbol in expr for symbol in notations):
        return [expr]
    # get a list of columns if it is only implicitly defined by the df
    # warning: may deprecate this, require explicit all_columns
    # bug fix: `df & (not all_columns)` applied bitwise-and to a dataframe
    # (TypeError/ambiguous); use a plain boolean test instead
    if (df is not None) and (not all_columns):
        all_columns = list(df.columns)
    if regex:
        cols = [col for col in all_columns if re.match(regex, expr)]
    else:
        # bug fix: each expansion now feeds the next; the original passed the
        # raw expr to every step, so only the last step's result survived
        # (e.g. a star pattern was discarded again by the colon pass)
        cols = [expr]
        if hyphen:
            cols = expand_hyphen(cols)
        if star:
            cols = expand_star(cols, all_codes=all_columns)
        if colon:
            cols = expand_colon(cols, all_codes=all_columns)
    return cols


@expand_columns.register(list)
def _(expr, all_columns=None, df=None, star=True,
      hyphen=True, colon=True, regex=None, info=None):
    # bug fix: the original reset all_columns to [] here, clobbering the
    # caller's codebook before recursing into the str version
    expanded = []
    for col in expr:
        expanded.extend(expand_columns(col, all_columns=all_columns, df=df,
                                       star=star, hyphen=hyphen, colon=colon,
                                       regex=regex, info=info))
    return expanded
# -
# # More helper functions
#
#export
def format_codes(codes, merge=True):
    """
    Makes sure that the codes have the desired format: a dict with strings as
    keys (name) and a list of codes as values.

    Background: For several functions the user is allowed to use a string
    when there is only one element, a list when there is no code replacement
    or aggregation, or a dict. To avoid (even more) mess the input is
    standardised as soon as possible in a function.

    Args:
        codes (str, list or dict): the codes in any accepted shape
        merge (bool): if True, a list becomes ONE merged group; if False,
            every code in the list becomes its own single-code group

    Examples:
        codes = '4AB02'
        codes = '4AB*'
        codes = ['4AB02', '4AB04', '4AC*']
        codes = {'tumor': 'a4*', 'diabetes': ['d3*', 'd5-d9']}
        format_codes(codes, merge=False)

    TODO: test for correctness of input, not just reformat (is the key a str?)
    """
    # bug fix: the original called the undefined name _listify (NameError)
    if isinstance(codes, str):
        codes = [codes]
    # treatment of pure lists depends on whether codes should be treated as
    # one merged group or as separate codes; e.g. counting Z51* could mean
    # "count the total" OR "count all codes starting with Z51 separately" --
    # `merge` switches between these two interpretations
    if isinstance(codes, list):
        if merge:
            codes = {'_'.join(codes): codes}
        else:
            codes = {code: [code] for code in codes}
    elif isinstance(codes, dict):
        new_codes = {}
        for name, codelist in codes.items():
            if isinstance(codelist, str):
                codelist = [codelist]
            new_codes[name] = codelist
        codes = new_codes
    return codes
# ## reverse dict
def reverse_dict(dikt):
    """
    Invert a {name: codes} mapping: every value in each code list becomes a
    key in a new dict, mapped back to its group name.
    """
    inverted = {}
    for name, codelist in dikt.items():
        for code in listify(codelist):
            inverted[code] = name
    return inverted
#export
def _expand_regex(expr, full_list):
exprs = _listify(expr)
expanded = []
if isinstance(full_list, pd.Series):
pass
elif isinstance(full_list, list):
unique_series = pd.Series(full_list)
elif isinstance(full_list, set):
unique_series = pd.Series(list(full_list))
for expr in exprs:
match = unique_series.str.contains(expr)
expanded.extend(unique_series[match])
return expanded
def persons_with(df,
                 codes,
                 cols,
                 pid='pid',
                 sep=None,
                 merge=True,
                 first_date=None,
                 last_date=None,
                 group=False,
                 _fix=True):
    """
    Determine whether people have received a code

    Args:
        codes (list or dict): codes to mark for
            codes to search for
            - if list: each code will represent a column
            - if dict: the codes in each item will be aggregated to one indicator
        cols (str or list of str): Column(s) with the codes
        pid (str): column with the person identifier
        first_date (str): use only codes after a given date
            the string either represents a date (same for all individuals)
            or the name of a column with dates (may be different for different individuals)
            NOTE(review): first_date/last_date are accepted but never used below
        last_date (str): only use codes before a given date (see first_date)

    Returns:
        Series or Dataframe

    Examples:
        fracture = persons_with(df=df, codes='S72*', cols='icdmain')
        fracture = persons_with(df=df, codes={'frac':'S72*'}, cols='icdmain')

    Todo:
        - function may check if pid_index is unique, in which case it does not have to aggregate
        - this may apply in general? functions that work on event data may then also work on person level data
        - allow user to input person level dataframe source?
    """
    sub = df
    if _fix:
        # NOTE(review): _to_df and _fix_args are not defined in this module as
        # shown -- confirm they exist elsewhere before relying on this path
        df, cols = _to_df(df=df, cols=cols)
        codes, cols, allcodes, sep = _fix_args(df=df, codes=codes, cols=cols, sep=sep, merge=merge, group=group)
        # NOTE(review): get_rows in this module takes `fix=`, not `_fix=` --
        # as written this call raises TypeError; verify which signature is current
        rows = get_rows(df=df, codes=allcodes, cols=cols, sep=sep, _fix=False)
        sub = df[rows]
    # one row per person: all distinct codes seen, flattened to one string
    df_persons = sub.groupby(pid)[cols].apply(lambda s: pd.unique(s.values.ravel()).tolist()).astype(str)
    # alternative approach, also good, and avoids creating a personal dataframe
    # but ... regex is fast since it stops when it finds one true code!
    # c=df.icdbi.str.split(', ', expand=True).to_sparse()
    # c.isin(['S720', 'I10']).any(axis=1).any(level=0)
    persondf = pd.DataFrame(index=df[pid].unique().tolist())
    # one boolean column per code group: does the person's code string
    # contain any of the group's codes?
    for name, codes in codes.items():
        codes_regex = '|'.join(codes)
        persondf[name] = df_persons.str.contains(codes_regex, na=False)
    return persondf
# # formatting an expression
# ## insert_external
#export
def insert_external(expr, ns=None):
    """
    Replace ``@name`` placeholders in *expr* with the value of ``name``.

    Args:
        expr (str): expression possibly containing ``@``-prefixed variables
        ns (dict, optional): namespace in which to resolve the variables;
            defaults to this module's global namespace (original behavior)

    Returns:
        str: the expression with each ``@name`` replaced by ``str(value)``

    Example:
        x = ['4AB02', '4AB04', '4AB06']
        expr = '@x before 4AB02'
        insert_external(expr)
        # "['4AB02', '4AB04', '4AB06'] before 4AB02"
    """
    if ns is None:
        ns = globals()
    externals = [word.strip('@') for word in expr.split() if word.startswith('@')]
    for external in externals:
        value = str(ns[external])
        # replace occurrences followed by a space (original behavior) ...
        expr = expr.replace(f'@{external} ', f'{value} ')
        # ... and also a placeholder at the very end of the string, which the
        # original missed because it required a trailing space after @name
        if expr.endswith(f'@{external}'):
            expr = expr[:-(len(external) + 1)] + value
    return expr
# # Descriptive and analysis
# ## unique
# +
#export
# A function to identify all unique values in one or more columns
# with one or multiple codes in each cell
def unique(df, cols=None, sep=None, all_str=True, info=None):
    """
    List unique values from one or more dataframe columns.

    Args:
        df (DataFrame): source data
        cols (str or list of str): column(s) to scan; all columns if None
        sep (str): separator if cells contain multiple values
        all_str (bool): convert every value to a stripped string
        info: unused; kept for interface compatibility

    Returns:
        list of unique values (array if all_str is False and sep is None)

    Example:
        unique(df=df, cols='inpatient', sep=',')
    """
    # if no column(s) are specified, find unique values in whole dataframe
    if cols is None:
        cols = list(df.columns)
    # accept a single column name as well as a list of names
    cols = [cols] if isinstance(cols, str) else list(cols)
    # multiple values with separator in cells
    if sep:
        # BUGFIX: join and split on the *user supplied* separator; the
        # original hard-coded ',' here, which broke for any other sep
        all_unique = set()
        for col in cols:
            all_unique.update(df[col].str.cat(sep=sep).split(sep))
    # single valued cells
    else:
        all_unique = pd.unique(df[cols].values.ravel('K'))
    # if need to make sure all elements are strings without surrounding spaces
    if all_str:
        all_unique = [str(value).strip() for value in all_unique]
    return all_unique
# -
# ## count codes
#export
def count_codes(df, codes=None, cols=None, sep=None, normalize=False,
                ascending=False, fix=True, merge=False, group=False, dropna=True, all_codes=None, info=None):
    """
    Count the frequency of codes across multiple columns, including columns
    whose cells hold several separator-delimited codes.

    Args:
        df (DataFrame or Series): data to count codes in
        codes (str, list of str, dict): codes to count; if None, all codes
            are counted. A dict aggregates each item's codes under its key.
        cols (str or list of str): column(s) containing the codes
        sep (str): separator if cells contain multiple codes
        normalize (bool): if True, return shares instead of absolute counts
        ascending (bool): sort order of the returned counts
        fix (bool): kept for interface compatibility (not used in the body)
        merge (bool): if True, all codes expanded from one notation are
            counted together under a single label
        group (bool): kept for interface compatibility (not used in the body)
        dropna (bool): if True, codes not asked for are excluded both from
            the output and from the normalization denominator
        all_codes (list): precomputed universe of codes (computed if None)
        info: passed through to helper functions

    Returns:
        Series: counts (or shares) indexed by code/name, sorted by value

    Allows:
        - star notation in codes and columns
        - multiple values per cell (when sep is defined)
        - replacement and aggregation to larger groups (when codes is a dict)

    Examples:
        count_codes(df=df,
                    codes={'stereoids': 'H2*', 'antibiotics': ['AI3*']},
                    cols='atc*',
                    sep=',')
        count_codes(df, codes='Z51*', cols=['icdmain', 'icdbi'], sep=',')
        count_codes(df, codes={'crohns': 'K50*', 'uc': 'K51*'},
                    cols=['icdmain', 'icdbi'], sep=',', dropna=False)
    """
    # preliminary formatting: accept a Series by promoting it to a DataFrame
    if isinstance(df, pd.Series):
        df = df.to_frame()
        cols = list(df.columns)
        # maybe df[pid]=df.index
    # with no explicit codes, count every code present in the chosen columns
    if not codes:
        codes = unique(df=df, cols=cols, sep=sep, info=info)
        all_codes = list(set(codes))
    cols = expand_columns(cols, all_columns=list(df.columns))
    if not all_codes:
        all_codes = unique(df=df, cols=cols, sep=sep)
    # expand star notation; remember the original spelling for output naming
    old_codes = codes
    codes = expand_code(codes, all_codes=all_codes, info=info)
    # normalize `codes` to a dict {output name: concrete code(s)}
    if isinstance(old_codes, str) and (merge):
        codes = {old_codes: codes}
    elif isinstance(old_codes, str) and not (merge):
        codes = {code: code for code in codes}
    elif isinstance(old_codes, list) and (merge):
        codes = {str(old_codes): codes}
    elif isinstance(old_codes, list) and not (merge):
        codes = {code: code for code in codes}
    only_codes = []
    for name, code in codes.items():
        code = listify(code)
        only_codes.extend(code)
    # prevent duplicates
    only_codes = list(set(only_codes))
    sub = df
    # when dropping unlisted codes, restrict to rows containing a wanted code
    if dropna:
        rows = get_rows(df=sub, codes=only_codes, cols=cols, sep=sep, all_codes=all_codes)
        sub = sub[rows]
    if sep:
        # multi-code cells: concatenate each column and count the split codes
        count = Counter()
        for col in cols:
            codes_in_col = [code.strip() for code in sub[col].str.cat(sep=sep).split(sep)]
            count.update(codes_in_col)
        code_count = pd.Series(count)
    else:
        code_count = sub[cols].apply(pd.Series.value_counts).sum(axis=1)
    if codes:
        # BUGFIX: the codes live in the *index* of code_count, not in its
        # values; the original tested the integer counts against the code
        # strings, which classified every row as "not included"
        not_included_n = code_count[~code_count.index.isin(only_codes)].sum()
        code_count = code_count[only_codes]
        if not dropna:
            code_count['na'] = not_included_n
        if isinstance(codes, dict):
            # aggregate expanded codes back to their requested names
            code_count = code_count.rename(index=reverse_dict(codes)).sum(level=0)
    if normalize:
        code_n = code_count.sum()
        code_count = code_count / code_n
    else:
        code_count = code_count.astype(int)
    return code_count.sort_values(ascending=ascending)
# Smoke tests: build synthetic event data and exercise count_codes directly
# and through the registered series accessors.
df=make_data()
df.head()
# Counter is used inside count_codes at call time; importing it here (after
# the def) still works because the import runs before the calls below.
from collections import Counter
# +
#count_codes(df=df, codes={'a':['G4*', 'C4*', 'c4'], 'b':'A4*'}, cols='codes', sep=',', merge=True, normalize=True)
# -
count_codes(df=df, codes='A4*', cols='codes', sep=',')
#df.hrr3.count_codes(codes='A*', cols='codes', sep=',')
df.codes.hrrb.count_codes(sep=',')
df.codes.hrrb.count_codes(sep=',',)
str(['a', 'b'])
# # find codes
# +
#export
def lookup_codes(dikt, codes):
    """
    Return the dict entries whose key starts with one of the given codes.

    Trailing stars are tolerated ('L04*' matches keys beginning with 'L04')
    and matching is case-insensitive.

    todo: more complicated star notations: starts with, contains, endswith

    Example:
        lookup(medcodes, 'L04*')
    """
    prefixes = tuple(code.upper().strip('*') for code in _listify(codes))
    return {key: value
            for key, value in dikt.items()
            if str(key).upper().startswith(prefixes)}
# %%
def get_codes(dikt, text):
    """
    Return the dict entries whose value contains any of the given snippets.

    Star characters are stripped and matching is case-insensitive.

    todo: more complicated star notations: starts with, contains, endswith
    alternative name: find_codes? get_codes?

    Example:
        get all codes that have "steroid" in the explanatory text
        get_codes(medcodes, 'steroid*')
    """
    snippets = [fragment.upper().strip('*') for fragment in _listify(text)]
    matches = {}
    for key, value in dikt.items():
        haystack = str(value).upper()
        if any(snippet in haystack for snippet in snippets):
            matches[key] = value
    return matches
# -
# # Register functions
# +
@pd.api.extensions.register_dataframe_accessor("hrr6")
class RegisterResearchAccessor:
    """DataFrame accessor: df.hrr6.count_codes(...) delegates to count_codes."""
    def __init__(self, df):
        self._df = df
    def count_codes(self, codes=None, cols=None, sep=None, normalize=False,
                    ascending=False, fix=True, merge=False, group=False, dropna=True, all_codes=None, info=None):
        # Forward every argument, including `group`, which the original
        # version silently dropped.
        result = count_codes(df=self._df, codes=codes, cols=cols, sep=sep, normalize=normalize,
                             ascending=ascending, fix=fix, merge=merge, group=group,
                             dropna=dropna, all_codes=all_codes, info=info)
        return result
@pd.api.extensions.register_series_accessor("hrr6")
class RegisterResearchAccessor:
    """Series accessor: s.hrr6.count_codes(...) delegates to count_codes.

    NOTE(review): this reuses the class name of the dataframe accessor above;
    the earlier registration already captured that class object, so the
    shadowing is harmless, but renaming one of them would be clearer.
    """
    def __init__(self, df):
        self._df = df
    def count_codes(self, codes=None, cols=None, sep=None, normalize=False,
                    ascending=False, fix=True, merge=False, group=False, dropna=True, all_codes=None, info=None):
        # Forward every argument, including `group`, which the original
        # version silently dropped.
        result = count_codes(df=self._df, codes=codes, cols=cols, sep=sep, normalize=normalize,
                             ascending=ascending, fix=fix, merge=merge, group=group,
                             dropna=dropna, all_codes=all_codes, info=info)
        return result
# +
@pd.api.extensions.register_series_accessor("hrrb")
class RegisterResearchAccessorSeries:
    """Series accessor: s.hrrb.count_codes(**kwargs) delegates to count_codes."""
    def __init__(self, series):
        self._df = series
    def count_codes(self, **kwargs):
        # the accessed series always wins over any df= keyword supplied
        kwargs["df"] = self._df
        return count_codes(**kwargs)
# +
#import nbdev
# +
#from nbdev.sync import script2notebook
# -
# Export all cells marked #export into the library module (nbdev v1 API).
from nbdev.export import *
notebook2script()
# | utilities.ipynb  (concatenation artifact — the following section comes from a different notebook)
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: python3
# language: python
# name: python3
# ---
# # <img style="float: left; padding-right: 10px; width: 45px" src="https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/iacs.png"> CS109B Data Science 2: Advanced Topics in Data Science
# ## Homework 6 - RNNs
#
#
#
# **Harvard University**<br/>
# **Fall 2020**<br/>
# **Instructors**: <NAME>, <NAME>, & <NAME>
#
#
# <hr style="height:2pt">
#RUN THIS CELL
# Fetch the course stylesheet and apply it to the notebook display.
import requests
from IPython.core.display import HTML
styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/cs109.css").text
HTML(styles)
# ### INSTRUCTIONS
#
# <span style="color:red">**Model training can be very slow; start doing this HW early**</span>
#
# - To submit your assignment follow the instructions given in Canvas.
#
# - This homework can be submitted in pairs.
#
# - If you submit individually but you have worked with someone, please include the name of your **one** partner below.
# - Please restart the kernel and run the entire notebook again before you submit.
#
# **Names of person you have worked with goes here:**
# <br><BR>
# <div class="theme"> Overview: Named Entity Recognition Challenge</div>
# Named entity recognition (NER) seeks to locate and classify named entities present in unstructured text into predefined categories such as organizations, locations, expressions of times, names of persons, etc. This technique is often used in real use cases such as classifying content for news providers, efficient search algorithms over large corpora and content-based recommendation systems.
#
# This represents an interesting "many-to-many" problem, allowing us to experiment with recurrent architectures and compare their performances against other models.
#
# +
import pandas as pd
import numpy as np
from sklearn.metrics import f1_score, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
plt.style.use("ggplot")
# +
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import backend
from tensorflow.keras import Model, Sequential
from tensorflow.keras.models import model_from_json
from tensorflow.keras.layers import Input, SimpleRNN, Embedding, Dense, TimeDistributed, GRU, \
Dropout, Bidirectional, Conv1D, BatchNormalization
# Report the TF/Keras versions the notebook was run with.
print(tf.keras.__version__)
print(tf.__version__)
# -
# Set seed for repeatable results
# NOTE(review): seeding numpy and TF covers CPU determinism; GPU kernels may
# still be nondeterministic — confirm if exact reproducibility is required.
np.random.seed(123)
tf.random.set_seed(456)
# + [markdown] colab_type="text" id="rUkgUGwJXUcH"
# <div class="theme"> Part 1: Data </div>
# Read `HW6_data.csv` into a pandas dataframe using the provided code below.
# -
# Given code
# Load the NER dataset (latin1 because the file is not valid UTF-8).
path_dataset = './data/HW6_data.csv'
data = pd.read_csv(path_dataset,
                   encoding="latin1")
# Forward-fill NaNs; presumably this propagates the 'Sentence #' label to
# every row of its sentence — TODO confirm against the raw CSV.
data = data.fillna(method="ffill")
data.head(15)
# As you can see, we have a dataset with sentences (```Sentence #``` column), each composed of words (```Word``` column) with part-of-speech tagging (```POS``` tagging) and inside–outside–beginning (IOB) named entity tags (```Tag``` column) attached. ```POS``` will not be used for this homework. We will predict ```Tag``` using only the words themselves.
#
# Essential info about entities:
# * geo = Geographical Entity
# * org = Organization
# * per = Person
# * gpe = Geopolitical Entity
# * tim = Time indicator
# * art = Artifact
# * eve = Event
# * nat = Natural Phenomenon
#
# IOB prefix:
# * B: beginning of named entity
# * I: inside of named entity
# * O: outside of named entity
#
# <div class='exercise'><b> Question 1: Data [20 points total]</b></div>
#
# **1.1** Create a list of unique words found in the 'Word' column and sort it in alphabetic order. Then append the special word "ENDPAD" to the end of the list, and assign it to the variable ```words```. Store the length of this list as ```n_words```. **Print your results for `n_words`**
#
# **1.2** Create a list of unique tags and sort it in alphabetic order. Then append the special word "PAD" to the end of the list, and assign it to the variable ```tags```. Store the length of this list as ```n_tags```. **Print your results for `n_tags`**
#
# **1.3** Process the data into a list of sentences where each sentence is a list of (word, tag) tuples. Here is an example of how the first sentence in the list should look:
#
# [('Thousands', 'O'),
# ('of', 'O'),
# ('demonstrators', 'O'),
# ('have', 'O'),
# ('marched', 'O'),
# ('through', 'O'),
# ('London', 'B-geo'),
# ('to', 'O'),
# ('protest', 'O'),
# ('the', 'O'),
# ('war', 'O'),
# ('in', 'O'),
# ('Iraq', 'B-geo'),
# ('and', 'O'),
# ('demand', 'O'),
# ('the', 'O'),
# ('withdrawal', 'O'),
# ('of', 'O'),
# ('British', 'B-gpe'),
# ('troops', 'O'),
# ('from', 'O'),
# ('that', 'O'),
# ('country', 'O'),
# ('.', 'O')]
#
# **1.4** Find out the number of words in the longest sentence, and store it to variable ```max_len```. **Print your results for `max_len`.**
#
# **1.5** It's now time to convert the sentences data in a suitable format for the RNNs training/evaluation procedures. Create a ```word2idx``` dictionary mapping distinct words from the dataset into distinct integers. Also create a ```idx2word``` dictionary.
#
# **1.6** Prepare the predictors matrix ```X```, as a list of lists, where each inner list is a sequence of words mapped into integers accordly to the ```word2idx``` dictionary.
#
# **1.7** Apply the keras ```pad_sequences``` function to standardize the predictors. You should retrieve a matrix with all padded sentences and length equal to ```max_len``` previously computed. The dimensionality should therefore be equal to ```[# of sentences, max_len]```. Run the provided cell to print your results. Your ```X[i]``` now should be something similar to this:
#
# `[ 8193 27727 31033 33289 22577 33464 23723 16665 33464 31142 31319 28267
# 27700 33246 28646 16052 21 16915 17349 7924 32879 32985 18238 23555
# 24 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178
# 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178
# 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178
# 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178
# 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178
# 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178
# 35178 35178 35178 35178 35178 35178 35178 35178]`
#
# **1.8** Create a ```tag2idx``` dictionary mapping distinct named entity tags from the dataset into distinct integers. Also create a ```idx2tag``` dictionary.
#
# **1.9** Prepare targets matrix ```Y```, as a list of lists,where each inner list is a sequence of tags mapped into integers accordly to the ```tag2idx``` dictionary.
#
# **1.10** apply the keras ```pad_sequences``` function to standardize the targets. Inject the ```PAD``` tag for the padding words. You should retrieve a matrix with all padded sentences'tags and length equal to ```max_length``` previously computed.
#
# **1.11** Use the ```to_categorical``` keras function to one-hot encode the tags. Now your ```Y``` should have dimension ```[# of sentences, max_len, n_tags]```. Run the provided cell to print your results.
#
# **1.12** Split the dataset into train and test sets (test 10%).
# + [markdown] colab_type="text" id="hBtmANNuuS6h"
# ## Answers
# -
# **1.1** Create a list of unique words found in the 'Word' column and sort it in alphabetic order. Then append the special word "ENDPAD" to the end of the list, and assign it to the variable ```words```. Store the length of this list as ```n_words```. **Print your results for `n_words`**
# your code here
# Vocabulary: all distinct words, alphabetically sorted, plus a padding token.
words = sorted(data['Word'].unique())
words.append('ENDPAD')
n_words = len(words)
words[-20:]
# Run this cell to show your results for n_words
print(n_words)
# **1.2** Create a list of unique tags and sort it in alphabetic order. Then append the special word "PAD" to the end of the list, and assign it to the variable ```tags```. Store the length of this list as ```n_tags```. **Print your results for `n_tags`**
# your code here
# Tag set: all distinct IOB tags, alphabetically sorted, plus a padding tag.
tags = sorted(data['Tag'].unique())
tags.append('PAD')
n_tags = len(tags)
# Run this cell to show your results for n_tags
print(n_tags)
# **1.3** Process the data into a list of sentences where each sentence is a list of (word, tag) tuples. Here is an example of how the first sentence in the list should look:
#
# [('Thousands', 'O'),
# ('of', 'O'),
# ('demonstrators', 'O'),
# ('have', 'O'),
# ('marched', 'O'),
# ('through', 'O'),
# ('London', 'B-geo'),
# ('to', 'O'),
# ('protest', 'O'),
# ('the', 'O'),
# ('war', 'O'),
# ('in', 'O'),
# ('Iraq', 'B-geo'),
# ('and', 'O'),
# ('demand', 'O'),
# ('the', 'O'),
# ('withdrawal', 'O'),
# ('of', 'O'),
# ('British', 'B-gpe'),
# ('troops', 'O'),
# ('from', 'O'),
# ('that', 'O'),
# ('country', 'O'),
# ('.', 'O')]
#
# your code here
def wordtag(data):
    """Return one sentence's (word, tag) tuples, in order.

    Args:
        data: mapping/DataFrame group exposing 'Word' and 'Tag' sequences

    Returns:
        list of (word, tag) tuples
    """
    # list(zip(...)) is the idiomatic form of the original
    # "[x for x in zip(...)]" identity comprehension
    return list(zip(data['Word'], data['Tag']))
# Group rows by sentence id; each group becomes its list of (word, tag) pairs.
sentence = data.groupby('Sentence #').apply(wordtag)
# list of (word,tag) tuples for the first sentence
sentence[0]
# **1.4** Find out the number of words in the longest sentence, and store it to variable ```max_len```. **Print your results for `max_len`.**
# your code here
# Longest sentence length, used as the common padded sequence length below.
# NOTE(review): the -1 shortens the target length by one token; combined with
# keras' default truncating='pre' in pad_sequences, the longest sentences
# lose their FIRST token — confirm this is intended.
max_len = max([len(s)-1 for s in sentence]) #-1 because the last element is the full stop '.' and not a word
# Run this cell to show your results for max_len
print(max_len)
# **1.5** It's now time to convert the sentences data in a suitable format for the RNNs training/evaluation procedures. Create a ```word2idx``` dictionary mapping distinct words from the dataset into distinct integers. Also create a ```idx2word``` dictionary.
# your code here
# Bidirectional word <-> integer index mappings over the vocabulary.
word2idx = dict()
idx2word = dict()
for idx, word in enumerate(words):
    word2idx[word] = idx
    idx2word[idx] = word
# **1.6** Prepare the predictors matrix ```X```, as a list of lists, where each inner list is a sequence of words mapped into integers accordly to the ```word2idx``` dictionary.
# your code here
# Encode each sentence as the list of its words' integer indices.
X = []
for s in sentence:
    inner_list = [] #inner list is a sequence of words in each sentence
    for word in s:
        idx = word2idx[word[0]]
        inner_list.append(idx)
    X.append(inner_list)
# **1.7** Apply the keras ```pad_sequences``` function to standardize the predictors. You should retrieve a matrix with all padded sentences and length equal to ```max_len``` previously computed. The dimensionality should therefore be equal to ```[# of sentences, max_len]```. Run the provided cell to print your results. Your ```X[i]``` now should be something similar to this:
#
# `[ 8193 27727 31033 33289 22577 33464 23723 16665 33464 31142 31319 28267
# 27700 33246 28646 16052 21 16915 17349 7924 32879 32985 18238 23555
# 24 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178
# 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178
# 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178
# 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178
# 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178
# 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178 35178
# 35178 35178 35178 35178 35178 35178 35178 35178]`
# your code here
# Pad every sentence to max_len with the ENDPAD index; padding is appended
# at the end (padding='post'), while over-long sequences are trimmed from
# the front (keras' default truncating='pre').
X = pad_sequences(X, maxlen=max_len ,padding='post',value=word2idx['ENDPAD'])
# Run this cell to show your results #
print("The index of word 'Harvard' is: {}\n".format(word2idx["Harvard"]))
print("Sentence 1: {}\n".format(X[1]))
print(X.shape)
# **1.8** Create a ```tag2idx``` dictionary mapping distinct named entity tags from the dataset into distinct integers. Also create a ```idx2tag``` dictionary.
# your code here
# Bidirectional tag <-> integer index mappings.
tag2idx= dict()
idx2tag = dict()
for idx, tag in enumerate(tags):
    tag2idx[tag] = idx
    idx2tag[idx] = tag
# **1.9** Prepare targets matrix ```Y```, as a list of lists,where each inner list is a sequence of tags mapped into integers accordly to the ```tag2idx``` dictionary.
# your code here
# Encode each sentence's tag sequence as integer indices (parallel to X).
Y = []
for s in sentence:
    inner_list = [] #inner list is a sequence of words in each sentence
    for tag in s:
        idx = tag2idx[tag[1]]
        inner_list.append(idx)
    Y.append(inner_list)
# **1.10** apply the keras ```pad_sequences``` function to standardize the targets. Inject the ```PAD``` tag for the padding words. You should retrieve a matrix with all padded sentences'tags and length equal to ```max_length``` previously computed.
# your code here
# Pad tag sequences to max_len using the special PAD tag index.
Y = pad_sequences(Y, maxlen=max_len,padding='post',value=tag2idx['PAD'])
# **1.11** Use the ```to_categorical``` keras function to one-hot encode the tags. Now your ```Y``` should have dimension ```[# of sentences, max_len, n_tags]```. Run the provided cell to print your results.
# your code here
# One-hot encode the tags: Y becomes [# sentences, max_len, n_tags].
Y = to_categorical(Y, n_tags)
# Run this cell to show your results #
print("The index of tag 'B-gpe' is: {}\n".format(tag2idx["B-gpe"]))
print("The tag of the last word in Sentence 1: {}\n".format(Y[0][-1]))
print(np.array(Y).shape)
# **1.12** Split the dataset into train and test sets (test 10%).
# your code here
# 90/10 train/test split; no explicit random_state is passed, so shuffling
# depends on the global np.random.seed set near the top of the notebook.
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.1)
# ## Part 2: Modelling
#
# After preparing the train and test sets, we are ready to build five models:
# * frequency-based baseline
# * vanilla feedforward neural network
# * recurrent neural network
# * gated recurrent neural network
# * bidirectional gated recurrent neural network
#
# More details are given about architecture in each model's section. The input/output dimensionalities will be the same for all models:
# * input: ```[# of sentences, max_len]```
# * output: ```[# of sentences, max_len, n_tags]```
#
# Follow the information in each model's section to set up the architecture of each model. And the end of each training, use the given ```store_model``` function to store the weights and architectures in the ```./models``` path for later testing;```load_keras_model()``` is also provided to you
#
# A further ```plot_training_history``` helper function is given in case you need to check the training history.
#
# +
# Store model
def store_keras_model(model, model_name):
    """Persist a keras model under ./models: architecture as JSON, weights as HDF5."""
    arch_path = "./models/{}.json".format(model_name)
    with open(arch_path, "w") as json_file:
        json_file.write(model.to_json())  # serialize model to JSON
    model.save_weights("./models/{}.h5".format(model_name))  # serialize weights to HDF5
    print("Saved model to disk")
# Plot history
def plot_training_history(history):
    """Plot training vs. validation loss per epoch from a keras History object."""
    train_loss = history.history['loss']
    valid_loss = history.history['val_loss']
    epoch_axis = range(1, len(train_loss) + 1)
    plt.figure()
    plt.plot(epoch_axis, train_loss, 'bo', label='Training loss')
    plt.plot(epoch_axis, valid_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.xlabel('epoch')
    plt.legend()
    plt.show()
# -
# Load model
def load_keras_model(model_name):
    """
    Load a model previously saved by store_keras_model.

    Reads the JSON architecture and HDF5 weights from ./models.

    Args:
        model_name (str): base file name (without extension)

    Returns:
        tf.keras model with restored weights (not compiled)
    """
    # Load json and create model; a context manager guarantees the file is
    # closed even if model_from_json raises (the original leaked the handle
    # on error)
    with open('./models/{}.json'.format(model_name), 'r') as json_file:
        loaded_model_json = json_file.read()
    model = tf.keras.models.model_from_json(loaded_model_json)
    # Load weights into new model
    model.load_weights("./models/{}.h5".format(model_name))
    return model
# <div class='exercise'><b>Question 2: Models [40 points total]</b></div>
#
# **2.1** **Model 1: Baseline Model**
#
# Predict the tag of a word simply with the most frequently-seen named entity tag of this word from the training set.
#
# e.g. word "Apple" appears 10 times in the training set; 7 times it was tagged as "Corporate" and 3 times it was tagged as "Fruit". If we encounter the word "Apple" in the test set, we predict it as "Corporate".
#
# **Create an np.array ```baseline``` of length [n_words]**
# where the ith element ```baseline[i]``` is the index of the most commonly seen named entity tag of word i summarised from training set. (e.g. [16, 16, 16, ..., 0, 16, 16])
#
#
# **2.2** **Model 2: Vanilla Feed Forward Neural Network**
#
# This model is provided for you. Please pay attention to the architecture of this neural network, especially the input/output dimensionalities and the Embedding layer.
#
#
# **2.2a** Explain what the embedding layer is and why we need it here.
#
# **2.2b** Explain why the Param # of Embedding layer is 1758950 (as shown in `print(model.summary())`).
#
# **2.3** **Model 3: RNN**
#
# Set up a simple RNN model by stacking the following layers in sequence:
#
# an input layer
# a simple Embedding layer transforming integer words into vectors
# a dropout layer to regularize the model
# a SimpleRNN layer
# a TimeDistributed layer with an inner Dense layer which output dimensionality is equal to n_tag
#
# *(For hyperparameters, use those provided in Model 2)*
#
# **2.3a** Define, compile, and train an RNN model. Use the provided code to save the model and plot the training history.
#
# **2.3b** Visualize outputs from the SimpleRNN layer, one subplot for B-tags and one subplot for I-tags. Comment on the patterns you observed.
#
# **2.4** **Model 4: GRU**
#
# **2.4a** Briefly explain what a GRU is and how it's different from a simple RNN.
#
# **2.4b** Define, compile, and train a GRU architecture by replacing the SimpleRNN cell with a GRU one. Use the provided code to save the model and plot the training history.
#
# **2.4c** Visualize outputs from GRU layer, one subplot for **B-tags** and one subplot for **I-tags**. Comment on the patterns you observed.
#
# **2.5** **Model 5: Bidirectional GRU**
#
# **2.5a** Explain how a Bidirectional GRU differs from GRU model above.
#
# **2.5b** Define, compile, and train a bidirectional GRU by wrapping your GRU layer in a Bidirectional one. Use the provided code to save the model and plot the training history.
#
# **2.5c** Visualize outputs from bidirectional GRU layer, one subplot for **B-tags** and one subplot for **I-tags**. Comment on the patterns you observed.
# + [markdown] colab_type="text" id="hBtmANNuuS6h"
# ## Answers
# -
# **2.1** **Model 1: Baseline Model**
#
# Predict the tag of a word simply with the most frequently-seen named entity tag of this word from the training set.
#
# e.g. word "Apple" appears 10 times in the training set; 7 times it was tagged as "Corporate" and 3 times it was tagged as "Fruit". If we encounter the word "Apple" in the test set, we predict it as "Corporate".
#
# **Create an np.array ```baseline``` of length [n_words]**
# where the ith element ```baseline[i]``` is the index of the most commonly seen named entity tag of word i summarised from training set. (e.g. [16, 16, 16, ..., 0, 16, 16])
#
# +
# your code here
# Baseline: predict for each word the tag it most often carried in training.
# Start from 'O' everywhere, then overwrite with the per-word argmax.
# NOTE(review): words absent from the training split sum to all zeros, so
# argmax returns 0 (the first tag alphabetically) and overwrites the 'O'
# default — confirm whether unseen words should instead keep 'O'.
baseline = np.full(n_words, tag2idx['O'])
for i in range(n_words):
    baseline[i] = y_train[X_train==i].sum(axis=0).argmax()
# -
# Run this cell to show your results #
print(baseline[X].shape,'\n')
print('Sentence:\n {}\n'.format([idx2word[w] for w in X[0]]))
print('Predicted Tags:\n {}'.format([idx2tag[i] for i in baseline[X[0]]]))
# **2.2** **Model 2: Vanilla Feed Forward Neural Network**
#
# This model is provided for you. Please pay attention to the architecture of this neural network, especially the input/output dimensionalities and the Embedding layer.
#
# ### Use these hyperparameters for all NN models
# +
# +
# Shared hyperparameters for all NN models in this homework.
n_units = 100
drop_rate = .1
dim_embed = 50
optimizer = "rmsprop"
loss = "categorical_crossentropy"
metrics = ["accuracy"]
batch_size = 32
epochs = 10
validation_split = 0.1
verbose = 1
# +
# Define model
# FFNN baseline: embedding -> dropout -> softmax over tags. The Dense layer
# is applied to the last axis, so the output is [batch, max_len, n_tags].
model = tf.keras.Sequential()
model.add(tf.keras.layers.Embedding(input_dim=n_words, output_dim=dim_embed, input_length=max_len))
model.add(tf.keras.layers.Dropout(drop_rate))
model.add(tf.keras.layers.Dense(n_tags, activation="softmax"))
# Compile model
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
# -
print(model.summary())
# +
# Load or Train model
history = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,
                    validation_split=validation_split, verbose=verbose)
# -
# Persist the trained model and show its loss curves.
store_keras_model(model, 'model_FFNN')
plot_training_history(history)
# **2.2a** Explain what the embedding layer is and why we need it here.
# *your answer here*
#
# **2.2b** Explain why the Param # of Embedding layer is 1758950 (as shown in `print(model.summary())`).
# *your answer here*
# ### Viewing Hidden Layers
# In addition to the final result, we also want to see the intermediate results from hidden layers. Below is an example showing how to get outputs from a hidden layer, and visualize them on the reduced dimension of 2D by PCA. (**Please note that this code and the parameters cannot be simply copied and pasted for other questions; some adjustments need to be made**)
# +
FFNN = load_keras_model("model_FFNN")
def create_truncated_model_FFNN(trained_model):
    """Rebuild the FFNN up to the dropout layer and copy in the trained
    weights, so the hidden (embedding) features can be read off."""
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Embedding(input_dim=n_words, output_dim=dim_embed, input_length=max_len))
    model.add(tf.keras.layers.Dropout(drop_rate))
    # set weights of first few layers using the weights of trained model
    for i, layer in enumerate(model.layers):
        layer.set_weights(trained_model.layers[i].get_weights())
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    return model
truncated_model = create_truncated_model_FFNN(FFNN)
hidden_features = truncated_model.predict(X_test)
# flatten data: one row per word position, dim_embed (=50) features each
hidden_features = hidden_features.reshape(-1,50)
# find first two PCA components
pca = PCA(n_components=2)
pca_result = pca.fit_transform(hidden_features)
print('Variance explained by PCA: {}'.format(np.sum(pca.explained_variance_ratio_)))
# -
# visualize hidden features on first two PCA components
# this plot only shows B-tags
def visualize_hidden_features(pca_result):
    """Scatter the PCA-reduced hidden features, colored by true tag
    (only the first 8 tag indices, i.e. the B- tags, are plotted)."""
    color=['r', 'C1', 'y', 'C3', 'b', 'g', 'm', 'orange']
    # true tag index per word position, from the one-hot test labels
    category = np.argmax(y_test.reshape(-1,18), axis=1)
    fig, ax = plt.subplots()
    fig.set_size_inches(6,6)
    for cat in range(8):
        indices_B = np.where(category==cat)[0]
        #length=min(1000,len(indices_B))
        #indices_B=indices_B[:length]
        ax.scatter(pca_result[indices_B,0], pca_result[indices_B, 1], label=idx2tag[cat],s=2,color=color[cat],alpha=0.5)
    legend=ax.legend(markerscale=3)
    legend.get_frame().set_facecolor('w')
    plt.show()
visualize_hidden_features(pca_result)
# ### Full function for other questions ###
def get_hidden_output_PCA(model,X_te,y_te,layer_index,out_dimension):
    """Run X_te through `model` up to `layer_index`, flatten that layer's
    output to [n_positions, out_dimension], and project it onto its first
    two PCA components.

    NOTE(review): y_te is accepted but unused here.
    """
    output = tf.keras.backend.function([model.layers[0].input],[model.layers[layer_index].output])
    hidden_feature=np.array(output([X_te]))
    hidden_feature=hidden_feature.reshape(-1,out_dimension)
    pca = PCA(n_components=2)
    pca_result = pca.fit_transform(hidden_feature)
    print('Variance explained by PCA: {}'.format(np.sum(pca.explained_variance_ratio_)))
    return pca_result
def visualize_B_I(pca_result):
    """Plot the 2D PCA projection in two panels: classes 0-7 (B-tags) on the
    left and classes 8-15 (I-tags) on the right, one colour per class."""
    palette = ['r', 'C1', 'y', 'C3', 'b', 'g', 'm', 'orange']
    # True class index per token (argmax over the 18 one-hot tag columns).
    true_class = np.argmax(y_test.reshape(-1, 18), axis=1)
    fig, panels = plt.subplots(1, 2, figsize=(12, 6))
    for panel_idx, panel in enumerate(panels):
        offset = 8 * panel_idx
        for local_idx, col in enumerate(palette):
            tag_idx = offset + local_idx
            points = np.where(true_class == tag_idx)[0]
            panel.scatter(pca_result[points, 0], pca_result[points, 1],
                          label=idx2tag[tag_idx], s=2, color=col, alpha=0.5)
        leg = panel.legend(markerscale=3)
        leg.get_frame().set_facecolor('w')
        panel.set_xlabel("first dimension")
        panel.set_ylabel("second dimension")
    fig.suptitle("visualization of hidden feature on reduced dimension by PCA")
    plt.show()
h = get_hidden_output_PCA(FFNN,X_test,y_test,1,50)
visualize_B_I(h)
# **2.3** **Model 3: RNN**
#
# Set up a simple RNN model by stacking the following layers in sequence:
#
# an input layer
# a simple Embedding layer transforming integer words into vectors
# a dropout layer to regularize the model
# a SimpleRNN layer
# a TimeDistributed layer with an inner Dense layer which output dimensionality is equal to n_tag
#
# *(For hyperparameters, use those provided in Model 2)*
#
# **2.3a** Define, compile, and train an RNN model. Use the provided code to save the model and plot the training history.
# +
# your code here
# RNN for sequence tagging: embed tokens, regularise, run a SimpleRNN over
# the sequence, then classify every timestep into one of n_tags classes.
model = Sequential()
model.add(Embedding(input_dim=n_words, output_dim=dim_embed, input_length=max_len))
model.add(Dropout(drop_rate))
# return_sequences=True keeps one hidden state per timestep (token-level tagging).
model.add(SimpleRNN(n_units, return_sequences=True))
model.add(TimeDistributed(Dense(n_tags, activation="softmax")))
# Compile model
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
# -
# Train model
history = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,
                    validation_split=validation_split, verbose=verbose)
# save your model #
store_keras_model(model, 'model_RNN')
# run this cell to show your results #
print(model.summary())
# run this cell to show your results #
plot_training_history(history)
# **2.3b** Visualize outputs from the SimpleRNN layer, one subplot for B-tags and one subplot for I-tags. Comment on the patterns you observed.
# +
# your code here
# Reload the trained RNN and project the SimpleRNN layer's activations
# (layer index 2; width 100 — presumably n_units, TODO confirm) onto 2 PCA dims.
RNN = load_keras_model("model_RNN")
h = get_hidden_output_PCA(RNN, X_test, y_test, 2, 100)
visualize_B_I(h)
# -
# <div class='explication'> </div>
# **2.4** **Model 4: GRU**
#
# **2.4a** Briefly explain what a GRU is and how it's different from a simple RNN.
# *your answer here*
#
# **2.4b** Define, compile, and train a GRU architecture by replacing the SimpleRNN cell with a GRU one. Use the provided code to save the model and plot the training history.
# +
# your code here
# GRU tagger: same stack as the SimpleRNN model but with a GRU recurrent cell.
model = Sequential()
model.add(Embedding(input_dim=n_words, output_dim=dim_embed, input_length=max_len))
model.add(Dropout(drop_rate))
# return_sequences=True keeps one output per timestep (token-level tagging).
model.add(GRU(n_units, return_sequences=True))
model.add(TimeDistributed(Dense(n_tags, activation="softmax")))
# Compile model
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
# -
# Train model
history = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,
                    validation_split=validation_split, verbose=verbose)
# save your model #
store_keras_model(model, 'model_GRU')
# run this cell to show your results #
print(model.summary())
# run this cell to show your results #
plot_training_history(history)
# **2.4c** Visualize outputs from GRU layer, one subplot for **B-tags** and one subplot for **I-tags**. Comment on the patterns you observed.
# +
# your code here
# FIX: the loaded model was previously bound to the name `GRU`, which shadows
# the Keras `GRU` layer class and breaks the Bidirectional model built below
# (`Bidirectional(GRU(n_units, ...))` would call the loaded model instead of
# the layer class). Bind it to a distinct name.
gru_model = load_keras_model("model_GRU")
# Project the GRU layer's activations (layer index 2, width 100) onto 2 PCA dims.
h = get_hidden_output_PCA(gru_model, X_test, y_test, 2, 100)
visualize_B_I(h)
# -
# *your answer here*
# **2.5** **Model 5: Bidirectional GRU**
#
# **2.5a** Explain how a Bidirectional GRU differs from GRU model above.
#
#
# *your answer here*
# **2.5b** Define, compile, and train a bidirectional GRU by wrapping your GRU layer in a Bidirectional one. Use the provided code to save the model and plot the training history.
#
# +
# your code here
# Bidirectional GRU tagger: the GRU layer is wrapped in Bidirectional so each
# timestep sees both left and right context.
model = Sequential()
model.add(Embedding(input_dim=n_words, output_dim=dim_embed, input_length=max_len))
model.add(Dropout(drop_rate))
model.add(Bidirectional(GRU(n_units, return_sequences=True)))
model.add(TimeDistributed(Dense(n_tags, activation="softmax")))
# Compile model
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
# -
# Train model
history = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,
                    validation_split=validation_split, verbose=verbose)
# save your model #
store_keras_model(model, 'model_BiGRU')
# run this cell to show your results #
print(model.summary())
# run this cell to show your results #
plot_training_history(history)
# **2.5c** Visualize outputs from bidirectional GRU layer, one subplot for **B-tags** and one subplot for **I-tags**. Comment on the patterns you observed.
# +
# your code here
# Reload the trained bidirectional model and visualise its recurrent layer.
# NOTE(review): a bidirectional layer concatenates forward and backward states
# (2 * n_units features), so out_dimension=100 implies n_units=50 here —
# confirm against the saved model's summary.
BiGRU = load_keras_model("model_BiGRU")
h = get_hidden_output_PCA(BiGRU, X_test, y_test, 2, 100)
visualize_B_I(h)
# -
# *your answer here*
# <div class='exercise'><b> Question 3: Analysis [40pt]</b></div>
#
# **3.1** For each model, iteratively:
#
# - Load the model using the given function ```load_keras_model```
#
# - Apply the model to the test dataset
#
# - Compute an F1 score for each ```Tag``` and store it
#
# **3.2** Plot the F1 score per Tag and per model making use of a grouped barplot.
#
# **3.3** Briefly discuss the performance of each model
#
#
# **3.4** Which tags have the lowest f1 score? For instance, you may find from the plot above that the test accuracy on "B-art", and "I-art" are very low (just an example, your case maybe different). Here is an example when models failed to predict these tags right
#
# <img src="data/B_art.png" alt="drawing" width="600"/>
#
# **3.5** Write functions to output another example in which the tags with the lowest accuracy were predicted wrong in a sentence (include both "B-xxx" and "I-xxx" tags). Store the results in a DataFrame (same format as the above example) and use styling functions below to print out your df.
#
# **3.6** Choose one of the most promising models you have built, improve this model to achieve an f1 score higher than 0.8 for as many tags as possible (you have lots of options here, e.g. data balancing, hyperparameter tuning, changing the structure of NN, a different optimizer, etc.)
#
# **3.7** Explain why you chose to change certain elements of the model and how effective these adjustments were.
#
# + [markdown] colab_type="text" id="hBtmANNuuS6h"
# ## Answers
# -
# **3.1** For each model, iteratively:
#
# - Load the model using the given function ```load_keras_model```
#
# - Apply the model to the test dataset
#
# - Compute an F1 score for each ```Tag``` and store it
# +
# your code here
# Load all four trained models. FIX: the GRU model is bound to `gru_model`
# (not `GRU`) so it does not shadow the Keras GRU layer class.
FFNN = load_keras_model("model_FFNN")
RNN = load_keras_model("model_RNN")
gru_model = load_keras_model("model_GRU")
BiGRU = load_keras_model("model_BiGRU")
# -
# Per-token class-probability predictions on the test set.
ffnn_test = FFNN.predict(X_test)
rnn_test = RNN.predict(X_test)
gru_test = gru_model.predict(X_test)
bigru_test = BiGRU.predict(X_test)
# FIX: f1_score requires 1-D label vectors, not (samples, seq_len, n_tags)
# probability tensors / one-hot truth, and three of the four calls omitted
# labels/average. Collapse truth and predictions to per-token class indices
# and compute one F1 per tag (average=None) consistently for every model.
y_true_flat = np.argmax(y_test.reshape(-1, n_tags), axis=1)
def _per_tag_f1(pred_probs):
    # One F1 score per tag, tags indexed 0..n_tags-1 in idx2tag order.
    pred_flat = np.argmax(pred_probs.reshape(-1, n_tags), axis=1)
    return f1_score(y_true_flat, pred_flat, labels=list(range(n_tags)), average=None)
ffnn_f1 = _per_tag_f1(ffnn_test)
rnn_f1 = _per_tag_f1(rnn_test)
gru_f1 = _per_tag_f1(gru_test)
bigru_f1 = _per_tag_f1(bigru_test)
# **3.2** Plot the F1 score per Tag and per model making use of a grouped barplot.
# your code here
# **3.3** Briefly discuss the performance of each model
# *your answer here*
#
# **3.4** Which tags have the lowest f1 score? For instance, you may find from the plot above that the test accuracy on "B-art", and "I-art" are very low (just an example, your case maybe different). Here is an example when models failed to predict these tags right
#
# <img src="data/B_art.png" alt="drawing" width="600"/>
# *your answer here*
#
# **3.5** Write functions to output another example in which the tags with the lowest accuracy were predicted wrong in a sentence (include both "B-xxx" and "I-xxx" tags). Store the results in a DataFrame (same format as the above example) and use styling functions below to print out your df.
def highlight_errors(s):
    """Row-wise DataFrame styling: colour a cell red when a model's predicted
    tag differs from the row's true tag.

    Parameters
    ----------
    s : pandas.Series
        One DataFrame row with a 'Word' cell, a 'y_true' cell, and one cell
        per model prediction.

    Returns
    -------
    list of str
        One CSS string per cell: '' for the word column and correct
        predictions, 'color: red' for wrong predictions.
    """
    # Bracket access avoids clashes between the 'y_true' label and Series attributes.
    matches = s == s['y_true']
    # FIX: Series.iteritems() was removed in pandas 2.0; .items() is the
    # supported equivalent.
    return ['' if ok or key == 'Word' else 'color: red' for key, ok in matches.items()]
# your code here
# your code here
# your code here
# **3.6** Choose one of the most promising models you have built, improve this model to achieve an f1 score higher than 0.8 for as many tags as possible (you have lots of options here, e.g. data balancing, hyperparameter tuning, changing the structure of NN, a different optimizer, etc.)
# your code here
# **3.7** Explain why you chose to change certain elements of the model and how effective these adjustments were.
# *your answer here*
| content/homeworks/hw06/notebook/models/cs109b_hw6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 3 Sum
#
# [原题](https://mp.weixin.qq.com/s/0KPgom-f4syRJbG3EZslvQ)
# ## Question
#
# Given an array `nums` of $n$ integers, find all unique triplets (three numbers, $a$, $b$, & $c$) in `nums` such that $a+b+c=0$. Note that there may not be any triplets that sum to zero in `nums`, and that the triplets must not be duplicates.
# ## Example
#
# ```text
# Input: nums = [0, -1, 2, -3, 1]
# Output: [0, -1, 1], [2, -3, 1]
# ```
''' Class Definition '''
class Solution(object):
    def threeSum(self, nums: list):
        """Return all unique triplets [a, b, c] from *nums* with a + b + c == 0.

        Classic sort + two-pointer scan: O(n log n) sort, then for each anchor
        an O(n) inward sweep — O(n^2) overall. Duplicate anchors and duplicate
        pairs are skipped so every triplet appears exactly once, in ascending
        order.

        FIX: the previous implementation moved neither pointer when
        nums[minimum] + nums[maximum] == 0 (infinite loop, e.g. on the
        problem's own example [0, -1, 2, -3, 1]), and its single-pass index
        lookup could both miss triplets and emit duplicates.
        """
        nums = sorted(nums)  # sorted copy; do not mutate the caller's list
        result = []
        n = len(nums)
        for i in range(n - 2):
            # Skip duplicate anchor values to avoid repeated triplets.
            if i > 0 and nums[i] == nums[i - 1]:
                continue
            # Once the anchor is positive no later triplet can sum to zero.
            if nums[i] > 0:
                break
            lo, hi = i + 1, n - 1
            while lo < hi:
                total = nums[i] + nums[lo] + nums[hi]
                if total < 0:
                    lo += 1
                elif total > 0:
                    hi -= 1
                else:
                    result.append([nums[i], nums[lo], nums[hi]])
                    lo += 1
                    hi -= 1
                    # Skip duplicates of the pair just recorded.
                    while lo < hi and nums[lo] == nums[lo - 1]:
                        lo += 1
                    while lo < hi and nums[hi] == nums[hi + 1]:
                        hi -= 1
        return result
''' Test Program '''
# Smoke test: [-2, 1, 1] is the only zero-sum triplet in this input.
nums = [1, -2, 1, 0, 5]
print(Solution().threeSum(nums))
# [[-2, 1, 1]]
| February/Week8/51.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
# ## Load data
# Labelled training data and the unlabelled batch to score for submission.
data = pd.read_csv('abalone_dataset.csv')
sample = pd.read_csv('abalone_app.csv')
data.head()
# ## Transform
# +
def conv(sex):
    """Encode the categorical sex value as an integer:
    'M' -> 0, 'F' -> 1, anything else (e.g. infant 'I') -> 2."""
    codes = {'M': 0, 'F': 1}
    return codes.get(sex, 2)
# Apply the same integer encoding to both the training data and the scoring batch.
data['sex'] = data['sex'].apply(conv)
sample['sex'] = sample['sex'].apply(conv)
data.head()
# -
# ## Separating
# +
# Features = every column except the target 'type'.
# NOTE(review): columns.difference() returns the names in *sorted* order, so
# X's column order may differ from sample's raw column order — confirm the
# later predict(sample) call sees matching feature ordering.
features = data.columns.difference(['type'])
X = data[features]
y = data['type']
samp = sample
# -
# ## Creating and training a classifier
# Baseline 1-NN classifier; these hyperparameters are re-tuned by the grid
# search below.
knn = KNeighborsClassifier(n_neighbors = 1,
                           weights = 'distance',
                           algorithm = 'auto',
                           p = 2)
knn.fit(X, y)
# Sanity check: predict a single hand-crafted observation (sex=0 plus the
# seven numeric measurements).
X_new = np.array([[0, 0.535, 0.420, 0.150, 0.6995, 0.2575, 0.1530, 0.2400]])
X_new.shape
knn.predict(X_new)
# +
from sklearn.model_selection import GridSearchCV
# Exhaustive search over a small KNN hyperparameter grid, scored by accuracy.
param_grid = {
    "algorithm": ['auto', 'ball_tree', 'kd_tree', 'brute'],
    "n_neighbors": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    "p": [1, 2],
    "weights": ['distance', 'uniform']
}
grid_search = GridSearchCV(knn, param_grid, scoring="accuracy")
grid_search.fit(X, y)
# Keep the refitted best estimator for the later predictions/submission.
knn = grid_search.best_estimator_
grid_search.best_params_, grid_search.best_score_
# +
from sklearn.model_selection import cross_val_score
# 5-fold cross-validated accuracy of the tuned model (generalisation estimate).
scores = cross_val_score(knn, X, y, scoring='accuracy', cv=5)
print(scores.mean())
# +
import requests
# Make predictions on the scoring file and send them to the course server.
print(' - Aplicando modelo e enviando para o servidor')
y_pred = knn.predict(sample)
# Validation endpoint that receives and scores the predictions.
URL = "https://aydanomachado.com/mlclass/03_Validation.php"
# TODO: substitute your own dev key here
DEV_KEY = "VovóLearn"
# Payload posted to the server: dev key + predictions as a JSON value list.
data = {'dev_key':DEV_KEY,
        'predictions':pd.Series(y_pred).to_json(orient='values')}
# FIX: this POST was commented out, but `r` is dereferenced two lines below,
# which raised NameError at runtime. The request must execute for `r.text`
# (the server response) to exist.
r = requests.post(url = URL, data = data)
# Extract and print the response body.
pastebin_url = r.text
print(" - Resposta do servidor:\n", r.text, "\n")
| 03_Validation/knn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Demo script: a quick tour of common NumPy operations, one print per call.
import numpy as np
a = [1,2,3]
b = [4,5,6]
x = 30
print("sum = " , np.sum(a))
print("average = ", np.mean(a))
print("multiply = ", np.multiply(a,b))
print("subtract = ", np.subtract(b,a))
print("Modulus = ", np.mod(b,a))
print("standard deviation = ", np.std(a))
print("variance = ", np.var(a))
print("minimum in array = ", np.min(a))
print("maximum in array = ",np.max(a))
print("ascending order sorting = ", np.sort(a))
print("descending order sorting = ",np.sort(a)[::-1])
print("percentile = ", np.percentile(a,30))
print("median = ", np.median(a))
print("sinx = ", np.sin(x))
print("cosx = ",np.cos(x))
print("tanx =", np.tan(x))
print("sqrtx = ", np.sqrt(x))
print("logarithm", np.log(x))
print("scalar product", np.dot(a,b))
print("roots of a given polynomial cofficients = ", np.roots(a))
print("count the number of element in array = ",np.count_nonzero(a))
print("for checking the deminsion of array = ",np.ndim(a))
# FIX: np.ndarray(a) treats `a` as a *shape* and allocates an uninitialised
# (1, 2, 3) array of garbage values; to report the array's type, build the
# array and inspect it.
print("type checking of array = ", type(np.array(a)))
print("range array =", np.arange(2,100,5))
print("Add evenly spaced values btw interval to array of length = ", np.linspace(2,9,5))
print("Create and array filled with zeros = ",np.zeros((3,3)))
print("Creates an array filled with ones = ", np.ones((3,3)))
print("Creates random array = ", np.random.random((3,3)))
print("Creates an empty array = ", np.empty((3,3)))
print("Creates copy of array = ",np.copy(a))
print("Append items to array = ", np.append(a,b))
print("isert items into array at axis 0 or 1 = ", np.insert(a, 1, 2, 0))
print("Resize array to shape = ", np.resize(a, (2,4)))
print("Deletes items from array = ", np.delete(a,1,0))
print("Concatenates 2 arrays, adds to end = ", np.concatenate((a,b), 0))
print("Stack array row-wise = ", np.vstack((a,b)))
print("Split an array in sub-arrays of (nearly) identical size = ", np.array_split(a, 3))
# NOTE: np.random.seed() returns None, so this line prints "None"; the seed
# is still set as a side effect.
print("for fixing random state", np.random.seed(7))
print("creat random array in specific numbers", np.random.randint(4, size= (1,2,3,4)))
print("Finding cumsum", np.cumsum(a,0))
print("shape of array = ",np.shape(a))
print("size of array = ",np.size(a,0))
print("the absolute value element-wise = ", np.absolute(a))
print("Return the gradient of an N-dimensional array",np.gradient(a))
print("Return the product of array elements over a given axis :", np.prod(a))
print("Convert angles from degrees to radians: ", np.radians(x))
# FIX: the `np.round_` alias was deprecated and removed in NumPy 2.0;
# np.round is the supported spelling and behaves identically.
print("Rounded values :",np.round([1.23,1.34,1.32], 1))
print("finding unique value:", np.unique([1,1,1,4,2,5,5,7,0,10]))
print("index where label occurs in array: ", np.argmax(a))
print("Generate a uniform random sample: ", np.random.choice(5,3))
| numpy-assignment #1.ipynb |