code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # HEADING 1: Algebraic, trigonometric, and transcendental functions
# ### Polynomial function
# We encounter polynomial functions in everyday computations.
#
# For example, if we say that the cost (___C___) of an industrial product is proportional to the raw material (___M___) it consumes (plus a fixed cost ___F___), then we can write the total cost as,
# $$C = k.M+F\text{ where } k\text{ is the constant of proportionality}$$
#
# This cost function (and this type of equation) is polynomial because it contains the variable ___M___ raised only to non-negative integer powers (here, the first power).
# # HEADING 2: Python libraries for numerical computing and visualization
# ## `NumPy` library
# In the daily work of a data scientist, reading and manipulating arrays is one of the most important and frequently encountered jobs. These arrays could be a one-dimensional list or multi-dimensional table or matrix, full of numbers. An array could be filled with integers, floating point numbers, Booleans, strings, or even mixed type. However, in the majority of cases, numeric data types are predominant.
#
# In this regard, __NumPy arrays__ will be the most important object in Python that you need to know in depth. `NumPy` and `SciPy` are open-source libraries within the Python ecosystem that provide common mathematical and numerical routines in fast (and often pre-compiled) functions. One of the main objects of the `NumPy` module is to handle or create __single- or multi-dimensional arrays__. We will use this advanced data structure more extensively in the Part-III of this book when we discuss linear algebra. For this chapter, however, we will focus on the basic mathematical operations that can be performed using the NumPy library.
import numpy as np
# ### Create an array from a Python list
my_list = [2,4,5]
my_array = np.array(my_list)
print(my_array)
lst1 = [1,2,3]
array1 = np.array(lst1)
lst2 = [10,11,12]
array2 = np.array(lst2)
list_sum = lst1+lst2
array_sum = array1 + array2
print("Sum of two lists:",list_sum)
print("Sum of two numpy arrays:",array_sum)
# ### Basic mathematical operations using arrays
print("array1 multiplied by array2: ",array1*array2)
print("array2 divided by array1: ",array2/array1)
print("array2 raised to the power of array1: ",array2**array1)
# ### More advanced mathematical operations on Numpy arrays
print ("Sine of 0.5:", np.sin(0.5))
print("Exponential of 2.2:", np.exp(2.2))
print("Natural logarithm of 5:", np.log(5))
print("10-base logarithm of 100:", np.log10(100))
print("Inverse cosine of 0.25:", np.arccos(0.25))
print("Hyperbolic sine of 2.5:", np.sinh(2.5))
# ### Proving a mathematical identity using NumPy operations
a = [i for i in range(1,21)]
arr = np.array(a)
x = [0.5]*20
x_arr = np.array(x)
log = ((-1)**(arr+1))*x_arr**arr/arr
print("Result using summation of the NumPy array:",np.sum(log))
print("Result using direct NumPy function:",np.log(1+0.5))
# ## Visualization using Matplotlib library
# ### Creating a basic plot
# To create a simple one-dimensional plot we need some data. Let us first generate the data using a NumPy function as we learned in the previous section.
import matplotlib.pyplot as plt
x = np.arange(1,50.1,0.1)
print(x)
sinx = np.sin(x)
plt.plot(sinx)
# ### Advanced features of the plotting function
plt.figure(figsize=(10,5))
plt.title("Plot of sin(x) vs. x\n",fontsize=20)
plt.xlabel("x values",fontsize=16)
plt.ylabel("Trigonometric function, sin(x)",fontsize=16)
plt.grid (True)
plt.ylim(-1.5,1.5)
plt.xticks([i*5 for i in range(55)],fontsize=15)
plt.yticks(fontsize=15)
plt.scatter(x=x,y=sinx,c='orange',s=50)
plt.text(x=25,y=1.1,s="A vertical blue dashed line \nis drawn at x=21",fontsize=15)
plt.vlines(x=21,ymin=-1.5,ymax=1.5,linestyles='dashed',color='blue',lw=3)
plt.legend(['Plot of sin(x)'],loc=2,fontsize=14)
plt.show()
# ### Plots of few common functions
x = np.arange(0,10,0.1)
def plotx(x, y, title):
    """Render y vs. x as a styled blue line plot and display it."""
    plt.figure(figsize=(6, 4))
    plt.title(title, fontsize=20)
    plt.plot(x, y, lw=3, c='blue')
    plt.grid(True)
    # Apply the same tick font size to both axes
    for tick_setter in (plt.xticks, plt.yticks):
        tick_setter(fontsize=15)
    plt.xlabel("x-values", fontsize=15)
    plt.ylabel("Function values", fontsize=15)
    plt.show()
y = x
title = "Linear function"
plotx(x,y,title)
y = x**2
title = "Quadratic function"
plotx(x,y,title)
y = np.exp(x)
title = "Exponential function"
plotx(x,y,title)
y = np.log(x)
title = "Natural logarithm function"
plotx(x,y,title)
y = np.cos(x)
title = "Cosine function"
plotx(x,y,title)
y = np.exp(-0.3*x)*np.sin(2*x)
title = "Exponentially decayed sine function"
plotx(x,y,title)
# ### Quadratic equation solve
def solve_quad(a=1, b=2, c=1):
    """
    Solve the quadratic equation a*x**2 + b*x + c = 0.

    Returns the two roots as a tuple: floats when the discriminant is
    non-negative, complex numbers otherwise.
    """
    from math import sqrt
    disc = b**2 - 4*a*c  # discriminant decides real vs. complex roots
    if disc >= 0:
        x1 = (-b + sqrt(disc)) / (2*a)
        x2 = (-b - sqrt(disc)) / (2*a)
        return (x1, x2)
    else:
        # BUG FIX: the real part was written as -b/2*a, which Python parses
        # as (-b/2)*a; the quadratic formula requires -b/(2*a).
        real = -b / (2*a)
        imag = sqrt(-disc) / (2*a)
        return (complex(real, imag), complex(real, -imag))
solve_quad(1,-2,1)
solve_quad(2,-5,4)
# #### Solving
# $$2x^2-7x+4=0$$
x1,x2=solve_quad(2,-7,4)
x1
x2
# #### Product of the roots
x1*x2
# #### Sum of the roots
x1+x2
# ### Growth of functions
logx = []
linx = []
quadx = []
for x in range(1,21):
logx.append(15*np.log10(x))
linx.append(2*x-6)
quadx.append(x**2-100)
logx=np.array(logx)
linx=np.array(linx)
quadx=np.array(quadx)
plt.figure(figsize=(8,6))
plt.plot(logx,c='r')
plt.plot(linx)
plt.plot(quadx)
plt.grid(True)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.legend(['Logarithmic function','Linear function','Quadratic function'],fontsize=15)
plt.show()
| Packt-Maths-Data-Scientist/CHAPTER 2 - Functions, equations, plots, and limit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ydadwhal/DataScienceDIAT/blob/main/2502.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="RzrNKXJ6CQYb"
import pandas as pd
import numpy as np
# + id="p5i1yVWdCkmD"
list1=[1,2,3,4,5,6]
sr1=pd.Series(list1)
# + colab={"base_uri": "https://localhost:8080/"} id="KIwLGv6dCzE4" outputId="b733ebee-fcf9-4f2e-ffb2-39b8b948faeb"
sr1
# + id="ZioxDVj3C5r3"
idx=['a','b','c','d','e','f']
# + id="9FCiYobdDHjA"
sr2=pd.Series(list1,index=idx)
# + colab={"base_uri": "https://localhost:8080/"} id="pcN3seTEDUUY" outputId="73b865cf-7b9d-4971-9928-05e6b24565b6"
sr2
# + colab={"base_uri": "https://localhost:8080/"} id="6YeDVGR2n-5J" outputId="3a044d5d-2ea6-4d1f-bccd-e4c219e6a15b"
sr2.keys()
# + colab={"base_uri": "https://localhost:8080/"} id="LVjyAb1WoBOA" outputId="c3ddb9de-8d2f-4bc8-e168-2613b0069788"
'a' in sr2
# + colab={"base_uri": "https://localhost:8080/"} id="nsuReAiNoHxc" outputId="130571d7-67d7-46f2-ef95-0570a7a7f495"
list(sr2.items())
# + id="YI5ND39HoHuY"
sr2['g']=8
# + colab={"base_uri": "https://localhost:8080/"} id="7Wd708U_oHoD" outputId="c928b293-2cda-40e9-b6f6-d7105b6f424b"
sr2['a':'c']
# + colab={"base_uri": "https://localhost:8080/"} id="B1jeoMs_oVCp" outputId="0858701c-393a-4ad9-d1b2-2a38e9cbd850"
sr2[0:2]
# + colab={"base_uri": "https://localhost:8080/"} id="LvHHS0rhoU_Y" outputId="6be4a485-1c0f-4edc-edcc-f1aae4244af1"
sr2[(sr2>3) & (sr2<5)]
# + colab={"base_uri": "https://localhost:8080/"} id="NS_mw2vHoU5Y" outputId="84dfbf06-f5e1-4950-f889-8538966af816"
sr2[['a','e']]
# + id="NXvYz_njoU2Z"
# + id="xJzxg_OODVY_"
r1=np.array([1,1,2,5,4,5,8])
# + id="toEB7l_2DtVn"
sr3=pd.Series(r1)
# + colab={"base_uri": "https://localhost:8080/"} id="E_UKw4gsD1fg" outputId="0e062c52-3fe7-4f8c-b2c9-47285dd8b74a"
sr3
# + id="wTQlQHdYD2wI"
dict1={'a':20,'a':30,'c':40}
# + colab={"base_uri": "https://localhost:8080/"} id="XSUY5H1MHJ9e" outputId="83b999f3-9707-4c67-ec32-4e4ad22a7eac"
dict1['a']
# + id="or6A2ztCEDpi"
sr4=pd.Series(dict1)
# + colab={"base_uri": "https://localhost:8080/"} id="Je4d3wBhEIMm" outputId="dadbf7d6-3129-4536-abfe-f1ec8a6c50f9"
sr4
# + id="Lp4_qv2uELSo"
dict2=dict(sr4)
# + colab={"base_uri": "https://localhost:8080/"} id="VZfY5LLXEUBv" outputId="e62716e7-5915-4e1f-ea02-27c6e4986ae5"
dict2
# + colab={"base_uri": "https://localhost:8080/"} id="AdqStjvwEVAX" outputId="95ce8acf-3931-46bd-a96b-003652013f91"
dict(sr1)
# + id="xQpIq9kUEX5p"
sr5=sr4.append(sr1)
# + colab={"base_uri": "https://localhost:8080/"} id="FE453JsbEpz3" outputId="4d18eda8-b47d-4d3e-a446-613acb9ff733"
sr5
# + colab={"base_uri": "https://localhost:8080/"} id="6eF9MuO1EqpH" outputId="e7a0e78e-7001-4c24-f63c-1397096ad1b5"
sr5[-4:-1]
# + colab={"base_uri": "https://localhost:8080/"} id="c1AuAWbeE0hZ" outputId="b9ea0cf8-7046-4d04-a4e4-1443f3594e61"
sr5.drop('c')
# + colab={"base_uri": "https://localhost:8080/"} id="nCECqWgZFL3X" outputId="da161d78-c886-496f-b795-fafd3d5ae605"
sr5.add(sr1)
# + colab={"base_uri": "https://localhost:8080/"} id="Csqzi4GVFYp_" outputId="ee71c799-c887-45cd-de0c-31fb51b3bba8"
sr5.sub(sr1)
# + colab={"base_uri": "https://localhost:8080/"} id="oMkAkc7SFkUP" outputId="997560ee-2767-4495-dc9f-d2a026f74765"
sr5.div(sr1)
# + colab={"base_uri": "https://localhost:8080/"} id="m7BAa5PoFnlX" outputId="38a51fd6-d583-46ea-ddde-106d5909083a"
sr5.mean()
# + colab={"base_uri": "https://localhost:8080/"} id="8TDTCsAgF0pH" outputId="dd68bc9b-2fea-4976-fea4-c7ad96e14cce"
sr5.median()
# + colab={"base_uri": "https://localhost:8080/"} id="GlNFG-gqF9Q_" outputId="0b1639d7-0a7e-4437-f204-37d0b1b30ef5"
sr5.describe()
# + id="_8YnJ-8CGKGf" colab={"base_uri": "https://localhost:8080/"} outputId="8b66b5c1-6db6-439e-c8a4-6d9ab661a636"
sr2.loc['b']
# + id="HqdFbqcyGhO3" colab={"base_uri": "https://localhost:8080/"} outputId="aa204e07-ff41-46b6-a051-899d070cc1c9"
sr2.loc['a':'c']
# + id="JyA4s_BGo-Jq"
# + id="j8iib3u7o-Fx"
# + id="LdhOSgiEo-CU"
# + id="hDdawoz9o9-7"
# + id="fQL1erEgo96x"
# + id="sfa63Dkdo91C"
# + [markdown] id="TzYJPuRGGvIw"
# # Data Frames
# + id="xQx4v3NDo8_R"
# + id="pyN2mpwlHj-2"
dict1={'Nname':['Girish','Bala','Ruchi','Nalini','Rohit','Prayas'],'Age':[23,19,20,23,55,19]}
# + id="HA_C-5EVIYGi"
df1=pd.DataFrame(dict1,index=[1001,1002,1003,1004,1005,1006])
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="Q6AuvzdHIX_P" outputId="b8aed6a9-bcdd-4e7c-eadb-44dd3b9916c9"
df1
# + colab={"base_uri": "https://localhost:8080/", "height": 143} id="Bd66hsEPH9lS" outputId="5384a24e-03c8-41dd-8def-390d3c3b8b0d"
df1.head(3)
# + colab={"base_uri": "https://localhost:8080/", "height": 143} id="COgBdKHtI2Qd" outputId="111a0725-e1ba-4c64-98d5-081780a9351d"
df1.tail(3)
# + colab={"base_uri": "https://localhost:8080/"} id="Saw0sOBCJAIO" outputId="5e33e289-d346-4334-cfa0-74cee0d899de"
df1.index
# + colab={"base_uri": "https://localhost:8080/"} id="sWhYoXruJEs-" outputId="984de6c7-205a-41f3-d926-1a54531b410f"
df1.columns
# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="LEKeZI2YJJYm" outputId="3230d404-ccdb-470d-d03e-00ceae632163"
df1.describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 112} id="r4JMEbhoJQEF" outputId="c75da69f-6cf4-45a8-9532-18d27a624efc"
df1[2:4]
# + colab={"base_uri": "https://localhost:8080/"} id="35VRwjTJJZjF" outputId="be317057-3597-41d0-986b-8da19f5056b6"
df1.loc[1002]
# + colab={"base_uri": "https://localhost:8080/", "height": 175} id="PoW6MR9eJjvm" outputId="f9c7b808-19e4-4f29-92fb-3cc175cfe92f"
df1.iloc[1:5]
# + id="rXJ5ylDqJpg2"
| 2502.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pickle
import numpy as np
import matplotlib.pyplot as plt
logs = pickle.load(open("log.txt", "rb"))
y = []
yerr = []
x = []
for log in logs:
y.append(np.mean(log['return_per_episode']))
yerr.append(np.std(log['return_per_episode']))
x.append(log['difficulty'])
plt.errorbar(y=y, x=x, yerr=yerr)
plt.xticks(x)
plt.title("Evaluating Mean Reward per Episode by Difficulty")
plt.xlabel("Difficulty")
plt.ylabel("mean reward per episode")
plt.savefig("perf_hist.png")
y = []
yerr = []
x = []
for log in logs:
y.append(np.sum(np.array(log['return_per_episode']) != 0) / len(log['return_per_episode']))
x.append(log['difficulty'])
plt.errorbar(y=y, x=x)
plt.xticks(x)
plt.title("Successes by Difficulty")
plt.xlabel("Difficulty")
plt.ylabel("Chance of success")
plt.savefig("successes_by_difficulty.png")
y = []
yerr = []
x = []
for log in logs:
return_per_episode = np.array([x for x in np.array(log['return_per_episode']) if x != 0])
y.append(np.mean(return_per_episode))
yerr.append(np.std(return_per_episode))
x.append(log['difficulty'])
plt.errorbar(y=y, x=x)
plt.xticks(x)
plt.title("Return of Succesful trajectories by Difficulty")
plt.xlabel("Difficulty")
plt.ylabel("Return")
plt.savefig("return_of_succesful_traj_by_difficulty.png")
y = []
x = []
yerr = []
for log in logs:
y.append(np.mean(log['target_path_length']))
yerr.append(np.std(log['target_path_length']))
x.append(log['difficulty'])
plt.errorbar(y=y, x=x, yerr=yerr)
plt.xticks(x)
plt.title("Target Path Length by Difficulty")
plt.xlabel("Difficulty")
plt.ylabel("Shortest path length to target door")
plt.savefig("target_path_length.png")
| viz.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lesson 2: Fourier Transform
# ## Notations
# Let $A\in\mathbb{C}^{M\times N}$, then its **conjugate transpose** is $A^*$ and its **transpose** is $A^T$.
import numpy as np
A = np.array([[1+1j,1,0]]);print(A.T);print(np.asmatrix(A).H)
# ## 1D Fourier Transform
# Let $F_n$ be the unitary $n\times n$ Fourier matrix as
# $$F_n=\frac{1}{\sqrt{n}}\begin{bmatrix}1&1&\cdots&1\\
# 1&\omega&\cdots&\omega^{n-1}\\
# \cdots&\cdots&&\cdots\\
# 1&\omega^{n-1}&\cdots&\omega^{(n-1)^2}\end{bmatrix},$$
# where $\omega=e^{-2\pi i/n}$.
#
# The discrete Fourier transform of $x\in\mathbb{C}^n$ is $F_nx$.
# Typically, the flop count of an $n\times n$ matrix multiplied by a vector is $O(n^2)$, but $F_nx$ can be computed in $O(n\log n)$ flops, which has a huge computational advantage. So this is also called the Fast Fourier transform.
# $F_n$ is unitary as $F_n^*F_n=F_nF_n^*=I_n$ where $I_n$ is the $n\times n$ identity matrix.
# **In Python, np.fft.fft(x) = $\sqrt{n}F_nx$, and np.fft.fft(x, norm='ortho') = $F_nx$**
# ## 2D Fourier Transform
# The 2D fourier transform on an $M\times N$ matrix is $F_2(X)=F_MXF_N^T$, and the inverse Fourier transform is $F_2^{-1}(u)=F_1^*(u)=F_m^*u(F_n^*)^T$.
import matplotlib.pyplot as plt
# %matplotlib inline
X = plt.imread('pepper.png')
X.shape
FX = np.fft.fft2(X) # 2D fourier transform
# showing the original and its fourier transform (only magnitude) together
f = plt.figure(figsize = (10,10))
f.add_subplot(1,2,1)
plt.imshow(X, cmap = 'gray')
f.add_subplot(1,2,2)
plt.imshow(np.absolute(np.fft.fftshift(FX)), cmap = 'gray')
plt.show(block = True)
# Now we verify that $F_MXF_N^T$ also gives the 2D Fourier Transform.
#
# np.fft.fft(X,axis=0) is to apply 1D FFT to every column of X. This is $F_MX$.
#
# np.fft.fft(X) is to apply 1D FFT to every row of X. This is $XF_N^T$.
FX2 = np.fft.fft(np.fft.fft(X,axis=0))
np.linalg.norm(FX-FX2)
# We can test on simpler matrices:
np.fft.fft(np.fft.fft([[1,1,1,1],[1,1,1,1]],axis=0))
np.fft.fft2([[1,1,1,1],[1,1,1,1]])
| nb/L2Fourier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3-azureml
# kernelspec:
# display_name: Python 3.6.0 64-bit
# name: python360jvsc74a57bd0aab07c05e18e3fae3dc841dc401f8bbd0373bf54f494cf150722bab0ea91cfef
# ---
# # Traduzione
#
# Una delle forze motrici che ha permesso lo sviluppo della civiltà umana è la capacità di comunicare reciprocamente. Nella maggior parte delle attività umane, la comunicazione è fondamentale.
#
# 
#
# L'intelligenza artificiale (IA) può aiutare a semplificare la comunicazione traducendo il testo o il parlato tra le lingue, aiutando a rimuovere le barriere alla comunicazione tra paesi e culture.
#
# ## Crea una risorsa di servizi cognitivi
#
# In Azure, puoi usare i servizi cognitivi per tradurre tra più lingue.
#
# Se non ne hai già una, procedi come segue per creare una risorsa di **Servizi Cognitivi** nella tua sottoscrizione di Azure:
#
# > **Nota**: Se disponi già di una risorsa di Servizi Cognitivi, basta aprire la sua pagina **Avvio rapido** nel portale di Azure e copiare la sua chiave e l'endpoint nella cella seguente. Altrimenti, procedi come segue per crearne una.
#
# 1. In un'altra scheda del browser, apri il portale di Azure all'indirizzo https://portal.azure.com, accedendo con il tuo account Microsoft.
# 2. Fai clic sul pulsante **+Crea una risorsa**, cerca *Servizi cognitivi* e crea una risorsa di **Servizi cognitivi** con le impostazioni seguenti:
# - **Sottoscrizione**: *La tua sottoscrizione di Azure*.
# - **Gruppo di risorse**: *Seleziona o crea un gruppo di risorse con un nome univoco*.
# - **Area geografica**: *Scegli una qualsiasi area disponibile*:
# - **Nome**: *Immetti un nome univoco*.
# - **Piano tariffario**: S0
# - **Confermo di aver letto e compreso gli avvisi**: Selezionato.
# 3. Attendi il completamento della distribuzione. Vai quindi alla tua risorsa di servizi cognitivi e, nella pagina **Panoramica**, fai clic sul link per gestire le chiavi per il servizio. Avrai bisogno dell'endpoint e delle chiavi per connetterti alla tua risorsa di servizi cognitivi dalle applicazioni client.
#
# ### Ottieni la chiave e la posizione per la tua risorsa di Servizi cognitivi
#
# Per usare la risorsa di servizi cognitivi, le applicazioni client hanno bisogno della chiave di autenticazione e della posizione:
#
# 1. Nel portale di Azure, nella pagina **Chiavi ed endpoint** per la tua risorsa di servizio cognitivo, copia la **Key1** per la tua risorsa e incollala nel codice sottostante, sostituendo **YOUR_COG_KEY**.
# 2. Copia la **Posizione** per la tua risorsa e incollala nel codice sottostante, sostituendo **YOUR_COG_LOCATION**.
# >**Nota**: Rimani nella pagina **Chiavi ed endpoint** e copia la **Posizione** da questa pagina (esempio: _westus_). Non aggiungere spazi tra le parole per il campo Posizione.
# 3. Esegui il codice seguente facendo clic sul pulsante **Esegui cella** (▷) a sinistra della cella.
# + gather={"logged": 1599695377020}
cog_key = 'YOUR_COG_KEY'
cog_location = 'YOUR_COG_LOCATION'
print('Ready to use cognitive services in {} using key {}'.format(cog_location, cog_key))
# -
# ## Traduzione del testo
#
# Come si evince dal nome, il servizio **Traduzione testuale** permette di tradurre il testo da una lingua all'altra.
#
# Non esiste un SDK Python per questo servizio, ma è possibile utilizzare la sua interfaccia REST per inviare richieste ad un endpoint su HTTP: una procedura relativamente semplice da eseguire in Python utilizzando la libreria **requests**. Le informazioni sul testo da tradurre e il testo tradotto risultante sono scambiati in formato JSON.
#
# Esegui la seguente cella per creare una funzione che svolga questa operazione, e poi testala con una semplice traduzione dall'inglese al francese.
# + gather={"logged": 1599695393341}
# Create a function that makes a REST request to the Text Translation service
def translate_text(cog_location, cog_key, text, to_lang='fr', from_lang='en'):
    """Translate *text* between languages via the Translator REST API.

    Sends a POST request to the Cognitive Services Text Translation
    endpoint and returns the translated string from the JSON response.
    """
    import requests, uuid, json
    # Endpoint URL plus source/target language query parameters
    service_url = (
        'https://api.cognitive.microsofttranslator.com/translate?api-version=3.0'
        + '&from={}&to={}'.format(from_lang, to_lang)
    )
    # Authentication headers for the Cognitive Services resource
    request_headers = {
        'Ocp-Apim-Subscription-Key': cog_key,
        'Ocp-Apim-Subscription-Region':cog_location,
        'Content-type': 'application/json',
        'X-ClientTraceId': str(uuid.uuid4())
    }
    # The request body carries the text to translate
    payload = [{
        'text': text
    }]
    # Call the service and unpack the translated text from the JSON reply
    reply = requests.post(service_url, headers=request_headers, json=payload).json()
    return reply[0]["translations"][0]["text"]
# Test the function
text_to_translate = "Hello"
translation = translate_text(cog_location, cog_key, text_to_translate, to_lang='fr', from_lang='en')
print('{} -> {}'.format(text_to_translate,translation))
# -
# Il servizio dovrebbe aver tradotto il testo inglese "Hello" al francese "Bonjour".
#
# Tieni presente che le lingue sono specificate usando un sistema standard di abbreviazioni linguistiche, con *en* per l'inglese e *fr* per il francese. Puoi anche usare abbreviazioni che includono culture specifiche: è utile quando la stessa lingua è usata in diverse aree geografiche, spesso con grafie diverse. Ad esempio *en-US* indica l'inglese negli Stati Uniti, mentre *en-GB* indica l'inglese in Gran Bretagna.
#
# Esegui la seguente cella per tradurre tra l'inglese britannico e l'italiano.
# + gather={"logged": 1599695400335}
text_to_translate = "Hello"
translation = translate_text(cog_location, cog_key, text_to_translate, to_lang='it-IT', from_lang='en-GB')
print('{} -> {}'.format(text_to_translate,translation))
# -
# Proviamo un'altra traduzione, questa volta dall'inglese americano al cinese.
# + gather={"logged": 1599695403076}
text_to_translate = "Hello"
translation = translate_text(cog_location, cog_key, text_to_translate, to_lang='zh-CN', from_lang='en-US')
print('{} -> {}'.format(text_to_translate,translation))
# -
# ## Traduzione vocale
#
# Puoi usare il servizio **Voce** per tradurre la lingua parlata.
#
# Ora puoi eseguire la seguente cella per creare e testare una funzione che utilizza l'SDK Voce per tradurre la voce udibile.
# + gather={"logged": 1599695532629}
# Create a function to translate audio in one language to text in another
def translate_speech(cog_location, cog_key, audio_file=None, to_lang='fr-FR', from_lang='en-US'):
    """Translate spoken audio from one language to another.

    Uses the Azure Speech SDK: recognizes a single utterance from the
    given audio file (or the default microphone when audio_file is None)
    and returns a (recognized_text, translation) tuple.
    """
    from azure.cognitiveservices.speech import SpeechConfig, AudioConfig, ResultReason
    from azure.cognitiveservices.speech.translation import SpeechTranslationConfig, TranslationRecognizer
    # Configure the speech translation service for the requested languages
    config = SpeechTranslationConfig(subscription=cog_key, region=cog_location)
    config.speech_recognition_language = from_lang
    config.add_target_language(to_lang)
    # Audio source: a file when given, otherwise the default microphone
    source = AudioConfig(filename=audio_file) if audio_file is not None else AudioConfig()
    # Recognize a single utterance and attempt the translation
    result = TranslationRecognizer(config, source).recognize_once()
    # Return the recognized speech and its translation (or a fallback)
    if result.reason == ResultReason.TranslatedSpeech:
        return result.text, result.translations[to_lang]
    if result.reason == ResultReason.RecognizedSpeech:
        # Speech was recognized but could not be translated
        return result.text, 'Unable to translate speech'
    # Neither recognized nor translated
    return 'Unknown', 'Unknown'
# Test the function
import os
file_name = 'english.wav'
file_path = os.path.join('data', 'translation', file_name)
speech, translated_speech = translate_speech(cog_location, cog_key, file_path, to_lang='es', from_lang='en-US')
result = '{} -> {}'.format(speech, translated_speech)
# Show translated text
print(result)
# -
# Tieni presente che la lingua "a" deve essere identificata con un codice lingua di 2 caratteri (ad esempio *en*), mentre la lingua "da" deve includere l'indicatore di cultura (ad esempio *en-US*).
#
# Proviamo a tradurre dal francese all'inglese.
# + gather={"logged": 1599695542192}
import os
file_name = 'french.wav'
file_path = os.path.join('data', 'translation', file_name)
speech, translated_speech = translate_speech(cog_location, cog_key, file_path, to_lang='en', from_lang='fr-FR')
result = '{} -> {}'.format(speech, translated_speech)
# Show translated text
print(result)
# -
# ## Scopri di più
#
# Puoi sapere di più su [Traduzione testuale](https://docs.microsoft.com/azure/cognitive-services/translator/) e sulla [traduzione con il servizio Voce](https://docs.microsoft.com/azure/cognitive-services/speech-service/index-speech-translation) nella documentazione del servizio.
| 09 - Translation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import localgraphclustering as lgc
import time
import numpy as np
# -
# # Example for multiclass label prediction
# +
g = lgc.GraphLocal('datasets/JohnsHopkins.graphml','graphml')
# List of nodes around which we want to find labels
labels = [[1,10,300],[3215],[1002,2500,540]]
output_mc=lgc.multiclass_label_prediction(g,labels=labels)
# -
print(output_mc)
| notebooks/multiclasslabelprediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Question 1 (55 marks)
# The Austin Animal Rescue dataset is a log of felines (cats) taken into their care. The dataset provided on Moodle contains details for 1,000 such animals.
#
# There are 1,000 records in total. You have been provided with an XML file containing this data (see the Moodle page).
#
# a) Create a function to import this XML file. Your function should include appropriate exception handling clauses. [15 marks]
# + pycharm={"name": "#%%\n", "is_executing": false}
import xml.etree.ElementTree as ET
import sys
# function to import the xml file; pass file name with path as parameter
def importXMl(file):
    """Parse the XML file at *file* and return its root element.

    Raises ET.ParseError for malformed XML and FileNotFoundError for a
    missing file; callers are expected to handle those.
    """
    parsed = ET.parse(file)
    return parsed.getroot()
try:
# invoking importXMl function
shelter = importXMl("./shelter.xml")
# checking for ET.ParseError exception
except ET.ParseError:
print("Formatting error in XML file")
except FileNotFoundError:
print("File not found.Please check the file name or location")
else:
if sys.exc_info()[2] != None :
print("Other type of error {}" .format(sys.exc_info()[2]))
else :
print("XML file imported successfully")
# + [markdown] pycharm={"name": "#%% md\n"}
# b) Use the print function to display the ‘breed’, ‘color’ and ‘coat_pattern’ of the first, third, fifth, seventh and ninth records in the XML dataset. (Hint: you may use the range() function). [10 marks]
# + pycharm={"name": "#%%\n", "is_executing": false}
#to get all row in the object
row = shelter.findall("row")
# to iterate over specific rows, using range method
for rw in range(1,10,2):
print("Row number :: ", rw)
print(f"breed : {row[rw].find('breed').text}")
print(f"color : {row[rw].find('color').text}")
print(f"coat_pattern : {row[rw].find('coat_pattern').text}\n")
# + [markdown] pycharm={"name": "#%% md\n"}
# c) Extract all the XML data and write it to a CSV file. Include appropriate exception handling. Your CSV file should also contain the column names. [30 marks]
# + pycharm={"name": "#%%\n", "is_executing": false}
import xmltodict
import json
# reading the XML as a flat file
try:
with open("./shelter.xml","r") as file :
shelter_xml = file.read()
# writing the XML to JSON
shelter_json = xmltodict.parse(shelter_xml)
with open("./shelter.json","w") as jsonFile :
json.dump(xmltodict.parse(shelter_xml), jsonFile)
# loading the json
with open('./shelter.json') as shelter_json:
details = json.load(shelter_json)
json_data = details['document']['row']
except FileNotFoundError:
print("File not found.Please check the file name or location")
else:
if sys.exc_info()[2] != None :
print("Other type of error {}" .format(sys.exc_info()[2]))
else :
print("File importing/ exporting successful")
# converting the json to Data Frame
# define the dataframe columns
col_names = ['ID', 'age_upon_outcome', 'breed', 'color', 'date_of_birth', 'coat_pattern', 'name', 'outcome_type', 'sex_upon_outcome', 'sex']
# importing panda
import pandas as pd
# instantiate the row_details dataframe
row_details = pd.DataFrame(columns=col_names)
# inserting records in data frame row by row
for data in json_data:
ID = data['ID']
age_upon_outcome = data['age_upon_outcome']
breed = data['breed']
color = data['color']
date_of_birth = data['date_of_birth']
coat_pattern = data['coat_pattern']
name = data['name']
outcome_type = data['outcome_type']
sex_upon_outcome = data['sex_upon_outcome']
sex = data['sex']
row_details = row_details.append({
'ID': ID,
'age_upon_outcome': age_upon_outcome,
'breed': breed,
'color' : color,
'date_of_birth': date_of_birth,
'coat_pattern': coat_pattern,
'name': name,
'outcome_type': outcome_type,
'sex_upon_outcome': sex_upon_outcome,
'sex': sex}, ignore_index=True)
row_details
import csv
# dumping data into the CSV
try :
row_details.to_csv('./shelter.csv',index= False)
except Exception:
print("Exception raised while dumping the data frame to csv")
else:
if sys.exc_info()[2] != None :
print("Other type of error {}" .format(sys.exc_info()[2]))
else :
print("CSV exporting successful")
# + [markdown] pycharm={"name": "#%% md\n"}
# # Question 2 (25 marks)
# a) Create a NumPy array filled with 2,000 numbers. Ensure that your array has 500 rows and 4 columns. [5 marks]
# + pycharm={"name": "#%%\n", "is_executing": false}
import numpy as np
# using arange to create 1 t0 2000 variables
array = np.arange(1,2001).reshape(500,4)
print("shape of array : ", array.shape)
# + [markdown] pycharm={"name": "#%% md\n"}
# b) Using slicing, split this array into 5 separate arrays. The number of rows in each array should be equal, and there should still be 4 columns. [10 marks]
# + pycharm={"name": "#%%\n", "is_executing": false}
# using vsplit method to split the array
array1 = array[:100,:]
array2 = array[100:200,:]
array3 = array[200:300,:]
array4 = array[300:400,:]
array5 = array[400:500,:]
array_split = []
array_split.append(array1)
array_split.append(array2)
array_split.append(array3)
array_split.append(array4)
array_split.append(array5)
for arr in range(0,len(array_split)) :
print(f"shape of array {arr + 1} is {array_split[arr].shape}")
# -
# c) Reshape these 3 of these arrays into any dimensions of your choice. They should all have different dimensions. [5 marks]
# + pycharm={"name": "#%%\n", "is_executing": false}
array_split[0] = array_split[0].reshape(4,100)
array_split[1] = array_split[1].reshape(50,8)
array_split[2] = array_split[2].reshape(8,50)
print("Changed the shape of first 3 array")
for arr in range(0,len(array_split)) :
print(f"shape of array {arr + 1} is {array_split[arr].shape}")
# + [markdown] pycharm={"name": "#%% md\n"}
# d) Split 2 of these reshaped arrays horizontally [5 marks]
# + pycharm={"name": "#%%\n", "is_executing": false}
print("\n\n")
array_hsplit4 = np.hsplit(array_split[4],2)
array_hsplit5 = np.hsplit(array_split[4],4)
print("Spliting done to 4th and 5th array")
# + [markdown] pycharm={"name": "#%% md\n"}
# # Question 3 (20 marks)
# Given the following string:
#
# """
# All I want is a proper cup of coffee
# Made in a proper copper coffee pot
# I may be off my dot
# But I want a cup of coffee
# From a proper coffee pot
# Tin coffee pots and iron coffee pots
# They’re no use to me
# If I can’t have a proper cup of coffee
# In a proper copper coffee pot
# I’ll have a cup of tea.
# """
#
# a) Using regular expressions, write a single function to highlight the words “coffee”,“pot” or “pots” if they appear at the end of a line. [15 marks]
#
# The text produced by your code look like this:
#
# All I want is a proper cup of {coffee}
# Made in a proper copper coffee {pot}
# I may be off my dot
# But I want a cup of {coffee}
# From a proper coffee {pot}
# Tin coffee pots and iron coffee {pots}
# They’re no use to me
# If I can’t have a proper cup of {coffee}
# In a proper copper coffee {pot}
# I’ll have a cup of tea.
# + pycharm={"name": "#%%\n", "is_executing": false}
import re
def re_view(pattern, string):
    """Print *string* with every match of *pattern* wrapped in curly braces.

    The pattern is compiled with re.M so ``^``/``$`` anchor at each line;
    trailing whitespace of *string* is stripped before substitution.
    """
    # r"{\g<0>}" re-inserts the whole match (group 0) inside braces; the raw
    # string avoids the invalid "\g" escape warning of the original literal.
    print(re.compile(pattern, re.M).sub(r"{\g<0>}", string.rstrip()))
# The poem used throughout Question 3; re_view strips the trailing
# newline of this literal (rstrip) before substituting.
string = '''
All I want is a proper cup of coffee
Made in a proper copper coffee pot
I may be off my dot
But I want a cup of coffee
From a proper coffee pot
Tin coffee pots and iron coffee pots
They’re no use to me
If I can’t have a proper cup of coffee
In a proper copper coffee pot
I’ll have a cup of tea.
'''
# Part a): highlight "coffee", "pot" or "pots" only at end-of-line (re.M '$').
re_view(r"(coffee|pot|pots)$",string)
# -
# b) Using regular expressions, write a single function to highlight the words “proper” and “want” regardless of where they are found in a sentence. The words listed in part a) above should also be highlighted but only when they appear at the end of a sentence. [5 marks]
#
# The text produced by your code should look like this:
#
# All I {want} is a {proper} cup of {coffee}
# Made in a {proper} copper coffee {pot}
# I may be off my dot
# But I {want} a cup of {coffee}
# From a {proper} coffee pot.
# Tin coffee pots and iron coffee {pots}
# They’re no use to me
# If I can’t have a {proper} cup of {coffee}
# In a {proper} copper coffee {pot}
# I’ll have a cup of tea.
# + pycharm={"name": "#%%\n", "is_executing": false}
# Part b): additionally highlight "proper" and "want" anywhere; the part-a)
# words are still matched only at end-of-line via the '$' alternative.
re_view(r"((coffee|pot|pots)$|(proper|want))",string)
| Old_CA/x19148496.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# - Load the iris dataset from sklearn.datasets
# - Print feature names
# - Print sepal length (SL) values for each iris example
from sklearn.datasets import load_iris
data = load_iris()
print(data.feature_names)
# Column 0 of the feature matrix is "sepal length (cm)".
sl_values = data.data[:,0]
print(sl_values)
# Plot a simple histogram of the SL values using the matplotlib.pyplot package
# +
import matplotlib.pyplot as plt
plt.hist(sl_values)
plt.show()
# -
# Perform a transformation to uniform distribution of the SL values using the sklearn.preprocessing package QuantileTransformer and 10 quantiles.
#
# Plot a simple histogram of the uniform SL values
# +
from sklearn.preprocessing import QuantileTransformer
# Map SL values onto a uniform distribution via their empirical CDF.
# reshape(-1, 1): sklearn expects a 2-D (n_samples, n_features) array.
qt = QuantileTransformer(n_quantiles=10,output_distribution='uniform')
uni_SL=qt.fit_transform(sl_values.reshape(-1, 1))
plt.hist(uni_SL)
plt.show()
# -
# Perform a transformation to normal distribution of the SL values using the same package (and 10 quantiles again)
# +
# Same quantile mapping, but targeting a standard normal distribution.
qt = QuantileTransformer(n_quantiles=10,output_distribution='normal')
norm_SL=qt.fit_transform(sl_values.reshape(-1, 1))
plt.hist(norm_SL)
plt.show()
| notebooks/Solutions/DATAPREP_01a_GenericTransformations_Lab_Solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: mlenv
# language: python
# name: mlenv
# ---
# ### Deliverable 1: Preprocessing the Data for a Neural Network
# +
# Import our dependencies
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler,OneHotEncoder
import pandas as pd
import tensorflow as tf
# Import and read the charity_data.csv.
import pandas as pd  # NOTE(review): duplicate of the import above; redundant but harmless
application_df = pd.read_csv("charity_data.csv")
application_df.head()
# -
# Drop the non-beneficial ID columns, 'EIN' and 'NAME'.
application_df = application_df.drop(['EIN', 'NAME'], axis=1)
# Determine the number of unique values in each column.
application_df.nunique()
# Look at APPLICATION_TYPE value counts for binning
APPLICATION_TYPE_counts = application_df.APPLICATION_TYPE.value_counts()
APPLICATION_TYPE_counts
# Visualize the value counts of APPLICATION_TYPE
# (density plot of the counts; helps pick the binning cut-off used below)
APPLICATION_TYPE_counts.plot.density()
# +
# Bin rare categories: any APPLICATION_TYPE seen fewer than 500 times is
# collapsed into a single "Other" bucket.
replace_application = list(APPLICATION_TYPE_counts[APPLICATION_TYPE_counts < 500].index)
# Replace in dataframe (one vectorised call instead of one full pass per value)
application_df.APPLICATION_TYPE = application_df.APPLICATION_TYPE.replace(replace_application,"Other")
# Check to make sure binning was successful
application_df.APPLICATION_TYPE.value_counts()
# -
# Look at CLASSIFICATION value counts for binning
CLASSIFICATION_counts = application_df.CLASSIFICATION.value_counts()
CLASSIFICATION_counts
# Visualize the value counts of CLASSIFICATION
# (density plot of the counts; helps pick the binning cut-off used below)
CLASSIFICATION_counts.plot.density()
# +
# Bin rare categories: any CLASSIFICATION seen fewer than 1800 times is
# collapsed into a single "Other" bucket.
replace_class = list(CLASSIFICATION_counts[CLASSIFICATION_counts < 1800].index)
# Replace in dataframe (one vectorised call instead of one full pass per value)
application_df.CLASSIFICATION = application_df.CLASSIFICATION.replace(replace_class,"Other")
# Check to make sure binning was successful
application_df.CLASSIFICATION.value_counts()
# -
# Generate our categorical variable lists
application_cat = ["APPLICATION_TYPE","AFFILIATION","CLASSIFICATION","USE_CASE","ORGANIZATION","INCOME_AMT","SPECIAL_CONSIDERATIONS"]
# +
# Create a OneHotEncoder instance
# NOTE(review): `sparse=False` and `get_feature_names` are pre-1.2 sklearn
# spellings (now `sparse_output` / `get_feature_names_out`); left unchanged
# to match the sklearn version this notebook was built against.
enc = OneHotEncoder(sparse=False)
# Fit and transform the OneHotEncoder using the categorical variable list
encode_df = pd.DataFrame(enc.fit_transform(application_df[application_cat]))
# Add the encoded variable names to the dataframe
encode_df.columns = enc.get_feature_names(application_cat)
encode_df
# -
# Merge one-hot encoded features and drop the originals
# (axis=1 keyword: the positional axis argument was removed in pandas 2.x)
application_df = application_df.merge(encode_df,left_index=True,right_index=True).drop(application_cat,axis=1)
application_df
# +
# Split our preprocessed data into our features and target arrays
y = application_df["IS_SUCCESSFUL"].values
# axis=1 keyword: the positional axis argument was removed in pandas 2.x
X = application_df.drop(["IS_SUCCESSFUL"],axis=1).values
# Split the preprocessed data into a training and testing dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
# +
# Create a StandardScaler instances
scaler = StandardScaler()
# Fit the StandardScaler
X_scaler = scaler.fit(X_train)
# Scale the data
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# -
# ### Deliverable 2: Compile, Train and Evaluate the Model
# +
# Import checkpoint dependencies
import os
from tensorflow.keras.callbacks import ModelCheckpoint
# Define the checkpoint path and filenames
# ({epoch:02d} is filled in by Keras at save time)
os.makedirs("checkpoints/",exist_ok=True)
checkpoint_path = "checkpoints/weights.{epoch:02d}.hdf5"
# +
# Define the model - deep neural net, i.e., the number of input features and hidden nodes for each layer.
number_input_features = len(X_train[0])
hidden_nodes_layer1 = 80
hidden_nodes_layer2 = 75
nn = tf.keras.models.Sequential()
# First hidden layer (input_dim wires up the input layer implicitly)
nn.add(
    tf.keras.layers.Dense(units=hidden_nodes_layer1, input_dim=number_input_features, activation="relu")
)
# Second hidden layer
nn.add(tf.keras.layers.Dense(units=hidden_nodes_layer2, activation="relu"))
# Output layer: single sigmoid unit for the binary IS_SUCCESSFUL target
nn.add(tf.keras.layers.Dense(units=1, activation="sigmoid"))
# Check the structure of the model
nn.summary()
# +
# Compile the model
nn.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# Create a callback that saves the model's weights every 5 epochs
# NOTE(review): the deprecated `period=5` argument takes precedence, so
# checkpoints are written every 5 epochs rather than every epoch — confirm
# which cadence was intended.
cp_callback = ModelCheckpoint(
    filepath=checkpoint_path,
    verbose=1,
    save_weights_only=True,
    save_freq='epoch',
    period=5)
# -
# Train the model (no validation split; metrics below come from the held-out test set)
fit_model = nn.fit(X_train_scaled,y_train,epochs=100,callbacks=[cp_callback])
# Evaluate the model using the test data
model_loss, model_accuracy = nn.evaluate(X_test_scaled,y_test,verbose=2)
print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")
# Export our model to HDF5 file
nn.save("AlphabetSoupCharity.h5")
| Deliverable 3 Opt/AlphabetSoupCharity_Optimization_Add_Neurons.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:pytorch_p36]
# language: python
# name: conda-env-pytorch_p36-py
# ---
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("white")
plt.rcParams["figure.figsize"] = (20, 20)
import pickle
import numpy as np
import pandas as pd
import networkx as nx
from umap import UMAP
from itertools import combinations
from tqdm import tqdm_notebook as tqdm
from sklearn.cluster import AgglomerativeClustering
import torch
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# -
# # assemble the data
# Load the pickled {article: [linked articles]} mapping, build an undirected
# graph, and densify its adjacency matrix as the autoencoder training data.
with open("/mnt/efs/wikipedia/good_article_links.pkl", "rb") as fp:
    graph_dict = pickle.load(fp)
G = nx.from_dict_of_lists(graph_dict)
adjacency_matrix = torch.Tensor(nx.adjacency_matrix(G).todense())
# ### dataset
class AdjacencyDataset(Dataset):
    """Torch dataset serving one adjacency-matrix row per item."""

    def __init__(self, adjacency_matrix):
        # Keep a reference to the full (n x n) tensor; rows are served lazily.
        self.adjacency_matrix = adjacency_matrix

    def __getitem__(self, index):
        """Return row *index* of the adjacency matrix."""
        return self.adjacency_matrix[index]

    def __len__(self):
        """Number of rows, i.e. number of graph nodes."""
        return len(self.adjacency_matrix)
dataset = AdjacencyDataset(adjacency_matrix)
# ### dataloader
# +
batch_size = 64
# Shuffle for SGD; 5 worker processes feed batches of adjacency rows.
dataloader = DataLoader(
    dataset=dataset, batch_size=batch_size, shuffle=True, num_workers=5
)
# -
# # define autoencoder model
class Encoder(nn.Module):
    """Maps a node's adjacency row down to a low-dimensional embedding.

    Layers: input -> mid -> embedding -> embedding, with ReLU + Dropout
    between the linear layers. ``mid_size`` is the multiplicative
    (geometric-mean) midpoint of the input and embedding sizes.
    """

    def __init__(self, input_size, embedding_size=50):
        super().__init__()
        self.input_size = input_size
        self.embedding_size = embedding_size
        # use the multiplicative midpoint between the two sizes
        self.mid_size = int(
            self.input_size // np.sqrt(self.input_size / self.embedding_size)
        )
        # (a stray debug `print()` emitting a blank line was removed here)
        self.encode = nn.Sequential(
            nn.Linear(in_features=self.input_size, out_features=self.mid_size),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(in_features=self.mid_size, out_features=self.embedding_size),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(
                in_features=self.embedding_size, out_features=self.embedding_size
            ),
        )

    def forward(self, x):
        """Return embeddings for a batch of adjacency rows."""
        return self.encode(x)
class Decoder(nn.Module):
    """Expands an embedding back into a full adjacency-row reconstruction.

    Mirror image of Encoder: embedding -> embedding -> mid -> output, with
    ReLU + Dropout between the linear layers.
    """

    def __init__(self, output_size, embedding_size=50):
        super().__init__()
        self.output_size = output_size
        self.embedding_size = embedding_size
        # Multiplicative (geometric-mean) midpoint between the two sizes.
        self.mid_size = int(output_size // np.sqrt(output_size / embedding_size))
        self.decode = nn.Sequential(
            nn.Linear(embedding_size, embedding_size),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(embedding_size, self.mid_size),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(self.mid_size, output_size),
        )

    def forward(self, x):
        """Reconstruct adjacency rows from embeddings *x*."""
        return self.decode(x)
class Autoencoder(nn.Module):
    """Encoder/decoder pair over adjacency rows; outputs edge probabilities
    in [0, 1] suitable for BCELoss against the 0/1 adjacency entries."""

    def __init__(self, input_size, embedding_size=50):
        super().__init__()
        self.embedding_size = embedding_size
        self.input_size = input_size
        self.output_size = input_size
        self.encoder = Encoder(self.input_size, self.embedding_size)
        self.decoder = Decoder(self.output_size, self.embedding_size)

    def forward(self, x):
        embedding = self.encoder(x)
        decoded = self.decoder(embedding)
        # torch.sigmoid avoids constructing a fresh nn.Sigmoid module on
        # every forward pass (original: nn.Sigmoid()(decoded)).
        return torch.sigmoid(decoded)
# # create model
# NOTE(review): `input_size` is computed but the constructor is passed
# len(G.nodes) directly; the two are equal for a square adjacency matrix.
input_size = len(adjacency_matrix)
autoencoder = Autoencoder(input_size=len(G.nodes), embedding_size=20).to(device)
autoencoder
# # training
# +
# Global per-batch loss history, appended to by train() below.
losses = []
def train(model, train_loader, n_epochs, loss_function, optimiser, device=device):
    """Fit *model* for *n_epochs*, appending every batch loss to the
    module-level ``losses`` list.

    For this autoencoder the batch serves as both input and target.
    """
    model.train()
    for epoch in range(n_epochs):
        loop = tqdm(train_loader)
        for batch in loop:
            # .to(device) honours the `device` parameter; the original
            # called .cuda() unconditionally, crashing on CPU-only hosts.
            data = batch.to(device, non_blocking=True)
            target = data  # reconstruction target == input
            optimiser.zero_grad()
            prediction = model(data)
            loss = loss_function(prediction, target)
            losses.append(loss.item())
            loss.backward()
            optimiser.step()
            loop.set_description(f"Epoch {epoch + 1}/{n_epochs}")
            loop.set_postfix(loss=loss.item())
# +
torch.backends.cudnn.benchmark = True  # autotune kernels for fixed input shapes
trainable_parameters = filter(lambda p: p.requires_grad, autoencoder.parameters())
# BCE on sigmoid outputs vs. the 0/1 adjacency entries.
loss_function = nn.BCELoss()
optimiser = optim.Adam(trainable_parameters, lr=0.001)
# -
train(
    model=autoencoder,
    train_loader=dataloader,
    loss_function=loss_function,
    optimiser=optimiser,
    n_epochs=10,
)
# Smooth the per-batch loss curve with a 15-batch rolling mean before plotting.
loss_data = pd.Series(losses).rolling(window=15).mean()
ax = loss_data.plot(subplots=True);
# Embed every node in a single forward pass (inference only), then project
# to 2-D with UMAP for plotting.
# NOTE(review): the variable is named embeddings_50d but the model above was
# built with embedding_size=20.
with torch.no_grad():
    embeddings_50d = (
        autoencoder.encoder(adjacency_matrix.to(device)).detach().cpu().numpy()
    )
embeddings_2d = UMAP(n_components=2, metric="cosine").fit_transform(embeddings_50d)
df = pd.DataFrame(embeddings_2d)
# Cluster in the original embedding space; colour the 2-D scatter by cluster.
cluster = AgglomerativeClustering()
df["cluster"] = cluster.fit_predict(embeddings_50d)
df.plot.scatter(x=0, y=1, c=df["cluster"], cmap="Paired");
# Peek at the members of cluster 1. `node_names` is defined here because the
# original only created it in a later cell, so a fresh top-to-bottom run
# raised NameError at this line.
node_names = np.array(G.nodes)
node_names[df[df["cluster"] == 1].index.values]
# # query with nmslib
# +
import nmslib
# Build an HNSW approximate-nearest-neighbour index over the embeddings.
index = nmslib.init(method="hnsw")
index.addDataPointBatch(embeddings_50d)
index.createIndex({"post": 2}, print_progress=True)
# +
node_names = np.array(G.nodes)
# Pick a random article and reshape its embedding into a 1 x dim query row.
query_index = np.random.choice(len(node_names))
query_embedding = embeddings_50d[query_index].reshape(1, -1)
query_node_name = node_names[query_index]
query_node_name
# -
# The 10 approximate nearest neighbours of the query article.
ids, distances = index.knnQuery(query_embedding, k=10)
node_names[ids]
| notebooks/wikipedia/notebooks/05 - autoencoders.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Задача 1: вычислить $\sqrt [k]a$
# #### - методом бисекции
# #### - методом Ньютона
import random
# Сразу отмечу, что я буду рассматривать $a >= 0$. Если же $a < 0$, то если $k$ - чётно, то решений нет, а если $k$ - нечётно, то достаточно взять корень k-й степени из $|a|$ и умножить на $(-1)$
# ### Метод бисекции
def bisection(f, l, r, eps):
    """Find a root of f on [l, r] by interval halving, to precision eps.

    Requires f(l) and f(r) to have opposite signs, unless an endpoint is
    already an exact root (returned immediately).
    """
    # Exact roots at the endpoints short-circuit the precondition checks.
    if f(l) == 0:
        return l
    if f(r) == 0:
        return r
    assert eps > 0
    assert f(l) * f(r) < 0
    assert l <= r
    while r - l > eps:
        mid = (l + r) / 2
        value = f(mid)
        if value == 0:
            return mid
        # Keep the half whose endpoints still bracket the sign change.
        if f(l) * value < 0:
            r = mid
        else:
            l = mid
    return (l + r) / 2
eps = 10**(-6)
# Smoke test: for k = 2..10 pick a random a in [1, 1000] and compare the
# bisection root of x^k - a on [0, max(a, 1)] against a**(1/k).
for r in range(2, 11):
    k = r
    a = random.randint(1, 1000)
    res = bisection(lambda x: (x ** k - a), 0, max(a, 1), eps)
    diff = res - a**(1/k)
    print(f"a = {a}, k = {k}, a**(1/k) = {a**(1/k)}, method's result = {res}, diff = {diff}, eps = {eps}")
# ### Метод Ньютона
def newton_method(f, df, x, eps):
    """Newton iteration x_{k+1} = x_k - f(x_k)/df(x_k), started from *x*.

    Stops once two consecutive iterates differ by at most *eps*.
    """
    previous = x + 2 * eps  # guarantees the loop body runs at least once
    while abs(previous - x) > eps:
        previous, x = x, x - f(x) / df(x)
    return x
eps = 10**(-6)
# Same smoke test for Newton's method, starting from x0 = a with
# f(x) = x^k - a and f'(x) = k * x^(k-1).
for r in range(2, 11):
    k = r
    a = random.randint(1, 1000)
    res = newton_method(lambda x: (x ** k - a), lambda x: (k * (x ** (k - 1))), a, eps)
    diff = res - a**(1/k)
    print(f"a = {a}, k = {k}, a**(1/k) = {a**(1/k)}, method's result = {res}, diff = {diff}, eps = {eps}")
# # Задание 2
# Первые две части стоит рассматривать в совокупности. Давайте посмотрим на $p(x)$. Какие у него могут быть корни?
#
# Во-первых, корни нечетной кратности - они будут между нулями производной (т.е. между экстремумами исходной функции) или также исходные границы. Что будет соответствовать нашему разбиению (т.е. пункт а) - возращаемое значению функцией lrs(...).
#
# Во-вторых, корни чётной кратности. Они не будут подходить под условие изолированности из первого пункта (т.к. с двух сторон от корня будет либо положительность, либо отрицательность). Поэтому я решил, что не стоит вносить их в функцию lrs. Однако их нужно безусловно также искать - это будут точки экстремума (ну и теоретически l и r). Давайте всех их и проверим (функция even_roots).
from numpy import polynomial
import numpy as np
def filter_eps(a, eps):
    """Deduplicate a sorted sequence of root estimates: keep a value only
    if it lies at least 2*eps away from the previously kept one."""
    if len(a) == 0:
        return []
    kept = [a[0]]
    for value in a[1:]:
        if abs(value - kept[-1]) >= 2*eps:
            kept.append(value)
    return kept
def lrs(p, l, r, eps):
    """Return (left, right) brackets on [l, r] where polynomial p changes
    sign — intervals isolating roots of odd multiplicity.

    Breakpoints are the extrema of p (roots of p'), so p is monotonic
    between consecutive breakpoints and a sign change brackets exactly one
    root. Relies on the sibling function ``roots``.
    """
    droots = roots(p.deriv(), l, r, eps)
    ans = []
    if len(droots) > 0:
        # Check [l, first extremum], each adjacent extremum pair, and
        # [last extremum, r] for a sign change.
        if p(l) * p(droots[0]) < 0:
            ans.append((l, droots[0]))
        for i in range(1, len(droots)):
            if p(droots[i-1]) * p(droots[i]) < 0:
                ans.append((droots[i-1], droots[i]))
        if p(r) * p(droots[-1]) < 0:
            ans.append((droots[-1], r))
    else:
        # No interior extrema: p is monotonic on [l, r], at most one root.
        if p(l) * p(r) < 0:
            ans.append((l, r))
    return ans
def even_roots(p, l, r, eps):
    """Return points of [l, r] where p is numerically zero at an endpoint
    or extremum — candidates for even-multiplicity roots, which cause no
    sign change and are invisible to the bracketing in ``lrs``."""
    candidates = [l] + roots(p.deriv(), l, r, eps) + [r]
    return [x for x in candidates if abs(p(x)) < eps]
def roots(p, l, r, eps):
    """All roots of polynomial p on [l, r], deduplicated to ~eps spacing.

    Odd-multiplicity roots come from bisection on the sign-change brackets
    of ``lrs``; even-multiplicity candidates from ``even_roots``.
    """
    if p.degree() == 0:
        return []  # non-zero constant: no roots (recursion base case)
    found = [bisection(p, a, b, eps) for (a, b) in lrs(p, l, r, eps)]
    found.extend(even_roots(p, l, r, eps))
    return filter_eps(sorted(found), eps)
# Таким образом мы решили первые два пункта.
#
# Какие потенциальные точки минимума? Это все экстремумы (т.е. нули производной и l, r).
def minimum(p, l, r, eps):
    """Minimum value of polynomial p on [l, r]: the smallest of p at the
    endpoints and at the interior extrema (zeros of p')."""
    candidates = [p(l), p(r)]
    candidates.extend(p(x) for x in roots(p.deriv(), l, r, eps))
    return min(candidates)
# +
# Cross-check roots()/minimum() on four polynomials against values read off
# WolframAlpha (Polynomial coefficients are in ascending-degree order).
eps = 10**(-8)
p2 = polynomial.Polynomial(coef=(0.53, -2.42, 0.3))
p2_roots_from_wolfram = [0.225301, 7.84137]
p2_min_from_wolfram = -4.3503
print(f"roots: {roots(p2, -100, 100, eps)}, roots from wolframalpha: {p2_roots_from_wolfram}\nminimum: {minimum(p2, -100, 100, eps)}, minimum from wolfram: {p2_min_from_wolfram}")
p3 = polynomial.Polynomial(coef=(1, 1, 1, 1))
p3_roots_from_wolfram = [-1]
p3_min_from_wolfram = -909
print(f"roots: {roots(p3, -10, 10, eps)}, roots from wolframalpha: {p3_roots_from_wolfram}\nminimum: {minimum(p3, -10, 10, eps)}, minimum from wolfram: {p3_min_from_wolfram}")
p4 = polynomial.Polynomial(coef=(1, -1, 1, -1, -1))
p4_roots_from_wolfram = [-1.92756, 0.774804]
p4_min_from_wolfram = -100990099
print(f"roots: {roots(p4, -100, 100, eps)}, roots from wolframalpha: {p4_roots_from_wolfram}\nminimum: {minimum(p4, -100, 100, eps)}, minimum from wolfram: {p4_min_from_wolfram}")
p5 = polynomial.Polynomial(coef=(1, 2, 3, 4, 5, 6))
p5_roots_from_wolfram = [-0.670332]
p5_min_from_wolfram = -59503970199
print(f"roots: {roots(p5, -100, 100, eps)}, roots from wolframalpha: {p5_roots_from_wolfram}\nminimum: {minimum(p5, -100, 100, eps)}, minimum from wolfram: {p5_min_from_wolfram}")
# -
# # Задание 3
# $f(x) = e^{ax} + e^{-bx} + c(x-d)^2$
# Требуется найти минимум $f(x)$ при условии, что $a, b, c > 0$
# Давайте рассмотрим $f(x)$. Она выпукла, т.к. все слагаемые выпуклы, а линейная комбинация выпуклых функций - выпукла.
#
# Давайте тогда посчитаем производную $f$:
# $f'(x) = a\cdot e^{ax} - b\cdot e^{-bx} + 2c(x-d)$
#
# Посмотрим сразу на вторую производную:
# $f''(x) = a^2\cdot e^{ax} + b^2\cdot e^{-bx} + 2c$.
#
# Заметим, что при наших ограничениях на $a, b, c$ имеем $f''(x) > 0$ (кроме того очевидно, что $f, f', f''$ непрерывны). Это значит, что $f'(x)$ монотонно возрастает. А это значит в свою очередь, что $f(x)$ имеет один экстремум (в точке, где $f'(x) = 0$), а т.к. $f'(x)$ - возрастающая, то это минимум. Значит нам необходимо найти ноль производной.
# +
from numpy import exp
eps = 10**(-9)
def task3_funcs(a, b, c, d):
    """Build f(x) = e^(a*x) + e^(-b*x) + c*(x-d)^2 together with its first
    and second derivatives; returns the triple (f, f', f'')."""
    def f(x):
        return exp(a * x) + exp(-b * x) + c * (x - d) ** 2

    def df(x):
        return a * exp(a * x) - b * exp(-b * x) + 2 * c * (x - d)

    def d2f(x):
        return a * a * exp(a * x) + b * b * exp(-b * x) + 2 * c

    return (f, df, d2f)
# -
def ternarny_search(f, l, r, eps):
    """Minimise a unimodal function f on [l, r] by ternary search; returns
    the midpoint of the final interval (accurate to ~eps)."""
    while r - l > eps:
        third = (r - l) / 3
        left_probe = l + third
        right_probe = r - third
        # Discard the third of the interval that cannot hold the minimum.
        if f(left_probe) > f(right_probe):
            l = left_probe
        else:
            r = right_probe
    return (l + r) / 2
# +
# Bracket the minimiser: expand r (resp. l) until f' changes sign.
(f, df, d2f) = task3_funcs(1, 2, 3, 4)
r = 1
while df(r) <= 0:
    r *= 2
l = -1
while df(l) >= 0:
    l *= 2
bisection_res = bisection(df, l, r, eps)
print(f"Бисекция: x = {bisection_res}, f(x) = {f(bisection_res)}")
# -
# NOTE(review): "<NAME>" in the label below looks like an anonymisation
# artefact (presumably "Метод Ньютона").
newton_res = newton_method(df, d2f, 0, eps = 10**(-18))
print(f"<NAME>: x = {newton_res}, f(x) = {f(newton_res)}")
ternarny_res = ternarny_search(f, l, r, eps)
print(f"Тернарный поиск: x = {ternarny_res}, f(x) = {f(ternarny_res)}")
| korotchenko-denis/hw1/hw1-korotchenko.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exploring Youth Literacy Rates Using World Bank Data
#
# ## Section 1: Business Understanding
# Literacy is crucial for individuals and countries as a whole. In this post, I will examine three questions related to youth literacy:
# ### Questions
# 1. How has youth literacy changed over time?
# 2. Which countries have the lowest youth literacy rates?
# 3. How are youth literacy rates, per capita income, and population related?
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
# %matplotlib inline
# ## Section 2: Data Understanding
#
# The data has already been retrieved from the World Bank API, cleaned, and stored in csv files.
#
# Data Source: https://data.worldbank.org/
# Read the first dataset from csv file and create a dataframe
pd_lit_cat = pd.read_csv('data/worldbanklit.csv', index_col=0)
pd_lit_cat.head(10)
# +
# Read the worldbank-lit-inc.csv file.
# Since we only want the countries, not the aggregate groups, we will also read country-list.csv
# and use a right join to keep only countries
df = pd.read_csv('data/worldbank-lit-inc.csv', index_col=0)
df_countries = pd.read_csv('data/country-list.csv', index_col=0)
# Right join on the index keeps real countries only; sort ascending by literacy.
df_lit_country = df.join(df_countries.drop(columns=['country']), how='right').sort_values('youth_literacy')
print('Countries with missing youth literacy rates: {}'.format(df_lit_country['youth_literacy'].isna().sum()))
print('Countries with missing gnp: {}'.format(df_lit_country['gnp'].isna().sum()))
print('Countries with missing population: {}'.format(df_lit_country['population'].isna().sum()))
print('\nRows: {}, Columns:{}'.format(df_lit_country.shape[0], df_lit_country.shape[1]))
df_lit_country.head()
# -
# ## Section 3: Data Preparation
# The data has already been retrieved from the World Bank API, cleaned, and stored in csv files.
#
# See the data-scripts and data folders in the repository.
#
# Drop the row with all NaN values (the high-income aggregate has no literacy data)
pd_lit_cat.dropna(inplace=True)
pd_lit_cat.head()
# +
#Drop the rows with missing literacy rates or gnp
df_lit_country.dropna(inplace=True)
print('Rows: {}, Columns:{}'.format(df_lit_country.shape[0], df_lit_country.shape[1]))
df_lit_country.head()
# -
# ## Section 4: Data Modeling
#
# +
# Bubble chart: x = GNI per capita, y = youth literacy; bubble size and
# colour both encode log10(population).
c = np.log10(df_lit_country['population']).to_numpy()
# Legend labels: representative populations rounded to one significant figure.
key = [round(round(10**(n/10),-int(np.floor(np.log10(abs(10**(n/10))))))) for n in range(48,92,8)]
plt.rcParams["figure.figsize"] = (10,7)
scatter = plt.scatter(df_lit_country['gnp'],df_lit_country['youth_literacy'], s=(3*c)**2, c=c/10, alpha=0.5)
plt.legend(handles=scatter.legend_elements()[0], labels=key,loc="lower right", title="Population")
plt.title('Percent youth literacy, income, and population')
plt.xlabel('GNI per capita, Atlas method (current US$)')
plt.ylabel('Youth Literacy Percent')
plt.show();
# -
# #### Supervised regression models: one x-variable, gnp
# 1. Linear
# 2. Natural logarithm
# 3. Exponential
# 4. Decision tree
# +
#Regression one variable
X = np.array(df_lit_country['gnp']).reshape(-1, 1)
y = df_lit_country['youth_literacy']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .30, random_state=42)
lm_model = LinearRegression()
print('One variable: gnp')
# Four candidate models: linear y~x, logarithmic y~log(x), exponential
# (log(y)~x, fitted as linear on transformed data), and a shallow decision tree.
lm_model.fit(X_train, y_train)
print('Linear: Train score: {}, Test score: {}'.format(lm_model.score(X_train,y_train),lm_model.score(X_test,y_test)))
lm_model.fit(np.log(X_train), y_train)
print('Log: Train score: {}, Test score: {}'.format(
    lm_model.score(np.log(X_train),y_train),lm_model.score(np.log(X_test),y_test)))
lm_model.fit(X_train, np.log(y_train))
print('Exp: Train score: {}, Test score: {}'.format(
    lm_model.score(X_train,np.log(y_train)),lm_model.score(X_test,np.log(y_test))))
regr = DecisionTreeRegressor(max_depth=2)
regr.fit(X_train, y_train)
print('Decision Tree: Train score: {}, Test score: {}'.format(regr.score(X_train,y_train),regr.score(X_test,y_test)))
# -
# #### Supervised regression models: two x-variables, gnp and population
# 1. Linear
# 2. Natural logarithm
# 3. Exponential
# 4. Decision tree
# +
# Two variable regression
# Same four model families, now with population as a second feature.
X2 = df_lit_country.loc[:, ['gnp','population']]
y2 = df_lit_country['youth_literacy']
X2_train, X2_test, y2_train, y2_test = train_test_split(X2, y2, test_size = .30, random_state=42)
print('Two variable: gnp and population')
lm_model = LinearRegression()
lm_model.fit(X2_train, y2_train)
print('Linear: Train score: {}, Test score: {}'.format(lm_model.score(X2_train,y2_train),lm_model.score(X2_test,y2_test)))
lm_model.fit(np.log(X2_train), y2_train)
print('Log: Train score: {}, Test score: {}'.format(
    lm_model.score(np.log(X2_train),y2_train),lm_model.score(np.log(X2_test),y2_test)))
lm_model.fit(X2_train, np.log(y2_train))
print('Exp: Train score: {}, Test score: {}'.format(
    lm_model.score(X2_train,np.log(y2_train)),lm_model.score(X2_test,np.log(y2_test))))
regr = DecisionTreeRegressor(max_depth=2)
regr.fit(X2_train, y2_train)
print('Decision Tree: Train score: {}, Test score: {}'.format(regr.score(X2_train,y2_train),regr.score(X2_test,y2_test)))
# -
# Equation for best model
# NOTE(review): refits on the one-variable X_train (not X2) — the log model
# on gnp alone was the best performer above.
lm_model.fit(np.log(X_train), y_train)
print('y = {} * log(x1) + {}'.format(lm_model.coef_[0],lm_model.intercept_))
# The scores did not change when adding the population variable. The best model of those tested is logarithmic.
#
# The equation is: y = 8.563943116108106 * log(x) + 18.437000414628613 where x is GNP.
# Calculate score using entire dataset
print('Score: {}'.format(lm_model.score(np.log(X),y)))
# #### Checking for outliers
# Next I checked for and removed countries that had a GNP that would qualify as an outlier using the IQR method.
#Find gnp outliers using the standard 1.5*IQR fences
var = 'gnp'
Q1 = df_lit_country[var].quantile(0.25)
Q3 = df_lit_country[var].quantile(0.75)
IQR = Q3 - Q1
print('Q1:{}, Q3:{}, IQR:{}, Lower bound: {}, Upper bound:{}'.format(Q1,Q3,IQR,Q1-1.5*IQR,Q3+1.5*IQR))
lower = Q1 - 1.5*IQR
upper = Q3+1.5*IQR
print('Outliers: {}'.format(
    df_lit_country[df_lit_country[var]<lower][var].count() + df_lit_country[df_lit_country[var]>upper][var].count()))
#Filter outliers (only the upper fence is applied here)
df_filtered = df_lit_country[df_lit_country['gnp']<=upper]
# #### Supervised regression models with outliers removed
# 1. Linear
# 2. Natural logarithm
# 3. Exponential
# 4. Decision tree
# +
#Linear regression one variable
# Refit the four model families on the outlier-filtered data.
X = np.array(df_filtered['gnp']).reshape(-1, 1)
y = df_filtered['youth_literacy']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .30, random_state=42)
print('One variable: gnp; Outliers removed')
lm_model.fit(X_train, y_train)
print('Linear: Train score: {}, Test score: {}'.format(lm_model.score(X_train,y_train),lm_model.score(X_test,y_test)))
lm_model.fit(np.log(X_train), y_train)
print('Log: Train score: {}, Test score: {}'.format(
    lm_model.score(np.log(X_train),y_train),lm_model.score(np.log(X_test),y_test)))
lm_model.fit(X_train, np.log(y_train))
print('Exp: Train score: {}, Test score: {}'.format(
    lm_model.score(X_train,np.log(y_train)),lm_model.score(X_test,np.log(y_test))))
regr = DecisionTreeRegressor(max_depth=2)
regr.fit(X_train, y_train)
print('Decision Tree: Train score: {}, Test score: {}'.format(regr.score(X_train,y_train),regr.score(X_test,y_test)))
# -
# The best model of those tested is logarithmic with outliers removed.
# Refit the log model on the filtered training data and report its equation.
lm_model.fit(np.log(X_train), y_train)
print('y = {} * log(x1) + {}'.format(lm_model.coef_[0],lm_model.intercept_))
# Calculate score using entire dataset
print('Score: {}'.format(lm_model.score(np.log(X),y)))
# ## Section 5: Evaluate the Results
# ### Question 1
# ### How has youth literacy changed over time?
#
# Here we have % youth literacy (ages 15-24) for years 2000, 2005, 2010, and 2015 aggregated by income group.
# You can learn more about the country income groups here:
#
# https://datatopics.worldbank.org/world-development-indicators/the-world-by-income-and-region.html
# Youth literacy rates were unavailable for the high income group.
plt.rcParams["figure.figsize"] = (10,7)
# One line per income group: youth literacy at the four survey years.
plt.plot(['2000','2005','2010','2015'], pd_lit_cat.iloc[:,1:].transpose())
plt.legend(pd_lit_cat['country'], loc='lower right')
plt.title('Percent youth literacy by country income category')
plt.xlabel('Year')
plt.ylabel('% youth literacy (ages 15-24)')
plt.show();
# We see that literacy rates have increased over this time period for all income groups. The low income group had the most dramatic increase but that group is still far behind the other groups.
# ### Question 2
# ### Which countries have the lowest youth literacy rates?
df_lit_country.head(25)
max_countries = 25
# Horizontal bars for the 25 lowest-literacy countries (data already sorted ascending).
plt.barh(df_lit_country['country'].head(max_countries),df_lit_country['youth_literacy'].head(max_countries))
plt.title('Percent youth literacy lowest 25 countries')
plt.xlabel('Youth Literacy Percent')
plt.ylabel('Country')
plt.show()
# We see that Somalia has the lowest literacy rate in this dataset but the most recent figure available for this indicator was from 1972. It is likely to be out of date.
#
# The remaining countries in this query range from a low of about 31% to a high of 77%.
# There are 50 countries for which no youth literacy data is available.
# ### Question 3
# ### How are youth literacy rates, per capita income, and population related?
#
# Income is measured as GNI converted to current U.S. dollars.
#
# You can read more about GNI here:
#
# https://datahelpdesk.worldbank.org/knowledgebase/articles/77933-what-is-the-world-bank-atlas-method
# +
# Same bubble chart as in Section 4, repeated for the discussion below.
c = np.log10(df_lit_country['population']).to_numpy()
key = [round(round(10**(n/10),-int(np.floor(np.log10(abs(10**(n/10))))))) for n in range(48,92,8)]
plt.rcParams["figure.figsize"] = (10,7)
scatter = plt.scatter(df_lit_country['gnp'],df_lit_country['youth_literacy'], s=(3*c)**2, c=c/10, alpha=0.5)
plt.legend(handles=scatter.legend_elements()[0], labels=key,loc="lower right", title="Population")
plt.title('Percent youth literacy, income, and population')
plt.xlabel('GNI per capita, Atlas method (current US$)')
plt.ylabel('Youth Literacy Percent')
plt.show();
# -
# We can see from the scatterplot that there appears to be a relationship between income and youth literacy rate. All of the countries with low youth literacy rates also have low income. The relationship is not clear for the countries with high youth literacy rates. It is hard to tell from the graph if population is related to youth literacy rates. The model did not improve when adding the population variable to the model.
# Countries under literacy rate percentages
# Count countries below successive literacy thresholds (50%, 60%, ..., 90%).
percents = range(50,100,10)
for n in percents:
    print('Under {}%, {} countries'.format(n,df_lit_country[df_lit_country['youth_literacy']<n]['youth_literacy'].count()))
print('Out of {} countries'.format(df_lit_country.shape[0]))
# +
# Scatter plot of counties under 80%
max_countries = 27
df_low_lit = df_lit_country.head(max_countries)
c2 = np.log10(df_low_lit['population']).to_numpy()
plt.rcParams["figure.figsize"] = (10,7)
plt.scatter(df_low_lit['gnp'],df_low_lit['youth_literacy'], s=(3*c2)**2, c=c2/10, alpha=0.5)
# NOTE(review): the legend reuses `scatter`/`key` from the full-data plot
# above, so the size/colour key may not match this subset — confirm.
plt.legend(handles=scatter.legend_elements()[0], labels=key,loc="lower right", title="Population")
plt.title('Percent youth literacy, income, and population')
plt.xlabel('GNI per capita, Atlas method (current US$)')
plt.ylabel('Youth Literacy Percent')
plt.show();
# -
# Compare GNI summary statistics of the low-literacy subset vs. all countries.
print('Median GNI for countries under 80% youth literacy: {} USD'.format(df_low_lit['gnp'].median()))
print('Maximum GNI for countries under 80% youth literacy: {} USD'.format(df_low_lit['gnp'].max()))
print('Median GNI for all countries in the dataset: {} USD'.format(df_lit_country['gnp'].median()))
# +
#scatter plot with equation
plt.rcParams["figure.figsize"] = (10,7)
df = df_lit_country.sort_values('gnp')
scatter = plt.scatter(df_lit_country['gnp'],df_lit_country['youth_literacy'], s=(3*c)**2, c=c/10, alpha=0.5)
plt.legend(handles=scatter.legend_elements()[0], labels=key,loc="lower right", title="Population")
plt.title('Percent youth literacy, income, and population')
plt.xlabel('GNI per capita, Atlas method (current US$)')
plt.ylabel('Youth Literacy Percent')
# Overlay the fitted log curve. NOTE(review): coefficients are hard-coded
# from an earlier run rather than read from lm_model — confirm they match.
plt.plot(df['gnp'], 8.563943116108106 * np.log(df['gnp']) + 18.437000414628613)
plt.show();
# +
#Scatter plot with equation outliers removed
df_filtered = df_filtered.sort_values('gnp')
c = np.log10(df_filtered['population']).to_numpy()
key = [round(round(10**(n/10),-int(np.floor(np.log10(abs(10**(n/10))))))) for n in range(48,92,8)]
plt.rcParams["figure.figsize"] = (10,7)
scatter = plt.scatter(df_filtered['gnp'],df_filtered['youth_literacy'], s=(3*c)**2, c=c/10, alpha=0.5)
plt.legend(handles=scatter.legend_elements()[0], labels=key,loc="lower right", title="Population")
plt.title('Percent youth literacy, income, and population')
plt.xlabel('GNI per capita, Atlas method (current US$)')
plt.ylabel('Youth Literacy Percent')
# Overlay the log fit from the outlier-filtered model (hard-coded coefficients).
plt.plot(df_filtered['gnp'], 9.94755686025289 * np.log(df_filtered['gnp']) + 8.314456520728868)
plt.show();
# -
# We can see that of the models tested, the best model had a test score of 0.48. There is clearly a relationship between income and youth literacy but more variables could be investigated to help improve the model.
# ## Summary
#
# In this notebook, we saw the change in youth literacy rates since 2000. The rate has been increasing for all country income groups. We also saw which countries have the lowest youth literacy rates. Finally, we looked at the relationship between youth literacy and GNI per capita and saw that the countries with the lowest literacy rates had lower incomes.
| YouthLiteracyWorldBank.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Standard library
import glob
import os
import re
from collections import Counter

# Third party
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from lxml import etree
from numpy import array

# NOTE: a duplicate `from collections import Counter` and a second
# `import numpy as np` were removed; every previously bound name remains bound.

# Working directory holding the TEI-encoded Bible and output location.
wdir = "/home/jose/Dropbox/biblia/tb/"
file = "TEIBible" # "*.xml"
outdir = "/home/jose/Dropbox/biblia/tb/resulting data/"
# Per-book metric columns collected later in the notebook.
columns = ["len_chapters", "len_verses","len_chars","am_refs","am_1lev_quotes"]
#,"am_diff_people","am_diff_groups","am_diff_places",
# -
# Parse the single TEI XML corpus file and collect one title per <TEI> book element.
parser = etree.XMLParser(encoding='utf-8')
documento_xml = etree.parse(wdir+file+".xml", parser)
documento_root = documento_xml.getroot()
namespaces_concretos = {'tei':'http://www.tei-c.org/ns/1.0','xi':'http://www.w3.org/2001/XInclude'}
# NOTE(review): `with_tail` appears not to be an xpath() option in lxml; it is
# most likely passed through as an unused XPath variable — harmless, but
# probably unintended. Verify against the lxml docs before relying on it.
books_names = [title for book in documento_root.xpath('//tei:TEI', namespaces=namespaces_concretos, with_tail=True) for title in book.xpath('.//tei:title[1]/text()', namespaces=namespaces_concretos, with_tail=True) ]
# +
# For every book in the TEI corpus, print structural statistics: counts of
# verses/chapters/pericopes, quotation (<q>) distribution per verse, and
# referenced entities (<rs>) split by category prefix (per/org/pla/tim/wor).
for book in documento_root.xpath('//tei:TEI', namespaces=namespaces_concretos, with_tail=True):
    print("\n")
    print("title:", book.xpath('.//tei:title[1]/text()', namespaces=namespaces_concretos, with_tail=True))
    print("viaf:", book.xpath('.//tei:title/tei:idno[@type="viaf"]/text()', namespaces=namespaces_concretos, with_tail=True))
    print("code:", book.xpath('.//tei:title/tei:idno[@type="string"]/text()', namespaces=namespaces_concretos, with_tail=True))
    print("verses:", len(book.xpath('.//tei:ab[@type="verse"]', namespaces=namespaces_concretos, with_tail=True)))
    print("chapters:", len(book.xpath('.//tei:div[@type="chapter"]', namespaces=namespaces_concretos, with_tail=True)))
    print("pericopes:", len(book.xpath('.//tei:div[@type="pericope"]', namespaces=namespaces_concretos, with_tail=True)))
    verses = book.xpath('.//tei:ab[@type="verse"]', namespaces=namespaces_concretos, with_tail=True)
    #for verse in verses:
    print(len(verses))
    # Per-verse counts of quotations and entity references.
    q_in_verses = []
    rss_in_verses = []
    for verse in verses:
        q_in_verses.append(len(verse.xpath('.//tei:q', namespaces=namespaces_concretos, with_tail=True)))
        rss_in_verses.append(len(verse.xpath('.//tei:rs', namespaces=namespaces_concretos, with_tail=True)))
    #print(q_in_verses)
    q_in_verses = array(q_in_verses)
    rss_in_verses = array(rss_in_verses)
    #print("q in verses", sorted(q_in_verses))
    print("amount of q", len(q_in_verses))
    print("mean of q in verses", q_in_verses.mean())
    print("median of q in verses", np.median(q_in_verses))
    print("std of q in verses", np.std(q_in_verses, ddof=0))
    print("100th percentile", np.percentile(q_in_verses, q=100))
    # Indices of verses containing exactly 4 quotations.
    position = [i for i,x in enumerate(q_in_verses) if x == 4]
    print(position)
    # @key may hold several space-separated entity ids; flatten them.
    grouped_referenced_entities = book.xpath('.//tei:rs/@key', namespaces=namespaces_concretos, with_tail=True)
    print("references:", len(grouped_referenced_entities))
    entities_referenced = [entity for group_entities in grouped_referenced_entities for entity in group_entities.split(" ") ]
    print(len(grouped_referenced_entities))
    print(len(entities_referenced))
    print(len(set(entities_referenced)))
    print(Counter(entities_referenced).most_common(3))
    # Split entity ids by category substring (person/group/place/time/work).
    people_referenced = [entity for entity in entities_referenced if "per" in entity]
    groups_referenced = [entity for entity in entities_referenced if "org" in entity]
    places_referenced = [entity for entity in entities_referenced if "pla" in entity]
    times_referenced = [entity for entity in entities_referenced if "tim" in entity]
    works_referenced = [entity for entity in entities_referenced if "wor" in entity]
    print(Counter(people_referenced).most_common(1))
    print(len(people_referenced))
    print(len(set(people_referenced)))
    print(Counter(groups_referenced).most_common(1))
    print(len(groups_referenced))
    print(len(set(groups_referenced)))
    print(Counter(places_referenced).most_common(1))
    print(len(places_referenced))
    print(len(set(places_referenced)))
    print(Counter(times_referenced).most_common(1))
    print(len(times_referenced))
    print(len(set(times_referenced)))
    print(Counter(works_referenced).most_common(1))
    print(len(works_referenced))
    print(len(set(works_referenced)))
    print("quotations:", len(book.xpath('.//tei:q', namespaces=namespaces_concretos, with_tail=True)))
    # "1 level" = quotations directly inside a verse (not nested in another <q>).
    print("1 level quotations:", len(book.xpath('.//tei:ab[@type="verse"]/tei:q', namespaces=namespaces_concretos, with_tail=True)))
    # Speakers (@who) and addressees (@corresp), flattened like @key above.
    grouped_who = book.xpath('.//tei:q/@who', namespaces=namespaces_concretos, with_tail=True)
    who_entities = [entity for group_who in grouped_who for entity in group_who.split(" ") ]
    print(len(who_entities))
    print(len(set(who_entities)))
    grouped_toWohm = book.xpath('.//tei:q/@corresp', namespaces=namespaces_concretos, with_tail=True)
    toWhom_entities = [entity for group_toWohm in grouped_toWohm for entity in group_toWohm.split(" ") ]
    print(len(toWhom_entities))
    print(len(set(toWhom_entities)))
    # Quotation counts by discourse type.
    q_oral = book.xpath('.//tei:q[@type="oral"]', namespaces=namespaces_concretos, with_tail=True)
    q_dream = book.xpath('.//tei:q[@type="dream"]', namespaces=namespaces_concretos, with_tail=True)
    q_prayer = book.xpath('.//tei:q[@type="prayer"]', namespaces=namespaces_concretos, with_tail=True)
    q_oath = book.xpath('.//tei:q[@type="oath"]', namespaces=namespaces_concretos, with_tail=True)
    q_written = book.xpath('.//tei:q[@type="written"]', namespaces=namespaces_concretos, with_tail=True)
    q_song = book.xpath('.//tei:q[@type="song"]', namespaces=namespaces_concretos, with_tail=True)
    print(len(q_oral))
    print(len(q_dream))
    print(len(q_prayer))
    print(len(q_oath))
    print(len(q_written))
    print(len(q_song))
# -
# Peek at the first three book titles.
books_names[0:3]
# Empty per-book metrics table (one row per book), zero-filled for later population.
metadata_bible = pd.DataFrame(index = books_names, columns = columns).fillna(0)
metadata_bible
| code/python/.ipynb_checkpoints/20180319 dividing books-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Problem 1: Language Modeling with RNNs
# * <b>Learning Objective:</b> In this problem, you are going to implement simple recurrent neural networks to deeply understand how RNNs works.
# * <b>Provided Code:</b> We provide the skeletons of classes you need to complete. Forward checking and gradient checkings are provided for verifying your implementation as well.
# * <b>TODOs:</b> you will implement an LSTM and use it to train a model that can generate text using your own text source (novel, lyrics etc). <b>Also please do not forget to answer the two inline questions before LSTM.</b>
# +
# Assignment-provided modules: layers, gradient checking, optimizers, trainer.
from lib.rnn import *
from lib.layer_utils import *
from lib.grad_check import *
from lib.optim import *
from lib.train import *
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2
# -
# # Recurrent Neural Networks
# We will use recurrent neural network (RNN) language models for text generation. The file `lib/layer_utils.py` contains implementations of different layer types that are needed for recurrent neural networks, and the file `lib/rnn.py` uses these layers to implement a text generation model.
#
# We will implement LSTM layers in `lib/layer_utils.py`. As a reference, you are given complete codes for other layers including a vanilla RNN. Let's first look through the vanilla RNN, and other layers you may need for language modeling. The first part doesn't involve any coding. You can simply check the codes and run to make sure everything works as you expect.
# # Vanilla RNN: step forward
# Open the file `lib/layer_utils.py`. This file implements the forward and backward passes for different types of layers that are commonly used in recurrent neural networks.
#
# First check the implementation of the function `step_forward` which implements the forward pass for a single timestep of a vanilla recurrent neural network. We provide this function for you. After doing so run the following code. You should see errors less than 1e-8.
# +
# --- Check VanillaRNN.step_forward against hard-coded expected values. ---
# linspace inputs/weights make the expected output exactly reproducible.
N, D, H = 3, 10, 4
rnn = VanillaRNN(D, H, init_scale=0.02, name="rnn_test")
x = np.linspace(-0.4, 0.7, num=N*D).reshape(N, D)
prev_h = np.linspace(-0.2, 0.5, num=N*H).reshape(N, H)
rnn.params[rnn.wx_name] = np.linspace(-0.1, 0.9, num=D*H).reshape(D, H)
rnn.params[rnn.wh_name] = np.linspace(-0.3, 0.7, num=H*H).reshape(H, H)
rnn.params[rnn.b_name] = np.linspace(-0.2, 0.4, num=H)
next_h, _ = rnn.step_forward(x, prev_h)
expected_next_h = np.asarray([
    [-0.58172089, -0.50182032, -0.41232771, -0.31410098],
    [ 0.66854692, 0.79562378, 0.87755553, 0.92795967],
    [ 0.97934501, 0.99144213, 0.99646691, 0.99854353]])
# Expect relative error < 1e-8.
print('next_h error: ', rel_error(expected_next_h, next_h))
# -
# # Vanilla RNN: step backward
# In the `VanillaRNN` class in the file `lib/layer_utils.py` check the `step_backward` function. After doing so run the following to numerically gradient check the implementation. You should see errors less than `1e-8`.
# +
# --- Numeric gradient check for VanillaRNN.step_backward. ---
np.random.seed(599)
N, D, H = 4, 5, 6
rnn = VanillaRNN(D, H, init_scale=0.02, name="rnn_test")
x = np.random.randn(N, D)
h = np.random.randn(N, H)
Wx = np.random.randn(D, H)
Wh = np.random.randn(H, H)
b = np.random.randn(H)
rnn.params[rnn.wx_name] = Wx
rnn.params[rnn.wh_name] = Wh
rnn.params[rnn.b_name] = b
out, meta = rnn.step_forward(x, h)
dnext_h = np.random.randn(*out.shape)
# NOTE(review): these lambdas ignore their argument for Wx/Wh/b; they appear to
# rely on eval_numerical_gradient_array perturbing the arrays in place (the
# same objects stored in rnn.params) — confirm against lib/grad_check.py.
dx_num = eval_numerical_gradient_array(lambda x: rnn.step_forward(x, h)[0], x, dnext_h)
dprev_h_num = eval_numerical_gradient_array(lambda h: rnn.step_forward(x, h)[0], h, dnext_h)
dWx_num = eval_numerical_gradient_array(lambda Wx: rnn.step_forward(x, h)[0], Wx, dnext_h)
dWh_num = eval_numerical_gradient_array(lambda Wh: rnn.step_forward(x, h)[0], Wh, dnext_h)
db_num = eval_numerical_gradient_array(lambda b: rnn.step_forward(x, h)[0], b, dnext_h)
dx, dprev_h, dWx, dWh, db = rnn.step_backward(dnext_h, meta)
print('dx error: ', rel_error(dx_num, dx))
print('dprev_h error: ', rel_error(dprev_h_num, dprev_h))
print('dWx error: ', rel_error(dWx_num, dWx))
print('dWh error: ', rel_error(dWh_num, dWh))
print('db error: ', rel_error(db_num, db))
# -
# # Vanilla RNN: forward
# Now that you have checked the forward and backward passes for a single timestep of a vanilla RNN, you will see how they are combined to implement a RNN that process an entire sequence of data.
#
# In the `VanillaRNN` class in the file `lib/layer_utils.py`, check the function `forward`. We provide this function for you. This is implemented using the `step_forward` function that you defined above. After doing so run the following to check the implementation. You should see errors less than `1e-7`.
# +
# --- Check VanillaRNN.forward over a full (N, T, D) sequence. ---
N, T, D, H = 2, 3, 4, 5
rnn = VanillaRNN(D, H, init_scale=0.02, name="rnn_test")
x = np.linspace(-0.1, 0.3, num=N*T*D).reshape(N, T, D)
h0 = np.linspace(-0.3, 0.1, num=N*H).reshape(N, H)
Wx = np.linspace(-0.2, 0.4, num=D*H).reshape(D, H)
Wh = np.linspace(-0.4, 0.1, num=H*H).reshape(H, H)
b = np.linspace(-0.7, 0.1, num=H)
rnn.params[rnn.wx_name] = Wx
rnn.params[rnn.wh_name] = Wh
rnn.params[rnn.b_name] = b
h = rnn.forward(x, h0)
expected_h = np.asarray([
    [
        [-0.42070749, -0.27279261, -0.11074945, 0.05740409, 0.22236251],
        [-0.39525808, -0.22554661, -0.0409454, 0.14649412, 0.32397316],
        [-0.42305111, -0.24223728, -0.04287027, 0.15997045, 0.35014525],
    ],
    [
        [-0.55857474, -0.39065825, -0.19198182, 0.02378408, 0.23735671],
        [-0.27150199, -0.07088804, 0.13562939, 0.33099728, 0.50158768],
        [-0.51014825, -0.30524429, -0.06755202, 0.17806392, 0.40333043]]])
print('h error: ', rel_error(expected_h, h))
# -
# # Vanilla RNN: backward
# In the file `lib/layer_utils.py`, check the backward pass for a vanilla RNN in the function `backward` in the `VanillaRNN` class. We provide this function for you. This runs back-propagation over the entire sequence, calling into the `step_backward` function defined above. You should see errors less than 5e-7.
# +
# --- Numeric gradient check for VanillaRNN.backward over a sequence. ---
np.random.seed(599)
N, D, T, H = 2, 3, 10, 5
rnn = VanillaRNN(D, H, init_scale=0.02, name="rnn_test")
x = np.random.randn(N, T, D)
h0 = np.random.randn(N, H)
Wx = np.random.randn(D, H)
Wh = np.random.randn(H, H)
b = np.random.randn(H)
rnn.params[rnn.wx_name] = Wx
rnn.params[rnn.wh_name] = Wh
rnn.params[rnn.b_name] = b
out = rnn.forward(x, h0)
dout = np.random.randn(*out.shape)
dx, dh0 = rnn.backward(dout)
dx_num = eval_numerical_gradient_array(lambda x: rnn.forward(x, h0), x, dout)
dh0_num = eval_numerical_gradient_array(lambda h0: rnn.forward(x, h0), h0, dout)
dWx_num = eval_numerical_gradient_array(lambda Wx: rnn.forward(x, h0), Wx, dout)
dWh_num = eval_numerical_gradient_array(lambda Wh: rnn.forward(x, h0), Wh, dout)
db_num = eval_numerical_gradient_array(lambda b: rnn.forward(x, h0), b, dout)
# Parameter gradients are accumulated on the layer object, not returned.
dWx = rnn.grads[rnn.wx_name]
dWh = rnn.grads[rnn.wh_name]
db = rnn.grads[rnn.b_name]
print('dx error: ', rel_error(dx_num, dx))
print('dh0 error: ', rel_error(dh0_num, dh0))
print('dWx error: ', rel_error(dWx_num, dWx))
print('dWh error: ', rel_error(dWh_num, dWh))
print('db error: ', rel_error(db_num, db))
# -
# # Word embedding: forward
# In deep learning systems, we commonly represent words using vectors. Each word of the vocabulary will be associated with a vector, and these vectors will be learned jointly with the rest of the system.
#
# In the file `lib/layer_utils.py`, check the function `forward` in the `word_embedding` class to convert words (represented by integers) into vectors. We provide this function for you. Run the following to check the implementation. You should see error around `1e-8`.
# +
# --- Check word_embedding.forward: integer word ids -> embedding rows. ---
N, T, V, D = 2, 4, 5, 3
we = word_embedding(V, D, name="we")
x = np.asarray([[0, 3, 1, 2], [2, 1, 0, 3]])
W = np.linspace(0, 1, num=V*D).reshape(V, D)
we.params[we.w_name] = W
out = we.forward(x)
expected_out = np.asarray([
    [[ 0., 0.07142857, 0.14285714],
     [ 0.64285714, 0.71428571, 0.78571429],
     [ 0.21428571, 0.28571429, 0.35714286],
     [ 0.42857143, 0.5, 0.57142857]],
    [[ 0.42857143, 0.5, 0.57142857],
     [ 0.21428571, 0.28571429, 0.35714286],
     [ 0., 0.07142857, 0.14285714],
     [ 0.64285714, 0.71428571, 0.78571429]]])
print('out error: ', rel_error(expected_out, out))
# -
# # Word embedding: backward
# Check the backward pass for the word embedding function in the function `backward` in the `word_embedding` class. We provide this function for you. After doing so run the following to numerically gradient check the implementation. You should see errors less than `1e-11`.
# +
# --- Numeric gradient check for word_embedding.backward (dW only; x is ints). ---
np.random.seed(599)
N, T, V, D = 50, 3, 5, 6
we = word_embedding(V, D, name="we")
x = np.random.randint(V, size=(N, T))
W = np.random.randn(V, D)
we.params[we.w_name] = W
out = we.forward(x)
dout = np.random.randn(*out.shape)
we.backward(dout)
dW = we.grads[we.w_name]
f = lambda W: we.forward(x)
dW_num = eval_numerical_gradient_array(f, W, dout)
print('dW error: ', rel_error(dW, dW_num))
# -
# ### Inline Question: Why do we want to represent words using word embeddings instead of one hot vector ( https://en.wikipedia.org/wiki/One-hot )? Provide one advantage of word embeddings.
#
# #### Ans: We prefer representing the words using word embedding instead of one hot vectors, because one hot vectors are high-dimensional and sparse. This creates a problem in the case when we are working with a big dataset, as a one hot vector representation can be computationally inefficient.
# # Temporal Fully Connected layer
# At every timestep we use an affine function to transform the RNN hidden vector at that timestep into scores for each word in the vocabulary. Because this is very similar to the fully connected layer that you implemented in assignment 1, we have provided this function for you in the `forward` and `backward` functions in the file `lib/layer_util.py`. Run the following to perform numeric gradient checking on the implementation. You should see errors less than 1e-9.
# +
np.random.seed(599)
# Gradient check for temporal affine layer
N, T, D, M = 2, 3, 4, 5
t_fc = temporal_fc(D, M, init_scale=0.02, name='test_t_fc')
x = np.random.randn(N, T, D)
w = np.random.randn(D, M)
b = np.random.randn(M)
t_fc.params[t_fc.w_name] = w
t_fc.params[t_fc.b_name] = b
out = t_fc.forward(x)
dout = np.random.randn(*out.shape)
dx_num = eval_numerical_gradient_array(lambda x: t_fc.forward(x), x, dout)
dw_num = eval_numerical_gradient_array(lambda w: t_fc.forward(x), w, dout)
db_num = eval_numerical_gradient_array(lambda b: t_fc.forward(x), b, dout)
dx = t_fc.backward(dout)
dw = t_fc.grads[t_fc.w_name]
db = t_fc.grads[t_fc.b_name]
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))
# -
# # Temporal Softmax loss
# In an RNN language model, at every timestep we produce a score for each word in the vocabulary. We know the ground-truth word at each timestep, so we use a softmax loss function to compute loss and gradient at each timestep. We sum the losses over time and average them over the minibatch.
#
# We provide this loss function for you; look at the `temporal_softmax_loss` function in the file `lib/layer_utils.py`.
#
# Run the following cell to sanity check the loss and perform numeric gradient checking on the function. You should see an error for dx less than 1e-7.
# +
loss_func = temporal_softmax_loss()
# Sanity check for temporal softmax loss
N, T, V = 100, 1, 10

def check_loss(N, T, V, p):
    """Print the temporal softmax loss for near-uniform random scores.

    N: batch size, T: timesteps, V: vocabulary size,
    p: probability that a (sample, timestep) position is kept by the mask.
    With near-zero scores the expected loss is about T * ln(V).
    """
    x = 0.001 * np.random.randn(N, T, V)
    y = np.random.randint(V, size=(N, T))
    mask = np.random.rand(N, T) <= p
    print(loss_func.forward(x, y, mask))

check_loss(100, 1, 10, 1.0) # Should be about 2.3
check_loss(100, 10, 10, 1.0) # Should be about 23
check_loss(5000, 10, 10, 0.1) # Should be about 2.3
# Gradient check for temporal softmax loss
N, T, V = 7, 8, 9
x = np.random.randn(N, T, V)
y = np.random.randint(V, size=(N, T))
mask = (np.random.rand(N, T) > 0.5)
loss = loss_func.forward(x, y, mask)
dx = loss_func.backward()
dx_num = eval_numerical_gradient(lambda x: loss_func.forward(x, y, mask), x, verbose=False)
print('dx error: ', rel_error(dx, dx_num))
# -
# ### Inline Question: Using softmax function over vocabulary for word prediction is common in language modeling. However, this technique is not perfect, what do you think are the major disadvantages of it? Please provide one disadvantage of softmax function over vocabulary.
#
# #### Ans: One disadvantage of using Softmax function over the vocabulary is that it is computationally inefficient and is prone to problems of saturation(0 gradient). When the vocabulary is very large, computing all inner products between the context vector and all embeddings becomes very slow during training, even with modern GPUs and techniques like matrix-matrix multiplications.
# # RNN for language modeling
# Now that you have the necessary layers, you can combine them to build an language modeling model. Open the file `lib/rnn.py` and look at the `TestRNN` class.
#
# Check the forward and backward pass of the model in the `loss` function. For now you only see the implementation of the case where `cell_type='rnn'` for vanialla RNNs; you will implement the LSTM case later. After doing so, run the following to check the forward pass using a small test case; you should see error less than `1e-10`.
# +
# --- Forward check of TestRNN (cell_type='rnn') against a hard-coded loss. ---
N, D, H = 10, 20, 40
V = 4
T = 13
model = TestRNN(D, H, cell_type='rnn')
loss_func = temporal_softmax_loss()
# Set all model parameters to fixed values
for k, v in model.params.items():
    model.params[k] = np.linspace(-1.4, 1.3, num=v.size).reshape(*v.shape)
model.assign_params()
features = np.linspace(-1.5, 0.3, num=(N * D * T)).reshape(N, T, D)
h0 = np.linspace(-1.5, 0.5, num=(N*H)).reshape(N, H)
labels = (np.arange(N * T) % V).reshape(N, T)
pred = model.forward(features, h0)
# You'll need this
mask = np.ones((N, T))
loss = loss_func.forward(pred, labels, mask)
dLoss = loss_func.backward()
expected_loss = 51.0949189134
print('loss: ', loss)
print('expected loss: ', expected_loss)
print('difference: ', abs(loss - expected_loss))
# -
# Run the following cell to perform numeric gradient checking on the `TestRNN` class; you should see errors around `1e-7` or less.
# +
# --- Full-model numeric gradient check on a tiny problem. ---
np.random.seed(599)
batch_size = 2
timesteps = 3
input_dim = 4
hidden_dim = 6
label_size = 4
labels = np.random.randint(label_size, size=(batch_size, timesteps))
features = np.random.randn(batch_size, timesteps, input_dim)
h0 = np.random.randn(batch_size, hidden_dim)
model = TestRNN(input_dim, hidden_dim, cell_type='rnn')
loss_func = temporal_softmax_loss()
pred = model.forward(features, h0)
# You'll need this
mask = np.ones((batch_size, timesteps))
loss = loss_func.forward(pred, labels, mask)
dLoss = loss_func.backward()
dout, dh0 = model.backward(dLoss)
grads = model.grads
for param_name in sorted(grads):
    # The lambda ignores its argument: eval_numerical_gradient perturbs
    # model.params[param_name] in place before each call.
    f = lambda _: loss_func.forward(model.forward(features, h0), labels, mask)
    param_grad_num = eval_numerical_gradient(f, model.params[param_name], verbose=False, h=1e-6)
    e = rel_error(param_grad_num, grads[param_name])
    print('%s relative error: %e' % (param_name, e))
# -
# # LSTM
# Vanilla RNNs can be tough to train on long sequences due to vanishing and exploding gradients. LSTMs solve this problem by replacing the simple update rule of the vanilla RNN with a gating mechanism as follows.
#
# Similar to the vanilla RNN, at each timestep we receive an input $x_t\in\mathbb{R}^D$ and the previous hidden state $h_{t-1}\in\mathbb{R}^H$; what is different is that the LSTM also maintains an $H$-dimensional *cell state*, so we also receive the previous cell state $c_{t-1}\in\mathbb{R}^H$. The learnable parameters of the LSTM are an *input-to-hidden* matrix $W_x\in\mathbb{R}^{4H\times D}$, a *hidden-to-hidden* matrix $W_h\in\mathbb{R}^{4H\times H}$ and a *bias vector* $b\in\mathbb{R}^{4H}$.
#
# At each timestep we first compute an *activation vector* $a\in\mathbb{R}^{4H}$ as $a=W_xx_t + W_hh_{t-1}+b$. We then divide this into four vectors $a_i,a_f,a_o,a_g\in\mathbb{R}^H$ where $a_i$ consists of the first $H$ elements of $a$, $a_f$ is the next $H$ elements of $a$, etc. We then compute the *input gate* $i\in\mathbb{R}^H$, *forget gate* $f\in\mathbb{R}^H$, *output gate* $o\in\mathbb{R}^H$ and *block input* $g\in\mathbb{R}^H$ as
#
# $$
# \begin{align*}
# i = \sigma(a_i) \hspace{2pc}
# f = \sigma(a_f) \hspace{2pc}
# o = \sigma(a_o) \hspace{2pc}
# g = \tanh(a_g)
# \end{align*}
# $$
#
# where $\sigma$ is the sigmoid function and $\tanh$ is the hyperbolic tangent, both applied elementwise.
#
# Finally we compute the next cell state $c_t$ and next hidden state $h_t$ as
#
# $$
# c_{t} = f\odot c_{t-1} + i\odot g \hspace{4pc}
# h_t = o\odot\tanh(c_t)
# $$
#
# where $\odot$ is the elementwise product of vectors.
#
# In the rest of the notebook we will implement the LSTM update rule and apply it to the text generation task.
#
# In the code, we assume that data is stored in batches so that $X_t \in \mathbb{R}^{N\times D}$, and will work with *transposed* versions of the parameters: $W_x \in \mathbb{R}^{D \times 4H}$, $W_h \in \mathbb{R}^{H\times 4H}$ so that activations $A \in \mathbb{R}^{N\times 4H}$ can be computed efficiently as $A = X_t W_x + H_{t-1} W_h$
# # LSTM: step forward
# Implement the forward pass for a single timestep of an LSTM in the `step_forward` function in the file `lib/layer_utils.py`. This should be similar to the `step_forward` function that you implemented above, but using the LSTM update rule instead.
#
# Once you are done, run the following to perform a simple test of your implementation. You should see errors around `1e-8` or less.
# +
# --- Check LSTM.step_forward against hard-coded expected h and c. ---
N, D, H = 3, 4, 5
lstm = LSTM(D, H, init_scale=0.02, name='test_lstm')
x = np.linspace(-0.4, 1.2, num=N*D).reshape(N, D)
prev_h = np.linspace(-0.3, 0.7, num=N*H).reshape(N, H)
prev_c = np.linspace(-0.4, 0.9, num=N*H).reshape(N, H)
# LSTM packs the i/f/o/g gate weights into 4H-wide matrices.
Wx = np.linspace(-2.1, 1.3, num=4*D*H).reshape(D, 4 * H)
Wh = np.linspace(-0.7, 2.2, num=4*H*H).reshape(H, 4 * H)
b = np.linspace(0.3, 0.7, num=4*H)
lstm.params[lstm.wx_name] = Wx
lstm.params[lstm.wh_name] = Wh
lstm.params[lstm.b_name] = b
next_h, next_c, cache = lstm.step_forward(x, prev_h, prev_c)
expected_next_h = np.asarray([
    [ 0.24635157, 0.28610883, 0.32240467, 0.35525807, 0.38474904],
    [ 0.49223563, 0.55611431, 0.61507696, 0.66844003, 0.7159181 ],
    [ 0.56735664, 0.66310127, 0.74419266, 0.80889665, 0.858299 ]])
expected_next_c = np.asarray([
    [ 0.32986176, 0.39145139, 0.451556, 0.51014116, 0.56717407],
    [ 0.66382255, 0.76674007, 0.87195994, 0.97902709, 1.08751345],
    [ 0.74192008, 0.90592151, 1.07717006, 1.25120233, 1.42395676]])
print('next_h error: ', rel_error(expected_next_h, next_h))
print('next_c error: ', rel_error(expected_next_c, next_c))
# -
# # LSTM: step backward
# Implement the backward pass for a single LSTM timestep in the function `step_backward` in the file `lib/layer_utils.py`. Once you are done, run the following to perform numeric gradient checking on your implementation. You should see errors around `1e-6` or less.
# +
# --- Numeric gradient check for LSTM.step_backward. ---
np.random.seed(599)
N, D, H = 4, 5, 6
lstm = LSTM(D, H, init_scale=0.02, name='test_lstm')
x = np.random.randn(N, D)
prev_h = np.random.randn(N, H)
prev_c = np.random.randn(N, H)
Wx = np.random.randn(D, 4 * H)
Wh = np.random.randn(H, 4 * H)
b = np.random.randn(4 * H)
lstm.params[lstm.wx_name] = Wx
lstm.params[lstm.wh_name] = Wh
lstm.params[lstm.b_name] = b
next_h, next_c, cache = lstm.step_forward(x, prev_h, prev_c)
dnext_h = np.random.randn(*next_h.shape)
dnext_c = np.random.randn(*next_c.shape)
# *_h lambdas select the hidden output, *_c the cell output.
# NOTE(review): most of these lambdas ignore their argument and appear to rely
# on eval_numerical_gradient_array perturbing the captured arrays in place —
# confirm against lib/grad_check.py.
fx_h = lambda x: lstm.step_forward(x, prev_h, prev_c)[0]
fh_h = lambda h: lstm.step_forward(x, prev_h, prev_c)[0]
fc_h = lambda c: lstm.step_forward(x, prev_h, prev_c)[0]
fWx_h = lambda Wx: lstm.step_forward(x, prev_h, prev_c)[0]
fWh_h = lambda Wh: lstm.step_forward(x, prev_h, prev_c)[0]
fb_h = lambda b: lstm.step_forward(x, prev_h, prev_c)[0]
fx_c = lambda x: lstm.step_forward(x, prev_h, prev_c)[1]
fh_c = lambda h: lstm.step_forward(x, prev_h, prev_c)[1]
fc_c = lambda c: lstm.step_forward(x, prev_h, prev_c)[1]
fWx_c = lambda Wx: lstm.step_forward(x, prev_h, prev_c)[1]
fWh_c = lambda Wh: lstm.step_forward(x, prev_h, prev_c)[1]
fb_c = lambda b: lstm.step_forward(x, prev_h, prev_c)[1]
num_grad = eval_numerical_gradient_array
# Total numeric gradient = contribution through next_h + through next_c.
dx_num = num_grad(fx_h, x, dnext_h) + num_grad(fx_c, x, dnext_c)
dh_num = num_grad(fh_h, prev_h, dnext_h) + num_grad(fh_c, prev_h, dnext_c)
dc_num = num_grad(fc_h, prev_c, dnext_h) + num_grad(fc_c, prev_c, dnext_c)
dWx_num = num_grad(fWx_h, Wx, dnext_h) + num_grad(fWx_c, Wx, dnext_c)
dWh_num = num_grad(fWh_h, Wh, dnext_h) + num_grad(fWh_c, Wh, dnext_c)
db_num = num_grad(fb_h, b, dnext_h) + num_grad(fb_c, b, dnext_c)
dx, dh, dc, dWx, dWh, db = lstm.step_backward(dnext_h, dnext_c, cache)
print('dx error: ', rel_error(dx_num, dx))
print('dh error: ', rel_error(dh_num, dh))
print('dc error: ', rel_error(dc_num, dc))
print('dWx error: ', rel_error(dWx_num, dWx))
print('dWh error: ', rel_error(dWh_num, dWh))
print('db error: ', rel_error(db_num, db))
# -
# # LSTM: forward
# In the class `lstm` in the file `lib/layer_utils.py`, implement the `forward` function to run an LSTM forward on an entire timeseries of data.
#
# When you are done, run the following to check your implementation. You should see an error around `1e-7`.
# +
# --- Check LSTM.forward over a full (N, T, D) sequence. ---
N, D, H, T = 2, 5, 4, 3
lstm = LSTM(D, H, init_scale=0.02, name='test_lstm')
x = np.linspace(-0.4, 0.6, num=N*T*D).reshape(N, T, D)
h0 = np.linspace(-0.4, 0.8, num=N*H).reshape(N, H)
Wx = np.linspace(-0.2, 0.9, num=4*D*H).reshape(D, 4 * H)
Wh = np.linspace(-0.3, 0.6, num=4*H*H).reshape(H, 4 * H)
b = np.linspace(0.2, 0.7, num=4*H)
lstm.params[lstm.wx_name] = Wx
lstm.params[lstm.wh_name] = Wh
lstm.params[lstm.b_name] = b
h = lstm.forward(x, h0)
expected_h = np.asarray([
    [[ 0.01764008, 0.01823233, 0.01882671, 0.0194232 ],
     [ 0.11287491, 0.12146228, 0.13018446, 0.13902939],
     [ 0.31358768, 0.33338627, 0.35304453, 0.37250975]],
    [[ 0.45767879, 0.4761092, 0.4936887, 0.51041945],
     [ 0.6704845, 0.69350089, 0.71486014, 0.7346449 ],
     [ 0.81733511, 0.83677871, 0.85403753, 0.86935314]]])
print('h error: ', rel_error(expected_h, h))
# -
# # LSTM: backward
# Implement the backward pass for an LSTM over an entire timeseries of data in the function `backward` in the `lstm` class in the file `lib/layer_utils.py`. When you are done, run the following to perform numeric gradient checking on your implementation. You should see errors around `1e-7` or less.
# +
# --- Numeric gradient check for LSTM.backward over a sequence. ---
np.random.seed(599)
N, D, T, H = 2, 3, 10, 6
lstm = LSTM(D, H, init_scale=0.02, name='test_lstm')
x = np.random.randn(N, T, D)
h0 = np.random.randn(N, H)
Wx = np.random.randn(D, 4 * H)
Wh = np.random.randn(H, 4 * H)
b = np.random.randn(4 * H)
lstm.params[lstm.wx_name] = Wx
lstm.params[lstm.wh_name] = Wh
lstm.params[lstm.b_name] = b
out = lstm.forward(x, h0)
dout = np.random.randn(*out.shape)
dx, dh0 = lstm.backward(dout)
# Parameter gradients live on the layer object after backward().
dWx = lstm.grads[lstm.wx_name]
dWh = lstm.grads[lstm.wh_name]
db = lstm.grads[lstm.b_name]
dx_num = eval_numerical_gradient_array(lambda x: lstm.forward(x, h0), x, dout)
dh0_num = eval_numerical_gradient_array(lambda h0: lstm.forward(x, h0), h0, dout)
dWx_num = eval_numerical_gradient_array(lambda Wx: lstm.forward(x, h0), Wx, dout)
dWh_num = eval_numerical_gradient_array(lambda Wh: lstm.forward(x, h0), Wh, dout)
db_num = eval_numerical_gradient_array(lambda b: lstm.forward(x, h0), b, dout)
print('dx error: ', rel_error(dx_num, dx))
print('dh0 error: ', rel_error(dh0_num, dh0))
print('dWx error: ', rel_error(dWx_num, dWx))
print('dWh error: ', rel_error(dWh_num, dWh))
print('db error: ', rel_error(db_num, db))
# -
# # LSTM model
#
# Now that you have implemented an LSTM, update the initialization of the `TestRNN` class in the file `lib/rnn.py` to handle the case where `self.cell_type` is `lstm`. This should require adding only one line of codes.
#
# Once you have done so, run the following to check your implementation. You should see a difference of less than `1e-10`.
# +
# --- Forward check of TestRNN with cell_type='lstm' against a hard-coded loss. ---
N, D, H = 10, 20, 40
V = 4
T = 13
model = TestRNN(D, H, cell_type='lstm')
loss_func = temporal_softmax_loss()
# Set all model parameters to fixed values
for k, v in model.params.items():
    model.params[k] = np.linspace(-1.4, 1.3, num=v.size).reshape(*v.shape)
model.assign_params()
features = np.linspace(-1.5, 0.3, num=(N * D * T)).reshape(N, T, D)
h0 = np.linspace(-1.5, 0.5, num=(N*H)).reshape(N, H)
labels = (np.arange(N * T) % V).reshape(N, T)
pred = model.forward(features, h0)
# You'll need this
mask = np.ones((N, T))
loss = loss_func.forward(pred, labels, mask)
dLoss = loss_func.backward()
expected_loss = 49.2140256354
print('loss: ', loss)
print('expected loss: ', expected_loss)
print('difference: ', abs(loss - expected_loss))
# -
# # Let's have some fun!!
#
# Now you have everything you need for language modeling. You will work on text generation using RNNs from any text source (novel, lyrics). The network is trained to predict what word is coming next given a previous word. Once you train the model, by looping the network, you can keep generating new text which mimics the original text source. Let's first put your source text you want to model in the following text box!
#
# **Notice**: in order to run next cell, paste your own text words into the form and hit Enter. Do not use notebook's own 'run cell' since it wouldn't read in anything.
# Interactive text box used to paste the training corpus.
from ipywidgets import widgets, interact
from IPython.display import display
input_text = widgets.Text()
input_text.value = "Paste your own text words here and hit Enter."
def f(x):
    """Callback fired when the user presses Enter; echoes the submitted text."""
    print('set!!')
    print(x.value)
# NOTE(review): Text.on_submit is deprecated in recent ipywidgets releases
# (replaced by observe/continuous_update) — confirm against the pinned version.
input_text.on_submit(f)
# Last expression displays the widget in the notebook output.
input_text
# # copy paste your text source in the box below and hit enter.
# If you don't have any preference,
# you can copy paste the lyrics from here https://www.azlyrics.com/lyrics/ylvis/thefox.html
# simply run the following code to construct training dataset<br />
# +
import re
# Tokenise the pasted corpus and encode each word as an integer index.
# all words are converted into lower case
text = re.split(' |\n', input_text.value.lower())
outputSize = len(text)          # total number of tokens
word_list = list(set(text))     # vocabulary (unique words)
dataSize = len(word_list)       # vocabulary size
# Build the word -> index map once (O(1) lookup) instead of calling
# np.where over the whole vocabulary for every token (was O(n^2) overall).
word_to_idx = {w: i for i, w in enumerate(word_list)}
output = np.zeros(outputSize)
for i in range(0, outputSize):
    output[i] = word_to_idx[text[i]]
# np.int was an alias of the builtin int and was removed in NumPy 1.24;
# astype(int) is the drop-in, behaviour-identical replacement.
data_labels = output.astype(int)
# Next-word prediction: targets are the labels shifted left by one.
gt_labels = data_labels[1:]
data_labels = data_labels[:-1]
print('Input text size: %s' % outputSize)
print('Input word number: %s' % dataSize)
# -
# We defined a LanguageModelRNN class for you to fill in the TODO block in rnn.py.
# * Here design a recurrent neural network consisting of a word_embedding layer, recurrent unit, and temporal fully connected layers so that they match the provided dimensions.
# * Please read the train.py under lib directory carefully and complete the TODO blocks in the train_net function first.
# +
# you can change the following parameters.
D = 10 # input dimention
H = 20 # hidden space dimention
T = 50 # timesteps
N = 10 # batch size
max_epoch = 100 # max epoch size
loss_func = temporal_softmax_loss()
# you can change the cell_type between 'rnn' and 'lstm'.
model = LanguageModelRNN(dataSize, D, H, cell_type='lstm')
# Adam optimiser with learning rate 5e-4.
optimizer = Adam(model, 5e-4)
# data_labels / gt_labels come from the dataset-construction cell above.
data = { 'data_train': data_labels, 'labels_train': gt_labels }
results = train_net(data, model, loss_func, optimizer, timesteps=T, batch_size=N, max_epochs=max_epoch, verbose=True)
# -
# Simply run the following code block to check the loss and accuracy curve.
# +
# Unpack the training artefacts returned by train_net.
opt_params, loss_hist, train_acc_hist = results
# Keep only every 100th loss value so the markers stay readable.
thinned_loss = loss_hist[1::100]
# Top panel: training loss over (sub-sampled) iterations.
plt.subplot(2, 1, 1)
plt.title("Training loss")
plt.plot(thinned_loss, "-o")
plt.xlabel("Iteration")
# Bottom panel: training accuracy per epoch.
plt.subplot(2, 1, 2)
plt.title("Accuracy")
plt.plot(train_acc_hist, "-o", label="Training")
plt.xlabel("Epoch")
plt.legend(loc="lower right")
plt.gcf().set_size_inches(15, 12)
plt.show()
# -
# Now you can generate a text using the trained model. You can also start from a specific word in the original text. If you trained your model with "The Fox", you can check how well it is modeled by starting from "dog", "cat", etc.
# +
# you can change the generated text length below.
text_length = 100
idx = 0
# you also can start from specific word.
# since the words are all converted into lower case, make sure you put lower case below.
# np.where returns a tuple of index arrays; take the first match explicitly.
# Calling int() on a size-1 ndarray is deprecated since NumPy 1.25.
idx = int(np.where(np.asarray(word_list) == 'dog')[0][0])
# sample from the trained model
words = model.sample(idx, text_length-1)
# convert indices into words
output = [ word_list[i] for i in words]
print(' '.join(output))
# -
# ### Inline Question: Play around with different settings to get better understanding of its behavior and describe your observation. Make sure at least you cover the following points:
# * Vanilla RNN vs LSTM (you can set different time steps and test with longer texts.)
# * Problems in these approaches (there's no unique answer. just explain your own opinion from experiments.)
# #### Ans: LSTM performs better than Vanilla RNN in terms of the vanishing gradient problem, as it does a better job avoiding this problem. Also, LSTM can model longer dependences as compared to the Vanilla RNN. But in some special cases, Vanilla RNNs can match the performance of the LSTMs, like in the case when for initializing the recurrent weight matrix we use the identity matrix.
# RNNs typically face the problem of Vanishing and exploding gradients. Also, the range of context that can be accessed by the standard Vanilla RNNs is limited, which could be a problem in practical cases.
# LSTM, on the other hand, is relatively more complex and is slower as compared to the other normal activation functions(like ReLu, tanh, or sigmoid).
| Assignment 2/Problem_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Python 2 tutorial cells: demonstrate comparison and boolean operators.
print 1 == 0
print 1 == 1
print 1 != 0
print 5 >= 5
print 5 >= 6
# `and` requires both operands True; `or` requires at least one.
print 2 > 1 and 3 > 2
print 2 > 1 and 3 < 2
print 2 > 1 or 3 < 2
print 2 < 1 and 3 < 2
# Operator precedence demo: `not` binds tighter than `and`, which binds tighter than `or`.
print (3 > 2 or 1 < 3) and (1!=3 and 4>3) and not ( 3 < 2 or 1 < 3 and (1!=3 and 4>3))
print 3 > 2 or 1 < 3 and (1!=3 and 4>3) and not ( 3 < 2 or 1 < 3 and (1!=3 and 4>3))
# Illustrative pseudo-code only: `statement1`/`statement2` are placeholders and
# the branch bodies contain nothing but comments, so this cell is not runnable
# as-is (a comment-only body is a syntax error); it documents if/elif/else shape.
if statement1:
    # if the statement1 is true, execute the code here.
    # code.....
    # code.....
elif statement2:
    # if the statement 1 is false, skip the codes above to this part.
    # code......
    # code......
else:
    # if none of the above statements is True, skip to this part
    # code......
# Basic if / if-elif-else / while / for demonstrations (Python 2 syntax).
i = 0
if i == 0:
    print 'i==0 is True'
p = 1 > 0
q = 2 > 3
if p and q:
    print 'p and q is true'
elif p and not q:
    print 'q is false'
elif q and not p:
    print 'p is false'
else:
    print 'None of p and q is true'
# while loop: prints 0..4 then stops when the condition fails.
i = 0
while i < 5:
    print i
    i += 1
# for loop: iterate directly over list elements.
for i in [1,2,3,4,5]:
    print i
# Filter one list against another, then demonstrate break and continue.
stocks = ['AAPL','GOOG','IBM','FB','F','V', 'G', 'GE']
selected = ['AAPL','IBM']
new_list = []
for i in stocks:
    if i not in selected:
        new_list.append(i)
# Note: prints the original list, not new_list.
print stocks
stocks = ['AAPL','GOOG','IBM','FB','F','V', 'G', 'GE']
# break: stop iterating entirely once 'FB' has been printed.
for i in stocks:
    print i
    if i == 'FB':
        break
stocks = ['AAPL','GOOG','IBM','FB','F','V', 'G', 'GE']
# continue: skip printing 'FB' but keep iterating.
for i in stocks:
    if i == 'FB':
        continue
    print i
squares = []
for i in [1,2,3,4,5]:
squares.append(i**2)
print squares
list = [1,2,3,4,5]
squares = [x**2 for x in list]
print squares
# Comprehension with a filter clause, then nested-loop comprehensions.
stocks = ['AAPL','GOOG','IBM','FB','F','V', 'G', 'GE']
selected = ['AAPL','IBM']
new_list = [x for x in stocks if x in selected]
print new_list
# Two `for` clauses iterate the cartesian product; the `if` filters pairs.
print [(x, y) for x in [1,2,3] for y in [3,1,4] if x != y]
print [str(x)+' vs '+str(y) for x in ['AAPL','GOOG','IBM','FB'] for y in ['F','V','G','GE'] if x!=y]
| 05 Introduction to Financial Python[]/02 Logical Operations and Loops/02 Logical Operations and Loops.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # [TensorFlow Hub によるテキストの分類: 映画レビュー](https://www.tensorflow.org/tutorials/keras/text_classification_with_hub?hl=ja)
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# TensorFlow + TF Hub + TFDS setup for the IMDB text-classification tutorial.
import tensorflow as tf
# !pip install -q tensorflow-hub
# !pip install -q tfds-nightly
import tensorflow_hub as hub
import tensorflow_datasets as tfds
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# # Data Exploration
# +
# Load IMDB reviews: 60% of the train split for training, the remaining 40%
# for validation, plus the full test split. as_supervised=True yields (text, label) pairs.
train_data, val_data, test_data = tfds.load(name='imdb_reviews', split=['train[:60%]', 'train[60%:]', 'test'], as_supervised=True)
# Peek at the first batch of 10 examples.
train_examples_batch, train_labels_batch = next(iter(train_data.batch(10)))
train_examples_batch
# -
# # Modeling
# +
# Simple use in TensorFlow Hub model
# Sanity check: the pre-trained gnews-swivel embedding maps each sentence to a 20-dim vector.
embed = hub.load('https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1')
embeddings = embed(['cat is on the mat', 'dog is in the fog'])
print(embeddings)
# +
# Wrap the pre-trained sentence embedding as a trainable first layer.
hub_layer = hub.KerasLayer('https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1', input_shape=[], dtype=tf.string, trainable=True)
from tensorflow import keras
model = keras.Sequential()
model.add(hub_layer)
model.add(keras.layers.Dense(16, activation='relu'))
# No sigmoid on the output: the loss below is configured with from_logits=True,
# so the final layer must emit raw logits. The previous sigmoid + from_logits=True
# combination double-squashed the predictions and degrades training.
model.add(keras.layers.Dense(1))
model.summary()
model.compile(optimizer='adam', loss=keras.losses.BinaryCrossentropy(from_logits=True), metrics=['accuracy'])
# -
# Train for 20 epochs on shuffled batches of 512, validating each epoch.
history = model.fit(train_data.shuffle(10000).batch(512), epochs=20, validation_data=val_data.batch(512), verbose=1)
# Report final loss/accuracy on the held-out test split.
model.evaluate(test_data.batch(512), verbose=2)
| TensorFlow/basic-tutorial/tensorflow-tutorial-tensorflow-hub.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Web Scraper
# +
import requests
from bs4 import BeautifulSoup
#Scrape the source code from the specified link:
#In this example I'm using my own medium wall
data = requests.get("https://fabiom91.medium.com")
# html5lib is the most lenient parser; handles real-world malformed HTML.
soup = BeautifulSoup(data.content, 'html5lib')
# +
import re
# Collect article URLs whose href contains any of these keywords as a whole word.
keywords= ['privacy','data','analytics','data science','cybersecurity']
urls = []
for key in keywords:
    # Serialise the matching <a> tags back to text, then scan that text for hrefs.
    html = str(soup.find_all('a', attrs = {'href':re.compile(r'^.*\b%s\b.*$' % key)}))
    href_start = [s.start() for s in re.finditer('href="',html)]
    for start in href_start:
        # +6 skips the literal 'href="'; the URL ends at the next double quote.
        url = html[start+6:]
        url = url[:url.find('"')]
        # Drop URLs containing query/fragment/special characters.
        if not any(c in '#?^%*()=' for c in url):
            if 'http' in url:
                urls.append(url)
            else:
                # Relative link: prefix the blog's base URL.
                urls.append("https://fabiom91.medium.com" + url)
# De-duplicate (note: set() loses the original ordering).
urls = list(set(urls))
# -
from tqdm import tqdm
# Fetch each article and extract a preview: title, subtitle and lead image.
previews = []
for article in tqdm(urls):
    data = requests.get(article)
    soup = BeautifulSoup(data.content, 'html5lib')
    title = soup.find('h1').contents[0]
    subtitle = soup.find('p').contents[0]
    subtitle = BeautifulSoup(str(subtitle), 'lxml').text
    # Reset per article: previously a page without a srcset image silently
    # reused the image of the preceding article (or raised NameError on the
    # very first one).
    image = ''
    for x in soup.find_all('img'):
        try:
            # srcset lists candidates as "url width, ..."; take the first URL.
            image = str(x.attrs['srcset']).split(" ")[0]
            break
        except KeyError:
            # <img> without a srcset attribute -- try the next one.
            pass
    article_preview = {
        'title': str(title),
        'subtitle': str(subtitle),
        'image': str(image)
    }
    previews.append(article_preview)
print(previews)
# # Newsletter template
# Load the newsletter HTML template and split it around the per-article card.
template = open('email.html')
soup = BeautifulSoup(template.read(), "html.parser")
# The first div.columns is the repeatable article card.
article_template = soup.find('div', attrs={'class':'columns'})
# Everything before / after the card is kept verbatim and rejoined later.
html_start = str(soup)[:str(soup).find(str(article_template))]
html_end = str(soup)[str(soup).find(str(article_template))+len(str(article_template)):]
html_start = html_start.replace('\n','')
html_end = html_end.replace('\n','')
# +
# Fill one copy of the article card per preview and concatenate them.
# Note: article_template is mutated in place each iteration on purpose.
newsletter_content = ""
for i,article in enumerate(previews):
    try:
        img = article_template.img
        img['src'] = article['image']
        article_template.img.replace_with(img)
    except (AttributeError, TypeError):
        # Template card has no <img> (attribute lookup returns None).
        # Narrowed from a bare `except:`, which also swallowed unrelated errors.
        pass
    title = article_template.h1
    title.string = article['title'][:300]
    subtitle = article_template.p
    subtitle.string = article['subtitle'][:300] + "..."
    link = article_template.a
    link['href'] = urls[i]
    link.string = urls[i]
    article_template.a.replace_with(link)
    newsletter_content += str(article_template).replace('\n','')
email_content = html_start + newsletter_content + html_end
# -
# Explicit parser keeps output deterministic and silences bs4's GuessedAtParserWarning.
print(BeautifulSoup(email_content, "html.parser").prettify())
# # Send email (SMTP)
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
# +
# sender_email = "<EMAIL>"
# receiver_email = "<EMAIL>"
# # sender email password
# password = "**************"
# message = MIMEMultipart("alternative")
# message["Subject"] = "My awesome newsletter"
# message["From"] = sender_email
# message["To"] = receiver_email
# # Create the plain-text and HTML version of your message
# text = "Hi, I've found some article that you might find interesting: %s" % previews
# html = email_content
# # Turn these into plain/html MIMEText objects
# part1 = MIMEText(text, "plain")
# part2 = MIMEText(html, "html")
# # Add HTML/plain-text parts to MIMEMultipart message
# # The email client will try to render the last part first
# message.attach(part1)
# message.attach(part2)
# # Create secure connection with server and send email
# context = ssl.create_default_context()
# with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
# server.ehlo()
# server.login(sender_email, password)
# server.sendmail(
# sender_email, receiver_email, message.as_string()
# )
| notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project 2020 - Fundamentals of Data Analysis
# ---
# ### Project requirements:
#
# Perform and explain simple linear regression using Python on the **powerproduction** dataset.
# The goal is to;
# - accurately predict wind turbine power output from wind speed values using the data set as a basis.
# 1. using simple linear regression on the data set.
# 2. An explanation of the regression and an analysis of its accuracy.
#
# #### Methodology:
#
# * Importing the data set
# * Explore the data set
# * Cleanse the data set
# * Perform Analysis
#
# +
# Getting plots inline
# %matplotlib inline
# importing required packages and libraries
# numerical library
import numpy as np
# NOTE(review): rng is created but never used in this notebook.
rng = np.random.default_rng()
# data frames
import pandas as pd
# plotting libraries
import matplotlib.pyplot as plt
import seaborn as sns
# Better sized plots.
plt.rcParams['figure.figsize'] = (12, 8)
# Nicer colours and styles for plots.
# plt.style.use("ggplot")
plt.style.use("fivethirtyeight")
# -
# importing the data set
url = "https://raw.githubusercontent.com/ianmcloughlin/2020A-machstat-project/master/dataset/powerproduction.csv"
# transform the data set into a data frame
df = pd.read_csv(url)
# Last expression displays the frame in the notebook output.
df
# Plotting the data set to explore the shape
sns.scatterplot(data = df, x = "speed", y = "power", label = "Exploring the Data Set's shape")
# #### We observe that many values for the power variable are of 0 value, this can be referred to many factors as for example but not limited to;
# - Human error during the collection of data
# - Technical error during exporting of data
# - Technical error which can be related to the methodology of reporting
# - Actual reporting parameters related to the actual infrastructure and technical model settings.
#
# #### Since the scope is to implement an accurate linear regression with disregard to the condition around the 0 values, the 0 values will be eliminated.
# dropping the rows with element 0
# `df != 0` turns every 0 in ANY column into NaN, so dropna removes rows
# where either speed or power is 0 (not just power).
df = df[df != 0].dropna()
df
# transpose the data set
# Display-only: transpose() returns a new frame; df itself is unchanged.
df.transpose()
# validating the data set
df.describe()
sns.scatterplot(data = df, x = "speed", y = "power", label = "Exploring the Data Set's shape")
# <BR>
#
# ***
#
# # ANALYSIS
#
# ***
# <br>
#
# ---
# ## Simple Linear Regression using _`Numpy`_
# ---
# Do regression on the speed and power arrays using numpy.
# polyfit with degree 1 returns [slope, intercept].
np.polyfit(df["speed"], df["power"], 1)
# +
speed = df["speed"]
power = df["power"]
# Create variables with those values.
# m is the slope, p is the intercept of the fitted line.
m, p = np.polyfit(speed, power, 1)
# Have a look at m and p.
m, p
# -
# Plot speed and power and the regression line in red.
plt.plot(speed, power, "k.", label = "PowerProduction")
plt.plot(speed, m * speed + p, "r-", label = "Linear_regression" )
plt.xlabel("speed")
plt.ylabel("power")
plt.legend()
# ### Calculating the best `m` and `p`
# +
# Closed-form least squares: slope = cov(speed, power) / var(speed),
# intercept = mean(power) - slope * mean(speed).
mean_speed = np.mean(speed)
mean_power = np.mean(power)
# Centre both series on their means.
speed_centred = speed - mean_speed
power_centred = power - mean_power
# Slope: dot product of the centred series over the centred speed with itself.
m = np.sum(speed_centred * power_centred) / np.sum(speed_centred * speed_centred)
# Intercept: force the fitted line through the point of means.
p = mean_power - m * mean_speed
# Display the pair - same values as np.polyfit above.
m, p
# -
# simple linear regression by using seaborn [1]
# regplot fits and draws its own regression line; x_estimator=np.mean plots the mean power per speed value.
sns.regplot(x = "speed", y = "power", data = df, x_estimator=np.mean, color = "g", label ="simple linear regression")
plt.legend()
# <br>
#
# ---
#
# ## Simple Linear Regression using _`Scikit-learn`_
# ---
def f(x, p):
    """Evaluate the fitted line at x, where p = [intercept, slope]."""
    intercept, slope = p[0], p[1]
    return intercept + slope * x
# #### Train
#
# +
# using linear_model : https://scikit-learn.org/stable/modules/linear_model.html
# https://github.com/ianmcloughlin/jupyter-teaching-notebooks/blob/master/models.ipynb
# pda : Notebook: Models
import sklearn.linear_model as lim
from sklearn.linear_model import LinearRegression
speed = df["speed"].to_numpy()
power = df["power"].to_numpy()
# scikit-learn expects a 2-D feature matrix: one column, one row per sample.
speed = speed.reshape(-1, 1)
# Create a linear regression model instance.
# NOTE: this rebinds `m` (previously the slope scalar) to the model object.
m = lim.LinearRegression()
# Ask the model to fit the data.
m.fit(speed, power)
# -
# Here's our intercept.
m.intercept_
# The coefficients.
m.coef_
# See how good the fit is.
# score() returns the R^2 coefficient of determination.
m.score(speed, power)
# training model
# Pack [intercept, slope] in the order expected by f(x, p).
p = [m.intercept_, m.coef_[0]]
p
# #### predict
#
# +
# linear regression function
def predict(speed):
    """
    Predict power output for the given wind speed(s) by evaluating the fitted
    line f with the module-level coefficients p (must be defined beforehand).
    """
    return f(speed, p)
# -
# f(x, p)
powerout = predict(speed)
# Plot the regression line against speed itself. Previously the line was
# plotted against the sample index, contradicting the "speed" x-axis label,
# and predict() was computed a second time instead of reusing powerout.
plt.plot(speed, powerout, "r-", label = "Linear Regression with Power Output")
plt.title("Power Output")
plt.xlabel("speed")
plt.ylabel("power")
plt.legend()
# <br>
#
# ### Advanced _`Scikit-Learn`_
# importing scikit-learn libraries
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import IsolationForest
from sklearn.metrics import mean_absolute_error
# retrieve the array
data = df.values
# split into input and output elements
# All columns but the last are features (speed); the last column is the target (power).
speed, power = data[:, :-1], data[:, -1]
# summarize the shape of the dataset
print(speed.shape, power.shape)
# *We can see from the above, the data set was loaded correctly and there are **451 rows** of data with **1 input variable** and **a single target variable**.*
#
# split into train and test sets
# random_state=1 makes the split reproducible.
speed_train, speed_test, power_train, power_test = train_test_split(speed, power, test_size=0.33, random_state=1)
# summarize the shape of the train and test sets
print(speed_train.shape, speed_test.shape, power_train.shape, power_test.shape)
# _The data set is split into **train** and **test** sets, with **302** rows used for **model training** and **149** for **model evaluation**._
# ### _Automatic Outlier Detection_
#
# The scikit-learn library provides a number of built-in automatic methods for identifying outliers in data. Each method will be defined, then fit on the training dataset. The fit model will then predict which examples in the training dataset are outliers and which are not (so-called inliers). The outliers will then be removed from the training dataset, then the model will be fit on the remaining examples and evaluated on the entire test dataset.
#
# It would be invalid to fit the outlier detection method on the entire training dataset as this would result in data leakage. That is, the model would have access to data (or information about the data) in the test set not used to train the model. This may result in an optimistic estimate of model performance.
#
# One approach might be to return a “None” indicating that the model is unable to make a prediction on those outlier cases. This might be an interesting extension to explore that may be appropriate for your project [3].
#
# #### _Isolation Forest_
#
# Isolation Forest, or iForest for short, is a tree-based anomaly detection algorithm.
#
# It is based on modeling the normal data in such a way as to isolate anomalies that are both few in number and different in the feature space. The scikit-learn library provides an implementation of Isolation Forest in the IsolationForest class.
#
# The most important hyperparameter in the model is the “contamination” argument, which is used to help estimate the number of outliers in the dataset. This is a value between `0.0` and `0.5` and by default is set to `0.1` [3].
#
#
# +
# identify outliers in the training dataset
# contamination=0.1 assumes ~10% of training rows are outliers.
iso = IsolationForest(contamination=0.1)
# fit_predict labels each row: -1 = outlier, 1 = inlier.
power_hat = iso.fit_predict(speed_train)
# select all rows that are not outliers
mask = power_hat != -1
# Outliers are removed from the TRAINING set only; the test set stays intact.
speed_train, power_train = speed_train[mask, :], power_train[mask]
# summarize the shape of the updated train and test sets
print(speed_train.shape, speed_test.shape, power_train.shape, power_test.shape)
# -
# ### _Baseline Model Performance_
# It is a regression predictive modeling problem, meaning that we will be predicting a numeric value. All input variables are also numeric.
# In this case, we will fit a linear regression algorithm and evaluate model performance by training the model on the test dataset and making a prediction on the test data and evaluate the predictions using the mean absolute error (MAE).
#
#
# #### Fit
# Fit the model
# NOTE: rebinds `m` again, now to the post-outlier-removal model.
m = LinearRegression()
m.fit(speed_train, power_train)
# #### Evaluate
# +
# Evaluate the model
power_hat = m.predict(speed_test)
# Evaluate prediction
# MAE: average absolute deviation between predictions and true power values.
mae = mean_absolute_error(power_test, power_hat)
print(" Mean Absolute Error(MAE): %.3f" % mae)
# -
# The model achieved a **MAE** of **11.037** (_`results may vary`_). This is a baseline in performance to which we can compare different outlier identification and removal procedure. After implementing **Isolation Forest** method.
# Here's our intercept.
m.intercept_
# The coefficients.
m.coef_
# training model
# Refresh p with the outlier-cleaned model's [intercept, slope].
p = [m.intercept_, m.coef_[0]]
p
# #### Predict
# +
# linear regression function for prediction
# NOTE(review): this redefines predict() identically to the earlier cell;
# it re-reads the module-level p, which was just updated above.
def predict(speed):
    """
    predicting the power using speed as input
    """
    return f(speed, p)
# -
# f(x, p)
powerOut = predict(speed)
# See how good the fit is.
# R^2 on the held-out test set.
m.score(speed_test, power_test)
plt.plot(speed, powerOut, "r-", label = "Linear Regression with Optimised Power Output")
plt.title("Optimised Power Output")
plt.xlabel("speed")
plt.ylabel("power")
plt.legend()
# <br>
#
# ---
#
# ## Simple Linear Regression using _`Statsmodels`_
#
# ---
# +
# Using statsmodels.
import statsmodels.api as sm
# Tell statmodels to include an intercept.
# add_constant prepends a column of ones so OLS fits an intercept term.
speedwithp = sm.add_constant(speed)
# Create a model.
msm = sm.OLS(power, speedwithp)
# Fit the data.
psm = msm.fit()
# Print a summary.
# Summary includes coefficients, R-squared, p-values and confidence intervals.
print(psm.summary())
# -
# <br>
#
# ***
#
# ## Polynomial Regression with _`scikit-learn`_
#
# ***
# Importing Libraries.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
# +
# retrieve the array
data = df.values
# split into input and output elements
speed, power = data[:, :-1], data[:, -1]
# summarize the shape of the dataset
print(speed.shape, power.shape)
# 2-D feature matrix required by scikit-learn.
x = speed.reshape(-1, 1)
y = power
# -
# transformer refers to an instance of PolynomialFeatures which you can use to transform the input x (speed).
# degree=7 expands x into columns [x, x^2, ..., x^7]; include_bias=False omits the constant column.
transformer = PolynomialFeatures(degree= 7, include_bias=False)
# Fitting transformer with x (speed).
transformer.fit(x)
# Create a new and modified input.
x_ = transformer.transform(x)
# summarizing the earlier 3 steps with one line of code.
# Deliberately repeats the fit+transform above as a one-liner (pedagogical).
x_ = PolynomialFeatures(degree= 7, include_bias=False).fit_transform(x)
# create and fit the model.
m = LinearRegression().fit(x_, y)
# +
# Getting the R-square
r_sq = m.score(x_, y)
print('coefficient of determination:', r_sq)
print('intercept:', m.intercept_)
print('coefficients:', m.coef_)
p = [m.intercept_, m.coef_]
print ('p :', p)
# -
# Getting very similar results with different transformation arguments.
# include_bias=True adds the constant column; fit_intercept=True below still
# fits an intercept, so results match the previous configuration.
x_ = PolynomialFeatures(degree= 7, include_bias=True).fit_transform(x)
print(x_)
model = LinearRegression(fit_intercept= True).fit(x_, y)
# +
# Getting similar results to the previous case.
# getting the accuracy with R-squared
r_sq = model.score(x_, y)
print('coefficient of determination (accuracy -we notice it is almost one which is good!-):', r_sq)
print('intercept:', model.intercept_)
print('coefficients:', model.coef_)
# -
# predict the response (power)
y_pred = model.predict(x_)
print('predicted response:', y_pred)
# Scatter of the raw data plus the degree-7 polynomial fit.
plt.plot(x, y, "k.", label = "Power")
plt.plot(x, y_pred, "r-", label = "Polynomial Regression with estimated Power Output")
plt.title("Polynomial Regression")
plt.xlabel("Speed")
plt.ylabel("Power")
plt.legend()
# ## References
# [1] Functions to draw linear regression models ; https://seaborn.pydata.org/tutorial/regression.html#functions-to-draw-linear-regression-models
#
# [2] Ian McLoughlin; Models : https://github.com/ianmcloughlin/jupyter-teaching-notebooks/blob/master/models.ipynb
#
# [3] 4 Automatic Outlier Detection Algorithms in Python : https://machinelearningmastery.com/model-based-outlier-detection-and-removal-in-python/
#
# [4] Matplotlib : https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.legend.html
#
# [5] KDnuggets : https://www.kdnuggets.com/2019/07/data-pre-processing-optimizing-regression-model-performance.html
#
# [6] RealPython ; Linear Regression in Python : https://realpython.com/linear-regression-in-python/
#
# [7] Statistics By Jim : https://statisticsbyjim.com/regression/choosing-regression-analysis/
#
# [8] Wikipedia ; polynomial regression: https://en.wikipedia.org/wiki/Polynomial_regression
# ## End
| power-production.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Aula 01 - Resolução das Perguntas de Negócio.
#
# ## 1.1 Novas perguntas do CEO:
# 1. Quantas casas estão disponíveis para compra?
# 2. Quantos atributos as casas possuem?
# 3. Quais são os atributos das casas?
# 4. Qual a casa mais cara ( casa com o maior valor de venda )?
# 5. Qual a casa com o maior número de quartos?
# 6. Qual a soma total de quartos do conjunto de dados?
# 7. Quantas casas possuem 2 banheiros?
# 8. Qual o preço médio de todas as casas no conjunto de dados?
# 9. Qual o preço médio de casas com 2 banheiros?
# 10. Qual o preço mínimo entre as casas com 3 quartos?
# 11. Quantas casas possuem mais de 300 metros quadrados na sala de estar?
# 12. Quantas casas tem mais de 2 andares?
# 13. Quantas casas tem vista para o mar?
# 14. Das casas com vista para o mar, quantas tem 3 quartos?
# 15. Das casas com mais de 300 metros quadrados de sala de estar, quantas tem mais de 2 banheiros?
# +
# Import Libraries
import pandas as pd
import numpy as np
# -
# ### Loading Data
data = pd.read_csv( 'datasets/kc_house_data.csv' )
# Plotting data
# head() displays the first five rows for a quick sanity check.
data.head()
# ## 1. How many homes are available for purchase?
# Counting number of lines the same number of houses
# len(data['id'].unique())
# Or using .drop function (removes duplicates and counts again)
# Strategy:
# 1. Select "id" column;
# 2. Remove repeated values;
# 3. Count the number of unique values.
houses = len(data['id'].drop_duplicates())
print('The number of avaible houses are {}'.format(houses))
# ## 2. How many attributes do houses have?
# Strategy:
# 1. Count the number of columns;
# 2. Remove values "id", "date" (they are not attributes):
attributes = len(data.drop(['id', 'date'], axis = 1).columns)
print('The houses have {} attributes'.format(attributes))
# ## 3. What are the attributes of houses?
## Strategy:
# 1. Plot the columns.
attributes2 = data.drop(['id', 'date'], axis=1).columns
print('The attributes are: {}'.format(attributes2))
# ## 4. What is the most expensive house (house with the highest sale price)?
# +
# Strategy:
# 1. Arrange the price column from highest to lowest
# 2. Apply reset index to recount rows and ensure correct result;
# 3. Collect the value and id from the first row.
data[['id','price']].sort_values('price', ascending=False).head() # 1
# reset_index(drop=True) renumbers rows so position 0 is the top-priced house.
exphouse = data[['id','price']].sort_values('price', ascending=False).reset_index(drop=True)['id'][0] # 2 and 3
highestprice = data[['id','price']].sort_values('price', ascending=False).reset_index(drop=True)['price'][0]
print('The most expensive House is: id {} price U${}'.format(exphouse, highestprice))
# -
# ## 5. Which house has the most bedrooms?
# Strategy:
# 1. Arrange the bedroom column from highest to lowest;
# 2. Apply reset index to recount rows and ensure correct result;
# 3. Collect the value and id from the first row.
data[["id","bedrooms"]].sort_values("bedrooms", ascending=False).head() # 1
bedroomsid = data[['id','bedrooms']].sort_values('bedrooms', ascending=False).reset_index(drop=True)['id'][0] # 2 and 3
bedrooms1 = data[['id','bedrooms']].sort_values('bedrooms', ascending=False).reset_index(drop=True)['bedrooms'][0]
print('The house with the most bedrooms is {} and have {} bedroomns'.format(bedroomsid, bedrooms1))
# # 6. What is the sum total of bedrooms in the dataset?
# Strategy:
# 1. Filter the column "bedrooms" and sum its values.
bedroomsnumber = data['bedrooms'].sum()
print('There are a total of {} bedrooms'.format(bedroomsnumber))
# # 7. How many houses have 2 bathrooms?
# Strategy:
# 1. Find the houses with 2 bathrooms;
# 2. Select columns "id" and "bathrooms";
# 3. Count them.
data.loc[data['bathrooms'] == 2, ['id', 'bathrooms']] # 1
# .shape is a (rows, columns) tuple; the house count is its first element.
# (Previously the whole tuple was printed, e.g. "(1930, 2)", as the "count".)
bethroomsum = data.loc[data['bathrooms'] == 2, ['id', 'bathrooms']].shape[0] # 2 and 3
print('There are {} houses with 2 bathrooms'.format(bethroomsum))
# # 8. What is the average price of all houses in the dataset?
# Strategy:
# 1. Find the average price of houses.
# Ps.: Use numpy's round() function to select only 2 numbers after the point.
averageprice = np.round(data['price'].mean(), 2)
print('The average price of the houses is: U${}'.format(averageprice ))
# the function data.dtypes show us what types of variables we have.
data.dtypes
# # 9. What is the average price only houses whit 2 bathroom?
# Strategy:
# 1. Find only the average price of houses with 2 bathrooms.
avg_bath = np.round(data.loc[data['bathrooms'] == 2, 'price'].mean(), 2)
print('The average price for houses whit 2 bathrooms is U${}'.format(avg_bath))
# # 10. What is the minimum price between 3 bedroom homes?
# Strategy:
# 1. Select only 3 bedroom houses and take the minimum price.
# Trailing comma removed: it previously made min_price_bed a 1-tuple, and the
# print below referenced the misspelled name `min_pricebed` (NameError).
min_price_bed = data.loc[data['bedrooms'] == 3, 'price'].min()
print('The minimum price for houses whit 3 bedrooms is U${}'.format(min_price_bed))
# # 11. How many homes have more than 300 square meters in the living room?
# Strategy:
# 1. Convert living area to square metres, then count rows above 300 m².
# 1 ft² = 0.093 m², so the new m2 column is in square metres.
data['m2'] = data['sqft_living'] * 0.093
sqft_300 = len(data.loc[data['m2'] > 300, 'id'])
# NOTE(review): the printed message says "ft²" but the threshold is in m².
print('There are {} houses whit mor than 300ft² in the living room.'.format(sqft_300))
# #### Se quiser fazer a converção de ft² para m² basta usar o seguinte raciocionio: (1 ft² = 0.093 m²)
# #### data['m²']=data['sqft_living'] * 0.093 - (aqui substituimos a variavel sqft_living pela m² ja convertendo o valor)
# #### len(data.loc[data['m²'] > 300, 'id'])
# # 12. How many homes have more than 2 floors?
# Strategy:
# 1. Select only houses with more than 2 floors and count them (.size).
floor_2 = data.loc[data['floors'] > 2, 'id'].size
print('There are {} houses whit mor than 2 floors.'.format(floor_2))
# # 13. How many houses have a waterfront view?
# Strategy:
# 1. Select only houses with a non-zero waterfront flag and count the rows.
waterfront_view = len(data.loc[data['waterfront'] != 0, 'id'])
print('There are {} houses whit waterfront view.'.format(waterfront_view))
# # 14. Of the houses with a waterfront view, how many have 3 bedrooms?
# Strategy:
# 1. Select only houses with a waterfront view and count those with 3 bedrooms.
# Display-only leftover from exploration: lists the column names.
data.columns
waterfront_bed = data.loc[(data['waterfront'] != 0) & (data['bedrooms'] == 3), "id"].size
print('Of the houses whit waterfront, {} houses have 3 bedrooms.'.format(waterfront_bed))
# # 15. Of the houses with more than 300 square meters of living room, how many have more than 2 bathrooms?
# Strategy:
# 1. Select houses with more than 300 m² of living room AND more than 2 bathrooms.
# Uses the m2 column created for question 11; shape[0] is the row count.
house_300m_2bat = data[(data['m2']>300) & (data['bathrooms']>2)].shape[0]
print('Of the houses whit 300 square meters of living room, {} houses have 2 bathrooms.'.format(house_300m_2bat))
| Notebooks/Data_Visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import chardet
import numpy as np
# Detect the file encoding first: the CSV is not guaranteed to be UTF-8.
with open('archivos/atpTour.csv', 'rb') as f:
    result = chardet.detect(f.read())
result['encoding']
datos = pd.read_csv('archivos/atpTour.csv', encoding=result['encoding'], low_memory=False)
df = pd.DataFrame(datos)
# Replace missing values with the string '0' so the numeric casts below succeed.
limpiarNaN = df.replace(np.nan, '0')
# NOTE(review): this describe() result is discarded — it is overwritten by the
# very next assignment. Probably an inspection leftover; confirm intent.
archivoConvertidoInt = limpiarNaN.describe(include = [np.number])
# "NR" (not ranked) is also mapped to "0" before casting to float.
archivoConvertidoInt = limpiarNaN.replace("NR", "0")
archivoConvertidoInt['Wsets'] = archivoConvertidoInt.Wsets.astype(float)
archivoConvertidoInt['WRank'] = archivoConvertidoInt.WRank.astype(float)
archivoConvertidoInt.set_index("Location", inplace = True)
# Count matches per (Location, Series, Court, Surface, Round) combination for
# the alphabetical slice of locations Adelaide..St. Petersburg.
# NOTE(review): ["Series",] selects with a one-element tuple after groupby —
# deprecated in newer pandas; [["Series"]] is the modern spelling.
resultado = (archivoConvertidoInt
             .sort_index()
             .loc["Adelaide":"St. Petersburg", 'Series':'Round']
             .groupby(["Location", "Series", "Court", "Surface", "Round"])["Series",]
             .count())
# Append a grand-total row under a synthetic index key.
resultado.loc[("Total", "", "", "", "")] = resultado.sum()
resultado
| Curso Pandas/EDA Atp Tour.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exploratory Data Analysis (EDA)
# conduct EDA on the Starcraft 2 Data to examine the relationship between variables and other trends in the data.
# # Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# # Load the data
# Cleaned dataset produced by the previous (wrangling) notebook.
# NOTE(review): path has no separator between 'interim' and 'Starcraft' —
# confirm it shouldn't be '../data/interim/Starcraft_cleaned.csv'.
starcraft_loc = '../data/interimStarcraft_cleaned.csv'
#using index_col = 0 to drop the unnecessary number column added by saving the data from the previous notebook.
starcraft = pd.read_csv(starcraft_loc,index_col = 0)
starcraft.info()
starcraft.head()
# # Data Story
# Getting to high rank in Starcraft II is hard. So hard in fact that there is large amounts of content produced on the internet targeted towards helping you get "better" at the game. Yet "better" at the game is a pretty hard idea to quantify in a meaningful way. Given that your rank in Starcraft II is often used as a measure of your overall skill, we aren't particularly interested in some empirically "true" measure of being good at the game and are more interested in what particular game actions and habits are done more often by higher ranking players. Therefore we will be exploring the relationship between League Index and the other independent variables.
#Given that League Index is a categorical variable, not a continuous one
#It is useful for us to find a continuous variable with a high correlation to League Index for future predictive modeling
starcraft.corr()
sns.heatmap(data=starcraft.corr())
# At first glance, it appears that APM (actions per minute) and NumberOfPACs seem to be highly positively correlated to league index while ActionLatency and GapBetweenPACs seem to be highly negatively correlated. It may be interesting to plot these values with each other. A PAC is simply a way of measuring a shift in focus. In this case a single PAC is counted when the screen is shifted to a new area for a minimum amount of time and at least one action is performed.
colsOfInterest = ['APM','NumberOfPACs','ActionLatency','GapBetweenPACs']
# Pairwise scatter plots of the four high-correlation columns, colored by league.
for i in range(len(colsOfInterest)):
    for j in colsOfInterest[i+1:]:
        _ = sns.scatterplot(x=colsOfInterest[i],y=j,data=starcraft,hue='LeagueIndex')
        _ = plt.legend()
        _ = plt.xlabel(colsOfInterest[i])
        _ = plt.ylabel(j)
        _ = plt.title(j+ ' vs ' +colsOfInterest[i])
        plt.show()
# We find that Number of PACs and APM seem to have similar relationships with our other, negative predictors and that our correlation assessment from the above heatmap and table seems to hold true. Given that we have more information about PACs, for example information about the number of actions within a PAC in a given game, of the two high correlation predictors of league it makes the most sense to use NumberOfPACs as our continuous variable when moving forward. Lets continue to explore how other factors may effect the number of PACs.
#We look at age and hours per week compared to Number of PACs
#We want to categorize data into low high numbers of hours per week so first we find summary statistics for the hour column
starcraft['HoursPerWeek'].describe()
#We are going to choose to split our hours around the median, so as to reduce the effect of outliers
starcraft['HoursType']=starcraft['HoursPerWeek'].apply(lambda x: 'High' if x>12.0 else 'Low')
starcraft.head()
_ = sns.scatterplot(y='NumberOfPACs',x='Age',hue='HoursType',data=starcraft,alpha=.5)
_ = plt.title('Number Of PACs vs age, grouped by Low or High weekly hours')
plt.show()
# Given that yellow and blue dots are dispersed relatively evenly it doesn't seem as if hours per week has a huge impact on number of PACs. We do notice a relatively steep drop off in number of PACs after about 25 years of age. This is worth keeping an eye on, lets check to see if this is a meaningful factor or if it is mostly impacted by the number of responses we have from younger players.
#Get summary statistics for age
starcraft['Age'].describe()
#check the summary statistics for number of PACs for players above our 75%
old_craft = starcraft[starcraft['Age'] > 24]
old_craft['NumberOfPACs'].describe()
#compare to number of PACs summary stats of the whole data set
starcraft['NumberOfPACs'].describe()
# We do find that while hours per week don't appear to have a consistent impact on number of PACs, as players become 25 years or older they do seem to have a reduced number of PACs.
#
# Lets explore how hotkey management might affect our Number of PACs stat. We have 3 hotkey related columns. We will also color by age to see if we continue to find a difference in age.
hotkey_vars = ['SelectByHotkeys','AssignToHotkeys','UniqueHotkeys']
for i in hotkey_vars:
    _ = sns.scatterplot(x=i,y='NumberOfPACs',data=starcraft,hue='Age',alpha=.8)
    _ = plt.legend()
    _ = plt.title('Number of PACs vs ' + i)
    plt.show()
# In these figures, it does not appear as if age is a meaningful contributor, though we do see that they all have a slight positive relationship to PACs, AssignToHotkeys has the most notable positive relationship.
#
# So far, we have found that Number of PACs appears to be a good predictor of League Index, and that APM has the largest positive correlation on Number of PACS followed by Assign To Hotkeys while Action Latency seemed to have the greatest negative impact on Number of PACs. Finally we will check these variables effects on League Index to confirm that Number of PACs is a good continuous variable substitute for our categorical variable of interest in League Index.
_ = sns.boxplot(y='NumberOfPACs',x='LeagueIndex',data=starcraft)
plt.show()
_ = sns.boxplot(y='APM',x='LeagueIndex',data=starcraft)
plt.show()
_ = sns.boxplot(y='AssignToHotkeys',x='LeagueIndex',data=starcraft)
plt.show()
_ = sns.boxplot(y='ActionLatency',x='LeagueIndex',data=starcraft)
plt.show()
# The above figures confirm what we suspected, those stats that have a meaningful impact on Number of PACs also have a similar impact on League Index. This seems to suggest that to increase in league rating, a player should be taking more actions and moving their camera around the map more frequently and assigning more units to hotkeys. However these gaps in stats appear to get more extreme beyond League Index 4, lets subset our data and see if there are higher correlations for other categories at lower ranks.
bad_craft = starcraft[starcraft['LeagueIndex'] <= 4]
bad_craft.shape
sns.heatmap(data=bad_craft.corr())
bad_craft.corr()
# We can see that in general our correlations have moved towards 0, this is expected as we have less variation in league index. Due to this expected decrease, any categories with correlation numbers that increased are worth exploring deeper. In this case, total hours and complex units made. We will plot them as bar plots across all 7 league indexes below.
# NOTE: ci=None is deprecated in seaborn >= 0.12; errorbar=None is the modern spelling.
_ = sns.barplot(y='TotalHours',x='LeagueIndex',data=starcraft,ci=None)
plt.show()
_ = sns.barplot(y='ComplexUnitsMade',x='LeagueIndex',data=starcraft,ci=None)
plt.show()
# We can clearly see that in the lower leagues, total hours and number of complex units made have a much larger impact on rank increase than they do at a higher rank where the bars start to step up in less extreme fashions or even, in the case of total hours, step down. This suggests that early on in your Starcraft league ranking up journey, it may be more meaningful to gain experience and focus on strategic decisions, such as what units to make, but that later at higher ranks getting better can often amount to getting faster.
| notebooks/02_Getz_EDA_First_Pass.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import re
# Toy corpus of three tweets; two contain non-ASCII emoji characters.
t=["feel like ending my life 😐 😐 ", "i am diagnosed with depression :) :)", "it's all over for me"]
df = pd.DataFrame()
df['tweets']=t
# +
# comment
# -
def remove_non_ascii(dff):
    """Replace every non-ASCII character in the Series with the placeholder
    'abcde', which a later pass uses to detect and drop emoji-bearing tweets.

    regex=True is passed explicitly: pandas >= 2.0 defaults str.replace to
    literal matching, which would silently stop treating the pattern as a
    character class.
    """
    return dff.str.replace(r"[^\x00-\x7F]", "abcde", regex=True)
# Strip non-ASCII characters (emoji become the 'abcde' placeholder).
df['tweets']=remove_non_ascii(df['tweets'])
def remove_newline(dff):
    """Replace literal Windows line breaks (CR LF) with single spaces.

    regex=False makes the literal replacement explicit, so behaviour does not
    depend on the pandas version's default for str.replace.
    """
    return dff.str.replace("\r\n", " ", regex=False)
# Collapse Windows line breaks into spaces.
df['tweets']=remove_newline(df['tweets'])
def remove_punctuation(df):
    """Replace every character that is neither word-like (alphanumeric or
    underscore) nor whitespace with a single space.

    regex=True is required for the character class to be honoured on
    pandas >= 2.0, where str.replace defaults to literal matching.
    """
    return df.str.replace(r'[^\w\s]', ' ', regex=True)
# Replace punctuation with spaces.
df['tweets']=remove_punctuation(df['tweets'])
# Creating a function remove_unwanted_spaces() to remove all unwanted spaces
def remove_unwanted_spaces(df):
    """Collapse runs of whitespace in *df* (a plain string) into single
    spaces and strip leading/trailing whitespace.

    Bug fix: the original ended with ``return s`` — an undefined name that
    raised NameError on every call; it now returns the cleaned string.
    """
    df = re.sub(r'\s+', ' ', df)
    df = df.strip()  # strip() drops any space left at the beginning and end
    return df
# NOTE(review): this re-applies remove_punctuation; it almost certainly was
# meant to apply remove_unwanted_spaces (defined above but never used) — confirm.
df['tweets']=remove_punctuation(df['tweets'])
# +
tweets=[]
# +
def remove_emoticon_replacement(df):
    """Filter tweets by emoji content: tweets containing the 'abcde'
    placeholder (inserted earlier by remove_non_ascii) are counted and
    skipped; the remaining tweets are appended to the module-level
    `tweets` list (side effect, no return value)."""
    j=0
    for i in range(len(df['tweets'])):
        x = re.search("(abcde)",df.loc[i]['tweets'])
        if (x):
            j=j+1
        else:
            tweets.append(df.loc[i]['tweets'])
remove_emoticon_replacement(df)
# -
# Rebuild a DataFrame holding only the surviving (emoji-free) tweets.
df_processed = pd.DataFrame()
df_processed['tweets'] = tweets
df_processed['tweets']
| project_V1/model+preprocessing/.ipynb_checkpoints/tweet_preprocess-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from scipy.stats import mode
# Initialize with centroid data and the corresponding uncertainty generated from Feature Extraction
STAR_CENTROIDS = np.array([[-3,4], [8,-6], [10,10], [5,5]])
STAR_CENTROIDS_UNCERTAINTY = np.array([[0.06], [0.004], [0.001], [0.002]])
NUM_STARS = STAR_CENTROIDS.shape[0]
#Initialize
# CATALOGUE: raw star catalogue; PROCESSED_CATALOGUE: precomputed pairwise
# angular distances between catalogue stars (columns used below).
CATALOGUE = pd.read_csv("Modified Star Catalogue.csv")
PROCESSED_CATALOGUE= pd.read_csv('Processed_Catalogue.csv')
# +
def cos(row):
    """Cosine of the row's 'Ang_Distance' value (stored in degrees)."""
    angle = np.radians(row['Ang_Distance'])
    return np.cos(angle)
# Reference table: catalogue star pairs with cos(angular distance), sorted
# ascending so the range lookups in the voting loops scan a monotone column.
REFERENCE = pd.DataFrame(columns=['Star_ID1', 'Star_ID2', 'Ang_Distance'])
REFERENCE['Star_ID1'], REFERENCE['Star_ID2'] = PROCESSED_CATALOGUE['Star_ID1'], PROCESSED_CATALOGUE['Star_ID2']
# Note: after this apply, 'Ang_Distance' actually holds the *cosine* of the angle.
REFERENCE['Ang_Distance'] = PROCESSED_CATALOGUE.apply(cos, axis = 1)
REFERENCE.sort_values('Ang_Distance' ,ascending=True, inplace=True)
REFERENCE.head()
# -
# Work on the plain ndarray for speed inside the voting loops below.
REF_ARR = REFERENCE.to_numpy()
REF_ARR
STAR_CENTROIDS
STAR_CENTROIDS_UNCERTAINTY
# +
def starVectorTransform(centroid, focal_length=10):
    '''
    Convert a 2D star centroid on the image plane into the unit 3D pointing
    vector with the focal point as the origin.
    <Formula> - CubeStar Doc - Appendix B
    '''
    x, y = centroid
    ux = x/focal_length
    uy = y/focal_length
    uz = 1
    # Reciprocal length of (ux, uy, 1) — normalises the result to unit norm.
    scale = np.power((x/focal_length)**2 + (y/focal_length)**2 + 1, -0.5)
    return np.array([ux, uy, uz])*scale
# Convert every 2D centroid (one row each) into a unit 3D pointing vector.
STAR_VECTORS = np.apply_along_axis(starVectorTransform, 1, STAR_CENTROIDS, focal_length=10 )
STAR_VECTORS
# +
def vectorAngularDistance(vect1, vect2):
    '''
    Returns the angular distance [cos(theta)] between two unit vectors
    separated by an angle theta: the sum of their element-wise products,
    i.e. the dot product.
    '''
    return np.sum(np.multiply(vect1, vect2))
def uncertaintyAngularDistance(u1, u2):
    '''
    Combined uncertainty of a pair, modelled as the plain sum of the two
    individual uncertainties.
    '''
    combined = u1 + u2
    return combined
# +
# Build VOTE_LIST: column 0 is the image-star index, column 1 a Python list
# that will accumulate catalogue-ID votes for that star.
temp = [[1]]
for i in range(NUM_STARS-1):
    temp.append([])
# Seeding the first sub-list with a dummy element makes the lists ragged, so
# np.array() yields a 1-D object array of lists rather than a 2-D array ...
temp = np.array(temp)
# ... then the dummy is removed again, leaving every per-star list empty.
temp[0].remove(1)
VOTE_LIST = np.vstack((np.arange(0, NUM_STARS), temp)).T
VOTE_LIST
# -
# First voting round: for each image-star pair (i, j), find every catalogue
# pair whose angular distance falls inside the measured window, and let both
# catalogue stars of each match vote for both image stars.
for i in range(NUM_STARS):
    for j in range(i+1, NUM_STARS):
        # Measured cos(angle) between the two pointing vectors ...
        d_ij = vectorAngularDistance(STAR_VECTORS[i], STAR_VECTORS[j])
        # ... and its combined uncertainty ([0] unwraps the 1-element row).
        e_ij = uncertaintyAngularDistance(STAR_CENTROIDS_UNCERTAINTY[i], STAR_CENTROIDS_UNCERTAINTY[j])[0]
        r_ij = [d_ij - e_ij, d_ij + e_ij]
        # Catalogue pairs whose distance (column 2) lies inside the window.
        ind = np.where( (REF_ARR[:, 2] >= r_ij[0]) & (REF_ARR[:,2] <= r_ij[1]) )
        for k in REF_ARR[ind]:
            s1, s2 = k[0], k[1]
            VOTE_LIST[i, 1].append(s1)
            VOTE_LIST[i, 1].append(s2)
            VOTE_LIST[j, 1].append(s1)
            VOTE_LIST[j, 1].append(s2)
# VOTE_LIST_2 rows: [image-star index, winning catalogue ID, verification votes].
temp = np.arange(0, NUM_STARS)
VOTE_LIST_2 = np.vstack((temp, np.zeros_like(temp),np.zeros_like(temp))).T
#VOTE_LIST_2[:, 2] = -1
VOTE_LIST_2
# The most frequently voted catalogue ID wins for each image star.
# NOTE(review): mode(...)[0][0] relies on the pre-1.9 scipy.stats.mode return
# shape; newer scipy returns scalars — confirm the pinned scipy version.
for i in range(NUM_STARS):
    VOTE_LIST_2[i,1] = mode(VOTE_LIST[i,1])[0][0]
VOTE_LIST_2
# Verification round: re-check every identified pair against the catalogue
# (in either column order); a pair whose catalogue distance falls inside the
# measured window earns one confirmation vote for each of its two stars.
for i in range(NUM_STARS):
    for j in range(i+1, NUM_STARS):
        d_ij = vectorAngularDistance(STAR_VECTORS[i], STAR_VECTORS[j])
        e_ij = uncertaintyAngularDistance(STAR_CENTROIDS_UNCERTAINTY[i], STAR_CENTROIDS_UNCERTAINTY[j])[0]
        r_ij = [d_ij - e_ij, d_ij + e_ij]
        s1, s2 = VOTE_LIST_2[i, 1], VOTE_LIST_2[j, 1]
        ind1 = np.where( (REF_ARR[:, 0] == s1) & (REF_ARR[:, 1] == s2) )
        if ind1[0].shape != (0,):
            # Bug fix: compare the catalogue angular distance (column 2) of the
            # first match. The original compared the whole matched row slice to
            # a scalar, which raises "truth value of an array is ambiguous"
            # whenever a match exists.
            if r_ij[0] < REF_ARR[ind1][0, 2] < r_ij[1]:
                VOTE_LIST_2[i, 2] += 1
                VOTE_LIST_2[j, 2] += 1
                continue
        ind2 = np.where( (REF_ARR[:, 0] == s2) & (REF_ARR[:, 1] == s1) )
        if ind2[0].shape != (0,):
            if r_ij[0] < REF_ARR[ind2][0, 2] < r_ij[1]:
                VOTE_LIST_2[i, 2] += 1
                VOTE_LIST_2[j, 2] += 1
VOTE_LIST_2
| Geometric_Voting_Runtime_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Example 3: Visualizing your metadata
#
# This example introduces you to some of the `MetaViz` plotting routines, using the search functions explained in [Example 2](https://github.com/wrightky/MetaViz/blob/main/examples/2_Searching.ipynb). Given the number of plotting functions, this will merely be an overview of a few key examples.
#
# _Note: throughout this example, the code is written to correctly output the figures, but we use IPython's Image to display photos from the example gallery where the real photos would be instead. Ignore those commands._
# +
import MetaViz as mv
# Needed by later cells that tweak figures via plt.gca():
import matplotlib.pyplot as plt

album = mv.Archive()
# Only here to display images from gallery folder, not needed to actually run codes:
import os
os.chdir('MetaViz/gallery')
from IPython.display import Image
# -
# ## Finally, some figures!
#
# The core of MetaViz is visualization -- helping you understand or communicate the underlying global structure of the data in your `Archive`. We've broken the types of figures you may be interested in creating into several categories based on the kind of information they convey. These categories (whose names correspond to the scripts in which they are stored) are as follows.
# ## Image Viewing
#
# Functions to simply display the images in your `Archive` inside Python. If you're performing complex searches like in [Example 2](https://github.com/wrightky/MetaViz/blob/main/examples/2_Searching.ipynb), it can be helpful to have functions to quickly show you which files are showing up in your lists, without having to go look for them in the directory.
# +
# Display the first photo in the collection located in Paris
# Note that we want the full path:
FileNames = album.FindSource(['Paris'],['Coverage'],withPath=True)
mv.ShowImage(FileNames[0],showTitle=False)
# -
Image(filename='StockPhoto.jpg') # Display
# ## Statistics
#
# Functions to show you bulk statistics of your `Archive`. This is where you'll find most of your histograms. For example, let's see how many of each filetype we have in the whole collection:
# Note that this function can also work on a subset of the collection
mv.FileTypes(album)
# We can also visualize lots of temporal statistics, about the year or time of year our photos are dated. These plots can be accessed individually or all at once, as follows.
# +
# Grab the creation dates for the files
df = album.GrabData(fields=['CreateDate'])
mv.TemporalStats(df)
# -
# ## Time-Series
#
# Speaking of temporal information, there are many routines for plotting changes through time. Perhaps we want to compare the relative abundance of a few specific keywords (e.g. people, locations) in our collection through time.
# +
# Let's compare the abundance of different animals showing up in subject keywords
values = ['Dog','Cat','Bird','Rabbit']
fields = ['Subject']
# Size of circles equates to number of entries that day
mv.OccuranceMagnitude(album, values, fields, scale=15)
# -
Image(filename='OccuranceMagnitude.png') # Display
# Let's try one of the non-matplotlib based ones. Seaborn offers a very nice `ViolinPlot` in the following example.
# +
# Relative abundance of different American car brands in collection through time
terms = ['Ford','GM','Chrysler','Rolls-Royce','Cadillac','Chevrolet']
fields = ['Subject']
# Width of violin corresponds to number of entries at that time
mv.ViolinPlot(album, terms, fields)
# -
Image(filename='ViolinPlot.png') # Display
# ## Magnitudes
#
# Perhaps we don't care about time, we just want to compare the abundance of different keywords in total, throughout either the whole collection or a specific subset of the collection. Which keywords show up the most?
#
# In the example below, I scanned my personal photo collection to see which music venues I most frequently tagged in my `Coverage` field. Note the more sophisticated search I used to get to the final product. First, I find photos containing "bands". Then I find the location for those photos. Then I count the number of times each unique keyword showed up. Then plot.
# +
N = 20 # Number of entries to show
# Find all photos in which I tagged a band
bandlist = album.FindSource(['Band'], ['Subject'])
# Grab the Coverage locations for those photos
data = album.GrabData(bandlist, fields=['Coverage'])
# Then, count how often each entry appeared
data = mv.CountUnique(data['Coverage'], delimiter=', ')
# Plot as a bar chart
mv.BarChart(data, N)
# Finally, adjust the plot to give it a better title
# (plt is provided by the matplotlib import in the setup cell)
ax = plt.gca()
ax.set_title('Most Frequented Music Venues')
# -
Image(filename='BarChart.png') # Display
# Note the last two lines in this example. For matplotlib-based functions, you can always edit the resulting figure by calling the figure axes using `ax = plt.gca()` and then editing the axes objects.
# ## Connections
#
# Perhaps instead of visualizing keywords in competition with each other, you want to visualize the connections between them. How often do keywords appear together, either within the same field, or between different ones? What fraction of photos containing _Person A_ also contain _Person B_? What fraction of photos containing _Person A_ were taken in _Place C_ or by _Photographer D_?
#
# In this example, we see how often the ten most common people in the collection are associated with the 15 most common locations:
# +
# Plot correlation between Subject (x) and Coverage (y)
mv.Heatmap2(album, 'Subject', 'Coverage', N_x=10, N_y=15)
# Retitle via the current axes (plt from the setup-cell matplotlib import)
ax = plt.gca()
ax.set_title('Correlation (XMP Subject vs Coverage)')
# -
Image(filename='Heatmap2.png') # Display
# Within a given field, we can create a similar heatmap using `Heatmap1` instead of `Heatmap2`, or we could try out another tertiary package and make a nice `ChordChart`. Note that this package primarily works inside `Jupyter Lab` (and in real code, it's interactive! Not here though, this is a screenshot.)
#
# Maybe we have a collection of scanned letters, and we want to see how often there was correspondence between select people:
# +
# Chord diagram (Only works in JupyterLab)
names = ['Fermi','Bohr','Oppenheimer','Neumann','Meitner','Woods']
fields = ['Subject'] # Fields to search through
mv.ChordChart(album, names, fields)
# -
Image(filename='ChordChart.png') # Display
# ## Geospatial
#
# Currently working on this. Will build a full example 4 for it once these codes are ready.
# ## Get Plotting!
#
# For more example outputs, see the full [gallery](https://github.com/wrightky/MetaViz/tree/main/gallery) folder. Otherwise, explore, try things out, see how they work!
| examples/3_Plotting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lattice generation
#
# Using latticegen, it is possible to generate different symmetry lattices with different appearances.
# These lattices can then be combined in moiré lattices, or compound lattices of the same lattice constant
# +
import numpy as np
import matplotlib.pyplot as plt
import latticegen
# -
# ## A single lattice
#
# First, let's look at generation of a single square lattice:
# A single four-fold (square) lattice; r_k is one over the lattice constant.
lattice = latticegen.anylattice_gen(r_k=0.01, theta=0,
                                    order=1, symmetry=4)
# Transpose so x runs horizontally in the plot.
plt.imshow(lattice.T)
# Here, $r_k$ is one over the lattice constant, `theta` is the rotation angle of the lattice, and `symmetry=4` indicates a four-fold symmetric lattice.
#
# <div class="alert alert-info">
# **Note:** $r_k$ is designed to be easily used with diffraction patterns, i.e. FFT transforms of images. If you just want a physical lattice, you might find [latticegen.physical_lattice_gen()](api.rst#latticegen.latticegeneration.physical_lattice_gen) more intuitive.
# </div>
# ## The `order` parameter
# To give a better sense of what the `order` parameter — the maximum order of the Fourier/k-vector components used — does: the higher the order, the better resolved the atoms are as single spots. However, computational complexity increases fast.
# Side-by-side comparison of the same square lattice at order 1 through 4.
fig, ax = plt.subplots(ncols=2, nrows=2, figsize=[10,10])
for i in range(4):
    ax.flat[i].imshow(latticegen.anylattice_gen(r_k=0.01, theta=0,
                                                order=1+i, symmetry=4))
    ax.flat[i].set_title(f'order = {i+1}')
# ## Different symmetries
#
# We can generate lattices of six-fold (triangular) symmetry and four-fold symmetry, as well as a hexagonal lattice. These functions are also available separately as
# [trilattice_gen()](api.rst#latticegen.latticegeneration.trilattice_gen),
# [squarelattice_gen()](api.rst#latticegen.latticegeneration.squarelattice_gen) and
# [hexlattice_gen()](api.rst#latticegen.latticegeneration.hexlattice_gen).
# Three-fold, four-fold, and hexagonal lattices side by side; the hexagonal
# case has its own generator, the others go through anylattice_gen.
fig, ax = plt.subplots(ncols=3, figsize=[10,4])
for i, sym in enumerate([3, 4, 6]):
    if sym == 6:
        data = latticegen.hexlattice_gen(r_k=0.01, theta=0,
                                         order=3)
    else:
        data = latticegen.anylattice_gen(r_k=0.01, theta=0,
                                         order=3, symmetry=sym)
    ax.flat[i].imshow(data)
    ax.flat[i].set_title(f'Symmetry = {sym}')
# ## A moiré superlattice of two lattices
#
# Now, we can visualize what the moiré of two stacked lattices looks like and play around with the influence of deforming the top lattice.
# We by default drop back to `order=2` to keep things snappy.
# +
r_k = 0.2
theta=2.05
kappa=1.005
psi=13.
xi=0.
lattice1 = 0.7*latticegen.hexlattice_gen(r_k, xi, 2)
lattice2 = latticegen.hexlattice_gen(r_k, theta+xi, 2,
kappa=kappa, psi=psi)
fig, ax = plt.subplots(figsize=[10,10])
data = (lattice1 + lattice2).compute()
im = ax.imshow(data.T,
vmax=np.quantile(data,0.95),
vmin=np.quantile(data,0.05),
)
ax.set_xlabel('x (nm)')
ax.set_ylabel('y (nm)')
ax.set_title(f'$\\theta = {theta:.2f}^\\circ, \\kappa = {kappa:.3f}, \\psi = {psi:.2f}^\\circ$');
| docs/source/Lattice_generation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="copyright"
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="title:generic"
# # Vertex Pipelines: AutoML tabular regression pipelines using google-cloud-pipeline-components
#
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/GoogleCloudPlatform/ai-platform-unified/notebooks/official/google_cloud_pipeline_components_automl_tabular.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
# </a>
# </td>
# <td>
# <a href="https://github.com/GoogleCloudPlatform/ai-platform-unified/notebooks/official/google_cloud_pipeline_components_automl_tabular.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
# View on GitHub
# </a>
# </td>
# <td>
# <a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/ai-platform-unified/notebooks/official/google_cloud_pipeline_components_automl_tabular.ipynb">
# Open in Google Cloud Notebooks
# </a>
# </td>
# </table>
# <br/><br/><br/>
# + [markdown] id="overview:pipelines,automl"
# ## Overview
#
# This notebook shows how to use the components defined in [`google_cloud_pipeline_components`](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud) to build an AutoML tabular regression workflow on [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines).
# + [markdown] id="dataset:cal_housing,lrg"
# ### Dataset
#
# The dataset used for this tutorial is the [California Housing dataset from the 1990 Census](https://developers.google.com/machine-learning/crash-course/california-housing-data-description)
#
# The dataset predicts the median house price.
# + [markdown] id="objective:pipelines,automl"
# ### Objective
#
# In this tutorial, you create an AutoML tabular regression using a pipeline with components from `google_cloud_pipeline_components`.
#
# The steps performed include:
#
# - Create a `Dataset` resource.
# - Train an AutoML `Model` resource.
# - Creates an `Endpoint` resource.
# - Deploys the `Model` resource to the `Endpoint` resource.
#
# The components are [documented here](https://google-cloud-pipeline-components.readthedocs.io/en/latest/google_cloud_pipeline_components.aiplatform.html#module-google_cloud_pipeline_components.aiplatform).
# + [markdown] id="costs"
# ### Costs
#
# This tutorial uses billable components of Google Cloud:
#
# * Vertex AI
# * Cloud Storage
#
# Learn about [Vertex AI
# pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage
# pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
# Calculator](https://cloud.google.com/products/calculator/)
# to generate a cost estimate based on your projected usage.
# + [markdown] id="setup_local"
# ### Set up your local development environment
#
# If you are using Colab or Google Cloud Notebook, your environment already meets all the requirements to run this notebook. You can skip this step.
#
# Otherwise, make sure your environment meets this notebook's requirements. You need the following:
#
# - The Cloud Storage SDK
# - Git
# - Python 3
# - virtualenv
# - Jupyter notebook running in a virtual environment with Python 3
#
# The Cloud Storage guide to [Setting up a Python development environment](https://cloud.google.com/python/setup) and the [Jupyter installation guide](https://jupyter.org/install) provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions:
#
# 1. [Install and initialize the SDK](https://cloud.google.com/sdk/docs/).
#
# 2. [Install Python 3](https://cloud.google.com/python/setup#installing_python).
#
# 3. [Install virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv) and create a virtual environment that uses Python 3.
#
# 4. Activate that environment and run `pip3 install Jupyter` in a terminal shell to install Jupyter.
#
# 5. Run `jupyter notebook` on the command line in a terminal shell to launch Jupyter.
#
# 6. Open this notebook in the Jupyter Notebook Dashboard.
#
# + [markdown] id="install_aip:mbsdk"
# ## Installation
#
# Install the latest version of Vertex SDK for Python.
# + id="install_aip:mbsdk"
import os

# Inside a Google Cloud Notebook the system site-packages are not writable,
# so pip needs --user; in any other environment no flag is required.
USER_FLAG = "--user" if os.path.exists("/opt/deeplearning/metadata/env_version") else ""
# ! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
# + [markdown] id="install_storage"
# Install the latest GA version of *google-cloud-storage* library as well.
# + id="install_storage"
# ! pip3 install -U google-cloud-storage $USER_FLAG
# + [markdown] id="install_gcpc"
# Install the latest GA version of *google-cloud-pipeline-components* library as well.
# + id="install_gcpc"
# ! pip3 install $USER kfp google-cloud-pipeline-components --upgrade
# + [markdown] id="restart"
# ### Restart the kernel
#
# Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
# + id="restart"
import os
# Restart the kernel so freshly installed packages become importable;
# skipped when IS_TESTING is set so automated runs are not interrupted.
if not os.getenv("IS_TESTING"):
    # Automatically restart kernel after installs
    import IPython
    app = IPython.Application.instance()
    app.kernel.do_shutdown(True)
# + [markdown] id="check_versions"
# Check the versions of the packages you installed. The KFP SDK version should be >=1.6.
# + id="check_versions:kfp,gcpc"
# ! python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))"
# ! python3 -c "import google_cloud_pipeline_components; print('google_cloud_pipeline_components version: {}'.format(google_cloud_pipeline_components.__version__))"
# + [markdown] id="before_you_begin:nogpu"
# ## Before you begin
#
# ### GPU runtime
#
# This tutorial does not require a GPU runtime.
#
# ### Set up your Google Cloud project
#
# **The following steps are required, regardless of your notebook environment.**
#
# 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
#
# 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
#
# 3. [Enable the Vertex AI APIs, Compute Engine APIs, and Cloud Storage.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component,storage-component.googleapis.com)
#
# 4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook.
#
# 5. Enter your project ID in the cell below. Then run the cell to make sure the
# Cloud SDK uses the right project for all the commands in this notebook.
#
# **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$`.
# + id="set_project_id"
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
# + id="autoset_project_id"
# Fall back to the active gcloud project when none was entered above.
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
    # Get your GCP project id from gcloud
    shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
# + id="set_gcloud_project_id"
# Point every subsequent gcloud command at the chosen project.
# ! gcloud config set project $PROJECT_ID
# + [markdown] id="region"
# #### Region
#
# You can also change the `REGION` variable, which is used for operations
# throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
#
# - Americas: `us-central1`
# - Europe: `europe-west4`
# - Asia Pacific: `asia-east1`
#
# You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.
#
# Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations)
# + id="region"
REGION = "us-central1" # @param {type: "string"}
# + [markdown] id="timestamp"
# #### Timestamp
#
# If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
# + id="timestamp"
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# + [markdown] id="gcp_authenticate"
# ### Authenticate your Google Cloud account
#
# **If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step.
#
# **If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
#
# **Otherwise**, follow these steps:
#
# In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.
#
# **Click Create service account**.
#
# In the **Service account name** field, enter a name, and click **Create**.
#
# In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
#
# Click Create. A JSON file that contains your key downloads to your local environment.
#
# Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
# + id="gcp_authenticate"
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
# %env GOOGLE_APPLICATION_CREDENTIALS ''
# + [markdown] id="bucket:mbsdk"
# ### Create a Cloud Storage bucket
#
# **The following steps are required, regardless of your notebook environment.**
#
# When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
#
# Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
# + id="bucket"
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
# + id="autoset_bucket"
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
# + [markdown] id="create_bucket"
# **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
# + id="create_bucket"
# ! gsutil mb -l $REGION $BUCKET_NAME
# + [markdown] id="validate_bucket"
# Finally, validate access to your Cloud Storage bucket by examining its contents:
# + id="validate_bucket"
# ! gsutil ls -al $BUCKET_NAME
# + [markdown] id="set_service_account"
# #### Service Account
#
# **If you don't know your service account**, try to get your service account using `gcloud` command by executing the second cell below.
# + id="set_service_account"
SERVICE_ACCOUNT = "[your-service-account]" # @param {type:"string"}
# + id="autoset_service_account"
if (
SERVICE_ACCOUNT == ""
or SERVICE_ACCOUNT is None
or SERVICE_ACCOUNT == "[your-service-account]"
):
# Get your GCP project id from gcloud
# shell_output = !gcloud auth list 2>/dev/null
SERVICE_ACCOUNT = shell_output[2].strip()
print("Service Account:", SERVICE_ACCOUNT)
# + [markdown] id="set_service_account:pipelines"
# #### Set service account access for Vertex Pipelines
#
# Run the following commands to grant your service account access to read and write pipeline artifacts in the bucket that you created in the previous step -- you only need to run these once per service account.
# + id="set_service_account:pipelines"
# ! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectCreator $BUCKET_NAME
# ! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_NAME
# + [markdown] id="setup_vars"
# ### Set up variables
#
# Next, set up some variables used throughout the tutorial.
# ### Import libraries and define constants
# + id="import_aip:mbsdk"
import google.cloud.aiplatform as aip
# + [markdown] id="aip_constants:endpoint"
# #### Vertex AI constants
#
# Setup up the following constants for Vertex AI:
#
# - `API_ENDPOINT`: The Vertex AI API service endpoint for `Dataset`, `Model`, `Job`, `Pipeline` and `Endpoint` services.
# + id="aip_constants:endpoint"
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# + [markdown] id="pipeline_constants"
# #### Vertex Pipelines constants
#
# Setup up the following constants for Vertex Pipelines:
# + id="pipeline_constants"
PIPELINE_ROOT = "{}/pipeline_root/cal_housing".format(BUCKET_NAME)
# + [markdown] id="additional_imports"
# Additional imports.
# + id="import_pipelines:gcpc"
import kfp
from google_cloud_pipeline_components import aiplatform as gcc_aip
# + [markdown] id="init_aip:mbsdk"
# ## Initialize Vertex SDK for Python
#
# Initialize the Vertex SDK for Python for your project and corresponding bucket.
# + id="init_aip:mbsdk"
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
# + [markdown] id="define_pipeline:gcpc,automl,cal_housing,lrg"
# ## Define AutoML tabular regression model pipeline that uses components from `google_cloud_pipeline_components`
#
# Next, you define the pipeline.
#
# Create and deploy an AutoML tabular regression `Model` resource using a `Dataset` resource.
# + id="define_pipeline:gcpc,automl,cal_housing,lrg"
TRAIN_FILE_NAME = "california_housing_train.csv"
# ! gsutil cp gs://aju-dev-demos-codelabs/sample_data/california_housing_train.csv {PIPELINE_ROOT}/data/
gcs_csv_path = f"{PIPELINE_ROOT}/data/{TRAIN_FILE_NAME}"
@kfp.dsl.pipeline(name="automl-tab-training-v2")
def pipeline(project: str = PROJECT_ID):
    """Vertex AI pipeline: create a tabular dataset from the staged CSV,
    train an AutoML regression model on it, and deploy the trained model.
    """
    # Every feature (and the target) in the California housing CSV is numeric.
    numeric_columns = [
        "longitude",
        "latitude",
        "housing_median_age",
        "total_rooms",
        "total_bedrooms",
        "population",
        "households",
        "median_income",
        "median_house_value",
    ]
    dataset_op = gcc_aip.TabularDatasetCreateOp(
        project=project, display_name="housing", gcs_source=gcs_csv_path
    )
    train_op = gcc_aip.AutoMLTabularTrainingJobRunOp(
        project=project,
        display_name="train-automl-cal_housing",
        optimization_prediction_type="regression",
        optimization_objective="minimize-rmse",
        column_transformations=[
            {"numeric": {"column_name": name}} for name in numeric_columns
        ],
        dataset=dataset_op.outputs["dataset"],
        target_column="median_house_value",
    )
    deploy_op = gcc_aip.ModelDeployOp(  # noqa: F841
        model=train_op.outputs["model"],
        project=project,
        machine_type="n1-standard-4",
    )
# + [markdown] id="compile_pipeline"
# ## Compile the pipeline
#
# Next, compile the pipeline.
# + id="compile_pipeline"
from kfp.v2 import compiler # noqa: F811
compiler.Compiler().compile(
pipeline_func=pipeline,
package_path="tabular regression_pipeline.json".replace(" ", "_"),
)
# + [markdown] id="run_pipeline:automl,tabular"
# ## Run the pipeline
#
# Next, run the pipeline.
# + id="run_pipeline:automl,tabular"
DISPLAY_NAME = "cal_housing_" + TIMESTAMP
job = aip.PipelineJob(
display_name=DISPLAY_NAME,
template_path="tabular regression_pipeline.json".replace(" ", "_"),
pipeline_root=PIPELINE_ROOT,
)
job.run()
# + [markdown] id="view_pipeline_run:automl,tabular"
# Click on the generated link to see your run in the Cloud Console.
#
# <!-- It should look something like this as it is running:
#
# <a href="https://storage.googleapis.com/amy-jo/images/mp/automl_tabular_classif.png" target="_blank"><img src="https://storage.googleapis.com/amy-jo/images/mp/automl_tabular_classif.png" width="40%"/></a> -->
#
# In the UI, many of the pipeline DAG nodes will expand or collapse when you click on them. Here is a partially-expanded view of the DAG (click image to see larger version).
#
# <a href="https://storage.googleapis.com/amy-jo/images/mp/automl_tabular_classif.png" target="_blank"><img src="https://storage.googleapis.com/amy-jo/images/mp/automl_tabular_classif.png" width="40%"/></a>
# + [markdown] id="cleanup:pipelines"
# # Cleaning up
#
# To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud
# project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
#
# Otherwise, you can delete the individual resources you created in this tutorial -- *Note:* this is auto-generated and not all resources may be applicable for this tutorial:
#
# - Dataset
# - Pipeline
# - Model
# - Endpoint
# - Batch Job
# - Custom Job
# - Hyperparameter Tuning Job
# - Cloud Storage Bucket
# + id="cleanup:pipelines"
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
try:
if delete_model and "DISPLAY_NAME" in globals():
models = aip.Model.list(
filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
)
model = models[0]
aip.Model.delete(model)
print("Deleted model:", model)
except Exception as e:
print(e)
try:
if delete_endpoint and "DISPLAY_NAME" in globals():
endpoints = aip.Endpoint.list(
filter=f"display_name={DISPLAY_NAME}_endpoint", order_by="create_time"
)
endpoint = endpoints[0]
endpoint.undeploy_all()
aip.Endpoint.delete(endpoint.resource_name)
print("Deleted endpoint:", endpoint)
except Exception as e:
print(e)
if delete_dataset and "DISPLAY_NAME" in globals():
if "tabular" == "tabular":
try:
datasets = aip.TabularDataset.list(
filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
)
dataset = datasets[0]
aip.TabularDataset.delete(dataset.resource_name)
print("Deleted dataset:", dataset)
except Exception as e:
print(e)
if "tabular" == "image":
try:
datasets = aip.ImageDataset.list(
filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
)
dataset = datasets[0]
aip.ImageDataset.delete(dataset.resource_name)
print("Deleted dataset:", dataset)
except Exception as e:
print(e)
if "tabular" == "text":
try:
datasets = aip.TextDataset.list(
filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
)
dataset = datasets[0]
aip.TextDataset.delete(dataset.resource_name)
print("Deleted dataset:", dataset)
except Exception as e:
print(e)
if "tabular" == "video":
try:
datasets = aip.VideoDataset.list(
filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
)
dataset = datasets[0]
aip.VideoDataset.delete(dataset.resource_name)
print("Deleted dataset:", dataset)
except Exception as e:
print(e)
# Delete the pipeline run record.
try:
    if delete_pipeline and "DISPLAY_NAME" in globals():
        pipelines = aip.PipelineJob.list(
            filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
        )
        pipeline = pipelines[0]
        aip.PipelineJob.delete(pipeline.resource_name)
        print("Deleted pipeline:", pipeline)
except Exception as e:
    print(e)
# Finally remove the staging bucket and everything in it.
# NOTE(review): the `if` body below is only a commented-out shell magic; it is
# valid inside the notebook (jupytext restores `! gsutil rm -r $BUCKET_NAME`)
# but not as plain Python.
if delete_bucket and "BUCKET_NAME" in globals():
    # ! gsutil rm -r $BUCKET_NAME
| notebooks/official/pipelines/google_cloud_pipeline_components_automl_tabular.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import geoviews as gv
import geoviews.feature as gf
from bokeh.sampledata.airport_routes import airports, routes
gv.extension('matplotlib')
gv.output(fig='svg')
# -
# ## Define data
# +
# Count the number of connections from each airport
counts = routes.groupby('SourceID')[['Stops']].count().reset_index().rename(columns={'Stops': 'Connections'})
airports_df = pd.merge(airports, counts, left_on='AirportID', right_on='SourceID', how='left')
# Select only US mainland airports by slicing the points to a
# longitude/latitude bounding box
airport_points = gv.Points(airports_df, ['Longitude', 'Latitude'])[-170:-50, 0: 50]
# Declare nodes, graph and tiles
nodes = gv.Nodes(airport_points, ['Longitude', 'Latitude', 'AirportID'],
                 ['Name', 'City', 'Connections'])
graph = gv.Graph((routes, nodes), ['SourceID', 'DestinationID'], ['Source', 'Destination'])
# NOTE(review): `tiles` is created but not used in the final overlay below.
tiles = gv.tile_sources.Wikipedia
# Select 50 busiest airports (largest number of departing routes)
busiest = list(routes.groupby('SourceID').count().sort_values('Stops').iloc[-50:].index.values)
busiest_airports = graph.select(AirportID=busiest, selection_mode='nodes')
# -
# ## Plot
gf.ocean * gf.land * gf.coastline * gf.borders * busiest_airports.opts(
    node_size=8, edge_linewidth=1, edge_alpha=0.05, fig_size=300, padding=0.1)
| datashader-work/geoviews-examples/gallery/matplotlib/airport_graph.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Practice Problems
# ### Lecture 9
# Answer each number in a separate cell
#
# Rename this notebook with your last name and the lecture
#
# ex. Cych_B_09
#
# Turn in this notebook on Canvas.
# 1. Pandas: methods and filters
# - Import the file 'Datasets/minutes_velocity.csv' into a **Pandas DataFrame**.
# - Find the median velocity from this **DataFrame** (use the function **Series.median( )**). Print the median value
# - Find the mean velocity (use the function **Series.mean( )**). Print the mean value
# - Filter your **DataFrame** to select all the velocities that fall between the median and mean using the 'and' conditional syntax.
# - Check your work: find the maximum and minimum velocities in the filtered data frame. Print the maximum and minimum values and compare them to the median and mean.
#
# 2. Functions
#
# - write a function that takes latitudes (in degrees) and converts them to colatitudes (in radians)
# - write a function that takes two longitudes and returns the difference between them
# - re-write the function **great_circle( )** that calls the two other functions and returns the separation in degrees. (**Hint:** remember that **NumPy** assumes that the units are in radians)
#
# 3. Great circles
# - Use your new function to find the angular difference between where you were born and Pinyon Flat (**PF_lat = 33.3,PF_lon = -115.7**).
# - There are roughly 111 km in one degree, how many kilometers is the distance between your birthplace and Pinyon Flat? Print the distance
#
# 4. Formatting strings
# - set a variable, pi, to the value of **np.pi**
# - print the value of pi
# - format the value to have four significant digits.
# - print out a sentence with the formatted value of $\pi$ in it.
# - multiply $\pi$ by 50 and use the exponential form of string formatting to print out 4 significant digits.
| Practice_Problems/Lecture_09_Practice_Problems.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:cta] *
# language: python
# name: conda-env-cta-py
# ---
# NUMBA_NUM_THREADS must be set before numba is imported — numba reads it
# during import/initialisation, so setting it afterwards has no effect.
import os
os.environ['NUMBA_NUM_THREADS'] = '6'
from CHECLabPy.stats import binom, poisson, normal_pdf
import numpy as np
from numba import njit, prange
from math import exp, pow, sqrt
from matplotlib import pyplot as plt
# %matplotlib notebook
@njit(fastmath=True, parallel=True)
def sipm_nb(x, norm, eped, eped_sigma, spe, spe_sigma, lambda_, opct, pap, dap):
    """Evaluate an SiPM charge spectrum (pedestal + photoelectron peaks) at x.

    norm: overall normalisation; eped/eped_sigma: pedestal position/width;
    spe/spe_sigma: single-p.e. position/width; lambda_: mean number of
    initially fired cells (Poisson); opct: optical crosstalk probability;
    pap/dap: afterpulse probability and relative amplitude reduction.
    """
    sap = spe_sigma  # Assume the sigma of afterpulses is the same
    # Obtain pedestal peak (Poisson probability of zero fired cells)
    p_ped = exp(-lambda_)
    ped_signal = norm * p_ped * normal_pdf(x, eped, eped_sigma)
    pe_signal = np.zeros(x.size)
    found = False  # set once a significant pk has been seen
    # Loop over the possible total number of cells fired
    # NOTE(review): nested prange — numba parallelises only the outer loop.
    for k in prange(1, 250):
        pk = 0
        for j in prange(1, k+1):
            pj = poisson(j, lambda_)  # Probability for j initial fired cells
            # Skip insignificant probabilities
            if pj < 1e-4:
                continue
            # Sum the probability from the possible combinations which result
            # in a total of k fired cells to get the total probability of k
            # fired cells
            pk += pj * pow(1-opct, j) * pow(opct, k-j) * binom(k-1, j-1)
        # Skip insignificant probabilities (before the peak: skip; after the
        # peak: all later k are smaller still, so stop entirely)
        if (not found) & (pk < 1e-4):
            continue
        if found & (pk < 1e-4):
            break
        found = True
        # Consider probability of afterpulses
        papk = pow(1 - pap, k)  # probability that none of the k cells afterpulses
        p0ap = pk * papk
        pap1 = pk * (1-papk) * papk  # single-afterpulse term
        # Combine spread of pedestal and pe (and afterpulse) peaks
        pe_sigma = sqrt(k * spe_sigma ** 2 + eped_sigma ** 2)
        ap_sigma = sqrt(k * sap ** 2 + eped_sigma ** 2)
        # Evaluate probability at each value of x
        pe_signal += norm * (
            p0ap * normal_pdf(x, eped + k * spe, pe_sigma) +
            pap1 * normal_pdf(x, eped + k * spe * (1 - dap), ap_sigma)
        )
    return ped_signal + pe_signal
@njit(fastmath=True)
def sipm_nb_new(x, norm, eped, eped_sigma, spe, spe_sigma, lambda_, opct, pap, dap):
    """Simplified SiPM spectrum: same crosstalk model as sipm_nb but with no
    afterpulse terms (pap/dap are accepted for signature compatibility and
    not used), and an early exit once pk has passed its maximum and fallen
    below threshold.
    """
    # Obtain pedestal peak (Poisson probability of zero fired cells)
    p_ped = exp(-lambda_)
    ped_signal = norm * p_ped * normal_pdf(x, eped, eped_sigma)
    pe_signal = np.zeros(x.size)
    pk_max = 0  # running maximum of pk, used to detect the falling edge
    # Loop over the possible total number of cells fired
    for k in prange(1, 100):
        pk = 0
        for j in prange(1, k+1):
            pj = poisson(j, lambda_)  # Probability for j initial fired cells
            # Skip insignificant probabilities
            if pj < 1e-4:
                continue
            # Sum the probability from the possible combinations which result
            # in a total of k fired cells to get the total probability of k
            # fired cells
            pk += pj * pow(1-opct, j) * pow(opct, k-j) * binom(k-1, j-1)
        # Skip insignificant probabilities: once past the peak and below
        # threshold, all later k contribute even less, so stop.
        if pk > pk_max:
            pk_max = pk
        elif pk < 1e-4:
            break
        # Combine spread of pedestal and pe peaks
        pe_sigma = sqrt(k * spe_sigma ** 2 + eped_sigma ** 2)
        # Evaluate probability at each value of x
        pe_signal += norm * pk * normal_pdf(x, eped + k * spe, pe_sigma)
    return ped_signal + pe_signal
# Shared toy parameters for comparing the two implementations.
kwargs = dict(
    norm=2,
    eped=0,
    eped_sigma=0.1,
    spe=1,
    spe_sigma=0.1,
    lambda_=2,
    opct=0.5,
    pap=0,
    dap=0,
)
x = np.linspace(-1, 300, 300000, dtype=np.double)
y_old = sipm_nb(x, **kwargs)
y_new = sipm_nb_new(x, **kwargs)
# Overlay both spectra, keeping only points above 1e-5 to trim the flat tails.
plt.plot(x[y_old>1e-5], y_old[y_old>1e-5])
plt.plot(x[y_new>1e-5], y_new[y_new>1e-5])
# Sanity check: the spectrum mean divided by lambda_ should agree between the
# two implementations.
print(np.average(x, weights=y_old) / kwargs['lambda_'])
print(np.average(x, weights=y_new) / kwargs['lambda_'])
# %timeit sipm_nb(x, **kwargs)
# %timeit sipm_nb_new(x, **kwargs)
| sstcam_sandbox/d191105_spe/sipm_algorithm_speed.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="hK-PXyilUZnR"
import tensorflow as tf
import tensorflow.keras.layers as layers
# + id="F7oKdYCGUNIf"
class ConvNeXt_Block(layers.Layer):
    r""" ConvNeXt Block.
    Structure: 7x7 depthwise conv -> LayerNorm -> 1x1 expand (4*dim) -> GELU
    -> 1x1 project (dim) -> LayerScale (gamma) -> DropPath -> residual add.
    Args:
        dim (int): Number of input channels.
        drop_path (float): Stochastic depth rate. Default: 0.0
        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
    """
    def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6):
        super().__init__()
        self.dwconv = layers.DepthwiseConv2D(kernel_size=7, padding='same') # depthwise conv
        self.norm = layers.LayerNormalization(epsilon=1e-6)
        # pointwise/1x1 convs, implemented with linear layers
        self.pwconv1 = layers.Dense(4 * dim)
        self.act = layers.Activation('gelu')
        self.pwconv2 = layers.Dense(dim)
        # DropPath is defined later in this module; resolved at instantiation time.
        self.drop_path = DropPath(drop_path)
        self.dim = dim
        self.layer_scale_init_value = layer_scale_init_value
    def build(self, input_shape):
        # Per-channel LayerScale parameter, initialised to a small constant.
        self.gamma = tf.Variable(
            initial_value=self.layer_scale_init_value * tf.ones((self.dim)),
            trainable=True,
            name='_gamma')
        self.built = True
    def call(self, x):
        input = x  # residual branch
        x = self.dwconv(x)
        x = self.norm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        if self.gamma is not None:
            x = self.gamma * x  # LayerScale
        # Residual connection with stochastic depth on the transformed branch.
        x = input + self.drop_path(x)
        return x
class Downsample_Block(layers.Layer):
    """The Downsample Block in ConvNeXt.
    LayerNorm followed by a non-overlapping 2x2 stride-2 convolution that
    halves the spatial resolution and sets the channel count to `dim`.
    Args:
        dim (int): number of channels
    """
    def __init__(self, dim):
        super().__init__()
        self.LN = layers.LayerNormalization(epsilon=1e-6)
        self.conv = layers.Conv2D(dim, kernel_size=2, strides=2)
    def build(self, input_shape):
        self.built = True
    def call(self, x):
        x = self.LN(x)
        x = self.conv(x)
        return x
class DropPath(tf.keras.layers.Layer):
    """The Drop path (stochastic depth) in ConvNeXt.
    During training, each sample's residual branch is zeroed with probability
    `drop_prob`; surviving branches are scaled by 1/keep_prob so the
    expectation is unchanged. Identity at inference time.
    Reference:
        https://github.com/rishigami/Swin-Transformer-TF/blob/main/swintransformer/model.py
    """
    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob
    def call(self, x, training=None):
        return self._drop_path(x, self.drop_prob, training)
    def _drop_path(self, inputs, drop_prob, is_training):
        # No-op outside training or when dropping is disabled.
        if (not is_training) or (drop_prob == 0.):
            return inputs
        # Compute keep_prob
        keep_prob = 1.0 - drop_prob
        # Compute drop_connect tensor: one random value per sample,
        # broadcast over all remaining axes.
        # NOTE(review): len(tf.shape(inputs)) relies on the static rank being
        # known — confirm for dynamic-rank inputs.
        random_tensor = keep_prob
        shape = (tf.shape(inputs)[0],) + (1,) * \
            (len(tf.shape(inputs)) - 1)
        random_tensor += tf.random.uniform(shape, dtype=inputs.dtype)
        # floor(keep_prob + U[0,1)) is 1 with probability keep_prob, else 0.
        binary_tensor = tf.floor(random_tensor)
        # Scale survivors by 1/keep_prob to preserve the expected value.
        output = tf.math.divide(inputs, keep_prob) * binary_tensor
        return output
# + id="qqXN9PnQA5o7"
def create_convnext_model(input_shape=(224, 224, 3), depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], num_classes=1000, drop_path=0., layer_scale_init_value=1e-6):
    """ Function to construct the ConvNeXt Model
    Args:
        input_shape (tuple): (Width, Height , Channels)
        depths (list): a list of size 4. denoting each stage's depth
        dims (list): a list of size 4. denoting number of kernel's in each stage
        num_classes (int): the number of classes
        drop_path (float): Stochastic depth rate. Default: 0.0
        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
    Returns:
        ConvNeXt model: an instance of tf.keras.Model
    """
    assert (len(depths) == 4 and len(dims) == 4), "Must provide exactly 4 depths and 4 dims"
    assert (len(input_shape) == 3), "Input shape must be (W, H, C)"
    # Renamed from `input` (shadowed the builtin).
    inputs = layers.Input(shape=input_shape)
    # Stem: 4x4 non-overlapping "patchify" conv + LayerNorm, then stage 1.
    # FIX(review): drop_path and layer_scale_init_value were previously
    # accepted but never forwarded to the blocks; they are now passed through.
    y = layers.Conv2D(dims[0], kernel_size=4, strides=4)(inputs)
    y = layers.LayerNormalization(epsilon=1e-6)(y)
    for _ in range(depths[0]):
        y = ConvNeXt_Block(dims[0], drop_path=drop_path,
                           layer_scale_init_value=layer_scale_init_value)(y)
    # Stages 2-4: downsample, then a run of ConvNeXt blocks.
    for stage in range(1, 4):
        y = Downsample_Block(dims[stage])(y)
        for _ in range(depths[stage]):
            y = ConvNeXt_Block(dims[stage], drop_path=drop_path,
                               layer_scale_init_value=layer_scale_init_value)(y)
    y = layers.GlobalAveragePooling2D()(y)
    # final norm layer
    y = layers.LayerNormalization(epsilon=1e-6)(y)
    # Classification head (raw logits; no softmax).
    y = layers.Dense(num_classes)(y)
    return tf.keras.Model(inputs=inputs, outputs=y)
# + id="MmcZXhJFUm4S"
model = create_convnext_model(num_classes=1000)
# + colab={"base_uri": "https://localhost:8080/"} id="OI4bXchCV8b3" outputId="bb6ca455-ae73-4ea9-926a-1a00a64235ca"
print(model.summary())
| Notebook/ConvNeXt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.3 32-bit
# language: python
# name: python38332bitabebdcd8e2a1488c90932336e3af1b49
# ---
# # Selenium을 통한 Webscraping
# ### Basics
from selenium import webdriver
# Configure IPython auto-reload so edited modules are re-imported automatically.
# %load_ext autoreload
# %autoreload 2
# Check the chromedriver path (must exist next to this notebook).
# !dir .\chromedriver.exe
# The webdriver instance is what actually sends the requests to the browser.
driver = webdriver.Chrome('./chromedriver.exe') # if it sits under the current directory, Selenium finds it without an explicit path
driver
# Load a website.
d = driver.get('https://www.naver.com') # can only run after the chromedriver window above has opened
type(d)
| Day28_selenium_load.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
import cv2
import glob
import pickle
import numpy as np
import matplotlib.pyplot as plt
import utils
# +
# Load the camera/threshold config, test data, and perspective matrices
# produced by the earlier notebook steps.
with open('experimental/cfg.pickle', 'rb') as f:
    cfg = pickle.load(f)
with open('experimental/data.pickle', 'rb') as f:
    data = pickle.load(f)
with open('experimental/perspective_mtx.pickle', 'rb') as f:
    perspective_mtx = pickle.load(f)
# -
test_images_path = glob.glob('./test_images/*_dst.jpg')
test_images_path
# +
img = cv2.imread(test_images_path[0])
im_size = img.shape[::-1][1:]  # (width, height) as expected by warpPerspective
binary = utils.get_binary_mask_from_image(img, cfg)
# -
M = perspective_mtx['M']
MInv = perspective_mtx['MInv']
binary_warped = cv2.warpPerspective(binary, M, im_size)
leftx_base, rightx_base = utils.get_leftx_rightx_base(binary_warped)
leftx_base, rightx_base
leftx, lefty, rightx, righty = utils.get_left_right_lane_xy(binary_warped,
                                                            leftx_base,
                                                            rightx_base)
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# NOTE(review): the next two lines overwrite the fits above and call
# get_lane_line_coefficients / get_lane_line_position, which are not defined
# anywhere in this notebook (nor referenced via utils.); this cell raises
# NameError unless they were defined in an unsaved session cell.
left_fit, right_fit = get_lane_line_coefficients(leftx, lefty, rightx, righty)
left_fitx, right_fitx, ploty = get_lane_line_position(im_size[1], left_fit, right_fit)
lane_pts = utils.get_lane_pts_for_fillPoly(left_fitx, right_fitx, ploty)
img_lane_warped = utils.get_lane_rectangle_image(binary_warped, lane_pts)
img_lane = cv2.warpPerspective(img_lane_warped, MInv, im_size)
out_img = utils.weighted_img(img_lane, cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.imshow(out_img)
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# +
# Define y-value where we want radius of curvature
# We'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
# Calculation of R_curve (radius of curvature)
# NOTE(review): only y_eval is scaled to meters; the polynomial coefficients
# stay in pixel space, so these radii are not true metric values — confirm
# against a meter-space refit.
left_curverad = ((1 + (2*left_fit[0]*y_eval*ym_per_pix + left_fit[1])**2)**1.5) / np.absolute(2*left_fit[0])
right_curverad = ((1 + (2*right_fit[0]*y_eval*ym_per_pix + right_fit[1])**2)**1.5) / np.absolute(2*right_fit[0])
# -
left_curverad, right_curverad
def get_lane_curvature(left_fit, right_fit, ploty):
    """Radius of curvature of both lane lines at the bottom of the image.

    Args:
        left_fit: second-order polynomial coefficients [a, b, c] of the left
            lane line fitted in pixel space (x = a*y**2 + b*y + c).
        right_fit: same for the right lane line.
        ploty: y positions; curvature is evaluated at max(ploty) (image bottom).

    Returns:
        (left_curverad, right_curverad) radii of curvature.

    NOTE(review): only the y value is scaled by ym_per_pix while the fit
    coefficients remain in pixel space, so the result is not a true metric
    radius; a fully correct conversion would refit the lane pixels in meter
    space. Kept as-is to match the notebook's earlier cell. The previously
    defined xm_per_pix was unused and has been removed.
    """
    ym_per_pix = 30/720 # meters per pixel in y dimension
    y_eval = np.max(ploty)
    left_curverad = ((1 + (2*left_fit[0]*y_eval*ym_per_pix + left_fit[1])**2)**1.5) / np.absolute(2*left_fit[0])
    right_curverad = ((1 + (2*right_fit[0]*y_eval*ym_per_pix + right_fit[1])**2)**1.5) / np.absolute(2*right_fit[0])
    return left_curverad, right_curverad
def print_curvature_info(img, left_curvature, right_curvature):
    """Overlay the left/right lane curvature read-outs on a copy of the image."""
    annotated = img.copy()
    labels = (
        (f'Left Curvature: {left_curvature:.2f} m', (10, 50)),
        (f'Right Curvature: {right_curvature:.2f} m', (10, 110)),
    )
    for text, origin in labels:
        annotated = cv2.putText(annotated, text, origin,
                                cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 0), 5,
                                cv2.LINE_AA)
    return annotated
left_curvature, right_curvature = get_lane_curvature(left_fit, right_fit, ploty)
# Annotate the original (RGB-converted) frame with the curvature read-outs.
window_img = print_curvature_info(cv2.cvtColor(img, cv2.COLOR_BGR2RGB),
                                  left_curvature, right_curvature)
plt.imshow(window_img)
# Same annotation on the lane-overlay image, saved for the write-up.
plt.imshow(print_curvature_info(out_img, left_curvature, right_curvature))
plt.savefig('output_images/lane_line_final_image.png')
| .ipynb_checkpoints/(experimental) Step6 Pixel Space to Real World-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %run -i 'PyFiles/imports.py'
# %run -i 'PyFiles/helpers.py'
# %run -i "PyFiles/experiment.py"
# %run -i "PyFiles/analysis.py"
import scipy.stats as stats
import glob
# +
# Load one saved experiment result and rebuild its analysis objects
# (EchoStateAnalysis / EchoStateNetwork come from the %run'd PyFiles above).
medium_path_list = glob.glob('experiment_results/medium/*/*.txt')
test_analysis = EchoStateAnalysis([medium_path_list[0]],
                                  model = "uniform",
                                  ip_use_observers = True,
                                  ip_method = "linear")
json_obj_test = test_analysis.experiment_lst[0]
test_experiment = test_analysis.get_experiment(json_obj_test, model = "uniform")
# -
json_obj_test["best arguments"]
# The next three cells rebuild the same exponential-weight ESN with different
# llambda2 / noise settings to compare the resulting exponential weights.
test_esn = test_experiment.esn_spec
test2_esn = EchoStateNetwork(**json_obj_test["best arguments"]["exponential"],
                             resp_idx = json_obj_test["resp_idx"],
                             obs_idx = json_obj_test["obs_idx"],
                             exponential = False, plot = True,
                             llambda2 = 10**(-2), model_type = "exponential")
test2_esn.noise = 0.5
test2_esn.get_exp_weights()
test_esn = test_experiment.esn_spec
test2_esn = EchoStateNetwork(**json_obj_test["best arguments"]["exponential"],
                             resp_idx = json_obj_test["resp_idx"],
                             obs_idx = json_obj_test["obs_idx"],
                             exponential = False, plot = True,
                             llambda2 = 0.0001, model_type = "exponential")
test2_esn.noise = 0.1
test2_esn.get_exp_weights()
test_esn = test_experiment.esn_spec
test2_esn = EchoStateNetwork(**json_obj_test["best arguments"]["exponential"],
                             resp_idx = json_obj_test["resp_idx"],
                             obs_idx = json_obj_test["obs_idx"],
                             exponential = False, plot = True, model_type = "exponential",
                             llambda2 = 10)
test2_esn.noise = 0.5
test2_esn.get_exp_weights()
# Perturb the first three exponential weights with small Gaussian noise.
# NOTE(review): the (10, 3) noise added to exp_weights[:3] relies on
# broadcasting — confirm the weight shapes.
normal_error = np.random.normal(loc = 0, scale = 0.01, size = (10,3))
exp_weights1to3 = test2_esn.exp_weights[:3]
print(exp_weights1to3 )
exp_weights1to3 + normal_error
| MARIOS/secondary_notebooks/Dual Lambda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Аналитик в Гиперкуб (стажер)
# https://yandex.ru/jobs/vacancies/interns/intern_an_hypercube/
#
# Вам нужно:
# + скачать Access Log серверов NASA за июль 1995 года [отсюда](https://yadi.sk/d/JPx8Chjc6hLHNg);
# + распарсить файл в Pandas DataFrame;
# + посчитать количество обращений к каждому урлу;
# + найти топ-15 самых посещаемых урлов;
# + посчитать число запросов в секунду;
# + нарисовать график числа запросов в секунду;
# + построить гистограмму распределения размеров запросов.
#
# В качестве ответа приложите файл с кодом или Jupyter Notebook.
# + ## Загрузка Access Log серверов NASA за июль 1995 года [отсюда](https://yadi.sk/d/JPx8Chjc6hLHNg)
YANDI_LINK = 'https://yadi.sk/d/JPx8Chjc6hLHNg'  # public share holding the NASA access log archive
API_ENDPOINT = 'https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key={}'  # Yandex Disk public-resource download API
FILE_EXTENSION = 'log'  # extension of the log file inside the downloaded archive
# ### Loading modules
import requests
# ### Functions
# +
def _get_real_direct_link(sharing_link):
    """Resolve a Yandex Disk sharing link into a direct download URL.

    Returns the 'href' field of the API response, or None if absent.
    """
    # A timeout prevents the call from hanging indefinitely on network issues.
    pk_request = requests.get(API_ENDPOINT.format(sharing_link), timeout=30)
    return pk_request.json().get('href')
def _extract_filename(direct_link):
for chunk in direct_link.strip().split('&'):
if chunk.startswith('filename='):
return chunk.split('=')[1]
return None
def download_yadisk_link(sharing_link):
    '''Yandex disk link -> name of file(str), file (bytes)

    Returns (filename, file_bytes) on success; returns None (implicitly)
    when the sharing link cannot be resolved to a direct download URL.
    '''
    direct_link = _get_real_direct_link(sharing_link)
    if direct_link:
        filename = _extract_filename(direct_link)
        download = requests.get(direct_link, timeout=60)
        file = download.content
        # FIX(review): the original f-string printed the literal text
        # "(unknown)" and never interpolated `filename`.
        print(f'Download {filename} from {sharing_link}.')
        return filename, file
    else:
        print(f'Failed to download {sharing_link}.')
# -
# Download the Access Log from Yandex Disk:
filename, yadisk_file = download_yadisk_link(YANDI_LINK)
# + ## Распарсить файл в Pandas DataFrame;
# ### Loading modules
import pandas as pd
import zipfile
import io
import re
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
# ### Functions
# +
def zipfile_extract(archive, file_extension, is_file=False):
    '''zip or zip bytes -> string line'''
    # Accept either raw zip bytes or an already-open file object.
    source = archive if is_file else io.BytesIO(archive)
    with zipfile.ZipFile(source) as zipped:
        for member in zipped.namelist():
            # Skip macOS resource-fork entries and non-matching extensions.
            if member.startswith('__MACOSX') or not member.endswith(file_extension):
                continue
            with zipped.open(member) as handle:
                text = io.TextIOWrapper(handle, errors='ignore')
                yield from text
def count_lines(data, file_extension=None, is_file=False):
    '''zip or text in bytes/file -> count lines'''
    total = 0
    # Wrap raw bytes in a BytesIO so ZipFile can seek in them.
    source = data if is_file else io.BytesIO(data)
    with zipfile.ZipFile(source) as zip_file:
        for name in zip_file.namelist():
            if name.startswith('__MACOSX') or not name.endswith(file_extension):
                continue
            print(name)
            with zip_file.open(name) as file:
                total += len(file.readlines())
    return total
def log_parsser(line, regex):
    """Parse one access-log line with *regex* into a field dict.

    On a match, returns a dict of the nine named capture groups; when the
    line does not match, it is printed for inspection and a fallback dict
    holding the raw line under 'host' is returned.
    """
    fields = ('host', 'identd', 'userid', 'time', 'request_method',
              'request', 'protocol', 'status_code', 'size_of_object')
    found = regex.match(line)
    if not found:
        print(line)
        return {'host': line}
    return {name: found.group(name) for name in fields}
def log_parsser_decorator(args):
    # Single-argument wrapper so log_parsser can be used with Pool.map;
    # relies on the module-level compiled pattern `regex_comp`.
    return log_parsser(args, regex_comp)
def to_datetime(df, format_date):
    """Convert a Series of timestamp strings to datetimes using *format_date*."""
    return pd.to_datetime(df, format=format_date)
def df_optimizing(df):
    """Shrink DataFrame memory in place: downcast numeric columns and turn
    low-cardinality object columns (unique ratio < 0.5) into categories.

    Returns the mutated frame.
    """
    for col in df.columns:
        kind = df[col].dtypes.kind
        if kind in ('i', 'u'):
            # Non-negative integer columns can use the unsigned range.
            target = 'unsigned' if df[col].min() >= 0 else 'integer'
            df[col] = pd.to_numeric(df[col], downcast=target)
        elif kind in ('f', 'c'):
            df[col] = pd.to_numeric(df[col], downcast='float')
        elif kind == 'O':
            distinct = len(df[col].unique())
            if distinct / len(df[col]) < 0.5:
                df[col] = df[col].astype('category')
    return df
# -
# ## Парсинг Access Log
# [About Access Log](http://httpd.apache.org/docs/2.0/logs.html#accesslog)
# [Identification Protocol](https://tools.ietf.org/html/rfc1413)
# [HTTP Authentication ](https://tools.ietf.org/html/rfc2617)
# [Hypertext Transfer Protocol](https://tools.ietf.org/html/rfc2616)
# [Method](https://tools.ietf.org/html/rfc2616#section-5.1.1)
# [Hypertext Transfer Protocol Version](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#History)
# [Status Code](https://tools.ietf.org/html/rfc2616#section-6.1.1)
# Количество строк в Access Log:
if not filename.endswith(FILE_EXTENSION):
print(f'Количество строк в Access Log: {count_lines(yadisk_file, FILE_EXTENSION)}')
# ### Regular expression
# [Online regex tester](https://regex101.com/r/lTYzqc/4)
REGEX_PATTERN = r'(?P<host>.*?)\s(?P<identd>[\w,:\s]|-)\s(?P<userid>[\w]+|-)\s\[(?P<time>\d{1,2}\/(\w{3}|\d{2})\/\d{2,4}(:\d{2}){3}\s-?\d{4})\]\s\"(?P<request_method>OPTIONS|GET|HEAD|POST|PUT|DELETE|TRACE|CONNECT)?\s?(?P<request>.*?)\s?(?P<protocol>HTTP\/\d\.\d)?\"\s((?P<status_code>\d{3})|(-))\s((?P<size_of_object>\d+)|(-))'
regex_comp = re.compile(REGEX_PATTERN)
# ## CPU Info
import subprocess
print((subprocess.check_output('lscpu', shell=True).strip()).decode())
# ## Вариант 1. Парсинг одним процессом одиним потоком
# ### Распаковка zip-архива
if filename.endswith('zip'):
log_data = zipfile_extract(yadisk_file, FILE_EXTENSION)
# ### Парсинг Access Log
# +
# %%time
log_list = []
for line in log_data:
log_list.append(log_parsser(line, regex_comp))
# -
# ## Вариант 2. Парсинг несколькими процессами.
# ### Распаковка zip-архива
if filename.endswith('zip'):
log_data = zipfile_extract(yadisk_file, FILE_EXTENSION)
# ### Парсинг Access Log
# +
# %%time
pool_process = Pool()
log_list = pool_process.map(log_parsser_decorator, log_data)
# -
# ## Вариант 3. Парсинг одним процессом в несколько потоков (thread).
# ### Распаковка zip-архива
if filename.endswith('zip'):
log_data = zipfile_extract(yadisk_file, FILE_EXTENSION)
# ### Парсинг Access Log
# +
# %%time
pool_thread = ThreadPool()
log_list = pool_thread.map(log_parsser_decorator, log_data)
# -
# ### Удаление скачанного файла
del yadisk_file
# ## Создание DataFrame
# %%time
df = pd.DataFrame(log_list)
df.info()
col_list = [
'host',
'identd',
'userid',
'time',
'request_method',
'request',
'protocol',
'status_code',
'size_of_object'
]
df = df[col_list]
df.head()
# ### Удаление данных log_list
del log_list
# ### Преобразование типов данных
format_date = '%d/%b/%Y:%H:%M:%S %z'
df['time'] = to_datetime(df['time'], format_date)
df['size_of_object'] = pd.to_numeric(df['size_of_object'], errors='coerce')
df.info()
# ### Оптимизация размера DateFrame
df.info(memory_usage='deep')
df = df_optimizing(df)
df.info(memory_usage='deep')
# ## Посчитать количество обращений к каждому урлу
url_count = df.groupby('request') \
.agg({'request_method': 'count'}) \
.rename(columns={'request_method': 'count'}) \
.reset_index()
url_count.head()
# ## Найти топ-15 самых посещаемых урлов
url_count.nlargest(15, 'count')
# ## Посчитать число запросов в секунду
requests_per_second = df.groupby('time') \
.agg({'request_method': 'count'}) \
.rename(columns={'request_method': 'count'}) \
.reset_index()
requests_per_second.head()
requests_per_second.nlargest(15, 'count')
# ## Нарисовать график числа запросов в секунду
# ### Loading modules
# +
import matplotlib.pyplot as plt
import seaborn as sns
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
sns.set(rc={'figure.figsize': (9, 6)})
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# -
register_matplotlib_converters()
plt.plot(requests_per_second['time'], requests_per_second['count'])
plt.xlabel('Time')
plt.ylabel('Count')
plt.xticks(rotation=45)
plt.show()
# ## Построить гистограмму распределения размеров запросов
df.size_of_object.hist(log=True)
plt.xticks(rotation='vertical')
plt.show()
| yandex_intern_an_hypercube.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Session : 분석기간(18.04~18.09)내 상품구매한 방문자의 세션에 관한 정보
# - 비회원 / 수집과정에서 누락된 정보 등은 제외되었다고 함. <br><br>
#
# - CLNT_ID : 방문자의 쿠키하나에 부여된 고유 ID (브라우저, 기기가 다르면 ID도 다름)
# - SESS_ID : 세션 ID, 하나의 CLNT_ID에 여러 세션 ID 발급가능
# - SESS_SEQ : 세션 일련번호, 해당 SESS_ID가 CLNT_ID의 몇번째 세션인지
# - SESS_DT : 세션일자 (YYYYMMDD 형식)
# - TOT_PAG_VIEW_CT : 총 페이지조회건수, 세션내 총 페이지 뷰 수, 해당 페이지에 몇번 들어왔냐
# - TOT_SESS_HR_V : 총 세션 시간값(단위: 초), 세션 내 총 시간, 해당 페이지에 머무른 시간..?
# - DVC_CTG_NM : 기기유형, desktop=1 / mobile=2 / tablet=3
# - ZON_NM : 지역 대분류(세션기준), 광역단위
# - CITY_NM : 지역 중분류(세션기준), 도시단위
#
sess = pd.read_csv("../../data/05_Session.csv")
sess.tail()
for i in sess.columns[2:]:
print("column : {}, unique count : {}, data-type : {}".format(i, sess[i].nunique(), sess[i].dtype))
print(sess[i].unique())
print()
sess[sess.CLNT_ID==3573758]
# ### 궁금증
# - 세션ID의 순서를 알 수 있다면, SESS_SEQ=37일때 1~37까지 나와야하는거 아닌가? 중간에 없는건 뭐지? $\rightarrow$ 상품구매한 세션만 기록된 것임(해결)
# - 연령대별로 어떤 기기에서 접속했는지 custom과 조인해보면 알 수 있지 않을까?, 지역도 마찬가지!
# - SESS_DT를 가지고 구매주기를 계산해볼 수 있지 않을까?
# - 위 테이블에서 약 1시간(3,711초)동안 총 26개의 페이지를 보고 구매에 이르렀다는 말인가?
# - SESS_SEQ가 의미가 있을까? 필요할까?
# TOT_PAG_VIEW_CT, TOT_SESS_HR_V 분포를 보자
# TOT_PAG_VIEW_CT 내에 NaN값 처리 어떻게 할 것 인가?
# TOT_SESS_HR_V의 type 형변환 object --> float(NaN 때문에)
sess['TOT_SESS_HR_V'] = sess['TOT_SESS_HR_V'].map(lambda x: float(''.join(x.split(','))) if ',' in str(x) else float(x))
pd.options.display.float_format = '{:.2f}'.format
sess[['TOT_PAG_VIEW_CT', 'TOT_SESS_HR_V']].describe()
# +
# NaN 처리
| code/EDA/1.EDA_Session.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from catboost import CatBoostClassifier
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score, plot_roc_curve, make_scorer, f1_score, roc_auc_score, det_curve
from sklearn import preprocessing
from scipy import stats
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.model_selection import cross_validate, LeaveOneGroupOut, PredefinedSplit, GridSearchCV
import matplotlib.pyplot as plt
import os
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.naive_bayes import CategoricalNB
import json
# %matplotlib inline
# +
def concat_dataframes(path, df_type):
    """Read every per-user CSV under *path* whose name contains *df_type*
    and stack them into one DataFrame with a numeric 'user' column.

    Directory layout is <path>/user_<id>/<...df_type...>.csv; the user id
    comes from the directory name. For every type except 'broadcasts' the
    'timestamp' column is dropped.
    """
    frames = []
    for user_dir in os.listdir(path):
        user_id = int(user_dir.split('_')[1])
        for name in os.listdir(os.path.join(path, user_dir)):
            if df_type not in name:
                continue
            frame = pd.read_csv(os.path.join(path, user_dir, name))
            if df_type != 'broadcasts':
                frame = frame.drop(["timestamp"], axis=1)
            frame["user"] = user_id
            frames.append(frame)
    return pd.concat(frames, ignore_index=True)
def drop_bad_rows(df, z=3):
    """Drop rows that are z-score outliers (|z| > *z*) in any feature
    column, judged both per user and over the whole frame.

    The 'user' column itself is never tested. Returns the reduced frame.
    """
    outliers = set()
    feature_cols = [c for c in df.columns if c != "user"]
    for col in feature_cols:
        for user in df.user.unique():
            user_rows = df.loc[df.user == user, :]
            scores = np.abs(stats.zscore(df.loc[df.user == user, col]))
            outliers.update(user_rows[scores > z].index)
        # Global pass: outliers relative to the full column.
        outliers.update(df[col][np.abs(stats.zscore(df[col])) > z].index)
    return df.drop(list(outliers), axis=0)
def drop_bad_cols(df, z = 3, allowed_proportion = 0.1):
    """Drop near-constant or outlier-dominated feature columns.

    A column is flagged when fewer than *allowed_proportion* of its values
    differ from the mean (globally or for any single user), or when, for
    some user, less than (1 - allowed_proportion) of its values fall
    within *z* standard deviations. The 'user' column is never dropped.

    Returns (reduced frame, list of dropped column names).
    """
    bad_cols = set()
    for col in df.columns:
        if col != "user":
            # Global check: the column is (almost) constant overall.
            if df[df[col] != df[col].mean()].shape[0] < allowed_proportion * df.shape[0]:
                bad_cols.add(col)
            for user in df.user.unique():
                # Per-user check 1: (almost) constant for this user.
                if df.loc[df.user == user, :][df.loc[df.user == user, col] != df.loc[df.user == user, col].mean()].shape[0] < allowed_proportion * df.loc[df.user == user, :].shape[0]:
                    bad_cols.add(col)
                # Per-user check 2 (only when check 1 passed): too many
                # values outside the z-score band for this user.
                elif np.sum(np.abs(stats.zscore(df.loc[df.user == user, col])) < z) < (1 - allowed_proportion) * df.loc[df.user == user, col].shape[0]:
                    bad_cols.add(col)
    df = df.drop(bad_cols, axis=1)
    return df, list(bad_cols)
def extract_delayed_user(df, user_label):
    """Split *df* into (rows of *user_label*, all remaining rows)."""
    mask = df["user"] == user_label
    return df[mask], df[~mask]
def split_users_into_two_classes(df, valid_user_label):
    """Binarise the 'user' column in place: 1 for *valid_user_label*,
    0 for everyone else. Returns the mutated frame.

    NOTE: the two assignments are deliberately sequential — the second
    re-reads the column after the first has zeroed the other users.
    """
    df.loc[df["user"].ne(valid_user_label), "user"] = 0
    df.loc[df["user"].eq(valid_user_label), "user"] = 1
    return df
def get_cv_split(X, y, group_labels, valid_user_label):
    # Build a PredefinedSplit-style fold array from leave-one-group-out
    # splits: samples belonging to the valid user get -1 (always in the
    # training set), every other group gets its own fold index.
    predefined_split_array = np.zeros(group_labels.shape[0])
    i = 0
    test_array = [x for x in range(group_labels.shape[0])]
    for test, _ in LeaveOneGroupOut().split(X, y, group_labels):
        # `diff` = indices NOT in this fold's test set, i.e. the current
        # held-out group (assumes contiguous group labels).
        diff = np.setdiff1d(test_array, test)
        # NOTE(review): two fragile spots here — the slice end `diff[-1]`
        # excludes the group's last sample (diff[-1] + 1 looks intended),
        # and `is np.bool_(True)` relies on NumPy's bool singleton rather
        # than a value comparison. Confirm intent before touching.
        if np.all(group_labels[diff[0] : diff[-1]] == valid_user_label) is np.bool_(True):
            for sample in diff:
                predefined_split_array[sample] = -1
        else:
            for sample in diff:
                predefined_split_array[sample] = i
        i += 1
    return predefined_split_array
def generate_train_dataset(df, user, ex_user, is_SVM = False):
    """Build a shuffled binary train set for *user*, reserving test data.

    All rows of *ex_user* plus ~25% of every other label are set aside and
    returned as a list of frames for later test assembly; the remainder is
    binarised against *user* (0/1, or -1/1 when is_SVM). Returns
    (X, y, list_of_reserved_frames). Sampling is random, so results vary
    between calls.
    """
    df_ = df.copy()
    df_for_test = []
    # The excluded user's rows go entirely to the test pool.
    df__ = df_[df_.labels == ex_user].copy()
    df_for_test.append(df__)
    df_ = df_.drop(df__.index, axis=0)
    for user_ in df_.labels.unique():
        if user_ != ex_user:
            # Reserve roughly a quarter of each remaining label for test.
            test_size = int((0.25 * df_[df_.labels == user_].shape[0]) - 1)
            df__ = df_[df_.labels == user_].sample(test_size).copy()
            df_for_test.append(df__)
            df_ = df_.drop(df__.index, axis=0)
    df_ = split_users_into_two_classes(df_.copy(), user)
    if is_SVM:
        # SVM convention: negative class is -1 instead of 0.
        df_.loc[df_.user == 0, 'user'] = -1
    df_ = df_.drop("labels", axis=1)
    dataset = df_.to_numpy().copy()
    np.random.shuffle(dataset)
    X = dataset[:, :-1]
    y = dataset[:, -1]
    return X, y, df_for_test
def generate_test_dataset(df_list, user, ex_user, is_SVM = False):
    """Assemble a class-balanced test set from the reserved frames.

    Takes equal-sized samples of: the valid *user*, the excluded *ex_user*,
    and everyone else pooled together (part size = smallest of the three),
    then binarises the 'user' column (1 for *user*; 0 or -1 otherwise).
    Returns (X_test, y_test). Sampling is random, so results vary.
    """
    test_df = pd.concat(df_list)
    valid_user_in_test_count = test_df[test_df.labels == user].shape[0]
    ex_user_in_test_count = test_df[test_df.labels == ex_user].shape[0]
    others_in_test_count = [test_df[test_df.labels == x].shape[0]
                        for x in test_df.labels.unique() if x != user and x != ex_user]
    others_test_count = sum(others_in_test_count)
    # Each of the three parts gets the size of the scarcest one.
    part_size = min(valid_user_in_test_count, ex_user_in_test_count)
    if others_test_count <= min(valid_user_in_test_count, ex_user_in_test_count):
        part_size = others_test_count
    new_df_parts = []
    new_df_parts.append(test_df[test_df.labels == user].sample(part_size).copy())
    new_df_parts.append(test_df[test_df.labels == ex_user].sample(part_size).copy())
    new_df_parts.append(test_df[~test_df.labels.isin([user, ex_user])].sample(part_size).copy())
    test_df = pd.concat(new_df_parts)
    test_df.loc[test_df.labels == user, "user"] = 1
    if is_SVM:
        test_df.loc[test_df.labels != user, "user"] = -1
    else:
        test_df.loc[test_df.labels != user, "user"] = 0
    # Diagnostics: class balance of the assembled test set.
    print("True: ", test_df[test_df.user == 1].shape)
    print("Shape: ", test_df.shape)
    for x in test_df.labels.unique():
        print("Count ", x, ": ", test_df[test_df.labels == x].shape)
    test_df = test_df.drop("labels", axis=1)
    test_dataset = test_df.to_numpy().copy()
    X_test = test_dataset[:, :-1].copy()
    y_test = test_dataset[:, -1].copy()
    return X_test, y_test
def prepare_dataset(df, user, is_SVM=False):
    """Binarise labels against *user* and split into arrays.

    Returns (X, y, group_labels) where group_labels preserves the original
    per-user labels for grouped cross-validation. When is_SVM, the
    negative class is encoded as -1 instead of 0.
    """
    df_ = split_users_into_two_classes(df.copy(), user)
    # Keep the original labels before dropping them — needed for CV groups.
    group_labels = df_.labels.to_numpy().copy()
    df_ = df_.drop('labels', axis=1)
    if is_SVM:
        df_.loc[df_.user == 0, 'user'] = -1
    dataset = df_.to_numpy().copy()
    X = dataset[:, :-1]
    y = dataset[:, -1]
    return X, y, group_labels
def create_file_for_results(data_type):
    """Ensure '<res_folder>/<data_type>_results.json' exists and return its path.

    A missing file is seeded with a stub JSON payload so later readers
    always find valid JSON.
    """
    res_folder = '.\\_results'
    # exist_ok avoids the race between an exists() check and makedirs(),
    # and replaces the non-idiomatic `... is False` identity test.
    os.makedirs(res_folder, exist_ok=True)
    file = os.path.join(res_folder, data_type + '_results.json')
    if not os.path.exists(file):
        with open(file, 'w') as f:
            json.dump({'stub': None}, f)
    return file
def update_file_with_results(file_path, results_dict):
    """Deep-merge *results_dict* into the JSON file at *file_path*.

    Nested mappings are merged recursively; non-mapping values in
    *results_dict* overwrite existing entries. The file is rewritten
    sorted and indented.
    """
    import collections.abc

    def deep_merge(base, extra):
        # Fold *extra* into *base* recursively, mutating and returning it.
        for key, value in extra.items():
            if isinstance(value, collections.abc.Mapping):
                base[key] = deep_merge(base.get(key, {}), value)
            else:
                base[key] = value
        return base

    with open(file_path, 'r') as f:
        merged = deep_merge(json.load(f), results_dict)
    with open(file_path, 'w') as f:
        json.dump(merged, f, sort_keys=True, indent=2)
def get_dict_with_results(json_path):
    """Load and return the JSON results stored at *json_path*."""
    with open(json_path, 'r') as handle:
        return json.load(handle)
def get_dataframe(path, data_type, window_type, window_size):
    # Convenience wrapper: features live under <path>/<window_type>/<window_size>/.
    return concat_dataframes(os.path.join(path, window_type, window_size), data_type)
def drop_corr_columns(df, corr_coef):
    """Drop every feature column whose absolute correlation with an
    earlier column exceeds *corr_coef* ('user' is always kept).

    Returns (reduced frame, list of dropped column names).
    """
    corr_matrix = df.corr().abs()
    # FIX: np.bool was removed in NumPy 1.24; the builtin bool is the
    # documented replacement and behaves identically here.
    upper_tri = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
    corr_cols = [column for column in upper_tri.columns
                 if any(abs(upper_tri[column]) > corr_coef) and column != "user"]
    return df.drop(corr_cols, axis=1), corr_cols
def process_train_df(df, features, corr = 0.7, z = 3, prop = 0.1):
    """Full cleaning pipeline for a raw feature frame.

    Keeps only *features*, fills NaNs with 0, removes zero-count windows,
    then drops z-score outlier rows, bad columns, and highly correlated
    columns. Returns (clean frame, list of all dropped column names).
    """
    # Restrict to the requested feature set.
    df = df.drop(df.columns.difference(features), axis=1)
    df = df.dropna(how='all')
    df = df.fillna(0)
    if 'count_mean' in df.columns:
        # Windows with no observations carry no signal.
        df = df[df.count_mean != 0]
    df = drop_bad_rows(df, z)
    df, dropped_cols_1 = drop_bad_cols(df, z, prop)
    df, dropped_cols_2 = drop_corr_columns(df, corr)
    return df, dropped_cols_1 + dropped_cols_2
# +
DATA_PATH = '..\\scripts\\_features_all'
DATA_TYPE = "location"
WINDOW_TYPE = "rolling"
WINDOW_SIZE = "120s"
# -
df = get_dataframe(DATA_PATH, DATA_TYPE, WINDOW_TYPE, WINDOW_SIZE)
features = df.columns.to_list()
df, _ = process_train_df(df, features)
features = df.columns.to_list()
def drop_corr_columns(df, corr_coef):
    """Drop every feature column whose absolute correlation with an
    earlier column exceeds *corr_coef* ('user' is always kept).

    NOTE: this re-defines the identically-named helper above; kept in the
    notebook flow, but one copy should eventually be removed.
    """
    corr_matrix = df.corr().abs()
    # FIX: np.bool was removed in NumPy 1.24; use the builtin bool.
    upper_tri = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
    corr_cols = [column for column in upper_tri.columns
                 if any(abs(upper_tri[column]) > corr_coef) and column != "user"]
    return df.drop(corr_cols, axis=1), corr_cols
df, _ = drop_corr_columns(df, 0.7)
fig = plt.figure(figsize=(25, 25))
plt.matshow(df.corr(), fignum=fig.number)
plt.xticks(range(df.shape[1]), df.columns, fontsize=18, rotation=90)
plt.yticks(range(df.shape[1]), df.columns, fontsize=18)
cb = plt.colorbar()
cb.ax.tick_params(labelsize=16)
plt.title("Correlation matrix", fontsize=20, y=-0.03)
plt.savefig("corr_after.png")
features
df
sample = df.sample(50000)
plt.figure(figsize=(16, 10))
plt.scatter(x=sample['disappeared_devices_count_mean_mad'], y=sample['jaccard_index_var_skew'], alpha=0.5, c=sample.user, cmap='magma')
plt.colorbar()
plt.show()
df = (df - df.min()) / (df.max() - df.min())
# +
# fig = plt.figure(figsize=(20, 25))
# plt.matshow(df.corr(), fignum=fig.number)
# plt.xticks(range(df.shape[1]), df.columns, fontsize=18, rotation=90)
# plt.yticks(range(df.shape[1]), df.columns, fontsize=18)
# cb = plt.colorbar()
# cb.ax.tick_params(labelsize=16)
# plt.title("Correlation matrix", fontsize=20, y=-0.03)
# +
# sample = df.sample(1000)
# plt.figure(figsize=(16, 10))
# plt.scatter(x=sample['conn_level_mean'], y=sample['count_var'], alpha=0.5, c=sample.user, cmap='magma')
# plt.colorbar()
# plt.show()
# +
# for user in df.user.unique():
# for valid_user in df.user.unique():
# if user != valid_user:
# print('---------------------------------------------------------------------------')
# print('Valid user: ', valid_user, 'Extracted user: ', user)
# print('---------------------------------------------------------------------------')
# df1, df_ = extract_delayed_user(df.copy(), user)
# df1['user'] = 0
# df_ = split_users_into_two_classes(df_.copy(), valid_user)
# df_ = resample(df_)
# dataset = df_.to_numpy()
# X = dataset[:, :-1]
# y = dataset[:, -1]
# X_test = df1.to_numpy()[:, :-1]
# y_test = df1.to_numpy()[:, -1]
# model = CatBoostClassifier(iterations=100, depth=6, loss_function='Logloss')
# model.fit(X, y, verbose=False)
# preds_class = model.predict(X_test)
# print('Accuracy: ', accuracy_score(preds_class, y_test))
# sum_ = 0
# imp = [ (x, i) for x, i in zip(model.feature_importances_, range(len(model.feature_importances_)) )]
# sorted_ = sorted(imp, key=lambda tup: tup[0])
# for i in range(len(sorted_)):
# if sorted_[i][0] > 5:
# print(sorted_[i][1], ': ', df_.columns[sorted_[i][1]], ' - ', sorted_[i][0])
# print('---------------------------------------------------------------------------')
# print('---------------------------------------------------------------------------')
# +
# for d in [VALIDATION_CATBOOST_BIG_DICT, VALIDATION_RFC_BIG_DICT, \
# VALIDATION_SVC_BIG_DICT, VALIDATION_LR_BIG_DICT]:
# for user, res in d.items():
# print("Valid User: ", user)
# print("--------------------------------------------------------------------------------")
# means_acc = []
# means_prec = []
# means_rec = []
# means_roc = []
# means_f1 = []
# for ex_user, ex_res in res.items():
# print('Ex user: ', ex_user)
# print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
# y_true = ex_res['y_test']
# y_pred = ex_res['y_predict']
# if len(ex_res['y_proba'].shape) > 1 and ex_res['y_proba'].shape[1] > 1:
# y_proba = ex_res['y_proba'][:, 1]
# else:
# y_proba = ex_res['y_proba']
# acc = accuracy_score(y_true, y_pred)
# prec = precision_score(y_true, y_pred)
# rec = recall_score(y_true, y_pred)
# f1 = f1_score(y_true, y_pred)
# means_acc.append(acc)
# means_prec.append(prec)
# means_rec.append(rec)
# means_f1.append(f1)
# print('Accuracy: ', acc)
# print('Precision: ', prec)
# print('Recall: ', rec)
# try:
# roc = roc_auc_score(y_true, y_proba)
# means_roc.append(roc)
# print('ROC-AUC: ', roc)
# except Exception as e:
# print('ROC-AUC: skip')
# print('F1: ', f1)
# print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
# print('Mean accuracy: ', sum(means_acc) / len(means_acc))
# print('mean Precision: ', sum(means_prec) / len(means_prec))
# print('mean Recall: ', sum(means_rec) / len(means_rec))
# if len(means_roc) > 0:
# print('mean ROC-AUC: ', sum(means_roc) / len(means_roc))
# print('mean F1: ', sum(means_f1) / len(means_f1))
# print("--------------------------------------------------------------------------------")
# +
# gs_df_parts = []
# for user in df.labels.unique():
# new_df = df[df.labels == user].sample(int(df[df.labels == user].shape[0] * 0.2)).copy()
# gs_df_parts.append(new_df)
# df = pd.concat(gs_df_parts)
# +
# sample = df.sample(10000)
# fig, ax = plt.subplots(figsize=(12, 9))
# scatter = ax.scatter(x=sample['disappeared_devices_count_mean_mad'], y=sample['jaccard_index_var_skew'], \
# alpha=0.5, c=sample.user, cmap='plasma')
# # produce a legend with the unique colors from the scatter
# plt.rcParams['legend.title_fontsize'] = 'x-large'
# legend1 = ax.legend(*scatter.legend_elements(), loc="upper right", title="Users", fontsize=14)
# ax.add_artist(legend1)
# plt.xlabel('MAD среднего числа исчезнувших устройств', fontsize=12)
# plt.ylabel('Skew дисперсии расстояния Жаккара', fontsize=12)
# plt.savefig('.\\after.png', dpi=500)
# plt.show()
# +
# C = 1
# kernel = 'rbf'
# degree = 1
# gamma = 5
# user = 8
# df['labels'] = df['user']
# df_ = resample(df.copy())
# df_ = split_users_into_two_classes(df_.copy(), user)
# df_ = resample(df_)
# df_.loc[df_.user == 0, 'user'] = -1
# df_ = df_.drop('labels', axis=1)
# model = SVC(C=C, kernel=kernel, degree=degree, gamma=gamma)
# +
# import numpy as np
# import matplotlib.pyplot as plt
# from matplotlib.colors import ListedColormap
# from sklearn.model_selection import train_test_split
# from sklearn.preprocessing import StandardScaler
# from sklearn.datasets import make_moons, make_circles, make_classification
# from sklearn.neural_network import MLPClassifier
# from sklearn.neighbors import KNeighborsClassifier
# from sklearn.svm import SVC
# from sklearn.gaussian_process import GaussianProcessClassifier
# from sklearn.gaussian_process.kernels import RBF
# from sklearn.tree import DecisionTreeClassifier
# from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
# from sklearn.naive_bayes import GaussianNB
# from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# h = .02 # step size in the mesh
# figure = plt.figure(figsize=(27, 9))
# i = 1
# ds = df_.to_numpy().copy()
# np.random.shuffle(ds)
# X = ds[:, :-1]
# y = ds[:, -1]
# X_train = X[:10000, [14, 31]]
# y_train = y[:10000]
# X_test = X[160000:, [14, 31]]
# y_test = y[160000:]
# x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
# y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
# xx, yy = np.meshgrid(
# np.arange(x_min, x_max, h),
# np.arange(y_min, y_max, h))
# cm = plt.cm.RdBu
# cm_bright = ListedColormap(['#FF0000', '#0000FF'])
# ax = plt.subplot(1, 2, 1)
# ax.set_title("Input data")
# ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
# edgecolors='k')
# ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
# edgecolors='k')
# ax.set_xlim(xx.min(), xx.max())
# ax.set_ylim(yy.min(), yy.max())
# ax.set_xticks(())
# ax.set_yticks(())
# ax = plt.subplot(1, 1 + 1, 1)
# model.fit(X_train, y_train)
# score = model.score(X_test, y_test)
# if hasattr(model, "decision_function"):
# Z = model.decision_function(np.c_[xx.ravel(), yy.ravel()])
# else:
# Z = model.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Z = Z.reshape(xx.shape)
# ax.contourf(xx, yy, Z, cmap=cm, alpha=.4)
# ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
# edgecolors='k')
# ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
# edgecolors='k', alpha=0.6)
# ax.set_xlim(xx.min(), xx.max())
# ax.set_ylim(yy.min(), yy.max())
# ax.set_xticks(())
# ax.set_yticks(())
# # ax.set_title(name)
# ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
# size=15, horizontalalignment='right')
# plt.tight_layout()
# plt.show()
# +
# fpr, tpr, threshold = roc_curve(y_true, y_proba)
# roc_auc = auc(fpr, tpr)
# plt.title('ROC-curve')
# plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
# plt.legend(loc = 'lower right')
# plt.plot([0, 1], [0, 1],'r--')
# plt.xlim([0, 1])
# plt.ylim([0, 1])
# plt.ylabel('True Positive Rate')
# plt.xlabel('False Positive Rate')
# plt.savefig("..\\")
| src/notebooks/vizualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pandas 基础
# ## Pandas介绍
# 1. Pandas是什么?
#    Pandas是一个处理数据集数据的工具, 主要用于数据挖掘数据分析的开源Python库
# 2. 为什么要用Pandas?
#    便捷的数据处理能力
#    文件读取方便
#    封装了Matplotlib, Numpy的画图和计算
import pandas as pd
import numpy as np
# 创建一个符合正太分布的10个股票5天的涨跌幅数据
stock_change = np.random.normal(size=[5, 10])
stock_change
# 使用Pandas中的数据结构
stock_day_rise = pd.DataFrame(stock_change)
stock_day_rise
# 构造行索引序列
stock_code = ['股票{}'.format(i) for i in range(stock_day_rise.shape[0])]
stock_code
# 添加行索引
data = pd.DataFrame(stock_day_rise, index=stock_code)
data
# 添加列索引
# 生成一个时间序列, 略过周末非交易日
date = pd.date_range('2019-3-21', periods=stock_day_rise.shape[1], freq='B')
date
# index代表行索引, columns代表列索引
data = pd.DataFrame(stock_change, index=stock_code, columns=date)
data
# ### DataFrame介绍
# DataFrame是数据容器, 二维数组
# DataFrame的结构, index行索引, columns列索引, values数据
# DataFrame的常用属性和方法
# 属性: index, columns, values, shape, T
# 方法: head(num), tail(num)
data.shape
data.index
data.columns
data.T
data.values
data.head(3)
data.tail(3)
# DataFrame的索引设置
# 1. data_df.reset_index() # 重置行索引, 默认保留旧索引, drop=True不保留旧索引
# 2. data_df.index # 直接替换行索引
# 3. data_df.set_index # 指定某行为行索引
# 4. data_df = pd.DataFrame(data, index=code) # 创建DataFrame时指定行索引或者列索引
data.reset_index()
data.reset_index(drop=True)
data.index = ['中证股{}'.format(i) for i in range(data.shape[0])]
data
data.set_index(data.columns[0])
# 设置多个索引
data.set_index([data.columns[0], data.columns[1]])
# ### Series介绍
# Series数据容器 一维数组
# Series的结构 index, values
# 创建Series:
# 指定内容, 默认索引 pd.Series(np.arange(10))
# 指定索引 pd.Series(np.random.rand(10, 20, 10), np.arange(10))
# 通过字典数据创建 pd.Series({'red':100, 'blue':200, 'green': 500, 'yellow':1000})
pd.Series(range(10))
type(pd.Series(range(10)))
pd.Series(np.random.rand(10), range(10))
pd.Series({'red':100, 'blue':200, 'green': 500, 'yellow':1000})
# ### 索引操作
# 结合loc或者iloc使用索引(先行后列)(推荐使用)
# loc: 使用索引名字进行切片
# iloc: 使用索引下标进行切片
# 读取文件
data = pd.read_csv("./data/stock_day.csv")
data.head(3)
data['open']['2018-02-27']
data['open'].head()
# 通过索引名获取数据, 先行后列
data.loc['2018-02-27':'2018-02-23', 'open']
# 通过索引下标获取数据
data.iloc[0:2, 1:3]
# 排序操作
# df.sort_values(by, ascending) 按某一列进行排序
# df.sort_index(ascending) 对行索引进行排序
# ascending=False 降序
# ascending=True 升序 (默认)
data.sort_index().head(5)
data.sort_index(ascending=False).head(5)
data.sort_values(by='p_change').head(5)
data.sort_values(by=['open', 'high']).head()
data['p_change'].sort_index().head()
# ### DataFrame运算
# DataFrame运算
# 算术运算
#     df.add(number) 加上一个具体的数字
#     df.sub() 差, 两列相减
#     df.mul() 乘
#     df.div() 除
#     主要用于按列按行对数据进行处理
# 逻辑运算: 返回逻辑结果True或False
#     逻辑运算符号 <、>、|、&
#     逻辑运算函数
#         df.query(expr) expr为逻辑判断表达式的字符串
#         df.isin(values)
# 统计运算
#     df.describe() 综合分析, 能够直接得出很多统计结果: count, mean, std, min, max 等
#     统计函数
#         df.mode(axis): 求众数(数据中出现次数最多的数)
#         df.prod(axis): 求乘积, 将所有数据乘起来
#         df.median(axis): 求中位数, 排在所有数据中间那个数的值
#         max, min, std, var, mean
#         idxmax: 求出最大值的位置
#         idxmin: 求出最小值的位置
#         25%、75% 分位数
#     累计统计函数
#         df.cumsum(): 累计和
#         df.cummax(): 累计最大值
#         df.cummin(): 累计最小值
#         df.cumprod(): 累计乘积
# 自定义运算
#     df.apply(func, axis=0) 设置按行处理还是按列处理
#     axis=0 按列处理, axis=1 按行处理
data.head()
data['open'].add(1).head()
data['high'].sub(data['low']).head()
data['open'].mul(data['price_change']).head()
data['open'].div(data['price_change']).head()
# ## 基本数据操作
# ## DataFrame运算
# ## Pandas画图
# ## 文件读取与存储
# # Pandas高级数据处理
# ## 缺失值的处理
# ## 数据离散化
# ## 数据合并
# ## 交叉表透视表
# ## 分组与聚合
| .ipynb_checkpoints/Pandas-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pipeline
# <div class="alert alert-info">
#
# This tutorial is available as an IPython notebook at [malaya-speech/example/pipeline](https://github.com/huseinzol05/malaya-speech/tree/master/example/pipeline).
#
# </div>
# ### Common way
# We write python code like,
# +
x = 1
def foo(x):
    """Single-step pipeline: return x incremented by one."""
    incremented = x + 1
    return incremented
foo(x)
# -
# This is simple, x -> a(x) -> y, If I want to add b after a and b depends on a,
# +
x = 1
def foo(x):
    """Two chained steps: x -> x + 1 -> (x + 1) + 2."""
    step_a = x + 1
    step_b = step_a + 2
    return step_b
foo(x)
# -
# x -> a(x) -> b(x) -> y, If I need both value from a, b, just simply returned a and b.
# +
x = 1
def foo(x):
    """Two chained steps, returning both intermediate values (a, b)."""
    step_a = x + 1
    step_b = step_a + 2
    return step_a, step_b
foo(x)
# -
# x -> a(x) -> y
#
# y -> b(x) -> y1
# Now, if I want to make a branch, a -> b -> c, b -> d -> e,
# +
x = 1
def foo(x):
    """Branching chain: a feeds b; b branches into c and (d -> e).

    Returns every intermediate value (a, b, c, d, e).
    """
    step_a = x + 1
    step_b = step_a + 2
    branch_c = step_b + 3
    branch_d = step_b - 1
    branch_e = branch_d - 2
    return step_a, step_b, branch_c, branch_d, branch_e
foo(x)
# -
# These still look simple, what if,
#
# `e` as element, `L` as list of elements,
#
# x\[e\] -> a(x)-> y\[l\] -> batch every 3 elements (l\[e,e,e\], ..) -> loop every element apply z(x) -> flatten.
#
# And do not forget, returned all steps.
# +
x = 10
def foo(x):
    """Manual pipeline: x -> range list -> batches of 3 -> (sum, mean)
    per batch -> flattened list of those tuples.

    Returns every intermediate stage: (elements, batches, stats, flat).
    """
    elements = [value for value in range(x)]

    def make_batches(seq, size):
        # Slice *seq* into consecutive chunks of *size* (last may be short).
        return [seq[start: start + size] for start in range(0, len(seq), size)]

    batches = make_batches(elements, 3)

    def summarize(group):
        # (sum, mean) of one batch.
        return (sum(group), sum(group) / len(group))

    batch_stats = [summarize(group) for group in batches]
    flat = [value for pair in batch_stats for value in pair]
    return elements, batches, batch_stats, flat
foo(x)
# -
# When the code grow, it is very hard to understand especially when we have multiple branches and elements to list and vice versa.
#
# So **malaya-speech Pipeline comes to help!**
# ### Initiate pipeline
# Now, I want to do the same,
#
# x\[e\] -> a(x)-> y\[l\] -> batch every 3 elements (l\[e,e,e\], ..) -> loop every element apply z(x) -> flatten.
#
# Using malaya-speech Pipeline.
# +
from malaya_speech import Pipeline
p = Pipeline()
def a(x):
return [i for i in range(x)]
def z(x):
return (sum(x), sum(x) / len(x))
pipeline = (
p.map(a).batching(3).foreach_map(z).flatten()
)
# -
# Yep, simple as that! Do not worry, we will look into each interfaces later.
# ### Pipeline visualization
p.visualize()
# So now, we can understand what our pipeline trying to do.
result = p.emit(x)
result
# So the results are pretty same and malaya-speech Pipeline will returned dictionary type.
# +
def a(x):
return [i for i in range(x)]
def left(x):
return (sum(x), sum(x) / len(x))
def right(x):
return [i * i for i in x]
p = Pipeline()
batched = p.map(a).batching(3)
pipe_left = batched.foreach_map(left)
pipe_right = batched.foreach_map(right)
combined = pipe_left.zip(pipe_right).flatten()
p.visualize()
# -
result = p.emit(x)
result
# ### Interface
# #### map
#
# `map` is simply apply a function for input.
p = Pipeline()
p.map(lambda x: x + 1)
p.emit(2)
p = Pipeline()
p.map(lambda x: x + 1, name = 'plus')
p.emit(2)
# +
def x(a, b):
    # Add the constant keyword argument *b* to each pipeline element *a*.
    return a + b
p = Pipeline()
p.map(x, b = 3, name = 'plus')
p.emit(2)
# -
# #### batching
#
# `batching` is simply batch elements to size of N.
p = Pipeline()
p.batching(2)
p.emit([1,2,3,4,5])
p = Pipeline()
p.batching(2).batching(2)
p.emit([1,2,3,4,5])
# As you can see, `batching` only returned last `batching` because the key in dictionary is duplicate. So to prevent any duplicate,
p = Pipeline()
p.batching(2).batching(2, name = 'second')
p.emit([1,2,3,4,5])
# #### foreach_map
#
# `foreach_map` is simply apply a function for each elements in a list.
# +
p = Pipeline()
def x(a):
    # Square one element of the generated list.
    return a * a
p.map(lambda x: [i for i in range(x)], name = 'generate').foreach_map(x)
p.emit(5)
# +
p = Pipeline()
def x(a):
    # Reduce one batch to its sum.
    return sum(a)
p.map(lambda x: [i for i in range(x)], name = 'generate').batching(2).foreach_map(x)
p.emit(9)
# -
# `foreach_map` also provide different methods to process the elements,
#
# ```python
# class foreach_map(Pipeline):
# """
# Apply a function to every element in a tuple in the stream.
#
# Parameters
# ----------
# func: callable
# method: str, optional (default='sync')
# method to process each elements.
#
# * ``'sync'`` - loop one-by-one to process.
# * ``'async'`` - async process all elements at the same time.
# * ``'thread'`` - multithreading level to process all elements at the same time.
# Default is 1 worker. Override `worker_size=n` to increase.
# * ``'process'`` - multiprocessing level to process all elements at the same time.
# Default is 1 worker. Override `worker_size=n` to increase.
#
# *args :
# The arguments to pass to the function.
# **kwargs:
# Keyword arguments to pass to func.
# ```
#
# Default is `sync`. If,
#
# 1. `async`, you need to install tornado,
#
# ```bash
# pip3 install tornado
# ```
#
# 2. `thread`, you need to install dask,
#
# ```bash
# pip3 install dask
# ```
#
# **We only provided single-machine dask processing**.
#
# 3. `process`, you need to install dask,
#
# ```bash
# pip3 install dask
# ```
#
# **We only provided single-machine dask processing**.
# +
import time
p = Pipeline()
def x(a):
    # Simulate an expensive task: block one second, then reduce the batch.
    time.sleep(1)
    return sum(a)
p.map(lambda x: [i for i in range(x)], name = 'generate').batching(2).foreach_map(x)
# +
# %%time
p.emit(9)
# +
p = Pipeline()
def x(a):
    # Same simulated 1-second task, here run with method='async'.
    time.sleep(1)
    return sum(a)
p.map(lambda x: [i for i in range(x)], name = 'generate').batching(2).foreach_map(x, method = 'async')
# +
# %%time
p.emit(9)
# -
# **asynchronous != concurrency**.
# +
# method='thread': batches run via dask's threaded scheduler (default 1 worker).
p = Pipeline()
def x(a):
    # Simulated slow per-batch computation.
    time.sleep(1)
    return sum(a)
p.map(lambda x: [i for i in range(x)], name = 'generate').batching(2).foreach_map(x, method = 'thread')
# +
# %%time
p.emit(9)
# -
# Try to increase the worker size, `worker_size=n`.
# +
# method='thread' with worker_size=5: up to 5 batches processed concurrently.
p = Pipeline()
def x(a):
    # Simulated slow per-batch computation.
    time.sleep(1)
    return sum(a)
p.map(lambda x: [i for i in range(x)], name = 'generate').batching(2)\
    .foreach_map(x, method = 'thread', worker_size = 5)
# +
# %%time
p.emit(9)
# -
# **We do not suggest using the threading level to process a very compute-heavy function; use the processing level instead**.
# +
# method='process' with worker_size=5: batches run in separate processes via dask.
p = Pipeline()
def x(a):
    # Simulated slow per-batch computation.
    time.sleep(1)
    return sum(a)
p.map(lambda x: [i for i in range(x)], name = 'generate').batching(2)\
    .foreach_map(x, method = 'process', worker_size = 5)
# +
# %%time
p.emit(9)
# -
# **Perfecto!**
# #### partition
#
# `partition` simply groups multiple `emit` calls into a tuple of size N. After each successful partition, the N count resets.
# Group every 3 emitted values into one tuple, then sum that tuple.
p = Pipeline()
p.partition(3).map(lambda x: sum(x), name = 'sum')
p.emit(1)
# Nothing happens on the first emit: `partition` only proceeds once N values
# have accumulated.
p.emit(2)
p.emit(3)
p.emit(4) # not yet, N count reset, returned last state
p.emit(5) # not yet, N count reset, returned last state
p.emit(6)
# #### sliding_window
#
# `sliding_window` groups the most recently emitted values into a sliding tuple of size N.
# Keep a sliding window over the last 3 emitted values and sum it.
p = Pipeline()
p.sliding_window(3).map(lambda x: sum(x), name = 'sum')
p.emit(1)
p.emit(2)
p.emit(3)
p.emit(4)
# If you want exact size of N,
# With return_partial=False the window only fires once it holds exactly N values.
p = Pipeline()
p.sliding_window(3, return_partial = False).map(lambda x: sum(x), name = 'sum')
p.emit(1)
p.emit(2)
p.emit(3)
p.emit(4)
# #### flatten
#
# `flatten` simply flattens a nested list.
# +
# Batch into pairs, then flatten the nested batches back into a flat stream.
p = Pipeline()
p.map(lambda x: [i for i in range(x)], name = 'generate').batching(2).flatten()
p.emit(10)
# +
# Two levels of batching need two flatten steps to fully unnest.
p = Pipeline()
p.map(lambda x: [i for i in range(x)], name = 'generate').batching(2).batching(3).flatten().flatten(name = 'nested')
p.emit(10)
# -
# #### zip
#
# `zip` is to combine 2 branches into 1 branch.
# +
# Fork the stream into two branches, then zip them back into one combined branch.
p = Pipeline()
left = p.map(lambda x: x + 1, name = 'left')
right = p.map(lambda x: x + 10, name = 'right')
left.zip(right).map(sum)
p.visualize()
# -
p.emit(2)
# +
# Zip two list-producing branches, then flatten the zipped pair of lists.
p = Pipeline()
left = p.map(lambda x: [i + 1 for i in range(x)], name = 'generate_left')
right = p.map(lambda x: [i + 10 for i in range(x)], name = 'generate_right')
left.zip(right).flatten()
p.visualize()
# -
p.emit(5)
# #### foreach_zip
#
# `foreach_zip` is like `zip`: it combines 2 branches into 1 branch, but zips the left- and right-hand sides element by element.
# +
# foreach_zip pairs the branches element-by-element; sum each (left, right) pair.
p = Pipeline()
left = p.map(lambda x: [i + 1 for i in range(x)], name = 'generate_left')
right = p.map(lambda x: [i + 10 for i in range(x)], name = 'generate_right')
left.foreach_zip(right).foreach_map(sum)
p.visualize()
# -
p.emit(5)
| docs/load-pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''Questionnaire'': conda)'
# name: python3
# ---
from anim_maker import *
# Connect to the Cinema 4D SMPL-H animation-maker bridge on the given port.
maker = C4DSMPLHAnimMaker(PORT = 3008)
# init: set up the scene and register the SMPL-H joint hierarchy
maker.Initialize()
maker.RegisterJoints()
# load AMASS motion-capture pose data
import numpy as np
#amass_npz_fname = '../../../thirdParty/amass/support_data/github_data/amass_sample.npz' # the path to body data
amass_npz_fname = '../../../thirdParty/accad/C2 - Run to stand_poses.npz' # the path to body data
bdata = np.load(amass_npz_fname)
bdata['poses'][0].shape
# Key-frame joints 1..21: copy per-frame rotations from the AMASS pose data
# onto the corresponding C4D "f_avg_*" joint curves, then insert the tracks.
# NOTE(review): each joint is assumed to occupy 3 consecutive values (X, Y, Z
# rotation) in the flattened `poses` row — confirm against the AMASS layout.
for i in tqdm(range(1, 22)):
    # Fix: joint_name was previously recomputed identically inside the frame
    # loop (dead store); compute it once per joint.
    joint_name = "f_avg_" + SMPL_H_SKELETON[i]
    for frame in range(0, 21, 10):  # sample frames 0, 10, 20
        # get rotation components for joint i at this frame from the body data
        rotateX, rotateY, rotateZ = bdata['poses'][frame][i*3:(i+1)*3]
        maker.SetOneKeyFrame(joint_name + "_curveRX", frame, rotateX)
        time.sleep(0.05)  # throttle so the C4D command bridge keeps up
        maker.SetOneKeyFrame(joint_name + "_curveRY", frame, rotateY)
        time.sleep(0.05)
        # Z is negated — presumably an axis-handedness conversion between
        # AMASS and C4D; TODO confirm.
        maker.SetOneKeyFrame(joint_name + "_curveRZ", frame, -rotateZ)
        time.sleep(0.05)
    maker.controller.SendCommand("{}.InsertTrackSorted({}_rX)".format(joint_name, joint_name))
    time.sleep(0.05)
    maker.controller.SendCommand("{}.InsertTrackSorted({}_rY)".format(joint_name, joint_name))
    time.sleep(0.05)
    maker.controller.SendCommand("{}.InsertTrackSorted({}_rZ)".format(joint_name, joint_name))
    time.sleep(0.05)
| genmotion/render/c4d/c4d set anim2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Digging deep with Numpy
# * Two dimensional array
# * checking shape, type and flatening the array
# * Random, rand and randn
# * Indexing and slicing the array
#
# getting to know the array
# multidimensional array
# Getting to know the array: build a 3x2 multidimensional array.
import numpy as np
multi_array = np.array(([1,2],[3,4],[5,6]))
multi_array.ndim # ndim shows the number of dimensions of the array
# Checking the data type of the elements in the 2D array.
multi_array.dtype # dtype shows the element data type
# In case we want to store the data as floats instead of integers:
transform_data = np.array([[1,2],[3,4],[5,6]], dtype = np.float64)
transform_data
transform_data.shape # shape gives the size along each dimension:
# 3 is the number of rows,
# 2 stands for the columns.
# +
# Flattening the array from two dimensions to one dimension.
# ravel is used to flatten the array, i.e. collapse the data to one dimension.
transform_data.ravel() # we can see it has become one-dimensional.
# -
# ### For getting two-dimensional data structures down to 1-D, we can also use the FLATTEN function of NumPy. Please check that out.
# ## Random
#
# NumPy also has lots of ways to create random number arrays:
#
# ### rand
# Create an array of the given shape and populate it with
# random samples from a uniform distribution
# over ``[0, 1)``.
np.random.rand(2)
np.random.rand(5,5)
# ### randn
#
# Return a sample (or samples) from the "standard normal" distribution, unlike rand which is uniform:
np.random.randn(2)
np.random.randn(5,5)
np.random.randint(1,100,10) # 10 random integers drawn from [1, 100)
# ## Indexing and Slicing
# Accessing a single element.
x1 = np.array([5,6,7,8,9])
x1[0] # yes, at position zero 5 is present
x1[4] # likewise, at the fourth position it is 9
# For the first time we are adding a multi-dimensional feature.
x2 = np.random.randint(10, size=(3, 4))
x2 # items can be accessed using a comma-separated tuple of indices:
x2[0, 0] # gives us the element at the initial (top-left) position
# Now let us get some middle position.
x2[1, 2] # before the comma is the row; after the comma, the column
# Exercise: guess what we should write to get the very bottom value towards the left-hand side.
x2[ , ] # intentionally incomplete — fill in the row and column indices (SyntaxError until completed)
# Adding another feature: modifying the two-dimensional dataset.
x2[0, 0] = 12
x2
# +
# Slicing with NumPy. For one-dimensional arrays it is the same as for lists.
x = np.arange(10)
x
# -
# Getting the first five elements.
x[:5] # first five elements
# Exercise: if I want the elements between index 4 and 7, what should the syntax be?
x[:]
# Let us jump on to multi-dimensional subarrays.
x2
x2[:2, :3]
x2[:3, ::2] # all rows, every other column
# ### Please check out the below link for further understanding!
#
# [tutorials on numpy](https://www.datacamp.com/community/tutorials/python-numpy-tutorial)
#
| 01-Sep-2018/Numpy_Part2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Kat
# #### BUAD 5112, M3: Time Series Decomposition Assignment
# #### September 19, 2021
# Load packages needed
# %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# ## Analyzing Private Construction with Time Series Decomposition
# +
# Private Construction: Load and visualize the data
dfCS = pd.read_csv('ConstructionTimeSeriesDataV2.csv')
fig, ax = plt.subplots()
ax.plot(dfCS['Private Construction'], label = 'Private Construction Sales')
ax.set_xlabel('Month')
ax.set_ylabel('Spending')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# -
# A quick look at the data shows us it is non-linear. There are "component patterns" that look like waves, which indicates seasonality. We would want to research what is causing those patterns in the data. If we decompose the data, we can forecast the future and how the components will repeat.
#
# Next we compute the moving average of each point for a data window. The window size used here is 36, and the average squared error is computed in order to help evaluate which window size is appropriate for the moving average.
#
# +
# Private Construction: Compute moving average
def sqErr(row):
    """Squared error between actual private-construction spending and its moving average for one row."""
    diff = row['Private Construction'] - row['MovAvg']
    return diff ** 2
# Prepare columns, index by month, and compute a centered 36-month moving average.
dfCS['MovAvg'] = np.nan  # np.nan: the np.NaN alias was removed in NumPy 2.0
dfCS['sqErr'] = np.nan
# Changing the DataFrame index to DatetimeIndex data type is required for using one of the functions below
# NOTE(review): freq='m' — confirm it resolves to month-end in the installed pandas; newer pandas prefers 'ME'.
dfCS.index = pd.date_range(freq = 'm', start = pd.Timestamp(year = 2002, month = 1, day = 2), periods = len(dfCS['Private Construction']))
print(len(dfCS),'\n',dfCS)
window = 36
window = window - window % 2  # force an even window so the half-weighted edges below center it
# Compute the moving average using a window centered on the data point whose
# average is being computed; the two edge points each get half weight.
for i in range(int(window/2),dfCS.shape[0]-int(window/2)):
    dfCS.loc[dfCS.index[i],'MovAvg'] = (0.5*dfCS.iloc[i - int(window/2)]['Private Construction'] + dfCS.iloc[i - int(window/2)+1:i + int(window/2)]['Private Construction'].sum() + 0.5*dfCS.iloc[i + int(window/2)]['Private Construction'])/float(window)
dfCS['sqErr'] = (dfCS['Private Construction'] - dfCS['MovAvg'])**2
# The moving average is undefined near the edges; drop those rows since we cannot use them in the analysis.
dfCS.dropna(how='any',inplace=True)
print('Average Squared Error per Month: ', sum(dfCS['sqErr'])/len(dfCS))
print(dfCS)
# -
# Plot moving average
fig,ax = plt.subplots()
ax.plot(dfCS['MovAvg'],label='Moving Avg.')
ax.plot(dfCS['Private Construction'],label='Private Construction Sales')
ax.set_xlabel('Year')
ax.set_ylabel('Spending')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Next we calculate the residual sales (R1) using the moving average subtracted from the demand.
# Private Construction: Compute Residual and Error
dfCS['R1'] = dfCS['Private Construction'] - dfCS['MovAvg']
dfCS['R1Error'] = abs((dfCS['Private Construction'] - dfCS['R1'])/dfCS['Private Construction'])
dfCS.style.format({
'MovAvg': '{:.1f}'.format,
'sqErr': '{:,.1f}'.format,
'R1': '{:,.1f}'.format,
'R1Error': '{:,.3f}'.format
})
# Private Construction: Plot Average Residual and Display Average Residual
fig,ax = plt.subplots()
ax.plot(dfCS['R1'])
ax.set_xlabel('Year')
ax.set_ylabel('Spending')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
print('Average Residual: ', sum(dfCS['R1'])/len(dfCS))
# Calculate Autocorrelation to detect any cyclical patterns and how many periods before they repeat
# Scan lags 1..146 for the strongest positive autocorrelation in the residual;
# the best lag is taken as the cyclical period.
maxCorr = 0.0
period = np.nan  # np.nan: the np.NaN alias was removed in NumPy 2.0
for i in range(1,147):
    corr = dfCS['R1'].autocorr(lag=i)
    print('Correlation, lag ',i,' ',corr)
    if corr > maxCorr:
        maxCorr = corr
        period = i
print('period = ',period,' Maximum Correlation = ',maxCorr)
# We have now calculated the autocorrelation to detect any cyclical patterns and how many periods pass before they repeat.
# +
# Break the time series up, computes average of period, and plots the patterns and averages
period = 12
cycleLen = period
numCycles = int(len(dfCS)/cycleLen + 0.5)
cycles = [dfCS.iloc[range(i*period,min((i+1)*period,len(dfCS)))]['R1'] for i in range(numCycles)]
ptsInCycles = [dfCS.iloc[range(i,len(dfCS['R1']),period)]['R1'].tolist() for i in range(period)]
avg = [sum(pts)/len(pts) for pts in ptsInCycles]
fig,ax = plt.subplots()
for i in range(len(cycles)):
ax.plot(cycles[i].values,label='Cycle '+str(i),c='k')
ax.plot(avg,label='Average Cycle',c='r')
ax.set_xlabel('Month')
ax.set_ylabel('Spending')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.legend()
# +
# Inserts the appropriate C(m) value into the C column for month, then plots the cyclicity component
cycleLen = period # see prior cell for computation of cyclicality period
numCycles = int(len(dfCS)/cycleLen + 0.5)
dfCS['C'] = np.nan # Empty column for the cyclicality component (np.nan: the np.NaN alias was removed in NumPy 2.0)
for i in range(len(dfCS)):
    dfCS.loc[dfCS.index[i], 'C'] = avg[i % cycleLen] # Write appropriate cyclicality value
fig,ax = plt.subplots()
ax.plot(dfCS['C'],label='Cyclic Pattern')
ax.plot(dfCS['R1'],label='Remainder After Trend')
ax.set_xlabel('Year')
ax.set_ylabel('Spending')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
fig.legend()
# -
# Computes remaining residual sales, computes the error, computes the 'fit', plots the model, computes the avg absolute error, and remove the sqErr column
dfCS['R2Private'] = dfCS['R1'] - dfCS['C']
dfCS['R2ErrorPrivate'] = abs(dfCS['R2Private']/dfCS['Private Construction'])
dfCS['fit'] = dfCS['MovAvg'] + dfCS['C']
dfCS.drop(['sqErr'],axis=1,inplace=True)
print('Average Error: ', sum(dfCS['R2ErrorPrivate'])/len(dfCS))
print(dfCS)
fig,ax = plt.subplots()
ax.plot(dfCS['Private Construction'],label='Private Construction')
ax.plot(dfCS['fit'], label = 'Fit')
ax.set_xlabel('Year')
ax.set_ylabel('Spending')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
fig.legend()
fig,ax = plt.subplots()
ax.plot(dfCS['R2Private'],label='Remainder after Trend and Cyclical Components')
ax.set_xlabel('Year')
ax.set_ylabel('Spending')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
fig.legend()
# Re-scan autocorrelation on the remaining residual (trend and cyclical
# components removed) to check whether any cyclicality is left.
maxCorr = 0.0
period = np.nan  # np.nan: the np.NaN alias was removed in NumPy 2.0
for i in range(1,37):
    corr = dfCS['R2Private'].autocorr(lag=i)
    print('Correlation, lag ',i,' ',corr)
    if corr > maxCorr:
        maxCorr = corr
        period = i
print('period = ',period,' Maximum Correlation = ',maxCorr)
fig,ax = plt.subplots()
ax.plot(dfCS['Private Construction'],label='Private Construction')
ax.plot(dfCS['fit'],label='Fit')
ax.plot(dfCS['R2Private'],label='Residual')
ax.set_xlabel('Year')
ax.set_ylabel('Spending')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
fig.legend()
# ## Analyzing Public Construction with Time Series Decomposition
# +
# Public Construction: Load and visualize the data
dfCS = pd.read_csv('ConstructionTimeSeriesDataV2.csv')
fig, ax = plt.subplots()
ax.plot(dfCS['Public Construction'], label = 'Public Construction Sales')
ax.set_xlabel('Month')
ax.set_ylabel('Spending')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# -
# A quick look at the data shows us it is non-linear. There are "component patterns" that look like waves, which indicates seasonality. We would want to research what is causing those patterns in the data. If we decompose the data, we can forecast the future and how the components will repeat.
#
# Next we compute the moving average of each point for a data window. The window size used here is 36, and the average squared error is computed in order to help evaluate which window size is appropriate for the moving average.
# +
# Public Construction: Compute moving average
def sqErr(row):
    """Squared error between actual public-construction spending and its moving average for one row."""
    delta = row['Public Construction'] - row['MovAvg']
    return delta * delta
# Prepare columns, index by month, and compute a centered 36-month moving average.
dfCS['MovAvg'] = np.nan  # np.nan: the np.NaN alias was removed in NumPy 2.0
dfCS['sqErr'] = np.nan
# Changing the DataFrame index to DatetimeIndex data type is required for using one of the functions below
# NOTE(review): freq='m' — confirm it resolves to month-end in the installed pandas; newer pandas prefers 'ME'.
dfCS.index = pd.date_range(freq = 'm', start = pd.Timestamp(year = 2002, month = 1, day = 2), periods = len(dfCS['Public Construction']))
print(len(dfCS),'\n',dfCS)
window = 36
window = window - window % 2  # force an even window so the half-weighted edges below center it
# Compute the moving average using a window centered on the data point whose
# average is being computed; the two edge points each get half weight.
for i in range(int(window/2),dfCS.shape[0]-int(window/2)):
    dfCS.loc[dfCS.index[i],'MovAvg'] = (0.5*dfCS.iloc[i - int(window/2)]['Public Construction'] + dfCS.iloc[i - int(window/2)+1:i + int(window/2)]['Public Construction'].sum() + 0.5*dfCS.iloc[i + int(window/2)]['Public Construction'])/float(window)
dfCS['sqErr'] = (dfCS['Public Construction'] - dfCS['MovAvg'])**2
# The moving average is undefined near the edges; drop those rows since we cannot use them in the analysis.
dfCS.dropna(how='any',inplace=True)
print('Average Squared Error per Month: ', sum(dfCS['sqErr'])/len(dfCS))
print(dfCS)
# -
# Public Construction: plot moving average
fig,ax = plt.subplots()
ax.plot(dfCS['MovAvg'],label='Moving Avg.')
ax.plot(dfCS['Public Construction'],label='Public Construction Sales')
ax.set_xlabel('Year')
ax.set_ylabel('Units of Demand')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Next we calculate the residual sales (R1) using the moving average subtracted from the demand.
# Public Construction: Compute Residual and Error
dfCS['R1'] = dfCS['Public Construction'] - dfCS['MovAvg']
dfCS['R1Error'] = abs((dfCS['Public Construction'] - dfCS['R1'])/dfCS['Public Construction'])
dfCS.style.format({
'MovAvg': '{:.1f}'.format,
'sqErr': '{:,.1f}'.format,
'R1': '{:,.1f}'.format,
'R1Error': '{:,.3f}'.format
})
# Public Construction: Plot Average Residual and Display Average Residual
fig,ax = plt.subplots()
ax.plot(dfCS['R1'])
ax.set_xlabel('Year')
ax.set_ylabel('Spending')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
print('Average Residual: ', sum(dfCS['R1'])/len(dfCS))
# Calculate Autocorrelation to detect any cyclical patterns and how many periods before they repeat
# Scan lags 1..146 for the strongest positive autocorrelation in the residual;
# the best lag is taken as the cyclical period.
maxCorr = 0.0
period = np.nan  # np.nan: the np.NaN alias was removed in NumPy 2.0
for i in range(1,147):
    corr = dfCS['R1'].autocorr(lag=i)
    print('Correlation, lag ',i,' ',corr)
    if corr > maxCorr:
        maxCorr = corr
        period = i
print('period = ',period,' Maximum Correlation = ',maxCorr)
# +
# Break the time series up, computes average of period, and plots the patterns and averages
period = 12
cycleLen = period
numCycles = int(len(dfCS)/cycleLen + 0.5)
cycles = [dfCS.iloc[range(i*period,min((i+1)*period,len(dfCS)))]['R1'] for i in range(numCycles)]
ptsInCycles = [dfCS.iloc[range(i,len(dfCS['R1']),period)]['R1'].tolist() for i in range(period)]
avg = [sum(pts)/len(pts) for pts in ptsInCycles]
fig,ax = plt.subplots()
for i in range(len(cycles)):
ax.plot(cycles[i].values,label='Cycle '+str(i),c='k')
ax.plot(avg,label='Average Cycle',c='r')
ax.set_xlabel('Month')
ax.set_ylabel('Spending')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.legend()
# +
# Inserts the appropriate C(m) value into the C column for month, then plots the cyclicity component
cycleLen = period # see prior cell for computation of cyclicality period
numCycles = int(len(dfCS)/cycleLen + 0.5)
dfCS['C'] = np.nan # Empty column for the cyclicality component (np.nan: the np.NaN alias was removed in NumPy 2.0)
for i in range(len(dfCS)):
    dfCS.loc[dfCS.index[i], 'C'] = avg[i % cycleLen] # Write appropriate cyclicality value
fig,ax = plt.subplots()
ax.plot(dfCS['C'],label='Cyclic Pattern')
ax.plot(dfCS['R1'],label='Remainder After Trend')
ax.set_xlabel('Year')
ax.set_ylabel('spending')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
fig.legend()
# -
# Computes remaining residual sales, computes the error, computes the 'fit', plots the model, computes the avg absolute error, and remove the sqErr column
dfCS['R2Public'] = dfCS['R1'] - dfCS['C']
dfCS['R2ErrorPublic'] = abs(dfCS['R2Public']/dfCS['Public Construction'])
dfCS['fit'] = dfCS['MovAvg'] + dfCS['C']
dfCS.drop(['sqErr'],axis=1,inplace=True)
print('Average Error: ', sum(dfCS['R2ErrorPublic'])/len(dfCS))
print(dfCS)
fig,ax = plt.subplots()
ax.plot(dfCS['Public Construction'],label='Public Construction')
ax.plot(dfCS['fit'], label = 'Fit')
ax.set_xlabel('Year')
ax.set_ylabel('Spending')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
fig.legend()
fig,ax = plt.subplots()
ax.plot(dfCS['R2Public'],label='Remainder after Trend and Cyclical Components')
ax.set_xlabel('Year')
ax.set_ylabel('Spending')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
fig.legend()
# Re-scan autocorrelation on the remaining residual (trend and cyclical
# components removed) to check whether any cyclicality is left.
maxCorr = 0.0
period = np.nan  # np.nan: the np.NaN alias was removed in NumPy 2.0
for i in range(1,37):
    corr = dfCS['R2Public'].autocorr(lag=i)
    print('Correlation, lag ',i,' ',corr)
    if corr > maxCorr:
        maxCorr = corr
        period = i
print('period = ',period,' Maximum Correlation = ',maxCorr)
fig,ax = plt.subplots()
ax.plot(dfCS['Public Construction'],label='Public Construction')
ax.plot(dfCS['fit'],label='Fit')
ax.plot(dfCS['R2Public'],label='Residual')
ax.set_xlabel('Year')
ax.set_ylabel('Spending')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
fig.legend()
| timeseries/TimeSeries-GitHub.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="rVcY1JnfcA2i"
# # Notebook Author info
# <center>
#
# | | |
# | ----------- | ----------- |
# |  | `Name:` <NAME><br /><br /> `Professional Status:` Student of Computer Science and Engineering <br /><br /> `Email:` <EMAIL> <br /><br /> `Website :` https://rafat97.github.io/ <br /><br />`Github:` https://github.com/Rafat97 <br /><br /> `Linkedin:` https://www.linkedin.com/in/rafat-haque-173131139/ |
#
# </center>
#
# + [markdown] id="okAZVx9nb9Es"
# # Drive mount code
# + id="Re1cOmeea_7J"
# from google.colab import drive
# drive.mount('./drive')
# + [markdown] id="nW4ElwLgysH_"
# # Dataset Creation
# + [markdown] id="Ay2TDwZCy96_"
# ## Download dataset 2
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="wlb4s-FrcGxi" outputId="606d472d-91b8-4ea2-d1cf-2d4e8ca1e397"
# !gdown --id 1L<KEY>
# + [markdown] id="9RQpoQIizIfo"
# ## Import package
# + colab={"base_uri": "https://localhost:8080/"} id="uLl0_bE2cC-C" outputId="f0b146c9-1b9c-4f1a-8425-021550218023"
# import some importent library or packages
import matplotlib.pyplot as plt
import warnings
import time,sys
import copy
import pandas as pd
import numpy as np
import cv2
import os
import pathlib
import zipfile
import torch
import torchvision
from torchvision import models
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader, ConcatDataset
import torch.optim as optim
import pathlib
import shutil
from pathlib import Path
from collections import Counter
# !pip install torchsummary
from torchsummary import summary
from sklearn.utils import shuffle
# !pip install torchviz
from torchviz import make_dot, make_dot_from_trace
warnings.filterwarnings('ignore')
torch.manual_seed(0)
# + [markdown] id="tzq1MJWAzLy3"
# ## Read downloaded dataset
# + id="SG4wK48ZcGFy"
df = pd.read_csv('/content/Dataset 2.csv',encoding='unicode_escape')
# + colab={"base_uri": "https://localhost:8080/"} id="yZCvTyl6keZe" outputId="ad543d85-f186-470c-b14a-5d6f54911631"
df.columns ## columns name
# + colab={"base_uri": "https://localhost:8080/"} id="l8lCKytvnQxF" outputId="7f7434f7-7da1-4185-db47-5da029b5f525"
df['polarity'].value_counts() ## get number of data in `polarity` class
# + colab={"base_uri": "https://localhost:8080/"} id="wU8m4CcPkiV1" outputId="1f6bcffc-41f0-4afb-af30-cadcc85eef39"
df['text'].value_counts()
# + [markdown] id="BnThLglw3HEu"
# ## Dataset splitting
# + id="NddseGZ0n-Y8"
## Split the dataset per class so train/test keep the same class ratio (stratified 80/20).
traning_split_size = 0.8
df_class_1 = df[df['polarity'] == 1]
df_class_0 = df[df['polarity'] == 0]
# Class 0: first 80% of rows -> train, remainder -> test.
trainSize = int(len(df_class_0) * traning_split_size)
Traning_class_0 = df_class_0[:trainSize]
Test_class_0 = df_class_0[trainSize:]
# Class 1: same ratio.
trainSize = int(len(df_class_1) * traning_split_size)
Traning_class_1 = df_class_1[:trainSize]
Test_class_1 = df_class_1[trainSize:]
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="MJnpde_qvabR" outputId="aa4957e6-e87c-4675-873f-7fb79cd42c14"
## traning dataset create
li = [Traning_class_0,Traning_class_1]
frame = pd.concat(li, axis=0, ignore_index=True)
frame = shuffle(frame)
frame.reset_index(inplace=True, drop=True)
frame.to_csv('Train-Dataset.csv',index=False)
frame
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="UC7KHE3Jxnax" outputId="f1dbfc59-8e38-4eff-8c7b-8a22baa61d0e"
## testing dataset create
li = [Test_class_0,Test_class_1]
frame = pd.concat(li, axis=0, ignore_index=True)
frame = shuffle(frame)
frame.reset_index(inplace=True, drop=True)
frame.to_csv('Test-Dataset.csv',index=False)
frame
# + [markdown] id="SwhD-3_x3t5V"
# # Text cleaning
# + colab={"base_uri": "https://localhost:8080/"} id="vf7x0ff83u0F" outputId="e81f6cd3-1f09-43cc-a02c-0e0143daf070"
# import some importent library or packages
import matplotlib.pyplot as plt
import warnings
import time,sys,re,string
import copy
import pandas as pd
import numpy as np
import cv2
import os
import pathlib
import zipfile
import torch
import torchvision
from torchvision import models
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader, ConcatDataset
import torch.optim as optim
import pathlib
import shutil
from pathlib import Path
from collections import Counter
# !pip install torchsummary
from torchsummary import summary
from sklearn.utils import shuffle
# !pip install torchviz
from torchviz import make_dot, make_dot_from_trace
try:
import contractions
except:
# !pip install contractions
import contractions
from torchtext.legacy.data import Field, TabularDataset, BucketIterator
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator, Vectors, GloVe
import nltk
try:
nltk.data.find('tokenizers/punkt')
nltk.data.find('averaged_perceptron_tagger')
nltk.data.find('brown')
except LookupError:
nltk.download('averaged_perceptron_tagger')
nltk.download('brown')
nltk.download('punkt')
from nltk import sent_tokenize,word_tokenize
warnings.filterwarnings('ignore')
torch.manual_seed(0)
# + id="UqxEVwxP37g2"
traning_df = pd.read_csv('/content/Train-Dataset.csv')
testing_df = pd.read_csv('/content/Test-Dataset.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="gulKysGivn3d" outputId="cd93aa5f-039f-4db8-b678-c28ec1d87f6c"
traning_df
# + id="qdQ4ajcoOdX6"
def clean_text(text):
    """Lowercase *text* and strip noise for tokenization.

    Removes text in square brackets, URLs, HTML-like tags, punctuation,
    newlines, and words containing digits, then collapses any remaining
    runs of periods into a single period.

    Fix: regex patterns are now raw strings — the previous plain strings
    contained invalid escape sequences (e.g. '\\[', '\\w', '\\d'), which
    modern Python flags with a SyntaxWarning. Matching behavior is unchanged.
    """
    text = str(text).lower()
    text = re.sub(r'\[.*?\]', '', text)                # [bracketed] segments
    text = re.sub(r'https?://\S+|www\.\S+', '', text)  # URLs
    text = re.sub(r'<.*?>+', '', text)                 # HTML-like tags
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)  # punctuation
    text = re.sub(r'\n', '', text)                     # newlines
    text = re.sub(r'\w*\d\w*', '', text)               # words containing digits
    # NOTE(review): punctuation removal above already deleted every period,
    # so this substitution is effectively a no-op; kept for behavior parity.
    text = re.sub(r'\.+', ".", text)
    return text
def replace_text(text):
    """Lowercase *text* and drop every non-ASCII character."""
    lowered = str(text).lower()
    return lowered.encode('ascii', 'ignore').decode('utf-8')
# Apply the cleaning pipeline to both splits, in place.
for dta in [traning_df,testing_df]:
    dta['text_cleaning'] = dta.text.apply(lambda x: x.strip().lower() )
    dta['text_cleaning'] = dta.text_cleaning.apply(lambda x : " ".join(x.split()) )  # collapse whitespace runs
    dta['text_cleaning'] = dta.text_cleaning.apply(lambda x: contractions.fix(x) )   # expand contractions ("don't" -> "do not")
    dta['text_cleaning'] = dta.text_cleaning.apply(lambda x: clean_text(x) )
    dta['text_cleaning'] = dta.text_cleaning.apply(lambda x: replace_text(x) )
    dta['number_of_word'] = dta.text_cleaning.apply(lambda x: len(word_tokenize(x)) )
    dta['number_of_letter'] = dta.text_cleaning.apply(lambda x: len(x) )
    # Replace the raw text column with the cleaned version.
    del dta['text']
    dta['text'] = dta['text_cleaning']
    del dta['text_cleaning']
# Drop rows whose cleaned text ended up empty (no words or no letters left).
for dta in [traning_df,testing_df]:
    word_count_zero = dta[dta['number_of_word'] == 0]
    dta.drop(word_count_zero.index,inplace=True)
    letter_count_zero = dta[dta['number_of_letter'] == 0]
    dta.drop(letter_count_zero.index,inplace=True)
# # Training DF
# traning_df['text_cleaning'] = traning_df.text.apply(lambda x: x.strip().lower() )
# traning_df['text_cleaning'] = traning_df.text_cleaning.apply(lambda x : " ".join(x.split()) )
# traning_df['text_cleaning'] = traning_df.text_cleaning.apply(lambda x: contractions.fix(x) )
# traning_df['text_cleaning'] = traning_df.text_cleaning.apply(lambda x: clean_text(x) )
# traning_df['text_cleaning'] = traning_df.text_cleaning.apply(lambda x: replace_text(x) )
# # Testing DF
# testing_df['text_cleaning'] = testing_df.text.apply(lambda x: x.strip().lower() )
# testing_df['text_cleaning'] = traning_df.text_cleaning.apply(lambda x : " ".join(x.split()) )
# testing_df['text_cleaning'] = testing_df.text_cleaning.apply(lambda x: contractions.fix(x) )
# testing_df['text_cleaning'] = testing_df.text_cleaning.apply(lambda x: clean_text(x) )
# testing_df['text_cleaning'] = testing_df.text_cleaning.apply(lambda x: replace_text(x) )
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="OptnUdYqPe_D" outputId="d36a92de-7bf0-4edb-e118-5d59ea245b48"
traning_df
# + colab={"base_uri": "https://localhost:8080/"} id="CnDFX37x7tcc" outputId="20de7af8-19ae-4480-9399-728df6635f08"
alltext_length = []
allword_length = []
for val in traning_df.text:
word_tok = word_tokenize(val)
alltext_length.append(len(val))
allword_length.append(len(word_tok))
print(max(alltext_length))
print(max(allword_length))
# traning_df.text.apply(lambda x: len(x) )
# + colab={"base_uri": "https://localhost:8080/"} id="doo23zv2ZNj4" outputId="09e8eacf-ea5b-41db-dbbb-26317953e2e6"
for val in traning_df.text:
word_tok = word_tokenize(val)
if len(word_tok) <= 1:
print(val)
# + id="U5lXljaX4nx8"
traning_df.to_csv('/content/Train-Dataset-prcessed.csv',index=False)
testing_df.to_csv('/content/Test-Dataset-prcessed.csv',index=False)
# + [markdown] id="pS1_ntiM5itz"
# # Training & `CNN` modeling
# + colab={"base_uri": "https://localhost:8080/"} id="VCpz-8Tl6EX0" outputId="4a01020f-b10c-49ae-b968-7df6fafd2bac"
# import some importent library or packages
import matplotlib.pyplot as plt
import warnings
import time,sys,re,string
import copy
import pandas as pd
import numpy as np
import cv2
import os
import pathlib
import zipfile
import torch
import torchvision
from torchvision import models
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader, ConcatDataset
import torch.optim as optim
import pathlib
import shutil
from pathlib import Path
from collections import Counter
# !pip install torchsummary
# !pip install torchinfo
from torchinfo import summary
from sklearn.utils import shuffle
# !pip install torchviz
from torchviz import make_dot, make_dot_from_trace
try:
import contractions
except:
# !pip install contractions
import contractions
from torchtext.legacy.data import Field, TabularDataset, BucketIterator
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator, Vectors, GloVe
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import nltk
try:
nltk.data.find('tokenizers/punkt')
nltk.data.find('averaged_perceptron_tagger')
nltk.data.find('brown')
except LookupError:
nltk.download('averaged_perceptron_tagger')
nltk.download('brown')
nltk.download('punkt')
from nltk import sent_tokenize,word_tokenize
warnings.filterwarnings('ignore')
torch.manual_seed(0)
# + id="-4O9tu5u6LC6"
# Load the pre-processed train/test splits.
# NOTE(review): "traning"/"prcessed" look like typos, but the paths must match
# the actual uploaded file names, so they are left unchanged.
traning_df = pd.read_csv('/content/Train-Dataset-prcessed.csv')
testing_df = pd.read_csv('/content/Test-Dataset-prcessed.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 412} id="kQmdyRqE6fwK" outputId="06024393-7b72-4c50-b5a8-c5fec0995d59"
traning_df
# + id="7n0l74pAlyr_"
# torchtext (legacy) fields: the label is a raw float (no vocab lookup); the
# text is spacy-tokenised, lower-cased, and returned with sequence lengths.
label_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
text_field = Field(tokenize='spacy', lower=True, include_lengths=True, batch_first=True)
# Column order must match the CSV: (polarity, text).
fields = [('polarity', label_field), ('text', text_field)]
train_data = TabularDataset(path="/content/Train-Dataset-prcessed.csv",
                            format="csv",
                            fields=fields,
                            skip_header=True)
valid_data = TabularDataset(path="/content/Test-Dataset-prcessed.csv",
                            format="csv",
                            fields=fields,
                            skip_header=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Bucket batches by text length so padding within a batch is minimal;
# sort_within_batch is required by pack_padded_sequence downstream.
train_iter = BucketIterator(train_data, batch_size=32, sort_key=lambda x: len(x.text),
                            device=device, sort=True, sort_within_batch=True)
valid_iter = BucketIterator(valid_data, batch_size=32, sort_key=lambda x: len(x.text),
                            device=device, sort=True, sort_within_batch=True)
# Vocabulary is built from the training split only (no pre-trained vectors).
text_field.build_vocab(train_data,) # min_freq=3,vectors = "glove.6B.100d"
label_field.build_vocab(train_data)
# + colab={"base_uri": "https://localhost:8080/"} id="9GXT5L5ADSU3" outputId="6c1f559e-2066-4b81-c4bc-da7d75c84ab2"
#No. of unique tokens in text
print("Size of TEXT vocabulary:",len(text_field.vocab))
#No. of unique tokens in label
print("Size of LABEL vocabulary:",len(label_field.vocab))
#Commonly used words
print(text_field.vocab.freqs.most_common(10))
#Word dictionary
print(text_field.vocab.stoi)
# + id="J_GgfPEM1Rri"
# for batch in train_iter:
# print(batch.polarity)
# print(batch.text)
# + [markdown] id="_oF4NWmtDAEE"
# ## Code 1
#
# + id="hNxd9n-6JUeb"
# import torch.nn as nn
# class LSTMTagger(torch.nn.Module):
# def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
# super(LSTMTagger, self).__init__()
# self.hidden_dim = hidden_dim
# self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
# # The LSTM takes word embeddings as inputs, and outputs hidden states
# # with dimensionality hidden_dim.
# self.lstm = nn.LSTM(embedding_dim, hidden_dim)
# # The linear layer that maps from hidden state space to tag space
# self.hidden2tag = nn.Linear(hidden_dim, tagset_size)
# def forward(self, sentence):
# embeds = self.word_embeddings(sentence)
# # print(len(sentence))
# x = embeds.view(len(sentence), 1, -1)
# # print(x.shape)
# lstm_out, _ = self.lstm(x)
# tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
# tag_scores = torch.nn.functional.log_softmax(tag_space, dim=1)
# return tag_scores
# + id="Ve_OtXk4JcID"
# EMBEDDING_DIM = 6
# HIDDEN_DIM = 6
# model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(text_field.vocab), 1)
# loss_function = torch.nn.NLLLoss()
# optimizer = optim.Adam(model.parameters(), lr=0.00001)
# criterion = torch.nn.BCEWithLogitsLoss()
# + id="ubro-1vsQyAi"
# from torchinfo import summary
# summary( model, input_size=(1,),dtypes=[torch.long],)
# + id="cVP8QA7eKHXT"
# def binary_accuracy(preds, y):
# rounded_preds = torch.round(torch.sigmoid(preds))
# correct = (rounded_preds == y).float()
# acc = correct.sum() / len(correct)
# return acc
# + id="-sm8DhuuKIAF"
# # training function
# def train(model, iterator):
# epoch_loss = 0
# epoch_acc = 0
# model.train()
# for i,batch in enumerate( iterator,1):
# text, text_lengths = batch.text
# optimizer.zero_grad()
# predictions = model(text,).squeeze(1)
# loss = criterion(predictions, batch.polarity)
# acc = binary_accuracy(predictions, batch.polarity)
# loss.backward()
# optimizer.step()
# epoch_loss += loss.item()
# epoch_acc += acc.item()
# print_val = f"running_loss : {(loss.item()):.6f}\t"
# print_val += f"running_corrects : {acc.item():.6f}\t"
# sys.stdout.write('\r' + str(print_val))
# return epoch_loss / len(iterator), epoch_acc / len(iterator)
# + id="sqx1YxbhKIUS"
# def evaluate(model, iterator):
# epoch_loss = 0
# epoch_acc = 0
# model.eval()
# with torch.no_grad():
# for batch in iterator:
# text, text_lengths = batch.text
# predictions = model(text,).squeeze(1)
# loss = criterion(predictions, batch.polarity)
# acc = binary_accuracy(predictions, batch.polarity)
# epoch_acc += acc.item()
# epoch_loss += loss.item()
# return epoch_loss / len(iterator), epoch_acc / len(iterator)
# + id="RAvQiQFkKI3j"
# t = time.time()
# loss=[]
# acc=[]
# val_acc=[]
# val_loss=[]
# num_epochs = 100
# for epoch in range(num_epochs):
# train_loss, train_acc = train(model, train_iter)
# valid_loss, valid_acc = evaluate(model, valid_iter)
# print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}% | Val. Acc: {valid_acc*100:.2f}%')
# # print(f'\t')
# loss.append(train_loss)
# acc.append(train_acc)
# val_loss.append(valid_loss)
# val_acc.append(valid_acc)
# print(f'time:{time.time()-t:.3f}')
# + [markdown] id="P5ZSxIp5MTF9"
# ## Code 2
#
# + id="eAoLfKd3MTF9"
# import torch.nn as nn
# class RNNModel(torch.nn.Module):
# def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
# super(RNNModel, self).__init__()
# self.hidden_dim = hidden_dim
# self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
# self.rnn = nn.RNN(embedding_dim, hidden_dim, 2, batch_first=True, nonlinearity='relu')
# self.fc = nn.Linear(hidden_dim, tagset_size)
# def forward(self, sentence):
# embeds = self.word_embeddings(sentence)
# # print(len(sentence))
# # x = embeds.view(len(sentence), 1, -1)
# # Initialize hidden state with zeros
# h0 = torch.autograd.Variable(torch.zeros(2, sentence.size(0), self.hidden_dim))
# out, hn = self.rnn(embeds, h0)
# out = self.fc(out[:, -1, :])
# return out
# + id="YJRiBgIdMTF9"
# EMBEDDING_DIM = 6
# HIDDEN_DIM = 6
# model = RNNModel(EMBEDDING_DIM, HIDDEN_DIM, len(text_field.vocab), 1)
# loss_function = torch.nn.NLLLoss()
# optimizer = optim.Adam(model.parameters(), lr=0.00001)
# criterion = torch.nn.BCEWithLogitsLoss()
# + id="rVNkJWhbMTF-"
# from torchinfo import summary
# summary( model, input_size=(1,),dtypes=[torch.long],)
# + id="EOXrDyt9MTF-"
# def binary_accuracy(preds, y):
# rounded_preds = torch.round(torch.sigmoid(preds))
# correct = (rounded_preds == y).float()
# acc = correct.sum() / len(correct)
# return acc
# + id="dDatcS7MMTF-"
# # training function
# def train(model, iterator):
# epoch_loss = 0
# epoch_acc = 0
# model.train()
# for i,batch in enumerate( iterator,1):
# text, text_lengths = batch.text
# optimizer.zero_grad()
# predictions = model(text,).squeeze(1)
# loss = criterion(predictions, batch.polarity)
# acc = binary_accuracy(predictions, batch.polarity)
# loss.backward()
# optimizer.step()
# epoch_loss += loss.item()
# epoch_acc += acc.item()
# print_val = f"running_loss : {(loss.item()):.6f}\t"
# print_val += f"running_corrects : {acc.item():.6f}\t"
# sys.stdout.write('\r' + str(print_val))
# return epoch_loss / len(iterator), epoch_acc / len(iterator)
# + id="OyC3bM4qMTF-"
# def evaluate(model, iterator):
# epoch_loss = 0
# epoch_acc = 0
# model.eval()
# with torch.no_grad():
# for batch in iterator:
# text, text_lengths = batch.text
# predictions = model(text,).squeeze(1)
# loss = criterion(predictions, batch.polarity)
# acc = binary_accuracy(predictions, batch.polarity)
# epoch_acc += acc.item()
# epoch_loss += loss.item()
# return epoch_loss / len(iterator), epoch_acc / len(iterator)
# + id="xgdcIV9fMTF-"
# t = time.time()
# loss=[]
# acc=[]
# val_acc=[]
# val_loss=[]
# num_epochs = 100
# for epoch in range(num_epochs):
# train_loss, train_acc = train(model, train_iter)
# valid_loss, valid_acc = evaluate(model, valid_iter)
# print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}% | Val. Acc: {valid_acc*100:.2f}%')
# # print(f'\t')
# loss.append(train_loss)
# acc.append(train_acc)
# val_loss.append(valid_loss)
# val_acc.append(valid_acc)
# print(f'time:{time.time()-t:.3f}')
# + [markdown] id="ePXzQUbiGsYR"
# ## Code 3 Model
# + id="3cmBea1XREfM"
import torch.nn as nn
class LSTM_net(nn.Module):
    """Bidirectional LSTM sentiment classifier.

    Embeds token ids, runs a packed multi-layer bi-LSTM, concatenates the
    final forward and backward hidden states, and maps them through two
    linear layers to a single logit per example.
    """

    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers,
                 bidirectional, dropout, pad_idx):
        super().__init__()
        # Module creation order kept stable: embedding, rnn, fc1, fc2, dropout.
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)
        self.rnn = nn.LSTM(embedding_dim, hidden_dim,
                           num_layers=n_layers,
                           bidirectional=bidirectional,
                           dropout=dropout)
        self.fc1 = nn.Linear(hidden_dim * 2, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, 1)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text, text_lengths):
        # text: [batch, sent len] -- the Field was built with batch_first=True.
        token_vectors = self.embedding(text)
        # Pack so the LSTM skips padded positions entirely.
        packed = nn.utils.rnn.pack_padded_sequence(token_vectors, text_lengths, batch_first=True)
        _, (hidden, _) = self.rnn(packed)
        # hidden: [num layers * num directions, batch, hid dim]; the last two
        # slices are the final forward and backward states of the top layer.
        final_state = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
        final_state = self.dropout(final_state)
        # Two-layer head; note dropout is applied AFTER the final linear,
        # exactly as in the original implementation.
        logits = self.dropout(self.fc2(self.fc1(final_state)))
        return logits
# + id="pf-hTYsT4TH-"
class CNN(torch.nn.Module):
def __init__(self , vocab_size, embedding_dim, hidden_dim, output_dim, n_layers,
bidirectional, dropout, pad_idx):
super(CNN, self).__init__()
self.embedding = torch.nn.Embedding(vocab_size, embedding_dim, padding_idx = pad_idx)
#initializing convolution layer
self.conv1 = torch.nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3)
self.conv2 = torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3)
self.conv3 = torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=1)
#initializing dropout
self.dropout = torch.nn.Dropout(0.2)
#initializing MaxPool2d
self.pool= torch.nn.MaxPool2d(2,2)
#initializing linear
self.fc1 = torch.nn.Linear(64* 1* 1, 512)
self.fc2 = torch.nn.Linear(512,64)
self.fc3 = torch.nn.Linear(64,output_dim)
def forward(self, text):
embedded = self.embedding(text)
x = embedded.view(-1,1,16,16)
x = self.pool(torch.nn.functional.relu(self.conv1(x)))
x = self.dropout(x)
x = self.pool(torch.nn.functional.relu(self.conv2(x)))
x = self.dropout(x)
x = self.pool(torch.nn.functional.relu(self.conv3(x)))
x = x.view(-1, 64* 1* 1)
# x = self.pool(torch.nn.functional.relu(x))
# x = x.view(-1, 32* 1* 1)
x = torch.nn.functional.relu(self.fc1(x))
x = torch.nn.functional.relu(self.fc2(x))
x = self.fc3(x)
return x
# + id="qhleHCW344v2"
# Hyperparameters
num_epochs = 100
learning_rate = 0.0001
INPUT_DIM = len(text_field.vocab)
EMBEDDING_DIM = 256  # must stay 256: CNN.forward reshapes each embedding to 1x16x16
HIDDEN_DIM = 256
OUTPUT_DIM = 1
N_LAYERS = 2
BIDIRECTIONAL = True
DROPOUT = 0.2
PAD_IDX = text_field.vocab.stoi[text_field.pad_token] # padding
# + id="ZWHp10bmBKv0" colab={"base_uri": "https://localhost:8080/"} outputId="36019882-56ec-496a-f21c-0811cdf57a7e"
# NOTE(review): CNN is constructed with the LSTM-style positional arguments;
# HIDDEN_DIM / N_LAYERS / BIDIRECTIONAL / DROPOUT are ignored by CNN.__init__.
model = CNN(INPUT_DIM,
            EMBEDDING_DIM,
            HIDDEN_DIM,
            OUTPUT_DIM,
            N_LAYERS,
            BIDIRECTIONAL,
            DROPOUT,
            PAD_IDX).to(device)
from torchinfo import summary
summary( model, input_size=(1, 10),dtypes=[torch.long],)
# + id="pCBLWBNV-Mh5" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="9db4e9ae-178e-4ebf-987e-87e223db0858"
# make_dot visualises the autograd graph -- presumably imported from torchviz
# earlier in the notebook; TODO confirm.
x = torch.randint(3, 5, (1,10)).to(device)
make_dot(model(x), params=dict(model.named_parameters()), show_attrs=True, show_saved=True)
# + id="y7UM-zq0Khvx" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="065e9f9a-244d-4ad9-d295-de44a7ded63f"
make_dot(model(x), params=dict(model.named_parameters()), show_attrs=True, show_saved=True).render("Model-CNN", format="png")
# + id="KUyGJntuRUgc"
# model = LSTM_net(INPUT_DIM,
# EMBEDDING_DIM,
# HIDDEN_DIM,
# OUTPUT_DIM,
# N_LAYERS,
# BIDIRECTIONAL,
# DROPOUT,
# PAD_IDX)
# + id="5YMhpd_JReCz"
model.to(device)
# Loss and optimizer: logits + BCEWithLogitsLoss (sigmoid applied internally).
criterion = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# + id="huyt19JJRhDb"
def binary_accuracy(preds, y):
    """Fraction of sigmoid-thresholded logits *preds* that match 0/1 targets *y*."""
    predicted = torch.sigmoid(preds).round()
    hits = (predicted == y).float()
    return hits.sum() / len(hits)
# + id="6NnBSBI0RjhT"
# training function
def train(model, iterator):
    """Run one optimisation epoch over *iterator*.

    Relies on the module-level `optimizer` and `criterion`.
    Returns (mean loss, mean accuracy) over the epoch's batches.
    """
    total_loss = 0
    total_acc = 0
    model.train()
    for batch_no, batch in enumerate(iterator, 1):
        text, text_lengths = batch.text
        optimizer.zero_grad()
        predictions = model(text).squeeze(1)
        loss = criterion(predictions, batch.polarity)
        acc = binary_accuracy(predictions, batch.polarity)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        total_acc += acc.item()
        # Overwrite the same console line with running stats.
        progress = f"running_loss : {loss.item():.6f}\t"
        progress += f"running_corrects : {acc.item():.6f}\t"
        sys.stdout.write('\r' + str(progress))
    return total_loss / len(iterator), total_acc / len(iterator)
# + id="zruqoGIzRmJl"
def evaluate(model, iterator):
    """Mean loss and accuracy over *iterator* with gradients disabled.

    Relies on the module-level `criterion`.
    """
    running_loss = 0
    running_acc = 0
    model.eval()
    with torch.no_grad():
        for batch in iterator:
            text, text_lengths = batch.text
            predictions = model(text).squeeze(1)
            running_loss += criterion(predictions, batch.polarity).item()
            running_acc += binary_accuracy(predictions, batch.polarity).item()
    return running_loss / len(iterator), running_acc / len(iterator)
# + id="KgJpF8CdRoJL" colab={"base_uri": "https://localhost:8080/"} outputId="4298309d-11e9-480c-afef-a8a083bd6882"
# Main training loop: record per-epoch train/validation loss and accuracy.
t = time.time()
loss=[]
acc=[]
val_acc=[]
val_loss=[]
for epoch in range(num_epochs):
    train_loss, train_acc = train(model, train_iter)
    valid_loss, valid_acc = evaluate(model, valid_iter)
    print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}% | Val. Acc: {valid_acc*100:.2f}%')
    # print(f'\t')
    loss.append(train_loss)
    acc.append(train_acc)
    val_loss.append(valid_loss)
    val_acc.append(valid_acc)
# Total wall-clock time for all epochs.
print(f'time:{time.time()-t:.3f}')
# + [markdown] id="bo5j5q2H2IZZ"
# # Classification Performance Metrics
# + id="QaNZD97cLleV"
# import some importent library or packages
import glob,sys,os
import matplotlib.pyplot as plt
import warnings
import numpy as np
import seaborn as sn
import pandas as pd
import pathlib
import zipfile
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torchvision import models
from torch.utils.data import Dataset, DataLoader, ConcatDataset
import torch.optim as optim
import time,sys
import copy
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
warnings.filterwarnings('ignore')
# + id="dQJ35QIc2L3Z"
_tranning_loss = loss
_tranning_acc = acc
_validation_loss = val_loss
_validation_acc = val_acc
# + colab={"base_uri": "https://localhost:8080/", "height": 349} id="4LZpToji3Jxy" outputId="55cc624b-9f1f-4135-cb8e-ab65901293f1"
plt.figure(figsize=(10,5))
plt.title("Loss graph")
plt.plot(_tranning_loss,label="Tranning Loss")
plt.plot(_validation_loss,label="Validation Loss")
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 349} id="9I3Z4eo13K-T" outputId="c945d4f8-f9b1-4816-ed2f-e4e6be2d12f3"
plt.figure(figsize=(10,5))
plt.title("Accuracy graph")
plt.plot(_tranning_acc,label="Tranning Accuracy")
plt.plot(_validation_acc,label="Validation Accuracy")
plt.xlabel("iterations")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
# + id="1xjZ8_bBAeMH"
y_true_tensor = torch.tensor([])
y_pred_tensor = torch.tensor([])
model.eval()
with torch.no_grad():
for batch in valid_iter:
text, text_lengths = batch.text
predictions = model(text).squeeze(1)
rounded_preds = torch.round(torch.sigmoid(predictions))
correct = (rounded_preds == batch.polarity).float()
# print(rounded_preds,batch.polarity)
y_true_tensor = torch.cat((y_true_tensor,batch.polarity))
y_pred_tensor = torch.cat((y_pred_tensor,rounded_preds))
# acc = binary_accuracy(predictions, batch.polarity)
# print(acc)
# + id="Uil8uwrOLUz2"
y_true = y_true_tensor.type(torch.LongTensor).tolist()
y_pred = y_pred_tensor.type(torch.LongTensor).tolist()
# + colab={"base_uri": "https://localhost:8080/"} id="evonmYksLWOY" outputId="45e008ac-39dc-41a3-8020-1d8998a4bcce"
matrice = confusion_matrix(y_true,y_pred)
matrice
# + colab={"base_uri": "https://localhost:8080/", "height": 336} id="QvWx7SNHW2cs" outputId="ed23da74-eb8e-4b70-fa64-a957f6451247"
df_cm = pd.DataFrame(matrice,columns=[0,1],index=[0,1])
plt.figure(figsize=(5,5))
sn.heatmap(df_cm, annot=True,annot_kws={"size": 10},fmt='g',cmap='Blues',)
plt.title(f"Confusion matrix")
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="U9nF3NHxUzH7" outputId="1620ea10-2bc4-4063-e90b-868112eb43ed"
classify_report = classification_report(y_true, y_pred, )
print(classify_report)
# ---- end of notebook: Soft-Assignment-3/170104028.ipynb ----
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Example of energy scan of a torsion angle
#
# Always super interesting to see the energy landscape of a conformational change.
# And who needs a GUI for that, just use RDKit and xTB
#
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import copy
import logging
import sys
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import rdkit
from rdkit import Chem
from rdkit.Chem.Draw import MolsToGridImage, MolToImage
# +
# ppqm may not be installed as a package; fall back to importing it from the
# repository root (this notebook lives one level below it).
try:
    import ppqm
except ModuleNotFoundError:
    import pathlib
    cwd = pathlib.Path().resolve().parent
    sys.path.append(str(cwd))
    import ppqm
from ppqm import chembridge
# -
# ## Set logging level
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger("ppqm").setLevel(logging.INFO)
logging.getLogger("xtb").setLevel(logging.INFO)
show_progress = True
# ## Define a molecule you like
smiles = "[Cl]c2ccccc2c1ccccc1" # 1-chloro-2-phenylbenzene
molobj = Chem.MolFromSmiles(smiles)
molobj
# ## Get some 3D conformers (RDKit)
molobj = ppqm.tasks.generate_conformers(molobj, n_conformers=1)
ppqm.jupyter.show_molobj(molobj)
# Atom indices defining the biaryl dihedral to scan.
torsion_indices = [5, 6, 7, 8]
MolToImage(
    Chem.RemoveHs(chembridge.copy_molobj(molobj)),
    highlightAtoms=torsion_indices,
    size=(500, 500),
)
conformer = molobj.GetConformer()
origin = conformer.GetPositions()
angle = Chem.rdMolTransforms.GetDihedralDeg(conformer, *torsion_indices)
angle
# Generate one conformer per scan step by rotating the dihedral. The 0-degree
# step is dropped because the starting conformer already covers it.
steps = 100
delta_angles = np.linspace(0, 360, steps)
delta_angles = delta_angles[1:]
for step in delta_angles:
    # Copy the base conformer so each step rotates from the original geometry.
    conformer_prime = rdkit.Chem.Conformer(conformer)
    angle_prime = angle + step
    Chem.rdMolTransforms.SetDihedralDeg(conformer_prime, *torsion_indices, angle_prime)
    molobj.AddConformer(conformer_prime, assignId=True)
molobj.GetNumConformers()
ppqm.jupyter.show_molobj(molobj, align_conformers=False)
# ## Calculate the energy barrier
# +
# xTB driver: scratch files go to a local temp directory, two worker cores.
xtb = ppqm.XtbCalculator(
    scr="_tmp_directory_",
    n_cores=2,
    cmd="xtb",
    show_progress=show_progress,
)
def calculate_energies(molobj):
    """Run GFN2-xTB single-point calculations on every conformer of *molobj*.

    Uses the module-level `xtb` calculator. Returns energies in kcal/mol,
    shifted so the lowest-energy conformer is at zero.
    """
    options = {"gfn": 2}
    results = xtb.calculate(molobj, options)
    energies = np.array([result["scc_energy"] for result in results])
    # Hartree -> kcal/mol, then relative to the minimum.
    energies = energies * ppqm.units.hartree_to_kcalmol
    energies = energies - np.min(energies)
    return energies
# -
energies = calculate_energies(molobj)
# ## Show the energy barrier
# Relative energy vs absolute dihedral angle (first point = unrotated conformer).
_ = plt.plot([angle] + list(angle + delta_angles), energies)
# ---- end of notebook: notebooks/example_xtb_torsion_scan.ipynb ----
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# +
# # %run notebook_setup
# +
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
# TESS 2-minute light curve (sector 1) downloaded directly from MAST.
lc_url = "https://archive.stsci.edu/missions/tess/tid/s0001/0000/0004/4142/0236/tess2018206045859-s0001-0000000441420236-0120-s_lc.fits"
with fits.open(lc_url) as hdus:
    lc = hdus[1].data
    lc_hdr = hdus[1].header
# Exposure time in days: frame time [s] x frames per cadence.
texp = lc_hdr["FRAMETIM"] * lc_hdr["NUM_FRM"]
texp /= 60.0 * 60.0 * 24.0
time = lc["TIME"]
flux = lc["PDCSAP_FLUX"]
flux_err = lc["PDCSAP_FLUX_ERR"]
# Keep finite cadences with a clean quality flag.
m = np.isfinite(time) & np.isfinite(flux) & (lc["QUALITY"] == 0)
time = time[m]
flux = flux[m]
flux_err = flux_err[m]
# Identify outliers
# Iterative clip against a Savitzky-Golay smooth. Only points ABOVE +1 sigma
# are rejected (`resid < sigma` keeps all low points) -- presumably one-sided
# on purpose to preserve transit dips; NOTE(review): confirm this is intended.
m = np.ones(len(flux), dtype=bool)
for i in range(10):
    y_prime = np.interp(time, time[m], flux[m])
    smooth = savgol_filter(y_prime, 301, polyorder=3)
    resid = flux - smooth
    sigma = np.sqrt(np.mean(resid**2))
    m0 = resid < sigma
    if m.sum() == m0.sum():
        m = m0
        break
    m = m0
# Just for this demo, subsample the data
# Re-centre the time axis on the middle of the campaign.
ref_time = 0.5 * (np.min(time[m])+np.max(time[m]))
time = np.ascontiguousarray(time[m] - ref_time, dtype=np.float64)
flux = np.ascontiguousarray(flux[m], dtype=np.float64)
flux_err = np.ascontiguousarray(flux_err[m], dtype=np.float64)
# Convert to relative flux; y / yerr are in parts-per-thousand.
mu = np.median(flux)
flux = flux / mu - 1
flux_err /= mu
x = time
y = flux * 1000
yerr = flux_err * 1000
plt.plot(time, flux, ".k")
plt.plot(time, smooth[m] / mu - 1);
# +
import exoplanet as xo
# Lomb-Scargle periodogram to seed the stellar rotation period prior.
results = xo.estimators.lomb_scargle_estimator(
    x, y, max_peaks=1, min_period=1.0, max_period=30.0,
    samples_per_peak=50)
peak = results["peaks"][0]
ls_period = peak["period"]
freq, power = results["periodogram"]
plt.plot(-np.log10(freq), power, "k")
plt.axvline(np.log10(ls_period), color="k", lw=4, alpha=0.3)
plt.xlim((-np.log10(freq)).min(), (-np.log10(freq)).max())
plt.annotate("period = {0:.4f} d".format(ls_period),
             (0, 1), xycoords="axes fraction",
             xytext=(5, -5), textcoords="offset points",
             va="top", ha="left", fontsize=12)
plt.yticks([])
plt.xlabel("log10(period)")
plt.ylabel("power");
# +
import pymc3 as pm
import theano.tensor as tt
def build_model(mask=None):
    """Build the joint stellar-variability (GP) + transit PyMC3 model.

    Reads the module-level arrays ``x``, ``y``, ``yerr`` (ppt), the exposure
    time ``texp`` and the Lomb-Scargle period ``ls_period``. *mask* selects
    which cadences enter the likelihood (all, if None). Returns the PyMC3
    model and a staged maximum-a-posteriori solution.
    """
    # Initial guesses: orbital period [d], transit time, radius ratio.
    p_period = 16.93
    p_t0 = -8.84
    p_depth = 0.04
    if mask is None:
        mask = np.ones_like(x, dtype=bool)
    with pm.Model() as model:
        # The mean flux of the time series
        mean = pm.Normal("mean", mu=6, sd=10.0)
        # A jitter term describing excess white noise
        logs2 = pm.Normal("logs2", mu=2*np.log(np.min(yerr[mask])), sd=5.0)
        # A SHO term to capture long term trends
        logS = pm.Normal("logS", mu=0.0, sd=15.0, testval=np.log(np.var(y[mask])))
        logw = pm.Normal("logw", mu=np.log(2*np.pi/10.0), sd=10.0)
        term1 = xo.gp.terms.SHOTerm(log_S0=logS, log_w0=logw, Q=1/np.sqrt(2))
        # The parameters of the RotationTerm kernel
        logamp = pm.Normal("logamp", mu=np.log(np.var(y[mask])), sd=5.0)
        logperiod = pm.Normal("logperiod", mu=np.log(ls_period), sd=5.0)
        period = pm.Deterministic("period", tt.exp(logperiod))
        logQ0 = pm.Normal("logQ0", mu=1.0, sd=10.0)
        logdeltaQ = pm.Normal("logdeltaQ", mu=2.0, sd=10.0)
        mix = pm.Uniform("mix", lower=0, upper=1.0)
        term2 = xo.gp.terms.RotationTerm(
            log_amp=logamp,
            period=period,
            log_Q0=logQ0,
            log_deltaQ=logdeltaQ,
            mix=mix
        )
        # Quadratic limb-darkening coefficients.
        u_star = xo.distributions.QuadLimbDark("u_star",
                                               testval=np.array([0.31, 0.1]))
        # Stellar radius prior (mean, sd) and log-normal density prior.
        R_star = 0.8, 0.1
        Rho_star = 0.8
        r_star = pm.Normal("r_star", mu=R_star[0], sd=R_star[1])
        logrho_star = pm.Normal("logrho_star", mu=np.log(Rho_star), sd=1)
        rho_star = pm.Deterministic("rho_star", tt.exp(logrho_star))
        # Reject unphysical (non-positive) stellar radii.
        pm.Potential("r_star_prior", tt.switch(r_star > 0, 0, -np.inf))
        logP = pm.Normal("logP", mu=np.log(p_period), sd=0.1)
        # Heavy-tailed prior on the transit time.
        t0 = pm.StudentT("t0", mu=p_t0, nu=1.0, sd=0.5)
        # ror, b = xo.distributions.get_joint_radius_impact(
        #     min_radius=0.001, max_radius=0.3,
        #     testval_r=p_depth,
        #     testval_b=0.1)
        logror = pm.Normal("logror", mu=np.log(p_depth),
                           sd=2)
        ror = pm.Deterministic("r", tt.exp(logror))
        # Impact parameter, allowed up to 1 + ror (grazing transits).
        b_param = pm.Uniform("b_param", lower=0, upper=1)
        b = pm.Deterministic("b", b_param * (1 + ror))
        # Eccentricity: Kipping (2013) beta prior.
        ecc = pm.Bound(pm.Beta, lower=0.0, upper=1.0)("ecc", alpha=0.867, beta=3.03, testval=0.05)
        omega = xo.distributions.Angle("omega")
        # Hard bounds on the radius ratio.
        pm.Potential("ror_prior_lo", tt.switch(tt.all(0.005 < ror), 0.0, -np.inf))
        pm.Potential("ror_prior_hi", tt.switch(tt.all(ror < 0.3), 0.0, -np.inf))
        # pm.Potential("ror_prior", -tt.log(ror))
        # pm.Potential("b_prior", tt.switch(b < 1, 0, -np.inf))
        p_period = pm.Deterministic("p_period", tt.exp(logP))
        r_pl = pm.Deterministic("r_pl", r_star * ror)
        orbit = xo.orbits.KeplerianOrbit(
            r_star=r_star, #m_star=m_star,
            period=p_period, t0=t0, b=b,
            rho_star=rho_star, ecc=ecc, omega=omega)
        # Transit light curve in ppt (hence the *1e3).
        light_curves = xo.StarryLightCurve(u_star, r_star=r_star).get_light_curve(
            orbit=orbit, r=r_pl, t=x[mask], texp=texp)*1e3
        light_curve = pm.math.sum(light_curves, axis=-1)
        pm.Deterministic("light_curves", light_curves)
        # Set up the Gaussian Process model
        kernel = term1 + term2
        gp = xo.gp.GP(kernel, x[mask], yerr[mask]**2 + tt.exp(logs2), J=6)
        # Compute the Gaussian Process likelihood and add it into the
        # the PyMC3 model as a "potential"
        pm.Potential("loglike", gp.log_likelihood(y[mask] - mean - light_curve))
        # Compute the mean model prediction for plotting purposes
        pm.Deterministic("pred", gp.predict())
        # Optimize to find the maximum a posteriori parameters, one
        # parameter group at a time for stability.
        map_soln = pm.find_MAP(start=model.test_point, vars=[mean, logs2])
        map_soln = pm.find_MAP(start=map_soln, vars=[mean, logs2, logS, logw])
        map_soln = pm.find_MAP(start=map_soln, vars=[mean, logs2, logamp, logQ0, logdeltaQ, mix])
        map_soln = pm.find_MAP(start=map_soln, vars=[model.logror, model.b_param])
        map_soln = pm.find_MAP(start=map_soln, vars=[model.logror, model.b_param, logP, t0])
    return model, map_soln
model0, map_soln0 = build_model()
# -
# MAP GP prediction over the full data set.
plt.plot(x, y, "k", label="data")
plt.plot(x, map_soln0["pred"] + map_soln0["mean"], color="C1", label="model")
plt.xlim(x.min(), x.max())
plt.legend(fontsize=10)
plt.xlabel("time [days]")
plt.ylabel("relative flux")
plt.title("map model");
# +
# Sigma-clip the residuals around the full MAP model (GP + transits) at 5x the
# median-based RMS; the resulting mask is used to rebuild the model below.
mod = map_soln0["pred"] + map_soln0["mean"] + np.sum(map_soln0["light_curves"], axis=-1)
resid = y - mod
rms = np.sqrt(np.median(resid**2))
mask = np.abs(resid) < 5. * rms
plt.plot(x, resid, "k", label="data")
plt.plot(x, np.sum(map_soln0["light_curves"], axis=-1))
plt.plot(x[~mask], resid[~mask], "xr", label="outliers")
plt.axhline(0, color="#aaaaaa", lw=1)
plt.ylabel("residuals [ppt]")
plt.xlabel("time [days]")
plt.legend(fontsize=12, loc=4)
plt.xlim(x.min(), x.max());
plt.xlim(-9.5+17,-8+17)
# -
# Rebuild with outliers removed.
model, map_soln = build_model(mask)
plt.plot(x[mask], y[mask], "k", label="data")
plt.plot(x[mask], map_soln["pred"] + map_soln["mean"], color="C1", label="model")
plt.xlim(x.min(), x.max())
plt.legend(fontsize=10)
plt.xlabel("time [days]")
plt.ylabel("relative flux")
plt.title("map model");
# +
plt.plot(x[mask], y[mask], "k", label="data")
plt.plot(x[mask], y[mask] - map_soln["pred"] + map_soln["mean"], color="C1", label="model")
plt.plot(x[mask], map_soln["light_curves"], color="C1", label="model")
plt.xlim(x.min(), x.max())
plt.legend(fontsize=10)
plt.xlabel("time [days]")
plt.ylabel("relative flux")
plt.title("map model");
plt.xlim(-10,-8)
# plt.ylim(-0.2,0.25)
# -
# NUTS sampling via exoplanet's wrapper with windowed mass-matrix tuning.
sampler = xo.PyMC3Sampler(window=200, start=100, finish=1000)
with model:
    sampler.tune(tune=8000, start=map_soln, step_kwargs=dict(target_accept=0.9))
with model:
    trace = sampler.sample(draws=3000)
# Posterior of the stellar rotation period (GP RotationTerm).
period_samples = trace["period"]
plt.hist(period_samples, 35, histtype="step", color="k")
plt.yticks([])
plt.xlabel("rotation period [days]")
plt.ylabel("posterior density");
plt.tight_layout()
plt.savefig('rotation-posterior.png', dpi=200)
# Posterior of the planet's orbital period.
period_samples = trace["p_period"]
plt.hist(period_samples, 35, histtype="step", color="k")
plt.yticks([])
plt.xlabel("orbital period [days]")
plt.ylabel("posterior density");
map_soln
# +
# Compute the GP prediction
gp_mod = np.median(trace["pred"] + trace["mean"][:, None], axis=0)
# Get the posterior median orbital parameters
p = np.median(trace["p_period"])
t0 = np.median(trace["t0"])
# Plot the folded data (phase in hours, GP variability removed)
x_fold = (x[mask] - t0 + 0.5*p) % p - 0.5*p
plt.plot(x_fold * 24., y[mask] - gp_mod, ".k", label="data",
         alpha=0.5, zorder=-1000, mec="none")
# # Overplot the phase binned light curve
# bins = np.linspace(-10, 10, 50)
# denom, _ = np.histogram(x_fold, bins)
# num, _ = np.histogram(x_fold, bins, weights=y[mask])
# denom[num == 0] = 1.0
# plt.plot(0.5*(bins[1:] + bins[:-1]) * 24., num / denom, "o", color="C2",
#          label="binned")
# Plot the folded model with its 16th-84th percentile credible band
inds = np.argsort(x_fold)
inds = inds[np.abs(x_fold)[inds] < 0.3]
pred = trace["light_curves"][:, inds, 0]
pred = np.percentile(pred, [16, 50, 84], axis=0)
plt.plot(x_fold[inds] * 24., pred[1], color="C1", label="model")
art = plt.fill_between(x_fold[inds] * 24., pred[0], pred[2], color="C1", alpha=0.3,
                       zorder=1000)
art.set_edgecolor("none")
# Annotate the plot with the planet's period
txt = "period = {0:.5f} +/- {1:.5f} days".format(
    np.mean(trace["p_period"]), np.std(trace["p_period"]))
plt.annotate(txt, (0, 0.9), xycoords="axes fraction",
             xytext=(5, 5), textcoords="offset points",
             ha="left", va="bottom", fontsize=12)
# plt.annotate('AU Mic b', (0, 1), xycoords="axes fraction",
#              xytext=(5, 5), textcoords="offset points",
#              ha="left", va="bottom", fontsize=12)
plt.legend(fontsize=10, loc=4)
plt.xlabel("time since transit [hours]")
plt.ylabel("de-trended flux (ppt)")
plt.xlim(-7, 7);
plt.tight_layout()
plt.savefig('transit-plot.png', dpi=200)
# -
# 5/50/95 percentile bands for the combined model, transit-only and GP-only.
pred = np.percentile(trace["light_curves"][:, :, 0] + trace["mean"][:, None] +
                     trace["pred"], [5,50,95], axis=0)
pred1 = np.percentile(trace["light_curves"][:, :, 0], [5,50,95], axis=0)
pred2 = np.percentile(trace["pred"] + trace["mean"][:, None], [5,50,95], axis=0)
# +
# Two-panel figure zooming on the two individual transits.
# NOTE(review): `time` shadows the light-curve time array defined earlier.
fig, [ax1, ax2] = plt.subplots(2,1, figsize=[8, 8])
time = x[mask] - np.median(trace['t0'])
ax1.plot(time, pred[1], color='C3', label = 'combined model')
art = ax1.fill_between(time, pred[0], pred[2], color="C3", alpha=0.3,
                       zorder=1000)
art.set_edgecolor("none")
ax1.plot(time, pred1[1], color='C1', label = 'transit model')
art = ax1.fill_between(time, pred1[0], pred1[2], color="C1", alpha=0.3,
                       zorder=1000)
art.set_edgecolor("none")
ax1.plot(time, pred2[1], color='C2', label = 'GP model')
art = ax1.fill_between(time, pred2[0], pred2[2], color="C2", alpha=0.3,
                       zorder=1000)
art.set_edgecolor("none")
ax1.plot(time, y[mask], ".k", label="data",
         alpha=0.4, zorder=-1000, mec="none")
ax1.set_xlim(-1,1)
ax1.set_ylim(-20, 25)
ax1.legend()
ax1.set_xlabel("time since first transit (days)")
ax1.set_ylabel("flux (ppt)")
ax1.annotate('Transit 1', (0.7, 0.1), xycoords="axes fraction",
             xytext=(5, 5), textcoords="offset points",
             ha="left", va="bottom", fontsize=12)
ax2.plot(time, pred[1], color='C3', label = 'combined model')
art = ax2.fill_between(time, pred[0], pred[2], color="C3", alpha=0.3,
                       zorder=1000)
art.set_edgecolor("none")
ax2.plot(time, pred1[1] -10, color='C1', label = 'transit model (offset by 10 ppt)')
art = ax2.fill_between(time, pred1[0] -10, pred1[2]-10, color="C1", alpha=0.3,
                       zorder=1000)
art.set_edgecolor("none")
ax2.plot(time, pred2[1], color='C2', label = 'GP model')
art = ax2.fill_between(time, pred2[0], pred2[2], color="C2", alpha=0.3,
                       zorder=1000)
art.set_edgecolor("none")
ax2.plot(time, y[mask], ".k", label="data",
         alpha=0.4, zorder=-1000, mec="none")
ax2.set_xlim(16,18)
ax2.set_ylim(-20, 25)
ax2.legend()
ax2.set_xlabel("time since first transit (days)")
ax2.set_ylabel("flux (ppt)")
ax2.annotate('Transit 2', (0.7, 0.1), xycoords="axes fraction",
             xytext=(5, 5), textcoords="offset points",
             ha="left", va="bottom", fontsize=12)
plt.tight_layout()
plt.savefig('transits-GP.png', dpi=200)
# -
# Overview plot of the GP model, combined model and transit-only model.
fig, ax = plt.subplots(1,1, figsize=[14, 9])
plt.plot(x[mask], gp_mod, color="C3", label="model")
plt.plot(x[mask], np.percentile(pred, [50], axis=0)[0] + gp_mod, color="C2", label="model")
plt.plot(x[mask], np.percentile(pred, [50], axis=0)[0], color="C1", label="model")
plt.scatter(x,y, s=1)
plt.xlim(7-17,9-17)
# Posterior summary table for the main parameters.
pm.summary(trace, varnames=["logw", "logS", "logs2", "logamp", "r_pl", "logperiod", "logQ0",
                            "b", "t0", "p_period", "r_star", "rho_star", "u_star", "mean"])
x[mask] - np.median(trace['t0'])
# Persist the model and trace for later analysis.
import pickle
with open('my_model-aumic.pkl', 'wb') as buff:
    pickle.dump({'model': model, 'trace': trace}, buff)
del map_soln0
np.exp(np.log(0.5)+2)
# +
# Second version of the two-panel transit figure, with the time axis
# re-centred on the posterior-median transit epoch and tighter axis limits.
fig, [ax1, ax2] = plt.subplots(2,1, figsize=[8, 8])
time = x[mask] - np.median(trace['t0'])
# Panel 1: combined / transit-only / GP-only medians with credible bands.
ax1.plot(time, pred[1], color='C3', label = 'combined model')
art = ax1.fill_between(time, pred[0], pred[2], color="C3", alpha=0.3,
                       zorder=1000)
art.set_edgecolor("none")
ax1.plot(time, pred1[1], color='C1', label = 'transit model')
art = ax1.fill_between(time, pred1[0], pred1[2], color="C1", alpha=0.3,
                       zorder=1000)
art.set_edgecolor("none")
ax1.plot(time, pred2[1], color='C2', label = 'GP model')
art = ax1.fill_between(time, pred2[0], pred2[2], color="C2", alpha=0.3,
                       zorder=1000)
art.set_edgecolor("none")
ax1.plot(time, y[mask], ".k", label="data",
         alpha=0.4, zorder=-1000, mec="none")
ax1.set_xlim(-3.2,-2.6)
ax1.set_ylim(-20, -5)
ax1.legend()
ax1.set_xlabel("time since first transit (days)")
ax1.set_ylabel("flux (ppt)")
ax1.annotate('Transit 1', (0.7, 0.1), xycoords="axes fraction",
             xytext=(5, 5), textcoords="offset points",
             ha="left", va="bottom", fontsize=12)
# Panel 2: same layout around the second transit; transit-only curve is
# offset by -10 ppt so the curves stay visually separated.
ax2.plot(time, pred[1], color='C3', label = 'combined model')
art = ax2.fill_between(time, pred[0], pred[2], color="C3", alpha=0.3,
                       zorder=1000)
art.set_edgecolor("none")
ax2.plot(time, pred1[1] -10, color='C1', label = 'transit model (offset by 10 ppt)')
art = ax2.fill_between(time, pred1[0] -10, pred1[2]-10, color="C1", alpha=0.3,
                       zorder=1000)
art.set_edgecolor("none")
ax2.plot(time, pred2[1], color='C2', label = 'GP model')
art = ax2.fill_between(time, pred2[0], pred2[2], color="C2", alpha=0.3,
                       zorder=1000)
art.set_edgecolor("none")
ax2.plot(time, y[mask], ".k", label="data",
         alpha=0.4, zorder=-1000, mec="none")
ax2.set_xlim(16,18)
ax2.set_ylim(-20, 25)
ax2.legend()
ax2.set_xlabel("time since first transit (days)")
ax2.set_ylabel("flux (ppt)")
ax2.annotate('Transit 2', (0.7, 0.1), xycoords="axes fraction",
             xytext=(5, 5), textcoords="offset points",
             ha="left", va="bottom", fontsize=12)
plt.tight_layout()
# -
# Scratch computation (notebook echo only).
ref_time-1342.225
# NOTE(review): `whos` is an IPython magic, not Python — as plain Python this
# line is a NameError at runtime.
whos
# BUG FIX: DataFrame.to_hdf requires a `key` argument naming the group inside
# the HDF5 store; calling it without one raises a TypeError.
pm.trace_to_dataframe(trace).to_hdf('aumic-trace.hdf5', key='trace')
| code/for_tom.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/arunsechergy/TSAI-DeepNLP-END/blob/main/assignments/assignment14/NextSteps_Tokenize_Python_code_generate_predictions_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Q_0OJT3zpORb"
# Analysis:
# 1. Model is able to generate python code
# 2. Understands indentation, newlines, and colons
# 3. Inaccurate python code, need more understanding on the logic to be written
# 4. need better examples
# 5. need complete results
#
# Current steps:
# 1. Increase number of epochs for learning (if needed, tweak learning rate)
# 2. Increase Max Sequence length as well
# - Decrease the split
# 2. Add Data Augmentation strategy
# 3. Any other loss function, needs to be checked
# 4. Add Pretrained Python embedding layer
#
#
# + id="3o2pKxe5Xuc6"
file = 'drive/MyDrive/datasets/english_python_data_tabs.txt'
# + id="xpVKHOoDX7-7"
# mkdir data
# + colab={"base_uri": "https://localhost:8080/"} id="k9kcCMFBYBQM" outputId="557e13a9-c2c9-4033-d7ff-86d04c8a8335"
from google.colab import drive
drive.mount('/content/drive')
# + id="SvEyA-GNXvJ1"
# !cp -r 'drive/MyDrive/datasets/english_python_data_tabs.txt' 'data/'
# + id="VF6WBdkzXFIj"
import torch
import torch.nn as nn
import torch.optim as optim
import torchtext
#from torchtext.data import Field, BucketIterator
from torchtext.legacy.data import Example, Field, BucketIterator, Dataset
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import spacy
import numpy as np
import random
import math
import time
# + id="Jvf-W2iuXlsX"
import tokenize
import os
import re
import pandas as pd
from io import BytesIO, StringIO
# + id="bjAJXxWlXmpK"
# Paths for the (English description -> Python code) corpus.
DATA_DIR = 'data'
corpus_name = "english_python_data_tabs.txt"
# corpus_name = "english_python_data.txt"
corpus = os.path.join(DATA_DIR, corpus_name)
def readLines(file, n=None):
    """Read a text file and return its lines.

    Parameters
    ----------
    file : str
        Path of the file to read.
    n : int, optional
        If given, return only the first ``n`` lines; by default all lines
        are returned.  (FIX: the original accepted ``n`` but ignored it —
        the commented-out code showed limiting was the intent.)

    Returns
    -------
    list[str]
        Lines of the file, each keeping its trailing newline.
    """
    with open(file, 'r') as datafile:
        lines = datafile.readlines()
    return lines if n is None else lines[:n]
input_file= readLines(corpus)
# + id="t3nrTjGZYM31"
# Split the corpus into {question header line: [following code lines]}.
q_c = {}
prev_value = []
for id, line in enumerate(input_file):
    #if line.startswith('#'):
    #if (line.lower().startswith('# write')) or (line.lower().startswith('#write')) or (line.lower().startswith('#python')) or (line.lower().startswith('# python')):
    # A new "question" starts at comment lines like "# Write ..." / "#python ...".
    if re.match(r'[#]\d*\s*(Write|write|python|Python)', line):
        q_c[line] = id
        prev_line = line
        prev_value = []
    else:
        prev_value.append(line)
        q_c[prev_line] = prev_value
# NOTE(review): if the very first corpus line is not a question header,
# `prev_line` is unbound and the else-branch raises NameError.  Also, a
# question immediately followed by another question keeps its integer line
# id as its "code" value — confirm the corpus never contains either case.
# + id="5jrx9xCxYTW9"
# One DataFrame row per (question, code) pair.
question_code_pairs = pd.DataFrame.from_dict(q_c.items())
question_code_pairs.columns = ['question', 'code']
# + id="tdr4-JT8YZ_5"
# Collapse each list of code lines into a single source string.
question_code_pairs['code'] = question_code_pairs['code'].apply(lambda x: "".join(x))
# + id="N-ulu10KYbye"
# Seed every RNG in play for reproducibility.
SEED = 1234
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
# + id="pfk7mm9LYeFZ"
# NOTE(review): the bare 'en' shortcut only works on spaCy 2.x; newer spaCy
# versions need spacy.load('en_core_web_sm') — confirm the pinned version.
spacy_en = spacy.load('en')
# + id="ygfJ4xWk9ewL"
#CONSTANT DEFINITION
# Upper bound on tokens kept per question/code sequence.
MAX_SEQ_LENGTH = 200
# + id="Hmp3ryrXYhnQ"
def tokenize_en(text, max_length=MAX_SEQ_LENGTH):
    """Tokenize an English string with spaCy, keeping at most *max_length* tokens."""
    return [tok.text for tok in spacy_en.tokenizer(text)][:max_length]
# + id="EnlMQA9PYoRL"
def tokenize_py(text, max_length=MAX_SEQ_LENGTH):
    """Tokenize Python source with the stdlib ``tokenize`` module.

    Returns at most *max_length* token strings.  Generated or partial code is
    often not valid Python, so tokenization errors are deliberately swallowed
    and whatever was tokenized up to the failure point is returned.
    """
    token_stream = tokenize.tokenize(BytesIO(text.encode('utf-8')).readline)
    collected = []
    try:
        collected.extend(tok.string for tok in token_stream)
    except Exception:
        # Best effort: keep the tokens gathered before the failure.
        pass
    return collected[:max_length]
# + id="2vGXTBGiYkTT"
# Source (English prompt) field: spaCy-tokenized, lowercased, batch-first.
SRC = Field(tokenize = tokenize_en,
            init_token = '<sos>',
            eos_token = '<eos>',
            lower = True,
            batch_first = True)
# Target (Python code) field.
# BUG FIX: the end-of-sequence token had been corrupted to '<<PASSWORD>>'
# (an artifact of automated secret scrubbing).  It must be '<eos>' to match
# SRC and the eos-based stopping condition in translate_sentence().
TRG = Field(tokenize = tokenize_py,
            init_token = '<sos>',
            eos_token = '<eos>',
            lower = True,
            batch_first = True
            )
# (attribute name, Field) bindings used when building Examples below.
fields = [('src', SRC), ('trg', TRG)]
# + colab={"base_uri": "https://localhost:8080/"} id="RitJggaGbuvY" outputId="e2e2ec7a-ada1-4be6-a261-646092d23514"
# 95% / 3% / 2% split boundaries (row counts) over the pair table.
train_len = int(0.95*(question_code_pairs.shape[0]))
valid_len = int(0.98*(question_code_pairs.shape[0]))
test_len = int((question_code_pairs.shape[0]))
train_len, valid_len, test_len
# + id="Zm3JdopMFLDb"
train_pair, valid_pair, test_pair = question_code_pairs[:train_len], question_code_pairs[train_len:valid_len], question_code_pairs[valid_len:test_len]
# + id="gYT9jLRfFVFB"
# Wrap each split in torchtext Examples/Datasets.  The slices above keep the
# original integer index labels, which is why valid/test iterate over
# range(train_len, valid_len) / range(valid_len, test_len).
train_example = [Example.fromlist([train_pair.question[i], train_pair.code[i]], fields) for i in range(train_len)]
train_data = Dataset(train_example, fields)
# + id="gbPNYRNkGkJm"
valid_example = [Example.fromlist([valid_pair.question[i], valid_pair.code[i]], fields) for i in range(train_len, valid_len)]
valid_data = Dataset(valid_example, fields)
# + id="-ILNFqKKHS-w"
test_example = [Example.fromlist([test_pair.question[i], test_pair.code[i]], fields) for i in range(valid_len, test_len)]
test_data = Dataset(test_example, fields)
# + id="lkU88cUzb3Z7"
# Vocabulary is built from the training split only; tokens seen fewer than
# twice map to <unk>.
SRC.build_vocab(train_data, min_freq = 2)
TRG.build_vocab(train_data, min_freq = 2)
# + colab={"base_uri": "https://localhost:8080/"} id="6805ktkRfHmd" outputId="0e96879e-70c7-4e68-df98-8be64a736281"
# checking :, \t and \n in the vocab
vars(TRG.vocab)['stoi'].get(':'), vars(TRG.vocab)['stoi'].get('\t'), vars(TRG.vocab)['stoi'].get('\n')
# + colab={"base_uri": "https://localhost:8080/"} id="hOrPFtp_fDPp" outputId="e0df708c-f23f-4d54-d3a0-73b7fa79e41e"
# check max sequence length of the train_data
len__ = []
for i in range(len(train_data)):
    len_ = len(vars(list(train_data)[i])['trg'])
    len__.append(len_)
max(len__)
# + colab={"base_uri": "https://localhost:8080/"} id="bP5_46VOfRmy" outputId="e6733f34-8fa7-46cd-f777-f1db58e639e8"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device
# + id="CMWrGxtof7_y"
# BucketIterator groups similarly-sized examples to minimise padding.
BATCH_SIZE = 128
train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
    (train_data, valid_data, test_data),
    batch_size = BATCH_SIZE,
    device = device,sort=False)
# + id="Df6b18Wnf-ds"
class Encoder(nn.Module):
    """Transformer encoder: token + position embeddings followed by a stack
    of EncoderLayers.

    `input_dim` is the source vocabulary size; `max_length` is the longest
    source sequence whose positions can be embedded.
    """

    def __init__(self,
                 input_dim,
                 hid_dim,
                 n_layers,
                 n_heads,
                 pf_dim,
                 dropout,
                 device,
                 max_length = 100):
        super().__init__()
        self.device = device
        self.tok_embedding = nn.Embedding(input_dim, hid_dim)
        self.pos_embedding = nn.Embedding(max_length, hid_dim)
        self.layers = nn.ModuleList([EncoderLayer(hid_dim,
                                                  n_heads,
                                                  pf_dim,
                                                  dropout,
                                                  device)
                                     for _ in range(n_layers)])
        self.dropout = nn.Dropout(dropout)
        # sqrt(hid_dim) scaling applied to token embeddings, as in the
        # original Transformer ("Attention Is All You Need").
        self.scale = torch.sqrt(torch.FloatTensor([hid_dim])).to(device)

    def forward(self, src, src_mask):
        #src = [batch size, src len]
        #src_mask = [batch size, 1, 1, src len]
        batch_size = src.shape[0]
        src_len = src.shape[1]
        # 0..src_len-1 position indices, one row per batch element.
        pos = torch.arange(0, src_len).unsqueeze(0).repeat(batch_size, 1).to(self.device)
        #pos = [batch size, src len]
        #print(f'{"encoder pos Embedding", self.pos_embedding(pos) }')
        #print(f'{"encoder pos Embedding shape", pos.shape }')
        src = self.dropout((self.tok_embedding(src) * self.scale) + self.pos_embedding(pos))
        #src = [batch size, src len, hid dim]
        for layer in self.layers:
            src = layer(src, src_mask)
        #src = [batch size, src len, hid dim]
        return src
# + id="tgjG5ziTgANi"
class EncoderLayer(nn.Module):
    """One Transformer encoder block: self-attention then a position-wise
    feed-forward network, each wrapped in dropout, a residual connection
    and LayerNorm (post-norm arrangement)."""

    def __init__(self,
                 hid_dim,
                 n_heads,
                 pf_dim,
                 dropout,
                 device):
        super().__init__()
        self.self_attn_layer_norm = nn.LayerNorm(hid_dim)
        self.ff_layer_norm = nn.LayerNorm(hid_dim)
        self.self_attention = MultiHeadAttentionLayer(hid_dim, n_heads, dropout, device)
        self.positionwise_feedforward = PositionwiseFeedforwardLayer(hid_dim,
                                                                     pf_dim,
                                                                     dropout)
        self.dropout = nn.Dropout(dropout)

    def forward(self, src, src_mask):
        #src = [batch size, src len, hid dim]
        #src_mask = [batch size, 1, 1, src len]
        #self attention
        _src, _ = self.self_attention(src, src, src, src_mask)
        #dropout, residual connection and layer norm
        src = self.self_attn_layer_norm(src + self.dropout(_src))
        #src = [batch size, src len, hid dim]
        #positionwise feedforward
        _src = self.positionwise_feedforward(src)
        #dropout, residual and layer norm
        src = self.ff_layer_norm(src + self.dropout(_src))
        #src = [batch size, src len, hid dim]
        return src
# + id="CtZoYiSKgCUr"
class MultiHeadAttentionLayer(nn.Module):
    """Scaled dot-product attention split across `n_heads` parallel heads.

    Q/K/V come from learned linear projections of the inputs; per-head
    outputs are concatenated and mixed by a final output projection.
    """

    def __init__(self, hid_dim, n_heads, dropout, device):
        super().__init__()
        # The hidden size must divide evenly across the heads.
        assert hid_dim % n_heads == 0
        self.hid_dim = hid_dim
        self.n_heads = n_heads
        self.head_dim = hid_dim // n_heads
        self.fc_q = nn.Linear(hid_dim, hid_dim)
        self.fc_k = nn.Linear(hid_dim, hid_dim)
        self.fc_v = nn.Linear(hid_dim, hid_dim)
        self.fc_o = nn.Linear(hid_dim, hid_dim)
        self.dropout = nn.Dropout(dropout)
        # 1/sqrt(head_dim) scaling keeps the dot products in a stable range.
        self.scale = torch.sqrt(torch.FloatTensor([self.head_dim])).to(device)

    def forward(self, query, key, value, mask = None):
        batch_size = query.shape[0]
        #query = [batch size, query len, hid dim]
        #key = [batch size, key len, hid dim]
        #value = [batch size, value len, hid dim]
        Q = self.fc_q(query)
        K = self.fc_k(key)
        V = self.fc_v(value)
        #Q = [batch size, query len, hid dim]
        #K = [batch size, key len, hid dim]
        #V = [batch size, value len, hid dim]
        # Reshape into per-head views: [batch, heads, len, head dim].
        Q = Q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
        K = K.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
        V = V.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
        #Q = [batch size, n heads, query len, head dim]
        #K = [batch size, n heads, key len, head dim]
        #V = [batch size, n heads, value len, head dim]
        energy = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale
        #energy = [batch size, n heads, query len, key len]
        if mask is not None:
            # Masked positions get -1e10 so softmax sends their weight to ~0.
            energy = energy.masked_fill(mask == 0, -1e10)
        attention = torch.softmax(energy, dim = -1)
        #attention = [batch size, n heads, query len, key len]
        x = torch.matmul(self.dropout(attention), V)
        #x = [batch size, n heads, query len, head dim]
        x = x.permute(0, 2, 1, 3).contiguous()
        #x = [batch size, query len, n heads, head dim]
        x = x.view(batch_size, -1, self.hid_dim)
        #x = [batch size, query len, hid dim]
        x = self.fc_o(x)
        #x = [batch size, query len, hid dim]
        return x, attention
# + id="rvBqmINjgD_t"
class PositionwiseFeedforwardLayer(nn.Module):
    """Two-layer position-wise feed-forward block (hid -> pf -> hid).

    Applied independently at every sequence position; ReLU and dropout sit
    between the expansion and contraction projections.
    """

    def __init__(self, hid_dim, pf_dim, dropout):
        super().__init__()
        # Expansion and contraction projections around the non-linearity.
        self.fc_1 = nn.Linear(hid_dim, pf_dim)
        self.fc_2 = nn.Linear(pf_dim, hid_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # x: [batch size, seq len, hid dim] -> same shape out.
        hidden = self.dropout(torch.relu(self.fc_1(x)))
        return self.fc_2(hidden)
# + id="2mDXnIiKgGBq"
class Decoder(nn.Module):
    """Transformer decoder: embeds target tokens + positions, runs the
    DecoderLayer stack, and projects to vocabulary logits.

    `output_dim` is the target vocabulary size; `max_length` bounds the
    longest decodable sequence.
    """

    def __init__(self,
                 output_dim,
                 hid_dim,
                 n_layers,
                 n_heads,
                 pf_dim,
                 dropout,
                 device,
                 max_length = 100):
        super().__init__()
        self.device = device
        self.tok_embedding = nn.Embedding(output_dim, hid_dim)
        self.pos_embedding = nn.Embedding(max_length, hid_dim)
        self.layers = nn.ModuleList([DecoderLayer(hid_dim,
                                                  n_heads,
                                                  pf_dim,
                                                  dropout,
                                                  device)
                                     for _ in range(n_layers)])
        self.fc_out = nn.Linear(hid_dim, output_dim)
        self.dropout = nn.Dropout(dropout)
        # sqrt(hid_dim) embedding scale, as in the original Transformer.
        self.scale = torch.sqrt(torch.FloatTensor([hid_dim])).to(device)

    def forward(self, trg, enc_src, trg_mask, src_mask):
        #trg = [batch size, trg len]
        #enc_src = [batch size, src len, hid dim]
        #trg_mask = [batch size, 1, trg len, trg len]
        #src_mask = [batch size, 1, 1, src len]
        batch_size = trg.shape[0]
        trg_len = trg.shape[1]
        # 0..trg_len-1 position indices, one row per batch element.
        pos = torch.arange(0, trg_len).unsqueeze(0).repeat(batch_size, 1).to(self.device)
        #pos = [batch size, trg len]
        #print(f"Shape of position: batch_size, trg_len {pos.shape} == {batch_size} {trg_len}")
        #print(f'{"postion", pos}')
        #print(f'{"token_embedding",self.tok_embedding(trg)}')
        #print(f'{"self scale", self.scale}')
        #print(f'{"self scale", self.pos_embedding(pos)}')
        #print(f'{"print target",(self.tok_embedding(trg) * self.scale)}')
        #print(f'{"print target",(self.tok_embedding(trg) * self.scale) + self.pos_embedding(pos)}')
        trg = self.dropout((self.tok_embedding(trg) * self.scale) + self.pos_embedding(pos))
        #trg = [batch size, trg len, hid dim]
        for layer in self.layers:
            trg, attention = layer(trg, enc_src, trg_mask, src_mask)
        #trg = [batch size, trg len, hid dim]
        #attention = [batch size, n heads, trg len, src len]
        # Only the attention of the final layer is returned.
        output = self.fc_out(trg)
        #output = [batch size, trg len, output dim]
        return output, attention
# + id="Xn7vhh_JgIJ6"
class DecoderLayer(nn.Module):
    """One Transformer decoder block: masked self-attention, encoder
    (cross) attention, then a position-wise feed-forward network — each
    followed by dropout, a residual connection and LayerNorm."""

    def __init__(self,
                 hid_dim,
                 n_heads,
                 pf_dim,
                 dropout,
                 device):
        super().__init__()
        self.self_attn_layer_norm = nn.LayerNorm(hid_dim)
        self.enc_attn_layer_norm = nn.LayerNorm(hid_dim)
        self.ff_layer_norm = nn.LayerNorm(hid_dim)
        self.self_attention = MultiHeadAttentionLayer(hid_dim, n_heads, dropout, device)
        self.encoder_attention = MultiHeadAttentionLayer(hid_dim, n_heads, dropout, device)
        self.positionwise_feedforward = PositionwiseFeedforwardLayer(hid_dim,
                                                                     pf_dim,
                                                                     dropout)
        self.dropout = nn.Dropout(dropout)

    def forward(self, trg, enc_src, trg_mask, src_mask):
        #trg = [batch size, trg len, hid dim]
        #enc_src = [batch size, src len, hid dim]
        #trg_mask = [batch size, 1, trg len, trg len]
        #src_mask = [batch size, 1, 1, src len]
        #self attention
        _trg, _ = self.self_attention(trg, trg, trg, trg_mask)
        #dropout, residual connection and layer norm
        trg = self.self_attn_layer_norm(trg + self.dropout(_trg))
        #trg = [batch size, trg len, hid dim]
        #encoder attention
        _trg, attention = self.encoder_attention(trg, enc_src, enc_src, src_mask)
        # query, key, value
        #dropout, residual connection and layer norm
        trg = self.enc_attn_layer_norm(trg + self.dropout(_trg))
        #trg = [batch size, trg len, hid dim]
        #positionwise feedforward
        _trg = self.positionwise_feedforward(trg)
        #dropout, residual and layer norm
        trg = self.ff_layer_norm(trg + self.dropout(_trg))
        #trg = [batch size, trg len, hid dim]
        #attention = [batch size, n heads, trg len, src len]
        return trg, attention
# + id="g7Ybe4-7gKXx"
class Seq2Seq(nn.Module):
    """Full Transformer: builds the padding/causal masks, encodes the
    source and decodes the target in one forward pass."""

    def __init__(self,
                 encoder,
                 decoder,
                 src_pad_idx,
                 trg_pad_idx,
                 device):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_pad_idx = src_pad_idx
        self.trg_pad_idx = trg_pad_idx
        self.device = device

    def make_src_mask(self, src):
        """Padding mask: True where `src` holds a real (non-pad) token."""
        #src = [batch size, src len]
        src_mask = (src != self.src_pad_idx).unsqueeze(1).unsqueeze(2)
        #src_mask = [batch size, 1, 1, src len]
        return src_mask

    def make_trg_mask(self, trg):
        """Padding mask combined with a lower-triangular causal mask, so each
        target position can only attend to itself and earlier positions."""
        #trg = [batch size, trg len]
        trg_pad_mask = (trg != self.trg_pad_idx).unsqueeze(1).unsqueeze(2)
        #trg_pad_mask = [batch size, 1, 1, trg len]
        trg_len = trg.shape[1]
        trg_sub_mask = torch.tril(torch.ones((trg_len, trg_len), device = self.device)).bool()
        #trg_sub_mask = [trg len, trg len]
        trg_mask = trg_pad_mask & trg_sub_mask
        #trg_mask = [batch size, 1, trg len, trg len]
        return trg_mask

    def forward(self, src, trg):
        #src = [batch size, src len]
        #trg = [batch size, trg len]
        src_mask = self.make_src_mask(src)
        trg_mask = self.make_trg_mask(trg)
        #src_mask = [batch size, 1, 1, src len]
        #trg_mask = [batch size, 1, trg len, trg len]
        enc_src = self.encoder(src, src_mask)
        #enc_src = [batch size, src len, hid dim]
        output, attention = self.decoder(trg, enc_src, trg_mask, src_mask)
        #output = [batch size, trg len, output dim]
        #attention = [batch size, n heads, trg len, src len]
        return output, attention
# + id="KVLIub29gMWH"
# Model hyper-parameters (tutorial-scale sizes, smaller than the original
# "Attention Is All You Need" configuration).
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
HID_DIM = 256
ENC_LAYERS = 3
DEC_LAYERS = 3
ENC_HEADS = 8
DEC_HEADS = 8
ENC_PF_DIM = 512
DEC_PF_DIM = 512
ENC_DROPOUT = 0.1
DEC_DROPOUT = 0.1
MAX_SEQ_LENGTH = MAX_SEQ_LENGTH #this is defined earlier
# MAX_SEQ_LENGTH is passed as max_length so the position embeddings cover
# the full 200-token sequences (the class default is only 100).
enc = Encoder(INPUT_DIM,
              HID_DIM,
              ENC_LAYERS,
              ENC_HEADS,
              ENC_PF_DIM,
              ENC_DROPOUT,
              device,
              MAX_SEQ_LENGTH)
dec = Decoder(OUTPUT_DIM,
              HID_DIM,
              DEC_LAYERS,
              DEC_HEADS,
              DEC_PF_DIM,
              DEC_DROPOUT,
              device,
              MAX_SEQ_LENGTH)
# + id="qI0lh9AsgUxw"
# Pad indices let the model mask out padding in attention and loss.
SRC_PAD_IDX = SRC.vocab.stoi[SRC.pad_token]
TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]
model = Seq2Seq(enc, dec, SRC_PAD_IDX, TRG_PAD_IDX, device).to(device)
# + colab={"base_uri": "https://localhost:8080/"} id="hj0nes7jgWlN" outputId="8a6f5929-962c-4fa2-a6f1-a9d709f0a112"
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
print(f'The model has {count_parameters(model):,} trainable parameters')
# + id="ohev4jAegX5v"
def initialize_weights(m):
    """Xavier-initialise any module owning a matrix-shaped weight.

    Intended for ``model.apply(initialize_weights)``.  Modules without a
    ``weight`` attribute, or whose weight is 1-D (e.g. LayerNorm), are
    left untouched.
    """
    if not hasattr(m, 'weight'):
        return
    if m.weight.dim() > 1:
        nn.init.xavier_uniform_(m.weight.data)
# + id="iyas1Hixgalq"
# Apply Xavier init to every matrix-shaped weight in the model.
model.apply(initialize_weights);
# + id="7ZDVKVKTgcR9"
LEARNING_RATE = 0.0005
optimizer = torch.optim.Adam(model.parameters(), lr = LEARNING_RATE)
# + id="njtD3l0PgdyF"
# Padding positions are excluded from the cross-entropy loss.
criterion = nn.CrossEntropyLoss(ignore_index = TRG_PAD_IDX)
# + id="0Sy0KQnSgfWX"
def train(model, iterator, optimizer, criterion, clip):
    """Run one optimisation epoch; return the mean per-batch loss.

    Side effects: updates model parameters and optimizer state in place.
    `clip` is the max gradient norm used for gradient clipping.
    """
    model.train()
    epoch_loss = 0
    for i, batch in enumerate(iterator):
        src = batch.src
        trg = batch.trg
        #print(f'{"traget",trg}')
        optimizer.zero_grad()
        # Teacher forcing: decoder input is trg without its final token...
        output, _ = model(src, trg[:,:-1])
        #print(output)
        #output = [batch size, trg len - 1, output dim]
        #trg = [batch size, trg len]
        output_dim = output.shape[-1]
        output = output.contiguous().view(-1, output_dim)
        # ...and the prediction target is trg without the leading <sos>.
        trg = trg[:,1:].contiguous().view(-1)
        #output = [batch size * trg len - 1, output dim]
        #trg = [batch size * trg len - 1]
        loss = criterion(output, trg)
        loss.backward()
        # Clip gradients to guard against exploding gradients.
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        epoch_loss += loss.item()
    return epoch_loss / len(iterator)
# + id="HMf3qwjKghPB"
def evaluate(model, iterator, criterion):
    """Return the mean per-batch loss over `iterator` without weight updates.

    Mirrors train(): same shifted-target scheme, but in eval mode (dropout
    off) and under torch.no_grad().
    """
    model.eval()
    epoch_loss = 0
    with torch.no_grad():
        for i, batch in enumerate(iterator):
            src = batch.src
            trg = batch.trg
            output, _ = model(src, trg[:,:-1])
            #print(output)
            #output = [batch size, trg len - 1, output dim]
            #trg = [batch size, trg len]
            output_dim = output.shape[-1]
            output = output.contiguous().view(-1, output_dim)
            trg = trg[:,1:].contiguous().view(-1)
            #output = [batch size * trg len - 1, output dim]
            #trg = [batch size * trg len - 1]
            loss = criterion(output, trg)
            epoch_loss += loss.item()
    return epoch_loss / len(iterator)
# + id="hgn4pAMzgiyY"
def epoch_time(start_time, end_time):
    """Split the elapsed wall-clock time between two timestamps into
    whole (minutes, seconds)."""
    elapsed = end_time - start_time
    minutes = int(elapsed / 60)
    seconds = int(elapsed - minutes * 60)
    return minutes, seconds
# + colab={"base_uri": "https://localhost:8080/"} id="R_HFWcxIgkbV" outputId="64e87b46-6f78-41da-f2ac-6019ca5b20f5"
# increased the num_of_epochs to 25
N_EPOCHS = 25
CLIP = 1
best_valid_loss = float('inf')
# Standard train/validate loop; checkpoints the best-validation model.
for epoch in range(N_EPOCHS):
    start_time = time.time()
    train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
    valid_loss = evaluate(model, valid_iterator, criterion)
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'python_code_generator1-model.pt')
    # Perplexity = exp(cross-entropy loss).
    print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}')
# + colab={"base_uri": "https://localhost:8080/"} id="DG38U1aFgoJ5" outputId="4f8836aa-fdfa-4a7b-98d9-45db7517d546"
# Restore the best checkpoint and report held-out test loss/perplexity.
model.load_state_dict(torch.load('python_code_generator1-model.pt'))
test_loss = evaluate(model, test_iterator, criterion)
print(f'| Test Loss: {test_loss:.3f} | Test PPL: {math.exp(test_loss):7.3f} |')
# + id="EFjHEdpjopDV"
#Save the model in drive
# !cp -r python_code_generator1-model.pt 'drive/MyDrive/Models/'
# + id="FHuwvBdNjTTP"
def translate_sentence(sentence, src_field, trg_field, model, device, max_len = MAX_SEQ_LENGTH):
    """Greedy-decode the Python code for one English prompt.

    `sentence` may be a raw string (tokenised here with spaCy) or an
    already-tokenised list of strings.  Returns (generated target tokens
    including the trailing eos token if reached, attention over the source).
    """
    # For Prediction
    # Set the model in evaluation mode to deactivate the DropOut modules
    # This is IMPORTANT to have reproducible results during evaluation!
    model.eval()
    if isinstance(sentence, str):
        # NOTE(review): loading the spaCy pipeline on every call is slow —
        # the module-level `spacy_en` could be reused here instead.
        nlp = spacy.load('en')
        tokens = [token.text.lower() for token in nlp(sentence)]
    else:
        tokens = [token.lower() for token in sentence]
    # Frame the prompt with <sos>/<eos> and map tokens to vocab indices.
    tokens = [src_field.init_token] + tokens + [src_field.eos_token]
    src_indexes = [src_field.vocab.stoi[token] for token in tokens]
    src_tensor = torch.LongTensor(src_indexes).unsqueeze(0).to(device)
    src_mask = model.make_src_mask(src_tensor)
    # Encode the source once; decoding reuses enc_src each step.
    with torch.no_grad():
        enc_src = model.encoder(src_tensor, src_mask)
    trg_indexes = [trg_field.vocab.stoi[trg_field.init_token]]
    # Greedy decoding: append the argmax token until eos or max_len.
    for i in range(max_len):
        trg_tensor = torch.LongTensor(trg_indexes).unsqueeze(0).to(device)
        trg_mask = model.make_trg_mask(trg_tensor)
        with torch.no_grad():
            output, attention = model.decoder(trg_tensor, enc_src, trg_mask, src_mask)
        pred_token = output.argmax(2)[:,-1].item()
        trg_indexes.append(pred_token)
        if pred_token == trg_field.vocab.stoi[trg_field.eos_token]:
            break
    trg_tokens = [trg_field.vocab.itos[i] for i in trg_indexes]
    # Drop the leading <sos> before returning.
    return trg_tokens[1:], attention
# + id="pKcRrudvk7H9"
# Check the predictions
# + colab={"base_uri": "https://localhost:8080/"} id="LmTfRBUtk6mn" outputId="69ee3ce4-b6ad-46c7-a585-b0012f16be53"
# Pick one held-out example and show its source/target token lists.
example_idx = 1
src = vars(test_data.examples[example_idx])['src']
trg = vars(test_data.examples[example_idx])['trg']
print(src)
print(trg)
# + colab={"base_uri": "https://localhost:8080/"} id="XX6tUkECjWyu" outputId="4ec1ecb3-2247-4969-a261-646092d23514"
translation, attention = translate_sentence(src, SRC, TRG, model, device, max_len=MAX_SEQ_LENGTH)
processed_prediction = ' '.join(translation[1:-1]) # spaced joined, takes from 1 because of utf-8 token and ignore the eos token with -1
print(f'predicted trg = {translation}')
print("translated prediction\n", ' '.join(translation[1:-1]))
# + id="FWzngNVAmmkk"
# Evaluating the Generated Python code
# + id="Jm6GEIu2lW2N" colab={"base_uri": "https://localhost:8080/", "height": 132} outputId="d19b0f5a-1080-40f7-e211-f751a317f42f"
# replace the unk token with a name - test
for_evaluation = processed_prediction.replace('<unk>', 'test')
# SECURITY NOTE(review): exec() runs arbitrary model-generated code in this
# process.  Acceptable in a personal notebook, but never do this with
# untrusted output — compile(for_evaluation, '<gen>', 'exec') would give a
# syntax-only check without executing anything.
exec(for_evaluation)
# + id="iYzI2hOCmXJG"
# If no error, then the run is successful
# + id="ah9MNmxBoDsm"
# Attention
# + id="8EDnazsOm0q7"
def display_attention(sentence, translation, attention, n_heads = 8, n_rows = 4, n_cols = 2):
    """Plot one attention heat-map per head for a (sentence -> translation) pair.

    `attention` is expected to be [1, n heads, trg len, src len] as returned
    by the decoder; `n_rows * n_cols` must equal `n_heads`.
    """
    assert n_rows * n_cols == n_heads
    fig = plt.figure(figsize=(15,25))
    for i in range(n_heads):
        ax = fig.add_subplot(n_rows, n_cols, i+1)
        _attention = attention.squeeze(0)[i].cpu().detach().numpy()
        cax = ax.matshow(_attention, cmap='bone')
        ax.tick_params(labelsize=12)
        # BUG FIX: lock the tick locators *before* assigning fixed label
        # lists.  The original set the labels first and the MultipleLocator
        # afterwards, which misaligns labels and triggers the
        # "FixedFormatter should only be used together with FixedLocator"
        # warning/error on recent Matplotlib versions.
        ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
        ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
        ax.set_xticklabels(['']+['<sos>']+[t.lower() for t in sentence]+['<eos>'],
                           rotation=45)
        ax.set_yticklabels(['']+translation)
    plt.show()
| assignments/assignment14/NextSteps_Tokenize_Python_code_generate_predictions_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from ipywidgets import interactive,widgets, GridspecLayout
# %matplotlib inline
# +
# 20x32 widget grid hosting the controls and the live plot output.
grid = GridspecLayout(20, 32)
style = {'description_width': 'initial'}
# Toggle between line and scatter rendering of the sine curve.
plot_select = widgets.ToggleButtons(
    options=['lines','scatter'], description='',
    style = style)
def plot_func(a, f, plot_type):
    """Draw y = a*sin(x/f) over [0, 2*pi] as a line or scatter plot.

    `a` is the amplitude slider value, `f` the frequency divisor and
    `plot_type` one of 'lines'/'scatter' (anything else draws empty axes).
    """
    plt.figure(2)
    xs = np.linspace(0, 2*np.pi, num=1000)
    ys = a*np.sin(1/f*xs)
    if plot_type == 'scatter':
        plt.scatter(xs, ys)
    elif plot_type == 'lines':
        plt.plot(xs, ys)
    plt.ylim(-1.1, 1.1)
    plt.title('a sin(f)')
    plt.show()
# Sliders controlling amplitude (a) and the frequency divisor (f).
a = widgets.FloatSlider(value = -1, min = -1, max = 0, step = 0.1)
f = widgets.FloatSlider(value = 0.55,min = 0.1, max = 1.)
#interactive_plot = interactive(plot_func, a=(-1,0,0.1), f=(0.1, 1))
#output = interactive_plot.children[-1]
#output.layout.height = '300px'
#interactive_plot
# Controls in rows 0-2, live plot output below them (rows 3-15).
grid[0,8:31] = plot_select
grid[1,8:31] = a
grid[2,8:31] = f
grid[3:16,8:31] = widgets.interactive_output(plot_func,{'a':a,'f':f, 'plot_type':plot_select})
grid
# -
| notebooks/.ipynb_checkpoints/simpleAppTest-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Bounding-box annotations: one row per box, with image name, class and
# pixel coordinates.  NOTE(review): column positions (x[1][0], x[1][3..7])
# are assumed from the CSV layout — verify against bat_ball.csv.
df_train = pd.read_csv('bat_ball.csv')
df_train
# Dump selected columns of every annotation row to its own <image>BUY.txt file.
for x in df_train.iterrows():
    #iterrows returns a tuple per record which you can unpack
    # X[0] is the index
    # X[1] is a tuple of the rows values so X[1][0] is the value of the first column etc.
    pd.DataFrame([x[1][7], x[1][5], x[1][6], x[1][3], x[1][4]]).to_csv(str(x[1][0].split('.')[0])+"BUY.txt", header=False, index=False)
# NOTE(review): to_csv() with no path just returns a string that is discarded.
df_train.to_csv()
unique_img_ids = df_train.name.unique()
unique_img_ids
# Map the class labels to YOLO integer ids.
df_train['class'] = df_train['class'].replace({'ball':0,'bat':1})
# +
# Partition by source image resolution; each group pairs with one
# (width, height) entry of w_list/h_list below.
train_df1 = df_train[df_train['image_height'] == 720]
train_df2 = df_train[df_train['image_height'] == 1144]
train_df3 = df_train[df_train['image_height'] == 678]
# -
w_list = [680, 1080, 640]
h_list = [720, 1144, 678]
# +
def convert(size, box):
    """Convert a [cls, xmin, xmax, ymin, ymax] pixel box to YOLO format.

    `size` is (image width, image height).  Returns
    [cls, x_center, y_center, width, height] with all geometry values
    normalised to the 0-1 range.
    """
    img_w, img_h = size[0], size[1]
    cls, xmin, xmax, ymin, ymax = box[0], box[1], box[2], box[3], box[4]
    x_center = ((xmin + xmax) / 2.0) * (1. / img_w)
    y_center = ((ymin + ymax) / 2.0) * (1. / img_h)
    box_w = (xmax - xmin) * (1. / img_w)
    box_h = (ymax - ymin) * (1. / img_h)
    return [cls, x_center, y_center, box_w, box_h]
def convert_to_yolo_label(coco_format_box, w = w_list[2], h = h_list[2]):
    """Turn a COCO-style [cls, x, y, width, height] box into a YOLO label.

    Defaults to the image dimensions of the third resolution group
    (w_list[2] x h_list[2]).  Prints a warning when the converted box
    falls outside the normalised 0-1 range.
    """
    cls, x, y, bw, bh = coco_format_box[0], coco_format_box[1], coco_format_box[2], coco_format_box[3], coco_format_box[4]
    corners = (float(cls), float(x), float(x + bw), float(y), float(y + bh))
    yolo_box = convert((w, h), corners)
    # Sanity-check the conversion: every normalised value must lie in [0, 1].
    if np.max(yolo_box) > 1 or np.min(yolo_box) < 0:
        print("BOX HAS AN ISSUE")
        print(coco_format_box)
        print(yolo_box)
    return yolo_box
# +
# Build [cls, x, y, w, h] lists and convert the 640x678 group to YOLO format.
# NOTE(review): only train_df3 is converted here — train_df1/train_df2 never
# get a 'yolo_box' column before the concat below; confirm that is intended.
# These assignments on a filtered view may also raise SettingWithCopyWarning.
train_df3['bbox'] = train_df3[['class', 'x-axis', 'y-axis', 'width', 'height']].values.tolist()
train_df3['yolo_box'] = train_df3.bbox.apply(convert_to_yolo_label)
# -
train_df1
train_df2
train_df3
df_train
# +
#df_train['bbox'] = df_train[['class', 'x_center', 'y_center', 'width', 'height']].values.tolist()
# -
# Re-assemble the full table from the per-resolution groups.
df_train = pd.concat([train_df1, train_df2, train_df3])
df_train
# +
# Image ids (without extension) reserved for the validation split.
val_list = ['U1_14_9', 'U1_14_17', 'U1_16_14', 'U1_16_25', 'U1_17_11', 'U2_1_25', 'U2_2_24', 'U2_2_33', 'U2_3_28',
            'U2_5_18', 'U2_6_20', 'U2_7_27', 'U2_9_13', 'U2_11_12', 'U2_11_16', 'U2_12_19', 'U2_12_33', 'U2_13_20',
            'U2_14_14', 'U2_17_7', 'U2_17_18', 'U3_2_6', 'U3_3_19', 'U3_4_8', 'U4_0_16', 'U4_0_21', 'U4_0_29',
            'U4_1_23', 'U4_3_10', 'U4_3_22', 'U4_8_17', 'U4_9_9', 'U4_9_24', 'U4_9_25', 'U4_11_16', 'U4_11_17',
            'U4_12_17', 'U4_13_11', 'U4_14_15', 'U4_15_7', 'U4_15_20', 'U4_16_16', 'U4_19_12', 'U4_20_18', 'U4_21_16',
            'U4_22_16', 'U5_0_16', 'U5_1_9', 'U5_5_21', 'U5_6_19', 'U5_8_6', 'U5_9_7', 'U5_10_7', 'U6_1_17', 'U6_2_27',
            'U6_4_11', 'U6_4_18', 'U6_5_10', 'U6_7_21', 'U6_8_12', 'U6_8_19', 'U6_8_25', 'U6_8_33', 'U6_9_8', 'U6_10_6',
            'U6_12_10', 'U6_13_8', 'U6_13_14', 'U6_13_17', 'U6_14_14', 'U6_15_7', 'U6_16_15', 'U6_17_14', 'U8_1_17',
            'U8_5_17', 'U10_0_22', 'U10_1_20', 'U12_0_15', 'U12_1_10']
len(val_list)
# Append the image extension so the ids match the 'name' column.
for i in range(79):
    val_list[i] = val_list[i] + '.png'
val_annot = df_train.loc[df_train['name'].isin(val_list)]
val_annot
# +
# NOTE(review): this block duplicates the val_list definition above verbatim;
# the only real difference is the inverted isin() mask producing the
# training rows — the duplication could be collapsed.
val_list = ['U1_14_9', 'U1_14_17', 'U1_16_14', 'U1_16_25', 'U1_17_11', 'U2_1_25', 'U2_2_24', 'U2_2_33', 'U2_3_28',
            'U2_5_18', 'U2_6_20', 'U2_7_27', 'U2_9_13', 'U2_11_12', 'U2_11_16', 'U2_12_19', 'U2_12_33', 'U2_13_20',
            'U2_14_14', 'U2_17_7', 'U2_17_18', 'U3_2_6', 'U3_3_19', 'U3_4_8', 'U4_0_16', 'U4_0_21', 'U4_0_29',
            'U4_1_23', 'U4_3_10', 'U4_3_22', 'U4_8_17', 'U4_9_9', 'U4_9_24', 'U4_9_25', 'U4_11_16', 'U4_11_17',
            'U4_12_17', 'U4_13_11', 'U4_14_15', 'U4_15_7', 'U4_15_20', 'U4_16_16', 'U4_19_12', 'U4_20_18', 'U4_21_16',
            'U4_22_16', 'U5_0_16', 'U5_1_9', 'U5_5_21', 'U5_6_19', 'U5_8_6', 'U5_9_7', 'U5_10_7', 'U6_1_17', 'U6_2_27',
            'U6_4_11', 'U6_4_18', 'U6_5_10', 'U6_7_21', 'U6_8_12', 'U6_8_19', 'U6_8_25', 'U6_8_33', 'U6_9_8', 'U6_10_6',
            'U6_12_10', 'U6_13_8', 'U6_13_14', 'U6_13_17', 'U6_14_14', 'U6_15_7', 'U6_16_15', 'U6_17_14', 'U8_1_17',
            'U8_5_17', 'U10_0_22', 'U10_1_20', 'U12_0_15', 'U12_1_10']
len(val_list)
for i in range(79):
    val_list[i] = val_list[i] + '.png'
train_annot = df_train.loc[df_train['name'].isin(val_list) == False]
train_annot
# -
unique_img_ids_train = train_annot.name.unique()
unique_img_ids_train
unique_img_ids_val = val_annot.name.unique()
unique_img_ids_val
# Write one YOLO label file per validation image.
folder_location = "labels/val"
#change unique_img_ids[:2] to unique_img_ids to iterate through all images
for img_id in unique_img_ids_val: # loop through all unique image ids. Remove the slice to do all images
    print(img_id)
    filt_df = val_annot.query("name == @img_id") # filter the df to a specific id
    print(filt_df)
    #print(filt_df.shape[0])
    all_boxes = filt_df.yolo_box.values
    type(all_boxes)
    file_name = "{}/{}.txt".format(folder_location, img_id.split('.')[0]) # specify the name of the folder and get a file name
    s = "%s %s %s %s %s \n" # the first number is the identifier of the class. If you are doing multi-class, make sure to change that
    # NOTE(review): mode 'a' appends — rerunning this cell duplicates every
    # label line unless the labels/ directory is cleared first.
    with open(file_name, 'a') as file: # append lines to file
        for i in all_boxes:
            new_line = (s % tuple(i))
            file.write(new_line)
# Same export for the training split.
folder_location = "labels/train"
#change unique_img_ids[:2] to unique_img_ids to iterate through all images
for img_id in unique_img_ids_train: # loop through all unique image ids. Remove the slice to do all images
    print(img_id)
    filt_df = train_annot.query("name == @img_id") # filter the df to a specific id
    print(filt_df)
    #print(filt_df.shape[0])
    all_boxes = filt_df.yolo_box.values
    type(all_boxes)
    file_name = "{}/{}.txt".format(folder_location, img_id.split('.')[0]) # specify the name of the folder and get a file name
    s = "%s %s %s %s %s \n" # the first number is the identifier of the class. If you are doing multi-class, make sure to change that
    with open(file_name, 'a') as file: # append lines to file
        for i in all_boxes:
            new_line = (s % tuple(i))
            file.write(new_line)
# Notebook echo: inspect the dtype of the 10th column of the first row.
type(df_train.iloc[0][9])
| YOLOv5_formatting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# UNIVERSITY OF DODOMA, COLLEGE OF INFORMATICS AND VIRTUAL EDUCATION
# MATHEMATICS FOR DATASCIENCE AND MACHINE LEARNING WITH NUMPY
#
# # NUMPY
# NumPy is the fundamental package for scientific computing with Python. It contains among other things:
#
# 1)A powerful N-dimensional array object
#
# 2)Sophisticated (broadcasting) functions
#
# 3)Tools for integrating C/C++ and Fortran code
#
# 4)Useful linear algebra, Fourier transform, and random number capabilities
#
# Besides its scientific uses, NumPy can also be used as an efficient multi-dimensional container of generic data. Arbitrary data-types can be defined. This allows NumPy to seamlessly and speedily integrate with a wide variety of databases.
# # Installing packages
# Numpy packages is available in different distribution on different key packages such as the following
#
# a) Anaconda: a free distribution of Python with scientific packages. It supports Linux, Windows and Mac.
# When you download Anaconda, it comes bundled with many packages such as NumPy, pandas, and others.
#
# b) Python(x,y) ,this is free distribution including scientific packages, based around the Spyder IDE. Windows and Ubuntu; Py2 only
#
# c) Winpython, Another free distribution including scientific packages and the Spyder IDE. Windows only, but more actively maintained and supports the latest Python 3 versions.
#
# Install via pip:
# Windows users can install these packages with the command
# python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose
#
# Linux and Ubuntu users can use this command:
# sudo apt-get install python-numpy python-scipy python-matplotlib ipython ipython-notebook python-pandas python-sympy python-nose
#
# Mac users can use this command:
# sudo port install py35-numpy py35-scipy py35-matplotlib py35-ipython +notebook py35-pandas py35-sympy py35-nose
# # The basics
# NumPy's main object is the homogeneous multidimensional array. It is a table of elements (usually numbers), all of the same type, indexed by a tuple of positive integers. In NumPy, dimensions are called axes.
# For more details, see the NumPy quickstart guide: https://docs.scipy.org/doc/numpy/user/quickstart.html
# For example, [1,3,4] is an array with one axis and 3 elements, while [[2,3,5],[2,3,5]] is an array with two axes and 6 elements.
#
# Before NumPy can be used, it must first be imported.
# NumPy must be imported before use.
import numpy as np

# Build a 3x5 matrix holding the integers 0..14.
x = np.arange(15).reshape(3, 5)
x.ndim
# Shape of the array: (rows, columns).
x.shape
# Total number of elements.
x.size
# Name of the element data type.
x.dtype.name
# # Array creation
y = np.array([3, 4, 5, 6])
y
# Shape of the array.
y.shape
# Number of elements.
y.size
# zeros() builds an array full of zeros, ones() an array full of ones, and empty() an
# uninitialized array whose content depends on the state of memory. By default the
# dtype of the created array is float64.
np.zeros((2, 3))
np.ones((2, 3))
np.ones((2, 3, 4))
# arange() is the array analogue of range(): it returns an array instead of a list.
np.arange(20, 50, 5)
# With floating-point arguments the element count of arange() is hard to predict due to
# finite precision, so prefer linspace(), which takes the desired number of elements
# instead of a step.
np.linspace(1, 5, 10)
# # Printing arrays
# Where C++ uses the keyword `cout` to display output, Python uses the built-in
# print() function.
#
a = np.arange(3, 4)
print(a)
b = np.array([2, 3, 4])
print(b)
# # Trigonometric functions and rounding
number = np.sin(45)
print(number)
number = np.cos(45)
print(number)
# around() rounds each element to the nearest value (whole numbers by default).
a = np.around([2.36, 6.79])
a
# rint() rounds each element to the nearest integer.
num = np.array([2.7, 6.4, 0.2, 66.7])
print(np.rint(num))
# # Sums and products
number = np.prod([3, 5])
print(number)
number = np.linspace(2, 4, 10)
print(number)
# Total of all elements.
number.sum()
# # Basic operations
# Arithmetic operators on arrays apply elementwise: a new array is created and filled
# with the result. This covers addition, subtraction, multiplication, and more.
# +
a = np.array([20, 30, 40, 50])
b = np.arange(4)
# -
print(b)
c = a - b
print(c)
c = a + b
print(c)
# Built-ins such as sum, max and min also work to find extrema of an array.
# See also
# all, any, apply_along_axis, argmax, argmin, argsort, average, bincount, ceil, clip, conj, corrcoef, cov, cross, cumprod, cumsum, diff, dot, floor, inner, inv, lexsort, max, maximum, mean, median, min, minimum, nonzero, outer, prod, re, round, sort, std, sum, trace, transpose, var, vdot, vectorize, where
max(c)
min(c)
# # Indexing and slicing
# One-dimensional arrays can be indexed, sliced and iterated over, much like lists
# and other Python sequences.
h = np.arange(10) ** 3
print(h)
# First element.
h[0]
# Element at index 2 (value 8, i.e. 2**3).
h[2]
# Slice of the elements at indices 3 through 5.
h[3:6]
# # Shape manipulation
# An array has a shape given by the number of elements along each axis; it can be
# reshaped as needed.
b = np.floor(10 * np.random.random((3, 4)))
b
b.ravel()  # a flattened copy of the array
# Reshape into 6 rows and 2 columns (returns a new array; b is unchanged).
b.reshape(6, 2)
b.reshape(4, 3)
b.T  # the transposed array
# resize() is like reshape() but modifies the array in place and returns None.
b.resize((2, 6))
b
# # Stacking arrays together
# +
a = np.floor(10 * np.random.random((2, 2)))
b = np.floor(10 * np.random.random((2, 2)))
# -
a
b
# Stack side by side (along the second axis) and on top of each other (first axis).
np.hstack((a, b))
np.vstack((a, b))
# On the other hand, the function row_stack is equivalent to vstack for any input arrays. In general, for arrays with more than two dimensions, hstack stacks along their second axes, vstack stacks along their first axes, and concatenate allows an optional argument giving the number of the axis along which the concatenation should happen.
#
#
# References
# http://localhost:8888/notebooks/Documents/mathematics%20for_ds_and_ml/Untitled.ipynb
#
# PREPARED BY <NAME>
# <EMAIL>
| Mathematics for datascience and machine learning with numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Question 37 - Employees who are managers
#
# Suppose you're trying to understand how many managers you have per employee at Company XYZ. On your search to understand, you are given two tables: (1) managers and (2) employees. Each table has 1 column named id.
#
# Given this dataset, can you use SQL to find the employees that are also managers? Hint: given the table names as well as the single column name you should be able to write a full SQL query.
# ```sql
# select
# m.id
# from managers m
# join employees e
# on e.id = m.id
# ```
| interviewq_exercises/q037_sql_managers_join_employees.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rodrigonevest/web-scraping/blob/main/Crawler_Corn_Soybeans.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="LIwtULu0Z5lx" outputId="0eb0612f-eee3-4d99-f1f6-ce975d96df4e"
import urllib3
from bs4 import BeautifulSoup
import pandas as pd
import re
import nltk
nltk.download('book')
# + colab={"base_uri": "https://localhost:8080/"} id="Rg17slmPkOik" outputId="ec548c63-d744-4af4-f7e6-657443d31bed"
# !pip install -U pycountry
import pycountry
# + id="6IiMo9uoaByF" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="8df1e725-c505-4b39-a05b-bd7add362370"
#print(cbot.head())
def _strip_list_markup(text):
    """Remove the artifacts left behind by calling str() on a list of strings."""
    for token in ("['", "']", '["', '"]', "', '"):
        text = text.replace(token, '')
    return text


def BuscaTexto(pagina):
    """Crawl the news index page and return one DataFrame row per linked article.

    Parameters:
        pagina: URL of the news index page (e.g. 'http://soybeansandcorn.com/News').

    Returns:
        pandas.DataFrame with columns ['Date', 'Link', 'Headlines', 'News'].
        (The original appended rows under the misspelled key 'Heallines', which never
        matched the intended 'Headlines' column — fixed here.)
    """
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    http = urllib3.PoolManager()
    try:
        dados_paginas = http.request('GET', pagina)
    except urllib3.exceptions.HTTPError as e:
        # BUG FIX: the original caught HTTPError/URLError, which were never imported
        # and would have raised NameError; urllib3's own base error class is used instead.
        print(e)
        return pd.DataFrame(columns=['Date', 'Link', 'Headlines', 'News'])

    # Parse the index page and collect every anchor inside the content div.
    index = BeautifulSoup(dados_paginas.data, 'lxml')
    links = index.find("div", {"id": "content"}).find_all('a')

    base = pagina.split('/News')[0]
    rows = []
    for link in links:
        # NOTE(review): the original compared the loop counter to a range() object
        # (always False), so no early-exit limit was ever applied; all links are crawled.
        url = base + '/' + str(link.get('href'))
        try:
            dados = http.request('GET', url)
        except Exception:
            print("Pagina nao encontrada")
            continue

        news = BeautifulSoup(dados.data, 'lxml')

        conteudo = ''
        noticias = ''
        noticia = ''
        content_div = news.find("div", {"id": "content"})
        if content_div is not None:
            # Body text comes from the <p> tags, headlines from the <h3> tags.
            noticias = _strip_list_markup(str([y.get_text(strip=True) for y in news.find_all('p')]))
            conteudo = _strip_list_markup(str([x.get_text(strip=True) for x in news.find_all('h3')]))
            # Bullet-point items (<li>) are appended after the paragraphs.
            noticia = _strip_list_markup(str([j for j in content_div.find_all('li', text=True)]))
            noticia = noticia.replace('<"', '').replace('">', '').replace('"\n', '')
            noticia = re.sub('[\n,\t,<li>,/]', '', noticia)

        # Publication date lives in the first <h5> tag (guarded: missing tag -> '').
        tag_data = news.find('h5')
        data = tag_data.get_text(strip=True) if tag_data is not None else ''

        rows.append({'Date': data,
                     'Link': url,
                     'Headlines': conteudo,
                     'News': noticias + noticia})

    # Build the frame once from the collected rows (pandas >= 2.0 removed DataFrame.append).
    return pd.DataFrame(rows, columns=['Date', 'Link', 'Headlines', 'News'])
# + colab={"base_uri": "https://localhost:8080/", "height": 281} id="ycnr9DN1aGJB" outputId="f29868d0-5e18-479e-b4a0-f9f36bf19b9e"
# Crawl the news index and keep only rows with a parseable publication date.
link2 = 'http://soybeansandcorn.com/News'
df = BuscaTexto(link2)
df.dropna(inplace=True)
# Coerce the scraped date strings; rows that fail to parse become NaT and are dropped.
df['Date'] = pd.to_datetime(df['Date'],errors='coerce')
df.dropna(subset=['Date'],inplace=True)
df.head()
#print(df.isnull().sum())
#print(df.to_string())
#df.head(20)
#print(df)
# + colab={"base_uri": "https://localhost:8080/"} id="Pf8o6gj8K8qS" outputId="1bbcb0b5-3439-408a-cf08-85eaa6ca681e"
df.isnull().sum()
# + id="qGbB3RMg5Tub"
# Persist the scraped articles for downstream processing.
df.to_csv('Crawler_Corn_Soybeans.csv',index=False)
| Crawler_Corn_Soybeans.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="c490aab6-378c-474c-bdaf-1d27f220e065"
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from skimage import io, transform
import matplotlib.pyplot as plt # for plotting
import numpy as np
import pandas as pd
import glob
import os
#import cv2
from torch.autograd import Variable
from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, Conv2d, MaxPool2d, Module, Softmax, BatchNorm2d, Dropout
from torch.optim import Adam, SGD
#from tqdm import tqdm
# + id="bf888b97-c936-4d4d-b35e-2860fbf40bdf"
# DataLoader Class
# if BATCH_SIZE = N, dataloader returns images tensor of size [N, C, H, W] and labels [N]
class ImageDataset(Dataset):
    """CSV-backed image dataset.

    Each CSV row stores one flattened 32x32x3 image; when `train` is True the first
    column holds the integer class label. With BATCH_SIZE = N, a DataLoader over this
    dataset returns image tensors of size [N, C, H, W] and labels of size [N].
    """
    def __init__(self, data_csv, train = True , img_transform=None):
        """
        Dataset init function

        INPUT:
        data_csv: Path to csv file containing [data, labels]
        train:
            True: if the csv file has [labels,data] (Train data and Public Test Data)
            False: if the csv file has only [data] and labels are not present.
        img_transform: List of preprocessing operations need to performed on image.
        """
        self.data_csv = data_csv
        self.img_transform = img_transform
        self.is_train = train

        data = pd.read_csv(data_csv, header=None)
        if self.is_train:
            images = data.iloc[:, 1:].to_numpy()
            labels = data.iloc[:, 0].astype(int)
        else:
            # NOTE(review): this branch keeps a DataFrame (not a numpy array), which
            # would break __getitem__'s row indexing — confirm the test CSVs used here.
            images = data.iloc[:, :]
            labels = None

        self.images = images
        self.labels = labels
        print("Total Images: {}, Data Shape = {}".format(len(self.images), images.shape))

    def __len__(self):
        """Returns total number of samples in the dataset"""
        return len(self.images)

    def __getitem__(self, idx):
        """
        Loads image of the given index and performs preprocessing.

        INPUT:
        idx: index of the image to be loaded.

        OUTPUT:
        sample: dictionary with keys images (transformed image) and labels (int label, -1 if unlabeled).
        """
        image = self.images[idx]
        # Fortran (column-major) order: pixels are stored plane by plane in the CSV.
        # BUG FIX: the original had a stray "," on its own line after this statement,
        # which is a SyntaxError; it has been removed.
        image = np.array(image).astype(np.uint8).reshape((32, 32, 3), order='F')

        if self.is_train:
            label = self.labels[idx]
        else:
            label = -1  # sentinel label for unlabeled test data

        # NOTE(review): img_transform is assumed to be non-None here — confirm callers always pass one.
        image = self.img_transform(image)
        sample = {"images": image, "labels": label}
        return sample
# + colab={"base_uri": "https://localhost:8080/"} id="8cd43199-7b15-41bf-8c5a-a2af0d286b91" outputId="c2b50224-7e0b-4a28-8e75-efe7cdd0631e"
# Data Loader Usage
BATCH_SIZE = 200 # Batch Size. Adjust accordingly
NUM_WORKERS = 20 # Number of threads to be used for image loading. Adjust accordingly.

# Convert the HxWxC uint8 array to a PIL image, then to a [C,H,W] float tensor in [0,1].
img_transforms = transforms.Compose([transforms.ToPILImage(),transforms.ToTensor()])

# Train DataLoader
train_data = "../CIFAR/train_data.csv" # Path to train csv file
train_dataset = ImageDataset(data_csv = train_data, train=True, img_transform=img_transforms)
train_loader = DataLoader(train_dataset, batch_size = BATCH_SIZE, shuffle=False, num_workers = NUM_WORKERS)

# Test DataLoader
# NOTE(review): train=True here because the public test CSV also carries labels
# (accuracy is computed against them below) — confirm the file format.
test_data = "../CIFAR/public_test.csv" # Path to test csv file
test_dataset = ImageDataset(data_csv = test_data, train=True, img_transform=img_transforms)
test_loader = DataLoader(test_dataset, batch_size = BATCH_SIZE, shuffle=False, num_workers = NUM_WORKERS)
# + colab={"base_uri": "https://localhost:8080/"} id="wjF2UTQVd_Y7" outputId="fbc4afd0-c338-42dd-84d5-3b9961c97a19"
# Check that a GPU is visible before training.
torch.cuda.is_available()
# + id="7e62bb43-ce19-4bc9-908b-2d83f4cebbbd"
class Net(Module):
    """CNN for 32x32 RGB input (CIFAR-10): four convolutional stages followed by a
    two-layer fully-connected classifier head with dropout."""

    def __init__(self):
        super(Net, self).__init__()
        # Stage 1: 3x32x32 -> 32x15x15
        self.conv1 = Sequential(
            Conv2d(3, 32, kernel_size=3, stride=1),
            BatchNorm2d(32),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2),
        )
        # Stage 2: 32x15x15 -> 64x6x6
        self.conv2 = Sequential(
            Conv2d(32, 64, kernel_size=3, stride=1),
            BatchNorm2d(64),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2),
        )
        # Stage 3: 64x6x6 -> 512x2x2
        self.conv3 = Sequential(
            Conv2d(64, 512, kernel_size=3, stride=1),
            BatchNorm2d(512),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2),
        )
        # Stage 4: 512x2x2 -> 1024x1x1
        self.conv4 = Sequential(
            Conv2d(512, 1024, kernel_size=2, stride=1),
            ReLU(inplace=True),
        )
        # Classifier head: 1024 -> 256 -> 10 logits.
        self.linear1 = Sequential(
            Linear(1024 * 1 * 1, 256),
            ReLU(inplace=True),
        )
        self.drop = Dropout(p=0.2)
        self.linear2 = Linear(256, 10)

    def forward(self, x):
        """Run the conv stages in order, flatten, then apply the dense head."""
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4):
            x = stage(x)
        x = x.view(x.size(0), -1)
        return self.linear2(self.drop(self.linear1(x)))
# + id="G4snUxabqqNP"
def train_model(epoch,x_train,y_train):
    """Run one optimization step on a single batch and return the batch loss.

    Relies on module-level globals defined in a later cell: `model`, `optimizer`, `loss`.

    INPUT:
    epoch: current epoch index (not used inside; kept for call-site symmetry).
    x_train: batch of images, shape [N, C, H, W].
    y_train: batch of integer class labels, shape [N].

    OUTPUT:
    float: training loss value for this batch.
    """
    model.train()
    tr_loss = 0
    x_train, y_train = Variable(x_train), Variable(y_train)
    if torch.cuda.is_available(): # converting the data into GPU format
        x_train = x_train.cuda()
        y_train = y_train.cuda()
    optimizer.zero_grad() # clearing the Gradients of the model parameters
    # prediction for training and validation set
    output_train = model(x_train)
    # computing the training and validation loss
    loss_train = loss(output_train, y_train)
    #print(loss_train)
    # computing the updated weights of all the model parameters
    loss_train.backward()
    optimizer.step()
    tr_loss = loss_train.item()
    return tr_loss
def predict_model(x_test,y_test):
    """Return the number of correct predictions for one batch.

    Relies on the module-level global `model` (defined in a later cell).

    INPUT:
    x_test: batch of images, shape [N, C, H, W].
    y_test: batch of integer class labels, shape [N] (assumed on CPU or CUDA).

    OUTPUT:
    int: count of samples whose argmax prediction matches the label.
    """
    if torch.cuda.is_available(): # converting the data into GPU format
        x_test = x_test.cuda()
    with torch.no_grad():
        output = model(x_test)
    # exp() of the logits is monotonic, so argmax is unchanged; softmax normalization is unnecessary here.
    softmax = torch.exp(output).cpu()
    prob = list(softmax.numpy())
    predictions = np.argmax(prob, axis=1)
    #pred = np.append(pred,predictions)
    #print(predictions,y_test.cpu().detach().numpy())
    return (np.sum(predictions==y_test.cpu().detach().numpy()))
# + id="NUzjVi9zp7CV"
# Instantiate the network, optimizer and criterion used by train_model/predict_model.
model = Net()
optimizer = Adam(model.parameters(), lr=0.0001)
loss = CrossEntropyLoss()
# Move model and criterion to the GPU when one is available.
if torch.cuda.is_available():
    model = model.cuda()
    loss = loss.cuda()
# + colab={"base_uri": "https://localhost:8080/"} id="pCqCbQQasykP" outputId="8542318f-c1d7-41b6-96d7-56d1a2a53fd2"
# Train for a fixed number of epochs, logging per-epoch loss and test accuracy.
epochs = 5
torch.autograd.set_detect_anomaly(True)
losses = []  # per-epoch average training loss
accs = []    # per-epoch test accuracy
# Context managers guarantee the log files are flushed and closed even if an epoch raises
# (the original opened them manually and would leak them on an exception).
with open('loss.txt', 'w') as loss_file, open('accuracy.txt', 'w') as acc_file:
    for epoch in range(epochs):
        avg_train_loss = 0
        acc = 0
        n = 0
        model.train()
        for batch_idx, sample in enumerate(train_loader):
            images = sample['images']
            labels = sample['labels']
            avg_train_loss += train_model(epoch, images, labels)
        model.eval()
        for b, sample in enumerate(test_loader):
            images = sample['images']
            labels = sample['labels']
            n += len(labels)
            acc += predict_model(images, labels)
        avg_train_loss /= len(train_loader)  # mean batch loss over the epoch
        acc /= n                             # overall accuracy on the test set
        loss_file.write('{}\n'.format(avg_train_loss))
        losses.append(avg_train_loss)
        acc_file.write('{}\n'.format(acc))
        accs.append(acc)
        print(epoch+1, avg_train_loss, acc)
# + colab={"base_uri": "https://localhost:8080/", "height": 920} id="afqEJO8R30Mr" outputId="96ec169b-4c7b-4bb2-fe51-c6c5eabab967"
# Plot the recorded per-epoch training losses (values hard-coded from a previous run).
font = {'family' : 'sans-serif',
        'weight' : 'normal',
        'size'   : 20}
plt.rc('font', **font)
plt.rcParams['figure.figsize'] = (15,15)
x = [1,2,3,4,5]
y= [
1.4485580623149872,
1.0671111538012823,
0.8919388349850973,
0.7601574355363846,
0.6457052547732989,
]
plt.xlabel("Number of Epochs")
plt.ylabel("Training Cross-Entropy Loss")
plt.title('Training Loss v/s Epochs (CIFAR10)')
plt.plot(x,y,'-mo')
plt.savefig('c10plt.png')
# + colab={"base_uri": "https://localhost:8080/", "height": 920} id="GLqZ77KZ4D55" outputId="806863fe-5552-4cbb-867c-29c7e656abe9"
# Plot the recorded per-epoch test accuracies (values hard-coded from a previous run;
# reuses the epoch axis `x` from the previous cell).
y = [0.59425,
0.6535,
0.67975,
0.686,
0.6875,
]
plt.xlabel("Number of Epochs")
plt.ylabel("Test Accuracy")
plt.title('Test Accuracy v/s Epochs (CIFAR10)')
plt.plot(x,y,'-mo')
plt.savefig('c10plt1.png')
# + colab={"base_uri": "https://localhost:8080/"} id="rkkX0XEPN4V1" outputId="87504319-4109-4c5b-d0a1-e7beca566a01"
# Inspect the parameter tensors, then checkpoint the trained weights.
print("Model's state_dict:")
for param_tensor in model.state_dict():
    print(param_tensor, "\t", model.state_dict()[param_tensor].size())
# + id="zcWDHUc6Kk7X"
torch.save(model.state_dict(),'./model2.pth')
| A4-CNN/Part (b)/Part_b.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import numpy as np
import pandas as pd
import tensorflow as tf
from keras_preprocessing.image import ImageDataGenerator
# +
# Toggle between the Kaggle environment and the local directory layout.
RUN_ON_KAGGLE = False

# Directories and filenames
if RUN_ON_KAGGLE:
    INPUT_PATH = "../input/flickr8k/"
    IMAGE_PATH = INPUT_PATH+'Images/'
    OUTPUT_PATH = "./"
    OUTPUT_IMAGE_PATH = OUTPUT_PATH +'Images/'
    CAPTIONS_FILE = INPUT_PATH+'captions.txt'
else:
    INPUT_PATH = '../data/raw/flickr8k/'
    IMAGE_PATH = INPUT_PATH+'Images/'
    OUTPUT_PATH = "../data/interim/aida-image-captioning-inceptresnetv2_v2/"
    OUTPUT_IMAGE_PATH = OUTPUT_PATH +'Images/'
    CAPTIONS_FILE = INPUT_PATH+'captions.txt'
# +
# Create a dataframe which summarizes the image, path & captions as a dataframe
# Each image id has 5 captions associated with it therefore the total dataset should have 40455 samples.
captions_df = pd.read_csv(CAPTIONS_FILE)
# Show full caption text. BUG FIX: -1 was deprecated (and rejected by newer pandas);
# None is the documented way to disable column-width truncation.
pd.set_option('display.max_colwidth', None)
print(captions_df.shape)
captions_df.head()
# -
# Keep only the FIRST caption per image (features are extracted once per image).
captions_agg_df=captions_df.groupby("image").first().reset_index()
captions_agg_df.head()

# Deterministic (shuffle=False) generator so batches line up with captions_agg_df row order.
#datagen=ImageDataGenerator(rescale=1./255) # not necessary
datagen=ImageDataGenerator()
train_generator=datagen.flow_from_dataframe(dataframe=captions_agg_df,
                                            directory=IMAGE_PATH,
                                            x_col="image", y_col="caption",
                                            class_mode="raw",
                                            target_size=(299,299),
                                            shuffle=False
                                            )
# Headless InceptionResNetV2 (no classifier top) pretrained on ImageNet.
image_model = tf.keras.applications.InceptionResNetV2(
    include_top=False,
    weights='imagenet'
)
# Expose the final convolutional feature map as the model output.
new_input = image_model.input
hidden_layer = image_model.layers[-1].output
image_features_extract_model = tf.keras.Model(new_input, hidden_layer)
# !mkdir $OUTPUT_PATH
# !mkdir $OUTPUT_IMAGE_PATH
# Extract and persist a feature tensor per image, named after the image file.
# NOTE(review): Keras generators cycle forever; this loop only stops when iloc[i]
# overruns the dataframe — confirm this termination behavior is intended.
i = 0
for X, _ in train_generator:
    # InceptionV3 and InceptionResNetV2 share the same 'tf'-mode preprocessing (scale to [-1, 1]).
    img = tf.keras.applications.inception_v3.preprocess_input(X)
    batch_features = image_features_extract_model(img)
    # Flatten the spatial grid: [N, H, W, D] -> [N, H*W, D].
    batch_features = tf.reshape(batch_features,
                                (batch_features.shape[0], -1, batch_features.shape[3]))
    if i == 0:
        print(f"X.shape: {X.shape}")
        print(f"img.shape: {img.shape}")
        print(f"batch_features.shape: {batch_features.shape}")
        print(f"reshaped batch_features.shape: {batch_features.shape}")
    for bf in batch_features:
        filename = OUTPUT_IMAGE_PATH + captions_agg_df.iloc[i].image.replace(".jpg", ".npy")
        # BUG FIX: the original printed the literal placeholder text instead of the path.
        print(f"filename: {filename}")
        np.save(filename, bf.numpy())
        i = i + 1
| notebooks/02_feature-extraction_inception-resnet-v2_v2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rahulvigneswaran/100DaysOfDeepLearning/blob/main/Day_1_SuperClassingLT.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="pTr89CyqYt4I"
# # Super classing Long-Tailed Dataset
#
# <blockquote class="twitter-tweet"><p lang="en" dir="ltr">🥁 Day 1: Super classing Long-Tailed Datasets.<br><br>When we combine classes together and classify them individually first, we are reducing the effective imbalance factor of the entire dataset. From 100 classes to 10 classes. Would that help?</p>— <NAME> (@lettucelemontea) <a href="https://twitter.com/lettucelemontea/status/1500675843554562048?ref_src=twsrc%5Etfw">March 7, 2022</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
# + colab={"base_uri": "https://localhost:8080/"} id="qWccvqb6YyMU" outputId="08103604-0efc-46ce-eae9-804758e651e7"
# !git clone <I will make this repo public soon>
# + colab={"base_uri": "https://localhost:8080/"} id="zGlZqxnBdra2" outputId="d55c84cd-a317-4f4b-aee4-88df857ff7c5"
# !pip install -r LibLongTail/requirements.txt
# + colab={"base_uri": "https://localhost:8080/"} id="PoO5U7LrdxSU" outputId="47b731a0-afff-4c90-85d3-44a6aaac6a37"
# %cd LibLongTail/
# + colab={"base_uri": "https://localhost:8080/", "height": 71} id="cXqyBYG5fgdC" outputId="b4ac181d-40e1-451a-96e6-f86bffecbbf1"
import wandb
wandb.login()
# + colab={"base_uri": "https://localhost:8080/"} id="kODvJVxBj2If" outputId="b69e6126-f688-43ac-c468-eb9b120fca0d"
# !python run.py --sub_exp "Baseline_CE" --cfg "/content/LibLongTail/configs/classification/CIFAR100LT/ce.yaml" --opts LOGGER.PROJECT_NAME "100DaysOfDL" GENERAL.EPOCH 100 GENERAL.GPUS [0] ARCH.BACKBONE "resnet18" DATASET.IMB 0.005
# + colab={"base_uri": "https://localhost:8080/"} id="X-s94ldFsGJY" outputId="bf6dba5e-3239-4134-8a6b-ed6d83a14b96"
# !python run.py --sub_exp "Baseline_cRT" --cfg "/content/LibLongTail/configs/classification/CIFAR100LT/crt.yaml" --opts LOGGER.PROJECT_NAME "100DaysOfDL" GENERAL.EPOCH 100 GENERAL.GPUS [0] ARCH.BACKBONE "resnet18" DATASET.IMB 0.005 ARCH.PRETRAIN "/content/LibLongTail/logs/seed_1/cifar100lt/resnet18/ce/Baseline_CE_ce_cifar100lt_resnet18_seed_1_imb_0.005/best.ckpt"
# + colab={"base_uri": "https://localhost:8080/"} id="8dbaR5unph4r" outputId="2e6207ed-1c85-49f9-b215-d38de00d62d4"
# !python run.py --sub_exp "supclass_stage1" --cfg "/content/LibLongTail/configs/classification/CIFAR100LT/ce.yaml" --opts LOGGER.PROJECT_NAME "100DaysOfDL" GENERAL.EPOCH 100 GENERAL.GPUS [0] ARCH.BACKBONE "resnet18" DATASET.IMB 0.005 DATASET.NUM_CLS 20
# + colab={"base_uri": "https://localhost:8080/"} id="TDo1V8gSdqa6" outputId="dca2d8de-2e61-4c47-98e0-dbcd36482ee6"
# !python run.py --sub_exp "supclass_stage2_corrected" --cfg "/content/LibLongTail/configs/classification/CIFAR100LT/crt.yaml" --opts LOGGER.PROJECT_NAME "100DaysOfDL" GENERAL.EPOCH 100 GENERAL.GPUS [0] ARCH.BACKBONE "resnet18" DATASET.IMB 0.005 ARCH.PRETRAIN "/content/LibLongTail/logs/seed_1/cifar100lt/resnet18/ce/supclass_stage1_ce_cifar100lt_resnet18_seed_1_imb_0.005/best.ckpt"
# + [markdown] id="el3OuaUzg3PH"
# |Method|Accuracy|Many|Mid|Low|
# |--------|---------|---------|-------|------|
# |CE|0.214|0.453|0.2353|0.0597|
# |cRT|0.2526|0.458|0.2873|0.1075|
# |SupClass| 0.2404|0.4426|0.2766|0.0961|
| Day_1_SuperClassingLT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# <a id='top'></a>
#
# # DSP using FFT and psd
#
# This notebook will demonstrate some basic aspects related with digital signal processing using FFT and psd. It is mostly inspired by two LabVIEW white papers, [this one](http://www.ni.com/white-paper/4278/en/) and [this one](http://www.ni.com/white-paper/4541/en/). We will also take the oportunity to test different power spectrum estimation implementations from two common Python packages, [matplotlib.mlab](http://matplotlib.org/api/mlab_api.html) and [scipy.signal](https://scipy.github.io/devdocs/signal.html), following this [StackOverflow question](http://stackoverflow.com/questions/33286467/why-do-the-power-spectral-density-estimates-from-matplotlib-mlab-psd-and-scipy-s).
#
# ## Table of contents
#
# [Preamble](#Preamble)
#
# [Introduction](#Introduction)
#
# [Sample signal](#Sample-signal)
#
# [Fourier transform](#Fourier-transform)
#
# [Power spectrum](#Power-spectrum)
#
# [The periodogram](#The-periodogram)
#
# [Matplotlib package](#Matplotlib-package)
#
# [SciPy package](#SciPy-package)
#
# [Conclusions](#Conclusions)
#
# [Odds and ends](#Odds-and-ends)
#
# ## Preamble
#
# The computational environment set up for this Python notebook includes numpy and scipy for the numerical simulations, matplotlib and pandas for the plots:
# +
import sys
import numpy as np
import scipy as sp
import matplotlib as mpl
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline

# Record the interpreter and package versions for reproducibility.
print(sys.version)
for package in (np, sp, mpl, pd):
    print('{:.<15} {}'.format(package.__name__, package.__version__))
# -
# Furthermore, we will need the following special functions:
from numpy.fft import fft, fftfreq, rfft, rfftfreq, fftshift
from scipy.signal import periodogram, welch
from matplotlib.mlab import rms_flat, psd, detrend_none, window_hanning
# [Back to top](#top)
#
# ## Introduction
#
# The [power spectral density](https://en.wikipedia.org/wiki/Spectral_density) of a digital signal can be [estimated](https://en.wikipedia.org/wiki/Spectral_density_estimation) in several different ways, namely through:
#
# 1. The [periodogram](https://en.wikipedia.org/wiki/Periodogram)
# 2. The [Bartlett's method](https://en.wikipedia.org/wiki/Bartlett%27s_method)
# 3. The [Welch's method](https://en.wikipedia.org/wiki/Welch%27s_method)
#
# We will illustrate them below. However, before that we will have to set up a sample signal.
#
# [Back to top](#top)
#
# ## Sample signal
#
# For the purpose of illustration, in this notebook we will use a sample signal (in volt) composed of a small amplitude sine wave with an additive large amplitude random noise:
# +
# Build the three test signals -- zero-mean random noise, a small-amplitude
# sine wave, and their sum (the sine is buried in the noise) -- then
# tabulate basic statistics and plot the time histories.
Ns = 4096                        # number of samples
np.random.seed(1234)             # random seed (for repeatability)
rn = np.random.random(Ns)-0.5    # zero mean random noise
Fs = 100                         # sampling frequency
dt = 1./Fs                       # time discretisation
tt = np.arange(Ns)*dt            # time sampling
A = 0.067                        # sine wave amplitude
f = 10.24                        # sine wave frequency
sw = A*np.sin(2*np.pi*f*tt)      # sine wave
ss = sw+rn                       # sample signal
signals = (rn, sw, ss)
labels = ('Random noise', 'Sine wave', 'Sample signal')
# np.sqrt(np.mean(v**2)) replaces matplotlib.mlab.rms_flat, which was
# removed in matplotlib 3.1; for real signals the result is identical.
v = [(np.max(v), np.min(v), np.mean(v), np.sqrt(np.mean(v**2))) for v in signals]
df = pd.DataFrame(data=v, index=labels, columns=('Max', 'Min', 'Mean', 'RMS'))
print(df)
fig, ax = plt.subplots()
# ax.hold() was deprecated in matplotlib 2.0 and removed in 3.0;
# overlaying several plots on the same axes is now the default behaviour.
for v in (2,1):
    ax.plot(tt, signals[v], label=labels[v])
ax.set_title('Time history')
ax.set_xlabel('Time [s]')
ax.set_ylabel('Amplitude [V]')
ax.legend()
ax.grid()
# -
# The sample signal time history plot shows that the sine wave is completely hidden by the additive random noise because of the difference in the amplitudes of both signals.
#
# The theoretical **sine wave RMS value** is equal to its amplitude ($A$) divided by the square root of 2:
#
# $$\mathrm{RMS}(\text{sine wave}) = \frac{A}{\sqrt 2} = A_{RMS}$$
print('{:.6f}, {:.6f}'.format(df['RMS']['Sine wave'], A/np.sqrt(2)))  # measured sine-wave RMS vs theoretical A/sqrt(2)
# For additive [orthogonal](https://en.wikipedia.org/wiki/Root_mean_square) signals, the RMS value of the total is equal to the square root of sum of squares (SRSS) of the parts. Let us check that with the random noise and the sine wave against the sample signal:
SRSS = np.sqrt(df['RMS']['Random noise']**2 + df['RMS']['Sine wave']**2)  # square root of the sum of squares of the parts
print('{:.6f}, {:.6f}'.format(SRSS, df['RMS']['Sample signal']))  # SRSS vs measured RMS of the combined signal
# We are now ready to start processing these signals.
#
# [Back to top](#top)
#
# ## Fourier transform
#
# We will start processing these signals by taking their Fourier transform into the frequency domain. For that we will use the [FFT algorithm](https://en.wikipedia.org/wiki/Fast_Fourier_transform), implemented in NumPy as the [fft](https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.fft.html) function, and normalise the result by the number of samples (Ns):
# +
# Two-sided amplitude spectra: FFT of each signal, normalised by the
# number of samples; the peak magnitude and its frequency are tabulated.
RN2 = fft(rn)/Ns
SW2 = fft(sw)/Ns
SS2 = fft(ss)/Ns
FT2 = (RN2, SW2, SS2)
freqs = fftfreq(Ns, d=1./Fs)
# for each spectrum, locate the bin with the largest magnitude
v = [(np.absolute(v[ix]), freqs[ix]) for v in FT2 for ix in (np.argmax(np.absolute(v)),)]
df = pd.DataFrame(data=v, index=labels, columns=('Max', 'Freq'))
print(df)
fig, ax = plt.subplots()
# ax.hold() was removed in matplotlib 3.0; overlaying is now the default.
for v in (2,1):
    ax.semilogy(fftshift(freqs), fftshift(np.absolute(FT2[v])), label=labels[v])
ax.axvline(f, label='{}Hz'.format(f), ls=':')
ax.set_title('Two-sided amplitude spectrum')
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel('Amplitude [$V_{RMS}$]')
ax.legend()
ax.grid()
# -
# Several aspects are worth mentioning about these plots:
#
# 1. The amplitude spectra comprise both positive and negative frequencies, reason why it is called a **two-sided spectrum**;
# 2. They are symmetric about the origin, which stems from the fact that the Fourier transform of real signals is Hermitian;
# 3. The peak values occur at the same frequency, which is very close, but not equal, to the sine wave frequency. This issue is related to the frequency discretisation of the fft, among others, which in turn is affected by the signal duration.
#
# The theoretical **value of a sine wave two-sided amplitude spectrum** is equal to the sine wave amplitude ($A$) divided by the double of the square root of 2:
#
# $$\left| FT_{2sided} \right| = \frac{A}{2 \cdot \sqrt 2} = \frac{A_{RMS}}{2}$$
print('{:.6f}, {:.6f}'.format(df['Max']['Sine wave'], A/(2*np.sqrt(2))))  # measured peak vs theoretical two-sided value A/(2*sqrt(2))
# The difference between the actual and theoretical values will become smaller as the duration of the signals increases towards infinity. *This aspect, which also influences the frequency discretisation mentioned above, affects most of the numerical comparisons shown in this notebook*.
#
# We will now take advantage of this symmetry property of the Fourier transform with real signals to compute only the non-negative frequency terms. There are two options to achieve that:
#
# 1. Truncate the FFT - basically, compute only the terms that are of interest;
# 2. Fold the FFT - compute the FFT ordinates as if they were [folded](http://www.staff.vu.edu.au/msek/FFT%20Folding%20Algorithm.pdf) about the Nyquist frequency to obtain a **one-sided spectrum**.
#
# For the first one we will use the [rfft](https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.rfft.html) function implemented in NumPy which gives the spectral components from DC up to the Nyquist frequency:
# +
# Truncated (non-negative frequency) amplitude spectra via rfft, again
# normalised by the number of samples.
TRN = rfft(rn)/Ns
TSW = rfft(sw)/Ns
TSS = rfft(ss)/Ns
TFT = (TRN, TSW, TSS)
tfreqs = rfftfreq(Ns, d=1./Fs)
# peak magnitude, its frequency, and the DC component per spectrum
v = [(np.absolute(v[ix]), tfreqs[ix], np.absolute(v[0])) for v in TFT for ix in (np.argmax(np.absolute(v)),)]
df = pd.DataFrame(data=v, index=labels, columns=('Max', 'Freq', 'DC'))
print(df)
fig, ax = plt.subplots()
# ax.hold() was removed in matplotlib 3.0; overlaying is now the default.
for v in (2,1):
    ax.semilogy(tfreqs, np.absolute(TFT[v]), label=labels[v])
ax.axvline(f, label='{}Hz'.format(f), ls=':')
ax.set_title('Truncated amplitude spectrum')
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel('Amplitude [$V_{RMS}$]')
ax.legend()
ax.grid()
# -
# As can be seen, these truncated amplitude spectra are exactly the same as the previous two-sided amplitude spectra, but were computed only for the non-negative frequency terms.
#
# The one-sided spectra, on the other hand, are computed by taking the complex conjugate of the second half of the two-sided spectrum (the negative frequencies), reversing and adding it to the first half (in the corresponding positive frequencies). Better yet, multiply the truncated spectrum ordinates by two, with the only exceptions of the DC and Nyquist (if it exists) components:
# +
# Fold the truncated spectrum into a one-sided spectrum: double every bin
# except DC and (when Ns is even) the Nyquist bin.
scale = 2.*np.ones_like(tfreqs) # scale rfft components by a factor of 2
scale[0] = 1.                   # the DC component is not scaled
# rfft returns Ns//2+1 bins, so an odd length means Ns is even and the
# last bin is the Nyquist component ('== 1' replaces the original,
# unidiomatic '== True' comparison; the logic is unchanged).
if scale.size % 2 == 1:         # if there is a Nyquist component...
    scale[-1] = 1.              # ...then it is not scaled
FT1 = [v*scale for v in TFT]
v = [(np.absolute(v[ix]), tfreqs[ix], np.absolute(v[0])) for v in FT1 for ix in (np.argmax(np.absolute(v)),)]
df = pd.DataFrame(data=v, index=labels, columns=('Max', 'Freq', 'DC'))
print(df)
fig, ax = plt.subplots()
# ax.hold() was removed in matplotlib 3.0; overlaying is now the default.
for v in (2,1):
    ax.semilogy(tfreqs, np.absolute(FT1[v]), label=labels[v])
ax.axvline(f, label='{}Hz'.format(f), ls=':')
ax.set_title('One-sided amplitude spectrum')
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel('Amplitude [$V_{RMS}$]')
ax.legend()
ax.grid()
# -
# The theoretical **value of a sine wave one-sided amplitude spectrum** is equal to the sine wave RMS, reason why the units of the amplitude spectrum are often referred to as *quantity squared rms*, where quantity is the unit of the time-domain signal:
#
# $$\left| FT_{1sided} \right| = 2 \cdot \left| FT_{2sided} \right| = 2 \cdot \frac{A}{2 \cdot \sqrt 2} = \frac{A}{\sqrt 2} = A_{RMS}$$
print('{:.6f}, {:.6f}'.format(df['Max']['Sine wave'], A/np.sqrt(2)))  # measured peak vs theoretical one-sided value A/sqrt(2)
# The peak corresponding to the sine wave frequency is still barely distinguishable above the major peaks in the sample signal one-sided amplitude spectrum. Let us query for the top 5 values:
df = pd.DataFrame(data=np.column_stack((np.absolute(FT1[2]), tfreqs)), columns=('RSS', 'Freq'))  # one-sided amplitude spectrum of the sample signal
print(df.nlargest(5, columns='RSS').to_string(index=False))  # top 5 spectral peaks and their frequencies
# So we see that the sine wave frequency really does not stand out amongst the amplitude spectrum peaks. In order to improve this result we will need other processing tools.
#
# [Back to top](#top)
#
# ## Power spectrum
#
# We will compute now the power spectra of the three signals using the normalised Fourier transforms. First of all, we will multiply the two-sided fft by their complex conjugates in order to obtain the **two-sided power spectra**:
# +
# Two-sided power spectra: multiply each two-sided fft by its complex
# conjugate (np.real drops the residual zero imaginary parts).
PS2 = [np.real(v*np.conj(v)) for v in FT2]
v = [(v[ix], freqs[ix], np.absolute(v[0])) for v in PS2 for ix in (np.argmax(np.absolute(v)),)]
df = pd.DataFrame(data=v, index=labels, columns=('Max', 'Freq', 'DC'))
print(df)
fig, ax = plt.subplots()
# ax.hold() was removed in matplotlib 3.0; overlaying is now the default.
for v in (2,1):
    ax.semilogy(fftshift(freqs), fftshift(PS2[v]), label=labels[v])
ax.axvline(f, label='{}Hz'.format(f), ls=':')
ax.set_title('Two-sided power spectrum (fft)')
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel('Amplitude [${V_{RMS}}^2$]')
ax.legend()
ax.grid()
# -
# The theoretical **value of a sine wave two-sided power spectrum** is equal to the square of the sine wave two-sided amplitude spectrum, that is, the square of the sine wave amplitude ($A$) divided by 8:
#
# $$\left| S_{2sided} \right| = {\left( \frac{A}{2 \cdot \sqrt 2} \right)}^2 = \frac{A^2}{8} = \frac{{A_{RMS}}^2}{4}$$
print('{:.6f}, {:.6f}'.format(df['Max']['Sine wave'], (A**2/8)))  # measured peak vs theoretical two-sided value A**2/8
# Similar to the Fourier transform case, we will compute the **one-sided power spectra** by multiplying the truncated rfft by their complex conjugates and applying the same scaling as for the one-sided amplitude spectra:
# +
# One-sided power spectra: truncated rfft times its complex conjugate,
# with the same DC/Nyquist-aware doubling as the one-sided amplitude spectra.
PS1 = [np.real(v*np.conj(v))*scale for v in TFT]
v = [(v[ix], tfreqs[ix], np.absolute(v[0])) for v in PS1 for ix in (np.argmax(np.absolute(v)),)]
df = pd.DataFrame(data=v, index=labels, columns=('Max', 'Freq', 'DC'))
print(df)
fig, ax = plt.subplots()
# ax.hold() was removed in matplotlib 3.0; overlaying is now the default.
for v in (2,1):
    ax.semilogy(tfreqs, PS1[v], label=labels[v])
ax.axvline(f, label='{}Hz'.format(f), ls=':')
ax.set_title('One-sided power spectrum (scaled rfft)')
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel('Amplitude [${V_{rms}}^2$]')
ax.legend()
ax.grid()
# -
# We can see that the one-sided power spectrum estimates are the same. The theoretical **value for a sine wave one-sided power spectrum** is now given by the square of the sine wave one-sided amplitude spectrum, that is, the square of the sine wave amplitude ($A$) divided by 4:
#
# $$\left| G_{1sided} \right| = 2 \cdot \left| S_{2sided} \right| = 2 \cdot \frac{A^2}{8} = \frac{A^2}{4} = \frac{{A_{RMS}}^2}{2}$$
print('{:.6f}, {:.6f}'.format(df['Max']['Sine wave'], (A**2/4)))  # measured peak vs theoretical one-sided value A**2/4
# The peak corresponding to the sine wave frequency is now more visible above the major peaks in the sample signal one-sided amplitude spectrum. Let us query again for the top 5 values:
df = pd.DataFrame(data=np.column_stack((np.absolute(PS1[2]), tfreqs)), columns=('RSS', 'Freq'))  # one-sided power spectrum of the sample signal
print(df.nlargest(5, columns='RSS').to_string(index=False))  # top 5 spectral peaks and their frequencies
# We have reached some important results thus far, let us summarize them here:
#
# | Amplitude spectra | Fourier transform | Power spectra |
# |---|---|---|
# |Two-sided (peak)| $\frac{A}{2} = \frac{A_{RMS}}{\sqrt 2}$ | $\frac{A^2}{4} = \frac{{A_{RMS}}^2}{2}$ |
# |Two-sided (RMS)| $\frac{A}{2 \cdot \sqrt 2} = \frac{A_{RMS}}{2}$ | $\frac{A^2}{8} = \frac{{A_{RMS}}^2}{4}$ |
# |One-sided (peak)| $A = \sqrt 2 \cdot A_{RMS}$ | $\frac{A^2}{2} = {A_{RMS}}^2$ |
# |One-sided (RMS)| $\frac{A}{\sqrt 2} = A_{RMS}$ | $\frac{A^2}{4} = \frac{{A_{RMS}}^2}{2}$ |
#
# [Back to top](#top)
#
# ## The periodogram
#
# Now that we have seen how to compute the power spectra from the Fourier transforms, we will use directly the [periodogram](https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.signal.periodogram.html) function implemented in SciPy to obtain the same results:
# +
# Two-sided power spectra computed directly with scipy.signal.periodogram
# (scaling='spectrum' gives power, not power density).
freqs, Prn = periodogram(rn, fs=Fs, window=None, nfft=None, detrend=None, return_onesided=False, scaling='spectrum')
freqs, Psw = periodogram(sw, fs=Fs, window=None, nfft=None, detrend=None, return_onesided=False, scaling='spectrum')
freqs, Pss = periodogram(ss, fs=Fs, window=None, nfft=None, detrend=None, return_onesided=False, scaling='spectrum')
PS0 = (Prn, Psw, Pss)
v = [(np.absolute(v[ix]), freqs[ix], np.absolute(v[0])) for v in PS0 for ix in (np.argmax(np.absolute(v)),)]
# index=labels keeps the row names consistent with the rest of the notebook
# (the original hard-coded 'Masked signal' instead of 'Sample signal').
df = pd.DataFrame(data=v, index=labels, columns=('Max', 'Freq', 'DC'))
print(df)
fig, ax = plt.subplots()
# ax.hold() was removed in matplotlib 3.0; overlaying is now the default.
for v in (2,1):
    ax.semilogy(fftshift(freqs), fftshift(PS0[v]), label=labels[v])
ax.axvline(f, label='{}Hz'.format(f), ls=':')
ax.set_title('Two-sided power spectrum (periodogram)')
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel('Amplitude [${V_{rms}}^2$]')
ax.legend()
ax.grid()
# +
# One-sided power spectra with scipy.signal.periodogram.
freqs, Prn = periodogram(rn, fs=Fs, window=None, nfft=None, detrend=None, return_onesided=True, scaling='spectrum')
freqs, Psw = periodogram(sw, fs=Fs, window=None, nfft=None, detrend=None, return_onesided=True, scaling='spectrum')
freqs, Pss = periodogram(ss, fs=Fs, window=None, nfft=None, detrend=None, return_onesided=True, scaling='spectrum')
PS0 = (Prn, Psw, Pss)
v = [(np.absolute(v[ix]), freqs[ix], np.absolute(v[0])) for v in PS0 for ix in (np.argmax(np.absolute(v)),)]
# index=labels keeps the row names consistent with the rest of the notebook
# (the original hard-coded 'Masked signal' instead of 'Sample signal').
df = pd.DataFrame(data=v, index=labels, columns=('Max', 'Freq', 'DC'))
print(df)
fig, ax = plt.subplots()
# ax.hold() was removed in matplotlib 3.0; overlaying is now the default.
for v in (2,1):
    ax.semilogy(freqs, PS0[v], label=labels[v])
ax.axvline(f, label='{}Hz'.format(f), ls=':')
ax.set_title('One-sided power spectrum (periodogram)')
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel('Amplitude [${V_{rms}}^2$]')
ax.legend()
ax.grid()
# -
# The periodogram function also allows us to compute the power spectral density of a signal which, in some cases, is more relevant than the power spectra:
# +
# One-sided power spectral density with scipy.signal.periodogram
# (scaling='density' -> units of V**2/Hz).
freqs, Prn = periodogram(rn, fs=Fs, window=None, nfft=None, detrend=None, return_onesided=True, scaling='density')
freqs, Psw = periodogram(sw, fs=Fs, window=None, nfft=None, detrend=None, return_onesided=True, scaling='density')
freqs, Pss = periodogram(ss, fs=Fs, window=None, nfft=None, detrend=None, return_onesided=True, scaling='density')
PSD0 = (Prn, Psw, Pss)
v = [(np.absolute(v[ix]), freqs[ix], np.absolute(v[0])) for v in PSD0 for ix in (np.argmax(np.absolute(v)),)]
df = pd.DataFrame(data=v, index=labels, columns=('Max', 'Freq', 'DC'))
print(df)
fig, ax = plt.subplots()
# ax.hold() was removed in matplotlib 3.0; overlaying is now the default.
for v in (2,1):
    ax.semilogy(freqs, PSD0[v], label=labels[v])
ax.axvline(f, label='{}Hz'.format(f), ls=':')
ax.set_title('One-sided power spectral density (periodogram)')
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel('Amplitude [${V_{rms}}^2/Hz$]')
ax.legend()
ax.grid()
# -
# Naturally, the power spectrum can be estimated from the power spectral density by multiplying by the frequency discretisation:
print(df['Max']['Sine wave']*(freqs[1]-freqs[0]))  # PSD peak times the frequency resolution recovers the power-spectrum peak
# The peak corresponding to the sine wave frequency is now more visible above the major peaks in the sample signal one-sided amplitude spectrum. Let us query again for the top 5 values:
# Use the frequency vector returned by periodogram() above ('freqs'), not the
# stale 'tfreqs' from the rfft section; the two only coincide here because
# nfft defaults to the signal length, so 'freqs' is the robust choice.
df = pd.DataFrame(data=np.column_stack((np.absolute(PSD0[2]), freqs)), columns=('RSS', 'Freq'))
print(df.nlargest(5, columns='RSS').to_string(index=False))  # top 5 spectral peaks and their frequencies
# [Back to top](#top)
#
# ## Matplotlib package
#
# Matplotlib supplies one function, [psd](http://matplotlib.org/api/mlab_api.html#matplotlib.mlab.psd), to estimate the power spectral density (Pxx) of a signal (x) in the [mlab](http://matplotlib.org/api/mlab_api.html) module. This function, which is described as a "Welch’s average periodogram method", has the following signature:
#
# matplotlib.mlab.psd(x, NFFT=256, Fs=2, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None,sides='default', scale_by_freq=None)
#
# The function returns a tuple with the power spectral density estimate and the corresponding frequencies (Pxx, freqs).
# +
# Power spectral density estimates via matplotlib.mlab.psd (Welch's average
# periodogram: 512-sample Hanning segments, no overlap, density scaling).
Prn, freqs = psd(rn, NFFT=512, Fs=Fs, detrend=detrend_none, window=window_hanning, noverlap=0, pad_to=None,
                 sides='onesided', scale_by_freq=True)
Psw, freqs = psd(sw, NFFT=512, Fs=Fs, detrend=detrend_none, window=window_hanning, noverlap=0, pad_to=None,
                 sides='onesided', scale_by_freq=True)
Pss, freqs = psd(ss, NFFT=512, Fs=Fs, detrend=detrend_none, window=window_hanning, noverlap=0, pad_to=None,
                 sides='onesided', scale_by_freq=True)
PSD1 = (Prn, Psw, Pss)
v = [(np.absolute(v[ix]), freqs[ix], np.absolute(v[0])) for v in PSD1 for ix in (np.argmax(np.absolute(v)),)]
df = pd.DataFrame(data=v, index=labels, columns=('Max', 'Freq', 'DC'))
print(df)
fig, ax = plt.subplots()
# ax.hold() was removed in matplotlib 3.0; overlaying is now the default.
for v in (2,1):
    ax.plot(freqs, PSD1[v], label=labels[v])
ax.axvline(f, label='{}Hz'.format(f), ls=':')
ax.set_title('Power spectral density')
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel('Amplitude [V**2/Hz]')
ax.legend()
ax.grid()
# -
# The peak corresponding to the sine wave frequency is now more visible above the major peaks in the sample signal one-sided amplitude spectrum. Let us query again for the top 5 values:
df = pd.DataFrame(data=np.column_stack((np.absolute(PSD1[2]), freqs)), columns=('RSS', 'Freq'))  # mlab.psd estimate of the sample signal
print(df.nlargest(5, columns='RSS').to_string(index=False))  # top 5 spectral peaks and their frequencies
# [Back to top](#top)
#
# ## SciPy package
#
# SciPy supplies two functions to estimate the power spectral density (Pxx) of a signal (x) in the [signal](https://scipy.github.io/devdocs/signal.html) module, [periodogram](https://scipy.github.io/devdocs/generated/scipy.signal.periodogram.html) and [welch](https://scipy.github.io/devdocs/generated/scipy.signal.welch.html). Their signatures are the following:
#
# scipy.signal.periodogram(x, fs=1.0, window=None, nfft=None, detrend='constant', return_onesided=True, scaling='density', axis=-1)
#
# scipy.signal.welch(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None, detrend='constant', return_onesided=True, scaling='density', axis=-1)
#
# The first function is the periodogram whereas the second function uses the Welch’s method. In the particular case that noverlap is 0, this method is equivalent to Bartlett’s method. Both functions return a tuple with the frequencies and the power spectral density estimate (freqs, Pxx).
# +
# Power spectral density via scipy.signal.periodogram with a Hann window
# and nfft=512, for comparison with the matplotlib.mlab.psd result.
freqs, Prn = periodogram(rn, fs=Fs, window='hann', nfft=512, detrend=None, return_onesided=True, scaling='density', axis=-1)
freqs, Psw = periodogram(sw, fs=Fs, window='hann', nfft=512, detrend=None, return_onesided=True, scaling='density', axis=-1)
freqs, Pss = periodogram(ss, fs=Fs, window='hann', nfft=512, detrend=None, return_onesided=True, scaling='density', axis=-1)
PSD2 = (Prn, Psw, Pss)
v = [(np.absolute(v[ix]), freqs[ix], np.absolute(v[0])) for v in PSD2 for ix in (np.argmax(np.absolute(v)),)]
df = pd.DataFrame(data=v, index=labels, columns=('Max', 'Freq', 'DC'))
print(df)
fig, ax = plt.subplots()
# ax.hold() was removed in matplotlib 3.0; overlaying is now the default.
for v in (2,1):
    ax.plot(freqs, PSD2[v], label=labels[v])
ax.axvline(f, label='{}Hz'.format(f), ls=':')
ax.set_title('Power spectral density')
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel('Amplitude [V**2/Hz]')
ax.legend()
ax.grid()
# -
# The peak corresponding to the sine wave frequency is now more visible above the major peaks in the sample signal one-sided amplitude spectrum. Let us query again for the top 5 values:
df = pd.DataFrame(data=np.column_stack((np.absolute(PSD2[2]), freqs)), columns=('RSS', 'Freq'))  # scipy periodogram estimate of the sample signal
print(df.nlargest(5, columns='RSS').to_string(index=False))  # top 5 spectral peaks and their frequencies
# +
# Power spectral density via scipy.signal.welch (512-sample Hann segments,
# default 50% overlap) -- comparable with the previous two estimates.
freqs, Prn = welch(rn, fs=Fs, window='hann', nperseg=512, noverlap=None, nfft=None, detrend=None,
                   return_onesided=True, scaling='density', axis=-1)
freqs, Psw = welch(sw, fs=Fs, window='hann', nperseg=512, noverlap=None, nfft=None, detrend=None,
                   return_onesided=True, scaling='density', axis=-1)
freqs, Pss = welch(ss, fs=Fs, window='hann', nperseg=512, noverlap=None, nfft=None, detrend=None,
                   return_onesided=True, scaling='density', axis=-1)
PSD3 = (Prn, Psw, Pss)
v = [(np.absolute(v[ix]), freqs[ix], np.absolute(v[0])) for v in PSD3 for ix in (np.argmax(np.absolute(v)),)]
df = pd.DataFrame(data=v, index=labels, columns=('Max', 'Freq', 'DC'))
print(df)
fig, ax = plt.subplots()
# ax.hold() was removed in matplotlib 3.0; overlaying is now the default.
for v in (2,1):
    ax.plot(freqs, PSD3[v], label=labels[v])
ax.axvline(f, label='{}Hz'.format(f), ls=':')
ax.set_title('Power spectral density')
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel('Amplitude [V**2/Hz]')
ax.legend()
ax.grid(True)
# -
# The peak corresponding to the sine wave frequency is now more visible above the major peaks in the sample signal one-sided amplitude spectrum. Let us query again for the top 5 values:
df = pd.DataFrame(data=np.column_stack((np.absolute(PSD3[2]), freqs)), columns=('RSS', 'Freq'))  # Welch estimate of the sample signal
print(df.nlargest(5, columns='RSS').to_string(index=False))  # top 5 spectral peaks and their frequencies
# [Back to top](#top)
#
# ## References
#
# <NAME>, “The use of the fast Fourier transform for the estimation of power spectra: A method based on time averaging over short, modified periodograms”, IEEE Trans. Audio Electroacoust. vol. 15, pp. 70-73, 1967.
#
# [Back to top](#top)
#
# ## Odds and ends
#
# This notebook was created by <NAME>.
#
# [Back to top](#top)
| DSP_FFT_psd.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Linear-Regression" data-toc-modified-id="Linear-Regression-1">Linear Regression</a></span><ul class="toc-item"><li><span><a href="#1)-Import-packages" data-toc-modified-id="1)-Import-packages-1.1">1) Import packages</a></span></li><li><span><a href="#2)-Load-Data" data-toc-modified-id="2)-Load-Data-1.2">2) Load Data</a></span></li><li><span><a href="#Build-Univariate-Linear-Regression" data-toc-modified-id="Build-Univariate-Linear-Regression-1.3">Build Univariate Linear Regression</a></span><ul class="toc-item"><li><span><a href="#Train---Test-split" data-toc-modified-id="Train---Test-split-1.3.1">Train - Test split</a></span></li><li><span><a href="#Univariate-Linear-Regression" data-toc-modified-id="Univariate-Linear-Regression-1.3.2">Univariate Linear Regression</a></span></li></ul></li><li><span><a href="#Model-interpretation" data-toc-modified-id="Model-interpretation-1.4">Model interpretation</a></span></li></ul></li></ul></div>
# -
# # Linear Regression
#
# <a href="https://drive.google.com/file/d/1EZ_xqMaYj77vErVnrQmnFOj-VBEoO5uW/view" target="_blank">
# <img src="http://www.deltanalytics.org/uploads/2/6/1/4/26140521/screen-shot-2019-01-05-at-4-48-29-pm_orig.png" width="500" height="400">
# </a>
#
# Linear Regression attempts to predict a continuous outcome feature (**Y**) from one or more explanatory features (**X**).
#
# $$Y = \beta_0 + \beta_1 X$$
#
# $\beta_0$ is called the intercept term, and represents the expected mean value of Y when all explanatory features equal 0.
# $\beta_1$ is called a beta coefficient, and represents the expected change in the value of Y that results from a one unit change in X.
#
# Below is an example of a linear regression with only one explanatory feature. The red dots indicate the actual data, and the blue line represents the predicted **Y** values based on the provided **X** values. $\beta_0$ appears to equals 0, and $\beta_1$ appears to equal 2.
# <img src="./images/LinearRegression.png" alt="Go Find Missing Image" style="width: 500px;height=500"/>
#
# In this lab, we will attempt to construct a linear regression in order to answer a question that Kiva borrowers may have:
#
# **What impacts the loan amount requested?**
#
# To ensure that our linear regressor is appropriate and interpretable, we will have to confirm the following assumptions are not violated:
#
# 1. Linear relationship between x and y - We can draw a line or curve within the relationship
# 2. Normality - A normal curve, a histogram of the values should look like a bell
# 3. Minimal multicollinearity (if multiple variables) - Each variable should introduce **new** information
# 4. No autocorrelation - No relationship **within** a variable depending on the time/magnitude scale
# 5. Homoscedasticity - The variation in the error of the model should not be increasing as the variation in the data increases
#
# - Additional rule of thumb: at least 20 observations per independent variable in the analysis
#
# If these assumptions are violated, then the predictive power of the linear regression is still valid but the information concerning the most important features is not. It is important to keep this in mind!
#
#
# Here's a look ahead at what we'll be doing in these series of notebooks:
#
# 2.1 Load Data and Build Univariate Linear Regression
# 2.2 Check Assumptions
# 2.3 Build Multivariate Linear Regression
# 2.4 Model Evaluation
# 1) Import packages
# -----
# +
# Generally useful data science packages
import numpy as np
import pandas as pd
# Plotting package
import matplotlib.pyplot as plt
# %matplotlib inline
from pandas.plotting import scatter_matrix
import seaborn as sns
plt.rcParams['figure.figsize'] = (12, 8)
sns.set()
sns.set(font_scale=1.5)
# We'll be using statsmodels to fit linear regression
import statsmodels.api as sm
# Packages for checking assumptions
from scipy import stats as stats
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import mean_squared_error, make_scorer
# Set jupyter notebook preferences
# the command below means that the output of multiple commands in a cell will be output at once.
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# the command below tells jupyter to display up to 100 columns, this keeps everything visible
pd.set_option('display.max_columns', 100)
pd.set_option('expand_frame_repr', True)
# -
# 2) Load Data
# -----
# +
# Load data: try the local copy first, then fall back to the remote repo.
try:
    # Local version
    path = "../data/"
    filename = 'loans.csv'
    df = pd.read_csv(path+filename)
# BUG FIX: `except FileNotFoundError or ParserError:` evaluated the `or`
# first and therefore only caught FileNotFoundError (and `ParserError` was
# an undefined name). A tuple is required to catch several exception types;
# ParserError lives in pandas.errors.
except (FileNotFoundError, pd.errors.ParserError):
    # If not local, get from remote repo. Helpful if using colab.
    url = 'https://raw.githubusercontent.com/DeltaAnalytics/machine_learning_for_good_data/master/loans.csv'
    df = pd.read_csv(url)
# It is always good to visually inspect the data
df.head()
# -
# Build Univariate Linear Regression
# -----
# ### Train - Test split
#
# Prior to building our model, we first need to split our dataset into a training set and a test set. We will use our training set to train our regressor, and we will use our test set for model validation.
# To achieve this, we will call sklearn's [train_test_split](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html), and set the input parameter `test_size` to .2 so that 20% of the data will be assigned to the test set and 80% of the data will be assigned to the training set.
#
# **We set the test set aside and only look at this at the end to evaluate the models performance on unseen data.**
#
# *Why do we do this?* We do this in order to ensure that we do not test on what the model has already learned from. This way, we are not simply solving for the data we *know* about and we can check if our model is applicable when we do not know the output values. As a result, we can also check for **overfitting** the training data.
#
# We fix the random state so that each time we run the train_test_split code, we get the same distribution of data. This is important as keeping the data split constant allows us to compare results from different sessions.
df.columns.tolist()
# +
pd.options.mode.chained_assignment = None # default='warn'; silences the SettingWithCopyWarning triggered by X['intercept'] = 1 below
# Define our dependent variable (the quantity the regression predicts)
y_column = 'loan_amount'
y = df[y_column]
# Define our independent variables (a single predictor: univariate regression)
x_columns = ['lender_count']
X = df[x_columns]
# Add an intercept term to the independent variables. This is needed in order to include the constant term from
# linear regression equation -- statsmodels' OLS does not add a constant column automatically.
X['intercept'] = 1
# Split our data into training and test data sets (80% train / 20% test).
# random_state is fixed so the same split is reproduced between sessions,
# keeping results comparable across runs.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# -
# ### Univariate Linear Regression
# In order to build our linear regressor, we will use the [statsmodels](http://www.statsmodels.org/dev/generated/statsmodels.regression.linear_model.OLS.html) implementation. There are other implementations out there; however, we will use statsmodels because it creates a nice summary table for model evaluation. Let's print out the summary table to demonstrate how easy it is to train the model and see the results.
#
# *Why **regression**?* In this case, we are not classifying an attribute of the loan request, we are predicting the **amount** requested which is a *continuous variable*.
#
# *What does **univariate** mean?* Univariate breaks down into "uni" which means single and "variate" which looks a lot like "variable" - therefore, univariate means our prediction is based on just one variable!
#
# Think about the formula we introduced at the top of this notebook, what gives away that we are only using **one** variable in this case?
#
# For an in-depth review on all the statistics and numbers given in the summary below, check out this [awesome page!](http://connor-johnson.com/2014/02/18/linear-regression-with-python/)
# Fit an ordinary-least-squares model on the training data only.
# exog already contains the 'intercept' column added earlier; sm.OLS does
# not insert a constant term by itself, so omitting it would force the
# fitted line through the origin.
model = sm.OLS(endog=y_train, exog=X_train)
model_fit = model.fit()
print(model_fit.summary())
# Model interpretation
# -----
#
# This is the typical regression output. It's a lot to digest!
#
# Remember that our linear regression model can be represented as an equation, like this:
#
# predicted_loan_amount = intercept + coef*lender_count
#
# What is the intercept value?
model_fit.params.intercept
# What does that intercept value tell us?
#
# <br>
# <br>
# <br>
# What is the coefficent for `lender_count`?
model_fit.params.lender_count
# What does that coeffiecent value tell us?
#
# <br>
# <br>
# <br>
# -----
#
# How do we know however, whether this is a significant result?
#
# We have a sufficient amount of confidence in this conclusion because the **p-value** is reported to be 0.000. In technical terms, the p-value is **the probability of getting results as extreme as the ones observed given no correlation.**
#
# In statistics, we want our results to fall within the 95% confidence interval, or the p-value to be <= 0.05. This means, "[i]f repeated samples were taken and the 95% confidence interval was computed for each sample, 95% of the intervals would contain the population mean. A 95% confidence interval has a 0.95 probability of containing the population mean. 95% of the population distribution is contained in the confidence interval." [Read more here.](http://www.investopedia.com/terms/s/standard-error.asp) The p-value is an indicator of where we fall in the confidence interval. In English, small p-value (<= 0.05) indicates strong evidence that the coefficient is different than 0.
#
# This is a relatively simplified explanation of p-values. Don't worry if it's not immediately intuitive - [not even professional statisticians can easily explain this concept.](http://fivethirtyeight.com/features/not-even-scientists-can-easily-explain-p-values/) To get a deeper understanding, we recommend grabbing the nearest textbook on statistics to review!
#
# Let's test our model on our test set.
# Evaluate on held-out data: predict loan amounts for the test set and
# plot predictions against the true values.
y_pred_test = model_fit.predict(X_test)

fig, ax = plt.subplots(figsize=(9, 7))
ax.scatter(y_pred_test, y_test, c='r', alpha=0.5)
ax.set_title('predicted vs true for test data')
ax.set_xlabel('predicted loan amounts')
ax.set_ylabel('True loan amounts')
ax.set_xlim([0, 11000])
ax.set_ylim([0, 10000])
plt.show();
# Our model seems to be consistently under-predicting loan amounts (most of the dots seem to be above the line y=x). We will try to improve this model using multivariate regression.
#
# **Before we start looking for other explanatory features, it is important to note that interpretation of the results above is only valid if the assumptions of linear regression are not violated. Lets go through these assumptions now...**
# <br>
# <br>
# <br>
#
# ----
| 3_module_linear_regression/3_1_linear_regression-build_univariate_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## 6.4 Translation of Expressions
# ### 6.4.1
#
# > Add to the translation of Fig. 6.19 rules for the following productions:
#
# > a) $E~\rightarrow~E_1~*~E_2$.
#
# > b) $E~\rightarrow~+~E_1$.
# | PRODUCTION | SEMANTIC RULES |
# |:-----------|:---------------|
# |$S\rightarrow\mathbf{id}=E;$|$\begin{array}{ll}S.code =&E.code~||~\\ &gen(top.get(\mathbf{id}.lexeme)~'='~E.addr)\end{array}$|
# |$E\rightarrow E_1+E_2$|$\begin{array}{ll}E.addr =& \mathbf{new}~Temp()\\ E.code =& E_1.code~||~E_2.code~||~\\&gen(E.addr~'='~E_1.addr~'+'~E_2.addr)\end{array}$|
# |$E\rightarrow E_1*E_2$|$\begin{array}{ll}E.addr =& \mathbf{new}~Temp()\\ E.code =& E_1.code~||~E_2.code~||~\\&gen(E.addr~'='~E_1.addr~'*'~E_2.addr)\end{array}$|
# |$~~|+E_1$|$\begin{array}{ll}E.addr =& E_1.addr\\ E.code =& E_1.code\end{array}$|
# |$~~|-E_1$|$\begin{array}{ll}E.addr =& \mathbf{new}~Temp()\\ E.code =& E_1.code~||~\\&gen(E.addr~'='~'\mathbf{minus}'~E_1.addr)\end{array}$|
# |$~~|~(E_1)$|$\begin{array}{ll}E.addr=&E_1.addr\\E.code=&E_1.code\end{array}$|
# |$~~|~\mathbf{id}$|$\begin{array}{ll}E.addr=&top.get(\mathbf{id}.lexeme)\\ E.code =& ''\end{array}$
# ### 6.4.2
#
# > Repeat Exercise 6.4.1 for the incremental translation of Fig. 6.20.
# $$\begin{array}{rcll}
# S &\rightarrow& \mathbf{id} = E; & \{ gen(top.get(\mathbf{id}.lexeme)~'='~E.addr);\} \\
# E &\rightarrow& E_1 + E_2 & \{ E.addr = \mathbf{new}~Temp();\\
# &&& ~gen(E.addr~'='~E_1.addr~'+'~E_2.addr);\}\\
# E &\rightarrow& E_1 * E_2 & \{ E.addr = \mathbf{new}~Temp();\\
# &&& ~gen(E.addr~'='~E_1.addr~'*'~E_2.addr);\}\\
# &|& +E_1& \{ E.addr = E_1.addr; \}\\
# &|& -E_1& \{ E.addr = \mathbf{new}~Temp();\\
# &&& ~gen(E.addr~'='~'\mathbf{minus}'~E_1.addr);\}\\
# &|& (E_1)& \{ E.addr = E_1.addr; \}\\
# &|& \mathbf{id}& \{ E.addr = top.get(\mathbf{id}.lexeme); \}\\
# \end{array}$$
# ### 6.4.3
#
# > Use the translation of Fig. 6.22 to translate the following assignments:
#
# > a) `x = a[i] + b[j]`.
# ```
# t1 = a.type.width * i
# t2 = a.array.base [ t1 ]
# t3 = b.type.width * j
# t4 = b.array.base [ t3 ]
# t5 = t2 + t4
# x = t5
# ```
# > b) `x = a[i][j] + b[i][j]`.
# ```
# t1 = a.type.width * i
# t2 = a.type.elem.type.width * j
# t3 = t1 + t2
# t4 = a.array.base [ t3 ]
# t5 = b.type.width * i
# t6 = b.type.elem.type.width * j
# t7 = t5 + t6
# t8 = b.array.base [ t7 ]
# t9 = t4 + t8
# x = t9
# ```
# > c) `x = a[b[i][j]][c[k]]`.
# ```
# t1 = b.type.width * i
# t2 = b.type.elem.type.width * j
# t3 = t1 + t2
# t4 = b.array.base [ t3 ]
# t5 = a.type.width * t4
# t6 = c.type.width * k
# t7 = c.array.base [ t6 ]
# t8 = a.type.elem.type.width * t7
# t9 = t5 + t8
# t10 = a.array.base [ t9 ]
# x = t10
# ```
# ### 6.4.4
#
# > Revise the translation of Fig. 6.22 for array references of the Fortran style, that is, $\mathbf{id}[E_1, E_2, \dots, E_n]$ for an $n$-dimensional array.
# $$\begin{array}{rcl}
# L &\rightarrow& \mathbf{id}~[~T~] \\
# T &\rightarrow& E \\
# &|& T_1, E \\
# \end{array}$$
# ### 6.4.5
#
# > Generalize formula (6.7) to multidimensional arrays, and indicate what values can be stored in the symbol table and used to compute offsets. Consider the following cases:
#
# > a) An array $A$ of two dimensions, in row-major form. The first dimension has indexes running from $l_1$ to $h_1$, and the second dimension has indexes from $l_2$ to $h_2$. The width of a single array element is $w$.
# $base + (i_1 - l_1) * (h_2 - l_2 + 1) * w + (i_2 - l_2) * w$
# > b) The same as (a), but with the array stored in column-major form.
# $base + (i_2 - l_2) * (h_1 - l_1 + 1) * w + (i_1 - l_1) * w$
# > c) An array $A$ of $k$ dimensions, stored in row-major form, with elements of size $w$. The $j$th dimension has indexes running from $l_j$ to $h_j$.
# $base + ((\cdots(((i_1 - l_1) * (h_2 - l_2 + 1) + (i_2 - l_2)) * (h_3 - l_3 + 1))\cdots) + (i_k - l_k))*w$
# > d) The same as (c) but with the array stored in column-major form.
# $base + ((\cdots(((i_k - l_k) * (h_{k-1} - l_{k-1} + 1) + (i_{k-1} - l_{k-1})) * (h_{k-2} - l_{k-2} + 1))\cdots) + (i_1 - l_1))*w$
# ### 6.4.6
#
# > An integer array $A[i, j]$, stored row-major, has index $i$ ranging from 1 to 10 and index $j$ ranging from 1 to 20. Integers take 4 bytes each. Suppose array $A$ is stored starting at byte 0. Find the location of:
#
# > a) $A[4, 5]$
# $3 \times 80 + 4 \times 4 = 256$
# > b) $A[10, 8]$
# $9 \times 80 + 7 \times 4 = 748$
# > c) $A[3, 17]$.
# $2 \times 80 + 16 \times 4 = 224$
# ### 6.4.7
#
# > Repeat Exercise 6.4.6 if $A$ is stored in column-major order.
#
# > a) $A[4, 5]$
# $3 \times 4 + 4 \times 40 = 172$
# > b) $A[10, 8]$
# $9 \times 4 + 7 \times 40 = 316$
# > c) $A[3, 17]$.
# $2 \times 4 + 16 \times 40 = 648$
# ### 6.4.8
#
# > A real array $A[i, j, k]$ has index $i$ ranging from 1 to 4, $j$ is ranging from 0 to 4, and $k$ ranging from 5 to 10. Reals take 8 bytes each. If $A$ is stored row-major, starting at byte 0, find the locations of:
#
# > a) $A[3, 4, 5]$
# $2 \times 240 + 4 \times 48 + 0 \times 8 = 672$
# > b) $A[1, 2, 7]$
# $0 \times 240 + 2 \times 48 + 2 \times 8 = 112$
# > c) $A[4, 3, 9]$.
# $3 \times 240 + 3 \times 48 + 4 \times 8 = 896$
# ### 6.4.9
#
# > Repeat Exercise 6.4.8 if $A$ is stored in column-major order.
#
# > a) $A[3, 4, 5]$
# $2 \times 8 + 4 \times 32 + 0 \times 160 = 144$
# > b) $A[1, 2, 7]$
# $0 \times 8 + 2 \times 32 + 2 \times 160 = 384$
# > c) $A[4, 3, 9]$.
# $3 \times 8 + 3 \times 32 + 4 \times 160 = 760$
| 06/6.4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/codeforhk/python_course/blob/master/py_class_1_and_2_recap.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="9VHTi_SpL2mW" colab_type="text"
# # Module 1: Intro to the Python programming lauguage
# + [markdown] id="3I44I0Ot7c7o" colab_type="text"
# ## The Zen of Python
# + id="eLbx-mcd7JI0" colab_type="code" outputId="c6624813-c155-4c48-d627-143676a57989" colab={"base_uri": "https://localhost:8080/", "height": 374}
# The zen of Python
import this
# + [markdown] id="7m5VF5cKL5wD" colab_type="text"
# # Module 2: Data Types and Operators¶
# + [markdown] id="i-OkKzccNJw5" colab_type="text"
# ## 2.0.0 Assigning Variable
# + id="ZOesiyKFNEVM" colab_type="code" colab={}
# Let's say assign the value 1 to x, then execute the cell by CTRL+ENTER
x = 1
#From now on, python recognise x is equal to 1
# + id="p3hm-ThVNEoV" colab_type="code" colab={}
# This will create an error, called "SyntaxError". It means python doesn't understand it!
# (You should see this very often from now on)
1 = x
# + id="OmrYAVvGNE2T" colab_type="code" colab={}
x # Try execute me!
# + [markdown] id="YulrYuBnNFBc" colab_type="text"
# ### 2.0.1 Reassigning variable
# + id="bz4GWqIaNFKd" colab_type="code" colab={}
# You can assign anything to the variable.
x = 'an expensive iphone'
x
# + [markdown] id="CY6m70KQNuOV" colab_type="text"
# ## 2.1.0 Data Types
# + [markdown] id="HAdOX6BXNuGc" colab_type="text"
# ### 2.1.1 Integer
# + id="f5tErI4YNt77" colab_type="code" colab={}
# Interger
y = 2
type(y)
# + [markdown] id="26FZLeD3NtzS" colab_type="text"
# ### 2.1.2 Float
# + id="fZz3AqgVOFyO" colab_type="code" colab={}
# Float
z = 3.1416
type(z)
# + id="0AhUFDzsOF5s" colab_type="code" colab={}
# You can change the type from float to integer using int()
int(z)
# + [markdown] id="aF8aVJYeOGB8" colab_type="text"
# ### 2.1.3 String
# + id="TteglY2UOGIl" colab_type="code" colab={}
this_is_a_string = 'Patrick'
this_is_also_a_string = "$%%^&*()njfnksdjfsdyftuy32uygsjdhfbmnasd"
number_can_also_be_string = "123456"
# + id="z4g9XlnQOGNa" colab_type="code" colab={}
# Anything within a " or ' will be a string
myname = "Patrick"
type(myname)
# + id="O0UwUfdWOGQq" colab_type="code" colab={}
# String can be concatenated by using + sign
firstname = 'Cristiano'
lastname = 'Ronaldo'
fullname = firstname + ' ' + lastname # Space also counts as string
fullname
# + id="T9ikPxn3OGUR" colab_type="code" colab={}
# Remember Integer and Float? Because of the quotes, these values are
# strings despite looking like numbers
one = '1'
two = '2.2'
one + two # string concatenation: gives '12.2', not the number 3.2
# + id="qakXfj3tOGXT" colab_type="code" colab={}
int(one) + float(two)
# + [markdown] id="hU6dK5IsOGan" colab_type="text"
# ### 2.1.4 Reserved Keywords in Python
# + id="qMK64oSvOGd8" colab_type="code" colab={}
# Don't use these keywords as variable !!!
# They have special meanings and usage in Python
# Here are some of the examples
# NOTE(review): executing this cell raises a SyntaxError -- bare keywords
# such as `and` or `if` are not valid statements on their own; the listing
# is for illustration only. Also, `print` is a builtin function in
# Python 3, not a reserved keyword (shadowing it is unwise but legal).
False
None
and
if
for
from
import
as
is
in
print
# + [markdown] id="i79T47r4OGhF" colab_type="text"
# ## 2.2.0 Math Operations in Python
#
# - Summation
# - Subtraction
# - Multiplication
# - Division
# + [markdown] id="gacjl7fUOGkS" colab_type="text"
# ### 2.2.1 Summation
# + id="Cg3T2aaiOGnL" colab_type="code" colab={}
# You can use it like a calculator. Try 6 + 7? you should be expecting 13?
6 + 7
# + id="bADl5fZyOGqd" colab_type="code" colab={}
#Let's say I want to know how much to does it cost to the buy whole set of iphone
iphoneX = 9998
airpods = 1288
price_for_new_iphone = iphoneX + airpods
price_for_new_iphone
# + [markdown] id="Cg6laE1kOGtq" colab_type="text"
# ### 2.2.2 Subtraction
# + id="kEHDI7B6Ntgz" colab_type="code" colab={}
# You can use it like a calculator. Try 8 - 2? you should be expecting 6?
8 - 2
# + id="ZWq2j-PONtNM" colab_type="code" colab={}
# Assign 2019 to a variable called THISYEAR,
THIS_YEAR = 2019
# + id="ApCJtNNKgazs" colab_type="code" colab={}
# Assign the year of your birthday to a variable called BIRTHYEAR
BIRTH_YEAR = # Don't forget to assign a birth year to the BIRTHYEAR variable
# + id="TCbH3ziXga7k" colab_type="code" colab={}
# Work out your age by substracting BIRTHYEAR by THISYEAR
age = THIS_YEAR - BIRTH_YEAR
age
# + [markdown] id="aWmerWxZgbBy" colab_type="text"
# ### 2.2.3 Multiplication
# + id="hEZbsaXsgbGY" colab_type="code" colab={}
# You can use it like a calculator. Try 7 * 6? you should be expecting 42?
7 * 6
# + id="7G6Lf4NBgbKL" colab_type="code" colab={}
# Let's assign 7.8 to a variable called exchange_rate
exchange_rate = 7.8
# + id="BCa0QWongbNu" colab_type="code" colab={}
# Let's assign 7.8 to a variable called exchange_rate
exchange_rate = 7.8
# Assign 100 to a variable called usd
usd = 100
#workout how much hkd is worth of 100 usd
hkd = usd * exchange_rate
hkd
# + [markdown] id="Xo7q8qdIgbRO" colab_type="text"
# ### 2.2.4 Division
# + id="lFVwSJ8VgbUo" colab_type="code" colab={}
# You can use it like a calculator. Try 12 / 6? you should be expecting 2?
12 / 6
# + id="QKkyYqWbgbYH" colab_type="code" colab={}
# In division, the result would be float by default
# You can do divsion with two slash (//) to indicate you want Integer as results
12 // 6
# + [markdown] id="HpHWV3sJgbba" colab_type="text"
# ### 2.2.5 More examples
# + id="la2Qjj1Sgbel" colab_type="code" colab={}
# Variable types are never declared; the interpreter infers them from the
# assigned values.
a = 4
b = 3
for result in (a + b, a - b, a * b, a / b):
    print(result)
# + id="c_iSocIvgbiI" colab_type="code" colab={}
# As in the previous example, if one element is floating point
# the interpreter will do an automatic cast
a = 6. ## this is now a float
b = 7
print(a + b)
print(a - b)
print(a * b)
print(a / b)
# + [markdown] id="qpYV1lwqgblt" colab_type="text"
# ## 2.3.0 Logical operation (Boolean, True or False)
# + [markdown] id="o5-hxW08hMqw" colab_type="text"
# ### 2.3.1 The equal (==) sign
# + id="cH-HxUa1g_sY" colab_type="code" colab={}
## The == sign will determin whether the two values are equal
x = 1
x == 1 # Check if x equals one
# + [markdown] id="_Ig9SzWIg_9z" colab_type="text"
# ### 2.3.2 The not equal (!=) sign
# + id="SGu1O_dhhAHc" colab_type="code" colab={}
## The != sign will determine whether the two values are NOT equal
x = 1
x != 1 # Check if x does NOT equal one (False here, because x is 1)
# + [markdown] id="H4L4N5NphAQA" colab_type="text"
# ### 2.3.3 Inequality signs (>, <)
# + id="a3AMkJd0hAYv" colab_type="code" colab={}
# > and < sign means bigger or smaller
iphone = 9000
samsung = 5000
# You can change the sign to see a different results
iphone > samsung
# + [markdown] id="8HvDR37Ygboq" colab_type="text"
# ### 2.3.4 Using them together >=, <= etc.
# + id="FX6HRUOrhcbL" colab_type="code" colab={}
iphone <= samsung
# + [markdown] id="kb4kclc9hcPi" colab_type="text"
# ## 2.4.0 String Operations
# + id="4Gs0RYoEhiWE" colab_type="code" colab={}
# Python can handle very long text as well
wiki = "Python is a widely used high-level programming language for general-purpose programming, created by <NAME> and first released in 1991"
print(wiki)
# + [markdown] id="LrAOgg7Qhieh" colab_type="text"
# ### 2.4.1 Slicing strings
# + id="xhfQWbWIhiqV" colab_type="code" colab={}
# You can try to slice different parts of a string
# by using the index
# The first letter will be indexed as 0
wiki[0]
# + id="CM0nBTg9hivl" colab_type="code" colab={}
# The last letter will be indexed as -1
wiki[-1]
# + id="UpmIzYF0hizb" colab_type="code" colab={}
# You can also use [beginning_index:ending_index] to slice
# a range of string into a shorter string (or called substring)
wiki[0:23]
# + [markdown] id="gzR28E9ehi2k" colab_type="text"
# ### 2.4.2 String related functions
# + id="S3bxYDV3hi51" colab_type="code" colab={}
# Finding the location of a substring
# It will return the first index if finds the substring
print(wiki.find("Python"))
# + id="ILM8vb4Ehi8r" colab_type="code" colab={}
## Changing string to uppercase
print(wiki.upper())
# + id="yHfCOqtCh9fo" colab_type="code" colab={}
## Changing string to lowercase
print(wiki.lower())
# + id="hvX8mYbEh9kV" colab_type="code" colab={}
## Replacing substrings to something else
print(wiki.replace('high', 'low'))
# + id="YmgOgi3Wh9ni" colab_type="code" colab={}
## these operations do not modify the original string
print(wiki)
# + id="e8l3RP5Zh-Bd" colab_type="code" colab={}
# We can count the occurrences of a letter or word
print(wiki.count('a'))
print(wiki.count('Python'))
# + id="hcQ41EYBh-SM" colab_type="code" colab={}
# "in" keyword returns a boolean
print("python" in wiki)
print("language" in wiki)
# + id="BINbRHsrh-fq" colab_type="code" colab={}
## .split() separates fields
print(wiki.split())
# It would return a new data type called "list"
# + id="RAGZkWpThi_r" colab_type="code" colab={}
# You can use them together !
# Change all the letters to lower case and count occurnce of a word
print(wiki.lower().count('python'))
# + [markdown] id="LPrM34cVL8vW" colab_type="text"
# # Module 3: Data Structures (List, Dictionary, Tuple, Dataframe)
# + [markdown] id="z_YdLD49L_Up" colab_type="text"
# ## 3.1.0 List
# + [markdown] id="7mIqOKEFjsoZ" colab_type="text"
# ### 3.1.1 List syntax
#
# + id="AZKRtPYgjgVu" colab_type="code" outputId="d60bcd55-5a7c-4c09-d9aa-8a12b7fc096b" colab={"base_uri": "https://localhost:8080/", "height": 34}
# We use square brackets to indicate a list
sea_creatures = ['shark', 'cuttlefish', 'squid', 'mantis shrimp', 'anemone']
print(sea_creatures)
# + id="gbNxf6fvjj4X" colab_type="code" outputId="f203710a-318e-4051-9054-028dcb7a529f" colab={"base_uri": "https://localhost:8080/", "height": 34}
# We can check the type of the sea_creatures variable
type(sea_creatures)
# + [markdown] id="xkP00K2qjmAn" colab_type="text"
# ### 3.1.2 List & Index
# + id="T5QP_tM3j9vH" colab_type="code" outputId="1ef91429-3511-4aae-d8da-b50f78f18ec0" colab={"base_uri": "https://localhost:8080/", "height": 68}
# List begins at zero. similar to string slicing
num_list = [1,2,3,4,5]
print(num_list[0]) # use square bracket to indicate anything "index". index begins at 0 -
# and this expression is trying to get the first item from the list!
#This to select the second item
print(num_list[1])
#This to select the last item
print(num_list[-1])
#This to select the second last item
print(num_list[-2])
# + id="2--X4XmPkErR" colab_type="code" outputId="4157f39a-96ba-41f8-fc75-1de8a21a0668" colab={"base_uri": "https://localhost:8080/", "height": 68}
# And this is how to selet more than one item in list
print(num_list[0:5])
print(num_list[3:])
print(num_list[:3])
# + [markdown] id="eIc-xC21kOlN" colab_type="text"
# ### 3.1.3 Length of a list
# + id="B_n12ezHkXxK" colab_type="code" outputId="59096601-9206-406e-dc34-bc4219028150" colab={"base_uri": "https://localhost:8080/", "height": 34}
# You can use the len() function to look for the length of a list
num_list = [1,2,3,4,5]
len(num_list)
# + [markdown] id="uJ0Axt0Nkcvi" colab_type="text"
# ### 3.1.4 Manipulating the list
# + id="F26XTiC1kj7a" colab_type="code" colab={}
# You can add item to the list using append()
empty_list = []
empty_list.append(1)
empty_list.append(2.0)
empty_list.append('three')
print(empty_list)
# + id="vzsncufTkf5u" colab_type="code" colab={}
# You can remove a certain item using remove()
num_list = [1,2,3,4,5]
num_list.remove(1)
print(num_list)
# + id="coVQSP8Dkxch" colab_type="code" colab={}
# To sort the list, use .sort()
num = [100,-20,1,2,3,1]
num.sort()
print(num)
# + id="KzzXoJhYkz3v" colab_type="code" outputId="f0eb179a-3dbb-43b5-8efb-0699b3e7df2e" colab={"base_uri": "https://localhost:8080/", "height": 51}
# To find the max, use .max()
num = [100,-20,1,2,3,1]
print(max(num))
# You can find the minmun using min() as well
num = [100,-20,1,2,3,1]
print(min(num))
# + id="5s6II820k0HN" colab_type="code" colab={}
# To count a certain item, use count
num = [100,-20,1,2,3,1]
num.count(1)
# + id="NesbanCzk0Pa" colab_type="code" outputId="74503737-cc21-4dbd-95c3-376eab8103fd" colab={"base_uri": "https://localhost:8080/", "height": 34}
# You can "add" two list together to form a longer list
# This is called concatenating list
asia = ["china", "japan", "india"]
europe = ["france", "germany", "italy"]
asia + europe
# + [markdown] id="k2ppH-TTk0V4" colab_type="text"
# ### 3.1.5 Creating complex list
#
# + id="XNKS9abEk0eE" colab_type="code" outputId="86e65891-1ce2-46c0-f5d3-1906d1c9fc12" colab={"base_uri": "https://localhost:8080/", "height": 34}
# List can be of a more complicatd structure
# Such as a list of list, or 2D list
a = [1, 2, 3]
b = [3, 4, 5]
c = [5, 6, 7]
[a,b,c]
# + [markdown] id="V_DwoA4fk0ig" colab_type="text"
# ### 3.1.6 Working with strings & list
# + id="Rm1v0ZxGmKBG" colab_type="code" colab={}
# You can use .split() to split a long string into a list.
# BUG FIX: the original cell broke this string literal across two physical
# lines (a literal newline inside a plain "..." string is a SyntaxError).
# The two halves are rejoined here with implicit string concatenation.
shape_of_you = ("The club isn't the best place to find a lover. So the bar is where I go (mmmm). Me and my friends at the table doing shots. Drinking fast and then we talk slow (mmmm). And you come over and start up a conversation with just me. And trust me I'll give it a chance now (mmmm). Take my hand, stop, put Van The Man on the jukebox. And then we start to dance. And now I'm singing like. . Girl, you know I want your love. Your love was handmade for somebody like me. Come on now, follow my lead. I may be crazy, don't mind me. Say, boy, let's not talk too much. Grab on my waist and put that body on me. Come on now, follow my lead. Come, come on now, follow my lead (mmmm). . I'm in love with the shape of you. We push and pull like a magnet do. Although my heart is falling too. I'm in love with your body. Last night you were in my room. And now my bedsheets smell like you. Every day discovering something brand new. I'm in love with your body. . Oh I oh I oh I oh I. I'm in love with your body. Oh I oh I oh I oh I. I'm in love with your body. Oh I oh I oh I oh I. I'm in love with your body. Every day discovering something brand new. I'm in love with the shape of you. . One week in we let the story begin. We're going out on our first date (mmmm). You and me are thrifty, so go all you can eat. Fill up your bag and I fill up a plate (mmmm). We talk for hours and hours about the sweet and the sour. And how your family is doing okay (mmmm). And leave and get in a taxi, then kiss in the backseat. Tell the driver make the radio play. And I'm singing like. . Girl, you know I want your love. Your love was handmade for somebody like me. Come on now, follow my lead. I may be crazy, don't mind me. Say, boy, let's not talk too much. Grab on my waist and put that body on me. Come on now, follow my lead. Come, come on now, follow my lead (mmmm). . I'm in love with the shape of you. We push and pull like a magnet do. Although my heart is falling too. I'm in love with your body. "
                "Last night you were in my room. And now my bedsheets smell like you. Every day discovering something brand new. I'm in love with your body. . Oh I oh I oh I oh I. I'm in love with your body. Oh I oh I oh I oh I. I'm in love with your body. Oh I oh I oh I oh I. I'm in love with your body. Every day discovering something brand new. I'm in love with the shape of you. . Come on, be my baby, come on. Come on, be my baby, come on. Come on, be my baby, come on. Come on, be my baby, come on. Come on, be my baby, come on. Come on, be my baby, come on. Come on, be my baby, come on. Come on, be my baby, come on. . I'm in love with the shape of you. We push and pull like a magnet do. Although my heart is falling too. I'm in love with your body. Last night you were in my room. And now my bedsheets smell like you. Every day discovering something brand new. I'm in love with your body. . Come on, be my baby, come on. Come on, be my baby, come on. I'm in love with your body. Come on, be my baby, come on. Come on, be my baby, come on. I'm in love with your body. Come on, be my baby, come on. Come on, be my baby, come on. I'm in love with your body. Every day discovering something brand new. I'm in love with the shape of you")
shape_of_you_list = shape_of_you.split('.') # Split by the "."
shape_of_you_list[:3]
# + [markdown] id="TZFpxCIEmKOe" colab_type="text"
# ## 3.2.0 Dictionary
# + [markdown] id="9y0EDOu0mKWf" colab_type="text"
# ### 3.2.1 Dictionary Syntax
# + id="vmaCOWXNmKex" colab_type="code" outputId="592f21e8-cdba-4432-cf07-6eac7ea21adb" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Dictionary is also called as a "key value pair".
# You can directly extract the value based on the key
# Let's make a dictionary:
mygeobook = {"Country":"USA","Capital":"Washington,Dc","President":"<NAME>","Region":"America"}
mygeobook['Country'] # Try change this to different keys in the dictionary
# + id="7AgoLl_6mKlu" colab_type="code" outputId="b58ae957-32c3-4146-d673-dff550fec618" colab={"base_uri": "https://localhost:8080/", "height": 34}
# We can check the type of the mygeobook variable
type(mygeobook)
# + [markdown] id="icLMI8e7346w" colab_type="text"
# ### 3.2.2 Dictionary examples
# + id="ZVdu4LQ-mJ1U" colab_type="code" outputId="23595e59-adad-45da-c84a-7c12e43b00d0" colab={"base_uri": "https://localhost:8080/", "height": 85}
# You can reassign a new value by the key
mygeobook['Region'] = "North America"
mygeobook # Region changed from "America" to "North America"
# + id="Ne41WRC7mJnr" colab_type="code" outputId="4a6bf5c9-a5f1-46c2-80ff-aa1c174be691" colab={"base_uri": "https://localhost:8080/", "height": 102}
# It can also store a list as the value
mygeobook['Cities'] = ['new york', 'san fransisco', 'chicago']
mygeobook
# + [markdown] id="IpPspJIjobop" colab_type="text"
# ## 3.3.0 Tuple
# + [markdown] id="LA9ZLilG4Fvz" colab_type="text"
# ### 3.3.1 Tuple syntax
# + id="BSyFaiGNobka" colab_type="code" outputId="825d4e91-d644-40e7-e593-2f4d87db4d0c" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Tuples are indicated by round brackets (parentheses), not curly brackets
num_tuple = (1, 2, 3)
num_tuple
# + id="xiDfqnuHobeL" colab_type="code" outputId="1372f1f2-1123-4e2a-b577-063fc36b1451" colab={"base_uri": "https://localhost:8080/", "height": 34}
# We can check the type of the num_tuple variable
type(num_tuple)
# + [markdown] id="kRJrT1SY3-R8" colab_type="text"
# ### 3.3.1 Tuple examples
# + id="mrudPw6IobW4" colab_type="code" outputId="6422b3bf-2dc9-4514-b91a-199d00c3a9aa" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Tuple is similar to list, you can retrive value by using the index
num_tuple[0]
# + id="5ey9tavQo990" colab_type="code" outputId="7d3f6a8f-e09b-4397-9e93-4c41c170487d" colab={"base_uri": "https://localhost:8080/", "height": 181}
# The main difference is new values cannot be assigned
# This is called immutable
num_tuple[0] = 3 # This will throw an error
# + id="EkCCHrgI4gIn" colab_type="code" colab={}
# You can only reassign the whole tuple
tuple_a = ("apples", "bananas", "oranges")
print(tuple_a)
tuple_a = ("berries", "bananas", "oranges")
print(tuple_a)
# + [markdown] id="144UJ71Mk0mN" colab_type="text"
# # Module 4: Operations and Control Flow
# + [markdown] id="n-tO2k8YpZSn" colab_type="text"
# ## 4.0.0 Indentation matters!
# + id="gnLoGWMspY_t" colab_type="code" outputId="4f73986f-7cd6-4c0e-e8dc-9f329510b600" colab={"base_uri": "https://localhost:8080/", "height": 34}
# To tell python that you can in a certain operations (or control flow)
# You have to use spaces to indicate that
# We call it indentation
programming_lauguage = 'Python'
if(programming_lauguage == 'Python'):
# Add four spaces for the line below to fix the error
print('I am learning ' + programming_lauguage)
else:
print('I am learning something else')
# + [markdown] id="MotSW311k0p3" colab_type="text"
# ## 4.1.0 if else statements
# + [markdown] id="1uai3UtM5XsS" colab_type="text"
# ### 4.1.1 if else syntax
# + id="UMrEdzebpogT" colab_type="code" colab={}
# Here is an example of an if else statement
iphone = 9000
samsung = 5000
if(iphone>samsung):
print('buy Samsung!')
else:
print('buy iphone!')
# + [markdown] id="mTFWTGBWpqwT" colab_type="text"
# ### 4.1.2 elif statements
# + id="WS35eRjb5BwS" colab_type="code" colab={}
# You can also use elif to add one more if clause
iphone = 9000
samsung = 5000
if(iphone > samsung):
print('buy Samsung!')
elif (samsung >= 6000 ):
print('buy iphone!')
else:
print('No need to buy a phone')
# + [markdown] id="m59Le7Tl5HJU" colab_type="text"
# ### 4.1.3 Multiple conditions in if else
# + id="vyLQ9laypq9w" colab_type="code" outputId="e39998c7-6a7b-403a-b984-65553b371940" colab={"base_uri": "https://localhost:8080/", "height": 34}
# You can also evaluate with more than one condition
iphone = 9000
samsung = 5000
if(iphone > samsung) and (samsung <= 6000 ):
print('buy Samsung!')
elif (samsung >= 6000 ):
print('buy iphone!')
else:
print('No need to buy a phone')
# + [markdown] id="plw8BK3_prLT" colab_type="text"
# ## 4.2.0 for loop
# + [markdown] id="HAZm8QczprZt" colab_type="text"
# ### 4.2.1 for loop syntax
# + id="LDfABAHZprn8" colab_type="code" outputId="2a1e11c0-ac89-474c-bf42-d675af12c105" colab={"base_uri": "https://localhost:8080/", "height": 68}
# You can loop through a list of values
shopping_list = ["bananas", "chocolate", "carrots"]
# We can automatically loop through list elements without using indexes
for item in shopping_list:
print("today I brought some " + item)
# + [markdown] id="bfXMtTP83Y7Q" colab_type="text"
# ### 4.2.2 for loop examples
# + id="DsIyxuacr6Si" colab_type="code" outputId="85088d8f-7bad-4c0c-aee1-ce82c6161fe8" colab={"base_uri": "https://localhost:8080/", "height": 187}
# You can also loop through a range of numbers
result=0
for i in range(1,11):
result = result+i
print(result)
# + id="kt0-stsApsWB" colab_type="code" outputId="eb52dd09-0ed8-457c-af7e-7095623ac08d" colab={"base_uri": "https://localhost:8080/", "height": 68}
# You can loop over two lists together
# Using zip()
list_1 = [1, 2, 3]
list_2 = [3, 4, 5]
for x, y in zip(list_1, list_2):
print(x, y)
# + id="DYVceZqTpsZB" colab_type="code" outputId="47a16a3e-8af1-47e9-a9b0-94f7d9c29d24" colab={"base_uri": "https://localhost:8080/", "height": 119}
# You can also get the index of the item during the loop
# Using enumerate()
list_enumerate = [1, 2, 3, 4, 5, 6]
for i, x in enumerate(list_enumerate):
print(i, x)
# + [markdown] id="jwRk0Qjg5RVh" colab_type="text"
# ## 4.3.0 While loop
# + [markdown] id="STu5AViN5RjX" colab_type="text"
# ### 4.3.1 while loop syntax
# + id="LKv4ynVU6R_W" colab_type="code" outputId="cf43133f-6e2c-4ed6-84cc-0a9e067aba78" colab={"base_uri": "https://localhost:8080/", "height": 119}
# Here is a while loop example
i = 1
while i < 6:
print(i)
i = i + 1
print(i)
# + [markdown] id="nh8AVg1r6mTI" colab_type="text"
# ### 4.3.2 break statement and infinite loop
# + id="mmhKUGkH5Rvw" colab_type="code" colab={}
#
i = 1
while i < 6:
print(i)
if i == 3:
break
i += 1
# + id="rh_4ZkQRpscq" colab_type="code" colab={}
| 2020-01-beginner/course-material/py_class_1_and_2_recap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Time-dependent PDEs
#
# So far we've seen ODEs, and looked at time-integration techniques, and then PDEs but we mostly focused on stationary problems. Now we will combine the two and look at time-dependent PDEs. As a model problem we will consider the [*heat equation*](https://en.wikipedia.org/wiki/Heat_equation) which models the diffusion of heat in a material with some given thermal conductivity
#
# $$
# \partial_t u - \alpha \nabla^2 u = 0
# $$
#
# augmented with appropriate initial and boundary conditions. We will look at both implicit and explicit time integration schemes for this equation, starting with explicit schemes.
# +
# %matplotlib notebook
from matplotlib import pyplot
import numpy
pyplot.style.use('ggplot')
from collections import namedtuple

Point = namedtuple("Point", ("x", "y"))


class Grid(object):
    """Uniform tensor-product grid on the rectangle with corners P0 and P1."""

    def __init__(self, Nx, Ny, P0=Point(0, 0), P1=Point(1, 1)):
        """Build an Nx-by-Ny grid of points spanning corners P0 and P1."""
        (X0, Y0), (X1, Y1) = P0, P1
        self.W = X1 - X0
        self.H = Y1 - Y0
        self.Nx = Nx
        self.Ny = Ny
        xs = numpy.linspace(X0, X1, Nx)
        ys = numpy.linspace(Y0, Y1, Ny)
        self.XY = numpy.meshgrid(xs, ys, indexing="ij")

    @property
    def ndof(self):
        """Total number of grid points (degrees of freedom)."""
        return self.Nx * self.Ny

    @property
    def hx(self):
        """Grid spacing in the x direction."""
        return self.W / (self.Nx - 1)

    @property
    def hy(self):
        """Grid spacing in the y direction."""
        return self.H / (self.Ny - 1)

    def alpha(self, i, j):
        """Map the 2D index (i, j) to a flat row/column index."""
        return i * self.Ny + j

    def new_vector(self, components=1):
        """Return a zeroed float array shaped for this grid.

        With components > 1 the result gains a trailing component axis.
        """
        shape = (self.Nx, self.Ny)
        if components > 1:
            shape = shape + (components,)
        return numpy.zeros(self.Nx * self.Ny * components, dtype=float).reshape(shape)

    def contourf(self, u, levels=11, ax=None):
        """Filled-contour plot of the scalar field u, with a colorbar."""
        U = u.reshape(self.Nx, self.Ny)
        if ax is None:
            pyplot.figure()
            contours = pyplot.contourf(*self.XY, U, levels)
        else:
            contours = ax.contourf(*self.XY, U, levels)
        pyplot.colorbar(contours)
        return contours

    def quiver(self, u, colour=None, ax=None):
        """Arrow plot of the 2-component vector field u, with a colorbar."""
        U = u.reshape(self.Nx, self.Ny, 2)
        if ax is None:
            pyplot.figure()
            quiver = pyplot.quiver
        else:
            quiver = ax.quiver
        if colour is None:
            vecs = quiver(*self.XY, U[..., 0], U[..., 1])
        else:
            vecs = quiver(*self.XY, U[..., 0], U[..., 1], colour)
        pyplot.colorbar(vecs)
        return vecs
# -
# ## An explicit scheme
#
# We will first discretise the time derivative. Recall the general form of an ODE is
#
# $$
# \partial_t u = f(t, u)
# $$
#
# where here we have
#
# $$
# f(t, u) = \alpha \nabla^2 u.
# $$
#
# In an explicit scheme, we evaluate $f(u)$ at the beginning of the timestep. We'll start with explicit Euler
#
# $$
# u^{n+1} = u^n + \Delta t \alpha \nabla^2 u^n.
# $$
#
# So given an initial condition $u^0$ we just need to be able to compute $\alpha \nabla^2 u^0$ and add it on to get the value at the next timestep.
#
# Let's solve this problem on the square domain $\Omega = [0, 1] \times [0, 1]$ with the boundary conditions
#
# $$
# \begin{aligned}
# u &= 1 && x = 0, y \in [0.25, 0.75]\\
# u &= 0 && x = 1, y \in [0.6, 0.8]\\
# \nabla u \cdot n &= 0 && \text{otherwise}.
# \end{aligned}
# $$
#
# We can think of this as modelling a 2D room with a radiator on one wall, a window on the other, and perfectly insulating (ha!) walls everywhere else.
# +
import numba
@numba.jit(nopython=True)
def f(un, f_, Nx, Ny, hx, hy, stencil):
    """Apply the 5-point stencil to ``un``, writing the result into ``f_``.

    ``un`` is the current state on the (Nx, Ny) grid; ``f_`` is a
    preallocated output array of the same shape (overwritten and returned).
    ``hx``/``hy`` are the grid spacings and ``stencil`` holds the five
    weights for the neighbours (i-1,j), (i,j-1), (i,j), (i,j+1), (i+1,j).

    Rows on the two Dirichlet boundary segments are forced to zero, so
    those boundary values never change during time stepping.
    """
    for i in range(Nx):
        for j in range(Ny):
            f_[i, j] = 0
            # Dirichlet boundary
            if i == 0 and 0.25 <= j*hy <= 0.75:
                f_[i, j] = 0
            elif i == Nx - 1 and 0.6 <= j*hy <= 0.8:
                f_[i, j] = 0
            else:
                for idx, (i_, j_) in enumerate([(i-1, j), (i, j-1), (i, j), (i, j+1), (i+1, j)]):
                    # Homogeneous Neumann everywhere else: i-1 -> i+1 (i = 0), i+1 -> i-1 (i = Nx - 1), etc...
                    # The abs() folding reflects out-of-range indices back into the grid.
                    i_ = (Nx - 1) - abs(Nx - 1 - abs(i_))
                    j_ = (Ny - 1) - abs(Ny - 1 - abs(j_))
                    f_[i, j] += stencil[idx] * un[i_, j_]
    return f_
# -
# Notice how on the Dirichlet boundary, we set the update function to return zero. This way, as long as our initial condition satisfies the boundary conditions, it will do so for all time. For the homogeneous Neumann condition, we implement the symmetric "reflected" condition (rather than a one-sided difference).
#
# Let's go ahead and integrate this.
def setup(N):
    """Create an N-by-N Grid and an initial state satisfying the BCs.

    The returned field is zero everywhere except on the x = 0 wall, where
    it is set to 1 for y in [0.25, 0.75] (the Dirichlet "radiator" segment).
    """
    grid = Grid(N, N)
    u = grid.new_vector()
    hot = [j for j in range(grid.Ny) if 0.25 <= j * grid.hy <= 0.75]
    u[0, hot] = 1
    return grid, u
def explicit_euler(u0, dt, grid, alpha=1, T=5):
    """Integrate du/dt = alpha * laplacian(u) with explicit Euler steps.

    Starting from ``u0``, steps with timestep ``dt`` until time ``T`` (or
    until the update is negligible, i.e. a steady state has been reached)
    and returns the lists of times and states visited.
    """
    # Combined 5-point stencil.  The sign is flipped relative to -\nabla^2
    # because we solve \partial_t u = +\nabla^2 u.
    stencil = (1/grid.hx**2 * numpy.array([1, 0, -2, 0, 1])
               + 1/grid.hy**2 * numpy.array([0, 1, -2, 1, 0]))
    t, u = 0, u0
    ts, us = [t], [u]
    update = numpy.zeros_like(u0)
    while t < T:
        update = f(u, update, grid.Nx, grid.Ny, grid.hx, grid.hy, stencil)
        # Terminate early once we've reached a steady state.
        if numpy.linalg.norm(update, numpy.inf) < 1e-10:
            break
        # Explicit Euler step: u <- u + dt * f(u).
        u = u + dt*alpha*update
        t += dt
        us.append(u)
        ts.append(t)
    return ts, us
# Now we're ready to integrate the equation, let's try on a relatively coarse grid.
N = 11
alpha = 1
grid, u = setup(N)
dt = 0.00252
ts, us = explicit_euler(u, dt, grid, alpha=alpha, T=10)
grid.contourf(us[-1], levels=20);
# This looks like the solution I'm expecting, but the timestep is *very* small. I only have 10 cells in each direction.
#
# Let's see what happens when we make the timestep bigger.
N = 11
alpha = 1
grid, u = setup(N)
dt = 0.00255
ts, us = explicit_euler(u, dt, grid, alpha=alpha, T=10)
grid.contourf(us[-1], levels=40);
# ## Instability for large timesteps
#
# Uh-oh, this looks bad. What's going on? We have hit the [CFL](https://en.wikipedia.org/wiki/Courant–Friedrichs–Lewy_condition) constraint for this PDE.
#
# This property of a timestepping scheme, named after three mathematicians, Courant, Friedrichs, and Lewy, provides us with a rule for determining an appropriate maximum timestep given a particular spatial discretisation. An intuition for what is going on is that the *physical* equation has some domain of dependence. A region of the solution at time $t$ affects some other region of the solution at $t + \Delta t$. If our numerical scheme fails to capture this dependence, we get bad behaviour.
#
# In other words, if we pick a timestep that is too large, information can propagate "too fast" in our numerical simulation.
#
# The CFL condition was developed in the analysis of advection equations
#
# $$
# \partial_t u - w \cdot \nabla u = 0.
# $$
#
# For which we have the constraint (with $w = 1$)
#
# $$
# \frac{\Delta t}{\Delta x} \le 1.
# $$
#
# That is, I can't push information more than a single cell in one timestep.
#
# For the heat equation, the restriction is much tighter, we need
#
# $$
# \frac{\Delta t}{(\Delta x)^2} \le c
# $$
#
# with $c$ some (dimension-dependent) constant. In two dimensions, for explicit Euler, we have $c = 0.25$.
# ### Eigenvalue analysis
#
# How did I arrive at this magic constant? Recall that the *stability region* for explicit Euler is the unit circle centred at -1 in the complex plane. A necessary condition for stability of the timestepping scheme applied to the scalar Dahlquist test equation
#
# $$
# \partial_t u = \lambda u
# $$
#
# which, discretised with explicit Euler gives
#
# $$
# u^{n+1} = u^n + \lambda\Delta t u^n,
# $$
#
# is that $-2 \le \lambda \Delta t < 0$.
#
# How can we apply this same idea here, when we have
#
# $$
# \partial_t u = \nabla^2 u
# $$
#
# or, discretised
#
# $$
# u^{n+1} = u^n + \Delta t \nabla^2 u^n?
# $$
#
# For this operator, we can find the bound by considering the *eigenvalues* of $\nabla^2$. If we can find them, we can replace the discretised operator by a diagonal one (with the eigenvalues on the diagonal), and then treat each equation separately.
#
# Let's go ahead and discretise $\nabla^2$ and look at the eigenvalues.
def laplacian(grid):
    """Assemble the dense 5-point Laplacian matrix for ``grid``.

    Rows on the two Dirichlet boundary segments are left entirely zero so
    those degrees of freedom never change; every other row applies the
    combined x/y stencil with reflected ("mirror") indices, which imposes
    homogeneous Neumann conditions on the remaining boundary.
    """
    ndof = grid.ndof
    A = numpy.zeros((ndof, ndof))
    # (The unused unpacking of grid.XY was removed; the matrix only needs
    # the grid sizes and spacings.)
    Nx = grid.Nx
    Ny = grid.Ny
    stencilx = 1/grid.hx**2 * numpy.array([1, 0, -2, 0, 1])
    stencily = 1/grid.hy**2 * numpy.array([0, 1, -2, 1, 0])
    stencil = stencilx + stencily
    for i in range(grid.Nx):
        for j in range(grid.Ny):
            row = grid.alpha(i, j)
            # Dirichlet boundary: leave the whole row zero.
            if i == 0 and 0.25 <= j*grid.hy <= 0.75:
                A[row, row] = 0
            elif i == grid.Nx - 1 and 0.6 <= j*grid.hy <= 0.8:
                A[row, row] = 0
            else:
                indices = [(i-1, j), (i, j-1), (i, j), (i, j+1), (i+1, j)]
                # Reflect out-of-range indices back into the grid (Neumann).
                i_ = lambda i_: (Nx - 1) - abs(Nx - 1 - abs(i_))
                j_ = lambda j_: (Ny - 1) - abs(Ny - 1 - abs(j_))
                cols = [grid.alpha(i_(i), j_(j)) for i, j in indices]
                for c, s in zip(cols, stencil):
                    A[row, c] += s
    return A
grid = Grid(11, 11)
A = laplacian(grid)
# We're interested in the *smallest* (most negative) eigenvalue
evals = numpy.linalg.eigvals(A)
evals.min()
# We need, when multiplying this by $\Delta t$ to arrive at a number larger than -2. Which implies
dt = -2/evals.min()
dt
# So $\Delta t = 0.0025$ is right on the edge of stability for our method (hence the problem blowing up with $\Delta t = 0.00255$).
#
# What is the relationship we need between $\Delta x$ and $\Delta t$? The most negative eigenvalue scales with $\frac{1}{(\Delta x)^2}$, and so we need
#
# $$
# \frac{\Delta t}{(\Delta x)^2} = \text{const}.
# $$
#
# Each time we double the spatial resolution we must reduce the timestep by a factor of four!
#
# ### Bounding the eigenvalues of a regular stencil
#
# For the stencils we see in the course, we can put a bound on the eigenvalues (and in particular the smallest one) using a remarkable theorem due to [Gershgorin](https://en.wikipedia.org/wiki/Gershgorin_circle_theorem).
#
# For *any* square matrix $A$ with entries $a_{ij}$, write
#
# $$
# R_i = \sum_{j \ne i} |a_{ij}|
# $$
#
# (the sum of the absolute value of the off-diagonal entries), and define the disc
#
# $$
# D(a_{ii}, R_i) = \{z \in \mathbb{C} : |z - a_{ii}| \le R_i\}
# $$
#
# (that is, a circle centered at $a_{ii}$ with radius $R_i$).
#
# Then every eigenvalue of $A$ is contained in at least one of these discs.
# +
from matplotlib.patches import Circle
from matplotlib.collections import PatchCollection
def gershgorin(A):
    """Plot the Gershgorin discs of the square matrix A over its eigenvalues.

    Each disc is centred at a diagonal entry A[i, i] with radius equal to
    the sum of the absolute off-diagonal entries of row i; duplicate discs
    are drawn only once.  The eigenvalues are overlaid as points in the
    complex plane.  Returns the matplotlib figure.
    """
    n = len(A)
    evals = numpy.linalg.eigvals(A)
    patches = []
    # draw discs
    seen = set()
    for i in range(n):
        xi = numpy.real(A[i,i])
        yi = numpy.imag(A[i,i])
        # Row sum of absolute values, excluding the diagonal entry itself.
        ri = numpy.sum(numpy.abs(A[i,:])) - numpy.abs(A[i,i])
        if (xi, yi, ri) in seen:
            continue
        circle = Circle((xi, yi), ri)
        patches.append(circle)
        seen.add((xi, yi, ri))
    fig, ax = pyplot.subplots()
    p = PatchCollection(patches, alpha=0.1)
    ax.add_collection(p)
    # Overlay the actual eigenvalues in the complex plane.
    pyplot.plot(numpy.real(evals), numpy.imag(evals),' o')
    pyplot.axis('equal')
    return fig
# -
gershgorin(A);
# We can see that this isn't a very good estimate of many of the eigenvalues, but it's quite good for the minimal one.
#
# So, if I give you a stencil
#
# $$
# \frac{1}{h_x^2}\begin{bmatrix}-1 & 2 & -1\end{bmatrix}
# $$
#
# we can immediately say that the maximal eigenvalue will be less than or equal to $\frac{4}{h_x^2}$, and the minimal one will be greater than or equal to zero.
example = numpy.asarray([[5., 3., 2.],
[4., 6., 5.],
[-3., 1., 4.]])
gershgorin(example);
# ## Breaking through the timestep restriction
#
# Our only chance of being able to take larger timesteps is to increase the size of the stability region. We can try and do so with explicit methods, but we will *always* run into the timestep constraint eventually (since no explicit method contains an unbounded stability region).
#
# Instead, we turn to *implicit* methods. We're now going to have to invert the operator at every timestep, hence our interest in different methods for doing so. We'll do implicit Euler first, for which the discretised problem looks like
#
# $$
# \mathbb{I} u^{n+1} - \Delta t \nabla^2 u^{n+1} = u^n.
# $$
#
# Rearranging, we obtain
#
# $$
# u^{n+1} = (\mathbb{I} - \Delta t \nabla^2)^{-1} u^n
# $$
#
# so our update step is to invert an operator onto the old state, rather than applying the operator to the state.
# +
import scipy.linalg
def implicit_euler(u0, dt, grid, alpha=1, T=5):
    """Integrate du/dt = alpha * laplacian(u) with implicit Euler steps.

    Each step solves (I - dt * alpha * L) u_new = u_old.  The operator is
    LU-factorised once up front and the factorisation is reused for every
    step.  Returns the lists of times and states visited.
    """
    operator = numpy.eye(grid.ndof) - dt * alpha * laplacian(grid)
    factors = scipy.linalg.lu_factor(operator)
    t, u = 0, u0
    ts, us = [t], [u]
    while t < T:
        u = scipy.linalg.lu_solve(factors, u.flat).reshape(u.shape)
        t += dt
        us.append(u)
        ts.append(t)
        # Stop once successive states agree to tolerance (steady state).
        if numpy.linalg.norm(us[-2] - us[-1], numpy.inf) < 1e-10:
            break
    return ts, us
# -
N = 21
grid, u = setup(N)
dt = 1
ts, us = implicit_euler(u, dt, grid, T=100)
grid.contourf(us[-1], levels=20);
| material/finite-difference-III.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/hamzarabi3/Bike-sharing-data-analysis-case-study/blob/main/Cyclics_bike_sharing_code.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="SMZJFd2ZDkcO"
# # How Does a Bike-Share Navigate Speedy Success?
# + [markdown] id="Jlz8M_D7EOW7"
# ## Introduction
# **1. About the company**
#
#
# <img src='https://images.unsplash.com/photo-1565444872947-6fd5f91ee0f0?ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&ixlib=rb-1.2.1&auto=format&fit=crop&w=889&q=80'/>
#
#
# In 2016, Cyclistic launched a successful bike-share offering. Since then, the program has grown to a fleet of 5,824 bicycles that
# are geotracked and locked into a network of 692 stations across Chicago. The bikes can be unlocked from one station and
# returned to any other station in the system anytime.
#
# Until now, Cyclistic’s marketing strategy relied on building general awareness and appealing to broad consumer segments. One
# approach that helped make these things possible was the flexibility of its pricing plans: single-ride passes, full-day passes, and
# annual memberships.
#
# Customers who purchase single-ride or full-day passes are referred to as *casual riders*. Customers who purchase annual memberships are *Cyclistic members*.
#
# **2. Business Task**
# * Identify the differences between Casual riders and Cyclistic members in their use of Cyclics bikes.
#
# **3. Stakeholders**
# * Primary stakeholder : <NAME> (Director of Marketing)
# * Secondary Stakeholders : Cyclistic Marketing analytics team
#
#
#
#
# + id="ELKKQT8MAWdA"
import pandas as pd
from os import listdir
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('whitegrid')
# + [markdown] id="OKacpBBN77Q9"
# ## Preparation
# + [markdown] id="faW4gFfrCMln"
# **1. Download the data**
# + id="Fc1_VPjkDM9k" colab={"base_uri": "https://localhost:8080/"} outputId="45c1b0c2-6c42-464f-fc8d-495881feb384"
# %%shell
# # cd /content/drive/MyDrive/DATAsets/Cyclistic
# wget https://divvy-tripdata.s3.amazonaws.com/202004-divvy-tripdata.zip
# wget https://divvy-tripdata.s3.amazonaws.com/202006-divvy-tripdata.zip
# wget https://divvy-tripdata.s3.amazonaws.com/202007-divvy-tripdata.zip
# wget https://divvy-tripdata.s3.amazonaws.com/202008-divvy-tripdata.zip
# wget https://divvy-tripdata.s3.amazonaws.com/202009-divvy-tripdata.zip
# wget https://divvy-tripdata.s3.amazonaws.com/202010-divvy-tripdata.zip
# wget https://divvy-tripdata.s3.amazonaws.com/202011-divvy-tripdata.zip
# wget https://divvy-tripdata.s3.amazonaws.com/202012-divvy-tripdata.zip
# wget https://divvy-tripdata.s3.amazonaws.com/202101-divvy-tripdata.zip
# wget https://divvy-tripdata.s3.amazonaws.com/202102-divvy-tripdata.zip
# wget https://divvy-tripdata.s3.amazonaws.com/202103-divvy-tripdata.zip
# unzip \*.zip
# # rm /content/drive/MyDrive/DATAsets/Cyclistic/*.zip
# # rm -d -r /content/drive/MyDrive/DATAsets/Cyclistic/__MACOSX
# cp -r /content/drive/MyDrive/DATAsets/Cyclistic /content
# + id="653ajRZEHuNv"
path='/content/Cyclistic'
# + [markdown] id="Fzg0j14aJlpD"
# **2. About the dataset ?**
#
# We will be using a public dataset published by [Divvy](https://www.divvybikes.com/about) :
#
# >Divvy is Chicagoland’s bike share system across Chicago and Evanston. Divvy provides residents and visitors with a convenient, fun and affordable transportation option for getting around and exploring Chicago.
#
#
# This dataset is made public under this [license](https://www.divvybikes.com/data-license-agreement) which grants us the right to *access, reproduce, analyze, copy, modify, distribute in your product or service and use the Data for any lawful purpose.*
#
# The dataset consists of 12 csv files corresponding to 12 months of Divvy's customer data from April 2020 to March 2021. Considering the time of the analysis, this dataset is perfectly **current**.
# + [markdown] id="Ea1oYdEx9kyb"
# ## Processing
# + id="9brv4slxH0U-"
files=listdir(path)
# + [markdown] id="AfqFriU894s8"
# **1. Merge all in one file**
# + id="bj2HF-mTJTjb"
# Read every monthly CSV and merge them into a single DataFrame.
# DataFrame.append is deprecated (removed in pandas 2.0), and appending
# inside a loop copies the growing frame each iteration; a single
# pd.concat over all frames does one merge instead.
all = pd.concat([pd.read_csv(path + '/' + f) for f in files])
# + colab={"base_uri": "https://localhost:8080/"} id="X6aCgILP7Inc" outputId="032a3225-55cf-4948-8d06-75f22213e946"
len(all['ride_id'].unique()) #how many rides are there?
# + [markdown] id="I7xZemyL-YPY"
# **2. Datetime columns**
# + id="d-fLC6ng7oal"
# Parse the raw timestamp strings into proper datetime columns.
all['started_at']=pd.to_datetime(all['started_at'])
all['ended_at']=pd.to_datetime(all['ended_at'])
# + id="T4uf3iBV8W5O"
# Ride duration in whole minutes (floor division of total seconds).
all['ride_duration']=all['ended_at']-all['started_at']
all['ride_duration']=all['ride_duration'].dt.total_seconds()//60
all=all.query('ride_duration > 0') #ride duration must be positive
# + id="0zMx8C15Flr3"
# Derive calendar features (month, weekday name, quarter, hour) from both
# the start and end timestamps.  A loop replaces the duplicated
# copy-paste blocks for 'start' and 'end'; column order is unchanged.
for d in ('start', 'end'):
    stamps = all[d + 'ed_at'].dt
    all[d + '_month'] = stamps.month
    all[d + '_dayofweek'] = stamps.day_name()
    all[d + '_quarter'] = stamps.quarter
    all[d + '_hour'] = stamps.hour
# + id="7CT6mHGmfIhD"
all.to_csv('/content/drive/MyDrive/Cyclistic_project/processed_divvy_dataset.csv',index=0)
# + id="S30iENO5fsQB"
del all
# + [markdown] id="f-GPiXEEM8l7"
# ## Analysis
# + colab={"base_uri": "https://localhost:8080/"} id="2dso_V1Bf5yf" outputId="bece20af-8d3f-4a55-9209-8965ac4ab557"
data=pd.read_csv('/content/drive/MyDrive/Cyclistic_project/processed_divvy_dataset.csv')
data['started_at']=pd.to_datetime(data['started_at'])
data['ended_at']=pd.to_datetime(data['ended_at'])
# + colab={"base_uri": "https://localhost:8080/", "height": 294} id="wdyreMliAq0R" outputId="b3ebbe28-e9b1-486e-cba8-e72a3a15c6cc"
data[['ride_duration']].describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="dTiKODR6fnU3" outputId="70d7e5b0-9636-440c-8f4e-9bf54ef3db6a"
data.head(3)
# + [markdown] id="WTVVFACLlmw6"
# **1. Rides count**
# + colab={"base_uri": "https://localhost:8080/", "height": 404} id="rc1mVbHmog38" outputId="52db61c7-9b51-49b7-e1b2-527797938756"
plt.figure(figsize=(16,6))
sns.countplot(data=data,
x='start_dayofweek',
hue='member_casual',
palette='deep',
order=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']
);
plt.xlabel('Day')
plt.ylabel('Number of rides')
plt.title('Total rides per day for each customer type');
plt.legend(['Casual customer','Member'])
plt.savefig('/content/drive/MyDrive/Cyclistic_project/rides_per_day.png');
# + id="6pgBObZdmyXd"
months=[4,5,6,7,8,9,10,11,12,1,2,3]
# + colab={"base_uri": "https://localhost:8080/", "height": 404} id="hCwoMjJpvyhz" outputId="b0dfe26a-cdd7-4221-f855-0f88c8477477"
plt.figure(figsize=(16,6))
sns.countplot(data=data,
x='start_month',
hue='member_casual',
palette='deep',
order=months
);
plt.xlabel('Month')
plt.ylabel('Number of rides')
plt.title('Total rides per month for each customer type');
plt.legend(['Casual customer','Member'])
plt.savefig('/content/drive/MyDrive/Cyclistic_project/rides_per_month.png');
# + [markdown] id="BJCwDUtroAiS"
# > From the above visuals we can spot some key differences :
# > * Casual members daily rides count is at its max on weekends
# > * Although, Members rides count is minimal on weekends, their demand on rides is fairly stable and it remains between 250k and slightly surpasses 300k on Saturday compared to casual customers demand which goes from less than 150k in workdays to more than 300k on Saturdays.
# > * The demand on bike sharing service changes significantly around the year, this is probably because it snows in winter in Chicago.
# + [markdown] id="x81IehXSl-Lr"
# **2. Ride duration**
# + colab={"base_uri": "https://localhost:8080/", "height": 172} id="cjiDOTTofJBo" outputId="de2fd7c7-2957-4a2b-a727-ed4111cccb2f"
pd.pivot_table(data,index='member_casual',values='ride_duration',aggfunc=[np.mean,np.std])
# + colab={"base_uri": "https://localhost:8080/", "height": 404} id="2NknQ49rW7pW" outputId="f4d1de55-3df8-4940-fce5-2524a0610d99"
# Mean ride duration by start hour, split by customer type.
T=data.groupby(['start_hour','member_casual']).mean().reset_index()[['start_hour','member_casual','ride_duration']]
plt.figure(figsize=(16,6))
sns.barplot(data=T,x='start_hour',y='ride_duration',hue='member_casual')
plt.xlabel('Hour')
plt.ylabel('Mean ride duration in minutes')
plt.title('Mean ride duration per hour for each customer type');  # fixed typo: "duratin"
#plt.legend(['Casual customer','Member'])
plt.savefig('/content/drive/MyDrive/Cyclistic_project/mean_ride_duration_per_hour.png');
# + colab={"base_uri": "https://localhost:8080/", "height": 404} id="UxF0rBRAi3bw" outputId="f5ed2a5f-5ab6-400b-abe2-da07228305f0"
plt.figure(figsize=(16,6))
sns.boxplot(data=T,y='member_casual',x='ride_duration')
plt.ylabel('Customer type')
plt.xlabel('Mean ride duration in minutes')
plt.title('Dispersion of ride durations for Casual customers and members');
# + colab={"base_uri": "https://localhost:8080/", "height": 405} id="5rEQ0mbjXpy4" outputId="6666b1b7-41f9-49f3-d794-c504c550d7b5"
# Mean ride duration by day of week, split by customer type.
T=data.groupby(['start_dayofweek','member_casual']).mean().reset_index()[['start_dayofweek','member_casual','ride_duration']]
plt.figure(figsize=(17,6))
sns.barplot(data=T,x='start_dayofweek',y='ride_duration',hue='member_casual',order=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday'])
plt.ylabel('Mean ride duration in minutes')
plt.title('Mean ride duration per day for each customer type');  # fixed typo: "duratin"
#plt.legend(['Casual customer','Member'])
plt.savefig('/content/drive/MyDrive/Cyclistic_project/mean_rides_duration_per_day.png');
# + [markdown] id="av6JJHgy7lKy"
# > * The average duration of a casual rides is 3 times that of a member's ride.
# > * Members ride durations are short and show less dispersion
# + id="5LX9GgFRVC_e"
| Cyclics_bike_sharing_code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=false editable=true
# # Lab: _The Maltese Python_
#
# <div class="alert alert-block alert-warning">
# <p>This activity only works if we're in our JupyterLab environment. If you haven't finished the <a href = '../activities/activity-1-github-clone.md'>"Cloning" a repository</a> worksheet, please do so now.
# </div>
#
# ---
#
# <img src = "https://cs.allegheny.edu/sites/dluman/cmpsc100/cmpsc-100-maltese-python.png">
#
# ## Overview
#
# > In 2021 a professor attempted to boggle their students' minds by sending them a game in which they hid a Golden Python whose scales were conjured from digital text. An enterprising computer system seized this priceless token and the fate of the Maltese Python remains a mystery to this day...
#
# For no reason whatsoever, you find yourself at the door to the very mansion where rumors place the whereabouts of the Maltese Python. (Is there really any benefit to asking why you find yourself here?)
#
# Of course, however, this is a digital mansion made of folders located in the `/mansion` file system and, as you would expect, this activity is meant to help you practice commands like:
#
# * `cd`
# * `ls`
# * `pwd`
#
# You will use these commands to go from room to room in the mansion, keeping an eye out for secret passages (**HINT**: typing `secretpassages` at any time _may_ help you). When you think you've found the python itself (you'll know when you do), type `claim` to steal it away (to your home directory; think `~`).
#
# Keep the mansion map handy so that as you `cd` and `pwd` through the directory structure.
#
# ### Special note
#
# `secretpassage` and `claim` _are not_ traditional terminal commands. They're made up for this exercise. Not real, but no less fun.
#
# ## Mansion map
#
# ```
# [GARRET]
# \ [BALCONY] [LARDER]
# \ / /
# [TERRARIUM] [LIBRARY] [GALLERY] [CELLAR] /
# \ | | | /
# \ | ------------ | /
# \ | / | /
# [CONSERVATORY] | /
# | -------[KITCHEN]
# | |
# [BALLROOM] [DININGROOM]
# \ /
# |
# [GRAND STAIRCASE]
# |
# [PARLOR] | [DRAWING ROOM]
# \ | /
# \ | /
# \ | /
# \|/
# |
# |
# [FOYER] <---- You start here
# ```
#
# ## Requirements
#
# * `the_maltese_python!` file is `claim`ed
# * The file (`the_maltese_python!`) is in the `lab` folder of your assignment repository
# * The `claim` command performs this action for you
# * A completed:
# * Reflection completed in the [writing/reflection.md](writing/reflection.md) file
#
# ## Command chart
#
# Here's a table containing all of the commands that you should use to complete this exercise and some examples of each:
#
# |Command|Full name|Paraphrase|Examples|
# |-------|---------|----------|--------|
# |`cd`|**C**hange **D**irectory|I would like to go...|`cd ..` (up one level, to the parent directory)|
# |`ls`|**L**i**s**t|What can I see?|`ls` (list current directory)|
# |`pwd`|**P**ath to **W**orking **D**irectory|Where am I?|`pwd` (gets current directory)|
#
# ## Tips
#
# * Keep in mind that while `secretpassage` _may_ help you, it equally _may not_
# * Practice `cd`, `pwd`, and `ls` liberally -- you won't hurt anything
# * If you're stuck, use the `pwd` command and consult the [mansion map](#Mansion-map)
# * Don't forget to `claim` the real Maltese Python when you find it!
#
# <div class="alert alert-block alert-danger">
# You must find the python using your <b>terminal</b>.
# </div>
#
# ### When you're ready
# + jupyter={"source_hidden": true} tags=[]
from IPython.display import HTML
HTML('<a href="", data-commandlinker-command="terminal:create-new">Click here to open a terminal.</a>')
# -
# * Use `cd /mansion` as your first command to access the Mansion
#
# ## A secret quest
# <div class="alert alert-block alert-info">
# <p>Somewhere in the mansion lies a secret password. Find it and communicate it to your instructor in a clandestine <strong>NON-DIGITAL</strong> way (i.e. it must be spoken aloud) to claim a prize.</p>
# </div>
# + [markdown] tags=[]
# ## Display your trophy! Type the Markdown required to display `cage/the_maltese_python.png` in the space below using an image tag:
# -
# 
| lab/week-0-lab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="sCmtzkeGPI2Q"
# # Category 4
#
# RNN(순환신경망)을 활용한 텍스트 분류 (Text Classification)
# + [markdown] id="W1Hj9c1NPbPu"
# ## 문제
# + [markdown] id="RcvEYUuhPb3f"
# NLP QUESTION
#
# For this task you will build a classifier for the sarcasm dataset
# The classifier should have a final layer with 1 neuron activated by sigmoid as shown.<br/><br/>
# It will be tested against a number of sentences that the network hasn't previously seen<br/>
# And you will be scored on whether sarcasm was correctly detected in those sentences
#
#
# -------------------------------
# **자연어 처리**<br/><br/>
# 이 작업에서는 sarcasm 데이터 세트에 대한 분류기를 작성합니다.
# 분류기는 1 개의 뉴런으로 이루어진 sigmoid 활성함수로 구성된 최종 층을 가져야합니다.<br/>
# 제출될 모델은 데이터셋이 없는 여러 문장에 대해 테스트됩니다.
# 그리고 당신은 그 문장에서 sarcasm 판별이 제대로 감지되었는지에 따라 점수를 받게 될 것입니다
#
# + [markdown] id="7C3ewm9XQHgr"
# -----------------------------------
#
# + [markdown] id="YE_gqvyN3Ato"
# ## 1. import
# + id="HKMbpxX8Ro-h" executionInfo={"status": "ok", "timestamp": 1629906052855, "user_tz": -540, "elapsed": 3134, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
import json
import tensorflow as tf
import numpy as np
import urllib
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional, Flatten
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import ModelCheckpoint
# + [markdown] id="wEf5XJJE38vl"
# # 2.Load dataset
# + id="7FLOv_TrRo-m" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1629906060825, "user_tz": -540, "elapsed": 1087, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="cad7f963-1568-4689-faf0-01708b74a55d"
url = 'https://storage.googleapis.com/download.tensorflow.org/data/sarcasm.json'
urllib.request.urlretrieve(url, 'sarcasm.json')
# + [markdown] id="5vTO4qdw5rUh"
# `datas` 변수에 `json`을 활용하여 로드
# + id="yBg4ZmnKRo-v" executionInfo={"status": "ok", "timestamp": 1629906065852, "user_tz": -540, "elapsed": 373, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
with open('sarcasm.json') as f:
datas = json.load(f)
# + [markdown] id="bI31En1o5p7R"
# `datas` 5개 출력
#
# * `article_link`: 뉴스 기사 URL
# * `headline`: 뉴스기사의 제목
# * `is_sarcastic`: 비꼬는 기사 여부 (비꼼: 1, 일반: 0)
# + id="Ylqxc8fcRo-0" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1629906068951, "user_tz": -540, "elapsed": 484, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="b123ceab-6d00-4752-c74f-107ed92bf788"
datas[:5]
# + [markdown] id="M2JA-GrVRo-4"
# # 3. Preprocessing
# + [markdown] id="rbK3O6ig-V_7"
# ### 3-1. 데이터셋 구성
# + [markdown] id="kes4D7Q-7r1n"
# * X (Feature): sentences
# * Y (Label): label
# + id="zgUxM1Rh7_-t" executionInfo={"status": "ok", "timestamp": 1629906116867, "user_tz": -540, "elapsed": 381, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
sentences = []
labels = []
# + id="evOur3s7Ro_G" executionInfo={"status": "ok", "timestamp": 1629906117229, "user_tz": -540, "elapsed": 3, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
# Build parallel feature (headline text) and label (sarcasm flag) lists
# from the parsed JSON records.
# Assigning via comprehensions instead of appending keeps this cell
# idempotent: re-running it no longer duplicates the data the way the
# original append loop did.
sentences = [data['headline'] for data in datas]
labels = [data['is_sarcastic'] for data in datas]
# + id="QxkCHeN-Ro_J" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1629906123572, "user_tz": -540, "elapsed": 408, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="21a495a9-4188-4308-a06b-8dd43b8f2c31"
sentences[:5]
# + id="7AFuC-5eRo_N" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1629906124304, "user_tz": -540, "elapsed": 4, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="004ae486-feb6-4a3b-a1aa-082ba709a612"
labels[:5]
# + [markdown] id="kV-gws0ZIDT5"
# ### 3-2. Train / Validation Set 분리
# + [markdown] id="7ui0tLP38ziX"
# 20,000개를 기준으로 **데이터셋을 분리**합니다.
#
# + id="AP_9RTOFIDT7" executionInfo={"status": "ok", "timestamp": 1629906137308, "user_tz": -540, "elapsed": 780, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
training_size = 20000
# + id="QbKerNkAIDUC" executionInfo={"status": "ok", "timestamp": 1629906137697, "user_tz": -540, "elapsed": 2, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
train_sentences = sentences[:training_size]
train_labels = labels[:training_size]
# + id="VUv12RFJIDUJ" executionInfo={"status": "ok", "timestamp": 1629906138571, "user_tz": -540, "elapsed": 4, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
validation_sentences = sentences[training_size:]
validation_labels = labels[training_size:]
# + [markdown] id="CEXyUrNHRo_S"
# ### 3-3. Tokenizer 정의
# + [markdown] id="5bgjblALRo_c"
# 단어의 토큰화를 진행합니다.
#
# * `num_words`: 단어 max 사이즈를 지정합니다. 가장 **빈도수가 높은** 단어부터 저장합니다.
# * `oov_token`: 단어 토큰에 없는 단어를 어떻게 표기할 것인지 지정해줍니다.
# + id="CyHzPmsg9aWS" executionInfo={"status": "ok", "timestamp": 1629906151438, "user_tz": -540, "elapsed": 386, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
vocab_size = 1000
oov_tok = "<OOV>"
# + id="ORwBwbVIRo_d" executionInfo={"status": "ok", "timestamp": 1629906155577, "user_tz": -540, "elapsed": 365, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
tokenizer = Tokenizer(num_words=vocab_size, oov_token='<OOV>')
# + [markdown] id="5c6Nqj8YRo_g"
# ### 3-4 Tokenizer로 학습시킬 문장에 대한 토큰화 진행
# + [markdown] id="oWptwIFq-B9B"
# `fit_on_texts`로 학습할 문장에 대하여 **토큰화**를 진행합니다.
# + id="mWZFZUFNRo_h" executionInfo={"status": "ok", "timestamp": 1629906167489, "user_tz": -540, "elapsed": 418, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
tokenizer.fit_on_texts(train_sentences)
# + id="_2ZPQndyRo_k" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1629906168848, "user_tz": -540, "elapsed": 12, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="5b3cf533-6777-4ca9-db97-be59dd380e47"
# Preview the word index: print the 25 most frequent tokens and their
# integer ids (word_index is ordered from most to least frequent).
for token, index in tokenizer.word_index.items():
    print(f'{token} \t======>\t {index}')
    if index == 25:
        break
# + [markdown] id="Proj_AWp-Lsm"
# 토큰화된 **단어 사전의 갯수**
# + id="Rufbtj6aRo_n" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1629906182396, "user_tz": -540, "elapsed": 365, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="11ead444-b9c0-4e02-953d-94827b0a5f1b"
len(tokenizer.word_index)
# + [markdown] id="PFaPeh05-iZQ"
# 단어사전은 **dictionary** 형태로 되어 있습니다.
#
# 즉, 단어를 key로 입력하면 값을 return 합니다.
# + id="sxvQwbfVA_fk" executionInfo={"status": "ok", "timestamp": 1629906183989, "user_tz": -540, "elapsed": 4, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
word_index = tokenizer.word_index
# + id="W23IUjuNRo_s" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1629906186986, "user_tz": -540, "elapsed": 3, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="078bb6ee-4282-4465-e54a-6ff289fa5a64"
word_index['hello']
# + id="nMwedFRm-Y9L" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1629906188362, "user_tz": -540, "elapsed": 3, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="95784a8d-1c17-4d64-80db-16c9b86ab044"
word_index['<OOV>']
# + [markdown] id="ozaMIfx_Ro_4"
# ### 3-5 문장(sentences)을 토큰으로 변경 (치환)
# + [markdown] id="cNqh5HgMAcKW"
# `texts_to_sequences`: 문장을 숫자로 **치환** 합니다. Train Set, Valid Set 모두 별도로 적용해주어야 합니다.
# + id="bYNXTtTIRo_5" executionInfo={"status": "ok", "timestamp": 1629906211615, "user_tz": -540, "elapsed": 876, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
train_sequences = tokenizer.texts_to_sequences(train_sentences)
validation_sequences = tokenizer.texts_to_sequences(validation_sentences)
# + [markdown] id="weKqGE4DAolj"
# **변환된 Sequences 확인**
# + id="CKH_vITVRo_8" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1629906213596, "user_tz": -540, "elapsed": 386, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="82b0a863-ab41-49f6-a344-1997669904ee"
train_sequences[:5]
# + [markdown] id="LEgW25CNRpAB"
# 변환 과정에서 '<OOV>'로 **변환된 단어** 확인
# + [markdown] id="PgP_i2wHBScf"
# 빈도수로 지정한 `num_words`=1000 에 의거하여, 빈도수가 1000번째보다 떨어지는 단어는 자동으로 1로 치환됩니다.
#
# 1은 '<OOV>' 입니다. (Out of Vocab)
# + id="Mt22rhELRpAF" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1629906217158, "user_tz": -540, "elapsed": 383, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="36573df9-93c0-47ba-bf31-660dc861cf00"
train_sentences[4]
# + id="ZfwQcmvXRpAJ" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1629906219568, "user_tz": -540, "elapsed": 388, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="8efe421d-a1b5-435f-9bfd-01459cb274ff"
word_index['j'], word_index['k'], word_index['rowling'], word_index['wishes'], word_index['snape'], word_index['happy']
# + id="kGYuwVXBA00V" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1629906220854, "user_tz": -540, "elapsed": 7, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="8320425e-1c75-441e-a956-d196f02ba9b3"
train_sequences[4]
# + [markdown] id="SRCwRYueRpAQ"
# ### 3-6 시퀀스의 길이를 맞춰주기
# + [markdown] id="zAZyufTlRpAR"
# 3가지 옵션을 입력해 줍니다.
#
# * `maxlen`: 최대 문장 길이를 정의합니다. 최대 문장길이보다 길면, 잘라냅니다.
# * `truncating`: 문장의 길이가 `maxlen`보다 길 때 앞을 자를지 뒤를 자를지 정의합니다.
# * `padding`: 문장의 길이가 `maxlen`보다 짧을 때 채워줄 값을 앞을 채울지, 뒤를 채울지 정의합니다.
# + id="CNH-OCTJ9hCN" executionInfo={"status": "ok", "timestamp": 1629906232499, "user_tz": -540, "elapsed": 366, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
# Maximum number of tokens kept per sentence.
max_length = 120
# Which end to truncate when a sentence is longer than max_length ('post' = drop the tail).
trunc_type='post'
# Which end to pad when a sentence is shorter than max_length ('post' = pad at the end).
padding_type='post'
# + id="jdaO9ErnRpAS" executionInfo={"status": "ok", "timestamp": 1629906234628, "user_tz": -540, "elapsed": 451, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
# Pad/truncate both splits to a fixed (num_samples, max_length) int matrix.
train_padded = pad_sequences(train_sequences, maxlen=max_length, truncating=trunc_type, padding=padding_type)
validation_padded = pad_sequences(validation_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
# + [markdown] id="w6eVduGjBzpZ"
# 변환된 후 shape 확인
# + id="tAudfwjTRpAV" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1629906243188, "user_tz": -540, "elapsed": 433, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="315295f7-3162-4ab0-8920-73a8d524f2e3"
train_padded.shape
# + [markdown] id="_p77o7VlIDV4"
# ### 3-7. label 값을 numpy array로 변환
# + [markdown] id="Zk5Xv54HCEtx"
# model이 `list` type은 받아들이지 못하므로, numpy array로 변환합니다.
# + id="RSTya5jCIDV5" executionInfo={"status": "ok", "timestamp": 1629906264025, "user_tz": -540, "elapsed": 368, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
train_labels = np.array(train_labels)
validation_labels = np.array(validation_labels)
# + [markdown] id="GqT-hof9CNT-"
# -------------------------------------------------------------
#
# + [markdown] id="ZV72Y76tRpA3"
# ## Embedding Layer
# + [markdown] id="GFGHxB6gRpA4"
# **고차원을 저차원으로 축소**시켜주는 역할을 합니다.
# + [markdown] id="g-9HwruaRpA5"
# one-hot encoding을 진행했을 때, 1000차원으로 표현되는 단어들을 16차원으로 줄여주는 겁니다. 그렇게 해서 `sparsity`문제를 해소하도록 유도합니다.
# + id="iLHuZfQfDBtL" executionInfo={"status": "ok", "timestamp": 1629906268121, "user_tz": -540, "elapsed": 370, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
embedding_dim = 16
# + [markdown] id="ks25QpDWCyos"
# **변환 전**
# + id="HElkj8fSRpA5" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1629906269366, "user_tz": -540, "elapsed": 13, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="e3929857-04dc-466b-8844-b426bbe31064"
sample = np.array(train_padded[0])
sample
# + [markdown] id="LG2vE6amCv7b"
# **변환 후**
# + id="HJ-d2D1tRpBM" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1629906275748, "user_tz": -540, "elapsed": 6008, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="0594f27a-35e1-494a-e109-2bf088d9ed08"
x = Embedding(vocab_size, embedding_dim, input_length=max_length)
x(sample)[0]
# + [markdown] id="ehNYMSChRpBw"
# #4. 모델 정의 (Sequential)
# + id="Jn8t4uehIDW-" executionInfo={"status": "ok", "timestamp": 1629906282170, "user_tz": -540, "elapsed": 1778, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
# Binary sarcasm classifier: embedding -> stacked BiLSTM encoder -> MLP head.
model = Sequential([
    # Map each token id (< vocab_size) to a dense embedding_dim-vector.
    Embedding(vocab_size, embedding_dim, input_length=max_length),
    # First BiLSTM returns the full sequence so the next BiLSTM can consume it.
    Bidirectional(LSTM(64, return_sequences=True)),
    # Second BiLSTM collapses the sequence to a single fixed-size vector.
    Bidirectional(LSTM(64)),
    Dense(32, activation='relu'),
    Dense(16, activation='relu'),
    # One sigmoid unit: probability that the headline is sarcastic.
    Dense(1, activation='sigmoid')
])
# + [markdown] id="i6umKCF-D-U3"
# 요약 확인
# + id="JjmIBD7QD9FV" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1629906282170, "user_tz": -540, "elapsed": 5, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="d0b32fce-4b09-43aa-82c2-481c90ced4e1"
model.summary()
# + [markdown] id="WCdAX_rXRpB0"
# #5. 컴파일 (compile)
# + [markdown] id="IpJgAudwRpB1"
# 1. `optimizer`는 가장 최적화가 잘되는 알고리즘인 'adam'을 사용합니다.
# 2. `loss`는 이진 분류이기 때문에 `binary_crossentropy`를 사용합니다.
# + id="B2iupQVBRpB1" executionInfo={"status": "ok", "timestamp": 1629906287968, "user_tz": -540, "elapsed": 367, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
# + [markdown] id="yJ0gsuoqSv7z"
# #6. ModelCheckpoint: 체크포인트 생성
# + [markdown] id="oXHmDZ2aSx4O"
# `val_loss` 기준으로 epoch 마다 최적의 모델을 저장하기 위하여, ModelCheckpoint를 만듭니다.
# * `checkpoint_path`는 모델이 저장될 파일 명을 설정합니다.
# * `ModelCheckpoint`을 선언하고, 적절한 옵션 값을 지정합니다.
# + id="v_AvlY0cIDXX" executionInfo={"status": "ok", "timestamp": 1629906293113, "user_tz": -540, "elapsed": 362, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
# Checkpoint file base name (weights only, see save_weights_only below).
checkpoint_path = 'my_checkpoint.ckpt'
# Save weights after any epoch that improves validation loss, so the best
# model (not merely the last one) can be restored after training.
checkpoint = ModelCheckpoint(checkpoint_path,
                             save_weights_only=True,
                             save_best_only=True,
                             monitor='val_loss',
                             verbose=1)
# + [markdown] id="oKy7ZkL6RpB4"
# #7. 학습 (fit)
# + id="lBL_WTG3Er63" executionInfo={"status": "ok", "timestamp": 1629906303395, "user_tz": -540, "elapsed": 442, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
epochs=10
# + id="byjJCmN_RpB4" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1629907177643, "user_tz": -540, "elapsed": 873470, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="fe7e6449-8416-45fd-d716-8dd4075cb10b"
history = model.fit(train_padded, train_labels,
validation_data=(validation_padded, validation_labels),
callbacks=[checkpoint],
epochs=epochs)
# + [markdown] id="shzhTOjAninH"
# # 8. 학습 완료 후 Load Weights (ModelCheckpoint)
# + [markdown] id="kLqb_6XrMvdq"
# 학습이 완료된 후에는 반드시 `load_weights`를 해주어야 합니다.
#
# 그렇지 않으면, 열심히 ModelCheckpoint를 만든 의미가 없습니다.
# + id="4jO1ucZ9ninH" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1629907177644, "user_tz": -540, "elapsed": 45, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="31d0fab9-a2a0-4d17-b395-396aff22c87a"
model.load_weights(checkpoint_path)
# + [markdown] id="1t0xRupR1LmK"
# ## 학습 오차에 대한 시각화
# + id="wwus5OLdFg2t" executionInfo={"status": "ok", "timestamp": 1629907177644, "user_tz": -540, "elapsed": 15, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}}
import matplotlib.pyplot as plt
# + id="luCcz7AUE5Z3" colab={"base_uri": "https://localhost:8080/", "height": 572} executionInfo={"status": "ok", "timestamp": 1629907178529, "user_tz": -540, "elapsed": 898, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="9a7b4c60-d777-4610-a400-a5d4705dae54"
# Training vs. validation loss per epoch.
epoch_axis = np.arange(1, epochs + 1)
plt.figure(figsize=(12, 9))
plt.plot(epoch_axis, history.history['loss'])
plt.plot(epoch_axis, history.history['val_loss'])
plt.title('Loss / Val Loss', fontsize=20)
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend(['loss', 'val_loss'], fontsize=15)
plt.show()
# + id="W98mqIn0E8bW" colab={"base_uri": "https://localhost:8080/", "height": 572} executionInfo={"status": "ok", "timestamp": 1629907178530, "user_tz": -540, "elapsed": 35, "user": {"displayName": "\uae40\ubcc4\ud76c", "photoUrl": "", "userId": "06602448826503759202"}} outputId="3bce17c8-3c2d-4196-8522-9c3662a1becf"
# Training vs. validation accuracy per epoch.
acc_epoch_axis = np.arange(1, epochs + 1)
plt.figure(figsize=(12, 9))
plt.plot(acc_epoch_axis, history.history['acc'])
plt.plot(acc_epoch_axis, history.history['val_acc'])
plt.title('Acc / Val Acc', fontsize=20)
plt.xlabel('Epochs')
plt.ylabel('Acc')
plt.legend(['acc', 'val_acc'], fontsize=15)
plt.show()
| TensorFlow Developer Certificate Practice/TF Certificate 4_RNN NLP (Sarcasm).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Basically Bible Analytics
#
# ## Starting with Basic Metrics
#
# In this notebook, I will look at some basic metrics for the Bible. For instance, it is fairly easy to learn that there are sixty-six books in the Bible, but I don't think I have ever heard anyone share how many chapters or verses are in the Bible. I will explore some of these basic questions.
#
# In order to do this, I need the Bible text. I was able to obtain it from Kaggle here: https://www.kaggle.com/oswinrh/bible#t_asv.csv. If you link to this site, you will see many available versions, but I decided to use the Bible in Basic English because my goal is to eventually apply text analytics to the text.
import pandas as pd
import sqlite3
bible = pd.read_csv(r'C:\Bible Research\Translations\Bible in Basic English\t_bbe.csv')
# ### Basic structure of the data
bible.head()
bible.info()
# Before analyzing the data, I want to save this data frame in a SQL database. I begin by connecting to the SQL database I created, *biblesql*.
conn = sqlite3.connect(r"C:\Bible Research\SQL database\biblesql.db")
# Next, I store the dataframe *bible* as a SQL table called *bible_bbe*.
bible.to_sql('bible_bbe', conn, if_exists='replace', index=False)
# Finally, I query the SQL table I created.
pd.read_sql('select * from bible_bbe where b = 5 limit 5', conn)
# ### Basic counts
#
# We already know that there are sixty-six books in the Bible, but I'll run some code to confirm. I will then find out how many chapters and verses are in the Bible. I will also find out how many words are in this particular version of the Bible. This obviously changes by version.
pd.read_sql('select count(distinct(b)) as books from bible_bbe', conn)
# There are 66 books,
pd.read_sql('select count(c) as chapters from (select distinct(b), c from bible_bbe)', conn)
# 1,189 chapters,
pd.read_sql('select count(v) as verses from bible_bbe', conn)
# And 31,103 verses in the Bible in Basic English.
# Getting a word count is a little more complex. This site was helpful: https://www.geeksforgeeks.org/python-program-to-count-words-in-a-sentence
# +
# Total word count: split every verse's text on whitespace and sum the pieces.
word_counts = [len(verse.split()) for verse in bible['t']]
print('There are', sum(word_counts), 'words in this translation.')
# -
# My next question is, how long would it take the average person to read the Bible all the way through? Everyone reads at a different pace, but this site tells us that the average person can read 300 words a minute: # https://www.google.com/search?q=how+many+words+can+the+average+person+read+per+minute&rlz=1C1CHBF_enUS855US855&oq=how+many+words+can+the+average+per&aqs=chrome.0.0j69i57j0l6.8896j0j7&sourceid=chrome&ie=UTF-8
#
# If we divide 840357 by 300, this should tell us how many minutes it would take an average person to read the Bible.
840357/300
print('Which is', 840357/18000, 'hours')
# This is roughly 46 hours and 40 minutes. This site (http://www.euxton.com/bible.htm) says that it takes 70 hours and 40 minutes to read the Bible at "pulpit rate." I assume this is much slower than the average person can read to themselves.
# Next, how many words are in each book of the Bible, what proportion does each book account for, and how long would each book take an average person to read?
# +
# Per-book totals: book id, word count, and number of distinct chapters.
# A single groupby pass replaces the original nested iterrows scan, which
# re-filtered the entire frame once per book (O(books * verses)).
# sort=False preserves first-appearance order, matching bible.b.unique().
b = []
words = []
chapter = []
for book_id, group in bible.groupby('b', sort=False):
    b.append(book_id)
    words.append(sum(len(text.split()) for text in group['t']))
    chapter.append(group['c'].nunique())
# +
# Assemble the per-book summary frame from the lists built in the previous cell.
books = pd.DataFrame()
books['b'] = b                 # book order (1..66)
books['c'] = chapter           # number of chapters in the book
books['words'] = words         # word count of the book
books['proportion'] = books.words/sum(books.words)   # share of the whole Bible
# NOTE(review): 250 words/minute here, but the narrative above used 300 wpm
# for the whole-Bible estimate — confirm which reading rate is intended.
books['minutes'] = books.words/250
books['hours'] = books.minutes/60
# -
books.head()
# This seems like a good time to store this dataframe as a SQL table.
books.to_sql('books', conn, if_exists='replace', index=False)
# This is interesting, but at this point we are referring to the books of the Bible by their order rather than their given name. I'm going to read in a key to attach names, which will be a little more insightful. I will also save this dataframe as a SQL table.
book_key = pd.read_csv(r'C:\Bible Research\key_english.csv')
book_key.to_sql('book_key', conn, if_exists='replace', index=False)
pd.read_sql('select * from book_key limit 5', conn)
# This dataframe contains the book order, which will allow me to tie this information to the dataframe I already have. It also contains the book name as well as which testament each belongs to and a group variable. The group variable refers to which type of book each is. For instance, Genesis is part of the Law, so it's in group 1. Jude is an epistle, so it's in group 7.
#
# Now, I will merge the two dataframes and sort to see which books are the longest and shortest.
merged = pd.read_sql('select k.*, e.c, e.words, e.proportion, e.minutes, e.hours from books AS e inner join book_key AS k on e.b = k.b order by e.proportion desc', conn)
merged.to_sql('bible_metrics', conn, if_exists='replace', index=False)
pd.read_sql('select * from bible_metrics', conn)
# It looks like the shortest book of the Bible is 2 John, which only takes a little over a minute to read. I'm not surprised to see Psalms in first place, but I thought Gensis would be second.
pd.read_sql('select sum(proportion) as proportion from bible_metrics where b in (1,2,3,4,5)', conn)
# Even though Genesis is the fifth longest book of the Bible, the first five books, the Torah, account for 19% of the Bible. When you finish Deuteronomy, you're 1/5 of the way through.
# Lastly, I want to see the tables I created in my SQL database.
# +
# List every table that now exists in the SQLite database.
tables_cursor = conn.cursor()
tables_cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
print(tables_cursor.fetchall())
| 001 - Basic Bible Analytics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# + [markdown] deletable=true editable=true
# # Variables
# + deletable=true editable=true
from __future__ import print_function
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# -
from datetime import date
date.today()
author = "kyubyong. https://github.com/Kyubyong/tensorflow-exercises"
# + deletable=true editable=true
tf.__version__
# + deletable=true editable=true
np.__version__
# -
# NOTE on notation
#
# _x, _y, _z, ...: NumPy 0-d or 1-d arrays
# _X, _Y, _Z, ...: NumPy 2-d or higer dimensional arrays
# x, y, z, ...: 0-d or 1-d tensors
# X, Y, Z, ...: 2-d or higher dimensional tensors
#
# + [markdown] deletable=true editable=true
# ## Variables
# + [markdown] deletable=true editable=true
# Q0. Create a variable `w` with an initial value of 1.0 and name `weight`.
# Then, print out the value of `w`.
# + deletable=true editable=true
# + [markdown] deletable=true editable=true
# Q1. Complete this code.
# + deletable=true editable=true
# Create a variable w.
w = tf.Variable(1.0, name="Weight")
# Q. Add 1 to w and assign the value to w.
assign_op = ...
with tf.Session() as sess:
sess.run(w.initializer)
for _ in range(10):
print(sess.run(w), "=>", end="")
sess.run(assign_op)
# + [markdown] deletable=true editable=true
# Q2. Complete this code.
# + deletable=true editable=true
w1 = tf.Variable(1.0)
w2 = tf.Variable(2.0)
w3 = tf.Variable(3.0)
out = w1 + w2 + w3
# Q. Add an Op to initialize global variables.
init_op = ...
with tf.Session() as sess:
sess.run(init_op) # Initialize all variables.
print(sess.run(out))
# + [markdown] deletable=true editable=true
# Q3-4. Complete this code.
# + deletable=true editable=true
V = tf.Variable(tf.truncated_normal([1, 10]))
# Q3. Initialize `W` with 2 * W
W = ...
# Q4. Add an Op to initialize global variables.
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op) # Initialize all variables.
_V, _W = sess.run([V, W])
print(_V)
print(_W)
assert np.array_equiv(_V * 2.0, _W)
# Compare the value of `W` with Z = 2 * V.
# + [markdown] deletable=true editable=true
# Q5-8. Complete this code.
# + deletable=true editable=true
g = tf.Graph()
with g.as_default():
W = tf.Variable([[0,1],[2,3]], name="Weight", dtype=tf.float32)
# Q5. Print the name of `W`.
print("Q5.", ...)
# Q6. Print the name of the op of `W`.
print("Q6.", ...)
# Q7. Print the data type of `w`.
print("Q7.", ...)
# Q8. Print the shape of `w`.
print("Q8.", ...)
# Q9. Print the rank (or ndims) of `W`.
print("Q9.", ...)
# Q10. Check if the graph of `W` is the same as `g`.
print("Q10.", ...)
# + [markdown] deletable=true editable=true
# ## Variable helper functions
# + [markdown] deletable=true editable=true
# Q11-15. Complete this code.
# + deletable=true editable=true
tf.reset_default_graph()
w1 = tf.Variable(1.0, name="weight1")
w2 = tf.Variable(2.0, name="weight2", trainable=False)
w3 = tf.Variable(3.0, name="weight3")
with tf.Session() as sess:
# Q11. Initialize the variables w1 and w2.
sess.run(...)
# Q12. Print the name of all global variables
for v in tf.global_variables():
print("global variable =>", ...)
# Q13. Print the name of all trainable variables
for v in tf.trainable_variables():
print("trainable_variable =>", ...)
# + [markdown] deletable=true editable=true
# ## Saving and Restoring Variables
# + [markdown] deletable=true editable=true
# Q14-15. This is a simple example code to find the coefficient of a linear function. (Here y = 2x).
# Complete the code.
# + deletable=true editable=true
tf.reset_default_graph()
w = tf.Variable(0.2, 'weight') # <- This is what we want to find. The true value is 2.
x = tf.random_uniform([1])
y = 2. * x # Let's pretend we don't know the coefficient 2 here.
y_hat = w * x
loss = tf.squared_difference(y, y_hat)
train_op = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
saver = tf.train.Saver()
for step in range(1, 10001):
sess.run(train_op)
import os
if not os.path.exists('model'): os.mkdir('model') # Let's make a folder in order not to discrupt our current folder.
if step % 1000 == 0:
print(sess.run(w), "=>", end="")
# Q14. Save the checkpoint file, giving it a base name of `model/my-model`
save_path = ...
print("Saved successfully")
print(os.listdir('model'))
# Q15. Find the filename of the latest saved checkpoint file.
# And if it exists, restore it.
ckpt = tf.train.latest_checkpoint('model')
print(ckpt)
if ckpt is not None:
...
print("Restored succesfully!")
# + [markdown] deletable=true editable=true
# ## Sharing Variables
# + [markdown] deletable=true editable=true
# Q16. Complete this code.
# + deletable=true editable=true
g = tf.Graph()
with g.as_default():
with tf.variable_scope("foo"):
v = tf.get_variable("vv", [1,]) # v.name == "foo/vv:0"
#Q. Get the existing variable `v`
...
assert v1 == v
# + [markdown] deletable=true editable=true
# Q17. Predict the result of this code.
# + deletable=true editable=true
with tf.variable_scope("foo"):
with tf.variable_scope("bar"):
v = tf.get_variable("vv", [1])
print("v.name =", v.name)
# + [markdown] deletable=true editable=true
# Q18. Complete this code.
# + deletable=true editable=true
value = [0, 1, 2, 3, 4, 5, 6, 7]
# Q. Create an initializer with `value`.
init = ...
tf.reset_default_graph()
x = tf.get_variable('x', shape=[2, 4], initializer=init)
with tf.Session() as sess:
sess.run(x.initializer)
print("x =\n", sess.run(x))
# + [markdown] deletable=true editable=true
# Q19. Complete this code.
# + deletable=true editable=true
# Q. Create an initializer with a normal distribution of mean equals 0 and standard deviation equals 2.
init = ...
tf.reset_default_graph()
x = tf.get_variable('x', shape=[10, 1000], initializer=init)
with tf.Session():
x.initializer.run()
_x = x.eval()
print("Make sure the mean", np.mean(_x), "is close to 0" )
print("Make sure the standard deviation", np.std(_x), "is close to 2" )
# + [markdown] deletable=true editable=true
# Q20. Complete this code.
# + deletable=true editable=true
# Q. Create an initializer with a truncated normal distribution of mean equals 0 and standard deviation equals 2.
init = ...
tf.reset_default_graph()
x = tf.get_variable('x', shape=[1000,], initializer=init)
with tf.Session():
x.initializer.run()
_x = x.eval()
plt.scatter(np.arange(1000), _x)
_avg = np.array([np.mean(_x)] * 1000)
_std = np.array([np.std(_x)] * 1000)
plt.plot(np.arange(1000), _avg, 'r-')
plt.plot(np.arange(1000), _avg + 2*_std, 'g-')
plt.plot(np.arange(1000), _avg - 2*_std, 'k-')
plt.legend(['mean', 'upper 2*std', 'lower 2*std'])
plt.show()
# Note that few data points exceed the boundaries.
# + [markdown] deletable=true editable=true
# Q21. Complete this code.
# + deletable=true editable=true
# Q. Create an initializer with a random uniform distribution of minimum 0 and maximum 1.
init = ...
tf.reset_default_graph()
x = tf.get_variable('x', shape=[5000,], initializer=init)
with tf.Session():
x.initializer.run()
_x = x.eval()
count, bins, ignored = plt.hist(_x, 20, normed=True)
plt.plot(bins, np.ones_like(bins), linewidth=2, color='r')
plt.show()
# Note that few data points exceed the boundaries.
# + deletable=true editable=true
# Check other initializers such as zeros_initializer, ones_initializer, or orthogonal_initializer, as well.
# + [markdown] deletable=true editable=true
# ## Exporting and Importing Meta Graphs
# + [markdown] deletable=true editable=true
# Q22. Complete the code. Make sure you've done questions 14-15.
# + deletable=true editable=true
tf.reset_default_graph()
print("Of course, there're no variables since we reset the graph. See", tf.global_variables())
with tf.Session() as sess:
# Q. Import the saved graph of `model/my-model-10000`.
new_saver = ...
new_saver.restore(sess, 'model/my-model-10000')
for v in tf.global_variables():
print("Now we have a variable", v.name)
# + deletable=true editable=true
| Variables.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="vh_Bs4XdeRzQ"
# # In this notebook we show how we can scrape data from web pages using [Beautiful Soup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/), a Python library.
# <br><br>
# + colab={} colab_type="code" id="P610gMZrd8SE"
#making the necessary imports
from pprint import pprint
from bs4 import BeautifulSoup
from urllib.request import urlopen
# + colab={} colab_type="code" id="jfwgiGjJeBSG"
#specify the URL of the Stack Overflow question we want to scrape
url = "https://stackoverflow.com/questions/415511/how-to-get-the-current-time-in-python"
#fetch the page; urlopen returns the raw HTML bytes
html_bytes = urlopen(url).read()
#parse the raw HTML into a navigable BeautifulSoup tree
soup = BeautifulSoup(html_bytes, 'html.parser')
# -
# As the parsed HTML page (soup) is large, we are just showing some of its output (only 2000 characters).
# +
#pprint(soup.prettify()) #uncomment to print the full HTML structure of the webpage
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="7TbFVmpRg6jN" outputId="fea34a67-acf5-4fa0-c88a-e604146689e4"
pprint(soup.prettify()[:2000]) #to get an idea of the html structure of the webpage
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="GDHVujprhLJZ" outputId="69e7067a-e9ca-4f9e-f1ff-08ce96ec408e"
soup.title #the <title> element of the web page
# + colab={"base_uri": "https://localhost:8080/", "height": 435} colab_type="code" id="lSpsxVXkeDf0" outputId="e727c109-5c54-495a-9791-e0982fd1f0a2"
#locate the question container, then the post body nested inside it
question = soup.find("div", {"class": "question"})
question_body = question.find("div", {"class": "s-prose js-post-body"})
print("Question: \n", question_body.get_text().strip())
#the top answer uses the same tag/class structure
answer = soup.find("div", {"class": "answer"})
answer_body = answer.find("div", {"class": "s-prose js-post-body"})
print("Best answer: \n", answer_body.get_text().strip())
# + [markdown] colab_type="text" id="j1jgT85dlcow"
# BeautifulSoup is one of the many libraries which allow us to scrape web pages. Depending on your needs you can choose between the many available choices like beautifulsoup, scrapy, selenium, etc
| Ch2/01_WebScraping_using_BeautifulSoup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PSI Walkthrough
#
#
# #%matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from psi import calculate_psi
# +
sns.set_style("white")
# Reproducible synthetic samples: the benchmark is standard normal, the "new"
# sample is shifted by +0.2 (a deliberate drift for the PSI demo) and larger.
rs = np.random.RandomState(5)
benchmark = rs.normal(size = 1000)
new = rs.normal(loc = 0.2, size = 1200)
# -
# - QQ plot
# +
# Subsample both arrays to a common length so they can be paired in a QQ plot.
input_len = min(len(benchmark), len(new))
print(f'Common input length = {input_len}')
# NOTE(review): np.random.choice samples WITH replacement by default, which
# duplicates points and distorts the empirical quantiles; draw without
# replacement instead.
ban1 = np.random.choice(benchmark, input_len, replace=False)
new1 = np.random.choice(new, input_len, replace=False)
print(ban1.shape)
print(new1.shape)
# Sorting each sample turns the paired scatter into a quantile-quantile plot.
ban1.sort()
new1.sort()
plt.figure(figsize=[8,5])
plt.scatter(ban1, new1)
# 45-degree reference line: points on it mean identical distributions.
plt.plot([min(ban1), max(ban1)], [min(ban1), max(ban1)], color = 'red')
plt.xlabel('Quantiles of XXX in benchmark dataset')
plt.ylabel('Quantiles of XXX in new dataset')
plt.show()
# -
# - Distribution
# + tags=[]
# Overlay kernel-density estimates of both samples to visualize the shift.
plt.figure(figsize=[8,5])
# NOTE(review): seaborn deprecated `shade=` in 0.11 in favour of `fill=`
# (and removed it in 0.14); same visual result.
plot = sns.kdeplot(benchmark, fill=True, label='Benchmark')
plot = sns.kdeplot(new, fill=True, label='Current')
plot.set(yticklabels=[], xticklabels = [])  # hide tick labels; only the shapes matter
plt.title("Distribution from Benchmark and Current Input")
sns.despine(left=True)
# +
#plot.get_figure().savefig('images/distributions.png', format='png', dpi=1000)
# -
# - PSI test
# 1. PSI < 0.1: No Data Drift
# 1. 0.1 <= PSI < 0.2: Need Further Investigation
# 1. PSI > 0.2: Potential Data Drift/Breach/Anomaly
def runPSI(benchmark_data, current_data, TargetVar=None):
    """Compute the Population Stability Index between two samples and print an alarm level.

    Parameters
    ----------
    benchmark_data : array-like or DataFrame
        Reference (expected) population.
    current_data : array-like or DataFrame
        New (actual) population to compare against the benchmark.
    TargetVar : str, optional
        Column to extract when the inputs are DataFrames. When None the
        inputs are used as-is.  (The original ``data[None]`` lookup silently
        inserted a spurious leading axis on NumPy array inputs.)

    Returns
    -------
    pandas.DataFrame
        Per-bucket PSI breakdown from ``calculate_psi``.
    """
    if TargetVar is None:
        Bm, Cu = benchmark_data, current_data
    else:
        Bm = benchmark_data[TargetVar]
        Cu = current_data[TargetVar]
    psi_val, df = calculate_psi(Bm, Cu)
    # Alarm thresholds follow the conventional PSI rule of thumb stated in the
    # markdown above: < 0.1 stable, 0.1-0.2 investigate, > 0.2 likely drift.
    if psi_val > 0.2:
        alarm = "Lv3: Potential Data Breach"
    elif psi_val >= 0.1:
        alarm = "Lv2: Further Investigation Required"
    else:  # < 0.1
        alarm = "Lv1: No Data Drift is monitored"
    print(f'{alarm} (PSI= {psi_val})')
    return df
# print(f'Target Variable = {TargetVar}')
df = runPSI(benchmark, new)
# - Interpretation by PSI histogram
df.head(2)
percents = df.melt(id_vars=['Bucket']).rename(columns={'variable':'Population', 'value':'Percent'})
percents.head(2)
plt.figure(figsize=[8,6])
p = sns.barplot(x="Bucket", y="Percent", hue="Population", data=percents)
p.set(xlabel="Bucket", ylabel="Population Percent")
sns.despine(left=True)
#
# # Appendix
# +
def scale_range(values, lower, upper):
    """Linearly rescale *values* so they span exactly [lower, upper].

    NOTE: mutates NumPy array inputs in place (the caller's array is modified)
    and is undefined for constant input (division by zero when max == min
    after the shift).  Parameters renamed from ``input``/``min``/``max`` to
    stop shadowing Python builtins; the in-file call site is positional.
    """
    values += -(np.min(values))
    values /= np.max(values) / (upper - lower)
    values += lower
    return values
buckets = 10
# Equal-width bucket edges expressed as percentages: 0, 10, ..., 100.
raw_breakpoints = np.arange(0, buckets + 1) / (buckets) * 100
# NOTE(review): `initial` is never defined in this notebook — presumably the
# `benchmark` sample was intended; confirm before running the appendix cells.
breakpoints = scale_range(raw_breakpoints, np.min(initial), np.max(initial))
# -
# Per-bucket counts for both populations against the shared edges.
initial_counts = np.histogram(initial, breakpoints)[0]
new_counts = np.histogram(new, breakpoints)[0]
df = pd.DataFrame({'Bucket': np.arange(1, 11), 'Breakpoint Value':breakpoints[1:], 'Initial Count':initial_counts, 'New Count':new_counts})
df['Initial Percent'] = df['Initial Count'] / len(initial)
df['New Percent'] = df['New Count'] / len(new)
# Floor empty buckets at 0.1% so the log term in the PSI formula is defined.
# NOTE(review): chained assignment — may raise SettingWithCopyWarning and is
# a no-op under pandas copy-on-write; .loc would be the robust form.
df['New Percent'][df['New Percent'] == 0] = 0.001
df
percents = df[['Initial Percent', 'New Percent', 'Bucket']] \
.melt(id_vars=['Bucket']) \
.rename(columns={'variable':'Population', 'value':'Percent'})
percents.head()
p = sns.barplot(x="Bucket", y="Percent", hue="Population", data=percents)
p.set(xlabel='Bucket', ylabel='Population Percent')
sns.despine(left=True)
# +
#p.get_figure().savefig('images/constant_bins_percents.png', format='png', dpi=1000)
# -
# $PSI = \sum{}\Big(\big(Actual \% - Expected \%\big) \times ln\big(\dfrac{Actual \%}{Expected \%}\big)\Big)$
# $$ PSI = \sum{} \Big(\big(Actual\% - Expected\%\big) \times ln\big({Actual\% \over Expected\%}\big)\Big) $$
df['PSI'] = (df['New Percent'] - df['Initial Percent']) * np.log(df['New Percent'] / df['Initial Percent'])
df.head()
np.sum(df['PSI'])
np.round(calculate_psi(initial, new, buckets=10, axis=1), 5) == np.round(np.sum(df['PSI']), 5)
breakpoints = np.stack([np.percentile(initial, b) for b in np.arange(0, buckets + 1) / (buckets) * 100])
initial_counts = np.histogram(initial, breakpoints)[0]
new_counts = np.histogram(new, breakpoints)[0]
df = pd.DataFrame({'Bucket': np.arange(1, 11), 'Breakpoint Value':breakpoints[1:], 'Initial Count':initial_counts, 'New Count':new_counts})
df['Initial Percent'] = df['Initial Count'] / len(initial)
df['New Percent'] = df['New Count'] / len(new)
df['New Percent'][df['New Percent'] == 0] = 0.001
percents = df[['Initial Percent', 'New Percent', 'Bucket']] \
.melt(id_vars=['Bucket']) \
.rename(columns={'variable':'Population', 'value':'Percent'})
p = sns.barplot(x="Bucket", y="Percent", hue="Population", data=percents)
p.set(xlabel='Bucket', ylabel='Population Percent')
sns.despine(left=True)
# +
#p.get_figure().savefig('images/percentile_bins_percents.png', format='png', dpi=1000)
# -
calculate_psi(initial, new, buckettype='quantiles', buckets=10, axis=1)
| walkthrough-example.dkang.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PyCharm (carla_racetrack_BA)
# language: python
# name: pycharm-402c9497
# ---
# +
import os
import sys
import time
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import carla
# + pycharm={"name": "#%%\n"}
#Configs
map_idx = 2
MAP = ['circut_spa', 'RaceTrack', 'RaceTrack2']
# + pycharm={"name": "#%%\n"}
#Connecting to client
client = carla.Client('localhost', 2000)
client.set_timeout(5.0) # seconds
# + pycharm={"name": "#%%\n"}
world = client.load_world(MAP[map_idx])
blueprint_library = world.get_blueprint_library()
vehicle = blueprint_library.filter('*aud*')[0]
spectator = world.get_spectator()
# + pycharm={"name": "#%%\n"}
spawn_points = pd.read_csv(f'../data/spawn_points/{MAP[map_idx]}.csv')
# + pycharm={"name": "#%%\n"}
# Sanity-check every spawn point from the CSV: spawn the vehicle there,
# apply full throttle for a second, report where it ended up, then destroy
# it before moving on to the next point.
for idx, (x, y, z, yaw) in list(spawn_points.iterrows())[:]:
    location = carla.Location(x, y, z)
    rotation = carla.Rotation(0, yaw, 0)  # pitch=0, yaw from the CSV, roll=0
    spawn = carla.Transform(location, rotation)
    # NOTE(review): spawn_actor raises if the spot is blocked — confirm the
    # track is clear, or switch to try_spawn_actor and skip on failure.
    actor = world.spawn_actor(vehicle, spawn)
    actor.apply_control(carla.VehicleControl(throttle=1))
    time.sleep(1.0)  # let the vehicle settle/move before reading its location
    print(f'Vehicle {idx}, {yaw}, {actor.get_location().z}')
    # Every third point, reposition the spectator camera: 6 m above the
    # spawn, tilted 7 degrees further down, facing the spawn yaw.
    if idx % 3 == 0:
        spec_loc = carla.Location(spawn.location.x, spawn.location.y, spawn.location.z+6)
        spec_rot = carla.Rotation(spawn.rotation.pitch-7., float(yaw), 0.)
        spectator.set_transform(carla.Transform(spec_loc, spec_rot))
    actor.destroy()
# + pycharm={"name": "#%%\n"}
| notebooks/20200506_spawn_points_check.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Batch Processing!
# #### A notebook to show some of the capabilities available through the pCrunch package
#
# This is certainly not an exhaustive look at everything that the pCrunch module can do, but should hopefully provide some insight.
# ...or, maybe I'm just procrastinating doing more useful work.
# +
# Python Modules and instantiation
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import time
import os
# # %matplotlib widget
# ROSCO toolbox modules
from ROSCO_toolbox import utilities as rosco_utilities
# WEIS modules
from weis.aeroelasticse.Util import FileTools
# Batch Analysis tools
from pCrunch import Processing, Analysis
from pCrunch import pdTools
# Instantiate fast_IO
fast_io = rosco_utilities.FAST_IO()
fast_pl = rosco_utilities.FAST_Plots()
import importlib
Processing = importlib.reload(Processing)
Analysis = importlib.reload(Analysis)
# -
# ## Define file paths and filenames
# I'm loading a case matrix that is output when using weis.aeroelasticse.CaseGen_General to run a series of batch runs to initialize the output files here.
#
# Note that this isn't necessary, just my workflow in this notebook.
# point to some file paths
outfile_base = '/Users/nabbas/Documents/Projects/ROSCO_dev/DLC_Analysis/DLC_Outputs/5MW_Land_DLC11/'
fname_case_matrix = os.path.join(outfile_base,'case_matrix.yaml')
# +
# Load case matrix into datafraome
case_matrix = FileTools.load_yaml(fname_case_matrix, package=1)
cm = pd.DataFrame(case_matrix)
# pull wind speed values from InflowWind filenames
windspeeds, seed, IECtype, cmw = Processing.get_windspeeds(cm, return_df=True)
cmw.head()
# -
# #### Comparison cases
# I'm comparing two different controllers here, so I'm going to define two lists of output filenames, each corresponding to the output files from each controller
# +
# Define controllers we care to separate things by
# NOTE(review): list(set(...)) yields an arbitrary, run-to-run unstable order,
# but later cells label the datasets positionally (['ROSCO', 'Legacy']) —
# consider sorted(set(...)) or an explicit ordering to keep labels correct.
controllers = list(set(cmw[('ServoDyn', 'DLL_FileName')]))
controllers
# Build one list of output filenames per controller: select the case names
# that used this controller DLL and map them to their .outb output paths.
outfiles = []
for cont in controllers:
    case_names = cmw[cmw[('ServoDyn','DLL_FileName')]==cont]['Case_Name']
    outnames = list( outfile_base + case_names + '.outb' )
    outfiles.append(outnames)
# -
# ### outfiles
# In the end, we just need a list of OpenFAST output files. Here, we have a structure that looks something like `[[], []]`. This could be extended any amount like `[[],[],...,[], []]`, or just be one list of strings `[]`.
# ## Now we can do some processing!
#
# First, let's load the FAST_Processing class and initialize some parameters.
#
# +
fp = Processing.FAST_Processing()
fp.OpenFAST_outfile_list = outfiles
fp.dataset_names = ['DLC1.1', 'DLC1.3']
fp.to = 30
fp.parallel_analysis = True
fp.save_LoadRanking = False
fp.save_SummaryStats = False
fp.verbose=True
# # Can defined specific variables for load ranking if desired
# fp.ranking_vars = [["RotSpeed"],
# ["OoPDefl1", "OoPDefl2", "OoPDefl3"],
# ['RootMxc1', 'RootMxc2', 'RootMxc3'],
# ['TwrBsFyt'],
# ]
# -
# #### The fast way to compare things.
# We could now collect all of the summary stats and load rankings using:
# ```
# stats,load_rankings = fp.batch_processing()
# ```
# In `fp.batch_processing()` most of the analysis is done for any structure of data. I'm going to step through things a bit more piecewise in this notebook, however.
#
# NOTE: The goal in `batch_processing` is to have a "do anything" script. It is a work in progress, but getting there...
# +
# stats,load_rankings = fp.batch_processing()
# -
# ## Design Comparisons
# We can use fp.design_comparison to compare multiple sets of runs (like we are in this case...). This will generate summary stats and load rankings, running in parallel when it can and is told to. `fp.batch_processing()` functionally does the same thing if we give it an outfile matrix with equal size lists. We'll show the design comparison here to show a breakdown
stats, load_ranking = fp.design_comparison(outfiles)
# #### Breaking it down further...
#
# `fp.batch_processing()` calls `Analysis.Loads_Analysis.full_loads_analysis()` to load openfast data, generate stats, and calculate load rankings. Because we defined `fp.parallel_analysis=True` this process was parallelized. This helps for speed and memory reasons, because now every openfast run is not saved. `fp.batch_processing()` then takes all of the output data and parses it back together.
#
# Separately, we can call `Analysis.Loads_Analysis.full_loads_analysis()` with `return_FastData=True` and all of the fast data will be returned. Because we are comparing data though, we'll stick with the design comparison tools.
#
# #### Loading data
# We can also just load previously parsed data if we ran `FAST_Processing` with the `save_LoadRankings` and `save_SummaryStates` flags as True.
# +
# Or load stats and load rankings
root = '/Users/nabbas/Documents/Projects/ROSCO_dev/DLC_Analysis/DLC_Outputs/5MW_Land_DLC11/stats/'
lrfile = [root+'dataset0_LoadRanking.yaml', root+'dataset1_LoadRanking.yaml']
sfile = [root+'dataset0_stats.yaml', root+'dataset1_stats.yaml']
fname_case_matrix = root+'../case_matrix.yaml'
stats = [FileTools.load_yaml(sf, package=1) for sf in sfile]
load_rankings = [FileTools.load_yaml(lf, package=1) for lf in lrfile]
case_matrix = FileTools.load_yaml(fname_case_matrix, package=1)
cm = pd.DataFrame(case_matrix)
# -
# ### We can look at our data a bit further with pandas dataframes
# The data here is just for a few runs for simplicity. Usually you'd do this for a LOT more cases...
stats_df = pdTools.dict2df(stats, names=['ROSCO', 'Legacy'])
stats_df.head()
# ### Load Ranking
# Lets re-run the load ranking for the sake of example. We'll have to load the analysis tools, and then run the load ranking for the stats we just found
fa = Analysis.Loads_Analysis()
fa.t0 = 30
fa.verbose = False
# Define the ranking variables and statistics of interest. Note that `len(ranking_vars) == len(ranking_stats)`! We can pass this a list of stats (multiple runs), a dictionary with one run of stats, or a pandas dataframe with the requisite stats. If the inner list contains multiple OpenFAST channels, the load_rankings function will find the min/max/mean of the collection of the channels (e.g., max out of plane tip deflection of all three blades).
#
# We'll also output a dictionary and a pandas DataFrame from `fa.load_ranking()`
fa.ranking_vars = [['TwrBsFxt'], ['OoPDefl1', 'OoPDefl2', 'OoPDefl3']]
fa.ranking_stats = ['max', 'min']
load_ranking, load_ranking_df = fa.load_ranking(stats_df, get_df=True)
load_ranking_df.head()
# This is organized for each iteration of `[ranking_vars, ranking_stats]`. The stats are ordered accordingly, and `(stat)_case_idx` refers to the case name index of each load.
# ## Wind speed related analysis
# We often want to make sense of some batch output data with data binned by windspeed. We can leverage the case-matrix from our output data to figure out the input wind speeds. Of course, `('InflowWind', 'Filename')` must exist in the case matrix. Lets load the wind speeds, save them, and append them to the case matrix as `('InflowWind', 'WindSpeed')`.
windspeed, seed, IECtype, cmw = Processing.get_windspeeds(cm, return_df=True)
cmw
# ### AEP
# Now that we know the wind speeds that we were operating at, we can find the AEP. We define the turbine class here, and the cumulative distribution or probability density function
# for the Weibull distribution per IEC 61400 is generated. We can then calculate the AEP.
#
# If we first want to verify the PDF, we initialize the `power_production` function, define the turbine class, and can plot a PDF (or CDF) for a given range of wind speeds:
pp = Analysis.Power_Production()
pp.turbine_class = 2
Vrange = np.arange(2,26) # Range of wind speeds being considered
weib_prob = pp.prob_WindDist(Vrange,disttype='pdf')
plt.close('all')
plt.plot(Vrange, weib_prob)
plt.grid(True)
plt.xlabel("Wind Speed m/s")
plt.ylabel('Probability')
plt.title('Probability Density Function \n IEC Class 2 Wind Speeds ')
plt.show()
# To get the AEP, we need to provide the wind speeds that the simulations were run for, and the corresponding average power results. Internally, in power_production.AEP, the mean power for a given average wind speed is multiplied by the wind speed's probability, then extrapolated to represent yearly production.
#
# Note: this might throw a python warning due to some poor pandas indexing practices - to be cleaned up eventually!
#
# To get the AEP for each, the process is simple:
AEP = pp.AEP(stats, windspeeds)
print('AEP = {}'.format(AEP))
# ##### About the wind speed warning:
# Here, we get a warning about the input windspeed array. This is because we passed the complete array output from Processing.get_windspeeds to the AEP function. The input windspeeds to power_production.AEP must satisfy either of the following two conditions:
# - each wind speed value corresponds to each each statistic value, so `len(windspeeds) = len(stats_df)`
# - each wind speed value corresponds to each run in the case matrix, so `len(windspeeds) = len(cm)`
#
# If the second of these conditions is satisfied, it is assumed that each dataset has the same wind speeds corresponding to each run. So, in this case, the wind speeds corresponding to DLC_1.1 and DLC_1.3 should be the same.
# ## Plotting
# Finally, we can make some plots. There are a few tools we have at our disposal here. First, we can look at more plots that show our design performance as a function of wind speed. Notably, we can pass the stats dictionary or dataframe to these statistics-related scripts.
#
# Currently, `an_plts.stat_curve()` can plot a "statistics curve" for of two types, a bar or a line graph.
#
# A bar graph is useful to compare design cases easily:
plt.close()
an_plts = Analysis.wsPlotting()
an_plts.stat_curve(windspeed, stats, 'TwrBsFxt', 'bar', names=['ROSCO', 'Legacy'])
plt.show()
# A line graph can be useful to show turbulent wind curves. Here we show the means with a first level of errorbars corresponding to standard deviations, and a second level showing minimums and maximums.
an_plts.stat_curve(windspeed, stats, 'GenPwr', 'line', stat_idx=0, names=['ROSCO'])
plt.show()
# ### Load Ranking (soon)
# We can plot the load rankings...
# ... pulling this into `Analysis.py` is in progress.
#
# First, we define how we will classify our comparisons. Most commonly this would be `('IEC','DLC')`, but I'm comparing controllers here. The `classifier_type` functionally refers to the channel of the case matrix to separate the data by, and the `classifier_names` are simply labels for the classifiers.
# Define a classification channel from the case-matrix
classifier_type = ('ServoDyn', 'DLL_FileName')
classifier_names = ['ROSCO', 'legacy']
# +
# Plot load rankings
fig_list, ax_list = an_plts.plot_load_ranking(load_ranking, cm, classifier_type, classifier_names=classifier_names, n_rankings=10, caseidx_labels=True)
# modify axis labels
for ax in ax_list:
ax.set_xlabel('Controller [-]', fontsize=10, fontweight='bold')
plt.show()
# -
# ### Time domain plotting
# We can also look at our data from the time domain results.
#
# We can compare any number of channels using the ROSCO toolbox plotting tools. First we'll load two cases to plot together, then plot the time histories.
# +
# Load some time domain cases
filenames = [outfiles[0][70], outfiles[1][70]] # select the 70th run from each dataset
fast_data = fast_io.load_fast_out(filenames, tmin=30)
# Change names so the legends make sense
fast_data[0]['meta']['name'] = 'ROSCO'
fast_data[1]['meta']['name'] = 'Legacy'
# +
# Define the plots we want to make (can be as many or as few channels and plots as you would like...)
cases = {'Baseline': ['Wind1VelX', 'GenPwr', 'BldPitch1', 'GenTq', 'RotSpeed'],
'Blade' : ['OoPDefl1', 'RootMyb1']}
# plot
fast_pl.plot_fast_out(cases, fast_data)
plt.show()
# -
# ### Spectral Analysis
#
# We can additionally do some frequency domain analysis. Here, `spec_cases` is defined by `(channel, run)` where the run index corresponds to the desired plotting index in the loaded fast data.
#
#
spec_cases = [('RootMyb1', 0), ('TwrBsFxt', 1)]
twrfreq = .0716
twrfreq_label = ['Tower']
fig, ax = fast_pl.plot_spectral(fast_data, spec_cases,
show_RtSpeed=True, RtSpeed_idx=[0],
add_freqs=[twrfreq], add_freq_labels=twrfreq_label,
averaging='Welch')
ax.set_title('DLC_1.1')
plt.show()
# ### Other fun plots
#
# Finally, we can plot the data distribution of any channels from our fast output data
channels = ['GenPwr']
caseid = [0,1]
an_plts.distribution(fast_data, channels, caseid, names=['ROSCO', 'Legacy'])
plt.show()
# ## In conclusion...
# If you made it this far, thanks for reading...
#
# There are a number of smaller subfunctionalities that are also available within these tools shown above. Perhaps most importantly, everything is fairly modular - the hope being that these can provide some high-level tools that everyone can assimilate into their own workflows without too much disruption.
#
# Please add, contribute, fix, etc... That would be great for everyone involved!
| pCrunch/batch_processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# +
# 1.9. Naive Bayes
## Os métodos Naive Bayes são um conjunto de algoritmos de aprendizagem supervisionada com base na aplicação do teorema de Bayes com a suposição "ingênua" de independência condicional entre cada par de recursos dado o valor da variável de classe. O teorema de Bayes afirma a seguinte relação, dada a variável de classe y e o vetor de característica dependente x através de x_n,:
### $P(y \mid x_1, \dots, x_n) = \frac{P(y)\, P(x_1, \dots, x_n \mid y)}{P(x_1, \dots, x_n)}$
## Usando a premissa ingênua de independência condicional de que
### $P(x_i \mid y, x_1, \dots, x_{i-1}, x_{i+1}, \dots, x_n) = P(x_i \mid y),$
## para todos os i, esta relação é simplificada para
### $P(y \mid x_1, \dots, x_n) = \frac{P(y) \prod_{i=1}^{n} P(x_i \mid y)}{P(x_1, \dots, x_n)}$
## Como $P(x_1, \dots, x_n)$ é constante dada a entrada, podemos usar a seguinte regra de classificação:
### $P(y \mid x_1, \dots, x_n) \propto P(y) \prod_{i=1}^{n} P(x_i \mid y) \;\Rightarrow\; \hat{y} = \arg\max_y P(y) \prod_{i=1}^{n} P(x_i \mid y)$
## e podemos usar a estimativa de Máximo A Posteriori (MAP) para estimar P (y) e P (x_i \ mid y); o primeiro é então a frequência relativa da classe y no conjunto de treinamento.
## Os diferentes classificadores Bayes ingênuos diferem principalmente pelas suposições que fazem em relação à distribuição de P (x_i \ mid y).
## Apesar de suas suposições aparentemente simplificadas demais, os classificadores Bayes ingênuos funcionaram muito bem em muitas situações do mundo real, como a famosa classificação de documentos e filtragem de spam. Eles requerem uma pequena quantidade de dados de treinamento para estimar os parâmetros necessários. (Para razões teóricas pelas quais o ingênuo Bayes funciona bem, e sobre os tipos de dados que ele faz, veja as referências abaixo.)
## Alunos e classificadores Naive Bayes podem ser extremamente rápidos em comparação com métodos mais sofisticados. O desacoplamento das distribuições de características condicionais de classe significa que cada distribuição pode ser estimada independentemente como uma distribuição unidimensional. Isso, por sua vez, ajuda a aliviar os problemas decorrentes da maldição da dimensionalidade.
## Por outro lado, embora o ingênuo Bayes seja conhecido como um classificador decente, é conhecido por ser um estimador ruim, de modo que as saídas de probabilidade de predict_proba não devem ser levadas muito a sério.
## Referências:
## <NAME> (2004). The optimality of Naive Bayes. Proc. FLAIRS. (https://www.cs.unb.ca/~hzhang/publications/FLAIRS04ZhangH.pdf)
| 01_Modelos_Supervisionados/1.9_Naive_Bayes/1.9_Naive_Bayes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="WCHIAGNUhmDy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="cac1f7e9-baf2-4f94-873e-d5a838da9adf"
# Build a dict mapping each name to its length.  Duplicate names collapse
# into a single key, so "Terry" appears only once in the result.
# (The `>>> ` doctest prompts were removed — they are a syntax error when the
# cell is executed — and the comprehension now reuses `names` instead of
# repeating the literal list.)
names = ["Eric", "Graham", "Terry", "John", "Terry"]
print({k: len(k) for k in names})
| Chapter07/Exercise102/Exercise102.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="6Pc4_YU0SuKT" colab_type="text"
# The `hilbert_space_algebra` module defines a simple algebra of finite dimensional or countably infinite dimensional Hilbert spaces.
#
# Local/primitive degrees of freedom (e.g. a single multi-level atom or a cavity mode) are described by a LocalSpace; it requires a label, and may define a basis through the basis or dimension arguments. The LocalSpace may also define custom identifiers for operators acting on that space (subclasses of LocalOperator):
#
#
# + id="3lpWK6s1SRq-" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 374} outputId="46191b04-0a0d-4ca6-f571-ad1c76da0bbc" executionInfo={"status": "ok", "timestamp": 1526899251804, "user_tz": -330, "elapsed": 4422, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "102028172612648957868"}}
# !pip install QNET
# + id="cloCg7VnRaJz" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
import qnet
from qnet.algebra import *
import sympy as sp
# + id="_JB6gRjOR2lW" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 68} outputId="4dcb3979-ec16-4637-8a98-5e6434f8eb56" executionInfo={"status": "ok", "timestamp": 1526899728307, "user_tz": -330, "elapsed": 476, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "102028172612648957868"}}
from sympy import symbols, I, sqrt
from qnet.algebra.circuit_algebra import Create, LocalSigma, SLH, Destroy, Matrix, identity_matrix
from qnet.algebra.operator_algebra import Destroy
# Annihilation operators for the signal ("s") and pump ("p") cavity modes.
s = Destroy(hs="s")
p = Destroy(hs="p")
# Symbolic model parameters: mode decay rates, pump-signal coupling strength,
# and pump drive amplitude (all declared positive for sympy simplification).
gamma_p, gamma_s, epsilon, p0 = sp.symbols("gamma_p, gamma_s, epsilon, p0", positive=True)
S = -identity_matrix(2)
# Collapse/coupling operators for the two input-output ports.
L = [sp.sqrt(2*gamma_p)*p, sp.sqrt(2*gamma_s)*s]
# Degenerate-OPO interaction Hamiltonian: (i*eps/2) p s†s† plus its adjoint.
H = sp.I*epsilon/2 * p*s.dag()*s.dag() ; H = H + H.dag()
# SLH model of the OPO, with a coherent drive of amplitude p0 on port 0.
OPO = SLH(S,L,H).coherent_input(p0, 0)
OPO
# + id="7LrJGJQYSiBq" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 68} outputId="5de75c0f-fe9f-41dd-8f29-46c5ebc6dc39" executionInfo={"status": "ok", "timestamp": 1526899743422, "user_tz": -330, "elapsed": 443, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "102028172612648957868"}}
k = sp.symbols("k", positive=True)
OPO_prelimit = OPO.substitute({gamma_p : k**2*gamma_p, epsilon : k*epsilon})
OPO_prelimit
# + id="cc1hRwsyURi9" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 51} outputId="1a7f654c-e753-49f9-ed8d-0a8ae3a6150d" executionInfo={"status": "ok", "timestamp": 1526899801044, "user_tz": -330, "elapsed": 4944, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "102028172612648957868"}}
from qnet.algebra.circuit_algebra import SLH, try_adiabatic_elimination, prepare_adiabatic_limit
try_adiabatic_elimination(OPO_prelimit, k=k)
# + id="X6nLab9bUVWb" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 68} outputId="5723dde5-50e4-4b6b-c008-a4fd6890852c" executionInfo={"status": "ok", "timestamp": 1526899813565, "user_tz": -330, "elapsed": 442, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "102028172612648957868"}}
# NOTE(review): this cell rebuilds the exact same OPO model as the earlier
# cell, verbatim — presumably re-run to start from a fresh, un-substituted
# object before the next limit computation; confirm intent.
s = Destroy(hs="s")
p = Destroy(hs="p")
gamma_p, gamma_s, epsilon, p0 = sp.symbols("gamma_p, gamma_s, epsilon, p0", positive=True)
S = -identity_matrix(2)
L = [sp.sqrt(2*gamma_p)*p, sp.sqrt(2*gamma_s)*s]
H = sp.I*epsilon/2 * p*s.dag()*s.dag() ; H = H + H.dag()
OPO = SLH(S,L,H).coherent_input(p0, 0)
OPO
# + id="wDdBpfYmUiq_" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 68} outputId="f92552f9-6714-4fc9-9730-3e09368dc514" executionInfo={"status": "ok", "timestamp": 1526899825199, "user_tz": -330, "elapsed": 553, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "102028172612648957868"}}
k = sp.symbols("k", positive=True)
OPO_prelimit = OPO.substitute({gamma_p : k**2*gamma_p, epsilon : k*epsilon})
OPO_prelimit
# + id="Ee30pOK_UlfE" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} outputId="e1aad547-8aa3-424e-b6a0-61f2ca540f79" executionInfo={"status": "ok", "timestamp": 1526899838038, "user_tz": -330, "elapsed": 483, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "102028172612648957868"}}
prepare_adiabatic_limit(OPO_prelimit, k=k)[2]
| Hilbert_Space.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import sys
import pathlib
import numpy as np
import pandas as pd
sys.path.insert(0, "../../scripts")
from utils import load_data
from pycytominer.cyto_utils import infer_cp_features
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from sklearn.decomposition import PCA
from tensorflow import keras
from vae import VAE
from tensorflow.keras.models import Model, Sequential
import seaborn
import random as python_random
import tensorflow as tf
# import umap
# -
data_splits = ["train", "test", "valid", "complete"]
data_dict = load_data(data_splits)
# +
# Prepare data for training
meta_features = infer_cp_features(data_dict["train"], metadata=True)
cp_features = infer_cp_features(data_dict["train"])
train_features_df = data_dict["train"].reindex(cp_features, axis="columns")
train_meta_df = data_dict["train"].reindex(meta_features, axis="columns")
test_features_df = data_dict["test"].reindex(cp_features, axis="columns")
test_meta_df = data_dict["test"].reindex(meta_features, axis="columns")
valid_features_df = data_dict["valid"].reindex(cp_features, axis="columns")
valid_meta_df = data_dict["valid"].reindex(meta_features, axis="columns")
complete_features_df = data_dict["complete"].reindex(cp_features, axis="columns")
complete_meta_df = data_dict["complete"].reindex(meta_features, axis="columns")
# -
print(train_features_df.shape)
train_features_df.head(3)
def shuffle_each_column(df):
    """Return a copy of *df* with every column shuffled independently.

    This destroys all cross-feature correlations while preserving each
    column's marginal distribution (a standard negative control).

    Fix: the original assigned ``sample(frac=1).reset_index(drop=True)``,
    a Series carrying a fresh RangeIndex.  pandas aligns assignments on
    the index, so for any frame without a default RangeIndex this
    misaligned (producing NaNs).  Assigning the raw values is positional
    and correct for any index.
    """
    shuffled = df.copy()
    for column in shuffled.columns:
        shuffled[column] = shuffled[column].sample(frac=1).to_numpy()
    return shuffled
# Shuffle every feature column independently so the model trains on data
# with all cross-feature correlations destroyed (negative control).
train_features_df = shuffle_each_column(train_features_df)
valid_features_df = shuffle_each_column(valid_features_df)
# one hidden layer of 250 units on each side of the bottleneck
encoder_architecture=[250]
decoder_architecture=[250]
# beta presumably weights the KL term (see the project's VAE class) — confirm
cp_vae = VAE(
    input_dim=train_features_df.shape[1],
    latent_dim=90,
    batch_size=32,
    encoder_batch_norm=True,
    epochs=50,
    learning_rate=0.0001,
    encoder_architecture=encoder_architecture,
    decoder_architecture=decoder_architecture,
    beta=0.06,
    verbose=True,
)
cp_vae.compile_vae()
cp_vae.train(x_train=train_features_df, x_test=valid_features_df)
cp_vae.vae
# Save training performance
history_df = pd.DataFrame(cp_vae.vae.history.history)
history_df
# persist the trained halves separately for later reuse
encoder = cp_vae.encoder_block["encoder"]
decoder = cp_vae.decoder_block["decoder"]
encoder.save("models/level4EncoderShuffled_beta")
decoder.save("models/level4DecoderShuffled_beta")
# +
# history_df.to_csv('level4_training_random.csv')
# +
# history_df = pd.read_csv('level4_training_random.csv')
# +
# original_training_data = pd.read_csv('level4_training.csv')
# +
# plt.figure(figsize=(7, 5), dpi = 400)
# plt.plot(original_training_data["loss"], label="Training data")
# plt.plot(original_training_data["val_loss"], label="Validation data")
# plt.plot(history_df["loss"], label="Shuffled training data")
# plt.plot(history_df["val_loss"], label="Shuffled validation data")
# # plt.title("Loss for VAE training on Cell Painting Level 5 data")
# plt.ylabel("MSE + KL Divergence")
# plt.xlabel("No. Epoch")
# plt.ylim(0,5)
# plt.legend()
# plt.show()
# -
# Same shuffled-data control with beta=1 — a "vanilla" weighting, judging
# by the *_vanilla model names below.
cp_vae = VAE(
    input_dim=train_features_df.shape[1],
    latent_dim=90,
    batch_size=32,
    encoder_batch_norm=True,
    epochs=58,
    learning_rate=0.0001,
    encoder_architecture=encoder_architecture,
    decoder_architecture=decoder_architecture,
    beta=1,
    verbose=True,
)
cp_vae.compile_vae()
cp_vae.train(x_train=train_features_df, x_test=valid_features_df)
cp_vae.vae
# Save training performance
history_df = pd.DataFrame(cp_vae.vae.history.history)
history_df
encoder = cp_vae.encoder_block["encoder"]
decoder = cp_vae.decoder_block["decoder"]
encoder.save("models/level4EncoderShuffled_vanilla")
decoder.save("models/level4DecoderShuffled_vanilla")
# +
# history_df.to_csv('level4_training_vanilla_random.csv')
# +
# history_df = pd.read_csv('level4_training_vanilla_random.csv')
# +
# original_training_data = pd.read_csv('level4_training_vanilla.csv')
# +
# plt.figure(figsize=(7, 5), dpi = 400)
# plt.plot(original_training_data["loss"], label="Training data")
# plt.plot(original_training_data["val_loss"], label="Validation data")
# plt.plot(history_df["loss"], label="Shuffled training data")
# plt.plot(history_df["val_loss"], label="Shuffled validation data")
# # plt.title("Loss for VAE training on Cell Painting Level 5 data")
# plt.ylabel("MSE + KL Divergence")
# plt.xlabel("No. Epoch")
# plt.ylim(0,5)
# plt.legend()
# plt.show()
# -
# Third shuffled-data control: beta=0 with lam=10000 — presumably an
# MMD-VAE configuration (lam weighting an MMD term; see the VAE class
# and the *_mmd model names below) — confirm against the VAE class.
cp_vae = VAE(
    input_dim=train_features_df.shape[1],
    latent_dim=90,
    batch_size=32,
    encoder_batch_norm=True,
    epochs=50,
    learning_rate=0.0001,
    encoder_architecture=encoder_architecture,
    decoder_architecture=decoder_architecture,
    beta=0,
    lam = 10000,
    verbose=True,
)
cp_vae.compile_vae()
cp_vae.train(x_train=train_features_df, x_test=valid_features_df)
cp_vae.vae
# Save training performance
history_df = pd.DataFrame(cp_vae.vae.history.history)
history_df
encoder = cp_vae.encoder_block["encoder"]
decoder = cp_vae.decoder_block["decoder"]
encoder.save("models/level4EncoderShuffled_mmd")
decoder.save("models/level4DecoderShuffled_mmd")
# +
# history_df.to_csv('level4_training_mmd_random.csv')
# +
# original_training_data = pd.read_csv('level4_training_mmd.csv')
# +
# plt.figure(figsize=(7, 5), dpi = 400)
# plt.plot(original_training_data["loss"], label="Training data")
# plt.plot(original_training_data["val_loss"], label="Validation data")
# plt.plot(history_df["loss"], label="Shuffled training data")
# plt.plot(history_df["val_loss"], label="Shuffled validation data")
# # plt.title("Loss for VAE training on Cell Painting Level 5 data")
# plt.ylabel("MSE + MMD")
# plt.xlabel("No. Epoch")
# plt.legend()
# plt.show()
# -
| cell-painting/2.train/generate training curve level 4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from process import get_data
from sklearn.utils import shuffle
# %matplotlib inline
# +
# make predictions
def softmax(a):
    """Row-wise softmax of the activation matrix *a* (N x K).

    Subtracting each row's maximum before exponentiating prevents
    overflow for large activations without changing the result
    (softmax is shift-invariant).
    """
    shifted = a - a.max(axis=1, keepdims=True)
    expA = np.exp(shifted)
    return expA / expA.sum(axis=1, keepdims=True)
def forward(X, W, b):
    """Forward pass: class probabilities P(Y|X) under weights W and bias b."""
    activations = X.dot(W) + b
    return softmax(activations)
def predict(P_Y_given_X):
    """Predicted class per sample: index of the largest probability in each row."""
    return P_Y_given_X.argmax(axis=1)
# calculate the accuracy
def classification_rate(Y, P):
    """Fraction of predictions in P that match the targets in Y."""
    matches = Y == P
    return np.mean(matches)
def cross_entropy(T, pY):
    """Mean cross-entropy between one-hot targets T and predicted probabilities pY."""
    log_likelihood = T * np.log(pY)
    return -log_likelihood.mean()
def y2indicator(y, K):
N = len(y)
ind = np.zeros((N, K))
for i in range(N):
ind[i, y[i]] = 1
return ind
# +
# NOTE(review): the original cell initialized W and b from D and K *before*
# those sizes were computed, which raises NameError on a fresh kernel.
# Initialization is moved below the shape computation; everything else is
# unchanged.
Xtrain, Ytrain, Xtest, Ytest = get_data()
D = Xtrain.shape[1]                # number of input features
K = len(set(Ytrain) | set(Ytest))  # number of classes across both splits
W = np.random.randn(D, K)          # random weight init
b = np.zeros(K)                    # zero bias init
# convert to indicator
Ytrain_ind = y2indicator(Ytrain, K)
Ytest_ind = y2indicator(Ytest, K)
train_costs = []
test_costs = []
learning_rate = 0.001
# full-batch gradient descent on softmax regression
for i in range(10000):
    pYtrain = forward(Xtrain, W, b)
    pYtest = forward(Xtest, W, b)
    ctrain = cross_entropy(Ytrain_ind, pYtrain)
    ctest = cross_entropy(Ytest_ind, pYtest)
    train_costs.append(ctrain)
    test_costs.append(ctest)
    # gradient descent: gradient of the loss is X^T (pY - T)
    W -= learning_rate*Xtrain.T.dot(pYtrain - Ytrain_ind)
    b -= learning_rate*(pYtrain - Ytrain_ind).sum(axis=0)
    if i % 1000 == 0:
        print(i, ctrain, ctest)
print("Final train classification_rate:", classification_rate(Ytrain, predict(pYtrain)))
print("Final test classification_rate:", classification_rate(Ytest, predict(pYtest)))
# -
def one_hot_encoder(data):
    """One-hot encode a 1-D array of categorical values.

    Columns follow np.unique's sorted order of the distinct values, so the
    output is identical to the original for labels 0..K-1.  Generalization:
    the original indexed columns by the raw value (``one_hot[:, int(t)]``),
    which raised IndexError whenever the values were not exactly 0..K-1;
    mapping each unique value to its column position handles any values.
    """
    unique_vals = np.unique(data)
    one_hot = np.zeros((data.shape[0], len(unique_vals)))
    for col, val in enumerate(unique_vals):
        one_hot[:, col] = np.where(data == val, 1, 0)
    return one_hot
def get_data_ant():
    """Load the e-commerce data set and return normalized train/test splits.

    Returns (X_train, Y_train, X_test, Y_test); the last 100 shuffled rows
    form the test split.  The final feature column is one-hot encoded into
    4 columns (it is assumed to take 4 distinct values — see D+3 below),
    and columns 1 and 2 are standardized per split.
    """
    df = pd.read_csv('ecommerce_data.csv')
    # DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the
    # supported replacement and returns the same ndarray.
    data = df.to_numpy()
    X = data[:,:-1]
    Y = data[:,-1].astype(np.int32)
    X, Y = shuffle(X, Y, random_state=42)
    N, D = X.shape
    # One-hot encode the last column into 4 indicator columns
    X2 = np.zeros((N,D+3))
    X2[:,:D-1] = X[:,:D-1]
    X2[:,D-1:D+3] = one_hot_encoder(X[:,D-1])
    X = X2
    X_train = X[:-100,:]
    Y_train = Y[:-100]
    X_test = X[-100:,:]
    Y_test = Y[-100:]
    # standardize the two continuous feature columns
    for i in (1,2):
        X_train[:,i] = (X_train[:,i] - X_train[:,i].mean())/X_train[:,i].std()
        X_test[:,i] = (X_test[:,i] - X_test[:,i].mean())/X_test[:,i].std()
    return X_train, Y_train, X_test, Y_test
def softmax_ant(a):
    """Row-wise softmax, shifted by the row max for numerical stability.

    Without the shift, np.exp overflows for large activations; softmax is
    shift-invariant, so the result is unchanged.
    """
    shifted = a - a.max(axis=1, keepdims=True)
    exp_a = np.exp(shifted)
    return exp_a/exp_a.sum(axis=1, keepdims=True)
def classification_rate_ant(Y, Y_hat_class):
    """Accuracy in percent: share of predicted classes equal to the targets."""
    correct = np.mean(Y == Y_hat_class)
    return 100 * correct
def forward_ant(X, W, b):
    """Forward pass returning class probabilities P(Y|X).

    Consistency fix: call this section's own softmax_ant rather than the
    earlier section's softmax — both compute the same quantity, but the
    "_ant" functions are meant to form a self-contained set.
    """
    return softmax_ant(X.dot(W) + b)
# General multi-class cross-entropy
def cross_entropy_ant(Y, Y_hat):
    """Mean multi-class cross-entropy between one-hot Y and predictions Y_hat."""
    log_likelihood = Y * np.log(Y_hat)
    return -np.mean(log_likelihood)
def predict_ant(Y_hat):
    """Class prediction: column index of the maximum probability in each row."""
    return Y_hat.argmax(axis=1)
def dJ_dw(Y, Y_hat, X):
    """Gradient of the cross-entropy loss J w.r.t. the weights W.

    For J = -mean(Y * log(Y_hat)) the gradient is X^T (Y_hat - Y).
    Sign fix: the original returned X^T (Y - Y_hat) — the *negative*
    gradient — so the update ``W -= lr * dJ_dw(...)`` below actually
    climbed the loss.  Compare the first training cell in this notebook,
    which correctly subtracts ``X.T.dot(pY - T)``.
    """
    return X.T.dot(Y_hat - Y)
def derivative_b(Y, Y_hat):
    """Gradient of the cross-entropy loss w.r.t. the bias b.

    The true gradient is sum(Y_hat - Y) over samples.  Sign fix: the
    original returned (Y - Y_hat).sum(axis=0), which made
    ``b -= lr * derivative_b(...)`` climb the loss instead of descending
    (the first training cell in this notebook uses the correct sign).
    """
    return (Y_hat - Y).sum(axis=0)
# NOTE(review): this reuses get_data() from the earlier section (it returns
# four arrays), not get_data_ant() defined just above — confirm intended.
X_train, Y_train, X_test, Y_test = get_data()
print(X_train.shape, Y_train.shape, X_test.shape, Y_test.shape)
# +
D = X_train.shape[1] # number of features
K = len(set(Y_train)) # number of classes
# one-hot targets for train and test
T_train = one_hot_encoder(Y_train)
T_test = one_hot_encoder(Y_test)
# random weight / bias initialization
W = np.random.randn(D, K)
b = np.random.randn(K)
# Check
# P_Y_given_x = forward(X_train, W, b)
# Y_hat = predict(P_Y_given_x)
# print('Classification rate:', np.round(classification_rate(Y_train, Y_hat),4), '%')
# +
learning_rate = 1e-3
costs_train = []
costs_test = []
# full-batch gradient descent using the "_ant" helper functions
for epoch in range(10000):
    Y_hat_train = forward_ant(X_train, W, b)
    Y_hat_test = forward_ant(X_test, W, b)
    ctrain = cross_entropy_ant(T_train, Y_hat_train)
    ctest = cross_entropy_ant(T_test, Y_hat_test)
    costs_train.append(ctrain)
    costs_test.append(ctest)
    W -= learning_rate * dJ_dw(T_train, Y_hat_train, X_train)
    b -= learning_rate * derivative_b(T_train, Y_hat_train)
    if epoch % 1000 == 0:
        print(epoch, ctrain, ctest)
print("classification_rate:", classification_rate(Y_train, predict_ant(Y_hat_train)))
# plt.plot(costs_train)
# plt.plot(costs_test)
# -
costs_train
costs_test
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
def y2indicator(y, K):
    """One-hot (indicator) matrix for integer labels *y* over K classes.

    Duplicate of the earlier definition; vectorized here as well — one
    fancy-indexed write replaces the per-row Python loop.
    """
    y = np.asarray(y)
    N = len(y)
    ind = np.zeros((N, K))
    ind[np.arange(N), y] = 1
    return ind
# +
Xtrain, Ytrain, Xtest, Ytest = get_data()
D = Xtrain.shape[1]   # number of features
K = len(set(Ytrain) | set(Ytest))   # number of classes across both splits
# convert to indicator
Ytrain_ind = y2indicator(Ytrain, K)
Ytest_ind = y2indicator(Ytest, K)
print(Ytrain_ind.shape)
# -
# initialize weights
| archive/logistic/analyse_ecom_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import queue
pq = queue.PriorityQueue()
# +
# Since each task comes with a priority, we decided to use a priority queue.
# We first tried the priority queue built into the `queue` library.
# It stores (priority, value) pairs,
# but it cannot be indexed, and it is not iterable, so it cannot be
# traversed with a for loop either.
# Therefore we implement the priority queue ourselves with a plain list:
# 1. store (priority, value) tuples in a list
# 2. lst.pop(0) removes the front task
# 3. lst[i][0] reads a task's priority
pq.put((2,0))
pq.put((1,1))
pq.put((3,2))
pq.put((2,3))
# -
# quick list-based demo of the same idea
lst = [(2,0),(1,1),(3,2),(2,3)]
first = lst.pop(0)
# priority of the new front element, and of the popped element
lst[0][0]
first[0]
def solution(task, location):
    """Printer-queue problem: return the 1-based position at which the job
    at index *location* is printed.

    task     -- list of job priorities in arrival order
    location -- index (in *task*) of the job we are tracking

    A job at the front of the queue is printed only if no job waiting
    behind it has a strictly higher priority; otherwise it is re-queued
    at the back.

    Fixes: the original shadowed the parameter ``task`` with its loop
    variable, and tracked "is any job more important?" with a manual
    boolean flag — replaced with ``any()``.
    """
    # queue of (priority, original_index) pairs, in arrival order
    waiting = [(priority, idx) for idx, priority in enumerate(task)]
    printed = []  # jobs in the order they are actually printed
    while waiting:
        current = waiting.pop(0)
        if any(current[0] < other[0] for other in waiting):
            # a more important job is waiting: send current to the back
            waiting.append(current)
        else:
            printed.append(current)
    for order, job in enumerate(printed):
        if job[1] == location:
            return order + 1
solution([1, 1, 9, 1, 1, 1],0)
| 1.Study/2. with computer/4.Programming/1.Algorithms/0.Practice/20191112.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Iterative Phase Estimation
# ## Setup
#
# First, make sure that you have the latest version of Qiskit installed. To upgrade your Qiskit package, run the following command:
#
# ```bash
# pip install --upgrade qiskit
# ```
#
# Get an API key from IonQ. This will be used by the IonQ provider inside Qiskit to submit circuits to the IonQ platform.
#
# After securing an API key, install the python package `qiskit_ionq` using `pip`:
#
# ```bash
# pip install qiskit_ionq
# ```
#
# (IonQ's adapter for Qiskit is currently in private beta -- your feedback is welcomed!)
#
# ### (Optional) Extra Dependencies
#
# Some examples use additional Python dependencies; please make sure to `pip install` them as needed.
#
# Dependencies:
# * `matplotlib`: To run `qiskit.visualization.plot_histogram`.
#
# **NOTE**: The provider expects an API key to be supplied via the `token` keyword argument to its constructor. If no token is directly provided, the provider will check for one in the `QISKIT_IONQ_API_TOKEN` environment variable.
#
# Now that the Python package has been installed, you can import and instantiate the provider:
# +
#import Aer here, before calling qiskit_ionq_provider
from qiskit import Aer
from qiskit_ionq import IonQProvider
#Call provider and set token value
# NOTE: replace '<PASSWORD>' with a real IonQ API key, or set the
# QISKIT_IONQ_API_TOKEN environment variable and omit the token argument.
provider = IonQProvider(token='<PASSWORD>')
# -
# The `provider` instance can now be used to create and submit circuits to IonQ.
#
# ### Backend Types
#
# The IonQ provider supports two backend types:
# * `ionq_simulator`: IonQ's simulator backend.
# * `ionq_qpu`: IonQ's QPU backend.
#
# To view all current backend types, use the `.backends` property on the provider instance:
provider.backends()
# ### Why do I care about the Iterative Phase Estimation Algorithm (IPEA)? What can you do with it?
#
# Recall your linear algebra knowledge. More specifically, recall the concepts of eigenvalues and eigenvectors. Once you have these two notions solidified, consider a unitary operator $U$ and a state $\left|\psi\right>$ such that the following relation is true:
#
# $$U\left|\psi\right>=e^{2\pi i\phi}\left|\psi\right>$$
#
# In other words, $U$ has eigenvector $\left|\psi\right>$ with corresponding eigenvalue $e^{2\pi i\phi}$. $\phi$ is a real number, and it is our job to find out what its value is. That's it! That's the problem we are trying to solve. And we will solve it using this algorithm known as Iterative Phase Estimation.
# ### Okay, what is this algorithm and how do we use it to solve this problem?
#
# At this point, I will provide a peripheral explanation of what the algorithm essentially does. The detailed explanation will follow as I actually code the algorithm. Before I give the basic summary though, keep the following point in mind -
#
# IMPORTANT : We assume that $\phi$, the value we are trying to find, can be expressed as $\frac{\phi_1}{2} + \frac{\phi_2}{4} + \frac{\phi_3}{8} + ... + \frac{\phi_n}{2^n}$. Here each of the $\phi_k$'s are either 0 or 1. Another way of thinking about this key assumption is that $\phi_1\phi_2\phi_3...\phi_n$ is the binary representation of $2^n\phi$ and $n$ is the number of iterations that we do
#
# Now for the general working : as the name of the algorithm suggests, there will be several iterations. Consider iteration k. The goal of this iteration will be to determine $\phi_{n-k+1}$ in the expression for $\phi$ above. To do this, the algorithm uses a circuit with two qubits : an auxiliary qubit and a main qubit. What will end up happening in every iteration is that $U$ will be acted on the main qubit in such a way that the auxiliary qubit's state, upon measurement, will collapse into $\phi_{n-k+1}$.
# ### Isn't there the standard Quantum Phase Estimation algorithm that does the same thing with just one iteration?
#
# Before we start, let me answer this good question. Yes, there is the standard Quantum Phase Estimation algorithm (often shortened to QPE) that solves this very problem with one iteration. Why aren't we using that algorithm if it will get the job done faster?
#
# Well, there is a trade-off one has to consider while choosing between the two algorithms. Essentially, the QPEA uses a larger circuit with more qubits, and with more qubits comes more cost in the form of hardware and noise. In contrast, the IPEA just uses two qubits (auxiliary and main). But, of course, where this algorithm loses out is in the number of iterations.
#
# Okay, now let us see how the IPEA works exactly! We are going to try to deduce the phase of the T-gate (our $U = T$). This gate has the following matrix -
#
# $$T = \begin{bmatrix}
# 1 & 0\\
# 0 & e^{i\frac{\pi}{4}}
# \end{bmatrix}$$
#
# which clearly tells you that the state $\left|1\right>$ is an eigenstate with eigenvalue $e^{i\frac{\pi}{4}}$. Let's see how our algorithm tells us this. Keep in mind that $\phi = \frac{1}{8}$ in this case because the algorithm gives us $\phi$ in the expression $2\pi i \phi$, and for the T-gate, $2\pi i \phi = i\frac{\pi}{4} \implies \phi = \frac{1}{8}$
# First, we import every library/module that will be needed.
# +
from qiskit import *
#import qiskit.aqua as aqua
#from qiskit.quantum_info import Pauli
#from qiskit.aqua.operators.primitive_ops import PauliOp
from qiskit.circuit.library import PhaseEstimation
from qiskit import QuantumCircuit
from matplotlib import pyplot as plt
import numpy as np
from qiskit.chemistry.drivers import PySCFDriver, UnitsType
from qiskit.chemistry.drivers import Molecule
# -
# Now, we write the function `buildControlledT`. As the name suggests, it creates our T-gate and applies it in that special way I alluded to earlier in the basic summary of the IPEA. An even simpler way of describing this function would be that this function performs one iteration of the IPEA. Let's go into more detail and take a look at what happens within one iteration (this description is meant not only to delineate the working of the IPEA, but also to help tracing the code below easier. I highly suggest you read each bullet point, and then find in the code below where this is being implemented before moving on to the next point)-
#
# - The first thing this function does is create and initialise the circuit. In every iteration, the auxiliary qubit ($q_0$ in our case) will be initialized in the state $\left |+\right> = \frac{1}{\sqrt{2}}(\left|0\right> + \left|1\right>)$, and the other qubit ($q_1$) will be initialised in the state $\left|\psi\right>$ (this is the eigenvector of $U$ from above; in our case, $U=T$ and $\left|\psi\right>=\left|1\right>$). So at this point, our collective state is $\left |+\right> \otimes \left|\psi\right>$ with $\left|\psi\right> = \left|1\right>$ in our case
#
# <br>
#
# - It then applies the CT (controlled-T) gate to the circuit 2<sup>m</sup> times, where the input m is a number that will range from 0 to $n - 1$, where $n$ is the index of the last $\phi_k$ in the expression $\phi = \frac{\phi_1}{2} + \frac{\phi_2}{4} + \frac{\phi_3}{8} + ... + \frac{\phi_n}{2^n}$. In the k<sup>th</sup> iteration, we will apply the T-gate $2^{n-k}$ times.
#
# The CT gate functions exactly like a CNOT gate - the only difference is that if the control qubit, in this case always $q_0$, is in state $\left|1\right>$, the $T$ gate will be applied to the target qubit instead of the $X$ gate. The target qubit in our case will be $q_1$. Now, let's see what happens when this gate is applied 2<sup>m</sup> times in terms of the quantum mechanics:
#
# We were in the state $\left |+\right> \otimes \left|\psi\right> = \frac{1}{\sqrt{2}}(\left|0\right> + \left|1\right>)\otimes \left|\psi\right>$. Let's see what happens to that state for different values of m -
#
# If m = 0, then the T gate is applied $2^0 = 1$ time. So $\frac{1}{\sqrt{2}}(\left|0\right> + \left|1\right>)\otimes \left|\psi\right>$ becomes $\frac{1}{\sqrt{2}}(\left|0\right> \otimes \left|\psi\right>+ \left|1\right> \otimes e^{2\pi i\phi}\left|\psi\right>) = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i\phi}\left|1\right>) \otimes \left|\psi\right> = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i(\frac{\phi_1}{2} + \frac{\phi_2}{4} + \frac{\phi_3}{8} + ... + \frac{\phi_n}{2^n})}\left|1\right>) \otimes \left|\psi\right>$
#
# If m = 1, $\frac{1}{\sqrt{2}}(\left|0\right> + \left|1\right>)\otimes \left|\psi\right>$ becomes $\frac{1}{\sqrt{2}}(\left|0\right> \otimes \left|\psi\right>+ \left|1\right> \otimes e^{2\times2\pi i\phi}\left|\psi\right>) = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2\times2\pi i\phi}\left|1\right>) \otimes \left|\psi\right> = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i(\phi_1 + \frac{\phi_2}{2} + \frac{\phi_3}{4} + ... + \frac{\phi_n}{2^{n-1}})}\left|1\right>) \otimes \left|\psi\right> = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i\phi_1}e^{2\pi i(\frac{\phi_2}{2} + \frac{\phi_3}{4} + ... + \frac{\phi_n}{2^{n-1}})}\left|1\right>) \otimes \left|\psi\right> = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i(\frac{\phi_2}{2} + \frac{\phi_3}{4} + ... + \frac{\phi_n}{2^{n-1}})}\left|1\right>) \otimes \left|\psi\right>$
#
# If m = n - 1, $\frac{1}{\sqrt{2}}(\left|0\right> + \left|1\right>)\otimes \left|\psi\right>$ becomes $\frac{1}{\sqrt{2}}(\left|0\right> \otimes \left|\psi\right>+ \left|1\right> \otimes e^{2^{n-1}\times2\pi i\phi}\left|\psi\right>) = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2^{n-1}\times2\pi i\phi}\left|1\right>) \otimes \left|\psi\right> = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i(2^{n-2}\phi_1 + 2^{n-3}\phi_2 + 2^{n-4}\phi_3 + ... + 2^{-1}\phi_n})\left|1\right>) \otimes \left|\psi\right> = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2^{n-2}\times2\pi i\phi_1}e^{2^{n-3}\times2\pi i\phi_2}...e^{2^{-1}\times2\pi i\phi_n}\left|1\right>) \otimes \left|\psi\right> = \frac{1}{\sqrt{2}}(\left|0\right> + e^{2^{-1}\times2\pi i\phi_n}\left|1\right>) \otimes \left|\psi\right> $
#
# <br>
#
# - The function then performs a phase correction. Why do we need this phase correction?
#
# Well, first note the following important point, and then we'll get back to that question : if we apply the T-gate to the main qubit $2^{n - 1}$ times, as we will in the first iteration of the IPEA, then m = $n-1$, and as shown above, our circuit's quantum state will be $\frac{1}{\sqrt{2}}(\left|0\right> + e^{2^{-1}\times2\pi i\phi_n}\left|1\right>) \otimes \left|\psi\right>$. In this iteration, our goal is to find $\phi_n$. If $\phi_n = 0$, then this state is just $\left |+\right> \otimes \left|\psi\right>$, and if $\phi_n = 1$, then it's $\left |-\right> \otimes \left|\psi\right>$. This means that if we measure the auxiliary qubit in the x-basis (the basis {$\left |-\right>, \left |+\right>$}), the auxiliary qubit will become $\phi_n$. This is exactly what the "inverse QFT" in the code below is. It is using a Hadamard gate to convert the state $\frac{1}{\sqrt{2}}(\left|0\right> + e^{2^{-1}\times2\pi i\phi_n}\left|1\right>) \otimes \left|\psi\right>$ to either $\left |1\right> \otimes \left|\psi\right>$ if $\phi_n = 1$ (in which case measuring qubit 0 will yield 1), or $\left |0\right> \otimes \left|\psi\right>$ if $\phi_n = 0$ (in which case measuring qubit 0 will yield 0).
#
# Now, back to the original question - why do we need this phase correction? Imagine we apply the T-gate to the main qubit not $2^{n - 1}$ times, but $2^{n - 2}$ times, as we will in the second iteration of the IPEA. Note that in this iteration, our goal will be to find $\phi_{n-1}$. The state will go from $\frac{1}{\sqrt{2}}(\left|0\right> + \left|1\right>)\otimes \left|\psi\right>$ to $\frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i (\frac{\phi_{n-1}}{2} + \frac{\phi_n}{4})}\left|1\right>)\otimes \left|\psi\right>$. But ideally, we would like the state to be $\frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i (\frac{\phi_{n-1}}{2})}\left|1\right>)\otimes \left|\psi\right>$. That's because if that were the state, all it would take is a simple measurement in the x-basis for the auxiliary qubit to collapse to whatever value $\phi_{n-1}$ held (as shown above in the case of the first iteration). So, how do we get the state of the circuit into the ideal state? We can just apply the rotation operator $R_z(\theta)$ with $\theta = \frac{-2\pi\phi_n}{4}$. You can work out the effect of applying this operator to qubit 0. The state that will result will be $e^{i\frac{\pi\phi_n}{4}}\frac{1}{\sqrt{2}}(\left|0\right> + e^{2\pi i \frac{\phi_{n-1}}{2}}\left|1\right>)$. The overall phase can be ignored, and we have the ideal state. This is why we need the phase correction. To remove these unwanted phases and create the state to which only a measurement in the x-basis is necessary to complete the iteration. Generally speaking, for iteration k, $\theta = -2\pi\times(\frac{\phi_{n-k+2}}{4} + \frac{\phi_{n-k+3}}{8} + ... + \frac{\phi_n}{2^n})$
#
# <br>
#
# - Finally, the function does this inverse Quantum Fourier Transform (QFT), which is nothing but a measurement in the Hadamard basis as described in previous bullet point. It then returns the circuit ready for execution on a quantum computer.
def buildControlledT(p, m):
    """Assemble the two-qubit circuit for one IPEA iteration.

    p -- phase-correction angle applied to the ancilla via Rz
    m -- the controlled-T gate is applied 2**m times

    Returns a QuantumCircuit that measures the ancilla in the x-basis.
    """
    circuit = QuantumCircuit(2, 1)
    # ancilla (qubit 0) into |+>; main qubit (qubit 1) into |1>, the T eigenstate
    circuit.h(0)
    circuit.x(1)
    # controlled-phase(pi/4) == controlled-T, repeated 2**m times
    for _ in range(2 ** m):
        circuit.cp(np.pi / 4, 0, 1)
    # undo the phase contributed by already-determined lower-order bits
    circuit.rz(p, 0)
    # Hadamard followed by a Z-measurement == measurement in the x-basis
    circuit.h(0)
    circuit.measure([0], [0])
    return circuit
# The next function, as the name once again suggests, performs the IPEA algorithm. The above function just performed one iteration, and as you can see in the body of this function, there is a `for` loop in which `buildControlledT` is called, which implies that one iteration of this `for` loop represents one iteration of the IPEA. The `for` loop iterates k times (the input of the function). This tells us that the input k of the function signifies the number of iterations in the algorithm. But how many iterations do we want to feed in? Well, as long as $2^n\phi$ can be expressed in binary, we should be good. Remember that each iteration gives you one of the $\phi_k$'s (in particular, the k<sup>th</sup> iteration gives you $\phi_{n-k+1}$). This function does its iterations, and in each iteration, it is basically just creating the circuit with the two qubits, doing what it needs to do (the four bullet points above) to the circuit, running the circuit, recovering the result (the $\phi_k$ for that iteration), and appending it to the bits list. Once we get the bits, we can use the expression $\phi = \frac{\phi_1}{2} + \frac{\phi_2}{4} + \frac{\phi_3}{8} + ... + \frac{\phi_n}{2^n}$ to find $\phi$.
def IPEA(k, backend_string):
    """Run k iterations of iterative phase estimation and return the
    measured bits in the order produced (phi_n first, phi_1 last).

    backend_string -- 'qpu' for IonQ hardware, 'qasm' for the local Aer
                      simulator.  Relies on the module-level `provider`.
    """
    # get backend
    if backend_string == 'qpu':
        backend = provider.get_backend('ionq_qpu')
    elif backend_string == 'qasm':
        backend = Aer.get_backend('qasm_simulator')
    # measured bits, one appended per iteration
    bits = []
    # accumulated phase correction fed into the next iteration's Rz
    phase = 0.0
    # iteration with index i applies the controlled-T gate 2**i times
    for i in range(k-1, -1, -1):
        # construct the circuit
        qc = buildControlledT(phase, i)
        # run the circuit
        job = execute(qc, backend)
        if backend_string == 'qpu':
            # NOTE(review): these imports re-run every iteration; harmless,
            # but they could be hoisted to the top of the file.
            from qiskit.providers.jobstatus import JobStatus
            import time
            # poll until the hardware job completes
            while job.status() is not JobStatus.DONE:
                print("Job status is", job.status() )
                time.sleep(60)
            # grab a coffee! This can take up to a few minutes.
            # once we break out of that while loop, we know our job is finished
            print("Job status is", job.status() )
            print(job.get_counts()) # these counts are the “true” counts from the actual QPU Run
        # get result
        result = job.result()
        # the bit is the majority-vote measurement outcome over all shots
        this_bit = int(max(result.get_counts(), key=result.get_counts().get))
        print(result.get_counts())
        bits.append(this_bit)
        # halve the accumulated correction, then subtract this bit's
        # contribution — the order of these two statements matters.
        phase /= 2
        phase -= (2 * np.pi * this_bit / 4.0)
    return bits
# If you have made it this far, then you are doing very well! This algorithm is complicated. Good job! The final function that we will have to define is `eig_from_bits`, which will take in the list of $\phi_k$'s that the above function will return, and return the eigenvalue associated with $\left|\psi\right>$
def eig_from_bits(bits):
    """Reconstruct the phase phi from the IPEA measurement bits.

    bits[k] carries weight 1 / 2**(m - k) where m = len(bits), i.e.
    phi = sum_k bits[k] / 2**(m - k).  Uses enumerate instead of the
    original ``for k in range(len(bits))`` index loop.
    """
    eig = 0.0
    m = len(bits)
    for k, bit in enumerate(bits):
        eig += bit / 2 ** (m - k)
    #eig *= 2*np.pi
    return eig
# You have now understood the IPEA! Let's actually perform it and see if we can get our $\frac{1}{8}$
# +
# perform a 5-bit IPEA run on the local simulator
backend = 'qasm'
bits = IPEA(5, backend)
print(bits)
# re-construct the phase phi from the measured bits (expect 1/8 = 0.125)
eig = eig_from_bits(bits)
print(eig)
# -
# -
# It worked! Let's see if we can understand the effect of the choice of input `n` on the result obtained.
# +
# perform IPEA for n = 1..7 bits on the simulator and plot the
# reconstructed phase against n
n_values = []
eig_values = []
for i in range(1, 8):
    n_values.append(i)
    # perform IPEA
    backend = 'qasm'
    bits = IPEA(i, backend)
    # re-construct energy
    eig = eig_from_bits(bits)
    eig_values.append(eig)
n_values, eig_values = np.array(n_values), np.array(eig_values)
plt.plot(n_values, eig_values)
plt.xlabel('n (bits)', fontsize=15)
plt.ylabel(r'$\phi$', fontsize=15)
plt.title(r'$\phi$ vs. n', fontsize=15)
# -
# ### Now, let's try the same thing on actual IonQ hardware
# +
# same 5-bit run, now on IonQ hardware (expect queueing delays)
backend = 'qpu'
bits = IPEA(5, backend)
print(bits)
# re-construct energy
eig = eig_from_bits(bits)
print(eig)
# +
# sweep n = 1..7 on hardware and plot the reconstructed phase
n_values = []
eig_values = []
for i in range(1, 8):
    n_values.append(i)
    # perform IPEA
    backend = 'qpu'
    bits = IPEA(i, backend)
    # re-construct energy
    eig = eig_from_bits(bits)
    eig_values.append(eig)
n_values, eig_values = np.array(n_values), np.array(eig_values)
plt.plot(n_values, eig_values)
plt.xlabel('n (bits)', fontsize=15)
plt.ylabel(r'$\phi$', fontsize=15)
plt.title(r'$\phi$ vs. n', fontsize=15)
# -
# ### I hope you enjoyed going through this notebook!
| ipea/iterative-phase-estimation-algorithm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.1 64-bit
# name: python3
# ---
# ## Estudando Matrizes em Python com Numpy
# +
## Instalar a lib numpy
#pip install numpy
# -
import numpy as np
# Create a matrix with 3 rows and 3 columns
m = np.array([
    [1, 2, 3],
    [4, 5, 6],
    [7, 8, 9]
])
print(m)
# Access the value at an index (row 1, column 1)
print(m[1,1])
# Change the value at an index
m[0,1] = 7
print(m)
# View a specific column (use slicing)
print('Primeira coluna:', m[:,0])
print('Segunda coluna:', m[:,1])
print('Terceira coluna:', m[:,2])
print('Toda matriz:\n', m[:])
# View a specific row (use slicing)
print('Primeira linha:', m[0,:])
print('Segunda linha:', m[1,:])
print('Terceira linha:', m[2,:])
# number of dimensions
print(m.ndim)
# shape of the matrix (its order)
print(m.shape)
# number of elements in the matrix
print(m.size)
# 3x4 matrix of integer zeros
nova_matriz = np.zeros((3,4), dtype = int)
nova_matriz
# 2x2 matrix of integer ones
matriz_um = np.ones((2,2), dtype= int)
matriz_um
# len() of a 2-D array is its number of rows
len(nova_matriz)
| vetores_matrizes/matrizes_numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _cell_guid="25b1e1db-8bc5-7029-f719-91da523bd121"
# ## Introduction ##
#
# This is my first work of machine learning. The notebook is written in Python and was inspired by ["Exploring Survival on Titanic" by <NAME>, a Kernel in R on Kaggle][1].
#
#
# [1]: https://www.kaggle.com/mrisdal/titanic/exploring-survival-on-the-titanic
# + _cell_guid="2ce68358-02ec-556d-ba88-e773a50bc18b"
# %matplotlib inline
import numpy as np
import pandas as pd
import re as re
# Load the Titanic train/test splits; Age is read as float because it
# contains missing values (handled in the Age section below).
train = pd.read_csv('../input/train.csv', header = 0, dtype={'Age': np.float64})
test = pd.read_csv('../input/test.csv' , header = 0, dtype={'Age': np.float64})
full_data = [train, test]
print (train.info())
# + [markdown] _cell_guid="f9595646-65c9-6fc4-395f-0befc4d122ce"
# # Feature Engineering #
# + [markdown] _cell_guid="9b4c278b-aaca-e92c-ba77-b9b48379d1f1"
# ## 1. Pclass ##
# there is no missing value on this feature and already a numerical value. so let's check it's impact on our train set.
# + _cell_guid="4680d950-cf7d-a6ae-e813-535e2247d88e"
print (train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean())
# + [markdown] _cell_guid="5e70f81c-d4e2-1823-f0ba-a7c9b46984ff"
# ## 2. Sex ##
# + _cell_guid="6729681d-7915-1631-78d2-ddf3c35a424c"
print (train[["Sex", "Survived"]].groupby(['Sex'], as_index=False).mean())
# + [markdown] _cell_guid="7c58b7ee-d6a1-0cc9-2346-81c47846a54a"
# ## 3. SibSp and Parch ##
# With the number of siblings/spouse and the number of children/parents we can create new feature called Family Size.
# + _cell_guid="1a537f10-7cec-d0b7-8a34-fa9975655190"
# FamilySize = siblings/spouse + parents/children + the passenger themself
for dataset in full_data:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
print (train[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean())
# + [markdown] _cell_guid="e4861d3e-10db-1a23-8728-44e4d5251844"
# it seems has a good effect on our prediction but let's go further and categorize people to check whether they are alone in this ship or not.
# + _cell_guid="8c35e945-c928-e3bc-bd9c-d6ddb287e4c9"
# IsAlone = 1 when the passenger has no family aboard (FamilySize == 1)
for dataset in full_data:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
print (train[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean())
# + [markdown] _cell_guid="2780ca4e-7923-b845-0b6b-5f68a45f6b93"
# good! the impact is considerable.
# + [markdown] _cell_guid="8aa419c0-6614-7efc-7797-97f4a5158b19"
# ## 4. Embarked ##
# the embarked feature has some missing value. and we try to fill those with the most occurred value ( 'S' ).
# + _cell_guid="0e70e9af-d7cc-8c40-b7d4-2643889c376d"
# Fill the few missing embarkation ports with the most frequent value, 'S'
for dataset in full_data:
    dataset['Embarked'] = dataset['Embarked'].fillna('S')
print (train[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean())
# + [markdown] _cell_guid="e08c9ee8-d6d1-99b7-38bd-f0042c18a5d9"
# ## 5. Fare ##
# Fare also has some missing value and we will replace it with the median. then we categorize it into 4 ranges.
# + _cell_guid="a21335bd-4e8d-66e8-e6a5-5d2173b72d3b"
# Fill missing fares with the *train* median (avoids test-set leakage),
# then bucket into quartiles for inspection
for dataset in full_data:
    dataset['Fare'] = dataset['Fare'].fillna(train['Fare'].median())
train['CategoricalFare'] = pd.qcut(train['Fare'], 4)
print (train[['CategoricalFare', 'Survived']].groupby(['CategoricalFare'], as_index=False).mean())
# + [markdown] _cell_guid="ec8d1b22-a95f-9f16-77ab-7b60d2103852"
# ## 6. Age ##
# We have plenty of missing values in this feature, so we fill them with random numbers drawn between (mean - std) and (mean + std).
# Then we categorize age into 5 ranges.
# + _cell_guid="b90c2870-ce5d-ae0e-a33d-59e35445500e"
for dataset in full_data:
age_avg = dataset['Age'].mean()
age_std = dataset['Age'].std()
age_null_count = dataset['Age'].isnull().sum()
age_null_random_list = np.random.randint(age_avg - age_std, age_avg + age_std, size=age_null_count)
dataset['Age'][np.isnan(dataset['Age'])] = age_null_random_list
dataset['Age'] = dataset['Age'].astype(int)
train['CategoricalAge'] = pd.cut(train['Age'], 5)
print (train[['CategoricalAge', 'Survived']].groupby(['CategoricalAge'], as_index=False).mean())
# + [markdown] _cell_guid="bd25ec3f-b601-c1cc-d701-991fac1621f9"
# ## 7. Name ##
# inside this feature we can find the title of people.
# + _cell_guid="ad042f43-bfe0-ded0-4171-379d8caaa749"
def get_title(name):
    """Extract the honorific title (e.g. "Mr", "Miss") from a passenger name.

    Titles in the Titanic Name column appear as a word followed by a
    period, e.g. "Braund, Mr. Owen Harris" -> "Mr".  Returns "" when no
    title is found.
    """
    # Raw string: '\.' in a normal string literal is an invalid escape
    # sequence (DeprecationWarning today, SyntaxError in future Python).
    title_search = re.search(r' ([A-Za-z]+)\.', name)
    # If the title exists, extract and return it.
    if title_search:
        return title_search.group(1)
    return ""
# Derive a 'Title' column from each passenger's name.
for dataset in full_data:
    dataset['Title'] = dataset['Name'].apply(get_title)
# Cross-tabulate titles against sex to spot rare / sex-specific titles.
print(pd.crosstab(train['Title'], train['Sex']))
# + [markdown] _cell_guid="ca5fff8c-7a0d-6c18-2173-b8df6293c50a"
# So we have titles. Let's categorize them and check the title impact on survival rate.
# + _cell_guid="8357238b-98fe-632a-acd5-33674a6132ce"
# Collapse rare aristocratic/professional titles into a single 'Rare'
# bucket, and normalise French / alternate spellings to common English forms.
for dataset in full_data:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col',\
        'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
print (train[['Title', 'Survived']].groupby(['Title'], as_index=False).mean())
# + [markdown] _cell_guid="68fa2057-e27a-e252-0d1b-869c00a303ba"
# # Data Cleaning #
# great! now let's clean our data and map our features into numerical values.
# + _cell_guid="2502bb70-ce6f-2497-7331-7d1f80521470"
# Map every remaining feature to small integer codes so the sklearn
# classifiers below can consume the arrays directly.
for dataset in full_data:
    # Mapping Sex
    dataset['Sex'] = dataset['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
    # Mapping titles (titles outside the mapping become NaN, then 0)
    title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
    # Mapping Embarked
    dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
    # Mapping Fare — thresholds are the quartile edges produced by pd.qcut above
    dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
    dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
    dataset['Fare'] = dataset['Fare'].astype(int)
    # Mapping Age — band edges match the 5 equal-width bins from pd.cut above
    dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[ dataset['Age'] > 64, 'Age'] = 4
# Feature Selection: drop identifiers and raw columns superseded by the
# engineered features above.
drop_elements = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp',\
    'Parch', 'FamilySize']
train = train.drop(drop_elements, axis = 1)
train = train.drop(['CategoricalAge', 'CategoricalFare'], axis = 1)
test = test.drop(drop_elements, axis = 1)
print (train.head(10))
# Convert to plain NumPy arrays for sklearn (column 0 = Survived).
train = train.values
test = test.values
# + [markdown] _cell_guid="8aaaf2bc-e282-79cc-008a-e2e801b51b07"
# Good! Now we have a clean dataset and are ready to predict. Let's find which classifier works better on this dataset.
# + [markdown] _cell_guid="23b55b45-572b-7276-32e7-8f7a0dcfd25e"
# # Classifier Comparison #
# + _cell_guid="31ded30a-8de4-6507-e7f7-5805a0f1eaf1"
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import accuracy_score, log_loss
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
# Candidate models to compare with 10-fold stratified shuffle-split CV.
classifiers = [
    KNeighborsClassifier(3),
    SVC(probability=True),
    DecisionTreeClassifier(),
    RandomForestClassifier(),
    AdaBoostClassifier(),
    GradientBoostingClassifier(),
    GaussianNB(),
    LinearDiscriminantAnalysis(),
    QuadraticDiscriminantAnalysis(),
    LogisticRegression()]

log_cols = ["Classifier", "Accuracy"]
log = pd.DataFrame(columns=log_cols)

sss = StratifiedShuffleSplit(n_splits=10, test_size=0.1, random_state=0)

# train is a NumPy array: column 0 is Survived, the rest are features.
X = train[0::, 1::]
y = train[0::, 0]

# Sum each classifier's accuracy over the 10 splits, then average.
acc_dict = {}
for train_index, test_index in sss.split(X, y):
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    for clf in classifiers:
        name = clf.__class__.__name__
        clf.fit(X_train, y_train)
        train_predictions = clf.predict(X_test)
        acc = accuracy_score(y_test, train_predictions)
        if name in acc_dict:
            acc_dict[name] += acc
        else:
            acc_dict[name] = acc

for clf in acc_dict:
    acc_dict[clf] = acc_dict[clf] / 10.0
    log_entry = pd.DataFrame([[clf, acc_dict[clf]]], columns=log_cols)
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # build the log with pd.concat instead.
    log = pd.concat([log, log_entry], ignore_index=True)

plt.xlabel('Accuracy')
plt.title('Classifier Accuracy')
sns.set_color_codes("muted")
sns.barplot(x='Accuracy', y='Classifier', data=log, color="b")
# + [markdown] _cell_guid="438585cf-b7ad-73ba-49aa-87688ff21233"
# # Prediction #
# Now we can use the SVC classifier to predict our data.
# + _cell_guid="24967b57-732b-7180-bfd5-005beff75974"
# Retrain an SVC on the full training set and predict the held-out test set.
candidate_classifier = SVC()
candidate_classifier.fit(train[0::, 1::], train[0::, 0])
result = candidate_classifier.predict(test)
| titanic/titanic-best-working-classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Advanced lists - below is an illustration of the difference between append and extend.
# append adds its argument as a *single* element, so the list gains one
# item (here a nested list):
x = [1,2,3]
x.append([4,5])
print(x)  # -> [1, 2, 3, [4, 5]]
# extend iterates over its argument and adds each element individually:
x = [1,2,3]
x.extend([4,5])
print(x)  # -> [1, 2, 3, 4, 5]
# insert is another list method that can be useful. It takes an index place and whatever you want to be inserted in that spot as shown below.
# Note that x = [1,2,3,4,5]
x.insert(3,'foo')
x  # -> [1, 2, 3, 'foo', 4, 5]
# I can also remove items; remove deletes the first occurrence of the value.
x.remove('foo')
x  # -> [1, 2, 3, 4, 5]
#
| Advanced Strings, Sets, and the rest .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter 7. Ulysses’ Compass
# !pip install -q numpyro arviz causalgraphicalmodels daft
# +
import os
import warnings
import arviz as az
import matplotlib.pyplot as plt
import pandas as pd
import jax.numpy as jnp
from jax import lax, random, vmap
from jax.scipy.special import logsumexp
import numpyro
import numpyro.distributions as dist
import numpyro.optim as optim
from numpyro.diagnostics import print_summary
from numpyro.infer import Predictive, SVI, Trace_ELBO, init_to_value, log_likelihood
from numpyro.infer.autoguide import AutoLaplaceApproximation
if "SVG" in os.environ:
# %config InlineBackend.figure_formats = ["svg"]
warnings.formatwarning = lambda message, category, *args, **kwargs: "{}: {}\n".format(
category.__name__, message
)
az.style.use("arviz-darkgrid")
numpyro.set_platform("cpu")
# -
# ### Code 7.1
sppnames = [
"afarensis",
"africanus",
"habilis",
"boisei",
"rudolfensis",
"ergaster",
"sapiens",
]
brainvolcc = jnp.array([438, 452, 612, 521, 752, 871, 1350])
masskg = jnp.array([37.0, 35.5, 34.5, 41.5, 55.5, 61.0, 53.5])
d = pd.DataFrame({"species": sppnames, "brain": brainvolcc, "mass": masskg})
# ### Code 7.2
d["mass_std"] = (d.mass - d.mass.mean()) / d.mass.std()
d["brain_std"] = d.brain / d.brain.max()
# ### Code 7.3
# +
def model(mass_std, brain_std=None):
    """m7.1: linear regression of standardized brain volume on standardized mass."""
    a = numpyro.sample("a", dist.Normal(0.5, 1))
    b = numpyro.sample("b", dist.Normal(0, 10))
    # sigma is sampled on the log scale so the likelihood scale stays positive.
    log_sigma = numpyro.sample("log_sigma", dist.Normal(0, 1))
    mu = numpyro.deterministic("mu", a + b * mass_std)
    numpyro.sample("brain_std", dist.Normal(mu, jnp.exp(log_sigma)), obs=brain_std)


# Fit a Laplace (quadratic) approximation by maximizing the ELBO with Adam.
m7_1 = AutoLaplaceApproximation(model)
svi = SVI(
    model,
    m7_1,
    optim.Adam(0.3),
    Trace_ELBO(),
    mass_std=d.mass_std.values,
    brain_std=d.brain_std.values,
)
svi_result = svi.run(random.PRNGKey(0), 1000)
p7_1 = svi_result.params
# -
# ### Code 7.4
# +
def model(mass_std, brain_std):
intercept = numpyro.sample("intercept", dist.Normal(0, 10))
b_mass_std = numpyro.sample("b_mass_std", dist.Normal(0, 10))
sigma = numpyro.sample("sigma", dist.HalfCauchy(2))
mu = intercept + b_mass_std * mass_std
numpyro.sample("brain_std", dist.Normal(mu, sigma), obs=brain_std)
m7_1_OLS = AutoLaplaceApproximation(model)
svi = SVI(
model,
m7_1_OLS,
optim=optim.Adam(0.01),
loss=Trace_ELBO(),
mass_std=d.mass_std.values,
brain_std=d.brain_std.values,
)
svi_result = svi.run(random.PRNGKey(0), 1000)
p7_1_OLS = svi_result.params
post = m7_1_OLS.sample_posterior(random.PRNGKey(1), p7_1_OLS, (1000,))
# -
# ### Code 7.5
post = m7_1.sample_posterior(random.PRNGKey(12), p7_1, (1000,))
s = Predictive(m7_1.model, post)(random.PRNGKey(2), d.mass_std.values)
r = jnp.mean(s["brain_std"], 0) - d.brain_std.values
resid_var = jnp.var(r, ddof=1)
outcome_var = jnp.var(d.brain_std.values, ddof=1)
1 - resid_var / outcome_var
# ### Code 7.6
def R2_is_bad(quap_fit):
    """Return R^2 of the posterior-mean predictions for a fitted quadratic
    approximation (used to illustrate why R^2 is a poor model criterion)."""
    approximation, fitted_params = quap_fit
    posterior = approximation.sample_posterior(random.PRNGKey(1), fitted_params, (1000,))
    simulated = Predictive(approximation.model, posterior)(random.PRNGKey(2), d.mass_std.values)
    residuals = jnp.mean(simulated["brain_std"], 0) - d.brain_std.values
    resid_var = jnp.var(residuals, ddof=1)
    outcome_var = jnp.var(d.brain_std.values, ddof=1)
    return 1 - resid_var / outcome_var
# ### Code 7.7
# +
def model(mass_std, brain_std=None):
a = numpyro.sample("a", dist.Normal(0.5, 1))
b = numpyro.sample("b", dist.Normal(0, 10).expand([2]))
log_sigma = numpyro.sample("log_sigma", dist.Normal(0, 1))
mu = numpyro.deterministic("mu", a + b[0] * mass_std + b[1] * mass_std ** 2)
numpyro.sample("brain_std", dist.Normal(mu, jnp.exp(log_sigma)), obs=brain_std)
m7_2 = AutoLaplaceApproximation(
model, init_loc_fn=init_to_value(values={"b": jnp.repeat(0.0, 2)})
)
svi = SVI(
model,
m7_2,
optim.Adam(0.3),
Trace_ELBO(),
mass_std=d.mass_std.values,
brain_std=d.brain_std.values,
)
svi_result = svi.run(random.PRNGKey(0), 2000)
p7_2 = svi_result.params
# -
# ### Code 7.8
# +
def model(mass_std, brain_std=None):
a = numpyro.sample("a", dist.Normal(0.5, 1))
b = numpyro.sample("b", dist.Normal(0, 10).expand([3]))
log_sigma = numpyro.sample("log_sigma", dist.Normal(0, 1))
mu = numpyro.deterministic(
"mu", a + b[0] * mass_std + b[1] * mass_std ** 2 + b[2] * mass_std ** 3
)
numpyro.sample("brain_std", dist.Normal(mu, jnp.exp(log_sigma)), obs=brain_std)
m7_3 = AutoLaplaceApproximation(
model, init_loc_fn=init_to_value(values={"b": jnp.repeat(0.0, 3)})
)
svi = SVI(
model,
m7_3,
optim.Adam(0.01),
Trace_ELBO(),
mass_std=d.mass_std.values,
brain_std=d.brain_std.values,
)
svi_result = svi.run(random.PRNGKey(0), 2000)
p7_3 = svi_result.params
def model(mass_std, brain_std=None):
a = numpyro.sample("a", dist.Normal(0.5, 1))
b = numpyro.sample("b", dist.Normal(0, 10).expand([4]))
log_sigma = numpyro.sample("log_sigma", dist.Normal(0, 1))
mu = numpyro.deterministic(
"mu", a + jnp.sum(b * jnp.power(mass_std[..., None], jnp.arange(1, 5)), -1)
)
numpyro.sample("brain_std", dist.Normal(mu, jnp.exp(log_sigma)), obs=brain_std)
m7_4 = AutoLaplaceApproximation(
model, init_loc_fn=init_to_value(values={"b": jnp.repeat(0.0, 4)})
)
svi = SVI(
model,
m7_4,
optim.Adam(0.01),
Trace_ELBO(),
mass_std=d.mass_std.values,
brain_std=d.brain_std.values,
)
svi_result = svi.run(random.PRNGKey(0), 2000)
p7_4 = svi_result.params
def model(mass_std, brain_std=None):
a = numpyro.sample("a", dist.Normal(0.5, 1))
b = numpyro.sample("b", dist.Normal(0, 10).expand([5]))
log_sigma = numpyro.sample("log_sigma", dist.Normal(0, 1))
mu = numpyro.deterministic(
"mu", a + jnp.sum(b * jnp.power(mass_std[..., None], jnp.arange(1, 6)), -1)
)
numpyro.sample("brain_std", dist.Normal(mu, jnp.exp(log_sigma)), obs=brain_std)
m7_5 = AutoLaplaceApproximation(
model, init_loc_fn=init_to_value(values={"b": jnp.repeat(0.0, 5)})
)
svi = SVI(
model,
m7_5,
optim.Adam(0.01),
Trace_ELBO(),
mass_std=d.mass_std.values,
brain_std=d.brain_std.values,
)
svi_result = svi.run(random.PRNGKey(0), 2000)
p7_5 = svi_result.params
# -
# ### Code 7.9
# +
def model(mass_std, brain_std=None):
a = numpyro.sample("a", dist.Normal(0.5, 1))
b = numpyro.sample("b", dist.Normal(0, 10).expand([6]))
log_sigma = numpyro.sample("log_sigma", dist.Normal(0, 1))
mu = numpyro.deterministic(
"mu", a + jnp.sum(b * jnp.power(mass_std[..., None], jnp.arange(1, 7)), -1)
)
numpyro.sample("brain_std", dist.Normal(mu, jnp.exp(log_sigma)), obs=brain_std)
m7_6 = AutoLaplaceApproximation(
model, init_loc_fn=init_to_value(values={"b": jnp.repeat(0.0, 6)})
)
svi = SVI(
model,
m7_6,
optim.Adam(0.003),
Trace_ELBO(),
mass_std=d.mass_std.values,
brain_std=d.brain_std.values,
)
svi_result = svi.run(random.PRNGKey(0), 5000)
p7_6 = svi_result.params
# -
# ### Code 7.10
post = m7_1.sample_posterior(random.PRNGKey(1), p7_1, (1000,))
mass_seq = jnp.linspace(d.mass_std.min(), d.mass_std.max(), num=100)
l = Predictive(m7_1.model, post, return_sites=["mu"])(
random.PRNGKey(2), mass_std=mass_seq
)["mu"]
mu = jnp.mean(l, 0)
ci = jnp.percentile(l, jnp.array([5.5, 94.5]), 0)
az.plot_pair(d[["mass_std", "brain_std"]].to_dict("list"))
plt.plot(mass_seq, mu, "k")
plt.fill_between(mass_seq, ci[0], ci[1], color="k", alpha=0.2)
plt.title("m7.1: R^2 = {:0.2f}".format(R2_is_bad((m7_1, p7_1)).item()))
plt.show()
# ### Code 7.11
i = 1
d_minus_i = d.drop(i)
# ### Code 7.12
p = jnp.array([0.3, 0.7])
-jnp.sum(p * jnp.log(p))
# ### Code 7.13
# +
def lppd_fn(seed, quad, params, num_samples=int(1e4)):
    """Log pointwise predictive density of the brain-size data under `quad`.

    Fix: the original ignored both `seed` (always using PRNGKey(1)) and
    `num_samples` (always drawing 1e4 posterior samples); they are honoured
    now.  The default for `num_samples` is raised from 1000 to int(1e4) so
    callers relying on the old hard-coded behaviour see identical results.
    """
    post = quad.sample_posterior(seed, params, (num_samples,))
    logprob = log_likelihood(quad.model, post, d.mass_std.values, d.brain_std.values)
    logprob = logprob["brain_std"]
    # Monte Carlo average over posterior draws, computed on the log scale.
    return logsumexp(logprob, 0) - jnp.log(logprob.shape[0])


lppd_fn(random.PRNGKey(1), m7_1, p7_1, int(1e4))
# -
# ### Code 7.14
post = m7_1.sample_posterior(random.PRNGKey(1), p7_1, (int(1e4),))
logprob = log_likelihood(m7_1.model, post, d.mass_std.values, d.brain_std.values)
logprob = logprob["brain_std"]
n = logprob.shape[1]
ns = logprob.shape[0]
f = lambda i: logsumexp(logprob[:, i]) - jnp.log(ns)
lppd = vmap(f)(jnp.arange(n))
lppd
# ### Code 7.15
[
jnp.sum(lppd_fn(random.PRNGKey(1), m[0], m[1])).item()
for m in (
(m7_1, p7_1),
(m7_2, p7_2),
(m7_3, p7_3),
(m7_4, p7_4),
(m7_5, p7_5),
(m7_6, p7_6),
)
]
# ### Code 7.16
# +
def model(mm, y, b_sigma):
a = numpyro.param("a", jnp.array([0.0]))
Bvec = a
k = mm.shape[1]
if k > 1:
b = numpyro.sample("b", dist.Normal(0, b_sigma).expand([k - 1]))
Bvec = jnp.concatenate([Bvec, b])
mu = jnp.matmul(mm, Bvec)
numpyro.sample("y", dist.Normal(mu, 1), obs=y)
def sim_train_test(i, N=20, k=3, rho=[0.15, -0.4], b_sigma=100):
    """Simulate one in-sample/out-of-sample deviance pair for a k-parameter model.

    Draws N training rows from a multivariate normal whose first column is
    the outcome and whose next columns correlate with it per `rho`, fits the
    regression by SVI, then scores a fresh test sample of the same size.
    Returns jnp.stack([dev_train, dev_test]).
    NOTE(review): the mutable default `rho=[0.15, -0.4]` is never mutated
    here, so it is harmless, but a tuple would be safer.
    """
    # Correlation matrix: outcome (column 0) correlates with the first
    # len(rho) predictors; remaining predictors are pure noise.
    n_dim = max(k, 3)
    Rho = jnp.identity(n_dim)
    Rho = Rho.at[1 : len(rho) + 1, 0].set(jnp.array(rho))
    Rho = Rho.at[0, 1 : len(rho) + 1].set(jnp.array(rho))
    # fold_in makes every draw deterministic per simulation index i.
    X_train = dist.MultivariateNormal(jnp.zeros(n_dim), Rho).sample(
        random.fold_in(random.PRNGKey(0), i), (N,)
    )
    # Design matrix: intercept column plus (k - 1) predictors.
    mm_train = jnp.ones((N, 1))
    if k > 1:
        mm_train = jnp.concatenate([mm_train, X_train[:, 1:k]], axis=1)
    if k > 1:
        m = AutoLaplaceApproximation(
            model, init_loc_fn=init_to_value(values={"b": jnp.zeros(k - 1)})
        )
    else:
        # Intercept-only model: no latent sites for the guide to approximate.
        m = lambda mm, y, b_sigma: None
    svi = SVI(
        model,
        m,
        optim.Adam(0.3),
        Trace_ELBO(),
        mm=mm_train,
        y=X_train[:, 0],
        b_sigma=b_sigma,
    )
    svi_result = svi.run(random.fold_in(random.PRNGKey(1), i), 1000, progress_bar=False)
    params = svi_result.params
    coefs = params["a"]
    if k > 1:
        coefs = jnp.concatenate([coefs, m.median(params)["b"]])
    # In-sample deviance: -2 * log-likelihood at the fitted coefficients
    # (Normal with the default scale of 1).
    logprob = dist.Normal(jnp.matmul(mm_train, coefs)).log_prob(X_train[:, 0])
    dev_train = (-2) * jnp.sum(logprob)
    # Fresh test sample from the same generating process.
    X_test = dist.MultivariateNormal(jnp.zeros(n_dim), Rho).sample(
        random.fold_in(random.PRNGKey(2), i), (N,)
    )
    mm_test = jnp.ones((N, 1))
    if k > 1:
        mm_test = jnp.concatenate([mm_test, X_test[:, 1:k]], axis=1)
    logprob = dist.Normal(jnp.matmul(mm_test, coefs)).log_prob(X_test[:, 0])
    dev_test = (-2) * jnp.sum(logprob)
    return jnp.stack([dev_train, dev_test])
def dev_fn(N, k):
print(k)
r = lax.map(lambda i: sim_train_test(i, N, k), jnp.arange((int(1e4))))
return jnp.concatenate([jnp.mean(r, 0), jnp.std(r, 0)])
N = 20
kseq = range(1, 6)
dev = jnp.stack([dev_fn(N, k) for k in kseq], axis=1)
# -
# ### Code 7.17
def dev_fn(N, k):
print(k)
r = vmap(lambda i: sim_train_test(i, N, k))(jnp.arange((int(1e4))))
return jnp.concatenate([jnp.mean(r, 0), jnp.std(r, 0)])
# ### Code 7.18
plt.subplot(
ylim=(jnp.min(dev[0]).item() - 5, jnp.max(dev[0]).item() + 12),
xlim=(0.9, 5.2),
xlabel="number of parameters",
ylabel="deviance",
)
plt.title("N = {}".format(N))
plt.scatter(jnp.arange(1, 6), dev[0], s=80, color="b")
plt.scatter(jnp.arange(1.1, 6), dev[1], s=80, color="k")
pts_int = (dev[0] - dev[2], dev[0] + dev[2])
pts_out = (dev[1] - dev[3], dev[1] + dev[3])
plt.vlines(jnp.arange(1, 6), pts_int[0], pts_int[1], color="b")
plt.vlines(jnp.arange(1.1, 6), pts_out[0], pts_out[1], color="k")
plt.annotate(
"in", (2, dev[0][1]), xytext=(-25, -5), textcoords="offset pixels", color="b"
)
plt.annotate("out", (2.1, dev[1][1]), xytext=(10, -5), textcoords="offset pixels")
plt.annotate(
"+1SD",
(2.1, pts_out[1][1]),
xytext=(10, -5),
textcoords="offset pixels",
fontsize=12,
)
plt.annotate(
"-1SD",
(2.1, pts_out[0][1]),
xytext=(10, -5),
textcoords="offset pixels",
fontsize=12,
)
plt.show()
# ### Code 7.19
# +
cars = pd.read_csv("../data/cars.csv", sep=",")
def model(speed, cars_dist):
a = numpyro.sample("a", dist.Normal(0, 100))
b = numpyro.sample("b", dist.Normal(0, 10))
sigma = numpyro.sample("sigma", dist.Exponential(1))
mu = a + b * speed
numpyro.sample("dist", dist.Normal(mu, sigma), obs=cars_dist)
m = AutoLaplaceApproximation(model)
svi = SVI(
model,
m,
optim.Adam(1),
Trace_ELBO(),
speed=cars.speed.values,
cars_dist=cars.dist.values,
)
svi_result = svi.run(random.PRNGKey(0), 5000)
params = svi_result.params
post = m.sample_posterior(random.PRNGKey(94), params, (1000,))
# -
# ### Code 7.20
# +
n_samples = 1000
def logprob_fn(s):
mu = post["a"][s] + post["b"][s] * cars.speed.values
return dist.Normal(mu, post["sigma"][s]).log_prob(cars.dist.values)
logprob = vmap(logprob_fn, out_axes=1)(jnp.arange(n_samples))
# -
# ### Code 7.21
n_cases = cars.shape[0]
lppd = logsumexp(logprob, 1) - jnp.log(n_samples)
# ### Code 7.22
pWAIC = jnp.var(logprob, 1)
# ### Code 7.23
-2 * (jnp.sum(lppd) - jnp.sum(pWAIC))
# ### Code 7.24
waic_vec = -2 * (lppd - pWAIC)
jnp.sqrt(n_cases * jnp.var(waic_vec))
# ### Code 7.25
# +
with numpyro.handlers.seed(rng_seed=71):
# number of plants
N = 100
# simulate initial heights
h0 = numpyro.sample("h0", dist.Normal(10, 2).expand([N]))
# assign treatments and simulate fungus and growth
treatment = jnp.repeat(jnp.arange(2), repeats=N // 2)
fungus = numpyro.sample(
"fungus", dist.Binomial(total_count=1, probs=(0.5 - treatment * 0.4))
)
h1 = h0 + numpyro.sample("diff", dist.Normal(5 - 3 * fungus))
# compose a clean data frame
d = pd.DataFrame({"h0": h0, "h1": h1, "treatment": treatment, "fungus": fungus})
def model(h0, h1):
p = numpyro.sample("p", dist.LogNormal(0, 0.25))
sigma = numpyro.sample("sigma", dist.Exponential(1))
mu = h0 * p
numpyro.sample("h1", dist.Normal(mu, sigma), obs=h1)
m6_6 = AutoLaplaceApproximation(model)
svi = SVI(model, m6_6, optim.Adam(0.1), Trace_ELBO(), h0=d.h0.values, h1=d.h1.values)
svi_result = svi.run(random.PRNGKey(0), 1000)
p6_6 = svi_result.params
def model(treatment, fungus, h0, h1):
a = numpyro.sample("a", dist.LogNormal(0, 0.2))
bt = numpyro.sample("bt", dist.Normal(0, 0.5))
bf = numpyro.sample("bf", dist.Normal(0, 0.5))
sigma = numpyro.sample("sigma", dist.Exponential(1))
p = a + bt * treatment + bf * fungus
mu = h0 * p
numpyro.sample("h1", dist.Normal(mu, sigma), obs=h1)
m6_7 = AutoLaplaceApproximation(model)
svi = SVI(
model,
m6_7,
optim.Adam(0.3),
Trace_ELBO(),
treatment=d.treatment.values,
fungus=d.fungus.values,
h0=d.h0.values,
h1=d.h1.values,
)
svi_result = svi.run(random.PRNGKey(0), 1000)
p6_7 = svi_result.params
def model(treatment, h0, h1):
a = numpyro.sample("a", dist.LogNormal(0, 0.2))
bt = numpyro.sample("bt", dist.Normal(0, 0.5))
sigma = numpyro.sample("sigma", dist.Exponential(1))
p = a + bt * treatment
mu = h0 * p
numpyro.sample("h1", dist.Normal(mu, sigma), obs=h1)
m6_8 = AutoLaplaceApproximation(model)
svi = SVI(
model,
m6_8,
optim.Adam(1),
Trace_ELBO(),
treatment=d.treatment.values,
h0=d.h0.values,
h1=d.h1.values,
)
svi_result = svi.run(random.PRNGKey(0), 1000)
p6_8 = svi_result.params
post = m6_7.sample_posterior(random.PRNGKey(11), p6_7, (1000,))
logprob = log_likelihood(
m6_7.model,
post,
treatment=d.treatment.values,
fungus=d.fungus.values,
h0=d.h0.values,
h1=d.h1.values,
)
az6_7 = az.from_dict(sample_stats={"log_likelihood": logprob["h1"][None, ...]})
az.waic(az6_7, scale="deviance")
# -
# ### Code 7.26
post = m6_6.sample_posterior(random.PRNGKey(77), p6_6, (1000,))
logprob = log_likelihood(m6_6.model, post, h0=d.h0.values, h1=d.h1.values)
az6_6 = az.from_dict({}, log_likelihood={"h1": logprob["h1"][None, ...]})
post = m6_7.sample_posterior(random.PRNGKey(77), p6_7, (1000,))
logprob = log_likelihood(
m6_7.model,
post,
treatment=d.treatment.values,
fungus=d.fungus.values,
h0=d.h0.values,
h1=d.h1.values,
)
az6_7 = az.from_dict({}, log_likelihood={"h1": logprob["h1"][None, ...]})
post = m6_8.sample_posterior(random.PRNGKey(77), p6_8, (1000,))
logprob = log_likelihood(
m6_8.model, post, treatment=d.treatment.values, h0=d.h0.values, h1=d.h1.values
)
az6_8 = az.from_dict({}, log_likelihood={"h1": logprob["h1"][None, ...]})
az.compare({"m6.6": az6_6, "m6.7": az6_7, "m6.8": az6_8}, ic="waic", scale="deviance")
# ### Code 7.27
post = m6_7.sample_posterior(random.PRNGKey(91), p6_7, (1000,))
logprob = log_likelihood(
m6_7.model,
post,
treatment=d.treatment.values,
fungus=d.fungus.values,
h0=d.h0.values,
h1=d.h1.values,
)
az6_7 = az.from_dict({}, log_likelihood={"h1": logprob["h1"][None, ...]})
waic_m6_7 = az.waic(az6_7, pointwise=True, scale="deviance")
post = m6_8.sample_posterior(random.PRNGKey(91), p6_8, (1000,))
logprob = log_likelihood(
m6_8.model, post, treatment=d.treatment.values, h0=d.h0.values, h1=d.h1.values
)
az6_8 = az.from_dict({}, log_likelihood={"h1": logprob["h1"][None, ...]})
waic_m6_8 = az.waic(az6_8, pointwise=True, scale="deviance")
n = waic_m6_7.n_data_points
diff_m6_7_m6_8 = waic_m6_7.waic_i.values - waic_m6_8.waic_i.values
jnp.sqrt(n * jnp.var(diff_m6_7_m6_8))
# ### Code 7.28
40.0 + jnp.array([-1, 1]) * 10.4 * 2.6
# ### Code 7.29
compare = az.compare(
{"m6.6": az6_6, "m6.7": az6_7, "m6.8": az6_8}, ic="waic", scale="deviance"
)
az.plot_compare(compare)
plt.show()
# ### Code 7.30
post = m6_6.sample_posterior(random.PRNGKey(92), p6_6, (1000,))
logprob = log_likelihood(m6_6.model, post, h0=d.h0.values, h1=d.h1.values)
az6_6 = az.from_dict({}, log_likelihood={"h1": logprob["h1"][None, ...]})
waic_m6_6 = az.waic(az6_6, pointwise=True, scale="deviance")
diff_m6_6_m6_8 = waic_m6_6.waic_i.values - waic_m6_8.waic_i.values
jnp.sqrt(n * jnp.var(diff_m6_6_m6_8))
# ### Code 7.31
post = m6_6.sample_posterior(random.PRNGKey(93), p6_6, (1000,))
logprob = log_likelihood(m6_6.model, post, h0=d.h0.values, h1=d.h1.values)
az6_6 = az.from_dict({}, log_likelihood={"h1": logprob["h1"][None, ...]})
waic_m6_6 = az.waic(az6_6, pointwise=True, scale="deviance")
post = m6_7.sample_posterior(random.PRNGKey(93), p6_7, (1000,))
logprob = log_likelihood(
m6_7.model,
post,
treatment=d.treatment.values,
fungus=d.fungus.values,
h0=d.h0.values,
h1=d.h1.values,
)
az6_7 = az.from_dict({}, log_likelihood={"h1": logprob["h1"][None, ...]})
waic_m6_7 = az.waic(az6_7, pointwise=True, scale="deviance")
post = m6_8.sample_posterior(random.PRNGKey(93), p6_8, (1000,))
logprob = log_likelihood(
m6_8.model, post, treatment=d.treatment.values, h0=d.h0.values, h1=d.h1.values
)
az6_8 = az.from_dict({}, log_likelihood={"h1": logprob["h1"][None, ...]})
waic_m6_8 = az.waic(az6_8, pointwise=True, scale="deviance")
dSE = lambda waic1, waic2: jnp.sqrt(
n * jnp.var(waic1.waic_i.values - waic2.waic_i.values)
)
data = {"m6.6": waic_m6_6, "m6.7": waic_m6_7, "m6.8": waic_m6_8}
pd.DataFrame(
{
row: {col: dSE(row_val, col_val) for col, col_val in data.items()}
for row, row_val in data.items()
}
)
# ### Code 7.32
# +
WaffleDivorce = pd.read_csv("../data/WaffleDivorce.csv", sep=";")
d = WaffleDivorce
d["A"] = d.MedianAgeMarriage.pipe(lambda x: (x - x.mean()) / x.std())
d["D"] = d.Divorce.pipe(lambda x: (x - x.mean()) / x.std())
d["M"] = d.Marriage.pipe(lambda x: (x - x.mean()) / x.std())
def model(A, D=None):
a = numpyro.sample("a", dist.Normal(0, 0.2))
bA = numpyro.sample("bA", dist.Normal(0, 0.5))
sigma = numpyro.sample("sigma", dist.Exponential(1))
mu = numpyro.deterministic("mu", a + bA * A)
numpyro.sample("D", dist.Normal(mu, sigma), obs=D)
m5_1 = AutoLaplaceApproximation(model)
svi = SVI(model, m5_1, optim.Adam(1), Trace_ELBO(), A=d.A.values, D=d.D.values)
svi_result = svi.run(random.PRNGKey(0), 1000)
p5_1 = svi_result.params
def model(M, D=None):
a = numpyro.sample("a", dist.Normal(0, 0.2))
bM = numpyro.sample("bM", dist.Normal(0, 0.5))
sigma = numpyro.sample("sigma", dist.Exponential(1))
mu = a + bM * M
numpyro.sample("D", dist.Normal(mu, sigma), obs=D)
m5_2 = AutoLaplaceApproximation(model)
svi = SVI(model, m5_2, optim.Adam(1), Trace_ELBO(), M=d.M.values, D=d.D.values)
svi_result = svi.run(random.PRNGKey(0), 1000)
p5_2 = svi_result.params
def model(M, A, D=None):
a = numpyro.sample("a", dist.Normal(0, 0.2))
bM = numpyro.sample("bM", dist.Normal(0, 0.5))
bA = numpyro.sample("bA", dist.Normal(0, 0.5))
sigma = numpyro.sample("sigma", dist.Exponential(1))
mu = numpyro.deterministic("mu", a + bM * M + bA * A)
numpyro.sample("D", dist.Normal(mu, sigma), obs=D)
m5_3 = AutoLaplaceApproximation(model)
svi = SVI(
model, m5_3, optim.Adam(1), Trace_ELBO(), M=d.M.values, A=d.A.values, D=d.D.values
)
svi_result = svi.run(random.PRNGKey(0), 1000)
p5_3 = svi_result.params
# -
# ### Code 7.33
post = m5_1.sample_posterior(random.PRNGKey(24071847), p5_1, (1000,))
logprob = log_likelihood(m5_1.model, post, A=d.A.values, D=d.D.values)["D"]
az5_1 = az.from_dict(
posterior={k: v[None, ...] for k, v in post.items()},
log_likelihood={"D": logprob[None, ...]},
)
post = m5_2.sample_posterior(random.PRNGKey(24071847), p5_2, (1000,))
logprob = log_likelihood(m5_2.model, post, M=d.M.values, D=d.D.values)["D"]
az5_2 = az.from_dict(
posterior={k: v[None, ...] for k, v in post.items()},
log_likelihood={"D": logprob[None, ...]},
)
post = m5_3.sample_posterior(random.PRNGKey(24071847), p5_3, (1000,))
logprob = log_likelihood(m5_3.model, post, A=d.A.values, M=d.M.values, D=d.D.values)[
"D"
]
az5_3 = az.from_dict(
posterior={k: v[None, ...] for k, v in post.items()},
log_likelihood={"D": logprob[None, ...]},
)
az.compare({"m5.1": az5_1, "m5.2": az5_2, "m5.3": az5_3}, ic="waic", scale="deviance")
# ### Code 7.34
PSIS_m5_3 = az.loo(az5_3, pointwise=True, scale="deviance")
WAIC_m5_3 = az.waic(az5_3, pointwise=True, scale="deviance")
penalty = az5_3.log_likelihood.stack(sample=("chain", "draw")).var(dim="sample")
plt.plot(PSIS_m5_3.pareto_k.values, penalty.D.values, "o", mfc="none")
plt.gca().set(xlabel="PSIS Pareto k", ylabel="WAIC penalty")
plt.show()
# ### Code 7.35
# +
def model(M, A, D=None):
a = numpyro.sample("a", dist.Normal(0, 0.2))
bM = numpyro.sample("bM", dist.Normal(0, 0.5))
bA = numpyro.sample("bA", dist.Normal(0, 0.5))
sigma = numpyro.sample("sigma", dist.Exponential(1))
mu = a + bM * M + bA * A
numpyro.sample("D", dist.StudentT(2, mu, sigma), obs=D)
m5_3t = AutoLaplaceApproximation(model)
svi = SVI(
model,
m5_3t,
optim.Adam(0.3),
Trace_ELBO(),
M=d.M.values,
A=d.A.values,
D=d.D.values,
)
svi_result = svi.run(random.PRNGKey(0), 1000)
p5_3t = svi_result.params
| notebooks/07_ulysses_compass.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os,psycopg2,base64;
from subprocess import PIPE,Popen;
import shlex;
from ipywidgets import IntProgress,HTML,VBox;
from IPython.display import display;
# Schemas to restore, in dependency order; one <name>.dmp file per schema
# is expected in the loading dock.
datasets = [
    'dz_lrs'
    ,'nhdplus'
    ,'nhdplus_watersheds'
    ,'nhdplus_indexing'
    ,'nhdplus_navigation30'
    ,'nhdplus_delineation'
    ,'nhdplus_toponet'
    ,'waterspg_support'
    ,'waterspg'
]

# Connection string for the target database.
cs = "dbname=%s user=%s password=%s host=%s port=%s" % (
    'nhdplus'
    ,'postgres'
    ,os.environ['POSTGRES_PASSWORD']
    ,'dz_pg'
    ,5432
);

try:
    conn = psycopg2.connect(cs);
except psycopg2.Error as err:
    # Narrowed from a bare except: so unrelated errors (KeyError on the
    # env var, KeyboardInterrupt, ...) are not masked.
    raise Exception("database connection error") from err;

# Verify every dump file exists before dropping anything.
loading_dock = "/loading_dock/";
for item in datasets:
    if not os.path.exists(loading_dock + item + ".dmp"):
        raise Exception('specified dump file not found in loading dock: ' + item);

def pg_restore(host_name,host_port,database_name,user_name,database_password,dumpfile,threads=1):
    """Run pg_restore (-c: clean first) for one dump file; return (stdout, stderr).

    NOTE(review): the password is written to the child's stdin; pg_restore
    normally reads passwords from PGPASSWORD / .pgpass rather than stdin —
    confirm this actually authenticates in this environment.
    """
    command = 'pg_restore -c -h {0} -p {1} -d {2} -U {3} -j {5} {4}'\
        .format(host_name,host_port,database_name,user_name,dumpfile,threads);
    command = shlex.split(command);
    p = Popen(command,shell=False,stdin=PIPE,stdout=PIPE,stderr=PIPE);
    return p.communicate(bytes('{}\n'.format(database_password),'utf-8'));

# Drop all target schemas up front.
# BUG FIX: the original did ','.join(item) — `item` was the leftover loop
# variable (the last dataset name), so the join emitted its individual
# *characters* ("w,a,t,e,r,s,p,g") instead of the schema list.
cur = conn.cursor();
cur.execute("DROP SCHEMA IF EXISTS " + ','.join(datasets) + " CASCADE;");
conn.commit();

# Progress bar: 100 units per schema load (starts at 100 to show activity).
i = IntProgress(min=0,max=900,value=100);
label = HTML();
box = VBox(children=[label,i]);
display(box);

for item in datasets:
    label.value = "Loading " + item + " schema...";
    pg_restore(
        'dz_pg'
        ,5432
        ,'nhdplus'
        ,'postgres'
        ,os.environ['POSTGRES_PASSWORD']
        ,'/loading_dock/' + item + '.dmp'
        ,4
    );
    i.value += 100;

i.bar_style = 'success';
label.value = 'All database loads complete';

# Wake any listeners waiting on schema changes, then clean up.
cur.execute("NOTIFY ddl_command_end;");
conn.commit();
cur.close();
conn.close();
| jupyter/notebooks/Utilities/Load_NHDPlus_Database.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/pb111/Python-tutorials-and-projects/blob/master/Python_Custom_Exceptions.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="ceve44bFKfGU"
# # **Python Custom Exceptions**
#
# - In this tutorial, you will learn how to define custom exceptions depending upon your requirements with the help of examples.
#
#
# + [markdown] id="qJyveehx-4tp"
# ## **1. Introduction**
#
#
# - Python has numerous [built-in exceptions](https://www.programiz.com/python-programming/exceptions) that force your program to output an error when something in the program goes wrong.
#
# - However, sometimes you may need to create your own custom exceptions that serve your purpose.
# + [markdown] id="yZezWpAs_VM3"
# ## **2. Creating Custom Exceptions**
#
# - In Python, users can define custom exceptions by creating a new class.
# - This exception class has to be derived, either directly or indirectly, from the built-in `Exception` class.
# - Most of the built-in exceptions are also derived from this class.
# + id="EMLso9kYBx2c"
class CustomError(Exception):
pass
# + colab={"base_uri": "https://localhost:8080/", "height": 163} id="_mjDXZSZFasZ" outputId="5876c434-0ee4-4704-bd28-c94f80417605"
raise CustomError
# + colab={"base_uri": "https://localhost:8080/", "height": 163} id="HL9oJ7FKFkX9" outputId="aacde0bb-fd99-4d09-dd00-52c95635e797"
raise CustomError("An error occurred")
# + [markdown] id="rCs5OdquGbNL"
# - Here, we have created a user-defined exception called `CustomError` which inherits from the `Exception` class. This new exception, like other exceptions, can be raised using the `raise` statement with an optional error message.
#
# - When we are developing a large Python program, it is a good practice to place all the user-defined exceptions that our program raises in a separate file. Many standard modules do this. They define their exceptions separately as `exceptions.py` or `errors.py` (generally but not always).
#
# - User-defined exception class can implement everything a normal class can do, but we generally make them simple and concise. Most implementations declare a custom base class and derive others exception classes from this base class. This concept is made clearer in the following example.
# + [markdown] id="e9h5wEOJHOpu"
# #### **Example: User-Defined Exception in Python**
#
# - In this example, we will illustrate how user-defined exceptions can be used in a program to raise and catch errors.
#
# - This program will ask the user to enter a number until they guess a stored number correctly. To help them figure it out, a hint is provided whether their guess is greater than or less than the stored number.
# + colab={"base_uri": "https://localhost:8080/"} id="Opv-Yi35HdxR" outputId="886c028f-f450-4c55-84af-6d3810e26887"
# define Python user-defined exceptions
class Error(Exception):
    """Base class for the exceptions raised by this guessing-game example."""


class ValueTooSmallError(Error):
    """Raised when the input value is below the target number."""


class ValueTooLargeError(Error):
    """Raised when the input value is above the target number."""
# you need to guess this number
number = 10
# user guesses a number until he/she gets it right
# NOTE(review): a non-numeric entry makes int() raise an uncaught ValueError
# and aborts the loop -- confirm whether that is intended for this demo.
while True:
    try:
        i_num = int(input("Enter a number: "))
        # raise one of the custom exceptions defined above to signal
        # which direction the guess missed in
        if i_num < number:
            raise ValueTooSmallError
        elif i_num > number:
            raise ValueTooLargeError
        # correct guess: leave the loop
        break
    except ValueTooSmallError:
        print("This value is too small, try again!")
        print()
    except ValueTooLargeError:
        print("This value is too large, try again!")
        print()
print("Congratulations! You guessed it correctly.")
# + [markdown] id="jHxFG1YHJOHP"
# - We have defined a base class called `Error`.
#
# - The other two exceptions (`ValueTooSmallError` and `ValueTooLargeError`) that are actually raised by our program are derived from this class. This is the standard way to define user-defined exceptions in Python programming, but you are not limited to this way only.
#
#
# + [markdown] id="3Z1HumkU_gPG"
# ## **3. Customizing Exception Classes**
#
#
# - We can further customize this class to accept other arguments as per our needs.
#
# - To learn about customizing the Exception classes, you need to have the basic knowledge of Object-Oriented programming.
#
# - Visit [Python Object Oriented Programming](https://www.programiz.com/python-programming/object-oriented-programming) to start learning about Object-Oriented programming in Python.
# + [markdown] id="lkBVVXnPKN_R"
# Let's look at one example:
# + colab={"base_uri": "https://localhost:8080/", "height": 214} id="ChVGEUWpKQ1w" outputId="f32d8ff3-d743-44f4-a1b6-f083a1016fa9"
class SalaryNotInRangeError(Exception):
    """Raised when an input salary falls outside the accepted range.

    Attributes:
        salary -- the offending input salary
        message -- human-readable explanation of the error
    """

    def __init__(self, salary, message="Salary is not in (5000, 15000) range"):
        super().__init__(message)
        self.salary = salary
        self.message = message
salary = int(input("Enter salary amount: "))
if not 5000 < salary < 15000:
raise SalaryNotInRangeError(salary)
# + [markdown] id="SVjeF3eOLN5I"
# - Here, we have overridden the constructor of the `Exception` class to accept our own custom arguments `salary` and `message`. Then, the constructor of the parent `Exception` class is called manually with the `self.message` argument using `super()`.
#
# - The custom `self.salary` attribute is defined to be used later.
#
# - The inherited `__str__` method of the `Exception` class is then used to display the corresponding message when `SalaryNotInRangeError` is raised.
#
# - We can also customize the `__str__` method itself by overriding it.
# + colab={"base_uri": "https://localhost:8080/", "height": 214} id="_W7wpov9Ka-K" outputId="9035ae51-3ba8-4c47-b693-d976380e8d10"
class SalaryNotInRangeError(Exception):
    """Raised when an input salary falls outside the accepted range.

    Attributes:
        salary -- the offending input salary
        message -- human-readable explanation of the error
    """

    def __init__(self, salary, message="Salary is not in (5000, 15000) range"):
        super().__init__(message)
        self.salary = salary
        self.message = message

    def __str__(self):
        # show the offending value alongside the explanation
        return f'{self.salary} -> {self.message}'
salary = int(input("Enter salary amount: "))
if not 5000 < salary < 15000:
raise SalaryNotInRangeError(salary)
# + [markdown] id="Fg8YrE_FMXFb"
# - To learn more about how you can handle exceptions in Python, visit [Python Exception Handling](https://www.programiz.com/python-programming/exception-handling).
# + [markdown] id="Li-Kn5Ly65yB"
# https://www.programiz.com/python-programming/user-defined-exception
| Python_Custom_Exceptions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: heart
# language: python
# name: heart
# ---
import torch
from transformers import AutoTokenizer, AutoModel
# +
src_tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-es-en") #sanity -- this may not be a joint vocab
tgt_tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-es")
model = AutoModel.from_pretrained("Helsinki-NLP/opus-mt-es-en", output_attentions=True)
# +
# check out vocab tokenization
# -
src_tokenizer.tokenize("compositional")
src_tokenizer.tokenize("compositiva")
src_tokenizer.tokenize("My vocabulary is limited.")
# +
#sanity check -- use the non-auto versions for the helsinki models
#https://huggingface.co/docs/transformers/main/en/model_doc/marian#transformers.MarianMTModel
from transformers import MarianTokenizer, MarianMTModel
src = "es" # source language
trg = "en" # target language
model_name = f"Helsinki-NLP/opus-mt-{src}-{trg}"
model = MarianMTModel.from_pretrained(model_name)
tokenizer = MarianTokenizer.from_pretrained(model_name)
# +
sample_text = "dónde está el autobús?"
batch = tokenizer([sample_text], return_tensors="pt")
generated_ids = model.generate(**batch)
output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(output)
# -
#get embeddings given generated_ids
def get_embed_from_text(text):
    """Look up masked input embeddings for `text`, dropping the trailing EOS token."""
    encoding = tokenizer([text], return_tensors="pt")
    table = model.get_input_embeddings().weight
    vectors = [
        table[tok] * m
        for tok, m in zip(encoding['input_ids'][0], encoding['attention_mask'][0])
    ]
    # the tokenizer always appends an EOS token (id = 0); drop it
    del vectors[-1]
    return torch.stack(vectors)
embed_orig = get_embed_from_text("couch")
embed_swap = get_embed_from_text("sofa")
embed_orig
tokenizer.tokenize("couches")
tokenizer.tokenize("sofa")
embed_orig.shape
#requires tensors of the same shape -- 1&1, or subword decomps into the same # of units
cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
output = cos(embed_orig, embed_swap)
print(output)
def compute_cos(embed1, embed2):
    """Cosine similarity between two (possibly multi-subword) word embeddings.

    Args:
        embed1, embed2: tensors of shape (num_subwords, dim); subword vectors
            are mean-pooled so each word becomes a single embedding.

    Returns:
        A scalar tensor with the cosine similarity of the two pooled vectors.
    """
    # first, average subwords to get 1 embed per word -> shape (1, dim)
    embed1_avg = torch.mean(embed1, dim=0).unsqueeze(0)
    embed2_avg = torch.mean(embed2, dim=0).unsqueeze(0)
    # second, cosine similarity along the feature dimension.
    # BUG FIX: the original used dim=0, which reduced over the singleton
    # batch axis of the (1, dim) tensors and returned per-component signs
    # (+/-1) instead of the cosine between the two vectors; dim=1 reduces
    # over the feature axis and yields one similarity value.
    cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
    output = cos(embed1_avg, embed2_avg)
    return output[0]
# +
embed_orig = get_embed_from_text("couch")
embed_swap = get_embed_from_text("sofa")
compute_cos(embed_orig, embed_swap)
# +
# visualize alignments with bertviz
# +
src_sent = "No copies y pegues oraciones de otros sitios."
tgt_sent = "Do not copy-paste sentences from elsewhere."
encoder_input_ids = src_tokenizer(src_sent, return_tensors="pt", add_special_tokens=False).input_ids
decoder_input_ids = tgt_tokenizer(tgt_sent, return_tensors="pt", add_special_tokens=False).input_ids
outputs = model(input_ids=encoder_input_ids, decoder_input_ids=decoder_input_ids)
encoder_text = src_tokenizer.convert_ids_to_tokens(encoder_input_ids[0])
decoder_text = tgt_tokenizer.convert_ids_to_tokens(decoder_input_ids[0])
# -
from bertviz import head_view
head_view(
encoder_attention=outputs.encoder_attentions,
decoder_attention=outputs.decoder_attentions,
cross_attention=outputs.cross_attentions,
encoder_tokens= encoder_text,
decoder_tokens = decoder_text
)
# +
# awesome align -- demo https://colab.research.google.com/drive/1205ubqebM0OsZa1nRgbGJBtitgHqIVv6?usp=sharing#scrollTo=ODwJ_gQ8bnqR
# -
# # !pip install transformers==3.1.0 (using 4+ works)
import torch
import transformers
import itertools
from collections import defaultdict
model = transformers.BertModel.from_pretrained('bert-base-multilingual-cased')
tokenizer = transformers.BertTokenizer.from_pretrained('bert-base-multilingual-cased')
# +
# src = 'awesome-align is awesome !'
# tgt = '牛对齐 是 牛 !'
tgt = "No copies y pegues oraciones de otros sitios."
src = "Do not copy-paste sentences from elsewhere."
# +
# pre-processing
sent_src, sent_tgt = src.strip().split(), tgt.strip().split()
token_src, token_tgt = [tokenizer.tokenize(word) for word in sent_src], [tokenizer.tokenize(word) for word in sent_tgt]
wid_src, wid_tgt = [tokenizer.convert_tokens_to_ids(x) for x in token_src], [tokenizer.convert_tokens_to_ids(x) for x in token_tgt]
ids_src, ids_tgt = tokenizer.prepare_for_model(list(itertools.chain(*wid_src)), return_tensors='pt', model_max_length=tokenizer.model_max_length, truncation=True)['input_ids'], tokenizer.prepare_for_model(list(itertools.chain(*wid_tgt)), return_tensors='pt', truncation=True, model_max_length=tokenizer.model_max_length)['input_ids']
sub2word_map_src = []
for i, word_list in enumerate(token_src):
sub2word_map_src += [i for x in word_list]
sub2word_map_tgt = []
for i, word_list in enumerate(token_tgt):
sub2word_map_tgt += [i for x in word_list]
# alignment
align_layer = 8
threshold = 1e-3
model.eval()
with torch.no_grad():
out_src = model(ids_src.unsqueeze(0), output_hidden_states=True)[2][align_layer][0, 1:-1]
out_tgt = model(ids_tgt.unsqueeze(0), output_hidden_states=True)[2][align_layer][0, 1:-1]
dot_prod = torch.matmul(out_src, out_tgt.transpose(-1, -2))
softmax_srctgt = torch.nn.Softmax(dim=-1)(dot_prod)
softmax_tgtsrc = torch.nn.Softmax(dim=-2)(dot_prod)
softmax_inter = (softmax_srctgt > threshold)*(softmax_tgtsrc > threshold)
align_subwords = torch.nonzero(softmax_inter, as_tuple=False)
align_words = set()
src2tgt = defaultdict(set)
for i, j in align_subwords:
align_words.add( (sub2word_map_src[i], sub2word_map_tgt[j]) )
src2tgt[sub2word_map_src[i]].add(sub2word_map_tgt[j])
# printing
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
for i, j in sorted(align_words):
print(f'{color.BOLD}{color.BLUE}{sent_src[i]}{color.END}==={color.BOLD}{color.RED}{sent_tgt[j]}{color.END}')
# -
align_words
align_subwords
src2tgt
src
for idx, s in enumerate(src.split()):
print(idx, s, src2tgt[idx])
| init_testing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="RGrQ6zyMNqtR"
# # Decision Tree
#
# It is a binary decision-making process to filter the best decision. At each step of the tree, the nodes (number of possible decisions) are divided into two smaller groups according to a condition (e.g. is the image grayscale or colour?) until the decision options in terminal nodes are indivisible.
#
# <br>
#
# ---
#
# <br>
#
# [Decision Tree, Wikipedia](https://en.wikipedia.org/wiki/Decision_tree)
#
# A decision tree is a decision support tool that uses a tree-like model of decisions and their possible consequences, including chance event outcomes, resource costs, and utility. It is one way to display an algorithm that only contains conditional control statements.
#
# Decision trees are commonly used in operations research, specifically in decision analysis, to help identify a strategy most likely to reach a goal, but are also a popular tool in machine learning.
#
# ----
#
# A decision tree is a flowchart-like structure in which each internal node represents a "test" on an attribute (e.g. whether a coin flip comes up heads or tails), each branch represents the outcome of the test, and each leaf node represents a class label (decision taken after computing all attributes).
# + [markdown] id="0JdbK2M9O1Ln"
# ## Entropy & Information Gain
#
# $Entropy = -p(+) \cdot log(p(+)) - p(-) \cdot log(p(-))$
#
# Example:
#
# In the total of 8 pictures, we want to search the family photo from winter vacation.
#
# **References**
# <NAME> (2016) Machine Learning Decision Tree - Mathematical approach to ID3 Algorithm https://www.youtube.com/watch?v=UPKugq0fK04&ab_channel=MinsukHeo허민석
# + colab={"base_uri": "https://localhost:8080/"} id="hgq-NOouMVgc" outputId="54b7f9ae-ae74-4c94-87f0-dc905e577237"
import numpy as np
# Entropy([1+, 7-]) using base-2 logarithms.
p_pos, p_neg = 1/8, 7/8
entropy = -(p_pos * np.log2(p_pos) + p_neg * np.log2(p_neg))
np.round(entropy, 3)
# + [markdown] id="ZBuG7g2DRgnc"
# Information Gain (family photo from winter, cartoon)
#
# $E(\text{winter family photo}) - E(\text{winter family photo, cartoon})$
#
# entropy - (4/8 * E([0+, 4-]) + (4/8 * E([1+, 3-]
# + colab={"base_uri": "https://localhost:8080/"} id="wUfHKldAQR42" outputId="624aa12c-c817-4379-a130-9a389102d8fd"
# IG(winter family photo, cartoon)
# The cartoon attribute splits the 8 photos into [0+, 4-] and [1+, 3-];
# the [0+, 4-] branch is pure (entropy 0), so only the (4/8) * E([1+, 3-])
# term appears below.
cartoon_entropy = entropy - (4/8 * (- 1/4 * np.log2(1/4) - 3/4 * np.log2(3/4)))
np.round(cartoon_entropy, 3)
# + colab={"base_uri": "https://localhost:8080/"} id="EXMe7FU4S_y8" outputId="ff4cd963-3203-46fe-b7a4-e3d1e633df4c"
# IG(winter family photo, winter)
# The winter attribute splits the 8 photos into [1+, 4-] (5 winter photos,
# one of them the target) and [0+, 3-] (3 non-winter photos). The [0+, 3-]
# branch is pure, so its entropy is 0 and only the 5/8-weighted branch
# contributes.
# BUG FIX: the original line evaluated np.log2(0/3) (= -inf), which made the
# whole expression infinite, and also omitted the within-branch class
# probabilities from the entropy terms.
winter_entropy = entropy - (5/8 * (-(1/5) * np.log2(1/5) - (4/5) * np.log2(4/5)))
np.round(winter_entropy, 4)  # ~0.0924; the source video rounds this to 0.093
# + id="N0NiH-izUpwP"
# IG(winter family photo, n_person > 1)
# NOTE(review): the branch-entropy terms below reuse the split fractions
# (5/8, 3/8) as the within-branch class probabilities as well; branch
# entropies should use the class proportions inside each branch -- verify
# against the source video before trusting the value.
person_entropy = entropy - (5/8 * (-5/8 * np.log2(5/8)) + 3/8 * (-3/8 * np.log2(3/8)))
# BUG FIX: this cell previously rounded winter_entropy instead of the
# person_entropy value it just computed.
np.round(person_entropy, 4)  # expected answer per the video: 0.093
# + [markdown] id="NRl5F9XXS_t8"
# ### Entropy Formula
#
# High entropy means high uncertainty.
# Low entropy means low uncertainty.
# $H(X)= -\sum_{i=1}^{n}p_{i} \cdot log_{2} \cdot p_{i}$
#
# + colab={"base_uri": "https://localhost:8080/"} id="rwA7ZkWmWIQA" outputId="6444536a-0655-4f2e-f18a-d2ae24d65337"
# Coin toss
# 50% 50% chance of head and tail
-(0.5 * np.log2(0.5) + 0.5 * np.log2(0.5))
# + colab={"base_uri": "https://localhost:8080/"} id="BPReBG1OWddP" outputId="a96b4469-d905-46b8-d82c-9f126b0da689"
# 100% head 0% tail
-(np.log2(1) + 0 * np.log2(0.5))
# + colab={"base_uri": "https://localhost:8080/"} id="YW2rSaQtWmX7" outputId="1dc02dd4-f61d-44a1-da3a-7031d7019b24"
# 90% head 10% tail
entropy = -(0.9*np.log2(0.9) + 0.1 * np.log2(0.1))
np.round(entropy, 2)
| 00 Fundamentals/Decision Tree.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: model3
# language: python
# name: model3
# ---
# +
import sys
sys.path.append('../')
import logging
import pandas as pd
from sklearn.cluster import DBSCAN
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from lstm_mobility_model.two_layer_latlng_location import TwoLayerLstmModelBuilder
from lstm_mobility_model.load import DataPreprocessor
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# %matplotlib inline
# -
# ### Data Preprocessing
# +
def preprocess_data(dataframe):
    """Preprocess a traces DataFrame into a dict via DataPreprocessor."""
    return DataPreprocessor().preprocess_traces_to_dict(dataframe)
dataframe = pd.read_csv('../sample_data/sample_data_0.csv')
preprocessed_data = preprocess_data(dataframe)
# -
# ### LSTM mobility model
lstm_model = \
TwoLayerLstmModelBuilder(
model_path='lstm_model_weights/',
model_name='two_layer_categorical_location',
learning_rate=0.01,
training_epochs=1000,
lstm_dropout=0.0,
lstm_units=16,
number_of_mixtures=40,
sampling_bias=0.0)
lstm_model.train(preprocessed_data,
batch_size=100)
generated_traces = lstm_model.generate(
preprocessed_data,
method='complete')
generated_traces_df = pd.concat(list(generated_traces.values()))
# +
_ = plt.figure()
_ = generated_traces_df[generated_traces_df['start_hour_since_day'] != 0][
'start_hour_since_day'].hist(bins=30)
_ = plt.title('Generated Activity Start Time Histogram')
_ = plt.xlim(0, 25)
_ = plt.figure()
_ = dataframe[dataframe['start_hour_since_day'] != 0][
'start_hour_since_day'].hist(bins=30)
_ = plt.title('Observed Activity Start Time Histogram')
_ = plt.xlim(0, 25)
# +
_ = generated_traces_df[generated_traces_df['start_hour_since_day'] != 0].plot.scatter(
x='start_hour_since_day',
y='duration')
_ = plt.title('Generated Start Time vs. Duration')
_ =plt.xlim(0, 25)
_ = dataframe[dataframe['start_hour_since_day'] != 0].plot.scatter(
x='start_hour_since_day',
y='duration')
_ = plt.title('Observed Start Time vs. Duration')
_ = plt.xlim(0, 25)
# +
_ = generated_traces_df[generated_traces_df['start_hour_since_day'] != 0].plot.scatter(
x='lon',
y='lat')
_ = plt.title('Generated Lat Lon')
_ = dataframe[dataframe['start_hour_since_day'] != 0].plot.scatter(
x='lon',
y='lat')
_ = plt.title('Observed Lat Lon')
| examples/Two Layers Lat Lng Location.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# ## LECTURE 4
print('hello')
1+2
for number in (1,2,3):
print(number)
print('1+3 is {}'.format(1+3))
# !pip install psycopg2
import pandas
import configparser
import psycopg2
# +
config = configparser.ConfigParser()
config.read('config.ini')
host=config['myaws']['host']
db=config['myaws']['db']
user=config['myaws']['user']
pwd = config['<PASSWORD>']['<PASSWORD>']
conn = psycopg2.connect( host=host,
user=user,
password = <PASSWORD>,
dbname=db)
# -
cur=conn.cursor()
sql_statement = """ select bathroom,bedroom
from public.house_price_full
where bathroom>2"""
cur.execute(sql_statement)
cur.fetchone()
for bathroom,bedroom in cur.fetchall()[:10]:
print(bathroom,bedroom)
df=pandas.read_sql_query(sql_statement,conn)
df[:10]
sql_statement= """
select built_in,
avg(price) as avg_price
from public.house_price_full
group by built_in
order by built_in
"""
# +
df_price=pandas.read_sql_query(sql_statement,conn)
df_price.plot(y='avg_price',x='built_in')
# -
sql_statement= """
select price,area
from public.house_price_full
"""
# +
df_price=pandas.read_sql_query(sql_statement,conn)
df_price['area'].hist()
# -
sql_statement= """
select house_type,
avg(price) as avg_price
from public.house_price_full
group by house_type
order by avg_price desc
"""
# +
df_price=pandas.read_sql_query(sql_statement,conn)
df_price.plot.bar(x='house_type',y='avg_price')
# +
# SECURITY NOTE(review): building SQL by interpolating values with
# str.format is vulnerable to SQL injection; prefer parameterized queries
# (cur.execute(sql, params) with %s placeholders) so psycopg2 escapes the
# values.
sql_statement = """
insert into gp11.student(s_email,s_name,major)
values('{}','{}','{}')
""".format('<EMAIL>','s5','ia')
print(sql_statement)
# -
cur.execute(sql_statement)
conn.rollback()
sql_statement = """
insert into gp11.student(s_email,s_name,major)
values('{}','{}','{}')
""".format('<EMAIL>','s6','ia')
cur.execute(sql_statement)
conn.commit()
df_student=pandas.read_sql_query('select * from gp11.student',conn)
df_student[:]
# +
sql_statement = """
delete from gp11.student
where s_email = '{}'
""".format('<EMAIL>','s6','ia')
print(sql_statement)
# -
cur.execute(sql_statement)
conn.rollback()
conn.commit()
df_student=pandas.read_sql_query('select * from gp11.student',conn)
df_student[:]
cur.close()
conn.close()
| Lec4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
# # Viterbi algorithm
a = np.loadtxt('transitionMatrix.txt') # n x n
b = np.loadtxt('emissionMatrix.txt') # n x m
init = np.loadtxt('initialStateDistribution.txt') # n x 1 (pi)
n = len(init) # number of states
m = b.shape[1] # number of observations
o = np.loadtxt('observations.txt', dtype=int) # observations
T = len(o)
l = np.zeros((n,T))
phi = np.zeros_like(l)
print('a:', a.shape)
print('b:', b.shape)
print('init:', init.shape)
print('n:', n)
print('m:', m)
print('o:', o.shape)
print('T:', T)
print('l:', l.shape)
print('phi:', phi.shape)
def initialize_l():
    '''
    First step of filling in the l* matrix.

    Sets column 0 of the module-level array `l` (in place) to
    log(pi_i) + log(b_i(o_0)): the log joint probability of starting in
    state i and emitting the first observation.
    '''
    l[:,0] = np.log(init) + np.log(b[:,o[0]])
def update_l(curr_t, next_t):
    '''
    Fill l in from left to right given the current timestep t and the next
    timestep t+1; also creates the backpointers (phi) for t+1.

    Returns:
        next_1 -- length-n array of best log-scores per state at next_t
        next_phi -- 1 x n array of argmax predecessor indices for next_t
    '''
    # Viterbi recursion: best predecessor score plus the emission
    # log-probability of the observation at next_t.
    # NOTE(review): broadcasting adds l[:,curr_t] across the columns of
    # log(a); this is only correct for the row/column orientation used by
    # transitionMatrix.txt -- confirm that convention.
    next_1 = np.max(np.add(l[:,curr_t], np.log(a)), axis=1) + np.log(b[:,o[next_t]])
    # argmax over the same scores yields the backpointer used when backtracking
    next_phi = np.asarray([np.argmax(l[:,curr_t] + np.log(a), axis=1)])
    return next_1, next_phi
# +
# Initialize l* matrix
initialize_l()
# Fill l* matrix from left to right
for t in range(T-1):
l[:,t+1], phi[:,t+1] = update_l(t, t+1)
# -
s = np.zeros(T, dtype=int)
def initialize_s():
    '''
    Start the backtrace: the last state is the one with the best final
    score in the l* matrix. Mutates the module-level array `s` in place.
    '''
    s[-1] = np.argmax(l[:,-1])
def update_s(curr_t, next_t):
    '''
    Computes the most likely state at curr_t by backtracking: follow the
    backpointer stored in phi for the already-decided state at next_t.
    '''
    s_curr = phi[s[next_t], next_t]
    return s_curr
# +
# Initialize most likely states (s*) matrix
initialize_s()
# Fill s* matrix from right to left
for t in range(T-2, -1, -1):
s[t] = update_s(t, t+1)
# -
def viterbi_letters(s):
    """Map a sequence of state indices to text: 0-25 -> 'a'-'z', 26 -> space."""
    def decode(c):
        return " " if c == 26 else chr(c + 97)
    return "".join(decode(c) for c in s)
letters = viterbi_letters(s)
from itertools import groupby
# collapse runs of repeated letters (each decoded state persists for many frames)
let_uniq = [key for key, _ in groupby(letters)]
message = ''.join(let_uniq)
print(message)
label = [chr(i) for i in range(97, 97+26, 1)]
label.append('<SPC>')
plt.plot(s)
plt.xlabel('time')
plt.ylabel('letter')
plt.yticks(np.arange(0, 27, step=1), labels=label)
plt.show()
| content/project/cse250-hw7/hw7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import os
print(os.getcwd())
dataset = pd.read_csv(os.getcwd()+'/student_scores.csv')
dataset.shape
dataset.head()
dataset.describe()
dataset.plot(x='Hours', y='Scores', style='o')
plt.title('Hours vs Percentage')
plt.xlabel('Hours Studied')
plt.ylabel('Percentage Score')
plt.show()
# +
# features: every column except the last (Hours); target: the Scores column
X = dataset.iloc[:, :-1].values
Y = dataset.iloc[:, 1].values
# -
from sklearn.model_selection import train_test_split
# hold out 20% of the rows for evaluation; random_state pins the shuffle
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
from sklearn.linear_model import LinearRegression
# fit ordinary least squares on the training split
regressor = LinearRegression()
regressor.fit(X_train, Y_train)
#print the intercept
print(regressor.intercept_)
print(regressor.coef_)
y_pred = regressor.predict(X_test)
#print the prediction
print(y_pred)
df = pd.DataFrame({'Actual': Y_test, 'Predicted': y_pred})
df.shape
print(df)
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(Y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(Y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(Y_test, y_pred)))
| simple-linear-regression/Linear_Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=["parameters"]
project_id = 'elife-data-pipeline'
output_dataset = 'de_dev'
output_table_prefix = 'data_science_'
target_paper_count = 50
# max_paper_count is ignore if it is a good match
max_paper_count = 2000
# +
import logging
import sys
from functools import partial
import pandas as pd
import data_science_pipeline.configure_warnings # pylint: disable=unused-import
import data_science_pipeline.configure_notebook_logging # pylint: disable=unused-import
from data_science_pipeline.sql import get_sql
from data_science_pipeline.utils.bq import run_query_and_save_to_table, get_client
from data_science_pipeline.utils.jupyter import (
printmd,
to_markdown_sql,
read_big_query as _read_big_query
)
# -
query_name = 'disambiguated_editor_papers'
destination_table_name = ''.join([output_table_prefix, query_name])
logging.basicConfig(level='INFO', stream=sys.stdout)
read_big_query = partial(_read_big_query, project_id=project_id)
print('processing %s' % query_name)
_sql = get_sql('%s.sql' % query_name).format(
project=project_id,
dataset=output_dataset,
target_paper_count=target_paper_count,
max_paper_count=max_paper_count
)
printmd(to_markdown_sql(_sql))
run_query_and_save_to_table(
client=get_client(project_id=project_id),
query=_sql,
destination_dataset=output_dataset,
destination_table_name=destination_table_name
)
print('done')
_sql = get_sql('disambiguated_editor_papers_count.sql').format(
project=project_id,
dataset=output_dataset
)
editor_pubmed_count_df = read_big_query(_sql)
print(len(editor_pubmed_count_df))
editor_pubmed_count_df.head(3)
with pd.option_context("display.max_rows", 1000):
print(
editor_pubmed_count_df
[['person_id', 'name', 'pubmed_count', 'relevant_pubmed_id_count', 'retrieved_pubmed_id_count']]
.to_string(index=False)
)
print('editors with pubmed urls without parsed pubmed id:\n%s' % editor_pubmed_count_df[
editor_pubmed_count_df['relevant_pubmed_url_count'] > editor_pubmed_count_df['relevant_pubmed_id_count']
][['person_id', 'name', 'relevant_pubmed_url_count', 'relevant_pubmed_id_count']].to_string(index=False))
print(
'editors without disambiguated pubmed papers despite having relevant pubmed ids:\n%s' % (
editor_pubmed_count_df[
(editor_pubmed_count_df['pubmed_count'] == 0)
& (editor_pubmed_count_df['relevant_pubmed_id_count'] > 0)
]
[['person_id', 'name', 'pubmed_count', 'relevant_pubmed_id_count', 'retrieved_pubmed_id_count']]
.to_string(index=False)
)
)
print(
'editors with less than five disambiguated pubmed papers:\n%s' % (
editor_pubmed_count_df[
(editor_pubmed_count_df['pubmed_count'] > 0)
& (editor_pubmed_count_df['pubmed_count'] < 5)
]
[['person_id', 'name', 'pubmed_count', 'relevant_pubmed_id_count', 'retrieved_pubmed_id_count']]
.to_string(index=False)
)
)
print(
'editors without additional disambiguated pubmed papers (apart from relevant pubmed ids):\n%s' % (
editor_pubmed_count_df[
(editor_pubmed_count_df['pubmed_count'] <= editor_pubmed_count_df['relevant_pubmed_id_count'])
& (
editor_pubmed_count_df['retrieved_pubmed_id_count']
> editor_pubmed_count_df['relevant_pubmed_id_count']
)
]
[['person_id', 'name', 'relevant_pubmed_id_count', 'retrieved_pubmed_id_count', 'search_term']]
.to_string(index=False)
)
)
print(
'editors with only relevant pubmed papers:\n%s' % (
editor_pubmed_count_df[
(editor_pubmed_count_df['pubmed_count'] > 0)
& (
editor_pubmed_count_df['retrieved_pubmed_id_count']
<= editor_pubmed_count_df['relevant_pubmed_id_count']
)
]
[['person_id', 'name', 'relevant_pubmed_id_count', 'retrieved_pubmed_id_count', 'search_term']]
.to_string(index=False)
)
)
print(
'editors without any disambiguated pubmed papers:\n%s' % (
editor_pubmed_count_df[
editor_pubmed_count_df['pubmed_count'] == 0
]
[['person_id', 'name', 'relevant_pubmed_id_count', 'retrieved_pubmed_id_count', 'search_term']]
.to_string(index=False)
)
)
| notebooks/peerscout/peerscout-disambiguate-editor-papers.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .js
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Javascript (Node.js)
// language: javascript
// name: javascript
// ---
// <h1>Event loop samples</h1>
// <h2>How setTimeout executes</h2>
// Demonstrates that a 0 ms setTimeout callback still waits for the call
// stack to empty: the output order is A, C (after ~3 s of blocking), then B.
function main(){
    console.log('A');
    // schedule exec() on the timer (macrotask) queue; it cannot run until
    // all synchronous code below has finished
    setTimeout(
        function exec(){ console.log('B'); }
    , 0);
    // block the event loop for 3 seconds with a busy-wait
    runWhileLoopForNSeconds(3);
    console.log('C');
}
main();
// Busy-wait that keeps the call stack occupied for `sec` seconds,
// preventing the event loop from processing any queued callbacks.
function runWhileLoopForNSeconds(sec){
    const deadline = Date.now() + sec * 1000;
    while (Date.now() < deadline) {
        // intentionally empty: spin until the deadline passes
    }
}
// <h2>Preference to promises</h2>
// +
var bar = () => console.log('bar')
var baz = () => console.log('baz')
var foo = () => {
console.log('foo')
setTimeout(bar, 0)
new Promise((resolve, reject) =>
resolve('should be right after baz, before bar')
).then(resolve => console.log(resolve))
baz()
}
foo()
// -
// Lets pass them functions with reference
| src/jsNotes/basics/event loops.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:myclone]
# language: python
# name: conda-env-myclone-py
# ---
# +
# data sets for family_rules
simpsons_data = ("person bart",
"person lisa",
"person maggie",
"person marge",
"person homer",
"person abe",
"parent marge bart",
"parent marge lisa",
"parent marge maggie",
"parent homer bart",
"parent homer lisa",
"parent homer maggie",
"parent abe homer")
black_data = ("person sirius",
"person regulus",
"person walburga",
"person alphard",
"person cygnus",
"person pollux",
"person bellatrix",
"person andromeda",
"person narcissa",
"person nymphadora",
"person draco",
"parent walburga sirius",
"parent walburga regulus",
"parent pollux walburga",
"parent pollux alphard",
"parent pollux cygnus",
"parent cygnus bellatrix",
"parent cygnus andromeda",
"parent cygnus narcissa",
"parent andromeda nymphadora",
"parent narcissa draco")
sibling_test_data = [ 'person mario',
'person luigi',
'person papa',
'parent papa mario',
'parent papa luigi' ]
grandparent_test_data = [ 'person jay',
'person claire',
'person alex',
'parent jay claire',
'parent claire alex' ]
anonymous_family_test_data = [ 'person a1', 'person b1', 'person b2',
'person c1', 'person c2', 'person c3',
'person c4', 'person d1', 'person d2',
'person d3', 'person d4',
'parent a1 b1',
'parent a1 b2',
'parent b1 c1',
'parent b1 c2',
'parent b2 c3',
'parent b2 c4',
'parent c1 d1',
'parent c2 d2',
'parent c3 d3',
'parent c4 d4' ]
names_relationship = [ 'person a1', 'person b1', 'person b2',
'person c1', 'person c2', 'person c3',
'person c4', 'person d1', 'person d2',
'person d3', 'person d4',
'parent a1 b1',
'parent a1 b2',
'parent b1 c1',
'parent b1 c2',
'parent b2 c3',
'parent b2 c4',
'parent c1 d1',
'parent c2 d2',
'parent c3 d3',
'parent c4 d4' ]
# -
# class BinaryTree:
# def __init__(self, value):
# self.value = value
# self.name = person_name
# self.relationship = relationship
# self.left_child = None
# self.right_child = None
for name_relationship in names_relationship:
print(name_relationship)
for name in names_relationship:
print(name)
names=['person a1', 'person b1', 'person b2', 'person c1', 'person c2', 'person c3', 'person c4', 'person d1', 'person d2','person d3','person d4']
parent=[('a1', 'b1','b2','c1', 'c2', 'c3', 'c4')]
relationship = ()
# +
a1_node = BinaryTree('a1')
a1_node.insert_left('b1')
a1_node.insert_right('b2')
b1_node = a1_node.left_child
b1_node.insert_left('c1')
b1_node.insert_right('c2')
c1_node = b1_node.left_child
c1_node.insert_left('d1')
c2_node = b1_node.right_child
c2_node.insert_left('d2')
d1_node = c1_node.left_child
d2_node = c2_node.left_child
print(a_node.value) # a
print(b_node.value) # b
print(c_node.value) # c
print(d_node.value) # d
print(e_node.value) # e
print(f_node.value) # f
# -
parent.relationship={
'a1':['b1', 'b2'],
'b1':['c1', 'c2'],
'b2':['c3', 'c4'],
'c1':['d1'],
'c2':['d2'],
'c3':['d3'],
}
indexes = range(len(names))
indexes_parent = range(len(parent.relationship))
len(names_relationship)
names_relationship.index('person a1')
#Var index type intit has 9
'''
Create a manual mapping between the index and value for the items in the list 'names'
'''
for index, parent.relationship in zip(indexes_parent, sorted(parent.relationship)):
print(index, name)
for index, name in zip(indexes, sorted(names)):
print(index, name)
# Try again
from queue import Queue
class Node(object):
    """A binary-tree node holding a value and optional left/right children."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert_left(self, child):
        """Attach child on the left; an existing left subtree is pushed below it."""
        if self.left is not None:
            child.left = self.left
        self.left = child

    def insert_right(self, child):
        """Attach child on the right; an existing right subtree is pushed below it."""
        if self.right is not None:
            child.right = self.right
        self.right = child
# + [markdown] code_folding=[]
# class Tree(object):
# def __init__(self):
# self.left = None
# self.right = None
# self.data = None
#
# root = Tree()
# root.data = "root"
# root.left = Tree()
# root.left.data = "left"
# root.right = Tree()
# root.right.data = "right"
#
# +
class BinaryTree(object):
    """A binary-tree node: a value plus optional left/right subtrees.

    Children are stored in ``self.left`` / ``self.right``.  ``left_child`` /
    ``right_child`` are provided as alias properties because later cells in
    this notebook access the children under those names (previously raising
    AttributeError).
    """

    def __init__(self, value):
        self.value = value
        self.relationship = None  # ('parent', 'child', 'sibling')
        self.left = None
        self.right = None

    def insert_left(self, value):
        """Insert ``value`` as the left child, pushing any existing left
        subtree down one level."""
        if self.left is None:
            self.left = BinaryTree(value)
        else:
            new_node = BinaryTree(value)
            new_node.left = self.left
            self.left = new_node

    def insert_right(self, value):
        """Insert ``value`` as the right child (mirror of insert_left)."""
        if self.right is None:
            self.right = BinaryTree(value)
        else:
            new_node = BinaryTree(value)
            new_node.right = self.right
            self.right = new_node

    # Aliases so cells that use the .left_child/.right_child naming work too.
    @property
    def left_child(self):
        return self.left

    @left_child.setter
    def left_child(self, node):
        self.left = node

    @property
    def right_child(self):
        return self.right

    @right_child.setter
    def right_child(self, node):
        self.right = node


tree = BinaryTree('a')
print(tree.value) # a
print(tree.left) # None
print(tree.right) # None
# -
tree = BinaryTree('a1')
print(tree.value) # a
# NOTE(review): these reads assume BinaryTree exposes .left_child /
# .right_child; confirm the class defines (or aliases) those attributes,
# otherwise these lines raise AttributeError.
print(tree.left_child) # None
print(tree.right_child) # None

def insert_left(self, value):
    """Insert *value* as the left child of *self*, pushing any existing
    left subtree down one level.

    Fixed: the first parameter was named ``BinaryTree`` (shadowing the
    class) while the body referenced an undefined ``self``, so every call
    raised NameError.
    """
    if self.left_child is None:
        self.left_child = BinaryTree(value)
    else:
        new_node = BinaryTree(value)
        new_node.left_child = self.left_child
        self.left_child = new_node

def insert_right(self, value):
    """Insert *value* as the right child of *self* (mirror of insert_left)."""
    if self.right_child is None:
        self.right_child = BinaryTree(value)
    else:
        new_node = BinaryTree(value)
        new_node.right_child = self.right_child
        self.right_child = new_node

def bfs(self):
    """Breadth-first traversal: print each node's value level by level."""
    queue = Queue()
    queue.put(self)
    while not queue.empty():
        current_node = queue.get()
        print(current_node.value)
        if current_node.left_child:
            queue.put(current_node.left_child)
        if current_node.right_child:
            queue.put(current_node.right_child)
# +
a_node = BinaryTree('a')
a_node.insert_left('b')
a_node.insert_right('c')

# Fixed: BinaryTree stores its children as .left/.right, so read those
# attributes here (this cell previously used .left_child/.right_child,
# which the class does not define).
b_node = a_node.left
b_node.insert_right('d')

c_node = a_node.right
c_node.insert_left('e')
c_node.insert_right('f')

d_node = b_node.right
e_node = c_node.left
f_node = c_node.right

print(a_node.value) # a
print(b_node.value) # b
print(c_node.value) # c
print(d_node.value) # d
print(e_node.value) # e
print(f_node.value) # f
# -
print(BinaryTree)
# +
a1_node = BinaryTree('a1')
a1_node.insert_left('b1')
a1_node.insert_right('b2')

b1_node = a1_node.left
b1_node.insert_left('c1')
b1_node.insert_right('c2')

c1_node = b1_node.left
c1_node.insert_left('d1')

c2_node = b1_node.right
c2_node.insert_left('d2')

d1_node = c1_node.left
d2_node = c2_node.left

# Fixed copy-paste bug: this cell previously printed the nodes of the
# earlier 'a' tree (a_node, b_node, ...) instead of the 'a1' tree built above.
print(a1_node.value) # a1
print(b1_node.value) # b1
print(c1_node.value) # c1
print(d1_node.value) # d1
# -
| lab1/.ipynb_checkpoints/Untitled-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Objective** Read the AudioSet data and convert it into a usable JSON format file.
#
# **Prerequisite**: Download the data from [AudioSet](https://research.google.com/audioset/download.html) and uncompress the `tar.gz` file. Make sure this notebook is in the same directory as the uncompressed embeddings folder that is generated `audioset_v1_embeddings`
# +
# #!pip3 install tensorflow
# -
import numpy as np
import json
import tensorflow as tf
import os
import pandas as pd
# +
directory = "audioset_v1_embeddings/eval"

# Collect the full path of every .tfrecord file in the embeddings folder.
dataset = [
    os.path.join(directory, file_name)
    for file_name in os.listdir(directory)
    if file_name.endswith(".tfrecord")
]
# -
# Switch TF1 to eager execution so the TFRecord dataset can be iterated
# directly (raw_record.numpy() below relies on this).
tf.compat.v1.enable_eager_execution()
raw_dataset = tf.data.TFRecordDataset(dataset)
# +
# Map class indices to display names and pick out every class whose display
# name mentions "Music" (case-insensitive).
class_labels = pd.read_csv('class_labels_indices.csv')
labels = class_labels['display_name'].tolist()
music_class = class_labels[class_labels['display_name'].str.contains('Music', case=False)]
music_labels = music_class['index'].tolist()
# +
audios = []
counter = 0
NUM_SECONDS = 10  # number of 1-second embedding frames to keep per clip

for raw_record in raw_dataset:
    example = tf.train.SequenceExample()
    example.ParseFromString(raw_record.numpy())

    # Audio metadata from the protobuf context.
    audio_labels = example.context.feature['labels'].int64_list.value
    start_time = example.context.feature['start_time_seconds'].float_list.value
    end_time = example.context.feature['end_time_seconds'].float_list.value
    video_id = example.context.feature['video_id'].bytes_list.value

    # Keep only clips tagged with at least one music label.
    if not (set(music_labels) & set(audio_labels)):
        continue

    # Audio features: one embedding (list of bytes) per second.
    feature_list = example.feature_lists.feature_list['audio_embedding'].feature
    final_features = [list(feature.bytes_list.value[0]) for feature in feature_list]
    # Fixed ordering: skip too-short clips *before* flattening the embedding
    # (previously the flattened embedding was built and then thrown away).
    if len(final_features) < NUM_SECONDS:
        continue
    audio_embedding = [item for sublist in final_features[:NUM_SECONDS] for item in sublist]

    audio = {
        'label': audio_labels,
        'video_id': video_id[0],
        'start_time': start_time[0],
        'end_time': end_time[0],
        'data': audio_embedding
    }
    audios.append(audio)
    counter += 1
    if counter % 100 == 0:
        print(f"Processing {counter}th file ...")
# -
# -
# Write the collected clips out as real JSON.  Fixed: the Python repr() of
# the list was previously dumped as one giant JSON *string*, which is not
# parseable as structured data; protobuf repeated containers and bytes are
# also not JSON-serializable, so convert them explicitly.
with open('music_set.json', 'w') as file:
    serializable = [
        {
            'label': list(audio['label']),
            'video_id': audio['video_id'].decode('utf-8'),
            'start_time': audio['start_time'],
            'end_time': audio['end_time'],
            'data': audio['data'],
        }
        for audio in audios
    ]
    json.dump(serializable, file)

# Peek at the first ten embedding bytes of the first few clips.
[audio['data'][:10] for audio in audios[:4]]
# ### References
#
# How to read from `tfrecord` files: https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/load_data/tfrecord.ipynb#scrollTo=nsEAACHcnm3f
| AudioSet Processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
# ### Declaring constants in tensorflow
x = tf.constant(6)
y = tf.constant(5)
# Printing TF1 constants shows the symbolic Tensor objects, not their values:
print(x,y)
# Plain Python `+` on tensors also yields a symbolic Tensor, not a number.
add = x + y
print(add)
# Nothing shows up as you can see; now let's try it with the TensorFlow operations.
add = tf.add(x,y)
print(add)
# The ops above only build the graph; a Session is needed to actually
# evaluate `add` and produce its numeric value.
with tf.Session() as sess:
    output_add = sess.run(add)
    print(output_add)
    # after this line, session automatically closes
# ### Session basically does the addition operation but when called, it will not perform the function until it is run in a session, you can run it in a session, store in some variable and you can access it after coming out of session as well.
# Once created I can access this variable out of the session as well.
output_add
# MULTIPLY
multiply = tf.multiply(x,y)
with tf.Session() as sess:
    output_mul = sess.run(multiply)
    print(output_mul)
| Basics/Demo of the using sessions in Tensorflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Apache Spark (Anaconda Python 3)
# language: python
# name: spark-anaconda
# ---
# # PageRank in [Apache Spark](http://spark.apache.org)
#
# This is one of the basic examples how Apache Spark works and how it looks like.
#
# Make sure to read about [Transformations](http://spark.apache.org/docs/latest/programming-guide.html#transformations) and especially map, flatMap, join, ...
#
# Read more about the RDD's [Python API](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD).
#
# When using the "Apache Spark" themes kernels in SageMathCloud,
# the object "`sc`" for the "Spark Context" is already pre-initialized.
sc
# The data for your simplified link graph: node -> list of outgoing links.
link_data = {
    0: [1, 11],
    1: [2, 6],
    2: [1, 0],
    3: [1, 0],
    4: [1],
    5: [0, 1],
    6: [0, 7],
    7: [0, 1, 2, 3, 9],
    8: [0, 5, 9],
    9: [7, 11],
    10: [4, 7],
    11: [0, 10]
}
# Visualise the link graph with networkx before running PageRank on it.
import networkx as nx
link_graph = nx.DiGraph(link_data)
labels = dict((n, str(n)) for n in link_graph.nodes())
nx.draw_circular(link_graph, labels = labels)
# The initial rank-data is the value 1. for each node.
ranks = sc.parallelize(link_data.keys()).map(lambda x : (x, 1.))
# This initializes the edges of the graph data as `links`, which are modeled in Spark as key-value-tuples.
links = sc.parallelize(link_data.items()).cache()
# This is a demonstration of what does happen, when the rank-key-value tuples are joined with the links-key-value tuples.
# Take a close look, it's a list of tuples in tuples with lists inside of them!
sorted(links.join(ranks).collect())
def computeContribs(node_urls_rank):
    """
    Given one joined element ``(node, (urls, rank))``, yield an
    ``(url, contribution)`` pair for every outgoing link, splitting the
    node's current rank evenly across them.
    """
    _, (urls, rank) = node_urls_rank
    share = rank / len(urls)
    for url in urls:
        yield url, share
# Here is a debug printout to outline what the first operation in the code below is doing:
c = links.join(ranks).flatMap(computeContribs)
print(c.toDebugString().decode("utf8"))
# ## Executing the PageRank Algorithm
# This takes a while to execute. Do
#
#     tail -f ~/.smc/jupyter/jupyter-notebook.*
#
# in a Terminal to see what's going on behind the scenes!
# +
from operator import add
for iteration in range(10):
    # compute contributions of each node where it links to
    contribs = links.join(ranks).flatMap(computeContribs)
    # use a full outer join to make sure, that not well connected nodes aren't dropped
    contribs = links.fullOuterJoin(contribs).mapValues(lambda x : x[1] or 0.0)
    # Sum up all contributions per link
    ranks = contribs.reduceByKey(add)
    # Re-calculate URL ranks (0.85 damping, 0.15 base rank)
    ranks = ranks.mapValues(lambda rank: rank * 0.85 + 0.15)

# Collects all URL ranks (normalised by the number of nodes for display)
for (link, rank) in sorted(ranks.collect()):
    print("%s has rank: %s." % (link, rank / len(link_data)))
# -
# ## Comparison with NetworkX
import networkx as nx
g = nx.DiGraph(link_data)
nx.pagerank(g)
# ## Task
#
# Now go back to the directed graph in the beginning and make up your mind if those numbers make sense or not.
# Why is the weight of node 1 higher than the weight of node 2?
| data-analysis/spark-pagerank.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
from GAN import GAN
from utils import show_all_variables
from utils import load_cifar10_with_validation
import tensorflow as tf
import argparse
import numpy as np
import matplotlib.pyplot as plt
from utils import load_cifar10_with_validation
from scipy.spatial import distance as dist
import matplotlib.pyplot as plt
import numpy as np
import argparse
import glob
import cv2
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
# Restore the trained GAN from its checkpoint and draw a batch of samples.
gan = GAN(sess, epoch=500, batch_size=100, dataset_name='cifar10', checkpoint_dir='checkpoint', result_dir='results', log_dir='logs', directory='./train', reuse=True)
gan.build_model()
gan.load_model()
samples = gan.sample()
# -
samples
# NOTE(review): save_img is defined in the *next* cell, so running this
# notebook top-to-bottom raises NameError here — run the definition cell first.
save_img(samples, [4,4])
def save_img(images, size, pad=5):
    """Tile ``size[0] x size[1]`` images (each 32x32x3) into one grid with
    ``pad`` pixels of white padding between tiles, display it, and save it
    to 'gan_cifar-10.png'.

    Each tile is min/max-normalised into [0, 1] independently.
    """
    rows, cols = size
    figure = np.ones((32 * rows + (rows - 1) * pad,
                      32 * cols + (cols - 1) * pad, 3))
    for i in range(rows):
        for j in range(cols):
            # Fixed: the row-major index is i*cols + j; the old i*size[0] + j
            # was only correct for square grids.
            idx = i * cols + j
            # Work on a float copy so the caller's array is not mutated
            # (the old code normalised through a reshape view, in place).
            img = np.array(images[idx], dtype=float).reshape(32, 32, 3)
            v_min = img.min()
            v_max = img.max()
            span = v_max - v_min
            # Guard against division by zero for constant-valued tiles.
            img = (img - v_min) / span if span else np.zeros_like(img)
            top = i * (32 + pad)
            left = j * (32 + pad)
            figure[top:top + 32, left:left + 32] = img
    plt.figure(figsize=(20, 20))
    plt.imshow(figure)
    plt.imsave('gan_cifar-10.png', figure)
    plt.show()
| Monte-Carlo-Attacks/Monte-Carlo-CIFAR_GAN/.ipynb_checkpoints/Create Images-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="RVAQjtI_cvsv"
# We would like to work with a prediction / classification problem. We are interested in seeing which font color to use on different background colors (in RGB values).
#
# Say we choose a specific color like this one. We would like to make a model which can predict which font color is best suited for it with regard to its luminance (we are only working with white or black fonts).
#
# 
#
# There is a formula for solving this problem. The correct font color for the background is decided by how bright the background color is and if background color luminance > 0.5 then you should choose a dark font and of course if the background color luminance < 0.5 you should choose a white font.
# This formula can be found on StackOverflow: https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color#1855903
#
# 
#
# But what if we didn't have this formula, or it simply hasn't been discovered yet? In that case, a neural network can help us. We therefore try to make a model, which can solve this prediction problem for us using a neural network.
#
# 
#
# Credits for idea and illustrations go to: <NAME> (Youtube: https://www.youtube.com/watch?v=tAioWlhKA90)
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="nreuHxT2ONCI" outputId="4a60d2ee-5c79-4684-a247-6d48c24c6a0e"
#First we imported the needed libraries
import pandas as pd
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
from keras.callbacks import EarlyStopping
# + [markdown] colab_type="text" id="LdylSeCYzePJ"
# Here, we download our training dataset.
# + colab={"base_uri": "https://localhost:8080/", "height": 210} colab_type="code" id="xadss7RsOvqJ" outputId="a85582c3-54d4-4966-fa8b-baac5ed9ab61"
# Download the color training dataset
# !wget https://raw.githubusercontent.com/thomasnield/kotlin_simple_neural_network/master/src/main/resources/color_training_set.csv
# + colab={} colab_type="code" id="IPWbgPK3O4Xj"
# Load the RGB training colours and give the columns explicit names.
predictors = pd.read_csv('color_training_set.csv')
predictors.columns = ['r', 'g', 'b']
# + colab={"base_uri": "https://localhost:8080/", "height": 158} colab_type="code" id="TLXo2G8YXexd" outputId="f1e2ac6f-fdec-4214-8616-2ca3dd9b33dd"
predictors.info()
# + [markdown] colab_type="text" id="G4TKS168zmH7"
# As can be seen, the training dataset consists of 1344 different background colors in RGB values. We will use these to train our model.
#
# Next up, we define a function to calculate the optimal font color using the formula we described before. This will be used to classify which font color is optimal for each of our 1344 background colors.
# Of course, this is a bit of a cheat since we supposedly haven't discovered this formula yet, but the equivalent would be to manually classify each background color by hand. We simply do this to save time, yay for being lazy! 😏
# + colab={} colab_type="code" id="cJptJ0BFUuRP"
# Predict the optimal shade using the formula. This function takes a list as its input.
def FormulaicPredict(color):
    """Return a pandas Series of [dark, light] flags for an RGB background.

    Exactly one flag is 1: dark (black) font for bright backgrounds
    (luminance > 0.5), light (white) font otherwise.
    """
    r, g, b = color[0], color[1], color[2]
    luminance = (0.299 * r + 0.587 * g + 0.114 * b) / 255
    if luminance > 0.5:
        dark, light = 1, 0  # bright background, black font
    else:
        dark, light = 0, 1  # dark background, white font
    return pd.Series([dark, light])
# + [markdown] colab_type="text" id="M2as0FAF1L6j"
# We will now apply the above formula to each row in our training dataset. With this, we create a new DataFrame with the predictions for each background color.
# + colab={} colab_type="code" id="OBho0ZuCWjdq"
# Label every training colour with the formula's [dark, light] answer.
target = predictors.apply(FormulaicPredict, axis=1)
target.columns = ['dark', 'light']
# + colab={"base_uri": "https://localhost:8080/", "height": 195} colab_type="code" id="esSsrv3mhm3d" outputId="f8607d7a-b94d-4698-a165-db3f6a87e94a"
target.head()
# + [markdown] colab_type="text" id="yvaRRlvlom3s"
# We start building the architecture for our model, first we set up our model with the Sequential() function.
#
# We add our first layer using 3 nodes, using the 'relu' activation which is one of the most commonly used activation functions.
#
# 
#
# Here is a picture of some of the other activation functions just to give a understanding of how they work.
#
#
#
#
#
#
#
# 
#
# Here we see a picture of how the idea of our different layers of the model should work with the relu activation function, we take our nodes value and they are multiplied by the weights and added in the next layer and then again multiplied and added for a sum. This should give the network the value to predict which font color to use based on which background color it see's. These weights can change over time as the model get's more information.
# + colab={"base_uri": "https://localhost:8080/", "height": 3202} colab_type="code" id="J5ONXE_McfFN" outputId="3157946b-03d4-4e3a-fe82-e90c405683ac"
np.random.seed(10) # Set to 10 for a good result, 8 for a worse one
# NOTE(review): seeding NumPy alone does not fully determine Keras/TF weight
# initialisation; results may still vary between runs — confirm if exact
# reproducibility matters.
model = Sequential()
model.add(Dense(3, activation='relu', input_dim=3))
model.add(Dense(3, activation='relu'))
model.add(Dense(2, activation='sigmoid'))
# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Stop training after 3 epochs without improvement (EarlyStopping monitors
# val_loss by default per the Keras docs).
early_stopping_monitor = EarlyStopping(patience=3)
# Fit the model
history = model.fit(predictors, target, validation_split=0.3, epochs=250, callbacks=[early_stopping_monitor])
# + colab={"base_uri": "https://localhost:8080/", "height": 251} colab_type="code" id="BVYfLtMeKFM8" outputId="0688173b-acdb-4f6b-cbe0-d39f6beac92f"
model.summary()
# -
# Let's plot the results of our trained model. Here we plot the accuracy and loss over time.
import matplotlib.pyplot as plt
import seaborn as sns
# +
plt.style.use('seaborn')
# summarize history for accuracy
# NOTE(review): the keys 'acc'/'val_acc' are the old Keras (<2.3) names;
# newer versions use 'accuracy'/'val_accuracy' — confirm against the
# installed Keras version.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + [markdown] colab_type="text" id="X3XbdsGRuu1q"
# There is room for optimizing the model, where you can think about different things.
# How many layers are needed, how many nodes are needed in every layer and which activation function should you use.
# You can also tweak the model's learning rate and the way its error is measured.
#
# Let's quickly try predicting with a black background color.
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="D8CCRoWtKepd" outputId="c01fe234-4476-4ecd-e6bc-81c3e7d4a0ff"
# Predict for a pure-black background; the output row is [dark_score, light_score].
bgcolor = pd.DataFrame([[0,0,0]])
prediction = model.predict(bgcolor)
prediction
# -
# The left value is dark, the right value is light. Ideally, the preferred font color would be 1, but it would require a lot of optimization to reach that, as that would be a perfectly trained model. To evaluate which font color we should go with, we simply check which of these values is the highest instead.
# + [markdown] colab_type="text" id="u5GH_L-sKFNC"
# ## Interactive prediction
# -
# The following code won't run without Jupyter Notebook since we use some features specific to that.
# + colab={} colab_type="code" id="5fCB8XUSdu5u"
from IPython.core.display import display, HTML
def predictColor(r, g, b):
    """Render sample text on an (r, g, b) background, using whichever font
    colour (black or white) the trained model scores higher for it."""
    bgcolor = pd.DataFrame([[r, g, b]])
    bgcolor_hex = '#%02x%02x%02x' % (r, g, b)
    prediction = model.predict(bgcolor)
    # prediction[0] holds [dark_score, light_score]
    dark_score, light_score = prediction[0][0], prediction[0][1]
    fontcolor = '#000' if dark_score > light_score else '#fff'
    display(HTML('<div style="background-color:{0}; color:{1};">SOME TEXT</div>'.format(bgcolor_hex, fontcolor)))
# + colab={} colab_type="code" id="8CF96Sr2yltu"
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
# Colour-picker widget; every value change triggers a fresh model prediction.
colorpicker = widgets.ColorPicker(
    concise=False,
    description='Pick a color',
    value='blue',
    disabled=False
)
display(colorpicker)
# Output area that captures what on_change() renders.
out = widgets.Output()
display(out)
@out.capture()
def on_change(change):
    # Only react to actual value changes (ignore other widget events).
    if change['type'] == 'change' and change['name'] == 'value':
        h = colorpicker.value
        # Convert '#rrggbb' hex into an (r, g, b) tuple of ints.
        rgb = tuple(int(h.lstrip('#')[i:i+2], 16) for i in (0, 2 ,4))
        predictColor(rgb[0], rgb[1], rgb[2])
colorpicker.observe(on_change)
| m3_presentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=["pdf-title"]
# # What's this PyTorch business?
#
# You've written a lot of code in this assignment to provide a whole host of neural network functionality. Dropout, Batch Norm, and 2D convolutions are some of the workhorses of deep learning in computer vision. You've also worked hard to make your code efficient and vectorized.
#
# For the last part of this assignment, though, we're going to leave behind your beautiful codebase and instead migrate to one of two popular deep learning frameworks: in this instance, PyTorch (or TensorFlow, if you choose to use that notebook).
# + [markdown] tags=["pdf-ignore"]
# ### What is PyTorch?
#
# PyTorch is a system for executing dynamic computational graphs over Tensor objects that behave similarly as numpy ndarray. It comes with a powerful automatic differentiation engine that removes the need for manual back-propagation.
#
# ### Why?
#
# * Our code will now run on GPUs! Much faster training. When using a framework like PyTorch or TensorFlow you can harness the power of the GPU for your own custom neural network architectures without having to write CUDA code directly (which is beyond the scope of this class).
# * We want you to be ready to use one of these frameworks for your project so you can experiment more efficiently than if you were writing every feature you want to use by hand.
# * We want you to stand on the shoulders of giants! TensorFlow and PyTorch are both excellent frameworks that will make your lives a lot easier, and now that you understand their guts, you are free to use them :)
# * We want you to be exposed to the sort of deep learning code you might run into in academia or industry.
#
# ### PyTorch versions
# This notebook assumes that you are using **PyTorch version 1.0**. In some of the previous versions (e.g. before 0.4), Tensors had to be wrapped in Variable objects to be used in autograd; however Variables have now been deprecated. In addition 1.0 also separates a Tensor's datatype from its device, and uses numpy-style factories for constructing Tensors rather than directly invoking Tensor constructors.
# + [markdown] tags=["pdf-ignore"]
# ## How will I learn PyTorch?
#
# <NAME> has made an excellent [tutorial](https://github.com/jcjohnson/pytorch-examples) for PyTorch.
#
# You can also find the detailed [API doc](http://pytorch.org/docs/stable/index.html) here. If you have other questions that are not addressed by the API docs, the [PyTorch forum](https://discuss.pytorch.org/) is a much better place to ask than StackOverflow.
#
#
# # Table of Contents
#
# This assignment has 5 parts. You will learn PyTorch on **three different levels of abstraction**, which will help you understand it better and prepare you for the final project.
#
# 1. Part I, Preparation: we will use CIFAR-10 dataset.
# 2. Part II, Barebones PyTorch: **Abstraction level 1**, we will work directly with the lowest-level PyTorch Tensors.
# 3. Part III, PyTorch Module API: **Abstraction level 2**, we will use `nn.Module` to define arbitrary neural network architecture.
# 4. Part IV, PyTorch Sequential API: **Abstraction level 3**, we will use `nn.Sequential` to define a linear feed-forward network very conveniently.
# 5. Part V, CIFAR-10 open-ended challenge: please implement your own network to get as high accuracy as possible on CIFAR-10. You can experiment with any layer, optimizer, hyperparameters or other advanced features.
#
# Here is a table of comparison:
#
# | API | Flexibility | Convenience |
# |---------------|-------------|-------------|
# | Barebone | High | Low |
# | `nn.Module` | High | Medium |
# | `nn.Sequential` | Low | High |
# -
# # Part I. Preparation
#
# First, we load the CIFAR-10 dataset. This might take a couple minutes the first time you do it, but the files should stay cached after that.
#
# In previous parts of the assignment we had to write our own code to <span class="burk">download the CIFAR-10 dataset, preprocess it, and iterate through it in minibatches</span>; PyTorch provides convenient tools to automate this process for us.
# + tags=["pdf-ignore"]
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data import sampler
import torchvision.datasets as dset
import torchvision.transforms as T
import numpy as np
# + tags=["pdf-ignore"]
NUM_TRAIN = 49000
# The torchvision.transforms package provides tools for preprocessing data
# and for performing data augmentation; here we set up a transform to
# preprocess the data by subtracting the mean RGB value and dividing by the
# standard deviation of each RGB value; we've hardcoded the mean and std.
transform = T.Compose([
    T.ToTensor(),
    T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
# We set up a Dataset object for each split (train / val / test); Datasets load
# training examples one at a time, so we wrap each Dataset in a DataLoader which
# iterates through the Dataset and forms minibatches. We divide the CIFAR-10
# training set into train and val sets by passing a Sampler object to the
# DataLoader telling how it should sample from the underlying Dataset.
cifar10_train = dset.CIFAR10('./cs231n/datasets', train=True, download=True,
                             transform=transform)
loader_train = DataLoader(cifar10_train, batch_size=64,
                          sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN)))
# Train and val wrap the same underlying training data; the samplers split it
# into the first NUM_TRAIN examples vs. the remaining 1000.
cifar10_val = dset.CIFAR10('./cs231n/datasets', train=True, download=True,
                           transform=transform)
loader_val = DataLoader(cifar10_val, batch_size=64,
                        sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN, 50000)))
cifar10_test = dset.CIFAR10('./cs231n/datasets', train=False, download=True,
                            transform=transform)
loader_test = DataLoader(cifar10_test, batch_size=64)
# + [markdown] tags=["pdf-ignore"]
# You have an option to **use GPU by setting the flag to True below**. It is not necessary to use GPU for this assignment. Note that if your computer does not have CUDA enabled, `torch.cuda.is_available()` will return False and this notebook will fallback to CPU mode.
#
# The global variables `dtype` and `device` will control the data types throughout this assignment.
# + tags=["pdf-ignore-input"]
USE_GPU = True

dtype = torch.float32  # we will be using float throughout this tutorial

# Pick the GPU when requested and available, otherwise fall back to the CPU.
device = torch.device('cuda' if USE_GPU and torch.cuda.is_available() else 'cpu')

# Constant to control how frequently we print train loss
print_every = 100

print('using device:', device)
# -
# # Part II. Barebones PyTorch
#
# PyTorch ships with high-level APIs to help us define model architectures conveniently, which we will cover in Part II of this tutorial. In this section, we will start with the barebone PyTorch elements to understand the autograd engine better. After this exercise, you will come to <span class="burk">appreciate the high-level model API more</span>.
#
# We will start with a simple fully-connected ReLU network with two hidden layers and no biases for CIFAR classification.
# This implementation computes the forward pass using operations on PyTorch Tensors, and uses PyTorch autograd to compute gradients. It is important that you understand every line, because you will write a harder version after the example.
#
# When we create a PyTorch Tensor with `requires_grad=True`, then operations involving that Tensor will not just compute values; they will also build up a computational graph in the background, allowing us to easily backpropagate through the graph to compute gradients of some Tensors with respect to a downstream loss. Concretely if x is a Tensor with `x.requires_grad == True` then after backpropagation `x.grad` will be another Tensor holding the gradient of x with respect to the scalar loss at the end.
# + [markdown] tags=["pdf-ignore"]
# ### PyTorch Tensors: Flatten Function
# A PyTorch Tensor is conceptionally similar to a numpy array: it is an n-dimensional grid of numbers, and like numpy PyTorch provides many functions to efficiently operate on Tensors. As a simple example, we provide a `flatten` function below which reshapes image data for use in a fully-connected neural network.
#
# Recall that image data is typically stored in a Tensor of shape N x C x H x W, where:
#
# * N is the number of datapoints
# * C is the number of channels
# * H is the height of the intermediate feature map in pixels
# * W is the width of the intermediate feature map in pixels
#
# This is the right way to represent the data when we are doing something like a 2D convolution, that needs spatial understanding of where the intermediate features are relative to each other. When we use fully connected affine layers to process the image, however, we want each datapoint to be represented by a single vector -- it's no longer useful to segregate the different channels, rows, and columns of the data. So, we use a "flatten" operation to collapse the `C x H x W` values per representation into a single long vector. <span class="mark">The flatten function below first reads in the N, C, H, and W values from a given batch of data, and then returns a "view" of that data. "View" is analogous to numpy's "reshape" method: it reshapes x's dimensions to be N x ??, where ?? is allowed to be anything (in this case, it will be C x H x W, but we don't need to specify that explicitly).</span>
# + tags=["pdf-ignore-input"]
def flatten(x):
    """Collapse the C x H x W dimensions of a (N, C, H, W) batch into one,
    returning a view of shape (N, C*H*W)."""
    batch_size = x.shape[0]
    return x.view(batch_size, -1)
def test_flatten():
    """Smoke-test flatten() on a small (2, 1, 3, 2) tensor."""
    sample = torch.arange(12).view(2, 1, 3, 2)
    print('Before flattening: ', sample)
    print('After flattening: ', flatten(sample))

test_flatten()
# + [markdown] tags=["pdf-ignore"]
# ### Barebones PyTorch: Two-Layer Network
#
# Here we define a function `two_layer_fc` which performs the forward pass of a two-layer fully-connected ReLU network on a batch of image data. After defining the forward pass we check that it doesn't crash and that it produces outputs of the right shape by running zeros through the network.
#
# You don't have to write any code here, but it's important that you read and understand the implementation.
# + code_folding=[] tags=["pdf-ignore-input"]
import torch.nn.functional as F # useful stateless functions
def two_layer_fc(x, params):
    """
    A fully-connected neural network; the architecture is:
    fully connected -> ReLU -> fully connected layer.

    Only the forward pass is defined here; PyTorch autograd takes care of
    the backward pass.

    Inputs:
    - x: A PyTorch Tensor of shape (N, d1, ..., dM) giving a minibatch of
      input data (with d1 * ... * dM = D).
    - params: A list [w1, w2] of PyTorch Tensors giving weights for the
      network; w1 has shape (D, H) and w2 has shape (H, C).

    Returns:
    - scores: A PyTorch Tensor of shape (N, C) giving classification scores
      for the input data x.
    """
    w1, w2 = params
    # Collapse each example to a single feature vector of length D
    # (same as the flatten() helper defined earlier in this notebook).
    features = x.view(x.shape[0], -1)
    # Hidden layer with ReLU; because w1/w2 have requires_grad=True these
    # operations build the computational graph for automatic differentiation.
    hidden = F.relu(features.mm(w1))
    return hidden.mm(w2)
def two_layer_fc_test():
    """Sanity check: zeros through the network should give [64, 10] scores."""
    hidden_size = 42
    batch = torch.zeros((64, 50), dtype=dtype)  # minibatch size 64, feature dimension 50
    w1 = torch.zeros((50, hidden_size), dtype=dtype)
    w2 = torch.zeros((hidden_size, 10), dtype=dtype)
    print(two_layer_fc(batch, [w1, w2]).size())  # you should see [64, 10]

two_layer_fc_test()
# -
# ### Barebones PyTorch: Three-Layer ConvNet
#
# Here you will complete the implementation of the function `three_layer_convnet`, which will perform the forward pass of a three-layer convolutional network. Like above, we can immediately test our implementation by passing zeros through the network. The network should have the following architecture:
#
# 1. A convolutional layer (with bias) with `channel_1` filters, each with shape `KW1 x KH1`, and zero-padding of two
# 2. ReLU nonlinearity
# 3. A convolutional layer (with bias) with `channel_2` filters, each with shape `KW2 x KH2`, and zero-padding of one
# 4. ReLU nonlinearity
# 5. Fully-connected layer with bias, producing scores for C classes.
#
# Note that we have **no softmax activation** here after our fully-connected layer: this is because PyTorch's cross entropy loss performs a softmax activation for you, and by bundling that step in makes computation more efficient.
#
# **HINT**: For convolutions: http://pytorch.org/docs/stable/nn.html#torch.nn.functional.conv2d; pay attention to the shapes of convolutional filters!
def three_layer_convnet(x, params):
    """
    Performs the forward pass of a three-layer convolutional network with the
    architecture defined above (conv -> ReLU -> conv -> ReLU -> affine).

    Inputs:
    - x: A PyTorch Tensor of shape (N, 3, H, W) giving a minibatch of images
    - params: A list of PyTorch Tensors giving the weights and biases for the
      network; should contain the following:
      - conv_w1: PyTorch Tensor of shape (channel_1, 3, KH1, KW1) giving weights
        for the first convolutional layer
      - conv_b1: PyTorch Tensor of shape (channel_1,) giving biases for the first
        convolutional layer
      - conv_w2: PyTorch Tensor of shape (channel_2, channel_1, KH2, KW2) giving
        weights for the second convolutional layer
      - conv_b2: PyTorch Tensor of shape (channel_2,) giving biases for the second
        convolutional layer
      - fc_w: PyTorch Tensor of shape (channel_2 * H * W, C) giving weights for
        the fully-connected layer
      - fc_b: PyTorch Tensor of shape (C,) giving biases for the fully-connected
        layer

    Returns:
    - scores: PyTorch Tensor of shape (N, C) giving classification scores for x
    """
    conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b = params
    scores = None
    ################################################################################
    # TODO: Implement the forward pass for the three-layer ConvNet.                #
    ################################################################################
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    # Zero-padding of 2 (first conv) and 1 (second conv) preserves the spatial
    # size for the 5x5/3x3 kernels used in this assignment, so the FC layer
    # sees channel_2 * H * W features per image.
    out = F.relu(F.conv2d(x, conv_w1, bias=conv_b1, stride=1, padding=2))
    out = F.relu(F.conv2d(out, conv_w2, bias=conv_b2, stride=1, padding=1))
    # Flatten to [N, channel_2 * H * W]; Tensor.flatten(start_dim=1) is the
    # built-in equivalent of the module-level flatten() helper.
    # (Removed leftover debug print statements of intermediate shapes.)
    out = out.flatten(start_dim=1)
    scores = out.mm(fc_w) + fc_b
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    ################################################################################
    #                                 END OF YOUR CODE                             #
    ################################################################################
    return scores
# After defining the forward pass of the ConvNet above, run the following cell to test your implementation.
#
# When you run this function, scores should have shape (64, 10).
# + code_folding=[] tags=["pdf-ignore-input"]
def three_layer_convnet_test():
    """Shape-check three_layer_convnet with all-zero inputs and parameters."""
    images = torch.zeros((64, 3, 32, 32), dtype=dtype)  # minibatch of 64, image size [3, 32, 32]
    # Conv filters are laid out as [out_channels, in_channels, kernel_H, kernel_W].
    kernel_1 = torch.zeros((6, 3, 5, 5), dtype=dtype)
    bias_1 = torch.zeros((6,))
    kernel_2 = torch.zeros((9, 6, 3, 3), dtype=dtype)
    bias_2 = torch.zeros((9,))
    # Padding preserves the 32x32 spatial size, so the FC input is 9 * 32 * 32.
    fc_weight = torch.zeros((9 * 32 * 32, 10))
    fc_bias = torch.zeros(10)
    weights = [kernel_1, bias_1, kernel_2, bias_2, fc_weight, fc_bias]
    scores = three_layer_convnet(images, weights)
    print(scores.size())  # expected: torch.Size([64, 10])
three_layer_convnet_test()
# -
# ### Barebones PyTorch: Initialization
# Let's write a couple utility methods to initialize the weight matrices for our models.
#
# - `random_weight(shape)` initializes a weight tensor with the Kaiming normalization method.
# - `zero_weight(shape)` initializes a weight tensor with all zeros. Useful for instantiating bias parameters.
#
# The `random_weight` function uses the Kaiming normal initialization method, described in:
#
# He et al, *Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification*, ICCV 2015, https://arxiv.org/abs/1502.01852
# + tags=["pdf-ignore-input"]
def random_weight(shape):
    """
    Create a random weight Tensor initialized with the Kaiming normal method,
    i.e. scaled by sqrt(2 / fan_in); requires_grad=True so gradients are
    computed for it during the backward pass.
    """
    # Fan-in is the first dim for FC weights [in, out]; for conv weights
    # [out_channel, in_channel, kH, kW] it is in_channel * kH * kW.
    fan_in = shape[0] if len(shape) == 2 else np.prod(shape[1:])
    scale = np.sqrt(2. / fan_in)
    # torch.randn samples from the standard normal distribution.
    weight = torch.randn(shape, device=device, dtype=dtype) * scale
    weight.requires_grad = True
    return weight
def zero_weight(shape):
    """Return an all-zero trainable Tensor; useful for instantiating bias parameters."""
    bias = torch.zeros(shape, device=device, dtype=dtype, requires_grad=True)
    return bias
# create a weight of shape [3 x 5]
# you should see the type `torch.cuda.FloatTensor` if you use GPU.
# Otherwise it should be `torch.FloatTensor`
random_weight((3, 5))
# -
# ### Barebones PyTorch: Check Accuracy
# When training the model we will use the following function to check the accuracy of our model on the training or validation sets.
#
# When checking accuracy we don't need to compute any gradients; as a result we don't need PyTorch to build a computational graph for us when we compute scores. To prevent a graph from being built we scope our computation under a `torch.no_grad()` context manager.
# + tags=["pdf-ignore-input"]
def check_accuracy_part2(loader, model_fn, params):
    """
    Check the accuracy of a classification model.
    Inputs:
    - loader: A DataLoader for the data split we want to check
    - model_fn: A function that performs the forward pass of the model,
      with the signature scores = model_fn(x, params)
    - params: List of PyTorch Tensors giving parameters of the model
    Returns: Nothing, but prints the accuracy of the model
    """
    split = 'val' if loader.dataset.train else 'test'
    print('Checking accuracy on the %s set' % split)
    correct = 0
    total = 0
    # No gradients are needed for evaluation, so skip building the graph.
    with torch.no_grad():
        for images, labels in loader:
            images = images.to(device=device, dtype=dtype)
            labels = labels.to(device=device, dtype=torch.int64)
            predictions = model_fn(images, params).argmax(dim=1)
            correct += (predictions == labels).sum()
            total += predictions.size(0)
    accuracy = float(correct) / total
    print('Got %d / %d correct (%.2f%%)' % (correct, total, 100 * accuracy))
# -
# ### BareBones PyTorch: Training Loop
# We can now set up a basic training loop to train our network. We will train the model using stochastic gradient descent without momentum. We will use `torch.functional.cross_entropy` to compute the loss; you can [read about it here](http://pytorch.org/docs/stable/nn.html#cross-entropy).
#
# The training loop takes as input the neural network function, a list of initialized parameters (`[w1, w2]` in our example), and learning rate.
# + tags=["pdf-ignore-input"]
def train_part2(model_fn, params, learning_rate):
    """
    Train a model on CIFAR-10 with plain SGD (no momentum).
    Inputs:
    - model_fn: A Python function that performs the forward pass of the model.
      It should have the signature scores = model_fn(x, params) where x is a
      PyTorch Tensor of image data, params is a list of PyTorch Tensors giving
      model weights, and scores is a PyTorch Tensor of shape (N, C) giving
      scores for the elements in x.
    - params: List of PyTorch Tensors giving weights for the model
    - learning_rate: Python scalar giving the learning rate to use for SGD
    Returns: Nothing
    """
    for step, (images, labels) in enumerate(loader_train):
        # Move the minibatch to the proper device (GPU or CPU).
        images = images.to(device=device, dtype=dtype)
        labels = labels.to(device=device, dtype=torch.long)
        # Forward pass; autograd records operations on Tensors that require
        # gradients, so no hand-written backward pass is needed.
        loss = F.cross_entropy(model_fn(images, params), labels)
        # Backward pass populates .grad on every leaf Tensor with
        # requires_grad=True.
        loss.backward()
        # The SGD update runs under no_grad() so the parameter updates
        # themselves are not recorded in the computational graph.
        with torch.no_grad():
            for weight in params:
                weight -= learning_rate * weight.grad
                # Manually reset the gradient for the next iteration.
                weight.grad.zero_()
        if step % print_every == 0:
            print('Iteration %d, loss = %.4f' % (step, loss.item()))
            check_accuracy_part2(loader_val, model_fn, params)
            print()
# -
# ### BareBones PyTorch: Train a Two-Layer Network
# Now we are ready to run the training loop. We need to explicitly allocate tensors for the fully connected weights, `w1` and `w2`.
#
# Each minibatch of CIFAR has 64 examples, so the tensor shape is `[64, 3, 32, 32]`.
#
# After flattening, `x` shape should be `[64, 3 * 32 * 32]`. This will be the size of the first dimension of `w1`.
# The second dimension of `w1` is the hidden layer size, which will also be the first dimension of `w2`.
#
# Finally, the output of the network is a 10-dimensional vector that represents the probability distribution over 10 classes.
#
# You don't need to tune any hyperparameters but you should see accuracies above 40% after training for one epoch.
# +
# Hyperparameters; per the notes above, no tuning is needed for ~40% accuracy.
hidden_layer_size = 4000
learning_rate = 1e-2
# w1: [3*32*32, hidden] maps flattened CIFAR images to the hidden layer;
# w2: [hidden, 10] maps hidden activations to the 10 class scores.
w1 = random_weight((3 * 32 * 32, hidden_layer_size))
w2 = random_weight((hidden_layer_size, 10))
train_part2(two_layer_fc, [w1, w2], learning_rate)
# -
# ### BareBones PyTorch: Training a ConvNet
#
# In the below you should use the functions defined above to train a three-layer convolutional network on CIFAR. The network should have the following architecture:
#
# 1. Convolutional layer (with bias) with 32 5x5 filters, with zero-padding of 2
# 2. ReLU
# 3. Convolutional layer (with bias) with 16 3x3 filters, with zero-padding of 1
# 4. ReLU
# 5. Fully-connected layer (with bias) to compute scores for 10 classes
#
# You should initialize your weight matrices using the `random_weight` function defined above, and you should initialize your bias vectors using the `zero_weight` function above.
#
# You don't need to tune any hyperparameters, but if everything works correctly you should achieve an accuracy above 42% after one epoch.
# +
# Hyperparameters for the three-layer ConvNet (architecture listed above).
learning_rate = 3e-3
channel_1 = 32
channel_2 = 16
# Parameters are declared up front and filled in inside the TODO section.
conv_w1 = None
conv_b1 = None
conv_w2 = None
conv_b2 = None
fc_w = None
fc_b = None
################################################################################
# TODO: Initialize the parameters of a three-layer ConvNet.                    #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Kaiming-normal weights and zero biases, as instructed above.
conv_w1 = random_weight((channel_1, 3, 5, 5))
conv_b1 = zero_weight(channel_1)
conv_w2 = random_weight((channel_2, channel_1, 3, 3))
conv_b2 = zero_weight(channel_2)
# Padding (2 and 1) preserves the 32x32 spatial size through both convs, so
# the fully-connected layer consumes channel_2 * 32 * 32 features.
fc_w = random_weight((channel_2 * 32 * 32, 10))
fc_b = zero_weight(10)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
################################################################################
#                                 END OF YOUR CODE                             #
################################################################################
params = [conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b]
train_part2(three_layer_convnet, params, learning_rate)
# -
# # Part III. PyTorch Module API
#
# Barebone PyTorch requires that we track all the parameter tensors by hand. This is fine for small networks with a few tensors, but it would be extremely inconvenient and error-prone to track tens or hundreds of tensors in larger networks.
#
# PyTorch provides the `nn.Module` API for you to define arbitrary network architectures, while tracking every learnable parameters for you. In Part II, we implemented SGD ourselves. PyTorch also provides the `torch.optim` package that implements all the common optimizers, such as RMSProp, Adagrad, and Adam. It even supports approximate second-order methods like L-BFGS! You can refer to the [doc](http://pytorch.org/docs/master/optim.html) for the exact specifications of each optimizer.
#
# To use the Module API, follow the steps below:
#
# 1. Subclass `nn.Module`. Give your network class an intuitive name like `TwoLayerFC`.
#
# 2. In the constructor `__init__()`, define all the layers you need as class attributes. Layer objects like `nn.Linear` and `nn.Conv2d` are themselves `nn.Module` subclasses and contain learnable parameters, so that you don't have to instantiate the raw tensors yourself. `nn.Module` will track these internal parameters for you. Refer to the [doc](http://pytorch.org/docs/master/nn.html) to learn more about the dozens of builtin layers. **Warning**: don't forget to call the `super().__init__()` first!
#
# 3. In the `forward()` method, define the *connectivity* of your network. You should use the attributes defined in `__init__` as function calls that take tensor as input and output the "transformed" tensor. Do *not* create any new layers with learnable parameters in `forward()`! All of them must be declared upfront in `__init__`.
#
# After you define your Module subclass, you can instantiate it as an object and call it just like the NN forward function in part II.
#
# ### Module API: Two-Layer Network
# Here is a concrete example of a 2-layer fully connected network:
# + code_folding=[]
class TwoLayerFC(nn.Module):
    """Two-layer fully-connected network: flatten -> fc1 -> ReLU -> fc2."""
    def __init__(self, input_size, hidden_size, num_classes):
        super().__init__()
        # Layers are registered as attributes so nn.Module tracks their
        # parameters. Each weight matrix gets Kaiming-normal initialization
        # (http://pytorch.org/docs/master/nn.html#torch-nn-init).
        self.fc1 = nn.Linear(input_size, hidden_size)
        nn.init.kaiming_normal_(self.fc1.weight)
        self.fc2 = nn.Linear(hidden_size, num_classes)
        nn.init.kaiming_normal_(self.fc2.weight)
    def forward(self, x):
        # forward() defines connectivity only: flatten, then fc1 -> ReLU -> fc2.
        hidden = F.relu(self.fc1(flatten(x)))
        return self.fc2(hidden)
def test_TwoLayerFC():
    """Shape-check TwoLayerFC: a [64, 50] input should produce [64, 10] scores."""
    feature_dim = 50
    batch = torch.zeros((64, feature_dim), dtype=dtype)
    net = TwoLayerFC(feature_dim, 42, 10)
    print(net(batch).size())  # expected: torch.Size([64, 10])
test_TwoLayerFC()
# -
# ### Module API: Three-Layer ConvNet
# It's your turn to implement a 3-layer ConvNet followed by a fully connected layer. The network architecture should be the same as in Part II:
#
# 1. Convolutional layer with `channel_1` 5x5 filters with zero-padding of 2
# 2. ReLU
# 3. Convolutional layer with `channel_2` 3x3 filters with zero-padding of 1
# 4. ReLU
# 5. Fully-connected layer to `num_classes` classes
#
# You should initialize the weight matrices of the model using the Kaiming normal initialization method.
#
# **HINT**: http://pytorch.org/docs/stable/nn.html#conv2d
#
# After you implement the three-layer ConvNet, the `test_ThreeLayerConvNet` function will run your implementation; it should print `(64, 10)` for the shape of the output scores.
# +
class ThreeLayerConvNet(nn.Module):
    """Three-layer ConvNet: conv(5x5, pad 2) -> ReLU -> conv(3x3, pad 1) -> ReLU -> FC."""
    def __init__(self, in_channel, channel_1, channel_2, num_classes):
        super().__init__()
        ########################################################################
        # TODO: Set up the layers you need for a three-layer ConvNet with the  #
        # architecture defined above.                                          #
        ########################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        # Both paddings preserve the 32x32 spatial size, so the FC layer
        # consumes channel_2 * 32 * 32 features. Weights get Kaiming-normal
        # initialization as instructed above.
        self.conv1 = nn.Conv2d(in_channel, channel_1, kernel_size=5, padding=2)
        nn.init.kaiming_normal_(self.conv1.weight)
        self.conv2 = nn.Conv2d(channel_1, channel_2, kernel_size=3, padding=1)
        nn.init.kaiming_normal_(self.conv2.weight)
        self.fc = nn.Linear(channel_2 * 32 * 32, num_classes)
        nn.init.kaiming_normal_(self.fc.weight)
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        ########################################################################
        #                          END OF YOUR CODE                            #
        ########################################################################
    def forward(self, x):
        scores = None
        ########################################################################
        # TODO: Implement the forward function for a 3-layer ConvNet. you      #
        # should use the layers you defined in __init__ and specify the        #
        # connectivity of those layers in forward()                            #
        ########################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        # conv1 -> ReLU -> conv2 -> ReLU, then flatten and score with the FC layer.
        hidden = F.relu(self.conv2(F.relu(self.conv1(x))))
        scores = self.fc(flatten(hidden))
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        ########################################################################
        #                          END OF YOUR CODE                            #
        ########################################################################
        return scores
def test_ThreeLayerConvNet():
    """Shape-check ThreeLayerConvNet on a zero minibatch of CIFAR-sized images."""
    batch = torch.zeros((64, 3, 32, 32), dtype=dtype)  # minibatch size 64, image size [3, 32, 32]
    net = ThreeLayerConvNet(in_channel=3, channel_1=12, channel_2=8, num_classes=10)
    print(net(batch).size())  # expected: torch.Size([64, 10])
test_ThreeLayerConvNet()
# -
# ### Module API: Check Accuracy
# Given the validation or test set, we can check the classification accuracy of a neural network.
#
# This version is slightly different from the one in part II. You don't manually pass in the parameters anymore.
def check_accuracy_part34(loader, model):
    """
    Check the accuracy of a classification model given as a PyTorch Module.
    Inputs:
    - loader: A DataLoader for the split to evaluate; the validation set if
      loader.dataset.train is truthy, the test set otherwise
    - model: A PyTorch Module giving the model to evaluate
    Returns: Nothing, but prints the accuracy of the model
    """
    if loader.dataset.train:
        print('Checking accuracy on validation set')
    else:
        print('Checking accuracy on test set')
    num_correct = 0
    num_samples = 0
    model.eval()  # set model to evaluation mode
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device, dtype=dtype)  # move to device, e.g. GPU
            y = y.to(device=device, dtype=torch.long)
            scores = model(x)
            _, preds = scores.max(1)
            num_correct += (preds == y).sum()
            num_samples += preds.size(0)
        acc = float(num_correct) / num_samples
        # %% escapes the percent sign so the printed value reads as a
        # percentage, consistent with check_accuracy_part2.
        print('Got %d / %d correct (%.2f%%)' % (num_correct, num_samples, 100 * acc))
# ### Module API: Training Loop
# We also use a slightly different training loop. Rather than updating the values of the weights ourselves, we use an Optimizer object from the `torch.optim` package, which abstracts the notion of an optimization algorithm and provides implementations of most of the algorithms commonly used to optimize neural networks.
def train_part34(model, optimizer, epochs=1):
    """
    Train a model on CIFAR-10 using the PyTorch Module API.
    Inputs:
    - model: A PyTorch Module giving the model to train.
    - optimizer: An Optimizer object we will use to train the model
    - epochs: (Optional) A Python integer giving the number of epochs to train for
    Returns: Nothing, but prints model accuracies during training.
    """
    model = model.to(device=device)  # move the model parameters to CPU/GPU
    for epoch in range(epochs):
        for step, (images, labels) in enumerate(loader_train):
            model.train()  # put model to training mode
            images = images.to(device=device, dtype=dtype)
            labels = labels.to(device=device, dtype=torch.long)
            # Forward pass and loss.
            loss = F.cross_entropy(model(images), labels)
            # Standard optimizer sequence: clear stale gradients, backprop the
            # loss to every model parameter, then take one optimizer step.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if step % print_every == 0:
                print('Iteration %d, loss = %.4f' % (step, loss.item()))
                check_accuracy_part34(loader_val, model)
                print()
# ### Module API: Train a Two-Layer Network
# Now we are ready to run the training loop. In contrast to part II, we don't explicitly allocate parameter tensors anymore.
#
# Simply pass the input size, hidden layer size, and number of classes (i.e. output size) to the constructor of `TwoLayerFC`.
#
# You also need to define an optimizer that tracks all the learnable parameters inside `TwoLayerFC`.
#
# You don't need to tune any hyperparameters, but you should see model accuracies above 40% after training for one epoch.
# +
# Same two-layer architecture as Part II, but the parameters now live inside
# the TwoLayerFC Module and are updated by a torch.optim optimizer.
hidden_layer_size = 4000
learning_rate = 1e-2
model = TwoLayerFC(3 * 32 * 32, hidden_layer_size, 10)
# Plain SGD (no momentum) over all learnable parameters tracked by the Module.
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
train_part34(model, optimizer)
# -
# ### Module API: Train a Three-Layer ConvNet
# You should now use the Module API to train a three-layer ConvNet on CIFAR. This should look very similar to training the two-layer network! You don't need to tune any hyperparameters, but you should achieve an accuracy above 45% after training for one epoch.
#
# You should train the model using stochastic gradient descent without momentum.
# +
learning_rate = 3e-3
channel_1 = 32  # filters in the first conv layer
channel_2 = 16  # filters in the second conv layer
model = None
optimizer = None
################################################################################
# TODO: Instantiate your ThreeLayerConvNet model and a corresponding optimizer #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
model = ThreeLayerConvNet(in_channel=3, channel_1=channel_1, channel_2=channel_2, num_classes=10)
# Plain SGD without momentum, per the instructions above.
optimizer =optim.SGD(model.parameters(), lr=learning_rate)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
################################################################################
#                                 END OF YOUR CODE
################################################################################
train_part34(model, optimizer)
# -
# # Part IV. PyTorch Sequential API
#
# Part III introduced the PyTorch Module API, which allows you to define arbitrary learnable layers and their connectivity.
#
# For simple models like a stack of feed forward layers, you still need to go through 3 steps: subclass `nn.Module`, assign layers to class attributes in `__init__`, and call each layer one by one in `forward()`. Is there a more convenient way?
#
# Fortunately, PyTorch provides a container Module called `nn.Sequential`, which merges the above steps into one. It is not as flexible as `nn.Module`, because you cannot specify more complex topology than a feed-forward stack, but it's good enough for many use cases.
#
# ### Sequential API: Two-Layer Network
# Let's see how to rewrite our two-layer fully connected network example with `nn.Sequential`, and train it using the training loop defined above.
#
# Again, you don't need to tune any hyperparameters here, but you shoud achieve above 40% accuracy after one epoch of training.
# + code_folding=[]
# We need to wrap `flatten` function in a module in order to stack it
# in nn.Sequential
class Flatten(nn.Module):
    """nn.Module wrapper around flatten() so it can be stacked in nn.Sequential."""
    def forward(self, x):
        # Delegate to the module-level flatten(): [batch, C, H, W] -> [batch, C*H*W].
        flat = flatten(x)
        return flat
hidden_layer_size = 4000
learning_rate = 1e-2
# The whole network as one feed-forward stack: flatten -> linear -> ReLU -> linear.
model = nn.Sequential(
    Flatten(),
    nn.Linear(3 * 32 * 32, hidden_layer_size),
    nn.ReLU(),
    nn.Linear(hidden_layer_size, 10),
)
# you can use Nesterov momentum in optim.SGD
optimizer = optim.SGD(model.parameters(), lr=learning_rate,
                      momentum=0.9, nesterov=True)
train_part34(model, optimizer)
# -
# ### Sequential API: Three-Layer ConvNet
# Here you should use `nn.Sequential` to define and train a three-layer ConvNet with the same architecture we used in Part III:
#
# 1. Convolutional layer (with bias) with 32 5x5 filters, with zero-padding of 2
# 2. ReLU
# 3. Convolutional layer (with bias) with 16 3x3 filters, with zero-padding of 1
# 4. ReLU
# 5. Fully-connected layer (with bias) to compute scores for 10 classes
#
# You should initialize your weight matrices using the `random_weight` function defined above, and you should initialize your bias vectors using the `zero_weight` function above.
#
# You should optimize your model using stochastic gradient descent with Nesterov momentum 0.9.
#
# Again, you don't need to tune any hyperparameters but you should see accuracy above 55% after one epoch of training.
# +
channel_1 = 32
channel_2 = 16
learning_rate = 1e-2
model = None
optimizer = None
################################################################################
# TODO: Rewrite the three-layer ConvNet with bias from Part III with the       #
# Sequential API.                                                              #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# conv(5x5, pad 2) -> ReLU -> conv(3x3, pad 1) -> ReLU -> flatten -> affine;
# padding preserves the 32x32 spatial size, hence channel_2 * 32 * 32 inputs
# to the final Linear layer.
model = nn.Sequential(
    nn.Conv2d(3, channel_1, 5, padding=2),
    nn.ReLU(),
    nn.Conv2d(channel_1, channel_2, 3, padding=1),
    nn.ReLU(),
    Flatten(),
    nn.Linear(channel_2 * 32 * 32, 10)
)
# SGD with Nesterov momentum 0.9, per the instructions above.
optimizer = optim.SGD(model.parameters(), lr=learning_rate,
                      momentum=0.9, nesterov=True)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
################################################################################
#                                 END OF YOUR CODE
################################################################################
train_part34(model, optimizer)
# -
# # Part V. CIFAR-10 open-ended challenge
#
# In this section, you can experiment with whatever ConvNet architecture you'd like on CIFAR-10.
#
# Now it's your job to experiment with architectures, hyperparameters, loss functions, and optimizers to train a model that achieves **at least 70%** accuracy on the CIFAR-10 **validation** set within 10 epochs. You can use the check_accuracy and train functions from above. You can use either `nn.Module` or `nn.Sequential` API.
#
# Describe what you did at the end of this notebook.
#
# Here are the official API documentation for each component. One note: what we call in the class "spatial batch norm" is called "BatchNorm2D" in PyTorch.
#
# * Layers in torch.nn package: http://pytorch.org/docs/stable/nn.html
# * Activations: http://pytorch.org/docs/stable/nn.html#non-linear-activations
# * Loss functions: http://pytorch.org/docs/stable/nn.html#loss-functions
# * Optimizers: http://pytorch.org/docs/stable/optim.html
#
#
# ### Things you might try:
# - **Filter size**: Above we used 5x5; would smaller filters be more efficient?
# - **Number of filters**: Above we used 32 filters. Do more or fewer do better?
# - **Pooling vs Strided Convolution**: Do you use max pooling or just stride convolutions?
# - **Batch normalization**: Try adding spatial batch normalization after convolution layers and vanilla batch normalization after affine layers. Do your networks train faster?
# - **Network architecture**: The network above has two layers of trainable parameters. Can you do better with a deep network? <span class="burk">Good architectures to try include:</span>
# - [conv-relu-pool]xN -> [affine]xM -> [softmax or SVM]
# - [conv-relu-conv-relu-pool]xN -> [affine]xM -> [softmax or SVM]
# - [batchnorm-relu-conv]xN -> [affine]xM -> [softmax or SVM]
# - **Global Average Pooling**: Instead of flattening and then having multiple affine layers, perform convolutions until your image gets small (7x7 or so) and then perform an average pooling operation to get to a 1x1 image picture (1, 1 , Filter#), which is then reshaped into a (Filter#) vector. This is used in [Google's Inception Network](https://arxiv.org/abs/1512.00567) (See Table 1 for their architecture).
# - **Regularization**: Add l2 weight regularization, or perhaps use Dropout.
#
# ### Tips for training
# For each network architecture that you try, you should tune the learning rate and other hyperparameters. When doing this there are a couple important things to keep in mind:
#
# - If the parameters are working well, you should see improvement within a few hundred iterations
# - Remember the coarse-to-fine approach for hyperparameter tuning: start by testing a large range of hyperparameters for just a few training iterations to find the combinations of parameters that are working at all.
# - Once you have found some sets of parameters that seem to work, search more finely around these parameters. You may need to train for more epochs.
# - You should use the validation set for hyperparameter search, and save your test set for evaluating your architecture on the best parameters as selected by the validation set.
#
# ### Going above and beyond
# If you are feeling adventurous there are many other features you can implement to try and improve your performance. You are **not required** to implement any of these, but don't miss the fun if you have time!
#
# - Alternative optimizers: you can try Adam, Adagrad, RMSprop, etc.
# - Alternative activation functions such as leaky ReLU, parametric ReLU, ELU, or MaxOut.
# - Model ensembles
# - Data augmentation
# - New Architectures
# - [ResNets](https://arxiv.org/abs/1512.03385) where the input from the previous layer is added to the output.
# - [DenseNets](https://arxiv.org/abs/1608.06993) where inputs into previous layers are concatenated together.
# - [This blog has an in-depth overview](https://chatbotslife.com/resnets-highwaynets-and-densenets-oh-my-9bb15918ee32)
#
# ### Have fun and happy training!
# +
################################################################################
# TODO:                                                                        #
# Experiment with any architectures, optimizers, and hyperparameters.          #
# Achieve AT LEAST 70% accuracy on the *validation set* within 10 epochs.      #
#                                                                              #
# Note that you can use the check_accuracy function to evaluate on either      #
# the test set or the validation set, by passing either loader_test or         #
# loader_val as the second argument to check_accuracy. You should not touch    #
# the test set until you have finished your architecture and hyperparameter    #
# tuning, and only run the test set once at the end to report a final value.   #
################################################################################
model = None
optimizer = None
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
################################################################################
#                                 END OF YOUR CODE
################################################################################
# NOTE(review): model/optimizer are still None at this point, so the call
# below will fail until the TODO section above instantiates them.
# You should get at least 70% accuracy
train_part34(model, optimizer, epochs=10)
# + [markdown] tags=["pdf-inline"]
# ## Describe what you did
#
# In the cell below you should write an explanation of what you did, any additional features that you implemented, and/or any graphs that you made in the process of training and evaluating your network.
# + [markdown] tags=["pdf-inline"]
# TODO: Describe what you did
# -
# ## Test set -- run this only once
#
# Now that we've gotten a result we're happy with, we test our final model on the test set (which you should store in best_model). Think about how this compares to your validation set accuracy.
# NOTE(review): best_model is simply the last trained model here; if several
# configurations were tried, assign the best validation-set model instead.
best_model = model
check_accuracy_part34(loader_test, best_model)
| assignment2/PyTorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from IPython.display import HTML
# Notebook helper cell: injects JavaScript that adds a "CODE" button which
# toggles the visibility of all input (code) cells in the rendered notebook.
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="CODE"></form>''')
# -
# Importing the libraries:
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import sqlite3
import numpy as np
import nibabel as ni
from nibabel import nifti1
import ia870 as MM
import dtimp as dtimp
import functions_will as FW
import ia636 as ia
import sys
import glob
# # Corpus callosum diffusion properties differences in systemic lupus patients
#
# `<NAME> - IA369Z`
#
# ## Abstract
# Corpus Callosum (CC) is the largest white matter structure and it plays a crucial role in clinical and research studies due to its shape and volume correlation to subject's characteristics and neurodegenerative diseases. CC segmentation and parcellation are an important step for any medical and research study. To date, magnetic resonance imaging (MRI) is the main tool to evaluate and infer information about the brain's microstructure because it offers a better tissue contrast.
# Systemic lupus erythematosus, referred to as SLE or lupus, is a chronic disease that causes systemic inflammation which affects multiple organs. As the central nervous system (CNS) is frequently affected in patients with SLE, several studies have sought a relationship between cerebral and corpus callosum volumes with SLE patients. In this work, we analyse diffusion properties in CC sub-regions between two groups, controls and patients diagnosed with SLE.
#
# ## 1 - Introduction
#
# The corpus callosum (CC) is by far the largest fiber bundles in the human brain interconnecting the two cerebral hemispheres with more than 300 million fibers [1]. CC is also the largest white matter structure in the brain, and despite its main functions, several studies have reported its relation to sexual dimorphism [2], handedness [3], and cerebral speech dominance [4]. Other morphometric studies have correlated CC's size in musicians [5] and positive semantic test performance [6]. The importance of CC has grown in the last decades due to its changes in volume associated with neurodegenerative or inflammatory diseases such as Alzheimer's [7] and multiple sclerosis [8]. And also CC seems to be affected by other central nervous system diseases such as epilepsy [9], depression [10], schizophrenia [11], dyslexia [12], and obesity [13].
#
# Systemic lupus erythematosus (SLE), also known simply as lupus, is an autoimmune disease with an incidence of between 2.0 and 7.6 per 100,000 per year [14]. Neurologic and psychiatric symptoms vary from headache, mood disorders, and impairment of cognitive function occur in as many as 70\% of patients with SLE [15]. The cause remains unclear and it is believed to involve hormonal, environmental, and genetic factors. Several studies have found a relationship between corpus callosum volume and its association with the disease duration [16].
#
# Diffusion MRI makes the study of neuronal fibers and tissues microstructure possible through the diffusion model. Diffusion MRI measures the dephasing of spins of protons in the presence of a spatially-varying magnetic field. The signal attenuation reflects the general mobility of water molecules, depending on temperature, viscosity and presence of large molecules. It also depends on barriers and obstacles imposed by microstructure, cell membranes, myelin sheaths and microtubules [17].
#
# Other MRI modality known as diffusion tensor imaging (DTI) revolutionized the field of white matter mapping. DTI takes advantage of the macroscopic geometrical arrangement of white matter bundles that becomes apparent through diffusion MRI measurements [18]. Due to CC's well-oriented fibers perpendicular to the sagittal plane and its high anisotropy [19], DTI is capable of generating good CC visualization.
#
# Image segmentation is commonly used for measuring and visualizing the brain’s anatomical structures in MRI images, and to facilitate the analyses, the CC is commonly parceled into smaller portions. Even though manual segmentation is the gold standard, it is subjective, time consuming, and requires specialist knowledge. Which leads to low repeatability for large number of patients. Automatic segmentation turns out to be a resonable solution, although there are several limitations due to DTI images and CC shape variability among subjects [20]. The partitioning of CC has been an object of interest for radiologic assessment. The first approach was carried out by Witelson in 1989 and was originated from non-human primates [20]. Several posterior studies rely on Witelson's classification such as the method proposed by Hofer [1].
# ### 1.1 - Reproducible Objective Quantification Scheme (ROQS)
#
# ROQS is a semi-automated segmentation process that requires four steps to segment white matter structures. A manual identification of the anatomical structure should be done and a seed placement is the first step towards the segmentation process. Secondly, the determination of thresholds and selection criteria. The third step is to create a binary mask and the last step is to calculate the boundary of the structure of interest.
#
# Based on the fact that most white matter structures of the brain have their axis of greatest diffusivity oriented predominantly along one of the three cardinal directions, the first calculation of the ROQS algorithm is to determine the x, y, and z components of the principal eigenvector of the seed pixel, where the principal eigenvector denotes the direction of maximal diffusivity [16]. Restricting the selection to pixels with the same maximum components of the principal eigenvector as the seed pixel is the first criterion. The next step is the calculation of the magnification array:
#
#
# $$ MagnificationArray = \frac{\text{FA}-\max(\text{FA})*\alpha}{\max(\text{FA})*\beta}+\gamma $$
#
# Where $\alpha$, $\beta$ e $\gamma$ are constants previously defined empirically by the author. The pixels whose anisotropy value is close to the seed's value are selected and then finally pixels are restricted to those with the maximum component value greater than the maximum component of the seed pixel minus 0.2. This threshold dependent on the seed, as opposed to a constant value, is used to adjust for variations in MR scanners and variations in diffusion properties in various areas of the brain. A value of 0.2 is chosen because diffusion properties within major white matter tracts are slightly heterogeneous [16]. The third step is the creation of a binary mask, where pixels that fit the previous criteria are assigned a value of one and all others are assigned a value of zero. Finally, the last step in ROQS is to determine the boundary of the CC by connecting all boundary pixels assigned with value of one, but have an adjacent zero valued pixel.
#
#
# ### 1.2 - Witelson's Parcellation Method
#
# The Witelson's Partitioning defines five callosal regions based on dividing the CC along its anterior-posterior dimension [18]. The CC is subdivided into regions comprising the anterior third, the anterior and posterior midbody, the posterior third, and the posterior one-fifth. Compartments of the anterior third, including the rostrum, genu, and rostral body, are assigned to prefrontal, premotor, and supplementary motor cortical areas. Fibers originating in the motor cortex are assumed to cross the CC through the anterior midbody, whereas somaesthetic and posterior parietal fiber bundles cross the CC through the posterior midbody. Compartments of the posterior third, including the isthmus and splenium, are assigned to temporal, parietal, and occipital cortical regions. It should be noted, however, that neither Witelson's classification nor other geometric partitioning schemes exactly mirror the texture of the
# CC at the cellular level [1].
#
# ### 1.3 - Hofer and Frahm's Parcellation Method
#
# The Hofer and Frahm parcellation method incorporates a geometric representation of the five subregions of the CC based on diffusion imaging fiber tractography. A geometric baseline was defined in the midsagittal section of the CC and connected the anterior and posterior points of the structure. After fiber tracking, the CC regions crossed by fibers belonging to defined cortical areas provided a natural segmentation [1]. These maximum segments were used as border lines and projected the baseline. The callosal parietal, temporal, and occipital fiber bundles overlapped and could not be separated by vertical lines. They constitute the most posterior region of the CC. The figure below shows an example of the difference between Witelson's (top) and Hofer and Frahm (bottom) parcellation methods:
#
# <img src="WandHschemes.png">
# ## 2 - Methodology
#
# ### 2.1 - Subjects and Image Acquisition
#
# For experiments, DWI from 14 subjects at the University of Campinas, were acquired on a Philips scanner Achieva 3T in the axial plane with a $1x1mm$ spatial resolution and $2mm$ slice thickness, along $32$ directions ($b-value=1000s/mm^2$, $TR=8.5s$, and $TE=61ms$). All data used in this experiment was acquired through a project approved by the research ethics committee from the School of Medicine at UNICAMP. From each acquired DWI volume, only the midsaggital slice was used.
#
# ### 2.2 - Workflow
#
# The dataset is composed by 3 controls and 6 patients diagnosed with SLE. The DWI input data from each subject followed the process above:
#
# <img src="WorkFlow_Done.png">
# ### 2.3 - Segmentation and Parcellation
#
# The dataset is composed by 3 controls and 6 patients diagnosed with SLE. The corpus callosum of each subject was segmented using the ROQS method and parcellated using two different methods, Witelson and Hofer and Frahm.
# #### Segmentation:
# +
# Defining the functions we will use to segment and parcelate:
def run_analysis(rootdir):
    """Load a subject's DTI volume and derive the maps used downstream.

    Parameters
    ----------
    rootdir : str
        Directory containing the subject's NIfTI DTI data.

    Returns
    -------
    tuple
        ``(wFA, FA, MD, fissure, eigvals, eigvects)`` where ``wFA`` is the
        FA map weighted by the first component of the principal eigenvector
        and ``fissure`` is the index of the midsagittal slice.
    """
    eigvals, eigvects, _affine = dtimp.loadNiftiDTI(rootdir, reorient=True)
    FA, MD = dtimp.getFractionalAnisotropy(eigvals)
    # Sanitize the anisotropy map: NaNs become 0 and values are capped at 1.
    FA = np.where(np.isnan(FA), 0, FA)
    FA = np.minimum(FA, 1)
    fissure, _mean_fa = dtimp.getFissureSlice(eigvals, FA)
    # FA weighted by the first component of the principal eigenvector.
    wFA = np.abs(eigvects[0, 0]) * FA
    return wFA, FA, MD, fissure, eigvals, eigvects
def corpusCallosumParcellationGeometric(segmentation, scheme='HOFER'):
    """Parcel a midsagittal corpus callosum mask into 5 geometric regions.

    Parameters
    ----------
    segmentation : 2-D bool ndarray
        Binary CC mask of the midsagittal slice.
    scheme : str, optional
        ``'HOFER'`` (default) or ``'WITELSON'``; case insensitive.

    Returns
    -------
    ndarray of int
        Same shape as ``segmentation``; CC pixels carry their region
        label (1-5), background stays 0.
    """
    # Fractions of the base line at which the four cut lines are placed.
    fractions = {
        'HOFER': np.array([1.0 / 6, 1.0 / 2, 2.0 / 3, 3.0 / 4]).reshape(4, 1),
        'WITELSON': np.array([1.0 / 3, 1.0 / 2, 2.0 / 3, 4.0 / 5]).reshape(4, 1),
    }
    scheme = scheme.upper()
    if scheme not in fractions:
        raise Exception('Unknown scheme!')

    # Anterior/posterior anchors: mean row of the leftmost/rightmost columns.
    rows, cols = np.nonzero(segmentation)
    col_lo = np.min(cols)
    col_hi = np.max(cols)
    p_ant = np.array([segmentation[:, col_lo].nonzero()[0].mean(), col_lo])
    p_post = np.array([segmentation[:, col_hi].nonzero()[0].mean(), col_hi])

    # Unit vector along the base line and its normal (cut direction).
    base = p_post - p_ant
    length = np.sqrt((base ** 2).sum())
    base = base / length
    normal = np.array([-base[1], base[0]])

    # Points on the base line where the four cut lines cross it.
    p3, p4, p5, p6 = p_ant + fractions[scheme] * length * base

    # Lines expressed as row = slope * col + intercept.
    slope_base = base[0] / base[1]
    intercept_base = p_ant[0] - slope_base * p_ant[1]
    slope_cut = normal[0] / normal[1]
    b3, b4, b5, b6 = (p[0] - slope_cut * p[1] for p in (p3, p4, p5, p6))

    # Classify every mask pixel relative to the base line and the cuts.
    y, x = rows, cols
    labels = np.zeros(y.size, dtype='int')
    above = y <= slope_base * x + intercept_base
    left3 = x <= (y - b3) / slope_cut
    left4 = x <= (y - b4) / slope_cut
    left5 = x <= (y - b5) / slope_cut
    left6 = x <= (y - b6) / slope_cut
    # Assignment order matters: regions 1 and 5 overwrite earlier labels.
    labels[~left3 & left4] = 2
    labels[~left4 & left5] = 3
    labels[~left5 & left6] = 4
    labels[(~above & left4) | left3] = 1
    labels[(~above & ~left5) | ~left6] = 5

    parcellation = np.zeros(np.shape(segmentation), dtype='int')
    parcellation[segmentation] = labels
    return parcellation
# +
# Defining the path:
dir_bas = "candidates"
# Control subjects: folders whose name contains "C".
dirs = glob.glob(dir_bas+"/*C*")
# Accumulators shaped (subject, region, measure): 3 controls, 6 regions
# (whole CC + 5 parcels), 4 measures (FA mean/std, MD mean/std).
# acumC collects Hofer-Frahm results, acumC2 collects Witelson results.
acumC = np.zeros((3,6,4))
acumC2 = np.zeros((3,6,4))
indx = 0
# Seed Manual Placement:
# as the ROQS method is a semi-automatic one
# in case you want to test this code using your own images
# you must enter the coordinates of your manual seeds.
seedx = [31,28,25]
seedy = [152,101,125]
for candidate in dirs:
    wFA, FA, MD, fissure, eigvals, eigvects = run_analysis(candidate)
    escala = [3*FA[fissure,:,:].shape[-2],2*FA[fissure,:,:].shape[-1]]
    wFA_ms = wFA[fissure,:,:]
    eigvects_ms = abs(eigvects[0,:,fissure])
    # NOTE(review): np.bool was removed in NumPy >= 1.24; fine under this
    # notebook's Python 2 kernel, but would need plain `bool` on modern NumPy.
    wFA_ss = np.load(candidate+'/segmentacao/wFA_ss.npy').astype(np.bool)
    # `escala` is immediately overwritten here; the assignment above is dead.
    escala = [wFA_ss.shape[-2],wFA_ss.shape[-1]]
    teste = mpimg.imread(candidate+'/mask_AF.png')[:,:,0]
    mask_p3 = np.array(teste).astype(np.bool)
    mask_sem = FW.resizedti(mask_p3,escala).astype(bool)
    con_mask_cc = np.logical_xor(MM.iaero(mask_sem),mask_sem)
    # ROQS code:
    seg_mask_seed_ero = MM.iaero(mask_sem)
    seed = [seedx[indx],seedy[indx]]
    # Dominant component of the principal eigenvector at the seed pixel.
    max_comp_in = np.argmax(eigvects_ms[:,seed[0],seed[1]],axis=0)
    max_comp_in = np.argmax(ia.iahistogram(max_comp_in))
    Cmax_seed = eigvects_ms[max_comp_in,seed[0],seed[1]]
    princ = np.argmax(eigvects_ms,axis=0)
    # First selection criterion: same dominant component as the seed.
    fsc = princ == max_comp_in
    # Empirical constants of the ROQS magnification array (see paper).
    alpha = 0.3
    beta = 0.3
    gamma = 0.5
    FA_ms = FA[fissure]
    MA = (FA_ms-np.amax(FA_ms)*alpha)/(np.amax(FA_ms)*beta)+gamma
    ssc = np.clip(np.amax(eigvects_ms*MA,axis=0),0,1)
    ssc = ssc*fsc
    # NOTE(review): threshold is seed value minus 0.1 here, while the text
    # above (Section 1.1) describes 0.2 -- confirm which is intended.
    mask_cc = ssc > Cmax_seed-0.1
    # Keep only the largest connected component as the CC mask.
    fr = MM.ialabel(mask_cc,MM.iasebox())
    fra = MM.iablob(fr,'area')
    mask_cc = fra == np.unique(fra)[-1]
    np.save(candidate+'/segmentacao/mask_ROQS', mask_cc)
    segmentation = mask_cc # Saving the segmentation mask
    # Parcelattion code:
    # NOTE(review): Ts, t and wFA_slice below appear unused in this cell.
    Ts = np.array([[4.0,0,0],[0,4,0],[0,0,1]])
    t = 0.2
    wFA_slice = wFA[fissure]
    FA_slice = FA[fissure]
    MD_slice = MD[fissure]
    fc = 8.0
    Ts = np.array([[fc,0,0],[0,fc,0],[0,0,1]])
    # Hofer and Frahm code:
    scheme = 'HOFER'
    geometricw = corpusCallosumParcellationGeometric(segmentation, scheme)
    # If you would like to see the segmentation and parcellation images from each subject:
    #plt.figure(figsize=(10,10))
    #plt.imshow(ia.ianormalize(segmentation))
    #plt.figure(2)
    #plt.figure(figsize=(10,10))
    #plt.imshow(ia.ianormalize(geometricw))
    hofer = geometricw
    # Region labels in left-to-right order: column-wise max, then drop
    # consecutive repeats and the leading background value.
    labels = geometricw.max(axis=0)
    labels = labels[labels[:-1]!=labels[1:]][1:].copy()
    measuresw = []
    # Row 0: whole-CC statistics; following rows: one per parcel.
    measuresw.append([FA_slice[segmentation].mean(), FA_slice[segmentation].std(), MD_slice[segmentation].mean(), MD_slice[segmentation].std()])
    for l in labels:
        selection = (geometricw==l)
        measuresw.append([FA_slice[selection].mean(), FA_slice[selection].std(), MD_slice[selection].mean(), MD_slice[selection].std()])
    measuresw = np.array(measuresw)
    acumC[indx] = measuresw
    # Witelson code:
    scheme = 'WITELSON'
    geometricw = corpusCallosumParcellationGeometric(segmentation, scheme)
    # If you would like to see the segmentation and parcellation images, discomment the lines below:
    #plt.figure(figsize=(10,10))
    #plt.imshow(ia.ianormalize(segmentation))
    #plt.figure(2)
    #plt.figure(figsize=(10,10))
    #plt.imshow(ia.ianormalize(geometricw))
    witelson = geometricw
    labels = geometricw.max(axis=0)
    labels = labels[labels[:-1]!=labels[1:]][1:].copy()
    measuresw = []
    measuresw.append([FA_slice[segmentation].mean(), FA_slice[segmentation].std(), MD_slice[segmentation].mean(), MD_slice[segmentation].std()])
    for l in labels:
        selection = (geometricw==l)
        measuresw.append([FA_slice[selection].mean(), FA_slice[selection].std(), MD_slice[selection].mean(), MD_slice[selection].std()])
    measuresw = np.array(measuresw)
    acumC2[indx] = measuresw
    indx += 1
# -
# Show the ROQS segmentation of the last processed subject, cropped to
# rows 20:40 and columns 90:165 (the region around the CC).
plt.figure(figsize=(10,10))
plt.imshow(ia.ianormalize(segmentation[20:40,90:165]))
# FIX: title previously read "one the subjects".
plt.title('Segmentation mask of one of the subjects.')
# #### Hofer and Frahm Parcellation:
plt.figure(figsize=(10,10))
plt.imshow(ia.ianormalize(hofer[20:40,90:165]))
plt.title('Example of Hofer and Frahm parcellation for one of the subjects.')
# #### Witelson Parcellation:
plt.figure(figsize=(10,10))
plt.imshow(ia.ianormalize(witelson[20:40,90:165]))
plt.title('Example of Witelson parcellation for one of the subjects.')
# ### Corpus callosum diffusion properties:
# +
# Defining the path:
dir_bas = "candidates"
# Patient subjects: folders whose name contains "P".
dirs = glob.glob(dir_bas+"/*P*")
# Accumulators shaped (subject, region, measure): 6 patients, 6 regions
# (whole CC + 5 parcels), 4 measures (FA mean/std, MD mean/std).
# acumP collects Hofer-Frahm results, acumP2 collects Witelson results.
acumP = np.zeros((6,6,4))
acumP2 = np.zeros((6,6,4))
indx = 0
# Seed Manual Placement:
# as the ROQS method is a semi-automatic one
# in case you want to test this code using your own images
# you must enter the coordinates of your manual seeds.
seedx = [25,25,30,29,29,29]
seedy = [132,111,95,146,106,67]
for candidate in dirs:
    wFA, FA, MD, fissure, eigvals, eigvects = run_analysis(candidate)
    escala = [3*FA[fissure,:,:].shape[-2],2*FA[fissure,:,:].shape[-1]]
    wFA_ms = wFA[fissure,:,:]
    eigvects_ms = abs(eigvects[0,:,fissure])
    # NOTE(review): np.bool was removed in NumPy >= 1.24; fine under this
    # notebook's Python 2 kernel, but would need plain `bool` on modern NumPy.
    wFA_ss = np.load(candidate+'/segmentacao/wFA_ss.npy').astype(np.bool)
    # `escala` is immediately overwritten here; the assignment above is dead.
    escala = [wFA_ss.shape[-2],wFA_ss.shape[-1]]
    teste = mpimg.imread(candidate+'/mask_AF.png')[:,:,0]
    mask_p3 = np.array(teste).astype(np.bool)
    mask_sem = FW.resizedti(mask_p3,escala).astype(bool)
    con_mask_cc = np.logical_xor(MM.iaero(mask_sem),mask_sem)
    # ROQS code:
    seg_mask_seed_ero = MM.iaero(mask_sem)
    seed = [seedx[indx],seedy[indx]]
    # Dominant component of the principal eigenvector at the seed pixel.
    max_comp_in = np.argmax(eigvects_ms[:,seed[0],seed[1]],axis=0)
    max_comp_in = np.argmax(ia.iahistogram(max_comp_in))
    Cmax_seed = eigvects_ms[max_comp_in,seed[0],seed[1]]
    princ = np.argmax(eigvects_ms,axis=0)
    # First selection criterion: same dominant component as the seed.
    fsc = princ == max_comp_in
    # Empirical constants of the ROQS magnification array (see paper).
    alpha = 0.3
    beta = 0.3
    gamma = 0.5
    FA_ms = FA[fissure]
    MA = (FA_ms-np.amax(FA_ms)*alpha)/(np.amax(FA_ms)*beta)+gamma
    ssc = np.clip(np.amax(eigvects_ms*MA,axis=0),0,1)
    ssc = ssc*fsc
    # NOTE(review): threshold is seed value minus 0.1 here, while the text
    # above (Section 1.1) describes 0.2 -- confirm which is intended.
    mask_cc = ssc > Cmax_seed-0.1
    # Keep only the largest connected component as the CC mask.
    fr = MM.ialabel(mask_cc,MM.iasebox())
    fra = MM.iablob(fr,'area')
    mask_cc = fra == np.unique(fra)[-1]
    np.save(candidate+'/segmentacao/mask_ROQS', mask_cc)
    segmentation = mask_cc # Saving the segmentation mask
    # Parcelattion code:
    # NOTE(review): Ts, t and wFA_slice below appear unused in this cell.
    Ts = np.array([[4.0,0,0],[0,4,0],[0,0,1]])
    t = 0.2
    wFA_slice = wFA[fissure]
    FA_slice = FA[fissure]
    MD_slice = MD[fissure]
    fc = 8.0
    Ts = np.array([[fc,0,0],[0,fc,0],[0,0,1]])
    #Hofer and Frahm code:
    scheme = 'HOFER'
    geometricw = corpusCallosumParcellationGeometric(segmentation, scheme)
    # If you would like to see the segmentation and parcellation images from each subject:
    #plt.figure(figsize=(10,10))
    #plt.imshow(ia.ianormalize(segmentation))
    #plt.title('Segmentation')
    #plt.figure(2)
    #plt.figure(figsize=(10,10))
    #plt.imshow(ia.ianormalize(geometricw))
    #plt.title('Hofer and Frahm Parcellation')
    # Region labels in left-to-right order: column-wise max, then drop
    # consecutive repeats and the leading background value.
    labels = geometricw.max(axis=0)
    labels = labels[labels[:-1]!=labels[1:]][1:].copy()
    measuresw = []
    # Row 0: whole-CC statistics; following rows: one per parcel.
    measuresw.append([FA_slice[segmentation].mean(), FA_slice[segmentation].std(), MD_slice[segmentation].mean(), MD_slice[segmentation].std()])
    for l in labels:
        selection = (geometricw==l)
        measuresw.append([FA_slice[selection].mean(), FA_slice[selection].std(), MD_slice[selection].mean(), MD_slice[selection].std()])
    measuresw = np.array(measuresw)
    acumP[indx] = measuresw
    #Witelson code:
    scheme = 'WITELSON'
    geometricw = corpusCallosumParcellationGeometric(segmentation, scheme)
    # If you would like to see the segmentation and parcellation images from each subject:
    #plt.figure(figsize=(10,10))
    #plt.imshow(ia.ianormalize(segmentation))
    #plt.title('Segmentation')
    #plt.figure(2)
    #plt.figure(figsize=(10,10))
    #plt.imshow(ia.ianormalize(geometricw))
    #plt.title('Witelson Parcellation')
    labels = geometricw.max(axis=0)
    labels = labels[labels[:-1]!=labels[1:]][1:].copy()
    measuresw = []
    measuresw.append([FA_slice[segmentation].mean(), FA_slice[segmentation].std(), MD_slice[segmentation].mean(), MD_slice[segmentation].std()])
    for l in labels:
        selection = (geometricw==l)
        measuresw.append([FA_slice[selection].mean(), FA_slice[selection].std(), MD_slice[selection].mean(), MD_slice[selection].std()])
    measuresw = np.array(measuresw)
    acumP2[indx] = measuresw
    indx += 1
# -
# - Controls:
# +
# Calculating the average for all controls:
# Average the per-subject measures over the 3 controls; row 0 is the whole
# CC, rows 1-5 the five parcels (columns: FA mean, FA std, MD mean, MD std).
auxcumC = np.average(acumC,axis=0)
auxcumC2 = np.average(acumC2,axis=0)
# print() calls keep this cell valid under both Python 2 and Python 3.
print('=====================================================')
print('             Hofer and Frahm Parcellation            ')
print('=====================================================')
print('          FA (mean)   FA (std)   MD (mean)   MD (std)')
print('Corpus : %.3e, %.3e, %.3e, %.3e' %(auxcumC[0,0], auxcumC[0,1],auxcumC[0,2],auxcumC[0,3]))
print('Slice 1: %.3e, %.3e, %.3e, %.3e' %(auxcumC[1,0], auxcumC[1,1],auxcumC[1,2],auxcumC[1,3]))
print('Slice 2: %.3e, %.3e, %.3e, %.3e' %(auxcumC[2,0], auxcumC[2,1],auxcumC[2,2],auxcumC[2,3]))
print('Slice 3: %.3e, %.3e, %.3e, %.3e' %(auxcumC[3,0], auxcumC[3,1],auxcumC[3,2],auxcumC[3,3]))
print('Slice 4: %.3e, %.3e, %.3e, %.3e' %(auxcumC[4,0], auxcumC[4,1],auxcumC[4,2],auxcumC[4,3]))
# BUG FIX: the FA-std column previously reused row 4 (auxcumC[4,1]).
print('Slice 5: %.3e, %.3e, %.3e, %.3e' %(auxcumC[5,0], auxcumC[5,1],auxcumC[5,2],auxcumC[5,3]))
print('\n')
print('=====================================================')
print('                Witelson Parcellation                ')
print('=====================================================')
print('          FA (mean)   FA (std)   MD (mean)   MD (std)')
print('Corpus : %.3e, %.3e, %.3e, %.3e' %(auxcumC2[0,0], auxcumC2[0,1],auxcumC2[0,2],auxcumC2[0,3]))
print('Slice 1: %.3e, %.3e, %.3e, %.3e' %(auxcumC2[1,0], auxcumC2[1,1],auxcumC2[1,2],auxcumC2[1,3]))
print('Slice 2: %.3e, %.3e, %.3e, %.3e' %(auxcumC2[2,0], auxcumC2[2,1],auxcumC2[2,2],auxcumC2[2,3]))
print('Slice 3: %.3e, %.3e, %.3e, %.3e' %(auxcumC2[3,0], auxcumC2[3,1],auxcumC2[3,2],auxcumC2[3,3]))
print('Slice 4: %.3e, %.3e, %.3e, %.3e' %(auxcumC2[4,0], auxcumC2[4,1],auxcumC2[4,2],auxcumC2[4,3]))
# BUG FIX: same row-index copy-paste error as above (was auxcumC2[4,1]).
print('Slice 5: %.3e, %.3e, %.3e, %.3e' %(auxcumC2[5,0], auxcumC2[5,1],auxcumC2[5,2],auxcumC2[5,3]))
# -
# - Patients:
# +
# Calculating the average for all patients:
# Average the per-subject measures over the 6 patients; row 0 is the whole
# CC, rows 1-5 the five parcels (columns: FA mean, FA std, MD mean, MD std).
auxcumP = np.average(acumP,axis=0)
auxcumP2 = np.average(acumP2,axis=0)
# print() calls keep this cell valid under both Python 2 and Python 3.
print('=====================================================')
print('             Hofer and Frahm Parcellation            ')
print('=====================================================')
print('          FA (mean)   FA (std)   MD (mean)   MD (std)')
print('Corpus : %.3e, %.3e, %.3e, %.3e' %(auxcumP[0,0], auxcumP[0,1],auxcumP[0,2],auxcumP[0,3]))
print('Slice 1: %.3e, %.3e, %.3e, %.3e' %(auxcumP[1,0], auxcumP[1,1],auxcumP[1,2],auxcumP[1,3]))
print('Slice 2: %.3e, %.3e, %.3e, %.3e' %(auxcumP[2,0], auxcumP[2,1],auxcumP[2,2],auxcumP[2,3]))
print('Slice 3: %.3e, %.3e, %.3e, %.3e' %(auxcumP[3,0], auxcumP[3,1],auxcumP[3,2],auxcumP[3,3]))
print('Slice 4: %.3e, %.3e, %.3e, %.3e' %(auxcumP[4,0], auxcumP[4,1],auxcumP[4,2],auxcumP[4,3]))
# BUG FIX: the FA-std column previously reused row 4 (auxcumP[4,1]).
print('Slice 5: %.3e, %.3e, %.3e, %.3e' %(auxcumP[5,0], auxcumP[5,1],auxcumP[5,2],auxcumP[5,3]))
print('\n')
print('=====================================================')
print('                Witelson Parcellation                ')
print('=====================================================')
print('          FA (mean)   FA (std)   MD (mean)   MD (std)')
print('Corpus : %.3e, %.3e, %.3e, %.3e' %(auxcumP2[0,0], auxcumP2[0,1],auxcumP2[0,2],auxcumP2[0,3]))
print('Slice 1: %.3e, %.3e, %.3e, %.3e' %(auxcumP2[1,0], auxcumP2[1,1],auxcumP2[1,2],auxcumP2[1,3]))
print('Slice 2: %.3e, %.3e, %.3e, %.3e' %(auxcumP2[2,0], auxcumP2[2,1],auxcumP2[2,2],auxcumP2[2,3]))
print('Slice 3: %.3e, %.3e, %.3e, %.3e' %(auxcumP2[3,0], auxcumP2[3,1],auxcumP2[3,2],auxcumP2[3,3]))
print('Slice 4: %.3e, %.3e, %.3e, %.3e' %(auxcumP2[4,0], auxcumP2[4,1],auxcumP2[4,2],auxcumP2[4,3]))
# BUG FIX: same row-index copy-paste error as above (was auxcumP2[4,1]).
print('Slice 5: %.3e, %.3e, %.3e, %.3e' %(auxcumP2[5,0], auxcumP2[5,1],auxcumP2[5,2],auxcumP2[5,3]))
# -
# As we could see from the tables above, there is not a significant difference in the diffusion properties between the two groups. We believe this is due to the small number of subjects acquired. As many studies have reported significant differences in CC volume in lupus, we expect to increase the number of subjects for further analyses.
# ## 5 - References
#
# [1] <NAME>; <NAME>. Topography of the human corpus callosum revisited—comprehensive fiber tractography using diffusion tensor magnetic resonance imaging. Neuroimage, v. 32, n. 3, p. 989-994, 2006.
#
# [2] <NAME>., 1989. Hand and sex differences in the isthmus and genu of the human corpus callosum. Brain, 112(3), pp.799-835.
#
# [3] <NAME>. Movement control and maturation in elementary-grade children. Percept. Motor Skills 41, 151-154, 1975.
#
# [4] O'KUSKY, John et al. The corpus callosum is larger with right‐hemisphere cerebral speech dominance. Annals of neurology, v. 24, n. 3, p. 379-383, 1988.
#
# [5] SCHLAUG, Gottfried et al. Increased corpus callosum size in musicians. Neuropsychologia, v. 33, n. 8, p. 1047-1055, 1995.
#
# [6] <NAME>.; <NAME>.; <NAME>.; <NAME>. (2012). "Functional role of corpus callosum regions in human memory functioning". International Journal of Psychophysiology. 85 (3): 396–7. doi:10.1016/j.ijpsycho.2012.07.092.
#
# [7] GOLD, <NAME>. et al. White matter integrity and vulnerability to Alzheimer's disease: preliminary findings and future directions. Biochimica et Biophysica Acta (BBA)-Molecular Basis of Disease, v. 1822, n. 3, p. 416-422, 2012.
#
# [8] OZTURK, Arzu et al. MRI of the corpus callosum in multiple sclerosis: association with disability. Multiple Sclerosis Journal, v. 16, n. 2, p. 166-177, 2010.
#
# [9] <NAME>. et al. Corpus callosum in first-episode patients with schizophrenia–A magnetic resonance imaging study. Psychological medicine, v. 33, n. 06, p. 1019-1027, 2003.
#
# [10] LACERDA, Acioly LT et al. Anatomical MRI study of corpus callosum in unipolar depression. Journal of psychiatric research, v. 39, n. 4, p. 347-354, 2005.
#
# [11] SAMARTZIS, Lampros et al. White matter alterations in early stages of schizophrenia: a systematic review of diffusion tensor imaging studies. Journal of Neuroimaging, v. 24, n. 2, p. 101-110, 2014.
#
# [12] <NAME>, Kerstin et al. Less developed corpus callosum in dyslexic subjects—a structural MRI study. Neuropsychologia, v. 40, n. 7, p. 1035-1044, 2002.
#
# [13] <NAME>. et al. Compromised white matter integrity in obesity. Obesity Reviews, v. 16, n. 4, p. 273-281, 2015.
#
# [14] <NAME>., <NAME>., <NAME>. and <NAME>., 2003. The epidemiology of systemic lupus erythematosus. Clinical reviews in allergy & immunology, 25(1), pp.3-11.
#
# [15] <NAME>., <NAME>., <NAME>. and <NAME>., 1988. Identification of brain lesions in neuropsychiatric systemic lupus erythematosus by magnetic resonance scanning. Arthritis & Rheumatology, 31(2), pp.159-166.
#
# [16] APPENZELLER, S., <NAME>., <NAME>., <NAME>. and <NAME>., 2005. Cerebral and corpus callosum atrophy in systemic lupus erythematosus. Arthritis & Rheumatology, 52(9), pp.2783-2789.
#
# [17] <NAME>.; <NAME>; <NAME>. MR diffusion tensor spectroscopy and imaging. Biophysical journal, v. 66, n. 1, p. 259-267, 1994.
#
# [18] ABOITIZ, Francisco et al. Fiber composition of the human corpus callosum. Brain research, v. 598, n. 1, p. 143-153, 1992.
#
# [19] <NAME>., <NAME>. and <NAME>., 2007. Diffusion tensor imaging segmentation of white matter structures using a Reproducible Objective Quantification Scheme (ROQS). Neuroimage, 35(1), pp.166-174.
#
# [20] <NAME>., <NAME>., <NAME>., <NAME>. (2017). Corpus Callosum 2D Segmentation on Diffusion Tensor Imaging using Growing Neural Gas Network. Lecture Notes in Computational Vision and Biomechanics, (Accepted).
#
# [21] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2005. Mapping structural differences of the corpus callosum in individuals with 18q deletions using targetless regional spatial normalization. Human brain mapping, 24(4), pp.325-331.
#
| deliver/22062017_Paper_MECPv2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# (pystan_refitting)=
# # Refitting PyStan models with ArviZ
#
# ArviZ is backend agnostic and therefore does not sample directly. In order to take advantage of algorithms that require refitting models several times, ArviZ uses {class}`~arviz.SamplingWrapper` to convert the API of the sampling backend to a common set of functions. Hence, functions like Leave Future Out Cross Validation can be used in ArviZ independently of the sampling backend used.
# Below there is one example of `SamplingWrapper` usage for PyStan extending {class}`arviz.PyStanSamplingWrapper` which already implements some default methods targeted to PyStan.
#
# Before starting, it is important to note that PyStan cannot call the C++ functions it uses. Therefore, the **code** of the model must be slightly modified in order to be compatible with the cross validation refitting functions.
import arviz as az
import pystan
import numpy as np
import matplotlib.pyplot as plt
# For the example we will use a linear regression.
# +
# Reproducible synthetic data for the linear regression example.
np.random.seed(26)
xdata = np.linspace(0, 50, 100)
# True parameters: intercept, slope and noise standard deviation.
b0, b1, sigma = -2, 1, 3
ydata = np.random.normal(loc=b1 * xdata + b0, scale=sigma)
# -
plt.plot(xdata, ydata)
# Now we will write the Stan code, keeping in mind that it must be able to compute the pointwise log likelihood on excluded data, that is, data which is not used to fit the model. Thus, the backbone of the code must look like:
#
# ```
# data {
# data_for_fitting
# excluded_data
# ...
# }
# model {
# // fit against data_for_fitting
# ...
# }
# generated quantities {
# ....
# log_lik for data_for_fitting
# log_lik_excluded for excluded_data
# }
# ```
refit_lr_code = """
data {
// Define data for fitting
int<lower=0> N;
vector[N] x;
vector[N] y;
// Define excluded data. It will not be used when fitting.
int<lower=0> N_ex;
vector[N_ex] x_ex;
vector[N_ex] y_ex;
}
parameters {
real b0;
real b1;
real<lower=0> sigma_e;
}
model {
b0 ~ normal(0, 10);
b1 ~ normal(0, 10);
sigma_e ~ normal(0, 10);
for (i in 1:N) {
y[i] ~ normal(b0 + b1 * x[i], sigma_e); // use only data for fitting
}
}
generated quantities {
vector[N] log_lik;
vector[N_ex] log_lik_ex;
vector[N] y_hat;
for (i in 1:N) {
// calculate log likelihood and posterior predictive, there are
// no restrictions on adding more generated quantities
log_lik[i] = normal_lpdf(y[i] | b0 + b1 * x[i], sigma_e);
y_hat[i] = normal_rng(b0 + b1 * x[i], sigma_e);
}
for (j in 1:N_ex) {
// calculate the log likelihood of the exluded data given data_for_fitting
log_lik_ex[j] = normal_lpdf(y_ex[j] | b0 + b1 * x_ex[j], sigma_e);
}
}
"""
# Compile the Stan model (slow the first time; cached afterwards).
sm = pystan.StanModel(model_code=refit_lr_code)
data_dict = {
    "N": len(ydata),
    "y": ydata,
    "x": xdata,
    # No excluded data in initial fit
    "N_ex": 0,
    "x_ex": [],
    "y_ex": [],
}
# Shared sampler settings so the initial fit and every refit are comparable.
sample_kwargs = {"iter": 1000, "chains": 4}
fit = sm.sampling(data=data_dict, **sample_kwargs)
# We have defined a dictionary `sample_kwargs` that will be passed to the `SamplingWrapper` in order to make sure that all
# refits use the same sampler parameters. We follow the same pattern with `az.from_pystan`.
# Map each variable to a named dimension so ArviZ labels them consistently.
dims = {"y": ["time"], "x": ["time"], "log_likelihood": ["time"], "y_hat": ["time"]}
idata_kwargs = {
    "posterior_predictive": ["y_hat"],
    "observed_data": "y",
    "constant_data": "x",
    # Store both the in-sample and the excluded-data log likelihoods.
    "log_likelihood": ["log_lik", "log_lik_ex"],
    "dims": dims,
}
idata = az.from_pystan(posterior=fit, **idata_kwargs)
# We will create a subclass of {class}`~arviz.PyStanSamplingWrapper`. Therefore, instead of having to implement all functions required by {func}`~arviz.reloo` we only have to implement `sel_observations`. As explained in its docs, it takes one argument which are the indices of the data to be excluded and returns `modified_observed_data` which is passed as `data` to `sampling` function of PyStan model and `excluded_observed_data` which is used to retrieve the log likelihood of the excluded data (as passing the excluded data would make no sense).
class LinearRegressionWrapper(az.PyStanSamplingWrapper):
    """Sampling wrapper for the linear regression refitting example.

    Only ``sel_observations`` needs to be provided; the remaining methods
    required by ``az.reloo`` are inherited from ``az.PyStanSamplingWrapper``.
    """

    def sel_observations(self, idx):
        """Split the stored data into fitting and excluded subsets.

        Parameters
        ----------
        idx : array-like
            Indices of the observations to exclude from the refit.

        Returns
        -------
        dict
            ``data`` dictionary for PyStan's ``sampling`` with the excluded
            observations removed from the fitting arrays.
        str
            Name of the generated quantity that holds the log likelihood
            of the excluded observations.
        """
        all_x = self.idata_orig.constant_data.x.values
        all_y = self.idata_orig.observed_data.y.values
        # Boolean mask: True for observations kept in the fit.
        keep = np.full_like(all_x, True, dtype=bool)
        keep[idx] = False
        n_total = len(keep)
        n_excluded = np.sum(~keep)
        data_for_fit = {
            "N": n_total - n_excluded,
            "x": all_x[keep],
            "y": all_y[keep],
            "N_ex": n_excluded,
            "x_ex": all_x[~keep],
            "y_ex": all_y[~keep],
        }
        return data_for_fit, "log_lik_ex"
# Pointwise PSIS-LOO-CV computed from the stored log likelihood.
loo_orig = az.loo(idata, pointwise=True)
loo_orig
# In this case, the Leave-One-Out Cross Validation (LOO-CV) approximation using Pareto Smoothed Importance Sampling (PSIS) works for all observations, so we will modify `loo_orig` in order to make {func}`~arviz.reloo` believe that PSIS failed for some observations. This will also serve as a validation of our wrapper, as the PSIS LOO-CV already returned the correct value.
# Artificially raise the Pareto k diagnostic of four observations so that
# reloo will refit the model exactly for those four.
loo_orig.pareto_k[[13, 42, 56, 73]] = np.array([0.8, 1.2, 2.6, 0.9])
# We initialize our sampling wrapper
pystan_wrapper = LinearRegressionWrapper(
    sm, idata_orig=idata, sample_kwargs=sample_kwargs, idata_kwargs=idata_kwargs
)
# And eventually, we can use this wrapper to call `az.reloo`, and compare the results with the PSIS LOO-CV results.
loo_relooed = az.reloo(pystan_wrapper, loo_orig=loo_orig)
loo_relooed
loo_orig
| doc/source/user_guide/pystan_refitting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=["hide-cell"]
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# -
# # The Newton-Euler equations
#
# Engineering mechanics uses 6 equations of motion to describe the
# relationship between applied forces and moments to the motion of rigid
# bodies. These 6 equations are the _Newton-Euler_ kinetic equations; you
# can write the equations succinctly as vector-relationships between
# forces and acceleration as
#
# - $\mathbf{F} = m\mathbf{a}$ equations 1-3
# - $\mathbf{M}_{G} = \frac{d}{dt}\mathbf{h}_G$ equations 4-6
#
# where $\mathbf{F}$ is the resultant sum of forces on a rigid body,
# $\mathbf{M}_{G}$ is the resultant sum of moments about a rigid body's
# center of mass, and $\mathbf{h}_G$ is the _angular momentum_ of a rigid
# body. Angular momentum is a description of the inertia in a rigid body
# due to spinning. To start, we are limiting our study to 3-DOF planar
# rigid body motion. This simplifies the Newton-Euler equation to 3
# coupled differential equations as such,
#
# 1. $\mathbf{F}\cdot\hat{e}_1 = m\mathbf{a}\cdot\hat{e}_1$
# 2. $\mathbf{F}\cdot\hat{e}_2 = m\mathbf{a}\cdot\hat{e}_2$
# 3. $\mathbf{M}_{G}\cdot\hat{e}_3 = I_{zz}\alpha$
#
# where $\hat{e}_{1},~\hat{e}_{2},$ and $\hat{e}_{3}$ are three orthogonal
# unit vectors, $I_{zz}$ is the moment of inertia for the rigid body, and
# $\alpha$ is the angular acceleration of the rigid body. Every rigid body
# in an engineering system can be described by the Newton-Euler equations.
#
# > __Note:__ The mass of an object describes how difficult it is to move
# > it in a straight line. The moment of inertia describes how difficult
# > it is to rotate an object. The units for moment of inertia are
# > $kg-m^2$ (in SI). A quick explanation of moment of inertia is that it
# > is the variance in the center of mass.
#
# ## Example
#
# Consider a baseball that is thrown. It has an initial velocity of 6 m/s
# at an angle of 60$^\circ$ from the ground and it's rotating 10 times/sec.
# Determine the acceleration of the baseball and its maximum height.
#
# In this example, consider the three planar Newton-Euler equations by
# first drawing a Free Body Diagram as seen below.
#
# 
#
# The force of gravity acts over the entire object equally and no other
# forces are present. Now, write out the 3 Newton-Euler equations
#
# > __Note:__ You will include the force of drag later, but for now just
# > consider the motion without drag.
#
# 1. $0 = m\ddot{x}$
# 2. $-mg = m\ddot{y}$
# 3. $0 = I_{zz}\ddot{\theta}$
#
# Integrating the three equations, you get three equations to describe the
# position and angle of the baseball.
#
# 1. $x(t) = x(0)+6\cos\frac{\pi}{3}t$
# 2. $y(t) = y(0)+6\sin\frac{\pi}{3}t-\frac{gt^2}{2}$
# 3. $\theta(t) = \theta(0)+10\frac{rot}{s}\frac{2\pi~rad}{rot}t$
#
#
# + tags=["hide-input"]
# Trajectory of the thrown baseball without drag: launch speed 6 m/s at
# 60 degrees from the ground, spinning at 10 rev/s.
speed, launch_angle, g = 6, np.pi / 3, 9.81
t = np.linspace(0, 1.1, 10)
x = speed * np.cos(launch_angle) * t
y = speed * np.sin(launch_angle) * t - g * t**2 / 2
theta = 10 * 2 * np.pi * t  # rotation angle in radians (computed, not plotted)
plt.plot(x, y, 'o')
plt.xlabel('distance (m)')
plt.ylabel('height (m)');
# -
# A freefalling object does not have a force in the x-direction or an
# applied moment, so the angular acceleration is 0 rad/s/s. The ball will
# continue spinning until an external moment is applied e.g. another
# player's baseball mitt.
#
# ## Describing kinetic properties
#
# The motion of the baseball, its path and its rotation, describes the
# __kinematics__ or _geometry of motion_. You can also use __kinetic__
# properties to describe a moving object. The Newton-Euler equations create
# quantitative comparisons between __kinematic__ and __kinetic__
# properties.
#
# The first __kinetic__ property is the _Forces_ acting on the baseball:
#
# $\mathbf{F} = 0\hat{i} - mg\hat{j}$.
#
# where $\mathbf{F}$ is the _force_ acting on the baseball. A _force_ is a
# vector quantity with magnitude and direction or another way to state
# this is that it has components along each unit vector.
| module_01/newtoneuler.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Ejercicios: Regresión lineal vs. Regresión logística
#
#
# En el notebook "02_Ejemplo_Regresion_Logística", hemos visto un ejemplo multiclase. A continuación, se plantea un ejercicio que nos llevará de un problema de regresión a otro de clasificación.
#
# 1. Comienza realizando un modelo de regresión lineal como hemos visto hasta ahora con los datos que se muestran a continuación, teniendo como target la columna "duración". Es decir, averigua cuánto aumenta o disminuye la duración cuando se aumenta una unidad los valores de las demás columnas.
#
# - ¿Qué acierto tiene? (score)
# - ¿Cuál es el RMSE? ¿Dirías que es un valor alto?
# - Viendo la matriz de correlación, ¿tienen coherencia los resultados de la regresión?
# - ¿Es la regresión un buen método para este dataset?
#
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
# -
df = pd.read_csv('data/usuarios_win_mac_lin.csv')
# Display how many sessions last more than 12 time units (binary split used
# later in exercise 2); the expression only shows counts, nothing is stored.
df['duracion'].apply(lambda x: x>12).value_counts()
# NOTE(review): this first 'clase_dur' assignment is immediately overwritten
# by the next line, so it has no lasting effect — presumably leftover exploration.
df['clase_dur'] = df['duracion'].apply(lambda x: int(x//10) if x <20 else 11)
# Three duration classes for the multiclass exercise: 1 (<=10), 2 (<=20), 3 (>20).
df['clase_dur'] = df['duracion'].apply(lambda x: 1 if x<= 10 else 2 if x<= 20 else 3)
df['clase_dur'].value_counts()
#
# 2. Pasemos ahora a un problema de clasificación binaria. Trata el dataset para asignar la clase en función de la duración: si la duración es <= 12, pertenecerá a la clase "1", si no, pertenecerá a la clase "2". Crea ahora un modelo de regresión logística para predecir a qué clase pertenece y calcula:
#
# - ¿Estamos ante un problema con datos balanceados o desbalanceados?
# - Utiliza las diferentes métricas que hemos visto en clase para medir el algoritmo.
# - Aprovechando que estamos ante un caso binario, realiza la gráfica de la curva ROC para ver cómo es el performance del algoritmo.
# - Compara los resultados obtenidos con los que obtendrías si al sistema lineal le añadieras un umbral de decisión al final en base a la división que hemos hecho previamente
#
# 3. Pasemos ahora al problema de clasificación multiclase. Trata el dataset para asignar 3 clases en función de la duración: si la duración es <= 10, pertenecerá a la clase "1", si no, si es <= 20 pertenecerá a la clase "2", y si no, a la clase "3". Crea ahora un modelo de regresión logística para predecir a qué clase pertenece y calcula:
#
# - ¿Estamos ante un problema con datos balanceados o desbalanceados?
# - Elige una métrica apropiada y calcula qué acierto tiene.
# - Compara los resultados obtenidos con los que obtendrías si al sistema lineal le añadieras un umbral de decisión al final en base a la división que hemos hecho previamente
| Bloque 3 - Machine Learning/01_Supervisado/2-Logistic Regression/ejercicios/01_Multinominal_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ON Model
# +
import numpy as np
import pandas as pd
from pystatplottools.pdf_env.loading_figure_mode import loading_figure_mode
fma, plt = loading_figure_mode(develop=True)  # develop=False will export the generated figures as pngs into "./data/RectangleData"
plt.style.use('seaborn-dark-palette')
# Resolve the simulation root only once per interactive session; 'root_dir'
# persists in the notebook namespace across cell re-executions, so the chdir
# is not repeated.
if 'root_dir' not in locals():
    # Navigate to simulations/ONModel directory as simulation root directory
    import os
    os.chdir("../simulations/ONModel")
    root_dir = os.getcwd()
# To be able to compute custom measures
import sys
sys.path.append("./../../python_scripts")
mcmc_model_dir = "ONModelMetropolis/"
mcmc_data_dir = root_dir + "/data/" + mcmc_model_dir
mcmc_results_dir = root_dir + "/results/" + mcmc_model_dir
# NOTE(review): data_dir/results_dir duplicate mcmc_data_dir/mcmc_results_dir;
# both names are used by the cells below.
data_dir = root_dir + "/data/" + mcmc_model_dir
results_dir = root_dir + "/results/" + mcmc_model_dir
# -
# ## MCMC Results
# ### Expectation Values
from mcmctools.modes.expectation_value import load_expectation_value_results

# Load the computed expectation values for the ON model run.
expectation_values = load_expectation_value_results(files_dir="ONModelMetropolis")
# Insert Kappa as column (as floating number). The deprecated alias `np.float`
# was removed in NumPy 1.24; the builtin `float` is the documented replacement.
expectation_values.insert(0, "Kappa", expectation_values.index.values.astype(float))
expectation_values = expectation_values.sort_values("Kappa")
expectation_values
# Computation of the total mean and of the two point correlator.
# `mean` holds one column per component; the connected correlator subtracts
# the squared component means from the measured two point function.
mean = expectation_values.loc[:, ("ExpVal", "Mean", slice(None))].values
total_mean = mean.mean(axis=1)
two_point_correlator = expectation_values["ExpVal", "TwoPointCorrelation", ""].values - np.power(mean, 2.0).sum(axis=1)
# +
fig, axes = fma.newfig(1.4, ncols=4, figsize=(15, 4))
# One panel per observable, each plotted against the running parameter Kappa.
kappa = expectation_values["Kappa"]
panels = [
    (total_mean, "Mean"),
    (expectation_values["ExpVal", "SecondMoment", ""], "SecondMoment"),
    (expectation_values["ExpVal", "Energy", ""], "Energy"),
    (two_point_correlator, "Two Point Correlator"),
]
for ax, (values, ylabel) in zip(axes, panels):
    ax.plot(kappa, values, "o-")
    ax.set_xlabel("Kappa")
    ax.set_ylabel(ylabel)
plt.tight_layout()
fma.savefig(results_dir, "expectation_values")
# -
# ## Configurations as Pytorch Dataset
# We show how the mcmc configurations can be stored and loaded as a .pt file.
#
# (See also python_scripts/loading_configurations.py and python_scripts/pytorch_data_generation.py)
# ### Preparation
# +
# Arguments controlling how raw MCMC configurations are turned into a dataset.
data_generator_args = {
    # ConfigDataGenerator Args
    "data_type": "target_param",
    # Args for ConfigurationLoader
    "path": mcmc_data_dir,
    "total_number_of_data_per_file": 10000,
    "identifier": "expectation_value",
    "running_parameter": "kappa",
    "chunksize": 100  # If no chunksize is given, all data is loaded at once
}
# Prepare in memory dataset
from pystatplottools.pytorch_data_generation.data_generation.datagenerationroutines import prepare_in_memory_dataset
from mcmctools.pytorch.data_generation.datagenerationroutines import data_generator_factory
# Registers the dataset configuration; presumably the .pt file itself is only
# generated on the first load_in_memory_dataset call below — TODO confirm.
prepare_in_memory_dataset(
    root=data_dir,
    batch_size=89,
    data_generator_args=data_generator_args,
    data_generator_name="BatchConfigDataGenerator",
    data_generator_factory=data_generator_factory
)
# -
# ### Generating and Loading the Dataset
# +
# Load in memory dataset
from pystatplottools.pytorch_data_generation.data_generation.datagenerationroutines import load_in_memory_dataset
# The dataset is generated and stored as a .pt file in the data_dir/data
# directory the first time this function is called; afterwards the stored
# .pt file is loaded directly.
data_loader = load_in_memory_dataset(
    root=data_dir,
    batch_size=128,
    data_generator_factory=data_generator_factory,
    slices=None,
    shuffle=True,
    num_workers=0,
    rebuild=False,
    # sample_data_generator_name="ConfigDataGenerator"  # optional: for a generation of new samples
)
# Load training data: iterate the loader once, unpacking each batch into the
# configurations and their target parameters.
for batch_idx, (data, target) in enumerate(data_loader):
    pass
    # print(batch_idx, len(data))
# -
# ### Inspection of the Dataset - Sample Visualization
# +
from pystatplottools.visualization import sample_visualization
config_dim = (8, 8)  # Dimension of the data
num_std=1  # NOTE(review): presumably the number of std deviations for color scaling — confirm in pystatplottools docs
# Random samples
config, label = data_loader.dataset.get_random_sample()
batch, batch_label = data_loader.dataset.get_random_batch(108)
# -
# Single Sample
sample_visualization.fd_im_single_sample(sample=config, label=label, config_dim=config_dim, num_std=num_std,
fma=fma, filename="single_sample", directory=results_dir, figsize=(10, 4));
# Batch with labels
sample_visualization.fd_im_batch(batch, batch_labels=batch_label, num_samples=25, dim=(5, 5),
config_dim=config_dim, num_std=num_std,
fma=fma, filename="batch", directory=results_dir, width=2.3, ratio=1.0, figsize=(12, 12));
# Batch grid
sample_visualization.fd_im_batch_grid(batch, config_dim=config_dim, num_std=num_std,
fma=fma, filename="batch_grid", directory=results_dir);
| jupyter_notebooks/on_model.ipynb |