code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 「指数関数とn次関数的」
#
#
# これはpythonのあくまでもトレーニングです。
#
# 初期y(0)=1の倍加時間Tdの指数関数と
#
# $$
# \begin{array}{rcl}
# y(t)&=&2^{\frac{t}{Td}}\\
# &=&e^{\log_e(2)\frac{t}{Td}}\\
# \end{array}$$
#
#
# 以下のN次関数
#
# $$y(t)=1+\sum_{i=1}^{N}{\frac{\log_e(2)^i}{Td^i\, i}} t^i$$
#
# の比較
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
print("e=",np.e)
print("log_e(2)=",np.log(2.0))
# +
def func_n(t, n=1, td=1.0):
    """Order-n polynomial approximation of the doubling exponential.

    Evaluates y(t) = 1 + sum_{i=1..n} (ln(2)/td)^i / i * t^i, the
    polynomial this notebook compares against 2**(t/td).

    t  : scalar or numpy array of times
    n  : highest polynomial order included in the sum
    td : doubling time Td
    """
    total = 1
    for order in range(1, n + 1):
        # i-th term: (ln2^i / td^i) / i * t^i
        total = total + ((np.log(2) ** order) / (td ** order)) / order * t ** order
    return total
def func_exp(t, td=1.0):
    """Exponential with doubling time td: y(t) = 2**(t/td) = exp(ln(2)*t/td)."""
    ln2 = np.log(2.0)
    return np.exp(ln2 * t / td)
# +
# Compare the exponential y = 2**(t/Td) against its polynomial
# approximations of order n = 1, 2, 4 over one doubling time.
t_lin=np.linspace(0,30,300)
Td=30  # doubling time (same unit as t); NOTE: also reused by later cells
plot=plt.plot(t_lin,func_exp(t_lin,td=Td),label="exp")
plot=plt.plot(t_lin,func_n(t_lin,n=1,td=Td),label="n=1")
plot=plt.plot(t_lin,func_n(t_lin,n=2,td=Td),label="n=2")
plot=plt.plot(t_lin,func_n(t_lin,n=4,td=Td),label="n=4")
plt.legend()
# -
# ### m階微分
#
# 指数関数のm階微分
#
# $$y^{(m)}(t)={\left(\frac{\log_e(2)}{Td}\right)}^m e^{\log_e(2)\frac{t}{Td}}$$
#
# N次関数のm階微分
#
# $$y^{(m)}(t)=\sum_{i=m}^{N}{\frac{\log_e(2)^i\,(i-1)!}{Td^i\,(i-m)!}} t^{i-m}$$
#
#
#
#
# +
def func_n_div(t, n=1, td=1.0, m=1):
    """m-th derivative of the order-n polynomial func_n.

    Differentiating the term (ln2/td)^i / i * t^i  m times gives
    (ln2/td)^i * (i-1)!/(i-m)! * t^(i-m); terms with i < m vanish.
    Returns zeros with the shape of t when m > n.
    """
    if m > n:
        return 0 * t
    result = 0
    for i in range(m, n + 1):
        # falling-factorial factor (i-1)(i-2)...(i-m+1) = (i-1)!/(i-m)!
        coef = 1
        for j in range(i - m + 1, i):
            coef *= j
        result = result + ((np.log(2) ** i) / (td ** i)) * (t ** (i - m)) * coef
    return result
def func_exp_div(t, td=1.0, m=1):
    """m-th derivative of func_exp: (ln(2)/td)^m * exp(ln(2)*t/td)."""
    scale = (np.log(2.0) / td) ** m
    return scale * np.exp(np.log(2.0) * t / td)
# -
# 1階微分 (first derivative)
# +
# First derivative (m=1) of exp vs polynomials of order n = 1..4.
# NOTE: relies on Td = 30 set in the first plotting cell.
t_lin=np.linspace(0,30,100)
plot=plt.plot(t_lin,func_exp_div(t_lin,td=Td,m=1),label="exp")
plot=plt.plot(t_lin,func_n_div(t_lin,n=1,td=Td,m=1),label="n=1")
plot=plt.plot(t_lin,func_n_div(t_lin,n=2,td=Td,m=1),label="n=2")
plot=plt.plot(t_lin,func_n_div(t_lin,n=3,td=Td,m=1),label="n=3")
plot=plt.plot(t_lin,func_n_div(t_lin,n=4,td=Td,m=1),label="n=4")
plt.legend()
# -
# 2階微分
# +
# Second derivative (m=2).
t_lin=np.linspace(0,30,100)
plot=plt.plot(t_lin,func_exp_div(t_lin,td=Td,m=2),label="exp")
plot=plt.plot(t_lin,func_n_div(t_lin,n=1,td=Td,m=2),label="n=1")
plot=plt.plot(t_lin,func_n_div(t_lin,n=2,td=Td,m=2),label="n=2")
plot=plt.plot(t_lin,func_n_div(t_lin,n=3,td=Td,m=2),label="n=3")
plot=plt.plot(t_lin,func_n_div(t_lin,n=4,td=Td,m=2),label="n=4")
plt.legend()
# +
# Third derivative (m=3); polynomials with n < m are identically zero.
t_lin=np.linspace(0,30,100)
plot=plt.plot(t_lin,func_exp_div(t_lin,td=Td,m=3),label="exp")
plot=plt.plot(t_lin,func_n_div(t_lin,n=1,td=Td,m=3),label="n=1")
plot=plt.plot(t_lin,func_n_div(t_lin,n=2,td=Td,m=3),label="n=2")
plot=plt.plot(t_lin,func_n_div(t_lin,n=3,td=Td,m=3),label="n=3")
plot=plt.plot(t_lin,func_n_div(t_lin,n=4,td=Td,m=3),label="n=4")
plt.legend()
# +
# Fourth derivative (m=4).
t_lin=np.linspace(0,30,100)
plot=plt.plot(t_lin,func_exp_div(t_lin,td=Td,m=4),label="exp")
plot=plt.plot(t_lin,func_n_div(t_lin,n=1,td=Td,m=4),label="n=1")
plot=plt.plot(t_lin,func_n_div(t_lin,n=2,td=Td,m=4),label="n=2")
plot=plt.plot(t_lin,func_n_div(t_lin,n=3,td=Td,m=4),label="n=3")
plot=plt.plot(t_lin,func_n_div(t_lin,n=4,td=Td,m=4),label="n=4")
plt.legend()
# +
# First derivative again over one doubling time (Td reset explicitly).
t_lin=np.linspace(0,30,100)
Td=30
plot=plt.plot(t_lin,func_exp_div(t_lin,td=Td,m=1),label="exp")
plot=plt.plot(t_lin,func_n_div(t_lin,n=1,td=Td,m=1),label="n=1")
plot=plt.plot(t_lin,func_n_div(t_lin,n=2,td=Td,m=1),label="n=2")
plot=plt.plot(t_lin,func_n_div(t_lin,n=4,td=Td,m=1),label="n=4")
plt.legend()
# +
# Same comparison over ten doubling times, log-scaled y axis.
t_lin=np.linspace(0,300,100)
Td=30
plot=plt.plot(t_lin,func_exp_div(t_lin,td=Td,m=1),label="exp")
plot=plt.plot(t_lin,func_n_div(t_lin,n=1,td=Td,m=1),label="n=1")
plot=plt.plot(t_lin,func_n_div(t_lin,n=2,td=Td,m=1),label="n=2")
plot=plt.plot(t_lin,func_n_div(t_lin,n=4,td=Td,m=1),label="n=4")
plt.legend()
plt.yscale("log")
# +
# Function values on a log-log plot, t in [1, 1e4].
t_log=np.logspace(0,4,num=100)
Td=30
plot=plt.plot(t_log,func_exp(t_log,td=Td),label="exp")
plot=plt.plot(t_log,func_n(t_log,n=1,td=Td),label="n=1")
plot=plt.plot(t_log,func_n(t_log,n=2,td=Td),label="n=2")
plot=plt.plot(t_log,func_n(t_log,n=4,td=Td),label="n=4")
plt.legend()
plt.yscale("log")
plt.xscale("log")
# +
# Polynomials only, t up to 1e6 (the exponential would overflow/dominate).
t_log=np.logspace(0,6,num=100)
Td=30
#plot=plt.plot(t_log,func_exp(t_log,td=Td),label="exp")
plot=plt.plot(t_log,func_n(t_log,n=1,td=Td),label="n=1")
plot=plt.plot(t_log,func_n(t_log,n=2,td=Td),label="n=2")
plot=plt.plot(t_log,func_n(t_log,n=4,td=Td),label="n=4")
plt.legend()
plt.yscale("log")
plt.xscale("log")
# +
# Reciprocals 1/y(t) on a log-log plot (decay comparison).
t_log=np.logspace(0,6,num=100)
Td=30
#plot=plt.plot(t_log,func_exp(t_log,td=Td),label="exp")
plot=plt.plot(t_log,1/func_n(t_log,n=1,td=Td),label="n=1")
plot=plt.plot(t_log,1/func_n(t_log,n=2,td=Td),label="n=2")
plot=plt.plot(t_log,1/func_n(t_log,n=4,td=Td),label="n=4")
plt.legend()
plt.yscale("log")
plt.xscale("log")
# +
# Reciprocals on linear axes over ten doubling times.
t_lin=np.linspace(0,300,100)
Td=30
plot=plt.plot(t_lin,1/func_exp(t_lin,td=Td),label="exp")
plot=plt.plot(t_lin,1/func_n(t_lin,n=1,td=Td),label="n=1")
plot=plt.plot(t_lin,1/func_n(t_lin,n=2,td=Td),label="n=2")
plot=plt.plot(t_lin,1/func_n(t_lin,n=4,td=Td),label="n=4")
plt.legend()
# +
# Reciprocals over twenty doubling times, log-scaled y axis.
t_lin=np.linspace(0,600,100)
Td=30
plot=plt.plot(t_lin,1/func_exp(t_lin,td=Td),label="exp")
plot=plt.plot(t_lin,1/func_n(t_lin,n=1,td=Td),label="n=1")
plot=plt.plot(t_lin,1/func_n(t_lin,n=2,td=Td),label="n=2")
plot=plt.plot(t_lin,1/func_n(t_lin,n=4,td=Td),label="n=4")
plt.legend()
plt.yscale("log")
# -
| .ipynb_checkpoints/exponential-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Short Exercises
# Using Gauss-Legendre quadrature estimate the following integrals with $L = 2,4,6,8,$ and $30$.
# - $\int_0^{\pi/2} e^{\sin x} \,dx \approx ~3.104379017855555098181$
# - $\int_0^{2.405} J_0(x) dx \approx 1.470300035485$, where $J_0(x)$ is a Bessel function of the first kind given by $$ J_\alpha(x) = \sum_{m=0}^\infty \frac{(-1)^m}{m! \, \Gamma(m+\alpha+1)} {\left(\frac{x}{2}\right)}^{2m+\alpha}. $$
#
#
# ## Solution
# The file $\texttt{ch16.py}$ contains all of the functions contained in the Chapter 16 notes. It will be imported in order to use the $\texttt{GLQuad}$ and $\texttt{generalGL}$ functions. It is also acceptable to paste the functions individually.
#
# $\br$We will then define the endpoints $\texttt{a}$ and $\texttt{b}$ for each part, and the function $\texttt{f}$ to be integrated for each part. In addition, the variable $\texttt{Ls}$ is defined to store the values of $L$ we want to solve for.
#
# $\br$The functions are then iterated and printed.
# +
import numpy as np
import math
# ch16 is a project-local module providing GLQuad/generalGL (Gauss-Legendre).
from ch16 import *
# Define values of L (quadrature orders to compare)
Ls = np.array([2,4,6,8,30])
# Define bounds and function for part a: integral of e^sin(x) over [0, pi/2]
f = lambda x: np.exp(np.sin(x))
a = 0
b = np.pi/2
# Calculate for part a and print; reference value ~= 3.104379017855555
print('Estimating int_0^pi/2 of e^sin(x) dx\n')
print('L - estimation')
i = 0  # NOTE(review): this counter is incremented but never used
for L in Ls:
    ans = generalGL(f,a,b,L)
    print(L,"- %.16f" % ans)
    i += 1
# Define bounds and function for part b
def f(x, M = 100):
    """Order-zero Bessel function of the first kind, J_0(x).

    Computed from the truncated power series
        J_0(x) = sum_{m=0}^{M-1} (-1)^m / (m! * Gamma(m+1)) * (x/2)^(2m)
    (for alpha = 0, Gamma(m+1) = m!, so each term is (-1)^m/(m!)^2 (x/2)^(2m)).

    Inputs:
        x: point at which to evaluate the Bessel function
        M: number of series terms to include in the sum
    Returns:
        J_0(x)
    """
    half_x = 0.5 * x
    series_sum = 0.0
    for m in range(M):
        series_sum += (-1)**m/(math.factorial(m)*math.gamma(m+1))*half_x**(2*m)
    return series_sum
# Bounds for part b: integral of J_0(x) over [0, 2.405]
a = 0
b = 2.405
# Calculate for part b and print; reference value ~= 1.470300035485
print('\nEstimating int_0^2.405 of J_0(x) dx\n')
print('L - estimation')
i = 0  # NOTE(review): this counter is incremented but never used
for L in Ls:
    ans = generalGL(f,a,b,L)
    print(L,"- %.16f" % ans)
    i += 1
# -
# As expected, the answers converge to the exact integral.
# ## Gauss-Lobatto Quadrature
# One sometimes desires a quadrature rule to include the endpoints of the interval. The Gauss-Legendre quadrature rules do not include $x=\pm 1$. Gauss-Lobatto quadrature includes both of these points in the set.
# - Derive the $L=2$ Gauss-Lobatto quadrature set. There is only one degree of freedom in this quadrature set, the weight, and it needs to integrate linear polynomials exactly. This quadrature rule will have the form
# \[ \int\limits_{-1}^{1} f(x)\,dx = w f(-1) + w f(1).\]
# - Now derive the $L=3$ Gauss-Lobatto quadrature set. Now there are two degrees of freedom because the $x$'s must be $\pm 1$ and 0. This rule will integrate cubics exactly and have the form:
# \[ \int\limits_{-1}^{1} f(x)\,dx = w_1 f(-1) + w_2 f(0) + w_1 f(1).\]
# - Implement this quadrature rule and verify that it integrates that appropriate polynomials exactly.
#
# ## Solution
# For $L = 2$ we have the following
#
# $$\int_{-1}^{1} x^0 = w(-1)^0 + w(1)^0.$$
# Integrating and solving the above leads to
#
# $$2 = 2w,$$
#
# therefore
#
# $$w = 1.$$
# For $L = 3$ we have the following
#
# $$\int_{-1}^{1} x^0 = w_1(-1)^0 + w_2(0)^0 + w_1(1)^0,$$
#
# $$\int_{-1}^{1} x^1 = w_1(-1)^1 + w_2(0)^1 + w_1(1)^1,$$
#
# and
#
# $$\int_{-1}^{1} x^2 = w_1(-1)^2 + w_2(0)^2 + w_1(1)^2.$$
# Note that we went out to second degree polynomials because the first degree polynomial was of no use. Integrating and solving both of the above leads to
#
# $$2 = 2w_1 + w_2,$$
#
# $$0 = 0,$$
#
# and
#
# $$\frac{2}{3} = 2w_1.$$
# We will then solve this simple system for $w_1$ and $w_2$, where the results are
#
# $$w_1 = \frac{1}{3},$$
#
# and
#
# $$w_2 = \frac{4}{3}.$$
# In order to verify the integration, we will make a simple function that can use the Gauss-Lobatto quadrature for $L = 2$ and $L = 3$.
def LobattoQuad(f,L):
    """Gauss-Lobatto quadrature estimate of the integral of f(x) on [-1, 1].

    Unlike Gauss-Legendre, the Lobatto rules include both endpoints:
        L = 2:  w f(-1) + w f(1)             with w  = 1        (exact for linears)
        L = 3:  w1 f(-1) + w2 f(0) + w1 f(1) with w1 = 1/3,
                                                  w2 = 4/3      (exact for cubics)
    Inputs:
        f: name of function to integrate
        L: Order of integration rule (2 or 3)
    Returns:
        Gauss-Lobatto Quadrature estimate"""
    # Only the two hand-derived rules are supported.
    assert L == 2 or L == 3
    if L == 2:
        endpoint_weight = 1
        return endpoint_weight*f(-1) + endpoint_weight*f(1)
    if L == 3:
        w_outer = 1.0/3
        w_center = 4.0/3
        return w_outer*f(-1) + w_center*f(0) + w_outer*f(1)
# First, we will test it for $L = 2$ with the following integral of a linear polynomial
#
# $$\int_{-1}^{1} \Big(10x - 12\Big)~dx = -24$$
# +
# Define function: linear test case, exact integral over [-1,1] is -24
f = lambda x: 10*x - 12
# Solve and print
ans = LobattoQuad(f,2)
print('The result is',ans)
# -
# As expected, it integrated the linear polynomial exactly.
#
# Next, we will test it for L = 3 with the following integral of a cubic polynomial
#
# $$\int_{-1}^{1} \Big(8x^3 + 6x^2 + 4x + 2\Big)~dx = 8$$
# +
# Define function: cubic test case, exact integral over [-1,1] is 8
f = lambda x: 8*x**3 + 6*x**2 + 4*x + 2
# Solve and print
ans = LobattoQuad(f,3)
print('The result is',ans)
# -
# Exact integration? Close enough.
# ## Integration and Root Finding
# Consider a 1-D cylindrical reactor with geometric buckling 0.0203124 cm$^{-1}$ and $\Sigma_\mathrm{f} = 0.07$ cm$^{-1}$.
# - Find the critical radius of this reactor.
# - Using the numerical integration method of your choice, find the peak scalar flux assuming that power per unit height is 2 MW/cm. Use 200 MeV/fission = $3.204 \times 10^{-11}$ J.
# - [Challenge] Now assume the reactor has a height of 500 cm and a power of 1000 MW. What is the peak scalar flux? You'll need a multi-dimensional integral in this case.
#
# ## Solution
#
# We know through previous courses that the scalar flux in a 1-D cylindrical reactor is
#
# $$\phi(r) = A J_0(B_\mathrm{g} r),$$
#
# where $J_0$ is the order-0 Bessel function of the first kind. To find the critical radius of the reactor, we need to find $r$ such that
#
# $$J_0(B_\mathrm{g} r) = 0.$$
#
# $\br$We will do this using inexact Newton from Chapter 13, although there are several ways we can do this.
#
# $\br$The file $\texttt{ch13.py}$ contains the functions from chapter 13, including $\texttt{inexact\_newton}$. We will define the function we want to find the root of, and then use the inexact Newton function to do so. The SciPy function $\texttt{special.jv}$ will be used to evaluate the zero-th order Bessel function.
# +
import numpy as np
import scipy.special
# Import inexact_newton (project-local root finder from the Chapter 13 notes)
from ch13 import inexact_newton
# Define given constants
B_g = 0.0203124 # geometric buckling [1/cm]
Sig_f = 0.07 # macroscopic fission cross-section [1/cm]
# Define function to find root of: J_0(B_g * r) = 0 at the critical radius
f = lambda r: scipy.special.jv(0,B_g*r)
# Solve for root, starting the Newton iteration from r = 100 cm
R_crit = inexact_newton(f,100.0)
# Print to user
print('The critical radius is %.2f' % R_crit,"cm")
# -
# Given that the scalar flux is
#
# $$\phi(r) = A J_0(B_\mathrm{g} r),$$
#
# the maximum value of the scalar flux is $A$, because $J_0(0) = 1$ is the maximum of this function. To find $A$ we need to solve the equation
#
# $$P = E_\mathrm{f} R_\mathrm{f},$$
#
# where $P$ is the power per unit height, $E_\mathrm{f}$ is the energy per fission, and $R_\mathrm{f}$ is the fission rate.
#
# The fission rate is given by
#
# $$R_\mathrm{f} = 2 \pi \int_0^\mathrm{R} \Sigma_f A J_0(B_\mathrm{g} r)r~dr,$$
#
# which is the form of the integral because the differential area element $dA$ is given by
#
# $$dA = 2 \pi r dr,$$
#
# in 1-D cylindrical coordinates. Also, we can pull the $\Sigma_\mathrm{f}$ and $A$ out of the integral because it is constant in this problem. Therefore, $A$ is given by
#
# $$A = \frac{P}{E_\mathrm{f}}~\Bigg( 2 \pi \int_0^\mathrm{R} \Sigma_f J_0(B_\mathrm{g}r)r~dr\Bigg)^{-1}.$$
#
# $\br$Gauss-Legendre quadrature will be used to estimate the integral with $L = 8$. The file $\texttt{ch16.py}$ contains the functions needed and is imported, but they can also be pasted. It is acceptable to use any of the numerical methods of integration present in the lecture notes.
# +
# Import generalGL and GLQuad (Gauss-Legendre quadrature, project-local ch16)
from ch16 import generalGL,GLQuad
# Define the power per unit length and energy per fission
P = 2.0E6 # [J/cm]
E_f = 3.204E-11 # [J]
# Define integrand: 2*pi*Sigma_f*J_0(B_g r)*r (r from the cylindrical area element)
integrand = lambda r: 2.0*np.pi*Sig_f*scipy.special.jv(0,B_g*r)*r
# Solve using L = 8; peak flux A = P / (E_f * integral)
integral = generalGL(integrand,0,R_crit,8)
phiMax = P/(E_f*integral)
print('The peak scalar flux is %.5e' % phiMax,"n/cm^2-s")
# -
# ### case 2
# In this case the scalar flux looks like
#
# $$\phi(r,z) = A J_0 \Big(\frac{2.4048}{R_\mathrm{crit}}r\Big)~\mathrm{cos}\Big(\frac{\pi}{H_\mathrm{crit}}z\Big).$$
#
# Now we can determine total power by
#
# $$P = E_\mathrm{f}~\Bigg( \int_0^R r~dr \int_{-H/2}^{H/2} 2 \pi \Sigma_\mathrm{f} A J_0 \Big(\frac{2.405}{R_\mathrm{crit}}r\Big)~\mathrm{cos}\Big(\frac{\pi}{H_\mathrm{crit}}z\Big)~dz\Bigg).$$
#
# This makes
#
# $$A = \frac{P}{E_\mathrm{f}}~\Bigg( \int_0^R r~dr \int_{-H/2}^{H/2} 2 \pi \Sigma_\mathrm{f} A J_0 \Big(\frac{2.405}{R_\mathrm{crit}}r\Big)~\mathrm{cos}\Big(\frac{\pi}{H_\mathrm{crit}}z\Big)~dz\Bigg)^{-1}.$$
#
# $\br$To find the new critical radius we will need to solve the equation
#
# $$(B_\mathrm{g}^{1D})^2 = \Big(\frac{\pi}{H_\mathrm{crit}}\Big)^2 + \Big(\frac{2.405}{r}\Big)^2,$$
#
# $\br$and we will use the $\texttt{inexact\_newton}$ function from the Chapter 13 notes.
# +
# Define function for root-find: (B_g^1D)^2 = (pi/H)^2 + (2.405/r)^2
H_crit = 500.0 # [cm]
f = lambda r: B_g**2 - (np.pi/H_crit)**2 - (2.405/r)**2
# Determine root (Newton iteration from r = 100 cm)
R_crit = inexact_newton(f,100)
# Print to user
print('The new critical radius is %.2f' % R_crit,"cm")
# -
# In order to solve the integral, we will need a 2D integral function. We will use $\texttt{GLQuad2D}$ and $\texttt{generalGL2D}$ from the Chapter 16 notes.
# +
# Import generalGL2D and GLQuad2D (2-D Gauss-Legendre quadrature, project-local)
from ch16 import generalGL2D,GLQuad2D
# Define necessary constants: total power and energy per fission
P = 1000.0E6 # [J]
E_f = 3.204E-11 # [J]
# Define integrand: separable r-z flux shape J_0(2.405 r / R) * cos(pi z / H)
integrand = lambda r,z: (2.0*np.pi*Sig_f*
    scipy.special.jv(0,(2.405/R_crit)*r)*r*np.cos(np.pi/H_crit*z))
# Solve 2-D integral (L = 8) over r in [0, R_crit], z in [-H/2, H/2]
integral = generalGL2D(integrand,0,R_crit,-H_crit/2,H_crit/2,8)
phiMax = P/(E_f*integral)
print('The peak scalar flux is %.5e' % phiMax,"n/cm^2-s")
| solution_chapter16.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Get scale limit for a mobile re-captured image
# 1. Zoom in initial scale.
# 2. Crop image w/ current scale.
# 3. Execute FFT & calculate wave length of the cropped image.
# 4. Get pixel width in x-y axes.
# 5. Optimize scale to obtain 10-pixel width and 10-pixel height (iterate Step 2 to 4).
# +
from pre_process import *
import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from skimage.feature import peak_local_max
# Load the re-captured screen photo and convert to grayscale.
dirName = "snipping"
imFilename = os.path.join(dirName, "source.png")
print("Reading image to align : ", imFilename);
im = cv2.imread(imFilename, cv2.IMREAD_COLOR)
pim = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
height, width = pim.shape
n_pixel = 5
# Detect local intensity peaks (camera samples of monitor pixels).
coordinates = np.array(peak_local_max(pim, min_distance=3))
N = coordinates.shape[0]
# d: estimated spacing between peaks assuming a uniform grid of N peaks.
d = np.sqrt(width*height/N)
nx, ny = tuple(np.array([width, height]) // int(d * n_pixel))
height, width = pim.shape
y_off, x_off = (0, 0)
h, w = (height//ny, width//nx)
# NOTE(review): patches.Rectangle expects ((x, y), width, height); the
# (y_off, x_off) / (h, w) ordering here looks swapped — confirm intent.
rect = patches.Rectangle((y_off, x_off), h, w, linewidth=3, edgecolor='b', facecolor='none')
_, axes = plt.subplots(ncols=2, figsize=(15,15))
# Blacken each detected peak pixel to visualize the detections.
for y, x in coordinates:
    pim[y, x] = 0
axes[0].imshow(pim[y_off:y_off+h, x_off:x_off+w], 'gray')
axes[1].imshow(im)
axes[1].add_patch(rect)
plt.show()
# -
# ### Peak coordinates
# Display the estimated peak spacing (cell exports it as notebook output).
d
# #### Camera pixel size per Monitor pixel
# z: half-size of the square patch painted around each peak (in camera pixels).
z = int(np.ceil(d/2)+1)
# +
# Re-grayscale the image, then flood each peak's neighborhood with the
# peak's own intensity to suppress the moire pattern.
pim = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
pim_original = pim.copy()
for y, x in coordinates:
    pim[y-z:y+z, x-z:x+z] = pim_original[y, x]
n_pixel = 3
nx, ny = tuple(np.array([width, height]) // int(d * n_pixel))
y_off, x_off = (500, 1000)
h, w = (height//ny, width//nx)
# NOTE(review): same (y_off, x_off)/(h, w) ordering concern as the cell above.
rect = patches.Rectangle((y_off, x_off), h, w, linewidth=3, edgecolor='b', facecolor='none')
print(h, w)
_, axes = plt.subplots(ncols=3, figsize=(15,30))
axes[0].imshow(pim[y_off:y_off+h, x_off:x_off+w], 'gray')
axes[1].imshow(pim, cmap='gray')
axes[1].add_patch(rect)
axes[2].imshow(pim_original, cmap='gray')
plt.show()
# +
# Accumulate overlapping patches of the original image around each peak,
# then average by the overlap count to build the de-moired image.
demoire = np.zeros(pim.shape)
mask = np.ones(pim.shape)
# NOTE(review): mask starts at 1, so patch sums are divided by (overlaps + 1)
# rather than the overlap count — confirm whether np.zeros was intended.
for y, x in coordinates:
    ymin = y-z; ymax = y+z
    xmin = x-z; xmax = x+z
    mask[ymin:ymax+1, xmin:xmax+1] += 1
    demoire[ymin:ymax+1, xmin:xmax+1] += pim_original[ymin:ymax+1, xmin:xmax+1]
demoire /= mask
pim_original = pim_original.astype('uint32')
# Pixels never covered by any patch are still zero: back-fill from the original.
for y in range(int(demoire.shape[0])):
    for x in range(int(demoire.shape[1])):
        if demoire[y][x] == 0:
            demoire[y][x] = pim_original[y][x]
# +
from PIL import Image
# Save the de-moired result plus the intermediate images for inspection.
demoire = demoire.astype("uint8")
pim_image = Image.fromarray(demoire)
pim_image.save('demoire.png')
print(pim)
pim_image = Image.fromarray(pim)
pim_image.save('pim.png')
pim_original_image = Image.fromarray(pim_original)
# FIX: this previously called pim_image.save(...), overwriting the file
# with the wrong image; save the pim_original image instead.
pim_original_image.save('pim_original.png')
# NOTE(review): `im` was read with cv2 (BGR channel order); saving it through
# PIL without converting to RGB leaves the color channels swapped — confirm.
im_image = Image.fromarray(im)
im_image.save('original.png')
| data/etc/pre_process/jiwon.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from __future__ import print_function
# to be able to see plots
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import cPickle  # Python 2 pickle module (this notebook targets Python 2)
import sys
sys.path.append("../tools")
from tools import collage
# just to use a fraction of GPU memory
# This is not needed on dedicated machines.
# Allows you to share the GPU.
gpu_memory_usage=0.33
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
# TF1-era session configuration: cap this process's GPU memory fraction.
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_usage
set_session(tf.Session(config=config))
# -
# # Read CIFAR10 dataset
# +
from tools import readCIFAR, mapLabelsOneHot
# First run ../data/downloadCIFAR.sh
trnData, tstData, trnLabels, tstLabels = readCIFAR('../data/cifar-10-batches-py')
# Preview 16 train / 16 test images as a collage side by side.
plt.subplot(1, 2, 1)
img = collage(trnData[:16])
print(img.shape)
plt.imshow(img)
plt.subplot(1, 2, 2)
img = collage(tstData[:16])
plt.imshow(img)
plt.show()
# Convert integer class labels to one-hot vectors for categorical crossentropy.
trnLabels = mapLabelsOneHot(trnLabels)
tstLabels = mapLabelsOneHot(tstLabels)
# -
# # Normalize data
# Scale uint8 pixel values [0, 255] to zero-centered floats in [-0.5, 0.5].
trnData = trnData.astype(np.float32) / 255.0 - 0.5
tstData = tstData.astype(np.float32) / 255.0 - 0.5
# # Define net
# +
from keras.layers import Input, Reshape, Dense, Dropout, Flatten
from keras.layers import Activation
from keras.models import Model
from keras import regularizers
w_decay = 0.0001
# NOTE(review): w_reg is defined but never applied to any layer below — confirm.
w_reg = regularizers.l2(w_decay)
def get_simple_FC_network(input_data, layer_cout, layer_dim):
    """Build a fully-connected classifier graph on top of input_data.

    layer_cout: number of hidden Dense layers (sic: presumably "layer_count")
    layer_dim:  units per hidden layer
    Returns the 10-way softmax output tensor.
    """
    net = Flatten()(input_data)
    for i in range(layer_cout):
        net = Dense(layer_dim, activation='relu')(net)
    net = Dense(10, name='out', activation='softmax')(net)
    return net
# +
from keras import optimizers
from keras.models import Model
from keras import losses
from keras import metrics
# 32x32 RGB input; 5 hidden layers of 256 units each.
input_data = Input(shape=(32, 32, 3), name='data')
net = get_simple_FC_network(input_data, 5, 256)
model = Model(inputs=[input_data], outputs=[net])
# Build the classification model and print its layer summary.
print('Model')
model.summary()
model.compile(loss=losses.categorical_crossentropy, optimizer=optimizers.Adam(lr=0.001), metrics=[metrics.categorical_accuracy])
# -
from keras import backend
# Train for 10 epochs, evaluating on the test set after each epoch.
model.fit(
    x=trnData, y=trnLabels,
    batch_size=64, epochs=10, verbose=1,
    validation_data=[tstData, tstLabels], shuffle=True)
# # Predict and evaluate
# Per-class probabilities for the first two test images.
classProb = model.predict(x=tstData[0:2])
print('Class probabilities:', classProb, '\n')
loss, acc = model.evaluate(x=tstData, y=tstLabels, batch_size=1024)
print()
print('loss', loss)
print('acc', acc)
# # Compute test accuracy by hand
# +
classProb = model.predict(x=tstData)
print(classProb.shape)
# Probability assigned to the true class of each sample.
correctProb = (classProb * tstLabels).sum(axis=1)
# Highest probability among the wrong classes.
wrongProb = (classProb * (1-tstLabels)).max(axis=1)
print(correctProb.shape, wrongProb.shape)
# A sample is correct iff the true class outscores every other class.
accuracy = (correctProb > wrongProb).mean()
print('Accuracy: ', accuracy)
# -
| 01/keras_simple_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:misc-env]
# language: python
# name: conda-env-misc-env-py
# ---
# +
from pynput import keyboard
import pyautogui
import time
# Config
start_key = "f2"   # pressing this arms the spam loop
stop_key = "esc"   # pressing this aborts
# NOTE(review): `logging` shadows the stdlib logging module name.
logging = False
# Global vars shared with the pynput listener callbacks below
stop_spamming = False
start_spamming = False
def on_key_press(key):
    """Key-press callback: sets stop_spamming when the stop key is hit.

    Alphanumeric keys expose .char; special keys raise AttributeError,
    which is how the two cases are told apart.
    """
    global stop_spamming
    try:
        char = key.char  # raises AttributeError for special keys
        if logging:
            print('alphanumeric key {0} pressed'.format(char))
    except AttributeError:
        if logging:
            print('special key {0} pressed'.format(key))
        if key == keyboard.Key[stop_key]:
            stop_spamming = True
def on_key_release(key):
    """Key-release callback: esc stops the listener, start_key arms the loop."""
    global start_spamming
    if logging:
        print('{0} released'.format(key))
    if key == keyboard.Key.esc:
        # Returning False stops the pynput listener thread.
        return False
    if key == keyboard.Key[start_key]:
        start_spamming = True
# Non-blocking listener
listener = keyboard.Listener(
    on_press=on_key_press,
    on_release=on_key_release)
listener.start()
# Busy-wait until the user either arms (start_key) or aborts (stop_key).
while(True):
    if (stop_spamming or start_spamming):
        break
# FIX: open the script inside a context manager so the file handle is
# always closed (the original `open` was never closed, even on error).
with open("beemovie.txt", "r") as file:
    # Type one line per second until the stop key is pressed.
    for line in file:
        if (stop_spamming):
            print("stopping")
            break
        pyautogui.write(line)
        pyautogui.press("enter")
        time.sleep(1)
| chat-spammer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a ><img src = "logo_ehtp.jpg" width = 300, align = "center"></a>
# <h1 align=center><font size = 6>Master Spécialisé Data Engineering </font></h1>
#
#
# <h6 align=center><font size = 5 color=brown><u>Programmation Python pour Data Science</u></font></h6>
# <h6 align=center><font size = 4 color=blue><u>traiter et Analyser les données démographiques</u></font></h6>
#
#
#
# <ul>
#
#
#
#
#
# <br>
#
#
# </ul>
#
# <h2>ensemble de données du recensement général de la population et de l’habitat de 2004 (RGPH2004) de la région Marrakech-Tensift-Al Haouz </h2>
# <div class="alert alert-block alert-danger" style="margin-top: 20px">
# <table style="width:50%">
# <tr>
# <td colspan=2 bgcolor="#00FF00">première feuille du fichier Recensement_RGPH2004_RegionMTA.xlsx</td>
# </tr>
#
# <tr>
# <th> Champ</th>
# <th>Signification du champ</th>
# </tr>
# <tr>
# <td>ID Identifiant </td>
# <td>numérique attribué à la commune</td>
# </tr>
# <tr>
# <td>Code_Commune </td>
# <td>Code de la commune</td>
# </tr>
# <tr>
# <td>Type_Commune </td>
# <td>Type de la commune (Urbaine ou rurale)</td>
# </tr>
# <tr>
# <td>Surface_ha </td>
# <td>Surface de la commune en hectare</td>
# </tr>
# <tr>
# <td>Population </td>
# <td>Population de la commune</td>
# </tr>
# <tr>
# <td>Pop_inf6_pc </td>
# <td>pourcentage de la population dont l'age est inférieur à 6 ans</td>
# </tr>
# <tr>
# <td>Pop_615_pc</td>
# <td>pourcentage de la population dont l'age est compris entre 6 et 15 ans</td>
# </tr>
# <tr>
# <td>Pop_1560_pc</td>
# <td> pourcentage de la population dont l'age est compris entre 15 et 60 ans</td>
# </tr>
# <tr>
# <td>Pop_inf6_pc </td>
# <td>pourcentage de la population dont l'age est inférieur à 6 ans</td>
# </tr>
# <tr>
# <td>Pop_sup60_pc</td>
# <td>pourcentage de la population dont l'age est supérieur à 60 ans</td>
# </tr>
# <tr>
# <td>Celebataire_pc</td>
# <td> pourcentage de la population dont l'état matrimonial est célibataires</td>
# </tr>
# <tr>
# <td>Marie_pc</td>
# <td>pourcentage de la population dont l'état matrimonial est marié</td>
# </tr>
# <tr>
# <td>Mrge_Autre_pc</td>
# <td>pourcentage de la population dont l'état matrimonial est autre</td>
# </tr>
# <tr>
# <td>Age_Moyen_1marriage</td>
# <td>Age moyen au premier mariage</td>
# </tr>
# <tr>
# <td>Taux_Analphabetisme</td>
# <td>taux d'analphabétisme de la population de la commune</td>
# </tr>
# <tr>
# <td>Taux_activite</td>
# <td>taux d'activité de la population de la commune</td>
# </tr>
#
#
# </table>
# <table style="width:50%">
# <tr>
# <td colspan=2 bgcolor="#00FF00">deuxiéme feuille (Code_Nom_Province.csv) donnant le code et le nom des provinces de la région </td>
# </tr>
# <tr>
# <th> Champ</th>
# <th>Signification du champ</th>
# </tr>
# <tr>
# <tr>
#
# <td>Code_Province</td>
# <td>code de la province</td>
#
# </tr>
# <tr>
# <td>Nom_Province</td>
# <td>Nom de la province</td>
# </tr>
# </table>
# </div>
#
# <hr>
#
# <div class="alert alert-block alert-info" >
# <center><h1>PARTIE 1 <h1></center>
# </div>
# <h3><ul>
# <li>les librariries usuelles </li>
# </ul> </h3>
import pandas as pd
import numpy as np
from pandas import Series,DataFrame,read_excel
import matplotlib.pyplot as plt
from matplotlib import rcParams,pyplot
# <h3>1)- Chargement de fichier excel </h3>
# Load the RGPH2004 census workbook (one row per commune).
dfc=pd.read_excel('Recensement_RGPH2004_RegionMTA.xlsx')
# <h3> 2)- Affichage des dix premières lignes et dix dernières lignes</h3>
# <h5>2-1) Affichage des dix premières lignes </h5>
# +
dfc.head(10)
# -
# <h5>2-2) Affichage des dix dernières lignes </h5>
# +
dfc.tail(10)
# -
# <h3> 3)- les statistiques des champs « population » et « Surface_ha » </h3>
#
# <h4>(min, max, moyenne, médian, somme et écart-type) </h4>
# <h5>3-1) La surface en hectare </h5>
# +
# Total surface of the region (sum over communes), in hectares.
dfc['Surface_ha']
print("La surface totale est égale à :",round(dfc['Surface_ha'].sum(),2),"hectares")
# -
# <h5>3-2) La description de la surface </h5>
# +
# Summary statistics (count, mean, std, quartiles, min/max) for Surface_ha.
dfc['Surface_ha'].describe()
# -
# La surface moyenne est de 14 780 hectares avec une dispersion des valeurs entre 608 hectares (minimum) et 73 906 hectares (maximum).
# <h5>3-3) La Population totale </h5>
# Total regional population, reported in millions.
print("La population totale est égale à :",round(dfc['Population'].sum()/1000000,2),"Millions de citoyens")
# <h5>3-4) La description de la population </h5>
dfc['Population'].describe()
# <h6 - La Population moyenne par commune est de 14 232 citoyens avec une dispersion des valeurs entre 2497 citoyens (minimum), et 280 275 citoyens.
# <h3> 4)- Suppression : la colonne « ID » du DataFrame « dfc » <h3>
# Drop the redundant numeric ID column in place.
dfc.drop('ID',axis=1,inplace=True)
# <h3> 5)- Les valeurs uniques dans le champ « Nom_Commune » et les valeurs qui se répetent </h3>
# <h4> 5.1) Les valeurs uniques dans le champ « Nom_Commune » </h4>
#
dfc['Nom_Commune'].unique()
#
# <h4> 5.2) les valeurs qui se répètent plus qu’une fois </h4>
# Occurrence count per commune name (duplicates have count > 1).
dfc['Nom_Commune'].value_counts().to_frame()
#
# <h3> 6)- La commune ayant le taux d’activité le plus bas et celle ayant le taux d’activité le plus haut(et plus bas) </h3>
# <h5> 6-1) la commune ayant le taux d’activité le plus haut </h5>
# +
dfc.loc[dfc['Taux_activite']==dfc['Taux_activite'].max(),['Taux_activite','Nom_Commune']]
# -
# <h5> 6-2) la commune ayant le taux d’activité le plus bas </h5>
# +
dfc.loc[dfc['Taux_activite']==dfc['Taux_activite'].min() ,['Taux_activite','Nom_Commune']]
# -
# <h3> 7)- Les communes ayant simultanément une population supérieure à 10000 et un taux d’analphabétisme supérieur à 40% </h3>
#
dfc[(dfc['Population']>10000) & (dfc['Taux_Analphabetisme']>40)]['Nom_Commune'].unique()
# <h3> 8)- les communes dont le nom commence ou se termine par la lettre "A"</h3>
# +
# Regex: name starts with 'A' (^A) or ends with 'A' (A$).
dfc[dfc['Nom_Commune'].str.contains('^A|A$')]
# -
# <h3> 9)- Visualisation graphique de la répartition de la population des communes </h3>
# +
#Représenter graphiquement la répartition de la population des communes
dfc['Population'].hist()
plt.suptitle("Répartition de la population des communes")
plt.xlabel('Population')
plt.ylabel('frequence ')
plt.show()
# les communes ayant une poplation inferieur à 25000 sont les plus fréquentes ( PLUS DE 200 communes)
# -
# <h6><font color=red>Commentaire:</font> On constate que plus de 200 Communes ont une population Inférieures à 25 000 avec une faible répartition dans les autres strats de populations.
# <h3> 10)- Visualisation graphique du la taux d’activité en fonction du taux d’analphabétisme </h3>
# +
plt.subplot()
plt.scatter(dfc['Taux_activite'],dfc['Taux_Analphabetisme'],marker='+',s=50,color="green")
plt.xlabel('Taux_activite',size=12)
plt.ylabel('Taux_Analphabetisme',size=12)
plt.suptitle('taux d’activité en fonction du taux d’analphabétisme ')
plt.grid()
plt.show()
# -
# <h5> <font color=red>Commentaire:</font>- La zone avec un taux Activité entre 30% et 40% Vs un taux Analphabétisme entre 55% et 75% connait une forte concentration de la répartition.<br>
# - Les commununes ayant un taux Activité Supérieur à 45%, ont un taux Analphabétisme Supérieur à 68% !</h5>
#
# <h3> 11)- Création d'une nouvelle colonne appelée « densite_pop » </h3>
# <h5>11-1) convertir l'unité en hectare pour calculer la densité de la population </h5>
# +
#Convertir l'unité ha en km (1 km² = 100 ha)
dfc['Surface_km']=dfc['Surface_ha']/100
#Calculer la densité (habitants par km²)
dfc['densite_pop'] =dfc['Population']/dfc['Surface_km']
#Afficher la densité par commune
dfc[['Nom_Commune','densite_pop']]
# -
# <h3> 12)- Création d'une fonction qui permet d’extraire le code de la province à partir du code de la commune </h3>
# +
#fonction « codeProvince »
def codeProvince(mylist):
    """Return the province code: the first three characters of a commune code."""
    province_code = mylist[:3]
    return province_code
# Derive each commune's province code from its commune code.
dfc['Code_Province'] = dfc.Code_Commune.apply(codeProvince)
# -
dfc
# <h3> 13)- Ecrire une fonction Python appelée « nomProvince » </h3>
# <h5>13-1)- Chargement de fichier csv </h5>
#fonction Python « nomProvince » qui permet de déterminer le nom de la province à partir de son code en lisant un fichier CSV
# Read codes as strings so leading zeros and joins with Code_Province match.
dfc1=pd.read_csv('Code_Nom_Province.csv',dtype=str)
# +
dfc1
# -
# <h5>13-2)fonction Python « nomProvince » qui permet de déterminer le nom de la province à partir de son code en lisant un fichier CSV</h5>
# <h5>Pour le but d'ajouter la colonne « Nom_Province » aux données « dfc » </h5>
# +
# Build the Code_Province -> Nom_Province lookup from the provinces sheet.
# FIX: a single pass over dfc1 fills both lists (the original iterated
# dfc1.itertuples() twice, once per list).
keys=[]
values=[]
for row in dfc1.itertuples():
    keys.append(row.Code_Province)
    values.append(row.Nom_Province)
def nomProvince():
    """Return a dict mapping each province code to its province name."""
    return { keys[i] : values[i] for i in range(len(keys)) }
# +
dfc['Nom_Province'] = dfc.Code_Province.map(nomProvince())
# -
dfc
# <h3> 14)- Index pour « dfc » les deux colonnes « Code_Province » et « Code_Commune » </h3>
# <h5> 14-1) Trier selon l'index </h5>
# +
#trier selon index
index=pd.MultiIndex.from_tuples(list(zip(dfc['Code_Province'],dfc['Code_Commune'])))
index
dfc.set_index(index,inplace = True)
dfc.sort_index(axis = 0, ascending = False)
# -
# <h3> 15)- Exporter votre DataFrame « dfc » vers un fichier Excel </h3>
# +
#dfc.to_excel(r'dfc_ELHARCHAOUI_SAYAH.xlsx')
# -
# <h3> 16)- Le nombre de communes rurales et urbaines de chaque code province
# <h5>16-1)le nombre de communes urbaines de chaque province</h5>
#le nombre de communes urbaines de chaque province
dfc[dfc.Type_Commune=="Urbain"]["Nom_Province"].value_counts()
# <h5>16-2)le nombre de communes rurales de chaque province</h5>
# +
#le nombre de communes rurales de chaque province
dfc[dfc.Type_Commune=="Rural"]["Nom_Province"].value_counts()
# -
# <h3> 17)- Calculer le taux d’activité de chaque province </h3>
# Activity rate per province = active population / total population.
# First derive each commune's active population from its activity rate (%).
dfc['Population_active']=(dfc['Population']*dfc['Taux_activite'])/100
tx_active_province=(dfc.groupby('Nom_Province')["Population_active"].sum())/(dfc.groupby('Nom_Province')["Population"].sum())
# Render the Series as a one-column DataFrame for display.
tx_active_province.to_frame(name="taux d'activité par province")
# <h3> 18)- Création d'un DataFrame des Provinces qui donne les informations agrégées
# Aggregate commune-level figures up to (province name, province code).
# NOTE(review): selecting groupby columns with a bare tuple is deprecated
# (removed in pandas 2.0) - should be a list, e.g. [['Population', ...]].
# NOTE(review): 'Code_Province' is both a group key and a summed column;
# summing it concatenates strings - confirm that is intended.
dfp = dfc.groupby(['Nom_Province','Code_Province'])['Code_Province','Population','Population_active','Surface_km'].sum()
dfp
# <h3> 19)- Calculer le taux d’activité et le taux d’analphabétisme par province
# +
# Calculer le taux d'activité
dfp['taux_activité_prov']=round((dfp['Population_active']/dfp['Population'])*100,2)
# Calculer le taux d’analphabétisme
dfc['Population_Analphabe']=(dfc['Population']*dfc['Taux_Analphabetisme'])/100
dfp['Taux_Analphabetisme_prov']=round(((dfc.groupby('Nom_Province')['Population_Analphabe'].sum())/(dfp['Population']))*100,2)
# -
dfp
# <h3> 20)- Les pourcentages de répartition de population par tranche d’âge
# +
dfc['inf6']=(dfc['Population']*dfc['Pop_inf6_pc'])/100
dfp['tauxPop_inf6_pc_prov']=round(((dfc.groupby('Nom_Province')['inf6'].sum())/(dfp['Population']))*100,2)
dfc['pop615']=(dfc['Population']*dfc['Pop_615_pc'])/100
dfp['tauxPop_615_prov']=round(((dfc.groupby('Nom_Province')['pop615'].sum())/(dfp['Population']))*100,2)
dfc['pop1560']=(dfc['Population']*dfc['Pop_1560_pc'])/100
dfp['tauxPop_1560_prov']=round(((dfc.groupby('Nom_Province')['pop1560'].sum())/(dfp['Population']))*100,2)
dfc['popsup60']=(dfc['Population']*dfc['Pop_sup60_pc'])/100
dfp['tauxPop_sup60_prov']=round(((dfc.groupby('Nom_Province')['popsup60'].sum())/(dfp['Population']))*100,2)
# -
dfp
# <h3> 21)- Exporter votre DataFrame « dfp » vers un fichier Excel
# +
#dfp.to_excel(r'dfp_ELHARCHAOUI_SAYAH_prod.xlsx')
# -
#
# <h3> 22)- Représentation sur le même graphique la population et la population active des provinces.
# +
dfp[["Population_active","Population"]].plot(kind="bar",figsize=(18,5)
,title='La population active et totale par provinces',color=["#21629E","#14AF9A"])
plt.xlabel('Nom de province',size=15,labelpad =20)
plt.ylabel('Nombre de population',size=13)
plt.tick_params(axis='x', pad=20,rotation=0)
plt.show()
# -
# <h5> <font color=red>Commentaire:</font>La population active au province marrakech a connu une légère
# hausse par rapport l'autres provinces </h5>
#
#
# <h3> 23)- Représenter sur le même graphique le taux d’activité et le taux d’analphabétisme des provinces</h3>
# +
dfp[["taux_activité_prov","Taux_Analphabetisme_prov"]].plot(kind="bar",figsize=(18,5)
,title='le taux d’activité et le taux d’analphabétisme des provinces',color=["#21629E","#14AF9A"])
plt.xlabel('Taux_activite_prov',size=15,labelpad =20)
plt.ylabel('Taux_Analphabetisme_prov',size=13)
plt.tick_params(axis='x', pad=20,rotation=0)
plt.show()
# -
# <h5> <font color=red>Commentaire:</font> La répartition montre que le taux d'activité est moins volatile tandis que le taux d'analphabétisme est plus dispersé.</h5>
# <h3> 24)- Représenter sur le même graphique les pourcentages de répartition de population par tranche
# +
# groupement de graphique taux de repartition des tranches dage par province
dfp[["tauxPop_inf6_pc_prov","tauxPop_615_prov","tauxPop_1560_prov","tauxPop_sup60_prov"]].T.plot.pie(subplots=True,
layout=(2,3),figsize=(20,10),autopct='%1.1f%%',legend=False,
labels=['< 6 ans','entre 6 et 15 ans','entre 15 et 60 ans','> 60 ans'],
title="Répartition de la population des provinces par tranches d'age")
plt.show()
# -
#
#
#
# <div class="alert alert-block alert-info" >
# <center><h1>PARTIE 2 <h1></center>
# </div>
# <h5> Automatisation avec Python et le librairie Selenieum</h5>
# <h5>pour extraire les informations caractéristiques démographiques des communes en 2014</h5>
# <h5>à partir du <a href=http://rgphentableaux.hcp.ma/ > site web du Haut-Commissariat au plan </a> </h5>
# <h5>les informations à extraire sont :</h5>
# <ul>
# <li> Population de la commune</li>
# <li>Age moyen au premier mariage</li>
# <li>Taux d'activité</li>
# <li>Taux d'analphabétisme</li>
# </ul>
# +
import requests
import pandas as pd
import numpy as np
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
# -
#
# <h5> sélection de <font color=blue>la balise select</font> a l'aide de l'element :<mark>find_element_by_name</mark> avec l'attribut nom de select : <font color=red>COMMUNELIST</font> qui contient une liste d'options, chaque <font color=blue> balise option </font>a une valeur code commune et un texte nom commune</h5>
# <h5>pour le but de récupperer le contenu de toutes les options a l'aide de l'element: element.<mark>find_elements_by_tag_name </mark> avec l'attribut nom de l'option : <font color=red>option</font>
# +
# Launch Chrome via Selenium and open the HCP census site.
driver_exe ="chromedriver.exe"
browser = webdriver.Chrome(driver_exe)
browser.get("http://rgphentableaux.hcp.ma/Default1/")
# The COMMUNELIST <select> holds one <option> per commune
# (value = commune code, text = commune name).
# NOTE(review): find_element_by_name / find_elements_by_tag_name were removed
# in Selenium 4 - use find_element(By.NAME, ...) / find_elements(By.TAG_NAME, ...).
element = browser.find_element_by_name('COMMUNELIST')
all_options = element.find_elements_by_tag_name("option")
# Placeholder frame (1686 communes x 2 string columns) overwritten below.
df = pd.DataFrame(np.random.randint(0,100,size=(1686, 2)), columns=list('AB'))
df['A'] = df['A'].astype(str)
df['B'] = df['B'].astype(str)
# -
# Copy each option's value (commune code) and text (commune name) into df.
for i in range(0,1686) :
    df.loc[i,'A'] = all_options[i].get_attribute('value')
    df.loc[i,'B'] = all_options[i].get_attribute('text')
dfcc=pd.read_excel('testt.xlsx')
# +
def fun(code_with_dot):
    """Strip the final character (a trailing '.') from a scraped commune code."""
    trimmed = code_with_dot[:-1]
    return trimmed
df['A'] = df.A.apply(fun)
# +
#for i in range(0,217) :
#for i in range(0,1686) :
#if (dfc.loc[i, 'Nom_Commune'].casefold() == df.loc[j, 'B'].casefold()) :
#dfc.loc[i, 'Code_Commune2014'] = df.loc[j, 'A']
# -
import requests
def rgph2014(codecommune2014):
    """Fetch the 2014 RGPH indicators for one commune from the HCP site.

    Queries themes 2, 4 and 6 and collects population, mean age at first
    marriage, illiteracy rate and activity rate into a single dict keyed
    the way the notebook's DataFrame expects.
    """
    HEADERS = {
        'Referer': 'http://rgphentableaux.hcp.ma/Default1/'
    }
    THEMES = [2, 4, 6]
    # Map each HCP indicator label to the output field it fills.
    FIELD_BY_INDICATOR = {
        'IEE_Population municipale': 'population2014',
        'IRE_Âge moyen au premier mariage': 'age_moyen_mariage2014',
        "IEE_Taux d'analphabétisme": 'taux_analphabetisme2014',
        "IEE_Taux net d'activité": 'taux_activite2014',
    }
    data = {"Code_Commune2014": codecommune2014}
    for theme in THEMES:
        url = f"http://rgphentableaux.hcp.ma/Default1/getDATA/?type=Commune&CGEO={codecommune2014}.&them={theme}"
        result = requests.get(url, headers=HEADERS).json()
        for line in result:
            field = FIELD_BY_INDICATOR.get(line.get('INDICATEUR'))
            if field is not None:
                data[field] = line["DATA2014"]
    return data
dfc["Code_Commune2014"]=dfcc["Code_Commune2014"]
# +
dfc2 = pd.DataFrame([rgph2014(codecommune2014) for codecommune2014 in dfcc["Code_Commune2014"]])
# +
dfc2=pd.merge(dfc, dfc2, on =['Code_Commune2014'])
# -
dfc2
# <h5>26)le taux d’accroissement de la population de chaque commune ((population 2014-Population 2004)/population 2004)</h5>
# +
dfc2['population2014'] = dfc2['population2014'].astype(int)
dfc2['taux d’accroissement commune'] =round(((dfc2['population2014']-dfc2['Population'])/dfc2['Population'])*100,2)
# -
dfc2.head(5)
# <h5>27</h5>
# +
# Convert the 2014 population to int, then compute the province-level
# growth rate between the 2004 and 2014 censuses.
dfc2['population2014'] = dfc2['population2014'].astype(int)
# NOTE(review): the right-hand side is a Series indexed by Code_Province,
# while dfc2 rows are indexed per commune - pandas aligns on index during
# assignment, so this column likely ends up NaN; consider .map() or a merge
# on Code_Province instead. Confirm against the notebook output.
dfc2['taux d’accroissement province']=(dfc2.groupby('Code_Province')['population2014'].sum()- dfc.groupby('Code_Province')['Population'].sum())/dfc.groupby('Code_Province')['Population'].sum()
# -
#
# <h5>29</h5>
#
#
dfc2.to_excel(r'dfc2_ELHARCHAOUI_SAYAH_prod.xlsx')
# +
dfc2
# -
# <h5> 28 )un graphique qui met en exergue la variation de la population des provinces entre 2004 et 2014 </h5>
dfp2 = dfc2.groupby(['Nom_Province','Code_Province'])['Code_Province','Population','Population_active','population2014'].sum()
# +
dfp2[["Population","population2014"]].plot(kind="bar",figsize=(18,5)
,title='la variation de la population des provinces entre 2004 et 2014',color=["#21629E","#14AF9A"])
plt.xlabel('Nom Province',size=15,labelpad =20)
plt.ylabel('Population2004-2014',size=13)
plt.tick_params(axis='x', pad=20,rotation=0)
plt.show()
# -
| PROJET PYTHON DEMOGRAPHIE .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pygromos2
# language: python
# name: pygromos2
# ---
# # Gromos Trajectory evaluation with Pygromos and Pandas
#
# ## Example file for the evaluation of GROMOS trajectory files in pygromos
# 1. Analysis of a GROMOS trc file (position trajectory)
# 1. Import
# 2. Common Functions
# 2. Analysis of a GROMOS tre file (energy trajectory)
# 1. Import
# 2. Common Functions
# + pycharm={"is_executing": true}
# general imports for manual data manipulations. Not needed if only provided functions are used
import numpy as np
import pandas as pd
# +
#specific imports from pygromos for trc and tre file support
import pygromos.files.trajectory.trc as traj_trc
import pygromos.files.trajectory.tre as traj_tre
# -
# ## 1) TRC
# ### 1.1) TRC import
# import the trajectory file into a Trc class
trc = traj_trc.Trc(input_value="example_files/Traj_files/test_CHE_vacuum_sd.trc")
# The Trc class offers the normal gromos block structure and additionally a pandas DataFrame called database where all the timesteps are stored.
# For typical trc files the only classic block is the TITLE block, and all the other blocks are stored inside the database.
#
# Additionally many common functions are offered to evaluate the given data. If a needed function is not provided, the normal pandas syntax can be used to create custom functions.
#
# If you have a function that's generally useful, please contact the developers to possibly add it to the pygromos code to help other people :)
[x for x in dir(trc) if not x.startswith("_")]
# ### 1.2) Common trc functions
# Get the average movement length between two frames
trc.get_atom_movement_length_mean(atomI=1)
# Or get the center of mass movement for a whole group of atoms. The atoms are provided as numbers in a list.
trc.get_cog_movement_total_series_for_atom_group(atoms=[1,2,5]).mean()
# Get the average distance between two atoms over all time frames
trc.get_atom_pair_distance_mean(atomI=1, atomJ=2)
# #### RMSD
# Calculate the rmsd to the initial frame (0th frame).
# Alternatively a different trajectory can be provide as argument to the rmsd function.
# The accepted arguments are integer or single trajectory frame.
rmsd = trc.rmsd(0)
# Which returns the rmsd for every time frame to the initial frame.
# It can be seen how the rmsd slowly gets larger as the simulations get farther away from the initial setup.
rmsd
# The mean over all frames can be easily taken with the pandas function mean()
rmsd.mean()
# #### RDF
# +
# This functionality is still under development
# -
# ## 2) TRE
# ### 2.1) Tre import and structure
# +
# import the trajectory file into a Tre class
# Load the example bilayer energy trajectory, using the field-name table that
# matches the GROMOS 2015 tre block layout.
from pygromos.files.trajectory.tre_field_libs.ene_fields import gromos_2015_tre_block_names_table
tre = traj_tre.Tre(input_value="example_files/Traj_files/test_CHE_H2O_bilayer.tre", _ene_ana_names=gromos_2015_tre_block_names_table)
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
tre.database
# -
[x for x in dir(tre) if not x.startswith("_")]
# Tre files contain all energy related data (like split up energy terms, temperature, pressure, .....). In PyGromos they generally share the same block structure as other files, but all the data inside the specific timesteps is stored efficiently inside a pandas DataFrame, here called tre.database . This database offers manipulation with all pandas functions. Alternatively many common functions are provided inside the Tre class.
#
# This class should in principle replace further usage of the gromos++ ene_ana function, since all these operation can be done efficiently on the pandas DataFrame.
#
# We are currently working on adding more common functions to the Tre class. If you find a useful function please contact the developers so the function can be added for general usage :)
# ### 2.2) Common Tre functions
# calculate the average density over all timesteps
tre.get_density().mean()
# calculate the mean temperature over all frames for all baths in the system. In this example two baths with slightly different temperatures.
tre.get_temperature().mean()
# Tables and lists inside the database are stored in numpy arrays. For example the two temperatures from the previous example are stored in a numpy array of size 2 since it has two temperature baths
# Specific values inside a tre file can also be directly accessed with numpy and pandas syntax
tre.database.iloc[2]
# select the first nonbonded energy value for the first force group over all time frames
tre.database["nonbonded"].apply(lambda x: x[0][0])
# + pycharm={"name": "#%%\n"}
tre.get_totals()
# -
# ### $\lambda$-Sampling & TREs
# import the trajectory file into a Tre class
tre = traj_tre.Tre(input_value="example_files/Traj_files/RAFE_TI_l0_5.tre")
tre.get_precalclam()
# + [markdown] pycharm={"name": "#%% md\n"}
# ### EDS in TREs
# -
# import the trajectory file into a Tre class
tre = traj_tre.Tre(input_value="example_files/Traj_files/RAFE_eds.tre")
tre.get_eds()
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Concatenate and Copy multiple Trajectories
# -
# Trajectories offer a wide range of additional file manipulations. Trajectory classes can be copied (deep) and added to each other to concatenate multiple small simulation pieces into one large trajectory.
tre_copy = traj_tre.Tre(input_value=tre)
# + pycharm={"name": "#%%\n"}
tre_copy.database.shape
# + pycharm={"name": "#%%\n"}
tre_combined = tre + tre_copy
# + pycharm={"name": "#%%\n"}
tre_combined.database.shape
# -
# In the new combined trajectory we have one long trajectory made from the two smaller ones. The length is one element shorter, since normally the last element of the first trajectory and the first element of the second trajectory is the same element. This can be controlled via the option "skip_new_0=True" in the add_traj() function which is the core of the "+" operator for trajectories. In the following line the default behavior can be seen as a smooth numbering in the TIMESTEPs.
# + pycharm={"name": "#%%\n"}
tre_combined.database.TIMESTEP_time
| examples/example_gromos_trajectories.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:pytorch_env] *
# language: python
# name: conda-env-pytorch_env-py
# ---
# +
import numpy as np
import torch
import torch.nn as nn
# -
class Bottleneck(nn.Module):
    """Standard ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4).

    Any stride is applied by the middle 3x3 convolution; an optional
    ``downsample`` module reshapes the skip connection to match the main
    path before the residual addition.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Skip connection; projected by downsample when the main path
        # changes shape, otherwise the input itself.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Residual addition, then the final activation.
        return self.relu(out + identity)
class ResNet(nn.Module):
    """ResNet backbone (default layer config [3, 4, 6, 3], i.e. ResNet-50 depth).

    ``last_stride`` controls the stride of the final stage (re-id models
    commonly use last_stride=1 to keep a larger final feature map).
    NOTE(review): no forward() is defined here and the post-bn1 ReLU is
    commented out - callers apply the stages manually (see the cell below);
    confirm the missing ReLU/forward is intentional.
    """

    def __init__(self, last_stride=2, block=Bottleneck, layers=[3, 4, 6, 3]):
        # NOTE(review): mutable default for ``layers`` - safe only while it
        # is never mutated.
        self.inplanes = 64
        super().__init__()
        # Stem: 7x7 stride-2 conv + BN, then a 2x2 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # self.relu = nn.ReLU(inplace=True)   # add missed relu
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
        # Four stages of bottleneck blocks; width doubles at each stage.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=last_stride)

    def _make_layer(self, block, planes, blocks, stride=1):
        # Stack ``blocks`` bottlenecks; only the first may stride/downsample.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Project the skip connection when shape or stride changes.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
# Smoke-test the backbone on a person-re-id style input (3 x 256 x 128),
# applying each stage manually since ResNet defines no forward().
X = torch.randn(size=(1, 3, 256, 128))
net = ResNet()
l1 = net.conv1(X)
print(l1.shape)
l2 = net.maxpool(l1)
print(l2.shape)
l3 = net.layer1(l2)
print(l3.shape)
l4 = net.layer2(l3)
print(l4.shape)
l5 = net.layer3(l4)
print(l5.shape)
l6 = net.layer4(l5)
print(l6.shape)
| EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import torch
import torch.utils.data
import math
import torch.nn.functional as F
npdata = np.genfromtxt(
open("./data/house-train.csv", "rb"),
delimiter=",",
dtype='unicode'
)
npdata
npdata[0]
npdata[0, [4, 38, 46, 62, 12, 15]]
npdata[0, [80]]
neighborhoods = list(set(npdata[1:, 12]))
building_type = list(set(npdata[1:, 15]))
# +
class HouseDataset(torch.utils.data.Dataset):
    """House-price dataset loaded from ./data/house-train.csv.

    Feature columns (by CSV index): 4, 38, 46, 62 numeric; 12 neighborhood;
    15 building type. Column 80 is the sale-price target. The two categorical
    columns are replaced by their index in the module-level ``neighborhoods``
    and ``building_type`` lists so they can feed embedding layers.
    """

    def __init__(self):
        # Read the whole CSV as strings first; numeric conversion happens
        # after the categorical columns are index-encoded.
        npdata = np.genfromtxt(
            open("./data/house-train.csv", "rb"),
            delimiter=",",
            dtype='unicode'
        )
        # Row 0 is the header, hence the 1: slices below.
        np_inputs = npdata[1:, [4, 38, 46, 62, 12, 15]]
        for row in np_inputs:
            # Encode categoricals as list indices (become floats below).
            row[4] = neighborhoods.index(row[4])
            row[5] = building_type.index(row[5])
        np_inputs = np_inputs.astype(np.float32)
        np_outputs = npdata[1:, [80]].astype(np.float32)
        self.inputs = torch.from_numpy(np_inputs)
        self.outputs = torch.from_numpy(np_outputs)

    def __len__(self):
        # Number of samples.
        return len(self.inputs)

    def __getitem__(self, idx):
        # (features, price) pair for sample ``idx``.
        return (self.inputs[idx], self.outputs[idx])
# -
dataset = HouseDataset()
dataset.__getitem__(0)
train_size = math.floor(len(dataset) * 0.8)
val_size = len(dataset) - train_size
train_data, val_data = torch.utils.data.random_split(
dataset,
[train_size, val_size]
)
len(train_data)
len(val_data)
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=128,
shuffle=True
)
val_loader = torch.utils.data.DataLoader(
val_data,
batch_size=128,
shuffle=True
)
class Net(torch.nn.Module):
    """Price regressor mixing numeric features with learned category embeddings.

    Input rows: columns 0-3 numeric, column 4 a neighborhood index,
    column 5 a building-type index.
    """

    def __init__(self):
        super().__init__()
        # Categorical features are embedded into small dense vectors.
        self.n_emb = torch.nn.Embedding(len(neighborhoods), 5)
        self.bt_emb = torch.nn.Embedding(len(building_type), 3)
        # Numeric (4) + neighborhood (5) + building type (3) = 12 inputs.
        self.layer1 = torch.nn.Linear(12, 16)
        self.layer2 = torch.nn.Linear(16, 1)

    def forward(self, x):
        numeric = x[:, [0, 1, 2, 3]]
        neighborhood_vec = self.n_emb(x[:, 4].long())
        building_vec = self.bt_emb(x[:, 5].long())
        features = torch.cat((numeric, neighborhood_vec, building_vec), dim=1)
        hidden = F.relu(self.layer1(features))
        hidden = F.dropout(hidden, p=0.2, training=self.training)
        return self.layer2(hidden)
net = Net()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.01, weight_decay=0.01)
# +
# Train for 250 epochs, accumulating a size-weighted mean absolute
# percentage error over every batch seen.
total_pct_error = 0
total_count = 0
net.train()  # enable dropout
for i in range(250):
    for j, data in enumerate(train_loader, 0):
        inputs = data[0]
        outputs = data[1]  # target sale prices
        net.zero_grad()
        net_output = net(inputs)
        diff = (net_output - outputs).abs()
        pct_error = (diff / outputs * 100).mean().item()
        # Weight each batch's error by its size for a proper overall mean.
        total_pct_error += (pct_error * len(inputs))
        total_count += len(inputs)
        loss = criterion(net_output, outputs)
        loss.backward()
        optimizer.step()
# NOTE(review): this averages over ALL epochs, including the early untrained
# ones, so it overstates the final training error.
print("%.1f%%" % (total_pct_error / total_count))
# +
# Evaluate the mean absolute percentage error on the held-out split.
val_pct_error = 0
val_count = 0
net.eval()  # disable dropout for evaluation
# NOTE(review): consider wrapping this loop in torch.no_grad() to skip
# gradient tracking during validation.
for i, data in enumerate(val_loader, 0):
    inputs = data[0]
    prices = data[1]
    net_output = net(inputs)
    diff = (net_output - prices).abs()
    pct_diff = (diff / prices * 100).mean().item()
    # Size-weighted accumulation, same scheme as training.
    val_pct_error += (pct_diff * len(inputs))
    val_count += len(inputs)
print("%0.1f%%" % (val_pct_error / val_count))
# -
# Persist model and optimizer state so training can be resumed later.
torch.save({
    'model': net.state_dict(),
    'optimizer': optimizer.state_dict()
}, './house_prices.pt')
| House Prices-Regularization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import json
# %matplotlib inline
# Load per-player statistics scraped into JSON.
players = pd.read_json('players_statistics.json')
players.info()
players.head()
# Features: ice time, shots on goal, plus/minus, power-play assists;
# target: total goals.
X = players[['SkatingTimeOnIceMinutes', 'SkatingShotsOnGoal' , 'SkatingPlusMinus', 'SkatingAssistsPowerPlay']]
y = players['SkatingGoalsTotal']
from sklearn.model_selection import train_test_split
# 60/40 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101)
from sklearn.linear_model import LinearRegression
# +
lm = LinearRegression()
# +
lm.fit(X_train,y_train)
# -
# One coefficient per feature, labelled by column name.
coeff_df = pd.DataFrame(lm.coef_,X.columns,columns=['Coefficient'])
predictions = lm.predict(X_test)
# Predicted vs actual goals; a tight diagonal indicates a good fit.
plt.scatter(y_test,predictions)
# NOTE(review): sns.distplot is deprecated in seaborn >= 0.11 - use
# sns.histplot(..., kde=True) or sns.displot instead.
sns.distplot((y_test-predictions),bins=50);
| scripts/data_vis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Eating Magical Cakes with Dynamic Programming
#
# > Finding the optimal saving rate with dynamic programming.
#
# - toc: true
# - badges: true
# - comments: true
# - author: <NAME>
# - image: images/cake.jpeg
# - categories: [economics, mathematics, python]
# In layman terms, [dynamic programming](https://en.wikipedia.org/wiki/Dynamic_programming) is a method of mathematical optimization—breaking down a complex problem into simpler parts and storing the results to save on computing power. We use dynamic programming to efficiently calculate the Bellman equation and determine optimal solutions.
# ## Background
#
# Suppose a nice genie has given us a magic cake. The cake's initial size at time 0 is $y_0z_0$, some known number. Once per day, we are allowed to eat a piece of any size from the cake. Overnight, the cake will regenerate with multiplicative random variable $z_t$ whose logarithm is independently and identically distributed $N(\mu, \sigma)$. This means we potentially have an unlimited source of cake!
#
# Suppose the genie places the condition that we must choose a constant fraction of the cake to eat each day. As rational economists, we want to fully maximize our happiness by eating the right amount of cake each day. For this experiment, we can assume our utility function to be $E\big[\sum_{t=0}^{\infty}\beta^tu(c_t)\big]$.
# ## Exercise
#
# We apply the following:
#
# 1. Guess that the optimal policy is to eat a constant fraction of the magic cake in every period.
# 2. Assume that $u(c) = \frac{c^{1-r}-1}{1-r}$ for $r \geq 0$. Fix $z_0y_0=69$, $r=2$, $\beta = 0.95$, $\mu=0$, and $\sigma = 0.1$.
# 4. Using np.random.seed(1234), generate a fixed sequence of 100 log normally distributed random variables.
# 5. Use Python to search for the fraction that maximizes the expected reward for that fixed sequence.
# ## Terminology
#
# We use the following terminology:
#
# - The **state** space, denoted $X$ is the size of the cake when we start with $(y_0, z_0)$.
# - The **control,** denoted $U$, is the piece of any size from the cake that you choose to eat. So we have $u \in U$ such that $0 < u < 1$.
# - The **law of motion** represents the transition into the next price. This is the the multiplicative random variable or shock $z_t$, whose logarithm is independently and identically distributed $N(\mu, \sigma)$.
# - The **reward** $r$ is $r: X \times Y \rightarrow \mathbb{R}$, so we have the reward r(x,y) for $x \in X$ if we choose some fraction $u \in U$.
# - The **discount factor** is $\beta$, how much we discount to forgo current felicity in favor of future felicity.
# ## Setting up the Dynamic Program
#
# Let us reiterate what we know so far:
#
# <ol>
# <li> The state space is the random size of the cake</li>
# <li> The control space is how much one eats in period $t$</li>
# <li> The law of motion is $x_{t+1} = z_{t+1}(x_t-c_t)$, where $x_t$ is the known size of the cake at time $t$. $z_{t+1}$ is lognormally distributed with parameters $\mu$ and $\sigma$.</li>
# <li> The reward function is $u(c)$.</li>
# <li> The discount factor is $\beta$. </li>
# </ol>
#
# For this problem, we can apply the Bellman equation:
#
# $$
# V(x_t) = max_{c_t \in [0,x_t]}\left\{ u(c_t) + \beta E[V\left(z_{t+1}(x_t-c_t)\right)|z_t] \right\}
# $$
#
# $V(\cdot)$ represents a value fuction where, for each $c$ given a value $0 \leq u \leq 1$, we maximize the expected value of the optimal plan. Essentially, the bellman equation lets us find the optimal solution of a complex, iterative problems by breaking it down into simpler parts—which is why we use dynmaic programming to solve this problem. An explanation of the bellman equation can be found [here](https://towardsdatascience.com/the-bellman-equation-59258a0d3fa7).
#
# ## Code
# Let us first initialize the values of the state space: we assume that $u(c) = \frac{c^{1-r}-1}{1-r}$ for $r \geq 0$. Fix $z_0y_0=69$, $r=2$, $\beta = 0.95$, $\mu=0$, and $\sigma = 0.1$. We then generate a fixed sequence of 100 log normally distributed random variables.
# +
import numpy as np

# Problem parameters from the exercise statement:
# z0*y0 = 69, r = 2, beta = 0.95, mu = 0, sigma = 0.1.
z0y0 = 69
r = 2
beta = 0.95  # was 0.97, contradicting the beta = 0.95 fixed in the text above
mu = 0
sigma = .1
T = 100
# Generate a fixed sequence of T = 100 log-normally distributed shocks,
# seeded so every run sees the same draw.
np.random.seed(1234)
sequence = np.random.lognormal(mu, sigma, T)
# -
# Next, we create the felicity (utility) function. We use this to calculate our happiness for given amount of cake we consume.
# +
# Felicity function
def felicity(c, r):
    """Return CRRA utility u(c) = (c**(1-r) - 1) / (1-r) for curvature r >= 0.

    Handles the r == 1 case with its continuous limit log(c); the original
    formula divided by zero there.
    """
    if r == 1:
        # lim_{r->1} (c**(1-r) - 1) / (1-r) = log(c)
        return np.log(c)
    return (c**(1 - r) - 1) / (1 - r)
# -
# We can then create a function to calculate our utility overtime.
# +
# Value function
def bigU(c, beta, r, T):
    """Discounted lifetime utility: sum_{t=0}^{T-1} beta**t * u(c[t]).

    c    : sequence of consumption levels
    beta : discount factor
    r    : CRRA curvature passed through to felicity()
    T    : number of periods summed
    """
    return sum(beta ** t * felicity(c[t], r) for t in range(T))
# -
# Now we create a function to calculate the law of motion. We need this to figure out how much the cake will grow in the next period.
# +
# Law of motion
def motion(initial_state, z, share):
    """Simulate the cake's law of motion and return the consumption path.

    Each period we eat a fixed fraction ``share`` of the current cake and
    the remainder is rescaled by the shock z[t]:
        c_t = share * y_t,   y_{t+1} = z[t] * (y_t - c_t)
    Returns a list of len(z) + 1 consumption levels (period 0 included).
    """
    cake = initial_state
    consumption = share * cake
    path = [consumption]
    for shock in z:
        cake = shock * (cake - consumption)
        consumption = share * cake
        path.append(consumption)
    return path
# -
# Finally, we can use the above functions to create a function that determines the fraction of the cake that yields the maximum utility overtime.
# +
# Maximize expected reward function
# Initial values
# Parameters for the grid search below.
z0y0 = 69
# NOTE(review): the exercise statement fixes r = 2, but this cell uses
# r = .2 - the reported best fraction depends on it; confirm which is intended.
r = .2
beta = 0.95
mu = 0
sigma = .1
T = 100
def max_cake(initial_state,
             beta,
             r,
             mu,
             sigma,
             T,
             a,
             b,
             increment):
    """Grid-search the constant consumption share in [a, b) that maximizes
    discounted utility for a fixed, seeded sequence of T log-normal shocks.

    initial_state : size of the cake at time 0
    beta          : discount factor
    r             : CRRA curvature used by the utility functions
    mu, sigma     : parameters of the log-normal shocks
    T             : number of simulated periods
    a, b          : search range for the share (b excluded)
    increment     : grid step between candidate shares
    Prints the best share and the utility it achieves.
    """
    # Fixed shock sequence so every candidate share faces the same draw.
    np.random.seed(1234)
    shocks = np.random.lognormal(mu, sigma, T)
    candidates = np.arange(a, b, increment)
    utilities = [bigU(motion(initial_state, shocks, share), beta, r, T)
                 for share in candidates]
    best = int(np.argmax(utilities))
    print('The best constant fraction is',
          candidates[best],
          'and the maximum utility is', round(max(utilities), 2))
max_cake(z0y0, beta, r, mu, sigma, T, .001, 1, .001)
# -
#
# ## Conclusion
#
# Provided that the optimal policy is to eat a constant fraction of the magic cake in every period, we find that eating 22% of the cake each day maximizes our expected reward for the fixed sequence. Thank you genie!
#
# Things to consider: We made assumptions on our reward function, discount factor, etc. If we were to change the value of these conditions, there is no doubt that our best constant fraction and maximum utility would also change. And just because we are "happiest" does not mean we are happy at all! I imagine that once I choose a fraction of the cake to eat for each day, I would be compelled to eat the same flavored cake every day for the rest of my life. What a sneaky genie!
# ## Credits
#
# Photo by [<NAME>](https://unsplash.com/@rosenberg12?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash.](https://unsplash.com/s/photos/cake?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)
#
# This exercise is based on Sargent and Stachurski's article on [Optimal Saving.](https://python.quantecon.org/cake_eating_problem.html)
| _notebooks/2021-10-21-Magic-Cake.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Count upper- and lower-case letters in a line read from stdin;
# other characters (digits, spaces, punctuation) are ignored.
s = input()
upperL = lowerL = 0
for c in s:
    if c.isupper():
        upperL += 1
    elif c.islower():
        lowerL += 1
print('UPPER CASE {}'.format(upperL))
print('LOWER CASE {}'.format(lowerL))
# -
# 1, 2, 3, 4, 5, 6, 7, 8, 9
numbers = map(int, input().split(','))
oddNumbers = [e for e in numbers if e % 2 != 0]
print(oddNumbers)
# +
n = int(input())
def gen_div_by_7(n):
    """Yield every multiple of 7 in [0, n], in ascending order.

    Steps directly through the multiples instead of testing every integer
    up to n, turning the original O(n) scan into O(n/7) work. Yields
    nothing for negative n, matching the original behavior.
    """
    yield from range(0, n + 1, 7)
for i in gen_div_by_7(n):
print(i, end=" ")
# -
# Build a dict by zipping parallel lists; zip truncates to the shorter
# list, so only one pair survives here (keys has a single element).
keys = ['<KEY>']
values = [1, 2, 3]
x = dict(zip(keys, values))
print(x)
# Element-wise maximum of two lists; the lambda could simply be ``max``.
a = [1, 2, 3, 4, 5]
b = [2, 2, 9, 0, 9]
list(map(lambda pair: max(pair), zip(a, b)))
| study_jupyter/Interview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/NoCodeProgram/CodingTest/blob/main/mathBit/powerOfTwo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="1h-aKmHHyS5l"
# Title : Power of Two
#
# Chapter : Math, Bit
#
# Link :
#
# ChapterLink :
#
# 문제: 주어진 숫자가 2의 제곱수인지 판별하여라
# + id="wYxcpiAMyPK5"
#O(lg n) time complexity
def isPowerOfTwo(n):
    """Return True iff n is a positive power of two (1, 2, 4, 8, ...).

    O(log n): repeatedly halve n while it is even, then check whether we
    reached 1. Uses floor division so n stays an exact int; the original's
    true division (n / 2) produced floats, which lose precision above
    2**53 and raise OverflowError for very large integers.
    """
    if n <= 0:
        # 0 is not a power of two, and negative numbers never are.
        return False
    while n % 2 == 0:
        n //= 2
    return n == 1
# + colab={"base_uri": "https://localhost:8080/"} id="rSZsPU2jzrFw" outputId="ac82af47-a869-4a77-e995-0422ca2be7e8"
print(isPowerOfTwo(4))
print(isPowerOfTwo(8))
print(isPowerOfTwo(5))
# + id="4RBmdj4xzvDM"
#O(1) time complexity
def isPowerOfTwo(n):
    """Bit-trick check: n is a power of two iff n != 0 and n & (n - 1) == 0.

    Clearing the lowest set bit of a power of two leaves zero; any other
    nonzero value keeps at least one bit set. O(1) time.
    """
    return n != 0 and n & (n - 1) == 0
# + colab={"base_uri": "https://localhost:8080/"} id="tUGWLzAC0PG9" outputId="44952ec1-68b1-4ef0-8615-0dfd03bd80ae"
print(isPowerOfTwo(4))
print(isPowerOfTwo(8))
print(isPowerOfTwo(5))
# + id="mgl0LJlH0Qc5"
| mathBit/powerOfTwo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# <div class="contentcontainer med left" style="margin-left: -50px;">
# <dl class="dl-horizontal">
# <dt>Title</dt> <dd> Polygons Element</dd>
# <dt>Dependencies</dt> <dd>Bokeh</dd>
# <dt>Backends</dt> <dd><a href='./Polygons.ipynb'>Bokeh</a></dd> <dd><a href='../matplotlib/Polygons.ipynb'>Matplotlib</a></dd>
# </dl>
# </div>
import numpy as np
import holoviews as hv
hv.extension('bokeh')
# A ``Polygons`` represents a contiguous filled area in a 2D space as a list of paths. Just like the ``Contours`` element additional scalar value dimensions may be supplied, which can be used to color the ``Polygons`` with the defined ``cmap``. Like other ``Path`` types it accepts a list of arrays, dataframes, a dictionary of columns (or any of the other literal formats including tuples of columns and lists of tuples).
#
# In order to efficiently represent the scalar values associated with each path the dictionary format is preferable since it can store the scalar values without expanding them into a whole column. Additionally it allows passing multiple columns as a single array by specifying the dimension names as a tuple.
#
# In this example we will create a list of random polygons each with an associated ``level`` value. Polygons will default to using the first value dimension as the ``color_index`` but for clarity we will define the ``color_index`` explicitly:
# +
# %%opts Polygons [color_index='level'] (line_color='black' line_width=1)
np.random.seed(1)
def rectangle(x=0, y=0, width=.05, height=.05):
    """Return the 4 corner vertices of an axis-aligned rectangle.

    Corners are listed counter-clockwise starting at (x, y), as a
    (4, 2) numpy array suitable for a hv.Polygons path.
    """
    corners = [(x, y), (x + width, y), (x + width, y + height), (x, y + height)]
    return np.array(corners)
hv.Polygons([{('x', 'y'): rectangle(x, y), 'level': z}
for x, y, z in np.random.rand(100, 3)], vdims='level').redim.range(x=(-.1,1.1), y=(-0.1, 1.1))
# -
# ``Polygons`` is a very versatile element which may be used to draw custom annotations, choropleth maps (as can be seen in the [texas_unemploment example](../../../gallery/demos/bokeh/texas_choropleth_example.ipynb)) among many other examples. We can also use some of the other path based annotations to quickly generate polygons, including ``Box``, ``Bounds`` and ``Ellipse`` elements. In the simple case we can simply pass a list of these elements:
hv.Polygons([hv.Box(i, i, i) for i in range(1, 10)])
# Alternatively we can use the ``array`` method to return the x/y-coordinates of the annotations and define additional z-values by declaring a dictionary:
hv.Polygons([{('x', 'y'): hv.Box(0, 0, i).array(), 'z': i} for i in range(1, 10)[::-1]], vdims='z') +\
hv.Polygons([{('x', 'y'): hv.Ellipse(0, 0, (i, i)).array(), 'z': i} for i in range(1, 10)[::-1]], vdims='z')
# For full documentation and the available style and plot options, use ``hv.help(hv.Polygons).``
| examples/reference/elements/bokeh/Polygons.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.5 64-bit (''venv'': venv)'
# language: python
# name: python3
# ---
# +
from matplotlib import pyplot as plt

# US GDP (in billions of dollars) by decade.
years = [1950, 1960, 1970, 1980, 1990, 2000, 2010]
gdp = [300.2, 543.3, 1075.9, 2862.5, 5979.6, 10289.7, 14958.3]

# NOTE(review): both calls draw the same series; the second overlays point
# markers. Drop the first call if a single styled line is intended.
plt.plot(years, gdp)
plt.plot(years, gdp, marker='o')
plt.title('My first graph')
# Bug fix: `plt.show` was a bare attribute reference (missing parentheses),
# so the figure was never rendered when run as a script.
plt.show()
# -
# Bar Chart
movies = ["<NAME>", "Ben-Hur", "Casablanca", "Gandhi", "West Side Story"]
num_oscars = [5, 11, 3, 8, 10]
plt.bar(range(len(movies)), num_oscars)
# Label each bar position with its movie title.
plt.xticks(range(len(movies)), movies)
# Bug fix: `plt.show` was a bare attribute reference (missing parentheses),
# so the chart was never rendered when run as a script.
plt.show()
# +
# Line Charts
variance = [1, 2, 4, 8, 16, 32, 64, 128, 256]
bias_squared = [256, 128, 64, 32, 16, 8, 4, 2, 1]
total_error = [x + y for x, y in zip(variance, bias_squared)]
xs = [i for i, _ in enumerate(variance)]
plt.plot(xs, variance, 'g-', label='variance')
plt.plot(xs, bias_squared, 'r-.', label='bias^2')
plt.plot(xs, total_error, 'b:', label='total error')
plt.legend(loc=9)
plt.xlabel("model complexity")
plt.xticks([])
plt.title("The Bias-Variance Tradeoff")
plt.show()
# +
from matplotlib import pyplot as plt
# Scatterplots
# NOTE(review): friends/minutes are defined but the scatter that used them
# is commented out below — dead data unless that plot is restored.
friends = [ 70, 65, 72, 63, 71, 64, 60, 64, 67]
minutes = [175, 170, 205, 120, 220, 130, 105, 145, 190]
#plt.scatter(friends, minutes)
# matplotlib choose the scale
x = [99, 90, 85, 97, 90]
y = [100, 85, 60, 90, 70]
plt.scatter(x, y)
# Equal axis scaling so the two comparable score ranges are not distorted.
plt.axis("equal")
plt.show()
# -
| ch_3-data_visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# COMPARING MEDIAN INCOME WITH NET MIGRATION
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# +
# Read the excel file into a pandas DataFrame
combined_data = pd.read_excel('2010-2019combineddata.xls')
combined_data.head()
# +
# Assign the data to X and y
# Note: Sklearn requires a two-dimensional array of values
# so we use reshape to create this
X = combined_data["Median Income"].values.reshape(-1, 1)
y = combined_data["Net Migration"].values.reshape(-1, 1)
print("Shape: ", X.shape, y.shape)
# -
# Plot the data to see if a linear trend exists
plt.scatter(X, y)
plt.xlabel("Median Income")
plt.ylabel("Net Migration")
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Create the model
from sklearn.linear_model import LinearRegression
model = LinearRegression()
# Fit the model to the training data.
model.fit(X_train, y_train)
# +
print('Weight coefficients: ', model.coef_)
print('y-axis intercept: ', model.intercept_)
# Note: we have to transform our min and max values
# This is the required format for `model.predict()`
x_min = np.array([[X.min()]])
x_max = np.array([[X.max()]])
# Calculate the y_min and y_max using model.predict and x_min and x_max
y_min = model.predict(x_min)
y_max = model.predict(x_max)
# +
# Plot X and y using plt.scatter
# Plot the model fit line using [x_min[0], x_max[0]], [y_min[0], y_max[0]]
plt.scatter(X, y, c='blue')
plt.plot([x_min[0], x_max[0]], [y_min[0], y_max[0]], c='red')
plt.xlabel("Median Income")
plt.ylabel("Net Migration")
# +
from sklearn.metrics import mean_squared_error, r2_score
# Use our model to make predictions
predicted = model.predict(X_test)
# Score the predictions with mse and r2
mse = mean_squared_error(y_test, predicted)
r2 = r2_score(y_test, predicted)
print(f"Mean Squared Error (MSE): {mse}")
print(f"R-squared (R2 ): {r2}")
model.score(X_test, y_test)
# -
# UNEMPLOYMENT RATE AND NET MIGRATION
# +
# Assign the data to X and y
# Note: Sklearn requires a two-dimensional array of values
# so we use reshape to create this
X = combined_data["Unemployment Rate"].values.reshape(-1, 1)
y = combined_data["Net Migration"].values.reshape(-1, 1)
print("Shape: ", X.shape, y.shape)
# -
# Plot the data to see if a linear trend exists
plt.scatter(X, y)
plt.xlabel("Unemployment Rate")
plt.ylabel("Net Migration")
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Create the model
from sklearn.linear_model import LinearRegression
model = LinearRegression()
# Fit the model to the training data.
model.fit(X_train, y_train)
# +
print('Weight coefficients: ', model.coef_)
print('y-axis intercept: ', model.intercept_)
# Note: we have to transform our min and max values
# This is the required format for `model.predict()`
x_min = np.array([[X.min()]])
x_max = np.array([[X.max()]])
# Calculate the y_min and y_max using model.predict and x_min and x_max
y_min = model.predict(x_min)
y_max = model.predict(x_max)
# +
# Plot X and y using plt.scatter
# Plot the model fit line using [x_min[0], x_max[0]], [y_min[0], y_max[0]]
plt.scatter(X, y, c='blue')
plt.plot([x_min[0], x_max[0]], [y_min[0], y_max[0]], c='red')
plt.xlabel("Unemployment Rate")
plt.ylabel("Net Migration")
# +
from sklearn.metrics import mean_squared_error, r2_score
# Use our model to make predictions
predicted = model.predict(X_test)
# Score the predictions with mse and r2
mse = mean_squared_error(y_test, predicted)
r2 = r2_score(y_test, predicted)
print(f"Mean Squared Error (MSE): {mse}")
print(f"R-squared (R2 ): {r2}")
model.score(X_test, y_test)
# -
# COMPARING UNEMPLOYMENT WITH MEDIAN INCOME
# +
# Assign the data to X and y
# Note: Sklearn requires a two-dimensional array of values
# so we use reshape to create this
X = combined_data["Unemployment Rate"].values.reshape(-1, 1)
y = combined_data["Median Income"].values.reshape(-1, 1)
print("Shape: ", X.shape, y.shape)
# -
# Plot the data to see if a linear trend exists
plt.scatter(X, y)
plt.xlabel("Unemployment Rate")
plt.ylabel("Median Income")
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Create the model
from sklearn.linear_model import LinearRegression
model = LinearRegression()
# Fit the model to the training data.
model.fit(X_train, y_train)
# +
print('Weight coefficients: ', model.coef_)
print('y-axis intercept: ', model.intercept_)
# Note: we have to transform our min and max values
# This is the required format for `model.predict()`
x_min = np.array([[X.min()]])
x_max = np.array([[X.max()]])
# Calculate the y_min and y_max using model.predict and x_min and x_max
y_min = model.predict(x_min)
y_max = model.predict(x_max)
# +
# Plot X and y using plt.scatter
# Plot the model fit line using [x_min[0], x_max[0]], [y_min[0], y_max[0]]
plt.scatter(X, y, c='blue')
plt.plot([x_min[0], x_max[0]], [y_min[0], y_max[0]], c='red')
plt.xlabel("Unemployment Rate")
plt.ylabel("Median Income")
# +
from sklearn.metrics import mean_squared_error, r2_score
# Use our model to make predictions
predicted = model.predict(X_test)
# Score the predictions with mse and r2
mse = mean_squared_error(y_test, predicted)
r2 = r2_score(y_test, predicted)
print(f"Mean Squared Error (MSE): {mse}")
print(f"R-squared (R2 ): {r2}")
model.score(X_test, y_test)
# -
| .ipynb_checkpoints/NetMigrationLinearRegressionNoDummies-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Standard data science libraries
import pandas as pd
import numpy as np
from scipy import stats
import featuretools as ft
# Visualization
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('bmh')
# Options for pandas
pd.options.display.max_columns = 20
# Display all cell outputs
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
# +
from IPython.core.magic import (Magics, magics_class, line_magic,
cell_magic, line_cell_magic)
# The class MUST call this class decorator at creation time
@magics_class
class MyMagics(Magics):
    # Demo magics showing the three registration styles: line-only,
    # cell-only, and line-or-cell.

    @line_magic
    def lmagic(self, line):
        "my line magic"
        # self.shell is the running InteractiveShell instance.
        print("Full access to the main IPython object:", self.shell)
        print("Variables in the user namespace:", list(self.shell.user_ns.keys()))
        return line

    @cell_magic
    def cmagic(self, line, cell):
        "my cell magic"
        return line, cell

    @line_cell_magic
    def lcmagic(self, line, cell=None):
        "Magic that works both as %lcmagic and as %%lcmagic"
        # cell is None exactly when invoked with a single `%` prefix.
        if cell is None:
            print("Called as line magic")
            return line
        else:
            print("Called as cell magic")
            return line, cell
# In order to actually use these magics, you must register them with a
# running IPython.
def load_ipython_extension(ipython):
    """
    Any module file that defines a function named `load_ipython_extension`
    can be loaded via `%load_ext module.path` or be configured to be
    autoloaded by IPython at startup time.
    """
    # You can register the class itself without instantiating it. IPython will
    # call the default constructor on it.
    ipython.register_magics(MyMagics)
# -
# %my_line_magic
| slack_interaction/.ipynb_checkpoints/Untitled-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 1. Setup
import sys
sys.path.append('..')
# +
import config
import matplotlib.pyplot as plt
import numpy as np
import os
import skimage
import warnings
from dataset_utils import *
from preprocessing import crop_image, crop_images_and_save_all
from vis_utils import grid_vis_for_crop_and_merge
# +
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
warnings.filterwarnings('ignore')
# -
ICOSEG_SUBSET_80_PATH = '../datasets/icoseg/subset_80'
# ## 2. Crop images and save all
# %%time
crop_images_and_save_all(load_icoseg_subset_80_with_img_names(),
dataset_path=ICOSEG_SUBSET_80_PATH,
img_format='png',
patch_h=config.INPUT_SHAPE[0],
patch_w=config.INPUT_SHAPE[1],
append_img_h_w=True)
| 2018-2019/project/utils/crop_images_and_save_all.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Creating ASAs and NFTs
#
# Here we'll create some quick, helpful functions to create and manage our own ASAs.
# These could be used for a whole range of things, including (but not limited to) NFTs.
#
# We'll also cover how to upload and pin to the IPFS via Pinata and how to include [ARC-69](https://github.com/algokittens/arc69) metadata when creating your assets.
#
# By the end of this you should be able to start building your own tools for creating and managing ASAs and NFTs.
# To begin, we'll import the required algorand methods as well as our environment we set up [here](./env_file.md), requests (which we'll use to connect to Pinata), hashlib for creating the hash of our file, json for ARC-69 and the sleep method for waiting on the algorand network.
#
# We'll then create the algorand connections we need, and get the key and address from our mnemonic. If all works, you should see your algorand address printed at the end.
from algosdk.v2client import algod
from algosdk import mnemonic, account
from algosdk.future.transaction import AssetConfigTxn
import env
import requests
import hashlib
import json
from time import sleep
ac = algod.AlgodClient(env.API_TOKEN, env.ALGOD_ADDRESS, env.API_HEADERS)
private_key = mnemonic.to_private_key(env.MY_PHRASE)
sender_address = account.address_from_private_key(private_key)
sender_address
# The algorand network accepts 6 different types of transactions.
# You've probably used `pay` (the payment transaction for sending algo) and `axfer` (the asset transfer transaction for sending any other kind of asset).
# To create and manage our assets, we're going to use a transaction called `acfg`.
# The Asset Configuration Transaction.
#
# Fortunately the python library makes this really simple.
#
# Let's create our own function, to create an asset
def create_asset(total, decimals, **kwargs):
    """Create an Algorand Standard Asset (ASA) and return the transaction id.

    total -- number of base (indivisible) units to mint
    decimals -- number of decimal places used when displaying the asset
    kwargs -- overrides/extras forwarded to AssetConfigTxn
              (e.g. asset_name, unit_name, url, metadata_hash, note)

    Uses the module-level `sender_address`, `ac` (algod client) and
    `private_key` to build, sign and submit the transaction.
    """
    arguments = {
        "sender": sender_address,
        # These are just some default details that the transaction requires
        "sp": ac.suggested_params(),
        # Total is the number of divisible units
        # Decimals is the number of decimal places
        # For instance, if total is 100 and decimal is 1
        # Then you'd have 10.0 assets.
        "total": total,
        "decimals": decimals,
        # When DefaultFrozen is true, holders won't be able to transfer
        # the asset out of their wallet without them being unfrozen first
        "default_frozen": False,
        # These are who has the ability to do certain things with the asset
        # Often you want them to be the creator of the asset
        # But in many cases, you probably want at least freeze and clawback
        # to be "" so your holders can be confident you won't mess with their
        # collection.
        "manager": sender_address,
        "reserve": sender_address,
        "freeze": sender_address,
        "clawback": sender_address,
        # This is just protection to make sure you don't accidentally unset
        # any of the above properties, locking out access
        "strict_empty_address_check": False,
        # We can override any settings, or provide those we haven't used yet
        # by using kwargs when calling the method.
        # E.g. create_asset(1, 0, asset_name="My Cool NFT", freeze="", clawback="")
        **kwargs
    }
    # Actual config transaction
    txn = AssetConfigTxn(**arguments)
    # Sign it, so the blockchain is confident we're the ones who sent it
    signed_txn = txn.sign(private_key)
    # Send it! Create the asset!
    txid = ac.send_transaction(signed_txn)
    return txid
create_txid = create_asset(10000000, 2, asset_name="My Cool ASA", unit_name="COOL", freeze="", clawback="")
create_txid
# Now we've got the transaction that's been used to create our asset, but we don't have
# the asset itself.
# Lets create another method that can find a created asset using the indexer.
def find_asset_from_create_tx(txid):
    """Wait for an asset-create transaction to confirm, then return its
    asset info dict from the algod client.

    NOTE(review): this busy-waits with no timeout — if the transaction is
    never confirmed (e.g. rejected from the pool) it loops forever;
    consider capping the retries.
    """
    # First, wait until our transaction has been confirmed
    while True:
        tx_info = ac.pending_transaction_info(txid)
        if 'confirmed-round' in tx_info:
            break
        sleep(1)
    # The confirmed create tx carries the id of the newly minted asset.
    assetId = tx_info['asset-index']
    return ac.asset_info(assetId)
find_asset_from_create_tx(create_txid)
# ## Creating NFTs
#
# Now we've got some helpful tools for creating our ASAs,
# it becomes pretty trivial to use them for creating an NFT.
# Just set the `url` and the optional `metadata_hash` to point to the file you want to track.
#
# > A quick note about `metadata_hash`. I've seen a lot of people (myself included) use the content identifier from the IPFS URI as the `metadata_hash` but, [they're two different things](https://docs.ipfs.io/concepts/hashing/#content-identifiers-are-not-file-hashes).
# >
# > The file hash should be determined by hashing the file itself, using something like python's `hashlib` library. We'll do that here to demonstrate.
#
# Lets say we want to create an NFT out of this ground breaking piece of digital media.
# A black pixel:
#
# 
#
# For now, I've manually uploaded it to pinata, but we'll go through automating the upload later.
# It's located at:
#
# ```
# ipfs://QmVGs9MZxq4sh3boTJwqrZkNA6uhWZBcthbEAc2d5G37g1
# ```
#
# We'll also ignore the hash for now, cause we'll automate it later too.
#
# To create an NFT, we just have to create the asset as before and assign the URL:
create_txid = create_asset(
total=1,
decimals=0,
asset_name="My Cool NFT",
freeze="",
clawback="",
url="ipfs://QmVGs9MZxq4sh3boTJwqrZkNA6uhWZBcthbEAc2d5G37g1"
)
my_cool_nft = find_asset_from_create_tx(create_txid)
my_cool_nft
# Easy as that.
#
# ### Automating upload and metadata-hash
#
# Now, lets also handle uploading the NFT, and creating some hash_metadata for it.
def upload_to_pinata(file):
    """Upload *file* to IPFS via Pinata's pinFileToIPFS endpoint.

    Returns the parsed JSON response (contains 'IpfsHash').
    Raises requests.HTTPError on a non-2xx response.
    Authentication uses the module-level env.PINATA_JWT bearer token.
    """
    url = 'https://api.pinata.cloud/pinning/pinFileToIPFS'
    # open the file in binary read mode, otherwise we might get some weird
    # behaviour with requests
    with open(file, 'rb') as f:
        # Send it to the endpoint... simple as that. Requests makes this really
        # easy.
        r = requests.post(
            url,
            files={'file': f},
            headers={'Authorization': f"Bearer {env.PINATA_JWT}"}
        )
    # If something went wrong, raise an error, otherwise just return the data
    # we want
    r.raise_for_status()
    return r.json()
def get_hash(file):
    """Return the SHA-256 digest (raw bytes) of *file*'s contents.

    Streams the file in 4 KiB chunks so large files never need to be
    held in memory all at once.
    """
    digest = hashlib.sha256()
    with open(file, 'rb') as fh:
        while chunk := fh.read(4096):
            digest.update(chunk)
    return digest.digest()
# And so it's now just simply a matter of passing that file to these methods and adding the return to our Asset Management Transaction.
pin = upload_to_pinata('../a-black-pixel.png')
pin['IpfsHash']
# Note, it's the same as above, because IPFS recognises it as a duplicate
file_hash = get_hash('../a-black-pixel.png')
file_hash
create_txid = create_asset(
total=1,
decimals=0,
asset_name="My Cool Automated NFT",
freeze="",
clawback="",
url=f"ipfs://{pin['IpfsHash']}",
metadata_hash=file_hash
)
my_cool_automated_nft = find_asset_from_create_tx(create_txid)
my_cool_automated_nft
# + [markdown] tags=[]
# ## ARC-69
#
# [ARC-69](https://github.com/algokittens/arc69) is a proposal by [AlgoKittens](https://github.com/algokittens) to track metadata along with your NFTs.
# It's supported in a range of places, including Rand Gallery, where you can see an NFT's "Attributes" or "Traits".
#
# The general summary is that you append the metadata to the notes field of an Asset Configuration Transaction. As such, you can add the metadata at creation, or update it later, as long as you still have the `manager` property set on the asset.
#
# The metadata is stored in the json format, which may look something like this:
#
# ```json
# {
# "standard": "arc69",
# "description": "An NFT we created as a demo",
# "media_url": "ipfs://QmVGs9MZxq4sh3boTJwqrZkNA6uhWZBcthbEAc2d5G37g1",
# "external_url": "https://github.com/sjk0-9",
# "mime_type":"image/png",
# "attributes": [
# {
# "trait_type": "Color",
# "value": "Black"
# },
# {
# "trait_type": "Size",
# "value": "Smallest"
# },
# {
# "trait_type": "Rarity",
# "value": 100
# }
# ]
# }
# ```
#
# Since we can apply this to previous assets we've made, lets create a new method that lets us update our assets to apply the metadata.
# -
def update_asset(assetId, **kwargs):
    """Reconfigure an existing asset and return the transaction id.

    assetId -- index of the asset to update
    kwargs -- fields to change (e.g. note=..., or new role addresses)

    Requires that the sending account is still the asset's manager.
    """
    # More or less same as above, though we need to add the assetId we're updating.
    arguments = {
        "index": assetId,
        "sender": sender_address,
        # These are just some default details that the transaction requires
        "sp": ac.suggested_params(),
        # If we don't send these through, that's actually a delete command.
        # It's ok if you've unset any fields, you can't overwrite those
        "manager": sender_address,
        "reserve": sender_address,
        "freeze": sender_address,
        "clawback": sender_address,
        **kwargs
    }
    # Actual config transaction
    txn = AssetConfigTxn(**arguments)
    # Sign it, so the blockchain is confident we're the ones who sent it
    signed_txn = txn.sign(private_key)
    # Send it! Update the asset!
    txid = ac.send_transaction(signed_txn)
    return txid
update_asset(my_cool_automated_nft['index'], note='Hello World!')
# If you take a look at that in algoexplorer, you'll see the transaction with the note attached.
# Let's make the note actually fulfill the ARC69 requirements though
# +
def create_arc69_note(
    description=None, external_url=None, media_url=None, mime_type=None, attributes=None
):
    """Build an ARC-69 metadata note for an asset-config transaction.

    Any argument left as None is omitted from the output. Returns the
    metadata as compact UTF-8 JSON bytes, since the note field is limited
    to 1000 bytes.
    """
    fields = [
        ("standard", "arc69"),
        ("description", description),
        ("external_url", external_url),
        ("media_url", media_url),
        ("mime_type", mime_type),
        ("attributes", attributes),
    ]
    # Keep only the fields that were actually provided, in ARC-69 order.
    metadata = {key: value for key, value in fields if value is not None}
    # Compact separators (no spaces) to save on the 1000-byte note budget.
    return json.dumps(metadata, separators=(",", ":")).encode()
# Small helper so attribute dicts don't have to be typed out by hand
# for every trait.
def arc69_attr(trait_type=None, value=None, **kwargs):
    """Format one ARC-69 attribute entry, dropping any None-valued fields."""
    entry = {"trait_type": trait_type, "value": value, **kwargs}
    return {key: val for key, val in entry.items() if val is not None}
# -
arc69_metadata = create_arc69_note(
description='An NFT we created as a demo',
media_url="ipfs://QmVGs9MZxq4sh3boTJwqrZkNA6uhWZBcthbEAc2d5G37g1",
external_url="https://github.com/sjk0-9",
mime_type="image/png",
attributes=[
arc69_attr('Color', 'Grey'),
arc69_attr('Size', 'Smallest'),
arc69_attr('Rarity', 100)
]
)
arc69_metadata
# And so with this, we can either create a new asset and put the metadata in the note:
create_txid = create_asset(
total=1,
decimals=0,
asset_name="My Cool ARC-69 NFT",
freeze="",
clawback="",
url=f"ipfs://{pin['IpfsHash']}",
metadata_hash=file_hash,
note=arc69_metadata
)
my_cool_arc69_nft = find_asset_from_create_tx(create_txid)
my_cool_arc69_nft
# Or add it to an existing asset:
update_asset(my_cool_automated_nft['index'], note=arc69_metadata)
# ## Congratulations!
#
# If you've been able to follow along with this guide, you should have everything you need to start programatically creating and publishing your own NFTs.
#
# Ideas on how you could build off of this:
#
# * Read [line by line through a CSV](https://docs.python.org/3/library/csv.html#csv.DictReader) and batch create/update assets.
# * Use a tool such as [nft-generator-py](https://github.com/Jon-Becker/nft-generator-py) or [HashLips](https://github.com/HashLips/hashlips_art_engine) to generate NFTs to then batch publish.
#
# I'd love to see what you've created with this.
# Feel free to get in touch, my twitter is [sjk0_9](https://twitter.com/sjk0_9).
#
# If you've got any feedback or improvements, please let me know!
| src/creating_asas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## What's New
#
# ### Summary
#
# Brought to you by popular demand, this edition of `made-with-gs-quant` will showcase some of our newest features and capabilities. Going forward, look out for a `What's New` section in `made_with_gs_quant` that will highlight new additions.
#
# The content of this notebook is split into:
# * [1 - Let's get started with gs quant](#1---Let's-get-started-with-gs-quant)
# * [2 - Cash flows](#2---Cash-flows)
# * [3 - Portfolios](#3---Portfolios)
# * [4 - Scenarios](#4---Scenarios)
# * [5 - Compute](#5---Compute)
# ### 1 - Let's get started with gs quant
# Start every session with authenticating with your unique client id and secret. If you don't have a registered app, create one [here](https://marquee.gs.com/s/developer/myapps/register). `run_analytics` scope is required for the functionality covered in this example. Below produced using gs-quant version 0.8.115.
from gs_quant.session import GsSession
GsSession.use(client_id=None, client_secret=None, scopes=('run_analytics',))
# ### 2 - Cash flows
#
# Let's start with something small but impactful - ability to model future cashflows. You can simply use `CashFlows` as any of the other risk measures to get a dataframe of future cashflows.
# +
from gs_quant.risk import Cashflows
from gs_quant.common import Currency, PayReceive
from gs_quant.instrument import IRSwaption
swaption = IRSwaption(PayReceive.Receive, '10y', Currency.USD, expiration_date='6m')
cf = swaption.calc(Cashflows)
cf.head()
# -
# ### 3 - Portfolios
# I have used `Portfolio` wrappers in my previous analyses, so this one should be somewhat familiar. `Portfolio` makes working with many positions easier and allows risk to be viewed both at position level and aggregate level (more portfolio features like saving in the pipeline..look for it in `What's New` section in the new editions!)
#
# Let's first define a portfolio - I will define a book of swaptions with varying strikes.
# +
from gs_quant.markets.portfolio import Portfolio
from gs_quant.common import Currency, PayReceive
from gs_quant.instrument import IRSwaption
swaptions = [IRSwaption(PayReceive.Receive, '10y', Currency.USD, expiration_date='6m', strike=strike,
name=strike) for strike in ('atm', 'atm-10', 'atm-25', 'atm-50')]
# create portfolio from list of instruments
portfolio = Portfolio(swaptions, name='swaption_port')
# resolve all the instruments in place
portfolio.resolve()
# -
# Now we can work with this portfolio the same way we do with an instrument - that is, we can price it or calculate any of the available risk measures on it.
from gs_quant.risk import DollarPrice, IRVegaParallel
risks = portfolio.calc((DollarPrice, IRVegaParallel))
# With `risks` in hand, we can examine these risks on an instrument level or aggregate level:
# +
# all risks for a specific (in this case, first) instrument in the portfolio
print(risks[portfolio[0]])
# single risk for a specific (in this case, first) instrument in the portfolio
print(risks[portfolio[0]][DollarPrice])
# or...
print(risks[DollarPrice][portfolio[0]])
# aggregate risk for the portfolio
print(risks[IRVegaParallel].aggregate())
# -
# Finally, we can convert our portfolio to a dataframe where each row will represent an instrument.
frame = portfolio.to_frame()
frame.head(2)
# We can append `RiskResults` as columns to this dataframe as we compute additional measures.
# +
import pandas as pd
rks = pd.DataFrame(risks)
rks.index = frame.index
pd.concat([frame, rks], axis=1)
# -
# Note the last two columns in the frame are the risks we appended!
#
# Want to see more portfolio examples? Refer to portfolio [examples here](https://nbviewer.jupyter.org/github/goldmansachs/gs-quant/tree/master/gs_quant/examples/02_portfolio/) and [previous editions](https://nbviewer.jupyter.org/github/goldmansachs/gs-quant/tree/master/gs_quant/made_with_gs_quant/) of `made_with_gs_quant`.
# ### 4 - Scenarios
#
# The team has also invested to build our scenario capabilities. In particular we: (1) simplified `MarketDataShock` scenario (2) added aging or `carry` scenario (3) introduced composite scenarios. Let's look at an example of each.
# ##### Simple Market Shock
#
# `MarketDataShockBasedScenario` is designed to allow users to shock any kind of market data. In this example we will shock the spot by 1bp. Note while before you had to specify each of the underlying curves used to build spot curve, you can now simply indicate `('IR', 'USD')`
# +
from gs_quant.risk import MarketDataShockBasedScenario, MarketDataPattern, MarketDataShock, MarketDataShockType
ir_spot_scenario = MarketDataShockBasedScenario(
shocks={
MarketDataPattern('IR', 'USD'): MarketDataShock(MarketDataShockType.Absolute, 1e-4), # 1 bp shock
MarketDataPattern('IR Reset', 'USD-3m'): MarketDataShock(shock_type=MarketDataShockType.Absolute, value=1e-4),
})
with ir_spot_scenario:
spot_shock = portfolio.price()
print('Base price: {:,.2f}'.format(risks[DollarPrice].aggregate())) # or portfolio.price().aggregate()
print('Spot shock price: {:,.2f}'.format(spot_shock.aggregate()))
# -
# ##### Carry Scenario
#
# The `CarryScenario` can be used to move the pricing date into the future. In order to do this we do two things: (1) project the market data forward in time (either keeping spot rates constant or fwd rates constant - more on this in a bit) to the new pricing date and (2) lifecycle the trades so that any fixings or expiries that happen between today’s date and the target future date have been handled.
# +
from gs_quant.markets import PricingContext
from gs_quant.risk import CarryScenario
carry_scenario = CarryScenario(time_shift=22)
with carry_scenario:
carry_shock = portfolio.price()
print('Base price: {:,.2f}'.format(risks[DollarPrice].aggregate())) # portfolio.price().aggregate()
print('Carry shock price: {:,.2f}'.format(carry_shock.aggregate()))
# -
# ##### Combined Scenarios
#
# We can now combine these two scenarios to produce a 'carry then bump' scenario (note all permutations aren't supported yet but are underway):
# +
with carry_scenario, ir_spot_scenario:
carry_then_bump_shock = portfolio.price()
print('Base price: {:,.2f}'.format(risks[DollarPrice].aggregate())) # portfolio.price().aggregate()
print('Carry then bump price: {:,.2f}'.format(carry_then_bump_shock.aggregate()))
# -
# More on scenario examples in the [examples folder](https://nbviewer.jupyter.org/github/goldmansachs/gs-quant/tree/master/gs_quant/examples/04_scenario/) in the gs-quant repo.
# ### 5 - Compute
#
# Finally to optimize both speed and performance we have improved our async and batch framework.
#
# In a nutshell, `async` allows the requests to be processed in parallel and `batch` allows them to be grouped together and avoid timeouts for particularly large requests.
#
# Additionally, multiple contexts can be controlled by a 'master' `PricingContext` to batch requests together in order to also avoid timeouts and sending too many requests. For example of this please see [`4-Delta Hedging`](https://nbviewer.jupyter.org/github/goldmansachs/gs-quant/blob/master/gs_quant/made_with_gs_quant/4-Delta%20Hedging.ipynb) notebook.
#
# For now, let's look at a simpler example using the scenarios we discussed in the previous section. Here we will look at how the price of our portfolio evolved as we move it 21 days forward in time both keeping spot rates constant (`roll_to_fwds=False`) and fwd rates constant (`roll_to_fwds=True`). Let's use a `PricingContext` with batch mode to organize these requests.
# +
from gs_quant.markets import PricingContext
# note nest_asyncio required when using is_batch=True
import nest_asyncio
nest_asyncio.apply()
roll_to_spot = []
roll_to_fwd = []
# Outer batch context groups all 44 pricing requests into one submission;
# inner async contexts let the requests run in parallel.
with PricingContext(is_batch=True):
    for time_shift in range(22):
        # NOTE(review): roll_to_fwds=True results are appended to
        # roll_to_spot (and vice versa) — looks swapped relative to the
        # markdown above; confirm the intended CarryScenario semantics.
        with PricingContext(is_async=True), CarryScenario(time_shift, roll_to_fwds=True):
            roll_to_spot.append(portfolio.price())
        with PricingContext(is_async=True), CarryScenario(time_shift, roll_to_fwds=False):
            roll_to_fwd.append(portfolio.price())
# -
# Let's now visualize the result:
# +
import matplotlib.pyplot as plt
import numpy as np
# Plot both roll paths on one axis; `pd` is imported earlier in the notebook.
pd.Series([r.aggregate() for r in roll_to_spot], name='Roll to Spot', dtype=np.dtype(float)).plot(figsize=(10, 6))
pd.Series([r.aggregate() for r in roll_to_fwd], name='Roll to Fwd', dtype=np.dtype(float)).plot(figsize=(10, 6))
plt.xlabel('TimeShift')
plt.ylabel('PV')
# -
# That's all for this edition - please keep sharing your feedback on what would make `gs-quant` even more useful for you as well as suggestions for `made_with_gs_quant` you would like to see in the future.
#
# Happy coding!
| gs_quant/made_with_gs_quant/5-What's New.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import glob
# +
Input_dir = 'data_raw_all'
Output_dir = 'data_raw_all_rename'

files = glob.glob(Input_dir + '/*.*')
x_data = []
y_data = []

# Make sure the destination directory exists before renaming into it.
os.makedirs(Output_dir, exist_ok=True)

for aktfile in files:
    # Source names look like: zeiger_rv00_va0_u1_2019-06-04T044009.153Z
    # Target pattern:         5.0_zeiger2_2019-09-14_13-10-12.jpg
    base = os.path.basename(aktfile)
    # Fixed-position slicing of the source filename layout
    # (assumes every file follows the pattern above — TODO confirm).
    t1 = base[9:10]       # integer digit of the reading
    t2 = base[10:11]      # fractional digit of the reading
    zeiger = base[17:18]  # pointer (gauge) index
    rest = base[19:36]    # timestamp portion
    # BUG FIX: the original built the new path with Input_dir (Output_dir was
    # defined but never used) and a hard-coded Windows '\\' separator.
    # Use os.path.join with Output_dir so the files actually land in the
    # rename directory and the script is portable.
    new_name = os.path.join(Output_dir, t1 + '.' + t2 + '_zeiger' + zeiger + '_' + rest + '.jpg')
    os.rename(aktfile, new_name)
    # print(aktfile)
    # print(new_name)
# -
| Rename.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _cell_guid="9ea552dc-2ab8-a0ac-dda5-dd7c385a1d3a"
# # Introduction
# Here we process the data from the electron microscopy 3D dataset, the first lines automatically download it if you have Kaggle setup otherwise you can download the dataset [here](https://www.kaggle.com/kmader/electron-microscopy-3d-segmentation/data)
# -
import os
# Download the dataset via the Kaggle CLI only when it is not already present.
# The '# !' lines are Jupyter shell escapes from the original notebook; in this
# plain-Python (jupytext) representation they are comments, so the branch needs
# an explicit `pass` to be syntactically valid.
if not os.path.exists('input'):
    # !kaggle datasets download -d kmader/electron-microscopy-3d-segmentation -wp emdata
    # !mkdir input
    # !mv emdata/* input
    # !cd input/ && unzip *.zip
    pass
# + _cell_guid="f7ca9567-9c3e-f6a9-075c-216307025533"
import matplotlib.patches as mpatches # for showing rectangles and annotations
from skimage.color import label2rgb # for making overlay plots
import numpy as np # for matrix operations and array support
from skimage.measure import regionprops # for shape analysis
from skimage.measure import label # for labeling regions
import matplotlib.pyplot as plt # for showing plots
from skimage.io import imread # for reading images
# %matplotlib inline
# + [markdown] _cell_guid="ffbd3d02-e49a-23e9-7789-b91f88cff3cf"
# # Connected Component Labeling
# scikit-image has basic support for [connected component labeling](http://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.label) and we can do some small demos with the label function and small test images, before moving onto bigger datasets.
#
# ## Neighborhood
# In the course we use the term neighborhood and here we use the term ```connectivity``` for the same idea.
#
# - For a 2D image, connectivity = 1 is just the 4-neighborhood (or pixels that share an edge); connectivity = 2 is then the 8-neighborhood (or pixels that share a vertex).
# + _cell_guid="bc5c886a-18a9-3abf-7b51-ea8df86530af"
# simple test image diagonal
test_img = np.eye(4)
print('Input Image')
print(test_img)
test_label_4 = label(test_img, connectivity=1)
print('Labels with 4-neighborhood')
print(test_label_4)
test_label_8 = label(test_img, connectivity=2)
print('Labels with 8-neighborhood')
print(test_label_8)
# + [markdown] _cell_guid="ca54e2df-8d90-3ef8-2f96-dd7d1ba4ab1a"
# ## 3D Neighborhood
#
# For a 3D image, connectivity = 1 is just the 6-neighborhood (voxels that share a face); connectivity = 2 is then voxels that share an edge, and 3 is voxels that share a vertex.
# + _cell_guid="a49571af-5e81-6c7f-6354-336e4968b0ac"
# 3x3x3 test volume: foreground voxels at two opposite corners and the center,
# so face/edge/vertex sharing each produce a different labeling.
test_img = np.array(
    [int(x in (0, 13, 26)) for x in range(27)]).reshape((3, 3, 3))
print('Input Image')
print(test_img)
test_label_1, test_label_2, test_label_3 = (
    label(test_img, connectivity=c) for c in (1, 2, 3))
for title, labels in (('Face-sharing', test_label_1),
                      ('Edge-Sharing', test_label_2),
                      ('Vertex-Sharing', test_label_3)):
    print('Labels with ' + title)
    print(labels)
# + _cell_guid="bca1bf58-37f9-db1f-4444-8960a7d4b0cd"
import os
import numpy as np


def imread_or_invent(in_path):
    """Load the TIFF stack at *in_path*, or synthesize a stand-in volume.

    When the file is missing, falls back to reproducible random data
    (re-seeded on every call) so the rest of the notebook still runs without
    the dataset: paths containing 'groundtruth' get a sparse binary volume,
    everything else gets uniform noise of the same shape.
    """
    np.random.seed(2018)  # identical fake data on every call
    if os.path.exists(in_path):
        return imread(in_path)
    print('Getting creative...')
    fake_shape = (10, 50, 75)
    noise = np.random.uniform(0, 1, size=fake_shape)
    if 'groundtruth' in in_path:
        # ~1% of voxels set to 1 mimics a sparse segmentation mask
        return (noise > 0.99).astype(int)
    return noise


em_image_vol = imread_or_invent('input/training.tif')
em_thresh_vol = imread_or_invent('input/training_groundtruth.tif')
print("Data Loaded, Dimensions", em_image_vol.shape, '->', em_thresh_vol.shape)
# + [markdown] _cell_guid="160081b9-ab62-e268-50f5-6d34053f79d8"
# # 2D Analysis
# Here we work with a single 2D slice to get started and take it randomly from the middle
# + _cell_guid="be4e88a3-8b8c-9fa0-acd9-8297b8e282df"
# Pick one random z-slice index from the volume (permutation of all indices,
# take the first) and pull the matching image and ground-truth slices.
em_idx = np.random.permutation(range(em_image_vol.shape[0]))[0]
em_slice = em_image_vol[em_idx]
em_thresh = em_thresh_vol[em_idx]
print("Slice Loaded, Dimensions", em_slice.shape)
# + _cell_guid="d3fe48c8-ebb3-af65-777a-c8d638946072"
# show the slice and threshold
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(9, 4))
ax1.imshow(em_slice, cmap='gray')
ax1.axis('off')
ax1.set_title('Image')
ax2.imshow(em_thresh, cmap='gray')
ax2.axis('off')
ax2.set_title('Segmentation')
# here we mark the threshold on the original image
ax3.imshow(label2rgb(em_thresh, em_slice, bg_label=0))
ax3.axis('off')
ax3.set_title('Overlayed')
# + _cell_guid="574c1696-a1fc-2267-db34-ec0b4b310672"
# make connected component labels
em_label = label(em_thresh)
print(em_label.max(), 'number of labels')
# show the segmentation, labels and overlay
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(9, 4))
ax1.imshow(em_thresh, cmap='gray')
ax1.axis('off')
ax1.set_title('Segmentation')
ax2.imshow(em_label, cmap=plt.cm.gist_earth)
ax2.axis('off')
ax2.set_title('Labeling')
# here we mark the threshold on the original image
ax3.imshow(label2rgb(em_label, em_slice, bg_label=0))
ax3.axis('off')
ax3.set_title('Overlayed')
# + [markdown] _cell_guid="cfabd782-de43-ea00-856a-b0cbb755ca1a"
# # Shape Analysis
# For shape analysis we use the regionprops function which calculates the area, perimeter, and other features for a shape. The analysis creates a list of these with one for each label in the original image.
# + _cell_guid="f924cdaf-2d55-5089-51b9-8aa6ec820a3b"
# regionprops returns one RegionProperties object per labeled region
shape_analysis_list = regionprops(em_label)
first_region = shape_analysis_list[0]
print('List of region properties for', len(shape_analysis_list), 'regions')
# dir() of a region lists the available measurements (area, bbox, ...)
print('Features Calculated:', ', '.join(
    [f for f in dir(first_region) if not f.startswith('_')]))
# + _cell_guid="5cc2bacc-26d5-e55a-8e64-caadaf3010d8"
# Draw each region's bounding box on top of the labeled overlay.
fig, ax = plt.subplots(figsize=(10, 6))
ax.imshow(label2rgb(em_label, em_slice, bg_label=0))
for region in shape_analysis_list:
    # draw rectangle using the bounding box
    # (bbox is (min_row, min_col, max_row, max_col); Rectangle wants (x, y))
    minr, minc, maxr, maxc = region.bbox
    rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                              fill=False, edgecolor='red', linewidth=2)
    ax.add_patch(rect)
ax.set_axis_off()
plt.tight_layout()
# + [markdown] _cell_guid="4b59cd5a-2c6f-75eb-a90a-993dd484d1be"
# ## Anisotropy
# We can calculate anisotropy as we did in the course by using the largest and shortest lengths, called here as ```major_axis_length``` and ```minor_axis_length``` respectively
#
# - Try using different formulas for anisotropy to see how it changes what is shown
#
# $$ Aiso1 = \frac{\text{Longest Side}}{\text{Shortest Side}} - 1 $$
#
# $$ Aiso2 = \frac{\text{Longest Side}-\text{Shortest Side}}{\text{Longest Side}} $$
#
# $$ Aiso3 = \frac{\text{Longest Side}}{\text{Average Side Length}} - 1 $$
#
# $$ Aiso4 = \frac{\text{Longest Side}-\text{Shortest Side}}{\text{Average Side Length}} $$
# + _cell_guid="3954c870-771c-7c9a-ca4b-6ed302ff5d67"
fig, ax = plt.subplots(figsize=(10, 6))
ax.imshow(label2rgb(em_label, em_slice, bg_label=0))
for region in shape_analysis_list:
    x1 = region.major_axis_length
    x2 = region.minor_axis_length
    # (long - short) / (long + short); clip guards against division by zero
    # for degenerate (single-pixel) regions
    anisotropy = (x1-x2)/np.clip(x1+x2, 0.1, 9999)
    # for anisotropic shapes use red for the others use blue
    # NOTE(review): the comment says blue but the code draws green below
    print('Label:', region.label, 'Anisotropy %2.2f' % anisotropy)
    if anisotropy > 0.1:
        minr, minc, maxr, maxc = region.bbox
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=2)
        ax.add_patch(rect)
    else:
        minr, minc, maxr, maxc = region.bbox
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='green', linewidth=2)
        ax.add_patch(rect)
ax.set_axis_off()
plt.tight_layout()
# + [markdown] _cell_guid="fcfdd250-2824-6ba1-6cf3-1698ef431733"
# # Tasks
# - Perform the analysis in 3D
# - Find the largest and smallest structures
# - Find the structures with the highest and lowest 3D anisotropy
# + _cell_guid="aa1033ba-0c78-71f9-4718-518f6e7ca155"
| Exercises/06-Exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Recommendations with IBM
#
# This notebook produces recommendations for users from real data on the IBM Watson Studio platform.
#
#
# ## Table of Contents
#
# I. [Exploratory Data Analysis](#Exploratory-Data-Analysis)<br>
# II. [Rank Based Recommendations](#Rank)<br>
# III. [User-User Based Collaborative Filtering/Content based](#User-User)<br>
# IV. [Matrix Factorization](#Matrix-Fact)<br>
# V. [Extras & Concluding](#conclusions)
#
# +
import sys
sys.path.insert(0, './tests')
sys.path.insert(0, './data')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import project_tests as t
import pickle
# %matplotlib inline
# User-article interaction events (one row per view)
df = pd.read_csv('data/user-item-interactions.csv')
# Article metadata / full content on the IBM platform
df_content = pd.read_csv('data/articles_community.csv')
# Drop the unnamed CSV index columns carried over from the export
del df['Unnamed: 0']
del df_content['Unnamed: 0']
# Show df to get an idea of the data
df.head()
# -
# Show df_content to get an idea of the data
df_content.head()
# ### <a class="anchor" id="Exploratory-Data-Analysis">Part I : Exploratory Data Analysis</a>
#
#
# +
# Basic size / distribution statistics for the interactions and content frames.
num_articles = len(df.article_id.unique())
print('Number of unique articles = {}'.format(num_articles))
num_rows = df.shape[0]
print('Number of rows in df = {}'.format(num_rows))
num_articles_content = len(df_content.article_id.unique())
print('Number of unique articles in df_content = {}'.format(num_articles_content))
num_rows_content = df_content.shape[0]
print('Number of rows in df_content = {}'.format(num_rows_content))
# Cast to int so the two id columns compare cleanly
df_unique = df.article_id.astype(int).unique()
df_content_unique = df_content.article_id.astype(int).unique()
print('Unique number of users = {}'.format(len(df.email.unique())))
# Overlap: articles present in both the interaction log and the content frame
num = len(df_content_unique[np.isin(df_content_unique, df_unique)])
print('Number of unique articles in interactions and community = {}'.format(num))
print('Mean article interactions = {}'.format(df.article_id.value_counts().mean()))
print('Median article interactions = {}'.format(df.article_id.value_counts().median()))
print('Mean user interactions = {}'.format(df.email.value_counts().mean()))
print('Median user interactions = {}'.format(df.email.value_counts().median()))
print('Top interactors\n')
print(df.email.value_counts().head(15))
print('Top articles\n')
print(df.article_id.value_counts().head(15))
df.email.value_counts().median()
# +
# Fill in the median and maximum number of user-article interactions below
median_val =df.email.value_counts().median() # 50% of individuals interact with this many articles or fewer
max_views_by_user = df.email.value_counts().head(1).values[0] # interaction count of the most active user
# -
# `2.` Explore and remove duplicate articles from the **df_content** dataframe.
# Count rows with a duplicated article body
df_content[df_content.doc_body.duplicated()].shape[0]
# Count rows with a duplicated article description
df_content[df_content.doc_description.duplicated()].shape[0]
# Count rows with a duplicated article full name
df_content[df_content.doc_full_name.duplicated()].shape[0]
# Count rows with a duplicated article id
df_content[df_content.article_id.duplicated()].shape[0]
# Remove any rows that have the same article_id - only keep the first
df_content = df_content.drop_duplicates('article_id', keep = 'first')
df_content.shape[0]
# Show the bodies of rows whose description is duplicated
c = df_content[df_content.doc_description.duplicated()].doc_body
c.head(50)
# Headline counts used by the solution-check dictionary further below.
unique_articles = df.article_id.nunique()# The number of unique articles that have at least one interaction
total_articles = df_content.article_id.nunique()# The number of unique articles on the IBM platform
unique_users = df.email.nunique()# The number of unique users
user_article_interactions = df.shape[0]# The number of user-article interactions
# `4.` Finding the most viewed article ID
# Interactions with a missing email still count toward article views
df[df.email.isnull()].head()
df[df.article_id == 1024.0]
# Most viewed article_id
df.article_id.value_counts().sort_values(ascending = False).head(1).index[0]
most_viewed_article_id = df.article_id.value_counts().sort_values(ascending = False).head(1).index.astype(str)[0]# The most viewed article in the dataset as a string with one value following the decimal
max_views = df.article_id.value_counts().sort_values(ascending = False).head(1).values.astype(int)[0]# The most viewed article in the dataset was viewed how many times?
# +
# Run this cell to map the user email to a user_id column and remove the email column
def email_mapper():
coded_dict = dict()
cter = 1
email_encoded = []
for val in df['email']:
if val not in coded_dict:
coded_dict[val] = cter
cter+=1
email_encoded.append(coded_dict[val])
return email_encoded
email_encoded = email_mapper()
del df['email']
df['user_id'] = email_encoded
# show header
df.head()
# +
## If you stored all your results in the variable names above,
## you shouldn't need to change anything in this cell
# Map each quiz prompt to the statistic computed above; project_tests
# compares these against the expected solution.
sol_1_dict = {
    '`50% of individuals have _____ or fewer interactions.`': median_val,
    '`The total number of user-article interactions in the dataset is ______.`': user_article_interactions,
    '`The maximum number of user-article interactions by any 1 user is ______.`': max_views_by_user,
    '`The most viewed article in the dataset was viewed _____ times.`': max_views,
    '`The article_id of the most viewed article is ______.`': most_viewed_article_id,
    '`The number of unique articles that have at least 1 rating ______.`': unique_articles,
    '`The number of unique users in the dataset is ______`': unique_users,
    '`The number of unique articles on the IBM platform`': total_articles
}
# Test your dictionary against the solution
t.sol_1_test(sol_1_dict)
# -
# ### <a class="anchor" id="Rank">Part II: Rank-Based Recommendations</a>
#
# `1.` Produce rank-based recommendations
# Get the 10 most-interacted-with article IDs (value_counts is view counts)
idx = df.article_id.value_counts().sort_values(ascending = False).head(10).index.to_list()
idx
# +
def get_top_articles(n, df=df):
    '''
    INPUT:
    n - (int) the number of top articles to return
    df - (pandas dataframe) df as defined at the top of the notebook

    OUTPUT:
    top_articles - (list) A list of the top 'n' article titles

    '''
    # ids of the n most-viewed articles, then their (deduplicated) titles
    top_ids = df.article_id.value_counts().sort_values(ascending = False).head(n).index
    top_articles = df[df.article_id.isin(top_ids)]['title'].unique()

    return top_articles # Return the top article titles from df (not df_content)
def get_top_article_ids(n, df=df):
    '''
    INPUT:
    n - (int) the number of top articles to return
    df - (pandas dataframe) df as defined at the top of the notebook

    OUTPUT:
    top_articles - (list) A list of the top 'n' article ids

    '''
    # value_counts gives one row per article keyed by id; take the n largest
    ranked = df.article_id.value_counts().sort_values(ascending = False)
    top_articles = ranked.head(n).index.to_list()

    return top_articles # Return the top article ids
# -
# Quick sanity check of the rank-based functions
print(get_top_article_ids(10))
print(get_top_articles(10))
# +
# Test your function by returning the top 5, 10, and 20 articles
top_5 = get_top_articles(5)
top_10 = get_top_articles(10)
top_20 = get_top_articles(20)
# Test each of your three lists from above
t.sol_2_test(get_top_articles)
# -
df
# ### <a class="anchor" id="User-User">Part III: User-User Based Collaborative Filtering</a>
#
#
# `1.` Reformat the **df** dataframe to be shaped with users as the rows and articles as the columns.
#
# * Each **user** should only appear in each **row** once.
#
#
# * Each **article** should only show up in one **column**.
#
#
# * **If a user has interacted with an article, then place a 1 where the user-row meets for that article-column**. It does not matter how many times a user has interacted with the article, all entries where a user has interacted with an article should be a 1.
#
#
# * **If a user has not interacted with an item, then place a zero where the user-row meets for that article-column**.
#
# Use the tests to make sure the basic structure of your matrix matches what is expected by the solution.
# Prototype of the user-item matrix build (the real version is the function
# below). NOTE(review): assigning a column to the slice `d` triggers pandas'
# SettingWithCopyWarning — harmless here as an exploration cell.
d = df[['user_id', 'article_id']]
d['value'] = 1
d = d.groupby(['user_id', 'article_id']).max().unstack()
d = d.fillna(0)
d
# +
# create the user-article matrix with 1's and 0's
def create_user_item_matrix(df):
    '''
    INPUT:
    df - pandas dataframe with article_id, title, user_id columns

    OUTPUT:
    user_item - user item matrix

    Description:
    Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with
    an article and a 0 otherwise
    '''
    # BUG FIX: work on a copy so the new 'value' column never touches the
    # caller's dataframe (the original assigned onto a slice of df, which
    # raises SettingWithCopyWarning and can mutate the source frame).
    interactions = df[['user_id', 'article_id']].copy()
    interactions['value'] = 1
    # max() collapses repeat views to a single 1; unstack pivots article_id
    # into columns under a ('value', article_id) MultiIndex, which later
    # cells rely on (e.g. user_item['value', art_id]).
    user_item = interactions.groupby(['user_id', 'article_id']).max().unstack().fillna(0)

    return user_item # return the user_item matrix
user_item = create_user_item_matrix(df)
# -
## Tests: You should just need to run this cell. Don't change the code.
assert user_item.shape[0] == 5149, "Oops! The number of users in the user-article matrix doesn't look right."
assert user_item.shape[1] == 714, "Oops! The number of articles in the user-article matrix doesn't look right."
assert user_item.sum(axis=1)[1] == 36, "Oops! The number of articles seen by user 1 doesn't look right."
print("You have passed our quick tests! Please proceed!")
# `2.` Function below should take a user_id and provide an ordered list of the most similar users to that user (from most similar to least similar). The returned result should not contain the provided user_id, as we know that each user is similar to him/herself. Because the results for each user here are binary, it (perhaps) makes sense to compute similarity as the dot product of two users.
#
# Use the tests to test your function.
user_item
# +
# Exploring the IDs of the user_item multiindex
# Finding similar articles
# Dot product of binary rows = number of articles two users share
dot_prod_articles = user_item.dot(np.transpose(user_item))
print(dot_prod_articles)
idx = np.where(user_item.index == 5145)[0][0]
print(idx)
print('[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]')
print(dot_prod_articles.iloc[idx])
print('---------------------------------')
print(np.max(dot_prod_articles.iloc[idx]))
# NOTE(review): sim_idx holds *positions* from np.where, but is then used
# with isin() against user_id *values* — only valid if ids equal positions;
# exploration cell, verify before reusing.
sim_idx= np.where(dot_prod_articles.iloc[idx] == np.max(dot_prod_articles.iloc[idx]))[0]
print(sim_idx)
sim_art = np.array(df[df['user_id'].isin(sim_idx)]['title'])
sim_art
# -
def find_similar_users(user_id, user_item=user_item):
    '''
    INPUT:
    user_id - (int) a user_id
    user_item - (pandas dataframe) matrix of users by articles:
                1's when a user has interacted with an article, 0 otherwise

    OUTPUT:
    similar_users - (list) an ordered list where the closest users (largest dot product users)
                    are listed first

    Description:
    Computes the similarity of every pair of users based on the dot product
    Returns an ordered list of user ids, most similar first
    '''
    # find the row position of the requested user
    row = np.where(user_item.index == user_id)[0][0]
    # dot product of binary rows = number of shared articles per user pair
    dot_prod = user_item.dot(np.transpose(user_item))
    # sort most similar first
    # (BUG FIX: removed a leftover debug print of the full similarity series)
    sim = dot_prod.iloc[row].sort_values(ascending = False)
    # keep only users sharing at least one article
    sim = sim[sim.values > 0]
    # remove the user's own id (trivially most similar to themselves)
    most_similar_users = sim.index.drop(user_id).to_list()

    return most_similar_users # return a list of the users in order from most to least similar
# +
# Exploring similar users
idx = np.where(user_item.index == 1)[0][0]
print(idx)
# Recompute the similarity matrix and inspect user 1's neighbors
dot_prod_articles = user_item.dot(np.transpose(user_item))
sim = dot_prod_articles.iloc[idx].sort_values(ascending = False)
print(sim.head())
find_similar_users(1)
# -
dot_prod_articles
# Do a spot check of your function
print("The 10 most similar users to user 1 are: {}".format(find_similar_users(1)[:10]))
print("The 5 most similar users to user 3933 are: {}".format(find_similar_users(3933)[:5]))
print("The 3 most similar users to user 46 are: {}".format(find_similar_users(46)[:3]))
# `3.` Find articles to recommend to similar users
# Exploring the articles for a specific user
# (iloc[3933] is positional; the ('value', article_id) column MultiIndex is
# filtered down to the articles this user interacted with)
user_art = user_item.iloc[3933][user_item.iloc[3933].values == 1].value.index.to_list()
user_art
# +
# Remapping article id from string to float
article_id = ['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0']
# BUG FIX: map() returns a lazy iterator, so the original print showed
# '<map object at ...>' instead of the values; materialize it as a list.
article_id = list(map(float, article_id))
print(article_id)
# +
def get_article_names(article_ids, df=df):
    '''
    INPUT:
    article_ids - (list) a list of article ids
    df - (pandas dataframe) df as defined at the top of the notebook

    OUTPUT:
    article_names - (list) a list of article names associated with the list of article ids
                    (this is identified by the title column)
    '''
    article_names = []
    # float() because ids are sometimes passed around as strings (e.g. '1024.0');
    # loop variable renamed from `id`, which shadowed the builtin
    for art_id in map(float, article_ids):
        # np.unique sorts, so [0] is deterministic if an id has several titles
        title = np.unique(np.array(df[df.article_id.isin([art_id])]['title']))[0]
        article_names.append(str(title))

    return article_names # Return the article names associated with list of article ids
def get_user_articles(user_id, user_item=user_item):
    '''
    INPUT:
    user_id - (int) a user id
    user_item - (pandas dataframe) matrix of users by articles:
                1's when a user has interacted with an article, 0 otherwise

    OUTPUT:
    article_ids - (list) a list of the article ids seen by the user
    article_names - (list) a list of article names associated with the list of article ids
                    (this is identified by the doc_full_name column in df_content)

    Description:
    Provides a list of the article_ids and article titles that have been seen by a user
    '''
    # the user's row is indexed by the ('value', article_id) column MultiIndex;
    # keep only the articles with a 1 and pull out the article_id level
    user_row = user_item.loc[user_id]
    article_ids = user_row[user_row.values == 1].value.index.to_list()
    article_names = get_article_names(article_ids)

    return article_ids, article_names # return the ids and names
def user_user_recs(user_id, m=10):
    '''
    INPUT:
    user_id - (int) a user id
    m - (int) the number of recommendations you want for the user

    OUTPUT:
    recs - (list) a list of recommendations for the user

    Description:
    Loops through the users based on closeness to the input user_id
    For each user - finds articles the user hasn't seen before and provides them as recs
    Does this until m recommendations are found

    Notes:
    Users who are the same closeness are chosen arbitrarily as the 'next' user
    For the user where the number of recommended articles starts below m
    and ends exceeding m, the last items are chosen arbitrarily
    '''
    seen_ids, _seen_names = get_user_articles(user_id)

    # per-neighbor lists of articles our user has not seen yet
    candidate_lists = []
    for neighbor in find_similar_users(user_id):
        neighbor_ids, _names = get_user_articles(neighbor)
        candidate_lists.append(np.setdiff1d(neighbor_ids, seen_ids, assume_unique = True))

    # flatten and deduplicate (np.unique also sorts the ids), then truncate
    all_candidates = np.unique(np.concatenate(candidate_lists, axis=0))
    recommendations = all_candidates[:m]

    return recommendations # return your recommendations for this user_id
# -
# Checking user based collaborative recommendation engine works
get_article_names(user_user_recs(1, 10))
# Check Results
get_article_names(user_user_recs(1, 10)) # Return 10 recommendations for user 1
# Test your functions here - No need to change this code - just run this cell
assert set(get_article_names(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis']), "Oops! Your the get_article_names function doesn't work quite how we expect."
assert set(get_article_names(['1320.0', '232.0', '844.0'])) == set(['housing (2015): united states demographic measures','self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook']), "Oops! Your the get_article_names function doesn't work quite how we expect."
assert set(get_user_articles(20)[0]) == set([232.0, 844.0, 1320.0])
assert set(get_user_articles(20)[1]) == set(['self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook','housing (2015): united states demographic measures'])
assert set(get_user_articles(2)[0]) == set([1024.0, 1176.0, 1305.0, 1314.0, 1422.0, 1427.0])
assert set(get_user_articles(2)[1]) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis'])
print("If this is all you see, you passed all of our tests! Nice job!")
get_article_names(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])
# `4.` Improve the consistency of the **user_user_recs** function from above.
#
# * Instead of arbitrarily choosing when we obtain users who are all the same closeness to a given user - choose the users that have the most total article interactions before choosing those with fewer article interactions.
#
#
# * Instead of arbitrarily choosing articles from the user where the number of recommended articles starts below m and ends exceeding m, choose articles with the articles with the most total interactions before choosing those with fewer total interactions. This ranking should be what would be obtained from the **top_articles** function you wrote earlier.
# +
# Exploring a dataframe with similarity and user_interactions contained for a specific user
# Exploring a dataframe with similarity and user_interactions contained for a specific user
dot_prod_articles = user_item.dot(np.transpose(user_item))
sim = dot_prod_articles.iloc[1].sort_values(ascending = False)
sim = sim.to_frame().reset_index()
sim.columns = ['user_id', 'similarity']
print(sim)
# Total interactions per user, later used to break similarity ties
neighbor_counts = pd.Series(df.user_id.value_counts())
neighbor_counts = neighbor_counts.to_frame().reset_index()
neighbor_counts.columns = ['user_id', 'user_interactions']
print(neighbor_counts)
# Left-join interaction counts onto the similarity ranking
neighbours_df = pd.merge(sim,neighbor_counts[['user_id','user_interactions']],on='user_id', how='left')
print(neighbours_df)
neighbours_df.columns = ['user_id', 'similarity', 'user_interactions']
neighbours_df = neighbours_df.sort_values(['similarity', 'user_interactions'], ascending = False).reset_index()
print(neighbours_df.head())
neighbours_df.user_interactions.value_counts()
# +
# Smaller worked example of the same merge for users 1-3
x = pd.Series(df[df.user_id.isin([1, 2, 3])].user_id.value_counts())
x = x.to_frame().reset_index()
x.columns = ['user_id', 'user_interactions']
print(x)
print(sim[sim.user_id.isin([1,2,3])])
y = pd.merge(sim[sim.user_id.isin([1,2,3])],x[['user_id','user_interactions']],on='user_id', how='left')
y
# +
# Exploring the per-article interaction totals via the user_item matrix
# (columns are a ('value', article_id) MultiIndex)
user_item.columns.levels[1].to_list()
print(user_item.columns)
print(user_item['value', 0.0].sum())
print(user_item.columns[user_item.columns.levels[1] == '0.0'])
# +
def get_top_sorted_users(user_id, df=df, user_item=user_item):
    '''
    INPUT:
    user_id - (int)
    df - (pandas dataframe) df as defined at the top of the notebook
    user_item - (pandas dataframe) matrix of users by articles:
            1's when a user has interacted with an article, 0 otherwise


    OUTPUT:
    neighbors_df - (pandas dataframe) a dataframe with:
                    neighbor_id - is a neighbor user_id
                    similarity - measure of the similarity of each user to the provided user_id
                    num_interactions - the number of articles viewed by the user - if a u

    Other Details - sort the neighbors_df by the similarity and then by number of interactions where
                    highest of each is higher in the dataframe

    '''
    # similarity = dot product of binary interaction vectors
    dot_prod = user_item.dot(np.transpose(user_item))
    sim = dot_prod.loc[user_id].sort_values(ascending = False)
    # first row after the sort is the user themselves (self dot product is
    # maximal — assumed; confirm if similarity ties are possible); drop it
    sim = sim.to_frame().reset_index().drop(0, axis =0)
    sim.columns = ['user_id', 'similarity']

    # total interactions per user, used to break similarity ties
    counts = pd.Series(df.user_id.value_counts()).to_frame().reset_index()
    counts.columns = ['user_id', 'user_interactions']

    # join, then rank by similarity and (as tie-break) by activity
    neighbors_df = pd.merge(sim, counts[['user_id', 'user_interactions']], on='user_id', how='left')
    neighbors_df.columns = ['user_id', 'similarity', 'user_interactions']
    neighbors_df = neighbors_df.sort_values(['similarity', 'user_interactions'], ascending = False).reset_index()
    neighbors_df = neighbors_df.drop('index', axis = 1)

    return neighbors_df # Return the dataframe specified in the doc_string
def user_user_recs_part2(user_id, m=10):
    '''
    INPUT:
    user_id - (int) a user id
    m - (int) the number of recommendations you want for the user

    OUTPUT:
    recs - (numpy array) up to m recommended article ids
    rec_names - a list of recommendations for the user by article title

    Description:
    Loops through the users based on closeness to the input user_id.
    For each user - finds articles the user hasn't seen before and provides
    them as recs. Does this until m recommendations are found.

    Notes:
    * Users with the most total article interactions are consulted first
      (that ordering is supplied by get_top_sorted_users).
    * Within each neighbor, articles with the most total interactions are
      recommended first.
    '''
    recs = []
    seen_ids, _ = get_user_articles(user_id)
    seen = set(seen_ids)
    for neighbor_id in get_top_sorted_users(user_id).user_id:
        neighbor_art_ids, _ = get_user_articles(neighbor_id)
        # Rank this neighbor's articles by total interactions, most first.
        # (The original sorted ascending - least popular first - and then
        # np.setdiff1d re-sorted by id, destroying the ordering entirely.)
        popularity = [user_item['value', art_id].sum() for art_id in neighbor_art_ids]
        ranked = pd.DataFrame({'article_id': neighbor_art_ids, 'popularity': popularity})
        ranked = ranked.sort_values('popularity', ascending=False)
        for art_id in ranked.article_id:
            # De-duplicate against both the user's history and earlier recs.
            if art_id not in seen and art_id not in recs:
                recs.append(art_id)
        if len(recs) >= m:
            break  # stop consulting neighbors once m recs are collected
    recs = np.array(recs[:m])
    rec_names = get_article_names(recs)
    return recs, rec_names
# -
# Manual spot-check of the similarity ranking for user 1 (mirrors the
# logic inside get_top_sorted_users).
dot_prod_articles = user_item.dot(np.transpose(user_item))
print(dot_prod_articles.loc[1])
sim = dot_prod_articles.loc[1].sort_values(ascending = False)
# NOTE(review): drop(0) assumes the queried user lands in row 0 after the
# sort - only guaranteed when the self-similarity is strictly the largest.
sim = sim.to_frame().reset_index().drop(0, axis =0)
sim.columns = ['user_id', 'similarity']
sim.head()
# Checking DF for similar users ranked
get_top_sorted_users(131).head(10)
# Quick spot check - don't change this code - just use it to test your functions
rec_ids, rec_names = user_user_recs_part2(20, 10)
print("The top 10 recommendations for user 20 are the following article ids:")
print(rec_ids)
print()
print("The top 10 recommendations for user 20 are the following article names:")
print(rec_names)
# `5.` Use your functions from above to correctly fill in the solutions to the dictionary below. Then test your dictionary against the solution. Provide the code you need to answer each following the comments below.
# +
### Tests with a dictionary of results
# find_similar_users is defined earlier in the notebook (not visible here).
user1_most_sim = find_similar_users(1)[0]
user131_10th_sim = find_similar_users(131)[9]
# +
## Dictionary Test Here
sol_5_dict = {
'The user that is most similar to user 1.': user1_most_sim,
'The user that is the 10th most similar to user 131': user131_10th_sim,
}
sol_5_dict.items()
# +
# Project-supplied checker (module `t`).
t.sol_5_test(sol_5_dict)
# -
# `6.` If we were given a new user, which of the above functions would you be able to use to make recommendations? Explain. Can you think of a better way we might make recommendations? Use the cell below to explain a better method for new users.
# **New user recommendations can be achieved by using a rank-based approach: the get_top_articles() method can be used to get a list of articles for the cold-start problem.**
# `7.` Using your existing functions, provide the top 10 recommended articles you would provide for the a new user below. You can test your function against our thoughts to make sure we are all on the same page with how we might make a recommendation.
# +
# NOTE(review): `new_user` is never passed anywhere - rank-based
# recommendations ignore the user entirely, which is the point here.
new_user = '0.0'
# Provide a list of the top 10 article ids for a new_user using rank based method
new_user_recs = get_top_article_ids(10)
print(new_user_recs)
# +
assert set(new_user_recs) == set([1429.0, 1330.0, 1431.0, 1427.0, 1364.0, 1314.0, 1293.0, 1170.0, 1162.0, 1304.0]), "It makes sense that in this case we would want to recommend the most popular articles, because we don't know anything about these users."
print("That's right! Nice job!")
# -
# ### <a class="anchor" id="Matrix-Fact">Part IV: Matrix Factorization</a>
#
# In this part of the notebook, we will use matrix factorization to make article recommendations to the users on the IBM Watson Studio platform.
#
# `1.` Explore user_item_matrix
# Load the matrix here
# (pickled binary user x article interaction matrix prepared earlier)
user_item_matrix = pd.read_pickle('user_item_matrix.p')
# quick look at the matrix
user_item_matrix.head()
# `2.` In this situation, you can use Singular Value Decomposition from [numpy](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.svd.html) on the user-item matrix.
# +
# Perform SVD on the User-Item Matrix Here
u, s, vt = np.linalg.svd(user_item_matrix)# use the built in to get the three matrices
# -
# **This matrix has no NaNs and just indicates whether a user has viewed the article or not, so we can use NumPy's SVD directly.**
# `3.` Now for the tricky part, how do we choose the number of latent features to use? Running the below cell, you can see that as the number of latent features increases, we obtain a lower error rate on making predictions for the 1 and 0 values in the user-item matrix. Run the cell below to get an idea of how the accuracy improves as we increase the number of latent features.
# +
# Sweep the number of latent features kept and plot reconstruction accuracy.
num_latent_feats = np.arange(10,700+10,20)
sum_errs = []
for k in num_latent_feats:
# restructure with k latent features
s_new, u_new, vt_new = np.diag(s[:k]), u[:, :k], vt[:k, :]
# take dot product
user_item_est = np.around(np.dot(np.dot(u_new, s_new), vt_new))
# compute error for each prediction to actual value
diffs = np.subtract(user_item_matrix, user_item_est)
# total errors and keep track of them
err = np.sum(np.sum(np.abs(diffs)))
sum_errs.append(err)
# NOTE(review): accuracy is normalized by df.shape[0] (number of interaction
# rows), not by the number of matrix cells - presumably per the project
# template; verify the denominator is intentional.
plt.plot(num_latent_feats, 1 - np.array(sum_errs)/df.shape[0]);
plt.xlabel('Number of Latent Features');
plt.ylabel('Accuracy');
plt.title('Accuracy vs. Number of Latent Features');
# -
# `4.` From the above, we can't really be sure how many features to use, because simply having a better way to predict the 1's and 0's of the matrix doesn't exactly give us an indication of if we are able to make good recommendations. Instead, we might split our dataset into a training and test set of data, as shown in the cell below.
#
# +
# Hold out the final 5993 interactions as a test set (simple head/tail split).
df_train = df.head(40000)
df_test = df.tail(5993)
def create_test_and_train_user_item(df_train, df_test):
    '''
    INPUT:
    df_train - training dataframe with user_id and article_id columns
    df_test - test dataframe with user_id and article_id columns

    OUTPUT:
    user_item_train - a user-item matrix of the training dataframe
                      (unique users for each row and unique articles for each column)
    user_item_test - a user-item matrix of the testing dataframe
                     (unique users for each row and unique articles for each column)
    test_idx - all of the test user ids
    test_arts - all of the test article ids
    '''
    def _user_item_matrix(frame):
        # Build a binary user x article matrix: 1 where the user interacted
        # with the article, 0 otherwise.
        # .copy() so the assignment below never writes into a view of the
        # caller's frame (avoids SettingWithCopyWarning).
        frame = frame[['user_id', 'article_id']].copy()
        frame['value'] = 1
        matrix = frame.groupby(['user_id', 'article_id']).max().unstack()
        return matrix.fillna(0)

    # Identical construction for both splits (was duplicated inline).
    user_item_train = _user_item_matrix(df_train)
    user_item_test = _user_item_matrix(df_test)
    # user ids
    test_idx = user_item_test.index
    # article ids: second level of the ('value', article_id) column MultiIndex
    test_arts = user_item_test.columns.levels[1]
    return user_item_train, user_item_test, test_idx, test_arts
user_item_train, user_item_test, test_idx, test_arts = create_test_and_train_user_item(df_train, df_test)
# -
# common user ids in train and test set
common_idx = user_item_train.index.isin(test_idx)
common_idx.sum()
# users we cannot predict
len(test_idx) - common_idx.sum()
# common movie ids in train and test set
common_arts = user_item_train.columns.levels[1].isin(test_arts)
common_arts.sum()
# movies we cannot predict
len(test_arts) - common_arts.sum()
# +
# Replace the values in the dictionary below
a = 662
b = 574
c = 20
d = 0
sol_4_dict = {
'How many users can we make predictions for in the test set?': c,
'How many users in the test set are we not able to make predictions for because of the cold start problem?': a,
'How many movies can we make predictions for in the test set?': b,
'How many movies in the test set are we not able to make predictions for because of the cold start problem?': d
}
t.sol_4_test(sol_4_dict)
# -
# `5.` Now use the **user_item_train** dataset from above to find U, S, and V transpose using SVD. Then find the subset of rows in the **user_item_test** dataset that you can predict using this matrix decomposition with different numbers of latent features to see how many features makes sense to keep based on the accuracy on the test data. This will require combining what was done in questions `2` - `4`.
#
# Use the cells below to explore how well SVD works towards making predictions for recommendations on the test data.
# fit SVD on the user_item_train matrix
u_train, s_train, vt_train = np.linalg.svd(user_item_train)# use the built in to get the three matrices
# +
# Use these cells to see how well you can use the training
# decomposition to predict on test data
train_ind = user_item_train.index[common_idx]
test_ind = user_item_test.index.isin(train_ind)
test_ind = user_item_test.index[test_ind]
# NOTE(review): the next two expressions are evaluated for display only;
# their results are not stored.
test_ind == train_ind
user_item_test.loc[test_ind]
# Subset U to users and Vt to articles that also appear in the test set.
u_test = u_train[common_idx, :]
vt_test = vt_train[:, common_arts]
# +
# Same latent-feature sweep as before, but scoring only the predictable
# rows of the held-out test matrix.
num_latent_feats = np.arange(10,700+10,20)
sum_errs = []
for k in num_latent_feats:
# restructure with k latent features
s_new, u_new, vt_new = np.diag(s_train[:k]), u_train[:, :k], vt_train[:k, :]
u_test_new, vt_test_new = u_test[:, :k], vt_test[:k, :]
# take dot product
user_item_est = np.around(np.dot(np.dot(u_test_new, s_new), vt_test_new))
# compute error for each prediction to actual value
diffs = np.subtract(user_item_test.loc[test_ind], user_item_est)
# total errors and keep track of them
err = np.sum(np.sum(np.abs(diffs)))
sum_errs.append(err)
plt.plot(num_latent_feats, 1 - np.array(sum_errs)/df.shape[0]);
plt.xlabel('Number of Latent Features');
plt.ylabel('Accuracy');
plt.title('Accuracy vs. Number of Latent Features');
# -
# `6.` Use the cell below to comment on the results you found in the previous question. Given the circumstances of the results, is this an improvement on how the previous systems operated?
# **Performance Review.**
#
# The performance of the SVD becomes worse as the amount of latent features increases and this is due to overfitting.
#
# The amount of common users between the train and test dataset is so few that it is not recommended to use SVD to predict the test dataset and knowledge based or content based approaches could be deployed for this cold start problem.
#
# We could deploy the current recommendation engine with an experiment to check if there is an increase in interaction between users and articles for two different groups of users in a A/B test experiment where one group sees the SVD recommendations and another group sees a cold start approach.
#
# The metric to measure the success of the experiment will be the number of articles the user interacts with, because that is the original concept of the model built. The original model doesn't account for the time a user spends on an article.
# Save notebook
# Shells out to nbconvert to export this notebook (default target format).
from subprocess import call
call(['python', '-m', 'nbconvert', 'Recommendations_with_IBM.ipynb'])
| Recommendations_with_IBM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# + [markdown] id="nnKrcGcxwclI" colab_type="text"
# # Content Based Search for Curriculum Bot
#
# Let me step you through the demo of machine learning model to search training kit topics based on student's questions. For the purpose of this demo, let us use private data obtained from Lambda School Airtable data.
#
# To run this notebook, first upload **modSearchData.json** from github repository.
#
# Now we perform exploratory data analysis on the dataset.
# + id="9i4Z99uAwclb" colab_type="code" colab={} cellView="both"
#@title
# Generic imports
import pandas as pd
import numpy as np
# + id="5Bht-M3uwclh" colab_type="code" outputId="ef6f9ffc-c595-41f0-f825-b01172f70456" colab={"base_uri": "https://localhost:8080/", "height": 306}
# Load the Airtable export and take a first look at shape and nulls.
df = pd.read_json('modSearchData.json')
print(f'Our data set has {df.shape[0]} records and {df.shape[1]} features or columns.')
# Identify initial records in the data
df.head()
# + id="Mt29VKvKwclq" colab_type="code" outputId="cef7c015-42a2-4e8f-9831-bdc8b24f75f9" colab={"base_uri": "https://localhost:8080/", "height": 139}
print('Checking the data consistency')
df.isnull().sum()
# + id="wDw0Y7OMKj1Z" colab_type="code" outputId="de346330-3a31-4762-90bc-29613c744625" colab={"base_uri": "https://localhost:8080/", "height": 289}
df.head()
# + [markdown] id="4DICD_9Bwclw" colab_type="text"
# ### Feature Engineering
#
# From the above output it appears all the features are clean. So for the sake of this demo we shall proceed with using the **name** and **description** features.
#
# Let us categorize our links into different categories.
#
# * ds - Data Science or Data Structure
# * web - Full Stack or Web Development
# * ios - iOS
# * android - Android
# * career - Career related
# * ux - UX
# * cs - Computer Science
#
# text information from **modSearchProfile** is considered for now.
# + id="d0pw9GwRwcl0" colab_type="code" colab={}
# Categorizing the training kit information
# Map each URL path segment to one of the course categories listed in the
# markdown above; anything unrecognized becomes 'other'.
category = []
# NOTE(review): `section_names` comes from the shell magic below, which
# jupytext comments out in the .py export - this cell only runs in Jupyter.
# section_names = !cat modSearchData.json | grep '"URL"' | cut -d/ -f4
for section in section_names:
if section in ['and-pre', 'android']:
category.append('android')
elif section in ['cd', 'cr', 'ls-edu', 'nxt', 'p4s']:
category.append('career')
elif section in ['cs']:
category.append('cs')
elif section in ['ds', 'ds-pre']:
category.append('ds')
elif section in ['fsw', 'fsw-pre', 'web1', 'web2', 'web3', 'web4java', 'web4node']:
category.append('web')
elif section in ['ios', 'ios-pre']:
category.append('ios')
elif section in ['ux', 'ux-pre']:
category.append('ux')
else:
category.append('other')
df['category'] = category
# Extract text information from modSearchProfile
def extract_text(row):
    """Return the 'text' entry from a modSearchProfile record."""
    record = dict(row)
    return record['text']
df['modSearchText'] = df['modSearchProfile'].apply(extract_text)
# Combining text based information
# (name + description + modSearchText into a single searchable field)
df['text'] = df.apply(lambda row: row['name'] + " " + row['description']
+ " " + row['modSearchText'], axis = 1)
# Dropping detailed text information. This can be used later if needed.
df.drop(columns=['modSearchProfile'], inplace=True)
# + id="I3fmkLaJ6hwf" colab_type="code" outputId="ad0eb9b1-fc6a-48c5-bfae-0beb35ae2410" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Distinct categories; used later to detect "category: question" queries.
available_category = df.category.unique()
available_category
# + [markdown] id="6oPSVMVbwcmD" colab_type="text"
# ### Text Similarity Metrics:
#
# For building our content based search bot, we shall compare the name and description from training kit with students questions. For this we use commonly used text similarity metrics **Jaccard Similarity** and **Cosine Similarity**.
#
# #### Jaccard similarity:
# Also called intersection over union is defined as size of intersection divided by size of union of two sets.
#
# #### Cosine similarity:
# Calculates similarity by measuring the cosine of angle between two vectors.
#
# #### Reference:
# https://towardsdatascience.com/overview-of-text-similarity-metrics-3397c4601f50
# + id="ybIiA1O6wcmh" colab_type="code" colab={}
def get_jaccard_sim(str1, str2):
    """Jaccard similarity (intersection over union) of the word sets of two strings."""
    words_a = set(str1.split())
    words_b = set(str2.split())
    shared = words_a & words_b
    union_size = len(words_a) + len(words_b) - len(shared)
    return float(len(shared)) / union_size
# + id="K-huXooUOnOD" colab_type="code" colab={}
from collections import Counter
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
def get_cosine_sim(*strs):
    """Cosine similarity between the first two documents in *strs."""
    vectors = list(get_vectors(*strs))
    similarity_matrix = cosine_similarity(vectors)
    return similarity_matrix[0][1]
def get_vectors(*strs):
    """Return bag-of-words count vectors, one row per input document."""
    text = [t for t in strs]
    # Fix: the original passed the document list as CountVectorizer's first
    # positional argument, which is the `input` mode ('content'/'file'/
    # 'filename') - the documents belong in fit()/transform(), not the
    # constructor.
    vectorizer = CountVectorizer()
    vectorizer.fit(text)
    return vectorizer.transform(text).toarray()
# + [markdown] id="j4SEPbe6wcmF" colab_type="text"
# ### Text processing using NLTK
#
# Before we run Jaccard similarity on our data we have to further clean up our text data.
#
# Cleaning of text data is done with the help of Natural Language Tool Kit(NLTK) library.
# + id="9E2ip9u1wcmH" colab_type="code" outputId="8f97724d-d1a7-4d59-c08a-ed41dbf2f938" colab={"base_uri": "https://localhost:8080/", "height": 191}
# !pip install --upgrade pip
# !pip install -U nltk
import nltk
# One-time corpus downloads needed by the cleaning pipeline below.
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
# + id="00diH9SawcmN" colab_type="code" colab={}
import string
# Translation table that strips all ASCII punctuation from a token.
table = str.maketrans('','', string.punctuation)
from nltk.tokenize import word_tokenize # Word Tokenizer
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
stop_words = set(stop_words)
from nltk.stem.wordnet import WordNetLemmatizer # Word Lemmatizer
lemmatizer = WordNetLemmatizer()
def clean_text(text):
    """
    Normalize a document before vectorization: tokenize, lowercase, strip
    punctuation, keep alphabetic tokens, drop stopwords, then lemmatize.
    Relies on the module-level `table`, `stop_words` and `lemmatizer`.
    """
    words = word_tokenize(text)
    words = [token.lower() for token in words]
    words = [token.translate(table) for token in words]
    words = [token for token in words if token.isalpha()]
    words = [token for token in words if token not in stop_words]
    words = [lemmatizer.lemmatize(token) for token in words]
    return ' '.join(words)
# Clean up the text
# Materialize the cleaned corpus once; both similarity metrics reuse it.
df['cleaned_text'] = df.text.apply(clean_text)
# + [markdown] id="uVFRNwC7wcmX" colab_type="text"
# ### Student Search Input
#
# Below cell has sample student query information which shall be used as input to the content based recommendation system.
#
# Since query can overlap across different categories, we can request students to input specific category which he/she is looking for.
#
# **Category Based Search Format:**
# ```
# "<category>: <question>"
# ```
# + id="jhTOSzFiB7G4" colab_type="code" outputId="54a933c4-268a-4ef2-deac-4c1a85bb261f" colab={"base_uri": "https://localhost:8080/", "height": 225}
# 1st Sample User Information
# Demo query using the "<category>: <question>" format; falls back to a
# full cross-category search when the prefix is not a known category.
# NOTE(review): this cell is nearly identical to the "Full Search" cell
# below - candidate for extraction into a shared function.
student_query = "web: Advanced CSS"
# Check if the category is available
query_category = student_query.split(":")[0]
if query_category in available_category:
df_match_by_category = df[df['category']==query_category].copy()
query_without_category = clean_text(student_query.\
replace(query_category+":", ""))
df_match_by_category['jaccard_sim_value'] = \
df_match_by_category.cleaned_text.apply(get_jaccard_sim,
args=(query_without_category,))
sort_by_jaccard_sim = df_match_by_category.sort_values('jaccard_sim_value',
ascending=False).head(3)
print("\nCategory Based: Content matched based on Jaccard Similarity")
jaccard_match = sort_by_jaccard_sim[sort_by_jaccard_sim['jaccard_sim_value'] > 0]
print(jaccard_match.loc[:, ['name', 'jaccard_sim_value']])
df_match_by_category['cosine_sim_value'] = \
df_match_by_category.cleaned_text.apply(get_cosine_sim,
args=(query_without_category,))
sort_by_cosine_sim = df_match_by_category.sort_values('cosine_sim_value',
ascending=False).head(3)
print("\nCategory Based: Content matched based on Cosine Similarity")
cosine_match = sort_by_cosine_sim[sort_by_cosine_sim['cosine_sim_value'] > 0]
print(cosine_match.loc[:, ['name', 'cosine_sim_value']])
else:
df_full_match = df.copy()
df_full_match['jaccard_sim_value'] = \
df_full_match.cleaned_text.apply(get_jaccard_sim,
args=(clean_text(student_query),))
sort_by_jaccard_sim = df_full_match.sort_values('jaccard_sim_value',
ascending=False).head(3)
print("\nFull Match: Content matched based on Jaccard Similarity")
jaccard_match = sort_by_jaccard_sim[sort_by_jaccard_sim['jaccard_sim_value'] > 0]
print(jaccard_match.loc[:, ['name', 'jaccard_sim_value']])
df_full_match['cosine_sim_value'] = \
df_full_match.cleaned_text.apply(get_cosine_sim,
args=(clean_text(student_query),))
sort_by_cosine_sim = df_full_match.sort_values('cosine_sim_value',
ascending=False).head(3)
print("Full Match: Content matched based on Cosine Similarity")
cosine_match = sort_by_cosine_sim[sort_by_cosine_sim['cosine_sim_value'] > 0]
print(cosine_match.loc[:, ['name', 'cosine_sim_value']])
# + [markdown] id="usjrI8ue0aIJ" colab_type="text"
# ### Full Search
#
# Students can also query without specifying the category. This results in search across all the categories.
#
# **Full Search Format:**
# ```
# "<question>"
# ```
# + id="4cdQnzCqzw4p" colab_type="code" outputId="f1e1f4c8-6676-4cef-c06a-6ffa6f4cdb5e" colab={"base_uri": "https://localhost:8080/", "height": 225}
# 2nd Sample User Information
# Demo query with no category prefix: "Recursion" is not a known category,
# so the else branch runs a search across all categories.
student_query = "Recursion"
# Check if the category is available
query_category = student_query.split(":")[0]
if query_category in available_category:
df_match_by_category = df[df['category']==query_category].copy()
query_without_category = clean_text(student_query.\
replace(query_category+":", ""))
df_match_by_category['jaccard_sim_value'] = \
df_match_by_category.cleaned_text.apply(get_jaccard_sim,
args=(query_without_category,))
sort_by_jaccard_sim = df_match_by_category.sort_values('jaccard_sim_value',
ascending=False).head(3)
print("\nCategory Based: Content matched based on Jaccard Similarity")
jaccard_match = sort_by_jaccard_sim[sort_by_jaccard_sim['jaccard_sim_value'] > 0]
print(jaccard_match.loc[:, ['name', 'jaccard_sim_value']])
df_match_by_category['cosine_sim_value'] = \
df_match_by_category.cleaned_text.apply(get_cosine_sim,
args=(query_without_category,))
sort_by_cosine_sim = df_match_by_category.sort_values('cosine_sim_value',
ascending=False).head(3)
print("\nCategory Based: Content matched based on Cosine Similarity")
cosine_match = sort_by_cosine_sim[sort_by_cosine_sim['cosine_sim_value'] > 0]
print(cosine_match.loc[:, ['name', 'cosine_sim_value']])
else:
df_full_match = df.copy()
df_full_match['jaccard_sim_value'] = \
df_full_match.cleaned_text.apply(get_jaccard_sim,
args=(clean_text(student_query),))
sort_by_jaccard_sim = df_full_match.sort_values('jaccard_sim_value',
ascending=False).head(3)
print("\nFull Match: Content matched based on Jaccard Similarity")
jaccard_match = sort_by_jaccard_sim[sort_by_jaccard_sim['jaccard_sim_value'] > 0]
print(jaccard_match.loc[:, ['name', 'jaccard_sim_value']])
df_full_match['cosine_sim_value'] = \
df_full_match.cleaned_text.apply(get_cosine_sim,
args=(clean_text(student_query),))
sort_by_cosine_sim = df_full_match.sort_values('cosine_sim_value',
ascending=False).head(3)
print("\nFull Match: Content matched based on Cosine Similarity")
cosine_match = sort_by_cosine_sim[sort_by_cosine_sim['cosine_sim_value'] > 0]
print(cosine_match.loc[:, ['name', 'cosine_sim_value']])
# + [markdown] id="L-LVEhUVwcm8" colab_type="text"
# ### Conclusion
#
# We can perform **A/B testing** based on Jaccard and Cosine Similarity for getting the feedback from students.
#
# Once we have enough feedback we can try building **user-based collaborative filtering** for recommending the results.
| notebooks/Content_Based_Search_for_Curriculum_Bot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pymongo
import json
import pandas as pd
from pymongo import MongoClient
# Two Mongo connections: the remote dev server (new data) and a local
# instance holding the old collections for comparison.
client = MongoClient(host='mongodb1.dev.zippia.com')
client1 = MongoClient(host='localhost')
salary_by_location = client['zippia']['salary_by_location']
salary_by_location_old = client1['zippia']['salary_by_location']
salary_new = client['zippia']['salary']
salary_old = client1['zippia']['sly_lo']
# +
# Pull caseIds for one title from the new and old salary collections.
a= list(salary_new.find({'titleDisplay':'Computer Systems Administrator','isOutlier':0},{'caseId':1}))
for r in a:
r['_id'] = None
# with open('ComputerSystemsAdminstrator_new.json','w')as w:
# json.dump(a,w)
# df = pd.read_json('ComputerSystemsAdminstrator.json')
# print df.sort_values('caseId',ascending=False)
b= list(salary_old.find({'titleDisplay':'Computer Systems Administrator','isOutlier':0},{'caseId':1}))
# NOTE(review): this loop iterates `a` again - presumably it should null the
# _id fields of `b`; verify before relying on the dump of `b` below.
for r in a:
r['_id'] = None
# with open('ComputerSystemsAdminstrator_old.json','w')as w:
# json.dump(b,w)
# df = pd.read_json('ComputerSystemsAdminstrator_old.json')
# print df.sort_values('caseId',ascending=False)
print b
# -
# Nested-loop diff: print caseIds in the new set missing from the old set.
# NOTE(review): the for/else placement here depends on indentation lost in
# this export - confirm the `else` attaches to the inner for (for-else).
i = 0
for old in b:
for new in a:
i += 1
if old['caseId'] == new['caseId']:
break
else:
print new['caseId']
with open('ComputerSystemsAdminstrator.json','w')as w:
json.dump(a,w)
# Dump the 'Computer Systems Administrator' caseIds from the new collection.
computer_new = list(salary_new.find({'titleDisplay':'Computer Systems Administrator','isOutlier':0},{'caseId':1}))
for r in computer_new:
    r['_id'] = None  # ObjectId is not JSON-serializable
with open('ComputerSystemsAdminstrator_new.json','w') as w:
    # Fix: the original dumped `a` (a stale list from an earlier cell)
    # instead of the freshly queried `computer_new`.
    json.dump(computer_new, w)
# df = pd.read_json('ComputerSystemsAdminstrator.json')
# +
# Dump the same title's caseIds from the old collection.
computer_old = list(salary_old.find({'titleDisplay':'Computer Systems Administrator','isOutlier':0},{'caseId':1}))
for r in computer_old:
r['_id'] = None
with open('ComputerSystemsAdminstrator_old.json','w')as w:
json.dump(computer_old,w)
# df = pd.read_json('ComputerSystemsAdminstrator_old.json')
# print df.sort_values('caseId',ascending=False)
# -
# Set difference: caseIds present in the new data but absent from the old.
x = set([x['caseId']for x in computer_new]).difference (set([x['caseId']for x in computer_old]))
x = list(x)
with open('ComputerSystemsAdminstrator.json','w') as w:
json.dump(x,w)
# +
import pymongo
import json
import pandas as pd
from pymongo import MongoClient
client = MongoClient(host='mongodb1.dev.zippia.com')
client1 = MongoClient(host='localhost')
salary_by_location = client['zippia']['salary_by_location']
salary_by_location_old = client1['zippia']['salary_by_location']
salary_new = client['zippia']['salary']
salary_old = client1['zippia']['sly_lo']
computer_new = list(salary_new.find({'titleDisplay':'Computer Systems Administrator','isOutlier':0},{'caseId':1}))
print len(computer_new)
for r in computer_new:
r['_id'] = None
with open('ComputerSystemsAdminstrator_new.json','w')as w:
json.dump(a,w)
computer_old = list(salary_old.find({'titleDisplay':'Computer Systems Administrator','isOutlier':0},{'caseId':1}))
print len(computer_old)
for r in computer_old:
r['_id'] = None
with open('ComputerSystemsAdminstrator_old.json','w')as w:
json.dump(computer_old,w)
x = set([x['caseId']for x in computer_new]).difference (set([x['caseId']for x in computer_old]))
x = list(x)
print len(x)
with open('/home/chenlei/ENV/data/ComputerSystemsAdminstrator.json','w') as w:
json.dump(x,w)
# +
computer_new = list(salary_new.find({'titleDisplay':'Accountant','isOutlier':0},{'caseId':1}))
print len(computer_new)
for r in computer_new:
r['_id'] = None
with open('Accountant_new.json','w')as w:
json.dump(a,w)
computer_old = list(salary_old.find({'titleDisplay':'Accountant','isOutlier':0},{'caseId':1}))
print len(computer_old)
for r in computer_old:
r['_id'] = None
with open('Accountant_old.json','w')as w:
json.dump(computer_old,w)
x = set([x['caseId']for x in computer_new]).difference (set([x['caseId']for x in computer_old]))
x = list(x)
print len(x)
with open('/home/chenlei/ENV/data/Accountant.json','w') as w:
json.dump(x,w)
# +
computer_new = list(salary_new.find({'titleDisplay':'Graphic Designer','isOutlier':0},{'caseId':1}))
print len(computer_new)
for r in computer_new:
r['_id'] = None
with open('Graphic Designer_new.json','w')as w:
json.dump(a,w)
computer_old = list(salary_old.find({'titleDisplay':'Graphic Designer','isOutlier':0},{'caseId':1}))
print len(computer_old)
for r in computer_old:
r['_id'] = None
with open('Graphic Designer_old.json','w')as w:
json.dump(computer_old,w)
x = set([x['caseId']for x in computer_new]).difference (set([x['caseId']for x in computer_old]))
x = list(x)
print len(x)
with open('/home/chenlei/ENV/data/GraphicDesigner.json','w') as w:
json.dump(x,w)
# +
computer_new = list(salary_new.find({'titleDisplay':'Investment Analyst','isOutlier':0},{'caseId':1}))
print len(computer_new)
for r in computer_new:
r['_id'] = None
with open('InvestmentAnalyst.json','w')as w:
json.dump(a,w)
computer_old = list(salary_old.find({'titleDisplay':'Investment Analyst','isOutlier':0},{'caseId':1}))
print len(computer_old)
for r in computer_old:
r['_id'] = None
with open('InvestmentAnalyst.json','w')as w:
json.dump(computer_old,w)
x = set([x['caseId']for x in computer_new]).difference (set([x['caseId']for x in computer_old]))
x = list(x)
print len(x)
with open('/home/chenlei/ENV/data/InvestmentAnalyst.json','w') as w:
json.dump(x,w)
# +
computer_new = list(salary_new.find({'titleDisplay':'Data Analyst','isOutlier':0},{'caseId':1}))
print len(computer_new)
for r in computer_new:
r['_id'] = None
with open('Data Analyst.json','w')as w:
json.dump(a,w)
computer_old = list(salary_old.find({'titleDisplay':'Data Analyst','isOutlier':0},{'caseId':1}))
print len(computer_old)
for r in computer_old:
r['_id'] = None
with open('Data Analyst_old.json','w')as w:
json.dump(computer_old,w)
x = set([x['caseId']for x in computer_new]).difference (set([x['caseId']for x in computer_old]))
x = list(x)
print len(x)
with open('/home/chenlei/ENV/data/DataAnalyst.json','w') as w:
json.dump(x,w)
# +
# Average salary per location type from the (remote) salary_by_location
# collection, dumped to disk and displayed via pandas.
import json
a= list(salary_by_location.aggregate([
{'$group':{'_id':'$loc_type','avgAmount':{'$avg':'$salary'}}},
{'$project':{'loc_type':1,'avgAmount':1}}
]))
with open('City_by_salary.json', 'w') as w:
json.dump(a,w)
import pandas as pd
df = pd.read_json('City_by_salary.json')
print df.sort_values('avgAmount',ascending=False)
# +
# NOTE(review): `salary_by_location_new` is never defined in this file -
# this cell (and the ones below that use it) raises NameError unless it was
# defined interactively; presumably a new-collection handle was intended.
import json
a= list(salary_by_location_new.aggregate([
{'$group':{'_id':'$loc_type','avgAmount':{'$avg':'$salary'}}},
{'$project':{'loc_type':1,'avgAmount':1}}
]))
with open('City_by_salary.json', 'w') as w:
json.dump(a,w)
import pandas as pd
df = pd.read_json('City_by_salary.json')
print df.sort_values('avgAmount',ascending=False)
# +
# Average salary for New York (note: filename says CA but the match is NY).
import json
a= list(salary_by_location_new.aggregate([{'$match':{'loc_name':'New York__NY'}},
{'$group':{'_id':'$loc_name','avgAmount':{'$avg':'$salary'}}},
{'$project':{'loc_name':1,'avgAmount':1}}
]))
with open('CA_salary.json', 'w') as w:
json.dump(a,w)
import pandas as pd
df = pd.read_json('CA_salary.json')
print df.sort_values('avgAmount',ascending=False)
# -
import json
a= list(salary_by_location.aggregate([{'$match':{'loc_name':'New York__NY'}},
{'$group':{'_id':'$loc_name','avgAmount':{'$avg':'$salary'}}},
{'$project':{'loc_name':1,'avgAmount':1}}
]))
print a
with open('CA_salary1.json', 'w') as w:
json.dump(a,w)
import pandas as pd
df = pd.read_json('CA_salary1.json')
print df.sort_values('avgAmount',ascending=False)
# Record count for loc_name 'CA' in the old-path collection.
a= list(salary_by_location.aggregate(
[
{
'$match':{
'loc_name':'CA'
}
},
{'$group':
{
'_id':"$loc_name",
'count':{'$sum':1}
}
}
]
)
)
print a
with open('count.json', 'w') as w:
json.dump(a,w)
import pandas as pd
df = pd.read_json('count.json')
print df.sort_values('count',ascending=False)
# Same count against salary_by_location_new.
# NOTE(review): `salary_by_location_new` is never defined in this file.
a= list(salary_by_location_new.aggregate([
{
'$match':{
'loc_name':'CA'
}
},
{
'$group':
{
'_id':"$loc_name",
'count':{'$sum':1}
}
}
]
)
)
print a
with open('count.json', 'w') as w:
json.dump(a,w)
import pandas as pd
df = pd.read_json('count.json')
print df.sort_values('count',ascending=False)
# Salary histogram (count per distinct salary) for one title, new data.
a= list(salary_by_location_new.aggregate([
{
'$match':{
'value':'Computer Systems Administrator'
}
},
{
'$group':
{
'_id':"$salary",
'count':{'$sum':1}
}
}
]
)
)
with open('count.json', 'w') as w:
json.dump(a,w)
import pandas as pd
df = pd.read_json('count.json')
print df.sort_values('count',ascending=False)
# Salary histogram for the same title in the old-path collection.
a= list(salary_by_location.aggregate([
{
'$match':{
'value':'Computer Systems Administrator'
}
},
{
'$group':
{
'_id':"$salary",
'count':{'$sum':1}
}
}
]
)
)
with open('count.json', 'w') as w:
json.dump(a,w)
import pandas as pd
df = pd.read_json('count.json')
print df.sort_values('count',ascending=False)
# Per-location salary projections for a single title.
import json
a= list(salary_by_location.aggregate([{'$match':{'value':'Supply Chain Development Manager'}},
{'$project':{'loc_name':1,'salary':1,'total_salary_records':1}}
]))
for x in a:
x['_id'] = None
with open('title.json', 'w') as w:
json.dump(a,w)
import pandas as pd
df = pd.read_json('title.json')
print df.sort_values('salary',ascending=False)
# +
# Side-by-side comparison of one title/location in old vs new collections.
import json
a= list(salary_by_location.aggregate([{'$match':{'value':'Survey Researcher','loc_name':'New York__NY'}},
{'$project':{'loc_name':1,'salary':1,'total_salary_records':1}}
]))
for x in a:
x['_id'] = None
with open('title.json', 'w') as w:
json.dump(a,w)
import pandas as pd
df = pd.read_json('title.json')
print df.sort_values('salary',ascending=False)
# NOTE(review): `salary_by_location_new` is never defined in this file.
a= list(salary_by_location_new.aggregate([{'$match':{'value':'Survey Researcher','loc_name':'New York__NY'}},
{'$project':{'loc_name':1,'salary':1,'total_salary_records':1}}
]))
for x in a:
x['_id'] = None
with open('title.json', 'w') as w:
json.dump(a,w)
import pandas as pd
df = pd.read_json('title.json')
print df.sort_values('salary',ascending=False)
# -
a =list(salary_by_location_new.find({'value':'Computer Systems Administrator'}))
print a
with open('title.json', 'w') as w:
json.dump(a,w)
import pandas as pd
df = pd.read_json('title.json')
# NOTE(review): these find() documents carry no 'avgAmount' field, so this
# sort presumably raises KeyError - verify.
print df.sort_values('avgAmount',ascending=False)
import pymongo
from pymongo import MongoClient
client = MongoClient(host='mongodb1.dev.zippia.com')
client1 = MongoClient(host='localhost')
salary_by_location = client['zippia']['salary_by_location']
salary_by_location_old = client1['zippia']['salary_by_location']
salary_by_location.count({})
salary_by_location_old({})
# +
a = list(salary_by_location_old.find({'loc_name':{'$in':['CA','WA','MA','NY','FL']},
'value':{'$in':['Accountant','Computer Systems Administrator','Partner',
'Investment Analyst',
'Graphic Designer','Data Analyst',"Chief Scientific Officer",
"Investor"]}},{'salary':1,'total_salary_records':1,'loc_name':1,'value':1}))
for x in a:
x['_id'] = None
x['salary'] = int(x['salary'])
with open('title.json', 'w') as w:
json.dump(a,w)
import pandas as pd
df = pd.read_json('title.json')
print df
| Jupyter/first-python-notebook/salary_by_location_check.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#
# 零点の確認
#
import numpy as np
import matplotlib.pyplot as plt
#from matplotlib import rc
import seaborn as sns
# %matplotlib inline
plt.style.use("ggplot")
#rc('text', usetex=True)
x = np.linspace(-30,30,1000)
f_y0 = x
f_y1 = np.sin(x)
f_y2 = np.cos(x)
f_y3 = x**3 - 2 * x - 3
#x3 −2x−3
#plt.figure(figsize=(100,100))
#g = plt.subplot()
#fig, axes = plt.subplots(nrows=1, ncols=2, sharex=False, figsize=(20,10))
fig, axes = plt.subplots(nrows=1, ncols=1, sharex=False, figsize=(10,10))
#for i in range(2):
axes.plot(x,f_y0, color="black", label="y=x")
axes.plot(x,f_y1, color="red", label="y=sin(x)")
axes.plot(x,f_y2, color="green", label="y=cos(x)")
axes.plot(x,f_y3, color="blue", label="y=x^3−2x−3")
axes.set_aspect('equal')
axes.legend() # 凡例を表示
axes.set_xlabel('x')
axes.set_ylabel('y')
#axes[0].set_title('zoom')
#axes[0].set_ylim([0,10])
#axes[0].set_xlim([0,10])
axes.set_title('global')
axes.set_ylim([-5,5])
axes.set_xlim([-5,5])
#g2 = plt.subplot(g)
#g2.set_ylim([0,100])
#g2.set_xlim([0,100])
#axes[0].xlabel('x') # x軸のラベルを表示する
#axes[0].ylabel('y', rotation=0) # y軸のラベルを表示する
| texts/1.3.3-py.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json
import os
import fnmatch
import numpy as np
from collections import defaultdict
rosters = []
for YEAR in range(1952,2020):
rosters.append(json.load(open('rosters/auto_roster_{}.json'.format(YEAR))))
nc2 = np.array([5,1,4,2,1,1,2,1,1,3,7,3,3,3,1])/38
y_map = { 'hgt': 'hgt',
'stre': 'str',
'spd': 'spd',
'jmp': 'jmp',
'endu': 'end',
'ins': 'ins',
'dnk': 'dnk',
'ft': 'ft.1',
'fg': '2pt',
'tp': '3pt',
'diq': 'diq',
'oiq': 'oiq',
'drb': 'drb',
'pss': 'pss',
'reb': 'reb' }
inv_ymap = {v:k for k,v in y_map.items()}
y_keys = ['hgt',
'str',
'spd',
'jmp',
'end',
'ins',
'dnk',
'ft.1',
'2pt',
'3pt',
'oiq',
'diq',
'drb',
'pss',
'reb']
players = []
for roster in rosters:
yr = roster['startingSeason']
teams = {t['tid']:t for t in roster['teams']}
for t in teams:
teams[t]['plist'] = []
teams[t]['year'] = yr
for player in roster['players']:
tid = player['tid']
prate = player['ratings'][0]
#print(prate)
r = np.array([prate[inv_ymap[k]] for k in y_keys])
ovr = (r * nc2).sum()
if tid in teams:
teams[tid]['plist'].append((ovr,player))
players += list(teams.values())
# +
#prate = players[0]['plist'][0]['ratings'][0]
#r = np.array([prate[inv_ymap[k]] for k in y_keys])
#prate
#y_map
for player in players:
wtotal = 0
rt = defaultdict(float)
pidx = [(op[0],i) for i,op in enumerate(player['plist'])]
plist = [p for o,p in player['plist']]
for i,pi in enumerate(sorted(pidx,reverse=True)):
p = plist[pi[1]]
w= (1/(i+1))**1.6
for r,rv in p['ratings'][0].items():
rt[r] += w*rv
rt['HEIGHT'] += w*p['hgt']
rt['WEIGHT'] += w*p['weight']
rt['AGE'] += w*(player['year']- p['born']['year'])
wtotal += w
for k,v in rt.items():
rt[k] = int(round(v/wtotal))
player['hgt'] = int(round(rt['HEIGHT']))
player['weight'] = int(round(rt['WEIGHT']))
player['born'] = {'loc':'','year':int(round(2019-rt['AGE']))}
del rt['AGE']
del rt['HEIGHT']
del rt['WEIGHT']
del player['plist']
player['ratings'] = [dict(rt)]
player['name'] = player['region'] + ' ' + player['name'] + str(player['year'])
del player['region']
del player['year']
del player['cid']
del player['did']
del player['pop']
del player['stadiumCapacity']
del player['tid']
del player['strategy']
# -
player
new_team = [
('PHX','PHO'),
('BKN','BRK'),
('CHA','CHO'),
('NOP','NOH'),
('NOH','NOK'),
('BRK','NJN'),
('OKC','SEA'),
('MEM','VAN'),
('WAS','WSB'),
('SAC','KCK'),
('LAC','SDC'),
('UTA','NOJ'),
('SDC','BUF'),
('NJN','NYN'),
('NYN','NYA'),
('NYA','NJA'),
('GSW','SFW'),
('SAS','DLC'),
('DLC','TEX'),
('HOU','SDR'),
('CHA','CHH'),
('SAS','SAA'),
('SAA','TEX'),
('DEN','DNA'),
('DNA','DNR'),
('WSB','CAP'),
('CAP','BAL'),
('BAL','CHZ'),
('CHZ','CHP'),
('NOH','KEN'),
('MIN','SDS'),
('SDS','SDA'),
('MIA','FLO'),
('FLO','MMF'),
('MMF','MNM'),
('SFW','PHW'),
('LAL','MNL'),
('UTA','UTS'),
('UTS','LAS'),
('LAS','ANA'),
('CHH','SSL'),
('SSL','CAR'),
('CAR','HSM'),
('DET','FTW'),
('ATL','STL'),
('STL','MLH'),
('MLH','TRI'),
('PHI','SYR'),
('TOR','PTC'),
('PTC','PTP'),
('PTP','MNP'),
('VAN','MMS'),
('MMS','MMT'),
('MMT','MMP'),
('MMP','NOB'),
('ORL','VIR'),
('VIR','WSA'),
('WSA','OAK'),
('SAC','KCK'),
('KCK','KCO'),
('KCO','KCK'),
('KCK','CIN'),
('CIN','ROC'),
('CHP','BLB'),
('IND','INA'),
('INA','INO')]
st = set()
for t in new_team:
st.add(t[0])
st.add(t[1])
# +
import random
def load_roster(filename):
    """Load a roster JSON file and split each player's 'name' field.

    Every player entry with a non-empty 'name' gets 'firstName' and
    'lastName' keys (first whitespace token vs. the remainder) and loses
    its 'name' key. Also stamps the roster format version (32).

    Returns:
        (players, base): the player list and the full roster dict;
        `players` is the same list object as `base['players']`.
    """
    # BUG FIX: the original did json.load(open(...)) and leaked the file
    # handle; a context manager closes it deterministically.
    with open(filename, 'rb') as fh:
        base = json.load(fh)
    # Iterate the player dicts directly instead of indexing by position.
    for player in base['players']:
        if player.get('name'):
            sname = player['name'].split()
            player['firstName'] = sname[0]
            player['lastName'] = ' '.join(sname[1:])
            del player['name']
    base['version'] = 32
    return base['players'], base
playersOG,base = load_roster('normed.json')
abbrev_to_tid = {_['abbrev']:_['tid'] for _ in base['teams']}
abbrev_to_tid['PHO'] = abbrev_to_tid['PHX']
abbrev_to_tid['CHO'] = abbrev_to_tid['CHA']
abbrev_to_tid['BRK'] = abbrev_to_tid['BKN']
abbrev_to_tid['NOH'] = abbrev_to_tid['NOP']
abbrev_to_tid['NJN'] = abbrev_to_tid['BKN']
abbrev_to_tid['SEA'] = abbrev_to_tid['OKC']
abbrev_to_tid['KCK'] = abbrev_to_tid['SAC']
abbrev_to_tid['SDC'] = abbrev_to_tid['LAC']
abbrev_to_tid['WSB'] = abbrev_to_tid['WAS']
abbrev_to_tid['CHH'] = abbrev_to_tid['CHA']
abbrev_to_tid['VAN'] = abbrev_to_tid['MEM']
abbrev_to_tid['NOK'] = abbrev_to_tid['NOP']
abbrev_to_tid['NOK'] = abbrev_to_tid['NOP']
abbrev_to_tid['BUF'] = abbrev_to_tid['LAC']
abbrev_to_tid['CAP'] = abbrev_to_tid['WAS']
for nt,ot in new_team:
try:
abbrev_to_tid[ot] = abbrev_to_tid[nt]
except:
print(nt,ot)
base['startingSeason'] = 2019
valid_tids = {abbrev_to_tid[_['abbrev']] for _ in base['teams']}
valid_teams = []
for t in base['teams']:
if t['tid'] in valid_tids:
tid = len(valid_teams)
otid = t['tid']
t['tid'] = tid
for k,v in abbrev_to_tid.items():
if v == otid:
abbrev_to_tid[k] = tid
valid_teams.append(t)
base['teams'] = valid_teams
del base['draftPicks']
# +
import random
team_ocs = defaultdict(list)
for p in players:
name = p['name']
sname = name.split(' ')
p['firstName'] = sname[0]
p['lastName'] = ' '.join(sname[1:])
r = np.array([p['ratings'][0][inv_ymap[k]] for k in y_keys])
ovr = (r * nc2).sum()
p['ovr'] = ovr
pt = p['abbrev']
team_ocs[abbrev_to_tid[pt]].append(ovr)
p['tid'] = abbrev_to_tid[pt]
# -
team_ocs = {k:sorted(v,reverse=True)[13] for k,v in team_ocs.items()}
for player in players:
if player['ovr'] < team_ocs[player['tid']]:
player['tid'] = -1
del player['ovr']
base['players'] = players
with open('player_teams.json'.format(),'wt') as fp:
json.dump(base,fp, sort_keys=True)
| player_seasons.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.3
# language: julia
# name: julia-1.5
# ---
# CMU Optimal control: Minimization
# Fit Minimum by quadratic approximation repeatedly
using ForwardDiff
using LinearAlgebra
using PyPlot
# +
function f(x)
return x.^4 + x.^3 - x.^2 - x
end
function ∇f(x)
return 4.0*x.^3 + 3.0*x.^2 - 2.0*x - 1.0
end
function ∇2f(x)
return 12.0*x.^2 + 6.0*x - 2.0
end
# -
x = LinRange(-1.75,1.25,1000)
p = plot(x, f(x))
function newton_step(func, ∇func, ∇2func, x_now)
    # One undamped Newton iteration: solve H * step = -g, i.e. jump to the
    # stationary point of the local quadratic model of `func` at x_now.
    gradient = ∇func(x_now)
    hessian = ∇2func(x_now)
    return x_now - hessian \ gradient
end
eps = 1e-5
x0 = -1.5
Iter = []
plot(x0,f(x0),"rx")
change = 1.0
xi = x0
while abs(change) > eps
x_new = newton_step(f, ∇f, ∇2f, xi)
change = x_new - xi
push!(Iter, change)
xi = x_new
plot(xi, f(xi),"go")
end
p = plot(x,f(x))
plot(Iter)
function regularized_newton_step(func, ∇func, ∇2func, x0)
    # One Newton iteration with Hessian regularization: when the Hessian is
    # not positive definite, shift it by β·I repeatedly until it is, so the
    # resulting step is a descent direction.
    # NOTE(review): the shift grows arithmetically (β, 2β, 3β, …), not
    # geometrically, so a strongly indefinite Hessian may need many passes
    # through this loop — confirm that is acceptable for the intended use.
    # add regularization
    β = 1e-3
    H = ∇2func(x0)
    while !isposdef(H)
        H = H + β .* I          # lift the spectrum by β per pass
    end
    δx = -(H) \ ∇func(x0)       # solve the regularized Newton system
    x_next = x0 + δx
    return x_next
end
# +
eps = 1e-5
x0 = -1.5
plot(x0,f(x0),"rx")
change = 1.0
Iter1 = []
xi = x0
while abs(change) > eps
x_new = regularized_newton_step(f, ∇f, ∇2f, xi)
change = x_new - xi
xi = x_new
push!(Iter1, change)
plot(xi, f(xi),"go")
end
p = plot(x,f(x))
# -
plot(Iter1)
function backtracking_linesearch_newton_step(func, ∇func, ∇2func, x0)
    # One regularized Newton step with a backtracking (Armijo) line search:
    # compute a descent direction from a positive-definite (shifted) Hessian,
    # then shrink the step length α by factor c until the sufficient-decrease
    # condition  f(x + αδx) ≤ f(x) + b·α·∇f(x)ᵀδx  holds.
    # Returns the accepted iterate (value of the final assignment — Julia
    # returns the last expression; there is no explicit `return`).
    α = 1.0                     # initial (full Newton) step length
    b = 0.1                     # Armijo sufficient-decrease parameter
    c = 0.8                     # backtracking shrink factor
    β = 1e-3                    # Hessian regularization increment
    H = ∇2func(x0)
    while !isposdef(H)
        H = H + β .* I          # shift until positive definite (see regularized_newton_step)
    end
    δx = - (H) \ ∇func(x0)
    while (func(x0 + α .* δx) > (func(x0) + b * α * ∇func(x0)' * δx))
        α = c * α               # step too long: back off
    end
    display("alpha is $α")      # diagnostic output of the accepted step length
    x_next = x0 + α .* δx
end
x0 = 0.0
plot(x, f(x))
plot(x0, f(x0), "rx")
# +
eps = 1e-5
x0 = -1.5
plot(x0,f(x0),"rx")
change = 1.0
Iter2 = []
xi = x0
while abs(change) > eps
x_new = backtracking_linesearch_newton_step(f, ∇f, ∇2f, xi)
change = x_new - xi
xi = x_new
push!(Iter2, change)
plot(xi, f(xi),"go")
end
p = plot(x,f(x))
# -
plot(Iter2)
length(Iter2)
| .ipynb_checkpoints/Minimization-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Installation and configuration
#
# This notebook create Azure resources and configures the notebooks in this tutorial to connect to an Azure Machine Learning (AzureML) Workspace. You can use an existing workspace or create a new one.
import azureml.core
from azureml.core import Workspace
from dotenv import set_key, get_key, find_dotenv
from pathlib import Path
from testing_utilities import get_auth
import json
import time
# The AzureML Python SDK is already installed. Let's check the AzureML SDK version.
print("SDK Version:", azureml.core.VERSION)
# Replace the values in the following cell with your information. If you would like to use service principal authentication as described [here](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/manage-azureml-service/authentication-in-azureml/authentication-in-azure-ml.ipynb) make sure you provide the optional values as well.
# + tags=["parameters"]
# Azure resources
subscription_id = "<subscription_id>"
resource_group = "<resource_group>"
workspace_name = "<workspace_name>"
workspace_region = "resource_region" # e.g. resource_region = "eastus"
# IoT hub name - a globally UNIQUE name is required, e.g. iot_hub_name = "myiothubplusrandomnumber".
iot_hub_name = "<iot_hub_name>"
device_id = "<device_id>" # the name you give to the edge device. e.g. device_id = "mygpudevice"
image_name = "<image_name>" # avoid underscore in names
# -
# Create and initialize a dotenv file for storing parameters used in multiple notebooks.
env_path = find_dotenv()
if env_path == "":
Path(".env").touch()
env_path = find_dotenv()
# +
set_key(env_path, "subscription_id", subscription_id)
set_key(env_path, "resource_group", resource_group)
set_key(env_path, "workspace_name", workspace_name)
set_key(env_path, "workspace_region", workspace_region)
set_key(env_path, "image_name", image_name)
set_key(env_path, "iot_hub_name", iot_hub_name)
set_key(env_path, "device_id", device_id)
# -
# ## Create Azure Resources
# login in your account
# accounts = !az account list --all -o tsv
if "Please run \"az login\" to access your accounts." in accounts[0]:
# !az login -o table
else:
print("Already logged in")
# Below we will reload it just to make sure that everything is working.
# !az account set --subscription $subscription_id
# ### Create AzureML workspace
#
# To create or access an Azure ML Workspace, you will need the following information:
#
# * An Azure subscription id
# * A resource group name
# * A name for your workspace
# * A region for your workspace
#
# The next cell will create an AzureML workspace for you in a subscription, provided you have the correct permissions.
# This will fail when:
#
# 1. You do not have permission to create a workspace in the resource group
# 2. You do not have permission to create a resource group if it's non-existing.
# 3. You are not a subscription owner or contributor and no Azure ML workspaces have ever been created in this subscription
#
# If workspace creation fails, please work with your IT admin to provide you with the appropriate permissions or to provision the required resources. If this cell succeeds, you're done configuring AzureML!
# +
# import the Workspace class and check the azureml SDK version
# from azureml.core import Workspace
ws = Workspace.create(
name=workspace_name,
subscription_id=subscription_id,
resource_group=resource_group,
location=workspace_region,
create_resource_group=True,
auth=get_auth(),
exist_ok=True,
)
# persist the subscription id, resource group name, and workspace name in aml_config/config.json.
ws.write_config()
# -
# Below we will reload it just to make sure that everything is working.
# load workspace configuration from ./aml_config/config.json file.
ws = Workspace.from_config(auth=get_auth())
ws.get_details()
# ### Create IoT Hub
# install az-cli iot extension - I had to use "sudo -i" to make it work
# !sudo -i az extension add --name azure-cli-iot-ext
# !az iot hub list --resource-group $resource_group -o table
# Command to create a Standard tier S1 hub with name `iot_hub_name` in the resource group `resource_group`.
# !az iot hub create --resource-group $resource_group --name $iot_hub_name --sku S1
# +
# Command to create a free tier F1 hub. You may encounter error "Max number of Iot Hubs exceeded for sku = Free" if quota is reached.
# # !az iot hub create --resource-group $resource_group --name $iot_hub_name --sku F1
# -
# ### Register an IoT Edge device
# We create a device with name `device_id` under previously created iot hub.
time.sleep(30) # Wait 30 seconds to let IoT hub stable before creating a device
print("az iot hub device-identity create --hub-name {} --device-id {} --edge-enabled -g {}".format(iot_hub_name,device_id,resource_group))
# !az iot hub device-identity create --hub-name $iot_hub_name --device-id $device_id --edge-enabled -g $resource_group
# Obtain device_connection_string. It will be used in the next step.
print("az iot hub device-identity show-connection-string --device-id {} --hub-name {} -g {}".format(device_id, iot_hub_name,resource_group))
# json_data = !az iot hub device-identity show-connection-string --device-id $device_id --hub-name $iot_hub_name -g $resource_group
print(json_data)
device_connection_string = json.loads(''.join([i for i in json_data if 'WARNING' not in i]))['connectionString']
print(device_connection_string)
set_key(env_path, "device_connection_string", device_connection_string)
# In this notebook, we created a ".env" file to save and reuse the variables needed cross all the notebooks. We also created a new Azure resource group with name <resource\_group>, where all Azure resources are created. We can now move on to the next notebook [02_IoTEdgeConfig.ipynb](02_IoTEdgeConfig.ipynb).
| object-detection-azureml/01_AzureSetup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import os
import pandas
dir_path = './Data_RNN'
df = pandas.DataFrame()
for file_name in os.listdir(dir_path):
file_path = os.path.join(dir_path, file_name)
if not os.path.isdir(file_path):
ext = os.path.splitext(file_path)[-1]
if ext == '.txt':
print(file_path)
curr_df = pandas.read_csv(file_path, delim_whitespace=True, header=None, usecols=[6])
df = df.append(curr_df)
# +
import numpy as np
from sklearn.preprocessing import MinMaxScaler
dataset = df.values
dataset = dataset.astype('float32')
dataset = np.log10(dataset)
# normalize
scaler = MinMaxScaler(feature_range=(0,1))
dataset = scaler.fit_transform(dataset)
dataset = np.reshape(dataset, (11, 288, 1))
#dataset.shape
# +
import matplotlib.pyplot as plt
# %matplotlib inline
for ds in dataset:
plt.plot(ds)
plt.show()
# +
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
model = Sequential()
model.add(LSTM(32, batch_input_shape=(1, 20, 1), stateful=True, return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(32, batch_input_shape=(1, 20, 1), stateful=True))
model.add(Dropout(0.3))
model.add(Dense(1))
#model = Sequential()
#model.add(LSTM(100, batch_input_shape=(1, 20, 1), stateful=True))
#model.add(Dropout(0.3))
#model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
# -
def create_dataset(dataset, look_back=1):
    """Slice a (timesteps, 1) series into supervised learning pairs.

    Each sample is a window of `look_back` consecutive values taken from
    column 0, and its target is the value immediately after the window.

    Returns:
        (X, Y): arrays of shape (n, look_back) and (n,), where
        n = len(dataset) - look_back.
    """
    n_samples = len(dataset) - look_back
    windows = [dataset[start:start + look_back, 0] for start in range(n_samples)]
    targets = [dataset[start + look_back, 0] for start in range(n_samples)]
    return np.array(windows), np.array(targets)
for epoch in range(10):
for event_idx in range(10):
print('epoch : ' + str(epoch) + ' | event id : ' + str(event_idx))
trainX, trainY = create_dataset(dataset[event_idx], 20)
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1)) # samples, time steps, features
model.fit(trainX, trainY, nb_epoch=1, batch_size=1, verbose=2, shuffle=False)
model.reset_states()
# +
testX, testY = create_dataset(dataset[10], 20)
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
testPredict = [testX[0]]
# +
look_ahead = 288 - 20
predictions = np.zeros((look_ahead, 1))
for i in range(look_ahead):
prediction = model.predict(np.array([testPredict[-1]]), batch_size=1, verbose=0)
predictions[i] = prediction
testPredict.append(np.vstack([testPredict[-1][1:], prediction]))
model.reset_states()
# -
plt.figure(figsize=(12,5))
plt.plot(np.arange(look_ahead), predictions,'r', label="prediction")
plt.plot(np.arange(look_ahead), testY, label="test function")
plt.legend()
plt.show()
# +
testX, testY = create_dataset(dataset[10], 20)
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
testPredict = [testX[0]]
look_real = 100
look_ahead = 287-100
predictions = np.zeros((look_real + look_ahead, 1))
for i in range(look_real):
prediction = model.predict(np.array([testPredict[-1]]), batch_size=1, verbose=0)
predictions[i] = prediction
testPredict.append(np.vstack([testPredict[-1][1:], testY[i]]))
for i in range(look_ahead):
prediction = model.predict(np.array([testPredict[-1]]), batch_size=1, verbose=0)
predictions[look_real + i] = prediction
testPredict.append(np.vstack([testPredict[-1][1:], prediction]))
model.reset_states()
# -
plt.figure(figsize=(12,5))
plt.plot(np.arange(287), predictions,'r', label="prediction")
plt.plot(np.arange(287), testY, label="test function")
plt.legend()
plt.show()
| _writing/Flux_Test-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Conversion to NetCDF
#
#
# We have a bunch of `.grib` ERA files, one per month.
#
# Can we process these into NetCDF files, one per hour?
#
# ---
#Load the data
# %%time
import xarray as xr
f = '/network/group/aopp/predict/TIP016_PAXTON_RPSPEEDY/ML4L/ECMWF_files/raw/ERA_skin/sfc_skin_unstructured_2018_01.grib'
ds = xr.open_dataset(f,engine='cfgrib',filter_by_keys={'typeOfLevel': 'surface'})
#Relabel longitude coordinate to be consistent with MODIS
ds_long = ds.assign_coords({"longitude": (((ds.longitude + 180) % 360) - 180)})
#Group it in time
ds_grouped = ds_long.groupby("time")
# +
# %%time
#Output path
path = '/network/group/aopp/predict/TIP016_PAXTON_RPSPEEDY/ML4L/ECMWF_files/raw/sandbox/'
counter = 0
for label,group in ds_grouped:
outname = path+str(label)+'.nc'
print(outname)
group.to_netcdf(outname)
#Exit condition
counter += 1
if counter > 5: break
# -
# ---
#
# We can then open one of these files much faster:
# %%time
ds_nc = xr.open_dataset(path+'2018-01-01T00:00:00.000000000.nc')
# ---
#
#
# # Skin temperature checks
#
# +
# %%time
import pandas as pd
import xarray as xr
def process_grib_file_skt(f, output_path):
    """Split one monthly skin-temperature GRIB file into per-hour NetCDF files.

    Opens the GRIB file with the cfgrib engine, relabels longitude to the
    MODIS [-180, 180) convention, groups by timestamp, and writes one NetCDF
    file per timestamp into `output_path`.

    NOTE(review): this is still the exploratory version — it bails out via
    sys.exit() after printing the first output name and caps the loop at
    ~10 groups; remove those guards for a full conversion run.
    """
    import sys  # BUG FIX: sys.exit() below raised NameError — sys was never imported

    # Open file (indexpath='' disables the on-disk cfgrib index cache)
    ds = xr.open_dataset(f, engine='cfgrib', filter_by_keys={'typeOfLevel': 'surface'}, backend_kwargs={'indexpath': ''})
    display(ds)
    # Relabel longitude coordinate to be consistent with MODIS
    ds = ds.assign_coords({"longitude": (((ds.longitude + 180) % 360) - 180)})
    display(ds.time)
    # Group it by time
    ds_grouped = ds.groupby("time")
    counter = 0
    for label, group in ds_grouped:
        outname = output_path + str(label) + '.nc'
        print(outname)
        sys.exit()  # debug guard: stop after showing the first output name
        group.to_netcdf('test.nc')
        counter += 1
        if counter > 10: break
    # Explicitly close everything
    ds.close()
    del ds_grouped
#Paths
root = '/network/group/aopp/predict/TIP016_PAXTON_RPSPEEDY/ML4L/ECMWF_files/raw'
#Parameters
dates = pd.date_range('2018-01-01','2020-12-01',
freq='MS').strftime("%Y-%m").tolist()
source = 'ERA_skt' #'ERA_skin'
counter = 0
for dt in dates:
d=dt.replace('-','_')
fname = f'{root}/{source}/skt_unstructured_{d}.grib'
out = f'{root}/ERA_skt_netcdf/'
print(fname)
print('Processing month:', out)
process_grib_file_skt(fname,out)
sys.exit()
# -
# ---
# # Appendix
#
# Test script used in `scripts/convert_grib_to_netcdf.py`
# +
# %%time
def process_grib_file(f, output_path):
    """Split one monthly surface GRIB file into per-hour NetCDF files.

    Opens the GRIB file with the cfgrib engine, relabels longitude to the
    MODIS [-180, 180) convention, groups by timestamp, and writes one NetCDF
    file per timestamp into `output_path`.

    NOTE(review): exploratory version — it bails out via sys.exit() after
    printing the first output name and caps the loop at ~10 groups; remove
    those guards for a full conversion run.
    """
    import sys  # BUG FIX: sys.exit() below raised NameError — sys was never imported

    # Open file
    ds = xr.open_dataset(f, engine='cfgrib', filter_by_keys={'typeOfLevel': 'surface'})
    # Relabel longitude coordinate to be consistent with MODIS
    ds = ds.assign_coords({"longitude": (((ds.longitude + 180) % 360) - 180)})
    # Group it by time
    ds_grouped = ds.groupby("time")
    counter = 0
    for label, group in ds_grouped:
        outname = output_path + str(label) + '.nc'
        print(outname)
        sys.exit()  # debug guard: stop after showing the first output name
        group.to_netcdf('test.nc')
        counter += 1
        if counter > 10: break
    # Explicitly close everything
    ds.close()
    del ds_grouped
#Paths
root = '/network/group/aopp/predict/TIP016_PAXTON_RPSPEEDY/ML4L/ECMWF_files/raw'
#Parameters
dates = pd.date_range('2018-01-01','2020-12-01',
freq='MS').strftime("%Y-%m").tolist()
source = 'ERA_sfc' #'ERA_skin'
if source == 'ERA_skin':
name = '_skin_'
if source == 'ERA_sfc':
name = '_'
counter = 0
for dt in dates:
d=dt.replace('-','_')
fname = f'{root}/{source}/sfc{name}unstructured_{d}.grib'
out = f'{root}/{source}/NetCDF/'
print('Processing month:', out)
process_grib_file(fname,out)
sys.exit()
# -
# ---
| legacy/legacy_notebooks/01.Convert_grib_to_NetCDF.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Ch `07`: Concept `01`
# ## Autoencoder
# All we'll need is TensorFlow and NumPy:
import tensorflow as tf
import numpy as np
# Instead of feeding all the training data to the training op, we will feed data in small batches:
def get_batch(X, size):
    """Return `size` rows of X sampled uniformly at random without replacement."""
    chosen_rows = np.random.choice(len(X), size, replace=False)
    return X[chosen_rows]
# Define the autoencoder class:
class Autoencoder:
    """One-hidden-layer autoencoder built on the TF1 graph API.

    The encoder maps `input_dim` inputs to `hidden_dim` sigmoid units and the
    decoder maps back with a linear layer. Training minimizes the root-mean-
    square reconstruction error with Adam and checkpoints to ./model.ckpt.
    """

    def __init__(self, input_dim, hidden_dim, epoch=500, batch_size=10, learning_rate=0.001):
        self.epoch = epoch
        self.batch_size = batch_size
        self.learning_rate = learning_rate

        # Define input placeholder
        x = tf.placeholder(dtype=tf.float32, shape=[None, input_dim])

        # Define variables
        with tf.name_scope('encode'):
            weights = tf.Variable(tf.random_normal([input_dim, hidden_dim], dtype=tf.float32), name='weights')
            biases = tf.Variable(tf.zeros([hidden_dim]), name='biases')
            encoded = tf.nn.sigmoid(tf.matmul(x, weights) + biases)
            # BUG FIX: keep handles to the encoder parameters. The original
            # never assigned self.weights1/self.biases1, so get_params()
            # raised AttributeError.
            self.weights1 = weights
            self.biases1 = biases
        with tf.name_scope('decode'):
            weights = tf.Variable(tf.random_normal([hidden_dim, input_dim], dtype=tf.float32), name='weights')
            biases = tf.Variable(tf.zeros([input_dim]), name='biases')
            decoded = tf.matmul(encoded, weights) + biases
        self.x = x
        self.encoded = encoded
        self.decoded = decoded

        # Define cost function and training op
        self.loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(self.x, self.decoded))))          # scalar RMS loss
        self.all_loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(self.x, self.decoded)), 1))   # per-sample RMS loss
        self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)

        # Define a saver op
        self.saver = tf.train.Saver()

    def train(self, data):
        """Train on `data` (rows = samples); checkpoints every 50 epochs and at the end."""
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for i in range(self.epoch):
                # 500 mini-batch updates per epoch
                for j in range(500):
                    batch_data = get_batch(data, self.batch_size)
                    l, _ = sess.run([self.loss, self.train_op], feed_dict={self.x: batch_data})
                if i % 50 == 0:
                    print('epoch {0}: loss = {1}'.format(i, l))
                    self.saver.save(sess, './model.ckpt')
            self.saver.save(sess, './model.ckpt')

    def test(self, data):
        """Reconstruct `data` with the checkpointed model and return the reconstruction."""
        with tf.Session() as sess:
            self.saver.restore(sess, './model.ckpt')
            hidden, reconstructed = sess.run([self.encoded, self.decoded], feed_dict={self.x: data})
            print('input', data)
            print('compressed', hidden)
            print('reconstructed', reconstructed)
            return reconstructed

    def get_params(self):
        """Return the trained encoder (weights, biases) from the checkpoint."""
        with tf.Session() as sess:
            self.saver.restore(sess, './model.ckpt')
            weights, biases = sess.run([self.weights1, self.biases1])
            return weights, biases

    def classify(self, data, labels):
        """Compare mean reconstruction loss of label-7 rows vs. the rest.

        Prints the two mean losses and returns the hidden encoding of row 7.
        NOTE(review): `reconstructed[0]` broadcasts a single reconstructed
        row against all of `data` in the loss — confirm that is intended.
        """
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            self.saver.restore(sess, './model.ckpt')
            hidden, reconstructed = sess.run([self.encoded, self.decoded], feed_dict={self.x: data})
            reconstructed = reconstructed[0]
            # loss = sess.run(self.all_loss, feed_dict={self.x: data})
            print('data', np.shape(data))
            print('reconstructed', np.shape(reconstructed))
            loss = np.sqrt(np.mean(np.square(data - reconstructed), axis=1))
            print('loss', np.shape(loss))
            horse_indices = np.where(labels == 7)[0]
            not_horse_indices = np.where(labels != 7)[0]
            horse_loss = np.mean(loss[horse_indices])
            not_horse_loss = np.mean(loss[not_horse_indices])
            print('horse', horse_loss)
            print('not horse', not_horse_loss)
            return hidden[7, :]

    def decode(self, encoding):
        """Decode a hidden-layer `encoding` and reshape it to a 32x32 image.

        NOTE(review): the 32x32 shape assumes input_dim == 1024 — confirm
        against the caller before reuse.
        """
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            self.saver.restore(sess, './model.ckpt')
            reconstructed = sess.run(self.decoded, feed_dict={self.encoded: encoding})
            img = np.reshape(reconstructed, (32, 32))
            return img
# The *Iris dataset* is often used as a simple training dataset to check whether a classification algorithm is working. The sklearn library comes with it, `pip install sklearn`.
# +
from sklearn import datasets
hidden_dim = 1
data = datasets.load_iris().data
input_dim = len(data[0])
ae = Autoencoder(input_dim, hidden_dim)
ae.train(data)
ae.test([[8, 4, 6, 2]])
# -
# Tested; Gopal
| tests/tf/Concept01_autoencoder_new.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import openeye.oechem as oechem
import qcportal as ptl
import qcelemental as qcel
import cmiles
# ## Explore the dataset
# view collections in QCArchive
client = ptl.FractalClient()
client.list_collections()
# pull the collection of interest
ds = client.get_collection('OptimizationDataset', 'OpenFF Full Optimization Benchmark 1')
# view details about group type
ds.list_specifications()
spec = ds.list_specifications().index[0]
print(spec)
ds.list_specifications().iloc[0]['Description']
# see how many molecules in the set (conformers counted separately)
ds.status(spec)
# show the number of optimization steps per entry
ds.counts().head(15)
# ## Explore a molecule
# check out a single molecule and its final geometry
optrec = ds.get_record(name="C[NH2+]C[C@@H](c1ccc(c(c1)O)O)O-0", specification="default")
print(optrec.status)
optrec.get_final_molecule()
optrec.show_history()
optrec.get_final_energy()
# ## Convert to OEMol, write to file (single mol)
mol = optrec.get_final_molecule()
type(mol)
# convert the qcelemental molecule to an OpenEye molecule
qcjson_mol = mol.dict(encoding='json')
oemol = cmiles.utils.load_molecule(qcjson_mol)
oemol
# convert energy from Hartrees to kcal/mol
ene = optrec.get_final_energy()*qcel.constants.hartree2kcalmol
# add name and energy tag to the mol
oemol.SetTitle("full_1")
oechem.OESetSDData(oemol, "Energy QCArchive", str(ene))
# write molecule -- check that title and sd tag exists
ofs = oechem.oemolostream()
ofs.open("test.sdf")
oechem.OEWriteConstMolecule(ofs, oemol)
# ## Convert to OEMol, write to file, test conformer reading
# open an outstream file
outfile = 'test.sdf'
ofs = oechem.oemolostream()
#if os.path.exists(outfile):
#    raise FileExistsError("Output file {} already exists in {}".format(
#        outfile, os.getcwd()))
if not ofs.open(outfile):
    oechem.OEThrow.Fatal("Unable to open %s for writing" % outfile)
# write the first 20 records of the dataset (skipping incomplete ones) to SDF
for i, index in enumerate(ds.df.index):
    # get the record of each entry
    record = ds.get_record(name=index, specification=spec)
    if i%10 == 0:
        print(i)
    if i == 20:
        break
    print(index)
    if record.status == "COMPLETE":
        # get optimized molecule of the record
        qc_mol = record.get_final_molecule()
        # convert the qcelemental molecule to an OpenEye molecule
        qcjson_mol = qc_mol.dict(encoding='json')
        oemol = cmiles.utils.load_molecule(qcjson_mol)
        # convert energy from Hartrees to kcal/mol
        ene = record.get_final_energy()*qcel.constants.hartree2kcalmol
        # add name and energy tag to the mol
        oemol.SetTitle(f"full_{i+1}")
        oechem.OESetSDData(oemol, "Energy QCArchive", str(ene))
        # write molecule to file
        oechem.OEWriteConstMolecule(ofs, oemol)
ofs.close()
#https://docs.eyesopen.com/toolkits/python/oechemtk/oemol.html
# read the file back, letting OpenEye group consecutive records into
# multi-conformer molecules according to the chosen conformer test
ifs = oechem.oemolistream()
#ifs.SetConfTest(oechem.OEAbsCanonicalConfTest())
ifs.SetConfTest(oechem.OEAbsoluteConfTest(False)) # false means confs may have diff titles
if not ifs.open(outfile):
    raise FileNotFoundError(f"Unable to open {outfile} for reading")
mols = ifs.GetOEMols()
for i, mol in enumerate(mols):
    for j, conf in enumerate(mol.GetConfs()):
        print(i, mol.NumConfs(), conf.GetTitle(), oechem.OEMolToSmiles(conf))
# ## Look into inconsistent SMILES strings after conversion
# check out a single molecule and its final geometry
# (same parent SMILES, conformer suffix -3)
optrec = ds.get_record(name="CO/N=C/1\C[N@](C[C@H]1C[NH3+])c2c(cc3c(=O)c(cn(c3n2)C4CC4)C(=O)[O-])F-3", specification="default")
print(optrec.status)
optrec.get_final_molecule()
# check out a single molecule and its final geometry
# (same parent SMILES, conformer suffix -6)
optrec = ds.get_record(name="CO/N=C/1\C[N@](C[C@H]1C[NH3+])c2c(cc3c(=O)c(cn(c3n2)C4CC4)C(=O)[O-])F-6", specification="default")
print(optrec.status)
optrec.get_final_molecule()
# ## Write out the whole set
#
# This took about 5.5 hours
# open an outstream file
outfile = 'whole.sdf'
ofs = oechem.oemolostream()
#if os.path.exists(outfile):
#    raise FileExistsError("Output file {} already exists in {}".format(
#        outfile, os.getcwd()))
if not ofs.open(outfile):
    oechem.OEThrow.Fatal("Unable to open %s for writing" % outfile)
# +
# write every completed record of the dataset; progress printed every 100
for i, index in enumerate(ds.df.index):
    # get the record of each entry
    record = ds.get_record(name=index, specification=spec)
    if i%100 == 0:
        print(i)
    if record.status == "COMPLETE":
        # get optimized molecule of the record
        qc_mol = record.get_final_molecule()
        # convert the qcelemental molecule to an OpenEye molecule
        qcjson_mol = qc_mol.dict(encoding='json')
        oemol = cmiles.utils.load_molecule(qcjson_mol)
        # convert energy from Hartrees to kcal/mol
        ene = record.get_final_energy()*qcel.constants.hartree2kcalmol
        # add name and energy tag to the mol
        oemol.SetTitle(f"full_{i+1}")
        oechem.OESetSDData(oemol, "SMILES QCArchive", index)
        oechem.OESetSDData(oemol, "Energy QCArchive", str(ene))
        # write molecule to file
        oechem.OEWriteConstMolecule(ofs, oemol)
ofs.close()
| 01_setup/accessed_nov2019/extract_qcarchive_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Exploration of RISE with mnist binary
#
# Function : Exploration of RISE mnist binary
# Author : Team DIANNA
# Contributor :
# First Built : 2021.08.25
# Last Update : 2021.08.25
# Note : We ran the method using our own trained model on mnist and various instances from the mnist dataset. Results look random. There is no sense that we can make of the heatmaps.
import os
import dianna
import onnx
import onnxruntime
import numpy as np
# %matplotlib inline
from matplotlib import pyplot as plt
from torchvision import datasets, transforms
from scipy.special import softmax
import pandas as pd
from dianna.methods import RISE
from dianna import visualization
# +
# load data
dataset_root = os.path.expanduser('~/projects/DIANNA/datasets/leafsnap/leafsnap-dataset-30subset/')
img_size = 128
# apply same transform as during training: resize and crop to a square image, then convert to tensor
transform = transforms.Compose([transforms.Resize(img_size),
transforms.CenterCrop(img_size),
transforms.ToTensor()])
test_data = datasets.ImageFolder(os.path.join(dataset_root, 'dataset/split/test'), transform=transform,
is_valid_file=lambda fname: fname.endswith('.jpg'))
nsample = len(test_data)
nspecies = len(test_data.classes)
print(f'Number of samples: {nsample}')
print(f'Number of species: {nspecies}')
X_test = np.array([instance[0].numpy() for instance in test_data])
X_test = np.transpose(X_test, (0, 2, 3, 1))
y_test = np.array([instance[1] for instance in test_data])
# -
X_test.shape
plt.imshow(X_test[0])
# # Predict classes for test data
# +
def run_model(data):
    """Run the leafsnap ONNX classifier on a batch of channels-last images.

    Parameters
    ----------
    data : ndarray
        Batch of images, shape (N, H, W, C); transposed here to the NCHW
        layout the model expects and cast to float32.

    Returns
    -------
    ndarray
        Softmax class probabilities, shape (N, n_classes).
    """
    data = np.transpose(data, (0, 3, 1, 2)).astype(np.float32)
    # creating an InferenceSession is expensive; cache it on the function so
    # the many repeated calls RISE makes (one per masked batch) reuse it
    sess = getattr(run_model, "_session", None)
    if sess is None:
        fname = os.path.expanduser('~/surfdrive/Shared/datasets/leafsnap/leafsnap_model.onnx')
        sess = onnxruntime.InferenceSession(fname)
        run_model._session = sess
    input_name = sess.get_inputs()[0].name
    output_name = sess.get_outputs()[0].name
    onnx_input = {input_name: data}
    pred_onnx = sess.run([output_name], onnx_input)
    return softmax(pred_onnx[0], axis=1)
pred_onnx = run_model(X_test)
# -
# Print class and image of a single instance in the test data
i_instance = 50
# true label of the chosen instance
target_class = y_test[i_instance]
print(target_class)
# model's class probabilities for the same instance
print(pred_onnx[i_instance])
plt.imshow(X_test[i_instance])
# +
# heatmaps = dianna.explain(run_model, X_test[[i_instance]], method="RISE", n_masks=2000, feature_res=8)
# +
# investigate which value for p_keep works best by looking at the stddev of the probabilities for the target class,
def print_stats(p_keep):
    """Fit a small RISE explainer with the given p_keep and display summary
    statistics of the masked predictions for the target class."""
    rise = RISE(n_masks=100, feature_res=8, p_keep=p_keep)
    rise(run_model, X_test[[i_instance]])
    target_probs = rise.predictions[:, y_test[i_instance]]
    display(pd.DataFrame(target_probs).describe())
# print_stats(.5) # stddev 0.04, too low
print_stats(.2) # stddev .22, min value .14, max value .97. Seems good.
# -
# full explanation run with the chosen p_keep and many more masks
explainer = RISE(n_masks=2000, feature_res=8, p_keep=.2)
heatmaps = explainer(run_model, X_test[[i_instance]])
# overlay heatmap on the input image, then plain grayscale heatmap
visualization.plot_image(heatmaps[target_class], X_test[i_instance], heatmap_cmap='bwr')
visualization.plot_image(heatmaps[target_class], heatmap_cmap='gray')
# # Conclusion
# RISE does show some clear structure in the resulting heatmap, but it is not entirely clear how this relates to the shape of the leaf. It might detect mostly the edges of the leaf.
# +
def describe(arr):
    """Print the shape and basic summary statistics (min/max/std) of an array."""
    summary = ('shape:', arr.shape, 'min:', np.min(arr),
               'max:', np.max(arr), 'std:', np.std(arr))
    print(*summary)
describe(heatmaps[target_class])
# describe(heatmaps[1])
# -
# show the first 10 random occlusion masks RISE generated
for i in range(10):
    plt.imshow(explainer.masks[i])
    plt.show()
| example_data/xai_method_study/RISE/rise_leafsnap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Preliminaries
#
# The `pandas` library allows the user several data structures for different data manipulation tasks:
# 1. Data storage through its `Series` and `DataFrame` data structures.
# 2. Data filtering using multiple methods from the package.
# 3. Reading data from many different file formats such as `csv`, `txt`, `xlsx`, ...
#
# Below we provide a brief overview of the `pandas` functionalities needed for these exercises. The complete documentation can be found on the [`pandas` website](https://pandas.pydata.org/).
#
# ## Pandas data structures
#
# ### Series
# The Pandas Series data structure is similar to a one-dimensional array. It can store any type of data. The values are mutable but the size not.
#
# To create `Series`, we call the `pd.Series()` method and pass an array. A `Series` may also be created from a numpy array.
# +
import pandas as pd
import numpy as np
# a Series from a plain Python list
first_series = pd.Series([1,10,100,1000])
print(first_series)
# a Series from a numpy array
teams = np.array(['PSV','Ajax','Feyenoord','Twente'])
second_series = pd.Series(teams)
print('\n')
print(second_series)
# -
# -
# ### DataFrame
# One can think of a `DataFrame` as a table with rows and columns (2D structure). The columns can be of a different type (as opposed to `numpy` arrays) and the size of the `DataFrame` is mutable.
#
# To create `DataFrame`, we call the `pd.DataFrame()` method and we can create it from scratch or we can convert a numpy array or a list into a `DataFrame`.
# +
# DataFrame from scratch
first_dataframe = pd.DataFrame({
    "Position": [1, 2, 3, 4],
    "Team": ['PSV','Ajax','Feyenoord','Twente'],
    "GF": [80, 75, 75, 70],
    "GA": [30, 25, 40, 60],
    "Points": [79, 78, 70, 66]
})
print("From scratch: \n {} \n".format(first_dataframe))
# DataFrame from a list
data = [[1, 2, 3, 4], ['PSV','Ajax','Feyenoord','Twente'],
        [80, 75, 75, 70], [30, 25, 40, 60], [79, 78, 70, 66]]
columns = ["Position", "Team", "GF", "GA", "Points"]
second_dataframe = pd.DataFrame(data, index=columns)
print("From list: \n {} \n".format(second_dataframe.T)) # the '.T' operator is explained later on
# DataFrame from numpy array
# note: a numpy array holds a single dtype, so mixing numbers and strings
# coerces every value to a string here
data = np.array([[1, 2, 3, 4], ['PSV','Ajax','Feyenoord','Twente'],
                 [80, 75, 75, 70], [30, 25, 40, 60], [79, 78, 70, 66]])
columns = ["Position", "Team", "GF", "GA", "Points"]
third_dataframe = pd.DataFrame(data.T, columns=columns)
print("From numpy array: \n {} \n".format(third_dataframe))
# -
# -
# ### DataFrame attributes
# This section gives a quick overview of some of the `pandas.DataFrame` attributes such as `T`, `index`, `columns`, `iloc`, `loc`, `shape` and `values`.
# transpose the index and columns
print(third_dataframe.T)
# index makes reference to the row labels
print(third_dataframe.index)
# columns makes reference to the column labels
print(third_dataframe.columns)
# iloc allows to access the index by integer-location (e.g. all team names, which are in the second columm)
print(third_dataframe.iloc[:,1])
# loc allows to access the index by label(s)-location (e.g. the team name of the first row, in the "Team" columm)
print(third_dataframe.loc[0, 'Team'])
# shape returns a tuple with the DataFrame dimension, similar to numpy
print(third_dataframe.shape)
# values return a Numpy representation of the DataFrame data
print(third_dataframe.values)
# ### DataFrame methods
# This section gives a quick overview of some of the `pandas.DataFrame` methods such as `head`, `describe`, `concat`, `groupby`,`rename`, `filter`, `drop` and `isna`. To import data from CSV or MS Excel files, we can make use of `read_csv` and `read_excel`, respectively.
# print the first few rows in your dataset with head()
print(third_dataframe.head()) # In this case, it is not very useful because we don't have thousands of rows
# get the summary statistics of the DataFrame with describe()
print(third_dataframe.describe())
# +
# concatenate (join) DataFrame objects using concat()
# first, we will split the above DataFrame in two different ones
df_a = third_dataframe.loc[[0,1],:]
df_b = third_dataframe.loc[[2,3],:]
print(df_a)
print('\n')
print(df_b)
print('\n')
# now, we concatenate both datasets
df = pd.concat([df_a, df_b])
print(df)
# +
# group the data by certain variable via groupby()
# here, we have grouped the data by goals for, which in this case is 75
# note: the key is the string '75' because third_dataframe was built from a
# mixed-type numpy array, so every column holds strings
group = df.groupby('GF')
print(group.get_group('75'))
# -
# rename() helps you change the column or index names
print(df.rename(columns={'Position':'Pos','Team':'Club'}))
# build a subset of rows or columns of your dataset according to labels via filter()
# here, items refer to the variable names: 'Team' and 'Points'; to select columns, we specify axis=1
print(df.filter(items=['Team', 'Points'], axis=1))
# dropping some labels
print(df.drop(columns=['GF', 'GA']))
# +
# search for NA (not available) entries in the DataFrame
print(df.isna()) # No NA values
print('\n')
# create a pandas Series with a NA value
# the Series is W (winning matches)
# note: np.nan is the canonical spelling; the np.NaN alias was removed in NumPy 2.0
tmp = pd.Series([np.nan, 25, 24, 19], name="W")
# concatenate the Series with the DataFrame
df = pd.concat([df,tmp], axis = 1)
print(df)
print('\n')
# again, check for NA entries
print(df.isna())
# -
# ## Dataset
#
# For this week exercises we will use a dataset from the Genomics of Drug Sensitivity in Cancer (GDSC) project (https://www.cancerrxgene.org/). In this study (['Iorio et al., Cell, 2016']()), 265 compounds were tested on 1001 cancer cell lines for which different types of -omics data (RNA expression, DNA methylation, Copy Number Alteration, DNA sequencing) are available. This is a valuable resource to look for biomarkers of drugs sensitivity in order to try to understand why cancer patients responds very differently to cancer drugs and find ways to assign the optimal treatment to each patient.
#
# For this exercise we will use a subset of the data, focusing the response to the drug YM155 (Sepantronium bromide) on four cancer types, for a total of 148 cancer cell lines.
#
# | ID | Cancer type |
# |-------------|----------------------------------|
# | COAD/READ | Colorectal adenocarcinoma |
# | NB | Neuroblastoma |
# | KIRC | Kidney renal clear cell carcinoma|
# | BRCA | Breast carcinoma |
#
# We will use the RNA expression data (RMA normalised). Only genes with high variability across cell lines (variance > 5, resulting in 238 genes) have been kept.
#
# Drugs have been tested at different concentration, measuring each time the viability of the cells. Drug sensitivity is measured using the natural log of the fitted IC50 metric, which is defined as the half maximal inhibitory concentration. A lower IC50 corresponds to a more sensitive cell line because a lower amount of drug is sufficient to have a strong response, while a higher IC50 corresponds to a more resistant cell line because more drug is needed for killing the cells.
#
# Based on the IC50 metric, cells can be classified as sensitive or resistant. The classification is done by computing the $z$-score across all cell lines in the GDSC for each drug, and considering as sensitive the ones with $z$-score < 0 and resistant the ones with $z$-score > 0.
#
# The dataset is originally provided as 3 files ([original source](https://www.sciencedirect.com/science/article/pii/S0092867416307462?via%3Dihub)) :
#
# `GDSC_RNA_expression.csv`: gene expression matrix with the cell lines in the rows (148) and the genes in the columns (238).
#
# `GDSC_drug_response.csv`: vector with the cell lines response to the drug YM155 in terms of log(IC50) and as classification in sensitive or resistant.
#
# `GDSC_metadata.csv`: metadata for the 148 cell lines including name, COSMIC ID and tumor type (using the classification from ['The Cancer Genome Atlas TCGA'](https://www.cancer.gov/about-nci/organization/ccg/research/structural-genomics/tcga))
#
# For convenience, we provide the data already curated.
#
# `RNA_expression_curated.csv`: [148 cell lines , 238 genes]
#
# `drug_response_curated.csv`: [148 cell lines , YM155 drug]
#
# The curated data can be read as `pandas` `DataFrame`s in the following way:
# +
import pandas as pd
# rows are the 148 cell lines; columns are 238 genes / the YM155 drug response
gene_expression = pd.read_csv("./data/RNA_expression_curated.csv", sep=',', header=0, index_col=0)
drug_response = pd.read_csv("./data/drug_response_curated.csv", sep=',', header=0, index_col=0)
# -
# -
# You can use the `DataFrame`s directly as inputs to the the `sklearn` models. The advantage over using `numpy` arrays is that the variable are annotated, i.e. each input and output has a name.
# ## Tools
# The `scikit-learn` library provides the required tools for linear regression/classification and shrinkage, as well as for logistic regression.
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import LogisticRegression
# Note that the notation used for the hyperparameters in the `scikit-learn` library is different from the one used in the lecture. More specifically, in the lecture $\alpha$ is the tunable parameter to select the compromise between Ridge and Lasso. Whereas, `scikit-learn` library refers to `alpha` as the tunable parameter $\lambda$. Please check the documentation for more details.
# # Exercises
#
# ## Selection of the hyperparameter
#
# Implement cross-validation (using `sklearn.grid_search.GridSearchCV`) to select the `alpha` hyperparameter of `sklearn.linear_model.Lasso`.
# +
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Lasso
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
#Import the dataset
gene_expression = pd.read_csv("./data/RNA_expression_curated.csv", sep=',', header=0, index_col=0)
drug_response = pd.read_csv("./data/drug_response_curated.csv", sep=',', header=0, index_col=0)
#X_train, X_test, y_train, y_test = train_test_split(gene_expression, drug_response, stratify=drug_response, random_state=40)
#create the model pipeline: standardize the genes, then fit Lasso
model = Pipeline([
    ("scaler",StandardScaler()),
    ("lasso", Lasso(alpha=1.0, max_iter=1000))
])
#perform a grid search for the alpha parameter (default k-fold cross-validation)
model = GridSearchCV(model, {"lasso__alpha": np.logspace(-5,1,num=100)})
#print(model.get_params().keys())
#print(model.get_params())
#Fit the model
model.fit(gene_expression,drug_response)
#Do an evaluation of the model
#print(model.best_estimator_)
print(model.best_params_)
#print(model.cv_results_)
# NOTE(review): this MSE is computed on the training data (no held-out set),
# so it underestimates the generalization error
print(mean_squared_error(drug_response,model.predict(gene_expression)))
# -
# ## Feature selection
#
# Look at the features selected using the hyperparameter which corresponds to the minimum cross-validation error.
#
# <p><font color='#770a0a'>Is the partition in training and validation sets playing a role in the selection of the hyperparameter? How will this affect the selection of the relevant features?</font></p>
#
# <p><font color='#770a0a'>Should the value of the intercept also be shrunk to zero with Lasso and Ridge regression? Motivate your answer.</font></p>
# Feature selection answer:
# The split of the training and validation set plays a role in the selection of the hyperparameters. First of all, if the trainingset is very large in comparison to the validation set, the hyperparameter is more likely to fit the training data well. But on the validation set it might perform worse.
# Also, especially for small dataset this plays a role. Because the validation set is more likely to differ/variate from the training set.
#
# The features are selected based on the training data. So if the partition of training and validation is poor (and the hyperparameter is biased to training), the features might not work very well on the validation data.
#
# The value of the intercept should not be shrunken to zero with Lasso and Ridge. Because this is the starting point of linear regression, we cannot know for sure if this is correct (if w0 is zero).
# Also this is most unlikely when the linear regression is negative, because then all values would be negative.
#
# ## Bias-variance
#
# Show the effect of the regularization on the parameter estimates in terms of bias and variance. For this you can repeat the optimization 100 times using bootstrap and visualise the profile of the Lasso regression coefficient over a grid of the hyperparameter, optionally including the variability as error bars.
#
# <p><font color='#770a0a'>Based on the visual analysis of the plot, what are your observation on bias and variance in relation to model complexity? Motivate your answer.</font></p>
#
#
# +
from sklearn.linear_model import Lasso
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from random import randint
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from scipy import stats
import warnings
warnings.filterwarnings('ignore')
gene_expression = pd.read_csv("./data/RNA_expression_curated.csv", sep=',', header=0, index_col=0)
drug_response = pd.read_csv("./data/drug_response_curated.csv", sep=',', header=0, index_col=0)
# grid of regularization strengths; per-alpha bootstrap mean and SEM of MSE
alphas=np.logspace(-5,0,50)
means=np.zeros((len(alphas),1))
stds=np.zeros((len(alphas),1))
n_boot = 100  # number of bootstrap repetitions per alpha
for idx,alpha in enumerate(alphas):
    # BUGFIX: the error array must have one entry per bootstrap repetition;
    # it was previously sized len(drug_response) (148) while only 100 entries
    # were filled, so 48 trailing zeros deflated the mean and SEM
    mses=np.zeros((n_boot,1))
    for i in range(0,n_boot):
        ti = [randint(0, len(drug_response)-1) for p in range(0, len(drug_response))] #select random indices
        data_genes=gene_expression.iloc[ti]
        data_response=drug_response.iloc[ti]
        X_train, X_val, y_train, y_val = train_test_split(
            data_genes, data_response,test_size=0.2)
        model=Pipeline([
            ('scaler',StandardScaler()),
            ('LR',Lasso(alpha=alpha))
        ])
        model.fit(X_train,y_train)
        pred=model.predict(X_val)
        mse=mean_squared_error(y_val,pred)
        mses[i]=mse
    means[idx]=np.mean(mses)
    stds[idx]=stats.sem(mses)
# %matplotlib tk
fig = plt.gcf()
fig.set_size_inches(12.5, 7.5)
plt.errorbar(x=np.log10(alphas),y=means,yerr=stds,fmt='o', color='red',
             ecolor='lightgray', elinewidth=3, capsize=5)
plt.xlabel(r'log($\alpha$) (-)')
plt.ylabel('Mean-Squared Error (-)')
plt.title(r'Regularization parameter $\alpha$ versus MSE')
plt.show()
# index of the minimum mean error, then the largest alpha still within one
# standard error of that minimum (the "one-standard-error rule")
idx=np.argmin(means)
minimum=np.log10(alphas[idx])
optimal=idx+np.argmax(0<(means[idx:]-(means[idx]+stds[idx])))
lamb=np.log10(alphas[optimal])
print('The minimum error of the model is reached with log('+chr(945)+') = '+str(minimum))
print('The optimal value for log('+chr(945)+') = '+str(lamb))
# -
# The figure attached as bias_variance_week3.png shows that log($\alpha$) has a minimum value at the log of -1,12. Taking the standard error of the mean of this minimum $\alpha$ into account leads to a final value of -0,92 for the log of the hyperparameter $\alpha$. Increasing the value for $\alpha$ would lead to a decrease of the model complexity, which in turn gives a lower variance and a higher bias of the model. Decreasing the value of $\alpha$ does therefore provide a model with a lower bias, but at the cost of a higher variance.
# ## Logistic regression
#
# <p><font color='#770a0a'>Write the expression of the objective function for the penalized logistic regression with $L_1$ and $L_2$ regularisation (as in Elastic net).</font></p>
# \begin{equation}
# V = \sum_{i=1}^{N}\left[y_i(\beta_{0}+\beta^{T}x_i)-\log\left(1+e^{\beta_{0}+\beta^{T}x_i}\right)\right]-\lambda\sum_{j=1}^{p}\left(\alpha\beta_{j}^{2}+(1-\alpha)|\beta_j|\right)
# \end{equation}
| practicals/Practicals week 3 submission/week_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
plt.style.use("ggplot")
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# -
# load Kaggle house-price data: train.csv has SalePrice, test.csv does not
df = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
df.dtypes
df.head()
df.describe()
# fraction of missing values per column, most-missing first
(df.isnull().sum()/df.shape[0]).sort_values(ascending=False)
plt.figure(figsize=[17,10])
sns.heatmap(df.corr().abs())
plt.show()
df.corr()['SalePrice'].sort_values(ascending=False)
(df.isnull().sum()/df.shape[0]).sort_values(ascending=False)
# keep numeric columns only and fill gaps by interpolation
data_train = df.select_dtypes(include=[np.number])
data_test = test.select_dtypes(include=[np.number])
data_train = data_train.interpolate()
data_test = data_test.interpolate()
(data_train.isnull().sum()/df.shape[0]).sort_values(ascending=False)
# log-transform the target to reduce skew (inverted with np.exp below)
data_train["SalePrice"] = np.log(data_train['SalePrice'])
data_train.head()
data_train.drop('Id', axis=1, inplace=True)
data_test.drop("Id", axis=1, inplace=True)
X = data_train.drop("SalePrice", axis=1)
y = data_train['SalePrice']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.33)
reg = LinearRegression()
reg.fit(X_train, y_train)
# BUGFIX: .format() must be called on the format string; the original passed
# format(score) as a second print argument, so {:.2%} was printed literally
print("Score no conjunto de treino: {:.2%}".format(reg.score(X_train, y_train)))
print("Score no conjunto de teste: {:.2%}".format(reg.score(X_test, y_test)))
# predict on the test set and undo the log transform
test['SalePrice'] = np.exp(reg.predict(data_test))
test['SalePrice'].hist(alpha=0.6, bins=50)
df['SalePrice'].hist(alpha=0.6, bins=50)
plt.show()
test[['Id', 'SalePrice']].to_csv("submission.csv", index=False)
| projeto_preco_de_casas/Price Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left">
# ## _*Getting Started with QISKit*_
#
# The latest version of this notebook is available on https://github.com/QISKit/qiskit-tutorial.
#
# ***
# ### Contributors
# <NAME>, <NAME>, <NAME>, <NAME>
# ## QISKit (Quantum Information Software developer Kit)
#
# This tutorial aims to explain how to use QISKit. We assume you have installed QISKit if not please look at [qiskit.org](http://www.qiskit.org) or the install [documentation](https://github.com/QISKit/qiskit-tutorial/blob/master/INSTALL.md).
#
# QISKit is a Python software development kit that you can use to create your quantum computing programs based on circuits defined through the [OpenQASM](https://github.com/QISKit/qiskit-openqasm) specification and execute them on several backends (real quantum processors online, simulators online, and simulators on local). For the online backends conects to the [IBM Q Experience project](http://quantumexperience.ng.bluemix.net/).
#
# In addition to this tutorial, we have other tutorials that introduce you to more complex concepts directly related to quantum computing.
#
# More examples:
# - Familiarize yourself with the important concepts of [superposition and entanglement](../qis/superposition_and_entanglement.ipynb).
import sys, time
try:
    sys.path.append("../../") # go to parent dir
    import Qconfig
    qx_config = {
        "APItoken": Qconfig.APItoken,
        "url": Qconfig.config['url']}
# BUGFIX: catch Exception rather than a bare except, which would also swallow
# KeyboardInterrupt and SystemExit; any failure to load Qconfig (missing
# module, missing attribute/key) falls back to the placeholder credentials
except Exception:
    qx_config = {
        "APItoken":"<PASSWORD>",
        "url":"https://quantumexperience.ng.bluemix.net/api"}
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, QISKitError
from qiskit import available_backends, execute, register, get_backend
# ## Basic Concept
#
# The basic concept of our QISKit core is an array of quantum circuits. The workflow consists of three stages: [Build](#sectionB), [Compile](#sectionC), and [Run](#sectionR). Build allows you to make different quantum circuits that represent the problem you are solving; Compile allows you to rewrite them to run on different backends (simulators/real chips of different [quantum volumes](http://ibm.biz/qiskit-quantum-volume), sizes, fidelity, etc); and Run launches the jobs. After the jobs have been run, the data is collected. There are methods for putting this data together, depending on the program. This either gives you the answer you wanted, or allows you to make a better program for the next instance.
#
# ### Building your circuit: Create it <a id='sectionB'></a>
# The basic elements needed for your first program are the QuantumCircuit, QuantumRegister, and ClassicalRegister.
# +
# Create a Quantum Register called "q" with 3 qubits
qr = QuantumRegister(3)
# Create a Classical Register called "c" with 3 bits
cr = ClassicalRegister(3)
# Create a Quantum Circuit called involving "qr" and "cr"
circuit = QuantumCircuit(qr, cr)
# -
# After you create the circuit with its registers, you can add gates to manipulate the registers. As you proceed though the tutorial you will find more quantum gates and circuits but the below is an example of the quantum half adder.
# +
# Not gate on qubit 0
circuit.x(qr[0])
# Not gate on qubit 1
circuit.x(qr[1])
# Barrier to separate the input preparation from the adder circuit
circuit.barrier(qr[0])
circuit.barrier(qr[1])
circuit.barrier(qr[2])
# Toffoli gate from qubit 0,1 to qubit 2 (computes the carry bit)
circuit.ccx(qr[0], qr[1], qr[2])
# CNOT (Controlled-NOT) gate from qubit 0 to qubit 1 (computes the sum bit)
circuit.cx(qr[0], qr[1])
# measure gate from qr to cr
circuit.measure(qr, cr)
# -
# -
# ### Extract OpenQASM
#
# You can obtain a OpenQASM representation of your code.
# +
# QASM from a program: textual OpenQASM representation of the circuit
QASM_source = circuit.qasm()
print(QASM_source)
# -
# ### Visualize Circuit
#
# You can visualize your circuit using QISKit's `circuit_drawer`, which plots the unrolled circuit in the specified basis.
#
# NOTE: To get this to work you need to make sure poppler is installed.
from qiskit.tools.visualization import circuit_drawer
circuit_drawer(circuit)
# ### Execute <a id='sectionC'></a>
# First we need to choose the backend. Lets start with the local simulator
backend = 'local_qasm_simulator'
# Next we need to run the circuit
# Create a Quantum Program for execution (compiles and runs; returns a job object)
job = execute(circuit, backend)
# Execute compiles and runs the circuits
#
# ```
# execute(name_of_circuits=None, backend='local_qasm_simulator', config=None, basis_gates=None, coupling_map=None, initial_layout=None, shots=1024, max_credits=3, seed=None, qobj_id=None, hpc=None, skip_translation=False)
# ```
#
# (**Tip**: You can obtain the above parameters in Jupyter. Simply place the text cursor on a function and press Shift+Tab)
#
# When you run a program a job object is made
#
# ```
# job.status
# ```
# returns the status of the job
#
# ```
# job.done
# ```
# is a bool that returns True if the job is done
#
# ```
# job.cancel
# ```
# cancels the job
#
# ```
# job.result()
# ```
# returns the job result object
# NOTE(review): job.status is used here without parentheses, so in this qiskit
# version it appears to be an attribute/property -- confirm against the API
job.status
result = job.result()
# ### Result
# You can access the result via the function
#
# ```get_counts(circuit)```.
# histogram of measured bitstrings for the circuit
result.get_counts(circuit)
# ### Execute on a Real Device<a id='sectionR'></a>
#
# To run it on a real device we need to register the IBMQ backends. For the public devices this can be done using
#
# ```
# register(args,...)
# ```
register(qx_config['APItoken'], qx_config['url'])
def lowest_pending_jobs():
    """Return the name of the available real (non-local, non-simulator)
    backend with the fewest pending jobs."""
    names = available_backends({'local': False, 'simulator': False})
    statuses = []
    for name in names:
        statuses.append(get_backend(name).status)
    candidates = [s for s in statuses if s['available'] is True]
    best = min(candidates, key=lambda s: s['pending_jobs'])
    return best['name']
backend = lowest_pending_jobs()
print("the best backend is " + backend)
# +
shots = 1024           # Number of shots to run the program (experiment); maximum is 8192 shots.
max_credits = 3        # Maximum number of credits to spend on executions.
job_exp = execute(circuit, backend=backend, shots=shots, max_credits=max_credits)
# -
# poll the job every `interval` seconds until it completes
lapse = 0
interval = 10
while not job_exp.done:
    print('Status @ {} seconds'.format(interval * lapse))
    print(job_exp.status)
    time.sleep(interval)
    lapse += 1
print(job_exp.status)
result_real = job_exp.result()
# Like before, the counts from the execution can be obtained using ```get_counts('name')```
result_real.get_counts(circuit)
# The job ID can be obtained using the following which you can use later to get the job if your experiment takes longer to run then you have time to wait around.
# +
jobID = job_exp.job_id
print('JOB ID: {}'.format(jobID))
# -
jobID
| reference/tools/getting_started.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:unidata-python-workshop]
# language: python
# name: conda-env-unidata-python-workshop-py
# ---
# The following script takes you through the steps of retrieving an Upper Air vertical profile from an AWIPS EDEX server and plotting a Skew-T/Log-P chart with Matplotlib and MetPy.
#
# The **bufrua** plugin returns separate objects for parameters at **mandatory levels** and at **significant temperature levels**. For the Skew-T/Log-P plot, significant temperature levels are used to plot the pressure, temperature, and dewpoint lines, while mandatory levels are used to plot the wind profile.
# +
# %matplotlib inline
from awips.dataaccess import DataAccessLayer
import matplotlib.tri as mtri
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import math
from metpy.calc import wind_speed, wind_components, lcl, dry_lapse, parcel_profile
from metpy.plots import SkewT, Hodograph
from metpy.units import units, concatenate
# Set host
DataAccessLayer.changeEDEXHost("edex-cloud.unidata.ucar.edu")
request = DataAccessLayer.newDataRequest()
# Set data type
request.setDatatype("bufrua")
availableLocs = DataAccessLayer.getAvailableLocationNames(request)
availableLocs.sort()
# Parameter groups: mandatory-level fields (used for the wind profile) and
# significant-temperature-level fields (used for the T/Td traces).
MAN_PARAMS = set(['prMan', 'htMan', 'tpMan', 'tdMan', 'wdMan', 'wsMan'])
SIGT_PARAMS = set(['prSigT', 'tpSigT', 'tdSigT'])
request.setParameters("wmoStaNum", "validTime", "rptType", "staElev", "numMand",
                      "numSigT", "numSigW", "numTrop", "numMwnd", "staName")
request.getParameters().extend(MAN_PARAMS)
request.getParameters().extend(SIGT_PARAMS)
locations = DataAccessLayer.getAvailableLocationNames(request)
locations.sort()
# Set station ID (not name)
request.setLocationNames("72562") #KLBF
# Get all times
datatimes = DataAccessLayer.getAvailableTimes(request)
# Get most recent record
response = DataAccessLayer.getGeometryData(request,times=datatimes[-1].validPeriod)
# Initialize data arrays
tdMan,tpMan,prMan,wdMan,wsMan = np.array([]),np.array([]),np.array([]),np.array([]),np.array([])
prSig,tpSig,tdSig = np.array([]),np.array([]),np.array([])
manGeos = []
sigtGeos = []
# Build arrays: each response object carries either mandatory-level or
# significant-temperature parameters; route it to the matching arrays.
for ob in response:
    parm_array = [x.decode('utf-8') for x in ob.getParameters()]
    if set(parm_array) & MAN_PARAMS:
        manGeos.append(ob)
        prMan = np.append(prMan,ob.getString(b"prMan"))
        tpMan = np.append(tpMan,ob.getString(b"tpMan"))
        tdMan = np.append(tdMan,ob.getString(b"tdMan"))
        wdMan = np.append(wdMan,ob.getString(b"wdMan"))
        wsMan = np.append(wsMan,ob.getString(b"wsMan"))
        continue
    if set(parm_array) & SIGT_PARAMS:
        sigtGeos.append(ob)
        prSig = np.append(prSig,ob.getString(b"prSigT"))
        tpSig = np.append(tpSig,ob.getString(b"tpSigT"))
        tdSig = np.append(tdSig,ob.getString(b"tdSigT"))
        continue
# Sort mandatory levels (but not sigT levels) because of the 1000.MB interpolation inclusion
# (descending pressure = bottom of the atmosphere first)
ps = prMan.argsort()[::-1]
wpres = prMan[ps]
direc = wdMan[ps]
spd = wsMan[ps]
tman = tpMan[ps]
dman = tdMan[ps]
# Flag missing data (values at or below -9999 are treated as missing)
prSig[prSig <= -9999] = np.nan
tpSig[tpSig <= -9999] = np.nan
tdSig[tdSig <= -9999] = np.nan
wpres[wpres <= -9999] = np.nan
tman[tman <= -9999] = np.nan
dman[dman <= -9999] = np.nan
direc[direc <= -9999] = np.nan
spd[spd <= -9999] = np.nan
# assign units
# (pressure appears to arrive in Pa, /100 -> mbar; temperatures in Kelvin,
# -273.15 -> degC -- TODO confirm against the bufrua plugin docs)
p = (prSig/100) * units.mbar
T = (tpSig-273.15) * units.degC
Td = (tdSig-273.15) * units.degC
wpres = (wpres/100) * units.mbar
tman = tman * units.degC
dman = dman * units.degC
u,v = wind_components(spd, np.deg2rad(direc))
# Create SkewT/LogP
plt.rcParams['figure.figsize'] = (8, 10)
skew = SkewT()
skew.plot(p, T, 'r', linewidth=2)
skew.plot(p, Td, 'g', linewidth=2)
skew.plot_barbs(wpres, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-60, 30)
# Title uses `ob`, which leaks out of the loop above (last response object).
title_string = " T(F) Td "
title_string += " " + ob.getString(b"staName").decode('UTF-8')
title_string += " " + str(ob.getDataTime().getRefTime())
title_string += " (" + str(ob.getString(b"staElev")) + "m elev)"
title_string += "\n" + str(round(T[0].to('degF').item(),1))
title_string += " " + str(round(Td[0].to('degF').item(),1))
plt.title(title_string, loc='left')
# Calculate LCL height and plot as black dot
lcl_pressure, lcl_temperature = lcl(p[0], T[0], Td[0])
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Calculate full parcel profile and add to plot as black line
prof = parcel_profile(p, T[0], Td[0]).to('degC')
skew.plot(p, prof, 'k', linewidth=2)
# An example of a slanted line at constant T -- in this case the 0 isotherm
l = skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Draw hodograph
ax_hod = inset_axes(skew.ax, '30%', '30%', loc=3)
h = Hodograph(ax_hod, component_range=max(wsMan))
h.add_grid(increment=20)
h.plot_colormapped(u, v, spd)
# Show the plot
plt.show()
| notebooks/AWIPS/Upper_Air_BUFR_Soundings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.9 (''.venv'': poetry)'
# language: python
# name: python3
# ---
# # Run Multiple Tools in a Pipeline
#
# In this notebook, we explain how to use Arachne pipeline functionality.
# In Arachne, we leverage an open-source Python framework for constructing pipelines (i.e., [Kedro](https://kedro.readthedocs.io/en/stable/index.html#)).
# First, we describe how to use this feature from Arachne CLI, and then the way to execute from Python interface.
# ## Prepare a Model
#
# For this tutorial, we will be working with the TFLite Converter and TVM for compiling a Tensorflow (Keras) model by TVM after converting it into a TFLite model.
# First, we prepare a Tensorflow model representing ResNet-50 v2 like the previous tutorials.
# +
import tensorflow as tf
# Build the stock Keras ResNet-50 v2 and save it as an HDF5 file so the
# pipeline examples below can consume it.
model = tf.keras.applications.resnet_v2.ResNet50V2()
model.summary()
model.save("/tmp/resnet50-v2.h5")
# -
# ## Construct and Run a Pipeline by `arachne.driver.pipeline`
#
# Now, you are ready to run a pipeline by using `arachne.driver.pipeline` which is a CLI for constructing and executing a pipeline including multiple tools.
# To define a pipeline, you have to specify the `pipeline` option that takes a list of tool names.
# To configure the tool behavior, you can use tool specific options as well as `arachne.driver.cli`.
# + language="bash"
#
# CUDA_VISIBLE_DEVICES=6 python -m arachne.driver.pipeline model_file=/tmp/resnet50-v2.h5 output_path=/tmp/output.tar pipeline=[tflite_converter,tvm] tools.tflite_converter.ptq.method=fp16 tools.tvm.cpu_attr=[+fma,+avx2] tools.tvm.composite_target=[tensorrt,cpu]
# -
# Here, we specify the two tools in a pipeline (`pipeline=[tflite_converter,tvm]`).
# To configure the behavior of each tool, we can control it by modifying the `tools.tflite_converter` and `tools.tvm` options.
# In this example, the TFLite Converter first converts the input model in FP16 mode, and then TVM compiles the converted model for the TensorRT target while allowing the remaining graph to execute on the CPU.
# ## Construct and Run a Pipeline by Python Interface
#
# If you want to use pipeline functionality with Python interfaces, please import the `arachne.driver.pipeline` module.
# First, you should setup the `arachne.driver.pipeline.PipelineConfig` object which is a config class for pipeline.
# To specify the tools in pipeline, you should pass a list of tool names to `PipelineConfig.pipeline`.
# The `arachne.driver.pipeline.get_default_tool_configs` is used for retrieving the default configs for specified configs and saving the result to `PipelineConfig.tools`.
# To modify the behavior of each tool, you can change the value under `PipelineConfig.tools`.
# Last, `arachne.driver.pipeline.run` is used for executing the pipeline.
# +
from arachne.utils.model_utils import init_from_file, save_model
from arachne.driver.pipeline import PipelineConfig, get_default_tool_configs, run
# Prepare an input model
model_path = "/tmp/resnet50-v2.h5"
# NOTE(review): `input` shadows the builtin of the same name; consider
# renaming (left unchanged here since later cells may rely on it).
input = init_from_file(model_path)
# Construct a pipeline: TFLite conversion first, then TVM compilation.
cfg = PipelineConfig()
cfg.pipeline = ['tflite_converter', 'tvm']
cfg.tools = get_default_tool_configs(cfg.pipeline)
# Setup tflite_converter config (FP16 post-training quantization)
cfg.tools['tflite_converter'].ptq.method = "fp16"
# Setup tvm config (TensorRT with CPU fallback for unsupported ops)
cfg.tools['tvm'].cpu_target = "x86-64"
cfg.tools['tvm'].cpu_attr = ['+fma', '+avx2']
cfg.tools['tvm'].composite_target = ['tensorrt', 'cpu']
output = run(input, cfg)
save_model(model=output, output_path="/tmp/output.tar", tvm_cfg=cfg.tools['tvm'])
| examples/run_pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Using Python for Research Homework: Week 3, Case Study 2
#
# In this case study, we will find and plot the distribution of word frequencies for each translation of Hamlet. Perhaps the distribution of word frequencies of Hamlet depends on the translation --- let's find out!
# +
# DO NOT EDIT THIS CODE!
import os
import pandas as pd
import numpy as np
from collections import Counter
def count_words_fast(text):
    """Count occurrences of each word in *text*, case-insensitively.

    Punctuation and newline characters are deleted outright before
    splitting on single spaces, mirroring the course-provided routine.
    (Deleting "\n" joins words across line breaks and splitting on " "
    can yield empty-string "words" -- both are intentional here, since the
    homework grader expects exactly these counts.)
    """
    skips = [".", ",", ";", ":", "'", '"', "\n", "!", "?", "(", ")"]
    cleaned = text.lower()
    for mark in skips:
        cleaned = cleaned.replace(mark, "")
    return Counter(cleaned.split(" "))
def word_stats(word_counts):
    """Return (number of unique words, iterable of their counts)."""
    return (len(word_counts), word_counts.values())
# -
# ### Exercise 1
#
# In this case study, we will find and visualize summary statistics of the text of different translations of Hamlet. For this case study, functions `count_words_fast` and `word_stats` are already defined as in the Case 2 Videos (Videos 3.2.x).
#
# #### Instructions
# - Read in the data as a pandas dataframe using `pd.read_csv`. Use the `index_col` argument to set the first column in the csv file as the index for the dataframe. The data can be found at https://courses.edx.org/asset-v1:HarvardX+PH526x+2T2019+type@asset+block@hamlets.csv
# +
#hamlets = ## Complete this line of code! ##
# NOTE(review): the filename below looks mangled by an email-redaction pass
# ("@<EMAIL>"); it should be the hamlets.csv asset downloaded from the edX
# URL in the instructions above -- confirm the real local filename.
hamlets = pd.read_csv("asset-v1_HarvardX+PH526x+2T2019+type@<EMAIL>", index_col=0)
hamlets
# -
# ### Exercise 2
#
# In this exercise, we will summarize the text for a single translation of Hamlet in a `pandas` dataframe.
#
# #### Instructions
# - Find the dictionary of word frequency in `text` by calling `count_words_fast()`. Store this as `counted_text`.
# - Create a `pandas` dataframe named `data`.
# - Using `counted_text`, define two columns in data:
# - `word`, consisting of each unique word in text.
# - `count`, consisting of the number of times each word in `word` is included in the text.
# +
# First translation: (language, full text) pair.
language, text = hamlets.iloc[0]
# Enter your code here.
counted_text = count_words_fast(text)
# (text)
# One row per distinct word, with its occurrence count.
data =pd.DataFrame({
    "word":list(counted_text.keys()),
    "count":list(counted_text.values())
})
# counted_text.keys()
# -
# Quick sanity checks on the counts.
data[data['word']=='hamlet']
counted_text.keys()
counted_text.values()
sum(counted_text.values())
# ### Exercise 3
#
# In this exercise, we will continue to define summary statistics for a single translation of Hamlet.
#
# #### Instructions
# - Add a column to data named `length`, defined as the length of each word.
# - Add another column named `frequency`, which is defined as follows for each word in `data`:
# - If `count > 10`, `frequency` is "frequent".
# - If `1 < count <= 10`, `frequency` is "infrequent".
# - If `count == 1`, `frequency` is "unique".
# +
# write your code here!
# apply(function), Apply function to each object.
# Word length of every row, computed row-wise with DataFrame.apply.
data['length'] = data.apply(lambda row: len(row['word']), axis=1)
def frequency(row):
    """Classify a word-count row per the exercise spec.

    Returns "frequent" for count > 10, "infrequent" for 1 < count <= 10,
    and "unique" for count == 1 (counts are always >= 1 here, so exactly
    one branch fires).

    Fix: the original returned the misspelled label 'frequency' for the
    first bucket, disagreeing with the exercise instructions and with the
    `.loc`-based relabelling used later in this notebook.
    """
    if row['count'] > 10:
        return 'frequent'
    if 1 < row['count'] <= 10:
        return 'infrequent'
    if row['count'] == 1:
        return 'unique'
# Label every row, then count how many words occur exactly once.
data['frequency'] = data.apply(lambda row: frequency(row), axis=1)
data['frequency'].value_counts()['unique']
# +
# data.iloc[:, [1, 2, 5]]
# +
# Use df.loc[] and df.iloc[] to select only rows, only columns or both.
# Use df.at[] and df.iat[] to access a single value by row and column.
# First index selects rows, second index columns.
# df.iloc[10:20], Select rows 10-20.
# df.iloc[:, [1, 2, 5]], Select columns in positions 1, 2 and 5 (first column is 0).
# df.loc[:, 'x2':'x4'], Select all columns between x2 and x4 (inclusive).
# df.loc[df['a'] > 10, ['a’, 'c']], Select rows meeting logical condition, and only the specific columns .
# df.iat[1, 2] Access single value by index
# df.at[4, 'A'] Access single value by label
# +
data["length"] = data["word"].apply(len)
# Vectorized re-labelling; the order matters: the `== 1` rule must come
# last so it overwrites the `<= 10` bucket for single-occurrence words.
data.loc[data["count"] > 10, "frequency"] = "frequent"
data.loc[data["count"] <= 10, "frequency"] = "infrequent"
data.loc[data["count"] == 1, "frequency"] = "unique"
data.groupby('frequency').count()
# +
# data
# -
# ### Exercise 4
#
# In this exercise, we will summarize the statistics in data into a smaller pandas dataframe.
#
# #### Instructions
# - Create a `pandas` dataframe named `sub_data` including the following columns:
# - `language`, which is the language of the text (defined in Exercise 2).
# - `frequency`, which is a list containing the strings "frequent", "infrequent", and "unique".
# - `mean_word_length`, which is the mean word length of each value in frequency.
# - `num_words`, which is the total number of words in each frequency category.
# write your code here!
# Summary table: one row per frequency class.  The groupby results are
# indexed alphabetically (frequent, infrequent, unique), which matches the
# hand-written `frequency` list, so list and Series values align.
sub_data = pd.DataFrame({'language':'English',
                         'frequency':["frequent", "infrequent", "unique"],
                         "mean_word_length": data.groupby(by = "frequency")["length"].mean(),
                         "num_words": data.groupby(by = "frequency").size()
                         })
sub_data
# ### Exercise 5
#
# In this exercise, we will join all the data summaries for text Hamlet translation.
#
# #### Instructions
# - The previous code for summarizing a particular translation of Hamlet is consolidated into a single function called `summarize_text`. Create a pandas dataframe` grouped_data` consisting of the results of `summarize_text` for each translation of Hamlet in `hamlets`.
# - Use a `for` loop across the row indices of `hamlets` to assign each translation to a new row.
# - Obtain the `ith` row of `hamlets` to variables using the `.iloc` method, and assign the output to variables `language` and `text`.
# - Call `summarize_text` using `language` and `text`, and assign the output to `sub_data`.
# - Use the pandas `.append()` function to append to pandas dataframes row-wise to `grouped_data`.
# +
def summarize_text(language, text):
    """Build a per-frequency-class summary frame for one Hamlet translation.

    Returns a DataFrame indexed by frequency class ("frequent",
    "infrequent", "unique") carrying the translation's language, the mean
    word length of each class and the number of words in it.

    NOTE: the hand-written "frequency" list aligns with the groupby output
    only because groupby sorts its (three) keys alphabetically.
    """
    counts = count_words_fast(text)
    words = pd.DataFrame({
        "word": list(counts.keys()),
        "count": list(counts.values())
    })
    words.loc[words["count"] > 10, "frequency"] = "frequent"
    words.loc[words["count"] <= 10, "frequency"] = "infrequent"
    words.loc[words["count"] == 1, "frequency"] = "unique"
    words["length"] = words["word"].apply(len)
    grouped = words.groupby(by = "frequency")
    summary = pd.DataFrame({
        "language": language,
        "frequency": ["frequent","infrequent","unique"],
        "mean_word_length": grouped["length"].mean(),
        "num_words": grouped.size()
    })
    return(summary)
# write your code here!
# DataFrame.append was deprecated and removed in pandas 2.0: collect the
# per-translation summaries in a list and concatenate once at the end
# (also avoids quadratic re-copying on every iteration).
summaries = []
for row in range(len(hamlets)):
    # Each row of `hamlets` is one (language, full text) translation.
    language, text = hamlets.iloc[row]
    summaries.append(summarize_text(language, text))
grouped_data = pd.concat(summaries)
# -
grouped_data
grouped_data.shape
# ### Exercise 6
#
# In this exercise, we will plot our results and look for differences across each translation.
#
# #### Instructions
# - Plot the word statistics of each translations on a single plot. Note that we have already done most of the work for you.
# - Consider: do the word statistics differ by translation?
# +
colors = {"Portuguese": "green", "English": "blue", "German": "red"}
markers = {"frequent": "o","infrequent": "s", "unique": "^"}
import matplotlib.pyplot as plt
# One point per (translation, frequency-class) row: colour encodes the
# language, marker shape encodes the frequency class.
for i in range(grouped_data.shape[0]):
    row = grouped_data.iloc[i]
    plt.plot(row.mean_word_length, row.num_words,
        marker=markers[row.frequency],
        color = colors[row.language],
        markersize = 10
    )
# Build proxy artists (empty plots) so the legend shows one entry per
# colour and one per marker without duplicating the data points.
color_legend = []
marker_legend = []
for color in colors:
    color_legend.append(
        plt.plot([], [],
        color=colors[color],
        marker="o",
        label = color, markersize = 10, linestyle="None")
    )
for marker in markers:
    marker_legend.append(
        plt.plot([], [],
        color="k",
        marker=markers[marker],
        label = marker, markersize = 10, linestyle="None")
    )
plt.legend(numpoints=1, loc = "upper left")
plt.xlabel("Mean Word Length")
plt.ylabel("Number of Words")
# write your code to display the plot here!
plt.show()
# -
| HW3-2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Hipótese 5 (KNN)
# `<NAME> (<EMAIL>)`
#
# `<NAME> (<EMAIL>)`
# #### É possível predizer a classificação indicativa de um game baseado em seu gênero e suas vendas?
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.utils.multiclass import unique_labels
from sklearn.neighbors import KNeighborsClassifier
from matplotlib import pyplot as plt
df = pd.read_csv('vgsalesPP2.csv')
df.drop(['Unnamed: 0'],axis=1,inplace=True)
display(len(df.columns))
df.head()
# +
# Features: the sales columns plus the one-hot genre columns; target: the
# game's rating.  NOTE(review): the iloc ranges (5:10 and 50:57) are tied
# to the preprocessed CSV's column layout -- confirm against vgsalesPP2.csv.
all_sales = df.iloc[:, 5:10].copy()
all_genres = df.iloc[:, 50:57].copy()
X = all_sales.join(all_genres).values.copy()
y = df["Rating"].values.copy()
# -
X # Sales and genre feature values.
y # Target labels (Rating).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)
# Held-out accuracy of KNN for k = 1..49.
success = []
for i in range(1, 50):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(X_train, y_train)
    pred_i = knn.predict(X_test)
    success.append(np.mean(pred_i == y_test))
success
# +
plt.figure(figsize=(12, 6))
plt.plot(range(1, 50), success, color='red', linestyle='dashed', marker='o',
         markerfacecolor='blue', markersize=10)
plt.title('Taxa acerto de valores K ')
plt.xlabel('K Valores')
plt.ylabel('Media de Acerto')
# Pick the k with the highest held-out accuracy (list index + 1 == k).
best_result = 0
for i in success:
    if(i > best_result):
        best_result = i
best_k_value = success.index(best_result) + 1
print("Melhor resultado:",best_result,"para o k igual a",best_k_value)
# -
# Refit with the best k and predict the held-out set.
classifier = KNeighborsClassifier(n_neighbors=best_k_value)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    y_true / y_pred are integer label indices; `classes` is an array of
    display names indexed by those integers.  Returns the matplotlib Axes.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    # (this indexes `classes` by label value, so labels must be integers)
    classes = classes[unique_labels(y_true, y_pred)]
    if normalize:
        # Row-normalize so each row sums to 1 (per-true-class proportions).
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    fig, ax = plt.subplots(figsize=(40, 20))
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            # White text on dark cells, black on light, for readability.
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax
def list_string_to_list_index(list_keys, list_string):
    """Map each string in *list_string* to its index within *list_keys*.

    Builds a value -> first-index lookup table once, so the conversion is
    O(len(list_keys) + len(list_string)) instead of the original
    O(len(list_keys) * len(list_string)) repeated `list.index` scans.
    Duplicates in *list_keys* resolve to the first occurrence, matching
    `list.index` semantics.

    Raises KeyError (the original raised ValueError) if a string is not
    present in *list_keys*; no caller in this notebook catches either.
    """
    first_index = {}
    for position, key in enumerate(list_keys):
        # setdefault keeps the first occurrence only.
        first_index.setdefault(key, position)
    return [first_index[item] for item in list_string]
# +
class_names = df["Rating"].unique()
# Convert string labels to integer positions so the confusion-matrix
# helper can index `classes` with them.
y_test_index = list_string_to_list_index(list(class_names), y_test)
y_pred_index = list_string_to_list_index(list(class_names), y_pred)
plot_confusion_matrix(y_test_index, y_pred_index, classes=class_names, normalize=True,
                      title='Normalized confusion matrix')
# -
# ### Conclusão
# O melhor resultado que obtivemos foi quando se estabelece algo próximo a 37 vizinhos, tendo uma taxa de acerto por volta dos 67%. Com isso, conclui-se que é possível predizer a classificação indicativa de um jogo (rating), a partir do seu gênero e venda. Isso indica principalmente que a classificação indicativa de um jogo irá influenciar nas suas vendas. Resta saber quais ratings possuem os melhores resultados de vendas historicamente.
# Re-fit with the same best k but different neighbour-search algorithms /
# hyper-parameters, comparing held-out accuracy for each variant.
classifier = KNeighborsClassifier(n_neighbors=best_k_value, algorithm='ball_tree')
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
ball_tree_result = np.mean(y_pred == y_test)
print("Resultado da predição com o algoritmo 'ball_tree' foi de",ball_tree_result)
classifier = KNeighborsClassifier(n_neighbors=best_k_value, algorithm='kd_tree')
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
kd_tree_result = np.mean(y_pred == y_test)
print("Resultado da predição com o algoritmo 'kd_tree' foi de",kd_tree_result)
classifier = KNeighborsClassifier(n_neighbors=best_k_value, algorithm='brute')
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
brute_result = np.mean(y_pred == y_test)
print("Resultado da predição com o algoritmo 'brute' foi de",brute_result)
classifier = KNeighborsClassifier(n_neighbors=best_k_value, algorithm='brute', weights='distance')
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
weight_distance_result = np.mean(y_pred == y_test)
print("Resultado da predição com o algoritmo 'brute' e peso 'distance' foi de",weight_distance_result)
# NOTE(review): per sklearn docs, leaf_size only affects the tree-based
# algorithms; with algorithm='brute' the two runs below cannot differ from
# the plain 'brute' run above.
classifier = KNeighborsClassifier(n_neighbors=best_k_value, algorithm='brute', leaf_size=50)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
bigger_leaf_size_result = np.mean(y_pred == y_test)
print("Resultado da predição com o algoritmo 'brute' e tamanho da folha igual a 50 foi de",bigger_leaf_size_result)
classifier = KNeighborsClassifier(n_neighbors=best_k_value, algorithm='brute', leaf_size=20)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
smaller_leaf_size_result = np.mean(y_pred == y_test)
print("Resultado da predição com o algoritmo 'brute' e tamanho da folha igual a 20 foi de",smaller_leaf_size_result)
# ### Conclusão das variações no KNN
#
# Vimos que o melhor resultado obtido até então havia sido 67,36% com o k vizinhos igual a 37. Mantendo o número de vizinhos, alteramos então o parâmetro 'algorithm', e o melhor resultado obtido foi de 67,48% quando colocou-se o algoritmo 'brute'. Depois alteramos o parâmetro 'weights' mas o resultado piorou um pouco para 66,34%. Depois aumentamos e diminuimos o valor do parâmetro 'leaf_size', mas não houve melhoras significativas.
#
# Concluimos então que a melhor configuração do KNN para este caso foi a seguinte:
# KNeighborsClassifier(algorithm='brute', leaf_size=30, metric='minkowski',
# metric_params=None, n_jobs=None, n_neighbors=37, p=2,
# weights='uniform')
# Final model with the best configuration found above.
classifier = KNeighborsClassifier(n_neighbors=best_k_value, algorithm='brute')
model = classifier.fit(X_train, y_train)
predictions = classifier.predict(X_test)
# Scatter of true vs predicted labels (categorical, so points overlap).
plt.scatter(y_test, predictions)
plt.xlabel("True Values")
plt.ylabel("Predictions")
print("Score:", model.score(X_test, y_test))
# 6-fold cross-validation on the full dataset for a less split-dependent
# estimate of accuracy.
scores = cross_val_score(model, X, y, cv=6)
print ("Cross-validated scores:", scores)
predictions = cross_val_predict(model, X, y, cv=6)
plt.scatter(y, predictions)
| Hipotese5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/collanealmazan/CPEN-21A-ECE-2-1/blob/main/Demo1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="_bfcOiQNrHXJ"
#Intro to Python Programming
# + [markdown] id="U1KcqH1Rrt1Q"
# #Intro to Python Programming
#
# + colab={"base_uri": "https://localhost:8080/"} id="PyMhp31br1YA" outputId="979c67c2-c468-404e-bb75-a313750986f5"
#Python Indention
# Indentation (not braces) delimits blocks in Python.
if 5>2:
    print("Five is greater than two")
# + [markdown] id="y0HrQHQ8tq0z"
# #Python Variable
#
# + colab={"base_uri": "https://localhost:8080/"} id="6RK0DgXetznc" outputId="d3eb1e44-6bf1-4544-f3f9-66fc10ddf0e8"
# Multiple assignment: one value per name, left to right.
x = 1
a, b = 0, 1
a,b,c= "zero","one","two"
print(x)
print(a)
print(b)
print(c)
# + colab={"base_uri": "https://localhost:8080/"} id="jW00miXMuJtC" outputId="74bf339b-2118-47da-80bd-453de1235259"
# Names are case-sensitive: d and D are different variables.
d = "Sally" #This is a string
D = 'Ana'
print(d)
e = 'John'
print(e)
print(D)
# + colab={"base_uri": "https://localhost:8080/"} id="52uxFDQ9vHP5" outputId="fd763c97-855a-483e-ae8a-c0b5afd200e6"
print(type(d)) #This is a Type function
print(type(x))
# + [markdown] id="dN86n1FtuiQa"
# #Casting
# + colab={"base_uri": "https://localhost:8080/"} id="QYT_pO1Lupap" outputId="c2ed33e6-e549-4172-c826-b49c5916e397"
# Explicit conversion between numeric types.
f = float(4)
print(f)
g = int(5)
print(g)
# + [markdown] id="ufcxt5JuwLQR"
# ##Multiple Variables with One Value
#
# + colab={"base_uri": "https://localhost:8080/"} id="6sB2GJPRwQw0" outputId="e7c3ae8d-bb7d-4b1e-9d25-53b940ecd9eb"
# Chained assignment binds all three names to the same string object.
x = y = z = "four"
print(x)
print(y)
print(z)
# + colab={"base_uri": "https://localhost:8080/"} id="kTLbToUawpPL" outputId="9c2e6498-acbe-4f68-fdf2-196242d2ae49"
x = "enjoying"
# Adjacent string literals are concatenated at compile time.
print("Python Programming is" " " + x)
# + [markdown] id="zB28AqhAxKmg"
# #Operations in Python
# + colab={"base_uri": "https://localhost:8080/"} id="VbAwhh_QxPXU" outputId="ac12169c-b426-48f0-d8b6-c3514e385515"
x = 5
y = 7
x += y #This is the same as x = x + y
print(x)
# + colab={"base_uri": "https://localhost:8080/"} id="jmtG_uT2zRVx" outputId="2176092e-0fb8-4737-e6d8-2e508c9c0032"
x = 5
y = 7
not(x>y or y==x)
# + colab={"base_uri": "https://localhost:8080/"} id="OhTKI2G10Jao" outputId="cf0fb47f-3f80-43b0-8a56-1b296f35e3a1"
# `is` tests object identity, not equality (here demonstrated on ints).
x is y
# + colab={"base_uri": "https://localhost:8080/"} id="tjcbIq2g0NJs" outputId="ab5168a3-f871-46ce-a134-da3f06c64208"
x is not y
| Demo1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Email Announcement Notebook
#
# This is a notebook to take user responses from a google form and convert it to an email list that one can
# use to send an announcement with.
# ### Import Google Form Results
#
# We import the responses from the google form by converting them to a spreadsheet on google sheets, then importing that sheet.
import pygsheets
import pandas as pd
#authorization
CLIENT_SECRET_FILE = '/Users/jamessunseri/Desktop/auto_emailer/client_secrets.json'
gc = pygsheets.authorize(client_secret=CLIENT_SECRET_FILE)
#open the google spreadsheet (where 'Python DeCal Application Form 21 (Responses)' is the name of my sheet)
response_sheet_title = 'Python DeCal Application Form Spring 2022 (Responses)'
sh_responses = gc.open(response_sheet_title)
# +
########################
#      Responses       #
########################
#select the first sheet
wks = sh_responses[0]
#format data into a nice dataframe using pandas
data = pd.DataFrame(wks)
data_np = data.to_numpy()
# First spreadsheet row holds the column headers.
column_names = data_np[0,:]
#cleanly format dataframe (drop the header row, use it as column names)
data = pd.DataFrame(data_np[1:,:], columns = column_names)
# -
# NOTE: the column name really does end with a space in the form export.
data['Email Address (ending with berkeley.edu) ']
# ### Write Message
#
# Here we write the message we want to send to the email list.
# Render one personalized message per respondent; {0} is the respondent's
# name.  (The <PASSWORD> placeholders below look redacted -- restore the
# real Zoom credentials before sending.)
email_messages = []
for i in range(len(data['First and Last Name'])):
    email_string = """ Dear {0},
This is a friendly announcement and reminder for folks that have not enrolled in the course yet.
This email is being sent to anyone who filled out the google form, so if you already enrolled in the
course then please ignore the rest of this email. If you haven't enrolled yet, please do so by
searching for the course number/name posted on our website pythondecal.github.io and using the enrollment
code we sent you previously via email as the permission code needed to finalize enrollment.
Please do this ASAP as you will not receive notifications via bCourses if you don't. This is absolutely necessary starting
Today (Wednesday 1/19) because we will have our first lecture today 3-4pm (Pacific) via zoom only and the first homework assignment
will come not too long after that so we need to be able to reach y'all efficiently with announcements.
The zoom information is below.
Zoom Link: https://berkeley.zoom.us/j/99199605577?pwd=<PASSWORD>
Password: <PASSWORD>
Please let us know if you have any questions!
Onwards,
Python Decal Staff""".format(data['First and Last Name'][i])
    email_messages.append(email_string)
# +
email_dict = {}
for i in range(len(data['First and Last Name'])):
    address = data['Email Address (ending with berkeley.edu) '][i]
    message = email_messages[i]
    email_dict[address] = message
# NOTE(review): the per-address dict built by the loop above is immediately
# discarded by the reassignment below -- the loop is dead code.
email_dict = {'Emails':data['Email Address (ending with berkeley.edu) '], 'Messages':email_messages}
email_data = pd.DataFrame(email_dict, index=range(len(data['First and Last Name'])))
email_data
# -
# ### Auto Emailer
#
# Below is the code that allows us to automatically send the emails just from running a cell.
# +
import pickle
import os
from google_auth_oauthlib.flow import Flow, InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload, MediaIoBaseDownload
from google.auth.transport.requests import Request
def Create_Service(client_secret_file, api_name, api_version, *scopes):
    """Create an authorized Google API service client.

    Credentials are cached in a pickle file named after the service and
    version; expired credentials are refreshed, and an interactive browser
    OAuth flow runs when no valid cached credential exists.  Returns the
    service object, or None if building it fails.

    Note: despite the *scopes* star-argument, only scopes[0] is consumed --
    callers pass a single list (Create_Service(file, name, version, SCOPES)).
    """
    print(client_secret_file, api_name, api_version, scopes, sep='-')
    CLIENT_SECRET_FILE = client_secret_file
    API_SERVICE_NAME = api_name
    API_VERSION = api_version
    SCOPES = [scope for scope in scopes[0]]
    print(SCOPES)
    cred = None
    pickle_file = f'token_{API_SERVICE_NAME}_{API_VERSION}.pickle'
    # print(pickle_file)
    if os.path.exists(pickle_file):
        with open(pickle_file, 'rb') as token:
            cred = pickle.load(token)
    if not cred or not cred.valid:
        if cred and cred.expired and cred.refresh_token:
            cred.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRET_FILE, SCOPES)
            cred = flow.run_local_server()
        # Persist the (possibly refreshed) credential for the next run.
        with open(pickle_file, 'wb') as token:
            pickle.dump(cred, token)
    try:
        service = build(API_SERVICE_NAME, API_VERSION, credentials=cred)
        print(API_SERVICE_NAME, 'service created successfully')
        return service
    except Exception as e:
        print('Unable to connect.')
        print(e)
        return None
def convert_to_RFC_datetime(year=1900, month=1, day=1, hour=0, minute=0):
    """Return an RFC 3339 UTC timestamp string, e.g. '2022-01-19T15:00:00Z'.

    Fix: the `datetime` module is never imported anywhere in this notebook,
    so calling the original raised NameError; import it locally here to
    keep the fix self-contained.
    """
    import datetime  # local import: module not imported at notebook level
    dt = datetime.datetime(year, month, day, hour, minute, 0).isoformat() + 'Z'
    return dt
import base64
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# CLIENT_SECRET_FILE = 'client_secrets.json'
API_NAME = 'gmail'
API_VERSION = 'v1'
# Full-access Gmail scope (read and send mail).
SCOPES = ['https://mail.google.com/']
# Reuses CLIENT_SECRET_FILE defined in the sheet-loading cell above.
service = Create_Service(CLIENT_SECRET_FILE, API_NAME, API_VERSION, SCOPES)
# -
def write_email(send_to_email, message):
    """Send *message* as a plain-text Gmail message to *send_to_email*.

    Uses the module-level `service` (Gmail API client) created above and
    prints the API response.
    NOTE(review): the subject is hard-coded to 'Python DeCal Admission'
    even though this notebook sends enrollment announcements -- confirm
    the intended subject line before a send.
    """
    emailMsg = message
    mimeMessage = MIMEMultipart()
    mimeMessage['to'] = send_to_email
    mimeMessage['subject'] = 'Python DeCal Admission'
    mimeMessage.attach(MIMEText(emailMsg, 'plain'))
    # The Gmail API expects the RFC 2822 message base64url-encoded.
    raw_string = base64.urlsafe_b64encode(mimeMessage.as_bytes()).decode()
    message = service.users().messages().send(userId='me', body={'raw': raw_string}).execute()
    print(message)
N_people = len(email_data['Messages'])
# Send one personalized email per respondent.
for i in range(N_people):
    write_email(email_data['Emails'][i], email_data['Messages'][i])
    print("Email sent to: ", email_data['Emails'][i])
| send_announcment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Reddit
# Para utilização do scrapper é preciso cadastrar um cliente na API do reddit, para obter as credênciais de acesso.
#
# É possível obter esses tokens ao cadastrar a aplicação nessa página: https://www.reddit.com/prefs/apps/
# +
import praw
import pandas as pd
from datetime import datetime
# Reddit API access tokens (created exclusively for this project's use).
# NOTE(review): never commit real client secrets to a notebook -- the
# secret here appears redacted ('<KEY>'); load it from the environment.
reddit = praw.Reddit(client_id='Zahz6NxvpyRV4A', client_secret='<KEY>', user_agent='TCC - Reddit Scrapper')
# Link to a reddit post.  Could also be replaced by topics from the front
# page or from some subreddit.
submission = reddit.submission(url="https://www.reddit.com/r/news/comments/i0lu1y/donald_trump_calls_for_delay_to_2020_us/")
# +
from praw.models import MoreComments
def get_first_level_comments(list_of_comments, index,dataframe):
    """Walk the top-level comments of the submission, writing one dataframe
    row per comment (starting at row *index*) and recursing into each
    comment's reply tree.  Returns the index of the last written row + 1.
    """
    for comentario in list_of_comments:
        # "Load more comments" stubs are expanded and processed recursively.
        if(type(comentario) == praw.models.reddit.more.MoreComments):
            index = get_first_level_comments(comentario.comments(), index, dataframe)
            continue
        print_root_info(comentario, index, dataframe)
        if(comentario.replies):
            # Replies occupy the rows immediately after their parent.
            child_id = index + 1
            last_id = get_replies_from_node(comentario.replies, index, child_id, dataframe)
            index = last_id + 1
        else:
            index += 1
    # Return the index of the last comment + 1.
    return index
# +
def get_replies_from_node(replies, in_reply_to_id, node_id, dataframe):
    """Recursively record a comment's reply tree into *dataframe*.

    *in_reply_to_id* is the row id of the parent comment; *node_id* is the
    row id to assign to the first reply.  Returns the row id of the last
    reply written (callers continue from that value + 1).
    """
    index = node_id
    for resposta in replies:
        if(type(resposta) == praw.models.reddit.more.MoreComments):
            # NOTE(review): unlike get_first_level_comments, this `return`
            # abandons any replies that follow the MoreComments stub in the
            # list -- confirm whether a continue-style expansion was intended.
            return get_replies_from_node(resposta.comments(), in_reply_to_id, index, dataframe)
        print_node_info(resposta, index, in_reply_to_id, dataframe)
        if(resposta.replies):
            child_id = index + 1
            last_id = get_replies_from_node(resposta.replies, index, child_id, dataframe)
            index = last_id + 1
        else:
            index += 1
    # index has been advanced past the last written row, so step back by one.
    return index - 1
def print_root_info(comentario, index, dataframe):
    """Append one top-level comment as row *index* of *dataframe*.

    Uses the module-level `submission` for the topic column; the position
    column defaults to 'neutral' and the addressee is empty for root
    comments.
    """
    when = datetime.fromtimestamp(comentario.created_utc)
    # Flatten the body so the CSV export stays one row per comment.
    text = str(comentario.body).replace("\n", " ").replace("\t", " ")
    dataframe.loc[index] = [
        index,
        str(comentario.author),
        when.strftime('%d/%m/%Y'),
        when.strftime('%H:%M:%S'),
        text,
        submission.title,
        'neutral',
        '',
    ]
def print_node_info(resposta, index, in_reply_to_id, dataframe):
    """Append one reply comment as row *index* of *dataframe*, recording in
    the addressee column which row it answers (*in_reply_to_id*)."""
    when = datetime.fromtimestamp(resposta.created_utc)
    # Newlines are stripped (no space inserted, matching the original export).
    text = str(resposta.body).replace("\n", "").replace("\t", " ")
    dataframe.loc[index] = [
        index,
        str(resposta.author),
        when.strftime('%d/%m/%Y'),
        when.strftime('%H:%M:%S'),
        text,
        submission.title,
        'neutral',
        in_reply_to_id,
    ]
# +
# Flatten every comment of the submission into a table and dump it to CSV;
# row ids start at 1.
df = pd.DataFrame(columns=['id', 'sender', 'date', 'time', 'content', 'topic', 'position', 'addressee'])
get_first_level_comments(submission.comments,1, df)
df.to_csv("reddit_post_extraction.csv", index_label=False)
# -
# # Twitter
#
# Teste de outro método de extração, utilizando o Twitter como rede social
# +
import tweepy
# Twitter API credentials -- fill these in before running (left blank here
# so no secrets are committed).
ACCESS_TOKEN = ''
ACCESS_SECRET = ''
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
# Setup access to API
def connect_to_twitter_OAuth():
    """Return a tweepy API client authenticated with the module-level keys."""
    handler = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    handler.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
    return tweepy.API(handler)
api = connect_to_twitter_OAuth()
# NOTE(review): the variable is named trump_tweets but the timeline fetched
# belongs to user 'rakin' -- confirm which account was intended.
trump_tweets = api.user_timeline('rakin')
for tweet in trump_tweets:
    print(tweet.text, tweet.id)
api.me()
#tweets = api.search()
# -
# ### O código funciona para pegar as replies, porém se o tweet for relativamente antigo, ou a pessoa tem muita atividade na plataforma, há uma demora excessiva para retornar as respostas
#
# ### Isso acontece por conta da maneira que o método da api está implementado
#
def get_replies(user, status_id):
    """Collect the replies to tweet *status_id* posted by *user*.

    Searches tweets addressed to *user* that were posted after the target
    tweet, keeps the ones replying directly to it, and recurses to fetch
    replies-to-replies.  Returns a list of dicts with keys: user, id, text,
    tweet_replies (the nested replies of that reply).

    Can be very slow for old tweets or very active accounts (see note above).
    """
    replies = tweepy.Cursor(api.search, q='to:{}'.format(user),
                                since_id=status_id, tweet_mode='extended').items()
    resp = []
    while True:
        try:
            reply = replies.next()
            if not hasattr(reply, 'in_reply_to_status_id_str'):
                continue
            if reply.in_reply_to_status_id == status_id:
                # logging.info("reply of tweet:{}".format(reply.full_text))
                aux = "@" + str(reply.user.screen_name)
                aux_id = reply.id
                print(reply.full_text)
                # Bug fix: the recursive result used to be discarded, so
                # "tweet_replies" was always the empty list.
                sub_replies = get_replies(aux, aux_id)
                resp.append({"user": aux, "id": aux_id, "text": reply.full_text, "tweet_replies": sub_replies})
        except tweepy.RateLimitError as e:
            # Bug fix: the message had no {} placeholder, so .format(e)
            # silently dropped the error details.
            print("Twitter api rate limit reached: {}".format(e))
            # time.sleep(60)
            continue
        except tweepy.TweepError as e:
            print("Tweepy error occured:{}".format(e))
            break
        except StopIteration:
            # Cursor exhausted: all candidate replies have been examined.
            break
        except Exception as e:
            print("Failed while fetching replies {}".format(e))
            break
    return resp
# +
user_name="@HaruJiggly"
tweet_id = 1282782489719779337
# NOTE(review): get_replies is called twice with identical arguments; the
# first call's result is discarded, doubling the (slow) API traffic.
get_replies(user_name, tweet_id)
retorno = get_replies(user_name, tweet_id)
for i in retorno:
    print(i)
# def get_sub_replies(reply_list_of_dicts):
#     for item in reply_list_of_dicts:
#         print(item["user"], item["id"], get_replies(item["user"], item["id"]))
# get_sub_replies(retorno)
| TCC_Scrappers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from datascience import *
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plots
plots.style.use('fivethirtyeight')
# -
# ## Arithmetic
2 + 3
2 * 3
2 ** 3  # exponentiation
2 * * 3  # deliberately malformed -- raises SyntaxError (lecture demo)
10 * 2 ** 3  # ** binds tighter than *
2 + 3 * 4 + 5
2 / 3
2 / 0  # deliberately raises ZeroDivisionError (lecture demo)
2 / 3000
2 / 3000000
0.6666666666666666 - 0.6666666666666666123456789
0.000000000000000123456789
0.000000000000000000000000000000000000000000000000000000000000000000000123456789
2 ** 0.5
2 ** 0.5 * 2 ** 0.5
2 ** 0.5 * 2 ** 0.5 - 2  # not exactly 0: floating-point rounding
# ## Growth
sept_7 = 4366
aug_7 = 1830
# Monthly growth rate implied by the change from Aug 7 to Sept 7.
growth_per_month = (sept_7 / aug_7) - 1
growth_per_month
# Naive 12-month compound projection at that monthly rate.
sept_7 * (1 + growth_per_month) ** 12
fed_budget_2002 = 2370000000000
fed_budget_2012 = 3380000000000
fed_budget_2012 - fed_budget_2002
# Annualized (compound) growth rate over the 10-year span.
g = (fed_budget_2012 / fed_budget_2002) ** (1/10) - 1
g
fed_budget_2002 * (1 + g) ** 16 # Actual 2018 budget: $4.1 trillion
# ## Arrays
make_array(1, 2, 3)
make_array(1, 2, 3) * 2  # elementwise arithmetic
a = make_array(1, 2, 3)
a + 5
a + make_array(10, 100, 1000)
a
sum(a)
max(a)
min(a)
fed_budget_2002 * (1 + g) ** a  # broadcasting: one projection per exponent
# ## Columns
# From http://www.boxofficemojo.com/alltime/adjusted.htm
movies = Table.read_table('top_movies_2017.csv')
movies
movies.column('Gross')
# Inflation adjustment factor per movie.
adjustment = movies.column('Gross (Adjusted)') / movies.column('Gross')
adjustment
movies.with_column('Adjustment', adjustment)
movies.with_column('Adjustment', adjustment).scatter('Year', 'Adjustment')
movies.column('Year')
age = 2017 - movies.column('Year')
movies = movies.with_column('Age', age)
movies
# Implied annual inflation rate over each movie's lifetime.
movies = movies.with_column('Growth rate', adjustment ** (1 / age) - 1)
movies
movies.scatter('Year', 'Growth rate')
movies.sort('Age').show(20)
movies.sort('Year').show(20)
# +
# http://www.boxofficemojo.com/about/adjuster.htm
| Data8.1x/lec04.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.linalg as la
sns.set_context('notebook', font_scale=1.5)
# -
# **1**. Interpolating polynomials and choice of basis
#
# We have
#
# | x | y |
# | ----- | -------- |
# | 0 | -5 |
# | 1 | -3 |
# | -1 | -15 |
# | 2 | 39 |
# | -2 | -9 |
#
# Find interpolating polynomials using
#
# - Monomial basis $f_i(x_i) = x^i$ - code this using only simple linear algebra operations (including solve)
# - Lagrange basis
# $$
# l_j(x_j) = \prod_{0 \le m \le k, m \ne j} \frac{x - x_m}{x_j - x_m}
# $$
#
# The Lagrange interpolation uses the values of $y$ as the coefficient for the basis polynomials. Do this manually and then using the scipy.interpolate package
x = np.array([0,1,-1,2,-2])
y = np.array([-5, -3, -15, 39, -9])
# Monomial basis
# Vandermonde-style design matrix [1, x, x^2, x^3, x^4]: 5 points determine
# a unique degree-4 interpolating polynomial.
A = np.c_[np.ones(5), x, x**2, x**3, x**4]
A
# Solve A c = y for the monomial coefficients.
c = la.solve(A, y)
c
xp = np.linspace(-2, 2, 100)
yp = np.c_[np.ones(100), xp, xp**2, xp**3, xp**4] @ c
plt.plot(xp, xp**0, label='M0')
plt.plot(xp, xp, label='M1')
plt.plot(xp, xp**2, label='M2')
plt.plot(xp, xp**3, label='M3')
plt.plot(xp, xp**4, label='M4')
plt.legend()
plt.title('Monomial basis polynomials')
pass
plt.scatter(x, y)
plt.plot(xp, yp)
pass
# Lagrange basis
# Each L_j is 1 at x[j] and 0 at the other nodes, so the interpolant is
# simply sum_j y[j] * L_j with the data values as coefficients.
xp = np.linspace(-2, 2, 50)
L0 = ((xp-x[1])*(xp-x[2])*(xp-x[3])*(xp-x[4])) / ((x[0]-x[1])*(x[0]-x[2])*(x[0]-x[3])*(x[0]-x[4]))
L1 = ((xp-x[0])*(xp-x[2])*(xp-x[3])*(xp-x[4])) / ((x[1]-x[0])*(x[1]-x[2])*(x[1]-x[3])*(x[1]-x[4]))
L2 = ((xp-x[0])*(xp-x[1])*(xp-x[3])*(xp-x[4])) / ((x[2]-x[0])*(x[2]-x[1])*(x[2]-x[3])*(x[2]-x[4]))
L3 = ((xp-x[0])*(xp-x[1])*(xp-x[2])*(xp-x[4])) / ((x[3]-x[0])*(x[3]-x[1])*(x[3]-x[2])*(x[3]-x[4]))
L4 = ((xp-x[0])*(xp-x[1])*(xp-x[2])*(xp-x[3])) / ((x[4]-x[0])*(x[4]-x[1])*(x[4]-x[2])*(x[4]-x[3]))
plt.plot(xp, L0, label='L0')
plt.plot(xp, L1, label='L1')
plt.plot(xp, L2, label='L2')
plt.plot(xp, L3, label='L3')
plt.plot(xp, L4, label='L4')
plt.legend()
plt.title('Lagrange basis polynomials')
pass
plt.scatter(x, y)
plt.plot(xp, y[0]*L0 + y[1]*L1 + y[2]*L2 + y[3]*L3 + y[4]*L4)
pass
# Using library functions
from scipy.interpolate import lagrange
lp = lagrange(x, y)
plt.scatter(x, y)
plt.plot(xp, lp(xp))
pass
# **2**. Markov chains
#
# $$
# P = \pmatrix{
# p_{11} & p_{12} & p_{13} \\
# p_{21} & p_{22} & p_{33} \\
# p_{31} & p_{32} & p_{33} \\
# }
# $$
#
# By convention, the $rows$ of a Markov transition matrix sum to 1, and $p_{32}$ is the probability that the system will change from state 3 to state 2. Therefore, to see the next state of an initial probability row vector $v_k$, we need to perform left multiplication
#
# $$
# v_{k+1}^T = v_{k}^T P
# $$
#
# If this is confusing, you can work with the matrix $P^T$ and do right-multiplication with column vectors. In this case, $p_{32}$ is the probability that the system will change from state 2 to state 3.
#
# 
#
# Find the stationary vector $\pi^T = \pi^T P$ for the transition graph shown
#
# - by solving a set of linear equations
# - by solving an eigenvector problem
# - Check that the resulting vector is invariant with respect to the transition matrix
# ```dot
# # %%file markov.dot
#
# digraph g {
# a -> a [label=0.8]
# b -> b [label=0.2]
# c -> c [label=0.2]
# a -> b [label=0.1]
# b -> a [label=0.3]
# a -> c [label=0.1]
# c -> a [label=0.2]
# b -> c [label=0.5]
# c -> b [label=0.6]
# }
# ```
P = np.array([
    [0.8, 0.1, 0.1],
    [0.3, 0.2, 0.5],
    [0.2, 0.6, 0.2]
])
# NOTE(review): with the row-stochastic convention stated above, the
# stationary vector is a LEFT eigenvector of P (i.e. a right eigenvector of
# P.T).  eig(P) below returns right eigenvectors -- confirm whether P.T was
# intended here.
lam, v = la.eig(P)
pi = v[:, np.argmax(lam)]
pi = pi/pi.sum()
pi
A = np.eye(3) - P
A
# Replace one redundant equation of (I - P) pi = 0 with the normalisation
# constraint sum(pi) = 1, giving a uniquely solvable system.
A[-1, :] = np.ones(3)
pi = la.solve(A, np.array([0,0,1]))
pi
# Brute force check
x0 = np.random.rand(3)
x0 /= x0.sum()
np.linalg.matrix_power(P, 100) @ x0.reshape(-1,1)
# **3**. Graphs
#
# $M$ is the adjacency matrix of a directed graph $G$. Find the vertices that belong to a clique.
#
# $$
# M = \pmatrix{
# 0 & 1 & 0 & 1 & 1 \\
# 1 & 0 & 0 & 1 & 0 \\
# 1 & 1 & 0 & 1 & 0 \\
# 1 & 1 & 0 & 0 & 0 \\
# 1 & 0 & 0 & 1 & 0
# }
# $$
#
# A clique is defined as a subset of a graph where
#
# 1. The subset has at least 3 vertices
# 2. All pairs of vertices are connected
# 3. The subset is as large as possible
#
# Because of the symmetry required in condition 2, we only need to consider the graph $S$ where $s_{ij} = 1$ if vertices $i$ and $j$ communicate and 0 otherwise. Then the non-zero diagonal entries of $S^3$ give the set of states recurrent in 3 steps. That is, there is a bi-directional path $s_i \leftrightarrow s_j \leftrightarrow s_k \leftrightarrow s_i$, which means that the vertices $\{s_i, s_j, s_k\}$ form a subset of a clique.
# NOTE(review): this matrix differs from the one in the markdown above at
# entry (row 2, col 5) -- verify which version is intended.
M = np.array([
    [0,1,0,1,1],
    [1,0,0,1,1],
    [1,1,0,1,0],
    [1,1,0,0,0],
    [1,0,0,1,0]
])
# Keep only bidirectional edges: s_ij = 1 iff both i->j and j->i exist.
S = np.where((M == 1) & (M == M.T), 1, 0)
S
# Diagonal of S^3 counts length-3 closed walks; non-zero entries mark
# vertices that sit on a 3-cycle of mutual edges (clique members).
S3 = np.linalg.matrix_power(S, 3)
S3
# Therefore nodes 0, 1, and 3 are part of a clique, and since the smallest clique has 3 members, they are from the same clique.
# **4**. Suppose we wish to solve the problem $t = Mt + b$ - here the notation is from one type of such problems where $t$ is the temperature, $M$ is a matrix for diffusion, and $b$ represent fixed boundary conditions. Suppose we have a 5 by 5 grid system whose boundary temperatures are fixed. Let $M$ is a matrix with $1/4$ for the $\pm 1$ off-diagonals and 0 elsewhere (i.e. diffusion is approximated by the average temperature of the 4 N, S, E, W neighbors), and $b$ is the vector $(5,2,3,3,0,1,3,0,1)$ - this assumes the temperatures along the bottom = 0, right edge = 1, top = 2 and left edge = 3. Find the equilibrium temperature at each of the 9 interior points
#
# - by solving a linear equation
# - by iteration
# Diffusion matrix over the 3x3 interior grid: each point averages its
# (interior) N/S/E/W neighbours with weight 1/4.
M = 0.25*np.array([
    [0,1,0,1,0,0,0,0,0],
    [1,0,1,0,1,0,0,0,0],
    [0,1,0,0,0,0,1,0,0],
    [1,0,0,0,1,0,1,0,0],
    [0,1,0,1,0,1,0,1,0],
    [0,0,1,0,1,0,0,0,1],
    [0,0,0,1,0,0,0,1,0],
    [0,0,0,0,1,0,1,0,1],
    [0,0,0,0,0,0,1,1,0]
])
M
# Fixed boundary contribution for each interior point.
b = 1/4*np.array([5,2,3,3,0,1,3,0,1,]).reshape((-1,1))
# Direct solution - not possible for large matrices
la.solve(np.eye(9) - M, b).reshape(3,3)
# Jacobi iteration
t = np.random.uniform(0,1,9).reshape((-1,1))
for i in range(101):
    t = M@t + b
    if i % 25 == 0:
        print(t.reshape(3,3))
# Re-assemble the full 5x5 grid: interior from the solution, edges from the
# fixed boundary temperatures (bottom=0, right=1, top=2, left=3).
tc = np.zeros((5,5))
tc[1:-1, 1:-1] = t.reshape((3,3))
tc[-1, 1:] = 0
tc[1:,-1] = 1
tc[0, 1:] = 2
tc[:-1,0 ] = 3
tc
plt.imshow(tc, interpolation='gaussian', cmap='jet')
plt.xticks([])
plt.yticks([])
pass
# **5**. Iterated affine maps
#
# Define the following mapping in $\mathbb{R}^2$
#
# $$
# T_i: \pmatrix{x \\ y} \to s \pmatrix{\cos \theta & - \sin \theta \\ \sin \theta & \cos \theta} \pmatrix{x \\ y} + \pmatrix{a_i \\ b_i}
# $$
#
# Suppose $s = 1/3$, $\theta = 0$, and $\pmatrix{a_i \\ b_i}$ are
#
# $$
# \pmatrix{0 \\ 0}, \pmatrix{1/3 \\ 0},
# \pmatrix{2/3 \\ 0}, \pmatrix{0 \\ 1/3},
# \pmatrix{2/3 \\ 1/3}, \pmatrix{0 \\ 2/3},
# \pmatrix{1/3 \\ 2/3}, \pmatrix{2/3 \\ 2/3}
# $$
#
# Generate 1,000 points by first randomly selecting a point in the unit square, then applying at random one of th transformations $T_i$ to the point. Plot the resulting 1,000 points as a scatter plot on in a square frame.
#
def f(x, s, ab):
    """Apply one affine contraction of the Sierpinski IFS.

    Scales the 2-vector *x* by *s* and translates by *ab*; both inputs are
    reshaped to column vectors, so the result is a (2, 1) array.
    """
    column = np.reshape(x, (-1, 1))
    offset = np.reshape(ab, (-1, 1))
    return (s * np.eye(2)) @ column + offset
# Translation vectors of the 8 affine maps (Sierpinski carpet corners/edges).
ab =[
    [0,0],
    [1/3,0],
    [2/3,0],
    [0,1/3],
    [2/3,1/3],
    [0,2/3],
    [1/3,2/3],
    [2/3,2/3]
]
# NOTE(review): the markdown above asks for 1,000 points; this run uses
# 50,001 and snapshots the attractor at 5 stages.
n = 50001
burn = 10
grid = np.zeros((n,2))
# Pre-draw one random map index per step; tr has shape (n, 2, 1).
idx = np.random.choice(8, n)
tr = np.array(ab)[idx][:,:,None]
x = np.random.uniform(0,1,(2, 1))
s = 1/3
fig, axes = plt.subplots(1,5, figsize=(15,3))
for i in range(n):
    x = np.reshape(s*np.eye(2) @ x + tr[i,:], (2,1))
    grid[i] = x.ravel()
    if i % 10000 == 0:
        # NOTE(review): at i == 0 this indexes axes[-1], so the last panel
        # is drawn first -- confirm whether i // 10000 was intended.
        ax = axes[(i-1) // 10000]
        # Drop the first `burn` points so the pre-convergence transient is hidden.
        ax.scatter(grid[burn:, 0], grid[burn:, 1], s=0.01, c='orange')
        ax.axis('square')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
# **6**. The Fibonacci sequence came about from this toy model of rabbit population dynamics
#
# - A baby rabbit matures into an adult in 1 time unit
# - An adult gives birth to exactly 1 baby in 1 time unit
# - Rabbits are immortal
#
# This gives the well known formula for the number of rabbits over discrete time $F_{k+2} = F_{k} + F_{k+1}$
#
# - Express this model as a matrix equation, and calculate the long-term growth rate
# Let the population at any time be expreessed as the vector
# $$
# \pmatrix{\text{adult} \\ \text{baby} }
# $$
#
# In the next time step, there will be
#
# - 1 adult from each adult, and one adult from each baby
# - 1 baby from each adult
# Transition matrix: (adult, baby) -> (adult + baby, adult).
A = np.array([[1,1],[1,0]])
A
x0 = np.array([1,1]).reshape(-1,1)
x = x0
for i in range(10):
    x = A @ x
    print(x.ravel(), end=', ')
# The adult/baby ratio converges to the golden-ratio growth rate.
print('Growth rate = %.3f' % (x[0,0]/x[1,0]))
# Long term growth rate is the largest eigenvalue of the matrix.
la.eigvals(A).max()
# **7**. Age-structured population growth
#
# Suppose that we observe the following Leslie matrix
#
# $$
# L = \pmatrix{
# 0 & 3 & 2 & 0.5 \\
# 0.8 & 0 & 0 & 0 \\
# 0 & 0.9 & 0 & 0 \\
# 0 & 0 & 0.7 & 0
# }
# $$
#
# 
#
# - Starting with just 1,000 females in age-group 0-15 at time 0 and nobody else, what is the expected population after 90 years?
# - Suppose we could alter the fertility in a *single* age group for this population - can we achieve a steady state non-zero population?
# Leslie matrix: first row = fertilities, sub-diagonal = survival rates.
L = np.array([
    [0,3,2,0.5],
    [0.8,0,0,0],
    [0,0.9,0,0],
    [0,0,0.7,0]
])
x0 = np.array([1000,0,0,0]).reshape(-1,1)
# 6 steps of 15 years each = 90 years of projection.
(np.linalg.matrix_power(L, 6) @ x0).astype('int').ravel()
# Zero the fertility of the 15-30 age group and inspect the spectrum.
L0 = L.copy()
L0[0,1] = 0
L0
lam, v = la.eig(L0)
lam
v
# Note that the real eigenvalue with real eigenvector is dominant $\vert L_1 \vert > \vert L_k \vert$.
#
# A theorem says this will be true if you have any two positive consecutive entries in the first row of $L$.
#
# The growth rate is determined by the dominant real eigenvalue with real eigenvector - in the long term, whether the population will grow, shrink or reach steady state depends on whether this is greater than, less than or equal to 1 respectively.
# Magnitudes of the eigenvalues: the dominant one sets the growth rate.
np.absolute(lam)
# ```dot
# # %%file leslie.dot
#
# digraph g {
# rank = min {1}
# rank = max {5}
# rankdir = LR
# overlap = false
# splines = true
#
# 5 [style=invis]
# 1 [label = "0 - 15"]
# 2 [label = "15 - 30"]
# 3 [label = "30 - 45"]
# 4 [label = "45 - 60"]
#
# 1 -> 2 [label = 0.8 ]
# 2 -> 3 [label = 0.9]
# 3 -> 4 [label = 0.7]
# 2 -> 1 [label = "F = 3" constraint=false]
# 3 -> 1 [label = "F = 2" constraint=false]
# 4 -> 1 [label = "F = 0.5" constraint=false]
# }
# ```
# **8**.
#
# You are given the following set of data to fit a quadratic polynomial to
#
# ```python
# x = np.arange(10)
# y = np.array([ 1.58873597, 7.55101533, 10.71372171, 7.90123225,
# -2.05877605, -12.40257359, -28.64568712, -46.39822281,
# -68.15488905, -97.16032044])
# ```
#
# - Find the least squares solution by using the normal equations $A^T A \hat{x} = A^T y$. (5 points)
#
#
x = np.arange(10)
y = np.array([ 1.58873597, 7.55101533, 10.71372171, 7.90123225,
-2.05877605, -12.40257359, -28.64568712, -46.39822281,
-68.15488905, -97.16032044])
# Quadratic design matrix [1, x, x^2]; solve the normal equations
# A^T A beta = A^T y for the least-squares coefficients.
A = np.c_[np.ones(len(x)), x, x**2]
betas = la.solve(A.T @ A, A.T @ y)
betas
xp = np.linspace(x.min(), x.max(), 100)
plt.scatter(x, y)
# np.polyval expects highest-degree-first coefficients, hence the reversal.
plt.plot(xp, np.polyval(betas[::-1], xp))
pass
# **9**.
#
# You are given the following data
#
# ```python
# A = np.array([[1, 8, 0, 7],
# [0, 2, 9, 4],
# [2, 8, 8, 3],
# [4, 8, 6, 1],
# [2, 1, 9, 6],
# [0, 7, 0, 1],
# [4, 0, 2, 4],
# [1, 4, 9, 5],
# [6, 2, 6, 6],
# [9, 9, 6, 3]], dtype='float')
#
# b = np.array([[2],
# [5],
# [0],
# [0],
# [6],
# [7],
# [2],
# [6],
# [7],
# [9]], dtype='float')
# ```
#
# - Using SVD directly (not via `lstsq`), find the least squares solution to $Ax = b$ (10 points)
# - Use SVD to find the best rank 3 approximation of A (10 points)
# - Calculate the approximation error in terms of the Frobenius norm (5 points)
# +
A = np.array([[1, 8, 0, 7],
[0, 2, 9, 4],
[2, 8, 8, 3],
[4, 8, 6, 1],
[2, 1, 9, 6],
[0, 7, 0, 1],
[4, 0, 2, 4],
[1, 4, 9, 5],
[6, 2, 6, 6],
[9, 9, 6, 3]], dtype='float')
b = np.array([[2],
[5],
[0],
[0],
[6],
[7],
[2],
[6],
[7],
[9]], dtype='float')
# -
# Least-squares solution via the thin-SVD pseudo-inverse: x = V S^{-1} U^T b.
U, s, Vt = np.linalg.svd(A, full_matrices=False)
Vt.T @ np.diag(1/s) @ U.T @ b
# Best rank-3 approximation (Eckart-Young): keep the 3 largest singular values.
k = 3
R = U[:, :k] @ np.diag(s[:k]) @ Vt[:k, :]
# Approximation error in the Frobenius norm.
np.linalg.norm(A - R, ord='fro')
# **10**.
#
# The page rank of a node is given by the equation
#
# 
#
# and at steady state, we have the page rank vector $R$
#
# 
#
# where $d$ is the damping factor, $N$ is the number of nodes, $1$ is a vector of ones, and
#
# 
#
# where $L(p_j)$ is the number of outgoing links from node $p_j$.
#
# Consider the graph
#
# 
#
# If $d = 0.9$ find the page rank of each node
#
# - By solving a linear system (15 points)
# - By eigendecomposition (10 points)
#
# Note: The Markov matrix constructed as instructed does not follow the usual convention. Here the columns of our Markov matrix are probability vectors, and the page rank is considered to be a column vector of the steady state probabilities.
# Column-stochastic link matrix (columns are probability vectors here, per
# the note above): M[i, j] = 1 / L(p_j) if p_j links to p_i.
M = np.array([
    [0,0,0,1],
    [0.5,0,0,0],
    [0.5,1,0,0],
    [0,0,1,0]
])
N = 4
d = 0.9
# Linear-system form: (I - d M) r = (1-d)/N * 1.
r = np.linalg.solve(np.eye(N) - d*M, (1-d)/N * np.ones(N))
r
# +
# Damped ("Google") matrix; its eigenvalue-1 eigenvector is the page rank.
A = d*M + (1-d)/N * np.ones(N)
e, v = np.linalg.eig(A)
# -
e
# NOTE(review): assumes the unit eigenvalue is first in `e` -- selecting by
# np.argmax(np.abs(e)) would be safer; confirm against the printed spectrum.
u = np.real_if_close(v[:, 0])
u /= np.sum(u)
u
# **11**.
#
# Recall that a covariance matrix is a matrix whose entries are
#
# 
#
# 1. Find the sample covariance matrix of the 4 features of the **iris** data set at http://bit.ly/2ow0oJO using basic `numpy` operations on `ndarrays`. Do **not** use the `np.cov` or equivalent functions in `pandas` (except for checking). Remember to scale by $1/(n-1)$ for the sample covariance. (10 points)
# 2. Plot the first 2 principal components of the `iris` data by using eigendecomposition, coloring each data point by the species (10 points)
url = 'http://bit.ly/2ow0oJO'
iris = pd.read_csv(url)
iris.head()
# Center the 4 numeric features; sample covariance is X^T X / (n - 1).
X = iris.values[:, :4].astype('float')
X -= X.mean(axis=0)
C = (X.T @ X)/(X.shape[0]-1)
C
# eigh: covariance is symmetric; sort eigenvalues descending and keep the
# two leading eigenvectors as principal components.
e, v = np.linalg.eigh(C)
idx = np.argsort(e)[::-1]
pc = v[:, idx[:2]]
# Project the (centered) data onto the first two PCs.
p1, p2 = pc.T @ X.T
plt.scatter(p1, p2, c=iris.species.astype('category').cat.codes)
pass
| notebooks/solutions/T07_More_Linear_Algebra_Solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# name: ir
# ---
# + [markdown] id="lrOk7wFeB5rO"
# ## R
# + colab={"base_uri": "https://localhost:8080/"} id="up3a84fcx1QX" outputId="c366d1ce-1cf2-4609-db6c-94c33abf4602"
library(dplyr)
library(tidyr)
library(ggplot2)
library(magrittr)
# + [markdown] id="pViAlsx38Ufz"
# ### QQ-Plot
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="q6xNqnR11zXn" outputId="1be4a1b4-1aef-4279-c7df-ee0867e27023"
# Draw 100 standard-normal samples and check normality with a QQ plot;
# the y = x reference line is the ideal for N(0, 1) data.
norm_samp <- rnorm(100)
qqnorm(norm_samp)
abline(a=0, b=1, col='grey')
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="giM7y9y3N6gD" outputId="020b28a4-2984-494c-be00-f3aabc5f000a"
# Same check with a larger sample (n = 350): the tails should hug the line
# more closely.
norm_samp <- rnorm(350)
qqnorm(norm_samp)
abline(a=0, b=1, col='blue')
| R/Day26/day26ofmathandstats_R.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
try:
from PIL import Image
except ImportError:
import Image
import pytesseract
#read your file
file=r'/home/geoffrey/notebooks/praxido/31-january-2018.png'
img = cv2.imread(file,0)
img.shape
#thresholding the image to a binary image (Otsu picks the threshold)
thresh,img_bin = cv2.threshold(img,128,255,cv2.THRESH_BINARY |cv2.THRESH_OTSU)
#inverting the image so table lines/text become white on black
img_bin = 255-img_bin
cv2.imwrite('/home/geoffrey/notebooks/praxido/cv_inverted.png',img_bin)
#Plotting the image to see the output
plotting = plt.imshow(img_bin,cmap='gray')
plt.show()
# Length(width) of kernel as 100th of total width
kernel_len = np.array(img).shape[1]//100
# Defining a vertical kernel to detect all vertical lines of image
ver_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, kernel_len))
# Defining a horizontal kernel to detect all horizontal lines of image
hor_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_len, 1))
# A kernel of 2x2
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
#Use vertical kernel to detect and save the vertical lines in a jpg
# (erode removes everything that is not a long vertical run, dilate restores it)
image_1 = cv2.erode(img_bin, ver_kernel, iterations=3)
vertical_lines = cv2.dilate(image_1, ver_kernel, iterations=3)
cv2.imwrite("/home/geoffrey/notebooks/praxido/vertical.jpg",vertical_lines)
#Plot the generated image
plotting = plt.imshow(image_1,cmap='gray')
plt.show()
#Use horizontal kernel to detect and save the horizontal lines in a jpg
image_2 = cv2.erode(img_bin, hor_kernel, iterations=3)
horizontal_lines = cv2.dilate(image_2, hor_kernel, iterations=3)
cv2.imwrite("/home/geoffrey/notebooks/praxido/horizontal.jpg",horizontal_lines)
#Plot the generated image
plotting = plt.imshow(image_2,cmap='gray')
plt.show()
# Combine horizontal and vertical lines in a new third image, with both having same weight.
img_vh = cv2.addWeighted(vertical_lines, 0.5, horizontal_lines, 0.5, 0.0)
#Eroding and thresholding the image to get the clean table grid
img_vh = cv2.erode(~img_vh, kernel, iterations=2)
thresh, img_vh = cv2.threshold(img_vh,128,255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
cv2.imwrite("/home/geoffrey/notebooks/praxido/img_vh.jpg", img_vh)
# XOR the grid out of the original, then invert: text without table lines.
bitxor = cv2.bitwise_xor(img,img_vh)
bitnot = cv2.bitwise_not(bitxor)
cv2.imwrite("/home/geoffrey/notebooks/praxido/bitnot.jpg", bitnot)
#Plotting the generated image
plotting = plt.imshow(bitnot,cmap='gray')
plt.show()
# Detect contours for following box detection
contours, hierarchy = cv2.findContours(img_vh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# +
def sort_contours(cnts, method="left-to-right"):
    """Sort OpenCV contours by bounding-box position.

    *method* is one of "left-to-right", "right-to-left", "top-to-bottom",
    "bottom-to-top".  Returns (sorted_contours, sorted_bounding_boxes),
    both as tuples, paired in the same order.
    """
    # Vertical methods compare the y coordinate of the bounding box,
    # horizontal ones the x coordinate; the two "reverse" directions sort
    # in descending order.
    axis = 1 if method in ("top-to-bottom", "bottom-to-top") else 0
    descending = method in ("right-to-left", "bottom-to-top")
    boxes = [cv2.boundingRect(c) for c in cnts]
    paired = sorted(zip(cnts, boxes), key=lambda pair: pair[1][axis], reverse=descending)
    cnts, boxes = zip(*paired)
    return (cnts, boxes)
# Sort all the contours by top to bottom.
contours, boundingBoxes = sort_contours(contours, method="top-to-bottom")
# -
#Creating a list of heights for all detected boxes
heights = [boundingBoxes[i][3] for i in range(len(boundingBoxes))]
#Get mean of heights (used later to decide which boxes share a table row)
mean = np.mean(heights)
#Create list box to store all boxes in
box = []
# Get position (x,y), width and height for every contour and show the contour on image
# (the size filter drops the page border and other oversized contours)
for c in contours:
    x, y, w, h = cv2.boundingRect(c)
    if (w<1000 and h<500):
        image = cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
        box.append([x,y,w,h])
plotting = plt.imshow(image,cmap='gray')
plt.show()
#Creating two lists to define row and column in which cell is located
row=[]
column=[]
j=0
#Sorting the boxes to their respective row and column: a box whose top edge
#lies within half the mean cell height of the previous box is treated as
#belonging to the same table row.
for i in range(len(box)):
    if(i==0):
        column.append(box[i])
        previous=box[i]
    else:
        if(box[i][1]<=previous[1]+mean/2):
            column.append(box[i])
            previous=box[i]
            if(i==len(box)-1):
                row.append(column)
        else:
            # NOTE(review): if the LAST box starts a new row, that final
            # single-cell row is never appended to `row` -- confirm.
            row.append(column)
            column=[]
            previous = box[i]
            column.append(box[i])
print(column)
print(row)
#calculating maximum number of cells
# Bug fix: the original compared countcol with itself ("if countcol > countcol",
# never true), so "countcol" ended up as the length of the LAST row rather
# than the maximum over all rows.
# NOTE: the loop variable i is reused by the next cell (row[i]) and must end
# at len(row) - 1, so the index-based loop form is kept.
countcol = 0
for i in range(len(row)):
    if len(row[i]) > countcol:
        countcol = len(row[i])
#Retrieving the center of each column
# NOTE(review): this relies on `i` leaking from the previous loop (so it
# reads the LAST row), and `if row[0]` is a constant truthiness guard, not a
# per-item filter -- confirm the intended row/condition.
center = [int(row[i][j][0]+row[i][j][2]/2) for j in range(len(row[i])) if row[0]]
center=np.array(center)
center.sort()
#Regarding the distance to the columns center, the boxes are arranged in respective order
finalboxes = []
for i in range(len(row)):
    # One (possibly empty) bucket per table column.
    lis=[]
    for k in range(countcol):
        lis.append([])
    for j in range(len(row[i])):
        # Assign each box to the column whose center is nearest.
        diff = abs(center-(row[i][j][0]+row[i][j][2]/4))
        minimum = min(diff)
        indexing = list(diff).index(minimum)
        lis[indexing].append(row[i][j])
    finalboxes.append(lis)
#from every single image-based cell/box the strings are extracted via pytesseract and stored in a list
outer=[]
for i in range(len(finalboxes)):
    for j in range(len(finalboxes[i])):
        inner=''
        if(len(finalboxes[i][j])==0):
            # Empty table cell: keep the grid shape with a placeholder.
            outer.append(' ')
        else:
            for k in range(len(finalboxes[i][j])):
                # Crop the cell from the line-free image (note x/y swap:
                # bitnot is indexed [row, col]).
                y,x,w,h = finalboxes[i][j][k][0],finalboxes[i][j][k][1], finalboxes[i][j][k][2],finalboxes[i][j][k][3]
                finalimg = bitnot[x:x+h, y:y+w]
                # Pad, upscale and clean the crop to help tesseract.
                kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 1))
                border = cv2.copyMakeBorder(finalimg,2,2,2,2, cv2.BORDER_CONSTANT,value=[255,255])
                resizing = cv2.resize(border, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
                dilation = cv2.dilate(resizing, kernel,iterations=1)
                erosion = cv2.erode(dilation, kernel,iterations=1)
                cv2.imwrite(f"/home/geoffrey/notebooks/praxido/output/cell{i}-{j}-{k}.jpg", erosion)
                out = pytesseract.image_to_string(erosion)
                if(len(out)==0):
                    # Retry with automatic page segmentation if nothing was read.
                    out = pytesseract.image_to_string(erosion, config='--psm 3')
                inner = inner +" "+ out
            outer.append(inner)
#Creating a dataframe of the generated OCR list
arr = np.array(outer)
dataframe = pd.DataFrame(arr.reshape(len(row),countcol))
print(dataframe)
# NOTE(review): Styler.set_properties expects CSS properties such as
# **{'text-align': 'left'}; 'align' is not a standard CSS property -- confirm.
data = dataframe.style.set_properties(align="left")
#Converting it in a excel-file
data.to_excel("/home/geoffrey/notebooks/praxido/output.xlsx")
| Split Sheet Into Cells.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <font size="+5">#01 | Web Development & Machine Learning Deployment</font>
# - Subscribe to my [Blog ↗](https://blog.pythonassembly.com/)
# - Let's keep in touch on [LinkedIn ↗](www.linkedin.com/in/jsulopz) 😄
# # Discipline to Search Solutions in Google
# > Apply the following steps when **looking for solutions in Google**:
# >
# > 1. **Necessity**: How to load an Excel in Python?
# > 2. **Search in Google**: by keywords
# > - `load excel python`
# > - ~~how to load excel in python~~
# > 3. **Solution**: What's the `function()` that loads an Excel in Python?
# >      - A function is to programming what the atom is to physics.
# > - Every time you want to do something in programming
# > - **You will need a `function()`** to make it
# >      - Therefore, you must **detect parentheses `()`**
# > - Out of all the words that you see in a website
# > - Because they indicate the presence of a `function()`.
# # Load the Data
# > - We take some dataset from the _[Machine Learning Data Repository UCI](https://archive.ics.uci.edu/ml/datasets/adult)_
# > - The aim is to predict whether a **person** (rows) `earned>50k` a year or not
# > - Based on their **social-demographic features** (columns)
# >
# > PD: You may see the column names & meanings [here ↗](https://archive.ics.uci.edu/ml/datasets/adult).
# +
import pandas as pd
pd.set_option("display.max_columns", None)
# UCI "Adult" census dataset; the file has no header row and encodes
# missing values as ' ?' (space included).
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
df = pd.read_csv(url, header=None, na_values=' ?')
# Column 14 is the income label (<=50K / >50K).
df.rename(columns={14: 'target'}, inplace=True)
df.head()
# -
# ## Compute & Save the ML Model
# ### Create Model from Historical Data
# ### Save the `Model()` into a `file.pkl`
# ## Web Development
# ### URLs
#
# - Go to a website
# ### HTML
#
# - Inspect the HTML
# ### GET
#
# - Explain this command: getting data from twitter
# ### POST
#
# - Explain this command: posting a tweet
# ## Create a Simple HTML
# ## HTML Form to ask User Input Data
#
# - Intro Data
# - Submit Button
# - Nothing happens?
# - We haven't integrated a Web Server System
# ## Input Data & Make a Prediction
#
# - Simulating a user introducing data in the website
# ## Send Input Data to Python & Make the Prediction
# ## Create Flask App
# ### Routing
# ### Rendering `html` templates through MVT
# ### Python Data to HTML Template and Get the View
# ## Deploy App to Heroku
# ### Create a Heroku Account
# ### Download Heroku CLI
# ### Deploy to Heroku
# #### The `requirements.txt`
#
# - We won't add all libraries, just the ones needed
# #### Create a Virtual Env
# #### The Procfile
# ## Watch Out your App Online
# ## Put it on GitHub
| III Advanced Machine Learning/01_Web Development & Machine Learning Deployment/01practice_ml-deployment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Mission to Mars - Webscraping Project
# <h3><NAME>
# import dependencies
from splinter import Browser
import pandas as pd
import requests
from bs4 import BeautifulSoup as bs
from pprint import pprint
#Launch site and use BeautifulSoup to parse
# Open the NASA Mars news listing in a splinter-driven Chrome session and
# hand the rendered HTML to BeautifulSoup for parsing.
url = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
# ### Set up the necessary variables for the most recent news article
# Need to get a variable for the news_title and news_p
# (A redundant second parse of the same HTML was removed here.)
# The listing is sorted newest-first, so index 0 is the latest article.
latest_news_date = (soup.find_all('div', class_="list_date"))[0].get_text()
latest_news_title = (soup.find_all('div', class_='content_title'))[0].get_text()
# Fix: extract the teaser text with .get_text() instead of keeping the raw
# bs4 Tag element, consistent with the date and title above.
latest_news_paragraph = (soup.find_all('div', class_='article_teaser_body'))[0].get_text()
# * Visit the url for JPL Featured Space Image [here](https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars).
#
# * Use splinter to navigate the site and find the image url for the current Featured Mars Image and assign the url string to a variable called `featured_image_url`.
#
# * Make sure to find the image url to the full size `.jpg` image.
#
# * Make sure to save a complete url string for this image.
#
# ```python
# # Example:
# featured_image_url = 'https://www.jpl.nasa.gov/spaceimages/images/largesize/PIA16225_hires.jpg'
# ```
# scrape for featured image url
# NOTE(review): this cell still queries the *news* page soup (the JPL page
# is only loaded further below), so it returns a news title, not an image
# URL — confirm whether this probe cell is intentional.
soup.find('div', class_='content_title').find('a').text
# ### Mars Weather
#
# * Visit the Mars Weather twitter account [here](https://twitter.com/marswxreport?lang=en) and scrape the latest Mars weather tweet from the page. Save the tweet text for the weather report as a variable called `mars_weather`.
#
# ### Example:
# mars_weather = 'Sol 1801 (Aug 30, 2017), Sunny, high -21C/-5F, low -80C/-112F, pressure at 8.82 hPa, daylight 06:09-17:55'
# #### Set up scraper for Mars Images from Images Site
#Launches Website and Parses Data into Beautiful Soup.
# Open the JPL space-images page in a visible, splinter-driven Chrome session.
url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
browser.visit(url)
html=browser.html
soup = bs(html, 'html.parser')
#need to scrape for featured image url, variable featured_image_url
# The carousel item's 'data-fancybox-href' attribute holds a site-relative
# path to the full-size image, so prefix it with the JPL domain.
image = (soup.find_all('div', class_='carousel_items')[0].a.get('data-fancybox-href'))
featured = 'https://www.jpl.nasa.gov'+ image
print(featured)
browser.quit()
# #### Scrape Mars Weather from Twitter Account
#Launches Website and Parses Data into Beautiful Soup.
url = 'https://twitter.com/marswxreport?lang=en'
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
browser.visit(url)
html=browser.html
soup = bs(html, 'html.parser')
#Save The Tweet of the most Recent Mars Weather String.
# The first <p> carrying Twitter's tweet-text CSS classes is the newest tweet.
mars_weather = (soup.find_all('p', class_='TweetTextSize TweetTextSize--normal js-tweet-text tweet-text')[0].get_text())
#prints most recent variable
browser.quit()
mars_weather
# ### Scraping Mars Facts Webpage
#Launches Website and Parses Data into Beautiful Soup.
# NOTE(review): the browser opened here is never used or quit; pandas reads
# the table directly from the URL below.
url = 'https://space-facts.com/mars/'
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
browser.visit(url)
html=browser.html
soup = bs(html, 'html.parser')
# pd.read_html returns every <table> on the page; the first is the Mars
# facts table, re-labelled and indexed by the attribute name.
tables_df = ((pd.read_html(url))[0]).rename(columns={0: "Attribute", 1: "Value"}).set_index(['Attribute'])
#Use Pandas to convert the data to a HTML table string, Along with cleaning out the '\n' string in a function.
html_table = (tables_df.to_html()).replace('\n', '')
html_table
#saves the table to an HTML File, if needed.
tables_df.to_html('table.html')
tables_df
# ### Mars Hemisphere URL
# ##### Cerberus Hemisphere URL
#Launches Website and Parses Data into Beautiful Soup.
#cerberus url
# Each USGS hemisphere page exposes the full-size image as the first link
# in its "downloads" list. The same open -> parse -> extract -> quit
# boilerplate is repeated for all four hemispheres below.
url = 'https://astrogeology.usgs.gov/search/map/Mars/Viking/cerberus_enhanced'
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
browser.visit(url)
html=browser.html
soup = bs(html, 'html.parser')
cerberus_url = (soup.find_all('div', class_='downloads')[0].li.a.get('href'))
browser.quit()
print(cerberus_url)
# ##### Schiaparelli Hemisphere Url
#Launches Website and Parses Data into Beautiful Soup.
#Schiaparelli Hemisphere url
url = 'https://astrogeology.usgs.gov/search/map/Mars/Viking/schiaparelli_enhanced'
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
browser.visit(url)
html=browser.html
soup = bs(html, 'html.parser')
schiaparelli_url = (soup.find_all('div', class_='downloads')[0].li.a.get('href'))
browser.quit()
print(schiaparelli_url)
# #### Syrtis Major Hemisphere
#Syrtis Major Hemisphere
url = 'https://astrogeology.usgs.gov/search/map/Mars/Viking/syrtis_major_enhanced'
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
browser.visit(url)
html=browser.html
soup = bs(html, 'html.parser')
syrtis_major_url = (soup.find_all('div', class_='downloads')[0].li.a.get('href'))
browser.quit()
print(syrtis_major_url)
# ##### Valles Marineris Hemisphere
#Valles Marineris Hemisphere Url
url = 'https://astrogeology.usgs.gov/search/map/Mars/Viking/valles_marineris_enhanced'
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
browser.visit(url)
html=browser.html
soup = bs(html, 'html.parser')
# NOTE(review): variable name is misspelled ("marineries"); kept as-is
# because the dictionary below references it.
valles_marineries_url= (soup.find_all('div', class_='downloads')[0].li.a.get('href'))
browser.quit()
print(valles_marineries_url)
# +
#Create a Dictionary of the Title of the Hemispheres with Respective Image Urls
# Collect the four scraped hemisphere links into one list of dicts.
hemisphere_image_urls = [
    {"title": "Valles Marineris Hemisphere", "img_url": valles_marineries_url},
    {"title": "Cerberus Hemisphere", "img_url": cerberus_url},
    {"title": "Schiaparelli Hemisphere", "img_url": schiaparelli_url},
    {"title": "Syrtis Major Hemisphere", "img_url": syrtis_major_url},
]
# -
hemisphere_image_urls
# Spot-check the first entry.
hemisphere_image_urls[0]['title']
hemisphere_image_urls[0]['img_url']
| .ipynb_checkpoints/mission_to_mars-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial 1 - Analyzing Taxi Time Series Data with STUMPY
#
# This example utilizes the main takeways from the research papers: [Matrix Profile I](http://www.cs.ucr.edu/~eamonn/PID4481997_extend_Matrix%20Profile_I.pdf) & [Matrix Profile II](http://www.cs.ucr.edu/~eamonn/STOMP_GPU_final_submission_camera_ready.pdf).
#
# We will be looking at data from taxi passengers in NYC and will be seeing if we can find any patterns or similar subsequences within the time series that we can utilize to determine patterns and outliers.
# ## Getting Started
#
# Let's import the packages that we'll need to load, analyze, and plot the data.
# +
# %matplotlib inline
import pandas as pd
import stumpy
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import datetime as dt
# -
# ## Loading Some Data
#
# First, we'll download historical data that represents the half-hourly average of the number of NYC taxi passengers over 75 days in the Fall of 2014.
#
#
# We extract that data and insert it into a pandas dataframe, making sure the timestamps are stored as *datetime* objects and the values are of type *float64*.
# Half-hourly NYC taxi passenger counts (Fall 2014), fetched from the ASAP repo.
df = pd.read_csv("https://raw.githubusercontent.com/stanford-futuredata/ASAP/master/Taxi.csv", sep=',')
# Normalize dtypes: float64 values, proper datetime timestamps.
df['value'] = df['value'].astype(np.float64)
df['timestamp'] = pd.to_datetime(df['timestamp'])
df.head()
# The python functions below are going to be used throughout this example to automatically resize and create the plots that are displayed using the Matplotlib python plotting package.
# +
def change_plot_size(width, height, plt):
    """Resize Matplotlib's default figure to *width* x *height* inches.

    Mutates the ``figure.figsize`` entry of the supplied pyplot module's
    ``rcParams`` in place, so every figure created afterwards uses it.
    """
    dims = plt.rcParams["figure.figsize"]
    dims[0], dims[1] = width, height
    plt.rcParams["figure.figsize"] = dims
def create_plot(plot_title, x_axis_title, y_axis_title, data, plt):
    """Plot *data* with a figure suptitle and axis labels.

    Each title argument is a ``(text, fontsize)`` pair. The pyplot module is
    passed in explicitly so the global figure state stays visible at the
    call site.
    """
    title_text, title_size = plot_title[0], plot_title[1]
    plt.suptitle(title_text, fontsize=title_size)
    x_text, x_size = x_axis_title[0], x_axis_title[1]
    plt.xlabel(x_text, fontsize=x_size)
    y_text, y_size = y_axis_title[0], y_axis_title[1]
    plt.ylabel(y_text, fontsize=y_size)
    plt.plot(data)
    # Ticks point outwards on every plot drawn after this call.
    plt.rcParams['xtick.direction'] = 'out'
# This code is going to be utilized to control the axis labeling of the plots
DAY_MULTIPLIER = 7 # Specify for the amount of days you want between each labeled x-axis tick
# Take the timestamp of each midnight sample, format it "Mon DD", and keep
# every DAY_MULTIPLIER-th one.
x_axis_labels = df[(df.timestamp.dt.hour==0)]['timestamp'].dt.strftime('%b %d').values[::DAY_MULTIPLIER]
# Blank out every other kept label to reduce clutter.
x_axis_labels[1::2] = " "
x_axis_labels, DAY_MULTIPLIER
# -
# Now we are going to plot the raw time series, and see if there are any patterns or outliers we can see by just looking at the plotted time series.
# +
change_plot_size(18,8, plt)
# Plot the raw half-hourly passenger counts.
create_plot(('Taxi Passenger Raw Data','48'),
            ('Window Start Date','24'),
            ('Half-Hourly Average Number of Taxi Passengers','24'),
            df['value'].values, plt)
# 48 samples per day, so labels land every (48 * DAY_MULTIPLIER) / 2 samples.
plt.xticks(np.arange(0,len(df['value'].values),(48*DAY_MULTIPLIER)/2),x_axis_labels)
plt.xticks(rotation=75)
plt.minorticks_on()
plt.margins(x=0)
plt.show()
# -
# It seems as if there is a general periodicity between spans of 1-day and 7-days, which can likely be explained by the fact that more people use taxis throughout the day than through the night and that it is reasonable to say most weeks have similar taxi-rider patterns. Also, maybe there is an outlier just to the right of the window starting near the end of October, but other than that there isn't anything you can conclude from just looking at the raw data.
# ## Generating the Matrix Profile
#
# The Matrix Profile is a new data mining tool that searches for exact subsequence matches. In plain language, it allows you to quickly identify the closest match to any pattern in the time series AND it allows you to quickly identify outliers as well! Isn't that awesome?
#
# Now we are going to generate a matrix profile and then utilize our package's code to generate a matrix profile and then plot it to see if we can extract any patterns from the data.
#
# First, you will need to declare a window size,`m`, to use as the subsequence length (i.e., a sequence of values from our time series that is a subset of the entire time series). Defining the subsequence length usually requires some level of domain knowledge that allow you to determine an adequate window size. Since this data was taken half-hourly, we chose a value `m = 48` to represent the span of exactly one day.
#
# Note that in this notebook, we will use m, window size, and subsequence length interchangeably.
m = 48
# Now if only we had an algorithm that we could use to locate patterns and outliers within the time series. Oh wait...we do! it's called *STOMP*. We've implemented a Numba JIT-compiled version of the popular STOMP algorithm that we've appropriately called *STUMP* that is capable of parallel computation.
#
# *STUMP* performs an ordered search for patterns and outliers within a specified time series (numpy array), and takes advantage of the locality of some calculations to minimize the runtime.
#
# We now have everything we need to run our dataset through the *STUMP*, let's do just that and plot the results.
# Compute the matrix profile with window m; each output row is
# [profile value, nearest-neighbor index, left index, right index].
stump_results = stumpy.stump(df['value'].values, m=m)
out_df = pd.DataFrame(stump_results, columns=['mp', 'inx', 'left', 'right'])
out_df.head()
# For clarity, we store the output inside a pandas dataframe
#
# The dataframe output contains an index along with four columns of data and
# this is how each can be interpreted:
#
# The leftmost column represents the index of the time series where the window begins that we are looking at
#
# **mp**: This is the matrix profile value and represents the Euclidean distance between the specified index's window and its closest corresponding subsequence
#
# **inx**: This represents the index of the closest matching window, so the Euclidean distance between the dataframe indexed window and the **inx** window is the value located in the same row under the **mp** column
#
# For now, you can ignore the **left** & **right** columns. If you are curious, they are explained throughout [Matrix Profile VII](http://www.cs.ucr.edu/~eamonn/chains_ICDM.pdf)
#
# ## Plotting the Matrix Profile
#
# We are going to need to extract just the Matrix Profile values if we are looking to plot it, which has been done below
# +
MP = [item[0] for item in stump_results] # Extracting only the Matrix Profile values from the STUMP results
create_plot(('1-Day STUMP','48'),
            ('Window Start Date','24'),
            ('Euclidian distance to closest subsequence','24'),
            MP, plt)
plt.minorticks_on()
plt.margins(x=0)
# Tick spacing mirrors the raw-data plot so both x-axes line up.
plt.xticks(np.arange(0,len(MP),(m*DAY_MULTIPLIER)/2),x_axis_labels)
plt.xticks(rotation=75)
plt.show()
# -
# ## Matrix Profile Analysis through STUMP
#
# Let's understand what we're looking at.
#
# ### Lowest Values
#
# The lowest values in this graph are considered the *best motifs* , as they represent that corresponding subsequence window (specified by the x-axis) has a closest subsequence with the distance *j* (specified by the y-axis). In this example, we can see that the 2 lowest values are located at `x=27` and `x=363`, those points in the graph have been highlighted with open triangles below.
#
# It happens to be that the 2 lowest data points are *exactly* 7 days apart, suggesting that in this dataset, there might be a periodicity of seven days, in addition to the more obvious periodicity of one day (*Taxi Example, Part III*).
#
# ### Highest Values
#
# So what about the highest values? The indices that have the highest values on the graph represent the uniqueness of their corresponding subsequence. Meaning that there is not an existing subsequence that is relatively close to the specified subsequence (in terms of euclidian distance). We found that the top 3 peaks in the graph happened to correspond exactly with the timing of Columbus Day, Daylight Saving Time, and Thanksgiving, respectively. The specified peaks are highlighted by downward-pointing triangles in the graph below.
# +
create_plot(('1-Day STUMP','48'),
            ('Window Start Date','24'),
            ('Euclidian distance to closest subsequence','24'),
            MP, plt)
# Filled downward triangles mark the three largest discords (outliers).
plt.plot(575,1.7, marker="v", markersize=15, color='b')
plt.plot(1535,3.65, marker="v", markersize=15, color='b')
plt.plot(2700,3.1, marker="v", markersize=15, color='b')
# Open upward triangles mark the two best motifs (lowest profile values).
plt.plot(30,.2, marker="^", markersize=15, color='b', fillstyle='none')
plt.plot(363,.2, marker="^", markersize=15, color='b', fillstyle='none')
plt.xticks(np.arange(0,3553,(m*DAY_MULTIPLIER)/2),x_axis_labels)
plt.xticks(rotation=75)
plt.minorticks_on()
plt.margins(x=0)
plt.show()
# -
# ## Different Window Sizes
#
# Manipulating the window size can have little impact on your resulting Matrix Profile. For example, the code below runs STUMP with windows of the corresponding lengths in the dictionary.
# +
# Candidate STUMP window sizes, in half-hour samples (48 samples per day).
window_spans = [
    ("Half-Day", 24),
    ("1-Day", 48),
    ("2-Days", 96),
    ("5-Days", 240),
    ("7-Days", 336),
]
days_dict = dict(window_spans)
# Tabular view of the same mapping, indexed by the human-readable span name.
dataframe = pd.DataFrame.from_dict(days_dict, orient='index', columns=['m'])
dataframe.head()
# -
# We purposely chose spans of corresponding day-lengths that we thought would be reasonable to digest.
# +
# Adjusting sizing for output
change_plot_size(18,8, plt)
plt.figure(1)
plt.suptitle('STUMP With Changing Window Sizes', fontsize='48')
index=1 # Keeps track of in-order index of dictionary (need for call to plt.subplot)
# One stacked subplot per candidate window size.
for k,i in days_dict.items():
    # NOTE(review): the loop value `i` is unused; `days_dict[k]` re-looks it up below.
    plt.rcParams['xtick.direction'] = 'in'
    figure = plt.subplot(len(days_dict), 1, index)
    temp = stumpy.stump(df['value'].values, days_dict[k])
    MP = [item[0] for item in temp] # Extracting only the MP values from the STUMP results
    figure.set_title('m='+ str(days_dict[k]) + ' (' + k + ')', fontsize=16, y=.5)
    plt.ylim(0,10)
    plt.xlim(0,3600)
    if k == list(days_dict.keys())[-1]: # Last key in dictionary
        # NOTE(review): tick spacing uses the global m (=48), not this
        # subplot's window size, so all panels share one x-axis layout.
        plt.xticks(np.arange(0,len(MP),(m*DAY_MULTIPLIER)/2),x_axis_labels)
        plt.xticks(rotation=75)
        plt.ylabel('Euclidian Distance to Closest Subsequence',horizontalalignment='left', fontsize='18')
        plt.xlabel('Window Start Date', fontsize='28')
    else:
        figure.set_xticklabels([])
    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0)
    plt.plot(MP)
    index = index + 1
plt.show()
# -
# We can see that even with varying subsequence lengths, our peaks stay prominent. But it looks as if all the non-peak values are converging towards each other. This is why having a knowledge of the data-context is important prior to running STUMP, as it is helpful to have a window size that may capture a repeating pattern, or outlier, within the dataset.
# ## STUMPED - Distributed STUMP
#
# When you have more than a few thousand data points and no access to GPUs, then STUMP may not be sufficient for your needs. Instead, you can try STUMPED, a (experimental) distributed parallel implementation that depends on Dask distributed:
#
# ```
# from dask.distributed import Client
# dask_client = Client()
#
# stumped_results = stumpy.stumped(dask_client, df['value'].values, m=m) # Note that a dask client is needed
# ```
# ## Summary
#
# And that's it! You have now loaded in a dataset, ran it through STUMP using our package, and were able to extract multiple conclusions of existing patterns and outliers within the time series. You can now import this package and use it in your own projects. Happy coding!
# ## Resources
#
# [Matrix Profile I](http://www.cs.ucr.edu/~eamonn/PID4481997_extend_Matrix%20Profile_I.pdf)
#
# [Matrix Profile II](http://www.cs.ucr.edu/~eamonn/STOMP_GPU_final_submission_camera_ready.pdf)
| notebooks/Tutorial_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Simple template fitting using pyroofit
# <div class="alert alert-block alert-danger">
# <b>Note for contributors:</b> Remember to run <code>Kernel > Restart & Clear output</code> before adding any changes to git!</div>
# In this tutorial we will perform template fits with ``pyroofit``.
#
# ``pyroofit`` is a python wrapper for the ``RooFit`` package. Documentation can be found here:
# http://www.desy.de/~swehle/pyroofit/
#
# The project's code can be found at https://github.com/simonUU/PyrooFit
#
# ``pyroofit`` can be installed with ``pip3 install --user pyroofit``.
# +
import ROOT
from pyroofit.models import Gauss, Chebychev
import numpy as np
import pandas as pd
# Show pictures in this notebook
from IPython.display import Image
# %matplotlib inline
# -
# Create some test data
# Toy dataset: two Gaussian peaks on top of a flat background over [-5, 5).
samples = np.concatenate((
    np.random.normal(-2, 1, 1000),            # narrow peak at x = -2
    np.random.normal(3, 2, 1000),             # broad peak at x = 3
    -5 + 10 * np.random.random_sample(2000),  # uniform background
))
data = pd.DataFrame({"x": samples})
# <div class="alert alert-block alert-info">
# <b>Note:</b> We will fit the <b>distribution</b> of the above data, so note the conceptual difference to the <code>x, y</code> data from the tutorial <code>fitting_curves_data</code>.</div>
# <div class="alert alert-block alert-success">
# <b>Question [medium]:</b> Can you guess what the corresponding histogram to this data will look like and why?</div>
# No? Then cheat and look at the histogram:
data.hist(bins=30)
# ## First try: Fit single gaussian as signal and line as background
# +
# Gaussian for signal
# ('x', -5, 5) declares the observable name and its fit range; mean=(-1, 0, 1)
# presumably gives (lower bound, initial value, upper bound) — confirm with
# the pyroofit docs.
pdf_sig = Gauss(('x', -5, 5), mean=(-1, 0, 1), title="Signal")
# Straight line for background
pdf_bkg = Chebychev(('x', -5, 5), n=1, title="Background")
# -
# <div class="alert alert-block alert-success">
# <b>Question:</b> Why is the model for the straight line called 'Chebychev'?</div>
# Now we build a compound PDF from the two simple PDFs.
# ``pyroofit`` is quite nice in that this has a very pythonic syntax (we overload the ``+`` operator):
# The overloaded + combines the two PDFs into one compound model.
pdf = pdf_sig + pdf_bkg
# Now we're ready to fit:
pdf.fit(data)
# Don't be deterred by the amount of output, but let's look at the results:
pdf.get()
# **Hint:** In order to get the results as a dictionary, use ``get_parameters()`` instead:
pdf.get_parameters()
# And plot:
# pyroofit renders to a file; display it back in the notebook via IPython.
pdf.plot("test.png", legend=True)
Image("test.png")
# <div class="alert alert-block alert-success">
# <b>Question 2 [easy]:</b> Why are the results so terrible?</div>
# ## Second try: Fit two gaussians as signal
# + run_control={"marked": false}
# Two Gaussians, each constrained (via its mean bounds) to one of the peaks.
gauss1 = Gauss(('x', -5, 5), mean=(-5, -3, 0), title="signal", name="gauss1")
gauss2 = Gauss(('x', -5, 5), mean=(0, 3, 5), title="signal", name="gauss2")
pdf_sig = gauss1 + gauss2
pdf_bkg = Chebychev(('x', -5, 5), n=1, title="Background")
pdf = pdf_sig + pdf_bkg
# + run_control={"marked": true}
pdf.fit(data)
# -
# Inspect each component's fitted parameters separately.
print("Gauss 1:")
print(gauss1.get())
print("Gauss 2:")
print(gauss2.get())
print("Bkg:")
print(pdf_bkg.get())
pdf.plot("test2.png", legend=True)
Image("test2.png")
# ## Exercise 1
# <div class="alert alert-block alert-success">
# <b>Exercise 1 [easy]:</b> Fit one gaussian for signal and a linear background model to the following dataset:
# </div>
# Exercise dataset: one Gaussian peak at x = -2 plus a flat background on [-5, 5).
samples = np.concatenate((
    np.random.normal(-2, 1, 1000),
    -5 + 10 * np.random.random_sample(2000),
))
data = pd.DataFrame({"x": samples})
# ## Fixing templates from MC
# In the previous examples, we simply "knew" that our signal was shaped like a (two) Gaussian(s) and the background was linear.
#
# Usually however, the situation isn't as simple and we first have to learn how our signal and background looks like by looking at MC data. Remember that in MC we always know signal from background (it's simulated data after all).
#
# Thus, we can first fit our signal and background model to the MC, then fix the parameters. Now we have two
# PDFs $\mathrm{pdf}_\mathrm{sig}$ and $\mathrm{pdf}_\mathrm{bkg}$ and get the signal and background yields by
# fitting the data with $\mu_\mathrm{sig}\mathrm{pdf}_\mathrm{sig} + \mu_\mathrm{bkg}\mathrm{pdf}_\mathrm{bkg}$.
# +
mc_signal = pd.DataFrame(
np.random.normal(-2, 1, 1000),
columns=["x"]
)
mc_bkg = pd.DataFrame(
np.concatenate((
np.random.normal(2, 1, 1000),
-5 + 10* np.random.random_sample(2000)
)),
columns=["x"]
)
data = pd.DataFrame(
np.concatenate((
np.random.normal(2, 1, int(1.2*1000)),
-5 + 10* np.random.random_sample(int(1.2*2000)),
np.random.normal(-2, 1, int(0.3*1000))
)),
columns=["x"]
)
# -
# Inspect the two MC samples before fitting.
mc_signal.hist(bins=30)
mc_bkg.hist(bins=30)
# Fit the signal template on signal-only MC, then freeze its parameters.
pdf_sig = Gauss(('x', -5, 5), mean=(-4, -2, 0), title="signal", name="gauss1")
pdf_sig.fit(mc_signal)
pdf_sig.plot("mc_signal_fit.png", legend=True)
Image("mc_signal_fit.png")
pdf_sig.fix()
# Background template: Gaussian bump plus a first-order polynomial.
pdf_bkg = (
    Gauss(('x', -5, 5), mean=(-5, 3, 5), title="Background", name="gauss2")
    + Chebychev(('x', -5, 5), n=1, title="Background")
)
pdf_bkg.fit(mc_bkg)
pdf_bkg.plot("mc_bkg_fit.png", legend=True)
Image("mc_bkg_fit.png")
pdf_bkg.fix()
pdf_bkg.get()
# With both templates frozen, only the signal/background yields float in the
# final fit to "data".
pdf = pdf_sig + pdf_bkg
pdf.fit(data)
pdf.plot("fit_to_data.png", legend=True)
Image("fit_to_data.png")
pdf.get()
pdf_bkg.get()
| examples/jupyter_notebooks/003_template_fits_pyroofit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.9 64-bit
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# Ensure we begin with defaults before making modifications
mpl.rcdefaults()
# Set the color palette with seaborn
sns.set_palette("Set1")
my_palette = sns.color_palette("Set1")
# Set up general parameters for plotting
# Styling aimed at a small single-column (3.375 in wide) figure.
mpl.rcParams['font.family'] = 'sans-serif'
mpl.rcParams['font.size'] = 8
mpl.rcParams['font.weight'] = 'light'
# mpl.rcParams['font.sans-serif'] = 'Gill Sans Nova'
mpl.rcParams['text.usetex'] = True  # render all text through LaTeX
mpl.rcParams['figure.figsize'] = (3.375,2.25) #inches
# Hide the top/right spines and use thin lines throughout.
mpl.rcParams['axes.spines.right'] = False
mpl.rcParams['axes.spines.top'] = False
mpl.rcParams['axes.linewidth'] = 0.5
mpl.rcParams['lines.linewidth'] = 0.5
mpl.rcParams['lines.markersize'] = 1
mpl.rcParams['axes.unicode_minus'] = True
mpl.rcParams['xtick.major.size'] = 3
mpl.rcParams['xtick.major.width'] = 0.5
mpl.rcParams['ytick.major.size'] = 3
mpl.rcParams['ytick.major.width'] = 0.5
mpl.rcParams['savefig.transparent'] = False
mpl.rcParams['legend.frameon'] = False
mpl.rcParams['savefig.format'] = 'pdf'
# +
t2 = 0.75  # inter-cell hopping, in units of the intra-cell hopping t1

def energy(k, t2):
    """Band energy E(k)/t1 = |1 + t2 * exp(i k)| (the lower band is its negative)."""
    re_part = 1 + t2 * np.cos(k)
    im_part = t2 * np.sin(k)
    return np.sqrt(re_part**2 + im_part**2)
# Plot the two symmetric energy bands over the Brillouin zone [-pi, pi].
plt.clf()
plt.figure(figsize=(2,1.3))
ks = np.arange(-np.pi, np.pi, 0.01)
plt.plot(ks, energy(ks, t2), color="red")
plt.plot(ks, -energy(ks, t2), color="red")
plt.ylim(-2,2)
plt.xlim(-np.pi,np.pi)
# Fix: raw strings for the LaTeX labels — "$-\pi$" contains the invalid
# escape sequence \p (SyntaxWarning on modern Python); values are unchanged.
plt.xticks([-np.pi,-np.pi/2,0,np.pi/2,np.pi], labels=[r"$-\pi$",r"$-\pi/2$","$0$",r"$\pi/2$",r"$\pi$"])
plt.ylabel(r"$E/t_1$")
plt.xlabel("$k$")
plt.savefig("ssh-energy-bands.svg", bbox_inches='tight')
plt.show()
# +
def lorentzian(x, gamma):
    """Normalized Lorentzian of full width *gamma*, centered at 0.

    NOTE(review): `lorentzian` was referenced below but never defined in this
    file, so the cell raised a NameError. This normalized Cauchy profile,
    which approaches a delta function as gamma -> 0, matches the
    "δ-potential" label — confirm against the originally intended figure.
    """
    half = gamma / 2.0
    return half / (np.pi * (x**2 + half**2))

xs = np.arange(-3,3,0.01)
plt.clf()
# A very narrow negative Lorentzian (gamma = 0.001) stands in for -δ(x);
# the clipped y-range hides its huge peak.
plt.plot(xs, -lorentzian(xs,0.001), label = "δ-potential")
plt.plot(xs, np.exp(-np.abs(xs)), c = "gray", label = "Bound state")
plt.legend()
plt.ylim(-1,1)
plt.xlabel("mλx")
plt.savefig("pot_and_boundstate.png", bbox_inches='tight')
plt.show()
# -
| assets/img/ssh-figs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pytorch
# language: python
# name: pytorch
# ---
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# ## Conversion to SimCLRv2 and Converting TF Pretrained Weights
# Pretrained weights can be found on Google's [repo](https://github.com/google-research/simclr). With conversion scripts linked. Most of the inital work can be found in spijkervet_prototypes.ipynb. This work is to clean up the spaghetti code and turn into modules.
# +
import os
import sys
import argparse
import datetime
from pprint import pprint
from pathlib import Path
import torch
import torch.nn as nn
import torchvision
import numpy as np
from torch.utils.tensorboard import SummaryWriter
sys.path.insert(0, '../../../SimCLRv2-PyTorch/')
from utils.model import save_model, load_optimizer
from simclr.modules import LogisticRegression
from simclr import SimCLRv2, SimCLRv2_ft
from simclr.modules import get_resnet, NT_Xent
from simclr.modules.transformations import TransformsSimCLR
from utils import yaml_config_hook
# +
# Build an argparse namespace from the repo's YAML config so the notebook
# mirrors the command-line training script's defaults.
simclr_repo = Path('/home/kaipak/dev/SimCLRv2-PyTorch/')
parser = argparse.ArgumentParser(description="SimCLR")
config = yaml_config_hook(simclr_repo / 'config/config.yaml')
tb_out = Path('/home/kaipak/models/tensorboard_logs')
for k, v in config.items():
    # Each config entry becomes a CLI flag; its type is inferred from the default.
    parser.add_argument(f"--{k}", default=v, type=type(v))
args = parser.parse_args([])  # parse no CLI args: take config defaults only
args.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# -
# Override selected config defaults for this particular run.
args.batch_size = 64
args.logistic_batch_size = 96
args.resnet = "resnet50"
args.epochs = 400
args.gpus = 4
args.optimizer = 'LARS'
args.workers = 64
args.dataset = 'CIFAR100'
pprint(vars(args))
# ## Dataset Transforms
# Dataset loader calls below apply SimCLR paper recommended transforms producing $x_i$ $x_j$ pairs.
# +
# Seed both torch and numpy for reproducible augmentations/shuffling.
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# TransformsSimCLR yields two augmented views (x_i, x_j) per image.
if args.dataset == "STL10":
    train_dataset = torchvision.datasets.STL10(
        args.dataset_dir,
        split="unlabeled",
        download=True,
        transform=TransformsSimCLR(size=args.image_size),
    )
elif args.dataset == "CIFAR10":
    train_dataset = torchvision.datasets.CIFAR10(
        args.dataset_dir,
        download=True,
        transform=TransformsSimCLR(size=args.image_size),
    )
elif args.dataset == "CIFAR100":
    train_dataset = torchvision.datasets.CIFAR100(
        args.dataset_dir,
        download=True,
        transform=TransformsSimCLR(size=args.image_size),
    )
else:
    raise NotImplementedError
if args.nodes > 1:
    # NOTE(review): `rank` is undefined in this notebook, so this branch
    # would raise a NameError; it only matters for multi-node runs.
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset, num_replicas=args.world_size, rank=rank, shuffle=True
    )
else:
    train_sampler = None
# Data Transforms happen here.
train_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=args.batch_size,
    shuffle=(train_sampler is None),
    drop_last=True,
    num_workers=args.workers,
    sampler=train_sampler,
)
# -
# ## SimCLRv2: Self Supervised Learning
# Modified SimCLR Pytorch code to v2 with Resnet code from converter which includes contrastive head.
#
# +
# ResNet-50 (2x width) backbone with the SimCLRv2 projection head.
model = SimCLRv2(resnet_depth=50, resnet_width_multiplier=2)
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
    model = nn.DataParallel(model)
if args.reload:
    # Resume from an earlier checkpoint on disk.
    model_fp = os.path.join(
        args.model_path, f"checkpoint_{args.epoch_num}.tar"
    )
    model.load_state_dict(torch.load(model_fp, map_location=args.device.type))
model = model.to(args.device)
optimizer, scheduler = load_optimizer(args, model)
# NT-Xent contrastive loss over the positive pairs in each batch.
criterion = NT_Xent(args.batch_size, args.temperature, world_size=1)
# -
def train(args, train_loader, model, criterion, optimizer, writer, display_every=50):
    """Run one contrastive-pretraining epoch and return the summed batch loss.

    Each batch yields an augmented positive pair (x_i, x_j); the model maps
    both views to projections on which the criterion is evaluated. Progress
    is printed every *display_every* steps, each batch loss is logged to the
    writer at ``args.global_step``, and the step counter is incremented once
    per batch.
    """
    model.train()
    running_loss = 0
    n_batches = len(train_loader)
    for step, ((view_a, view_b), _) in enumerate(train_loader):
        optimizer.zero_grad()
        view_a = view_a.cuda(non_blocking=True)
        view_b = view_b.cuda(non_blocking=True)

        # Forward the positive pair; only the projections enter the loss.
        _, _, proj_a, proj_b = model(view_a, view_b)
        batch_loss = criterion(proj_a, proj_b)
        batch_loss.backward()
        optimizer.step()

        if step % display_every == 0:
            print(f"Step [{step}/{n_batches}]\t Loss: {batch_loss.item()}")

        writer.add_scalar("Loss/train_epoch", batch_loss.item(), args.global_step)
        running_loss += batch_loss.item()
        args.global_step += 1
    return running_loss
# + tags=[]
# Self-supervised pretraining loop: logs to a timestamped TensorBoard run
# directory, checkpoints every 10 epochs, and saves a final checkpoint.
args.global_step = 0
args.current_epoch = 0
tb_writer = SummaryWriter(log_dir=f'/home/kaipak/models/tensorboard_logs/' +
                          f'{datetime.datetime.now().strftime("%Y%m%d-%H%M%S")}')
for epoch in range(args.start_epoch, args.epochs):
    lr = optimizer.param_groups[0]["lr"]
    epoch_loss = train(args, train_loader, model, criterion, optimizer, tb_writer)
    if scheduler:
        scheduler.step()
    if epoch % 10 == 0:
        save_model(args, model, optimizer)
    # Fix: the writer object is named `tb_writer`; `writer` was an undefined
    # name here and raised a NameError at the end of the first epoch.
    tb_writer.add_scalar("Loss/train", epoch_loss / len(train_loader), epoch)
    tb_writer.add_scalar("Misc/learning_rate", lr, epoch)
    print(
        f"Epoch [{epoch}/{args.epochs}]\t Loss: {epoch_loss / len(train_loader)}\t lr: {round(lr, 5)}"
    )
    args.current_epoch += 1
save_model(args, model, optimizer)
# -
# Free cached GPU memory left over from training.
torch.cuda.empty_cache()
# Peek at the parameter names stored in the converted TF checkpoint.
torch.load('/home/kaipak/models/SimCLRv2/r50_2x_sk1.pth').keys()
args
# ## SimCLRv2: Fine Tuning From Projection Head
# v2 says we should fine tune from middle projection layer. Original SimCLR implementation basically throws this away and additionally does not have fine-tuning step from Resnet. Build code to take middle layer of projection then run supervised fine-tuning using cross-entropy as loss function
# +
# For fine tuning, we just need the standard images with resizing
# (test_transform = resize/normalize only, no augmented view pair).
if args.dataset == "STL10":
    train_dataset = torchvision.datasets.STL10(
        args.dataset_dir,
        split="train",
        download=True,
        transform=TransformsSimCLR(size=args.image_size).test_transform,
    )
    test_dataset = torchvision.datasets.STL10(
        args.dataset_dir,
        split="test",
        download=True,
        transform=TransformsSimCLR(size=args.image_size).test_transform,
    )
elif args.dataset == "CIFAR10":
    train_dataset = torchvision.datasets.CIFAR10(
        args.dataset_dir,
        train=True,
        download=True,
        transform=TransformsSimCLR(size=args.image_size).test_transform,
    )
    test_dataset = torchvision.datasets.CIFAR10(
        args.dataset_dir,
        train=False,
        download=True,
        transform=TransformsSimCLR(size=args.image_size).test_transform,
    )
elif args.dataset == "CIFAR100":
    train_dataset = torchvision.datasets.CIFAR100(
        args.dataset_dir,
        train=True,
        download=True,
        transform=TransformsSimCLR(size=args.image_size).test_transform,
    )
    test_dataset = torchvision.datasets.CIFAR100(
        args.dataset_dir,
        train=False,
        download=True,
        transform=TransformsSimCLR(size=args.image_size).test_transform,
    )
else:
    raise NotImplementedError
train_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=args.logistic_batch_size,
    shuffle=True,
    drop_last=True,
    num_workers=args.workers,
)
test_loader = torch.utils.data.DataLoader(
    test_dataset,
    #batch_size=args.logistic_batch_size,
    # NOTE(review): test batch size is hard-coded to 32, overriding the
    # commented-out config value above — confirm this is intentional.
    batch_size=32,
    shuffle=False,
    drop_last=True,
    num_workers=args.workers,
)
# +
# Number of target classes for the supervised fine-tuning head.
n_classes = 100
# Rebuild the SimCLRv2 backbone from the converted TF checkpoint, then wrap it
# with the fine-tuning head (trains from the middle projection layer, per v2).
simclr_model = SimCLRv2(resnet_depth=50, resnet_width_multiplier=2, sk_ratio=0.0625,
                        pretrained_weights='/home/kaipak/models/SimCLRv2/r50_2x_sk1.pth')
simclr_model_ft = SimCLRv2_ft(simclr_model, n_classes)
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    simclr_model_ngpu = nn.DataParallel(simclr_model_ft)
    simclr_model = simclr_model_ngpu.to(args.device)
else:
    # Bug fix: the single-GPU/CPU path previously left `simclr_model` pointing
    # at the bare backbone (no classifier head) and never moved it to
    # args.device, so the training cell below would fine-tune the wrong model.
    simclr_model = simclr_model_ft.to(args.device)
# -
model = get_resnet()
# +
from prettytable import PrettyTable

def count_parameters(model):
    """Print a per-module table of trainable parameter counts.

    Returns the total number of trainable parameters in ``model``.
    """
    table = PrettyTable(["Modules", "Parameters"])
    total_params = 0
    for name, tensor in model.named_parameters():
        # Frozen (requires_grad=False) parameters are excluded from the count.
        if not tensor.requires_grad:
            continue
        n_params = tensor.numel()
        table.add_row([name, n_params])
        total_params += n_params
    print(table)
    print(f"Total Trainable Params: {total_params}")
    return total_params
# Parameter count of the reference resnet (first element of the tuple).
count_parameters(model[0])
# +
count_parameters(simclr_model)
# -
# Cross-check the total while de-duplicating shared storage: keying on
# data_ptr() counts tensors that alias the same memory (tied weights) once.
sum(dict((p.data_ptr(), p.numel()) for p in model[0].parameters()).values())
# +
def train(args, loader, model, criterion, optimizer, writer):
    """Run one supervised fine-tuning epoch.

    Args:
        args: experiment namespace; reads ``current_epoch`` and
            reads/increments ``global_step``.
        loader: DataLoader yielding ``(images, labels)`` batches.
        model: classifier to optimize (already on the GPU).
        criterion: loss function (cross-entropy).
        optimizer: optimizer over ``model``'s parameters.
        writer: TensorBoard SummaryWriter.

    Returns:
        ``(epoch_loss, epoch_accuracy)``: sums over steps; divide by
        ``len(loader)`` for per-step averages.
    """
    epoch_loss = 0.0
    epoch_accuracy = 0.0
    model.train()
    for step, (X, y) in enumerate(loader):
        optimizer.zero_grad()
        X = X.cuda(non_blocking=True)
        y = y.cuda(non_blocking=True)
        output = model(X)
        step_loss = criterion(output, y)
        predicted = output.argmax(1)
        step_accuracy = (predicted == y).sum().item() / y.size(0)
        epoch_accuracy += step_accuracy
        step_loss.backward()
        optimizer.step()
        # Bug fix: accumulate a Python float, not the loss tensor —
        # accumulating the tensor kept every step's autograd graph alive
        # for the whole epoch (GPU memory growth).
        epoch_loss += step_loss.item()
        writer.add_scalar("Accuracy/train_step", step_accuracy, args.global_step)
        args.global_step += 1
        if step % 100 == 0:
            print(f"Step [{step}/{len(loader)}]\t Accuracy {step_accuracy}...")
    # Bug fix: log epoch *averages*; previously the last step's accuracy and
    # the un-normalized loss sum were written as the epoch scalars.
    writer.add_scalar("Accuracy/train_epoch", epoch_accuracy / len(loader), args.current_epoch)
    writer.add_scalar("Loss/train_epoch", epoch_loss / len(loader), args.current_epoch)
    return epoch_loss, epoch_accuracy
def test(args, loader, model, criterion, optimizer):
    """Evaluate ``model`` on ``loader``.

    ``optimizer`` is accepted only for signature symmetry with ``train`` and
    is not used. Returns ``(epoch_loss, epoch_accuracy)`` summed over batches;
    divide each by ``len(loader)`` for per-batch averages.
    """
    epoch_loss = 0.0
    epoch_accuracy = 0.0
    model.eval()
    # Inference only: disable autograd instead of the original per-step
    # model.zero_grad() (nothing here ever called backward(), so gradients
    # were never produced — no_grad() saves the activation memory instead).
    with torch.no_grad():
        for step, (x, y) in enumerate(loader):
            x = x.to(args.device)
            y = y.to(args.device)
            output = model(x)
            step_loss = criterion(output, y)
            predicted = output.argmax(1)
            step_accuracy = (predicted == y).sum().item() / y.size(0)
            epoch_accuracy += step_accuracy
            epoch_loss += step_loss.item()
    return epoch_loss, epoch_accuracy
# -
# Build optimizer + LR schedule for the fine-tuning model, the supervised
# loss, and a (currently unused below — see tb_writer) TensorBoard writer.
optimizer, scheduler = load_optimizer(args, simclr_model)
criterion = torch.nn.CrossEntropyLoss()
writer = SummaryWriter(log_dir='/home/kaipak/models/runs')
# +
# NOTE(review): as a line magic, `%time` here times only this (empty) line,
# not the whole cell — `%%time` would be needed for that.
# %time
# Reset bookkeeping counters and run a short fine-tuning schedule.
args.global_step = 0
args.current_epoch = 0
args.logistic_epochs = 3
# Fresh timestamped TensorBoard run directory (the `writer` created in the
# previous cell is not used here).
tb_writer = SummaryWriter(log_dir=f'/home/kaipak/models/tensorboard_logs/' +
                          f'{datetime.datetime.now().strftime("%Y%m%d-%H%M%S")}')
#for epoch in range(args.logistic_epochs):
for epoch in range(args.logistic_epochs):
    loss_epoch, accuracy_epoch = train(args, train_loader, simclr_model, criterion, optimizer, tb_writer)
    print(f"Epoch [{epoch}/{args.logistic_epochs}]\t Loss: {loss_epoch / len(train_loader)}\t Accuracy: {accuracy_epoch / len(train_loader)}")
    args.current_epoch += 1
# Final held-out evaluation with the last-epoch weights.
loss_epoch, accuracy_epoch = test(
    args, test_loader, simclr_model, criterion, optimizer
)
print(
    f"[FINAL]\t Loss: {loss_epoch / len(test_loader)}\t Accuracy: {accuracy_epoch / len(test_loader)}"
)
# -
# Free cached GPU memory, then rebuild a fresh (un-fine-tuned) backbone from
# the checkpoint to inspect its projection head and the training objects.
torch.cuda.empty_cache()
simclr_model = SimCLRv2(resnet_depth=50, resnet_width_multiplier=2, sk_ratio=0.0625,
                        pretrained_weights='/home/kaipak/models/SimCLRv2/r50_2x_sk1.pth')
simclr_model.projector
optimizer
train_dataset
| experiments_and_development/SimCLRv2-PyTorch/simclrv2_conversion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="ArjrgG_mXccI"
# # Preprocessing Data
# + [markdown] id="fiJZsa8dXfNo"
# ## Integrasi Data
#
# [Sumber data dari Kaggle dengan judul Olympic Tokyo](https://www.kaggle.com/arjunprasadsarkhel/2021-olympics-in-tokyo/version/7)
# + id="uwYJMdU3XziB"
import pandas as pd
# + id="AwNhr4Z2XbWx"
# Load the athletes spreadsheet (one row per athlete, 'NOC' = country).
athlete = pd.read_excel('/content/Athletes.xlsx')
athlete
# + [markdown] id="23GV2rJIYXqb"
# **Aggregate the data to obtain the number of athletes sent by each country**
# + id="Fai9qFIKX7Rd"
# Count athletes per NOC; 'jumlah' (Indonesian for "count") is the new column.
athlete_country = athlete.groupby(['NOC'])['NOC'].count().reset_index(name='jumlah')
athlete_country
# + [markdown] id="m_bBzvJ5ZbWe"
# **Load the Medals data to see how many medals each country won**
# + id="4sR9K9eIYvVP"
medals = pd.read_excel('/content/Medals.xlsx')
medals
# + [markdown] id="Y0bF2pcJbsC6"
# The Athletes and Medals data will be joined on the 'NOC' column of athletes
# and the 'Team/NOC' column of medals.
# To simplify the join, rename the medals column to 'NOC'.
#
# **medals: Team/NOC->NOC**
# + id="LmbCfE9ecBM1"
medals.rename(columns={'Team/NOC':'NOC'}, inplace=True)
medals
# + [markdown] id="MUY3756JdavC"
# Check whether the medals data contains redundant rows, using aggregation.
# + id="uMqPoBX1dps3"
medals_check = medals.groupby(['NOC'])['NOC'].count().reset_index(name='jumlah')
# + [markdown] id="LrMqV7Niecis"
# Build a filter to find any NOC written more than once. A count of 2 or more
# in the 'jumlah' column would mean a country has duplicate NOC rows.
# + id="zLlVZJFIeb_D"
# NOTE(review): the name `filter` shadows the Python builtin of the same name.
filter = medals_check['jumlah']==1
filter
# + [markdown] id="slM0bwZqetHq"
# Apply the filter to the medals data using `where`.
# + id="pB8wIBFbezkJ"
# Rows failing the filter become NaN; shape[0] is unchanged by where().
medals_check.where(filter).shape[0]
# + [markdown] id="LxEmGGxcfVZ2"
# Compare the filtered row count with the unfiltered one. No NOC appears twice
# or more, so there is no redundant data.
# + id="pOO1BRUmfIZD"
medals.shape[0]
# + [markdown] id="THwU2LZ7fd53"
# Then join the athletes and medals datasets on 'NOC' with an inner merge,
# keeping only NOC values present in both.
# + id="BeRXdTXJZnZM"
country_medals = athlete_country.merge(medals, on='NOC', how='inner')
country_medals
# + id="fXZxvDeOaT43"
import matplotlib.pyplot as plt
# + [markdown] id="sZGPcXg3gRcI"
# Visualize the data, comparing the number of athletes sent against the medal
# ranking for each country.
# + id="FzS2areAgBTR"
# Overlaid bars: athlete count vs. 'Rank by Total' per country.
plt.bar(country_medals['NOC'],country_medals['jumlah'])
plt.bar(country_medals['NOC'],country_medals['Rank by Total'])
# + [markdown] id="5T_Uw8FHgfcT"
# With this many countries the chart is unreadable, so restrict the plot to
# countries matching a criterion.
# + id="zyVmiLCngJX3"
country_medals.describe()
# + id="TNW_5R7qg9Lj"
# Only countries that sent more than 150 athletes, sorted by athlete count.
visualize_data = country_medals[country_medals['jumlah']>150].sort_values('jumlah')
plt.bar(visualize_data['NOC'],visualize_data['jumlah'])
plt.bar(visualize_data['NOC'],visualize_data['Rank by Total'])
# + [markdown] id="8Mp5RwhLi9P2"
# ------------------------------------------
# ## Using the Iris dataset
# + id="VBu2KQ-shNoS"
# Load Iris from the UCI repository; the file has no header row.
iris = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', delimiter=',',names=['sepal_length','sepal_width','petal_length','petal_width','class'])
iris
# + [markdown] id="b_ctA-yYjkL0"
# ### Normalizing the Iris dataset with min-max normalization
# + id="aqLyZCHqjsjD"
from sklearn.preprocessing import MinMaxScaler
# + [markdown] id="J9ZmMtT9kwne"
# Split features and class label
# + id="ansmLhFGkwRg"
# All columns except the last are features.
x = iris.iloc[:,:-1]
x
# + id="NnyWt-oKk5cK"
# The last column is the class label.
y = iris.iloc[:,-1]
y
# + id="Rf7MqEs4kT_-"
# Scale each feature to [0, 1] (min-max).
scaller = MinMaxScaler()
scaller.fit(x)
x = pd.DataFrame(scaller.transform(x), columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width'])
x
# + [markdown] id="ZzmjxGWzraPG"
# ## Discretization
# + id="_EYHzAzKkhhy"
plt.hist(iris['sepal_length'])
# + id="rS_bGoOfpIHt"
plt.hist(iris['sepal_width'])
# + id="BQuoxqY9qM7m"
from sklearn.preprocessing import KBinsDiscretizer
# + id="xFec9OCKqSqz"
# Bin each (already min-max-scaled) feature into 3 equal-width ordinal bins.
discretizer = KBinsDiscretizer(encode='ordinal', strategy='uniform', n_bins=3)
discretizer.fit(x)
x = pd.DataFrame(discretizer.transform(x), columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width'])
x
# + id="86NHJumQq3Yy"
# + id="86NHJumQq3Yy"
| Pertemuan 10 - Pra-pemrosesan Data Relasional/AVD_Preprocessing2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Simple Stock Date
# ## This tutorial is about date and how to manipulate date in dataframe.
# + outputHidden=false inputHidden=false
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
# fix_yahoo_finance is used to fetch data
# NOTE(review): fix_yahoo_finance was renamed to `yfinance`; this import may
# fail on current installs — confirm the environment still ships the old name.
import fix_yahoo_finance as yf
yf.pdr_override()
# + outputHidden=false inputHidden=false
# input: ticker symbol and date range for the download
symbol = 'AAPL'
start = '2015-01-01'
end = '2019-01-01'
# Read data (daily OHLCV indexed by Date)
dataset = yf.download(symbol,start,end)
# View Columns
dataset.head()
# + outputHidden=false inputHidden=false
# Date becomes a columns (reset_index moves the DatetimeIndex into 'Date')
df = dataset.copy() # Copy the original data
dfn = df.reset_index()
dfn.head()
# -
df = dataset.copy()
# + outputHidden=false inputHidden=false
df.head()
# + outputHidden=false inputHidden=false
# Add Year, Month, Day columns derived from the DatetimeIndex.
df['Year'] = df.index.year
df['Month'] = df.index.month
df['Day'] = df.index.day
# + outputHidden=false inputHidden=false
df.head()
# + outputHidden=false inputHidden=false
# Convert Daily to Weekly: take the last observation of each calendar week.
weekly = dataset.copy()
weekly = weekly.resample('W').last()
weekly.head()
# + outputHidden=false inputHidden=false
# Convert Daily to Yearly: sample at business-year-end dates.
yearly = dataset.copy()
yearly = yearly.asfreq('BY') # Use Business Year - 'BY'
yearly.head()
# + outputHidden=false inputHidden=false
# Choose Particular Year to analyze (boolean mask on the Date column).
monthly = dataset.copy()
monthly = monthly.reset_index()
y2017 = monthly[monthly['Date'].dt.year==2017]
y2017
# + outputHidden=false inputHidden=false
month_name = dataset.copy()
# + outputHidden=false inputHidden=false
# Convert Daily to Monthly.
# NOTE(review): asfreq('M') samples calendar month-end dates; month-ends that
# are not trading days likely yield NaN rows — confirm intended.
month_name = month_name.asfreq('M')
# + outputHidden=false inputHidden=false
# Numeric month first; replaced by names below via the calendar module.
month_name['Month_Name'] = month_name.index.month
# + outputHidden=false inputHidden=false
month_name.head()
# + outputHidden=false inputHidden=false
import calendar
month_name['Month_ABBR'] = month_name['Month_Name'].apply(lambda x: calendar.month_abbr[x])
month_name.head()
# + outputHidden=false inputHidden=false
month_name['Month_Name'] = month_name['Month_Name'].apply(lambda x: calendar.month_name[x])
month_name.head()
# + outputHidden=false inputHidden=false
# Pivot Table Date: rows = month number, columns = year, mean 'Adj Close'.
df_months = pd.pivot_table(df, index=df.index.month, columns=df.index.year, values = 'Adj Close') # each months
df_months
# + outputHidden=false inputHidden=false
df_days = pd.pivot_table(df, index=df.index.day, columns=df.index.year, values = 'Adj Close') # daily for one whole months
df_days
# + outputHidden=false inputHidden=false
# Without `values`, every numeric column is pivoted.
df_all_columns = pd.pivot_table(df, index=df.index.month, columns=df.index.year)
df_all_columns
# + outputHidden=false inputHidden=false
# Expanded calendar columns, including weekday number and name.
stock_data = df.copy()
stock_data['Year'] = df.index.year
stock_data['Month'] = df.index.month
stock_data['Day'] = df.index.day
stock_data['Week_Day'] = df.index.dayofweek
stock_data['Week_Day_Name'] = df.index.strftime('%A')
stock_data.tail(10)
# + outputHidden=false inputHidden=false
# First adjusted close of each (Year, Month) group.
approach1 = stock_data.groupby(['Year', 'Month']).first()['Adj Close']
approach1.tail(12)
# + outputHidden=false inputHidden=false
# First adjusted close of each (Year, Day-of-month) group.
approach2 = stock_data.groupby(['Year', 'Day']).first()['Adj Close']
approach2.tail(12)
# -
# ## Convert Date to String
# + outputHidden=false inputHidden=false
string_date = dataset.copy()
# + outputHidden=false inputHidden=false
# Copy the DatetimeIndex into a regular 'Date' column.
string_date['Date'] = string_date.index
string_date.head()
# + outputHidden=false inputHidden=false
# Format dates as YYYYMMDD strings, then cast to int (e.g. 20150102).
string_date['Date'] = string_date['Date'].dt.strftime("%Y%m%d").astype(int)
# + outputHidden=false inputHidden=false
# Convert Date to Numbers
string_date.head()
| 02_SimpleStockDate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Show up to 999 rows when displaying DataFrames in the notebook.
pd.options.display.max_rows = 999
from tang_jcompneuro.model_fitting_postprocess import load_data_generic
def check_all(squared, score_col_name):
    """Load ccnorm_5 fit results for the Gabor model variants.

    Covers both monkeys (MkA/MkE2 shape datasets) and both stimulus
    subsets ('all' and 'OT'); `squared` and `score_col_name` are passed
    through to `load_data_generic`.
    """
    gabor_variants = [
        ('gabor', 'simple'),
        ('gabor', 'complex'),
        ('gabor', 'multi,1,1'),
        ('gabor', 'single', True, False, ('simple', 'complex')),
    ]
    return load_data_generic(
        gabor_variants,
        load_naive=False,
        metric='ccnorm_5',
        squared=squared,
        score_col_name=score_col_name,
        datasets_to_check=('MkA_Shape', 'MkE2_Shape'),
        subsets_to_check=('all', 'OT'),
        # Keep only the model name component as the display name.
        modelname_alternative=lambda _1, x, _2, _3: x,
    )
# Load squared-cc results at 100% of the training data.
df_all_cc2 = check_all(squared=True, score_col_name='cc2').xs(100, level='percentage').sort_index()
# Mean cc^2 of HO neurons per (dataset, model), one column per stimulus subset.
HO_neuron_perf = df_all_cc2.apply(lambda x: x['cc2']['HO']['mean'], axis=1).unstack('subset')
HO_neuron_perf
# Same summary for OT neurons.
OT_neuron_perf = df_all_cc2.apply(lambda x: x['cc2']['OT']['mean'], axis=1).unstack('subset')
OT_neuron_perf
# +
# show complex vs multi,1,1
# show simple vs multi,1,1
# show single vs multi,1,1
# +
from tang_jcompneuro.plotting import image_subset_and_neuron_subset_list, show_one_basic
# magic numbers here follow
# https://github.com/leelabcnbc/tang_jcompneuro/blob/master/thesis_plots/v1_fitting/results_glm_opt_detailed.ipynb
def draw_one_stuff(monkey, model_name_x, model_name_y, save=None):
    """Scatter-compare two models' per-neuron cc^2 for one monkey.

    One panel per (image subset, neuron subset) pair; reads raw scores from
    the global `df_all_cc2`. `save` is currently unused (saving code is
    commented out below).
    """
    # draw one by one.
    num_panel = len(image_subset_and_neuron_subset_list)
    plt.close('all')
    fig, axes = plt.subplots(1, num_panel, sharex=True, sharey=True, squeeze=False,
                             figsize=(5.5,2.5))
    for idx, (ax, (img_subset, neuron_subset)) in enumerate(zip(axes.ravel(), image_subset_and_neuron_subset_list)):
        # Raw per-neuron scores for each model under this subset combination.
        data_x = df_all_cc2.at[(monkey, img_subset, model_name_x), 'cc2'][neuron_subset]['raw']
        data_y = df_all_cc2.at[(monkey, img_subset, model_name_y), 'cc2'][neuron_subset]['raw']
        print(data_x.min(), data_x.max())
        print(data_y.min(), data_y.max())
        # Only the leftmost panel carries the y-axis label.
        show_one_basic(data_x, data_y, title=f'{neuron_subset} neurons\n{img_subset} stimuli',
                       ax=ax,mean_title='mean $CC_\mathrm{norm}^2$', xlabel=model_name_x,
                       ylabel=model_name_y if idx == 0 else None)
    monkey_title = {'MkA_Shape': 'A', 'MkE2_Shape': 'B'}[monkey]
    fig.suptitle(f'{model_name_y} vs. {model_name_x} on monkey {monkey_title}')
    # adjust figure
    fig.subplots_adjust(top=0.75, bottom=0.175, left=0.1, right=0.98, hspace=0.1, wspace=0.075)
    # if save is not None:
    #     save_dir = os.path.join(dir_dictionary['plots'], 'supp', 'cnn_different_arch')
    #     os.makedirs(save_dir, exist_ok=True)
    #     fig.savefig(os.path.join(save_dir, f'{save}.pdf'), dpi=300)
    plt.show()
# -
# Compare each single-component Gabor variant against the multi,1,1 model
# on monkey A.
draw_one_stuff('MkA_Shape', 'simple', 'multi,1,1')
draw_one_stuff('MkA_Shape', 'complex', 'multi,1,1')
draw_one_stuff('MkA_Shape', 'single', 'multi,1,1')
| results_ipynb/step_1_rough_exploration/gabor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="4XuLMEWSezg8"
# # Module Import
# + id="tRrVBqoNeykd"
import numpy as np
import pandas as pd
import re
import argparse
import os
import pickle
import nltk
import tensorflow as tf
import re
import string
import ast
import tensorflow as tf
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
from sklearn.metrics import (
accuracy_score,
recall_score,
precision_score,
f1_score, auc, roc_curve, confusion_matrix)
from sklearn.model_selection import train_test_split,KFold,StratifiedKFold
from gensim.models import KeyedVectors
from nltk import tokenize
from keras.preprocessing.text import Tokenizer,text_to_word_sequence
from keras.utils.np_utils import to_categorical
from keras.layers import Layer
from keras import initializers
from keras import backend as K
from keras.layers import Dense, Input
from keras.layers import Embedding, GRU, Bidirectional,TimeDistributed,concatenate,LSTM
from keras.models import Model, load_model
from keras.callbacks import EarlyStopping, ModelCheckpoint
from imblearn.over_sampling import RandomOverSampler,SMOTE
from keras.preprocessing.text import Tokenizer,text_to_word_sequence
from keras.utils.np_utils import to_categorical
from keras.layers import Layer
from keras import initializers
from keras import backend as K
from keras.layers import Dense, Input
from keras.layers import Embedding, GRU, Bidirectional,TimeDistributed,concatenate,LSTM
from keras.models import Model, load_model
from keras.callbacks import EarlyStopping, ModelCheckpoint
from imblearn.over_sampling import RandomOverSampler,SMOTE
# Let TensorFlow grow GPU memory on demand instead of pre-allocating it all.
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth=True
session = tf.compat.v1.Session(config=config)
# Pin Keras to the TF backend and restrict visibility to GPU 0.
os.environ['KERAS_BACKEND']='tensorflow'
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# NLTK resources: sentence tokenizer models and the stopword corpus.
nltk.download('punkt')
nltk.download('stopwords')
# + [markdown] id="OI-26cs84etf"
# # Method Loading
# + id="7NK_nV-b4XPm" executionInfo={"status": "ok", "timestamp": 1630681081239, "user_tz": -60, "elapsed": 300, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "05276728414427570973"}}
# partially from https://github.com/richliao/textClassifier/blob/master/textClassifierHATT.py
# removing URLs
def remove_url(text):
    """Strip http(s):// and www.-prefixed URLs from *text*."""
    return re.sub(r'https?://\S+|www\.\S+', r'', text)
# remove html tag
def remove_html(text):
    """Delete anything between '<' and '>' (non-greedy), tags included."""
    return re.sub(r'<.*?>', r'', text)
# remove emoji
# Compiled once at module load; covers the common emoji code-point ranges.
_EMOJI_PATTERN = re.compile("["
                            u"\U0001F600-\U0001F64F"  # emoticons
                            u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                            u"\U0001F680-\U0001F6FF"  # transport & map symbols
                            u"\U0001F1E0-\U0001F1FF"  # flags
                            u"\U00002702-\U000027B0"
                            u"\U000024C2-\U0001F251"
                            "]+", flags=re.UNICODE)

def remove_emoji(text):
    """Delete characters falling in the emoji ranges above from *text*."""
    return _EMOJI_PATTERN.sub(r'', text)
# Stop Word Removal
# English stopwords from NLTK plus project-specific additions.
NLTK_stop_words_list = stopwords.words('english')
custom_stop_words_list = ['...']
final_stop_words_list = NLTK_stop_words_list + custom_stop_words_list
def remove_stopwords(text):
    """custom function to remove the stopwords"""
    # Set membership for O(1) lookups; same result as the list version.
    stop = set(final_stop_words_list)
    return " ".join(word for word in str(text).split() if word not in stop)
# Symbol removal
def clean_str(string):
    """Normalize a text string: drop disallowed symbols, pad contractions
    and ')'/'?' with spaces, collapse whitespace, strip quotes/backslashes,
    then lowercase. Substitutions run in the original fixed order."""
    _subs = (
        (r"[^A-Za-z0-9(),.!?\'\`]", " "),
        (r"\'s", " \'s"),
        (r"\'ve", " \'ve"),
        (r"\)", " \) "),
        (r"\?", " \? "),
        (r"\s{2,}", " "),
        (r"\\", ""),
        (r"\'", ""),
        (r"\"", ""),
    )
    for pattern, repl in _subs:
        string = re.sub(pattern, repl, string)
    return string.strip().lower()
def create_emb_mat(emb_path, word_idx, emb_dim):
    """Build an embedding matrix for a tokenizer vocabulary.

    Args:
        emb_path: path to the pre-trained embedding file (GloVe text format
            or word2vec binary, chosen by the global ``emb_file_flag``).
        word_idx: tokenizer ``word_index`` mapping word -> integer id.
        emb_dim: embedding dimensionality.

    Returns:
        ndarray of shape ``(len(word_idx) + 1, emb_dim)``; row i holds word
        i's pre-trained vector, or random values for out-of-vocabulary words
        (note: the random init makes the matrix non-deterministic).

    Relies on module-level globals: ``emb_file_flag``, ``LIMIT``,
    ``KeyedVectors``, ``np``.
    """
    embeddings_index = {}
    if emb_file_flag == 'glove':
        # Bug fix: read from the `emb_path` argument (the original opened the
        # global `embedding_path`, silently ignoring the parameter) and use a
        # context manager so the file is closed even on error.
        with open(emb_path, encoding='utf-8') as f:
            for line in f:
                values = line.split()
                word = values[0]
                vec = np.asarray(values[1:], dtype='float32')
                embeddings_index[word] = vec
    else:
        # word2vec binary format, truncated to the LIMIT most frequent words.
        wv_from_bin = KeyedVectors.load_word2vec_format(emb_path, limit=LIMIT)
        for word, vector in zip(wv_from_bin.vocab, wv_from_bin.vectors):
            vec = np.asarray(vector, dtype='float32')
            embeddings_index[word] = vec
    counter = 0  # number of vocabulary words missing from the embeddings
    emb_matrix = np.random.random((len(word_idx) + 1, emb_dim))
    for word, i in word_idx.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            emb_matrix[i] = embedding_vector
        else:
            counter += 1
    return emb_matrix
class Attention(Layer):
    """Word/sentence-level attention layer for the hierarchical network.

    Learns a projection (W, b) and a context vector u; the output is the
    attention-weighted sum of the timestep vectors (Yang et al.-style HAN
    attention — presumably; confirm against the cited reference repo).
    """
    def __init__(self, attention_dim, **kwargs):
        # NOTE(review): **kwargs is accepted but not forwarded to super();
        # confirm no layer kwargs (e.g. name) are expected to pass through.
        self.init = initializers.get('normal')
        self.supports_masking = True
        self.attention_dim = attention_dim
        super(Attention, self).__init__()
    def build(self, input_shape):
        """Create W (features x dim), b (dim,) and context vector u (dim x 1)."""
        # Expects 3D input: (batch, timesteps, features).
        assert len(input_shape) == 3
        self.W = K.variable(self.init((input_shape[-1], self.attention_dim)))
        self.b = K.variable(self.init((self.attention_dim, )))
        self.u = K.variable(self.init((self.attention_dim, 1)))
        self._trainable_weights = [self.W, self.b, self.u]
        super(Attention, self).build(input_shape)
    def compute_mask(self, inputs, mask=None):
        # Pass the incoming mask through unchanged.
        return mask
    def call(self, h, mask=None):
        """Return the attention-weighted sum over the timestep axis."""
        # Hidden representation: uit = tanh(h W + b).
        uit = K.tanh(K.bias_add(K.dot(h, self.W), self.b))
        # Unnormalized scores against the context vector u.
        ait = K.dot(uit, self.u)
        ait = K.squeeze(ait, -1)
        ait = K.exp(ait)
        if mask is not None:
            # Zero out masked (padded) timesteps before normalizing.
            ait *= K.cast(mask, K.floatx())
        # Softmax-style normalization; epsilon guards against division by zero.
        ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        ait = K.expand_dims(ait)
        weighted_input = h * ait
        output = K.sum(weighted_input, axis=1)
        return output
    def compute_output_shape(self, input_shape):
        # Timestep axis is reduced away: (batch, features).
        return (input_shape[0], input_shape[-1])
    def get_config(self):
        """Serialize attention_dim so the layer can be reloaded."""
        config = {
            'attention_dim': self.attention_dim
        }
        base_config = super(Attention, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
# + [markdown] id="tMjHs4Z1huFV"
# # Data Loading
# + id="hyUqhRC9HMNz"
# Fetch the dataset repository (Colab shell magic).
# !git clone https://github.com/anonymoususr12/MHPurf
# + id="qCUWPviX74lF"
# %cd MHPurf/data
# Setting Project
project = 'keras' # select in [tensorflow, pytorch, keras, incubator-mxnet, caffe]
path = f'{project}.csv'
pd_all = pd.read_csv(path)
# Shuffle rows with a fixed seed for reproducibility.
pd_all = pd_all.sample(frac=1,random_state=999)
# Concatenate title and body ("Title. Body"); fall back to title alone when
# the body is missing.
# NOTE(review): assigning via pd_all['Title+Body'].iloc[idx] is chained
# assignment — pandas may warn and, depending on version, not write through;
# confirm the column is actually populated.
pd_all['Title+Body'] = ''
for idx in range(len(pd_all)):
    if pd.notna(pd_all['Body'].iloc[idx]):
        pd_all['Title+Body'].iloc[idx] = pd_all['Title'].iloc[idx] + '. ' + pd_all['Body'].iloc[idx]
    else:
        pd_all['Title+Body'].iloc[idx] = pd_all['Title'].iloc[idx]
# Export one CSV per feature, all normalized to (id, Number, sentiment, text).
pd_title = pd_all.rename(columns={"Unnamed: 0":"id","class":"sentiment","Title":"text"})
pd_title.to_csv('Title.csv', index=False, columns=["id","Number","sentiment","text"], mode="w")
pd_body = pd_all.rename(columns={"Unnamed: 0":"id","class":"sentiment","Body":"text"})
pd_body.to_csv('Body.csv', index=False, columns=["id","Number","sentiment","text"], mode="w")
pd_label = pd_all.rename(columns={"Unnamed: 0":"id","class":"sentiment","Labels":"text"})
pd_label.to_csv('Labels.csv', index=False, columns=["id","Number","sentiment","text"], mode="w")
pd_code = pd_all.rename(columns={"Unnamed: 0":"id","class":"sentiment","Codes":"text"})
pd_code.to_csv('Codes.csv', index=False, columns=["id","Number","sentiment","text"], mode="w")
pd_comment = pd_all.rename(columns={"Unnamed: 0":"id","class":"sentiment","Comments":"text"})
pd_comment.to_csv('Comments.csv', index=False, columns=["id","Number","sentiment","text"], mode="w")
pd_command = pd_all.rename(columns={"Unnamed: 0":"id","class":"sentiment","Commands":"text"})
pd_command.to_csv('Command.csv', index=False, columns=["id","Number","sentiment","text"], mode="w")
pd_tplusb = pd_all.rename(columns={"Unnamed: 0":"id","class":"sentiment","Title+Body":"text"})
pd_tplusb.to_csv('Title+Body.csv', index=False, columns=["id","Number","sentiment","text"], mode="w")
# + [markdown] id="qWHiO-mRArm_"
# # Training
# + id="Tz5YHLfNzfxI"
# Imbalance Strategy Setting (if needed)
# IMBLEARN = 'RandomOverSampler'
# IMBLEARN = 'SMOTE'
IMBLEARN = 'None'
# RQ1 setting: which textual field(s) form the main input
# (Title+Body vs. Title only vs. Body only).
PLUS_FLAG = True
TITLE_FLAG = False
DESC_FLAG = False
# RQ2 setting: which auxiliary features to add.
COMMENT_FLAG = True
CODE_FLAG = True
COMMAND_FLAG = False
LABEL_FLAG = False
# Training setting
BATCH = 64
REPEAT = 30
EPOCH = 25
repeated_range = range(0,REPEAT)
# Embedding setting
WORD2VEC_FLAG = False # True for word2vec, false for GloVe
LIMIT = 1000000 # if WORD2VEC_FLAG
# Zero Padding Length Setting (if needed)
MAX_SENT_LENGTH = 150 # Zero padding length for word level vector (Max number of words in a sentence). We tried [50,100,150,200]
MAX_SENTS = 18 # Zero padding length for sentence level vector (Max number of sentences in a bug report). We tried [50,100,150,200]
MAX_NB_WORDS = 20000 # Maximum words in corpus We tried in [5000,10000,20000,4000]
# Comment feature reading method setting
COMMENT_V2_FLAG = False # False for sentence version, True for comment version
# Generating output name: encode the chosen features/embedding in the CSV name.
out_csv_name = f'../MHAN_{project}'
if COMMENT_V2_FLAG:
    out_csv_name += 'commentV2'
if WORD2VEC_FLAG == True:
    out_csv_name += '_word2vec' + str(LIMIT)
    EMBEDDING_PATH = "embedding/enwiki_20180420_100d.txt.bz2"
else:
    out_csv_name += '_glove'
    EMBEDDING_PATH = "embedding/glove.6B.100d.txt"
if COMMENT_FLAG == True:
    out_csv_name += '+comment'
if CODE_FLAG == True:
    out_csv_name += '+code'
if COMMAND_FLAG == True:
    out_csv_name += '+command'
if LABEL_FLAG == True:
    out_csv_name += '+label'
out_csv_name += '.csv'
tf_auc = tf.keras.metrics.AUC()
batch_size = BATCH
repeated_times = REPEAT
embedding_path = EMBEDDING_PATH
if embedding_path.find('glove') != -1:
    emb_file_flag = 'glove' # pre-trained word vector is glove
    # Parse the dimension out of a file name like "glove.6B.100d.txt"
    # (third dot-separated token minus the trailing 'd').
    embedding_dim = int(((embedding_path.split('/')[-1]).split('.')[2])[:-1])
else:
    embedding_dim = 100
    print(f"glove is not using, set embedding_dim as {embedding_dim}")
# preprocessing
###################### Preprocessing for comment #################################
# Comments.csv stores each report's comments as a stringified Python list;
# clean each comment, then build a 3-D (report, sentence, word-id) tensor.
if COMMENT_FLAG:
    comment_train=pd.read_csv('Comments.csv')
    comment_train["text"] = comment_train["text"].fillna('')
    comments = []
    corpus3 = []
    if COMMENT_V2_FLAG:
        # # 2nd preprocessing method (comment based)
        # Each comment becomes one "sentence" row of the tensor.
        for idx in range(comment_train.text.shape[0]):
            comment = ast.literal_eval(comment_train.text[idx])
            comment = list(map(remove_url,comment))
            comment = list(map(remove_html,comment))
            comment = list(map(remove_emoji,comment))
            comment = list(map(remove_stopwords,comment))
            comment = list(map(BeautifulSoup,comment))
            comment = [sometext.get_text() for sometext in comment]
            comment = list(map(clean_str,comment))
            comments.append(comment)
            comment = ''.join(comment)
            corpus3.append(comment)
    else:
        # 1st preprocessing method (sentence based)
        # All comments are joined, then re-split into NLTK sentences.
        for idx in range(comment_train.text.shape[0]):
            comment = ast.literal_eval(comment_train.text[idx])
            comment = list(map(remove_url,comment))
            comment = list(map(remove_html,comment))
            comment = list(map(remove_emoji,comment))
            comment = list(map(remove_stopwords,comment))
            comment = ''.join(comment)
            comment = BeautifulSoup(comment)
            comment = clean_str(comment.get_text())
            corpus3.append(comment)
            comment = tokenize.sent_tokenize(comment)
            comments.append(comment)
    # Create Tokenizer
    tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
    tokenizer.fit_on_texts(corpus3)
    # Zero Padding: truncate to MAX_SENTS sentences of MAX_SENT_LENGTH words;
    # only word ids below MAX_NB_WORDS are kept.
    comment_data = np.zeros((len(corpus3), MAX_SENTS, MAX_SENT_LENGTH), dtype='int16')
    for i, sentences in enumerate(comments):
        for j, sent in enumerate(sentences):
            if j < MAX_SENTS:
                wordTokens = text_to_word_sequence(sent)
                k = 0
                for _, word in enumerate(wordTokens):
                    if word in tokenizer.word_index:
                        if k < MAX_SENT_LENGTH and tokenizer.word_index[word] < MAX_NB_WORDS:
                            comment_data[i, j, k] = tokenizer.word_index[word]
                            k = k + 1
    comment_word_index = tokenizer.word_index
###################### Preprocessing for comment #################################
###################### Preprocessing for command #################################
# Same sentence-based pipeline as comments, applied to Command.csv.
if COMMAND_FLAG:
    command_train=pd.read_csv('Command.csv')
    commands = []
    corpus4 = []
    for idx in range(command_train.text.shape[0]):
        command = ast.literal_eval(command_train.text[idx])
        command = list(map(remove_url,command))
        command = list(map(remove_html,command))
        command = list(map(remove_emoji,command))
        command = list(map(remove_stopwords,command))
        command = ''.join(command)
        command = BeautifulSoup(command)
        command = clean_str(command.get_text())
        corpus4.append(command)
        command = tokenize.sent_tokenize(command)
        commands.append(command)
    # Create Tokenizer
    tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
    tokenizer.fit_on_texts(corpus4)
    # Zero Padding
    # NOTE(review): unlike the comment branch there is no
    # `word in tokenizer.word_index` guard here — a KeyError is possible if
    # text_to_word_sequence ever yields a token unseen during fit; confirm.
    command_data = np.zeros((len(corpus4), MAX_SENTS, MAX_SENT_LENGTH), dtype='int16')
    for i, sentences in enumerate(commands):
        for j, sent in enumerate(sentences):
            if j < MAX_SENTS:
                wordTokens = text_to_word_sequence(sent)
                k = 0
                for _, word in enumerate(wordTokens):
                    if k < MAX_SENT_LENGTH and tokenizer.word_index[word] < MAX_NB_WORDS:
                        command_data[i, j, k] = tokenizer.word_index[word]
                        k = k + 1
    command_word_index = tokenizer.word_index
###################### Preprocessing for command #################################
###################### Preprocessing for Code #################################
# Code snippets: cleaned, then split into individual lines (reversed order)
# so each source line acts as one "sentence" in the hierarchical tensor.
if CODE_FLAG:
    code_train=pd.read_csv('Codes.csv')
    codes = []
    corpus2 = []
    # 1st preprocess (line based)
    for idx in range(code_train.text.shape[0]):
        code = ast.literal_eval(code_train.text[idx])
        code = list(map(remove_url,code))
        code = list(map(remove_html,code))
        code = list(map(remove_emoji,code))
        code = list(map(remove_stopwords,code))
        code = ''.join(code)
        code = BeautifulSoup(code)
        code = clean_str(code.get_text())
        corpus2.append(code)
        code = code.splitlines()
        # Reversed so truncation at MAX_SENTS keeps the *last* lines.
        code=list(reversed(code))
        codes.append(code)
    # # 2nd preprocess (block based)
    # for idx in range(code_train.text.shape[0]):
    #     code = ast.literal_eval(code_train.text[idx])
    #     code = list(map(remove_url,code))
    #     code = list(map(remove_html,code))
    #     code = list(map(remove_emoji,code))
    #     code = list(map(remove_stopwords,code))
    #     code = list(map(BeautifulSoup,code))
    #     code = [sometext.get_text() for sometext in code]
    #     code = list(map(clean_str,code))
    #     codes.append(code)
    #     code = ''.join(code)
    #     corpus2.append(code)
    # Create Tokenizer
    tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
    tokenizer.fit_on_texts(corpus2)
    # Zero Padding
    code_data = np.zeros((len(corpus2), MAX_SENTS, MAX_SENT_LENGTH), dtype='int16')
    for i, sentences in enumerate(codes):
        for j, sent in enumerate(sentences):
            if j < MAX_SENTS:
                wordTokens = text_to_word_sequence(sent)
                k = 0
                for _, word in enumerate(wordTokens):
                    if word in tokenizer.word_index:
                        if k < MAX_SENT_LENGTH and tokenizer.word_index[word] < MAX_NB_WORDS:
                            code_data[i, j, k] = tokenizer.word_index[word]
                            k = k + 1
    code_word_index = tokenizer.word_index
###################### Preprocessing for Code #################################
###################### Preprocessing for Label #################################
# Issue labels are a flat text feature: one word-id row per report.
if LABEL_FLAG:
    label_data=pd.read_csv('Labels.csv')
    label_data["text"] = label_data["text"].fillna('')
    label_data = label_data["text"]
    labels_data = []
    label_corpus = []
    for idx in range(len(label_data)):
        label = BeautifulSoup(label_data.iloc[idx])
        label = clean_str(label.get_text())
        label_corpus.append(label)
        labels_data.append(label)
    # Create Tokenizer
    tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
    tokenizer.fit_on_texts(labels_data)
    # Zero Padding
    # NOTE(review): this loop enumerates *reports* but guards with
    # `j < MAX_SENTS`, so only the first MAX_SENTS (=18) reports are filled;
    # it also lacks the `word in tokenizer.word_index` guard used for
    # comments. This looks like a bug — confirm against the reference repo.
    # (`label_data` is also rebound from a Series to this ndarray here.)
    label_data = np.zeros((len(labels_data), MAX_SENT_LENGTH), dtype='int16')
    for j, sent in enumerate(labels_data):
        if j < MAX_SENTS:
            wordTokens = text_to_word_sequence(sent)
            k = 0
            for _, word in enumerate(wordTokens):
                if k < MAX_SENT_LENGTH and tokenizer.word_index[word] < MAX_NB_WORDS:
                    label_data[j, k] = tokenizer.word_index[word]
                    k = k + 1
    label_word_index = tokenizer.word_index
###################### Preprocessing for Label #################################
###################### Preprocessing for Conent #################################
# Main textual input, chosen by the RQ1 flags (Title+Body / Title / Body).
if PLUS_FLAG == True:
    data_train = pd.read_csv('Title+Body.csv')
elif TITLE_FLAG == True:
    data_train = pd.read_csv('Title.csv')
elif DESC_FLAG == True:
    data_train = pd.read_csv('Body.csv')
original_data_train = data_train
data_train["text"] = data_train["text"].fillna('')
data_train["text"] = data_train["text"].apply(lambda x: remove_url(x))
data_train["text"] = data_train["text"].apply(lambda x: remove_html(x))
data_train["text"] = data_train["text"].apply(lambda x: remove_emoji(x))
data_train["text"] = data_train["text"].apply(lambda text: remove_stopwords(text))
corpus_desc = []
labels = []
texts = []
for idx in range(data_train.text.shape[0]):
    text = BeautifulSoup(data_train.text[idx])
    text = clean_str(text.get_text())
    corpus_desc.append(text)
    sentences = tokenize.sent_tokenize(text)
    texts.append(sentences)
    labels.append(int(data_train.sentiment[idx]))
# Create Tokenizer
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(corpus_desc)
# Zero Padding (no `word in word_index` guard here either — see NOTE above
# in the command section).
body_data = np.zeros((len(corpus_desc), MAX_SENTS, MAX_SENT_LENGTH), dtype='int16')
for i, sentences in enumerate(texts):
    for j, sent in enumerate(sentences):
        if j < MAX_SENTS:
            wordTokens = text_to_word_sequence(sent)
            k = 0
            for _, word in enumerate(wordTokens):
                if k < MAX_SENT_LENGTH and tokenizer.word_index[word] < MAX_NB_WORDS:
                    body_data[i, j, k] = tokenizer.word_index[word]
                    k = k + 1
word_index = tokenizer.word_index
# One-hot encode the binary sentiment.
# NOTE(review): to_categorical is applied twice — first on the raw labels,
# then on column 1 of the result. The net effect is a 2-column one-hot of the
# positive class, but the double call looks accidental; confirm intent.
labels = to_categorical(np.asarray(labels))
labels = to_categorical(np.asarray(labels[:,1]))
###################### Preprocessing for Content ################################
# Pre-trained embedding matrix for the main-content vocabulary.
embedding_matrix_body = create_emb_mat(embedding_path, word_index, embedding_dim)
# Model Loading and Training
def training():
    """Build, train, and evaluate one hierarchical-attention network (HAN).

    Reads the train/test arrays, channel FLAG switches, embedding matrices and
    the running metric lists (macro, micro, auc_value) from module globals set
    in __main__. Side effects: appends this run's scores to those lists and
    writes the per-issue df_diff/df_same CSVs. Returns the fitted Keras model.
    """
    ######################## layer for comment ##############################
    # Embedding layer
    # Each optional channel (comment/command/code) follows the same pattern:
    # word embedding -> word-level BiGRU + attention (sentence encoder) ->
    # sentence-level BiGRU + attention (document vector).
    if COMMENT_FLAG:
        embedding_layer_comment = Embedding(len(comment_word_index) + 1,
                                    embedding_dim,
                                    weights=[embedding_matrix_comment],
                                    mask_zero=False,
                                    input_length=MAX_SENT_LENGTH,
                                    trainable=True)
        comment_input = Input(shape=(MAX_SENT_LENGTH,), dtype='int32')
        embedded_sequences_comment = embedding_layer_comment(comment_input)
        l_lstm_comment = Bidirectional(GRU(100, return_sequences=True))(embedded_sequences_comment)
        l_att_comment = Attention(embedding_dim)(l_lstm_comment)
        sent_encoder_comment = Model(comment_input, l_att_comment)
        comments_input = Input(shape=(MAX_SENTS, MAX_SENT_LENGTH), dtype='int32')
        comments_encoder = TimeDistributed(sent_encoder_comment)(comments_input)
        l_lstm_comments = Bidirectional(GRU(100, return_sequences=True))(comments_encoder)
        l_att_comments = Attention(embedding_dim)(l_lstm_comments)
    ######################## layer for command##############################
    # Embedding layer
    if COMMAND_FLAG:
        embedding_layer_command = Embedding(len(command_word_index) + 1,
                                    embedding_dim,
                                    weights=[embedding_matrix_command],
                                    mask_zero=False,
                                    input_length=MAX_SENT_LENGTH,
                                    trainable=True)
        command_input = Input(shape=(MAX_SENT_LENGTH,), dtype='int32')
        embedded_sequences_command = embedding_layer_command(command_input)
        l_lstm_command = Bidirectional(GRU(100, return_sequences=True))(embedded_sequences_command)
        l_att_command = Attention(embedding_dim)(l_lstm_command)
        sent_encoder_command = Model(command_input, l_att_command)
        commands_input = Input(shape=(MAX_SENTS, MAX_SENT_LENGTH), dtype='int32')
        commands_encoder = TimeDistributed(sent_encoder_command)(commands_input)
        l_lstm_commands = Bidirectional(GRU(100, return_sequences=True))(commands_encoder)
        l_att_commands = Attention(embedding_dim)(l_lstm_commands)
    ######################## layer for code##############################
    # Embedding layer
    if CODE_FLAG:
        embedding_layer_code = Embedding(len(code_word_index) + 1,
                                    embedding_dim,
                                    weights=[embedding_matrix_code],
                                    mask_zero=False,
                                    input_length=MAX_SENT_LENGTH,
                                    trainable=True)
        code_input = Input(shape=(MAX_SENT_LENGTH,), dtype='int32')
        embedded_sequences_code = embedding_layer_code(code_input)
        l_lstm_code = Bidirectional(GRU(100, return_sequences=True))(embedded_sequences_code)
        l_att_code = Attention(embedding_dim)(l_lstm_code)
        sent_encoder_code = Model(code_input, l_att_code)
        codes_input = Input(shape=(MAX_SENTS, MAX_SENT_LENGTH), dtype='int32')
        codes_encoder = TimeDistributed(sent_encoder_code)(codes_input)
        l_lstm_codes = Bidirectional(GRU(100, return_sequences=True))(codes_encoder)
        l_att_codes = Attention(embedding_dim)(l_lstm_codes)
    ######################## layer for label#############################
    # Labels are a flat token sequence, so only the word-level encoder is built.
    if LABEL_FLAG:
        embedding_layer_label = Embedding(len(label_word_index) + 1,
                                    embedding_dim,
                                    weights=[embedding_matrix_label],
                                    mask_zero=False,
                                    input_length=MAX_SENT_LENGTH,
                                    trainable=True)
        label_input = Input(shape=(MAX_SENT_LENGTH,), dtype='int32')
        embedded_sequences_label = embedding_layer_label(label_input)
        l_lstm_label = Bidirectional(GRU(100, return_sequences=True))(embedded_sequences_label)
        l_att_label = Attention(embedding_dim)(l_lstm_label)
    ######################## layer for content##############################
    # The body/content channel is always built (not behind a flag).
    embedding_layer = Embedding(len(word_index) + 1,
                                embedding_dim,
                                weights=[embedding_matrix_body],
                                mask_zero=False,
                                input_length=MAX_SENT_LENGTH,
                                trainable=True)
    sentence_input = Input(shape=(MAX_SENT_LENGTH,), dtype='int32')
    embedded_sequences = embedding_layer(sentence_input)
    l_lstm = Bidirectional(GRU(100, return_sequences=True))(embedded_sequences)
    l_att = Attention(embedding_dim)(l_lstm)
    sent_encoder = Model(sentence_input, l_att)
    text_input = Input(shape=(MAX_SENTS, MAX_SENT_LENGTH), dtype='int32')
    text_encoder = TimeDistributed(sent_encoder)(text_input)
    l_lstm_sent = Bidirectional(GRU(100, return_sequences=True))(text_encoder)
    l_att_sent = Attention(embedding_dim)(l_lstm_sent)
    ################## Concatenate #######################
    # Collect only the enabled channels; keep model inputs and the matching
    # train/test arrays in the same order.
    concat_input_list = []
    model_input_list = []
    train_input_list = []
    test_input_list = []
    if COMMENT_FLAG:
        concat_input_list.append(l_att_comments)
        model_input_list.append(comments_input)
        train_input_list.append(x_comment_train)
        test_input_list.append(x_comment_test)
    if COMMAND_FLAG:
        concat_input_list.append(l_att_commands)
        model_input_list.append(commands_input)
        train_input_list.append(x_command_train)
        test_input_list.append(x_command_test)
    if CODE_FLAG:
        concat_input_list.append(l_att_codes)
        model_input_list.append(codes_input)
        train_input_list.append(x_code_train)
        test_input_list.append(x_code_test)
    if LABEL_FLAG:
        concat_input_list.append(l_att_label)
        model_input_list.append(label_input)
        train_input_list.append(x_label_train)
        test_input_list.append(x_label_test)
    concat_input_list.append(l_att_sent)
    model_input_list.append(text_input)
    train_input_list.append(x_body_train)
    test_input_list.append(x_body_test)
    if len(concat_input_list) > 1:
        concatenated = concatenate(concat_input_list)
    else:
        concatenated = concat_input_list[0]
    # Two-way softmax over the concatenated document representations.
    preds = Dense(2, activation='softmax')(concatenated)
    model = Model(inputs=model_input_list,outputs=[preds])
    model.compile(loss='binary_crossentropy',
                  optimizer='adam', # or rmsprop
                  metrics=[tf_auc])
    # Stop when all epoches are run and return the result that has the best monitor metrics
    # (patience == EPOCH means training never stops early; restore_best_weights
    # rolls the model back to the epoch with the lowest validation loss).
    early_stopping = EarlyStopping(monitor='val_loss',patience=EPOCH,restore_best_weights=True,verbose=0)
    callbacks=[early_stopping]
    model.fit(train_input_list, y_train, validation_split = 0.2,
              epochs=EPOCH, batch_size=batch_size,
              callbacks=[early_stopping])
    y_pred = model.predict(test_input_list).round().astype(int)
    y_true = y_test
    # Macro / micro F1 over the one-hot predictions, appended to module-level lists.
    current_macro = f1_score(y_true, y_pred, average='macro')
    macro.append(current_macro)
    current_micro = f1_score(y_true, y_pred, average='micro')
    micro.append(current_micro)
    # NOTE(review): AUC here is computed from rounded (hard) predictions, not
    # probabilities, so it reflects a single operating point.
    fpr, tpr, thresholds = roc_curve(y_true[:,1], y_pred[:,1], pos_label=1)
    current_auc = auc(fpr, tpr)
    auc_value.append(current_auc)
    cm = confusion_matrix(y_true.argmax(axis=1), y_pred.argmax(axis=1))
    # Map each test row back to its issue Number for error analysis.
    using = original_data_train[['Number','sentiment']].iloc[test_index]
    idx = 0
    # initialize list of lists
    numbers_diff = []
    ytures_diff = []
    ypreds_diff = []
    numbers_same = []
    ytures_same = []
    ypreds_same = []
    # Partition test issues into misclassified (diff) and correct (same).
    for yture,ypred in zip([int(i) for i in y_true[:,1]],[int(i) for i in y_pred[:,1]]):
        if yture != ypred:
            numbers_diff.append(using['Number'].iloc[idx])
            ytures_diff.append(yture)
            ypreds_diff.append(ypred)
        if yture == ypred:
            numbers_same.append(using['Number'].iloc[idx])
            ytures_same.append(yture)
            ypreds_same.append(ypred)
        idx += 1
    data_diff = {'numbers_diff':numbers_diff,'ytrues_diff':ytures_diff,'ypreds_diff':ypreds_diff}
    data_same = {'numbers_same':numbers_same,'ytrues_same':ytures_same,'ypreds_same':ypreds_same}
    df_diff = pd.DataFrame(data_diff)
    df_same = pd.DataFrame(data_same)
    # Running averages over all repeats so far, stored as an extra column.
    results = pd.Series([sum(macro)/len(macro),sum(micro)/len(micro),sum(auc_value)/len(auc_value)], name='results')
    df_diff = pd.concat([df_diff,results], axis=1)
    df_same = pd.concat([df_same,results], axis=1)
    # This will generate the result of false_neg&false_pos or true_neg&true_pos for one run
    df_diff.to_csv(f'../df_diff_{project}.csv')
    df_same.to_csv(f'../df_same_{project}.csv')
    return model
if __name__ == '__main__':
    # Accumulators shared with training(), which appends one score per run.
    macro = []
    micro = []
    auc_value = []
    df_log = pd.DataFrame(columns=['repeated_time','auc_list','Avg Macro F1', 'Avg Micro F1', 'Avg AUC'])
    for repeated_time in repeated_range:
        # Random 80/20 split, re-seeded with the repeat index for reproducibility.
        indices = np.arange(body_data.shape[0])
        train_index,test_index = train_test_split(indices, test_size=0.2, random_state=repeated_time)
        x_body_train, x_body_test = body_data[train_index], body_data[test_index]
        y_train, y_test = labels[train_index], labels[test_index]
        # Optional class re-balancing. The samplers need 2-D input, so the
        # (docs, sents, words) tensor is flattened and restored afterwards.
        if IMBLEARN == 'RandomOverSampler':
            smo = RandomOverSampler(random_state=666)
            nx1, ny1, nz1 = x_body_train.shape
            x_body_train = x_body_train.reshape((nx1,ny1*nz1))
            x_body_train, y_train = smo.fit_resample(x_body_train, y_train)
            temp_n, _ = x_body_train.shape
            x_body_train = x_body_train.reshape((temp_n,ny1,nz1))
            y_train = to_categorical(y_train.astype(int))
        if IMBLEARN == 'SMOTE':
            smo = SMOTE(random_state=666)
            nx1, ny1, nz1 = x_body_train.shape
            x_body_train = x_body_train.reshape((nx1,ny1*nz1))
            x_body_train, y_train = smo.fit_resample(x_body_train, y_train)
            temp_n, _ = x_body_train.shape
            x_body_train = x_body_train.reshape((temp_n,ny1,nz1))
            y_train = to_categorical(y_train.astype(int))
        # Slice the optional channels with the same indices and build their
        # embedding matrices for this run.
        if LABEL_FLAG:
            x_label_train, x_label_test = label_data[train_index], label_data[test_index]
            embedding_matrix_label = create_emb_mat(embedding_path, label_word_index, embedding_dim)
        if CODE_FLAG:
            x_code_train, x_code_test = code_data[train_index], code_data[test_index]
            embedding_matrix_code = create_emb_mat(embedding_path, code_word_index, embedding_dim)
        if COMMAND_FLAG:
            x_command_train, x_command_test = command_data[train_index], command_data[test_index]
            embedding_matrix_command = create_emb_mat(embedding_path, command_word_index, embedding_dim)
        if COMMENT_FLAG:
            x_comment_train, x_comment_test = comment_data[train_index], comment_data[test_index]
            embedding_matrix_comment = create_emb_mat(embedding_path, comment_word_index, embedding_dim)
        model = training()
        # this will generate the result
        new_row = {'repeated_time':repeated_time,'auc_list':str(auc_value),'Avg Macro F1':sum(macro) / len(macro), 'Avg Micro F1':sum(micro) / len(micro), 'Avg AUC':sum(auc_value) / len(auc_value)}
        # DataFrame.append() was removed in pandas 2.0; pd.concat is the
        # supported (and backward-compatible) equivalent.
        df_log = pd.concat([df_log, pd.DataFrame([new_row])], ignore_index=True)
    df_log.to_csv(out_csv_name, mode='a', header=False)
| model/MHAN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# Load Libraries - Make sure to run this cell!
import pandas as pd
import numpy as np
import re
from collections import Counter
from sklearn import feature_extraction, tree, model_selection, metrics
from yellowbrick.classifier import ClassificationReport
from yellowbrick.classifier import ConfusionMatrix
import matplotlib.pyplot as plt
import matplotlib
# %matplotlib inline
# ## Worksheet - Answer - DGA Detection using Machine Learning
#
# This worksheet is a step-by-step guide on how to detect domains that were generated using "Domain Generation Algorithm" (DGA). We will walk you through the process of transforming raw domain strings to Machine Learning features and creating a decision tree classifier which you will use to determine whether a given domain is legit or not. Once you have implemented the classifier, the worksheet will walk you through evaluating your model.
#
# Overview 2 main steps:
#
# 1. **Feature Engineering** - from raw domain strings to numeric Machine Learning features using DataFrame manipulations
# 2. **Machine Learning Classification** - predict whether a domain is legit or not using a Decision Tree Classifier
#
#
#
#
# **DGA - Background**
#
# "Various families of malware use domain generation
# algorithms (DGAs) to generate a large number of pseudo-random
# domain names to connect to a command and control (C2) server.
# In order to block DGA C2 traffic, security organizations must
# first discover the algorithm by reverse engineering malware
# samples, then generate a list of domains for a given seed. The
# domains are then either preregistered, sink-holed or published
# in a DNS blacklist. This process is not only tedious, but can
# be readily circumvented by malware authors. An alternative
# approach to stop malware from using DGAs is to intercept DNS
# queries on a network and predict whether domains are DGA
# generated. Much of the previous work in DGA detection is based
# on finding groupings of like domains and using their statistical
# properties to determine if they are DGA generated. However,
# these techniques are run over large time windows and cannot be
# used for real-time detection and prevention. In addition, many of
# these techniques also use contextual information such as passive
# DNS and aggregations of all NXDomains throughout a network.
# Such requirements are not only costly to integrate, they may not
# be possible due to real-world constraints of many systems (such
# as endpoint detection). An alternative to these systems is a much
# harder problem: detect DGA generation on a per domain basis
# with no information except for the domain name. Previous work
# to solve this harder problem exhibits poor performance and many
# of these systems rely heavily on manual creation of features;
# a time consuming process that can easily be circumvented by
# malware authors..."
# [Citation: Woodbridge et. al 2016: "Predicting Domain Generation Algorithms with Long Short-Term Memory Networks"]
#
# A better alternative for real-world deployment would be to use "featureless deep learning" - We have a separate notebook where you can see how this can be implemented!
#
# **However, let's learn the basics first!!!**
#
# ## Worksheet for Part 2 - Feature Engineering
# #### Breakpoint: Load Features and Labels
#
# If you got stuck in Part 1, please simply load the feature matrix we prepared for you, so you can move on to Part 2 and train a Decision Tree Classifier.
# Load the prepared feature matrix: one row per domain, numeric features plus
# the binary isDGA label.
df_final = pd.read_csv('../../data/dga_features_final_df.csv')
print(df_final.isDGA.value_counts())
df_final.head()
# Load dictionary of common english words from part 1
# (maps word/ngram -> frequency count; consumed by ngram_feature() below).
from six.moves import cPickle as pickle
with open('../../data/d_common_en_words' + '.pickle', 'rb') as f:
    d = pickle.load(f)
# ## Part 2 - Machine Learning
#
# To learn simple classification procedures using [sklearn](http://scikit-learn.org/stable/) we have split the work flow into 5 steps.
# ### Step 1: Prepare Feature matrix and ```target``` vector containing the URL labels
#
# - In statistics, the feature matrix is often referred to as ```X```
# - target is a vector containing the labels for each URL (often also called *y* in statistics)
# - In sklearn both the input and target can either be a pandas DataFrame/Series or numpy array/vector respectively (can't be lists!)
#
# Tasks:
# - assign 'isDGA' column to a pandas Series named 'target'
# - drop 'isDGA' column from ```dga``` DataFrame and name the resulting pandas DataFrame 'feature_matrix'
# +
# `target` is the label vector (y); `feature_matrix` is X = everything else.
target = df_final['isDGA']
feature_matrix = df_final.drop(['isDGA'], axis=1)
print('Final features', feature_matrix.columns)
print( feature_matrix.head())
# -
# ### Step 2: Simple Cross-Validation
#
# Tasks:
# - split your feature matrix X and target vector into train and test subsets using sklearn [model_selection.train_test_split](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)
# Simple Cross-Validation: Split the data set into training and test data
# 75/25 hold-out split; random_state pins the shuffle for reproducibility.
feature_matrix_train, feature_matrix_test, target_train, target_test = model_selection.train_test_split(feature_matrix, target, test_size=0.25, random_state=33)
feature_matrix_train.count()
feature_matrix_test.count()
target_train.head()
# ### Step 3: Train the model and make a prediction
#
# Finally, we have prepared and segmented the data. Let's start classifying!!
#
# Tasks:
#
# - Use the sklearn [tree.DecisionTreeClassfier()](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html), create a decision tree with standard parameters, and train it using the ```.fit()``` function with ```X_train``` and ```target_train``` data.
# - Next, pull a few random rows from the data and see if your classifier got it correct.
#
# If you are interested in trying a real unknown domain, you'll have to create a function to generate the features for that domain before you run it through the classifier (see function ```is_dga``` a few cells below).
# +
# Train the decision tree. NOTE(review): the original comment said "entropy
# criterion", but no criterion argument is passed, so sklearn's default
# ("gini") is actually used.
clf = tree.DecisionTreeClassifier() # clf means classifier
clf = clf.fit(feature_matrix_train, target_train)
# Extract a row from the test data (positional slice keeps it a 1-row frame).
test_feature = feature_matrix_test[192:193]
test_target = target_test[192:193]
# Make the prediction
pred = clf.predict(test_feature)
print('Predicted class:', pred)
# NOTE(review): pred[0] is a scalar while test_target is a one-row Series, so
# this prints an element-wise comparison Series, not a single boolean.
print('Accurate prediction?', pred[0] == test_target)
# -
feature_matrix_test
# +
# For simplicity let's just copy the needed function in here again
def H_entropy (x):
    """Return the Shannon entropy (in bits) of the character distribution of *x*."""
    total = len(x)
    # dict.fromkeys de-duplicates characters while preserving first-seen order.
    entropy = 0
    for ch in dict.fromkeys(x):
        p = float(x.count(ch)) / total
        entropy -= p * np.log2(p)
    return entropy
def vowel_consonant_ratio (x):
    """Return the vowel-to-consonant ratio of *x* (0 if it has no consonants).

    Characters other than a-z letters are ignored; 'y' counts as a consonant.
    """
    x = x.lower()
    vowels_pattern = re.compile('([aeiou])')
    consonants_pattern = re.compile('([b-df-hj-np-tv-z])')
    vowels = re.findall(vowels_pattern, x)
    consonants = re.findall(consonants_pattern, x)
    try:
        ratio = len(vowels) / len(consonants)
    # Only a zero-division (no consonants) is expected here; the original bare
    # except would have silently swallowed any unrelated error too.
    except ZeroDivisionError:
        ratio = 0
    return ratio
# ngrams: Implementation according to Schiavoni 2014: "Phoenix: DGA-based Botnet Tracking and Intelligence"
# http://s2lab.isg.rhul.ac.uk/papers/files/dimva2014.pdf
def ngrams(word, n):
    """Extract all character ngrams and return them as one flat Python list.

    word: a single string or a list of strings.
    n:    one integer or a list of integers, if you want to extract multiple
          ngram lengths and have them all in one list.

    Order matches the original implementation: per input string, all ngrams of
    the first length, then the next, etc. Implementation per Schiavoni 2014:
    "Phoenix: DGA-based Botnet Tracking and Intelligence".
    """
    # Normalizing both arguments to lists removes the four duplicated branches
    # of the original (which also shadowed this function's own name with a
    # local variable called `ngrams`).
    words = word if isinstance(word, list) else [word]
    sizes = n if isinstance(n, list) else [n]
    l_ngrams = []
    for w in words:
        for size in sizes:
            l_ngrams.extend(w[i:i+size] for i in range(0, len(w) - size + 1))
    return l_ngrams
def ngram_feature(domain, d, n):
    """Score *domain* by how "English-like" its character ngrams are.

    domain: the domain string (or list of strings).
    d:      dictionary mapping ngram -> count in common English words.
    n:      ngram length as an int (or list of ints).

    Core magic: looks up the domain's ngrams in the English dictionary and sums
    the respective counts; the sum is normalized by the number of ngrams.
    """
    l_ngrams = ngrams(domain, n)
    count_sum=0
    for ngram in l_ngrams:
        # d.get avoids the KeyError the original `if d[ngram]` raised for any
        # ngram absent from the dictionary; missing ngrams contribute 0.
        count_sum += d.get(ngram, 0)
    try:
        feature = count_sum/(len(domain)-n+1)
    # ZeroDivisionError: domain shorter than n. TypeError: n passed as a list,
    # for which a single normalization is undefined (original behavior kept).
    except (ZeroDivisionError, TypeError):
        feature = 0
    return feature
def average_ngram_feature(l_ngram_feature):
    """Return the arithmetic mean of a list of ngram_feature() results.

    Typically called with scores for several ngram lengths (1, 2, 3, ...).
    """
    total = sum(l_ngram_feature)
    return total / len(l_ngram_feature)
# +
def is_dga(domain, clf, d):
    """Classify one domain string with the trained model *clf*.

    d is the common-English-word count dictionary used for ngram features.
    Returns the predicted label (0 = legit, 1 = dga).
    """
    # order of features is ['length', 'digits', 'entropy', 'vowel-cons', 'ngrams']
    digit_pattern = re.compile('([0-9])')
    feature_row = [
        len(domain),
        len(re.findall(digit_pattern, domain)),
        H_entropy(domain),
        vowel_consonant_ratio(domain),
        average_ngram_feature([ngram_feature(domain, d, 1),
                               ngram_feature(domain, d, 2),
                               ngram_feature(domain, d, 3)]),
    ]
    domain_features = np.array([feature_row], dtype=float)
    pred = clf.predict(domain_features)
    return pred[0]
print('Predictions of domain %s is [0 means legit and 1 dga]: ' %('spardeingeld'), is_dga('spardeingeld', clf, d))
print('Predictions of domain %s is [0 means legit and 1 dga]: ' %('google'), is_dga('google', clf, d))
print('Predictions of domain %s is [0 means legit and 1 dga]: ' %('1vxznov16031kjxneqjk1rtofi6'), is_dga('1vxznov16031kjxneqjk1rtofi6', clf, d))
print('Predictions of domain %s is [0 means legit and 1 dga]: ' %('lthmqglxwmrwex'), is_dga('lthmqglxwmrwex', clf, d))
# -
# ### Step 4: Assess model accuracy with simple cross-validation
#
# Tasks:
# - Make predictions for all your data. Call the ```.predict()``` method on the clf with your training data ```X_train``` and store the results in a variable called ```target_pred```.
# - Use sklearn [metrics.accuracy_score](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html) to determine your models accuracy. Detailed Instruction:
# - Use your trained model to predict the labels of your test data ```X_test```. Run ```.predict()``` method on the clf with your test data ```X_test``` and store the results in a variable called ```target_pred```..
# - Then calculate the accuracy using ```target_test``` (which are the true labels/groundtruth) AND your models predictions on the test portion ```target_pred``` as inputs. The advantage here is to see how your model performs on new data it has not been seen during the training phase. The fair approach here is a simple **cross-validation**!
#
# - Print out the confusion matrix using [metrics.confusion_matrix](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html)
# - Use Yellowbrick to visualize the classification report and confusion matrix. (http://www.scikit-yb.org/en/latest/examples/modelselect.html#common-metrics-for-evaluating-classifiers)
# fair approach: make prediction on test data portion
target_pred = clf.predict(feature_matrix_test)
print(metrics.accuracy_score(target_test, target_pred))
print('Confusion Matrix\n', metrics.confusion_matrix(target_test, target_pred))
# Classification Report...neat summary
print(metrics.classification_report(target_test, target_pred, target_names=['legit', 'dga']))
# short-cut
# .score() computes accuracy directly — same number as accuracy_score above.
clf.score(feature_matrix_test, target_test)
# Yellowbrick visualizers: fit on the training split, score on the test split,
# then render (poof() draws the figure).
viz = ConfusionMatrix(clf)
viz.fit(feature_matrix_train, target_train)
viz.score(feature_matrix_test, target_test)
viz.poof()
viz = ClassificationReport(clf)
viz.fit(feature_matrix_train, target_train)
viz.score(feature_matrix_test, target_test)
viz.poof()
# ### Step 5: Assess model accuracy with k-fold cross-validation
#
# Tasks:
# - Partition the dataset into *k* different subsets
# - Create *k* different models by training on *k-1* subsets and testing on the remaining subsets
# - Measure the performance on each of the models and take the average measure.
#
# *Short-Cut*
# All of these steps can be easily achieved by simply using sklearn's [model_selection.KFold()](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html) and [model_selection.cross_val_score()](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html) functions.
# 3-fold shuffled cross-validation; cross_val_score refits clf on each fold
# and returns one accuracy score per fold.
cvKFold = model_selection.KFold(n_splits=3, shuffle=True, random_state=33)
cvKFold.get_n_splits(feature_matrix)
scores = model_selection.cross_val_score(clf, feature_matrix, target, cv=cvKFold)
print(scores)
# Get average score +- Standard Error (https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sem.html)
from scipy.stats import sem
def mean_score( scores ):
    """Format the mean of *scores* together with its standard error of the mean."""
    average = np.mean(scores)
    std_error = sem(scores)
    return "Mean score: {0:.3f} (+/- {1:.3f})".format(average, std_error)
print( mean_score( scores))
# #### (Optional) Visualizing your Tree
# As an optional step, you can actually visualize your tree. The following code will generate a graph of your decision tree. You will need graphviz (http://www.graphviz.org) and pydotplus (or pydot) installed for this to work.
# The Griffon VM has this installed already, but if you try this on a Mac, or Linux machine you will need to install graphviz.
# +
# These libraries are used to visualize the decision tree and require that you have GraphViz
# and pydot or pydotplus installed on your computer.
# sklearn.externals.six was removed in scikit-learn 0.23; StringIO comes from
# the standard library.
from io import StringIO
from IPython.core.display import Image
import pydotplus as pydot
# Export the fitted tree to DOT text, then render it to a PNG inline.
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data, feature_names=['length', 'digits', 'entropy', 'vowel-cons', 'ngrams'])
graph = pydot.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
# -
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
# +
#Create the Random Forest Classifier
# (10 trees, unlimited depth; seeded for reproducibility)
random_forest_clf = RandomForestClassifier(n_estimators=10,
                                          max_depth=None,
                                          min_samples_split=2,
                                          random_state=0)
random_forest_clf = random_forest_clf.fit(feature_matrix_train, target_train)
# -
#Next, create the SVM classifier
# (default SVC parameters, trained on the same split for comparison)
svm_classifier = svm.SVC()
svm_classifier = svm_classifier.fit(feature_matrix_train, target_train)
| 1m_ML_Security/notebooks/answers/Worksheet 6 - DGA Detection ML Classification - Answers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from pyspark.sql import SQLContext
from pyspark.sql import functions as sf
from pyspark.ml.feature import OneHotEncoder
from pyspark.ml.feature import VectorAssembler
from pyspark.sql.types import DateType
from datetime import date, timedelta
import datetime as dt
from pyspark.ml.tuning import ParamGridBuilder, TrainValidationSplit
from pyspark.ml.regression import RandomForestRegressionModel,RandomForestRegressor
from pyspark.sql.functions import col, avg, sum
import numpy as np
import matplotlib.dates as mdates
from matplotlib import pyplot as plt
import pandas as pd
import pyspark
from collections import Counter
# NOTE(review): assumes an existing SparkContext `sc` from a previous cell —
# stop it before attaching a fresh one to the standalone cluster master.
sc.stop()
sc = pyspark.SparkContext(master="spark://172.16.27.208:7077",appName="spark")
sc
base_path = "/home/test5/Desktop/smart-meters-in-london/"
sqlcontext = SQLContext(sc)
# Feature table produced by the k=8 k-means clustering step.
cluster_feature = sqlcontext.read.csv(path=base_path+"bk=8_means_Feature.csv",header=True,inferSchema=True)
# cluster_feature = cluster_feature.drop("_c0","date2")
# cluster_feature = cluster_feature.withColumn("windSpeed",cluster_feature["windSpeed"].cast("float"))
# cluster_feature = cluster_feature.withColumn("dewPoint",cluster_feature["dewPoint"].cast("float"))
cluster_feature.printSchema()
type(cluster_feature)
# Merge the weekend indicator and the public-holiday flag into one `holiday`
# column, then drop columns not used as model features.
cluster_feature = cluster_feature.withColumn("holiday1",sf.when((col("Weekday/end")==1) | (col("holiday")==1),1).otherwise(0))
cluster_feature = cluster_feature.drop("_c0","diff_energy_week_t_1","date2","holiday","month","Weekday/end")
cluster_feature = cluster_feature.withColumnRenamed("holiday1","holiday")
cluster_feature.take(1)
outputCols = ["weekDay_index","precipType_index","summary_index"]
df_encoded = cluster_feature
df_encoded = df_encoded.na.drop()
# One-hot encode each indexed categorical column. The loop variable was
# renamed from `col`: it shadowed pyspark.sql.functions.col (imported above)
# and left that name bound to a plain string after the loop.
for index_col in outputCols:
    encoder = OneHotEncoder(inputCol=index_col, outputCol="category_{}".format(index_col))
    df_encoded = encoder.transform(df_encoded).cache()
df_encoded = df_encoded.drop("rnk")
df_encoded.printSchema()
# +
# Build the model feature list: columns from index 4 onward plus column 2,
# minus the raw categorical columns (replaced by their one-hot versions).
inputCols = ["weekDay","precipType","summary",]
columns = df_encoded.columns
feature_col = columns[4:]
feature_col.append(columns[2])
feature_col = set(feature_col) - set(inputCols)
feature_col = feature_col - set(outputCols)
# NOTE(review): going through a set makes the assembled feature order depend
# on string hashing, which can vary between interpreter runs — confirm this
# doesn't matter for saved models.
feature_col = list(feature_col)
df_encoded = df_encoded.na.drop()
len(feature_col)
# Pack the selected columns into a single "features" vector column.
vecAssembler = VectorAssembler(inputCols=feature_col, outputCol="features")
df_feature = vecAssembler.transform(df_encoded)
df_feature.take(1)
# -
# The regression target is hourly energy; cast the date strings to DateType.
df_feature = df_feature.withColumnRenamed("energy(kWh/h)","label")
df_feature = df_feature.withColumn("date",df_feature["date"].cast(DateType()))
df_feature.printSchema()
# +
def get_aggregate(df):
    """Sum label and prediction across clusters for each (date, hour) pair."""
    # Renamed from `list`, which shadowed the builtin.
    group_cols = ["date","hour"]
    # NOTE: `sum` here is pyspark.sql.functions.sum (imported above), which
    # shadows the Python builtin at module level.
    df = df.groupBy(group_cols).agg(sum("label"),sum("prediction"))
    return df
def select_predicted_actual(df,date,LCLid=None):
    """Return (actual, predicted) integer lists for one date.

    If LCLid is given, restrict to that household; otherwise use all rows for
    the date. NOTE: the `date` parameter shadows datetime.date at module level.
    """
    # `rows` was previously named `list`, shadowing the builtin; the dead
    # pre-initialization is gone and `!= None` became the idiomatic `is not None`.
    if LCLid is not None:
        rows = df.where((df["LCLid"] == LCLid) & (df["date"] == date)).select("label","prediction").collect()
    else:
        rows = df.where((df["date"] == date)).select("label","prediction").collect()
    actual = [int(row['label']) for row in rows]
    predicted = [int(row['prediction']) for row in rows]
    return actual,predicted
def populate_precision_recall(actual,predicted,precision,recall,predicted_peak_period,actual_peak_period):
    """Flag peak hours (load > mean + 1 std) in one day's actual and predicted
    series and append that day's peak-detection metrics.

    The four list arguments are running accumulators: they are mutated in
    place and also returned. NOTE: despite its name, `precision` accumulates
    accuracy, (tp + tn) / hours — the caller prints it as "Mean accuracy".
    """
    actual, predicted = np.array(actual),np.array(predicted)
    # Peak threshold = mean + one standard deviation, computed per series.
    actual_threshold = np.std(actual) + np.mean(actual)
    predicted_threshold = np.std(predicted) + np.mean(predicted)
    # Binary peak masks (plain statements replace the original
    # conditional-expression-for-side-effect anti-pattern).
    y_label = [1 if power > actual_threshold else 0 for power in actual]
    y_predicted = [1 if power > predicted_threshold else 0 for power in predicted]
    predicted_peak_period.append(y_predicted)
    actual_peak_period.append(y_label)
    # Confusion-matrix cells from paired (predicted, actual) flags.
    counts = Counter(zip(y_predicted,y_label))
    tp = counts[1,1]
    fp = counts[1,0]
    tn = counts[0,0]
    fn = counts[0,1]
    recall.append((tp/float(tp+fn)) if tp+fn else 0)
    # Denominator was hard-coded to 24 (hours per day); len(y_label) is
    # identical for full days and also correct for partial days.
    precision.append((tp+tn)/float(len(y_label)))
    return precision,recall, predicted_peak_period,actual_peak_period
# -
# Per-cluster temporal split: train on data up to 2013-10-31, test on the rest.
train_df_cluster = []
test_df_cluster = []
num_of_cluster = 8
for cluster_id in range(num_of_cluster):
    train_df = df_feature.where((df_feature["cluster_id"] == cluster_id) & (df_feature["date"] <= date(2013,10,31)))
    test_df = df_feature.where((df_feature["cluster_id"] == cluster_id) & (df_feature["date"] > date(2013,10,31)))# & (df_feature["date"] <= date(2013,1,2)))
    print("Train_point = {}, Test_point = {}, cId = {}".format(train_df.count(),test_df.count(),cluster_id))
    train_df_cluster.append(train_df)
    test_df_cluster.append(test_df)
# Train one random-forest regressor per cluster and union all test predictions
# into a single DataFrame.
pred_val = []
flag = 0
for cluster_id in range(num_of_cluster):
    rf = RandomForestRegressor(numTrees=20,maxDepth=10,maxBins=128,seed=4)
    model = rf.fit(train_df_cluster[cluster_id])
#     model.save(base_path+"/Model/rf_bkm/rf_model_bkm_{}".format(cluster_id))
    rf_pred_val = model.transform(test_df_cluster[cluster_id])
    # First iteration: create an empty frame with the prediction schema to union into.
    if flag == 0:
        pred_val = sqlcontext.createDataFrame([],rf_pred_val.schema)
        flag = 1
    pred_val = pred_val.union(rf_pred_val)
pred_val = pred_val.cache()
# Aggregate predictions per (date, hour) and rename the summed columns back.
aggregate_df = get_aggregate(pred_val)
aggregate_df = aggregate_df.withColumnRenamed("sum(label)","label")
aggregate_df = aggregate_df.withColumnRenamed("sum(prediction)","prediction")
# Walk the test window (Nov-Dec 2013) one day at a time and score peak detection.
train_start_date = date(2013,11,1)
train_end_date = date(2013,12,31)
y_date = []
predicted_peak_period = []
actual_peak_period = []
precision = []
recall = []
while train_start_date <= train_end_date:
    print(train_start_date)
    y_actual,y_pred = select_predicted_actual(aggregate_df,train_start_date)
    # Skip dates that have no rows in the prediction output.
    if len(y_actual) == 0:
        train_start_date = train_start_date + timedelta(1)
        continue
    precision,recall,predicted_peak_period,actual_peak_period = populate_precision_recall(y_actual,y_pred,precision,recall,predicted_peak_period,actual_peak_period)
    y_date.append(train_start_date)
    train_start_date = train_start_date + timedelta(1)
# Spot-check a single day (Christmas) plus its summary statistics.
y_actual,y_pred = select_predicted_actual(aggregate_df,date(2013,12,25))
print(y_actual)
print(y_pred)
print(np.mean(np.array(y_actual)),np.std(np.array(y_actual)))
print(np.mean(np.array(y_pred)),np.std(np.array(y_pred)))
from statistics import mean
# "precision" actually holds per-day accuracy values; see populate_precision_recall.
print("Mean accuracy = {}, Mean recall = {}".format(mean(precision),mean(recall)))
# Pull one day (2013-12-31) into pandas for plotting.
actual_predicted = aggregate_df.where((aggregate_df.date==date(2013,12,31))).select("label","prediction").toPandas()
predicted_peak_period
# Plot actual vs predicted load for evaluated-day index 60, marking detected
# peak hours and drawing the mean / peak-threshold reference lines.
fig,ax = plt.subplots(1,1,figsize=(8,6))
mark_actual,mark_predicted = [],[]
for i in range(24):
    if predicted_peak_period[60][i] == 1:
        mark_predicted.append(i)
    if actual_peak_period[60][i] == 1:
        mark_actual.append(i)
print(mark_predicted,mark_actual)
ax.plot(np.arange(1,25),actual_predicted["label"],linestyle = '--',marker="d",markevery=mark_actual, label = "Actual")
ax.plot(np.arange(1,25),actual_predicted["prediction"],linestyle = ':',marker="^",markevery=mark_predicted,label = "Predicted")
ax.plot(np.arange(1,25),[actual_predicted["prediction"].mean()]*24,linestyle='--',label="Predicted Mean")
ax.plot(np.arange(1,25),[actual_predicted["prediction"].mean()+actual_predicted["prediction"].std()]*24,linestyle='--',label = "Predicted Peak Threshold")
ax.plot(np.arange(1,25),[actual_predicted["label"].mean()]*24,linestyle='-',label = "Actual Mean")
ax.plot(np.arange(1,25),[actual_predicted["label"].mean()+actual_predicted["label"].std()]*24,linestyle='-',label = "Actual Peak Threshold")
fig.legend()
ax.set_xlabel("Hours of Day")
ax.set_ylabel("Aggregated Load(Kw/h)" )
fig.savefig(base_path+"/plot/peak.png",dpi=600)
actual_predicted["prediction"].std()
# Inspect day index 54 (its date and peak masks).
y_date[54]
predicted_peak_period[54]
actual_peak_period[54]
| Peak_hr_detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Cleaning and Profiling
# +
""" Notebook Setup/Imports
"""
# Project path setup
from pathlib import Path
# NOTE(review): Path(__name__) builds a path from the module-name string
# ("__main__" in a notebook), resolved against the current working directory —
# this only finds the project root when the notebook is run from its expected
# folder; confirm.
project_dir = Path(__name__).resolve().parents[1]
# Library import
import pandas as pd
import numpy as np
import pandas_profiling
# Print strings as markdown
from IPython.display import Markdown
# Data variables from previous notebook
dataset = pd.read_pickle(Path.joinpath(project_dir, "data/processed/0_data.pickle"))
data_dictionary = pd.read_pickle(Path.joinpath(project_dir, "data/processed/0_data_dictionary.pickle"))
# -
# ## Data Type Validation & Cleaning
""" Get the count for data types in the dataset
"""
d = dataset.dtypes
dataset.get_dtype_counts()
""" Find columns as strings or other objects
"""
object_columns = d[d == 'object'].index.to_list()
print(object_columns)
""" Parse objects to float
"""
for column in object_columns:
sub = dataset[column].str.replace(",","").str.replace("-","").str.strip().apply(lambda x: np.nan if x == "" else x)
dataset[column] = sub.astype(float)
""" Check types
"""
dataset.get_dtype_counts()
# ## Variable Selection
""" Create a list of rejected variables with > .975 correlation from the profile analysis
"""
# NOTE(review): check_recoded / get_rejected_variables belong to the
# pandas-profiling v1 API; both were removed in v2+ — confirm pinned version.
profile = pandas_profiling.ProfileReport(dataset, check_recoded=True)
rejected_variables = profile.get_rejected_variables(.975)
""" List of columns to keep
"""
keep_columns = [x for x in dataset.columns if x not in rejected_variables]
""" New dataframe with the columns to keep
"""
# BUG FIX: take an explicit copy — the in-place drop/rename below operated on
# a slice of `dataset`, which triggers SettingWithCopyWarning and may silently
# fail to apply.
dataset_clean = dataset[keep_columns].copy()
""" Set the USDA Model to the percentage
"""
dataset_clean.drop("USDA Model Count", axis="columns", inplace=True)
dataset_clean.rename({"USDA Model Percent":"USDA Model"},axis="columns",inplace=True)
""" Save the cleaned data
"""
dataset_clean.to_pickle(Path.joinpath(project_dir, "data/processed/1_data.pickle"))
data_dictionary.to_pickle(Path.joinpath(project_dir, "data/processed/1_data_dictionary.pickle"))
# ## Cleaned Data Profile
""" Create the data profile object and save it to an HTML file
"""
profile = pandas_profiling.ProfileReport(dataset_clean, check_recoded=True)
profile.to_file(outputfile = Path.joinpath(project_dir,
                                           "docs/_static/profile.html"))
#
| notebooks/1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import scanpy as sc
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
data_path = '/Users/mincheolkim/Google Drive/UCSF/research/parameter_estimation/ercc/'
# ### Read the ERCC dataset and the data sheet
# Spike-in concentration sheet plus the 10x-style matrix/barcodes/genes triplet;
# the matrix is transposed so rows are cells and columns are ERCC spike-ins.
ercc_data_sheet = pd.read_csv(data_path + 'ercc_concentrations.txt', sep='\t')
adata = sc.read_mtx(data_path + 'ercc92/matrix.mtx').T
adata.obs = pd.read_csv(data_path + 'ercc92/barcodes.tsv', sep='\t', header=None, index_col=0)
adata.var = pd.read_csv(data_path + 'ercc92/genes.tsv', sep='\t', header=None, index_col=0)
adata.var['ERCC ID'] = adata.var.iloc[:, 0]
# Attach the concentration metadata to each spike-in, then keep only
# spike-ins with expected fold-change ratio of 1 between mixes.
adata.var = adata.var.merge(ercc_data_sheet, on='ERCC ID', how='left')
adata.var.index = adata.var[1]
adata = adata[:, adata.var['expected fold-change ratio'] == 1].copy()
adata.X = adata.X.toarray()
# ### Get total molecule counts that were loaded and expected
# Parameters to assume
gem_count = 8e5
dilution = 10
ercc_initial_volume = 10 #microliters
# Avogadro's number scaled to molecules per attomole (1e-18 mol)
molecules_per_attomole = 6.0221409e+23 * 10**-18
# Expected number of input molecules per GEM for each spike-in
adata.var['input_number_molecule_per_gem'] = \
    adata.var['concentration in Mix 1 (attomoles/ul)']*\
    ercc_initial_volume*\
    molecules_per_attomole/\
    gem_count/dilution
# Expected total number of input molecules (not divided across GEMs)
adata.var['input_number_molecule'] = \
    adata.var['concentration in Mix 1 (attomoles/ul)']*\
    ercc_initial_volume*\
    molecules_per_attomole/dilution
adata.var['mean_expressed'] = adata.X.mean(axis=0)
# Capture rate = observed mean count / expected molecules per GEM
adata.var['capture_rate'] = adata.var['mean_expressed']/adata.var['input_number_molecule_per_gem']
# ### Capture data
# Per-cell, per-spike-in capture efficiency: observed counts divided by the
# expected number of input molecules per GEM.
# BUG FIX: the column created above is 'input_number_molecule_per_gem';
# 'input_molecule_per_gem' does not exist and raised AttributeError.
capture_df = pd.DataFrame(
    data=adata.X / adata.var.input_number_molecule_per_gem.values.reshape(1, -1),
    columns=adata.var.index)
# BUG FIX: 'capture_df.mean' only displayed the bound method; call it to show
# the per-spike-in mean capture.
capture_df.mean()
# Square of the mean vs. mean of squares across spike-ins, per cell
a = capture_df.values.mean(axis=1)**2
b = (capture_df.values**2).mean(axis=1)
plt.scatter(a, b);
plt.xlim(0, 4)
plt.ylim(0, 50)
a
b
# Same first/second-moment comparison per spike-in (across cells)
plt.scatter(
    (capture_df**2).mean(),
    capture_df.mean()**2
)
plt.xlim(0, 0.2);
plt.ylim(0, 0.1)
(capture_df**2).mean()
capture_df.var()
adata.var
10**-.8
adata.var.capture_rate.mean()
plt.hist(adata.var.capture_rate, bins=100);
adata.var.head(5)
# BUG FIX: 'df' was never defined here (NameError); sort the spike-in table.
adata.var.sort_values('concentration in Mix 1 (attomoles/ul)')
| analysis/simulation/legacy/simple/ercc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Creating a Pipeline
#
# In this exercise, you will implement a pipeline that includes multiple stages of *transformers* and *estimators* to prepare features and train a classification model. The resulting trained *PipelineModel* can then be used as a transformer to predict whether or not a flight will be late.
#
# ### Import Spark SQL and Spark ML Libraries
#
# First, import the libraries you will need:
# +
from pyspark.sql.types import *
from pyspark.sql.functions import *
from pyspark.ml import Pipeline
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.feature import VectorAssembler, StringIndexer, VectorIndexer, MinMaxScaler
# -
# ### Load Source Data
# The data for this exercise is provided as a CSV file containing details of flights. The data includes specific characteristics (or *features*) for each flight, as well as a column indicating how many minutes late or early the flight arrived.
#
# You will load this data into a DataFrame and display it.
# NOTE(review): `spark` is not created here — assumes a pre-existing
# SparkSession (pyspark shell / Databricks); confirm before running standalone.
wkdir ='file:///mnt/c/Users/Adura/Google Drive/Projects/Jupyter/SparkMs/data/'
csv = spark.read.csv(wkdir + 'flights.csv', inferSchema=True, header=True)
csv.show()
# ### Prepare the Data
# Most modeling begins with exhaustive exploration and preparation of the data. In this example, the data has been cleaned for you. You will simply select a subset of columns to use as *features* and create a Boolean *label* field named **label** with the value **1** for flights that arrived 15 minutes or more after the scheduled arrival time, or **0** if the flight was early or on-time.
# label = 1.0 iff arrival delay exceeds 15 minutes
data = csv.select("DayofMonth", "DayOfWeek", "Carrier", "OriginAirportID", "DestAirportID", "DepDelay", ((col("ArrDelay") > 15).cast("Double").alias("label")))
data.show()
# ### Split the Data
# It is common practice when building supervised machine learning models to split the source data, using some of it to train the model and reserving some to test the trained model. In this exercise, you will use 70% of the data for training, and reserve 30% for testing. In the testing data, the **label** column is renamed to **trueLabel** so you can use it later to compare predicted labels with known actual values.
# 70/30 random split; test keeps the ground truth as "trueLabel"
splits = data.randomSplit([0.7, 0.3])
train = splits[0]
test = splits[1].withColumnRenamed("label", "trueLabel")
train_rows = train.count()
test_rows = test.count()
print("Training Rows:", train_rows, " Testing Rows:", test_rows)
# ### Define the Pipeline
# A predictive model often requires multiple stages of feature preparation. For example, it is common when using some algorithms to distinguish between continuous features (which have a calculable numeric value) and categorical features (which are numeric representations of discrete categories). It is also common to *normalize* continuous numeric features to use a common scale (for example, by scaling all numbers to a proportional decimal value between 0 and 1).
#
# A pipeline consists of a series of *transformer* and *estimator* stages that typically prepare a DataFrame for
# modeling and then train a predictive model. In this case, you will create a pipeline with seven stages:
# - A **StringIndexer** estimator that converts string values to indexes for categorical features
# - A **VectorAssembler** that combines categorical features into a single vector
# - A **VectorIndexer** that creates indexes for a vector of categorical features
# - A **VectorAssembler** that creates a vector of continuous numeric features
# - A **MinMaxScaler** that normalizes continuous numeric features
# - A **VectorAssembler** that creates a vector of categorical and continuous features
# - A **DecisionTreeClassifier** that trains a classification model.
# Stage 1: index the string Carrier column to a numeric category
strIdx = StringIndexer(inputCol = "Carrier", outputCol = "CarrierIdx")
# Stage 2: bundle all categorical columns into one vector
catVect = VectorAssembler(inputCols = ["CarrierIdx", "DayofMonth", "DayOfWeek", "OriginAirportID", "DestAirportID"], outputCol="catFeatures")
# Stage 3: index the categorical feature vector
catIdx = VectorIndexer(inputCol = catVect.getOutputCol(), outputCol = "idxCatFeatures")
# Stage 4: bundle the continuous column(s) into a vector
numVect = VectorAssembler(inputCols = ["DepDelay"], outputCol="numFeatures")
# Stage 5: scale continuous features to [0, 1]
minMax = MinMaxScaler(inputCol = numVect.getOutputCol(), outputCol="normFeatures")
# Stage 6: combine categorical + normalized continuous features
featVect = VectorAssembler(inputCols=["idxCatFeatures", "normFeatures"], outputCol="features")
# Stage 7: the classifier itself
dt = DecisionTreeClassifier(labelCol="label", featuresCol="features")
pipeline = Pipeline(stages=[strIdx, catVect, catIdx, numVect, minMax, featVect, dt])
# ### Run the Pipeline as an Estimator
# The pipeline itself is an estimator, and so it has a **fit** method that you can call to run the pipeline on a specified DataFrame. In this case, you will run the pipeline on the training data to train a model.
# FIX: renamed "piplineModel" -> "pipelineModel" (typo); both the definition
# and its only use are in this cell pair, so no other references exist.
pipelineModel = pipeline.fit(train)
print ("Pipeline complete!")
# ### Test the Pipeline Model
# The model produced by the pipeline is a transformer that will apply all of the stages in the pipeline to a specified DataFrame and apply the trained model to generate predictions. In this case, you will transform the **test** DataFrame using the pipeline to generate label predictions.
prediction = pipelineModel.transform(test)
predicted = prediction.select("features", "prediction", "trueLabel")
predicted.show(100, truncate=False)
# The resulting DataFrame is produced by applying all of the transformations in the pipline to the test data. The **prediction** column contains the predicted value for the label, and the **trueLabel** column contains the actual known value from the testing data.
| DataAnalyticsWithSpark/Supervised/Python Pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: adrian conda base
# language: python
# name: conda-base
# ---
# +
import astropy.coordinates as coord
import astropy.units as u
import astropy.table as at
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
from tqdm.notebook import trange
# gala
import gala.coordinates as gc
import gala.dynamics as gd
import gala.integrate as gi
import gala.potential as gp
from gala.units import galactic
import superfreq as sf
# +
# Milky Way disk + halo, with the default bulge/nucleus replaced by a
# triaxial Long & Murali bar of mass 5e9 (galactic mass units).
mw = gp.MilkyWayPotential()
pot = gp.CCompositePotential()
pot['disk'] = mw['disk']
pot['halo'] = mw['halo']
pot['bar'] = gp.LongMuraliBarPotential(5e9, a=2., b=0.75, c=0.25,
                                       units=galactic)
# Sanity check: circular velocity near the solar radius
pot.circular_velocity([8.1, 0, 0]*u.kpc)[0]
# -
fig, ax = plt.subplots(figsize=(6, 6))
_ = pot.plot_density_contours(
    grid=(np.linspace(-10, 10, 128),
          np.linspace(-10, 10, 128),
          0.),
    ax=ax)
# +
# Frame co-rotating with the bar at pattern speed Om_p (about -z axis)
Om_p = 40*u.km/u.s/u.kpc
frame = gp.ConstantRotatingFrame(
    [0, 0, -1] * Om_p.to(u.rad/u.Myr, u.dimensionless_angles()),
    units=galactic)
static_frame = gp.StaticFrame(galactic)
H = gp.Hamiltonian(pot, frame)
# -
# -
# # Location of the Lindblad resonances
# $\Omega(R) = \frac{v_c(r)}{R}$
#
# $\kappa(R)^2 = \frac{d^2\Phi}{dR^2} + 3\frac{v_c(R)^2}{R^2}$
# Circular frequency profile Omega(R) along the x axis.
# NOTE(review): R starts at 0, so Omega = vc/R divides by zero at the first
# sample (numpy emits a warning and yields inf/nan) — nanargmin below skips it.
R = np.linspace(0, 20, 128) * u.kpc
xyz = np.array([[1, 0, 0]]).T * R[None]
vc = pot.circular_velocity(xyz)
Omega = vc / R
# +
# Solve -m*(Omega - Om_p) = kappa numerically for each m: the zero crossing of
# (lhs - rhs) is the Lindblad resonance radius, printed per m.
for m in range(1, 6+1):
    lhs = -m * (Omega - Om_p)
    rhs = np.sqrt(pot.hessian(xyz)[0, 0] + 3*Omega**2).to(lhs.unit)
    plt.plot(R.value, lhs - rhs, marker='')
    print(R.value[np.nanargmin(np.abs(lhs - rhs))])
plt.ylim(-25, 25)
plt.axhline(0)
# -
# # Visualize results from script
# Load the per-orbit frequency table produced by the companion script; the
# max fractional difference between the two frequency estimates is a chaos
# indicator (larger diff => less regular orbit).
tbl = at.QTable.read('../scripts/bar-orbit-freqs.fits')
max_freq_diff = np.abs((tbl['freq1'] - tbl['freq2']) / tbl['freq1']).max(axis=1)
max_freq_diff.shape
# +
# The table rows were generated on a 512x512 (R, vR) grid — reshape to plot.
grid_shape = (512, 512)
plt.figure(figsize=(9, 6))
plt.pcolormesh(tbl['R'].value.reshape(grid_shape),
               tbl['vR'].value.reshape(grid_shape),
               max_freq_diff.value.reshape(grid_shape),
               norm=mpl.colors.LogNorm(),
               vmin=1e-6, vmax=1e-2,
               cmap='Greys')
cb = plt.colorbar()
cb.set_label('max. fractional freq. diff')
# Outer Lindblad resonance radii for m = 1..6 (values printed by the
# resonance-scan cell above), overplotted as vertical lines.
olrs = [13.070866141732283,
        9.606299212598426,
        8.346456692913385,
        7.716535433070866,
        7.4015748031496065,
        7.08661417]
plt.vlines(olrs, ymin=-5, ymax=5, color='tab:red')
# Alternate label placement above/below the axis to avoid overlap
ud = 1.
for m, val in zip(np.arange(1, len(olrs)+1), olrs):
    if ud > 0:
        va = 'bottom'
    else:
        va = 'top'
    plt.text(val, ud * 10, f'm={m}\nOLR',
             ha='center', color='tab:red', va=va)
    ud *= -1
plt.xlabel('$R$ [kpc]')
plt.ylabel('$v_R$ [km/s]')
# -
tbl['R'].reshape(512, 512).value
# +
# Surface of section:
# NOTE(review): `orbit`, `orbit_rot`, `plot_path`, `i` and `n` are not defined
# anywhere in this chunk — these cells appear to depend on a deleted/off-screen
# integration cell; confirm before re-running.
style = dict(alpha=0.5, marker='o', mew=0, ms=2, linestyle='none')
fig, axes = plt.subplots(2, 2, figsize=(10, 10),
                         sharex='row', sharey=True,
                         constrained_layout=True)
# Left column: inertial-frame orbit; right column: rotating-frame orbit.
sos = gd.surface_of_section(orbit, 1)
axes[0, 0].plot(sos.x.value, sos.v_x.to_value(u.km/u.s), **style)
axes[1, 0].plot(sos.cylindrical.rho.value,
                sos.cylindrical.v_rho.to_value(u.km/u.s), **style)
sos = gd.surface_of_section(orbit_rot, 1)
axes[0, 1].plot(sos.x.value, sos.v_x.to_value(u.km/u.s), **style)
axes[1, 1].plot(sos.cylindrical.rho.value,
                sos.cylindrical.v_rho.to_value(u.km/u.s), **style)
axes[0, 0].set_ylabel('$v_x$')
for ax in axes[0]:
    ax.set_xlabel('$x$')
axes[1, 0].set_ylabel('$v_R$')
for ax in axes[1]:
    ax.set_xlabel('$R$')
axes[0, 0].set_title('inertial')
axes[0, 1].set_title('rotating')
fig.savefig(plot_path / f'sos-{i+n:05d}.png', dpi=150)
plt.close(fig)
# X-Y projection of the same orbit in both frames, saved to disk
style = dict(alpha=0.2, marker='o', mew=0, ms=1., linestyle='none')
fig, axes = plt.subplots(1, 2, figsize=(12, 6))
_ = orbit.plot(['x', 'y'], **style, axes=[axes[0]])
_ = orbit_rot.plot(['x', 'y'], **style, axes=[axes[1]])
lim = orbit.x.max().to_value(u.kpc) + 1
for ax in axes:
    ax.set_xlim(-lim, lim)
    ax.set_ylim(-lim, lim)
axes[0].set_title('inertial')
axes[1].set_title('rotating')
fig.savefig(plot_path / f'xy-{i+n:05d}.png', dpi=150)
plt.close(fig)
# +
# Same X-Y plot again, interactive (not saved)
style = dict(alpha=0.2, marker='o', mew=0, ms=1., linestyle='none')
fig, axes = plt.subplots(1, 2, figsize=(12, 6))
_ = orbit.plot(['x', 'y'], **style, axes=[axes[0]])
_ = orbit_rot.plot(['x', 'y'], **style, axes=[axes[1]])
lim = orbit.x.max().to_value(u.kpc) + 1
for ax in axes:
    ax.set_xlim(-lim, lim)
    ax.set_ylim(-lim, lim)
# -
# NOTE(review): `freq_diffs` is also undefined in this chunk.
max_freq_diff = np.max(freq_diffs, axis=1)
| notebooks/Bar-Chaos-Exploration-Kate-D.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Example usage
#
# To use `risk_model_tool` in a project:
# +
# %load_ext autoreload
# %autoreload 2
import os
import pandas as pd
import risk_model_tool
risk_model_tool.__version__
# -
# ## Docs of Utils
#
# ### autoexcel
# +
### Read data
# Demonstrates inserting a small DataFrame into an existing workbook at a
# given anchor cell, then saving a copy.
from risk_model_tool.utils import autoexcel
from openpyxl import load_workbook
from PIL import Image as PILImage
write_excel = load_workbook(filename="./example/toy_data/iris.xlsx")
autoexcel.insert_df(anchor="G1", sheet=write_excel["Sheet1"], df=pd.DataFrame([{"test": "test", "try":"try"}]), header=True)
# autoexcel.batch_insert_image(anchor="K1", sheet=write_excel["Sheet1"], images="./example/images/", img_size=(5,3))
write_excel.save("./example/toy_data/iris_add_df.xlsx")
# -
# ### df_to_highchart
# +
# Render a DataFrame as an inline Highcharts plot in the notebook
from risk_model_tool.utils import df_to_highchart
df = pd.read_excel("./example/toy_data/iris.xlsx").iloc[:, 1:]
df_to_highchart.serialize(df, title="Test in notebook",output_type='notebook')
# -
# ### Mailsender
# +
# SMTP configuration example; placeholders must be replaced before sending.
# NOTE(review): never commit real credentials here — load them from the
# environment or a secrets store instead.
from risk_model_tool.utils import mail_sender
config = {
    'host': "smtp.163.com",
    'port': 465,
    'user': "MAIL",
    'passwd': "PASSWORD",
    'subject': "Test",
    'to': "<EMAIL>",
    'timeout':20,
}
mail = mail_sender.MailSender(config, None)
# mail.send()
# -
| docs/example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
# the GBM used
import xgboost as xgb
from sklearn.model_selection import KFold
# to encode categoricals
from sklearn.preprocessing import LabelEncoder
# see utils.py
from utils import add_features, rmsle, train_encoders, apply_encoders
import optuna
# +
# globals and load train dataset
FILE_TRAIN = "train.csv"
FILE_TEST = "test.csv"
# load train dataset
data_orig = pd.read_csv(FILE_TRAIN)
#
# add features
#
data_extended = add_features(data_orig)
all_columns = data_extended.columns
# cols to be ignored
# atemp and temp are strongly correlated (0.98) we're taking only one
del_columns = ["datetime", "casual", "registered", "temp"]
TARGET = "count"
cat_cols = ["season", "holiday", "workingday", "weather", "windspeed", "hour", "year"]
# Numeric columns = everything not target/ignored/categorical; `features` is
# sorted so the column order is deterministic despite the set difference.
num_cols = list(set(all_columns) - set([TARGET]) - set(del_columns) - set(cat_cols))
features = sorted(cat_cols + num_cols)
print("All columns:", len(all_columns))
print("Ignored columns:", len(del_columns))
print("Target:", len([TARGET]))
print("Categorical columns:", len(cat_cols))
print("Numerical columns:", len(num_cols))
print("All the features", len(features))
data_used = data_extended.drop(del_columns, axis=1)
# -
data_used.tail()
# +
# encode
# let's code categorical
# NOTE(review): encoders must match those used at training time — confirm
# train_encoders/apply_encoders are deterministic across runs.
le_list = train_encoders(data_used)
# coding
data_used = apply_encoders(data_used, le_list)
# -
x = data_used[features].values
y = data_used[TARGET].values
# Load the previously trained model and predict; counts can't be negative,
# so clamp predictions at zero before rounding to integers.
model = xgb.XGBRegressor()
model.load_model(fname="xgboost.txt")
y_pred = model.predict(x)
y_pred = np.where(y_pred >= 0, y_pred, 0)
data_used["pred"] = np.round(y_pred, 0).astype(int)
data_used.head(20)
data_used["atemp"].nunique()
data_used["windspeed"].nunique()
| check_result.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
3+2
# +
# Read a whitespace-separated list of integers from stdin and print basic
# descriptive statistics for it.
import numpy
from scipy import stats
print("enter the numbers")
ar = list(map(int, input().strip().split()))
x = numpy.array(ar)
print("x = "+str(x))
print("mean = "+str(numpy.mean(ar)))
print("median = "+str(numpy.median(ar)))
print(stats.mode(ar))
print("standard deviation = "+str(numpy.std(ar)))
print("variance = "+str(numpy.var(ar)))
# -
# Mode of a fixed sample (86 appears most often)
from scipy import stats
ar = [99,86,87,88,111,86,103,87,94,78,77,85,86]
print(stats.mode(ar))
| Assignment 1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] Collapsed="false"
# <img src='./img/LogoWekeo_Copernicus_RGB_0.png' alt='Logo EU Copernicus EUMETSAT' align='right' width='20%'></img>
# + [markdown] Collapsed="false"
# <br>
# + [markdown] Collapsed="false"
# <a href="./00_index.ipynb"><< Index</a><br>
# <a href="./01_wekeo_atmosphere_data_overview.ipynb"><< 01 - Overview - Atmosphere data products on WEkEO</a><span style="float:right;"><a href="./11_sentinel5p_L2_load_browse.ipynb">11 - Sentinel-5p Carbon Monoxide - Load and browse >></a></span>
# -
# <div class="alert alert-block alert-info">
# <b>DATA RETRIEVE</b></div>
# + [markdown] Collapsed="false"
# # Copernicus Sentinel-5 Precursor (Sentinel-5p) - Carbon Monoxide
# -
# The example below illustrates step-by-step how Copernicus Sentinel-5p carbon monoxide data can be retrieved from WEkEO with the help of the [Harmonized Data Access (HDA) API](https://wekeo.eu/hda-api).
#
# The HDA API workflow is a six-step process:
# - [1. Search for datasets on WEkEO](#wekeo_search)
# - [2. Get the API request](#wekeo_api_request)
# - [3. Get your WEkEO API key](#wekeo_api_key)
# - [4. Initialise the WEkEO Harmonised Data Access request](#wekeo_hda_request)
# - [5. Load data descriptor file and request data](#wekeo_json)
# - [6. Download requested data](#wekeo_download)
#
# All steps have to be performed in order to be able to retrieve data from WEkEO.
# All HDA API functions needed to retrieve data are stored in the notebook [hda_api_functions](./hda_api_functions.ipynb).
# <hr>
# #### Load required libraries
# +
import os
import sys
import json
import time
import base64
import requests
import warnings
warnings.filterwarnings('ignore')
# -
# #### Load helper functions
from ipynb.fs.full.hda_api_functions import *
# <hr>
# ### <a id='wekeo_search'></a>1. Search for datasets on WEkEO
# Under [WEkEO DATA](https://www.wekeo.eu/data), you can search all datasets available on WEkEO. To add additional layers, you have to click on the `+` sign, which opens the `Catalogue` interface.
# There are two search options:<br>
# - a `free keyword search`, and
# - a pre-defined `predefined keyword search`, that helps to filter the data based on `area`, `platform`, `data provider` and more.<br>
#
# Under `PLATFORM`, you can select *`Sentinel-5P`* and retrieve the results. You can either directly add the data to the map or you can click on `Details`, which opens a dataset description.
#
# When you click on `Add to map...`, a window opens where you can select one specific variable of Sentinel-5p TROPOMI.
#
# <br>
#
# <div style='text-align:center;'>
# <figure><img src='./img/wekeo_interface_s5p_1.png' width='90%' />
# <figcaption><i>WEkEO interface to search for datasets</i></figcaption>
# </figure>
# </div>
# ### <a id='wekeo_api_request'></a>2. Get the API request
# When a layer is added to the map, you can select the download icon, which opens an interface that allows you to tailor your data request.
# For Sentinel-5P, the following information can be selected:
# * `Bounding box`
# * `Sensing start stop time`
# * `Processing level`
# * `Product type`
#
# Once you made your selection, you can either directly requet the data or you can click on `Show API request`, which opens a window with the HDA API request for the specific data selection.
#
#
# <br>
#
# <div style='text-align:center;'>
# <figure><img src='./img/wekeo_interface_s5p_2.png' width='80%' />
# <figcaption><i>Sentinel-5p API request - Example</i></figcaption>
# </figure>
# </div>
# <br>
# `Copy` the API request and save it as a `JSON` file. We did the same and you can open the `data descriptor` file for Sentinel-5p [here](./s5p_data_descriptor.json).
# Each dataset on WEkEO is assigned a unique `datasetId`. Let us store the dataset ID for Sentinel-5p as a variable called `dataset_id` to be used later.
# Unique WEkEO collection identifier for Sentinel-5p TROPOMI
dataset_id = "EO:ESA:DAT:SENTINEL-5P:TROPOMI"
# ### <a id='wekeo_api_key'></a>3. Get the WEkEO API key
# In order to interact with WEkEO's Harmonised Data Access API, each user gets assigned an `API key` and `API token`. You will need the API key in order to download data in a programmatic way.
#
# The `api key` is generated by encoding your `username` and `password` to Base64. You can use the function [generate_api_key](./hda_api_functions.ipynb#generate_api_key) to programmatically generate your Base64-encoded api key. For this, you have to replace the 'username' and 'password' strings with your WEkEO username and password in the cell below.
#
# Alternatively, you can go to this [website](https://www.base64encode.org/) that allows you to manually encode your `username:password` combination. An example of an encoded key is `wekeo-test:wekeo-test`, which is encoded to `d2VrZW8tdGVzdDp3ZWtlby10ZXN0`.
#
# NOTE(review): avoid typing real credentials into the notebook source —
# prefer getpass.getpass() or environment variables so they are never saved.
user_name = '###############'
password = '###############'
api_key = generate_api_key(user_name, password)
api_key
# ##### Alternative: enter manually the generated api key
# +
#api_key =
# -
# ### <a id='wekeo_hda_request'></a>4. Initialise the Harmonised Data Access (HDA) API request
# In order to initialise an API request, you have to initialise a dictionary that contains information on `dataset_id`, `api_key` and `download_directory_path`.
#
# Please enter the path of the directory where the data shall be downloaded to.
# Enter here the directory path where you want to download the data to
download_dir_path = '../../data/'
# With `dataset_id`, `api_key` and `download_dir_path`, you can initialise the dictionary with the function [init](./hda_api_functions.ipynb#init).
hda_dict = init(dataset_id, api_key, download_dir_path)
# #### Request access token
# Once initialised, you can request an access token with the function [get_access_token](./hda_api_functions.ipynb#get_access_token). The access token is stored in the `hda_dict` dictionary.
#
# You might need to accept the Terms and Conditions, which you can do with the function [acceptTandC](./hda_api_functions.ipynb#acceptTandC).
hda_dict = get_access_token(hda_dict)
# #### Accept Terms and Conditions (if applicable)
hda_dict = acceptTandC(hda_dict)
# ### <a id='wekeo_json'></a>5. Load data descriptor file and request data
# The Harmonised Data Access API can read your data request from a `JSON` file. In this JSON-based file, you can describe the dataset you are interested in downloading. The file is in principle a dictionary. The following keys can be defined:
# - `datasetID`: the dataset's collection ID
# - `stringChoiceValues`: type of dataset, e.g. 'processing level' or 'product type'
# - `dataRangeSelectValues`: time period you would like to retrieve data
# - `boundingBoxValues`: optional to define a subset of a global field
#
# You can load the `JSON` file with `json.load()`.
with open('./s5p_data_descriptor.json', 'r') as f:
    data = json.load(f)
data
# #### Initiate the request by assigning a job ID
# The function [get_job_id](./hda_api_functions.ipynb#get_job_id) will launch your data request and your request is assigned a `job ID`.
hda_dict = get_job_id(hda_dict,data)
# #### Build list of file names to be ordered and downloaded
# The next step is to gather a list of file names available, based on your assigned `job ID`. The function [get_results_list](./hda_api_functions.ipynb#get_results_list) creates the list.
hda_dict = get_results_list(hda_dict)
# #### Create an `order ID` for each file to be downloaded
# The next step is to create an `order ID` for each file name to be downloaded. You can use the function [get_order_ids](./hda_api_functions.ipynb#get_order_ids).
hda_dict = get_order_ids(hda_dict)
# ### <a id='wekeo_download'></a>6. Download requested data
# As a final step, you can use the function [download_data](./hda_api_functions.ipynb#download_data) to initialize the data download and to download each file that has been assigned an `order ID`.
download_data(hda_dict)
# + [markdown] Collapsed="false"
# <br>
# + [markdown] Collapsed="false"
# <a href="./00_index.ipynb"><< Index</a><br>
# <a href="./01_wekeo_atmosphere_data_overview.ipynb"><< 01 - Overview - Atmosphere data products on WEkEO</a><span style="float:right;"><a href="./11_sentinel5p_L2_load_browse.ipynb">11 - Sentinel-5p Carbon Monoxide - Load and browse >></a></span>
# + [markdown] Collapsed="false"
# <hr>
# + [markdown] Collapsed="false"
# <img src='./img/all_partners_wekeo.png' alt='Logo EU Copernicus EUMETSAT' align='right' width='100%'></img>
| atmosphere/10_sentinel5p_L2_retrieve.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table> <tr>
# <td style="background-color:#ffffff;">
# <a href="http://qworld.lu.lv" target="_blank"><img src="..\images\qworld.jpg" width="25%" align="left"> </a></td>
# <td style="background-color:#ffffff;vertical-align:bottom;text-align:right;">
# prepared by <a href="http://abu.lu.lv" target="_blank"><NAME></a> (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>)
# </td>
# </tr></table>
# <table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>
# $ \newcommand{\bra}[1]{\langle #1|} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
# $ \newcommand{\dot}[2]{ #1 \cdot #2} $
# $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
# $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
# $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
# $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
# $ \newcommand{\mypar}[1]{\left( #1 \right)} $
# $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
# $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
# $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
# $ \newcommand{\onehalf}{\frac{1}{2}} $
# $ \newcommand{\donehalf}{\dfrac{1}{2}} $
# $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
# $ \newcommand{\vzero}{\myvector{1\\0}} $
# $ \newcommand{\vone}{\myvector{0\\1}} $
# $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $
# $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
# $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
# $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
# $ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $
# $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
# $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
# $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
# $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
# $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
# <h2>Entanglement and Superdense Coding</h2>
#
# Asja has a qubit, initially set to $ \ket{0} $.
#
# Balvis has a qubit, initially set to $ \ket{0} $.
# <h3> Entanglement </h3>
#
# Asja applies Hadamard operator to her qubit.
#
# The quantum state of Asja's qubit is $ \stateplus $.
#
# Then, Asja and Balvis combine their qubits. Their quantum state is
#
# $ \stateplus \otimes \vzero = \myvector{ \frac{1}{\sqrt{2}} \\ 0 \\ \frac{1}{\sqrt{2}} \\ 0 } $.
#
# Asja and Balvis apply CNOT operator on two qubits.
#
# The new quantum state is
#
# $ \CNOT \myvector{ \frac{1}{\sqrt{2}} \\ 0 \\ \frac{1}{\sqrt{2}} \\ 0 } = \myvector{ \frac{1}{\sqrt{2}} \\ 0 \\0 \\ \frac{1}{\sqrt{2}} } = \frac{1}{\sqrt{2}}\ket{00} + \frac{1}{\sqrt{2}}\ket{11} $.
#
# At this moment, Asja's and Balvis' qubits are correlated to each other.
#
# If we measure both qubits, we can observe either state $ \ket{00} $ or state $ \ket{11} $.
#
# Suppose that Asja observes her qubit secretly.
# <ul>
# <li> When Asja sees the result $ \ket{0} $, then Balvis' qubit also collapses to state $ \ket{0} $. Balvis cannot observe state $ \ket{1} $. </li>
# <li> When Asja sees the result $ \ket{1} $, then Balvis' qubit also collapses to state $ \ket{1} $. Balvis cannot observe state $ \ket{0} $. </li>
# </ul>
#
# Experimental results have confirmed that this happens even if there is a physical distance between Asja's and Balvis' qubits.
#
# It seems correlated quantum particles can "affect each other" instantly, even if they are in different parts of the universe.
#
# If two qubits are correlated in this way, then we say that they are <b>entangled</b>.
#
# <i> <u>Technical note</u>:
#
# If the quantum state of two qubits can be written as $ \ket{u} \otimes \ket{v} $, then two qubits are not correlated, where $ \ket{u} $ and $ \ket{v} $ are the quantum states of the first and second qubits.
#
# On the other hand, if the quantum state of two qubits cannot be written as $ \ket{u} \otimes \ket{v} $, then there is an entanglement between the qubits.
# </i>
#
# <b> Entangled qubits can be useful </b>
# <h3> The quantum communication </h3>
#
# After having the entanglement, Balvis takes his qubit and goes away.
#
# Asja will send two classical bits of information by only sending her qubit.
#
# <img src="../images/superdense_coding.png">
#
# <font size="-2">source: https://fi.m.wikipedia.org/wiki/Tiedosto:Superdense_coding.png </font>
# Now, we describe this protocol.
#
# Asja has two bits of classical information: $ a,b \in \{0,1\} $.
#
# There are four possible values for the pair $ (a,b) $: $ (0,0), (0,1), (1,0),\mbox{ or } (1,1) $.
#
# If $a$ is 1, then Asja applies z-gate, i.e., $ Z = \Z $, to her qubit.
#
# If $b$ is 1, then Asja applies x-gate (NOT operator) to her qubit.
#
# Then, Asja sends her qubit to Balvis.
# <h3> After the communication </h3>
#
# Balvis has both qubits.
#
# Balvis applies cx-gate (CNOT operator), where Asja's qubit is the controller.
#
# Then, Balvis applies h-gate (Hadamard operator) to Asja's qubit.
#
# Balvis measures both qubits.
#
# The measurement result will be exactly $ (a,b) $.
# <h3> Task 1</h3>
#
# Verify the correctness of the above protocol.
#
# For each pair of $ (a,b) \in \left\{ (0,0), (0,1), (1,0),(1,1) \right\} $:
# - Create a quantum circuit with two qubits: Asja's and Balvis' qubits
# - Both are initially set to $ \ket{0} $
# - Apply h-gate (Hadamard) to the Asja's qubit
# - Apply cx-gate as CNOT(Asja's-qubit,Balvis'-qubit)
#
# Assume that both qubits are separated from each other.
#
# <ul>
# <li> If $ a $ is 1, then apply z-gate to Asja's qubit. </li>
# <li> If $ b $ is 1, then apply x-gate (NOT) to Asja's qubit. </li>
# </ul>
#
# Assume that Asja sends her qubit to Balvis.
# - Apply cx-gate as CNOT(Asja's-qubit,Balvis'-qubit)
# - Apply h-gate (Hadamard) to the Asja's qubit
# - Measure both qubits and compare the results with pair $ (a,b) $
# +
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
# Task 1: verify the superdense coding protocol for every classical pair (a, b).
# Each string i = "ab" encodes the two classical bits Asja wants to send.
pairs = ['00','01','10','11']
for i in pairs:
    # q[1] is Asja's qubit, q[0] is Balvis' qubit (Qiskit orders measurement
    # strings as q[1]q[0], so the printed key should equal the pair "ab").
    q = QuantumRegister(2,"q")
    c = ClassicalRegister(2,"c")
    qc = QuantumCircuit(q,c)
    # Entanglement preparation (before the qubits are separated):
    # Apply h-gate (Hadamard) to Asja's qubit
    qc.h(q[1])
    # Apply cx-gate as CNOT(Asja's-qubit, Balvis'-qubit)
    qc.cx(q[1],q[0])
    # Encoding step, acting on Asja's qubit only:
    # If a is 1, then apply z-gate to Asja's qubit.
    # If b is 1, then apply x-gate (NOT) to Asja's qubit.
    if i[0]=='1':
        qc.z(q[1])
    if i[1]=='1':
        qc.x(q[1])
    # Decoding (Balvis, after receiving Asja's qubit):
    # Apply cx-gate as CNOT(Asja's-qubit, Balvis'-qubit),
    # then h-gate (Hadamard) on Asja's qubit, then measure both qubits.
    qc.cx(q[1],q[0])
    qc.h(q[1])
    qc.measure(q,c)
    # draw the circuit in Qiskit's reading order
    display(qc.draw(output='mpl'))
    # compare the results with pair (a,b): all 100 shots should read "ab"
    job = execute(qc,Aer.get_backend('qasm_simulator'),shots=100)
    counts = job.result().get_counts(qc)
    print(i,"-->",counts)
# -
# <a href="B54_Superdense_Coding_Solutions.ipynb#task1">click for our solution</a>
# <h3> Task 2 </h3>
#
# Verify each case by tracing the state vector (on paper).
#
# _Hint: Representing quantum states as the linear combinations of basis states makes calculation easier._
# <h3> Task 3</h3>
#
# Can the above set-up be used by Balvis?
#
# Verify that the following modified protocol allows Balvis to send two classical bits by sending only his qubit.
#
# For each pair of $ (a,b) \in \left\{ (0,0), (0,1), (1,0),(1,1) \right\} $:
# - Create a quantum circuit with two qubits: Asja's and Balvis' qubits
# - Both are initially set to $ \ket{0} $
# - Apply h-gate (Hadamard) to the Asja's qubit
# - Apply cx-gate as CNOT(Asja's-qubit,Balvis'-qubit)
#
# Assume that both qubits are separated from each other.
#
# <ul>
# <li> If $ a $ is 1, then apply z-gate to Balvis' qubit. </li>
# <li> If $ b $ is 1, then apply x-gate (NOT) to Balvis' qubit. </li>
# </ul>
#
# Assume that Balvis sends his qubit to Asja.
# - Apply cx-gate as CNOT(Asja's-qubit,Balvis'-qubit)
# - Apply h-gate (Hadamard) to the Asja's qubit
# - Measure both qubits and compare the results with pair $ (a,b) $
# +
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
# Task 3: the reversed protocol — Balvis encodes on HIS qubit (q[0]) and
# sends it to Asja; the same decoding circuit still recovers (a, b).
all_pairs = ['00','01','10','11']
for i in all_pairs:
    # q[1] is Asja's qubit, q[0] is Balvis' qubit.
    q = QuantumRegister(2,"q")
    c = ClassicalRegister(2,"c")
    qc = QuantumCircuit(q,c)
    # Entanglement preparation (identical to Task 1):
    # Apply h-gate (Hadamard) to Asja's qubit
    qc.h(q[1])
    # Apply cx-gate as CNOT(Asja's-qubit, Balvis'-qubit)
    qc.cx(q[1],q[0])
    # Encoding step — the only difference from Task 1 is that the gates
    # now act on Balvis' qubit q[0]:
    # If a is 1, then apply z-gate to Balvis' qubit.
    # If b is 1, then apply x-gate (NOT) to Balvis' qubit.
    if i[0]=='1':
        qc.z(q[0])
    if i[1]=='1':
        qc.x(q[0])
    # Decoding (after Balvis sends his qubit to Asja):
    # Apply cx-gate as CNOT(Asja's-qubit, Balvis'-qubit),
    # then h-gate (Hadamard) on Asja's qubit, then measure both qubits.
    qc.cx(q[1],q[0])
    qc.h(q[1])
    qc.measure(q,c)
    # draw the circuit in Qiskit's reading order
    display(qc.draw(output='mpl'))
    # compare the results with pair (a,b): all 100 shots should read "ab"
    job = execute(qc,Aer.get_backend('qasm_simulator'),shots=100)
    counts = job.result().get_counts(qc)
    print(i,"-->",counts)
# -
# <a href="B54_Superdense_Coding_Solutions.ipynb#task3">click for our solution</a>
# <h3> Task 4 </h3>
#
# Verify each case by tracing the state vector (on paper).
#
# _Hint: Representing quantum states as the linear combinations of basis states makes calculation easier._
| bronze/B54_Superdense_Coding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Adopted from https://huggingface.co/transformers/custom_datasets.html#seq-imdb
# +
from pathlib import Path
from sklearn.model_selection import train_test_split
import tensorflow as tf
from pyspark.sql import SparkSession
from transformers import DistilBertTokenizerFast
from transformers import TFDistilBertForSequenceClassification
import tensorflow as tf
import pandas as pd
import numpy as np
from cerebro.backend import SparkBackend
from cerebro.keras import SparkEstimator
from cerebro.storage import LocalStore
from cerebro.tune import RandomSearch, GridSearch, hp_choice
def read_imdb_split(split_dir):
    """Read the aclImdb folder layout into parallel (texts, labels) lists.

    split_dir: path to 'aclImdb/train' or 'aclImdb/test', containing 'pos'
        and 'neg' subdirectories with one review per text file.
    Returns (texts, labels) where label 0 means negative, 1 means positive.
    """
    split_dir = Path(split_dir)
    texts = []
    labels = []
    for label_dir in ["pos", "neg"]:
        for text_file in (split_dir/label_dir).iterdir():
            texts.append(text_file.read_text())
            # BUG FIX: the original used `label_dir is "neg"` — identity, not
            # equality, comparison against a string literal. That is a
            # SyntaxWarning on CPython 3.8+ and not guaranteed to be True
            # even when the strings are equal. Use `==`.
            labels.append(0 if label_dir == "neg" else 1)
    return texts, labels
# -
# Fraction of the training split to actually use; lower it for quick runs.
sample_fraction = 1.0
# ### 1. Download data
# +
# #!wget http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
# #!tar -xf aclImdb_v1.tar.gz
# -
# ### 2. Using Hugging Face DistilBERT model with TF on a single node
# +
train_texts, train_labels = read_imdb_split('aclImdb/train')
# Optionally subsample, then hold out 25% of what remains for validation.
if sample_fraction < 1.0:
    train_texts, _, train_labels, _ = train_test_split(train_texts, train_labels, test_size=1. - sample_fraction)
train_texts, val_texts, train_labels, val_labels = train_test_split(train_texts, train_labels, test_size=.25)
# Tokenize once up front; truncation+padding yields fixed-length encodings.
tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
train_encodings = tokenizer(train_texts, truncation=True, padding=True)
val_encodings = tokenizer(val_texts, truncation=True, padding=True)
# Wrap (encodings, labels) as tf.data pipelines for model.fit.
train_dataset = tf.data.Dataset.from_tensor_slices((
    dict(train_encodings),
    train_labels
))
val_dataset = tf.data.Dataset.from_tensor_slices((
    dict(val_encodings),
    val_labels
))
# -
# Fine-tune the pretrained model end to end (3 epochs, batch size 16).
model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased')
optimizer = tf.keras.optimizers.Adam(learning_rate=5e-5)
model.compile(optimizer=optimizer, loss=model.compute_loss, metrics=['acc'])
model.fit(train_dataset.batch(16), epochs=3, validation_data=val_dataset.batch(16))
# ### 3. Using Hugging Face DistilBERT model with Cerebro for Distributed Model Selection
# +
# If GPU memory runs out restart the notebook and only run the imports
# and Cerebro section (3.)
# -
# Change the master url (local[1]) to the correct Spark master url.
spark = SparkSession \
    .builder \
    .master("local[1]") \
    .appName("IMDB Sequence Classification") \
    .getOrCreate()
# +
# Re-read and re-tokenize the full training split for the distributed run.
train_texts, train_labels = read_imdb_split('aclImdb/train')
if sample_fraction < 1.0:
    train_texts, _, train_labels, _ = train_test_split(train_texts, train_labels, test_size=1. - sample_fraction)
tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
train_encodings = tokenizer(train_texts, truncation=True, padding=True)
# -
# Materialize the encodings as a Spark DataFrame (one row per review).
df = spark.createDataFrame(pd.DataFrame.from_dict({
    'input_ids': train_encodings['input_ids'],
    'attention_mask': train_encodings['attention_mask'],
    'label': train_labels}
))
# +
# Cerebro backend/storage: single worker; LocalStore persists intermediate
# data and checkpoints under the given directory.
backend = SparkBackend(spark_context=spark.sparkContext, num_workers=1, verbose=0)
store = LocalStore("/users/snakanda/cerista")
# Define more parameters if you want to try more model configurations.
search_space = {'lr': hp_choice([5e-5])}
# -
def estimator_gen_fn(params):
    """Build a Cerebro SparkEstimator wrapping DistilBERT for sequence classification.

    params: one point from the search space; params['lr'] is the Adam
        learning rate for this trial.
    Returns a cerebro.keras.SparkEstimator ready for model selection.
    """
    from tensorflow.keras.layers import Input
    from tensorflow.keras.models import Model
    from transformers import TFDistilBertForSequenceClassification
    # TFDistilBertForSequenceClassification model is not directly serializable. Hence we recreate the model
    # and wrap it using a serializable Keras model. Check `call` method of TFDistilBertForSequenceClassification
    # class for more details
    distilbert_model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased')
    # Inputs: token ids + attention mask, padded to 512 tokens (DistilBERT's max).
    input_ids = Input(shape=(512,), dtype=tf.int64)
    attention_mask = Input(shape=(512,), dtype=tf.int64)
    # Re-assemble the classification head the way the HF model does internally:
    # [CLS]-position hidden state -> pre_classifier -> dropout -> classifier.
    hidden_state = distilbert_model.distilbert(input_ids, attention_mask=attention_mask, training=False)[0]
    pooled_output = hidden_state[:, 0]
    pooled_output = distilbert_model.pre_classifier(pooled_output)
    pooled_output = distilbert_model.dropout(pooled_output, training=False)
    logits = distilbert_model.classifier(pooled_output)
    model = Model(inputs=[input_ids, attention_mask], outputs=logits)
    # FIX: `lr=` is a deprecated alias that newer Keras releases reject;
    # use `learning_rate=` (same spelling as the single-node section).
    optimizer = tf.keras.optimizers.Adam(learning_rate=params['lr'])
    # Per-sample (unreduced) loss, as Cerebro handles reduction itself.
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
    CUSTOM_OBJECTS = {'TFDistilBertForSequenceClassification': TFDistilBertForSequenceClassification}
    keras_estimator = SparkEstimator(
        model=model,
        optimizer=optimizer,
        loss=loss_fn,
        metrics=['acc'],
        batch_size=16,
        custom_objects=CUSTOM_OBJECTS)
    return keras_estimator
# Grid search over the (single-point) space: 3 epochs per trial, 25% of the
# data held out for validation, best model chosen by validation loss.
grid_search = GridSearch(backend, store, estimator_gen_fn, search_space, 3,
                         validation=0.25, evaluation_metric='loss',
                         feature_columns=['input_ids', 'attention_mask'],
                         label_columns=['label'],
                         verbose=1)
# +
model = grid_search.fit(df)
# Or use the following method if the data is already materialized.
# model = grid_search.fit_on_prepared_data()
# -
| examples/IMDB_sequence_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Logistic Regression
#
# + A classification algorithm for categorical variables. See Image below, find the "churn" categorical variable.
# + It is an analog to linear regression, but it does not attempt to predict a numeric independent variable.
# + The example below is for a binary classifier, but logistic regression can be used for multiclass as well.
# + Examples of using logistic regression: probability of someone having a heart attack; predicting mortality in injured patients; prediction of a customer propensity to halting a subscription.
# > #### Suitability of Logistic Regression
# > + Data is binary (0/1, True/False)
# > + Need probabilistic results
# > + Need a linear decision boundary (may be polynomial, not straight line)
# > + It allows us to understand the impact an independent variable has on the dependent variable while controlling other independent variables.
#
# +
UserPath = "/home/cerbero/Documents/edX/IBM Data Science/IBM ML0101EN/"
InputPath = UserPath+"00/"
OutputPath = UserPath+"04/"
from IPython.display import Image
Image(OutputPath+"Selection_005.png", retina=True)
# -
Image(OutputPath+"Selection_006.png", retina=True)
# ## Logistic Regression vs Linear Regression
#
# + The Logistic Regression equation can be _perceived_ as a line on a two-dimensional plane. It is actually the shadow of multi-dimensional lines (one dimension for each dependent variable pairing with the independent variable) on a single 2D plane of the binary classifier.
# + In the first Image below we can see how it works as a step function, establishing a _threshold_ to divide the function result into two categorical variables (0 and 1).
# + The second Image below shows how the use of a **sigmoid** instead of a step function returns the _probability_ of y-hat for that one independent variable being 1.
# + Our friend the sigmoid function is AKA the **logistic function**. Hence, logistic regression.
# + Examples of using logistic regression: probability of someone having a heart attack; predicting mortality in injured patients; prediction of a customer propensity to halting a subscription.
# > #### Suitability of Logistic Regression
# > + Data is binary (0/1, True/False)
# > + Need probabilistic results
# > + Need a linear decision boundary (may be polynomial, not straight line)
# > + It allows us to understand the impact an independent variable has on the dependent variable while controlling other independent variables.
#
# Step-function vs sigmoid illustrations for the notes in this section.
Image(OutputPath+"Selection_007.png", retina=True)
Image(OutputPath+"Selection_008.png", retina=True)
# ## Lab Activities
import pandas as pd
import pylab as pl
import numpy as np
import scipy.optimize as opt
from sklearn import preprocessing
# %matplotlib inline
import matplotlib.pyplot as plt
# +
# https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/ChurnData.csv
# # !wget -O InputPath+"ChurnData.csv" https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/ChurnData.csv
# -
# Load the telco churn dataset and keep only the modelling columns.
churn_df = pd.read_csv(InputPath+"ChurnData.csv")
churn_df.head()
churn_df = churn_df[['tenure', 'age', 'address', 'income', 'ed', 'employ', 'equip', 'callcard', 'wireless','churn']]
# Target must be integer-coded for scikit-learn classifiers.
churn_df['churn'] = churn_df['churn'].astype('int')
churn_df.head()
churn_df.shape
# Feature matrix X (7 predictors) and target vector y as NumPy arrays.
X = np.asarray(churn_df[['tenure', 'age', 'address', 'income', 'ed', 'employ', 'equip']])
X[0:5]
y = np.asarray(churn_df['churn'])
y [0:5]
from sklearn import preprocessing
# Standardize features to zero mean / unit variance.
X = preprocessing.StandardScaler().fit(X).transform(X)
X[0:5]
from sklearn.model_selection import train_test_split
# 80/20 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4)
print ('Train set:', X_train.shape, y_train.shape)
print ('Test set:', X_test.shape, y_test.shape)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
# C=0.01 is strong (inverse) regularization; liblinear suits small datasets.
LR = LogisticRegression(C=0.01, solver='liblinear').fit(X_train,y_train)
LR
yhat = LR.predict(X_test)
yhat
# predict_proba returns [P(class 0), P(class 1)] per test sample.
yhat_prob = LR.predict_proba(X_test)
yhat_prob
# BUG FIX: `jaccard_similarity_score` was deprecated in scikit-learn 0.21 and
# removed in 0.23, so the original import crashes on modern installs. Its old
# behaviour (fraction of exactly matching labels) is what `accuracy_score`
# computes; `jaccard_score` is the true Jaccard index of the positive class.
from sklearn.metrics import jaccard_score, accuracy_score
jaccard_score(y_test, yhat), accuracy_score(y_test, yhat)
# Please pay attention, jaccard_similarity_score is now accuracy_score
from sklearn.metrics import classification_report, confusion_matrix
import itertools
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Print a confusion matrix and render it as a colour-mapped image.

    Set ``normalize=True`` to convert each row into fractions of that row's
    true-label total before printing and plotting.
    """
    if normalize:
        # Row-normalise: divide each row by its number of true samples.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_positions = np.arange(len(classes))
    plt.xticks(tick_positions, classes, rotation=45)
    plt.yticks(tick_positions, classes)
    # Annotate every cell, flipping text colour for legibility on dark cells.
    cell_format = '.2f' if normalize else 'd'
    contrast_threshold = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], cell_format),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > contrast_threshold else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# labels=[1,0] puts the positive (churn) class in the first row/column.
print(confusion_matrix(y_test, yhat, labels=[1,0]))
# +
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, yhat, labels=[1,0])
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['churn=1','churn=0'],normalize= False, title='Confusion matrix')
# -
# Matrix above shows success for Churn = 0, but not for Churn = 1
print (classification_report(y_test, yhat))
# Log loss evaluates the predicted probabilities (not the hard labels).
from sklearn.metrics import log_loss
log_loss(y_test, yhat_prob)
# Same model with the SAG solver, for a log-loss comparison.
LR2 = LogisticRegression(C=0.01, solver='sag').fit(X_train,y_train)
yhat_prob2 = LR2.predict_proba(X_test)
print ("LogLoss: : %.2f" % log_loss(y_test, yhat_prob2))
| 04 Module 3 Logistic Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ! pip install tensorflow
import pandas as pd
from tqdm import tqdm
import pickle
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential, load_model
from sklearn import model_selection
from sklearn.metrics import accuracy_score
import pickle
import lightgbm as lgb
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_validate
from sklearn.ensemble import AdaBoostClassifier
import xgboost as xgb
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
# Number of distinct feature families produced by `separate_features`
# (activities, API calls, opcodes, permissions, receivers, services,
# system calls).
# BUG FIX: this was 3, but seven families are indexed throughout the
# notebook, so pre-allocations such as `[None] * NUM_FEATURES` were too
# short and raised IndexError.
NUM_FEATURES = 7
# Pre-extracted feature CSVs (rows = APK samples, columns = one-hot features).
test_df = pd.read_csv("../data/processed/test/test.csv")
# NOTE(review): the training frame is read from the test/ directory — confirm path.
train_df = pd.read_csv("../data/processed/test/train.csv")
testdb_df = pd.read_csv("../../androzoo/test/test_db/Features_files/test_db.csv")
train_df
test_df
# NOTE(review): base_df is only defined in a later cell; running this
# notebook top-to-bottom raises NameError here.
base_df[0]
# +
# %%time
def separate_features(target_df):
    """Split the combined feature DataFrame into 7 per-category frames.

    target_df: DataFrame whose column names are prefixed with the feature
        family they belong to (e.g. "ACTIVITY...", "APICALL...").
    Returns a list of 7 DataFrames in the fixed order: intent activities,
        API calls, opcodes, permissions, intent receivers, intent services,
        system calls — the order the model lists in this notebook expect.
    """
    # BUG FIX: the original pre-allocated `[None]*NUM_FEATURES` (too few
    # slots) but then assigned indices 0..6, raising IndexError on the very
    # first call. Build the list directly from the seven column prefixes.
    # (The original opcode regex "^OPCODE*" means "OPCOD" + zero-or-more "E";
    # "^OPCODE" matches the same real columns.)
    prefixes = ["ACTIVITY", "APICALL", "OPCODE", "PERMISSION",
                "RECEIVER", "SERVICE", "SYSTEM"]
    return [target_df.filter(regex="^" + prefix) for prefix in prefixes]
# -
# Split the loaded frames into the seven per-category feature lists.
df = separate_features(test_df)
#meta_train_df = separate_features(meta_train)
#meta_test_df = separate_features(meta_test)
df_train = separate_features(train_df)
def load_feature(feature):
    """Return an empty DataFrame carrying the training column schema for *feature*.

    The empty frame (no rows, training-time columns) serves as a template to
    align prediction-time data to the training feature space.
    """
    print("Loading "+feature +" features ...")
    training_columns = pd.read_csv("../data/processed/train_"+ feature+".csv").columns
    return pd.DataFrame(index=[], columns=training_columns)
# +
# feature_names = ["activities", "api_calls", "opcodes", "permissions", "receivers", "services", "system"]
# base_df = [None]*len(feature_names)
# for i, feature in enumerate(tqdm(feature_names)):
#     base_df[i] = load_feature(feature)
# +
# Load base df: one (presumably empty) frame per feature family carrying the
# training-time column schema, used to align prediction columns below.
feature_names = ["activities", "api_calls", "opcodes", "permissions", "receivers", "services", "system"]
base_df = [None]*len(feature_names)
for i, feature in enumerate(tqdm(feature_names)):
    base_df[i] = pd.read_csv('../src/info/base_'+feature+".csv")
# +
# concat base df to pred df: the outer concat adds any training columns the
# new data lacks; the inner concat drops columns unseen at training time;
# the resulting NaNs are zero-filled.
pred_df = [None]*len(feature_names)
for i, feature in enumerate(tqdm(feature_names)):
    pred_df[i] = pd.concat([base_df[i], df[i]], join='outer', sort=False)
    pred_df[i] = pd.concat([pred_df[i], base_df[i]], join = 'inner', sort=False)
    pred_df[i].fillna(0, inplace=True)
# -
pred_df[2].dtypes
# Manual spot-check of the same alignment for the first feature family.
pred_df_ = pd.concat([base_df[0], df[0]], join='outer', sort=False)
pred_df_ = pd.concat([pred_df[0], base_df[0]], join = 'inner', sort=False)
pred_df_.fillna(0, inplace=True)
pred_df_.dtypes
base_df[0].dtypes
# +
# Load all fitted first-stage models (one per feature family) plus the
# per-family scalers and the logistic meta-model from disk.
lgbm_models = []
rfc_models = []
knearest_models = []
xgb_models = []
cat_models = []
bagging_models = []
scalers = []
tnn_models = []
for i, n in enumerate(feature_names):
    filename = n + '_scaler.sav'
    scalers.append(pickle.load(open("../models/Scaler/"+filename, 'rb')))
for i, n in enumerate(feature_names):
    filename = 'gsLGB_'+ n + '_model.sav'
    lgbm_models.append(pickle.load(open("../models/LightGBM/"+filename, 'rb')))
for i, n in enumerate(feature_names):
    filename = 'gsRFC_'+ n + '_model.sav'
    rfc_models.append(pickle.load(open("../models/Random_Forest_Classifier/"+filename, 'rb')))
for i, n in enumerate(feature_names):
    filename = 'gsKnearest_'+ n + '_model.sav'
    knearest_models.append(pickle.load(open("../models/Knearest/"+filename, 'rb')))
for i, n in enumerate(feature_names):
    filename = 'gsXGBM_'+ n + '_model.sav'
    xgb_models.append(pickle.load(open("../models/xgbm/"+filename, 'rb')))
for i, n in enumerate(feature_names):
    filename = 'catboost_'+ n + '_model.sav'
    cat_models.append(pickle.load(open("../models/Catboost/"+filename, 'rb')))
for i, n in enumerate(feature_names):
    filename = 'bagging_'+ n + '_model.sav'
    bagging_models.append(pickle.load(open("../models/Bagging/"+filename, 'rb')))
# Keras models are stored as HDF5, not pickles.
for i, n in enumerate(feature_names):
    filename = 'nn_'+ n + '_model.h5'
    tnn_models.append(load_model("../models/nn/"+filename))
meta_model = pickle.load(open("../models/Meta/meta_logistic.sav", 'rb'))
# -
pred_df[1]
# NOTE(review): pred_df_sc / meta_train_df are defined only in later cells;
# these inspection lines assume out-of-order notebook execution.
pred_df_sc[1]
meta_train_df[1] + meta_train_df[2] + meta_train_df[4]
#preprocess loaded data
def preprocess_data(data):
    """Scale each per-family feature frame with its fitted scaler.

    data: list of 7 DataFrames in `feature_names` order.
    Returns a list of 7 DataFrames holding the scaled values (column labels
        are reset to integer positions by the DataFrame re-wrap).
    """
    # BUG FIX: the original pre-allocated `[None]*NUM_FEATURES` slots but
    # enumerates all seven feature families, so the later indices raised
    # IndexError; size the list from `feature_names` instead.
    data_sc = [None]*len(feature_names)
    for i, feature in enumerate(tqdm(feature_names)):
        scaled_features = scalers[i].transform(data[i])
        data_sc[i] = pd.DataFrame(scaled_features)
    return data_sc
meta_train_df_sc[1].shape
# ## Stacking prediction
# +
# Train Stacking model 2nd stage: every base model (7 algorithms x 7 feature
# families = 49 models) predicts on its scaled frame; the per-model
# prediction vectors are stacked column-wise as the meta-learner's input.
all_models = lgbm_models + rfc_models + cat_models + knearest_models + xgb_models + tnn_models + bagging_models
# Repeat the 7 scaled frames once per algorithm so indices line up with all_models.
all_pred_df_sc =meta_train_df_sc +meta_train_df_sc +meta_train_df_sc +meta_train_df_sc +meta_train_df_sc +meta_train_df_sc +meta_train_df_sc
# all_test_df_sc = meta_test_df_sc + meta_test_df_sc + meta_test_df_sc + meta_test_df_sc + meta_test_df_sc + meta_test_df_sc + meta_test_df_sc
valid_pred = [None]*len(all_models)
# test_pred = [None]*len(all_models)
for i , base_model in enumerate(all_models):
    valid_pred[i] = base_model.predict(all_pred_df_sc[i])
#     test_pred[i] = base_model.predict(all_test_df_sc[i])
stacked_valid_predictions = np.column_stack(tuple(valid_pred))
# stacked_test_predictions = np.column_stack(tuple(test_pred))
# pred = meta_model.predict(stacked_valid_predictions)
# -
len(all_models)
# +
#Train Stacking model 2nd stage with only best 3, API calls, opcodes, receivers
# all_models_1 = [lgbm_models[1] , rfc_models[1] , cat_models[1] , knearest_models[1] , xgb_models[1] , tnn_models[1] , bagging_models[1]]
# all_models_2 = [lgbm_models[2] , rfc_models[2], cat_models[2] , knearest_models[2], xgb_models[2] , tnn_models[2] , bagging_models[2]]
# all_models_3 = [lgbm_models[4] , rfc_models[4] , cat_models[4] , knearest_models[4] , xgb_models[4] , tnn_models[4] , bagging_models[4]]
# Indices 1, 2, 4 = api_calls, opcodes, receivers in feature_names order.
all_models_1 = [lgbm_models[1], lgbm_models[2], lgbm_models[4]]
all_models_2 = [rfc_models[1], rfc_models[2], rfc_models[4]]
all_models_3 = [cat_models[1], cat_models[2], cat_models[4]]
all_models_4 = [knearest_models[1], knearest_models[2], knearest_models[4]]
all_models_5 = [xgb_models[1], xgb_models[2], xgb_models[4]]
all_models_6 = [tnn_models[1], tnn_models[2], tnn_models[4]]
all_models_7 = [bagging_models[1], bagging_models[2], bagging_models[4]]
all_models_ = all_models_1+ all_models_2+ all_models_3+ all_models_4+ all_models_5+ all_models_6+ all_models_7
all_pred_df_sc_ =[meta_train_df_sc[1], meta_train_df_sc[2], meta_train_df_sc[4]]
all_pred_df_sc_ = all_pred_df_sc_ + all_pred_df_sc_ + all_pred_df_sc_ + all_pred_df_sc_ + all_pred_df_sc_+ all_pred_df_sc_+ all_pred_df_sc_
# all_test_df_sc = meta_test_df_sc + meta_test_df_sc + meta_test_df_sc + meta_test_df_sc + meta_test_df_sc + meta_test_df_sc + meta_test_df_sc
valid_pred_ = [None]*len(all_models_)
# test_pred = [None]*len(all_models[1])
for i , base_model in enumerate(all_models_):
    valid_pred_[i] = base_model.predict(all_pred_df_sc_[i])
#     test_pred[i] = base_model.predict(all_test_df_sc[i])
stacked_valid_predictions_ = np.column_stack(tuple(valid_pred_))
# stacked_test_predictions = np.column_stack(tuple(test_pred))
# pred = meta_model.predict(stacked_valid_predictions)
# -
len(all_models_)
len(np.ravel(np.array(all_models_).T).tolist())
len(all_pred_df_sc_)
stacked_valid_predictions.shape
# +
# Grid-search the Keras meta-network over batch size (3-fold CV, 20 epochs).
from sklearn.model_selection import GridSearchCV
epochs = [20]
batch_size = [10, 50 , 100]
param_grid = dict(epochs=epochs,
                  batch_size=batch_size)
# NOTE(review): KerasClassifier and create_baseline are defined in later
# cells — this cell assumes out-of-order execution.
nn_classifier = KerasClassifier(build_fn=create_baseline, input_dim=stacked_valid_predictions.shape[1], verbose=False)
gs_nn = GridSearchCV(nn_classifier, cv=3, verbose=True, param_grid=param_grid, n_jobs=8)
gs_nn.fit(stacked_valid_predictions, meta_train["MW"])
best_score = gs_nn.best_score_
cv_results = gs_nn.cv_results_
cv_mean = cv_results["mean_test_score"].mean()
print("cv_result : " + str(cv_mean))
# Persist the best Keras meta-model for the third stage.
filename = 'gs_nn_model.h5'
gs_nn.best_estimator_.model.save("../models/Meta/"+filename)
# +
# Train Stacking model 3rd stage: the four second-stage candidates each
# predict on the stacked first-stage output; their predictions are stacked
# column-wise as input for a final combiner.
all_2_models = [gs_nn.best_estimator_ , logicreg, rfc_classifier, linearsvc]
all_train_2 = [stacked_valid_predictions]*4
# all_test_2 = [stacked_test_predictions]*4
train_2_pred = [None]*len(all_2_models)
# test_2_preds = [None]*len(all_2_models)
for i , base_model in enumerate(all_2_models):
    train_2_pred[i] = base_model.predict(all_train_2[i])
#     test_2_pred[i] = base_model.predict(all_test_2[i])
# BUG FIX: the original wrote `tuple(train_2_pred[])` — an empty subscript,
# which is a SyntaxError; stack the prediction list directly.
stacked_2_predictions = np.column_stack(tuple(train_2_pred))
# stacked_test_2_predictions = np.column_stack(tuple(test_2_pred))
# -
# +
# Repeat the third-stage predictions (restricted-feature variant).
# BUG FIX: `train_2_pred_` was never initialised, so the indexed assignment
# below raised NameError, and the column_stack was indented inside the loop,
# rebuilding the matrix on every iteration. Pre-allocate and stack once.
train_2_pred_ = [None]*len(all_2_models)
for i , base_model in enumerate(all_2_models):
    train_2_pred_[i] = base_model.predict(all_train_2[i])
#     test_2_pred[i] = base_model.predict(all_test_2[i])
stacked_2_predictions_ = np.column_stack(tuple(train_2_pred_))
# +
# Final (third-stage) combiner: logistic regression on the stacked
# second-stage predictions, persisted for inference.
meta_3_model = LogisticRegression()
meta_3_model.fit(stacked_2_predictions, meta_train["MW"])
filename = 'meta_2_logistic.sav'
pickle.dump(meta_3_model, open("../models/Meta/"+filename, 'wb'))
# -
stacked_2_predictions_.shape
# +
# only API calls, Opcodes, receivers: 10-fold stratified CV of a logistic
# combiner on the restricted-feature stack.
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(LogisticRegression() , stacked_2_predictions_ , train_df["MW"], cv=stratifiedkfold, n_jobs=8)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# +
# Same CV on the full-feature stack.
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(LogisticRegression() , stacked_2_predictions , train_df["MW"], cv=stratifiedkfold, n_jobs=8)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# +
# Held-out meta-test evaluation of the logistic combiner.
# NOTE(review): stacked_test_2_predictions only appears as commented-out code
# above — this cell raises NameError unless it was computed elsewhere.
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(LogisticRegression() , stacked_test_2_predictions , meta_test["MW"], cv=stratifiedkfold, n_jobs=8)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# +
# Linear SVM combiner on the training stack.
# NOTE(review): LinearSVC is imported only in a later cell.
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(LinearSVC(max_iter=10000) , stacked_2_predictions , train_df["MW"], cv=stratifiedkfold, n_jobs=8)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# +
# Linear SVM combiner on the meta-test stack.
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(LinearSVC(max_iter=10000) , stacked_test_2_predictions , meta_test["MW"], cv=stratifiedkfold, n_jobs=8)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# +
# Rebuild the full second-stage prediction stack from the serving pred_df_sc
# frames and run the persisted logistic meta-model on it.
all_models = lgbm_models + rfc_models + cat_models + knearest_models + xgb_models + tnn_models + bagging_models
all_pred_df_sc = pred_df_sc + pred_df_sc + pred_df_sc + pred_df_sc + pred_df_sc + pred_df_sc + pred_df_sc
valid_pred = [None]*len(all_models)
for i , base_model in enumerate(all_models):
    valid_pred[i] = base_model.predict(all_pred_df_sc[i])
stacked_valid_predictions = np.column_stack(tuple(valid_pred))
pred = meta_model.predict(stacked_valid_predictions)
# -
# Bagging combiner: training stack, then meta-test stack.
from sklearn.ensemble import BaggingClassifier
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(BaggingClassifier() , stacked_2_predictions , train_df["MW"], cv=stratifiedkfold, n_jobs=8)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
from sklearn.ensemble import BaggingClassifier
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(BaggingClassifier() , stacked_test_2_predictions , meta_test["MW"], cv=stratifiedkfold, n_jobs=8)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# Keras combiner via the scikit-learn wrapper.
nn_classifier = KerasClassifier(build_fn=create_baseline, input_dim=stacked_2_predictions.shape[1], verbose=False)
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(nn_classifier , stacked_2_predictions , train_df["MW"], cv=stratifiedkfold, n_jobs=8)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# +
# Fit and persist the (second-stage) logistic meta-model.
meta_model = LogisticRegression()
meta_model.fit(stacked_valid_predictions, meta_train["MW"])
filename = 'meta_logistic.sav'
# NOTE(review): saved under 'meta/' here but loaded from 'Meta/' earlier —
# case mismatch will break on case-sensitive filesystems.
pickle.dump(meta_model, open("../models/meta/"+filename, 'wb'))
# -
meta_valid_pred = meta_model.score(stacked_valid_predictions, test_df["MW"])
print("Test data score: {}".format(meta_valid_pred))
# 70/30 split of the test frame into meta-train/meta-test, persisted to CSV.
meta_train, meta_test = train_test_split(test_df, test_size=None, train_size=0.7, random_state = 3)
meta_train.head()
meta_train.to_csv('../data/processed/meta/meta_train.csv')
meta_test.to_csv('../data/processed/meta/meta_test.csv')
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
def create_baseline(input_dim):
    """Build the binary-classification MLP used as the stacking meta-learner.

    input_dim: width of the stacked-predictions input vector.
    Returns a compiled Keras Sequential model.
    """
    net = Sequential()
    # Three wide ReLU layers, with dropout after the first two.
    net.add(Dense(1000, input_dim=input_dim, kernel_initializer='normal', activation='relu'))
    net.add(Dropout(0.4))
    net.add(Dense(1000, activation='relu'))
    net.add(Dropout(0.4))
    net.add(Dense(1000, activation='relu'))
    # Single sigmoid output: probability of the positive (malware) class.
    net.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
    # Logarithmic loss with the Adam optimizer, tracking accuracy.
    net.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return net
stacked_valid_predictions.shape[1]
# Summary table of second-stage CV means per combiner.
cv_2_table = pd.DataFrame(index=["Neural Network", "LogisticRegressioni", "LinearSVC", "RandomForestClassifier"],columns=["CV mean"])
# +
#score with only Opcodes, API calls, receivers
nn_classifier = KerasClassifier(build_fn=create_baseline, input_dim=stacked_valid_predictions_.shape[1], verbose=False)
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(nn_classifier , stacked_valid_predictions_ , train_df["MW"], cv=stratifiedkfold, n_jobs=8)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# +
# Full stack against the whole test frame.
nn_classifier = KerasClassifier(build_fn=create_baseline, input_dim=stacked_valid_predictions.shape[1], verbose=False)
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(nn_classifier , stacked_valid_predictions , test_df["MW"], cv=stratifiedkfold, n_jobs=8)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# +
# Full stack against the meta-train labels; record the mean in the table.
nn_classifier = KerasClassifier(build_fn=create_baseline, input_dim=stacked_valid_predictions.shape[1], verbose=False)
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(nn_classifier , stacked_valid_predictions , meta_train["MW"], cv=stratifiedkfold, n_jobs=8)
cv_2_table.iloc[0, 0] = np.mean(scores)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# +
nn_classifier = KerasClassifier(build_fn=create_baseline, input_dim=stacked_valid_predictions.shape[1], verbose=False)
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(nn_classifier , stacked_test_predictions , meta_test["MW"], cv=stratifiedkfold, n_jobs=8)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# -
# NOTE(review): likely typo — nn_classifierl is created but never used; it is
# nn_classifier (from the previous cell) that is fitted and saved.
nn_classifierl = KerasClassifier(build_fn=create_baseline, input_dim=stacked_valid_predictions.shape[1], verbose=False)
nn_classifier = nn_classifier.fit(stacked_valid_predictions, meta_train['MW'])
nn_classifier.model.save("../models/Meta/nn_classifier.h5")
# +
# Random-forest combiner on the restricted-feature stack.
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(RandomForestClassifier() , stacked_valid_predictions_ , train_df["MW"], cv=10, n_jobs=8)
# cv_2_table.iloc[3, 0] = np.mean(scores)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# +
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(RandomForestClassifier() , stacked_valid_predictions , meta_train["MW"], cv=10, n_jobs=8)
cv_2_table.iloc[3, 0] = np.mean(scores)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# +
# Repeat RFC CV on the stacked validation predictions (result not recorded).
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(RandomForestClassifier() , stacked_valid_predictions , meta_train["MW"], cv=10, n_jobs=8)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# +
# Generalisation check: CV of the RFC on the stacked test predictions.
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(RandomForestClassifier() , stacked_test_predictions , meta_test["MW"], cv=10, n_jobs=8)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# -
# Fit the final random-forest meta-model and pickle it.
rfc_classifier = RandomForestClassifier().fit(stacked_valid_predictions, meta_train['MW'])
filename = 'rfc_model.sav'
# BUG FIX: pickle.dump(..., open(...)) leaked the file handle; a context
# manager guarantees the file is flushed and closed even on error.
with open("../models/Meta/"+filename, 'wb') as f:
    pickle.dump(rfc_classifier, f)
# +
# LinearSVC meta-learner CV on the "_" variant (table write commented out).
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC, LinearSVC
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(LinearSVC(max_iter=10000) , stacked_valid_predictions_ , train_df["MW"], cv=10, n_jobs=8)
# cv_2_table.iloc[2, 0] = np.mean(scores)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# +
# CV on the stacked validation predictions; recorded in row 2 of the table.
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC, LinearSVC
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(LinearSVC(max_iter=10000) , stacked_valid_predictions , meta_train["MW"], cv=10, n_jobs=8)
cv_2_table.iloc[2, 0] = np.mean(scores)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# +
# Generalisation check on the stacked test predictions.
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(LinearSVC(max_iter=10000) , stacked_test_predictions , meta_test["MW"], cv=10, n_jobs=8)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# -
# Fit the final linear SVM meta-model and pickle it.
linearsvc = LinearSVC(max_iter=10000).fit(stacked_valid_predictions, meta_train['MW'])
filename = 'linearsvc_model.sav'
# BUG FIX: use a context manager instead of a leaked open() handle.
with open("../models/Meta/"+filename, 'wb') as f:
    pickle.dump(linearsvc, f)
# +
# Logistic-regression meta-learner CV.
# NOTE(review): unlike rows 0/2/3, row 1 of cv_2_table is filled from the
# "stacked_valid_predictions_ / train_df" variant — confirm this is intended
# and not a copy-paste slip (the meta_train cell appears to be missing).
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(LogisticRegression(max_iter=10000) , stacked_valid_predictions_ , train_df["MW"], cv=10, n_jobs=8)
cv_2_table.iloc[1, 0] = np.mean(scores)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# +
# Generalisation check on the stacked test predictions.
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(LogisticRegression(max_iter=10000) , stacked_test_predictions , meta_test["MW"], cv=10, n_jobs=8)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# -
# Persist the filled second-stage table and the fitted meta-model.
cv_2_table.to_csv("../reports/2nd_stage_cv_results.csv")
cv_2_table
logicreg = LogisticRegression(max_iter=10000).fit(stacked_valid_predictions, meta_train['MW'])
filename = 'logicreg_model.sav'
# BUG FIX: use a context manager instead of a leaked open() handle.
with open("../models/Meta/"+filename, 'wb') as f:
    pickle.dump(logicreg, f)
from sklearn.ensemble import VotingClassifier
# Ensemble of all second-stage learners.
# BUG FIX: voting='soft' averages predict_proba, but LinearSVC does not
# implement predict_proba, so soft voting raises AttributeError at predict
# time. Hard voting (majority of predicted labels) works with every
# estimator in the ensemble.
voting = VotingClassifier([('logic', LogisticRegression(max_iter=10000)), ('rfc', RandomForestClassifier()), ('linearsvc', LinearSVC(max_iter=10000)), ('nn', gs_nn.best_estimator_)], voting='hard')
voting.fit(stacked_valid_predictions, meta_train['MW'])
scores = voting.score(stacked_test_predictions, y=meta_test['MW'])
# score() returns a single accuracy value, not per-fold CV scores, so label
# it accordingly (the old "Cross-Validation scores" message was misleading).
print("Test accuracy: {}".format(scores))
# +
# 10-fold CV of the voting ensemble on the stacked validation predictions.
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(voting , stacked_valid_predictions , meta_train["MW"], cv=10, n_jobs=8)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# +
# NOTE(review): this cell duplicates the previous one verbatim.
stratifiedkfold = StratifiedKFold(n_splits=10)
scores = cross_val_score(voting , stacked_valid_predictions , meta_train["MW"], cv=10, n_jobs=8)
print("Cross-Validation scores: {}".format(scores))
print("Average score: {}".format(np.mean(scores)))
# -
# First-stage summary table skeleton: one row per learner, one "CV" and one
# "test" column per feature group.
cv_table = pd.DataFrame(index=["LightGBM", "Bagging", "CatBoost", "Neural Network", "XGBM", "Random Forest", "K-Neighbors"],columns=["Activities CV", "Activities test", "API calls CV", "API calls test", "Opcodes CV", "Opcodes test", "Permissions CV", "Permissions test", "Receivers CV", "Receivers test", "Services CV", "Services test", "System CV", "System test"])
# NOTE(review): the table is written to disk here while still empty — it is
# only filled by the loops below; confirm the to_csv should come after them.
cv_table.to_csv("../reports/1st_stage_cv_results.csv")
cv_table
cv_table.max().max()
# CatBoost results (row 2): mean CV score and test accuracy per feature set.
for i, model in enumerate(cat_models):
    cv_results = model.cv_results_
    cv_mean = cv_results["mean_test_score"].mean()
    result = model.score(pred_df_sc[i], test_df["MW"])
    cv_table.iloc[2,2*i] = cv_mean
    cv_table.iloc[2,(2*i)+1] = result
    print(feature_names[i] + " cv_result : " + str(cv_mean))
    print(feature_names[i] +" result on test data: " + str(result))
    # NOTE(review): report is overwritten on every iteration and never read.
    report = classification_report(test_df["MW"], model.predict(pred_df_sc[i]), target_names=["benign", "malware"], digits=6, output_dict=True)
len(lgbm_models)
# LightGBM results (row 0).
for i, model in enumerate(lgbm_models):
    cv_results = model.cv_results_
    cv_mean = cv_results["mean_test_score"].mean()
    result = model.score(pred_df_sc[i], test_df["MW"])
    cv_table.iloc[0,2*i] = cv_mean
    cv_table.iloc[0,(2*i)+1] = result
    print(feature_names[i] + " cv_result : " + str(cv_mean))
    print(feature_names[i] +" result on test data: " + str(result))
    # NOTE(review): report is overwritten on every iteration and never read.
    report = classification_report(test_df["MW"], model.predict(pred_df_sc[i]), target_names=["benign", "malware"], digits=6, output_dict=True)
# +
# Neural-network results (row 3). CV means were computed elsewhere and are
# hard-coded here; the Keras models expose evaluate() instead of score().
cv_mean = [0.5123466138559081, 0.8517648849527397, 0.818891074007848, 0.784123629674715, 0.8429025840423374, 0.5092410236809257,0.7692016416238197]
for i, model in enumerate(tnn_models):
    # cv_results = model.cv_results_
    # cv_mean = cv_results["mean_test_score"].mean()
    result = model.evaluate(pred_df_sc[i], test_df["MW"])
    cv_table.iloc[3,2*i] = cv_mean[i]
    # result[1] is presumably the accuracy metric of evaluate() — confirm
    # the compiled metrics order is [loss, accuracy].
    cv_table.iloc[3,(2*i)+1] = result[1]
    # print(feature_names[i] + " cv_result : " + str(cv_mean))
    print(feature_names[i] +" result on test data: " + str(result))
    # report = classification_report(test_df["MW"], model.predict(pred_df_sc[i]), target_names=["benign", "malware"], digits=6, output_dict=True)
# -
# -
for i, model in enumerate(bagging_models):
cv_results = model.cv_results_
cv_mean = cv_results["mean_test_score"].mean()
result = model.score(pred_df_sc[i], test_df["MW"])
cv_table.iloc[1,2*i] = cv_mean
cv_table.iloc[1,(2*i)+1] = result
print(feature_names[i] + " cv_result : " + str(cv_mean))
print(feature_names[i] +" result on test data: " + str(result))
report = classification_report(test_df["MW"], model.predict(pred_df_sc[i]), target_names=["benign", "malware"], digits=6, output_dict=True)
for i, model in enumerate(rfc_models):
cv_results = model.cv_results_
cv_mean = cv_results["mean_test_score"].mean()
result = model.best_estimator_.score(pred_df_sc[i], test_df["MW"])
cv_table.iloc[5,2*i] = cv_mean
cv_table.iloc[5,(2*i)+1] = result
print(feature_names[i] + " cv_result : " + str(cv_mean))
print(feature_names[i] +" result on test data: " + str(result))
report = classification_report(test_df["MW"], model.predict(pred_df_sc[i]), target_names=["benign", "malware"], digits=6, output_dict=True)
# K-nearest-neighbours results (row 6), with hard-coded CV means.
cv_mean = [0.504946996466431, 0.846643109540636, 0.8279151943462898, 0.7817264008076729, 0.8440181726400807, 0.5115598182735992, 0.7742554265522463]
for i, model in enumerate(knearest_models):
    # cv_results = model.cv_results_
    # cv_mean = cv_results["mean_test_score"].mean()
    result = model.score(pred_df_sc[i], test_df["MW"])
    cv_table.iloc[6,2*i] = cv_mean[i]
    cv_table.iloc[6,(2*i)+1] = result
    # NOTE(review): this prints the whole cv_mean list, not cv_mean[i].
    print(feature_names[i] + " cv_result : " + str(cv_mean))
    print(feature_names[i] +" result on test data: " + str(result))
    report = classification_report(test_df["MW"], model.predict(pred_df_sc[i]), target_names=["benign", "malware"], digits=6, output_dict=True)
# XGBoost results (row 4).
for i, model in enumerate(xgb_models):
    cv_results = model.cv_results_
    cv_mean = cv_results["mean_test_score"].mean()
    result = model.score(pred_df_sc[i], test_df["MW"])
    cv_table.iloc[4,2*i] = cv_mean
    cv_table.iloc[4,(2*i)+1] = result
    print(feature_names[i] + " cv_result : " + str(cv_mean))
    print(feature_names[i] +" result on test data: " + str(result))
    report = classification_report(test_df["MW"], model.predict(pred_df_sc[i]), target_names=["benign", "malware"], digits=6, output_dict=True)
# Display the filled first-stage table.
cv_table
# Score of the LightGBM model at index 6 on its matching test feature set.
lgbm_models[6].score(pred_df_sc[6], test_df["MW"])
# +
# Wrap the raw predictions in a DataFrame for inspection.
df_pred_ = pd.DataFrame(pred)
df_pred_
# -
# NOTE(review): this assigns by index alignment — confirm df_pred_ and
# test_df share the same index, otherwise labels will be misaligned.
test_df['label'] = df_pred_
test_df
# !pip install --upgrade scikit-learn
from sklearn.datasets import load_iris
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import StackingClassifier
from sklearn.model_selection import train_test_split
# +
# Build the meta-learner input by stacking every first-stage model's
# predictions column-wise.
all_models = lgbm_models + rfc_models + cat_models + knearest_models + xgb_models + bagging_models
base_pred = [None]*len(all_models)
# Repeat the per-feature matrices so index i maps model i onto feature set
# i % len(X_sc). BUG FIX: "all_modelsl" was a typo and raised NameError.
# NOTE(review): this assumes each group of len(X_sc) models is ordered to
# match X_sc — confirm against how the model lists were built.
all_X_sc = X_sc*len(all_models)
for i , base_model in enumerate(all_models):
    base_pred[i] = base_model.predict(all_X_sc[i])
stacked_predictions = np.column_stack(tuple(base_pred))
meta_model = LogisticRegression()
meta_model.fit(stacked_predictions, df_1["MW"])
# BUG FIX: the test-side list was not repeated like all_X_sc, so indexing
# all_X_test_sc[i] below would run past the end of X_test_sc.
all_X_test_sc = X_test_sc*len(all_models)
valid_pred = [None]*len(all_models)
for i , base_model in enumerate(all_models):
    valid_pred[i] = base_model.predict(all_X_test_sc[i])
stacked_valid_predictions = np.column_stack(tuple(valid_pred))
| notebooks/2019-12-11.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/DanielleYurit/LibrayPandas/blob/main/Estrutra_de_dados.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="4BHH94zfUbWG"
# **Estrutura de dados**
# + [markdown] id="S7taYssoUfdF"
# **Listas**
# + id="DOBdX_zyUT_q"
# A list of integers.
animais = [1,2,3]
animais
# + id="MhANiKr7Umh-"
# Lists are heterogeneous: strings, ints and floats can be mixed.
animais = ["cachorro","gato",12345,6.5]
animais
# + id="7D1iL5EeUr1B"
# Indexing starts at 0.
animais[0]
# + id="aypKFtDfUxTG"
animais[3]
# + id="ibm3afwmUyqx"
# Lists are mutable: replace the first element.
animais[0] = 'papagaio'
# + id="UcgHd66HU21w"
animais
# + id="r08-UIAJU4Hq"
# Remove the first occurrence of the value "gato".
animais.remove("gato")
# + id="kxiQ9849U9Wt"
animais
# + id="XsUg4dH2U-dx"
# Number of elements.
len(animais)
# + id="qHzeP-NPVBm0"
# Membership test (False after the removal above).
"gato" in animais
# + id="HXAZKQCVVDz7"
lista = [500, 30, 300, 80, 10]
# + id="iqsyhWxiVHqp"
max(lista)
# + id="JoUOdVsNVJJr"
min(lista)
# + id="Bo-INt2IVKne"
# append adds its argument as a single element, so this nests a list.
animais.append(["leão","cachorro"])
# + id="DdwMSgWWVO8J"
animais
# + id="MzGA0oxcVP4a"
# extend adds each element of the iterable individually.
animais.extend(["cobra",6])
# + id="AxClK3MeVWxD"
animais
# + id="USR8ytZ4VX_6"
# count looks at top-level elements only, so "leão" inside the nested
# list is not counted.
animais.count("leão")
# + id="lm96iSZXVbMs"
# In-place ascending sort.
lista.sort()
# + id="iKUGcFJGW5Jk"
lista
# + [markdown] id="fDyUOqnPW-Sz"
# **Tuples**
# + id="NVcXO901W6y9"
tp = ("Banana","Maçã",10,50)
# + id="sUzcCGpLXUzn"
tp[0]
# + id="Z3ws43LTXYrr"
# Tuples are immutable: this assignment raises TypeError (intentional demo).
tp[0] = "Laranja"
# + id="iXns0Vu0Xw9V"
tp.count("Maçã")
# + id="Tq__PVUwX5Pz"
# Slice: elements 0 and 1 (the end index is exclusive).
tp[0:2]
# + [markdown] id="RdwsGrbOYKiH"
# **Dictionaries**
#
# + id="ASP_XdhmYIRu"
# Mapping from fruit name to quantity.
dc = {"Maçã":20, "Banana":10, "Laranja":15, "Uva":5}
# + id="hTNIuc9QYeb6"
dc
# + id="LelA0oXQYhw_"
# Lookup by key.
dc["Maçã"]
# + id="bkE4E1YBYnDi"
# Assigning to an existing key overwrites its value.
dc["Maçã"] = 25
dc
# + id="3mdqhM9zYrJ5"
dc.keys()
# + id="vzRhAPi2Ytx-"
dc.values()
# + id="qlMPIZwQYveJ"
# setdefault inserts the key with the given value only if it is absent.
dc.setdefault("Limão",22)
# + id="cczbzgOFY24z"
dc
| Estrutra_de_dados.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Regresia logistică multinomială
# <NAME>, _grupa 10LF383_
# # Load data, filtering
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from typing import Tuple, List
# -
def load_file(path: str) -> Tuple[np.ndarray, np.ndarray]:
    """Loads the data from the file stored at :param path: and returns the
    input values and the class labels.

    :param path: path of a CSV file with data (no header; label in column 0)
    :return: a tuple containing the input matrix of shape (m, n) and a column
        vector with the m class labels in {0, ..., 9}
    """
    df = pd.read_csv(path, header=None)
    # Column 0 holds the class label; the remaining columns are the inputs.
    # BUG FIX: the function is annotated and documented to return np.ndarray
    # but previously returned pandas DataFrames; convert explicitly so the
    # downstream NumPy code (scaling, one-hot fancy indexing) gets the
    # declared types.
    X = df[df.columns[1:]].to_numpy()
    y = df[df.columns[0]].to_frame().to_numpy()
    assert X.ndim == 2, 'Matrix required for input values'
    assert y.ndim == 2, 'Column matrix required for labels'
    assert y.shape == (X.shape[0], 1), 'Same number of lines is required'
    return X, y
# Paths to the MNIST CSV dumps (label in column 0, 784 pixel values after it).
path_train = './data/mnist_train.csv'
path_test = './data/mnist_test.csv'
# +
# Load both splits and sanity-check the expected MNIST dimensions.
X_train, y_train = load_file(path_train)
assert X_train.shape == (60000, 784)
assert y_train.shape == (60000, 1)
X_test, y_test = load_file(path_test)
assert X_test.shape == (10000, 784)
assert y_test.shape == (10000, 1)
# -
def scale_values(X: np.ndarray) -> np.ndarray:
    """Map pixel intensities from [0, 255] into [0, 1] by dividing by 255.

    :param X: an (m, n) matrix with values between 0 and 255.
    :return: an (m, n) matrix containing values of :param X: scaled in [0, 1]
    """
    scaled = np.asarray(X) / 255
    assert 0 <= np.min(scaled) <= np.max(scaled) <= 1, 'Scaled values should be in [0, 1]'
    assert X.shape == scaled.shape, 'Scaling preserves shape'
    return scaled
# Scale pixel intensities from [0, 255] into [0, 1]; shapes are unchanged.
X_train = scale_values(X_train)
assert X_train.shape == (60000, 784)
X_test = scale_values(X_test)
assert X_test.shape == (10000, 784)
def augment_matrix(X: np.ndarray) -> np.ndarray:
    """Creates the design matrix: the original matrix with a prepended column
    of 1 (the bias/intercept term).

    :param X: an (m, n) matrix. Each row contains the pixels of a digit.
    :return: an ndarray of shape (X.shape[0], X.shape[1]+1)
    """
    m = X.shape[0]
    result = np.hstack((np.ones(m).reshape(m, 1), X))
    assert result.shape == (X.shape[0], X.shape[1] + 1)
    # BUG FIX: np.alltrue was deprecated and removed in NumPy 2.0; np.all is
    # the direct equivalent.
    assert np.all(result[:, 0] == 1), 'The first column should be filled with 1'
    return result
# Prepend the bias column of ones to both splits (784 -> 785 features).
X_train = augment_matrix(X_train)
X_test = augment_matrix(X_test)
# # Create model
# $$\large h_\Theta (\textbf{x})= \begin{pmatrix} P(y = 1|\textbf{x};\Theta) \\ P(y = 2|\textbf{x};\Theta) \\ . . . \\ P(y = K|\textbf{x};\Theta) \end{pmatrix} = softmax \begin{pmatrix} \theta_1^t \cdot \textbf{x} \\ \theta_2^t \cdot \textbf{x} \\ . . . \\ \theta_K^t \cdot \textbf{x} \end{pmatrix} $$<br>
# $$\large \textbf{z} = (z_1, ..., z_K)^t$$ <br>
# $$\large softmax(\textbf{z};l) = \frac{\exp(z_l)}{\sum_{k=1}^K \exp(z_k)}$$
# <p style="text-align: center;">sau $\large softmax(\textbf{z}';l) = \frac{\exp(z_l - M)}{\sum_{k=1}^K \exp(z_k - M)}$</p>
# <p style="text-align: center;">unde $k$ este numărul de clase; $ \textbf{z}' = (z_1 - M, ..., z_K - M)^t$ și $M$ valoarea maximă din $(z_1, ..., z_K)^t$ </p>
# Module-level constants used throughout the notebook (train() reads them).
k = 10 # number of classes
m, n = X_train.shape # after augmentation
def softmax(z, axis=1):
    """Applies softmax to a matrix z.

    Uses the max-subtraction trick for numerical stability: softmax is
    invariant to subtracting a per-row constant, and exponentiating the
    shifted values cannot overflow.

    :param z: np.array of shape (m, k)
    :param axis: axis along which the probabilities are normalised
    :return: np.array of shape (m, k) containing in cell (i, j): P(class=j|input i)
    """
    max_z = np.max(z, axis=axis, keepdims=True)  # maximum on each row
    exp_z = np.exp(z - max_z)  # all numbers exponentiated
    # PERF FIX: reuse exp_z instead of recomputing np.exp(z - max_z) a
    # second time for the per-row sums.
    sum_exp_z = np.sum(exp_z, axis=axis, keepdims=True)  # sum on each row
    result = exp_z / sum_exp_z
    assert exp_z.shape == z.shape
    assert np.allclose(np.sum(result, axis=1), 1)
    return result
def can_multiply(a, b):
    """Return True when *a* and *b* are both 2-D arrays whose shapes are
    compatible for matrix multiplication (columns of a == rows of b)."""
    both_matrices = a.ndim == 2 and b.ndim == 2
    return both_matrices and a.shape[1] == b.shape[0]
def model(X, theta):
    """Compute the class-probability matrix of the multinomial model.

    :param X: np.array of shape (m, n)
    :param theta: np.array of shape (n, k)
    :return: np.array of shape (m, k) containing in cell (i, j): P(class=j|input i)
    """
    assert can_multiply(X, theta)
    logits = X @ theta
    return softmax(logits)
# Funcție de cost:
# $$\Large J(\Theta) = -\frac{1}{m} \Bigg[ \sum_{j=1}^m \sum_{l=1}^k I(y^{(j)} = l) \cdot \ln \frac{\exp(\theta_l^t \textbf{x}^{(j)})}{\sum_{i=1}^k\exp(\theta_i^t \textbf{x}^{(j)})} \Bigg] + \frac{\lambda}{2m} \sum_{l=1}^k \sum_{i=1}^n \theta_{li}^2$$
# unde $I(\cdot)$ este funcția indicator:
# $$\Large \begin{equation}
# I(valoare\_logica)= \left\{
# \begin{array}{}
# 1, & \text{dacă} \ \ valoare\_logica = adevarat\\
# 0, & \text{dacă} \ \ valoare\_logica = fals
# \end{array}\right.
# \end{equation}
# $$
def J(X, y, num_classes, theta, _lambda):
    """Computes the error function for multinomial logistic regression.

    :param X: np.array of shape (m, n)
    :param y: np.array of shape (m, 1) with integer class indices
    :param theta: np.array of shape (n, k)
    :param _lambda: >= 0, coefficient of the L2 penalty term
    :return: loss value, composed of cross entropy + penalty term
    """
    m = X.shape[0]
    EPS = 1e-5
    # computes a one hot encoding for the given classes:
    # if y[i]=c, 0 <= c <= 9 (here), then line i in one_hot_encoding is filled
    # in with 0, excepting column c where one can find value 1
    one_hot_encoding = np.zeros((m, num_classes))
    # Fancy indexing: row i gets a 1 in column y[i]. np.transpose(y) has
    # shape (1, m) and broadcasts against np.arange(m).
    # NOTE(review): assumes y is a (m, 1) integer array — confirm callers.
    one_hot_encoding[np.arange(one_hot_encoding.shape[0]), np.transpose(y)] = 1
    assert np.all(one_hot_encoding.sum(axis=1) == 1)
    predicted = model(X, theta) # P(class=j | input i), shape (m, k)
    predicted = np.clip(predicted, EPS, 1-EPS) # avoids log(0)
    log_predicted = np.log(predicted)
    # Cross-entropy: keep only log P of each row's true class (Hadamard product).
    temp = np.multiply(one_hot_encoding, log_predicted)
    loss1 = -temp.sum() / m # mean cross-entropy over the m examples
    loss2 = _lambda/(2*m) * (theta ** 2).sum() # L2 penalty scaled by _lambda
    return loss1 + loss2
# Gradient:
# $$\LARGE \frac{\partial J}{\partial \theta_k}(\Theta) = -\frac{1}{m}\sum_{j=1}^m \Big[ \textbf{x}^{(j)} \Big( I(y^{(j)} = k) - P(y^{(j)} = k | \textbf{x}^{(j)}; \Theta) \Big) \Big]+\frac{\lambda}{m} \theta_k$$
def gradient(X, theta, y, num_classes, _lambda):
    """Vectorised gradient of J with respect to theta.

    :param X: np.array of shape (m, n)
    :param theta: np.array of shape (n, k)
    :param y: np.array of shape (m, 1) with integer class indices
    :param _lambda: >= 0, coefficient of the L2 penalty term
    :return: np.array of shape (n, k)
    """
    m, n = X.shape
    # One-hot encode y exactly as in J (see the note there about y's shape).
    one_hot_encoding = np.zeros((m, num_classes))
    one_hot_encoding[np.arange(one_hot_encoding.shape[0]), np.transpose(y)] = 1
    predicted = model(X, theta)
    # d/dtheta of the cross entropy: X^T (I(y=k) - P(y=k|x)), plus the
    # derivative of the L2 penalty.
    diff = np.subtract(one_hot_encoding, predicted)
    grad = X.transpose()@diff
    grad = -1/m * grad + (_lambda/m)*theta
    assert grad.shape == theta.shape
    return grad
def accuracy(X: np.array, y: np.array, theta: np.array) -> float:
    """Computes the accuracy on a given input dataset X, with ground truth y.

    :param X: np.array of shape (m, n)
    :param y: np.array of shape (m, 1); each value is the index of a class
    :param theta: np.array of shape (n, k) with weights
    :return: ratio between correctly classified cases and total number of cases
    """
    # P(class=c | input i) for every row i and class c, shape (m, k).
    y_hat = model(X, theta)
    # The predicted class is the one with maximum conditional probability.
    # Comparing argmax directly against the labels is equivalent to (and much
    # cheaper than) building two one-hot matrices and intersecting them, as
    # the previous implementation did.
    predicted_classes = y_hat.argmax(axis=1)
    true_classes = np.asarray(y).ravel()
    return (predicted_classes == true_classes).sum() / X.shape[0]
# # Train model
def train(X: np.array, y: np.array, num_classes, theta_init: np.array, _lambda: float, alpha: float, max_delta_error: float = 1e-4) -> Tuple[np.array, List[float], List[float], List[float]]:
    """Runs the training on the training dataset (X, y). Stops when the
    absolute difference between two successive error values is lower than
    :param max_delta_error:.

    :param X: np.array of shape (m, n), with training cases. Each row is a training case
    :param y: np.array of shape (m, 1), containing labels (0=class 0, ...)
    :param num_classes: number of classes
    :param theta_init: np.array of shape (n, k), initial weights
    :param _lambda: coefficient >= 0 for the L2 penalty term
    :param alpha: > 0, learning rate
    :param max_delta_error: > 0, threshold for max absolute difference of successive loss values
    :return: a tuple consisting of: weight matrix, list of errors computed at each epoch, 2 lists of accuracies on training and on test set at each epoch
    """
    # BUG FIX: work on a copy so the caller's theta_init is not silently
    # mutated by the in-place update below.
    theta = theta_init.copy()
    # NOTE(review): accuracies are computed on the module-level
    # X_train/y_train/X_test/y_test globals, not on the X/y parameters —
    # presumably intentional for this notebook, but confirm before reuse.
    errors = [J(X, y, num_classes, theta, _lambda)]
    acc_train = [accuracy(X_train, y_train, theta)]
    acc_test = [accuracy(X_test, y_test, theta)]
    epoch = 0
    while True:
        epoch += 1
        # Batch gradient-descent step against the gradient of the loss.
        theta -= alpha * gradient(X=X, theta=theta, y=y, num_classes=num_classes, _lambda=_lambda)
        error = J(X, y, num_classes, theta, _lambda)
        errors.append(error)
        train_acc = accuracy(X_train, y_train, theta)
        acc_train.append(train_acc)
        test_acc = accuracy(X_test, y_test, theta)
        acc_test.append(test_acc)
        if epoch % 10 == 0:
            print(f'Epoch: {epoch}, error: {error}, train accuracy: {train_acc}, test accuracy: {test_acc}')
        # Stop when the loss has plateaued.
        if np.abs(errors[-1] - errors[-2]) < max_delta_error:
            break
    return theta, errors, acc_train, acc_test
# +
# Train from an all-zero weight matrix, without L2 regularisation.
theta = np.zeros((n, k))
theta, errors, acc_train, acc_test = train(X=X_train, y=y_train, num_classes = 10, theta_init=theta, _lambda=0.0, alpha=0.5)
# -
# Learning curve: loss after every epoch.
plt.figure(figsize=(10, 8))
plt.plot(errors)
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.show()
# # Test model
# +
# print the error on the test set
# -
test_error = J(X_test, y_test, num_classes=10, theta=theta, _lambda=0.0)
print(f'Test error: {test_error}')
| 2020.03.17/logreg_k_classes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Building "Sums of Characteristics" BLP Instruments Example
# +
import pyblp
import numpy as np
import pandas as pd
# Show floats compactly in printed arrays.
np.set_printoptions(precision=3)
pyblp.__version__
# -
# In this example, we'll load the automobile product data from :ref:`references:Berry, Levinsohn, and Pakes (1995)` and show how to construct the included instruments from scratch.
# Load the BLP (1995) automobile product data shipped with pyblp.
product_data = pd.read_csv(pyblp.data.BLP_PRODUCTS_LOCATION)
product_data.head()
# Instruments already included in the data set, for comparison below.
product_data[[f'demand_instruments{i}' for i in range(8)]]
product_data[[f'supply_instruments{i}' for i in range(12)]]
# The demand-side "sums of characteristics" BLP instruments included in `product_data` can be built from scratch with the :func:`build_blp_instruments` function.
demand_instruments = pyblp.build_blp_instruments(pyblp.Formulation('1 + hpwt + air + mpd'), product_data)
demand_instruments
# The supply-side instruments from the original paper are "sums of characteristics" BLP instruments as well, but also include a standalone `mpd` shifter. Because of collinearity issues, the "rival" instrument constructed from the `trend` variable is excluded, and only the "own" instrument is retained.
supply_instruments = np.c_[
    pyblp.build_blp_instruments(pyblp.Formulation('1 + log(hpwt) + air + log(mpg) + log(space)'), product_data),
    pyblp.build_blp_instruments(pyblp.Formulation('0 + trend'), product_data)[:, 0],
    product_data['mpd'],
]
supply_instruments
| docs/notebooks/api/build_blp_instruments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sqlalchemy import create_engine
import api_keys
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# +
# Database credentials are kept out of the repo in api_keys.py.
DB_USER = api_keys.DB_USER
DB_PASS = api_keys.DB_PASS
DB_URL = api_keys.DB_URL
# echo=True logs every SQL statement — useful while exploring, noisy otherwise.
engine = create_engine("mysql+pymysql://{0}:{1}@{2}".format(DB_USER, DB_PASS, DB_URL), echo=True)
connection = engine.connect()
# Most recent 109 current-weather readings (newest first).
statement = """SELECT * FROM dublin_bikes.weather_current
order by time_queried desc
limit 109;""" # create select statement for weather_current table
# -
df = pd.read_sql_query(statement, engine) # https://stackoverflow.com/questions/29525808/sqlalchemy-orm-conversion-to-pandas-dataframe
df.shape
df.head(5)
df.tail(5)
# +
# the following notebook is based off material presented in Data Analytics module COMP47350 labs 7 and 9
# -
df.dtypes
# BUG FIX: removed a no-op `df["humidity"].fillna(0)` that stood here —
# without assignment (or inplace=True) the filled result was discarded;
# the effective fill happens below.
# +
categorical_columns = df[['station_number','weather_main', 'weather_description']].columns
# Convert data type to category for these columns
for column in categorical_columns:
    df[column] = df[column].astype('category')
# Fill missing humidity with 0, then store the column as integers.
df["humidity"] = df["humidity"].fillna(0)
df["humidity"] = df["humidity"].astype('int64')
# Column groups reused by the plots below.
continuous_columns = df.select_dtypes(['int64']).columns
datetime_columns = df.select_dtypes(['datetime64[ns]']).columns
df.dtypes
# +
#Print the number of duplicates, without the original rows that were duplicated
print('Number of duplicate (excluding first) rows in the table is: ', df.duplicated().sum())
# Check for duplicate rows.
# Use "keep=False" to mark all duplicates as true, including the original rows that were duplicated.
print('Number of duplicate rows (including first) in the table is:', df[df.duplicated(keep=False)].shape[0])
# -
# Check for duplicate columns
#First transpose the df so columns become rows, then apply the same check as above
dfT = df.T
print("Number of duplicate (excluding first) columns in the table is: ", dfT.duplicated().sum())
print("Number of duplicate (including first) columns in the table is: ", dfT[dfT.duplicated(keep=False)].shape[0])
# # no duplicate rows or columns
# Summary statistics per column group.
df.select_dtypes(['category']).describe().T
# # station_status is constant column
df.select_dtypes(include=['int64']).describe().T
df.select_dtypes(include=['datetime64[ns]']).describe().T
# Missing-value counts per column.
df.isnull().sum()
# # logical integrity
# A weather reading should never be queried before it was last updated.
test_1 = df[['time_queried','last_update']][df['time_queried']<df['last_update']]
print("Number of rows failing the test: ", test_1.shape[0])
test_1.head(5)
# Distribution plots for the continuous and datetime columns.
df[continuous_columns].hist(layout=(3, 3), figsize=(10,10), bins=10)
df[continuous_columns].plot(kind='box', subplots=True, figsize=(10,5), layout=(3,3), sharex=False, sharey=False)
df[datetime_columns].plot()
print(datetime_columns[0])
df[datetime_columns[0]].hist()
print(datetime_columns[1])
df[datetime_columns[1]].hist()
# Frequency bar chart for every categorical column.
for col in categorical_columns:
    f = df[col].value_counts().plot(kind='bar', figsize=(12,10))
    plt.title(col)
    plt.ylabel('number of occurances')
    plt.show()
df.columns
# Spot-check the first 50 readings in chronological order.
df[["temp", "feels_like", "station_number", "time_queried"]].sort_values("time_queried")[:50]
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### 15.4.2 用Pytorch实现SARSA算法
# +
import time
import numpy as np
import tkinter as tk
from PIL import ImageTk, Image
np.random.seed(1)  # reproducible epsilon-greedy exploration
PhotoImage = ImageTk.PhotoImage  # shorthand for the Tk-compatible image class
UNIT = 100  # pixel size of one grid cell
HEIGHT = 5  # grid height, in cells
WIDTH = 5  # grid width, in cells
class Env(tk.Tk):
    """5x5 grid-world rendered with tkinter.

    The agent (rectangle sprite) starts in the top-left cell; the two trees
    are terminal obstacles (reward -100) and the star is the terminal goal
    (reward +100). Every cell is UNIT x UNIT pixels, and sprite coordinates
    refer to cell centres (50, 150, 250, ...).
    """

    def __init__(self):
        super(Env, self).__init__()
        # Action indices 0..3 map to up / down / left / right.
        self.action_space = ['u', 'd', 'l', 'r']
        self.n_actions = len(self.action_space)
        # NOTE(review): the window title says 'Q Learning' although the agent
        # in this notebook performs SARSA updates — confirm intended title.
        self.title('Q Learning')
        # NOTE(review): HEIGHT is used for both dimensions; harmless here
        # because the grid is square, but the width should use WIDTH * UNIT.
        self.geometry('{0}x{1}'.format(HEIGHT * UNIT, HEIGHT * UNIT))
        self.shapes = self.load_images()
        self.canvas = self._build_canvas()
        self.texts = []  # canvas handles of the per-cell Q-value labels

    def _build_canvas(self):
        """Create the canvas, draw the grid lines and place the sprites."""
        canvas = tk.Canvas(self, bg='white',
                           height=HEIGHT * UNIT,
                           width=WIDTH * UNIT)
        # create grids
        for c in range(0, WIDTH * UNIT, UNIT):  # 0~400 by 100
            x0, y0, x1, y1 = c, 0, c, HEIGHT * UNIT
            canvas.create_line(x0, y0, x1, y1)
        for r in range(0, HEIGHT * UNIT, UNIT):  # 0~400 by 100
            x0, y0, x1, y1 = 0, r, HEIGHT * UNIT, r
            canvas.create_line(x0, y0, x1, y1)
        # Load the sprites into the environment (coordinates are cell centres).
        self.rectangle = canvas.create_image(50, 50, image=self.shapes[0])
        self.tree1 = canvas.create_image(250, 150, image=self.shapes[1])
        self.tree2 = canvas.create_image(150, 250, image=self.shapes[1])
        self.star = canvas.create_image(250, 250, image=self.shapes[2])
        # Lay the canvas out inside the window.
        canvas.pack()
        return canvas

    def load_images(self):
        """Load and resize the three sprite images (agent, tree, star)."""
        rectangle = PhotoImage(
            Image.open("img/bob.png").resize((65, 65)))
        tree = PhotoImage(
            Image.open("img/tree.png").resize((65, 65)))
        star = PhotoImage(
            Image.open("img/star.jpg").resize((65, 65)))
        return rectangle, tree, star

    def text_value(self, row, col, contents, action, font='Helvetica', size=10,
                   style='normal', anchor="nw"):
        """Draw one Q-value label inside cell (row, col).

        The (origin_x, origin_y) offsets place the label near the edge of
        the cell corresponding to the given action (0=up, 1=down, 2=left,
        3=right).
        """
        if action == 0:
            origin_x, origin_y = 7, 42
        elif action == 1:
            origin_x, origin_y = 85, 42
        elif action == 2:
            origin_x, origin_y = 42, 5
        else:
            origin_x, origin_y = 42, 77
        x, y = origin_y + (UNIT * col), origin_x + (UNIT * row)
        font = (font, str(size), style)
        text = self.canvas.create_text(x, y, fill="black", text=contents,
                                       font=font, anchor=anchor)
        # create_text returns a handle; append() returns None, so this
        # method effectively returns None while recording the handle.
        return self.texts.append(text)

    def print_value_all(self, q_table):
        """Redraw every known Q-value from q_table onto the grid."""
        for i in self.texts:
            self.canvas.delete(i)
        self.texts.clear()
        for i in range(HEIGHT):
            for j in range(WIDTH):
                for action in range(0, 4):
                    state = [i, j]
                    if str(state) in q_table.keys():
                        temp = q_table[str(state)][action]
                        self.text_value(j, i, round(temp, 2), action)

    def coords_to_state(self, coords):
        """Convert pixel coordinates (cell centres) to [col, row] indices."""
        x = int((coords[0] - 50) / 100)
        y = int((coords[1] - 50) / 100)
        return [x, y]

    def state_to_coords(self, state):
        """Convert [col, row] indices back to pixel coordinates."""
        x = int(state[0] * 100 + 50)
        y = int(state[1] * 100 + 50)
        return [x, y]

    def reset(self):
        """Move the agent back to the start cell and return its state."""
        self.update()
        time.sleep(0.5)
        x, y = self.canvas.coords(self.rectangle)
        self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)
        self.render()
        # return observation
        return self.coords_to_state(self.canvas.coords(self.rectangle))

    def step(self, action):
        """Apply one action; return (next_state, reward, done)."""
        state = self.canvas.coords(self.rectangle)
        base_action = np.array([0, 0])
        self.render()
        # Clamp movement at the grid borders.
        if action == 0:  # up
            if state[1] > UNIT:
                base_action[1] -= UNIT
        elif action == 1:  # down
            if state[1] < (HEIGHT - 1) * UNIT:
                base_action[1] += UNIT
        elif action == 2:  # left
            if state[0] > UNIT:
                base_action[0] -= UNIT
        elif action == 3:  # right
            if state[0] < (WIDTH - 1) * UNIT:
                base_action[0] += UNIT
        # Move the agent sprite.
        self.canvas.move(self.rectangle, base_action[0], base_action[1])
        self.canvas.tag_raise(self.rectangle)
        next_state = self.canvas.coords(self.rectangle)
        # Reward conditions: goal +100, tree -100, otherwise 0.
        if next_state == self.canvas.coords(self.star):
            reward = 100
            done = True
        elif next_state in [self.canvas.coords(self.tree1),
                            self.canvas.coords(self.tree2)]:
            reward = -100
            done = True
        else:
            reward = 0
            done = False
        next_state = self.coords_to_state(next_state)
        return next_state, reward, done

    # Render the environment (slowed down so movement is visible).
    def render(self):
        time.sleep(0.03)
        self.update()
# +
import numpy as np
import random
from collections import defaultdict
class QLearningAgent:
    """Tabular on-policy agent for the grid world.

    NOTE(review): despite the class name, learn() implements the SARSA
    update — it bootstraps from the Q-value of the actually chosen next
    action rather than the max over actions. This matches the notebook's
    SARSA title, so only the name is misleading.
    """

    def __init__(self, actions):
        # The four actions are represented by the index list [0, 1, 2, 3].
        self.actions = actions
        self.learning_rate = 0.01
        self.discount_factor = 0.9
        # epsilon for the epsilon-greedy policy
        self.epsilon = 0.1
        # Q-table: state string -> one Q-value per action, defaulting to 0.
        self.q_table = defaultdict(lambda: [0.0, 0.0, 0.0, 0.0])

    # Learn from one sampled transition <s, a, r, a', s'>.
    def learn(self, state, action, reward,next_action,next_state):
        current_q = self.q_table[state][action]
        # SARSA target: r + gamma * Q(s', a') for the action actually taken.
        new_q = reward + self.discount_factor * (self.q_table[next_state][next_action])
        self.q_table[state][action] += self.learning_rate * (new_q - current_q)

    # Choose an action for the given state (epsilon-greedy over the Q-table).
    def get_action(self, state):
        if np.random.rand() < self.epsilon:
            # Explore: pick a uniformly random action.
            action = np.random.choice(self.actions)
        else:
            # Exploit: pick a greedy action from the Q-table.
            state_action = self.q_table[state]
            action = self.arg_max(state_action)
        return action

    @staticmethod
    def arg_max(state_action):
        # Index of the maximum value, breaking ties uniformly at random
        # among all maximising indices.
        max_index_list = []
        max_value = state_action[0]
        for index, value in enumerate(state_action):
            if value > max_value:
                max_index_list.clear()
                max_value = value
                max_index_list.append(index)
            elif value == max_value:
                max_index_list.append(index)
        return random.choice(max_index_list)
# -
env = Env()
agent = QLearningAgent(actions=list(range(env.n_actions)))
# Run 200 training episodes of SARSA.
for episode in range(200):
    state = env.reset()
    action = agent.get_action(str(state))
    while True:
        env.render()
        # Take the action; observe the new state and the reward.
        next_state, reward, done = env.step(action)
        # Choose the next action from the NEW state (on-policy SARSA).
        # BUG FIX: this previously sampled from str(state) — the state the
        # agent had already left — which corrupts the SARSA update target.
        next_action = agent.get_action(str(next_state))
        # SARSA update: bootstraps from the Q-value of the actually chosen
        # next action (unlike Q-learning, which would use the max over actions).
        agent.learn(str(state), action, reward, next_action, str(next_state))
        state = next_state
        action = next_action
        env.print_value_all(agent.q_table)
        # The episode ends on reaching the goal or hitting a tree.
        if done:
            break
| wx_reading/reinforcelearning/pytorch-15-02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Objective
# * Predict stock price in next day using long short term memory(LSTM)
# * Given prices for the last N days, we do prediction for day N+1
# * Here we split 3 years of data into train(60%), dev(20%) and test(20%)
# This is an example snippet!
# To create your own, add a new snippet block to the
# snippets.json file in your jupyter nbextensions directory:
# /nbextensions/snippets/snippets.json
import this
# This is an example snippet!
# To create your own, add a new snippet block to the
# snippets.json file in your jupyter nbextensions directory:
# /nbextensions/snippets/snippets.json
import this
# +
# %matplotlib inline
import math
import matplotlib
import numpy as np
import pandas as pd
import seaborn as sns
import time
import tensorflow
from datetime import date
from matplotlib import pyplot as plt
from numpy.random import seed
from pylab import rcParams
from sklearn.metrics import mean_squared_error
from tqdm import tqdm_notebook
from sklearn.preprocessing import StandardScaler
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.utils import plot_model
#### Input params ##################
stk_path = "D:/Power BI/ABIN.csv"
test_size = 0.2 # proportion of dataset to be used as test set
cv_size = 0.2 # proportion of dataset to be used as cross-validation set
N = 9 # for feature at day t, we use lags from t-1, t-2, ..., t-N as features.
# initial value before tuning
lstm_units=50 # lstm param. initial value before tuning.
# NOTE(review): Keras Dropout treats this as the fraction of units to DROP;
# a rate of 1 zeroes the layer's output during training (and newer Keras
# versions reject it). If this was meant as a keep-probability, a starting
# value such as 0.5 is conventional -- confirm.
dropout_prob=1 # lstm param. initial value before tuning.
optimizer='adam' # lstm param. initial value before tuning.
epochs=1 # lstm param. initial value before tuning.
batch_size=1 # lstm param. initial value before tuning.
model_seed = 100
fontsize = 14
ticklabelsize = 14
####################################
# Set seeds to ensure same output results
seed(101)
tensorflow.random.set_seed(model_seed)
# -
# # Common functions
# +
def get_mape(y_true, y_pred):
    """
    Return the mean absolute percentage error (MAPE), in percent,
    between the actual values *y_true* and the predictions *y_pred*.
    """
    actual, predicted = np.array(y_true), np.array(y_pred)
    relative_errors = np.abs((actual - predicted) / actual)
    return relative_errors.mean() * 100
def get_x_y(data, N, offset):
    """
    Build supervised-learning pairs from a sequence: each target y[k]
    is data[i], and its features x[k] are the N values preceding it.
    Targets start at index *offset*.
    """
    indices = range(offset, len(data))
    windows = [data[i - N:i] for i in indices]
    targets = [data[i] for i in indices]
    return np.array(windows), np.array(targets)
def get_x_scaled_y(data, N, offset):
    """
    Build supervised pairs like get_x_y, but standardize each feature
    window by its own mean and std dev. The target y is NOT scaled.

    Inputs
        data   : array to extract x and y from
        N      : window length (number of lags)
        offset : index of the first target
    Outputs
        x_scaled : windows scaled to zero mean / unit std (per window)
        y        : unscaled target values
        mu_list  : per-window means (same length as x_scaled and y)
        std_list : per-window std devs (same length as x_scaled and y)
    """
    x_scaled, y, mu_list, std_list = [], [], [], []
    for i in range(offset, len(data)):
        window = data[i - N:i]
        mu = np.mean(window)
        # NOTE: a constant window gives std == 0 and a divide-by-zero here.
        sigma = np.std(window)
        mu_list.append(mu)
        std_list.append(sigma)
        x_scaled.append((window - mu) / sigma)
        y.append(data[i])
    return np.array(x_scaled), np.array(y), mu_list, std_list
def train_pred_eval_model(x_train_scaled,
                          y_train_scaled,
                          x_cv_scaled,
                          y_cv,
                          mu_cv_list,
                          std_cv_list,
                          lstm_units=50,
                          dropout_prob=0.5,
                          optimizer='adam',
                          epochs=1,
                          batch_size=1):
    '''
    Train an LSTM, predict on the validation windows, scale the
    predictions back to the original range, and evaluate.

    Inputs
        x_train_scaled : training windows, e.g. shape (451, 9, 1) --
                         the past 9 values predict the next value
        y_train_scaled : training targets, e.g. shape (451, 1)
        x_cv_scaled    : validation windows to predict on
        y_cv           : actual values for those predictions
        mu_cv_list     : per-window means (same length as x_cv_scaled and y_cv)
        std_cv_list    : per-window std devs (same length as x_cv_scaled and y_cv)
        lstm_units, dropout_prob, optimizer, epochs, batch_size : LSTM params
    Outputs
        rmse : root mean square error
        mape : mean absolute percentage error (percent)
        est  : predictions, in the original (unscaled) range
    '''
    # Two stacked LSTM layers, each followed by dropout at rate
    # `dropout_prob`, then a single-unit dense output.
    model = Sequential()
    model.add(LSTM(units=lstm_units, return_sequences=True,
                   input_shape=(x_train_scaled.shape[1], 1)))
    model.add(Dropout(dropout_prob))
    model.add(LSTM(units=lstm_units))
    model.add(Dropout(dropout_prob))
    model.add(Dense(1))

    # Fit silently on the scaled training data.
    model.compile(loss='mean_squared_error', optimizer=optimizer)
    model.fit(x_train_scaled, y_train_scaled, epochs=epochs,
              batch_size=batch_size, verbose=0)

    # Predict in scaled space, then undo each window's standardization.
    est_scaled = model.predict(x_cv_scaled)
    est = (est_scaled * np.array(std_cv_list).reshape(-1, 1)) \
        + np.array(mu_cv_list).reshape(-1, 1)

    # Score against the unscaled actuals.
    rmse = math.sqrt(mean_squared_error(y_cv, est))
    mape = get_mape(y_cv, est)
    return rmse, mape, est
# -
# # Load data
# +
df = pd.read_csv(stk_path, sep = ",")
# Convert Date column to datetime
df.loc[:, 'Date'] = pd.to_datetime(df['Date'],format='%Y-%m-%d')
# Change all column headings to be lower case, and remove spacing
# (so 'Date' and 'Adj Close' become 'date' and 'adj_close' below).
df.columns = [str(x).lower().replace(' ', '_') for x in df.columns]
# Get month of each sample
df['month'] = df['date'].dt.month
# Sort by datetime
df.sort_values(by='date', inplace=True, ascending=True)
df.head()
# +
# Plot adjusted close over time
rcParams['figure.figsize'] = 10, 8 # width 10, height 8
ax = df.plot(x='date', y='adj_close', style='b-', grid=True)
ax.set_xlabel("date")
ax.set_ylabel("USD")
# -
# # Split into train, dev and test sets
# We use lags of up to N days as features.
# +
# Get sizes of each of the datasets
num_cv = int(cv_size*len(df))
num_test = int(test_size*len(df))
num_train = len(df) - num_cv - num_test
print("num_train = " + str(num_train))
print("num_cv = " + str(num_cv))
print("num_test = " + str(num_test))
# Split into train, cv, and test
train = df[:num_train][['date', 'adj_close']]
cv = df[num_train:num_train+num_cv][['date', 'adj_close']]
train_cv = df[:num_train+num_cv][['date', 'adj_close']]
test = df[num_train+num_cv:][['date', 'adj_close']]
print("train.shape = " + str(train.shape))
print("cv.shape = " + str(cv.shape))
print("train_cv.shape = " + str(train_cv.shape))
print("test.shape = " + str(test.shape))
# +
# Converting dataset into x_train and y_train
# Here we only scale the train dataset, and not the entire dataset to prevent information leak
scaler = StandardScaler()
train_scaled = scaler.fit_transform(np.array(train['adj_close']).reshape(-1,1))
print("scaler.mean_ = " + str(scaler.mean_))
print("scaler.var_ = " + str(scaler.var_))
# Split into x and y
x_train_scaled, y_train_scaled = get_x_y(train_scaled, N, N)
print("x_train_scaled.shape = " + str(x_train_scaled.shape)) # (446, 7, 1)
print("y_train_scaled.shape = " + str(y_train_scaled.shape)) # (446, 1)
# -
# Scale the cv dataset
# Split into x and y
x_cv_scaled, y_cv, mu_cv_list, std_cv_list = get_x_scaled_y(np.array(train_cv['adj_close']).reshape(-1,1), N, num_train)
print("x_cv_scaled.shape = " + str(x_cv_scaled.shape))
print("y_cv.shape = " + str(y_cv.shape))
print("len(mu_cv_list) = " + str(len(mu_cv_list)))
print("len(std_cv_list) = " + str(len(std_cv_list)))
# +
# Here we scale the train_cv set, for the final model
scaler_final = StandardScaler()
train_cv_scaled_final = scaler_final.fit_transform(np.array(train_cv['adj_close']).reshape(-1,1))
print("scaler_final.mean_ = " + str(scaler_final.mean_))
print("scaler_final.var_ = " + str(scaler_final.var_))
# # Scale the test dataset
# x_test_scaled, y_test, mu_test_list, std_test_list = get_x_scaled_y(np.array(df['adj_close']).reshape(-1,1), N, num_train+num_cv)
# print("x_test_scaled.shape = " + str(x_test_scaled.shape))
# print("y_test.shape = " + str(y_test.shape))
# print("len(mu_test_list) = " + str(len(mu_test_list)))
# print("len(std_test_list) = " + str(len(std_test_list)))
# -
# # Create and fit the LSTM network
# +
# Create the LSTM network
model = Sequential()
model.add(LSTM(units=lstm_units, return_sequences=True, input_shape=(x_train_scaled.shape[1],1)))
model.add(Dropout(dropout_prob)) # Add dropout with a probability of 0.5
model.add(LSTM(units=lstm_units))
model.add(Dropout(dropout_prob)) # Add dropout with a probability of 0.5
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer=optimizer)
model.fit(x_train_scaled, y_train_scaled, epochs=epochs, batch_size=batch_size, verbose=2)
# -
# Print model summary
model.summary()
# +
# Plot model and save to file
from IPython.display import SVG
from keras.utils import plot_model
from keras.utils.vis_utils import model_to_dot
plot_model(model, to_file='model_v4a.png')
SVG(model_to_dot(model).create(prog='dot', format='svg'))
# -
# # Predict on dev set
# +
# Do prediction
est_scaled = model.predict(x_cv_scaled)
est = (est_scaled * np.array(std_cv_list).reshape(-1,1)) + np.array(mu_cv_list).reshape(-1,1)
print("est.shape = " + str(est.shape))
# Calculate RMSE
rmse_bef_tuning = math.sqrt(mean_squared_error(y_cv, est))
print("RMSE = %0.3f" % rmse_bef_tuning)
# Calculate MAPE
mape_pct_bef_tuning = get_mape(y_cv, est)
print("MAPE = %0.3f%%" % mape_pct_bef_tuning)
# +
# Plot adjusted close over time
rcParams['figure.figsize'] = 10, 8 # width 10, height 8
est_df = pd.DataFrame({'est': est.reshape(-1),
'y_cv': y_cv.reshape(-1),
'date': cv['date']})
ax = train.plot(x='date', y='adj_close', style='b-', grid=True)
ax = cv.plot(x='date', y='adj_close', style='y-', grid=True, ax=ax)
ax = test.plot(x='date', y='adj_close', style='g-', grid=True, ax=ax)
ax = est_df.plot(x='date', y='est', style='r-', grid=True, ax=ax)
ax.legend(['train', 'dev', 'test', 'est'])
ax.set_xlabel("date")
ax.set_ylabel("USD")
# -
# # Tuning N (no. of days to use as features)
# +
param_label = 'N'
param_list = range(3, 60)
error_rate = {param_label: [], 'rmse': [], 'mape_pct': []}
tic = time.time()
for param in tqdm_notebook(param_list):
# Split train into x and y
x_train_scaled, y_train_scaled = get_x_y(train_scaled, param, param)
# Split cv into x and y
x_cv_scaled, y_cv, mu_cv_list, std_cv_list = get_x_scaled_y(np.array(train_cv['adj_close']).reshape(-1,1), param, num_train)
# Train, predict and eval model
rmse, mape, _ = train_pred_eval_model(x_train_scaled, \
y_train_scaled, \
x_cv_scaled, \
y_cv, \
mu_cv_list, \
std_cv_list, \
lstm_units=lstm_units, \
dropout_prob=dropout_prob, \
optimizer=optimizer, \
epochs=epochs, \
batch_size=batch_size)
# Collect results
error_rate[param_label].append(param)
error_rate['rmse'].append(rmse)
error_rate['mape_pct'].append(mape)
error_rate = pd.DataFrame(error_rate)
toc = time.time()
print("Minutes taken = " + str((toc-tic)/60.0))
error_rate
# +
# Plot RMSE
rcParams['figure.figsize'] = 10, 8 # width 10, height 8
ax = error_rate.plot(x='N', y='rmse', style='bx-', grid=True)
ax = error_rate.plot(x='N', y='mape_pct', style='rx-', grid=True, ax=ax)
ax.set_xlabel("N")
ax.set_ylabel("RMSE/MAPE(%)")
# -
# Get optimum value for param
temp = error_rate[error_rate['rmse'] == error_rate['rmse'].min()]
N_opt = temp['N'].values[0]
print("min RMSE = %0.3f" % error_rate['rmse'].min())
print("min MAPE = %0.3f%%" % error_rate['mape_pct'].min())
print("optimum " + param_label + " = " + str(N_opt))
# # Tuning model - epochs and batch_size
# +
param_label = 'epochs'
param_list = [1, 10, 20, 30, 40, 50]
param2_label = 'batch_size'
param2_list = [8, 16, 32, 64, 128]
# Split train into x and y
x_train_scaled, y_train_scaled = get_x_y(train_scaled, N_opt, N_opt)
# Split cv into x and y
x_cv_scaled, y_cv, mu_cv_list, std_cv_list = get_x_scaled_y(np.array(train_cv['adj_close']).reshape(-1,1), N_opt, num_train)
error_rate = {param_label: [], param2_label: [], 'rmse': [], 'mape_pct': []}
tic = time.time()
for param in tqdm_notebook(param_list):
for param2 in tqdm_notebook(param2_list):
# Train, predict and eval model
rmse, mape, _ = train_pred_eval_model(x_train_scaled, \
y_train_scaled, \
x_cv_scaled, \
y_cv, \
mu_cv_list, \
std_cv_list, \
lstm_units=lstm_units, \
dropout_prob=dropout_prob, \
optimizer=optimizer, \
epochs=param, \
batch_size=param2)
# Collect results
error_rate[param_label].append(param)
error_rate[param2_label].append(param2)
error_rate['rmse'].append(rmse)
error_rate['mape_pct'].append(mape)
error_rate = pd.DataFrame(error_rate)
toc = time.time()
print("Minutes taken = " + str((toc-tic)/60.0))
error_rate
# +
# Plot performance versus params
rcParams['figure.figsize'] = 10, 8 # width 10, height 8
temp = error_rate[error_rate[param2_label]==param2_list[0]]
ax = temp.plot(x=param_label, y='rmse', style='bs-', grid=True)
legend_list = [param2_label + '_' + str(param2_list[0])]
color_list = ['r', 'g', 'k', 'y', 'm', 'c', '0.75']
for i in range(1,len(param2_list)):
temp = error_rate[error_rate[param2_label]==param2_list[i]]
ax = temp.plot(x=param_label, y='rmse', color=color_list[i%len(color_list)], marker='s', grid=True, ax=ax)
legend_list.append(param2_label + '_' + str(param2_list[i]))
ax.set_xlabel(param_label)
ax.set_ylabel("RMSE")
matplotlib.rcParams.update({'font.size': 14})
plt.legend(legend_list, loc='center left', bbox_to_anchor=(1.0, 0.5)) # positions legend outside figure
# ax.set_xlim([10, 50])
# ax.set_ylim([0, 5])
# -
# Get optimum value for param and param2
temp = error_rate[error_rate['rmse'] == error_rate['rmse'].min()]
epochs_opt = temp[param_label].values[0]
batch_size_opt = temp[param2_label].values[0]
print("min RMSE = %0.3f" % error_rate['rmse'].min())
print("min MAPE = %0.3f%%" % error_rate['mape_pct'].min())
print("optimum " + param_label + " = " + str(epochs_opt))
print("optimum " + param2_label + " = " + str(batch_size_opt))
# # Tuning model - LSTM units and dropout prob
# +
param_label = 'lstm_units'
param_list = [10, 50, 64, 128]
param2_label = 'dropout_prob'
param2_list = [0.5, 0.6, 0.7, 0.8, 0.9, 1]
error_rate = {param_label: [], param2_label: [], 'rmse': [], 'mape_pct': []}
tic = time.time()
for param in tqdm_notebook(param_list):
for param2 in tqdm_notebook(param2_list):
# Train, predict and eval model
rmse, mape, _ = train_pred_eval_model(x_train_scaled, \
y_train_scaled, \
x_cv_scaled, \
y_cv, \
mu_cv_list, \
std_cv_list, \
lstm_units=param, \
dropout_prob=param2, \
optimizer=optimizer, \
epochs=epochs_opt, \
batch_size=batch_size_opt)
# Collect results
error_rate[param_label].append(param)
error_rate[param2_label].append(param2)
error_rate['rmse'].append(rmse)
error_rate['mape_pct'].append(mape)
error_rate = pd.DataFrame(error_rate)
toc = time.time()
print("Minutes taken = " + str((toc-tic)/60.0))
error_rate
# +
# Plot performance versus params
rcParams['figure.figsize'] = 10, 8 # width 10, height 8
temp = error_rate[error_rate[param2_label]==param2_list[0]]
ax = temp.plot(x=param_label, y='rmse', style='bs-', grid=True)
legend_list = [param2_label + '_' + str(param2_list[0])]
color_list = ['r', 'g', 'k', 'y', 'm', 'c', '0.75']
for i in range(1,len(param2_list)):
temp = error_rate[error_rate[param2_label]==param2_list[i]]
ax = temp.plot(x=param_label, y='rmse', color=color_list[i%len(color_list)], marker='s', grid=True, ax=ax)
legend_list.append(param2_label + '_' + str(param2_list[i]))
ax.set_xlabel(param_label)
ax.set_ylabel("RMSE")
matplotlib.rcParams.update({'font.size': 14})
plt.legend(legend_list, loc='center left', bbox_to_anchor=(1.0, 0.5)) # positions legend outside figure
# -
# Get optimum value for param and param2
temp = error_rate[error_rate['rmse'] == error_rate['rmse'].min()]
lstm_units_opt = temp[param_label].values[0]
dropout_prob_opt = temp[param2_label].values[0]
print("min RMSE = %0.3f" % error_rate['rmse'].min())
print("min MAPE = %0.3f%%" % error_rate['mape_pct'].min())
print("optimum " + param_label + " = " + str(lstm_units_opt))
print("optimum " + param2_label + " = " + str(dropout_prob_opt))
# # Tuning model - optimizer
# +
param_label = 'optimizer'
param_list = ['adam', 'sgd', 'rmsprop', 'adagrad', 'adadelta', 'adamax', 'nadam']
error_rate = {param_label: [], 'rmse': [], 'mape_pct': []}
tic = time.time()
for param in tqdm_notebook(param_list):
# Train, predict and eval model
rmse, mape, _ = train_pred_eval_model(x_train_scaled, \
y_train_scaled, \
x_cv_scaled, \
y_cv, \
mu_cv_list, \
std_cv_list, \
lstm_units=lstm_units_opt, \
dropout_prob=dropout_prob_opt, \
optimizer=param, \
epochs=epochs_opt, \
batch_size=batch_size_opt)
# Collect results
error_rate[param_label].append(param)
error_rate['rmse'].append(rmse)
error_rate['mape_pct'].append(mape)
error_rate = pd.DataFrame(error_rate)
toc = time.time()
print("Minutes taken = " + str((toc-tic)/60.0))
error_rate
# +
# Plot RMSE
rcParams['figure.figsize'] = 10, 8 # width 10, height 8
ax = error_rate.plot(x='optimizer', y='rmse', style='bx-', grid=True)
ax = error_rate.plot(x='optimizer', y='mape_pct', style='rx-', grid=True, ax=ax)
ax.set_xticklabels(param_list)
ax.set_xlabel("Optimizer")
ax.set_ylabel("RMSE/MAPE(%)")
# -
# Get optimum value for param and param2
temp = error_rate[error_rate['rmse'] == error_rate['rmse'].min()]
optimizer_opt = temp[param_label].values[0]
print("min RMSE = %0.3f" % error_rate['rmse'].min())
print("min MAPE = %0.3f%%" % error_rate['mape_pct'].min())
print("optimum " + param_label + " = " + str(optimizer_opt))
# # Tuned params
d = {'param': ['N', 'lstm_units', 'dropout_prob', 'optimizer', 'epochs', 'batch_size', 'rmse', 'mape_pct'],
'original': [N, lstm_units, dropout_prob, optimizer, epochs, batch_size, rmse_bef_tuning, mape_pct_bef_tuning],
'after_tuning': [N_opt, lstm_units_opt, dropout_prob_opt, optimizer_opt, epochs_opt, batch_size_opt, error_rate['rmse'].min(), error_rate['mape_pct'].min()]}
tuned_params = pd.DataFrame(d)
tuned_params
# # Final model
# +
# Split train_cv into x and y
x_train_cv_scaled, y_train_cv_scaled = get_x_y(train_cv_scaled_final, N_opt, N_opt)
# Split test into x and y
x_test_scaled, y_test, mu_test_list, std_test_list = get_x_scaled_y(np.array(df['adj_close']).reshape(-1,1), N_opt, num_train+num_cv)
# Train, predict and eval model
rmse, mape, est = train_pred_eval_model(x_train_cv_scaled, \
y_train_cv_scaled, \
x_test_scaled, \
y_test, \
mu_test_list, \
std_test_list, \
lstm_units=lstm_units_opt, \
dropout_prob=dropout_prob_opt, \
optimizer=optimizer_opt, \
epochs=epochs_opt, \
batch_size=batch_size_opt)
# Calculate RMSE
print("RMSE on test set = %0.3f" % rmse)
# Calculate MAPE
print("MAPE on test set = %0.3f%%" % mape)
# +
# Plot adjusted close over time
rcParams['figure.figsize'] = 16, 8 # width 16, height 8
est_df = pd.DataFrame({'est': est.reshape(-1),
                       'date': df[num_train+num_cv:]['date']})
ax = train.plot(x='date', y='adj_close', style='b-', grid=True)
ax = cv.plot(x='date', y='adj_close', style='y-', grid=True, ax=ax)
ax = test.plot(x='date', y='adj_close', style='g-', grid=True, ax=ax)
ax = est_df.plot(x='date', y='est', style='r-', grid=True, ax=ax)
ax.legend(['train', 'dev', 'test', 'predictions'])
ax.set_xlabel("date")
ax.set_ylabel("USD")
# -
| ts_lstm_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Samsomyajit/GlobalTerrorism_EDA/blob/main/EDA_Global_Terrorism.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="VajB2IYaB6cZ"
#
# # Exploratory Data Analysis(EDA) on Global Terrorism
# ### By <NAME>
# GRIP Task - 4
# + colab={"base_uri": "https://localhost:8080/"} id="7nQuXiRUFEYB" outputId="3ded19a3-d102-4cff-fa66-3bc4c635dcd0"
from google.colab import drive
drive.mount('/content/drive')
# + id="3Xid_7BHB9Iq"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
# + colab={"base_uri": "https://localhost:8080/"} id="5eAIXaQ2l4rn" outputId="096ecef5-c354-42d6-990e-a0fa06f4082e"
df = pd.read_csv("/content/drive/MyDrive/globalterrorismdb_0718dist.csv",encoding='latin1')
# + colab={"base_uri": "https://localhost:8080/"} id="riZL0WRxD-g-" outputId="3037228f-e3ca-4b2b-9899-6b32337304da"
df.shape
# + id="ob942Ua4aypp"
df.rename(columns={'iyear':'year','imonth':'month','iday':'day','country_txt':'Country','region_txt':'Region','provstate':'State','natlty1_txt':'nationality_of_target','weaptype1_txt':'WeaponType','nkill':'killed','nwound':'wounded'},inplace=True)
# + id="SyW90SLobx6d"
df=df[['year','month','day','Country','Region','city','extended','latitude','longitude','vicinity','suicide','success','State','nationality_of_target','WeaponType','killed','wounded']]
# + colab={"base_uri": "https://localhost:8080/", "height": 212} id="xs3Ianyfb6gR" outputId="c5926d23-e2fa-4c6a-ad2b-d197b85d727f"
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="3bPtCnEdb-LF" outputId="dbe0c7f0-7f1c-4efd-ff8f-4393b9ead5af"
df.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 373} id="yOogFYgMcGKg" outputId="c3dc9b98-9e95-4baf-c378-11442e8283a0"
df.describe().T
# + colab={"base_uri": "https://localhost:8080/"} id="_7dtxXtLcHqF" outputId="7f7c416d-5779-4160-f5d1-860a7281886e"
df.isnull().sum()
# + colab={"base_uri": "https://localhost:8080/"} id="IBXsZj0acMP8" outputId="7ab8dd4b-b21d-4ebe-a866-35332f2a3bf2"
df['year'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="CCGuUDIwcUXq" outputId="f5a89e16-6d68-4cfa-e966-5a9b3e7fdadb"
df['month'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="hWlXIeItcVxs" outputId="30e5e0ce-da01-405e-d76d-1a02c4530f52"
df['day'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="Nke_rcv2caCW" outputId="0b443bbd-7288-466e-d0d3-fd0a479c4fd1"
# NOTE(review): duplicate of the previous cell (also df['day'].value_counts());
# possibly another column such as 'Country' was intended -- confirm.
df['day'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="DFoBCqv5ceoC" outputId="dfda34ca-43b1-47a1-c6bb-0381b985cfb2"
df['Country'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="svWs0zOccl24" outputId="92a6e0c1-7c52-43cf-c582-fc2923b85bee"
df['nationality_of_target'].value_counts()
# + [markdown] id="-tEE9GQCf1mD"
# # Killings in each Region
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="dN54JOVhcvjM" outputId="207d0693-3eda-4f03-9e33-951ed0f3a281"
killings_per_region=df.groupby(['Region'],as_index=False)['killed']
killings_per_region.sum()
# + [markdown] id="bHS71E_KgArC"
# # Maximum Killings in each region
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="cCwi_Nygc0Ew" outputId="31fc0c8e-2a49-44bb-93bc-ea18100ea7cd"
killings_per_region.max()
# + [markdown] id="3zDcQzOBgQNc"
# # Total Kills
# + colab={"base_uri": "https://localhost:8080/"} id="4js3yxbdc16s" outputId="d291b514-cdea-4c9e-d869-9083e2ddec1f"
df['killed'].sum()
# + [markdown] id="ZPZzNYH_gWIn"
# # Total wounded cases
#
# + colab={"base_uri": "https://localhost:8080/"} id="F8zyDma9c6ho" outputId="4325a168-a041-4079-8a6e-6457ec16fa54"
df['wounded'].sum()
# + [markdown] id="M6_VHfE4gbMT"
# # Total Casualities
# + colab={"base_uri": "https://localhost:8080/"} id="Ihxyi7VJc-h-" outputId="17675f16-4588-4e58-cffe-e7ac880be242"
df['casualities']=df['killed']+df['wounded']
df['casualities'].sum()
# + [markdown] id="XDOIRpdBggWg"
# # Total casualities by Region
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="1HY5AIIHdBz0" outputId="c7c1919a-c7e3-4cf7-b1e2-5f9818d30fe0"
df1=df.groupby('Region')['casualities'].sum()
df2=df1.to_frame()
df2.reset_index()
# + [markdown] id="nX8lAAnRgqLN"
# # Spatio-Temporal Casualty Count
# + colab={"base_uri": "https://localhost:8080/"} id="_qFDmzPKdIQN" outputId="8b545fa8-2bf1-46d5-df95-577d23fd2e18"
region_and_yearwise_casualitycount=df.groupby(['Region', 'year'])['casualities'].count().head()
region_and_yearwise_casualitycount
# + [markdown] id="vs7AGNkcg1JW"
# Visualizing the year- and region-wise casualty count
#
# + colab={"base_uri": "https://localhost:8080/", "height": 654} id="vzfhLj5jdJVR" outputId="3897c03b-3c0b-4d37-e01f-8f103c0a1b0c"
plt.subplots(figsize=(18,8))
df.groupby(['Region', 'year']).count()['casualities'].plot()
plt.xticks(rotation=90)
plt.ylabel("Casualities")
# + [markdown] id="nhBarlT9g9YQ"
# # Top 30 worst terrorist attacks
# + id="sRcTY0BkdMSw"
df1=df.sort_values(by='casualities',ascending=False)[:30]
worst=df1.pivot_table(index='Country',columns='year',values='casualities')
worst.fillna(0,inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 225} id="ryIGCAH9dQ2Q" outputId="b4f9438c-e8c9-47d0-f77f-b82fc9060112"
worst.head()
# + [markdown] id="Wb9hJwk1hJLJ"
# # Null Values Check
# + colab={"base_uri": "https://localhost:8080/", "height": 376} id="eJyPF7N9dUhN" outputId="861bb923-7bfc-4265-c126-3f3763d44e2f"
sns.heatmap(df.isnull() , yticklabels=False , cbar=False , cmap='viridis')
# + colab={"base_uri": "https://localhost:8080/"} id="FnEARzc5dZY_" outputId="291d833f-81a8-4876-8ed3-a59e59aaca88"
missing=df[df.isnull().any(axis=1)]
missing.shape
# + colab={"base_uri": "https://localhost:8080/"} id="v066DbaBddMH" outputId="07ef66c3-dc12-4d6b-e604-b859b2ca9736"
df.shape
# + [markdown] id="p7E8ZJzRhcE1"
# # Terrorist Attacks Summary
# + colab={"base_uri": "https://localhost:8080/"} id="W1oGqDi4dkC8" outputId="7dda584b-eb0b-496b-91d8-e0d9420c0deb"
print('Country with Highest Terrorist Attacks:',df['Country'].value_counts().index[0])
print('Regions with Highest Terrorist Attacks:',df['Region'].value_counts().index[0])
print('Maximum people killed in an attack are:',df['killed'].max(),'that took place in',df.loc[df['killed'].idxmax()].Country)
print("Nationality of the maximally targetted group is:", df['nationality_of_target'].value_counts().index[0])
# + [markdown] id="AUOokP14hi7u"
# # Total weapons used in these attacks
# + colab={"base_uri": "https://localhost:8080/", "height": 704} id="s4F8CsP0dk_7" outputId="425f1112-f403-4e00-948b-c2f769265c2b"
plt.subplots(figsize=(15,4))
sns.countplot('WeaponType',data=df,palette='inferno',order=df['WeaponType'].value_counts().index)
plt.xticks(rotation=90)
plt.xlabel('Weapon Type')
plt.title('Weapons used by Terrorists')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 416} id="vnB4w7nJdpRx" outputId="ca26a646-9d20-4ef9-f9f1-8a913075637b"
sns.set_style('whitegrid')
print(df.suicide.value_counts())
sns.countplot('suicide',data=df).set_title('Suicide Outcome')
# + colab={"base_uri": "https://localhost:8080/", "height": 416} id="NtXuKt5bdukV" outputId="aa41edfd-09d2-4d32-ccf0-3fe6e4afdd1f"
sns.set_style('whitegrid')
print(df.success.value_counts())
sns.countplot('success',data=df).set_title('Success Outcome')
# + colab={"base_uri": "https://localhost:8080/", "height": 416} id="tD3SEYQxd1gp" outputId="a5e32f9a-caea-410a-f0cb-21ecc83bd594"
sns.set_style('whitegrid')
print(df.extended.value_counts())
# Fix copy-pasted title: this cell plots the `extended` flag, not `success`
# (the previous two cells already cover suicide and success outcomes).
sns.countplot('extended',data=df).set_title('Extended Incident Outcome')
# + colab={"base_uri": "https://localhost:8080/", "height": 400} id="nV-U3Pzkd1-F" outputId="59f40d39-9432-49cf-aaf2-d629efdc07e0"
sns.lmplot(x='year',y='killed',data=df,fit_reg=False,hue='extended',legend=True,palette="Set1")
# + colab={"base_uri": "https://localhost:8080/", "height": 444} id="ISSmH6WEd_1w" outputId="7f75a6d2-4a8e-4906-944f-d5a45c0b71f2"
count=df['Country'].value_counts()[:15].to_frame()
count.columns=['Attacks']
data=df.groupby('Country')['killed'].sum().to_frame()
count.merge(data,left_index=True,right_index=True,how='left').plot.bar(width=0.9)
fig=plt.gcf()
fig.set_size_inches(18,6)
plt.show()
# + [markdown] id="HfTwYPLmjZOL"
# # Top Countries affected by terrorist attacks
# + colab={"base_uri": "https://localhost:8080/", "height": 686} id="ejhLShVIeBNI" outputId="78cd90ad-1d9c-4267-a72f-1eb055d4859e"
plt.subplots(figsize=(15,8))
sns.barplot(df['Country'].value_counts()[:30].index,df['Country'].value_counts()[:30].values,palette='CMRmap')
plt.title('Top countries Affected')
plt.xticks(rotation=90)
plt.xlabel('Countries')
plt.ylabel('Count')
plt.show()
# + [markdown] id="Jp1vAyLKjf4v"
# # Total Terrorist Activities Each Year
# + colab={"base_uri": "https://localhost:8080/", "height": 582} id="4dm_3amyeGeP" outputId="d9ffbace-cb9f-49bc-f974-12366d2a6b61"
plt.subplots(figsize=(15,8))
sns.countplot('year',data=df,palette='CMRmap_r',edgecolor=sns.color_palette('dark',7))
plt.xticks(rotation=90)
plt.xlabel('Year of attack')
plt.title('Number Of Terrorist Activities Each Year')
plt.show()
# + [markdown] id="qa9ubNnmjkwk"
# # Total Regionwise Terrorist Activities
# + colab={"base_uri": "https://localhost:8080/", "height": 540} id="S8qva8cHeOIL" outputId="21f0fb26-8122-4641-f15c-103cf384b036"
plt.subplots(figsize=(15,5))
sns.countplot('Region',data=df,palette='Paired',order=df['Region'].value_counts().index)
plt.xticks(rotation=90)
plt.xlabel('Regions')
plt.title('Number Of Terrorist Activities By Region')
plt.show()
# + [markdown] id="1mPraxwzjp8d"
# # Spatio-Temporal Visualization of Terrorist Activities
# + colab={"base_uri": "https://localhost:8080/", "height": 621} id="WGkImHf2eOid" outputId="55b39e11-a680-4e95-ba36-8de032ba39f2"
df_region=pd.crosstab(df.year,df.Region).plot(kind='area',figsize=(20,10))
plt.title('Terrorist Activities by region in each year')
plt.ylabel('Attacks')
plt.show()
| Analytics/EDA_Global_Terrorism.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Assumptions
# Transaction cost <br>
# Total Cost: 0.05% on large portfolio
#
# Frazzini, Andrea; Israel, Ronen; and Moskowitz, Tobias J., Trading Costs (April 7, 2018). Available at SSRN: https://ssrn.com/abstract=3229719 or http://dx.doi.org/10.2139/ssrn.3229719
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# plt.style.use('ggplot')
# # import matplotlib
# # matplotlib.style.use('classic')
# # sns.set_style("whitegrid")
# # # %matplotlib inline
pd.set_option('display.max_columns', None)
import sys
# +
positions = pd.read_csv("../../Predictions/Metalearner/25_tickers_prediction.csv")
positions.Date = pd.to_datetime(positions.Date, dayfirst=False)
# Create extra column of long only returns
positions["long_only"] = 1
# Need to read in prices
prices = pd.read_csv("../../Raw Data/Price/price_labels.csv")
prices["Date"] = pd.to_datetime(prices["Date"], dayfirst=False)
# We use adjusted close. This accounts for dividends stock splits (if we're holding and we get dividends, this
# will be reflected in the adj close)
prices = prices.pivot(columns="Ticker", index="Date", values="Adj Close")
returns = np.log(prices).diff() # log returns so that we can add
# Index and columns same as positions
returns = returns[returns.columns[returns.columns.isin(positions.Ticker.unique())]]
returns = returns.loc[positions.Date.unique()]
col_order = returns.columns
# -
from sklearn.metrics import roc_auc_score, accuracy_score, f1_score, recall_score, precision_score
# #### Obtain scoring metrics across different predictions
# Classification metrics for every prediction column, skipping "no position"
# rows (prediction == 0). Assumes predictions/labels are a binary
# up/down encoding — TODO confirm the label values against the prediction file.
performance_results = pd.DataFrame()
for pred_type in ['Predictions_8k', 'Predictions_10kq', 'Predictions_tweets', 'Predictions_news', 'ensemble_pred',
                  'xgb_pred', 'logreg_pred', 'RF_pred', 'long_only']:
    # Only score rows where the model actually took a position.
    preds = positions[~(positions[pred_type] == 0)][pred_type]
    y_actual = positions[~(positions[pred_type] == 0)].Label
    performance_results[pred_type] = [
        accuracy_score(y_actual, preds),
        f1_score(y_actual, preds),
        recall_score(y_actual, preds),
        precision_score(y_actual, preds)
    ]
plt.figure(figsize=(15,6))
sns.heatmap(performance_results, cmap="Blues", annot=True, cbar_kws = dict(use_gridspec=False,location="bottom"))
plt.title("Performance Metrics")
# # Preprocess Data to obtain model returns
# #### Pivot positions to get time series of positions of companies by date
def _pivot_signal(value_col):
    """Pivot `positions` into a Date x Ticker matrix for one prediction column.

    Columns are reordered to `col_order` so every signal matrix aligns with
    the `returns` matrix for element-wise multiplication later.
    """
    return positions.pivot(columns="Ticker", index="Date", values=value_col)[col_order]


# One Date x Ticker position matrix per signal source / meta-learner.
sec_8k = _pivot_signal("Predictions_8k")
sec_10k10q = _pivot_signal("Predictions_10kq")
tweets = _pivot_signal("Predictions_tweets")
news = _pivot_signal("Predictions_news")
meta_ensemble = _pivot_signal("ensemble_pred")
meta_xgb = _pivot_signal("xgb_pred")
meta_logreg = _pivot_signal("logreg_pred")
meta_rf = _pivot_signal("RF_pred")
long_only = _pivot_signal("long_only")
perfect_pred = _pivot_signal("Label")
sec_8k.head()
# #### Get Apple Only Returns For Comparison with Amazon Augmented Datasets
# NOTE(review): this rebinds `positions` to an AAPL-only frame of the raw
# (unscaled) signals, shadowing the loaded predictions frame; later cells
# only use the pivoted per-signal matrices, so this is safe but fragile.
positions = pd.concat([sec_8k.AAPL, sec_10k10q.AAPL, tweets.AAPL, news.AAPL, meta_ensemble.AAPL,
                      meta_xgb.AAPL, meta_logreg.AAPL, meta_rf.AAPL, long_only.AAPL, perfect_pred.AAPL], axis=1)
# AAPL daily P&L per signal: position (+1/-1/0) times the AAPL log return.
aapl_rtns = positions * returns[["AAPL"]]
aapl_rtns.columns = ["sec_8k", "sec_10k10q", "tweets","news", "meta_ensemble", "meta_xgb","meta_logreg", "meta_rf", "long_only", "perfect_pred"]
aapl_rtns.to_csv("../../Predictions/Metalearner/appl_no_amzn.csv")
# #### Scale all values, absolute sum of all values should = 1
epsilon = sys.float_info.epsilon
epsilon # for adding small values


def _normalize_rows(weights):
    """Scale each row so the absolute weights sum to 1 (unit gross exposure).

    `epsilon` is added once to the denominator to guard against division by
    zero on days with no open positions. (The previous inline formula
    `sum(abs(x) + epsilon)` added epsilon per element, inflating the
    denominator by n*epsilon — numerically negligible, but this matches the
    intent.) The builtin `sum` is kept so NaNs still propagate as before.
    """
    return weights.apply(lambda row: row / (sum(abs(row)) + epsilon), axis=1)


# Normalise every signal matrix to unit gross exposure per day.
sec_8k = _normalize_rows(sec_8k)
sec_10k10q = _normalize_rows(sec_10k10q)
tweets = _normalize_rows(tweets)
news = _normalize_rows(news)
meta_ensemble = _normalize_rows(meta_ensemble)
meta_xgb = _normalize_rows(meta_xgb)
meta_logreg = _normalize_rows(meta_logreg)
meta_rf = _normalize_rows(meta_rf)
long_only = _normalize_rows(long_only)
perfect_pred = _normalize_rows(perfect_pred)
sec_8k.head()
# ### Pre-TC Model Returns Calculation
# Pre-transaction-cost P&L matrices: daily weight times daily log return,
# still one column per ticker (summed across tickers in the next cell).
sec_8k_pretc = sec_8k * returns # no tc subtracted
sec_10k10q_pretc = sec_10k10q * returns
tweets_pretc = tweets * returns
news_pretc = news * returns
meta_ensemble_pretc = meta_ensemble * returns
meta_xgb_pretc = meta_xgb * returns
meta_logreg_pretc = meta_logreg * returns
meta_rf_pretc = meta_rf * returns
long_only_pretc = long_only * returns
perfect_pred_pretc = perfect_pred * returns
# #### Sum up row-wise to obtain model returns
# Collapse each Date x Ticker P&L matrix into one daily return series per model.
# skipna=False reproduces the builtin-sum behaviour: any NaN poisons the row.
sec_8k_pretc = sec_8k_pretc.sum(axis=1, skipna=False)
sec_10k10q_pretc = sec_10k10q_pretc.sum(axis=1, skipna=False)
tweets_pretc = tweets_pretc.sum(axis=1, skipna=False)
news_pretc = news_pretc.sum(axis=1, skipna=False)
meta_ensemble_pretc = meta_ensemble_pretc.sum(axis=1, skipna=False)
meta_xgb_pretc = meta_xgb_pretc.sum(axis=1, skipna=False)
meta_logreg_pretc = meta_logreg_pretc.sum(axis=1, skipna=False)
meta_rf_pretc = meta_rf_pretc.sum(axis=1, skipna=False)
long_only_pretc = long_only_pretc.sum(axis=1, skipna=False)
perfect_pred_pretc = perfect_pred_pretc.sum(axis=1, skipna=False)
# Assemble the per-model daily return table; drop the first row, which is NaN
# because log returns come from a diff().
model_returns_pretc = pd.DataFrame({
    "sec_8k": sec_8k_pretc,
    "sec_10k10q": sec_10k10q_pretc,
    "tweets": tweets_pretc,
    "news": news_pretc,
    "meta_ensemble": meta_ensemble_pretc,
    "meta_xgb": meta_xgb_pretc,
    "meta_logreg": meta_logreg_pretc,
    "meta_rf": meta_rf_pretc,
    "long_only": long_only_pretc,
    "perfect_pred": perfect_pred_pretc,
}).iloc[1:]
model_returns_pretc.head()
# ### Post-TC Model Returns Calculation
# #### Get transaction cost
# Transaction cost = 0.05% of the absolute change in weights (turnover) per
# the Frazzini/Moskowitz assumption cited at the top of the notebook.
# Note: the first row of each diff() is NaN; it is dropped later via iloc[1:].
sec_8k_tc = abs(sec_8k.diff()) * 0.05 / 100
sec_10k10q_tc = abs(sec_10k10q.diff()) * 0.05 / 100
tweets_tc = abs(tweets.diff()) * 0.05 / 100
news_tc = abs(news.diff()) * 0.05 / 100
meta_ensemble_tc = abs(meta_ensemble.diff()) * 0.05 / 100
meta_xgb_tc = abs(meta_xgb.diff()) * 0.05 / 100
meta_logreg_tc = abs(meta_logreg.diff()) * 0.05 / 100
meta_rf_tc = abs(meta_rf.diff()) * 0.05 / 100
long_only_tc = abs(long_only.diff()) * 0.05 / 100
perfect_pred_tc = abs(perfect_pred.diff()) * 0.05 / 100
# From here on the signal variables are OVERWRITTEN with their post-TC
# Date x Ticker P&L matrices (weight * return minus transaction cost).
sec_8k = sec_8k * returns - sec_8k_tc
sec_10k10q = sec_10k10q * returns - sec_10k10q_tc
tweets = tweets * returns - tweets_tc
news = news * returns - news_tc
meta_ensemble = meta_ensemble * returns - meta_ensemble_tc
meta_xgb = meta_xgb * returns - meta_xgb_tc
meta_logreg = meta_logreg * returns - meta_logreg_tc
meta_rf = meta_rf * returns - meta_rf_tc
long_only = long_only * returns - long_only_tc
perfect_pred = perfect_pred * returns - perfect_pred_tc
sec_8k.head()
# #### Lastly, sum up all the returns from different assets to obtain model returns
# Collapse each post-TC Date x Ticker P&L matrix into one daily return series.
# skipna=False reproduces the builtin-sum behaviour: any NaN poisons the row.
sec_8k = sec_8k.sum(axis=1, skipna=False)
sec_10k10q = sec_10k10q.sum(axis=1, skipna=False)
tweets = tweets.sum(axis=1, skipna=False)
news = news.sum(axis=1, skipna=False)
meta_ensemble = meta_ensemble.sum(axis=1, skipna=False)
meta_xgb = meta_xgb.sum(axis=1, skipna=False)
meta_logreg = meta_logreg.sum(axis=1, skipna=False)
meta_rf = meta_rf.sum(axis=1, skipna=False)
long_only = long_only.sum(axis=1, skipna=False)
perfect_pred = perfect_pred.sum(axis=1, skipna=False)
sec_8k.head()
# Per-model daily return table; the first row is NaN (diff) and is dropped.
model_returns = pd.DataFrame({
    "sec_8k": sec_8k,
    "sec_10k10q": sec_10k10q,
    "tweets": tweets,
    "news": news,
    "meta_ensemble": meta_ensemble,
    "meta_xgb": meta_xgb,
    "meta_logreg": meta_logreg,
    "meta_rf": meta_rf,
    "long_only": long_only,
    "perfect_pred": perfect_pred,
}).iloc[1:]
model_returns.head()
# # Visualisations with Perfect Prediction
# +
# Annualised Sharpe ratio per model, pre transaction cost (252 trading days).
sharpe_ratios = round(model_returns_pretc.mean() / model_returns_pretc.std() * 252 ** 0.5, 2)
ax = sharpe_ratios.plot(kind="bar", rot=45, figsize=(15,5))
for p in ax.patches:
    # Put the label just above positive bars, at the axis for negative ones.
    if p.get_height() <= 0:
        ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.05, 0.01))
    else:
        ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.05, p.get_height() * 1.005))
plt.title("Sharpe Ratios by NLP (Pre TC)")
plt.ylabel("Annualised Sharpe Ratio")
# +
# Same chart, post transaction cost.
sharpe_ratios = round(model_returns.mean() / model_returns.std() * 252 ** 0.5, 2)
ax = sharpe_ratios.plot(kind="bar", rot=45, figsize=(15,5))
for p in ax.patches:
    if p.get_height() <= 0:
        ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.05, 0.01))
    else:
        ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.05, p.get_height() * 1.005))
plt.title("Sharpe Ratios by NLP (Post-TC)")
plt.ylabel("Annualised Sharpe Ratio")
# -
# Cumulative (summed log) returns over time, pre and post TC.
model_returns_pretc.expanding(2).sum().plot(figsize=(15,5))
plt.title("Model Returns (Pre-TC)")
model_returns.expanding(2).sum().plot(figsize=(15,5))
plt.title("Model Returns (Post-TC)")
# +
# Total TC per model = final cumulative pre-TC return minus post-TC, in %.
transaction_cost = round((model_returns_pretc.expanding(2).sum().iloc[-1] - model_returns.expanding(2).sum().iloc[-1]) * 100, 2)
ax = transaction_cost.plot(kind="bar", figsize=(15, 5), rot=45)
for p in ax.patches:
    if p.get_height() <= 0:
        ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.1, 0.01))
    else:
        ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.1, p.get_height() * 1.005))
plt.title("Total Transaction Cost")
plt.ylabel("Transaction Cost (%)")
plt.xlabel("Prediction Type")
# -
# # Visualisations
# Same charts as above but excluding the unattainable perfect-prediction series.
plot_cols = ['sec_8k', 'sec_10k10q', 'tweets', 'news', 'meta_ensemble', 'meta_xgb', 'meta_logreg', 'meta_rf', 'long_only']
model_returns_pretc[plot_cols].expanding(2).sum().plot(figsize=(15,8))
plt.title("Model Returns (Pre-TC)")
plot_cols
model_returns[plot_cols].expanding(2).sum().plot(figsize=(15,8))
plt.title("Model Returns (Post-TC)")
# Zoom in on the better-performing models only.
model_returns[['sec_10k10q',
               'meta_ensemble',
               'meta_xgb',
               'meta_rf',
               'long_only']].expanding(2).sum().plot(figsize=(15,8))
plt.title("Model Returns (Post-TC)")
plt.axhline(0, color='blue', ls="--")
# +
sharpe_ratios = round(model_returns_pretc[plot_cols].mean() / model_returns_pretc[plot_cols].std() * 252 ** 0.5, 2)
ax = sharpe_ratios.plot(kind="bar", rot=45, figsize=(15,5))
for p in ax.patches:
    if p.get_height() <= 0:
        ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.05, 0.01))
    else:
        ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.05, p.get_height() * 1.005))
plt.title("Sharpe Ratios by NLP (Pre TC)")
plt.ylabel("Annualised Sharpe Ratio")
plt.xlabel("Prediction Type")
# +
sharpe_ratios = round(model_returns[plot_cols].mean() / model_returns[plot_cols].std() * 252 ** 0.5, 2)
ax = sharpe_ratios.plot(kind="bar", rot=45, figsize=(15,7))
for p in ax.patches:
    if p.get_height() <= 0:
        ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.1, 0.01))
    else:
        ax.annotate(str(p.get_height()), (p.get_x()+p.get_width()/2-0.1, p.get_height() * 1.005))
plt.title("Sharpe Ratios by NLP (After Accouting for Transaction Cost)")
plt.ylabel("Annualised Sharpe Ratio")
plt.xlabel("Prediction Type")
plt.axhline(0, color='black')
# Reference line at 0.79 — presumably the long-only benchmark Sharpe; confirm.
plt.axhline(0.79, color='orange', ls="--")
# -
# NOTE(review): this rebinds `returns` (previously the Date x Ticker return
# matrix) to a Series of annualised model returns. The 501 is presumably the
# number of trading days in the backtest window — TODO confirm.
returns = model_returns[['sec_10k10q', 'meta_ensemble', 'meta_xgb', 'meta_rf', 'long_only']].expanding(2).sum().iloc[-1] / 501 * 252
annual_rtns = round(pd.DataFrame(returns) * 100, 2)
annual_rtns.columns = ["Annual Returns"]
plt.figure(figsize=(1.5,6))
ax = sns.heatmap(annual_rtns, cmap = sns.diverging_palette(10, 150, n=9), annot=True)
ax.xaxis.set_ticks_position('top')
# Annualised volatility (%) per model.
vol = pd.DataFrame(round(model_returns[['sec_10k10q', 'meta_ensemble', 'meta_xgb', 'meta_rf', 'long_only']].std() * 252 ** 0.5 * 100, 2))
vol.columns = ["Annual Volatility"]
plt.figure(figsize=(1.5,6))
ax = sns.heatmap(vol, cmap = sns.diverging_palette(150, 10, n=9), annot=True)
ax.xaxis.set_ticks_position('top')
cumulative_returns = model_returns[['sec_10k10q', 'meta_ensemble', 'meta_xgb', 'meta_rf', 'long_only']].expanding(2).sum()
# Maximum drawdown per model: track the running peak and the lowest point
# since that peak; each completed peak-to-trough leg's depth is recorded.
drawdowns = dict()
for col in cumulative_returns.columns:
    maximum_drawdown = []
    # values[0] is NaN because of expanding(2), so seed from values[1].
    highest = cumulative_returns[col].values[1]
    lowest = cumulative_returns[col].values[1]
    for cum_rtn in cumulative_returns[col].values:
        # NaN compares False on both branches, so the leading NaN is skipped.
        if cum_rtn >= highest:
            # New peak: close out the previous peak-to-trough leg.
            maximum_drawdown.append(highest - lowest)
            highest = cum_rtn
            lowest = cum_rtn
            continue
        if cum_rtn <= lowest:
            lowest = cum_rtn
    # Bug fix: record the final (possibly never-recovered) leg. Previously a
    # trough after the last peak was silently dropped, understating the max
    # drawdown whenever the deepest decline came at the end of the series.
    maximum_drawdown.append(highest - lowest)
    drawdowns[col] = [max(maximum_drawdown)]
drawdowns
plt.figure(figsize=(1.5,6))
dd_df = round(pd.DataFrame.from_dict(drawdowns).T * 100, 2)
dd_df.columns = ["Max Drawdown"]
ax = sns.heatmap(dd_df, cmap = sns.diverging_palette(150, 10, n=9), annot=True)
ax.xaxis.set_ticks_position('top')
| Analytics/Backtest/Backtest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### CRISP-DM Intro
# The data explored in the notebook is from [Stackoverflow’s 2020 Annual Developer Survey](https://insights.stackoverflow.com/survey). CRISP-DM, the Cross Industry Standard Process for Data Mining, will be used to explore the data.
#
# 1. Business Understanding
# 2. Data Understanding
# 3. Prepare Data
# 4. Data Modeling
# 5. Evaluate the Results
# 6. Deploy
#
# ### 1. Business Understanding
# I used [data](https://drive.google.com/file/d/1dfGerWeWkcyQ9GX9x20rdSGj7WtEpzBB/view) to take a closer look at three questions.
#
# - What is the percentage of women as a developer by profession?
# - What factors benefit you choose software development as a career?
# - What are the downsides to a developer's career?
#
#
# ### 2. Data Understanding
#
# +
import numpy as np
import pandas as pd
from collections import defaultdict
import matplotlib.pyplot as plt
# %matplotlib inline
# Survey responses: one row per respondent, one column per question.
df = pd.read_csv('./data/survey_results_public.csv')
df.head()
# -
# Schema file mapping each column name to its full question text.
df2 = pd.read_csv('./data/survey_results_schema.csv')
df2.head()
num_rows = df.shape[0]
num_cols = df.shape[1]
print('There are {} rows and {} cols in the dataset.'.format(num_rows, num_cols))
# ### 3. Prepare Data
# Distribution of professional status (MainBranch) across all respondents.
status_vals = df.MainBranch.value_counts()
print(status_vals/df.shape[0])
plt.title('The professional status of developers')
(status_vals/df.shape[0]).plot.barh(stacked=True, figsize=(10,5));
# +
# Restrict the analysis to professional developers only.
developer_df = df[df.MainBranch == 'I am a developer by profession']
print('There were a total of {} participants, of whom {} were developers by profession.'\
      .format(df.shape[0], developer_df.shape[0]))
# -
# Look up the full question text for the ConvertedComp column.
list(df2[df2.Column == 'ConvertedComp']['QuestionText'])[0]
# ### 4 Data Modeling
# #### Question 1 : What is the percentage of women as a developer by profession?
# Gender shares among developers, excluding respondents who left Gender blank.
pc_gender = developer_df['Gender'].value_counts()/(developer_df.shape[0] - sum(developer_df['Gender'].isnull()))
print(pc_gender)
print('There are {:.2f} percent of female developers'.format(pc_gender.Woman*100))
plt.title('The Gender percentage of developers');
# "Other" is everything that is neither Man nor Woman.
pd.Series([pc_gender.Man, pc_gender.Woman,(1-pc_gender.Man-pc_gender.Woman)], name='').plot.pie(labels=['Man', 'Woman', 'Other'], colors=['r', 'g', 'b'],
                                                                                                autopct='%.2f', fontsize=20, figsize=(6, 6));
developer_df.WorkWeekHrs.mean()
# Share of each "learn a new language/framework" frequency among developers.
developer_df.NEWLearn.value_counts()/(developer_df.shape[0] - sum(developer_df['NEWLearn'].isnull()))
# #### Question 2 : What does the career of a developer relate?
print('Average annual salary for developers is {:.2f}'.format(developer_df.ConvertedComp.mean()))
# Share of respondents per country, top 10 countries only.
sur_series = (developer_df['Country'].value_counts()/(developer_df.shape[0] - sum(developer_df['Country'].isnull())))[:10]
print('The top 10 countries:\n {}'.format(sur_series))
# NOTE(review): groupby().mean() over a mixed-dtype frame relies on the old
# implicit numeric_only behaviour; newer pandas requires numeric_only=True.
sal_df = developer_df.groupby('Country').mean()['ConvertedComp'].dropna().sort_values(ascending=False)
print(sal_df[sur_series.keys()].sort_values(ascending=False))
sal_df[sur_series.keys()].sort_values(ascending=False).plot.bar();
developer_df.groupby('OrgSize').describe()['ConvertedComp']
developer_df.groupby('OrgSize').describe()['WorkWeekHrs'].sort_values(by='mean',ascending=False)
# All distinct answer options in the multi-select NEWJobHunt column
# (answers are ';'-separated within each cell).
possible_vals = set(np.concatenate(df.NEWJobHunt.dropna().str.split(';').to_numpy()).ravel().tolist())
print(possible_vals)
def total_count(df, col1, col2, look_for):
    '''
    INPUT:
    df - the pandas dataframe you want to search
    col1 - the column name you want to look through
    col2 - the column you want to count values from
    look_for - a list of strings you want to search for in each row of df[col]
    OUTPUT:
    new_df - a dataframe of each look_for with the count of how often it shows up
    '''
    new_df = defaultdict(int)
    #loop through list of ed types
    for val in look_for:
        # Robustness fix: iterate the column values directly instead of the
        # positional pattern df[col1][idx], which raised KeyError whenever the
        # frame did not have a default 0..n-1 RangeIndex.
        for cell, cnt in zip(df[col1], df[col2]):
            # substring membership test (e.g. matches one option inside a
            # ';'-separated multi-select answer); assumes cell is a string.
            if val in cell:
                new_df[val] += int(cnt)
    new_df = pd.DataFrame(pd.Series(new_df)).reset_index()
    new_df.columns = [col1, col2]
    # NOTE: sorts by the literal column name 'count', so callers must pass
    # col2 == 'count' (both call sites in this notebook do).
    new_df.sort_values('count', ascending=False, inplace=True)
    return new_df
# +
def clean_and_plot(df, title='What drives you to look for a new job?', plot=True):
    '''
    INPUT
    df - a dataframe holding the NEWJobHunt column
    title - string the title of your plot
    plot - bool providing whether or not you want a plot back
    OUTPUT
    props_jobhunt_df - a dataframe with the proportion of respondents who
    selected each NEWJobHunt reason

    Displays a bar plot of the proportions when plot is True. Relies on the
    module-level `possible_vals` set and `total_count` function defined above.
    '''
    # One row per distinct (multi-select) answer string with its frequency.
    jobhunt = df['NEWJobHunt'].value_counts().reset_index()
    jobhunt.rename(columns={'index': 'apply', 'NEWJobHunt': 'count'}, inplace=True)
    # Split the combined answers into per-option totals.
    jobhunt_df = total_count(jobhunt, 'apply', 'count', possible_vals)
    jobhunt_df.set_index('apply', inplace=True)
    if plot:
        (jobhunt_df/jobhunt_df.sum()).plot(kind='bar', legend=None);
        plt.title(title);
        plt.show()
    props_jobhunt_df = jobhunt_df/jobhunt_df.sum()
    return props_jobhunt_df
props_df = clean_and_plot(df)
props_df
# -
# #### Question 3 : What are the downsides to a developer's career?
# Age statistics by employment status.
developer_df.groupby('Employment').describe()['Age']
# In the Independent contractor, freelancer, or self-employed, Not employed, and Not looking for work, the average age is 35.64. The average age in Employed full-time is 31.59, meaning the prime age for engineers is between 31 to 32 years old.
developer_df.groupby('JobSeek').describe()['Age']
# Younger engineers are more motivated to find work
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
# Reload the full survey (not just developers) for the modelling step.
df = pd.read_csv('./data/survey_results_public.csv')
# NOTE(review): df.corr() on a mixed-dtype frame relies on the old implicit
# numeric_only behaviour; newer pandas requires numeric_only=True.
sns.heatmap(df.corr(), annot=True, fmt='.2f');
num_vars = df[['Age','WorkWeekHrs','ConvertedComp']]
# Proportion of rows where the salary (target) is present.
prop_sals = 1- num_vars['ConvertedComp'].isnull().mean()
prop_sals
drop_sal_df = num_vars.dropna(subset=['ConvertedComp'], axis=0)
drop_sal_df.shape
# I was only looking at the correlation between age and annual salary, so drop the ['ConvertedComp'] NA value.
# Then drop rows still missing any predictor.
df_drop = drop_sal_df.dropna(axis=0)
df_drop.shape
# When I drop all NA value , there are enough data for me to see if age relate to annual salary.
# +
# Predict annual salary (ConvertedComp) from Age and WorkWeekHrs.
X = df_drop[['Age','WorkWeekHrs']]
y = df_drop['ConvertedComp']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, random_state=42)
# Fix: `normalize=True` was removed from LinearRegression in scikit-learn 1.2
# and, for plain OLS, never changed the predictions (coefficients were
# rescaled back internally), so the plain constructor is the compatible
# equivalent.
lm_model = LinearRegression()
# -
# Fit the linear model on the training split.
lm_model.fit(X_train, y_train)
# +
#Predict using your model
y_test_preds = lm_model.predict(X_test)
y_train_preds = lm_model.predict(X_train)
#Score using your model
# R^2 on both splits to check fit quality and overfitting.
test_score = r2_score(y_test, y_test_preds)
train_score = r2_score(y_train, y_train_preds)
# -
length_y_train= len(y_train_preds)
length_y_test = len(y_test_preds)
"The r-squared score for your model was {} on {} values.".format(train_score, length_y_train)
"The r-squared score for your model was {} on {} values.".format(test_score, length_y_test)
# +
# look at the weight of their coefficients
def coef_weights(coefficients, X_train):
    '''
    INPUT:
    coefficients - the coefficients of the linear model
    X_train - the training data, so the column names can be used
    OUTPUT:
    coefs_df - a dataframe holding the coefficient, estimate, and abs(estimate)
    Provides a dataframe that can be used to understand the most influential coefficients
    in a linear model by providing the coefficient estimates along with the name of the
    variable attached to the coefficient.
    '''
    coefs_df = pd.DataFrame()
    coefs_df['est_int'] = X_train.columns
    # Bug fix: use the `coefficients` argument instead of reaching for the
    # global lm_model, so the function works for any model's coefficients
    # passed in (the call site passes lm_model.coef_, so results are unchanged).
    coefs_df['coefs'] = coefficients
    coefs_df['abs_coefs'] = np.abs(coefficients)
    # Most influential (largest magnitude) coefficients first.
    coefs_df = coefs_df.sort_values('abs_coefs', ascending=False)
    return coefs_df
#Use the function
# Rank the fitted model's coefficients by absolute magnitude.
coef_df = coef_weights(lm_model.coef_, X_train)
#A quick look at the top results
coef_df
# -
# The age of the developer has an impact on the annual salary.
# ### 5.Evaluate the Results
# [The medium stroy](https://medium.com/@tomgtbst/would-you-advise-your-daughter-to-be-a-software-engineer-d7ca9e53370a)
| DSND-P1-Write-a-Data-Science-Blog-Post.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Homework 2: Working with SQL (Data and Databases 2016)
#
# This homework assignment takes the form of an IPython Notebook. There are a number of exercises below, with notebook cells that need to be completed in order to meet particular criteria. Your job is to fill in the cells as appropriate.
# You'll need to download this notebook file to your computer before you can complete the assignment. To do so, follow these steps:
#
# 1. Make sure you're viewing this notebook in Github.
# 2. Ctrl+click (or right click) on the "Raw" button in the Github interface, and select "Save Link As..." or your browser's equivalent. Save the file in a convenient location on your own computer.
# 3. Rename the notebook file to include your own name somewhere in the filename (e.g., Homework_2_Allison_Parrish.ipynb).
# 4. Open the notebook on your computer using your locally installed version of IPython Notebook.
# 5. When you've completed the notebook to your satisfaction, e-mail the completed file to the address of the teaching assistant (as discussed in class).
#
# ## Setting the scene
#
# These problem sets address SQL, with a focus on joins and aggregates.
#
# I've prepared a SQL version of the MovieLens data for you to use in this homework. [Download this .psql file here](movielens.psql). You'll be importing this data into your own local copy of PostgreSQL.
#
# To import the data, follow these steps:
#
# * Launch `psql`.
# * At the prompt, type `CREATE DATABASE homework2;`
# * Connect to the database you just created by typing `\c homework2`
# * Import the `.psql` file you downloaded earlier by typing `\i ` followed by the path to the `.psql` file.
#
# After you run the `\i` command, you should see the following output:
#
# ```
# CREATE TABLE
# CREATE TABLE
# CREATE TABLE
# COPY 100000
# COPY 1682
# COPY 943
# ```
#
# The table schemas for the data look like this:
# ```
# Table "public.udata"
# Column | Type | Modifiers
# -----------+---------+-----------
# user_id | integer |
# item_id | integer |
# rating | integer |
# timestamp | integer |
# ```
#
# ```
# Table "public.uuser"
# Column | Type | Modifiers
# ------------+-----------------------+-----------
# user_id | integer |
# age | integer |
# gender | character varying(1) |
# occupation | character varying(80) |
# zip_code | character varying(10) |
# ```
#
# ```
# Table "public.uitem"
# Column | Type | Modifiers
# --------------------+------------------------+-----------
# movie_id | integer | not null
# movie_title | character varying(81) | not null
# release_date | date |
# video_release_date | character varying(32) |
# imdb_url | character varying(134) |
# unknown | integer | not null
# action | integer | not null
# adventure | integer | not null
# animation | integer | not null
# childrens | integer | not null
# comedy | integer | not null
# crime | integer | not null
# documentary | integer | not null
# drama | integer | not null
# fantasy | integer | not null
# film_noir | integer | not null
# horror | integer | not null
# musical | integer | not null
# mystery | integer | not null
# romance | integer | not null
# scifi | integer | not null
# thriller | integer | not null
# war | integer | not null
# western | integer | not null
# ```
# Run the cell below to create a connection object. This should work whether you have `pg8000` installed or `psycopg2`.
import pg8000
# Connect to the local PostgreSQL database created per the setup instructions.
conn = pg8000.connect(database="homework2")
# If you get an error stating that `database "homework2" does not exist`, make sure that you followed the instructions above exactly. If necessary, drop the database you created (with, e.g., `DROP DATABASE your_database_name`) and start again.
#
# In all of the cells below, I've provided the necessary Python scaffolding to perform the query and display the results. All you need to do is write the SQL statements.
#
# As noted in the tutorial, if your SQL statement has a syntax error, you'll need to rollback your connection before you can fix the error and try the query again. As a convenience, I've included the following cell, which performs the rollback process. Run it whenever you hit trouble.
# Reset the connection after a failed statement so new queries can run.
conn.rollback()
# ## Problem set 1: WHERE and ORDER BY
#
# In the cell below, fill in the string assigned to the variable `statement` with a SQL query that finds all movies that belong to both the science fiction (`scifi`) and horror genres. Return these movies in reverse order by their release date. (Hint: movies are located in the `uitem` table. A movie's membership in a genre is indicated by a value of `1` in the `uitem` table column corresponding to that genre.) Run the cell to execute the query.
#
# Expected output:
#
# ```
# Deep Rising (1998)
# Alien: Resurrection (1997)
# Hellraiser: Bloodline (1996)
# <NAME>'s The Puppet Masters (1994)
# Body Snatchers (1993)
# Army of Darkness (1993)
# Body Snatchers (1993)
# Alien 3 (1992)
# Heavy Metal (1981)
# Alien (1979)
# Night of the Living Dead (1968)
# Blob, The (1958)
# ```
cursor = conn.cursor()
# Movies tagged both sci-fi AND horror, newest release first.
statement = "SELECT movie_title FROM uitem WHERE scifi = 1 AND horror = 1 ORDER BY release_date DESC"
cursor.execute(statement)
for row in cursor:
    print(row[0])
# ## Problem set 2: Aggregation, GROUP BY and HAVING
#
# In the cell below, fill in the string assigned to the `statement` variable with a SQL query that returns the number of movies that are *either* musicals or children's movies (columns `musical` and `childrens` respectively). Hint: use the `count(*)` aggregate.
#
# Expected output: `157`
cursor = conn.cursor()
# Number of movies that are musicals OR children's movies (either flag set).
statement = "SELECT COUNT(*) FROM uitem WHERE musical = 1 OR childrens = 1"
cursor.execute(statement)
for row in cursor:
    print(row[0])
# Nicely done. Now, in the cell below, fill in the indicated string with a SQL statement that returns all occupations, along with their count, from the `uuser` table that have *more than fifty* users listed for that occupation. (I.e., the occupation `librarian` is listed for 51 users, so it should be included in these results. There are only 12 lawyers, so `lawyer` should not be included in the result.)
#
# Expected output:
#
# ```
# administrator 79
# programmer 66
# librarian 51
# student 196
# other 105
# engineer 67
# educator 95
# ```
#
# Hint: use `GROUP BY` and `HAVING`. (If you're stuck, try writing the query without the `HAVING` first.)
cursor = conn.cursor()
# Occupations held by more than fifty users, with their counts.
# NOTE(review): DISTINCT(occupation) is redundant here — GROUP BY occupation
# already yields one row per occupation.
statement = "SELECT DISTINCT(occupation), COUNT(*) FROM uuser GROUP BY occupation HAVING COUNT(*) > 50"
cursor.execute(statement)
for row in cursor:
    print(row[0], row[1])
# ## Problem set 3: Joining tables
#
# In the cell below, fill in the indicated string with a query that finds the titles of movies in the Documentary genre released before 1992 that received a rating of 5 from any user. Expected output:
#
# ```
# Madonna: Truth or Dare (1991)
# Koyaanisqatsi (1983)
# Paris Is Burning (1990)
# Thin Blue Line, The (1988)
# ```
#
#
# Hints:
#
# * `JOIN` the `udata` and `uitem` tables.
# * Use `DISTINCT()` to get a list of unique movie titles (no title should be listed more than once).
# * The SQL expression to include in order to find movies released before 1992 is `uitem.release_date < '1992-01-01'`.
# +
cursor = conn.cursor()
# Bug fix: the original query never filtered on the Documentary genre, so it
# returned 5-star movies of ANY genre released before 1992. Adding
# documentary = 1 matches the problem statement (and the TA's suggestion below).
statement = "SELECT DISTINCT(movie_title) FROM udata JOIN uitem ON uitem.movie_id = udata.item_id WHERE documentary = 1 AND EXTRACT(YEAR FROM release_date) < 1992 AND rating = 5 GROUP BY movie_title"
#TA-STEPHAN: Try using this statement
#statement = "SELECT DISTINCT uitem.movie_title, udata.rating FROM uitem JOIN udata ON uitem.movie_id = udata.item_id WHERE documentary = 1 AND udata.rating = 5 AND uitem.release_date < '1992-01-01';"
# if "any" has to be taken in the sense of "every":
# statement = "SELECT movie_title FROM uitem JOIN udata ON uitem.movie_id = udata.item_id WHERE EXTRACT(YEAR FROM release_date) < 1992 GROUP BY movie_title HAVING MIN(rating) = 5"
cursor.execute(statement)
for row in cursor:
    print(row[0])
# -
# ## Problem set 4: Joins and aggregations... together at last
#
# This one's tough, so prepare yourself. Go get a cup of coffee. Stretch a little bit. Deep breath. There you go.
#
# In the cell below, fill in the indicated string with a query that produces a list of the ten lowest rated movies in the Horror genre. For the purposes of this problem, take "lowest rated" to mean "has the lowest average rating." The query should display the *titles* of the movies, not their ID number. (So you'll have to use a `JOIN`.)
#
# Expected output:
#
# ```
# Amityville 1992: It's About Time (1992) 1.00
# Beyond Bedlam (1993) 1.00
# Amityville: Dollhouse (1996) 1.00
# Amityville: A New Generation (1993) 1.00
# Amityville 3-D (1983) 1.17
# Castle Freak (1995) 1.25
# Amityville Curse, The (1990) 1.25
# Children of the Corn: The Gathering (1996) 1.32
# Machine, The (1994) 1.50
# Body Parts (1991) 1.62
# ```
conn.rollback()
cursor = conn.cursor()
# Ten lowest-rated horror movies by average rating.
# Bug fix: the original statement had a stray ")" after movie_title
# ("SELECT movie_title), AVG(rating) ..."), which is a PostgreSQL syntax error.
statement = "SELECT movie_title, AVG(rating) FROM udata JOIN uitem ON uitem.movie_id = udata.item_id WHERE horror = 1 GROUP BY movie_title ORDER BY AVG(rating) LIMIT 10"
cursor.execute(statement)
for row in cursor:
    print(row[0], "%0.2f" % row[1])
# BONUS: Extend the query above so that it only includes horror movies that have ten or more ratings. Fill in the query as indicated below.
#
# Expected output:
#
# ```
# Children of the Corn: The Gathering (1996) 1.32
# Body Parts (1991) 1.62
# Amityville II: The Possession (1982) 1.64
# Jaws 3-D (1983) 1.94
# Hellraiser: Bloodline (1996) 2.00
# Tales from the Hood (1995) 2.04
# Audrey Rose (1977) 2.17
# Addiction, The (1995) 2.18
# Halloween: The Curse of <NAME> (1995) 2.20
# Phantoms (1998) 2.23
# ```
cursor = conn.cursor()
# NOTE(review): the prompt says "ten or more ratings", which would be
# COUNT(rating) >= 10; this query uses > 10 (strictly more than ten), and the
# expected output shown above appears to match it — confirm which the grader
# intends before changing.
statement = "SELECT movie_title, AVG(rating) FROM udata JOIN uitem ON uitem.movie_id = udata.item_id WHERE horror = 1 GROUP BY movie_title HAVING COUNT(rating) > 10 ORDER BY AVG(rating) LIMIT 10;"
cursor.execute(statement)
for row in cursor:
    print(row[0], "%0.2f" % row[1])
# Great work! You're done.
| Data_and_databases/Homework_2_Paul_Ronga_Graded.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://www.kaggle.com/c/titanic
#
# Competition Description
#
# The sinking of the RMS Titanic is one of the most infamous shipwrecks in history. On April 15, 1912, during her maiden voyage, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. This sensational tragedy shocked the international community and led to better safety regulations for ships.
#
# One of the reasons that the shipwreck led to such loss of life was that there were not enough lifeboats for the passengers and crew. Although there was some element of luck involved in surviving the sinking, some groups of people were more likely to survive than others, such as women, children, and the upper-class.
#
# In this challenge, we ask you to complete the analysis of what sorts of people were likely to survive. In particular, we ask you to apply the tools of machine learning to predict which passengers survived the tragedy.
#
# Practice Skills
# Binary classification
# Python and R basics
# Variable Notes
#
# survival: Survival 0 = No, 1 = Yes
#
# <br>pclass: Ticket class:A proxy for socio-economic status (SES)
# <br>1st = Upper
# <br>2nd = Middle
# <br>3rd = Lower
#
# age: Age is fractional if less than 1. If the age is estimated, it is in the form of xx.5
#
# sibsp: # of siblings / spouses aboard the Titanic
# <br>The dataset defines family relations in this way...
# <br>Sibling = brother, sister, stepbrother, stepsister
# <br>Spouse = husband, wife (mistresses and fiancés were ignored)
#
# <br>parch: # of parents / children aboard the Titanic
# <br>The dataset defines family relations in this way...
# <br>Parent = mother, father
# <br>Child = daughter, son, stepdaughter, stepson
# <br>Some children travelled only with a nanny, therefore parch=0 for them.
#
# embarked:port of Embarkation C = Cherbourg, Q = Queenstown, S = Southampton
# +
import pandas as pd
import numpy as np
import seaborn as sns
sns.set_style("whitegrid")
import matplotlib.pyplot as plt
import time
#setting for plot fonts
SMALL_SIZE = 14
MEDIUM_SIZE = 16
BIGGER_SIZE = 18
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
#display notebook in full width
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
pd.options.mode.chained_assignment = None # default='warn'
random_state = 42
# -
# ### Read Data
# Load the Kaggle Titanic train/test splits from the working directory.
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
print('Train shape {} Test Shape{}'.format(train.shape, test.shape))
train.head()
# ### Missing Data
# It's important to find null values in the data so that we can impute (replace) them, since many ML algorithms can't handle missing values.
# Column Cabin has 77% missing values.
#
# +
def missing_data(data):
    """Summarize missing values per column.

    Returns a DataFrame with one row per column that has at least one
    null value: the absolute null count ('Total') and the share of rows
    affected ('Percent'), ordered from most to least missing.
    """
    null_counts = data.isnull().sum().sort_values(ascending=False)
    null_share = (data.isnull().sum() * 100 / data.isnull().count()).sort_values(ascending=False)
    summary = pd.concat([null_counts, null_share], axis=1, keys=['Total', 'Percent'])
    return summary[summary['Total'] != 0]
# -
missing_data(train)
# ### Prepare Numeric and Categorical columns
all_cols = train.columns.tolist()
print(all_cols)
# Feature lists: numeric columns are used as-is, categorical columns are
# one-hot encoded later. Pclass (an ordinal code) is treated as numeric.
num_cols = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']
cat_cols = [ 'Sex', 'Ticket', 'Cabin', 'Embarked']
features = num_cols + cat_cols
print(features)
# ### Target value exploration
# Here the target is the column Survived, which we need to predict in the test data. We can see that around 62% of passengers died while 38% of passengers survived. This also helps us to gauge the performance of our model. A naive model which always predicts that all passengers died will have an accuracy of 62%. Our model needs to perform better than that to make any meaningful predictions. We will aim to achieve more than 80% accuracy, which is a good relative performance.
# +
def display_survival_data(df):
    """Plot the survived/died class balance and print class percentages.

    Expects *df* to contain a numeric 'Survived' column (0/1) and a
    'target_name' column with matching human-readable labels.
    """
    # NOTE(review): passing the data positionally to countplot is the
    # legacy seaborn signature — newer releases require x=/y= keywords;
    # confirm the installed version accepts it.
    sns.countplot(df.target_name)
    plt.title('Titanic passenger survival')
    plt.xlabel('Passenger Survived?', fontsize = 12)
    plt.ylabel('Number of occurenecs', fontsize = 12)
    plt.show()
    # Class balance: share of passengers in each outcome class.
    total = df.Survived.count()
    total_0 = df['Survived'][df.Survived == 0].count()
    total_1 = df['Survived'][df.Survived == 1].count()
    class_0_perc = total_0/total
    class_1_perc = 1 - class_0_perc
    print('% passengers died: {:.3%}'.format(class_0_perc))
    print('% passengers survived: {:.3%}'.format(class_1_perc))
# -
# Map the 0/1 target onto readable labels for plotting.
train['target_name'] = train['Survived'].map({0: 'died', 1: 'survived'})
display_survival_data(train)
# ### Correlation For Numeric variables
# Lets us check if there is correlation in numeric features
#
# We can see Pclass is negatively correlated with Fare meaning higher the class higher the fare. Note that here Pclass 1 is higher than Pclass 2 hence negative correlation
# +
def display_correlation(df):
    """Render a heatmap of pairwise correlations between the columns of *df*."""
    cor_matrix = df.corr().round(2)
    # Plotting heatmap
    fig = plt.figure(figsize=(6, 6));
    plt.title('correlation matrix')
    # Diverging palette centered at 0 so positive and negative
    # correlations get contrasting colors.
    sns.heatmap(cor_matrix, annot=True, center=0, cmap = sns.diverging_palette(250, 10, as_cmap=True), ax=plt.subplot(111));
    plt.show()
# -
display_correlation(train[num_cols])
# ### Numeric features statistics
# <br>Median Age of passengers in 28
# <br>Maximum number of siblings or spouses(SibSp) travelling along a passengers is 8
train[num_cols].describe()
# ### Cardinality of variables
# Lets check how many unique values all features have.
# +
def print_cardinality(df, cols):
    """Print, for each column in *cols*, how many distinct values it holds."""
    for column_name in cols:
        unique_count = df[column_name].value_counts().shape[0]
        print('Column:{}, Number of distict Values:{}'.format(column_name, unique_count))
# -
print_cardinality(train, all_cols)
# ## Plot Discrete Features
# +
def plot_discrete_feature(df, col ):
    """Draw three side-by-side panels for a discrete feature.

    Panels: raw value counts, counts split by survival outcome, and the
    mean survival rate per value (pointplot over the 0/1 'Survived'
    column). Expects *df* to contain *col*, 'Survived' and 'target_name'.
    """
    fig, ax = plt.subplots(figsize =(24, 6))
    # plot the count for each value
    sns.countplot(x = col, data = df, ax= plt.subplot(131))
    # Plot survival occurrences per value
    ax = sns.countplot(x = col, hue = 'target_name', data = df, ax = plt.subplot(132))
    plt.xlabel(col, fontsize = 14)
    plt.ylabel('Count', fontsize = 14)
    # Title for whole figure
    plt.suptitle('Plots for ' +col, fontsize = 18)
    # Show the fraction of passengers that survived per value
    sns.pointplot(x = col, y= 'Survived', data = df, ax = plt.subplot(133))
    # X-axis Label
    plt.xlabel(col, fontsize=14);
    # Y-axis Label
    plt.ylabel('Survived %', fontsize=14);
    plt.show()
# -
# ### plots for passengers class
#
# We can clearly see that passengers belonging to a higher class had a better survival rate.
# The survival rate of passengers belonging to the upper class (Pclass = 1) is around 62%, while those in the lower class (Pclass = 3) had only a 25% survival rate.
plot_discrete_feature(train, 'Pclass')
# ### plots for passengers sex
# 72% of female passengers survived while only 19% of males survived. The article below discusses this observation along with the class-based survival pattern we saw earlier:
# https://www.washingtonpost.com/opinions/women-and-children-did-go-first-on-titanic/2012/04/19/gIQAgSaugT_story.html?noredirect=on&utm_term=.341673775648
plot_discrete_feature(train, 'Sex')
# ### Plots for Count of siblings / spouses
# Maximum passengers had no siblings or spouses.
# Passengers with 1 sibling or spouse had best survival rate of 53%
plot_discrete_feature(train, 'SibSp')
# ### Plots for Count of parents/Children
# Maximum passengers were travelling without children or parents
plot_discrete_feature(train, 'Parch')
# #### Plots for port of Embarkation
# Most passengers embarked from Southampton, and their survival rate was also the lowest at around 34%
#
# Passengers who embarked from Cherbourg had highest survival rate of 55%
plot_discrete_feature(train, 'Embarked')
# ## Display Continous Features
# +
def display_continous_features(df, col):
    """Show distribution plots for a continuous feature vs. survival.

    Draws four panels: overall density, per-class density (died vs.
    survived), a box plot, and a violin plot split by outcome. Rows where
    *col* is missing are dropped first so the density estimates are valid.

    Parameters
    ----------
    df : pd.DataFrame
        Must contain *col*, 'Survived' (0/1) and 'target_name' columns.
    col : str
        Name of the continuous column to visualise.
    """
    df = df[[col, 'Survived', 'target_name']].copy()
    df.dropna(subset=[col], inplace =True)
    fig = plt.figure(figsize=(16, 10))
    # Display Density Plot
    sns.distplot(df[col], color = 'b', ax = plt.subplot(221) )
    plt.ylabel('Density')
    # Display Density Plot for survived vs died
    sns.distplot(df[df['Survived'] == 0][col], color = 'r', label = 'died', ax = plt.subplot(222) )
    sns.distplot(df[df['Survived'] == 1][col], color = 'b', label = 'survived', ax = plt.subplot(222))
    plt.legend(loc = 'best')
    plt.ylabel('Density survival vs died')
    # Display Box Plot for feature
    sns.boxplot(x = col , data = df, ax = plt.subplot(223))
    # Display Violin Plot for survived vs died.
    # Bug fix: this previously plotted the module-level `train` DataFrame
    # instead of the cleaned local `df`, silently ignoring both the
    # function argument and the NaN filtering above.
    sns.violinplot(x = col , y = 'target_name', data = df, ax = plt.subplot(224))
    plt.show()
# -
# ### Plots for Age
# Children under age 5 had better survival rate while those between 15 to 30 had worse survival rate
#
# The box plot interpretation can be checked at https://en.wikipedia.org/wiki/Interquartile_range#/media/File:Boxplot_vs_PDF.svg
display_continous_features(train, 'Age')
# ### Plots for Fare
# There are some outliers for fares, as shown by the black dots on the box plot.
# Passengers who paid a higher fare were more likely to survive.
#
display_continous_features(train, 'Fare')
# # Data Pre-Processing
# ### Split Data into Training and Validation Sets
# +
from sklearn.model_selection import train_test_split
# Hold out 25% of the labelled data for validation, stratified on the
# target so both splits keep the same died/survived ratio.
X = train[features].copy()
y = train['Survived']
X_test = test[features].copy()
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size = 0.25, stratify = y, random_state = random_state)
print('Training Shape {}, Validation shape {}, Test Shape {}'.format(X_train.shape, X_valid.shape, X_test.shape))
# -
# ### Impute Missing Values
missing_data(X_train)
# +
from sklearn.impute import SimpleImputer
# Replace missing values for Cabin with the placeholder string 'NA'
X_train['Cabin'].fillna('NA', inplace = True)
X_valid['Cabin'].fillna('NA', inplace = True)
X_test['Cabin'].fillna('NA', inplace = True)
# Replace missing values for Embarked with the training-set mode
imp_mode = SimpleImputer(strategy = 'most_frequent')
X_train['Embarked'] = imp_mode.fit_transform(X_train['Embarked'].values.reshape(-1,1))
X_valid['Embarked'] = imp_mode.transform(X_valid['Embarked'].values.reshape(-1,1))
X_test['Embarked'] = imp_mode.transform(X_test['Embarked'].values.reshape(-1,1))
# Replace missing values for Age with the training-set median.
# Bug fix: the validation and test splits previously called fit_transform,
# which re-fit the imputer on each split (data leakage and inconsistent
# fill values); only the training split should be fit.
imp_median = SimpleImputer(strategy= 'median')
X_train['Age'] = imp_median.fit_transform(X_train['Age'].values.reshape(-1,1))
X_valid['Age'] = imp_median.transform(X_valid['Age'].values.reshape(-1,1))
X_test['Age'] = imp_median.transform(X_test['Age'].values.reshape(-1,1))
# Replace the missing Fare values in the test set with the test-set Fare
# median (a separate imputer so the Age median above is not clobbered).
imp_fare = SimpleImputer(strategy= 'median')
X_test['Fare'] = imp_fare.fit_transform(X_test['Fare'].values.reshape(-1,1))
# -
missing_data(X_train)
# ### One Hot Encode categorical features
# We will be using library category_encoders for one hot encoding: http://contrib.scikit-learn.org/categorical-encoding/#. This page also have installation for the package using conda command
#
# If you have scikit-learn v0.20.0 its recommneded to use OneHotEncoder:http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html
X_train.head()
import category_encoders as ce
# One-hot encode the categorical columns. use_cat_names keeps readable
# column names; drop_invariant removes columns that are constant after
# encoding. The encoder is fit on the training split only.
ohe = ce.OneHotEncoder(cols = cat_cols, drop_invariant= True, use_cat_names = True)
X_train = ohe.fit_transform(X_train)
X_valid = ohe.transform(X_valid)
X_test = ohe.transform(X_test)
print('Training Shape {}, Validation shape {}, Test Shape {}'.format(X_train.shape, X_valid.shape, X_test.shape))
X_train.head()
# ### Normalize Numeric Data
# +
from sklearn.preprocessing import StandardScaler
# Standardize numeric columns to zero mean / unit variance using training
# statistics only (no leakage into validation/test).
sc = StandardScaler()
X_train[num_cols] = sc.fit_transform(X_train[num_cols])
X_valid[num_cols] = sc.transform(X_valid[num_cols])
X_test[num_cols] = sc.transform(X_test[num_cols])
# -
X_train[num_cols].head()
# ## Model Training Utilities
# +
from sklearn.metrics import accuracy_score
def print_logs(model):
    """Print validation accuracy, dataset shapes and the model's params.

    Relies on the notebook-level X_train/X_valid/X_test/y_valid splits.
    """
    # Bug fix: `y_pred_val` was never defined anywhere in the notebook, so
    # calling this function raised NameError. Compute the validation
    # predictions from the fitted model instead.
    y_pred_val = model.predict(X_valid)
    print('Validation Accuracy %0.5f' % accuracy_score(y_valid, y_pred_val))
    print('Training Shape {}, Validation shape {}, Test Shape {}'.format(X_train.shape, X_valid.shape, X_test.shape))
    print(model.get_params())
def write_sub(y_pred, filename):
    """Write a Kaggle submission CSV pairing test PassengerIds with *y_pred*.

    Relies on the notebook-level `test` DataFrame for the id column.
    """
    submission = pd.DataFrame({
        'PassengerId': test['PassengerId'],
        'Survived': y_pred,
    })
    submission.to_csv(filename, index = False)
# -
# ## Random Forest
# ### Random Forest Baseline
# +
from sklearn.ensemble import RandomForestClassifier
# Train a baseline random forest on the preprocessed features.
model = RandomForestClassifier(random_state = random_state, n_estimators= 100)
model.fit(X_train, y_train)
# Training accuracy (optimistic — the forest has seen these rows).
y_pred = model.predict(X_train)
print('Training Accuracy %0.5f' %accuracy_score(y_train, y_pred))
# Predict on the held-out validation set for an honest estimate.
y_pred = model.predict(X_valid)
print('Validation Accuracy %0.5f' % accuracy_score(y_valid, y_pred))
# -
# #### Predict On Test Set and Write Submission File
# Go to https://www.kaggle.com/c/titanic and submit your predictions to check your accuracy score
# Predict on the Kaggle test set and write the submission file.
y_pred = model.predict(X_test)
write_sub(y_pred, 'rf_sub.csv')
| Titanic/TitanicEda_Solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf
from tqdm import tqdm
from Client import Clients
import os
import numpy as np
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def buildClients(num, local_client_number=1):
    """Create a `Clients` collection wrapping the shared TF model.

    Parameters:
        num: number of top-level federated clients.
        local_client_number: number of local sub-clients per client.
    """
    learning_rate = 0.0001
    num_input = 32  # image shape: 32*32
    num_input_channel = 3  # image channel: 3
    # NOTE(review): despite the original "Cifar-10 (0-9)" comment, only 3
    # output classes are configured here — confirm against the dataset.
    num_classes = 3
    #create Client and model
    return Clients(input_shape=[None, num_input, num_input, num_input_channel],
                  num_classes=num_classes,
                  learning_rate=learning_rate,
                  clients_num=num,
                  local_client_number=local_client_number
                  )
def run_global_test(client, global_vars, test_num):
    """Load *global_vars* into the client model, evaluate, and return (acc, loss).

    NOTE(review): `ep` is read from the enclosing notebook scope (the
    training loop's epoch counter); calling this function outside the loop
    relies on that global still being bound.
    """
    client.set_global_vars(global_vars)
    acc, loss = client.run_test(test_num)
    print("[epoch {}, {} inst] Testing ACC: {:.4f}, Loss: {:.4f}".format(
        ep + 1, test_num, acc, loss))
    return acc, loss
# +
#### SOME TRAINING PARAMS ####
CLIENT_NUMBER = 2  # top-level federated clients (overwritten by the sweep below)
LOCAL_CLIENT_NUMBER = 2  # sub-clients per client (overwritten by the sweep below)
CLIENT_RATIO_PER_ROUND = 1  # fraction of clients sampled each round
LOCAL_CLIENT_RATIO_PER_ROUND = 1  # fraction of local sub-clients sampled
epoch = 100  # communication rounds per configuration
# +
#### CREATE CLIENT AND LOAD DATASET ####
client = buildClients(CLIENT_NUMBER, LOCAL_CLIENT_NUMBER)
import json
class MyEncoder(json.JSONEncoder):
    """JSON encoder that understands NumPy scalar and array types."""

    def default(self, obj):
        # Map NumPy types onto their native-Python equivalents; anything
        # else falls through to the base class (which raises TypeError).
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super(MyEncoder, self).default(obj)
# +
#### BEGIN TRAINING ####
# Snapshot of the freshly initialised weights; restored before every
# (client_number, lower_client_number) configuration so each sweep entry
# starts from the same initial model.
global_vars3 = client.get_client_vars()
for client_number in [5, 10, 15]:
    for lower_client_number in [2, 3, 4, 5]:
        print(client_number, lower_client_number)
        CLIENT_NUMBER = client_number
        LOCAL_CLIENT_NUMBER = lower_client_number
        # Key under which this configuration's history is stored on disk.
        key = str(client_number) + "_" + str(lower_client_number)
        global_vars = client.get_client_vars()
        client.set_global_vars(global_vars3)
        for ep in range(epoch):
            random_clients = client.choose_clients(CLIENT_RATIO_PER_ROUND)
            global_weights2 = client.get_client_vars()
            # NOTE(review): every entry aliases the same list object; this
            # is only safe because entries are replaced (never mutated
            # through the alias) below.
            init_local_weights = [global_weights2 for _ in range(CLIENT_NUMBER)]
            # --- Stage 1: hierarchical local round per top-level client ---
            for client_id in random_clients:
                local_random_clients = client.choose_local_clients(LOCAL_CLIENT_RATIO_PER_ROUND)
                local_client_vars_list = [[] for _ in range(LOCAL_CLIENT_NUMBER)]  # NOTE(review): unused
                client_local_vars_sum = None
                for local_client_id in local_random_clients:
                    client.set_global_vars(init_local_weights[client_id])
                    client.train_epoch(cid=client_id, lcid=local_client_id)
                    current_client_vars = client.get_client_vars()
                    # sum it up (in-place add accumulates into the first
                    # sub-client's arrays)
                    if client_local_vars_sum is None:
                        client_local_vars_sum = current_client_vars
                    else:
                        for cv, ccv in zip(client_local_vars_sum, current_client_vars):
                            cv += ccv
                # average_weights = client_local_vars_sum / local_random_clients
                average_weights = []
                for var in client_local_vars_sum:
                    average_weights.append(var / len(local_random_clients))
                init_local_weights[client_id] = average_weights
            # We are going to sum up active clients' vars at each epoch
            client_vars_sum = None
            # --- Stage 2: one more pass per client, then federated averaging ---
            for client_id in random_clients:
                # Restore this client's locally averaged vars to the model
                client.set_global_vars(init_local_weights[client_id])
                # train one client
                client.train_epoch(cid=client_id, lcid=0)
                # obtain current client's vars
                current_client_vars = client.get_client_vars()
                # sum it up
                if client_vars_sum is None:
                    client_vars_sum = current_client_vars
                else:
                    for cv, ccv in zip(client_vars_sum, current_client_vars):
                        cv += ccv
            # obtain the avg vars as global vars
            global_vars = []
            for var in client_vars_sum:
                global_vars.append(var / len(random_clients))
            # run test on 80 held-out instances
            acc, loss = run_global_test(client, global_vars, test_num=80)
            # Append this round's (acc, loss) to the on-disk history.
            # NOTE(review): acc_loss.txt must already exist and contain
            # valid JSON (at least "{}") or the load below raises.
            with open("acc_loss.txt", "r") as f:
                import json
                dic = json.load(f)
            with open("acc_loss.txt", "w") as f:
                import json
                small_dic = dic.get(key, {})
                small_dic[ep] = [acc, loss]
                dic[key] = small_dic
                json.dump(dic, f, cls=MyEncoder)
# +
#### FINAL TEST ####
# Evaluate the last aggregated global model once more on 80 instances.
run_global_test(client, global_vars, test_num=80)
# -
# +
import matplotlib.pyplot as plt
def plot_graph(loss, acc, round):
    """Plot test-accuracy and loss curves for every sweep configuration.

    Each entry of *acc* / *loss* is a list of per-round values with the
    configuration key (e.g. "5_2") appended as its last element; the key
    is split into the "<clients> LC, <local clients> ILC" legend label.

    NOTE(review): the `round` parameter (which shadows the builtin) is
    never used inside the function.
    """
    # make a figure
    fig = plt.figure(figsize=(16,8))
    # subplot: accuracy (left panel)
    ax1 = fig.add_subplot(121)
    # ax1.plot(loss,label='global_loss')
    for acc_ in acc:
        ax1.plot(acc_[:-1],label="%s LC, %s ILC"%(tuple(acc_[-1].split("_"))))
    ax1.set_xlabel('Communication Rounds')
    ax1.set_ylabel('Test Accuracy')
    ax1.set_title('Acc ')
    ax1.legend()
    ax2 = fig.add_subplot(122)
    # subplot: loss (right panel)
    for loss_ in loss:
        ax2.plot(loss_[:-1],label="%s LC, %s ILC"%(tuple(loss_[-1].split("_"))))
    ax2.set_xlabel('Communication Rounds')
    ax2.set_ylabel('Loss')
    ax2.set_title('Loss')
    ax2.legend()
    plt.tight_layout()
    plt.savefig("round_info/aslam.eps",format="eps")
# -
#
# Load the recorded accuracy/loss history and plot it per configuration.
with open("acc_loss.txt", "r") as f:
    import json
    dic = json.load(f)
# Bug fix: `sorted(lis)` returns a NEW list and its result was discarded,
# so the configuration keys stayed in arbitrary dict order. Assign the
# sorted list instead.
lis = sorted(dic.keys())
loss = []
acc = []
for i in lis:
    # Same fix for the per-configuration epoch keys: the old code called
    # sorted() on a dict view and threw the result away. Note the keys are
    # strings, so this is a lexicographic sort — TODO confirm whether a
    # numeric sort (key=int) is intended for epochs >= 10.
    lis2 = sorted(dic[i].keys())
    lis3 = [dic[i][j][1] for j in lis2]
    lis3.append(i)
    loss.append(lis3[:])
    lis4 = [dic[i][j][0] for j in lis2]
    lis4.append(i)
    acc.append(lis4[:])
plot_graph(loss, acc, lis)
| src/federated_learning_covid_19.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # How to use R packages in Python
#
# As a data scientist working with biological data, the programming language I use on a daily basis is Python. However, I sometimes find myself needing to use an R package, such as those provided by [Bioconductor](https://www.bioconductor.org/). For me this is most often the excellent [MSstats](https://msstats.org/) R package for analyzing mass spectrometry proteomics data, developed by Meena Choi ([@MeenaChoi](https://twitter.com/MeenaChoi)) and the Olga Vitek lab ([@olgavitek](https://twitter.com/olgavitek)). Although it has always been possible to write R scripts alongside my Python scripts and Jupyter notebooks, I find it cumbersome to switch back and forth between them, particularly when it involves generating unnecessary intermediate files.
#
# In this post I'll show you how I use the MSstats R package from within a Python script or Jupyter notebook, providing an example for how you can use the occasional R package in your own analyses. This post will assume that you're comfortable programming in Python and that you're familiar with the Pandas, NumPy, and Matplotlib Python packages.[^1] Additionally, I'll assume that you have some familiarity with R programming, since you're reading this post. I'll be using mass spectrometry proteomics data as an example, but understanding it is not critical for learning from this post.
#
# [^1]:If these are not familiar and you want to learn, I recommend the ["Plotting and Programming in Python"](http://swcarpentry.github.io/python-novice-gapminder/) Software Carpentry course.
#
# This entire post is available as a Jupyter notebook on GitHub: https://github.com/wfondrie/msstats-demo
#
# ## Setup
# If you want to follow along with this post, you'll need to install a few things. I use conda as my package manager for Python and R whenever possible.[^2] First, we'll create a new conda environment, `msstats-demo`, and install the necessary packages from the [bioconda](https://bioconda.github.io/) and [conda-forge](https://conda-forge.org/) channels. I've created a conda environment YAML file that looks like this:
#
# [^2]: If you need to install conda, I'd recommend the [miniconda distribution](https://docs.conda.io/en/latest/miniconda.html).
#
# ```yaml
# # https://github.com/wfondrie/msstats-demo/environment.yaml
# name: msstats-demo
# channels:
# - bioconda
# - conda-forge
# - defaults
# dependencies:
# - ppx>=1.2.5 # For downloading data
# - bioconductor-msstats==4.2.0 # The MSstats R package
# - notebook # Jupyter notebook
# - ipywidgets # For progress bars
# - pandas # DataFrames for tabular data
# - numpy # To do math
# - matplotlib # The de facto standard Python plotting library
# - seaborn # Make matplotlib plots pretty
# - rpy2 # For using R packages in Python!
# ```
#
# Let's start by creating the conda environment from this file:
# ```bash
# conda env create -f https://raw.githubusercontent.com/wfondrie/msstats-demo/main/environment.yaml
# ```
#
# Then activate our new conda environment:
# ```bash
# conda activate msstats-demo
# ```
#
# Now let's fire up Python. If you want to use a [Jupyter notebook](https://jupyter.org/), you can launch it with:
# ```bash
# jupyter notebook
# ```
#
# Then click `New` → `Python 3 (ipykernel)` to open a new notebook.
#
# ## Getting started
#
# For this post, we're going to reproduce an analysis of the the dataset from [Selevsek et al](https://doi.org/10.1074/mcp.M113.035550) as performed in the [MassIVE.quant paper](https://doi.org/10.1038/s41592-020-0955-0). We'll use Python to download and read the data, process that data with the MSstats R package, then recreate the volcano plot in Figure 2j with Python. Let's start by loading the libraries we'll need into our Python session:
# +
import ppx
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# These will let us use R packages:
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri
# Convert pandas.DataFrames to R dataframes automatically.
pandas2ri.activate()
# -
# The [rpy2](https://rpy2.github.io/) Python package will perform the magic that allows us to use R packages in our Python session. The `importr` function gives us the power to import R packages, and `pandas2ri`---along with the subsequent `pandas2ri.activate()` line---will allow our Pandas DataFrames to be automatically converted into R data frames when used as input to an R function.
#
# We also need to set up a plotting style that looks nice on my website:
# +
# Set plotting theme: a two-color palette plus matching axis/tick colors.
primary = "#404040"
accent = "#24B8A0"
pal = sns.color_palette([primary, accent])
# All of these rc keys get the same primary color.
style = {key: primary for key in (
    "axes.edgecolor",
    "axes.labelcolor",
    "text.color",
    "xtick.color",
    "ytick.color",
)}
sns.set_palette(pal)
sns.set_context("talk")
sns.set_style("ticks", style)
# -
# Now let's download the dataset from the [MassIVE mass spectrometry data repository](https://massive.ucsd.edu/ProteoSAFe/static/massive.jsp) under the accession RMSV000000251. We'll use the [ppx Python package](https://ppx.readthedocs.io) to do this:
# +
# Locate the MassIVE reanalysis project and fetch the two files we need.
proj = ppx.find_project("RMSV000000251")
# The proteomics data:
quant_file = "2019-06-03_mnchoi_bb4aeafb/quant/Selevsek2015-MSstats-input-90nodup-i.csv"
# The annotation file:
annot_file = "2019-06-03_mnchoi_bb4aeafb/metadata/Selevsek2015_DIA_Skyline_all_annotation.csv"
# The local paths to them:
quant_path, annot_path = proj.download([quant_file, annot_file], silent=True)
# -
# We can then read the proteomics data into our Python session using Pandas:
# dtype override keeps StandardType as strings — presumably to stop pandas
# inferring float for sparsely populated chunks; TODO confirm.
quant_df = pd.read_csv(quant_path, dtype={"StandardType": str})
quant_df.head() # View the first five rows
# We also need to read the annotation data using Pandas:
annot_df = pd.read_csv(annot_path)
annot_df.head()
# Finally, we need to create a contrast matrix that will define the comparisons we want to test with MSstats:
# +
# Build the MSstats contrast matrix. Each row compares one later timepoint
# against T0: -1 on the T0 column, +1 on the timepoint of interest, and 0
# elsewhere.
cols = annot_df["Condition"].unique()
cols.sort()
rows = ["T1-T0", "T2-T0", "T3-T0", "T4-T0", "T5-T0"]
contrasts = [
    [-1] + [1 if position == timepoint else 0 for position in range(5)]
    for timepoint in range(5)
]
contrast_df = pd.DataFrame(contrasts, columns=cols, index=rows)
contrast_df
# -
# ## Run MSstats in Python using rpy2
# Now for the fun part: let's run MSstats without leaving our Python session. Just like if we were using R directly, we first need to import the libraries that we'll be using. This looks a little different using rpy2, but ultimately we assign the imported R package to a variable that we can use like any other Python package. Here, we import MSstats:
# Import the MSstats R package; the returned object exposes its R
# functions as Python attributes.
MSstats = importr("MSstats")
# Next, we'll perform our MSstats data processing. Note that each of these functions actually call the underlying MSstats R package. The rpy2 Python package does all of the work tranforming our Pandas DataFrames (`quant_df`, `annot_df`, and `contrast_df`) into R data frames that MSstats can use. When each function is complete, it returns an R object. Fortunately, we've setup rpy2 to automatically convert R data frames back into Pandas DataFrames, allowing us to use the results seamlessly. The final output returned by MSstats in this analysis (`results` below) will be a Pandas DataFrame containing the p-values for each protein for contrasts that we specified in our contrast matrix (`contrast_df`).
#
# If you're following along, this next step may take a few minutes. Go ahead and enjoy a cup of coffee or your favorite beverage while it's running.
# +
# Convert the Skyline export into MSstats' long format, dropping proteins
# supported by a single feature.
raw = MSstats.SkylinetoMSstatsFormat(
    quant_df,
    annotation=annot_df,
    removeProtein_with1Feature=True,
    use_log_file=False,
)
# Normalize, impute and summarize feature intensities per protein/run.
processed = MSstats.dataProcess(
    raw,
    use_log_file=False,
)
# Note that the 'contrast_matrix' argument below
# is actually 'contrast.matrix' in the MSstats
# R package. rpy2 swaps '.' for '_' so that it
# becomes a valid Python variable name.
results, *_ = MSstats.groupComparison(
    contrast_matrix=contrast_df,
    data=processed,
    use_log_file=False,
)
# -
# Now that the process is complete, we can verify that `results` is a Pandas DataFrame containing our MSstats results:
# + hidePrompt=true
results.head()
# -
# We've successfully run MSstats without leaving our Python session!
# + [markdown] hideOutput=true
# ## Reproduce a figure panel from the paper
# -
# The only task left is reproduce a panel from a figure in the MassIVE.quant paper. We'll recreate the volcano plot from Figure 2j and see how close our results are to the original.[^3] However, we'll make this plot using [seaborn](https://seaborn.pydata.org/) and [matplotlib](https://matplotlib.org/) in our Python session!
#
# [^3]: I haven't included the original figure for copyright reasons.
# +
# Filter the results for the points we want to plot. Dropping
# adj.pvalue == 0 keeps -log10 finite below; restricting to the T5-T0
# contrast matches Figure 2j of the paper.
results = results.loc[results["adj.pvalue"] > 0, :]
results = results.loc[results["Label"] == "T5-T0", :]
results["accepted"] = results["adj.pvalue"] <= 0.05
results["neg_log_pvalue"] = -np.log10(results["adj.pvalue"])
# Get the number of up and down-regulated proteins (significant only)
n_up = ((results["log2FC"] > 0) & results["accepted"]).sum()
n_down = ((results["log2FC"] < 0) & results["accepted"]).sum()
# Create the figure
plt.figure()
# Create the Scatter plot, colored by significance
sns.scatterplot(
    data=results,
    x="log2FC",
    y="neg_log_pvalue",
    hue="accepted",
    legend=False,
    s=20
)
# Add annotations (counts placed in axes coordinates, top-left corner)
plt.text(
    x=0.02,
    y=0.98,
    s=f"Up: {n_up}\nDown: {n_down}",
    transform=plt.gca().transAxes,
    va="top",
)
# Add the horizontal line marking the 0.05 significance threshold
plt.axhline(-np.log10(0.05), linestyle="dashed", zorder=0)
# Add labeling
plt.xlabel("log2 fold change")
plt.ylabel("-log10(adjusted p-value)")
plt.title("Skyline:all")
# Set the axes limits
plt.ylim(0, 8)
plt.xlim(-4, 4)
# Show the plot
plt.show()
# -
# This looks pretty close to the original to me, particularly considering that we didn't make any attempt to match our software versions with the original analysis. The number of up and down-regulated proteins is nearly identical, with our analysis finding one fewer down-regulated protein than in the original.
#
# ## Conclusion
#
# The rpy2 Python package provides a pretty convenient way to use the occasional R package in Python, and I've shown you how to use it to run MSstats. Finally, I'll leave you with this: if you want to use the occasional Python package in R, try the [reticulate](https://rstudio.github.io/reticulate/) R package.
| msstats-demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import json
# Load the recipes-with-nutritional-info dump. Use a context manager so
# the file handle is closed deterministically (it was previously opened
# and never closed).
with open('data/recipes_with_nutritional_info.json') as file:
    data = json.load(file)
data[0]
# +
# Collect FSA traffic-light labels plus per-100g nutrient values for every
# recipe in the training partition.
nutrition_status = []
for row in data:
    if row['partition'] != 'train':
        continue
    lights = row['fsa_lights_per100g']
    record = {
        'id': row['id'],
        'fat_status': lights['fat'],
        'salt_status': lights['salt'],
        'saturates_status': lights['saturates'],
        'sugar_status': lights['sugars'],
    }
    record.update(row['nutr_values_per100g'])
    nutrition_status.append(record)
nutrition_status
# -
nutrition_status_df = pd.DataFrame(nutrition_status)
nutrition_status_df
nutrition_status_df.to_csv('data/nutrition_status.csv',index=False)
df = pd.read_csv('data/train_image_data.csv')
df
df_ids = df['id']
# Keep only recipes that also have a training image.
common_data = nutrition_status_df.loc[nutrition_status_df['id'].isin(df_ids)]
common_data
# NOTE(review): this overwrites the nutrition_status.csv written above
# with the filtered subset — confirm the full dump is not needed later.
common_data.to_csv('data/nutrition_status.csv',index=False)
df = pd.read_csv('data/output/resnet/similarity_knn.csv')
df_ids = nutrition_status_df['id']
# Test rows whose top-1 retrieved neighbour has nutrition info.
test_data = df.loc[df['top1_id'].isin(df_ids)]
test_data
import shutil, os
test_data[['test_id','test_image_id']].head(10).to_numpy()[0][0]
# Copy the first 10 qualifying query images into the resnet query folder.
for i in test_data[['test_id','test_image_id']].head(10).to_numpy():
    shutil.copy('data/test/'+i[0]+'_'+i[1], 'data/querydata/resnet/')
| handling_nutritional_info.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Made by [Sharkbyteprojects](https://github.com/sharkbyteprojects)
# [ON GITHUB](https://github.com/Sharkbyteprojects/IRIS-ML_and_Deep-Learning)
# [Other Example](https://mybinder.org/v2/gh/Sharkbyteprojects/IRIS-ML_and_Deep-Learning/master?filepath=csv%20based%2F%3D%20or%20not.ipynb)
# Need
#
#
# - Keras
# - SKLEARN
# - numpy
# - seaborn
#
from keras.layers import Input
import numpy as np
from keras.layers import Dense
# Functional-API graph: 4 iris measurements in, 3-unit dense layer out.
inputs=Input(shape=(4,))
fc=Dense(3)(inputs)
from keras.models import Model
# Bug fix: modern Keras removed the legacy `input=`/`output=` keyword
# aliases; the functional Model constructor takes `inputs=`/`outputs=`.
model=Model(inputs=inputs,outputs=fc)
# Zeige infos über Model:
model.summary()
# Compile and add new Neurons
# Compile the small 3-unit model.
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["accuracy"])
# Extra layers: an 8-unit softmax feeding the final 3-class softmax.
# NOTE(review): these are only wired into a compiled Model in a later
# cell; the compile call above applies to the 3-unit model only.
predictionss=Dense(8,activation="softmax")(fc)
predictions=Dense(3,activation="softmax")(predictionss)
# Test of Model: prediction from the (still untrained) 3-unit model.
az=model.predict(np.array([[5.1,5.3,1.4,0.2]]))
# Recompile and Retry
#
# Bug fix: use the supported `inputs=`/`outputs=` keywords (the legacy
# `input=`/`output=` aliases were removed from Keras).
model=Model(inputs=inputs,outputs=predictions)
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["accuracy"])
model.summary()
# Prediction from the (still untrained) deeper model, for later comparison.
aza=model.predict(np.array([[5.1,5.3,1.4,0.2]]))
# train it
#
from sklearn import datasets
iris =datasets.load_iris()
X=np.array(iris.data)
y=np.array(iris.target)
X.shape, y.shape
from keras.utils.np_utils import to_categorical
# One-hot encode the 3 iris classes for categorical_crossentropy.
y=to_categorical(y,3)
from sklearn.model_selection import train_test_split
# Stratified 80/20 split keeps the class balance in both sets.
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.20,random_state=42,stratify=y)
# preparation complete; start training:
#
model.fit(X_train, y_train,epochs=500,validation_split=0.3)
# Test
#
# Compare predictions: untrained 3-unit model (az), untrained deep model
# (aza), and the deep model after training.
print("1. 1 Neuron")
print(az)
print("2. 2 Neuron:")
print(aza)
print("3. Train")
print(model.predict(np.array([[5.1,5.3,1.4,0.2]])))
# Evaluate on train and held-out test data (values reported as percent).
train_loss, train_accuracy=model.evaluate(X_train, y_train)
round(train_loss*100,1), round(train_accuracy*100,1)
test_loss, test_accuracy=model.evaluate(X_test, y_test)
round(test_loss*100,1), round(test_accuracy*100,1)
# ### HOW TO:
# Using This Iris Framework (Data in MM)
print(model.predict(np.array([[5.1,5.3,1.4,0.2]])))#RANGE: SEPAL LENGHT, SEPAL WIDTH,PETAL LENGHT, PETAL WIDTH
# ---
#
# ## Vergleich mit ML
# Classic-ML comparison: k-nearest-neighbors on the same iris data.
Xy=iris.data
# Bug fix: `yy` was assigned the feature matrix (iris.data) instead of the
# labels. It is unused below (the split reuses X and y), but the wrong
# assignment was misleading.
yy=iris.target
Xy_sepal_lenght=Xy[:,0]
Xy_sepal_width=Xy[:,1]
Xy_petal_lenght=Xy[:,2]
Xy_petal_width=Xy[:,3]
from sklearn.model_selection import train_test_split
# NOTE(review): y is one-hot encoded at this point, so the k-NN below is
# fit against one-hot targets — confirm this is intended.
Xy_train, Xy_test, yy_train, yy_test=train_test_split(X,y,test_size=0.4)
#USING NEAREST NEIGHBORS (k=1)
from sklearn import neighbors
clf=neighbors.KNeighborsClassifier(1)
clf.fit(Xy_train,yy_train)
print(clf.score(Xy_train,yy_train))
print(clf.score(Xy_test,yy_test))
# ## Vergleich
# Compare the trained network's class probabilities against the k-NN output.
print("Deep Learning:")
deplearn=model.predict(np.array([[6.3,2.7,5.5,1.5]]))
print("Probability for zero in percent")
print(round(deplearn[0][0]*100,1))
print("Probability for one in percent")
print(round(deplearn[0][1]*100,1))
print("Probability for two in percent")
print(round(deplearn[0][2]*100,1))
print("ML:")
clf.predict([[6.3,2.7,5.5,1.5]])
# ### distribution of data
import seaborn as sns
# NOTE(review): jointplot with two positional arrays is the legacy
# seaborn signature — newer versions require x=/y= keywords.
sns.jointplot(Xy_sepal_lenght, Xy_petal_lenght)
sns.jointplot(Xy_sepal_width, Xy_petal_width)
| DEEP LEARNING.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Pymaceuticals Homework - Observations and Insights
#
# !!!!PLEASE READ!!!!
# Summary analysis with observations and insights of the study:
# create 3 observations and insights
# 1. There is a positive correlation between the weight of the mice and the tumor size. The correlation coefficient of .84 indicates a strong linear correlation. Therefore, we might want to keep the mice on a healthy diet and provide exercise to help them decrease tumor size.
# 2. Capomulin seems to be an effective drug regimen to treat the cancer. The tumor size in mouse l509 decreased as time went on.
# 3. Of the 4 drug regimens we analyzed, Capomulin and Ramicane were the most effective at decreasing tumor size. The least effective were the other two drugs Infubinol and Ceftamin. Therefore, I would recommend we do further studies or advance the two effective drugs to the next stage of trials.
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
# Combine the data into a single dataset.
# Outer merge on "Mouse ID" keeps every row from both files, even for a
# mouse that appears in only one of them.
df = pd.merge(mouse_metadata, study_results, how="outer", on="Mouse ID")
# Display the data table for preview
df.head()
df.shape
# -
# Checking the number of mice via calling 'mouse id' column
df['Mouse ID'].value_counts()
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
# Optional: Get all the data for the duplicate mouse ID.
index_df = df.set_index('Mouse ID')
# Mouse 'g989' has duplicate Mouse ID/Timepoint rows; inspect all of them.
index_df.loc['g989']
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
clean_df = index_df.drop(['g989'])
# Checked that the rows have decreased by 13 (the number of rows for mouse 'g989')
# Used .shape to check the rows/columns in the clean_df
clean_df.shape
# Created a dataframe that reset the index 'Mouse ID' as a column just in case we need it later.
# Used .reset_index()
clean_df2 = clean_df.reset_index()
# Checking the number of mice in the clean DataFrame.
clean_df2['Mouse ID'].value_counts()
# ## Summary Statistics
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
tumor = clean_df2.groupby(['Drug Regimen'])
# This method is the most straightforward, creating multiple series and putting them all together at the end.
tumor_mean = tumor['Tumor Volume (mm3)'].mean()
tumor_med = tumor['Tumor Volume (mm3)'].median()
tumor_var = tumor['Tumor Volume (mm3)'].var()
tumor_std = tumor['Tumor Volume (mm3)'].std()
tumor_sem = tumor['Tumor Volume (mm3)'].sem()
# Assemble the five per-regimen series into one table (index = Drug Regimen).
tumor_df = pd.DataFrame({
    "Mean": tumor_mean,
    "Median" : tumor_med,
    "Variance" : tumor_var,
    "Standard Deviation" : tumor_std,
    "SEM" : tumor_sem
})
tumor_df
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# This method produces everything in a single groupby function
# used function .agg() on the Tumor panda series and applied the aggregation functions required
tumor2 = clean_df2.groupby(['Drug Regimen'])['Tumor Volume (mm3)'].agg(['mean', 'median', 'var', 'std', 'sem'])
tumor2
# -
# ## Bar and Pie Charts
# Generate a bar plot showing the total number of mice for each treatment throughout the course of the study using pandas.
timepoint = clean_df2.groupby(['Drug Regimen'])['Timepoint'].count() #do a groupby and count for each drug regimen
# plotted a bar chart using .plot.bar() and sorted the values so they are in descending order. Chained in one line of code
timepoint.sort_values(ascending=False).plot.bar()
plt.show()
# Generate a bar plot showing the total number of mice for each treatment throughout the course of the study using pyplot.
timepoint_desc = timepoint.sort_values(ascending=False) # sorted values
plt.bar(timepoint_desc.index, timepoint_desc.values) # used pyplot function
plt.ylabel('Number of Timepoints', size=10) # define y label
plt.xlabel('Drug Regimen') # define x label
plt.xticks(rotation='vertical') # this is to rotate the x values
plt.show()
# Generate a pie plot showing the distribution of female versus male mice using pandas
# grouped by sex and then did a count
sex = clean_df2.groupby(['Sex'])['Sex'].count()
# plotted chart using pandas .plot.pie() with customizations
sex.plot.pie(startangle=185,autopct='%1.1f%%', figsize=(5, 5))
# +
# sex_df = pd.DataFrame(sex)
# sex_df
# -
# Generate a pie plot showing the distribution of female versus male mice using pyplot
# used pyplot function plt.pie() using the 'sex' groupby object and with customizations
plt.pie(sex, labels=('female', 'male'), startangle=185, autopct='%1.1f%%')
plt.ylabel('Sex') # labeled the pie chart
plt.tight_layout() # used to make it look better
plt.show()
# ## Quartiles, Outliers and Boxplots
# +
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
# Start by getting the last (greatest) timepoint for each mouse
# created a panda series by groupby of mouse id and the looking at the max timepoints of each mouse id
last_tm = clean_df2.groupby(['Mouse ID'])['Timepoint'].max()
last_tm_df = pd.DataFrame(last_tm) # turned it into a dataframe for merging
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
# merging "on" Mouse ID and Timepoint, "how" on RIGHT so that it only records the MAX timepoints
# (only rows whose Timepoint equals that mouse's max survive the right-merge)
last_df = pd.merge(clean_df2, last_tm_df, on=('Mouse ID', 'Timepoint'), how='right')
last_df
# +
# Put treatments into a list (used for plot labels and lookups below)
treatments = ["Capomulin", "Ramicane", "Infubinol", "Ceftamin"]

def _final_volumes(regimen):
    """Return the final tumor volumes (mm3) for one drug regimen as a plain list."""
    mask = last_df["Drug Regimen"] == regimen
    return last_df.loc[mask, "Tumor Volume (mm3)"].tolist()

# Vectorized boolean-mask selection replaces the original row-by-row
# iterrows() loop: same values in the same row order, far less code.
tumor_cap = _final_volumes(treatments[0])
tumor_ram = _final_volumes(treatments[1])
tumor_inf = _final_volumes(treatments[2])
tumor_cef = _final_volumes(treatments[3])

# One column of final tumor volumes per drug regimen
drug_df = pd.DataFrame({treatments[0]: tumor_cap,
                        treatments[1]: tumor_ram,
                        treatments[2]: tumor_inf,
                        treatments[3]: tumor_cef
                        })
# Per-drug extremes (used to eyeball potential outliers)
drug_df_max = drug_df.max()
drug_df_min = drug_df.min()
drug_df
# +
# Determine outliers using upper and lower bounds.
# A single loop over the four regimens replaces four near-identical
# copies of the quartile/IQR arithmetic (answering the original
# "Could this be done with a for loop?" note).
_summary_rows = []
for drug in treatments:
    quartiles = drug_df[drug].quantile([.25, .5, .75])
    lower_q = quartiles[.25]
    median = quartiles[.5]
    upper_q = quartiles[.75]
    iqr = upper_q - lower_q
    _summary_rows.append({
        'Drug': drug,
        'Lower Quartile': lower_q,
        'Upper Quartile': upper_q,
        'IQR': iqr,
        'Median': median,
        'Upper Bound': upper_q + (1.5 * iqr),   # values above are potential outliers
        'Lower Bound': lower_q - (1.5 * iqr),   # values below are potential outliers
        'Max': drug_df[drug].max(),
        'Min': drug_df[drug].min(),
    })
# Column order matches the original hand-built table exactly.
outliers = pd.DataFrame(_summary_rows)
outliers
outliers_group = outliers.groupby('Drug')
outliers_group.sum()
# -
# NOTE(review): `cap_m` is computed but never used afterwards — looks
# like leftover scratch work; kept as-is for fidelity.
cap_m = drug_df['Capomulin'] > 47.69
drug_df["Capomulin"]
print(f"There is an outlier for the drug Infubinol as a mouse has a tumor size of 36.32.")
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
# Created a list to use for the boxplot. Each one list is a list of each drug tumor data.
boxplot_list = [drug_df[treatments[0]],
                drug_df[treatments[1]],
                drug_df[treatments[2]],
                drug_df[treatments[3]]
                ]
# used plt.subplots to define size
fig1, ax = plt.subplots(figsize=(9,7))
ax.set_title('Final measured Tumor volume by Drug Regimen') #set title
ax.set_xlabel('Drug Regimen') # set x label
ax.set_ylabel('Tumor Vol (mm3)') #set y label
# used ax.boxplot function and ran the parameter boxplot_list to create the boxplot
# (0 = no notches; 'gD' marks outlier fliers as green diamonds)
ax.boxplot(boxplot_list, 0, 'gD')
plt.xticks([1,2,3,4], treatments) # set labels of each boxplot
plt.savefig('boxplot')
plt.show()
# ## Line and Scatter Plots
# +
# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin
# created a dataframe with specific mouse using the .loc() function and a boolean to find l509 data
line_df = clean_df2.loc[clean_df2["Mouse ID"] == "l509",:]
line_df
# defined the x axis list by calling the timepoints from the l509 dataframe
x_axisTP = line_df["Timepoint"]
# defined the y axis or tumor size list by calling the tumor size from the dataframe
tumsiz = line_df["Tumor Volume (mm3)"]
# FIX: corrected the typo "treatmeant" in the user-facing chart title
plt.title('Capomulin treatment of mouse l509')
# the plot function plt.plot() with x and y values and customizations
plt.plot(x_axisTP, tumsiz,linewidth=2, markersize=12)
plt.xlabel('Timepoint (Days)')
plt.ylabel('Tumor Volume (mm3)')
plt.savefig('linechart')
plt.show()
# +
# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen
# created dataframe using .loc() of the drug regimen Capomulin using a boolean
cap_df = clean_df.loc[clean_df["Drug Regimen"] == "Capomulin",:]
# Per-mouse averages. numeric_only=True makes the intent explicit and
# keeps this working on pandas >= 2.0, where .mean() over the string
# columns (e.g. Sex, Drug Regimen) raises instead of silently dropping
# them as older pandas did.
capavg = cap_df.groupby(['Mouse ID']).mean(numeric_only=True)
# created scatter plot of the x axis (weight) and the y axis (tumor size) by calling from the capavg dataframe
plt.scatter(capavg['Weight (g)'],capavg['Tumor Volume (mm3)'])
plt.xlabel('Weight (g)')
plt.ylabel('Average Tumor Volume (mm3)')
plt.savefig('scatterplot')
plt.show()
capavg
# -
# -
# ## Correlation and Regression
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
# using the persons formula st.pearsonr() and used the capavg dataframe to pull the p value
# this is a linear value that says there's a high degree of correlation if the number is close to 1;
# no correlation if close to 0; or a negative correlation if close to -1.
corr=round(st.pearsonr(capavg['Weight (g)'],capavg['Tumor Volume (mm3)'])[0],2)
print(f"The correlation between mouse weight and average tumor volume is {corr}")
# calculating linear regression values using st.linregress() function passing parameters from the capavg dataframe
model=st.linregress(capavg['Weight (g)'],capavg['Tumor Volume (mm3)'])
model
# defined slope of regression line using the linregress results
# defined the y-intercept using the linregress results
slope = 0.9544396890241045
intercept = 21.552160532685015
# +
# plotting the regression line
# calculate the linear line plot list of values
line = slope*(capavg['Weight (g)'])+intercept
# create the scatter plot like above
plt.scatter(capavg['Weight (g)'],capavg['Tumor Volume (mm3)'])
# plot the linear regression line which is the averaged line of all the values in red
# i used the .format() function to create a label of the red line using the slope and intercept from above
plt.plot(capavg['Weight (g)'], line, 'r', label='y={:.2f}x+{:.2f}'.format(slope,intercept))
plt.xlabel('Weight(g)')
# FIX: corrected the typo "Tumore" in the user-facing axis label
plt.ylabel('Average Tumor Volume (mm3)')
# included legend with the redline slope formula using plt.legend()
plt.legend()
plt.savefig('linearregression')
plt.show()
# -
| Pymaceuticals/Pymaceuticals_jay_v2_FINALSUBMISSION.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.2.0
# language: julia
# name: julia-1.2
# ---
# # homework 4: bootstrapping
# +
# Package setup for the bootstrap homework: data handling (DataFrames/CSV),
# plotting (PyPlot), and sampling/statistics utilities.
using DataFrames
using CSV
using PyPlot
using Random
using StatsBase
using Statistics
# (optional) change the style. see styles here: https://matplotlib.org/3.1.1/gallery/style_sheets/style_sheets_reference.html
PyPlot.matplotlib.style.use("bmh")
# (optional) change settings for all plots at once, e.g. I like larger font size
rcParams = PyPlot.PyDict(PyPlot.matplotlib.rcParams)
rcParams["font.size"] = 16
# (optional) change the max width of columns of `DataFrame`
# that are displayed in the Jupyter Notebook.
ENV["COLUMNS"] = 300 # characters
# -
# ## bicycle rentals
#
# You own a popular bicycle rental company near the Golden Gate Bridge that operates during the summer only. Hundreds of people rent your bicycles each day to ride over the Golden Gate bridge and around the park, and then they return their rented bicycle. To assess your inventory for a future surge in demand, you want to know the average duration that people rent your bicycles. Because it is too tedious to record the exact times that *every* bicycle (i.e. the entire population of bicycle renters) was rented and returned, you instead select a random sample of renters over the summer and record the duration that they rented their bicycle. You also record the weather that day-- whether it was cool or hot-- because you are interested in if the weather affects the mean duration that bicycles are rented.
#
# (1) In `bicycle_rides.csv`, each row corresponds to the rental and subsequent return of a bicycle. The `:duration` column gives the duration, in minutes, that the bicycle was rented. The `:weather` column tells us whether the day was hot or cool. Read in the CSV file as a `DataFrame`, `df`, and display only the first 5 rows.
df =
# (2) how many rentals of bicycles are represented in this sample?
# (3) use the `by` function to group by the weather on the day of the rental, find the number of bike rides in the data set that took place with that kind of weather, then assign this to be a new column `nb_rides`. i.e. create a new `DataFrame`:
#
# ```
# weather nb_rides
# cool x
# hot y
# ```
# where `x` and `y` are integers denoting the number of bicycle rentals in the sample during cool and hot days, respectively.
# (4) Make a data visualization that compares the empirical distribution of the duration of bike rides on cold vs. hot days.
# * use one big plot with two panels
# * plot on panel `i` the histogram of durations of bike rides when the weather is `weathers[i]` for `i=1:2` and `weathers = ["cool", "hot"]`
# * the two panels should have an identical x- and y-axis scale for a fair comparison
# * the two histograms should use the same bin placements/widths for a fair comparison
# * label each panel with a title corresponding to whether the weather was cool or hot
# * use x- and y-axis labels
# * use the color blue for the histogram corresponding to cool days and red for hot days
# * use a `for` loop to make the plot
# +
# Skeleton for question (4): two stacked panels sharing axes. The random
# data and placeholder labels below are meant to be replaced with
# histograms of ride duration for each weather type.
fig, axs = subplots(2, 1, figsize=(10, 8),
                    sharex=true, sharey=true, tight_layout=true)
for (i, weather) in enumerate(["cool", "hot"])
    # placeholder data — replace with the durations for this weather
    some_random_numbers = rand(30)
    axs[i].hist(some_random_numbers)
    # placeholder axis label — replace with a meaningful one
    axs[i].set_ylabel("blah $i")
    axs[i].set_title(weather)
end
# -
# (5) does one distribution appear shifted to the right of the other? use the `by` function to group the `df` from (1) by weather, compute the mean duration of a bike ride in that kind of weather, and assign the result to be a new column `:mean_duration`. i.e. create a new `DataFrame`:
#
# ```
# weather mean_duration
# cool x
# hot y
# ```
# where `x` and `y` are floating points denoting the mean bicycle ride duration on cool and hot days, respectively.
# (6) write a function `run_bootstrap` that takes in three arguments:
# * `df::DataFrame` the entire bicycle rides `DataFrame` that you loaded in from (1)
# * `weather::String` either "cool" or "warm"
# * `nb_sims::Int` the number of times to draw a bootstrap sample to characterize the bootstrap empirical distribution of the sample mean duration of a bike ride = number of simulations of sampling the population
#
# and does the following inside the function:
#
# * creates a new `DataFrame`, `df_w`, with all the rows corresponding to bike rides taking place in `weather` weather.
# * draws `nb_sims` bootstrap samples of bicycle rides in `weather` weather and computes the bootstrap sample mean of bicycle ride durations in `weather` weather. `df_w` will facilitate this. store these bootstrap sample means in an `Array{Float64}`
# * return the array of the bootstrap sample means. this array is of length `nb_sims`.
#
# test out your function by calling:
# ```julia
# run_bootstrap(df, "cool", 100)
# ```
# make sure you return an array of 100 (different) values that hover around the mean from (5).
# (7) write a function `confidence_interval` that takes in the following arguments:
# * `bootstrap_means::Array{Float64}`: an array of bootstrap sample means that is output from `run_bootstrap` above.
# * `confidence::Float64`: the desired confidence level, e.g. 95.0 for a 95% confidence level.
#
# and returns a two-element array giving the interval estimate for the mean bicycle duration in a certain type of weather.
#
# test your function by, e.g.:
# ```julia
# bootstrap_means = run_bootstrap(df, "cool", 100)
# ci = confidence_interval(bootstrap_means, 95.0) # returns [42.2, 53.4] but will differ from run to run
# ```
# (8) use your `run_bootstrap` and `confidence_interval` functions to, finally:
# * visualize the bootstrap empirical distribution of the sample mean bicycle ride duration in each type of weather (for cool, hot weather) in two different panels much like in (4). again, the scales on the x- and y-axes should be the identical for a fair comparison.
# * plot as a vertical line the sample mean bicycle ride duration in that type of weather
# * highlight, as we did in lecture, the 95% confidence interval on the x-axis of the two panels, corresponding to the interval estimate for the mean bicycle ride duration in that type of weather. hint: `plot(ci, [0, 0], lw=6)` where `lw` is for line width.
# * use the color blue for lines/bars in the "cool" weather panel and the color red for the "hot" weather panel. I think it looks nice if you use `alpha=0.3` for the histograms and non-transparent colors for the vertical line/confidence interval.
# +
# Skeleton for question (8): same two-panel layout; add the bootstrap
# histograms, the sample-mean vertical line, and the 95% CI highlight
# inside the loop body.
fig, axs = subplots(2, 1, figsize=(10, 8),
                    sharex=true, sharey=true, tight_layout=true)
for (i, weather) in enumerate(["cool", "hot"])
    axs[i].set_title(weather)
end
# -
# (9) the width of the bootstrap empirical distribution of the sample mean bicycle ride duration is wider in one type of weather than the other... can you explain this?
# (10) judging from the 95% confidence intervals, what do you think would be the outcome of a permutation test of the following two hypotheses with a significance level of $\alpha=0.05$?
#
# **null hypothesis**: the mean duration of a bicycle ride on a hot day is 40 min
#
# **alternative hypothesis**: the mean duration of a bicycle ride on a hot day is not 40 min
#
# Choose one answer:<br>
# (a) null hypothesis rejected<br>
# (b) fail to reject the null hypothesis<br>
# (c) I can't tell you from looking at my plot from (8), I'd have to actually do the permutation test to be sure.
#
# A helpful resource for answering this: "Using a Confidence Interval to Test Hypotheses" [here](https://www.inferentialthinking.com/chapters/13/4/Using_Confidence_Intervals.html).
# (11) judging from the 95% confidence intervals, what do you think would be the outcome of a permutation test of the following two hypotheses with a significance level of $\alpha=0.05$?
#
# **null hypothesis**: there is no difference in the mean duration of a bicycle ride on a hot vs. cool day
#
# **alternative hypothesis**: the mean bicycle ride duration is higher on a cool day than a hot day.
#
# Choose one answer:<br>
# (a) null hypothesis rejected<br>
# (b) fail to reject the null hypothesis<br>
# (c) I can't tell you from looking at my plot from (8), I'd have to actually do the permutation test.
#
# A helpful resource for answering this: [this](https://core.ac.uk/download/pdf/82702323.pdf) and [this](https://blog.minitab.com/blog/real-world-quality-improvement/common-statistical-mistakes-you-should-avoid).
# ### etymology for curious readers
#
# [bootstrapping](https://en.wikipedia.org/wiki/Bootstrapping)
| CHE599-IntroDataScience/hw4/hw4_skeleton.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Amazon SageMaker Linear Learner algorithm
#
# #### Predicting fashion type using Zalando's Fasion-MNIST dataset (https://github.com/zalandoresearch/fashion-mnist), Amazon SageMaker Linear Learner algorithm for classification and Amazon SageMaker Automatic Model Tuning
# Copyright [2018]-[2018] Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at -
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
# 1. [Introduction](#Introduction)
# 2. [Prerequisites and Preprocessing](#Prequisites-and-Preprocessing)
# 1. [Permissions and environment variables](#Permissions-and-environment-variables)
# 2. [Data ingestion](#Data-ingestion)
# 3. [Data inspection](#Data-inspection)
# 4. [Data conversion](#Data-conversion)
# 5. [Upload training data](#Upload-training-data)
# 3. [Training the linear model](#Training-the-linear-model)
# 4. [Set up hosting for the model](#Set-up-hosting-for-the-model)
# 5. [Test the model and see how it performs](#Test-the-model-and-see-how-it-performs)
# 6. [Improving the model using Hyperparameter tuning](#Improving-the-model-using-hyperparameter-tuning)
# 7. [A-B testing models using product variants in endpoints](#A-B-testing-models-using-product-variants-in-endpoints)
# ## Introduction
#
# Welcome to our example introducing Amazon SageMaker! To drive this example, we will show you how to use Amazon SageMaker to learn how to build and deploy a model to recognize the type of apparel from images.
#
# The first step in machine learning is to get data to learn from. For our example, we will use the [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset which consists of images of apparel, classified from zero to nine representing what kind of apparel they represent.
#
# This dataset contains labeled examples ( **`x`**, `y`) where **`x`** is a high dimensional vector and `y` is a numeric label. As the dataset we are using for training contains labels for what the correct answer "should be", this example falls under the category of Supervised Learning. Once the model is deployed, we will be predicting which of 10 possible values (0,1,2 .. 9) the image falls under, the type of machine learning we are doing is Supervised Classification. Lastly, since the predictions can be one of 10 values rather than a yes/no type of response, this example demonstrates Supervised Multiclass Classification.
#
# To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on.
# ## Prequisites and Preprocessing
#
# ### Permissions and environment variables
#
# We use Amazon Simple Storage Service (Amazon S3) to store the training and model data. The S3 bucket should be within the same region as the Notebook Instance, training, and hosting.
#
# If you haven't create an S3 bucket for the use with SageMaker in your account yet you can create one now.
# +
import sagemaker
from datetime import datetime
# customize environment name to help you find your training jobs and endpoints in SageMaker console
envname = 'ai-ml-id'
# --- do not change values below ---
sess = sagemaker.Session()
bucket = sess.default_bucket()
# epoch-seconds timestamp makes the job/model names below unique per run
timestamp = datetime.utcnow().strftime("%s")
print('timestamp :: {}'.format(timestamp))
basicModelName = 'linear-model-basic-{}-{}'.format(envname, timestamp)
advancedModelName = 'linear-model-adv-{}-{}'.format(envname, timestamp)
# job names are truncated to 32 characters
basicJobName = 'linear-job-basic-{}-{}'.format(envname, timestamp)[:32]
advancedJobName = 'linear-job-adv-{}-{}'.format(envname, timestamp)[:32]
# NOTE(review): 'us-east-1x' looks like a typo for 'us-east-1' — confirm
# whether that region was deliberately disabled before changing it.
accelerator_regions = ['eu-west-1', 'us-west-2', 'us-east-1x', 'us-east-2', 'ap-northeast-1', 'ap-northeast-2']
# the prefix defines the location of our data for this exercise
prefix = 'sagemaker/{}'.format(envname)
# the IAM role is granting SageMaker access to the required resources for this exercise
from sagemaker import get_execution_role
role = get_execution_role()
# -
# ### Data ingestion
#
# Next we download the dataset and apply some preprocessing prior to training. In this examples this is done using the Jupyter Notebook. For large datasets this would be done *in situ* by Amazon Glue, Apache Spark in Amazon EMR, etc.
#
# The dataset has been made publicly available by Zalando Research [here](https://github.com/zalandoresearch/fashion-mnist). The block below downloads the dataset and extracts the data according to the instructions provided in the link.
# +
# %%time
import zipfile, urllib.request, sys
# Load the dataset
# Download the Fashion-MNIST repository zip (network access required)
# and extract it into the working directory.
urllib.request.urlretrieve("https://github.com/zalandoresearch/fashion-mnist/archive/master.zip", "fashion-mnist.zip")
zip_ref = zipfile.ZipFile("fashion-mnist.zip", 'r')
zip_ref.extractall()
zip_ref.close()
# The repository ships a reader utility; make it importable.
sys.path.append('./fashion-mnist-master/utils')
import mnist_reader
# 60k labeled 'train' examples and 10k 't10k' test examples.
X_train_val, y_train_val = mnist_reader.load_mnist('fashion-mnist-master/data/fashion', kind='train')
X_test, y_test = mnist_reader.load_mnist('fashion-mnist-master/data/fashion', kind='t10k')
# -
# The data is structured into two sets of data: One set with 60k labeled entries and another with 10k labeled entries. We will now split the first set into 50k entries for training & 10k entries for validation. The second data set (also 10k) will be used for testing.
# Split the training set into training and validation sets
# First 10k rows become the validation set; the remaining 50k are training data.
X_validation = X_train_val[0:10000]
y_validation = y_train_val[0:10000]
X_train = X_train_val[10000:60000]
y_train = y_train_val[10000:60000]
# ### Data inspection
#
# Once the dataset is loaded, it is typical as part of the machine learning process to inspect the data, understand the distributions, and determine what type(s) of preprocessing might be needed. You can perform those tasks right here in the notebook.
# Note that upon loading the data, train_set contain the n'th image in test_set[0][n] and the corresponding label (which apparel type it is supposed to be) in test_set[1][n].
#
# As an example, let's go ahead and look at the 30th image that is part of the dataset.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
# Small default figure size for the image previews below.
plt.rcParams["figure.figsize"] = (2,10)
def show_item(img, caption='', subplot=None):
    """Display a flattened 28x28 grayscale image with an optional caption.

    img: 1-D array of 784 pixel values; reshaped to 28x28 for display.
    caption: text drawn above the image via plt.title.
    subplot: existing matplotlib axes to draw into; when omitted, a new
        single-axes figure is created.
    """
    if subplot is None:  # idiomatic identity check (was `subplot==None`)
        _, subplot = plt.subplots(1, 1)
    imgr = img.reshape((28, 28))
    subplot.axis('off')
    subplot.imshow(imgr, cmap='gray')
    plt.title(caption)
# Maps the numeric class label (0..9) to a human-readable apparel name.
apparel_lookup = {0: 'T-shirt/top',
                  1: 'Trouser',
                  2: 'Pullover',
                  3: 'Dress',
                  4: 'Coat',
                  5: 'Sandal',
                  6: 'Shirt',
                  7: 'Sneaker',
                  8: 'Bag',
                  9: 'Ankle boot'}
# Preview a single training example with its label as caption.
show_item(X_train[30], 'This is a {}'.format(apparel_lookup[y_train[30]]))
# -
# ### Choosing an Algorithm
#
# As we are looking to perform Supervised Multiclass Classification, let's find an appropriate algorithm from among the [Amazon SageMaker built-in algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html) makes available to you.
#
# Looking through the list, it looks like the Linear Learner, XGBoost Algorithm or the Image Classification Algorithm would fit to our needs.
#
# If you have a highly specialized use case, you can also [bring your own algorithm](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html)
#
# For this example, we will pick the Linear Learner to get familiar with SageMaker and its capabilities. We encourage you to explore the other algorithms [here (XGBoost for MNIST)](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/introduction_to_amazon_algorithms/xgboost_mnist) and [here (Image Classification Algorithm for Fashion MNIST)](https://aws.amazon.com/blogs/machine-learning/classify-your-own-images-using-amazon-sagemaker/)
# ### Data conversion
#
# Since algorithms have particular input and output requirements, converting the dataset is also part of the process that a data scientist goes through prior to initiating training. In this particular case, the Amazon SageMaker implementation of Linear Learner takes recordIO-wrapped protobuf, where the data we have today is a pickle-ized numpy array on disk.
#
# Most of the conversion effort is handled by the Amazon SageMaker Python SDK, imported as `sagemaker` below.
# +
import io
import numpy as np
import sagemaker.amazon.common as smac
# Write the training data
# Convert features/labels to float32 and serialize them as
# recordIO-wrapped protobuf into an in-memory buffer — the input
# format the Linear Learner algorithm consumes.
vectors = np.array([t.tolist() for t in X_train]).astype('float32')
labels = np.array([t.tolist() for t in y_train]).astype('float32')
buf = io.BytesIO()
smac.write_numpy_to_dense_tensor(buf, vectors, labels)
# Rewind so the subsequent S3 upload reads from the start of the buffer.
buf.seek(0)
# -
# ### Upload training data
# Now that we've created our recordIO-wrapped protobuf, we'll need to upload it to S3, so that Amazon SageMaker training can use it.
# +
import boto3
import os
key = 'recordio-pb-data'
# Upload the serialized training data to s3://<bucket>/<prefix>/train/<key>
boto3.resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'train', key)).upload_fileobj(buf)
s3_train_data = 's3://{}/{}/train/{}'.format(bucket, prefix, key)
print('uploaded training data location: {}'.format(s3_train_data))
# -
# Now that our inputs are in S3, ready to be used for training, let's also setup an output S3 location for the model artifact that will be output as the result of training with the algorithm.
# S3 prefix where SageMaker will place the trained model artifact.
output_location = 's3://{}/{}/output'.format(bucket, prefix)
print('training artifacts will be uploaded to: {}'.format(output_location))
# ## Training the linear model
#
# Once we have the data preprocessed and available in the correct format for training, the next step is to actually train the model using the data. Since this data is relatively small, it isn't meant to show off the performance of the Linear Learner training algorithm, although we have tested it on multi-terabyte datasets.
#
# Again, we'll use the Amazon SageMaker Python SDK to kick off training, and monitor status until it is completed. In this example that takes between 7 and 11 minutes. Despite the dataset being small, provisioning hardware and loading the algorithm container take time upfront.
#
# First, let's specify our container. Since we want this notebook to run in any of the Amazon SageMaker's regions, we'll create a small lookup. More details on algorithm containers can be found in [AWS documentation](https://docs-aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html). Note that the lookup also gets to location of the Linear Learner algorithm in the region that the notebook is running in.
# Resolve the Linear Learner algorithm container image for the current region.
from sagemaker.amazon.amazon_estimator import get_image_uri
container = get_image_uri(boto3.Session().region_name, 'linear-learner')
print('using container image: {}'.format(container))
# Using the container we have obtained, we will create a sagemaker estimator object that represents the Linear Learner algorithm
# Estimator wrapping the Linear Learner container: one ml.c4.xlarge training
# instance, artifacts written to output_location, resources tagged for tracking.
linear = sagemaker.estimator.Estimator(container,
                                       role,
                                       train_instance_count=1,
                                       train_instance_type='ml.c4.xlarge',
                                       output_path=output_location,
                                       sagemaker_session=sess,
                                       tags=[
                                           {'Key':'model', 'Value':'linear-learner'},
                                           {'Key':'version', 'Value':'basic'},
                                           {'Key': 'Name', 'Value':basicModelName}
                                       ],
                                       base_job_name='{}-trainjob'.format(envname))
# Before we get to training on the dataset, we have to specify some hyperparameters. Hyperparameters can be seen as information that we provide to the estimator to control the way the estimator treats and learns from the data.
#
# - `feature_dim` is set to 784, which is the number of pixels in each 28 x 28 image.
# - `predictor_type` is set to 'multi_classifier' since we are doing a Multiclass Classification
# - `num_classes` is set to 10 since we can have 10 different predictions for an image (0,1,2...9)
# 784 = 28x28 pixels per image; 10 output classes (digits/apparel categories 0-9).
linear.set_hyperparameters(feature_dim=784,
                           predictor_type='multiclass_classifier',
                           num_classes = 10)
# We are now ready to train our model. We can do this simply by calling fit() and specificying our training and validation data. While you wait for the training to finish, take some time to review the [AWS Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html) to find out what other hypermarameter options are available for the Linear Learner algorithm or to get a feeling of the other algorithms available .
# Kick off the (blocking) training job against the uploaded training channel.
linear.fit(
    {'train': s3_train_data},
    job_name=basicJobName
)
# Fetch the finished job's description (status, metrics, artifact location).
jobStatus = boto3.client('sagemaker').describe_training_job(TrainingJobName=basicJobName)
#print(jobStatus)
# ## Set up hosting for the model
#
# Now that we've trained our model, we can deploy it behind an Amazon SageMaker real-time hosted endpoint. This will allow us to make predictions (or inference) from the model dynamically.
#
# _Note, Amazon SageMaker allows you the flexibility of importing models trained elsewhere, as well as the choice of not importing models if the target of model creation is AWS Lambda, AWS Greengrass, or other deployment target, we will discuss more of those options towards the end of the workshop._
#
# Depending on the region your SageMaker instance is running in, we may add an Elastic Inference accelerator card.
#
# For currently supported regions, please see https://aws.amazon.com/machine-learning/elastic-inference/pricing/
#
# The Elastic Inference accelerator card is attached by specifying a value for the accelerator_type parameter.
#
# +
# Deploy the trained model behind a real-time endpoint. In regions that
# support Elastic Inference we attach an accelerator to a smaller instance;
# if that fails (e.g. service limits) we fall back to a larger plain instance.
launched = False
try:
    if sess.boto_region_name in accelerator_regions:
        # BUG FIX: the original format string was missing the closing escaped
        # quote after {} ("\'{}\ region"), which printed a stray backslash
        # instead of quoting the region name.
        print('Using an Elastic Inference accelerator because we\'re in the \'{}\' region.'.format(sess.boto_region_name))
        linear_predictor = linear.deploy(initial_instance_count=1,
                                         instance_type='ml.m5.xlarge',
                                         accelerator_type='ml.eia1.medium',
                                         endpoint_name='{}-endpoint'.format(envname))
        launched = True
except Exception as e:
    print("Failed to launch with elastic inference, a service limit raise is probably required")
    # Surface the actual failure instead of silently discarding it.
    print('Reason: {}'.format(e))
finally:
    if not launched:
        print('Launching a larger instance without using Elastic Inference accelerator')
        linear_predictor = linear.deploy(initial_instance_count=1,
                                         instance_type='ml.m5.2xlarge',
                                         endpoint_name='{}-endpoint'.format(envname))
# -
#
# If our example would require a GPU to serve predictions, with Elastic Inference we can use a small ml.m5 family machine, rather than an ml.p3, and offload the processing to the Elastic Inference addon card.
#
# To compare costs,
#
# an ml.eial.medium costs `$ 0.140/hour` in us-west-1, and an ml.m5.2xlarge, which is `$ 0.538 / hour`,
# giving us an hourly run rate of `$ 0.678` for this configuration.
#
# Whereas, if we just specified bigger machines we would be looking at something like
#
# ml.p3.2xlarge, which is $ `4.284/hour` in us-west-1
#
#
# giving an hourly saving of around **87%**!!
#
# For more details on pricing, please see https://aws.amazon.com/machine-learning/elastic-inference/pricing/
#
#
# ## Test the model and see how it performs
# Finally, we can now validate the model for use. We can pass HTTP POST requests to the endpoint to get back predictions. To make this easier, we'll again use the Amazon SageMaker Python SDK and specify how to serialize requests and deserialize responses that are specific to the algorithm.
# +
from sagemaker.predictor import csv_serializer, json_deserializer
# Send requests as CSV, parse responses as JSON.
linear_predictor.content_type = 'text/csv'
linear_predictor.serializer = csv_serializer
linear_predictor.deserializer = json_deserializer
# -
# Now let's try getting a prediction for a single record. We will display the image and the prediction
ind = 45  # arbitrary test-set index to demo a single prediction
result = linear_predictor.predict(X_test[ind:ind+1])
show_item(X_test[ind:ind+1], 'This is a {}'.format(apparel_lookup[int(float(result['predictions'][0]['predicted_label']))]))
# OK, a single prediction seems to work. We see that for one record our endpoint returned some JSON which contains `predictions`, including the `score` and `predicted_label`. In this case, `score` will be a continuous value between [0, 1] representing the probability we think that the image corresponds to each possible prediction (0,1,2,.9). `predicted_label` will be a value from (0,1,2,3...9) which indicates what number the model thinks the image contains
#
# Let's do a whole batch of images and evaluate our predictive accuracy.
# +
import numpy as np
# Score the whole test set in 100 mini-batches to stay under the endpoint's
# per-request payload limit, collecting the predicted label of each sample.
predictions = []
for array in np.array_split(X_test, 100):
    result = linear_predictor.predict(array)
    predictions += [r['predicted_label'] for r in result['predictions']]
predictions = np.array(predictions)
# -
# Now we that we have our predictions from the test set, let us build a confusion matrix. The confusion matrix shows us the count of the actual labels (along the rows) versus the predicted labels (along the columns).
import pandas as pd
actual_labels = np.array(y_test)
# Confusion matrix: actual classes down the rows, predicted across the columns.
pd.crosstab(actual_labels, predictions, rownames=['actuals'], colnames=['predictions'])
# Since this matrix is dominant along the diagonal, it looks like the algorithm did pretty well! Let us check how often we got it right
# +
# Overall test-set accuracy.
# BUG FIX: the original loop iterated range(1, len(actual_labels)) and so
# silently skipped the sample at index 0; a vectorized comparison counts
# every sample and replaces the manual loop.
correct_preds = int(np.sum(actual_labels == predictions))
print('The model predicted correctly on {}% of the test set'.format(correct_preds*100/len(actual_labels)))
# -
# For a fun next step, let us try to classify a random piece of apparel that you get off the internet!
# +
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from PIL import Image
# Change the imgsrc with a link to an image of your choice with a white background
imgsrc = 'https://images-na.ssl-images-amazon.com/images/I/81OaXwn1x4L._UX679_.jpg'
#imgsrc = 'https://images-eu.ssl-images-amazon.com/images/I/31TcgNHsbIL._AC_UL260_SR200,260_.jpg'
#imgsrc = 'https://images-eu.ssl-images-amazon.com/images/I/41hWhZBIc3L._AC_UL260_SR200,260_.jpg'
imglocname = 'localimage.jpg'
#Download the file locally (urllib is assumed to be imported earlier in the notebook)
urllib.request.urlretrieve(imgsrc, imglocname)
# Resize and convert to grayscale
def rgb2gray(rgb):
    """Invert the RGB channels (so a white background maps to 0) and collapse
    the image to grayscale using the ITU-R BT.601 luma weights."""
    luma_weights = np.array([0.299, 0.587, 0.114])
    return (255 - rgb[..., :3]) @ luma_weights
fullimgread = Image.open(imglocname)
# Downscale to the 28x28 resolution the model was trained on.
imgread = fullimgread.resize((28, 28))
gray = rgb2gray(np.asarray( imgread, dtype="int32" ))
# Do the prediction! The model expects a flat 784-element feature vector.
result = linear_predictor.predict(gray.reshape((1,784)))
print ('This is a {}'.format(apparel_lookup[int(float(result['predictions'][0]['predicted_label']))]))
plt.imshow(fullimgread)
plt.axis('off')
plt.show()
# -
# ## Improving the model using hyperparameter tuning
#
# Despite having a good model, let us take a moment to see if we can do better!
# We discussed hyperparameters earlier that we used to instruct the algorithm on some characteristics of the data. However, hyperparameters also include some adjustable 'settings' for the algorithm itself. So far, we have used the default values, but picking the right values for some of these hyperparameters can help us improve the model.
#
# Each algorithm has its own hyperparameters. If you have not managed to look them up by yourself earlier, these are the [hyperparameters for the linear learner](https://docs.aws.amazon.com/sagemaker/latest/dg/ll_hyperparameters.html)
#
# How can we arrive at the 'best values' for these hyperparameters? Amazon SageMaker provides [Automatic Model Tuning](https://docs.aws.amazon.com/sagemaker/latest/dg/linear-learner-tuning.html) to allow you to use your test and validation datasets to identify the best hyperparameters for your particular use case.
#
# First, let us prepare the validation data in the same way we prepared the training data.
#
# +
# Prepare the validation data exactly like the training data: float32 arrays
# serialized to recordIO-protobuf and uploaded to the 'validation' S3 prefix.
validation_vectors = np.array([t.tolist() for t in X_validation]).astype('float32')
validation_labels = np.array([t.tolist() for t in y_validation]).astype('float32')
validation_buf = io.BytesIO()
smac.write_numpy_to_dense_tensor(validation_buf, validation_vectors, validation_labels)
validation_buf.seek(0)  # rewind before uploading
key = 'recordio-pb-data'
boto3.resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'validation', key)).upload_fileobj(validation_buf)
s3_validation_data = 's3://{}/{}/validation/{}'.format(bucket, prefix, key)
print('uploaded validation data location: {}'.format(s3_validation_data))
# -
# Let us now set up Amazon SageMaker's hyperparameter tuning capabilities. We will use four of the [Linear Learner Tuning Hyperparameters](https://docs.aws.amazon.com/sagemaker/latest/dg/linear-learner-tuning.html) and their recommended ranges.
#
# Note: These were chosen primarily to demonstrate the different types of hyperparameters you can tune. You might be able to achieve better or faster results by choosing a different set of hyperparameters and ranges.
# +
from sagemaker.tuner import IntegerParameter, CategoricalParameter, ContinuousParameter
# Search ranges for the tuner: weight decay and learning rate, both continuous.
hyperparameters_to_tune = { 'wd': ContinuousParameter(0.001, 0.1),
                            'learning_rate': ContinuousParameter(0.001, 0.01),
                          }
# -
# We now have to choose a metric for the hyperparameter optimization. For this example, we will use the validation:objective_loss metric. This is also the reason we are including validation data in our model tuning step below.
#
# Let us set up the tuner itself and have it tune the model and hopefully come up with a better model!
# +
from sagemaker.tuner import HyperparameterTuner
# Tuner: 10 training jobs total, 2 at a time, minimizing validation loss.
linear_tuner = HyperparameterTuner(estimator=linear,
                                   objective_metric_name = 'validation:objective_loss',
                                   objective_type = 'Minimize',
                                   hyperparameter_ranges=hyperparameters_to_tune,
                                   max_jobs=10,
                                   max_parallel_jobs=2,
                                   tags=[
                                       {'Key':'model', 'Value':'linear-learner'},
                                       {'Key':'version', 'Value':'hpo'},
                                       {'Key': 'Name', 'Value':advancedModelName}
                                   ],
                                   base_tuning_job_name='{}-hpojob'.format(envname))
# Launch the tuning job with both train and validation channels.
linear_tuner.fit(
    inputs={'train': s3_train_data, 'validation': s3_validation_data},
    include_cls_metadata=False,
    job_name=advancedJobName
)
# -
# When we call fit() with a HyperparameterTuner object, it initiates a job that will typically run through many iterations. During this process, you can always access the best model the job has found so far. For our example today, we will wait until the job is complete. Run the block below to get the latest status. When the status is 'Completed', we will move to the next block.
# +
from pprint import pprint
import time
# NOTE(review): this rebinds the name 'sagemaker' from the SDK module to a
# boto3 client; later cells rely on it being the client.
sagemaker = boto3.Session().client(service_name='sagemaker')
tuning_job_result = sagemaker.describe_hyper_parameter_tuning_job(HyperParameterTuningJobName=advancedJobName)
status = tuning_job_result['HyperParameterTuningJobStatus']
#print(status)
counter = 0
best_job_yet = None
last_job_count = -1
job_count = 0
# Poll until the tuning job completes, capped at 400 iterations (~33 minutes
# at 5 s per poll), printing a status summary whenever the completed-job
# count changes and announcing each new best model.
while status != 'Completed' and counter < 400:
    job_count = tuning_job_result['TrainingJobStatusCounters']['Completed']
    if job_count != last_job_count:
        last_job_count = job_count
        print("\n%d training jobs have completed out of 10" % job_count)
        statstr = 'Status: {}'.format(tuning_job_result['HyperParameterTuningJobStatus'])
        if (tuning_job_result['HyperParameterTuningJobStatus'] != 'Completed'):
            statstr = statstr + '\nRunning Since: ' + str(tuning_job_result['CreationTime'])
        statstr = statstr + '\nTraining Jobs Completed: ' + str(tuning_job_result['TrainingJobStatusCounters']['Completed'])
        fail_jobs = tuning_job_result['TrainingJobStatusCounters']['RetryableError'] + tuning_job_result['TrainingJobStatusCounters']['NonRetryableError']
        statstr = statstr + '\nTraining Jobs Failed: ' + str(fail_jobs)
        statstr = statstr + '\nMaximum Training Jobs: ' + str(tuning_job_result['HyperParameterTuningJobConfig']['ResourceLimits']['MaxNumberOfTrainingJobs'])
        print(statstr)
    if tuning_job_result.get('BestTrainingJob', None):
        current_job = tuning_job_result['BestTrainingJob']
        if current_job != best_job_yet:
            best_job_yet = current_job
            print('\nNew best model found!')
            #print("Best model found so far:")
            #pprint(tuning_job_result['BestTrainingJob'])
    # sleep for 5 and then update markers
    time.sleep(5)
    print('.', end = '')
    counter += 1
    tuning_job_result = sagemaker.describe_hyper_parameter_tuning_job(HyperParameterTuningJobName=advancedJobName)
    status = tuning_job_result['HyperParameterTuningJobStatus']
# Fail loudly if the tuning job itself failed; otherwise show the winner.
if status == 'Failed':
    raise Exception('Job failed because :: {}'.format(tuning_job_result['FailureReason']))
if tuning_job_result.get('BestTrainingJob', None):
    print("\nBest model after training:")
    pprint(tuning_job_result['BestTrainingJob'])
# -
# By running a variety of training jobs, the hyperparameter tuner has now identified a set of hyperparameters that give us the best results in terms of the objective metric (in our case, the objective_loss). Let us go ahead and deploy this model in a similar fashion to how we deployed the model earlier.
# Deploy the best model found by the tuner behind its own endpoint.
linear_tuned_predictor = linear_tuner.deploy(initial_instance_count=1,
                                             instance_type='ml.m5.xlarge',
                                             endpoint_name='{}-tunedendpoint'.
                                             format(envname))
# We will now run the test set against our newly deployed endpoint and gather the predictions
# +
# Same CSV-in / JSON-out wiring as the basic predictor.
linear_tuned_predictor.content_type = 'text/csv'
linear_tuned_predictor.serializer = csv_serializer
linear_tuned_predictor.deserializer = json_deserializer
# Score the test set in 100 mini-batches, as before.
predictions = []
for array in np.array_split(X_test, 100):
    result = linear_tuned_predictor.predict(array)
    predictions += [r['predicted_label'] for r in result['predictions']]
predictions = np.array(predictions)
# -
# Now for the moment of truth!
#
# Let us calculate how often the tuned model got the right prediction and also generate the corresponding confusion matrix.
# +
# Accuracy and confusion matrix for the hyperparameter-tuned model.
actual_labels = np.array(y_test)
# BUG FIX: the original loop iterated range(1, len(actual_labels)) and so
# skipped the sample at index 0; count every prediction instead.
correct_preds = int(np.sum(actual_labels == predictions))
print('The tuned model predicted correctly on {}% of the test set'.format(correct_preds*100/len(actual_labels)))
pd.crosstab(actual_labels, predictions, rownames=['actuals'], colnames=['predictions'])
# -
# ### Review
#
# Let us quickly summarize what we have achieved so far. We have taken a common dataset (Fashion MNIST) and used Amazon SageMaker with it's Linear Learner algorithm to train a multiclass classification model that allows us to predict fashion items among 10 different categories. Once trained we deployed the trained model using Amazon SageMaker's model hosting capabilities to create a fully-managed, automated scalable model endpoint.
#
# Finally we have used the Automatic Model Tuning to help us find the best version of a model by running many training jobs against our dataset using the algorithm and ranges of hyperparameters that we specified.
#
# ### Stage clean up
#
# We can now delete the endpoints that we created in this section.
# delete model endpoints to stop incurring hosting charges
linear_predictor.delete_endpoint()
linear_tuned_predictor.delete_endpoint()
# ## A-B testing models using product variants in endpoints
#
# Ok, so we've trained a quick model, and a hyperparameter tuned model, deployed and tested them, and now torn down the endpoints.
#
# In a real world situation we would probably want to use an A/B deployment or similar, so let's have a look at that.
#
# Let's start off with defining a few helper functions that return a model ARN from a normal training job or the best model from an HPO job using the SageMaker API.
# +
import json
import matplotlib.pyplot as plt
def createModelFromHpoJob(trainingJobName, modelName):
    """Resolve the best training job of a finished HPO job and register it.

    Returns the ARN of the SageMaker model created from the winning job.
    """
    hpo_description = sagemaker.describe_hyper_parameter_tuning_job(
        HyperParameterTuningJobName=trainingJobName)
    winning_job = hpo_description['BestTrainingJob']['TrainingJobName']
    print('best training job resolved as : {}'.format(winning_job))
    return createModel(winning_job, modelName)
def createModel (trainingJobName, modelName, hyper = False):
    """Create a SageMaker model from a completed training job's artifacts.

    Returns the new model's ARN.

    NOTE(review): ``hyper`` is accepted for signature compatibility but is
    never used inside the function.
    """
    job_info = sagemaker.describe_training_job(TrainingJobName=trainingJobName)
    training_image = job_info['AlgorithmSpecification']['TrainingImage']
    artifacts_url = job_info['ModelArtifacts']['S3ModelArtifacts']
    print('container : {}'.format(training_image))
    print('model data : {}'.format(artifacts_url))
    print('creating model :: {}'.format(modelName))
    response = sagemaker.create_model(
        ModelName = modelName,
        ExecutionRoleArn = role,
        PrimaryContainer = {
            'Image': training_image,
            'ModelDataUrl': artifacts_url
        })
    return response['ModelArn']
# -
# Let's get a reference to the basic model first...
# Register the basic (non-tuned) training job's output as a model.
basicModelArn = createModel(basicJobName, basicModelName)
print('basic model created as : {}'.format(basicModelArn))
# And the same for the advanced HPO one...
advancedModelArn = createModelFromHpoJob(advancedJobName, advancedModelName)
print('advanced model created as : {}'.format(advancedModelArn))
# So we've created models from the best jobs, now we create a config referencing both models.
#
# The integer values for 'InitialVariantWeight' are whatever you feel gives you the granularity to express the distribution you want.
# +
endpointConfigName = 'linear-endpoint-config-{}-{}'.format(envname, timestamp)
# Two production variants sharing one endpoint: initial weights 9:1 route
# ~90% of traffic to the basic model and ~10% to the HPO-tuned one.
create_endpoint_config_response = sagemaker.create_endpoint_config(
    EndpointConfigName = endpointConfigName,
    ProductionVariants=[
        {
            'InstanceType':'ml.m5.xlarge',
            'InitialInstanceCount':1,
            'ModelName':basicModelName,
            'InitialVariantWeight':9,
            'VariantName':'basicTraffic'},
        {
            'InstanceType':'ml.m5.xlarge',
            'InitialInstanceCount':1,
            'ModelName':advancedModelName,
            'InitialVariantWeight':1,
            'VariantName':'hpoTraffic'
        }
    ])
print("Endpoint Config Arn: " + create_endpoint_config_response['EndpointConfigArn'])
# -
# Now we can create an endpoint, using this dual model config
# +
# %%time
endpointName = 'linear-endpoint-{}-{}'.format(envname, timestamp)
# Create the endpoint from the dual-variant config and block until it leaves
# the 'Creating' state, printing a dot every 10 seconds as a progress marker.
create_endpoint_response = sagemaker.create_endpoint(
    EndpointName=endpointName,
    EndpointConfigName=endpointConfigName)
print('Creating endpoint ARN : {}'.format(create_endpoint_response['EndpointArn']))
resp = sagemaker.describe_endpoint(EndpointName=endpointName)
status = resp['EndpointStatus']
print("Status : " + status)
while status=='Creating':
    time.sleep(10)
    resp = sagemaker.describe_endpoint(EndpointName=endpointName)
    status = resp['EndpointStatus']
    print('.', end = '')
print('\nEndpoint created as : {}'.format(resp['EndpointArn']))
print('Status : {}'.format(status))
# -
# So, at this stage we have an new SageMaker Endpoint, balancing between the two models that we have created so far.
#
# You can see the created endpoint in the console using the following link
#
# https://console.aws.amazon.com/sagemaker/home?#/endpoints
#
# If you click into the new endpoint, and scroll down to the Production Variants section, you will see the model names, initial weights, variant names etc for each Production Variant that we have defined.
#
# Now we'll set up our environment to do some requests against this endpoint.
#
# First, we need a SageMaker runtime client
# Runtime client for invoking the endpoint (separate from the control-plane client).
runtime_client = boto3.client('sagemaker-runtime')
# Next we need to define some variables and helper functions, to
#
# - convert gray scale images,
# - do some logging,
# - test the distribution of the end point
# - update the weights on the endpoint
#
# The last two functions, `test_variant` and `update_weights`, will be used by you later, so it's worth having a good look into the code there.
# +
imgLocalName = 'test_image.jpg'

def rgb2gray(rgb):
    """Invert the RGB channels (white background maps to 0) and collapse the
    image to one grayscale channel via the ITU-R BT.601 luma weights."""
    inverted = 255 - rgb[..., :3]
    return inverted @ np.array([0.299, 0.587, 0.114])
def predict(body):
    """POST a CSV payload to the A/B endpoint and return the raw boto3
    response (headers identify which variant served the request)."""
    return runtime_client.invoke_endpoint(
        EndpointName=endpointName,
        Body=body,
        ContentType='text/csv',
        Accept='application/json',
    )
def get_product_variant(response):
    """Return the name of the production variant that served this invocation,
    taken from the response's HTTP headers."""
    headers = response['ResponseMetadata']['HTTPHeaders']
    return headers['x-amzn-invoked-production-variant']
def resolve_prediction(response):
    """Decode the JSON body of an invoke_endpoint response, print the
    human-readable apparel label, and return the parsed result dict."""
    # print (response)
    # let's look at the body now...
    result = json.loads(response['Body'].read().decode())
    print('----------------------------')
    print (' prediction')
    print('----------------------------')
    # predicted_label arrives as a numeric string; map it to the lookup table
    print ('This is a {}'.format(apparel_lookup[int(float(result['predictions'][0]['predicted_label']))]))
    return result
def test_variant(max):
    """Send ``max`` sample requests to the endpoint and plot (horizontal bar
    chart) how many were served by each production variant.

    NOTE(review): the parameter name shadows the builtin ``max``; it is kept
    unchanged so existing keyword callers keep working.
    """
    counts = {'basicTraffic': 0, 'hpoTraffic': 0}
    # BUG FIX: the original used range(1, max), which issued only max-1
    # requests instead of the number the caller asked for.
    for _ in range(max):
        response = predict(body)
        variant = get_product_variant(response)
        if variant == 'basicTraffic':
            counts['basicTraffic'] += 1
        else:
            counts['hpoTraffic'] += 1
    #print(counts)
    pd.Series(counts).plot(kind='barh')
def update_weights(a, b):
    """Re-weight endpoint traffic to a:b between the basicTraffic and
    hpoTraffic variants, then block until the endpoint leaves 'Updating'
    and allow a short settling period before returning."""
    sagemaker.update_endpoint_weights_and_capacities(
        EndpointName=endpointName,
        DesiredWeightsAndCapacities=[
            {
                'VariantName': 'basicTraffic',
                'DesiredWeight': a
            },
            {
                'VariantName': 'hpoTraffic',
                'DesiredWeight': b
            }
        ]
    )
    resp = sagemaker.describe_endpoint(EndpointName=endpointName)
    status = resp['EndpointStatus']
    print("Status : " + status)
    # Poll every 5 s until the update has been applied.
    while status == 'Updating':
        time.sleep(5)
        resp = sagemaker.describe_endpoint(EndpointName=endpointName)
        status = resp['EndpointStatus']
        print('.', end = '')
    # let it settle in a bit (fixed 50-second grace period)
    counter = 0
    while counter < 10:
        counter += 1
        time.sleep(5)
        print('.', end = '')
    print("\nStatus : " + status)
# -
# Now we can download an image and read it
# +
#imgSrc = 'https://images-na.ssl-images-amazon.com/images/I/81OaXwn1x4L._UX679_.jpg'
imgSrc = 'https://images-eu.ssl-images-amazon.com/images/I/31TcgNHsbIL._AC_UL260_SR200,260_.jpg'
# Download, downscale to the model's 28x28 input, and convert to grayscale.
urllib.request.urlretrieve(imgSrc, imgLocalName)
fullimgread = Image.open(imgLocalName)
imgread = fullimgread.resize((28, 28))
gray = rgb2gray(np.asarray( imgread, dtype="int32" ))
# make it a 1d array
grayShape = gray.reshape((1,784))
# convert to csv for the request body. Using str.join avoids the original
# quadratic string concatenation loop and the manual trailing-comma strip.
body = ','.join(str(x) for x in np.nditer(grayShape))
# -
# And we're ready to make a prediction
# +
# Invoke the A/B endpoint with the CSV body and show the image + prediction.
result = predict(body)
resolve_prediction(result)
plt.imshow(fullimgread)
plt.axis('off')
plt.show()
# -
# We currently have a 9 in 10 chance of hitting the basicTraffic model.
#
# Let's test that out, and run a small series of predictions against our current configuration
# 50 sample requests against the initial 9:1 weighting
test_variant(50)
# These results should look plausible, so now let's update the distribution to 50/50 by setting the DesiredWeights to 5 and 5 and calling the update_endpoint_weights_and_capacities endpoint via our utility function.
#
# Rebalance traffic to 50/50 between the two variants.
update_weights(5,5)
# Now hop over to the SageMaker console while this cell is running, and find your endpoint, and wait for it to finish updating.
#
# You can see the endpoint in the console using the following link
#
# https://console.aws.amazon.com/sagemaker/home?#/endpoints
#
# During the updating process traffic will of course be served by the existing config until the new one is scaled up, health checks passed, and it is ready to serve traffic.
#
#
# When it's finished updating, we will run the same test...
# Re-test the distribution after the 50/50 re-weighting.
test_variant(50)
# Ok, so that hopefully looks pretty even.
#
# (If it's not quite there yet then run this last step again)
#
# And now it's for you to implement a weighting, using any integer values you want.
# Fill in the 2 cells below,
#
# - the first cell needs a new weighting, using any integer values you want
# - the second cell will test it, as before, using the number of sample requests that you specify
# Intentional workshop exercise placeholders — replace with your own calls.
raise NotImplementedError('You need to replace this line with our utility function, and 2 positive integer values')
raise NotImplementedError('You need to replace this line with a call to the function that will test the distribution')
# Now finally, let's go to 100% hpoTraffic.
update_weights(0,10)
# As before, we need to go and look in the SageMaker console to see when it has finished updating,
# and then run the next cell.
test_variant(50)
# And there we have it, 100% cut over to the newer hpo tuned model.
#
# (If it's not quite there yet then run the last cell again)
#
# Finally, let's clean up local disk space for good hygiene, as well as the Endpoint and the Endpoint Configuration
# +
# Remove downloaded artifacts from local disk, then tear down the A/B
# endpoint and its configuration so no hosting charges continue to accrue.
# !rm -rf fashion-mnist.zip
# !rm -rf fashion-mnist-master
# !rm -rf test_image.jpg
# !rm -rf localimage.jpg
sagemaker.delete_endpoint(EndpointName=endpointName)
sagemaker.delete_endpoint_config(EndpointConfigName=endpointConfigName)
| lab3-image-classification/FashionMNIST_HPO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Notable Built-In Functions in Python
# *Suggested Answers follow (usually there are multiple ways to solve a problem in Python).*
# Obtain the maximum number among the values 25, 65, 890, and 15.
max(25, 65, 890, 15)
# Obtain the minimum number among the values 25, 65, 890, and 15.
min(25, 65, 890, 15)
# Find the absolute value of -100
abs(-100)
# Round the value of 55.5. Did you obtain 56.0?
# (Note: Python 3 uses banker's rounding, so round(55.5) actually gives 56 here
# only because 56 is the even neighbour.)
round(55.5)
# Round 35.56789 to the third digit.
round(35.56789, 3)
# Find the sum of all elements in the provided list, called "Numbers".
Numbers = [1, 5, 64, 24.5]
sum(Numbers)
# Use a built-in function to raise 10 to the power of 3.
pow(10, 3)
# How many characters are there in the word "Elephant"?
len("Elephant")
# Create a function, called "distance_from_zero", that returns the absolute value of a provided single argument and prints a statement "Not Possible" if the argument provided is not a number.
# Call the funtion with the values of -10 and "cat" to verify it works correctly.
def distance_from_zero(x):
    """Return abs(x) when x is an int or float; otherwise print a message
    and return None."""
    if type(x) not in (int, float):
        print ("Not possible")
        return None
    return abs(x)

distance_from_zero(-10)
distance_from_zero("cat")
| 11 - Introduction to Python/6_Functions/7_Notable Built-In Functions in Python (3:56)/Notable Built-In Functions in Python - Solution_Py3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # DAT210x - Programming with Python for DS
# ## Module5- Lab7
# +
import random, math
import pandas as pd
import numpy as np
import scipy.io

from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt

#matplotlib.style.use('ggplot') # Look Pretty
plt.style.use('ggplot')

# Leave this alone until indicated: selects PCA (True) vs Isomap (False)
# for the dimensionality-reduction step further down.
Test_PCA = True
# -
# ### A Convenience Function
# This method is for your visualization convenience only. You aren't expected to know how to put this together yourself, although you should be able to follow the code by now:
def plotDecisionBoundary(model, X, y):
    """Plot a fitted classifier's 2D decision surface plus the test points,
    colour-coded by class (2 = benign, 4 = malignant).

    NOTE(review): assumes X is a 2-column array and that the model exposes a
    'n_neighbors' entry via get_params() (i.e. a KNN classifier).
    """
    print("Plotting...")

    fig = plt.figure()
    ax = fig.add_subplot(111)

    padding = 0.1
    resolution = 0.1

    #(2 for benign, 4 for malignant)
    colors = {2:'royalblue', 4:'lightsalmon'}

    # Calculate the boundaries of the plot, padded slightly past the data extent
    x_min, x_max = X[:, 0].min(), X[:, 0].max()
    y_min, y_max = X[:, 1].min(), X[:, 1].max()
    x_range = x_max - x_min
    y_range = y_max - y_min
    x_min -= x_range * padding
    y_min -= y_range * padding
    x_max += x_range * padding
    y_max += y_range * padding

    # Create a 2D Grid Matrix. The values stored in the matrix
    # are the predictions of the class at said location
    xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution),
                         np.arange(y_min, y_max, resolution))

    # What class does the classifier say?
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    # Plot the contour map
    plt.contourf(xx, yy, Z, cmap=plt.cm.seismic)
    plt.axis('tight')

    # Plot your testing points as well...
    for label in np.unique(y):
        indices = np.where(y == label)
        plt.scatter(X[indices, 0], X[indices, 1], c=colors[label], alpha=0.8)

    p = model.get_params()
    plt.title('K = ' + str(p['n_neighbors']))
    plt.show()
# ### The Assignment
# Load in the dataset, identify nans, and set proper headers. Be sure to verify the rows line up by looking at the file in a text editor.
# .. your code here ..
# Load the Wisconsin breast-cancer dataset; the file has no header row and
# uses '?' for missing values.
col_names = ['sample', 'thickness', 'size', 'shape', 'adhesion', 'epithelial', 'nuclei', 'chromatin', 'nucleoli', 'mitoses', 'status']
X = pd.read_csv('Datasets/breast-cancer-wisconsin.data', names=col_names, na_values='?')
# Copy out the status column into a slice, then drop it from the main dataframe. Always verify you properly executed the drop by double checking (printing out the resulting operating)! Many people forget to set the right axis here.
#
# If you goofed up on loading the dataset and notice you have a `sample` column, this would be a good place to drop that too if you haven't already.
# .. your code here ..
y = X['status'].copy()
X.drop(labels=['sample', 'status'], inplace=True, axis=1)
# With the labels safely extracted from the dataset, replace any nan values with the mean feature / column value:
# .. your code here ..
X = X.fillna(X.mean())
# Do train_test_split. Use the same variable names as on the EdX platform in the reading material, but set the random_state=7 for reproducibility, and keep the test_size at 0.5 (50%).
# .. your code here ..
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=7)
# Experiment with the basic SKLearn preprocessing scalers. We know that the features consist of different units mixed in together, so it might be reasonable to assume feature scaling is necessary. Print out a description of the dataset, post transformation. Recall: when you do pre-processing, which portion of the dataset is your model trained upon? Also which portion(s) of your dataset actually get transformed?
# +
# .. your code here ..
from sklearn.preprocessing import Normalizer, MaxAbsScaler, MinMaxScaler, StandardScaler, RobustScaler
# Several scalers were tried (commented out below); MinMaxScaler was kept.
# Note the scaler is fit on the TRAINING split only, then applied to both
# splits, to avoid leaking test-set statistics into preprocessing.
#norm = Normalizer().fit(X_train)
#max_abs = MaxAbsScaler().fit(X_train)
min_max = MinMaxScaler().fit(X_train)
#stand = StandardScaler().fit(X_train)
#robust = RobustScaler().fit(X_train)
#X_train = norm.transform(X_train)
#X_test = norm.transform(X_test)
#X_train = max_abs.transform(X_train)
#X_test = max_abs.transform(X_test)
X_train = min_max.transform(X_train)
X_test = min_max.transform(X_test)
#X_train = stand.transform(X_train)
#X_test = stand.transform(X_test)
#X_train = robust.transform(X_train)
#X_test = robust.transform(X_test)
# -
# ### Dimensionality Reduction
# PCA and Isomap are your new best friends
# +
model = None
# Test_PCA (set near the top of the notebook) selects the reduction method.
if Test_PCA:
    print('Computing 2D Principle Components')
    # TODO: Implement PCA here. Save your model into the variable 'model'.
    # You should reduce down to two dimensions.
    # .. your code here ..
    from sklearn.decomposition import PCA
    model = PCA(n_components=2)
    model.fit(X_train)
else:
    print('Computing 2D Isomap Manifold')
    # TODO: Implement Isomap here. Save your model into the variable 'model'
    # Experiment with K values from 5-10.
    # You should reduce down to two dimensions.
    # .. your code here ..
    from sklearn.manifold import Isomap
    model = Isomap(n_neighbors=5, n_components=2)
    model.fit(X_train)
# -
# Train your model against data_train, then transform both `data_train` and `data_test` using your model. You can save the results right back into the variables themselves.
# .. your code here ..
# Project both splits into the 2D space learned from the training data.
X_train = model.transform(X_train)
X_test = model.transform(X_test)
# Implement and train `KNeighborsClassifier` on your projected 2D training data here. You can name your variable `knmodel`. You can use any `K` value from 1 - 15, so play around with it and see what results you can come up. Your goal is to find a good balance where you aren't too specific (low-K), nor are you too general (high-K). You should also experiment with how changing the weights parameter affects the results.
# +
# .. your code here ..
from sklearn.neighbors import KNeighborsClassifier
# K=15 with distance weighting: closer neighbours count more in the vote.
knmodel = KNeighborsClassifier(n_neighbors=15, weights='distance')
knmodel.fit(X_train, y_train)
# -
# Be sure to always keep the domain of the problem in mind! It's WAY more important to errantly classify a benign tumor as malignant, and have it removed, than to incorrectly leave a malignant tumor, believing it to be benign, and then having the patient progress in cancer. Since the UDF weights don't give you any class information, the only way to introduce this data into SKLearn's KNN Classifier is by "baking" it into your data. For example, randomly reducing the ratio of benign samples compared to malignant samples from the training set.
# Calculate and display the accuracy of the testing set:
# .. your code changes above ..
accuracy_score = knmodel.score(X_test, y_test)
accuracy_score
plotDecisionBoundary(knmodel, X_test, y_test)
| Module5/Module5 - Lab7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 感知机及其改进算法
# ### 小组成员:温紫珺、谭力仁、沈旭阳、邹子涵
# ### 汇报人:邹子涵
# ## 导包
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# from sklearn import preprocessing
font = r'C:\Windows\Fonts\simfang.ttf'
# -
# ## 导入数据集
# Load the iris data set from a local CSV file.
data = pd.read_csv(r"./dataset/iris.csv")
data
# ## Data dictionary
# 1. sepal length (cm)
# 2. sepal width (cm)
# 3. petal width (cm)  -- see column list below for the two features used
# 4. petal length (cm)
# 5. class label
# ## Preprocessing
# ### Select the columns used in this experiment
# Two columns serve as inputs and one column as the output label.
col = ['sepal width','petal width','class']
data = pd.DataFrame(data,columns = col)
# ### Keep the first 100 rows (the two linearly separable species)
data = data[:100]
len(data)
# ### Re-encode the class labels as -1 / +1
data.loc[(data['class']=='Iris-setosa'),'class'] = -1
data.loc[(data['class']=='Iris-versicolor'),'class'] = 1
# ### Shuffle the rows
data = data.sample(frac=1)
data
# ### Convert to a plain NumPy array: rows are [sepal width, petal width, label]
data = data.values
# ### Scatter plot of the two classes
for i in range(len(data)):
    if data[i][-1] == -1:
        plt.scatter(data[i][0],data[i][1],c='r',marker='o')
    else:
        plt.scatter(data[i][0],data[i][1],c='b',marker='x')
plt.show()
# ### Split into training and test sets (70 / 30)
training = data[:70]
test = data[70:]
len(test)
# Peek at one test sample: its features and its label.
x = test[0][:-1]
x
y = test[0][-1:]
y
# # 一些工具函数
def makeArray(I,J):
    """Build an I-by-J nested list where every row is filled with a single
    random value drawn once per row from [0, 1)."""
    return [[np.random.random()] * J for _ in range(I)]
def sign(a):
    """Return 1 for strictly positive input, otherwise -1 (zero maps to -1)."""
    return 1 if a > 0 else -1
# # 一、感知机模型
def feeling():
    """Train a basic perceptron on the global `training` array.

    Performs 1000 full passes over the data with learning rate 0.01,
    updating the hyperplane on every sample that is misclassified (or lies
    exactly on the boundary).

    Returns:
        (cnt, w, c): the loop counter at exit (always 1001 here), the
        learned weight vector, and the learned bias term.
    """
    w = np.zeros(2)   # hyperplane normal, one weight per feature
    lr = 0.01         # learning rate
    c = 1             # bias / threshold, deliberately started at 1
    for _ in range(1000):
        for row in training:
            x = np.array(row[:-1])
            y = np.array(row[-1])
            # Perceptron rule: update only when prediction and label disagree.
            if y * sign(np.sum(w * x) + c) <= 0:
                w = w + lr * y * x
                c = c + lr * y
    # The original while-loop counter incremented to 1001 before exiting
    # after 1000 complete passes; preserve that return value.
    return 1001, w, c
if __name__=='__main__':
    # Train the perceptron and report the learned hyperplane.
    cnt,w,c=feeling()
    print("迭代次数:",cnt)    # number of training passes
    print("超平面法向量:",w)  # hyperplane normal vector
    print("阈值:",c)          # bias / threshold
    # Accuracy on the held-out test set.
    count = 0
    for i in range(len(test)):
        target = test[i][-1:]
        result = sign(np.sum(w*test[i][:-1])+c)
        if(target == result):
            count += 1
    accuracy = float(count/len(test))
    # Scatter the test points, colored by true class.
    for i in range(len(test)):
        if test[i][-1] == -1:
            plt.scatter(test[i][0],test[i][1],c='r',marker='o')
        else:
            plt.scatter(test[i][0],test[i][1],c='b',marker='x')
    # for i in range(len(data)):
    #     if training[i][-1] == -1:
    #         plt.scatter(data[i][0],data[i][1],c='r')
    #     else:
    #         plt.scatter(data[i][0],data[i][1],c='b')
    # Draw the decision line w[0]*x + w[1]*y + c = 0 over x in [2, 4]
    # (random x samples; division assumes w[1] != 0).
    X = np.random.uniform(2,4,100)
    Y = -(w[0]*X+c)/w[1]
    plt.xlabel('sepal width')
    plt.ylabel('petal width')
    plt.plot(X,Y,'g')
    plt.show()
    print("测试集准确率:",accuracy)  # test-set accuracy
    # Same decision line drawn over the full data set.
    for i in range(len(data)):
        if data[i][-1] == -1:
            plt.scatter(data[i][0],data[i][1],c='r',marker='o')
        else:
            plt.scatter(data[i][0],data[i][1],c='b',marker='x')
    X = np.random.uniform(2,4,100)
    Y = -(w[0]*X+c)/w[1]
    plt.xlabel('sepal width')
    plt.ylabel('petal width')
    plt.plot(X,Y,'g')
    plt.show()
# # 二、口袋算法
def checkErrorRate(training,w,c):
    """Return the fraction of samples in `training` misclassified by the
    hyperplane with normal `w` and bias `c`. Each row of `training` is
    [feature..., label]."""
    wrong = 0
    for row in training:
        features = np.array(row[:-1])
        predicted = sign(np.sum(w * features) + c)
        if predicted != sign(row[-1]):
            wrong += 1
    return wrong / len(training)
def Pocket():
    """Pocket-algorithm variant of the perceptron.

    Runs the same 1000-pass perceptron training as `feeling`, but after
    every pass evaluates the training error and keeps ("pockets") the best
    hyperplane seen so far.

    Returns:
        (cnt, best_w, best_c): loop counter at exit (always 1001), and the
        pocketed weight vector / bias with the lowest observed error rate.
    """
    w = np.zeros(2)
    lr = 0.01
    c = 1
    best_w = w
    best_c = c
    bestRate = 1  # lowest training-error rate seen so far
    for _ in range(1000):
        for row in training:
            x = np.array(row[:-1])
            y = row[-1]
            # Standard perceptron update on every misclassified sample.
            if y * sign(np.sum(w * x) + c) <= 0:
                w = w + lr * y * x
                c = c + lr * y
        # Evaluate once per pass; pocket the hyperplane if it improved.
        rate = checkErrorRate(training,w,c)
        if rate < bestRate:
            bestRate = rate
            print("bestRate update:",bestRate)
            best_w = w
            best_c = c
    # Match the original while-loop counter, which exited at 1001.
    return 1001, best_w, best_c
if __name__=='__main__':
    # Train with the pocket algorithm and report the pocketed hyperplane.
    cnt,best_w,best_c=Pocket()
    print("迭代次数:",cnt)        # number of training passes
    print("超平面法向量:",best_w) # pocketed hyperplane normal vector
    print("阈值:",best_c)         # pocketed bias / threshold
    # Accuracy on the held-out test set.
    count = 0
    for i in range(len(test)):
        target = test[i][-1:]
        result = sign(np.sum(best_w*test[i][:-1])+best_c)
        if(target == result):
            count += 1
    accuracy = float(count/len(test))
    # Scatter the test points, colored by true class.
    for i in range(len(test)):
        if test[i][-1] == -1:
            plt.scatter(test[i][0],test[i][1],c='r',marker='o')
        else:
            plt.scatter(test[i][0],test[i][1],c='b',marker='x')
    # for i in range(len(data)):
    #     if data[i][-1] == -1:
    #         plt.scatter(data[i][0],data[i][1],c='r')
    #     else:
    #         plt.scatter(data[i][0],data[i][1],c='b')
    # Draw the pocketed decision line over x in [2, 4] (assumes best_w[1] != 0).
    X = np.random.uniform(2,4,100)
    Y = -(best_w[0]*X+best_c)/best_w[1]
    plt.plot(X,Y,'g')
    plt.xlabel('sepal width')
    plt.ylabel('petal width')
    plt.show()
    print("准确率:",accuracy)  # accuracy
    # Same decision line drawn over the full data set.
    for i in range(len(data)):
        if data[i][-1] == -1:
            plt.scatter(data[i][0],data[i][1],c='r',marker='o')
        else:
            plt.scatter(data[i][0],data[i][1],c='b',marker='x')
    X = np.random.uniform(2,4,100)
    Y = -(best_w[0]*X+best_c)/best_w[1]
    plt.xlabel('sepal width')
    plt.ylabel('petal width')
    plt.plot(X,Y,'g')
    plt.show()
| 3-24homework/homeworkv1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="1Ny7DC00F72L"
# # Distributing standardized COMBINE archives with Tellurium
#
# <div align='center'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/tellurium-and-libroadrunner.png" width="60%" style="padding: 20px"></div>
# <div align='center' style='font-size:100%'>
# <NAME>, BS
# <div align='center' style='font-size:100%'>Sauro Lab PhD Student, Department of Bioengineering<br>
# Head of Outreach, <a href="https://reproduciblebiomodels.org/dissemination-and-training/seminar/">Center for Reproducible Biomedical Modeling</a><br>
# University of Washington, Seattle, WA USA
# </div>
# <hr>
# + [markdown] id="ZhcdSCBpF72N"
# To facilitate design and comprehension of their models, modelers should use standard systems biology formats for
# model descriptions, simulation experiments, and to distribute stand-alone archives which can regenerate the modeling study. We will discuss three of these standards - the Systems Biology Markup Language (SBML), the Simulation Experiment Description Markup Language (SED-ML), and the COMBINE archive/ inline Open Modeling EXchange format (OMEX) format.
#
# + [markdown] id="gTacOCQSF72O"
# ## TOC
# * [Links to relevant resources](#relevant-resources)
# * [Packages and Constants](#standardized-formats-packages-and-constants)
# * [Import and export capabilities with Tellurium](#import-export)
# * [Importing SBML directly from the BioModels Database for simulation](#import-from-biomodels)
# * [Exporting SBML or Antimony models](#export-to-sbml-or-antimony)
# * [Writing SED-ML with PhraSED-ML](#writing-phrasedml)
# * [Exporting SED-ML](#exporting-sedml)
# * [Generating a COMBINE archive](#combine-archive)
# * [Exercises](#exercises)
# + [markdown] id="Ggjj-PUibEvL"
# # Links to relevant resources <a class="anchor" id="relevant-resources"></a>
#
# <a href="http://model.caltech.edu/">SBML specification</a><br>
# <a href="http://sbml.org/SBML_Software_Guide/SBML_Software_Matrix">SBML tool support</a><br>
# <a href="https://sed-ml.org/">SED-ML specification</a><br>
# <a href="https://sed-ml.org/showcase.html">SED-ML tool support</a><br>
# <a href="http://phrasedml.sourceforge.net/phrasedml__api_8h.html">PhraSED-ML documentation</a><br>
# <a href="http://phrasedml.sourceforge.net/Tutorial.html">PhraSED-ML tutorial</a><br>
# <a href="https://tellurium.readthedocs.io/en/latest/">Tellurium documentation</a><br>
# <a href="https://libroadrunner.readthedocs.io/en/latest/">libRoadRunner documentation</a><br>
# <a href="https://tellurium.readthedocs.io/en/latest/antimony.html">Antimony documentation</a><br>
# <a href="http://copasi.org/Download/">COPASI download</a><br>
#
#
#
# + [markdown] id="fTKMXo5KF72O"
# # Packages and constants <a class="anchor" id="standardized-formats-packages-and-constants"></a>
#
# + pycharm={"name": "#%%\n"} id="1cR03Fb2F72P" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1627169908111, "user_tz": 420, "elapsed": 33331, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="5495d70c-d014-4a93-bbe9-f48422474f8d"
# !pip install tellurium -q
# + id="fkVMJxyp7onl" executionInfo={"status": "ok", "timestamp": 1627169910173, "user_tz": 420, "elapsed": 2066, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}}
import tellurium as te
import phrasedml
# + [markdown] id="I9K8TkELF72R"
# # Import and export capabilities with Tellurium <a class="anchor" id="import-export"></a>
#
# Models can be imported from the BioModels Database, given the appropriate BioModel ID using a standard URL format to programmatically access the model of interest.
#
# We will use this model of respiratory oscillations in Saccharomyces cerevisae by <a href="https://www.ebi.ac.uk/biomodels/BIOMD0000000090"><NAME> et al. (2001)</a> </div> as an example:
# <br>
#
# <div align='center'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/wolf_publication.PNG" width="65%" style="padding: 20px"></div>
# <br>
# <div align='center'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/wolf_network.PNG" width="65%" style="padding: 20px"></div>
# + [markdown] id="n1CuAlSBF72R"
# # Importing SBML directly from the BioModels Database for simulation <a class="anchor" id="import-from-biomodels"></a>
#
# SBML is a software data format for describing computational biological models. Markup languages allow you to separate annotations and documentation about the content from the content itself, using standardized tags. So the model and annotations are stored in a single file, but tools that support SBML are designed to interpret these to perform tasks. SBML is independent of any particular software tool and is broadly applicable to the modeling domain. It is open and free, and widely supported. Tools might allow for writing the model, simulating the model, visualizing the network, etc.
#
# We will demonstrate how Tellurium supports import and export of SBML model files.
# + pycharm={"name": "#%%\n"} id="X-IBVygFF72R" colab={"base_uri": "https://localhost:8080/", "height": 606} executionInfo={"status": "ok", "timestamp": 1627169911809, "user_tz": 420, "elapsed": 1640, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="2e49cd9d-e081-48e1-fa73-def82caf4434"
# Import an SBML model from the BioModels Database using a url
wolf = te.loadSBMLModel("https://www.ebi.ac.uk/biomodels/model/download/BIOMD0000000090.2?filename=BIOMD0000000090_url.xml")
# simulate(start, end, points); plot() renders the most recent results.
wolf.simulate(0, 200, 1000)
wolf.plot(figsize = (15, 10), xtitle = 'Time', ytitle = 'Concentration')
# + [markdown] id="75VhyKNVF72S"
# # Exporting SBML or Antimony models <a class="anchor" id="export-to-sbml-or-antimony"></a>
# + pycharm={"name": "#%%\n"} id="vWlz_l41F72S" executionInfo={"status": "ok", "timestamp": 1627169911811, "user_tz": 420, "elapsed": 12, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}}
# Export the model you just accessed from BioModels to the current directory as an SBML string
# reset() returns the model state to its initial conditions, so the export
# reflects the unsimulated model rather than the post-simulation state.
wolf.reset()
wolf.exportToSBML('Wolf2001_Respiratory_Oscillations.xml', current = True)
# + pycharm={"name": "#%%\n"} id="BthQF38yF72S" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1627169911957, "user_tz": 420, "elapsed": 156, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="6c4dcd30-5946-4642-ed8f-2ee62914d5d3"
# You can also export the model to the current directory as an Antimony string
# Let's take a look at the string first
# (Antimony is the human-readable model format used by Tellurium.)
print(wolf.getCurrentAntimony())
# + id="mTKLzMkoWoUG" executionInfo={"status": "ok", "timestamp": 1627169912075, "user_tz": 420, "elapsed": 119, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}}
# Edit the Antimony string of Wolf et al.:
# Update model name for ease of use with PhraSED-ML
# Remove model name annotations -- causes error with SED-ML export
wolf = te.loada("""
// Created by libAntimony v2.12.0
model wolf
// Compartments and Species:
compartment c0, c1, c2;
species $sul_ex in c0, $eth_ex in c0, $oxy_ex in c0, oxy in c2, $H2O in c2;
species A3c in c1, aps in c1, $PPi in c1, pap in c1, sul in c1, eth in c1;
species $A2c in c1, hyd in c1, cys in c1, N2 in c1, $N1 in c1, aco in c1;
species oah in c1, S1 in c2, $S2 in c2, $C1 in c2, $C2 in c2, $A2m in c2;
species A3m in c2, $Ho in c1, $Hm in c2;
// Assignment Rules:
A2c := Ac - A3c;
N1 := N - N2;
S2 := S - S1;
A2m := Am - A3m;
// Reactions:
v1: $sul_ex => sul; c0*k_v0/(1 + (cys/Kc)^n);
v13: $eth_ex => eth; c0*k_v13;
v2: sul + A3c => aps + $PPi; c1*k2*sul*A3c;
v10: $oxy_ex => oxy; c0*k_v10;
v14: oxy => $oxy_ex; c2*k14*oxy;
v3: aps + A3c => pap + $A2c; c1*k3*aps*A3c;
v4: pap + 3 N2 => hyd + 3 $N1; c1*k4*pap*N2;
v5: hyd + oah => cys; c1*k5*hyd*oah;
v6: cys => ; c1*k6*cys;
v7: eth + 2 $N1 => aco + 2 N2; c1*k7*eth*N1;
v15: aco => oah; c1*k15*aco;
v17: hyd => ; c1*k17*hyd;
v18: oah => ; c1*k18*oah;
v8: $S2 + aco => S1; c2*k8*aco*S2;
v9: S1 + 4 $N1 => $S2 + 4 N2; c2*k9*S1*N1;
v11a: $C1 + $Hm + N2 => $C2 + $Ho + $N1; c2*k11*N2*oxy/((a*N2 + oxy)*(1 + (hyd/Kh)^m));
v11a2: $C2 + oxy => $C1 + $H2O; c2*k11*N2*oxy/((a*N2 + oxy)*(1 + (hyd/Kh)^m));
v16: $A2c + A3m => $A2m + A3c; c2*k16*A3m*A2c;
v11b: $Ho + $A2m => $Hm + A3m; (c2*3*k11*N2*oxy/((a*N2 + oxy)*(1 + (hyd/Kh)^m)))*A2m/(Ka + A2m);
vLEAK: $Ho => $Hm; 0;
v12: A3c => $A2c; c1*k12*A3c;
// Species initializations:
sul_ex = 0;
eth_ex = 0;
oxy_ex = 0;
oxy = 7/c2;
oxy has substance_per_volume;
H2O = 0;
A3c = 1.5/c1;
A3c has substance_per_volume;
aps = 0.5/c1;
aps has substance_per_volume;
PPi = 0;
pap = 0.4/c1;
pap has substance_per_volume;
sul = 0.4/c1;
sul has substance_per_volume;
eth = 4/c1;
eth has substance_per_volume;
A2c has substance_per_volume;
hyd = 0.5/c1;
hyd has substance_per_volume;
cys = 0.3/c1;
cys has substance_per_volume;
N2 = 2/c1;
N2 has substance_per_volume;
N1 has substance_per_volume;
aco = 0.3/c1;
aco has substance_per_volume;
oah = 1.5/c1;
oah has substance_per_volume;
S1 = 1.5/c2;
S1 has substance_per_volume;
S2 has substance_per_volume;
C1 = 0;
C2 = 0;
A2m has substance_per_volume;
A3m = 1.5/c2;
A3m has substance_per_volume;
Ho = 0;
Hm = 0;
// Compartment initializations:
c0 = 1;
c1 = 1;
c2 = 1;
// Variable initializations:
Ac = 2;
N = 2;
S = 2;
Am = 2;
k_v0 = 1.6;
k2 = 0.2;
k3 = 0.2;
k4 = 0.2;
k5 = 0.1;
k6 = 0.12;
k7 = 10;
k8 = 10;
k9 = 10;
k_v10 = 80;
k11 = 10;
k12 = 5;
k_v13 = 4;
k14 = 10;
k15 = 5;
k16 = 10;
k17 = 0.02;
k18 = 1;
n = 4;
m = 4;
Ka = 1;
Kc = 0.1;
a = 0.1;
Kh = 0.5;
// Other declarations:
const c0, c1, c2, Ac, N, S, Am, k_v0, k2, k3, k4, k5, k6, k7, k8, k9, k_v10;
const k11, k12, k_v13, k14, k15, k16, k17, k18, n, m, Ka, Kc, a, Kh;
// Unit definitions:
unit substance = mole;
unit substance_per_volume = mole / litre;
// Display Names:
c0 is "external";
c1 is "cytosol";
c2 is "mitochondria";
sul_ex is "SO4_ex";
eth_ex is "EtOH_ex";
oxy_ex is "O2_ex";
oxy is "O2";
A3c is "ATP";
aps is "APS";
pap is "PAPS";
sul is "SO4";
eth is "EtOH";
A2c is "ADP";
hyd is "H2S";
cys is "CYS";
N2 is "NADH";
N1 is "NAD";
aco is "AcCoA";
oah is "OAH";
A2m is "ADP_mit";
A3m is "ATP_mit";
v11a is "vET1";
v11a2 is "vET2";
v11b is "vSYNT";
// CV terms:
c0 hypernym "http://identifiers.org/obo.go/GO:0005576"
c1 hypernym "http://identifiers.org/obo.go/GO:0005829"
c2 hypernym "http://identifiers.org/obo.go/GO:0005739"
sul_ex identity "http://identifiers.org/obo.chebi/CHEBI:16189"
eth_ex identity "http://identifiers.org/obo.chebi/CHEBI:16236"
oxy_ex identity "http://identifiers.org/obo.chebi/CHEBI:15379"
oxy identity "http://identifiers.org/obo.chebi/CHEBI:15379"
H2O identity "http://identifiers.org/obo.chebi/CHEBI:15377"
A3c identity "http://identifiers.org/obo.chebi/CHEBI:15422"
aps identity "http://identifiers.org/obo.chebi/CHEBI:17709"
PPi identity "http://identifiers.org/obo.chebi/CHEBI:18361"
pap identity "http://identifiers.org/obo.chebi/CHEBI:17980"
sul identity "http://identifiers.org/obo.chebi/CHEBI:16189"
eth identity "http://identifiers.org/obo.chebi/CHEBI:16236"
A2c identity "http://identifiers.org/obo.chebi/CHEBI:16761"
hyd identity "http://identifiers.org/obo.chebi/CHEBI:16136"
cys identity "http://identifiers.org/obo.chebi/CHEBI:17561"
N2 identity "http://identifiers.org/obo.chebi/CHEBI:16908"
N1 identity "http://identifiers.org/obo.chebi/CHEBI:15846"
aco identity "http://identifiers.org/obo.chebi/CHEBI:15351"
oah identity "http://identifiers.org/obo.chebi/CHEBI:16288"
S1 parthood "http://identifiers.org/obo.go/GO:0030062"
S2 parthood "http://identifiers.org/obo.go/GO:0030062"
C1 hypernym "http://identifiers.org/obo.go/GO:0005746"
C2 hypernym "http://identifiers.org/obo.go/GO:0005746"
A2m identity "http://identifiers.org/obo.chebi/CHEBI:16761"
A3m identity "http://identifiers.org/obo.chebi/CHEBI:15422"
Ho identity "http://identifiers.org/obo.chebi/CHEBI:24636"
Hm identity "http://identifiers.org/obo.chebi/CHEBI:24636"
v1 hypernym "http://identifiers.org/obo.go/GO:0015381"
v13 hypernym "http://identifiers.org/obo.go/GO:0015850"
v2 identity "http://identifiers.org/ec-code/2.7.7.4"
v3 identity "http://identifiers.org/ec-code/2.7.1.25"
v3 hypernym "http://identifiers.org/obo.go/GO:0004020"
v4 version "http://identifiers.org/ec-code/1.8.4.8",
"http://identifiers.org/ec-code/1.8.1.2"
v5 version "http://identifiers.org/ec-code/4.4.1.1",
"http://identifiers.org/ec-code/4.2.1.22",
"http://identifiers.org/ec-code/2.5.1.49"
v7 version "http://identifiers.org/ec-code/6.2.1.1",
"http://identifiers.org/ec-code/1.2.1.3",
"http://identifiers.org/ec-code/1.1.1.1"
v15 identity "http://identifiers.org/ec-code/2.3.1.31"
v8 parthood "http://identifiers.org/obo.go/GO:0006099"
v9 parthood "http://identifiers.org/obo.go/GO:0006099"
v11a identity "http://identifiers.org/obo.go/GO:0015990"
v11a parthood "http://identifiers.org/obo.go/GO:0042775"
v11a version "http://identifiers.org/obo.go/GO:0002082"
v11a2 parthood "http://identifiers.org/obo.go/GO:0042775"
v11a2 version "http://identifiers.org/obo.go/GO:0002082"
v11a2 identity "http://identifiers.org/obo.go/GO:0006123"
v16 identity "http://identifiers.org/obo.go/GO:0005471"
v11b parthood "http://identifiers.org/obo.go/GO:0042775"
v11b hypernym "http://identifiers.org/obo.go/GO:0006119"
v11b version "http://identifiers.org/obo.go/GO:0002082"
vLEAK hypernym "http://identifiers.org/obo.go/GO:0006810"
v12 hypernym "http://identifiers.org/obo.go/GO:0006200"
end
""")
# Export SBML and Antimony versions of the updated model to current working directory
# (both files are reused below for the SED-ML and COMBINE-archive steps)
wolf.exportToAntimony('wolf_antimony.txt')
wolf.exportToSBML('wolf.xml')
# + pycharm={"name": "#%%\n"} id="Dfu1ZRKbF72T" colab={"base_uri": "https://localhost:8080/", "height": 388} executionInfo={"status": "ok", "timestamp": 1627169912468, "user_tz": 420, "elapsed": 395, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="3dfd8893-c6d9-4ed9-f590-58d678c6ced6"
# Let's work with the species 'oxy'(CHEBI ID: 15379) - or dioxygen - going forward
# Passing ['time', 'oxy'] restricts the recorded columns to those two.
wolf.simulate(0, 100, 1000, ['time', 'oxy']) # note that specific species can be selected for recording concentrations over the timecourse
wolf.plot(figsize = (10, 6), xtitle = 'Time', ytitle = 'Concentration')
# + [markdown] id="zDoRvfzRF72T"
# # Writing SED-ML with PhraSED-ML <a class="anchor" id="writing-phrasedml"></a>
#
# SED-ML encodes the information required by the minimal information about a simulation experiment guidelines (MIASE) to enable reproduction of simulation experiments in a computer-readable format.
#
# The specification includes:
# * selection of experimental data for the experiment
# * models used for the experiment
# * which simulation to run on which models
# * which results to pass to output
# * how results should be output
#
# PhraSED-ML is a language and a library that provide a text-based way to read, summarize, and create SED-ML files as part of the greater Tellurium modeling environment we have discussed.
# + pycharm={"name": "#%%\n"} id="gr67sF3iF72T" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1627169912469, "user_tz": 420, "elapsed": 6, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="d3220092-77bb-4f12-9c4b-220ecb59a43c"
# Write phraSED-ML string specifying the simulation study
# (model -> simulation -> task -> output, one SED-ML entity per statement)
wolf_phrasedml = '''
// Set model
wolf = model "wolf.xml" # model_id = model source_model
// Deterministic simulation
det_sim = simulate uniform(0, 500, 1000) # sim_id = simulate simulation_type
wolf_det_sim = run det_sim on wolf # task_id = run sim_id on model_id
plot "Wolf et al. dynamics (Model ID: BIOMD0000000090)" time vs oxy # plot title_name x vs y
'''
# Generate SED-ML string from the phraSED-ML string
# resetAll() clears the simulation state first; setReferencedSBML binds the
# model id used in the phraSED-ML source ("wolf.xml") to the SBML document.
wolf.resetAll()
wolf_sbml = wolf.getSBML()
phrasedml.setReferencedSBML("wolf.xml", wolf_sbml)
wolf_sedml = phrasedml.convertString(wolf_phrasedml)
print(wolf_sedml)
# + [markdown] id="1T62KeYyF72U"
# # Exporting SED-ML <a class="anchor" id="exporting-sedml"></a>
# + pycharm={"name": "#%%\n"} id="t67J1PpdF72U" colab={"base_uri": "https://localhost:8080/", "height": 404} executionInfo={"status": "ok", "timestamp": 1627169912978, "user_tz": 420, "elapsed": 513, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="aaedf2d6-2fcb-453b-9e84-7d523f3d5fda"
# Save the SED-ML simulation experiment to your current working directory
te.saveToFile('wolf_sedml.xml', wolf_sedml)
# Load and run SED-ML script
# (re-executes the experiment purely from the saved SED-ML description)
te.executeSEDML('wolf_sedml.xml')
# + [markdown] id="w6z4-C-YF72U"
# # Generating a COMBINE archive <a class="anchor" id="combine-archive"></a>
#
# COMBINE archives package SBML models and SED-ML simulation experiment descriptions together to ensure complete modeling studies or experiments can be exchanged between software tools. Tellurium provides the inline Open Modeling EXchange format (OMEX) to edit contents of COMBINE archives in a human-readable format. Inline OMEX is essentially an Antimony description of the model joined to the PhraSED-ML experiment description.
#
# + colab={"base_uri": "https://localhost:8080/"} id="Rti8j3F8OHYz" executionInfo={"status": "ok", "timestamp": 1627169912979, "user_tz": 420, "elapsed": 11, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="8277a728-facb-400a-b332-b8122771236a"
# Read Antimony model into a string
wolf_antimony = te.readFromFile('wolf_antimony.txt')
# create an inline OMEX string
# (inline OMEX = Antimony model text followed by the phraSED-ML experiment)
wolf_inline_omex = '\n'.join([wolf_antimony, wolf_phrasedml])
print(wolf_inline_omex)
# + id="ECy83k3MQtqK" executionInfo={"status": "ok", "timestamp": 1627169912979, "user_tz": 420, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}}
# export to a COMBINE archive
# (wolf.omex bundles the model and the simulation experiment together)
te.exportInlineOmex(wolf_inline_omex, 'wolf.omex')
# + [markdown] id="cYngS7UPF72V"
# # Exercises <a class="anchor" id="exercises"></a>
# + [markdown] id="uE4vHfryF72W"
# ## Exercise 1:
#
# Download the <a href="http://www.ebi.ac.uk/biomodels-main/BIOMD0000000010 "> Kholodenko 2000 model</a> of ultrasensitivity and negative feedback oscillations in the MAPK cascade from the BioModels Database, and upload to your workspace. Simulate and plot simulation results for the model.
#
# <div align='center'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/kholodenko_publication.PNG" width="75%"></div>
#
# + id="VgvtvJkGR4If" executionInfo={"status": "ok", "timestamp": 1627169912981, "user_tz": 420, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}}
# Write your solution here
# + [markdown] id="ll3L2cFTR786"
# ## Exercise 1 Solution:
# + pycharm={"name": "#%%\n"} id="_4Ka9ODyF72W" colab={"base_uri": "https://localhost:8080/", "height": 0} executionInfo={"status": "ok", "timestamp": 1627169914061, "user_tz": 420, "elapsed": 1087, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjBIQDv_3y0-AcP-Q3CnINVqa-uRyxiVqVccRVRpSg=s64", "userId": "08884588606127160502"}} outputId="3a6bd8dd-1427-4e77-c203-baa9aba34fe2"
# Solution
# Load the Kholodenko (2000) MAPK cascade model and run a deterministic
# time course long enough (t = 5000) to show the feedback-driven oscillations.
r = te.loadSBMLModel(
    "https://www.ebi.ac.uk/biomodels/model/download/BIOMD0000000010?filename=BIOMD0000000010_url.xml")
r.simulate(0, 5000, 1000)
r.plot()
# + [markdown] id="ZWKlKAyNF72W"
# # Acknowledgements
# <br>
# <div align='left'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/acknowledgments.png" width="80%"></div>
# + [markdown] id="hHmm-gKZF72W"
# <br>
# <html>
# <head>
# </head>
# <body>
# <h1>Bibliography</h1>
# <ol>
# <li>
# <p><NAME> et al., <cite>Tellurium: An extensible python-based modeling environment for systems and synthetic biology</cite>, Biosystems, vol. 171, pp. 74–79, Sep. 2018.</p>
# </li>
# <li>
# <p><NAME> et al., <cite>libRoadRunner: a high performance SBML simulation and analysis library.,</cite>, Bioinformatics, vol. 31, no. 20, pp. 3315–21, Oct. 2015.</p>
# <li>
# <p><NAME>, <NAME>, <NAME>, and <NAME>, <cite>Antimony: a modular model definition language</cite>, Bioinformatics, vol. 25, no. 18, pp. 2452–2454, Sep. 2009.</p>
# </li>
# <li>
# <p><NAME>, <NAME>, <NAME>, and <NAME>, <cite>phraSED-ML: a paraphrased, human-readable adaptation of SED-ML</cite>, J. Bioinform. Comput. Biol., vol. 14, no. 06, Dec. 2016.</p>
# </li>
# <li>
# <p> <NAME>, <NAME>, <NAME>, <NAME>, <cite>Quantification of short term signaling by the epidermal growth factor receptor.</cite>, J Biol Chem., vol. 274, no. 42, Oct. 1999.</p>
# </li>
# </ol>
# </body>
# </html>
#
| notebooks/archive/Reproducibility-and-Standards/reproducible-distribution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from numpy import array
from sympy import symbols, Eq, Matrix, diff, Derivative, simplify, factor, expand, latex, init_printing, collect
init_printing()
from IPython.display import display, Math
# ## Define the interpolation functions
# +
# Nodal coordinates of the 4-node quadrilateral and the natural coordinates.
x1, y1, x2, y2, x3, y3, x4, y4 = symbols('x_1, y_1, x_2, y_2, x_3, y_3, x_4, y_4')
r, s = symbols('r, s')
# Define the interpolation functions
# (bilinear shape functions, one per corner node of the quadrilateral)
h1 = factor(1/4*(1+r)*(1+s))
h2 = factor(1/4*(1-r)*(1+s))
h3 = factor(1/4*(1-r)*(1-s))
h4 = factor(1/4*(1+r)*(1-s))
# Bug fix: the original cell labelled h3 and h4 as "h_2" (copy-paste error);
# each shape function now displays under its own name.
display(Math('h_1 = ' + latex(h1)))
display(Math('h_2 = ' + latex(h2)))
display(Math('h_3 = ' + latex(h3)))
display(Math('h_4 = ' + latex(h4)))
# -
# # Relate the (x, y) Local Coordinate System to the (r, s) Natural Coordinate System
# +
# Relate the (x, y) coordinate system to the (r, s) coordinate system
# Isoparametric map: physical coordinates are interpolated from the nodal
# coordinates with the same shape functions used for the displacements.
x = h1*x1 + h2*x2 + h3*x3 + h4*x4
y = h1*y1 + h2*y2 + h3*y3 + h4*y4
display(Math('x = ' + latex(x)))
display(Math('y = ' + latex(y)))
# -
# # Find the Jacobian Matrix
# The Jacobian matrix converts derivatives with respect to x and y into derivatives with respect to r and s
#
# $J = \begin{bmatrix} \frac{dx}{dr} & \frac{dy}{dr} \\ \frac{dx}{ds} & \frac{dy}{ds} \end{bmatrix}$
# +
# Calculate the Jacobian matrix
# J maps derivatives in (r, s) to derivatives in (x, y):
#   J = [[dx/dr, dy/dr], [dx/ds, dy/ds]]
J = Matrix([[diff(x, r), diff(y, r)],
            [diff(x, s), diff(y, s)]])
display(Math('J = ' + latex(factor(J))))
# +
# Curvature-displacement matrix in natural coordinates; DOF order per node
# is [w, theta_x, theta_y] for nodes 1..4.
# Bug fix: the 9th entry of the first row read -diff(h2, r) twice -- the
# third node's entry must use h3, consistent with the corrected B_kappa
# defined later in this notebook.
B_kappa = Matrix([[0, 0, -diff(h1, r), 0, 0, -diff(h2, r), 0, 0, -diff(h3, r), 0, 0, -diff(h4, r)],
                  [0, diff(h1, s), 0, 0, diff(h2, s), 0, 0, diff(h3, s), 0, 0, diff(h4, s), 0 ],
                  [0, diff(h1, r), -diff(h1, s), 0, diff(h2, r), -diff(h2, s), 0, diff(h3, r), -diff(h3, s), 0, diff(h4, r), -diff(h4, s)]])
display(Math('B_\kappa = J^{-1}(1/4)' + latex(B_kappa*4)))
# +
# Shape-function derivatives with respect to the natural coordinates (r, s).
dH = Matrix([[diff(h1, r), diff(h2, r), diff(h3, r), diff(h4, r)],
             [diff(h1, s), diff(h2, s), diff(h3, s), diff(h4, s)]])
# Membrane strain-displacement matrix; DOF order per node is [u, v].
B_m = Matrix([[diff(h1, r), 0, diff(h2, r), 0, diff(h3, r), 0, diff(h4, r), 0 ],
              [ 0, diff(h1, s), 0, diff(h2, s), 0, diff(h3, s), 0, diff(h4, s)],
              [diff(h1, s), diff(h1, r), diff(h2, s), diff(h2, r), diff(h3, s), diff(h3, r), diff(h4, s), diff(h4, r)]])
display(Math('dH = (1/4)' + latex(dH*4)))
display(Math('B_m = J^{-1}(1/4)' + latex(B_m*4)))
print(B_m*4)
# +
# Symbols for the transverse-shear derivation: plate thickness, nodal
# coordinates, transverse displacements and rotations at the four nodes.
# NOTE(review): these x1..y4 symbols (no underscores) shadow the x_1..y_4
# symbols defined earlier in the notebook.
r, s = symbols('r, s')
h = symbols('h')
x1, x2, x3, x4 = symbols('x1, x2, x3, x4')
y1, y2, y3, y4 = symbols('y1, y2, y3, y4')
w1, w2, w3, w4 = symbols('w1, w2, w3, w4')
theta_x1, theta_x2, theta_x3, theta_x4 = symbols('theta_x1, theta_x2, theta_x3, theta_x4')
theta_y1, theta_y2, theta_y3, theta_y4 = symbols('theta_y1, theta_y2, theta_y3, theta_y4')
# Assumed transverse shear strains sampled at the edge midpoints and
# interpolated linearly: in s for gamma_rz, in r for gamma_sz.
gamma_rz = (1 + s)*((w1 - w2)/2 + (x1 - x2)/4*(theta_y1 + theta_y2) - (y1 - y2)/4*(theta_x1 + theta_x2)) \
         + (1 - s)*((w4 - w3)/2 + (x4 - x3)/4*(theta_y4 + theta_y3) - (y4 - y3)/4*(theta_x4 + theta_x3))
gamma_sz = (1 + r)*((w1 - w4)/2 + (x1 - x4)/4*(theta_y1 + theta_y4) - (y1 - y4)/4*(theta_x1 + theta_x4)) \
         + (1 - r)*((w2 - w3)/2 + (x2 - x3)/4*(theta_y2 + theta_y3) - (y2 - y3)/4*(theta_x2 + theta_x3))
# Expand and group by DOF so the shear B-matrix coefficients can be read off.
display(1/4*collect(expand(gamma_rz*4), [w1, theta_x1, theta_y1, w2, theta_x2, theta_y2, w3, theta_x3, theta_y3, w4, theta_x4, theta_y4]))
print('')
display(1/4*collect(expand(gamma_sz*4), [w1, theta_x1, theta_y1, w2, theta_x2, theta_y2, w3, theta_x3, theta_y3, w4, theta_x4, theta_y4]))
# -
# # References
#
# The following references were used in the formulation of this element. The primary reference was the first one because it had a section directly relating to isoparametric general plate bending elements, but all three were used. The first reference is a free download from MIT's website.
#
# 1. "Finite Element Procedures, 2nd Edition", <NAME>
# 2. "Finite Element Analysis Fundamentals", <NAME>
# 3. "A First Course in the Finite Element Method, 4th Edition", <NAME>
# # Write the Equations that Map the (x, y) Coordinates to the (r, s) Coordinates
# For any invertible matrix $M = \begin{bmatrix} a & b \\ c & d \end{bmatrix}$, the inverse can be found as follows:
# +
# Demonstrate the closed-form inverse of a generic 2x2 matrix.
# NOTE(review): this rebinds the names c and d at notebook scope; later
# cells redefine what they need, so no conflict in execution order.
a, b, c, d = symbols('a, b, c, d')
M = Matrix([[a, b],
            [c, d]])
display(Math('M^{-1} = ' + latex(M.inv())))
# -
# Recognizing that the denominator in each term of this inverse is the determinant of $M$, the inverse can be rewritten as:
#
# $M^{-1} = \frac{1}{|M|}\begin{bmatrix} d & -b \\ -c & a \end{bmatrix}$
# Applying this to the Jacobian, we can express its inverse as:
#
# $J^{-1} = \frac{1}{|J|} \begin{bmatrix} \frac{dy}{ds} & \frac{-dy}{dr} \\ \frac{-dx}{ds} & \frac{dx}{dr} \end{bmatrix}$
# We can relate the derivatives of the (r, s) coordinate system to the derivatives of the (x, y) coordinate system using the Jacobian operator:
#
# $\begin{bmatrix} \frac{d}{dr} \\ \frac{d}{ds} \end{bmatrix} = \begin{bmatrix} \frac{dx}{dr} & \frac{dy}{dr} \\ \frac{dx}{ds} & \frac{dy}{ds}\end{bmatrix} \begin{bmatrix} \frac{d}{dx} \\ \frac{d}{dy} \end{bmatrix} = J \begin{bmatrix} \frac{d}{dx} \\ \frac{d}{dy} \end{bmatrix}$
#
# Rearranging the equation gives us the formula for any derivative of the (x, y) coordinate system in terms of (r, s):
#
# $\begin{bmatrix} \frac{d}{dx} \\ \frac{d}{dy} \end{bmatrix} = \frac{1}{|J|} \begin{bmatrix} \frac{dy}{ds} & \frac{-dy}{dr} \\ \frac{-dx}{ds} & \frac{dx}{dr} \end{bmatrix} \begin{bmatrix} \frac{d}{dr} \\ \frac{d}{ds} \end{bmatrix}$
#
# Finally, multiplying through, we can obtain equations for converting derivatives of any function with respect to x or y to derivatives with respect to r and s, respectively:
# $\frac{d}{dx} = \frac{1}{|J|} (\frac{dy}{ds} \frac{d}{dr} + \frac{-dy}{dr} \frac{d}{ds})$
#
# $\frac{d}{dy} = \frac{1}{|J|} (\frac{-dx}{ds} \frac{d}{dr} + \frac{dx}{dr} \frac{d}{ds})$
# The bending strains vary through the thickness of the plate. Using the curvatures of the plate they can be expressed as:
#
# $\begin{bmatrix} \epsilon_{xx} \\ \epsilon_{yy} \\ \gamma_{xy} \end{bmatrix} = -z \begin{bmatrix} \frac{d \beta_{x}}{dx} \\ \frac{d \beta_{y}}{dy} \\ \frac{d \beta_{x}}{dy} + \frac{d\beta_{y}}{dx} \end{bmatrix} = \frac{-z}{|J|} \begin{bmatrix} \frac{dy}{ds} \frac{d\beta_x}{dr} + \frac{-dy}{dr} \frac{d\beta_x}{ds} \\ \frac{-dx}{ds} \frac{d\beta_y}{dr} + \frac{dx}{dr} \frac{d\beta_y}{ds} \\ (\frac{-dx}{ds} \frac{d\beta_x}{dr} + \frac{dx}{dr} \frac{d\beta_x}{ds}) + (\frac{dy}{ds} \frac{d\beta_y}{dr} + \frac{-dy}{dr} \frac{d\beta_y}{ds})\end{bmatrix}$
# +
# Nodal degrees of freedom: transverse displacement w_z and the two
# rotations theta_x, theta_y at each of the nodes i, j, m, n.
w_zi, theta_xi, theta_yi = symbols('w_{zi}, \\theta_{xi}, \\theta_{yi}')
w_zj, theta_xj, theta_yj = symbols('w_{zj}, \\theta_{xj}, \\theta_{yj}')
w_zm, theta_xm, theta_ym = symbols('w_{zm}, \\theta_{xm}, \\theta_{ym}')
w_zn, theta_xn, theta_yn = symbols('w_{zn}, \\theta_{xn}, \\theta_{yn}')
# Element displacement vector: 3 DOFs per node, ordered i, j, m, n.
d = Matrix([w_zi, theta_xi, theta_yi,
            w_zj, theta_xj, theta_yj,
            w_zm, theta_xm, theta_ym,
            w_zn, theta_xn, theta_yn])
d
# -
# Interpolate the transverse displacement and section rotations from the
# nodal DOFs. beta_x carries a -1 factor -- a sign convention relating
# theta_y to the x-rotation field; confirm against the references below.
w = Matrix([h1, 0, 0, h2, 0, 0, h3, 0, 0, h4, 0, 0]).T*d
beta_x = -1*Matrix([0, 0, h1, 0, 0, h2, 0, 0, h3, 0, 0, h4]).T*d
beta_y = Matrix([0, h1, 0, 0, h2, 0, 0, h3, 0, 0, h4, 0]).T*d
display(w, beta_x, beta_y)
# Derivatives of the rotation fields with respect to x and y, using the
# inverse-Jacobian formulas derived above (the 1/|J| factor included).
beta_x_dx = 1/J.det()*(diff(y, s)*diff(beta_x, r) - diff(y, r)*diff(beta_x, s))
beta_x_dy = 1/J.det()*(-diff(x, s)*diff(beta_x, r) + diff(x, r)*diff(beta_x, s))
beta_y_dx = 1/J.det()*(diff(y, s)*diff(beta_y, r) - diff(y, r)*diff(beta_y, s))
beta_y_dy = 1/J.det()*(-diff(x, s)*diff(beta_y, r) + diff(x, r)*diff(beta_y, s))
# Curvature-displacement matrix; DOF order per node is [w, theta_x, theta_y].
B_kappa = Matrix([[0, 0, diff(-h1, r), 0, 0, diff(-h2, r), 0, 0, diff(-h3, r), 0, 0, diff(-h4, r)],
                  [0, diff(h1, s), 0, 0, diff(h2, s), 0, 0, diff(h3, s), 0, 0, diff(h4, s), 0],
                  [0, diff(h1, r), diff(-h1, s), 0, diff(h2, r), diff(-h2, s), 0, diff(h3, r), diff(-h3, s), 0, diff(h4, r), diff(-h4, s)]])
B_kappa
# NOTE(review): the original cell contained a dangling statement `x =` with
# no right-hand side -- a SyntaxError that prevented this cell from running.
# It has been removed; `x` below refers to the isoparametric coordinate map
# defined earlier in the notebook.
gr = diff(x, r)
| Derivations/Isoparametric Quad Element.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Welcome to PyHEP 2020!
#
# This is the third workshop in the series, and it's already a little different.
# +
import matplotlib.pyplot as plt
# Bar chart of participant counts for the three PyHEP workshops (hard-coded).
plt.bar(["2018", "2019", "2020"], [68, 55, 1000])
plt.ylabel("Number of participants");
# -
import pandas
# Load the survey responses; the column names are the full question texts,
# which is why the lookups below use long literal strings.
df = pandas.read_csv("~/tmp/pyhep2020-survey-results.csv")
# Canonical names for the free-text "collaboration" survey answers.  Answers
# naming several collaborations map to a ";"-joined string so they can be
# split and counted separately later.  Hoisted to module level so the dict is
# built once rather than on every applymap call.
_COLLABORATION_ALIASES = {
    "alice": "ALICE",
    "Alice": "ALICE",
    "ALICE": "ALICE",
    "ALICE member": "ALICE",
    "ATLAS": "ATLAS",
    "ATLAS, BELLE2": "ATLAS;BELLE",
    "ATLAS, Darkside": "ATLAS;DarkSide",
    "ATLAS, FCC, IDEA": "ATLAS;FCC;IDEA",
    "ATLAS, IRIS-HEP": "ATLAS;IRIS-HEP",
    "ATLAS, KM3NeT": "ATLAS;KM3NeT",
    "BaBar, HFLAV, PDG, LHCb, FNAL-E989": "BaBar;HFLAV;PDG;LHCb;g-2",
    "Belle": "BELLE",
    "BELLE": "BELLE",
    "Belle2": "BELLE",
    "Belle, ARA": "BELLE;ARA",
    "Belle / Belle II": "BELLE",
    "Belle, Belle II": "BELLE",
    "belle II": "BELLE",
    "Belle II": "BELLE",
    "Belle II and GAMBIT": "BELLE;GAMBIT",
    "Belle II, CMS": "BELLE;CMS",
    "BELLE I & II": "BELLE",
    "BNL-STAR collaboration": "STAR",
    "cms": "CMS",
    "Cms": "CMS",
    "CMS": "CMS",
    "CMS, ALICE": "CMS;ALICE",
    "CMS, DUNE": "CMS;DUNE",
    "CMS Experiment": "CMS",
    "CMS, SModelS": "CMS;SModelS",
    "COMET": "COMET",
    "COMET MEG": "COMET;MEG",
    "COMPASS": "COMPASS",
    "CRESST, COSINUS": "CRESST;COSINUS",
    "CUORE/CUPID": "CUORE",
    "DarkSide": "DarkSide",
    "DUNE": "DUNE",
    "DUNE (SAND), NICA": "DUNE;NICA",
    "I am currently associated with Belle and Belle 2 experiments.": "BELLE",
    "I am in a team that they are in collaboration with CMS and I am going to join in a CERN group.": "CMS",
    "ICARUS and DUNE": "ICARUS;DUNE",
    "I'm associated with CMS experiment": "CMS",
    "ISOLDE": "ISOLDE",
    "Iuac": "Iuac",
    "Just switched from CMS to CTA": "CTA",
    "KEK": "KEK",
    "key4hep": "key4hep",
    "lhcb": "LHCb",
    "LHCb": "LHCb",
    "LHCb SHiP": "LHCb;SHiP",
    "LHCb, SHiP": "LHCb;SHiP",
    "LHCb, TORCH": "LHCb;TORCH",
    "LUX, NEST": "LUX;NEST",
    "Mu2e": "Mu2e",
    "Mu2e, a few small instrumentation efforts outside Mu2e": "Mu2e",
    "Mu2e, Muon g-2": "Mu2e;g-2",
    "Mu2e, NEWS-G": "Mu2e;NEWS-G",
    "MUon RAdiography of Mt VESuvius (MURAVES)": "MURAVES",
    "NA61/SHINE Experiment": "NA61",
    "NOvA": "NOvA",
    "NOvA, DUNE": "NOvA;DUNE",
    "RENO,BELLE2, etc": "RENO;BELLE",
    "ROOT": "ROOT",
    "STAR": "STAR",
    "SuperCDMS": "SuperCDMS",
    "Super CDMS": "SuperCDMS",
    "Super-Kamiokande, T2K, DUNE": "Super-Kamiokande;T2K;DUNE",
    "T2K": "T2K",
    "UCNA": "UCNA",
    "XENON (direct dark matter search)": "XENON",
    "XENON NEST": "XENON;NEST",
    "Yes. Belle and Belle II": "BELLE",
    "Yes, Belle II collaboration. :)": "BELLE",
    "yes, CMS": "CMS",
    "Yes: CMS": "CMS",
    "Yes, with ATLAS.": "ATLAS",
}


def fix_collaboration_names(x):
    """Map one raw survey answer to canonical ';'-separated collaboration names.

    Unrecognised answers (including NaN) collapse to "Other or none".
    Was previously a lambda assigned to a name (PEP 8 E731) that rebuilt the
    whole dict literal on every call.
    """
    return _COLLABORATION_ALIASES.get(x, "Other or none")
# Canonicalise each free-text collaboration answer, split multi-collaboration
# answers on ";", and plot per-collaboration respondent counts.
pandas.DataFrame(df[
    ["Professional life: Are you associated with one or more experimental or theoretical collaborations? (E.g. ATLAS, CMS, DUNE, USQCD...)"]
].applymap(fix_collaboration_names).iloc[:, 0].str.split(";").tolist()).stack().value_counts(ascending=True).plot.barh(figsize=(10, 20))
# <img src="collaboration-logos.png" width="1000px">
# The closed set of physics-area choices offered by the survey question; the
# indicator columns produced below follow this order.
options = [
    "General physics (student)",
    "High-energy collider physics",
    "Neutrino physics",
    "Physics of nuclei or exotic atoms",
    "Precision frontier",
    "Direct dark matter searches",
    "Astroparticle physics",
    "Astronomy",
    "Theory/simulations",
    "Instrumentation",
    "Other, not listed above",
]


def explode(responses):
    """Turn one ';'-separated answer string into a 0.0/1.0 row over ``options``."""
    chosen = {part.strip() for part in responses.split(";")}
    return [float(option in chosen) for option in options]
# Expand each multi-select answer into a 0/1 indicator row, then normalise
# every respondent's row to sum to 1 before totalling per area.
exploded = df[["Professional life: If you're involved in physics, what area(s) do you study?"]].fillna("").applymap(explode)
indicator = pandas.DataFrame(exploded.iloc[:, 0].tolist(), columns=options)
indicator.div(indicator.sum(axis=1), axis=0).sum(axis=0).iloc[::-1].plot.barh();
# Canonical country names for the free-text "country" survey answers, keyed on
# the title-cased form.  Hoisted to module level so the dict is built once
# instead of on every call (it used to be a literal inside the function).
_COUNTRY_ALIASES = {
    "Brasil": "Brazil",
    "Canada (east)": "Canada",
    "Canada (montreal)": "Canada",
    "Ch": "Switzerland",
    "Czech Rep.": "Czech Republic",
    "Czechia": "Czech Republic",
    "France (cern-based)": "France",
    "Greeve": "Greece",
    "México": "Mexico",
    "Netherlands. Time Slot Also Dependent On Another Conference. So Need To Be Able To Attend Both.": "Netherlands",
    "Osaka, Japan": "Japan",
    "Republic Of Korea": "South Korea",
    "Russia Federation": "Russia",
    "S.korea": "South Korea",
    "Stockholm": "Sweden",
    "Taiwan (r.o.c.)": "Taiwan",
    "The Netherlands": "Netherlands",
    "The U.s": "United States",
    "Uk": "United Kingdom",
    "U.s.": "United States",
    "U.s.a.": "United States",
    "United State": "United States",
    "Us": "United States",
    "Us (est)": "United States",
    "Usa": "United States",
    "Usa (chicago)": "United States",
    "Usa - Michigan": "United States",
    "United States Of America": "United States",
    "United Status": "United States",
    "Nan": "Prefer not to say",
}


def fix_country_names(name):
    """Normalise one raw country answer to a canonical country name.

    The answer is stringified (so NaN becomes "nan" -> "Prefer not to say"),
    stripped of surrounding quotes/whitespace and title-cased word by word
    before the alias lookup; unknown names pass through unchanged.
    """
    name = " ".join(x.capitalize() for x in str(name).strip(" '\"").split())
    return _COUNTRY_ALIASES.get(name, name)
# Per-country respondent counts (after normalising free-text country names).
df[["PyHEP feedback: In what country do you currently reside?"]].applymap(fix_country_names).iloc[:, 0].value_counts(ascending=True).plot.barh(figsize=(10, 20))
# +
# https://towardsdatascience.com/using-python-to-create-a-world-map-from-a-list-of-country-names-cd7480d03b10
# Build a clustered world map with one marker per respondent, placed at the
# geocoded centre of their country.
import folium
import folium.plugins
import pycountry_convert
import geopy

geolocator = geopy.geocoders.Nominatim(user_agent="PyHEP2020")
world_map = folium.Map(tiles="cartodbpositron")
marker_cluster = folium.plugins.MarkerCluster().add_to(world_map)
countries = df[["PyHEP feedback: In what country do you currently reside?"]].applymap(fix_country_names).iloc[:, 0].value_counts()
for country, count in countries.items():
    try:
        two_letter = pycountry_convert.country_name_to_country_alpha2(country)
    except Exception:  # fix: was a bare except, which also swallowed KeyboardInterrupt/SystemExit
        pass  # not a recognised country name (e.g. "Prefer not to say")
    else:
        loc = geolocator.geocode(two_letter)  # network call per country
        for _ in range(count):  # one marker per respondent (loop var was unused)
            folium.CircleMarker(location=(loc.latitude, loc.longitude), radius=5, fill=True).add_to(marker_cluster)
        print(f"{count} in {country} (lat {loc.latitude} lng {loc.longitude})")
world_map
# -
# How each of the three candidate time slots was rated; the row order is
# pinned so the answer categories always appear as Great!/Acceptable/BAD.
df[[
    "PyHEP feedback: Atlantic: 15:00 CET, 06:00 PDT, 18:30 IST, 22:00 JST",
    "PyHEP feedback: Indian Ocean: 09:00 CET, 00:00 PDT, 12:30 IST, 16:00 JST",
    "PyHEP feedback: Pacific: 00:00 CET, 15:00 PDT, 03:30 IST, 07:00 JST",
]].apply(pandas.Series.value_counts).loc[["Great!", "Acceptable", "BAD"]].plot.bar(rot=0).legend(bbox_to_anchor=(1.2, 0.5));
# The closed set of "what do you hope to learn" choices from the survey; the
# indicator columns produced below follow this order.
hopes = [
    "Particle physics analysis tools (other than ROOT)",
    "General-purpose data analysis toolkits",
    "Machine learning/deep learning toolkits",
    "Software engineering skills (beyond the fundamentals)",
    "ROOT and PyROOT",
    "Python fundamentals (how to program in Python)",
    "Collaboration-specific topics",
    "Other",
]


def explode(responses):
    """Turn one ';'-separated answer string into a 0.0/1.0 row over ``hopes``."""
    wanted = {part.strip() for part in responses.split(";")}
    return [float(hope in wanted) for hope in hopes]
# Same indicator/normalise/sum treatment as for the physics areas above, now
# applied to the "hoping to learn" multi-select question.
exploded = df[["PyHEP feedback: What are you hoping to learn from this workshop?"]].fillna("").applymap(explode)
indicator = pandas.DataFrame(exploded.iloc[:, 0].tolist(), columns=hopes)
indicator.div(indicator.sum(axis=1), axis=0).sum(axis=0).iloc[::-1].plot.barh();
# # You're in the right place!
#
# **Keynotes:**
#
# * Rubin Observatory: the software behind the science _(Nate Lust)_
# * Python & HEP: a perfect match, in theory _(David Straub)_
#
# **Tutorials:**
#
# * Uproot & Awkward Arrays _(<NAME>)_
# * Jagged physics analysis with Numba, Awkward, and Uproot on a GPU _(Joosep Pata)_
# * Ganga: flexible virtualization for user-based large computations _(Ulrik Egede)_
# * A prototype U.S. CMS analysis facility _(Oksana Shadura)_
# * Columnar analysis at scale with Coffea _(Mat Adamec)_
# * Introduction to automatic differentiation _(Lukas Heinrich)_
# * High-performance Python _(Henry Schreiner)_
# * Model-building & statistical inference with zfit and hepstats _(<NAME>le)_
# * pyhf: accelerating analyses and preserving likelihoods _(<NAME>)_
# * ThickBrick: optimal event selection and categorization in HEP _(Prasanth Shyamsundar)_
#
# **Talks:**
#
# * NanoEvents object _(<NAME>)_
# * TITANIA: how to structure detector monitoring _(<NAME>, <NAME>)_
# * A new PyROOT for ROOT 6.22 _(<NAME>)_
# * Resample: bootstrap and jackknife from Python _(<NAME>ski)_
# * Design pattern for analysis automation using Luigi _(<NAME>)_
# * ServiceX: on-demand data transformation & delivery _(Kyungeon Choi)_
# * Integrating Coffea and WorkQueue _(<NAME>)_
# * High granularity calorimeter (HGCAL) test beam analysis using Jupyter _(<NAME>)_
# * neos: physics analysis as a differentiable program _(<NAME>)_
# * SModelS: a tool for interpreting simplified-model results _(Wolfgang Waltenberger)_
# * TensorFlow-based maximum likelihood fits for high-precision Standard Model measurements at CMS _(Josh Bendavid)_
# * Error computation in iminuit and MINUIT: how HESSE and MINOS work _(Hans Dembinski)_
# * zfit with TensorFlow 2.0: dynamic and compiled HPC _(Jonas Eschle)_
# * Machine learning for signal-background separation of nuclear interaction vertices in CMS _(Anna Kropivnitskaya)_
# * The boost-histogram package _(Henry Schreiner)_
# * Providing Python bindings for complex and feature-rich C and C++ libraries _(Martin Schwinzerl)_
# * Integrating GPU libraries for fun and profit _(Adrian Oeftiger)_
# * mplhep: bridging Matplotlib and HEP _(<NAME>)_
# * ROOT preprocessing pipeline for machine learning with TensorFlow _(Matthias Komm)_
# * Integrated data acquisition in Python _(<NAME>)_
# # Thank you to our sponsors!
#
# <br><br>
#
# <center>
# <a href="https://iris-hep.org/"><img src="sponsors/28213-IRIS-HEP_logo.png" width="300px" style="margin-right: 50px; margin-bottom: 30px"></a><a href="https://www.liverpool.ac.uk/"><img src="sponsors/28228-university-liverpool-logo.png" width="450px" style="margin-bottom: 30px"></a><br>
# <a href="https://www.python.org/psf/"><img src="sponsors/28600-PSF_logo.png" width="500px" style="margin-right: 50px; margin-bottom: 30px"></a><a href="https://www.software.ac.uk/"><img src="sponsors/28229-SSI_logo.png" width="400px" style="margin-bottom: 30px"></a><br>
# <a href="https://www.fnal.gov/"><img src="sponsors/29001-FNAL_ogo.png" width="400px"></a>
# </center>
| welcome-pacific-EVALUATED.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import requests
import csv
# +
# /Users/mjc014/Documents/repo/pythonLib/plotlyLib/dgidb-4-v-normalizer/fix/therapy-v0.3.0.xls
# sheet_name = unmatched Chembls
# NOTE(review): this opens a binary .xls workbook in *text* mode and discards a
# single line -- presumably a placeholder.  To actually read the
# "unmatched Chembls" sheet, pandas.read_excel would be the appropriate tool.
# `requests` and `csv` are imported but not used yet.
with open('/Users/mjc014/Documents/repo/pythonLib/plotlyLib/dgidb-4-v-normalizer/fix/therapy-v0.3.0.xls', 'r') as f:
    f.readline()
| genesCuration/monte-carlo/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.11 ('base')
# language: python
# name: python3
# ---
# # Lab3 - effect of days
#
# run stepwise regression on the data
#
#
# +
import algorithms

# Stepwise-regression thresholds: a candidate feature enters the model while
# its p-value is below in_threshold and is dropped again above out_threshold.
in_threshold = 0.3
out_threshold = 0.05
sderived = 'amount of sleep'  # dependent variable being modelled

# Full 30-day data set plus the truncated training windows.
data = algorithms.load_data('30days.csv', sderived)
data_10_days = algorithms.load_data('10days.csv', sderived)
data_15_days = algorithms.load_data('15days.csv', sderived)
data_20_days = algorithms.load_data('20days.csv', sderived)

# Candidate predictors: every column except the derived (target) one.
lbasics = list(data.columns.values)
lbasics.remove(sderived)

from algorithms import stepwise_regression
import evaluation

# Fit on each training window and evaluate on days 20-30 of the full data.
# A single loop replaces the three copy-pasted fit/print/evaluate blocks;
# after the loop the variables hold the 20-day model, as before.
for training_data in (data_10_days, data_15_days, data_20_days):
    (fit_model, param_in_model, cur_pvalue) = stepwise_regression(training_data, sderived, lbasics, in_threshold, out_threshold)
    print(f'param_in_model: {param_in_model}')
    print(f'cur_pvalue: {cur_pvalue}')
    distance = evaluation.distance_vec(data[20:30], sderived, param_in_model, fit_model)
    print(f'accuracy: {evaluation.accuracy(distance)}')
# -
# # Result - analysis
# Inspect the most recently fitted model (the 20-day one) on the
# days-20..30 evaluation window of the full data set.
import evaluation
#print(data['mood'][14:30])
distance = evaluation.distance_vec(data[20:30], sderived, param_in_model, fit_model)
print(param_in_model)
print(distance)
print(f'accuracy: {evaluation.accuracy(distance)}')
| labs/lab3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## - Iterators are easy to understand
# +
spam = [0, 1, 2, 3, 4]
# for/else: the else clause runs only when the loop finishes without break.
for item in spam:
    print item
else:
    print "Looped whole list"
# +
# What is really happening here?
# Manual desugaring of the for/else loop above via the iterator protocol
# (Python 2: the iterator method is spelled .next(), not __next__()).
it = iter(spam)  # Obtain an iterator
try:
    item = it.next()  # Retrieve first item through the iterator
    while True:
        # Body of the for loop goes here
        print item
        item = it.next()  # Retrieve next item through the iterator
except StopIteration:  # Capture iterator exception
    # Body of the else clause goes here
    print "Looped whole list"
# +
# Another example
# Strings are iterable too: the iterator yields one character at a time.
spam = "spam"
it = iter(spam)
# -
print it.next()
print it.next()
print it.next()
print it.next()
# Once the StopIteration is raised an iterator is useless, there is no 'restart'
print it.next()
# ## Python Generators
# +
# expression generator
# A generator expression: each value (2**s) is computed lazily, on demand.
spam = [0, 1, 2, 3, 4]
fooo = (2 ** s for s in spam) # Syntax similar to list comprehension but between parentheses
print fooo
# -
print fooo.next()
print fooo.next()
print fooo.next()
print fooo.next()
print fooo.next()
# Generator is exhausted
print fooo.next()
# - Generators are a simple and powerful tool for creating iterators.
# - Each iteration is computed on demand
# - In general terms they are more efficient than list comprehension or loops
# - If not the whole sequence is traversed
# - When looking for a certain element
# - When an exception is raised
# - So they save computing power and memory
# - Used to operate with I/O, with big amounts of data (e.g. DB queries)...
# ## yield
def countdown(n):
    """Lazily yield n, n-1, ... until the value is no longer positive."""
    current = n
    while current > 0:
        yield current
        current = current - 1
gen_5 = countdown(5)
gen_5
# where is the sequence?  Nothing executes until we pull values out:
print gen_5.next()
print gen_5.next()
print gen_5.next()
print gen_5.next()
print gen_5.next()
# A sixth next() raises StopIteration -- the generator is exhausted.
gen_5.next()
for i in countdown(5):
    print i,
# - yield makes a function a generator
# - the function only executes on next (easier than implements iteration)
# - it produces a value and suspend the execution of the function
# +
# Let's see another example with yield tail -f and grep
import time


def follow(thefile):
    """Generator form of ``tail -f``: yield each new line appended to *thefile*."""
    thefile.seek(0, 2)  # jump to the current end of the file
    while True:
        line = thefile.readline()
        if line:
            yield line
        else:
            time.sleep(0.1)  # no new data yet -- poll again shortly
# -
logfile = open("fichero.txt")
# Prints lines as they are appended to the file; this loop blocks forever
# (like `tail -f`) -- interrupt the kernel to reach the cleanup below.
for line in follow(logfile):
    print line,
# Ensure f is closed
if logfile and not logfile.closed:
    logfile.close()
# ### using generators to build a pipeline as unix (tail + grep)
# +
def grep(pattern, lines):
    """Filter stage: yield only the lines that contain *pattern* (cf. Unix grep).

    Implemented as a generator expression, resolving the TODO that used to sit
    under the explicit yield-loop version; iteration behaviour is unchanged.
    """
    return (line for line in lines if pattern in line)
# -
# Set up a processing pipe : tail -f | grep "tefcon"
logfile = open("fichero.txt")
loglines = follow(logfile)
pylines = grep("python", loglines)
# nothing happens until now -- both stages are lazy generators
# Pull results out of the processing pipeline (blocks forever; interrupt to stop)
for line in pylines:
    print line,
# Ensure f is closed
if logfile and not logfile.closed:
    logfile.close()
# +
# Yield can be used as an expression too: (yield) *receives* the values passed
# in via send(), turning the function into a consumer coroutine.
def g_grep(pattern):
    """Print every line sent into this coroutine that contains *pattern*."""
    print "Looking for %s" % pattern
    while True:
        line = (yield)
        if pattern in line:
            print line,
# -
# ### Coroutines
# - Using yield as this way we get a coroutine
# - function not just returns values, it can consume values that we send
g = g_grep("python")
# The first next() advances the coroutine to its first (yield) so it can receive.
g.next()
g.send("Prueba a ver si encontramos algo")
g.send("Hemos recibido python")
# - Sent values are returned in (yield)
# - Execution as a generator function
# - coroutines responds to next and send
# avoid the first next call -> decorator
import functools


def coroutine(func):
    """Decorator that builds a coroutine and advances it to its first yield."""
    @functools.wraps(func)  # fix: functools was imported but wraps() was never applied
    def wrapper(*args, **kwargs):
        cr = func(*args, **kwargs)
        cr.next()  # prime the coroutine so send() works immediately (Python 2 protocol)
        return cr
    return wrapper
@coroutine
def cool_grep(pattern):
    """Same consumer as g_grep, but @coroutine primes it automatically."""
    print "Looking for %s" % pattern
    while True:
        line = (yield)
        if pattern in line:
            print line,
g = cool_grep("python")
# no need to call next() -- @coroutine already advanced it to the first (yield)
g.send("Prueba a ver si encontramos algo")
g.send("Prueba a ver si python es cool")
# +
# use close to shutdown a coroutine (can run forever)
# -
@coroutine
def last_grep(pattern):
    """Like cool_grep, but prints a goodbye message when close()d."""
    print "Looking for %s" % pattern
    try:
        while True:
            line = (yield)
            if pattern in line:
                print line,
    except GeneratorExit:  # raised inside the coroutine by close()
        print "Going away. Goodbye"
# Exceptions can be thrown inside a coroutine
g = last_grep("python")
g.send("Prueba a ver si encontramos algo")
g.send("Prueba a ver si python es cool")
g.close()
# Sending to a closed coroutine raises StopIteration:
g.send("prueba a ver si python es cool")
# can send exceptions: throw() raises RuntimeError at the suspended (yield)
g.throw(RuntimeError, "Lanza una excepcion")
# - generators produces values and coroutines mostly consumes
# - DO NOT mix the concepts to avoid exploiting your mind
# - Coroutines are not for iterating
def countdown_bug(n):
    """Anti-example: a generator that also consumes via send() -- confusing!"""
    print "Counting down from", n
    while n >= 0:
        newvalue = (yield n)
        # If a new value got sent in, reset n with it
        if newvalue is not None:
            n = newvalue
        else:
            n -= 1
c = countdown_bug(5)
for n in c:
    print n
    if n == 5:
        c.send(3)
        # NOTE: send(3) returns the next yielded value, which is swallowed
        # here -- the for loop never sees it, hence the surprising output.
# ### What has happened here?
# - chain coroutines together and push data through the pipe using send()
# - you need a source that normally is not a coroutine
# - you will also needs a pipelines sinks (end-point) that consumes data and processes
# - don't mix the concepts too much
# ### lets go back to the tail -f and grep
# ### our source is tail -f
import time


def c_follow(thefile, target):
    """Pipeline source (``tail -f``): push each new line of *thefile* into *target*."""
    thefile.seek(0, 2)  # start tailing from the current end of the file
    while True:
        line = thefile.readline()
        if line:
            target.send(line)  # push the line into the pipeline
        else:
            time.sleep(0.1)  # wait briefly for more data
# a sink: just print every received line, prefixed with this sink's name
@coroutine
def printer(name):
    """Pipeline sink: print each line sent in, prefixed with *name*."""
    while True:
        line = (yield)
        print name + " : " + line,
# example -- c_follow blocks here forever; interrupt the kernel to reach the cleanup
f = open("fichero.txt")
c_follow(f, printer("uno"))
# Ensure f is closed
if f and not f.closed:
    f.close()
# Pipeline filter stage: grep implemented as a coroutine.
@coroutine
def c_grep(pattern, target):
    """Forward to *target* only those received lines that contain *pattern*."""
    while True:
        received = (yield)  # receive a line from upstream
        if pattern not in received:
            continue
        target.send(received)  # pass matching lines to the next stage
# +
# Exercise: tail -f "fichero.txt" | grep "python"
# do not forget the last print as sink
# -
# We achieve the same behaviour either way: with iterators we *pull* data out
# by iterating, whereas with coroutines we *push* data in with send()
# BROADCAST: fan each incoming item out to every downstream target.
@coroutine
def broadcast(targets):
    """Fan-out stage: forward every received item to all coroutines in *targets*."""
    while True:
        received = (yield)
        for sink in targets:
            sink.send(received)
# Fan one tailed file out into three independent grep branches, each with its
# own printer sink (runs until the kernel is interrupted).
f = open("fichero.txt")
c_follow(f,
         broadcast([c_grep('python', printer("uno")),
                    c_grep('hodor', printer("dos")),
                    c_grep('hold', printer("tres"))])
         )
# - coroutines add routing
# - complex arrangement of pipes, branches, merging...
if f and not f.closed:
    f.close()
# Same fan-out topology, but the three branches share a single printer sink.
f = open("fichero.txt")
p = printer("uno")
c_follow(f,
         broadcast([c_grep('python', p),
                    c_grep('hodor', p),
                    c_grep('hold', p)])
         )
if f and not f.closed:
    f.close()
| advanced/0_Iterators_generators_and_coroutines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.1 64-bit
# name: python391jvsc74a57bd00ac516125b3ee16d4eaa41f008fade7bad50b808bd1b6d74d2e8ae0015ba9066
# ---
# # Client_sample
# Při vytvaření clienta na auth serveru jsme ziskali dulezite kody. ClientID a ClientSecret. Oba je potreba na pevno priřadit k promennym. Dale se s nimi pracuje.
#
# Z modulu importujeme funkce pro asynchroni request komunikaci
# +
import tornado.ioloop
import tornado.web
import json
import aiohttp
#from moduls import * # Contains a module for asynchronous requests
# OAuth client credentials obtained when this client was registered on the
# authorization server.  NOTE(review): real secrets belong in environment
# variables or a config file, not in source code.
g_ClientID="<KEY>"
g_ClientSecret="<KEY>"
# -
# V promenne g_port definujeme na jakem portu nam client pobezi.
# G_authServerPublic je promenna typu string ve ktere mame zapsane host + port authorizacniho serveru na ktery se prepojujeme pomoci redirect
# G_authServerDocker je promenna typu string ve ktere mame zapsane host + port authorizacniho serveru pro fukce get/post v pripade ze auth server bezi v dockeru. Pokud v dockeru nebezi je nutne promennou nahradit hodnotou g_authServerPublic.
# +
# Port this sample client listens on.
g_port = 9999
# Browser-facing address of the authorization server (used for redirects).
g_authServerPublic = "http://127.0.0.1:5000"
# Server-to-server address of the auth server when it runs inside Docker; if
# the auth server is not dockerised, use g_authServerPublic here instead.
g_authServerDocker = "http://172.16.238.1:5000" # address for Docker-internal tunnelling
'''**************************************************'''
# -
# ## Uvodni strana
# Primitivni uvodni strana, kde se nachazi dve tlacitka. Funkcni je pouze tlacitko na prepojeni na authserver. Pri redirect posilame na auth server informace o tom co pozadujeme, kdo jsme a jakou metodou to pozadujeme. response_type=code -> Budeme se bavit tokem authorization code. To znamena ze dostaneme athorizacni kod, ktery pam smenime za access token. Scope=profile -> Celkove pozadujeme informace o uzivateli typu profile. Vice definovano v authserver
# +
class loginPage(tornado.web.RequestHandler):
    """Landing page with two login buttons; only the auth-server one is wired up."""

    def get(self):
        self.render("templates/loginPage.html")

    def post(self):
        '''Redirect user to authorization server, you will receive a code (request token) in redirect URL'''
        # Start the OAuth2 authorization-code flow: response_type=code asks the
        # auth server for an authorization code (exchanged for a token later);
        # scope=profile requests access to profile-type user information.
        if self.get_argument("authSer_button", None) != None:
            self.redirect(tornado.httputil.url_concat(g_authServerPublic + "/oauth/authorize",
                          [("response_type","code"),
                           ("client_id",g_ClientID),
                           ("scope","profile")]))
        if self.get_argument("googleAuth_button", None) != None:
            self.write("This part has not been programmed yet.")
# -
# # AfterLog
# Sem jsme prepojeni po uspesne authorizaci uzivatele na authorizacnim serveru. Pri prepojeni ziskavame auth code, ktery nasledne menime za access token. Opet pridavame informace o tom kdo jsme, a co chceme. Aby se nestalo ze nekdo jiny nahodne vygeneruje nas auth code a pouzije ho misto nas.
#
# Kdyz ziskame access token tak ten uz konecne muzeme vyuzit k ziskani dat o uzivateli. Jednoduse metodou get, kde jen pripojime jako parametr ziskany access token.
#
# Po ziskani dat se prepojime na nasi hlavni stranu s prislusnymi parametry.
# +
class afterLog(tornado.web.RequestHandler):
    """OAuth redirect target: swaps the code for a token and fetches user data."""

    async def get(self):
        '''Exchange code (request token) for access token, fetch user data'''
        code = self.get_query_argument("code")
        print("Acquired code: " + str(code))
        # Token exchange: POST the authorization-code grant to the auth server;
        # the helper authenticates with the client ID/secret via HTTP basic auth.
        files = {
            'grant_type': 'authorization_code',
            'scope': 'profile',
            'code': code,
        }
        response = await post_request_with_files(g_authServerDocker + "/oauth/token", data=files, ClientID=g_ClientID, ClientSecret=g_ClientSecret)
        access_token = response["access_token"]
        print("Acquired access token: " + access_token)
        # Fetch user data using the access token as a bearer credential.
        headers = {
            'Authorization': f"Bearer {access_token}",
        }
        response = await get_request_with_headers(g_authServerDocker + "/api/me", data=headers)
        print("Data o uzivateli ziskana: " + str(response))
        # Pass the username and token on to the main page via query parameters.
        data = {"username":response["username"], "access_token":access_token}
        self.redirect(tornado.httputil.url_concat('mainPage', data))
# -
# # Hlavni strana
# Tlacitkem logout odesilame na auth server pozadavek o zruseni access tokenu. Jako parametry udavame nase ID a samozrejme access token, ktery chceme nechat smazat
# +
class mainPage(tornado.web.RequestHandler):
    """Main page shown after login; the logout button revokes the access token."""

    def get(self):
        self.render("templates/afterLog.html", title="Client Main Page", data=self)

    async def post(self):
        '''Log out and send a request to delete access token'''
        if self.get_argument("logOut_button", None) != None:
            print("logOut")
            # Ask the auth server to delete this client's access token.
            data = {
                'access_token': self.get_argument("access_token"),
                'client_ID':g_ClientID,
            }
            await get_request_with_params(g_authServerDocker + '/delete_AT', data)
            self.redirect('/')
        if self.get_argument("nothing_button", None) != None:
            self.write("What did you expect? Told you, I do nothing.")
# -
# URL routing: map each request path to its handler class.
application = tornado.web.Application([
    (r"/", loginPage),
    (r"/afterLog", afterLog),
    (r"/mainPage", mainPage),
])
# Entry point: start the client application and listen on the configured port.
if __name__ == "__main__":
    application.listen(g_port)
    print("Client_sample running on port: " + str(g_port) + "...")
    tornado.ioloop.IOLoop.instance().start()
# +
# NOTE(review): these aiohttp helpers are defined *after* the cell that starts
# the IOLoop above; in a top-to-bottom run they are not yet defined when the
# handlers need them -- consider moving this cell before the server start.
async def get_request(url):
    """GET *url* and return the response body as text."""
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            return await resp.text()

async def get_request_with_headers(url, data):
    """GET *url* with extra request headers *data*; return the parsed JSON body."""
    async with aiohttp.ClientSession() as session:
        async with session.get(url=url, headers=data) as resp:
            return await resp.json()

async def post_request_with_files(url, data, ClientID, ClientSecret):
    """POST form *data* to *url* with HTTP basic auth (client credentials); return parsed JSON."""
    async with aiohttp.ClientSession(auth=aiohttp.BasicAuth(ClientID,ClientSecret)) as session:
        async with session.post(url, data=data) as resp:
            return await resp.json()

async def get_request_with_params(url, data):
    """GET *url* with query parameters *data*; return the response body as text."""
    async with aiohttp.ClientSession() as session:
        async with session.get(url=url, params=data) as resp:
            return await resp.text()
# -
| Docs/Client/main.ipynb |