code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
```
from scipy.spatial import distance as dist
import numpy as np
import cv2
from imutils import face_utils
from imutils.video import VideoStream
import imutils
from fastai.vision import *
import argparse
import time
import dlib
from playsound import playsound
import warnings  # BUG FIX: was missing — the filterwarnings calls below raised NameError
from torch.serialization import SourceChangeWarning
warnings.filterwarnings("ignore", category=SourceChangeWarning)
warnings.filterwarnings("ignore", category=UserWarning)

# Load the exported fastai emotion classifier and the OpenCV face detector.
path = './'
print(path + 'export.pkl')
learn = load_learner(path, 'export.pkl')
face_cascade = cv2.CascadeClassifier("../haarcascade_frontalface_alt2.xml")

# Start the webcam stream and initialise session state.
vs = VideoStream(src=0).start()
start = time.perf_counter()  # reference time used by data_time()
data = []                    # rows of [elapsed_seconds, prediction, probability, ear]
time_value = 0

# Eye-aspect-ratio thresholds (not referenced by the game loop below).
EYE_AR_THRESH = 0.20
EYE_AR_CONSEC_FRAMES = 10
COUNTER = 0
def eye_aspect_ratio(eye):
    """Return the eye aspect ratio (EAR) of one eye.

    *eye* is a sequence of six (x, y) landmark points in the dlib 68-point
    ordering.  EAR = (|p2-p6| + |p3-p5|) / (2 |p1-p4|); the value shrinks
    toward zero as the eyelid closes.
    """
    vertical_a = dist.euclidean(eye[1], eye[5])
    vertical_b = dist.euclidean(eye[2], eye[4])
    horizontal = dist.euclidean(eye[0], eye[3])
    return (vertical_a + vertical_b) / (2.0 * horizontal)
def data_time(time_value, prediction, probability, ear):
    """Log one [seconds, prediction, probability, ear] row per elapsed second.

    Appends to the module-level `data` list only when a new whole second
    (measured from the module-level `start`) has been reached, and returns
    the current whole second so the caller can feed it back in.
    """
    elapsed = int(time.perf_counter() - start)
    if elapsed == time_value:
        return time_value
    data.append([elapsed, prediction, probability, ear])
    return elapsed
# dlib 68-point facial landmark predictor (model file expected in the working directory)
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# slice indices of the left/right eye landmarks within the 68-point array
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
# Function to test the emotion
def test_emotion(emotion, vs):
    """Grab one frame from *vs* and run face detection.

    NOTE(review): incomplete stub — `face_coord` is computed then discarded,
    the function always returns None, and it is never called below.
    """
    frame = vs.read()
    frame = imutils.resize(frame, width=450)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face_coord = face_cascade.detectMultiScale(gray, 1.1, 5, minSize=(30, 30))
    return
# To start the game you will press q. Once pressed the first emotion you will
# make is neutral. If you do make the emotion for 3 seconds you get a correct
# noise and if not it will give an incorrect noise. There will be a noises
# introducing all the emotions and noises to say you passed or you did not pass.
# This will go in order after neutral to happy, sad, surprised, then angry. Once
# you go through all emotions correctly you win.
emotions = ['neutral','happy', 'sad', 'surprise', 'angry'] # put the emotions
FRAMES_TO_PASS = 6   # matching predictions needed to pass a round
quit_game = False    # set when the player presses q, so the outer loop also stops
for emotion in emotions:
    emotion_counter = 0
    first_pass = True
    round_passed = False
    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        face_coord = face_cascade.detectMultiScale(gray, 1.1, 5, minSize=(30, 30))
        cv2.putText(frame, f"Make a {emotion} face!", (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
        for coords in face_coord:
            X, Y, w, h = coords
            H, W, _ = frame.shape
            # pad the detected face box by 30% on each side, clipped to the frame
            X_1, X_2 = (max(0, X - int(w * 0.3)), min(X + int(1.3 * w), W))
            Y_1, Y_2 = (max(0, Y - int(0.3 * h)), min(Y + int(1.3 * h), H))
            img_cp = gray[Y_1:Y_2, X_1:X_2].copy()
            prediction, idx, probability = learn.predict(Image(pil2tensor(img_cp, np.float32).div_(225)))
            cv2.rectangle(
                img=frame,
                pt1=(X_1, Y_1),
                pt2=(X_2, Y_2),
                color=(128, 128, 0),
                thickness=2,
            )
            rect = dlib.rectangle(X, Y, X + w, Y + h)
            cv2.putText(frame, str(prediction), (10, frame.shape[0] - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (225, 255, 255), 2)
            cv2.putText(frame, "Press q to quit", (250, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            if str(prediction) == emotion:
                emotion_counter += 1
            if emotion_counter > FRAMES_TO_PASS:
                playsound('../sounds/correct.mp3')
                # BUG FIX: draw the text before imshow (it used to be drawn
                # after the frame was shown, so it never appeared on screen)
                cv2.putText(frame, f"You passed the {emotion} round!", (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.imshow("frame", frame)
                print(f'You passed the {emotion} round')
                round_passed = True
                break
        if round_passed:
            # BUG FIX: break out of the while-loop so the game advances to the
            # next emotion; previously only the inner for-loop was broken, so
            # the round never ended and correct.mp3 replayed on every frame.
            break
        cv2.imshow("frame", frame)
        if first_pass:
            playsound('../sounds/' + emotion + '.mp3')
            first_pass = False
        if cv2.waitKey(1) & 0xFF == ord("q"):
            vs.stop()
            cv2.destroyAllWindows()
            cv2.waitKey(1)
            quit_game = True
            break
    if quit_game:
        # BUG FIX: stop the whole game on quit; previously the outer loop kept
        # running (and reading from a stopped video stream) after q was pressed.
        break
# End the game: play the win jingle, then show a "You win" screen until q is pressed.
playsound('../sounds/win.mp3')
while True:
    frame = imutils.resize(vs.read(), width=450)
    cv2.putText(frame, "You win!!!", (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
    cv2.imshow("frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        vs.stop()
        cv2.destroyAllWindows()
        cv2.waitKey(1)
        break
```
| github_jupyter |
## The QLBS model for a European option
Welcome to your 2nd assignment in Reinforcement Learning in Finance. In this exercise you will arrive at an option price and the hedging portfolio via the standard toolkit of Dynamic Programming (DP).
QLBS model learns both the optimal option price and optimal hedge directly from trading data.
**Instructions:**
- You will be using Python 3.
- Avoid using for-loops and while-loops, unless you are explicitly told to do so.
- Do not modify the (# GRADED FUNCTION [function name]) comment in some cells. Your work would not be graded if you change this. Each cell containing that comment should only contain one function.
- After coding your function, run the cell right below it to check if your result is correct.
- When encountering **```# dummy code - remove```** please replace this code with your own
**After this assignment you will:**
- Re-formulate option pricing and hedging method using the language of Markov Decision Processes (MDP)
- Set up forward simulation using Monte Carlo
- Expand optimal action (hedge) $a_t^\star(X_t)$ and optimal Q-function $Q_t^\star(X_t, a_t^\star)$ in basis functions with time-dependent coefficients
Let's get started!
## About iPython Notebooks ##
iPython Notebooks are interactive coding environments embedded in a webpage. You will be using iPython notebooks in this class. You only need to write code between the ### START CODE HERE ### and ### END CODE HERE ### comments. After writing your code, you can run the cell by either pressing "SHIFT"+"ENTER" or by clicking on "Run Cell" (denoted by a play symbol) in the upper bar of the notebook.
We will often specify "(≈ X lines of code)" in the comments to tell you about how much code you need to write. It is just a rough estimate, so don't feel bad if your code is longer or shorter.
```
#import warnings
#warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from scipy.stats import norm
import random
import time
import matplotlib.pyplot as plt
import sys
sys.path.append("..")
import grading
### ONLY FOR GRADING. DO NOT EDIT ###
submissions=dict()
assignment_key="wLtf3SoiEeieSRL7rCBNJA"
all_parts=["15mYc", "h1P6Y", "q9QW7","s7MpJ","Pa177"]
### ONLY FOR GRADING. DO NOT EDIT ###
# NOTE(review): hard-coded credentials below — avoid committing real tokens
# and emails to source control.
COURSERA_TOKEN = 'gF094cwtidz2YQpP' # the key provided to the Student under his/her email on submission page
COURSERA_EMAIL = 'cilsya@yahoo.com' # the email
```
## Parameters for MC simulation of stock prices
```
S0 = 100      # initial stock price
mu = 0.05     # drift
sigma = 0.15  # volatility
r = 0.03      # risk-free rate
M = 1         # maturity (total horizon, split into T steps below)
T = 24        # number of time steps
N_MC = 10000  # number of Monte Carlo paths
delta_t = M / T               # time interval per step
gamma = np.exp(- r * delta_t) # one-step discount factor exp(-r*dt)
```
### Black-Scholes Simulation
Simulate $N_{MC}$ stock price sample paths with $T$ steps by the classical Black-Scholes formula.
$$dS_t=\mu S_tdt+\sigma S_tdW_t\quad\quad S_{t+1}=S_te^{\left(\mu-\frac{1}{2}\sigma^2\right)\Delta t+\sigma\sqrt{\Delta t}Z}$$
where $Z$ is a standard normal random variable.
Based on simulated stock price $S_t$ paths, compute state variable $X_t$ by the following relation.
$$X_t=-\left(\mu-\frac{1}{2}\sigma^2\right)t\Delta t+\log S_t$$
Also compute
$$\Delta S_t=S_{t+1}-e^{r\Delta t}S_t\quad\quad \Delta\hat{S}_t=\Delta S_t-\Delta\bar{S}_t\quad\quad t=0,...,T-1$$
where $\Delta\bar{S}_t$ is the sample mean of all values of $\Delta S_t$.
Plots of 5 stock price $S_t$ and state variable $X_t$ paths are shown below.
```
# make a dataset
starttime = time.time()
np.random.seed(42)
# stock price paths: rows = MC paths (1..N_MC), columns = time steps (0..T)
S = pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))
S.loc[:,0] = S0
# standard normal random numbers, one per (path, step)
RN = pd.DataFrame(np.random.randn(N_MC,T), index=range(1, N_MC+1), columns=range(1, T+1))
# exact GBM step: S_{t} = S_{t-1} * exp((mu - sigma^2/2) dt + sigma sqrt(dt) Z)
for t in range(1, T+1):
    S.loc[:,t] = S.loc[:,t-1] * np.exp((mu - 1/2 * sigma**2) * delta_t + sigma * np.sqrt(delta_t) * RN.loc[:,t])
# increments delta_S_t = S_{t+1} - e^{r dt} S_t for t = 0..T-1
delta_S = S.loc[:,1:T].values - np.exp(r * delta_t) * S.loc[:,0:T-1]
# de-meaned increments (column-wise): delta_S_hat_t = delta_S_t - mean(delta_S_t)
delta_S_hat = delta_S.apply(lambda x: x - np.mean(x), axis=0)
# state variable X_t = -(mu - sigma^2/2) t dt + log S_t
X = - (mu - 1/2 * sigma**2) * np.arange(T+1) * delta_t + np.log(S) # delta_t here is due to their conventions
endtime = time.time()
print('\nTime Cost:', endtime - starttime, 'seconds')
# plot 10 paths
step_size = N_MC // 10
idx_plot = np.arange(step_size, N_MC, step_size)
plt.plot(S.T.iloc[:,idx_plot])
plt.xlabel('Time Steps')
plt.title('Stock Price Sample Paths')
plt.show()
plt.plot(X.T.iloc[:,idx_plot])
plt.xlabel('Time Steps')
plt.ylabel('State Variable')
plt.show()
```
Define function *terminal_payoff* to compute the terminal payoff of a European put option.
$$H_T\left(S_T\right)=\max\left(K-S_T,0\right)$$
```
def terminal_payoff(ST, K):
    """European put payoff at maturity: max(K - ST, 0).

    ST -- final stock price
    K  -- strike
    """
    intrinsic = K - ST
    return intrinsic if intrinsic > 0 else 0
type(delta_S)
```
## Define spline basis functions
```
import bspline
import bspline.splinelab as splinelab
X_min = np.min(np.min(X))
X_max = np.max(np.max(X))
print('X.shape = ', X.shape)
print('X_min, X_max = ', X_min, X_max)
p = 4 # order of spline (as-is; 3 = cubic, 4: B-spline?)
ncolloc = 12
tau = np.linspace(X_min,X_max,ncolloc) # These are the sites to which we would like to interpolate
# k is a knot vector that adds endpoints repeats as appropriate for a spline of order p
# To get meaninful results, one should have ncolloc >= p+1
k = splinelab.aptknt(tau, p)
# Spline basis of order p on knots k
basis = bspline.Bspline(k, p)
f = plt.figure()
# B = bspline.Bspline(k, p) # Spline basis functions
print('Number of points k = ', len(k))
basis.plot()
plt.savefig('Basis_functions.png', dpi=600)
type(basis)
X.values.shape
```
### Make data matrices with feature values
"Features" here are the values of basis functions at data points
The outputs are 3D arrays of dimensions num_tSteps x num_MC x num_basis
```
# "Features" are the basis-function values evaluated at each data point.
# Output: 3D array of dimension num_t_steps x N_MC x num_basis.
num_t_steps = T + 1
num_basis = ncolloc # len(k) #
data_mat_t = np.zeros((num_t_steps, N_MC,num_basis ))
print('num_basis = ', num_basis)
print('dim data_mat_t = ', data_mat_t.shape)
t_0 = time.time()
# fill it: evaluate all basis functions at every path's state value X_t^k
for i in np.arange(num_t_steps):
    x = X.values[:,i]
    data_mat_t[i,:,:] = np.array([ basis(el) for el in x ])
t_end = time.time()
print('Computational time:', t_end - t_0, 'seconds')
# save these data matrices for future re-use
np.save('data_mat_m=r_A_%d' % N_MC, data_mat_t)
print(data_mat_t.shape) # shape num_steps x N_MC x num_basis
print(len(k))
```
## Dynamic Programming solution for QLBS
The MDP problem in this case is to solve the following Bellman optimality equation for the action-value function.
$$Q_t^\star\left(x,a\right)=\mathbb{E}_t\left[R_t\left(X_t,a_t,X_{t+1}\right)+\gamma\max_{a_{t+1}\in\mathcal{A}}Q_{t+1}^\star\left(X_{t+1},a_{t+1}\right)\space|\space X_t=x,a_t=a\right],\space\space t=0,...,T-1,\quad\gamma=e^{-r\Delta t}$$
where $R_t\left(X_t,a_t,X_{t+1}\right)$ is the one-step time-dependent random reward and $a_t\left(X_t\right)$ is the action (hedge).
Detailed steps of solving this equation by Dynamic Programming are illustrated below.
With this set of basis functions $\left\{\Phi_n\left(X_t^k\right)\right\}_{n=1}^N$, expand the optimal action (hedge) $a_t^\star\left(X_t\right)$ and optimal Q-function $Q_t^\star\left(X_t,a_t^\star\right)$ in basis functions with time-dependent coefficients.
$$a_t^\star\left(X_t\right)=\sum_n^N{\phi_{nt}\Phi_n\left(X_t\right)}\quad\quad Q_t^\star\left(X_t,a_t^\star\right)=\sum_n^N{\omega_{nt}\Phi_n\left(X_t\right)}$$
Coefficients $\phi_{nt}$ and $\omega_{nt}$ are computed recursively backward in time for $t=T−1,...,0$.
Coefficients for expansions of the optimal action $a_t^\star\left(X_t\right)$ are solved by
$$\phi_t=\mathbf A_t^{-1}\mathbf B_t$$
where $\mathbf A_t$ and $\mathbf B_t$ are matrix and vector respectively with elements given by
$$A_{nm}^{\left(t\right)}=\sum_{k=1}^{N_{MC}}{\Phi_n\left(X_t^k\right)\Phi_m\left(X_t^k\right)\left(\Delta\hat{S}_t^k\right)^2}\quad\quad B_n^{\left(t\right)}=\sum_{k=1}^{N_{MC}}{\Phi_n\left(X_t^k\right)\left[\hat\Pi_{t+1}^k\Delta\hat{S}_t^k+\frac{1}{2\gamma\lambda}\Delta S_t^k\right]}$$
$$\Delta S_t=S_{t+1} - e^{r\Delta t} S_t\quad\quad t=T-1,...,0$$
where $\Delta\bar{S}_t$ is the sample mean of all values of $\Delta S_t$, and $\Delta\hat{S}_t=\Delta S_t-\Delta\bar{S}_t$.
Define function *function_A* and *function_B* to compute the value of matrix $\mathbf A_t$ and vector $\mathbf B_t$.
## Define the option strike and risk aversion parameter
```
risk_lambda = 0.001 # risk aversion parameter lambda
K = 100 # option strike
# Note that we set coef=0 below in function function_B_vec. This corresponds to a pure risk-based hedging
```
### Part 1 Calculate coefficients $\phi_{nt}$ of the optimal action $a_t^\star\left(X_t\right)$
**Instructions:**
- implement function_A_vec() which computes $A_{nm}^{\left(t\right)}$ matrix
- implement function_B_vec() which computes $B_n^{\left(t\right)}$ column vector
```
# functions to compute optimal hedges
def function_A_vec(t, delta_S_hat, data_mat, reg_param):
    """
    function_A_vec - compute the matrix A_{nm} from Eq. (52) (with a regularization!)
    Eq. (52) in QLBS Q-Learner in the Black-Scholes-Merton article:

        A_{nm}^{(t)} = sum_k Phi_n(X_t^k) Phi_m(X_t^k) (dS_hat_t^k)^2  + reg_param * I

    Arguments:
    t - time index, a scalar, an index into time axis of data_mat
    delta_S_hat - pandas.DataFrame of dimension N_MC x T (de-meaned price increments)
    data_mat - np.array of dimension T x N_MC x num_basis (basis-function values)
    reg_param - a scalar, regularization parameter
    Return:
    - np.array, i.e. matrix A_{nm} of dimension num_basis x num_basis
    """
    # basis-function values at time t: N_MC x num_basis
    X_mat = data_mat[t, :, :]
    num_basis_funcs = X_mat.shape[1]
    # (dS_hat)^2 as an N_MC x 1 column.  np.asarray(...) instead of calling
    # .reshape on the pandas Series directly: Series.reshape was deprecated
    # and removed in modern pandas.
    hat_dS2 = np.asarray(delta_S_hat.loc[:, t] ** 2).reshape(-1, 1)
    # X^T diag(dS_hat^2) X, plus Tikhonov regularization for invertibility
    A_mat = np.dot(X_mat.T, X_mat * hat_dS2) + reg_param * np.eye(num_basis_funcs)
    return A_mat
def function_B_vec(t,
                   Pi_hat,
                   delta_S_hat=delta_S_hat,
                   S=S,
                   data_mat=data_mat_t,
                   gamma=gamma,
                   risk_lambda=risk_lambda):
    """
    function_B_vec - compute vector B_{n} from Eq. (52) QLBS Q-Learner in the Black-Scholes-Merton article:

        B_n^{(t)} = sum_k Phi_n(X_t^k) [ Pi_hat_{t+1}^k dS_hat_t^k + coef * dS_t^k ]

    Arguments:
    t - time index, a scalar, an index into time axis of delta_S_hat
    Pi_hat - pandas.DataFrame of dimension N_MC x T of de-meaned portfolio values
    delta_S_hat - pandas.DataFrame of dimension N_MC x T
    S - pandas.DataFrame of simulated stock prices of dimension N_MC x T
    data_mat - np.array of dimension T x N_MC x num_basis
    gamma - one time-step discount factor $exp(-r \delta t)$
    risk_lambda - risk aversion coefficient, a small positive number
    Return:
    np.array() of dimension num_basis x 1
    """
    # coef = 1.0/(2 * gamma * risk_lambda) would give the drift-aware hedge;
    # per the assignment instructions it is overridden to zero to obtain a
    # pure risk-based hedge, so the S and risk_lambda arguments end up unused.
    tmp = Pi_hat.loc[:, t+1] * delta_S_hat.loc[:, t]
    X_mat = data_mat[t, :, :]  # matrix of dimension N_MC x num_basis
    B_vec = np.dot(X_mat.T, tmp)
    return B_vec
### GRADED PART (DO NOT EDIT) ###
reg_param = 1e-3
np.random.seed(42)
# sample 50 random entries of A_{T-1} and submit them for grading
A_mat = function_A_vec(T-1, delta_S_hat, data_mat_t, reg_param)
idx_row = np.random.randint(low=0, high=A_mat.shape[0], size=50)
np.random.seed(42)
idx_col = np.random.randint(low=0, high=A_mat.shape[1], size=50)
part_1 = list(A_mat[idx_row, idx_col])
try:
    part1 = " ".join(map(repr, part_1))
except TypeError:
    part1 = repr(part_1)
submissions[all_parts[0]]=part1
grading.submit(COURSERA_EMAIL, COURSERA_TOKEN, assignment_key,all_parts[:1],all_parts,submissions)
A_mat[idx_row, idx_col]
### GRADED PART (DO NOT EDIT) ###
### GRADED PART (DO NOT EDIT) ###
np.random.seed(42)
risk_lambda = 0.001
# terminal portfolio value Pi_T = put payoff, plus its de-meaned version,
# needed to evaluate B_{T-1}
Pi = pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))
Pi.iloc[:,-1] = S.iloc[:,-1].apply(lambda x: terminal_payoff(x, K))
Pi_hat = pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))
Pi_hat.iloc[:,-1] = Pi.iloc[:,-1] - np.mean(Pi.iloc[:,-1])
B_vec = function_B_vec(T-1, Pi_hat, delta_S_hat, S, data_mat_t, gamma, risk_lambda)
part_2 = list(B_vec)
try:
    part2 = " ".join(map(repr, part_2))
except TypeError:
    part2 = repr(part_2)
submissions[all_parts[1]]=part2
grading.submit(COURSERA_EMAIL, COURSERA_TOKEN, assignment_key,all_parts[:2],all_parts,submissions)
B_vec
### GRADED PART (DO NOT EDIT) ###
```
## Compute optimal hedge and portfolio value
Call *function_A* and *function_B* for $t=T-1,...,0$ together with basis function $\Phi_n\left(X_t\right)$ to compute optimal action $a_t^\star\left(X_t\right)=\sum_n^N{\phi_{nt}\Phi_n\left(X_t\right)}$ backward recursively with terminal condition $a_T^\star\left(X_T\right)=0$.
Once the optimal hedge $a_t^\star\left(X_t\right)$ is computed, the portfolio value $\Pi_t$ could also be computed backward recursively by
$$\Pi_t=\gamma\left[\Pi_{t+1}-a_t^\star\Delta S_t\right]\quad t=T-1,...,0$$
together with the terminal condition $\Pi_T=H_T\left(S_T\right)=\max\left(K-S_T,0\right)$ for a European put option.
Also compute $\hat{\Pi}_t=\Pi_t-\bar{\Pi}_t$, where $\bar{\Pi}_t$ is the sample mean of all values of $\Pi_t$.
Plots of 5 optimal hedge $a_t^\star$ and portfolio value $\Pi_t$ paths are shown below.
```
starttime = time.time()
# portfolio value Pi_t, terminal condition Pi_T = max(K - S_T, 0)
Pi = pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))
Pi.iloc[:,-1] = S.iloc[:,-1].apply(lambda x: terminal_payoff(x, K))
# de-meaned portfolio value Pi_hat_t = Pi_t - mean(Pi_t)
Pi_hat = pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))
Pi_hat.iloc[:,-1] = Pi.iloc[:,-1] - np.mean(Pi.iloc[:,-1])
# optimal hedge a_t, terminal condition a_T = 0
a = pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))
a.iloc[:,-1] = 0
reg_param = 1e-3 # free parameter
# backward recursion t = T-1, ..., 0:
#   phi_t = A_t^{-1} B_t;  a_t = Phi(X_t) phi_t;
#   Pi_t = gamma * (Pi_{t+1} - a_t * delta_S_t)
for t in range(T-1, -1, -1):
    A_mat = function_A_vec(t, delta_S_hat, data_mat_t, reg_param)
    B_vec = function_B_vec(t, Pi_hat, delta_S_hat, S, data_mat_t, gamma, risk_lambda)
    # print ('t = A_mat.shape = B_vec.shape = ', t, A_mat.shape, B_vec.shape)
    # coefficients for expansions of the optimal action
    phi = np.dot(np.linalg.inv(A_mat), B_vec)
    a.loc[:,t] = np.dot(data_mat_t[t,:,:],phi)
    # note: the portfolio step uses the raw delta_S, not the de-meaned delta_S_hat
    Pi.loc[:,t] = gamma * (Pi.loc[:,t+1] - a.loc[:,t] * delta_S.loc[:,t])
    Pi_hat.loc[:,t] = Pi.loc[:,t] - np.mean(Pi.loc[:,t])
a = a.astype('float')
Pi = Pi.astype('float')
Pi_hat = Pi_hat.astype('float')
endtime = time.time()
print('Computational time:', endtime - starttime, 'seconds')
# plot 10 paths
plt.plot(a.T.iloc[:,idx_plot])
plt.xlabel('Time Steps')
plt.title('Optimal Hedge')
plt.show()
plt.plot(Pi.T.iloc[:,idx_plot])
plt.xlabel('Time Steps')
plt.title('Portfolio Value')
plt.show()
```
## Compute rewards for all paths
Once the optimal hedge $a_t^\star$ and portfolio value $\Pi_t$ are all computed, the reward function $R_t\left(X_t,a_t,X_{t+1}\right)$ could then be computed by
$$R_t\left(X_t,a_t,X_{t+1}\right)=\gamma a_t\Delta S_t-\lambda Var\left[\Pi_t\space|\space\mathcal F_t\right]\quad t=0,...,T-1$$
with terminal condition $R_T=-\lambda Var\left[\Pi_T\right]$.
Plot of 5 reward function $R_t$ paths is shown below.
```
# Compute rewards for all paths
starttime = time.time()
# reward R_t = gamma * a_t * delta_S_t - lambda * Var[Pi_t],
# with terminal condition R_T = -lambda * Var[Pi_T]
R = pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))
R.iloc[:,-1] = - risk_lambda * np.var(Pi.iloc[:,-1])
for t in range(T):
    R.loc[1:,t] = gamma * a.loc[1:,t] * delta_S.loc[1:,t] - risk_lambda * np.var(Pi.loc[1:,t])
endtime = time.time()
print('\nTime Cost:', endtime - starttime, 'seconds')
# plot 10 paths
plt.plot(R.T.iloc[:, idx_plot])
plt.xlabel('Time Steps')
plt.title('Reward Function')
plt.show()
```
## Part 2: Compute the optimal Q-function with the DP approach
Coefficients for expansions of the optimal Q-function $Q_t^\star\left(X_t,a_t^\star\right)$ are solved by
$$\omega_t=\mathbf C_t^{-1}\mathbf D_t$$
where $\mathbf C_t$ and $\mathbf D_t$ are matrix and vector respectively with elements given by
$$C_{nm}^{\left(t\right)}=\sum_{k=1}^{N_{MC}}{\Phi_n\left(X_t^k\right)\Phi_m\left(X_t^k\right)}\quad\quad D_n^{\left(t\right)}=\sum_{k=1}^{N_{MC}}{\Phi_n\left(X_t^k\right)\left(R_t\left(X_t,a_t^\star,X_{t+1}\right)+\gamma\max_{a_{t+1}\in\mathcal{A}}Q_{t+1}^\star\left(X_{t+1},a_{t+1}\right)\right)}$$
Define function *function_C* and *function_D* to compute the value of matrix $\mathbf C_t$ and vector $\mathbf D_t$.
**Instructions:**
- implement function_C_vec() which computes $C_{nm}^{\left(t\right)}$ matrix
- implement function_D_vec() which computes $D_n^{\left(t\right)}$ column vector
```
def function_C_vec(t, data_mat, reg_param):
    """
    function_C_vec - calculate C_{nm} matrix from Eq. (56) (with a regularization!)
    Eq. (56) in QLBS Q-Learner in the Black-Scholes-Merton article:

        C_{nm}^{(t)} = sum_k Phi_n(X_t^k) Phi_m(X_t^k)  + reg_param * I

    Arguments:
    t - time index, a scalar, an index into time axis of data_mat
    data_mat - np.array of values of basis functions of dimension T x N_MC x num_basis
    reg_param - regularization parameter, a scalar
    Return:
    C_mat - np.array of dimension num_basis x num_basis
    """
    Phi = data_mat[t]                      # basis values at time t: N_MC x num_basis
    ridge = reg_param * np.eye(Phi.shape[1])
    C_mat = np.dot(Phi.T, Phi) + ridge
    return C_mat
def function_D_vec(t, Q, R, data_mat, gamma=gamma):
    """
    function_D_vec - calculate D_{n} vector from Eq. (56) (with a regularization!)
    Eq. (56) in QLBS Q-Learner in the Black-Scholes-Merton article:

        D_n^{(t)} = sum_k Phi_n(X_t^k) (R_t^k + gamma * Q_{t+1}^k)

    Arguments:
    t - time index, a scalar, an index into time axis of data_mat
    Q - pandas.DataFrame of Q-function values of dimension N_MC x T
    R - pandas.DataFrame of rewards of dimension N_MC x T
    data_mat - np.array of values of basis functions of dimension T x N_MC x num_basis
    gamma - one time-step discount factor $exp(-r \delta t)$
    Return:
    D_vec - np.array of dimension num_basis x 1
    """
    Phi = data_mat[t]                               # N_MC x num_basis
    bellman_target = R.loc[:, t] + gamma * Q.loc[:, t + 1]
    return np.dot(Phi.T, bellman_target)
### GRADED PART (DO NOT EDIT) ###
# sample 50 random entries of C_{T-1} and submit them for grading
C_mat = function_C_vec(T-1, data_mat_t, reg_param)
np.random.seed(42)
idx_row = np.random.randint(low=0, high=C_mat.shape[0], size=50)
np.random.seed(42)
idx_col = np.random.randint(low=0, high=C_mat.shape[1], size=50)
part_3 = list(C_mat[idx_row, idx_col])
try:
    part3 = " ".join(map(repr, part_3))
except TypeError:
    part3 = repr(part_3)
submissions[all_parts[2]]=part3
grading.submit(COURSERA_EMAIL, COURSERA_TOKEN, assignment_key,all_parts[:3],all_parts,submissions)
C_mat[idx_row, idx_col]
### GRADED PART (DO NOT EDIT) ###
### GRADED PART (DO NOT EDIT) ###
# terminal condition Q_T = -Pi_T - lambda * Var[Pi_T], then evaluate D_{T-1}
Q = pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))
Q.iloc[:,-1] = - Pi.iloc[:,-1] - risk_lambda * np.var(Pi.iloc[:,-1])
D_vec = function_D_vec(T-1, Q, R, data_mat_t,gamma)
part_4 = list(D_vec)
try:
    part4 = " ".join(map(repr, part_4))
except TypeError:
    part4 = repr(part_4)
submissions[all_parts[3]]=part4
grading.submit(COURSERA_EMAIL, COURSERA_TOKEN, assignment_key,all_parts[:4],all_parts,submissions)
D_vec
### GRADED PART (DO NOT EDIT) ###
```
Call *function_C* and *function_D* for $t=T-1,...,0$ together with basis function $\Phi_n\left(X_t\right)$ to compute optimal action Q-function $Q_t^\star\left(X_t,a_t^\star\right)=\sum_n^N{\omega_{nt}\Phi_n\left(X_t\right)}$ backward recursively with terminal condition $Q_T^\star\left(X_T,a_T=0\right)=-\Pi_T\left(X_T\right)-\lambda Var\left[\Pi_T\left(X_T\right)\right]$.
```
starttime = time.time()
# Q function, terminal condition Q_T = -Pi_T - lambda * Var[Pi_T]
Q = pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))
Q.iloc[:,-1] = - Pi.iloc[:,-1] - risk_lambda * np.var(Pi.iloc[:,-1])
reg_param = 1e-3
# backward recursion t = T-1, ..., 0:
#   omega_t = C_t^{-1} D_t;  Q_t = Phi(X_t) omega_t
for t in range(T-1, -1, -1):
    ######################
    C_mat = function_C_vec(t,data_mat_t,reg_param)
    D_vec = function_D_vec(t, Q,R,data_mat_t,gamma)
    omega = np.dot(np.linalg.inv(C_mat), D_vec)
    Q.loc[:,t] = np.dot(data_mat_t[t,:,:], omega)
Q = Q.astype('float')
endtime = time.time()
print('\nTime Cost:', endtime - starttime, 'seconds')
# plot 10 paths
plt.plot(Q.T.iloc[:, idx_plot])
plt.xlabel('Time Steps')
plt.title('Optimal Q-Function')
plt.show()
```
The QLBS option price is given by $C_t^{\left(QLBS\right)}\left(S_t,ask\right)=-Q_t\left(S_t,a_t^\star\right)$
## Summary of the QLBS pricing and comparison with the BSM pricing
Compare the QLBS price to the European put price given by the Black-Scholes formula.
$$C_t^{\left(BS\right)}=Ke^{-r\left(T-t\right)}\mathcal N\left(-d_2\right)-S_t\mathcal N\left(-d_1\right)$$
```
# The Black-Scholes prices
def bs_put(t, S0=S0, K=K, r=r, sigma=sigma, T=M):
    """Black-Scholes price of a European put at time t (maturity T)."""
    tau = T - t  # time to maturity
    d1 = (np.log(S0/K) + (r + 1/2 * sigma**2) * tau) / sigma / np.sqrt(tau)
    d2 = (np.log(S0/K) + (r - 1/2 * sigma**2) * tau) / sigma / np.sqrt(tau)
    return K * np.exp(-r * tau) * norm.cdf(-d2) - S0 * norm.cdf(-d1)
def bs_call(t, S0=S0, K=K, r=r, sigma=sigma, T=M):
    """Black-Scholes price of a European call at time t (maturity T)."""
    tau = T - t  # time to maturity
    d1 = (np.log(S0/K) + (r + 1/2 * sigma**2) * tau) / sigma / np.sqrt(tau)
    d2 = (np.log(S0/K) + (r - 1/2 * sigma**2) * tau) / sigma / np.sqrt(tau)
    return S0 * norm.cdf(d1) - K * np.exp(-r * tau) * norm.cdf(d2)
```
## The DP solution for QLBS
```
# QLBS option (ask) price: C_t = -Q_t(S_t, a_t*)
C_QLBS = - Q.copy()
print('-------------------------------------------')
print(' QLBS Option Pricing (DP solution) ')
print('-------------------------------------------\n')
print('%-25s' % ('Initial Stock Price:'), S0)
print('%-25s' % ('Drift of Stock:'), mu)
print('%-25s' % ('Volatility of Stock:'), sigma)
print('%-25s' % ('Risk-free Rate:'), r)
print('%-25s' % ('Risk aversion parameter: '), risk_lambda)
print('%-25s' % ('Strike:'), K)
print('%-25s' % ('Maturity:'), M)
print('%-26s %.4f' % ('\nQLBS Put Price: ', C_QLBS.iloc[0,0]))
# NOTE(review): "Black-Sholes" typo below is in a runtime string, left as-is
print('%-26s %.4f' % ('\nBlack-Sholes Put Price:', bs_put(0)))
print('\n')
# plot 10 paths
plt.plot(C_QLBS.T.iloc[:,idx_plot])
plt.xlabel('Time Steps')
plt.title('QLBS Option Price')
plt.show()
### GRADED PART (DO NOT EDIT) ###
part5 = str(C_QLBS.iloc[0,0])
submissions[all_parts[4]]=part5
grading.submit(COURSERA_EMAIL, COURSERA_TOKEN, assignment_key,all_parts[:5],all_parts,submissions)
C_QLBS.iloc[0,0]
### GRADED PART (DO NOT EDIT) ###
```
### make a summary picture
```
# plot: Simulated S_t and X_t values
# optimal hedge and portfolio values
# rewards and optimal Q-function
# 3x2 grid summarizing every quantity produced above, for the same idx_plot paths
f, axarr = plt.subplots(3, 2)
f.subplots_adjust(hspace=.5)
f.set_figheight(8.0)
f.set_figwidth(8.0)
axarr[0, 0].plot(S.T.iloc[:,idx_plot])
axarr[0, 0].set_xlabel('Time Steps')
axarr[0, 0].set_title(r'Simulated stock price $S_t$')
axarr[0, 1].plot(X.T.iloc[:,idx_plot])
axarr[0, 1].set_xlabel('Time Steps')
axarr[0, 1].set_title(r'State variable $X_t$')
axarr[1, 0].plot(a.T.iloc[:,idx_plot])
axarr[1, 0].set_xlabel('Time Steps')
axarr[1, 0].set_title(r'Optimal action $a_t^{\star}$')
axarr[1, 1].plot(Pi.T.iloc[:,idx_plot])
axarr[1, 1].set_xlabel('Time Steps')
axarr[1, 1].set_title(r'Optimal portfolio $\Pi_t$')
axarr[2, 0].plot(R.T.iloc[:,idx_plot])
axarr[2, 0].set_xlabel('Time Steps')
axarr[2, 0].set_title(r'Rewards $R_t$')
axarr[2, 1].plot(Q.T.iloc[:,idx_plot])
axarr[2, 1].set_xlabel('Time Steps')
axarr[2, 1].set_title(r'Optimal DP Q-function $Q_t^{\star}$')
# plt.savefig('QLBS_DP_summary_graphs_ATM_option_mu=r.png', dpi=600)
# plt.savefig('QLBS_DP_summary_graphs_ATM_option_mu>r.png', dpi=600)
#plt.savefig('QLBS_DP_summary_graphs_ATM_option_mu>r.png', dpi=600)
plt.savefig('r.png', dpi=600)
plt.show()
# plot convergence to the Black-Scholes values
# lam = 0.0001, Q = 4.1989 +/- 0.3612 # 4.378
# lam = 0.001: Q = 4.9004 +/- 0.1206 # Q=6.283
# lam = 0.005: Q = 8.0184 +/- 0.9484 # Q = 14.7489
# lam = 0.01: Q = 11.9158 +/- 2.2846 # Q = 25.33
lam_vals = np.array([0.0001, 0.001, 0.005, 0.01])
# Q_vals = np.array([3.77, 3.81, 4.57, 7.967,12.2051])
Q_vals = np.array([4.1989, 4.9004, 8.0184, 11.9158])  # hard-coded results of prior runs (see comments above)
Q_std = np.array([0.3612, 0.1206, 0.9484, 2.2846])
BS_price = bs_put(0)
fig, ax = plt.subplots(1, 1)
# BUG FIX: the three figure-layout calls below previously targeted `f` (the
# 3x2 summary figure from the previous cell) instead of the figure created
# on the line above, so this plot never got its intended size/spacing.
fig.subplots_adjust(hspace=.5)
fig.set_figheight(4.0)
fig.set_figwidth(4.0)
ax.errorbar(lam_vals, Q_vals, yerr=Q_std, fmt='o')
ax.set_xlabel('Risk aversion')
ax.set_ylabel('Optimal option price')
ax.set_title(r'Optimal option price vs risk aversion')
ax.axhline(y=BS_price, linewidth=2, color='r')
textstr = 'BS price = %2.2f' % (BS_price)
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
# place a text box in upper left in axes coords
ax.text(0.05, 0.95, textstr, fontsize=11, transform=ax.transAxes, verticalalignment='top', bbox=props)
plt.savefig('Opt_price_vs_lambda_Markowitz.png')
plt.show()
```
| github_jupyter |
```
import copy
import random
import numpy as np
import pandas as pd
import torch
from scipy import stats
from torch import nn
from torchtext.legacy import data
from torchtext.vocab import Vectors
from tqdm import tqdm
from util import calc_accuracy, calc_f1, init_device, load_params
from util.model import MyClassifier
from util.nlp_preprocessing import dataframe2dataset, tokenizer_ja
# Initialise every RNG (Python, NumPy, PyTorch) from one seed and force
# deterministic cuDNN behaviour so runs are reproducible.
seed = 0
for _seed_fn in (random.seed, np.random.seed, torch.manual_seed):
    _seed_fn(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
# Select the compute device -- presumably CUDA when available; see util.init_device.
device = init_device()
# Load experiment parameters.
print("Loading parameters...")
params = load_params("/workspace/amazon_review/config/params_mmd.json")
# Load the Amazon-review datasets (one JSON record per line).
train_df = pd.read_json(params["ja_train_path"], orient="record", lines=True)
if params["is_developing"]:
    # Subsample for faster development runs (fixed seed for reproducibility).
    train_df = train_df.sample(n=200000, random_state=1)
dev_df = pd.read_json(params["ja_dev_path"], orient="record", lines=True)
test_df = pd.read_json(params["ja_test_path"], orient="record", lines=True)
# Partition every split by product category into source and target domains.
src_cat = params["source_category"]
tgt_cat = params["target_category"]
train_source_df = train_df[train_df["product_category"] == src_cat]
dev_source_df = dev_df[dev_df["product_category"] == src_cat]
test_source_df = test_df[test_df["product_category"] == src_cat]
train_target_df = train_df[train_df["product_category"] == tgt_cat]
dev_target_df = dev_df[dev_df["product_category"] == tgt_cat]
test_target_df = test_df[test_df["product_category"] == tgt_cat]
# Set class labels.
# Binary task: predict whether a review has more than 3 stars.
for df in [train_source_df, dev_source_df, test_source_df, train_target_df, dev_target_df, test_target_df]:
    # BUG FIX: the original chained assignment `df["class"][df["stars"] > 3] = 1`
    # triggers pandas' SettingWithCopyWarning and may write to a temporary copy;
    # a single vectorized assignment is both correct and faster.
    df["class"] = (df["stars"] > 3).astype(int)
    # For the 5-class variant use instead:
    # df["class"] = df["stars"] - 1
# Build the torchtext fields describing how text and labels are processed.
print("Building data iterator...")
text_field = data.Field(
    sequential=True,
    tokenize=tokenizer_ja,  # Japanese tokenizer from util.nlp_preprocessing
    use_vocab=True,
    lower=True,
    include_lengths=True,  # batches become (token_ids, lengths) tuples
    batch_first=True,
    fix_length=params["token_max_length"],  # pad/truncate to a fixed length
    init_token="<cls>",
    eos_token="<eos>",
)
label_field = data.Field(sequential=False, use_vocab=False)
fields = [("text", text_field), ("label", label_field)]
# Build torchtext datasets from the data frames (text from review_body,
# label from the class column).
columns = ["review_body", "class"]
train_source_dataset = dataframe2dataset(train_source_df, fields, columns)
dev_source_dataset = dataframe2dataset(dev_source_df, fields, columns)
# test_source_dataset = dataframe2dataset(test_source_df, fields, columns)
# train_target_dataset = dataframe2dataset(train_target_df, fields, columns)
dev_target_dataset = dataframe2dataset(dev_target_df, fields, columns)
test_target_dataset = dataframe2dataset(test_target_df, fields, columns)
all_train_dataset = dataframe2dataset(pd.concat([train_source_df, train_target_df]), fields, columns)
# Build the vocabulary over source + target training data,
# optionally initialised with pretrained Japanese fastText vectors.
if params["use_pretrained_vector"]:
    japanese_fasttext_vectors = Vectors(name=params["ja_vector_path"])
    text_field.build_vocab(all_train_dataset, vectors=japanese_fasttext_vectors, min_freq=1)
else:
    text_field.build_vocab(all_train_dataset, min_freq=1)
# Build data loaders; BucketIterator batches examples of similar length
# to minimise padding. Eval iterators keep the original order (sort=False).
train_source_iter = data.BucketIterator(dataset=train_source_dataset, batch_size=params["batch_size"], train=True)
dev_source_iter = data.BucketIterator(
    dataset=dev_source_dataset, batch_size=params["batch_size"], train=False, sort=False
)
# test_source_iter = data.BucketIterator(
#     dataset=test_source_dataset, batch_size=params["batch_size"], train=False, sort=False
# )
# train_target_iter = data.BucketIterator(dataset=train_target_dataset, batch_size=params["batch_size"], train=True)
dev_target_iter = data.BucketIterator(
    dataset=dev_target_dataset, batch_size=params["batch_size"], train=False, sort=False
)
test_target_iter = data.BucketIterator(
    dataset=test_target_dataset, batch_size=params["batch_size"], train=False, sort=False
)
# Build the classifier model, loss and optimizer.
v_size = len(text_field.vocab.stoi)  # vocabulary size
if params["use_pretrained_vector"]:
    # Passing text_field lets the model initialise embeddings from the vocab vectors.
    model = MyClassifier(params["emb_dim"], v_size, params["token_max_length"], params["class_num"], text_field).to(
        device
    )
else:
    model = MyClassifier(params["emb_dim"], v_size, params["token_max_length"], params["class_num"]).to(device)
# Loss and optimizer classes are selected by name from the config.
criterion = getattr(nn, params["criterion"])()
optimizer = getattr(torch.optim, params["optimizer"])(model.parameters(), lr=params["lr"])
# sourceで訓練
# print("sourceで事前学習開始")
# for epoch in range(params["epochs"]):
# print(f"\nepoch {epoch+1} / {params['epochs']}")
# total_loss = 0
# for i, batch in tqdm(enumerate(train_source_iter), total=len(train_source_iter)):
# model.train()
# x, y = batch.text[0].to(device), batch.label.to(device)
# _, pred = model(x)
# loss = criterion(pred, y)
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
# total_loss += loss.cpu()
# print(f"Train Source Loss: {total_loss / len(train_source_iter):.3f}")
# total_dev_accuracy = 0
# total_dev_f1 = 0
# model.eval()
# for valid_batch in dev_source_iter:
# x, y = valid_batch.text[0].to(device), valid_batch.label.to(device)
# with torch.no_grad():
# _, pred = model(x)
# label_array = y.cpu().numpy()
# logit_array = pred.cpu().numpy()
# total_dev_accuracy += calc_accuracy(label_array, logit_array)
# total_dev_f1 += calc_f1(label_array, logit_array)
# print(f"Dev Source Accuracy: {total_dev_accuracy / len(dev_source_iter):.2f}")
# print(f"Dev Source F1 Score: {total_dev_f1 / len(dev_source_iter):.2f}")
# Run the fine-tuning experiment multiple times (bootstrap-style trials).
print("\ntargetでFineTuning開始")
# Keep the (optionally pre-trained) model;
# deepcopy so the snapshot does not share memory with the live model.
model_pretrained = copy.deepcopy(model.cpu())
# Ratios of target-domain training data (relative to source size) to sweep over.
params["target_ratio"] = [0.01, 0.05, 0.1, 0.3, 0.5]
accuracy_list = []  # test accuracy per trial, across all ratios
f1_list = []  # test F1 per trial, across all ratios
# Fine-tune on the target domain for each target-data ratio, repeating each
# setting params["trial_count"] times; per-trial test metrics are appended to
# accuracy_list / f1_list for the confidence-interval summary below.
for target_ratio in params["target_ratio"]:
    print("------------------------------")
    print(f"target_ratio = {target_ratio}")
    print("------------------------------")
    for count in range(params["trial_count"]):
        print(f"\n{count+1}回目の試行")
        # Prepare fine-tuning: shrink the target training data down to the
        # ratio (relative to the source set size) given by target_ratio.
        source_num = train_source_df.shape[0]
        target_num = int(source_num * target_ratio)
        if target_num > train_target_df.shape[0]:
            print("Target ratio is too large.")
            exit()
        train_target_df_sample = train_target_df.sample(target_num, replace=False)
        print(f"Source num: {source_num}, Target num: {target_num}")
        # Build the target-domain data loader for this trial's sample.
        train_target_dataset = dataframe2dataset(train_target_df_sample, fields, columns)
        train_target_iter = data.BucketIterator(
            dataset=train_target_dataset, batch_size=params["batch_size"], train=True
        )
        # Restore the pretrained snapshot (deepcopy keeps trials independent)
        # and create a fresh optimizer bound to the restored parameters.
        model = copy.deepcopy(model_pretrained).to(device)
        optimizer = getattr(torch.optim, params["optimizer"])(model.parameters(), lr=params["lr"])
        # Fine-tune on the target domain.
        for epoch in range(params["epochs"]):
            print(f"\nepoch {epoch+1} / {params['epochs']}")
            total_loss = 0
            for i, batch in enumerate(train_target_iter):
                model.train()
                x, y = batch.text[0].to(device), batch.label.to(device)
                _, pred = model(x)
                loss = criterion(pred, y)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # BUG FIX: accumulate a plain Python float. The original
                # `total_loss += loss.cpu()` kept every batch's autograd graph
                # alive for the whole epoch, leaking memory.
                total_loss += loss.item()
            print(f"Train Target Loss: {total_loss / len(train_target_iter):.3f}")
        # Evaluate on the held-out target-domain test set.
        total_test_accuracy = 0
        total_test_f1 = 0
        model.eval()
        for test_batch in test_target_iter:
            x, y = test_batch.text[0].to(device), test_batch.label.to(device)
            with torch.no_grad():
                _, pred = model(x)
            label_array = y.cpu().numpy()
            logit_array = pred.cpu().numpy()
            total_test_accuracy += calc_accuracy(label_array, logit_array)
            total_test_f1 += calc_f1(label_array, logit_array)
        test_accuracy = total_test_accuracy / len(test_target_iter)
        test_f1 = total_test_f1 / len(test_target_iter)
        accuracy_list.append(test_accuracy)
        f1_list.append(test_f1)
        print(f"\nTest Target Accuracy: {test_accuracy:.2f}")
        print(f"Test Target F1 Score: {test_f1:.2f}")
# Summarise all trials with 95% Student-t confidence intervals.
# NOTE(review): the `alpha=` keyword was renamed to `confidence=` in SciPy 1.9
# and later removed -- update this call when upgrading SciPy.
accuracy_interval = stats.t.interval(
    alpha=0.95, df=len(accuracy_list) - 1, loc=np.mean(accuracy_list), scale=stats.sem(accuracy_list)
)
f1_interval = stats.t.interval(alpha=0.95, df=len(f1_list) - 1, loc=np.mean(f1_list), scale=stats.sem(f1_list))
print("\n\t\tMean, Std, 95% interval (bottom, up)")
print(
    f"Accuracy\t{np.mean(accuracy_list):.2f}, {np.std(accuracy_list, ddof=1):.2f}, {accuracy_interval[0]:.2f}, {accuracy_interval[1]:.2f}"
)
print(
    f"F1 Score\t{np.mean(f1_list):.2f}, {np.std(f1_list, ddof=1):.2f}, {f1_interval[0]:.2f}, {f1_interval[1]:.2f}"
)
```
| github_jupyter |
### Installation
`devtools::install_github("zji90/SCRATdatahg19")`
`source("https://raw.githubusercontent.com/zji90/SCRATdata/master/installcode.R")`
### Import packages
```
library(devtools)
library(GenomicAlignments)
library(Rsamtools)
library(SCRATdatahg19)
library(SCRAT)
```
### Obtain Feature Matrix
```
# Record the start time so total runtime can be reported at the end.
start_time = Sys.time()
# Cell metadata: one row per cell, rownames are the cell identifiers.
metadata <- read.table('./input/metadata.tsv',
                       header = TRUE,
                       stringsAsFactors=FALSE,quote="",row.names=1)
# Build a feature-by-cell summary matrix from single-cell BAM files.
# (Copy of SCRAT::SCRATsummary with review comments added.)
#
# Arguments (selection):
#   dir / bamfile   : directory of BAM files, or an explicit file vector.
#   genome          : genome id used to locate the SCRATdata<genome> package.
#   singlepair      : "automated" (auto-detect), "single" or "pair" end reads.
#   removeblacklist : drop reads overlapping the packaged blacklist regions.
#   log2transform   : apply log2(x + 1) to the normalised counts.
#   adjustlen       : divide counts by total feature length (per-bp rate).
#   featurelist     : which feature families to quantify.
# Returns a matrix with one row per feature and one column per BAM file.
SCRATsummary <- function (dir = "", genome, bamfile = NULL, singlepair = "automated",
    removeblacklist = T, log2transform = T, adjustlen = T, featurelist = c("GENE",
        "ENCL", "MOTIF_TRANSFAC", "MOTIF_JASPAR", "GSEA"), customfeature = NULL,
    Genestarttype = "TSSup", Geneendtype = "TSSdown", Genestartbp = 3000,
    Geneendbp = 1000, ENCLclunum = 2000, Motifflank = 100, GSEAterm = "c5.bp",
    GSEAstarttype = "TSSup", GSEAendtype = "TSSdown", GSEAstartbp = 3000,
    GSEAendbp = 1000)
{
    # Default: process every .bam file found in `dir`.
    if (is.null(bamfile)) {
        bamfile <- list.files(dir, pattern = ".bam$")
    }
    datapath <- system.file("extdata", package = paste0("SCRATdata",
        genome))
    # ---- Read each BAM file into a GRanges object of read/fragment ranges ----
    bamdata <- list()
    for (i in bamfile) {
        filepath <- file.path(dir, i)
        if (singlepair == "automated") {
            # NOTE(review): this overwrites the `bamfile` argument inside the
            # loop; R's for() iterates over the originally evaluated vector so
            # iteration is unaffected, but a distinct name would be clearer.
            bamfile <- BamFile(filepath)
            tmpsingle <- readGAlignments(bamfile)
            tmppair <- readGAlignmentPairs(bamfile)
            pairendtf <- testPairedEndBam(bamfile)
            if (pairendtf) {
                # Paired-end: one range per fragment, spanning both mates.
                tmp <- tmppair
                startpos <- pmin(start(first(tmp)), start(last(tmp)))
                endpos <- pmax(end(first(tmp)), end(last(tmp)))
                id <- which(!is.na(as.character(seqnames(tmp))))
                tmp <- GRanges(seqnames=as.character(seqnames(tmp))[id],IRanges(start=startpos[id],end=endpos[id]))
            }
            else {
                tmp <- GRanges(tmpsingle)
            }
        }
        else if (singlepair == "single") {
            tmp <- GRanges(readGAlignments(filepath))
        }
        else if (singlepair == "pair") {
            tmp <- readGAlignmentPairs(filepath)
            startpos <- pmin(start(first(tmp)), start(last(tmp)))
            endpos <- pmax(end(first(tmp)), end(last(tmp)))
            id <- which(!is.na(as.character(seqnames(tmp))))
            tmp <- GRanges(seqnames=as.character(seqnames(tmp))[id],IRanges(start=startpos[id],end=endpos[id]))
        }
        if (removeblacklist) {
            # load() brings the blacklist GRanges into scope as `gr`.
            # NOTE(review): if NO read overlaps the blacklist the negative
            # index is integer(0), and `tmp[-integer(0), ]` drops ALL reads --
            # confirm inputs always contain at least one blacklisted read.
            load(paste0(datapath, "/gr/blacklist.rda"))
            tmp <- tmp[-as.matrix(findOverlaps(tmp, gr))[, 1],
                ]
        }
        bamdata[[i]] <- tmp
    }
    # Library size per cell, used to normalise counts to reads-per-10k.
    bamsummary <- sapply(bamdata, length)
    allres <- NULL
    # NOTE(review): duplicated datapath computation (already set above).
    datapath <- system.file("extdata", package = paste0("SCRATdata",
        genome))
    # ---- GENE features: counts over promoter/gene-body windows ----
    if ("GENE" %in% featurelist) {
        print("Processing GENE features")
        load(paste0(datapath, "/gr/generegion.rda"))
        # Window start relative to TSS/TES, strand-aware.
        if (Genestarttype == "TSSup") {
            grstart <- ifelse(as.character(strand(gr)) == "+",
                start(gr) - as.numeric(Genestartbp), end(gr) +
                  as.numeric(Genestartbp))
        }
        else if (Genestarttype == "TSSdown") {
            grstart <- ifelse(as.character(strand(gr)) == "+",
                start(gr) + as.numeric(Genestartbp), end(gr) -
                  as.numeric(Genestartbp))
        }
        else if (Genestarttype == "TESup") {
            grstart <- ifelse(as.character(strand(gr)) == "+",
                end(gr) - as.numeric(Genestartbp), start(gr) +
                  as.numeric(Genestartbp))
        }
        else if (Genestarttype == "TESdown") {
            grstart <- ifelse(as.character(strand(gr)) == "+",
                end(gr) + as.numeric(Genestartbp), start(gr) -
                  as.numeric(Genestartbp))
        }
        # Window end relative to TSS/TES, strand-aware.
        if (Geneendtype == "TSSup") {
            grend <- ifelse(as.character(strand(gr)) == "+",
                start(gr) - as.numeric(Geneendbp), end(gr) +
                  as.numeric(Geneendbp))
        }
        else if (Geneendtype == "TSSdown") {
            grend <- ifelse(as.character(strand(gr)) == "+",
                start(gr) + as.numeric(Geneendbp), end(gr) -
                  as.numeric(Geneendbp))
        }
        else if (Geneendtype == "TESup") {
            grend <- ifelse(as.character(strand(gr)) == "+",
                end(gr) - as.numeric(Geneendbp), start(gr) +
                  as.numeric(Geneendbp))
        }
        else if (Geneendtype == "TESdown") {
            grend <- ifelse(as.character(strand(gr)) == "+",
                end(gr) + as.numeric(Geneendbp), start(gr) -
                  as.numeric(Geneendbp))
        }
        ngr <- names(gr)
        # Normalise so start <= end regardless of strand arithmetic.
        gr <- GRanges(seqnames = seqnames(gr), IRanges(start = pmin(grstart,
            grend), end = pmax(grstart, grend)))
        names(gr) <- ngr
        # Count reads per window, normalise per 10k reads, optional log2
        # transform and per-bp length adjustment; drop all-zero rows.
        tmp <- sapply(bamdata, function(i) countOverlaps(gr,
            i))
        tmp <- sweep(tmp, 2, bamsummary, "/") * 10000
        if (log2transform) {
            tmp <- log2(tmp + 1)
        }
        if (adjustlen) {
            grrange <- end(gr) - start(gr) + 1
            tmp <- sweep(tmp, 1, grrange, "/") * 1e+06
        }
        tmp <- tmp[rowSums(tmp) > 0, , drop = F]
        allres <- rbind(allres, tmp)
    }
    # ---- ENCL features: ENCODE cluster regions (GRangesList) ----
    if ("ENCL" %in% featurelist) {
        print("Processing ENCL features")
        load(paste0(datapath, "/gr/ENCL", ENCLclunum, ".rda"))
        tmp <- sapply(bamdata, function(i) countOverlaps(gr,
            i))
        tmp <- sweep(tmp, 2, bamsummary, "/") * 10000
        if (log2transform) {
            tmp <- log2(tmp + 1)
        }
        if (adjustlen) {
            # Total region length per cluster (list element).
            grrange <- sapply(gr, function(i) sum(end(i) - start(i) +
                1))
            tmp <- sweep(tmp, 1, grrange, "/") * 1e+06
        }
        tmp <- tmp[rowSums(tmp) > 0, , drop = F]
        allres <- rbind(allres, tmp)
    }
    # ---- TRANSFAC motif sites, flanked by Motifflank bp on both sides ----
    if ("MOTIF_TRANSFAC" %in% featurelist) {
        print("Processing MOTIF_TRANSFAC features")
        load(paste0(datapath, "/gr/transfac1.rda"))
        gr <- flank(gr, as.numeric(Motifflank), both = T)
        tmp <- sapply(bamdata, function(i) countOverlaps(gr,
            i))
        tmp <- sweep(tmp, 2, bamsummary, "/") * 10000
        if (log2transform) {
            tmp <- log2(tmp + 1)
        }
        if (adjustlen) {
            grrange <- sapply(gr, function(i) sum(end(i) - start(i) +
                1))
            tmp <- sweep(tmp, 1, grrange, "/") * 1e+06
        }
        tmp <- tmp[rowSums(tmp) > 0, , drop = F]
        allres <- rbind(allres, tmp)
        # Second batch of TRANSFAC motifs (data shipped in two parts).
        load(paste0(datapath, "/gr/transfac2.rda"))
        gr <- flank(gr, as.numeric(Motifflank), both = T)
        tmp <- sapply(bamdata, function(i) countOverlaps(gr,
            i))
        tmp <- sweep(tmp, 2, bamsummary, "/") * 10000
        if (log2transform) {
            tmp <- log2(tmp + 1)
        }
        if (adjustlen) {
            grrange <- sapply(gr, function(i) sum(end(i) - start(i) +
                1))
            tmp <- sweep(tmp, 1, grrange, "/") * 1e+06
        }
        tmp <- tmp[rowSums(tmp) > 0, , drop = F]
        allres <- rbind(allres, tmp)
        # Third batch exists only for the human genomes.
        if (genome %in% c("hg19", "hg38")) {
            load(paste0(datapath, "/gr/transfac3.rda"))
            gr <- flank(gr, as.numeric(Motifflank), both = T)
            tmp <- sapply(bamdata, function(i) countOverlaps(gr,
                i))
            tmp <- sweep(tmp, 2, bamsummary, "/") * 10000
            if (log2transform) {
                tmp <- log2(tmp + 1)
            }
            if (adjustlen) {
                grrange <- sapply(gr, function(i) sum(end(i) -
                  start(i) + 1))
                tmp <- sweep(tmp, 1, grrange, "/") * 1e+06
            }
            tmp <- tmp[rowSums(tmp) > 0, , drop = F]
            allres <- rbind(allres, tmp)
        }
    }
    # ---- JASPAR motif sites, same processing as TRANSFAC (two batches) ----
    if ("MOTIF_JASPAR" %in% featurelist) {
        print("Processing MOTIF_JASPAR features")
        load(paste0(datapath, "/gr/jaspar1.rda"))
        gr <- flank(gr, as.numeric(Motifflank), both = T)
        tmp <- sapply(bamdata, function(i) countOverlaps(gr,
            i))
        tmp <- sweep(tmp, 2, bamsummary, "/") * 10000
        if (log2transform) {
            tmp <- log2(tmp + 1)
        }
        if (adjustlen) {
            grrange <- sapply(gr, function(i) sum(end(i) - start(i) +
                1))
            tmp <- sweep(tmp, 1, grrange, "/") * 1e+06
        }
        tmp <- tmp[rowSums(tmp) > 0, , drop = F]
        allres <- rbind(allres, tmp)
        load(paste0(datapath, "/gr/jaspar2.rda"))
        gr <- flank(gr, as.numeric(Motifflank), both = T)
        tmp <- sapply(bamdata, function(i) countOverlaps(gr,
            i))
        tmp <- sweep(tmp, 2, bamsummary, "/") * 10000
        if (log2transform) {
            tmp <- log2(tmp + 1)
        }
        if (adjustlen) {
            grrange <- sapply(gr, function(i) sum(end(i) - start(i) +
                1))
            tmp <- sweep(tmp, 1, grrange, "/") * 1e+06
        }
        tmp <- tmp[rowSums(tmp) > 0, , drop = F]
        allres <- rbind(allres, tmp)
    }
    # ---- GSEA gene-set features: windows around each member gene ----
    if ("GSEA" %in% featurelist) {
        print("Processing GSEA features")
        for (i in GSEAterm) {
            load(paste0(datapath, "/gr/GSEA", i, ".rda"))
            allgr <- gr
            # Rebuild every gene set's windows with the requested
            # strand-aware start/end offsets (mirrors the GENE logic).
            for (sgrn in names(allgr)) {
                gr <- allgr[[sgrn]]
                if (GSEAstarttype == "TSSup") {
                  grstart <- ifelse(as.character(strand(gr)) ==
                    "+", start(gr) - as.numeric(GSEAstartbp),
                    end(gr) + as.numeric(GSEAstartbp))
                }
                else if (GSEAstarttype == "TSSdown") {
                  grstart <- ifelse(as.character(strand(gr)) ==
                    "+", start(gr) + as.numeric(GSEAstartbp),
                    end(gr) - as.numeric(GSEAstartbp))
                }
                else if (GSEAstarttype == "TESup") {
                  grstart <- ifelse(as.character(strand(gr)) ==
                    "+", end(gr) - as.numeric(GSEAstartbp), start(gr) +
                    as.numeric(GSEAstartbp))
                }
                else if (GSEAstarttype == "TESdown") {
                  grstart <- ifelse(as.character(strand(gr)) ==
                    "+", end(gr) + as.numeric(GSEAstartbp), start(gr) -
                    as.numeric(GSEAstartbp))
                }
                if (GSEAendtype == "TSSup") {
                  grend <- ifelse(as.character(strand(gr)) ==
                    "+", start(gr) - as.numeric(GSEAendbp), end(gr) +
                    as.numeric(GSEAendbp))
                }
                else if (GSEAendtype == "TSSdown") {
                  grend <- ifelse(as.character(strand(gr)) ==
                    "+", start(gr) + as.numeric(GSEAendbp), end(gr) -
                    as.numeric(GSEAendbp))
                }
                else if (GSEAendtype == "TESup") {
                  grend <- ifelse(as.character(strand(gr)) ==
                    "+", end(gr) - as.numeric(GSEAendbp), start(gr) +
                    as.numeric(GSEAendbp))
                }
                else if (GSEAendtype == "TESdown") {
                  grend <- ifelse(as.character(strand(gr)) ==
                    "+", end(gr) + as.numeric(GSEAendbp), start(gr) -
                    as.numeric(GSEAendbp))
                }
                ngr <- names(gr)
                gr <- GRanges(seqnames = seqnames(gr), IRanges(start = pmin(grstart,
                  grend), end = pmax(grstart, grend)))
                names(gr) <- ngr
                allgr[[sgrn]] <- gr
            }
            gr <- allgr
            tmp <- sapply(bamdata, function(i) countOverlaps(gr,
                i))
            tmp <- sweep(tmp, 2, bamsummary, "/") * 10000
            if (log2transform) {
                tmp <- log2(tmp + 1)
            }
            if (adjustlen) {
                grrange <- sapply(gr, function(i) sum(end(i) -
                  start(i) + 1))
                tmp <- sweep(tmp, 1, grrange, "/") * 1e+06
            }
            tmp <- tmp[rowSums(tmp) > 0, , drop = F]
            allres <- rbind(allres, tmp)
        }
    }
    # ---- User-supplied BED-like feature file (chrom, start, end) ----
    if ("Custom" %in% featurelist) {
        print("Processing custom features")
        gr <- read.table(customfeature, as.is = T, sep = "\t")
        gr <- GRanges(seqnames = gr[, 1], IRanges(start = gr[,
            2], end = gr[, 3]))
        tmp <- sapply(bamdata, function(i) countOverlaps(gr,
            i))
        tmp <- sweep(tmp, 2, bamsummary, "/") * 10000
        if (log2transform) {
            tmp <- log2(tmp + 1)
        }
        if (adjustlen) {
            grrange <- end(gr) - start(gr) + 1
            tmp <- sweep(tmp, 1, grrange, "/") * 1e+06
        }
        tmp <- tmp[rowSums(tmp) > 0, , drop = F]
        allres <- rbind(allres, tmp)
    }
    # Stacked feature matrix: rows = features, columns = BAM files.
    allres
}
# Build the JASPAR-motif feature matrix from the single-cell BAM files
# (raw counts: no log2 transform, no length adjustment, no blacklist removal).
df_out <- SCRATsummary(dir = "./input/sc-bams_nodup/",
                       genome = "hg19",
                       featurelist="MOTIF_JASPAR",
                       log2transform = FALSE, adjustlen = FALSE, removeblacklist=FALSE)
end_time <- Sys.time()
end_time - start_time
dim(df_out)
df_out[1:5,1:5]
# Strip everything after the first "." from the column names, keeping only
# the cell identifier part of each BAM file name.
colnames(df_out) = sapply(strsplit(colnames(df_out), "\\."),'[',1)
dim(df_out)
df_out[1:5,1:5]
# Reorder columns to match the metadata row order when they differ.
if(! all(colnames(df_out) == rownames(metadata))){
  df_out = df_out[,rownames(metadata)]
  dim(df_out)
  df_out[1:5,1:5]
}
dim(df_out)
df_out[1:5,1:5]
# Persist the feature matrix and the full session for reproducibility.
saveRDS(df_out, file = './output/feature_matrices/FM_SCRAT_buenrostro2018_no_blacklist.rds')
sessionInfo()
save.image(file = 'SCRAT_buenrostro2018.RData')
```
| github_jupyter |
# Copper grains classification based on thermal images
This demo shows construction and usage of a neural network that classifies
copper grains.
The grains are recorded with a thermal camera using active thermovision
approach.
The network is fed with numbers of low emissivity spots on every stage of
cooling down the grain samples.
For more information about tracking and counting these spots refer to other
Jupyter demos in this project.
```
from inspect import getsource
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from blob_series_tracker import count_blobs_with_all_methods
from img_processing import decode_labels, default_img_set, full_prepare
from neural_network import (default_grain_classifier_model,
network_cross_validation)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
```
## Data preparation
Let's visualize what a sample image of grains looks like.
To start with we will load the whole default dataset, then we will select a
sample labeled as E5 class at the beginning of the cooling process and show it.
```
# Load the default thermal-image dataset and display the first frame of the
# first sample (an E5-class sample at the start of cooling).
X, y = default_img_set()
plt.imshow(X[0][0]);
```
Next we will prepare the images for classification. They should be cropped to
remove the FLIR UI overlay.
Then they ought to be converted to grayscale and inverted.
The task is to track points with low thermal emissivity which are presented as
black spots on the image.
However multiple image processing algorithms treat white spots as foreground
features and black space as background.
This is opposite to the way in which colors display temperature on our image so
we have to invert the photo.
All these preparations are completed by a single function.
```
X = [[full_prepare(img) for img in same_sample] for same_sample in X]
```
Subsequently we have to track and count low emissivity blobs on each stage of
cooling the grains.
There are three ways of counting the spots:
* counting all spots at every stage of cooling,
* tracking blobs that are present from the beginning of the cooling and ignoring
blobs that appear later,
* tracking blobs that are present from the beginning of the cooling and
calculating the ratio of remaining blobs to their initial number.
To inspect how these methods work relate to other Jupyter demos.
In this notebook we will simply implement a function that uses every one of
these approaches and compare results.
```
Xs = count_blobs_with_all_methods(X)
```
## Classification
We will write a function to classify given grains data.
We have three X datasets, one for every blob counting method.
Later we will call the classification demo function on each of them.
The function turns datasets into NumPy arrays as most of the libraries use this
container for computation efficiency.
Then the data is split into train and test sets.
A function from Scikit-learn is used to perform the split.
Notice that the function is given constant random seed to ensure repetitiveness
of the demo.
So the call looks like this:
```python
X = np.array(X)
y = np.array(y)
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, test_size=0.33, random_state=1)
```
### Neural network model
The classifier model is returned by the `default_grain_classifier_model` function.
Let's take a look at the model definition.
```
print(getsource(default_grain_classifier_model))
```
The model includes four layers, with two hidden ones.
The number of neurons in the input layer is equal to the size of the input
vector, that is five.
If you wish to know why the vector has this size, please refer to the other demo
notebooks.
The size of the output layer is equal to the number of classes, in this case it is four.
Outputs of the network represent the probability of the input belonging to each
of the classes.
The number of the neurons in hidden layers and their activation functions were
chosen based on experiments.
You can examine the comparison of various network structures in the thesis.
### Network training
The last step of constructing a network is the training.
The model compiler has to be given `optimizer`, `loss` and `metrics` parameters
that define the training process.
The best loss function that calculates error between output of the network and
training label is the `sparse_categorical_crossentropy`.
During comparisons adam optimiser proved to be the best one, as it provides
adaptive learning rate.
The comparison of different network training parameters is included in the
thesis.
The `classification_demo` function implements the ideas presented above.
It also prints model evaluation on test set and prints training history.
```
def classification_demo(X, y):
    '''
    Demo grain classification on given data.

    Train the default model on a stratified train split, print the test-set
    predictions and evaluation scores, and return the training history.
    '''
    X = np.array(X)
    y = np.array(y)
    # Fixed random_state keeps the demo reproducible between runs.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, stratify=y, test_size=0.33, random_state=3)
    model = default_grain_classifier_model()
    model.compile(
        optimizer='adam',
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])
    history = model.fit(X_train, y_train, epochs=300, verbose=0)
    print("Test y:", y_test)
    print("Test y:", [decode_labels(y) for y in y_test])
    print('Test prediction outputs:')
    classes = ('E5R', 'E6R', 'E11R', 'E16R')
    # BUG FIX: the original used `index=classes`, which crashes whenever the
    # test split does not contain exactly four samples; index each row by the
    # decoded true label of the corresponding test sample instead.
    print(pd.DataFrame(model.predict(X_test), columns=classes,
                       index=[decode_labels(y) for y in y_test]))
    # NOTE(review): Sequential.predict_classes was removed in TensorFlow 2.6;
    # replace with np.argmax(model.predict(X_test), axis=1) when upgrading.
    prediction = model.predict_classes(X_test)
    print('Test prediction classification:\n', prediction)
    print('Test prediction classification:\n',
          [decode_labels(y) for y in prediction])
    print('Model evaluation loss and accuracy:\n',
          model.evaluate(X_test, y_test, verbose=0), '\n')
    return history
```
## Test results
Now we can use the function to print the results of classification.
The output is relatively lengthy, let's clarify it a bit.
It prints classes of test set in encoded and decoded form.
Then the prediction output for each test is shown.
The closer the output is to one, the more sure the network is that a sample
belongs to given class.
Then the predictions are presented in encoded and decoded format.
Each demo is summarized by the loss and accuracy scores.
The method of tracking and calculating ratio of blobs yield the best results.
Not only it has the highest scores, but also the output is more stable.
The first two methods of counting blobs cause the network to produce unstable
results.
The efficiency varies between each run of the training.
The last method of the data extraction yields the best and clearly visible
classes separation in the evaluation.
However, this way of validating the classifier is unreliable especially on the
extremely small data set.
This way of testing wastes one-third of the very limited data set and introduces
negative bias to the evaluation.
A better way of judging the efficiency of the network is presented later in the
notebook.
```
# Human-readable names of the three blob-counting methods (same order as Xs).
demo_names = ('All blobs detection',
              'Detect only remaining blobs',
              'Ratio of remaining blobs')
training_histories = []
# Run the classification demo once per counting method; keep the histories
# for the training-curve plots below.
for X, demo_name in zip(Xs, demo_names):
    print(demo_name)
    training_histories.append(classification_demo(X, y))
```
To investigate the network learning process and ensure it's propriety we can
plot the training history.
We will write a small helper function and plot accuracy and loss versus epochs.
```
def plot_history(ax, history, demo_name):
    """Plot training accuracy and loss for one demo on twin y-axes of `ax`."""
    ax.title.set_text('Model training history. ' + demo_name)
    ax.set_xlabel('Epoch')
    lns1 = ax.plot(history.history['accuracy'], c='b', label='Accuracy')
    ax.set_ylabel('Accuracy')
    # BUG FIX: the original discarded the axes returned by twinx(), so the
    # loss curve was drawn on the accuracy axis and plt.ylabel labelled
    # whichever axes happened to be current. Capture the twin axis and
    # plot/label the loss on it explicitly.
    ax2 = ax.twinx()
    lns2 = ax2.plot(history.history['loss'], c='r', label='Loss')
    ax2.set_ylabel('Loss')
    # Combine both lines into a single legend on the primary axis.
    lns = lns1 + lns2
    labs = [l.get_label() for l in lns]
    ax.legend(lns, labs)
# One subplot per blob-counting method, side by side.
_, ax = plt.subplots(1, 3, figsize=(24, 6))
for a, history, name in zip(ax, training_histories, demo_names):
    plot_history(a, history, name)
```
The plots are generated for networks employing different methods of counting blobs for feature extraction.
As could have been foreseen, the latter methods that make a use of blob tracking mechanisms have more clear shape, greater accuracy and smaller loss.
# Network validation
The results of the test presented above are not fully conclusive.
The best way to validate the network and check if the idea of grains
classification does work is to perform cross-validation of the model.
This method splits the data k-times and performs the validation on each part of
train and test sets.
The ultimate result of validation is averaged.
Let's create a function that will perform the cross-validation for every
way of blob counting.
```
def cross_val_demo(X, y):
    '''Demo cross validation of default grain classifier on given data.'''
    X, y = np.array(X), np.array(y)
    model = default_grain_classifier_model()
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    # Three-fold cross-validation; each fold yields a (loss, accuracy) pair.
    fold_scores = network_cross_validation(model, X, y, 3)
    print('Folds scores: (loss, acc)\n', fold_scores)
    print('Cross validation mean score (loss, acc):\n',
          np.array(fold_scores).mean(axis=0), '\n')
```
The `network_cross_validation` takes the model to test, dataset and number of
splits to perform.
We can investigate its implementation to see how it works.
```
print(getsource(network_cross_validation))
```
Now we can perform the validation.
Notice that the accuracy of the last blob tracking method becomes stable 0.92,
which is a satisfactory result.
It should be preferred as the best way to classify copper grains.
The other methods of data extraction yield poor classification results and
should be rejected.
```
# Cross-validate the classifier once per blob-counting method.
for X, demo_name in zip(Xs, demo_names):
    print(demo_name)
    cross_val_demo(X, y)
```
## Conclusion
The results of the validation proved that the copper grains can be classified
using active thermography approach.
The classifier has been trained and tested on an extremely small dataset.
However the results show that the suggested idea is worth further investigation.
The next steps in the project may include creating larger dataset with enhanced
measurement stand and development of a more advanced neural network, such as
convolutional neural network.
| github_jupyter |
# Задание 2.1 - Нейронные сети
В этом задании вы реализуете и натренируете настоящую нейронную сеть своими руками!
В некотором смысле это будет расширением прошлого задания - нам нужно просто составить несколько линейных классификаторов вместе!
<img src="https://i.redd.it/n9fgba8b0qr01.png" alt="Stack_more_layers" width="400px"/>
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
from dataset import load_svhn, random_split_train_val
from gradient_check import check_layer_gradient, check_layer_param_gradient, check_model_gradient
from layers import FullyConnectedLayer, ReLULayer
from model import TwoLayerNet
from trainer import Trainer, Dataset
from optim import SGD, MomentumSGD
from metrics import multiclass_accuracy
```
# Загружаем данные
И разделяем их на training и validation.
```
def prepare_for_neural_network(train_X, test_X):
    """Flatten images to vectors, scale to [0, 1] and subtract the train mean.

    The mean image is computed on the training set only and subtracted from
    both sets, so no test-set statistics leak into preprocessing.
    Returns (train_flat, test_flat) as float64 arrays of shape (n, pixels).
    """
    # BUG FIX: the `np.float` alias was removed in NumPy 1.24;
    # use the explicit np.float64 dtype instead.
    train_flat = train_X.reshape(train_X.shape[0], -1).astype(np.float64) / 255.0
    test_flat = test_X.reshape(test_X.shape[0], -1).astype(np.float64) / 255.0
    # Subtract mean
    mean_image = np.mean(train_flat, axis = 0)
    train_flat -= mean_image
    test_flat -= mean_image
    return train_flat, test_flat
# Load SVHN (capped sizes), flatten/normalise, then carve out a validation set.
train_X, train_y, test_X, test_y = load_svhn("data", max_train=10000, max_test=1000)
train_X, test_X = prepare_for_neural_network(train_X, test_X)
# Split train into train and val
train_X, train_y, val_X, val_y = random_split_train_val(train_X, train_y, num_val = 1000)
```
# Как всегда, начинаем с кирпичиков
Мы будем реализовывать необходимые нам слои по очереди. Каждый слой должен реализовать:
- прямой проход (forward pass), который генерирует выход слоя по входу и запоминает необходимые данные
- обратный проход (backward pass), который получает градиент по выходу слоя и вычисляет градиент по входу и по параметрам
Начнем с ReLU, у которого параметров нет.
```
# TODO: Implement ReLULayer layer in layers.py
# Note: you'll need to copy implementation of the gradient_check function from the previous assignment
# Numerically check the ReLU layer's backward pass on a small mixed-sign input.
X = np.array([[1,-2,3],
[-1, 2, 0.1]
])
assert check_layer_gradient(ReLULayer(), X)
```
А теперь реализуем полносвязный слой (fully connected layer), у которого будет два массива параметров: W (weights) и B (bias).
Все параметры наши слои будут использовать для параметров специальный класс `Param`, в котором будут храниться значения параметров и градиенты этих параметров, вычисляемые во время обратного прохода.
Это даст возможность аккумулировать (суммировать) градиенты из разных частей функции потерь, например, из cross-entropy loss и regularization loss.
```
# TODO: Implement FullyConnected layer forward and backward methods
# Check the gradient w.r.t. the layer input...
assert check_layer_gradient(FullyConnectedLayer(3, 4), X)
# TODO: Implement storing gradients for W and B
# ...and w.r.t. the weight and bias parameters.
assert check_layer_param_gradient(FullyConnectedLayer(3, 4), X, 'W')
assert check_layer_param_gradient(FullyConnectedLayer(3, 4), X, 'B')
```
## Создаем нейронную сеть
Теперь мы реализуем простейшую нейронную сеть с двумя полносвязным слоями и нелинейностью ReLU. Реализуйте функцию `compute_loss_and_gradients`, она должна запустить прямой и обратный проход через оба слоя для вычисления градиентов.
Не забудьте реализовать очистку градиентов в начале функции.
```
# TODO: In model.py, implement compute_loss_and_gradients function
# Build a tiny unregularized network and check its gradients numerically.
model = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 3, reg = 0)
loss = model.compute_loss_and_gradients(train_X[:2], train_y[:2])
# TODO Now implement backward pass and aggregate all of the params
check_model_gradient(model, train_X[:2], train_y[:2])
```
Теперь добавьте к модели регуляризацию - она должна прибавляться к loss и делать свой вклад в градиенты.
```
# TODO Now implement l2 regularization in the forward and backward pass
model_with_reg = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 3, reg = 1e1)
loss_with_reg = model_with_reg.compute_loss_and_gradients(train_X[:2], train_y[:2])
assert loss_with_reg > loss and not np.isclose(loss_with_reg, loss), \
"Loss with regularization (%2.4f) should be higher than without it (%2.4f)!" % (loss, loss_with_reg)
check_model_gradient(model_with_reg, train_X[:2], train_y[:2])
```
Также реализуем функцию предсказания (вычисления значения) модели на новых данных.
Какое значение точности мы ожидаем увидеть до начала тренировки?
```
# Finally, implement predict function!
# TODO: Implement predict function
# What would be the value we expect?
multiclass_accuracy(model_with_reg.predict(train_X[:30]), train_y[:30])
```
# Допишем код для процесса тренировки
```
model = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 2e-3);
dataset = Dataset(train_X, train_y, val_X, val_y);
trainer = Trainer(model, dataset, SGD(), num_epochs=100, batch_size=100,
learning_rate=5e-1, learning_rate_decay= 0.95);
# TODO Implement missing pieces in Trainer.fit function
# You should expect loss to go down and train and val accuracy go up for every epoch
loss_history, train_history, val_history = trainer.fit()
train_X[model.predict(train_X) != 1]
train_y
def ReLU(x):
    """Rectified linear unit for a scalar: return x if positive, else 0.

    Equivalent to the original if/else but written with the idiomatic
    `max` builtin; intended to be wrapped with np.vectorize for arrays.
    """
    return max(0, x)
ReLU_vec = np.vectorize(ReLU);
train_X[ReLU_vec(train_X) != 0]
val_X_W = model.first.forward(val_X)
val_X_W
model.second.forward(model.ReLU.forward(val_X_W))
plt.plot(train_history)
plt.plot(val_history)
plt.plot(loss_history)
```
# Улучшаем процесс тренировки
Мы реализуем несколько ключевых оптимизаций, необходимых для тренировки современных нейросетей.
## Уменьшение скорости обучения (learning rate decay)
Одна из необходимых оптимизаций во время тренировки нейронных сетей - постепенное уменьшение скорости обучения по мере тренировки.
Один из стандартных методов - уменьшение скорости обучения (learning rate) каждые N эпох на коэффициент d (часто называемый decay). Значения N и d, как всегда, являются гиперпараметрами и должны подбираться на основе эффективности на проверочных данных (validation data).
В нашем случае N будет равным 1.
```
# TODO Implement learning rate decay inside Trainer.fit method
# Decay should happen once per epoch
model = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 1e-3)
dataset = Dataset(train_X, train_y, val_X, val_y)
trainer = Trainer(model, dataset, SGD(), num_epochs=10, batch_size=100,
learning_rate=5e-1, learning_rate_decay=0.99)
initial_learning_rate = trainer.learning_rate
loss_history, train_history, val_history = trainer.fit()
assert trainer.learning_rate < initial_learning_rate, "Learning rate should've been reduced"
assert trainer.learning_rate > 0.5*initial_learning_rate, "Learning rate shouldn'tve been reduced that much!"
```
# Накопление импульса (Momentum SGD)
Другой большой класс оптимизаций - использование более эффективных методов градиентного спуска. Мы реализуем один из них - накопление импульса (Momentum SGD).
Этот метод хранит скорость движения, использует градиент для ее изменения на каждом шаге, и изменяет веса пропорционально значению скорости.
(Физическая аналогия: Вместо скорости градиенты теперь будут задавать ускорение, но будет присутствовать сила трения.)
```
velocity = momentum * velocity - learning_rate * gradient
w = w + velocity
```
`momentum` здесь коэффициент затухания, который тоже является гиперпараметром (к счастью, для него часто есть хорошее значение по умолчанию, типичный диапазон -- 0.8-0.99).
Несколько полезных ссылок, где метод разбирается более подробно:
http://cs231n.github.io/neural-networks-3/#sgd
https://distill.pub/2017/momentum/
```
# TODO: Implement MomentumSGD.update function in optim.py
model = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 1e-3)
dataset = Dataset(train_X, train_y, val_X, val_y)
trainer = Trainer(model, dataset, MomentumSGD(), num_epochs=10, batch_size=30,
learning_rate=5e-2, learning_rate_decay=0.99)
# You should see even better results than before!
loss_history, train_history, val_history = trainer.fit()
```
# Ну что, давайте уже тренировать сеть!
## Последний тест - переобучимся (overfit) на маленьком наборе данных
Хороший способ проверить, все ли реализовано корректно - переобучить сеть на маленьком наборе данных.
Наша модель обладает достаточной мощностью, чтобы приблизить маленький набор данных идеально, поэтому мы ожидаем, что на нем мы быстро дойдем до 100% точности на тренировочном наборе.
Если этого не происходит, то где-то была допущена ошибка!
```
data_size = 15
model = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 1e-1)
dataset = Dataset(train_X[:data_size], train_y[:data_size], val_X[:data_size], val_y[:data_size])
trainer = Trainer(model, dataset, SGD(), learning_rate=1e-1, num_epochs=80, batch_size=5)
# You should expect this to reach 1.0 training accuracy
loss_history, train_history, val_history = trainer.fit()
```
Теперь найдем гиперпараметры, для которых этот процесс сходится быстрее.
Если все реализовано корректно, то существуют параметры, при которых процесс сходится в **20** эпох или еще быстрее.
Найдите их!
```
# Now, tweak some hyper parameters and make it train to 1.0 accuracy in 20 epochs or less
model = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 0)
dataset = Dataset(train_X[:data_size], train_y[:data_size], val_X[:data_size], val_y[:data_size])
# TODO: Change any hyperparamers or optimizators to reach training accuracy in 20 epochs
trainer = Trainer(model, dataset, SGD(), learning_rate=1e-1, num_epochs=20, batch_size=3)
loss_history, train_history, val_history = trainer.fit()
```
# Итак, основное мероприятие!
Натренируйте лучшую нейросеть! Можно добавлять и изменять параметры, менять количество нейронов в слоях сети и как угодно экспериментировать.
Добейтесь точности лучше **40%** на validation set.
```
# Let's train the best one-hidden-layer network we can
learning_rates = 1e-4
reg_strength = 1e-3
learning_rate_decay = 0.999
hidden_layer_size = 128
num_epochs = 200
batch_size = 64
best_classifier = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 1e-3);
dataset = Dataset(train_X, train_y, val_X, val_y);
trainer = Trainer(best_classifier, dataset, MomentumSGD(), num_epochs=100, batch_size=100,
learning_rate=1e-1, learning_rate_decay= 0.99);
# TODO Implement missing pieces in Trainer.fit function
# You should expect loss to go down and train and val accuracy go up for every epoch
loss_history, train_history, val_history = trainer.fit();
best_val_accuracy = val_history[-1];
# TODO find the best hyperparameters to train the network
# Don't hesitate to add new values to the arrays above, perform experiments, use any tricks you want
# You should expect to get to at least 40% of valudation accuracy
# Save loss/train/history of the best classifier to the variables above
print('best validation accuracy achieved: %f' % best_val_accuracy)
plt.figure(figsize=(15, 7))
plt.subplot(211)
plt.title("Loss")
plt.plot(loss_history)
plt.subplot(212)
plt.title("Train/validation accuracy")
plt.plot(train_history)
plt.plot(val_history)
```
# Как обычно, посмотрим, как наша лучшая модель работает на тестовых данных
```
test_pred = best_classifier.predict(test_X)
test_accuracy = multiclass_accuracy(test_pred, test_y)
print('Neural net test set accuracy: %f' % (test_accuracy, ))
```
| github_jupyter |
# Project: Valuing real estate properties using machine learning
## Part 1: From EDA to data preparation
The objective of this project is to create a machine learning model that values real estate properties in Argentina.
For this we will use the dataset available at https://www.properati.com.ar.
This dataset contains the following features:
- **id**: Identifier of the ad. It is not unique: if the notice is updated by the real estate agency (new version of the notice) a new record is created with the same id but different dates: registration and cancellation.
- **ad_type**: Type of ad (Property, Development/Project).
- **start_date**: Date of entry of the ad.
- **end_date**: Date of cancellation of the ad.
- **created_on**: Date of registration of the first version of the ad.
- **lat**: Latitude.
- **lon**: Longitude.
- **l1**: Administrative level 1: country.
- **l2**: Administrative level 2: usually province.
- **l3**: Administrative level 3: usually city.
- **l4**: Administrative level 4: usually neighborhood.
- **l5**: Administrative level 5: not defined.
- **l6**: Administrative level 6: not defined.
- **rooms**: Number of environments (useful in Argentina).
- **bedrooms**: Number of bedrooms (useful in the rest of the countries).
- **bathrooms**: Number of bathrooms.
- **surface_total**: Total area in m².
- **surface_covered**: Covered area in m².
- **price**: Price published in the ad.
- **currency**: Currency of the published price.
- **price_period**: Price Period (Daily, Weekly, Monthly)
- **title**: Title of the ad.
- **description**: Ad Description.
- **property_type**: Type of property (House, Apartment, PH, plot of land, etc.).
- **operation_type**: Type of operation (Sale, Rent).
```
#Importings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
from sklearn.impute import SimpleImputer
%matplotlib inline
#Helper function
def cat_plot(data, col):
    """Bar-plot the percentage of ads falling in each category of *col*.

    Draws a red horizontal reference line at 5% and returns the axes
    object produced by pandas' plotting backend.
    """
    pct_by_category = pd.Series(data[col].value_counts() / len(data) * 100)
    ax = pct_by_category.sort_values(ascending=False).plot.bar()
    ax.set_xlabel(col)
    ax.axhline(y=5, color='red')
    ax.set_ylabel('Percentage of ads')
    plt.show()
    return ax
#Loding the data
dataset = pd.read_csv("ar_properties.csv")
```
## Exploratory Data Analysis
```
dataset.head()
print("The dataframe contains {} rows and {} columns".format(dataset.shape[0], dataset.shape[1]))
dataset.info()
#Checking duplicate ads
dataset.duplicated(subset="id").sum()
```
This dataframe has no duplicated values but we can see that it has NaNs values. Besides, taking in account the objective of this project we will evaluate which columns are necessary. Those not necessary will be discarded.
```
#Drop not necessary columns
df = dataset.drop(["ad_type","id", "start_date", "end_date", "created_on", "lat", "lon", "l6", "title", "description"], axis=1)
df.shape
```
For some reason, there are publications of properties that have a larger covered surface area than the total surface area. This is not correct, so this dataset will be filtered.
```
#Restriction
mask = df.surface_covered <= df.surface_total
df = df[mask]
df.reset_index(inplace=True, drop=True)
```
### Categorical features analysis
```
#Categorical features analysis:
categorical = df.select_dtypes(include="object")
categorical_list = categorical.columns.to_list()
categorical.isna().sum()
```
The dataset contains several features with NaN values. Therefore, handling missing values will be necessary.
### Feature analysis
**l1 column**
```
df.l1.value_counts()
cat_plot(df, "l1")
```
We can see that most ads are from Argentina. It is our objective to construct a model that predicts real estate values in Argentina. So we will discard those ads from other countries. We chose a single country since the real estate market will vary strongly according to state policies that are foreign to the data provided. In addition, the other countries represent less than 5% of the ads in the dataset.
```
mask1 = df.l1 == "Argentina"
df = df[mask1]
```
**l2 column**
```
cat_plot(df, "l2")
```
We note that most of the publications belong to the regions of Buenos Aires and the province of Santa Fe. The other regions have publications that account for less than 5% of the total number of publications.
```
list(set(df.l2))
bsas = [x for x in list(set(df.l2)) if x.startswith('B')]
bsas
interior = [x for x in list(set(df.l2)) if (x != "Capital Federal") and (x not in bsas)]
interior
df.l2.replace(to_replace=interior, value= "Interior", inplace=True)
df.l2.replace(to_replace=bsas, value= "Buenos Aires", inplace=True)
cat_plot(df, "l2")
```
**l3 column**
```
len(df.l3.unique())
cat_plot(df, "l3")
```
This feature would introduce high cardinality into the model. There are 698 different locations, most of them with low prevalence. This would lead to overfitting problems.
```
df = df.drop("l3", axis=1)
```
**currency column**
```
df.currency.fillna("unknown", inplace=True)
df.currency.value_counts()
cat_plot(df, "currency")
```
We can observe that for most advertisements the currency is dollars. As this data is directly related to the target "precio" (price), we should unify the currency used. One option would be to convert prices in Argentine pesos to dollars. Since the Argentine economy is very unstable and the value of the dollar is so variable and dependent on several factors, it is difficult to follow this option and obtain a reliable model. Therefore, we will choose to eliminate those publications made in currencies other than dollars.
```
to_replace = {"unknown": np.nan}
df.currency.replace(to_replace, value=None, inplace=True)
mask2 = df.currency == "USD"
df = df[mask2]
```
**property_type column**
```
df.property_type.value_counts()
cat_plot(df, "property_type")
```
Some property type categories represent less than 5% of the published ads. We will only retain those whose number of publications is greater than 5%. But we can group those categories under "Otro" (others). We already have an "Otro" category so one solution could be to append those ads whose property_type is under 5% to "Otro" category.
```
df.property_type.replace(["Casa de campo", "Cochera", "Depósito", "Lote", "Local comercial", "Oficina"], value= "Otro", inplace=True)
cat_plot(df, "property_type")
```
**price_period column**
```
df.price_period.fillna("unknown", inplace=True)
df.price_period.value_counts()
cat_plot(df, "price_period")
```
Most publications are unknown and monthly ads. Daily and weekly publications do not exceed 5% of the publications.
```
print("Percentage of unknown =" ,round(df[(df['price_period'] =='unknown')].shape[0]/df.shape[0]*100,2),'%')
```
Here we have two options: discard unknown values and select only those monthly posted or impute unknown values as monthly posted ads.
```
df.price_period.replace(to_replace, value=None, inplace=True)
#We select only those ads that are monthly paid
mask3 = df.price_period == "Mensual"
df = df[mask3]
```
**operation_type column**
```
df.operation_type.value_counts()
cat_plot(df, "operation_type")
```
The type of operation of most of the publications is sale. Sale and rent are very different operations that would definitely influence the target. As most operations are sale we will only take those.
```
#df.operation_type.replace(to_replace="Alquiler temporal", value="Alquiler", inplace=True)
#df = df.drop("operation_type", axis=1)
```
### Outliers detection and elimination
```
df.describe()
```
We can see that there are indeed outliers. It is unlikely that there are properties with 35 rooms, 123 bedrooms, or 20 bathrooms, for example.
```
#Numeric features
numeric_cols = df.select_dtypes(include=["int", "float"]).columns.tolist()
plt.figure(figsize = (20,20))
plt.subplot(3, 2, 1)
sns.boxplot(data = df, x= 'rooms', y = 'property_type', palette = 'colorblind')
plt.title('Rooms boxplot')
plt.xlabel('Rooms')
plt.ylabel('Property types')
plt.subplot(3, 2, 2)
sns.boxplot(data = df, x= 'bedrooms', y = 'property_type', palette = 'colorblind')
plt.title('Bedrooms boxplot')
plt.xlabel('Bedrooms')
plt.ylabel('Property types')
plt.subplot(3, 2, 3)
sns.boxplot(data = df, x= 'bathrooms', y = 'property_type', palette = 'colorblind')
plt.title('Bathrooms boxplot')
plt.xlabel('Bathrooms')
plt.ylabel('Property types')
plt.subplot(3, 2, 4)
sns.boxplot(data = df, x= 'surface_total', y = 'property_type', palette = 'colorblind')
plt.title('Total area boxplot')
plt.xlabel('Total area')
plt.ylabel('Property type')
plt.subplot(3, 2, 5)
sns.boxplot(data = df, x= 'surface_covered', y = 'property_type', palette = 'colorblind')
plt.title('Covered area boxplot')
plt.xlabel('Covered area')
plt.ylabel('Property type')
plt.subplot(3, 2, 6)
sns.boxplot(data = df, x= 'price', y = 'property_type', palette = 'colorblind')
plt.title('Price boxplot')
plt.xlabel('Price')
plt.ylabel('Property type')
plt.show()
def remove_outliers(dfx):
    """Drop rows containing any per-column outlier by the 1.5*IQR rule.

    A value is an outlier when it falls outside
    [Q1 - 1.5*IQR, Q3 + 1.5*IQR] for its column; a row is removed when
    any of its columns has an outlier. Returns a filtered copy.
    """
    q1 = dfx.quantile(0.25)
    q3 = dfx.quantile(0.75)
    # Fix: the original computed q1/q3/iqr and then ignored them,
    # recomputing the quantiles inline in the filter expression.
    cut_off = (q3 - q1) * 1.5
    lower, upper = q1 - cut_off, q3 + cut_off
    outlier_rows = ((dfx < lower) | (dfx > upper)).any(axis=1)
    return dfx[~outlier_rows]
df2 = remove_outliers(df)
df2.shape
```
### Missing values analysis
```
num_nans = df2.isna().sum()
num_nans
total_ads = len(df2)
cols_df2 = df2.columns.to_list()
num_nans = df2.isna().sum()
fig = df2.isna().sum().sort_values(ascending=False).plot.bar()
fig.set_xlabel("Column")
fig.set_ylabel("Number of NaNs")
plt.show()
for col in cols_df2:
print(f"Percentage of NaNs in {col} =", round(df2[(df2[col].isna())].shape[0] / df2.shape[0]*100,2), '%')
```
A large number of missing values are observed in the l4 and l5 features (more than 20% NaN values). Those features refer to regions or neighborhoods that would be difficult to impute. Also, imputing them would create a great bias. Therefore, these features will be eliminated.
```
df3 = df2.drop(["l4", "l5"], axis=1) # Eliminate NaNs values where %NaNs > 20%
df3.reset_index(inplace=True, drop=True)
```
### **MCAR, MAR or MNAR**
```
msno.matrix(df3, figsize=(15,5))
df3_sorted = df3.sort_values("property_type")
msno.matrix(df3_sorted, figsize=(15,5))
df3_sorted.property_type.unique()
```
Even when we sort by "property_type", the values of l3 still look random. This could prove these values are missing completely at random (MCAR). For "rooms", "bedrooms" and "bathrooms", NaN values could be missing at random (MAR). When the dataset is sorted by "property_type", a grouping behaviour is observed in the NaN values of those features. Maybe this is due to property types that do not have a room, bedroom or bathroom, like "Lote" or "Cochera".
Another way would be by plotting a heat map to see if the missingness has any correlation:
```
fig = msno.heatmap(df3, figsize=(15,5))
plt.show()
```
Missing observations in rooms, bedrooms and bathrooms have little correlation.
### Missing values imputation
```
#Missing values by property type
df3.set_index("property_type")[["bedrooms", "rooms", "bathrooms"]].isnull().groupby(level=0).sum()
# Imputation by property type: fill missing bathroom/room/bedroom counts
# with the mode of that column within the same property type.
property_types = df3.property_type.unique().tolist()
# Compute the most frequent value per property type.
# NOTE(review): pd.Series.mode can return several values when there is a
# tie; the dict entry would then be an array, not a scalar — confirm each
# group has a unique mode before relying on the fillna below.
most_frequent_bath = df3.groupby(['property_type'])['bathrooms'].agg(pd.Series.mode)
most_frequent_bath = dict(most_frequent_bath)
print(most_frequent_bath)
most_frequent_rooms = df3.groupby(['property_type'])['rooms'].agg(pd.Series.mode)
most_frequent_rooms = dict(most_frequent_rooms)
print(most_frequent_rooms)
most_frequent_bed = df3.groupby('property_type')['bedrooms'].agg(pd.Series.mode)
most_frequent_bed = dict(most_frequent_bed)
print(most_frequent_bed)
# Map each row's property_type to its modal value and use it to fill NaNs.
df3.bathrooms = df3.bathrooms.fillna(df3.property_type.map(most_frequent_bath))
df3.rooms = df3.rooms.fillna(df3.property_type.map(most_frequent_rooms))
df3.bedrooms = df3.bedrooms.fillna(df3.property_type.map(most_frequent_bed))
df3.shape
```
### Numerical features analysis
```
#Numeric features
num_cols = df3.select_dtypes(include=["int", "float"]).columns.tolist()
print(num_cols)
```
**rooms**
```
df3.groupby("property_type")["rooms"].describe()
```
**bedrooms**
```
df3.groupby("property_type")["bedrooms"].describe()
```
Notice that apartments ("Departamento") have negative bedroom values. This is not correct, so it needs to be fixed.
```
df3[df3.property_type == "Departamento"]["bedrooms"].hist()
```
**bathrooms**
```
df3.groupby("property_type")["bathrooms"].describe()
```
**surface_total**
```
df3.groupby("property_type")["surface_total"].describe()
```
**surface_covered**
```
df3.groupby("property_type")["surface_covered"].describe()
```
**price**
```
df3.groupby("property_type")["price"].describe()
```
### Target distribution
```
#Target distribution
sns.histplot(data = df3, x="price", bins=10)
# create a copy of data
data_copy = df3.copy()# create a new feature Log_Price
data_copy['Log_Price'] = np.log(df3['price'])
from sklearn.preprocessing import PowerTransformer
pt = PowerTransformer(method='box-cox')
a = pt.fit_transform(df3[['price']])
data_copy['box_cox_price'] = a
sns.displot(data=data_copy, x="box_cox_price", bins=10)
plt.title("Histogram of Price")
plt.show()
```
### Final dataset:
```
df3.reset_index(inplace=True, drop=True)
final_df = df3.drop(["l1", "currency"], axis=1)
final_df.head()
final_df.describe()
sns.pairplot(final_df)
final_df.corr()
final_df.to_csv('Properaty_data_clean.csv', index=False)
```
| github_jupyter |
```
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import statistics
from scipy import stats
buldy_RGG_50_rep100_045 = pd.read_csv('Raw_data/Processed/proc_buldy_RGG_50_rep100_045.csv')
del buldy_RGG_50_rep100_045['Unnamed: 0']
buldy_RGG_50_rep100_045
buldy_RGG_50_rep100_067 = pd.read_csv('proc_buldy_RGG_50_rep100_067.csv')
del buldy_RGG_50_rep100_067['Unnamed: 0']
buldy_RGG_50_rep100_067
buldy_RGG_200_rep100_0685 = pd.read_csv('proc_buldy_RGG_200_rep100_0685.csv')
del buldy_RGG_200_rep100_0685['Unnamed: 0']
buldy_RGG_200_rep100_0685
buldy_RGG_200_rep100_095 = pd.read_csv('proc_buldy_RGG_200_rep100_095.csv')
del buldy_RGG_200_rep100_095['Unnamed: 0']
buldy_RGG_200_rep100_095
def _split_attack_groups(df):
    """Split rows 0..399 into four frames by index mod 4.

    Row i (i < 400) is kept only in frame i % 4 (order: rgg-rgg,
    rgg-rand, rand-rgg, rand-rand); each frame's index is reset.
    Any rows with index >= 400 are kept in every frame, matching the
    original drop-list behaviour exactly.
    """
    frames = []
    for offset in range(4):
        drop = [i for i in range(400) if i % 4 != offset]
        frames.append(df.drop(drop).reset_index(drop=True))
    return tuple(frames)


# The original repeated the same 30-line drop-list routine four times,
# once per dataset; the helper above deduplicates it.
(buldy_RGG_50_rep100_045_rgg_rgg_data,
 buldy_RGG_50_rep100_045_rgg_rand_data,
 buldy_RGG_50_rep100_045_rand_rgg_data,
 buldy_RGG_50_rep100_045_rand_rand_data) = _split_attack_groups(buldy_RGG_50_rep100_045)
buldy_RGG_50_rep100_045_rgg_rgg_data
(buldy_RGG_50_rep100_067_rgg_rgg_data,
 buldy_RGG_50_rep100_067_rgg_rand_data,
 buldy_RGG_50_rep100_067_rand_rgg_data,
 buldy_RGG_50_rep100_067_rand_rand_data) = _split_attack_groups(buldy_RGG_50_rep100_067)
buldy_RGG_50_rep100_067_rgg_rgg_data
(buldy_RGG_200_rep100_0685_rgg_rgg_data,
 buldy_RGG_200_rep100_0685_rgg_rand_data,
 buldy_RGG_200_rep100_0685_rand_rgg_data,
 buldy_RGG_200_rep100_0685_rand_rand_data) = _split_attack_groups(buldy_RGG_200_rep100_0685)
buldy_RGG_200_rep100_0685_rgg_rgg_data
(buldy_RGG_200_rep100_095_rgg_rgg_data,
 buldy_RGG_200_rep100_095_rgg_rand_data,
 buldy_RGG_200_rep100_095_rand_rgg_data,
 buldy_RGG_200_rep100_095_rand_rand_data) = _split_attack_groups(buldy_RGG_200_rep100_095)
buldy_RGG_200_rep100_095_rgg_rgg_data
# Kolmogorov–Smirnov tests against 'norm' to check normality before
# choosing a test statistic.
# NOTE(review): kstest(..., 'norm') compares with the *standard* normal
# N(0, 1); the data are not standardized first — confirm this is intended.
stats.kstest(buldy_RGG_200_rep100_0685_rand_rgg_data['alive_nodes'], 'norm')
stats.kstest(buldy_RGG_200_rep100_0685_rand_rand_data['alive_nodes'], 'norm')
# Non-parametric comparison of alive-node counts between the two
# attack strategies for the 200-node, 0.685-threshold dataset.
stats.mannwhitneyu(buldy_RGG_200_rep100_0685_rand_rgg_data['alive_nodes'], buldy_RGG_200_rep100_0685_rand_rand_data['alive_nodes'])
stats.kstest(buldy_RGG_200_rep100_095_rgg_rgg_data['alive_nodes'], 'norm')
stats.kstest(buldy_RGG_200_rep100_095_rgg_rand_data['alive_nodes'], 'norm')
# Same comparison for the 200-node, 0.95-threshold dataset.
stats.mannwhitneyu(buldy_RGG_200_rep100_095_rgg_rgg_data['alive_nodes'], buldy_RGG_200_rep100_095_rgg_rand_data['alive_nodes'])
```
# Data Dividing Done
# -----------------------------------------------------------------------------------------------
# Plotting Starts
## find_inter_thres
```
# NOTE(review): `find_inter_thres` is never loaded in this chunk —
# presumably read from a CSV in an earlier (removed) cell; confirm before
# running this cell.
find_inter_thres_list = []
for col in find_inter_thres.columns:
    if col != 'rep':
        # Average each threshold column over repetitions ('rep' is skipped).
        find_inter_thres_list.append(statistics.mean(find_inter_thres[col].values.tolist()))
print(find_inter_thres_list)
# Swept inter-layer threshold values (x-axis of the plot below).
Xs = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1.0]
# Normalize by 500 — presumably the total node count; TODO confirm.
plt.plot(Xs, [i/500 for i in find_inter_thres_list])
plt.xticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
# Mark the chosen operating threshold.
plt.axvline(x=0.7, color='r', linestyle='--')
plt.savefig('find_inter_thres.png')
```
## rep5_04_002
```
# Aggregate each 5-replication group into per-group means for the three
# tracked columns, one dict per attack strategy.
# Fix: the original duplicated the whole loop body in an `if i == 0` /
# `else` pair (create-list vs append) and computed an unused `target`
# list; pre-initializing the lists removes both.
_KEYS = ('intra_thres', 'alive_nodes', 'init_mean_deg')
rgg_rgg_dict = {k: [] for k in _KEYS}
rgg_rand_dict = {k: [] for k in _KEYS}
rand_rgg_dict = {k: [] for k in _KEYS}
rand_rand_dict = {k: [] for k in _KEYS}
_pairs = [(rgg_rgg_dict, rgg_rgg_data), (rgg_rand_dict, rgg_rand_data),
          (rand_rgg_dict, rand_rgg_data), (rand_rand_dict, rand_rand_data)]
for i in range(20):
    for agg, source in _pairs:
        # Rows i*5 .. i*5+4 form one replication group of five runs.
        chunk = source[i*5 : i*5 + 5]
        for key in _KEYS:
            agg[key].append(statistics.mean(chunk[key].values.tolist()))
# Mean alive nodes vs intra-layer threshold, one curve per strategy.
plt.plot(rgg_rgg_dict['intra_thres'], rgg_rgg_dict['alive_nodes'])
plt.plot(rgg_rgg_dict['intra_thres'], rgg_rand_dict['alive_nodes'])
plt.plot(rgg_rgg_dict['intra_thres'], rand_rgg_dict['alive_nodes'])
plt.plot(rgg_rgg_dict['intra_thres'], rand_rand_dict['alive_nodes'])
plt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])
plt.title('Mean Alive nodes')
plt.show()
# Same curves, x-axis rescaled by p — presumably the attack fraction;
# TODO confirm the meaning of p against the experiment setup.
p = 0.9
plt.plot([p * i for i in rgg_rgg_dict['init_mean_deg']], rgg_rgg_dict['alive_nodes'])
plt.plot([p * i for i in rgg_rgg_dict['init_mean_deg']], rgg_rand_dict['alive_nodes'])
plt.plot([p * i for i in rgg_rgg_dict['init_mean_deg']], rand_rgg_dict['alive_nodes'])
plt.plot([p * i for i in rgg_rgg_dict['init_mean_deg']], rand_rand_dict['alive_nodes'])
plt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])
plt.title('Mean Alive nodes')
plt.show()
```
## att30_rep5_04_002
```
# att30_rep5_04_002: each group of 5 consecutive rows of the *_2_data frames is
# one parameter setting; collect the per-setting mean of each column of
# interest, then plot mean alive nodes against the intra-layer threshold and
# against p * <k>.
rgg_rgg_2_dict = {}
rgg_rand_2_dict = {}
rand_rgg_2_dict = {}
rand_rand_2_dict = {}
for i in range(50):
    # Pair each result dict with its 5-row slice for this setting.
    groups = [
        (rgg_rgg_2_dict, rgg_rgg_2_data[i*5 : i*5 + 5]),
        (rgg_rand_2_dict, rgg_rand_2_data[i*5 : i*5 + 5]),
        (rand_rgg_2_dict, rand_rgg_2_data[i*5 : i*5 + 5]),
        (rand_rand_2_dict, rand_rand_2_data[i*5 : i*5 + 5]),
    ]
    for result, chunk in groups:
        for col in ('intra_thres', 'alive_nodes', 'init_mean_deg'):
            # setdefault replaces the original duplicated i == 0 / else branches.
            result.setdefault(col, []).append(statistics.mean(chunk[col].values.tolist()))
# Mean alive nodes vs. intra-layer threshold (x axis taken from the RGG-RGG run).
plt.plot(rgg_rgg_2_dict['intra_thres'], rgg_rgg_2_dict['alive_nodes'])
plt.plot(rgg_rgg_2_dict['intra_thres'], rgg_rand_2_dict['alive_nodes'])
plt.plot(rgg_rgg_2_dict['intra_thres'], rand_rgg_2_dict['alive_nodes'])
plt.plot(rgg_rgg_2_dict['intra_thres'], rand_rand_2_dict['alive_nodes'])
plt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])
plt.title('Mean Alive nodes')
plt.show()
# Mean alive nodes vs. p * <k>; `deg` avoids shadowing the loop variable `i`.
p = 0.9
plt.plot([p * deg for deg in rgg_rgg_2_dict['init_mean_deg']], rgg_rgg_2_dict['alive_nodes'])
plt.plot([p * deg for deg in rgg_rgg_2_dict['init_mean_deg']], rgg_rand_2_dict['alive_nodes'])
plt.plot([p * deg for deg in rgg_rgg_2_dict['init_mean_deg']], rand_rgg_2_dict['alive_nodes'])
plt.plot([p * deg for deg in rgg_rgg_2_dict['init_mean_deg']], rand_rand_2_dict['alive_nodes'])
plt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])
plt.title('Mean Alive nodes')
plt.show()
```
## buldy_RGG_rep30_03_0005
```
# buldy_RGG_rep30_03_0005 (500 nodes, 2 layers, 50-node attack): each group of
# 30 consecutive rows is one parameter setting.  Collect per-setting column
# means plus the fraction of repetitions that survived the cascade (both
# alive_nodes and fin_larg_comp non-zero).
buldy_RGG_rep30_03_0005_rgg_rgg_dict = {}
buldy_RGG_rep30_03_0005_rgg_rand_dict = {}
buldy_RGG_rep30_03_0005_rand_rgg_dict = {}
buldy_RGG_rep30_03_0005_rand_rand_dict = {}
for i in range(100):
    # Absolute row labels of this setting's 30 repetitions (DataFrame slices
    # keep the original index, so label-based lookup uses these positions).
    target = list(range(i*30, (i+1)*30))
    groups = [
        (buldy_RGG_rep30_03_0005_rgg_rgg_dict, buldy_RGG_rep30_03_0005_rgg_rgg_data[i*30 : (i+1)*30]),
        (buldy_RGG_rep30_03_0005_rgg_rand_dict, buldy_RGG_rep30_03_0005_rgg_rand_data[i*30 : (i+1)*30]),
        (buldy_RGG_rep30_03_0005_rand_rgg_dict, buldy_RGG_rep30_03_0005_rand_rgg_data[i*30 : (i+1)*30]),
        (buldy_RGG_rep30_03_0005_rand_rand_dict, buldy_RGG_rep30_03_0005_rand_rand_data[i*30 : (i+1)*30]),
    ]
    for result, chunk in groups:
        # Count repetitions where the network survived the cascade.
        alive = sum(
            1 for index in target
            if chunk['alive_nodes'][index] != 0 and chunk['fin_larg_comp'][index] != 0
        )
        for col in ('intra_thres', 'alive_nodes', 'init_mean_deg'):
            # setdefault replaces the original duplicated i == 0 / else branches.
            result.setdefault(col, []).append(statistics.mean(chunk[col].values.tolist()))
        result.setdefault('alive ratio', []).append(alive / 30)
# Mean alive nodes vs. intra-layer threshold.
plt.plot(buldy_RGG_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_rep30_03_0005_rgg_rgg_dict['alive_nodes'])
plt.plot(buldy_RGG_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_rep30_03_0005_rgg_rand_dict['alive_nodes'])
plt.plot(buldy_RGG_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_rep30_03_0005_rand_rgg_dict['alive_nodes'])
plt.plot(buldy_RGG_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_rep30_03_0005_rand_rand_dict['alive_nodes'])
plt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])
plt.title('Mean Alive nodes')
plt.show()
# Mean alive nodes vs. p * <k>.
p = 0.9
plt.plot([p * deg for deg in buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_rep30_03_0005_rgg_rgg_dict['alive_nodes'])
plt.plot([p * deg for deg in buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_rep30_03_0005_rgg_rand_dict['alive_nodes'])
plt.plot([p * deg for deg in buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_rep30_03_0005_rand_rgg_dict['alive_nodes'])
plt.plot([p * deg for deg in buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_rep30_03_0005_rand_rand_dict['alive_nodes'])
plt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])
plt.title('500 Nodes, 2 Layers, 50 attack size')
plt.xlabel('p<k>')
plt.ylabel('mean alive nodes')
plt.savefig('buldy_RGG_rep30_03_0005.png')
plt.show()
# Survival ratio vs. p * <k>.
p = 0.9
plt.plot([p * deg for deg in buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_rep30_03_0005_rgg_rgg_dict['alive ratio'])
plt.plot([p * deg for deg in buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_rep30_03_0005_rgg_rand_dict['alive ratio'])
plt.plot([p * deg for deg in buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_rep30_03_0005_rand_rgg_dict['alive ratio'])
plt.plot([p * deg for deg in buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_rep30_03_0005_rand_rand_dict['alive ratio'])
plt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])
plt.title('500 Nodes, 2 Layers, 50 attack size')
plt.xlabel('p<k>')
plt.ylabel('alive ratio')
plt.savefig('buldy_RGG_rep30_03_0005_ratio.png')
plt.show()
```
## buldy_RGG_100_rep30_03_0005
```
# buldy_RGG_100_rep30_03_0005 (500 nodes, 2 layers, 100-node attack): each
# group of 30 consecutive rows is one parameter setting.  Collect per-setting
# column means plus the fraction of repetitions that survived the cascade
# (both alive_nodes and fin_larg_comp non-zero).
buldy_RGG_100_rep30_03_0005_rgg_rgg_dict = {}
buldy_RGG_100_rep30_03_0005_rgg_rand_dict = {}
buldy_RGG_100_rep30_03_0005_rand_rgg_dict = {}
buldy_RGG_100_rep30_03_0005_rand_rand_dict = {}
for i in range(100):
    # Absolute row labels of this setting's 30 repetitions (DataFrame slices
    # keep the original index, so label-based lookup uses these positions).
    target = list(range(i*30, (i+1)*30))
    groups = [
        (buldy_RGG_100_rep30_03_0005_rgg_rgg_dict, buldy_RGG_100_rep30_03_0005_rgg_rgg_data[i*30 : (i+1)*30]),
        (buldy_RGG_100_rep30_03_0005_rgg_rand_dict, buldy_RGG_100_rep30_03_0005_rgg_rand_data[i*30 : (i+1)*30]),
        (buldy_RGG_100_rep30_03_0005_rand_rgg_dict, buldy_RGG_100_rep30_03_0005_rand_rgg_data[i*30 : (i+1)*30]),
        (buldy_RGG_100_rep30_03_0005_rand_rand_dict, buldy_RGG_100_rep30_03_0005_rand_rand_data[i*30 : (i+1)*30]),
    ]
    for result, chunk in groups:
        # Count repetitions where the network survived the cascade.
        alive = sum(
            1 for index in target
            if chunk['alive_nodes'][index] != 0 and chunk['fin_larg_comp'][index] != 0
        )
        for col in ('intra_thres', 'alive_nodes', 'init_mean_deg'):
            # setdefault replaces the original duplicated i == 0 / else branches.
            result.setdefault(col, []).append(statistics.mean(chunk[col].values.tolist()))
        result.setdefault('alive ratio', []).append(alive / 30)
# Mean alive nodes vs. intra-layer threshold.
plt.plot(buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['alive_nodes'])
plt.plot(buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_100_rep30_03_0005_rgg_rand_dict['alive_nodes'])
plt.plot(buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_100_rep30_03_0005_rand_rgg_dict['alive_nodes'])
plt.plot(buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_100_rep30_03_0005_rand_rand_dict['alive_nodes'])
plt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])
plt.title('Mean Alive nodes')
plt.show()
# Mean alive nodes vs. p * <k>.
p = 0.8
plt.plot([p * deg for deg in buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['alive_nodes'])
plt.plot([p * deg for deg in buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_100_rep30_03_0005_rgg_rand_dict['alive_nodes'])
plt.plot([p * deg for deg in buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_100_rep30_03_0005_rand_rgg_dict['alive_nodes'])
plt.plot([p * deg for deg in buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_100_rep30_03_0005_rand_rand_dict['alive_nodes'])
plt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])
plt.title('500 Nodes, 2 Layers, 100 attack size')
plt.xlabel('p<k>')
plt.ylabel('mean alive nodes')
plt.savefig('buldy_RGG_100_rep30_03_0005.png')
plt.show()
# Survival ratio vs. p * <k>.
p = 0.8
plt.plot([p * deg for deg in buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['alive ratio'])
plt.plot([p * deg for deg in buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_100_rep30_03_0005_rgg_rand_dict['alive ratio'])
plt.plot([p * deg for deg in buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_100_rep30_03_0005_rand_rgg_dict['alive ratio'])
plt.plot([p * deg for deg in buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_100_rep30_03_0005_rand_rand_dict['alive ratio'])
plt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])
plt.title('500 Nodes, 2 Layers, 100 attack size')
plt.xlabel('p<k>')
plt.ylabel('alive ratio')
plt.savefig('buldy_RGG_100_rep30_03_0005_ratio.png')
plt.show()
```
## buldy_RGG_200_rep30_03_0005
```
# buldy_RGG_200_rep30_03_0005 (500 nodes, 2 layers, 200-node attack): each
# group of 30 consecutive rows is one parameter setting.  Collect per-setting
# column means plus the fraction of repetitions that survived the cascade
# (both alive_nodes and fin_larg_comp non-zero).
buldy_RGG_200_rep30_03_0005_rgg_rgg_dict = {}
buldy_RGG_200_rep30_03_0005_rgg_rand_dict = {}
buldy_RGG_200_rep30_03_0005_rand_rgg_dict = {}
buldy_RGG_200_rep30_03_0005_rand_rand_dict = {}
for i in range(100):
    # Absolute row labels of this setting's 30 repetitions (DataFrame slices
    # keep the original index, so label-based lookup uses these positions).
    target = list(range(i*30, (i+1)*30))
    groups = [
        (buldy_RGG_200_rep30_03_0005_rgg_rgg_dict, buldy_RGG_200_rep30_03_0005_rgg_rgg_data[i*30 : (i+1)*30]),
        (buldy_RGG_200_rep30_03_0005_rgg_rand_dict, buldy_RGG_200_rep30_03_0005_rgg_rand_data[i*30 : (i+1)*30]),
        (buldy_RGG_200_rep30_03_0005_rand_rgg_dict, buldy_RGG_200_rep30_03_0005_rand_rgg_data[i*30 : (i+1)*30]),
        (buldy_RGG_200_rep30_03_0005_rand_rand_dict, buldy_RGG_200_rep30_03_0005_rand_rand_data[i*30 : (i+1)*30]),
    ]
    for result, chunk in groups:
        # Count repetitions where the network survived the cascade.
        alive = sum(
            1 for index in target
            if chunk['alive_nodes'][index] != 0 and chunk['fin_larg_comp'][index] != 0
        )
        for col in ('intra_thres', 'alive_nodes', 'init_mean_deg'):
            # setdefault replaces the original duplicated i == 0 / else branches.
            result.setdefault(col, []).append(statistics.mean(chunk[col].values.tolist()))
        result.setdefault('alive ratio', []).append(alive / 30)
# Mean alive nodes vs. intra-layer threshold.
plt.plot(buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['alive_nodes'])
plt.plot(buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_200_rep30_03_0005_rgg_rand_dict['alive_nodes'])
plt.plot(buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_200_rep30_03_0005_rand_rgg_dict['alive_nodes'])
plt.plot(buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_200_rep30_03_0005_rand_rand_dict['alive_nodes'])
plt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])
plt.title('Mean Alive nodes')
plt.show()
# Mean alive nodes vs. p * <k>.
p = 0.6
plt.plot([p * deg for deg in buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['alive_nodes'])
plt.plot([p * deg for deg in buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_200_rep30_03_0005_rgg_rand_dict['alive_nodes'])
plt.plot([p * deg for deg in buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_200_rep30_03_0005_rand_rgg_dict['alive_nodes'])
plt.plot([p * deg for deg in buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_200_rep30_03_0005_rand_rand_dict['alive_nodes'])
plt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])
plt.title('500 Nodes, 2 Layers, 200 attack size')
plt.xlabel('p<k>')
plt.ylabel('mean alive nodes')
plt.savefig('buldy_RGG_200_rep30_03_0005.png')
plt.show()
# Survival ratio vs. p * <k>.
p = 0.6
plt.plot([p * deg for deg in buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['alive ratio'])
plt.plot([p * deg for deg in buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_200_rep30_03_0005_rgg_rand_dict['alive ratio'])
plt.plot([p * deg for deg in buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_200_rep30_03_0005_rand_rgg_dict['alive ratio'])
plt.plot([p * deg for deg in buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_200_rep30_03_0005_rand_rand_dict['alive ratio'])
plt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])
plt.title('500 Nodes, 2 Layers, 200 attack size')
plt.xlabel('p<k>')
plt.ylabel('alive ratio')
plt.savefig('buldy_RGG_200_rep30_03_0005_ratio.png')
plt.show()
```
## buldy_RGG_30_rep30_04_0007
```
# buldy_RGG_30_rep30_04_0007 (300 nodes, 2 layers, 30-node attack): each group
# of 30 consecutive rows is one parameter setting.  Collect per-setting column
# means plus the fraction of repetitions that survived the cascade (both
# alive_nodes and fin_larg_comp non-zero).
buldy_RGG_30_rep30_04_0007_rgg_rgg_dict = {}
buldy_RGG_30_rep30_04_0007_rgg_rand_dict = {}
buldy_RGG_30_rep30_04_0007_rand_rgg_dict = {}
buldy_RGG_30_rep30_04_0007_rand_rand_dict = {}
for i in range(100):
    # Absolute row labels of this setting's 30 repetitions (DataFrame slices
    # keep the original index, so label-based lookup uses these positions).
    target = list(range(i*30, (i+1)*30))
    groups = [
        (buldy_RGG_30_rep30_04_0007_rgg_rgg_dict, buldy_RGG_30_rep30_04_0007_rgg_rgg_data[i*30 : (i+1)*30]),
        (buldy_RGG_30_rep30_04_0007_rgg_rand_dict, buldy_RGG_30_rep30_04_0007_rgg_rand_data[i*30 : (i+1)*30]),
        (buldy_RGG_30_rep30_04_0007_rand_rgg_dict, buldy_RGG_30_rep30_04_0007_rand_rgg_data[i*30 : (i+1)*30]),
        (buldy_RGG_30_rep30_04_0007_rand_rand_dict, buldy_RGG_30_rep30_04_0007_rand_rand_data[i*30 : (i+1)*30]),
    ]
    for result, chunk in groups:
        # Count repetitions where the network survived the cascade.
        alive = sum(
            1 for index in target
            if chunk['alive_nodes'][index] != 0 and chunk['fin_larg_comp'][index] != 0
        )
        for col in ('intra_thres', 'alive_nodes', 'init_mean_deg'):
            # setdefault replaces the original duplicated i == 0 / else branches.
            result.setdefault(col, []).append(statistics.mean(chunk[col].values.tolist()))
        result.setdefault('alive ratio', []).append(alive / 30)
# Mean alive nodes vs. intra-layer threshold.
plt.plot(buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['intra_thres'], buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['alive_nodes'])
plt.plot(buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['intra_thres'], buldy_RGG_30_rep30_04_0007_rgg_rand_dict['alive_nodes'])
plt.plot(buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['intra_thres'], buldy_RGG_30_rep30_04_0007_rand_rgg_dict['alive_nodes'])
plt.plot(buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['intra_thres'], buldy_RGG_30_rep30_04_0007_rand_rand_dict['alive_nodes'])
plt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])
plt.title('Mean Alive nodes')
plt.show()
# Mean alive nodes vs. p * <k>.
p = 0.9
plt.plot([p * deg for deg in buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg']], buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['alive_nodes'])
plt.plot([p * deg for deg in buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg']], buldy_RGG_30_rep30_04_0007_rgg_rand_dict['alive_nodes'])
plt.plot([p * deg for deg in buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg']], buldy_RGG_30_rep30_04_0007_rand_rgg_dict['alive_nodes'])
plt.plot([p * deg for deg in buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg']], buldy_RGG_30_rep30_04_0007_rand_rand_dict['alive_nodes'])
plt.title('300 Nodes, 2 Layers, 30 attack size')
plt.xlabel('p<k>')
plt.ylabel('mean alive nodes')
plt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])
plt.savefig('buldy_RGG_30_rep30_04_0007')
plt.show()
# Survival ratio vs. p * <k>.
p = 0.9
plt.plot([p * deg for deg in buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg']], buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['alive ratio'])
plt.plot([p * deg for deg in buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg']], buldy_RGG_30_rep30_04_0007_rgg_rand_dict['alive ratio'])
plt.plot([p * deg for deg in buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg']], buldy_RGG_30_rep30_04_0007_rand_rgg_dict['alive ratio'])
plt.plot([p * deg for deg in buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg']], buldy_RGG_30_rep30_04_0007_rand_rand_dict['alive ratio'])
plt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])
plt.title('300 Nodes, 2 Layers, 30 attack size')
plt.xlabel('p<k>')
plt.ylabel('alive ratio')
plt.savefig('buldy_RGG_30_rep30_04_0007_ratio')
plt.show()
```
## buldy_RGG_50_rep100_045
```
# Per-step mean distance of the farthest dead node, keyed by layer combination.
buldy_RGG_50_rep100_045_far_dead_node = {}
cum_far_dead_node = {'rgg_rgg': [],
                     'rgg_rand': [],
                     'rand_rgg': [],
                     'rand_rand': []}
# The step columns appear to start after 21 fixed columns — TODO confirm offset.
for index in range(len(buldy_RGG_50_rep100_045_rgg_rgg_data.columns) - 21):
    for j in range(100):
        if buldy_RGG_50_rep100_045_rgg_rgg_data['step%d_far_dead_node' % index][j] != 0:
            # NOTE(review): `i` is never bound in this cell (these loops bind
            # `index` and `j`), so this test relies on a leftover `i` from an
            # earlier cell.  Unless that leftover happens to be 0, the dict is
            # never initialized and the `else` branch raises KeyError on the
            # empty dict created above.  Presumably `index == 0` was intended
            # — confirm.
            # NOTE(review): the whole-column mean is appended once per
            # non-zero row `j` (up to 100 duplicates per step); a `break`
            # after the first hit may have been intended — confirm.
            if i == 0:
                buldy_RGG_50_rep100_045_far_dead_node['rgg_rgg'] = [statistics.mean(buldy_RGG_50_rep100_045_rgg_rgg_data['step%d_far_dead_node' % index].values.tolist())]
                buldy_RGG_50_rep100_045_far_dead_node['rgg_rand'] = [statistics.mean(buldy_RGG_50_rep100_045_rgg_rand_data['step%d_far_dead_node' % index].values.tolist())]
                buldy_RGG_50_rep100_045_far_dead_node['rand_rgg'] = [statistics.mean(buldy_RGG_50_rep100_045_rand_rgg_data['step%d_far_dead_node' % index].values.tolist())]
                buldy_RGG_50_rep100_045_far_dead_node['rand_rand'] = [statistics.mean(buldy_RGG_50_rep100_045_rand_rand_data['step%d_far_dead_node' % index].values.tolist())]
            else:
                buldy_RGG_50_rep100_045_far_dead_node['rgg_rgg'].append(statistics.mean(buldy_RGG_50_rep100_045_rgg_rgg_data['step%d_far_dead_node' % index].values.tolist()))
                buldy_RGG_50_rep100_045_far_dead_node['rgg_rand'].append(statistics.mean(buldy_RGG_50_rep100_045_rgg_rand_data['step%d_far_dead_node' % index].values.tolist()))
                buldy_RGG_50_rep100_045_far_dead_node['rand_rgg'].append(statistics.mean(buldy_RGG_50_rep100_045_rand_rgg_data['step%d_far_dead_node' % index].values.tolist()))
                buldy_RGG_50_rep100_045_far_dead_node['rand_rand'].append(statistics.mean(buldy_RGG_50_rep100_045_rand_rand_data['step%d_far_dead_node' % index].values.tolist()))
cum_far_dead_node = {'rgg_rgg': [],
'rgg_rand': [],
'rand_rgg': [],
'rand_rand': []}
# Build one per-row series of 'far_dead_node' values for the RGG-RGG runs,
# carrying an earlier value forward wherever a step recorded 0.
for index, row in buldy_RGG_50_rep100_045_rgg_rgg_data.iterrows():
    cur_row = row.tolist()
    # Each cascade step presumably contributes 3 columns; 21 columns are
    # non-step metadata — TODO confirm against the dataframe layout.
    length = int((len(buldy_RGG_50_rep100_045_rgg_rgg_data.columns) - 21) / 3)
    temp = []
    for i in range(length):
        if cur_row[(3*i) + 23] != 0:
            temp.append(cur_row[(3*i) + 23])
        else:
            # NOTE(review): `temp[i-2]` looks suspicious — `i-1` would be the
            # previous value, and for i < 2 this reads a negative index (or
            # raises IndexError on an empty list when the first step is 0).
            # Confirm the intended carry-forward offset.
            temp.append(temp[i-2])
    cum_far_dead_node['rgg_rgg'].append(temp)
print(cum_far_dead_node['rgg_rgg'])
# Mean number of cascade steps per topology, shown as a bar chart.
step_nums = []
step_nums.append(statistics.mean(rgg_rgg_data['cas_steps'].values.tolist()))
step_nums.append(statistics.mean(rgg_rand_data['cas_steps'].values.tolist()))
step_nums.append(statistics.mean(rand_rgg_data['cas_steps'].values.tolist()))
step_nums.append(statistics.mean(rand_rand_data['cas_steps'].values.tolist()))
index = np.arange(4)  # one bar position per graph type
graph_types = ['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand']
plt.bar(index, step_nums, width=0.3, color='gray')
plt.xticks(index, graph_types)
plt.title('Number of steps')
plt.savefig('The number of steps.png')
plt.show()
def _mean_step_series(frame):
    """Walk `frame`'s columns in order and collect the per-step mean counts
    of isolation deaths and unsupport deaths (step1, step2, ...).

    The step counter advances after each stepN_unsupp column, mirroring the
    original column-scanning logic.

    :param frame: one of the per-topology result dataframes
    :return: (isol_means, unsupp_means) as two lists of floats
    """
    isol = []
    unsupp = []
    step = 1
    for col_name in frame:
        if col_name == ('step%d_isol' % step):
            isol.append(statistics.mean(frame[col_name].values.tolist()))
        if col_name == ('step%d_unsupp' % step):
            unsupp.append(statistics.mean(frame[col_name].values.tolist()))
            step += 1
    return isol, unsupp

# DRY: the original repeated the identical scanning loop four times, once per
# topology dataframe.
rgg_rgg_isol, rgg_rgg_unsupp = _mean_step_series(rgg_rgg_data)
rgg_rand_isol, rgg_rand_unsupp = _mean_step_series(rgg_rand_data)
rand_rgg_isol, rand_rgg_unsupp = _mean_step_series(rand_rgg_data)
rand_rand_isol, rand_rand_unsupp = _mean_step_series(rand_rand_data)
# Sanity check: per-topology series lengths should pair up (isol vs unsupp).
print(len(rgg_rgg_isol))
print(len(rgg_rgg_unsupp))
print(len(rgg_rand_isol))
print(len(rgg_rand_unsupp))
print(len(rand_rgg_isol))
print(len(rand_rgg_unsupp))
print(len(rand_rand_isol))
print(len(rand_rand_unsupp))
from itertools import accumulate
# Running totals of isolation/unsupport deaths over cascade steps.
# DRY: replaces four hand-rolled accumulator loops with itertools.accumulate,
# which produces exactly the same cumulative sums.
cum_rgg_rgg_isol = list(accumulate(rgg_rgg_isol))
cum_rgg_rgg_unsupp = list(accumulate(rgg_rgg_unsupp))
cum_rgg_rand_isol = list(accumulate(rgg_rand_isol))
cum_rgg_rand_unsupp = list(accumulate(rgg_rand_unsupp))
cum_rand_rgg_isol = list(accumulate(rand_rgg_isol))
cum_rand_rgg_unsupp = list(accumulate(rand_rgg_unsupp))
cum_rand_rand_isol = list(accumulate(rand_rand_isol))
cum_rand_rand_unsupp = list(accumulate(rand_rand_unsupp))
```
## Isolation vs Unsupport
```
# One isolation-vs-unsupport figure per topology.
# DRY: the original repeated this six-line figure block four times; all
# legend entries, titles and output filenames are preserved byte-for-byte.
for key, disp, isol, unsupp in (
        ('rgg_rgg', 'RGG-RGG', cum_rgg_rgg_isol, cum_rgg_rgg_unsupp),
        ('rgg_rand', 'RGG-Rand', cum_rgg_rand_isol, cum_rgg_rand_unsupp),
        ('rand_rgg', 'Rand-RGG', cum_rand_rgg_isol, cum_rand_rgg_unsupp),
        ('rand_rand', 'Rand-Rand', cum_rand_rand_isol, cum_rand_rand_unsupp)):
    plt.plot(range(len(isol)), isol)
    plt.plot(range(len(isol)), unsupp)
    plt.legend([key + '_isol', key + '_unsupp'])
    plt.title('Isolation vs Unsupport: ' + disp)
    plt.savefig('Isolation vs Unsupport_' + disp + '.png')
    plt.show()
# Pad all cumulative series to the length of the longest one so the four
# topologies can share a common x-axis in the trend plots below.
df_len = []
df_len.append(list(rgg_rgg_isol))
df_len.append(list(rgg_rand_isol))
df_len.append(list(rand_rgg_isol))
df_len.append(list(rand_rand_isol))
max_df_len = max(df_len, key=len)
x_val = list(range(len(max_df_len)))
proc_isol = []
proc_unsupp = []
# NOTE(review): these hold references to (not copies of) the cum_* lists, so
# the padding loop below also mutates cum_rgg_rgg_isol etc. — confirm that
# is acceptable before reusing those lists elsewhere.
proc_isol.append(cum_rgg_rgg_isol)
proc_isol.append(cum_rgg_rand_isol)
proc_isol.append(cum_rand_rgg_isol)
proc_isol.append(cum_rand_rand_isol)
proc_unsupp.append(cum_rgg_rgg_unsupp)
proc_unsupp.append(cum_rgg_rand_unsupp)
proc_unsupp.append(cum_rand_rgg_unsupp)
proc_unsupp.append(cum_rand_rand_unsupp)
for x in x_val:
    # Extend exhausted series by repeating their last cumulative value.
    if len(rgg_rgg_isol) <= x:
        proc_isol[0].append(cum_rgg_rgg_isol[len(rgg_rgg_isol) - 1])
        proc_unsupp[0].append(cum_rgg_rgg_unsupp[len(rgg_rgg_isol) - 1])
    if len(rgg_rand_isol) <= x:
        proc_isol[1].append(cum_rgg_rand_isol[len(rgg_rand_isol) - 1])
        proc_unsupp[1].append(cum_rgg_rand_unsupp[len(rgg_rand_isol) - 1])
    if len(rand_rgg_isol) <= x:
        proc_isol[2].append(cum_rand_rgg_isol[len(rand_rgg_isol) - 1])
        proc_unsupp[2].append(cum_rand_rgg_unsupp[len(rand_rgg_isol) - 1])
    if len(rand_rand_isol) <= x:
        proc_isol[3].append(cum_rand_rand_isol[len(rand_rand_isol) - 1])
        proc_unsupp[3].append(cum_rand_rand_unsupp[len(rand_rand_isol) - 1])
plt.plot(x_val, proc_isol[0])
plt.plot(x_val, proc_isol[1])
plt.plot(x_val, proc_isol[2])
plt.plot(x_val, proc_isol[3])
plt.legend(['rgg_rgg_isol','rgg_rand_isol', 'rand_rgg_isol', 'rand_rand_isol'])
plt.title('Isolation trend')
plt.show()
plt.plot(x_val, proc_unsupp[0])
plt.plot(x_val, proc_unsupp[1])
plt.plot(x_val, proc_unsupp[2])
plt.plot(x_val, proc_unsupp[3])
plt.legend(['rgg_rgg_unsupp','rgg_rand_unsupp', 'rand_rgg_unsupp', 'rand_rand_unsupp'])
plt.title('Unsupport trend')
plt.show()
```
## Pie Chart
```
init_death = 150  # nodes removed by the initial attack
labels = ['Alive nodes', 'Initial death', 'Dead nodes from isolation', 'Dead nodes from unsupport']
# Mean end-state counts per topology, in the same order as graph_types.
alive = [statistics.mean(frame['alive_nodes'])
         for frame in (rgg_rgg_data, rgg_rand_data, rand_rgg_data, rand_rand_data)]
tot_isol = [statistics.mean(frame['tot_isol_node'])
            for frame in (rgg_rgg_data, rgg_rand_data, rand_rgg_data, rand_rand_data)]
tot_unsupp = [statistics.mean(frame['tot_unsupp_node'])
              for frame in (rgg_rgg_data, rgg_rand_data, rand_rgg_data, rand_rand_data)]
# DRY: one pie chart per topology (the original repeated the block four
# times); graph_types order matches the lists built above, so the titles
# 'RGG-RGG death trend' etc. are unchanged.
for k, g_type in enumerate(graph_types):
    deaths = [alive[k], init_death, tot_isol[k], tot_unsupp[k]]
    plt.pie(deaths, labels=labels, autopct='%.1f%%')
    plt.title('%s death trend' % g_type)
    plt.show()
```
## Compute the number of nodes
```
x_val = np.arange(4)  # bar positions, one per graph type
labels = ['initial', 'final']  # legend labels reused by the edge plots below
plt.bar(x_val, alive)
plt.xticks(x_val, graph_types)
plt.title('Alive nodes')
plt.savefig('alive nodes.png')
plt.show()
```
## Compare the number of edges
```
def _edge_means(col):
    """Mean of `col` across the four topology dataframes (graph_types order)."""
    return [statistics.mean(frame[col])
            for frame in (rgg_rgg_data, rgg_rand_data, rand_rgg_data, rand_rand_data)]

# DRY: the original repeated the same four-append pattern six times.
init_intra = _edge_means('init_intra_edge')
init_inter = _edge_means('init_inter_edge')
init_supp = _edge_means('init_supp_edge')
fin_intra = _edge_means('fin_intra_edge')
fin_inter = _edge_means('fin_inter_edge')
fin_supp = _edge_means('fin_supp_edge')

# Side-by-side initial/final bars for each edge category (titles preserved).
for init_vals, fin_vals, title in (
        (init_intra, fin_intra, 'Initial_intra_edge vs Final_intra_edge'),
        (init_inter, fin_inter, 'Initial_inter_edge vs Final_inter_edge'),
        (init_supp, fin_supp, 'Initial_support_edge vs Final_support_edge')):
    plt.bar(x_val-0.1, init_vals, width=0.2)
    plt.bar(x_val+0.1, fin_vals, width=0.2)
    plt.legend(labels)
    plt.xticks(x_val, graph_types)
    plt.title(title)
    plt.show()
```
## Network Analysis
```
# Helper: mean of `col` across the four topology dataframes, in the same
# order as graph_types (RGG-RGG, RGG-Rand, Rand-RGG, Rand-Rand).
def _col_means(col, fill_na=False):
    means = []
    for frame in (rgg_rgg_data, rgg_rand_data, rand_rgg_data, rand_rand_data):
        series = frame[col].fillna(0) if fill_na else frame[col]
        means.append(statistics.mean(series))
    return means

# Helper: side-by-side initial/final bars, one pair per graph type.
def _paired_bars(init_vals, fin_vals, title):
    plt.bar(x_val-0.1, init_vals, width=0.2)
    plt.bar(x_val+0.1, fin_vals, width=0.2)
    plt.legend(labels)
    plt.xticks(x_val, graph_types)
    plt.title(title)
    plt.show()

# DRY: the original repeated the append-four-means + six-plot-lines pattern
# for every metric below; titles are preserved byte-for-byte except the
# typo fix noted at the end.
init_far = _col_means('init_far_node')
fin_far = _col_means('fin_far_node')
_paired_bars(init_far, fin_far, 'Initial_far_node vs Final_far_node')

init_clust = _col_means('init_clust')
fin_clust = _col_means('fin_clust')
_paired_bars(init_clust, fin_clust, 'Initial_clustering_coefficient vs Final_clustering_coefficient')

init_mean_deg = _col_means('init_mean_deg')
fin_mean_deg = _col_means('fin_mean_deg')
_paired_bars(init_mean_deg, fin_mean_deg, 'Initial_mean_degree vs Final_mean_degree')

init_larg_comp = _col_means('init_larg_comp')
fin_larg_comp = _col_means('fin_larg_comp')
_paired_bars(init_larg_comp, fin_larg_comp, 'Initial_largest_component_size vs Final_largest_component_size')

# Degree assortativity can be NaN (e.g. for degree-regular graphs), so fill
# with 0 before averaging — same as the original fillna(0) handling.
deg_assort = _col_means('deg_assort', fill_na=True)
plt.bar(x_val, deg_assort)
plt.xticks(x_val, graph_types)
plt.title('Degree Assortativity')
plt.show()

dist_deg_cent = _col_means('dist_deg_cent')
plt.bar(x_val, dist_deg_cent)
plt.xticks(x_val, graph_types)
plt.title('Distance to degree centre from the attack point')
plt.show()

dist_bet_cent = _col_means('dist_bet_cent')
plt.bar(x_val, dist_bet_cent)
plt.xticks(x_val, graph_types)
# TYPO FIX: original title read "betweenes".
plt.title('Distance to betweenness centre from the attack point')
plt.show()
```
| github_jupyter |
# Access Computation
This tutorial demonstrates how to compute access.
## Setup
```
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from ostk.mathematics.objects import RealInterval
from ostk.physics.units import Length
from ostk.physics.units import Angle
from ostk.physics.time import Scale
from ostk.physics.time import Instant
from ostk.physics.time import Duration
from ostk.physics.time import Interval
from ostk.physics.time import DateTime
from ostk.physics.time import Time
from ostk.physics.coordinate.spherical import LLA
from ostk.physics.coordinate.spherical import AER
from ostk.physics.coordinate import Position
from ostk.physics.coordinate import Frame
from ostk.physics import Environment
from ostk.physics.environment.objects.celestial_bodies import Earth
from ostk.astrodynamics import Trajectory
from ostk.astrodynamics.trajectory import Orbit
from ostk.astrodynamics.trajectory.orbit.models import Kepler
from ostk.astrodynamics.trajectory.orbit.models.kepler import COE
from ostk.astrodynamics.trajectory.orbit.models import SGP4
from ostk.astrodynamics.trajectory.orbit.models.sgp4 import TLE
from ostk.astrodynamics import Access
from ostk.astrodynamics.access import Generator as AccessGenerator
```
---
## Access
An access represents an object-to-object visibility period.
In this example, let's compute accesses between a fixed position on the ground and a satellite in LEO.
## Environment
Let's setup an environment (which describes where planets are, etc...):
```
environment = Environment.default() ;
```
### Origin
Let's define a fixed ground position, using its geographic coordinates:
```
latitude = Angle.degrees(50.0)
longitude = Angle.degrees(20.0)
altitude = Length.meters(30.0)
from_lla = LLA(latitude, longitude, altitude)
from_position = Position.meters(from_lla.to_cartesian(Earth.equatorial_radius, Earth.flattening), Frame.ITRF())
```
And derive a trajectory, fixed at that position:
```
from_trajectory = Trajectory.position(from_position)
```
### Target
Let's consider a satellite in **Low-Earth Orbit**.
```
earth = environment.access_celestial_object_with_name("Earth")
```
We can define its orbit with **Classical Orbital Elements**:
```
a = Earth.equatorial_radius + Length.kilometers(500.0)
e = 0.000
i = Angle.degrees(97.8893)
raan = Angle.degrees(100.372)
aop = Angle.degrees(0.0)
nu = Angle.degrees(0.0201851)
coe = COE(a, e, i, raan, aop, nu)
```
... and by using a **Keplerian** orbital model:
```
epoch = Instant.date_time(DateTime(2018, 1, 1, 0, 0, 0), Scale.UTC)
keplerian_model = Kepler(coe, epoch, earth, Kepler.PerturbationType.J2)
```
Or with a **Two-Line Element** (TLE) set:
```
tle = TLE(
"ISS (ZARYA)",
"1 25544U 98067A 18268.86272795 .00002184 00000-0 40781-4 0 9990",
"2 25544 51.6405 237.0010 0003980 205.4375 242.3358 15.53733046134172"
)
```
... along with its associated **SGP4** orbital model:
```
sgp4_model = SGP4(tle)
```
Below, we select which orbital model to use:
```
orbital_model = keplerian_model
# orbital_model = sgp4_model
```
We then obtain the satellite orbit (which is a **Trajectory** object):
```
satellite_orbit = Orbit(orbital_model, earth)
```
Alternatively, the **Orbit** class can provide some useful shortcuts (for usual orbit types):
```
epoch = Instant.date_time(DateTime(2018, 1, 1, 0, 0, 0), Scale.UTC)
satellite_orbit = Orbit.sun_synchronous(epoch, Length.kilometers(500.0), Time(12, 0, 0), earth)
```
### Access
Now that the origin and the target trajectories are well defined, we can compute the **Access**.
Let's first define an **analysis interval**:
```
start_instant = Instant.date_time(DateTime.parse("2018-01-01 00:00:00"), Scale.UTC) ;
end_instant = Instant.date_time(DateTime.parse("2018-01-10 00:00:00"), Scale.UTC) ;
interval = Interval.closed(start_instant, end_instant) ;
```
Then, using an **Access Generator**, we can compute the accesses within the intervals of interest:
```
# Visibility constraints seen from the ground station.
azimuth_range = RealInterval.closed(0.0, 360.0) # [deg]
elevation_range = RealInterval.closed(20.0, 90.0) # [deg]
range_range = RealInterval.closed(0.0, 10000e3) # [m]
# Access generator with Azimuth-Range-Elevation constraints
access_generator = AccessGenerator.aer_ranges(azimuth_range, elevation_range, range_range, environment)
# All accesses between the fixed ground trajectory and the satellite orbit
# that satisfy the AER constraints within the analysis interval.
accesses = access_generator.compute_accesses(interval, from_trajectory, satellite_orbit)
```
And format the output using a dataframe:
```
accesses_df = pd.DataFrame([[str(access.get_type()), repr(access.get_acquisition_of_signal()), repr(access.get_time_of_closest_approach()), repr(access.get_loss_of_signal()), float(access.get_duration().in_seconds())] for access in accesses], columns=['Type', 'AOS', 'TCA', 'LOS', 'Duration'])
```
### Output
Print accesses:
```
accesses_df
```
Let's calculate the geographic coordinate of the satellite, during access:
```
def compute_lla (state):
    """Return [latitude_deg, longitude_deg, altitude_m] for an orbital state (WGS84 ellipsoid)."""
    lla = LLA.cartesian(state.get_position().in_frame(Frame.ITRF(), state.get_instant()).get_coordinates(), Earth.equatorial_radius, Earth.flattening)
    return [float(lla.get_latitude().in_degrees()), float(lla.get_longitude().in_degrees()), float(lla.get_altitude().in_meters())]
def compute_aer (instant, from_lla, to_position):
    """Return [azimuth_deg, elevation_deg, range_m] from the ground point to `to_position`.

    NOTE(review): the origin is read from the module-level `from_position`,
    not rebuilt from the `from_lla` parameter (which only selects the NED
    frame) — confirm this coupling is intentional before reusing elsewhere.
    """
    nedFrame = earth.get_frame_at(from_lla, Earth.FrameType.NED)  # local North-East-Down frame at the ground point
    fromPosition_NED = from_position.in_frame(nedFrame, instant)
    sunPosition_NED = to_position.in_frame(nedFrame, instant)  # name looks historical; here it holds the satellite position
    aer = AER.from_position_to_position(fromPosition_NED, sunPosition_NED, True)
    return [float(aer.get_azimuth().in_degrees()), float(aer.get_elevation().in_degrees()), float(aer.get_range().in_meters())]
def compute_time_lla_aer_state (state):
    """Return [instant, lat, lon, alt, azimuth, elevation, range] for one state."""
    instant = state.get_instant()
    lla = compute_lla(state)
    aer = compute_aer(instant, from_lla, state.get_position().in_frame(Frame.ITRF(), state.get_instant()))
    return [instant, lla[0], lla[1], lla[2], aer[0], aer[1], aer[2]]
def compute_trajectory_geometry (aTrajectory, anInterval):
    """Sample a trajectory's geographic coordinates on a 1-minute grid."""
    return [compute_lla(state) for state in aTrajectory.get_states_at(anInterval.generate_grid(Duration.minutes(1.0)))]
def compute_access_geometry (access):
    """Sample the satellite geometry during one access on a 1-second grid."""
    return [compute_time_lla_aer_state(state) for state in satellite_orbit.get_states_at(access.get_interval().generate_grid(Duration.seconds(1.0)))]
satellite_orbit_geometry_df = pd.DataFrame(compute_trajectory_geometry(satellite_orbit, interval), columns=['Latitude', 'Longitude', 'Altitude'])
satellite_orbit_geometry_df.head()
access_geometry_dfs = [pd.DataFrame(compute_access_geometry(access), columns=['Time', 'Latitude', 'Longitude', 'Altitude', 'Azimuth', 'Elevation', 'Range']) for access in accesses] ;
def get_max_elevation (df):
    """Return the maximum elevation (deg) reached during an access."""
    return df.loc[df['Elevation'].idxmax()]['Elevation']
```
And plot the geometries onto a map:
```
data = []
# Target geometry: the ground station, drawn as a single orange marker.
data.append(
    dict(
        type = 'scattergeo',
        lon = [float(longitude.in_degrees())],
        lat = [float(latitude.in_degrees())],
        mode = 'markers',
        marker = dict(
            size = 10,
            color = 'orange'
        )
    )
)
# Orbit geometry: the full ground track, as a faint gray line.
data.append(
    dict(
        type = 'scattergeo',
        lon = satellite_orbit_geometry_df['Longitude'],
        lat = satellite_orbit_geometry_df['Latitude'],
        mode = 'lines',
        line = dict(
            width = 1,
            color = 'rgba(0, 0, 0, 0.1)',
        )
    )
)
# Access geometry: one red track segment per access window.
for access_geometry_df in access_geometry_dfs:
    data.append(
        dict(
            type = 'scattergeo',
            lon = access_geometry_df['Longitude'],
            lat = access_geometry_df['Latitude'],
            mode = 'lines',
            line = dict(
                width = 1,
                color = 'red',
            )
        )
    )
layout = dict(
    title = None,
    showlegend = False,
    height = 1000,
    geo = dict(
        showland = True,
        landcolor = 'rgb(243, 243, 243)',
        countrycolor = 'rgb(204, 204, 204)',
    ),
)
figure = go.Figure(data = data, layout = layout)
figure.show()
```
---
| github_jupyter |
```
import tensorflow as tf
from tensorflow.keras import models
import numpy as np
import matplotlib.pyplot as plt
class myCallback(tf.keras.callbacks.Callback):
    """Stops training once the epoch-end training accuracy exceeds 99%.

    (The original comment claimed 60%, but the code tests 0.99.)
    """

    def on_epoch_end(self, epoch, logs=None):
        # BUG FIXES: `logs={}` was a mutable default argument, and
        # `logs.get('accuracy') > 0.99` raised TypeError whenever the key was
        # absent (logs.get returns None). Default the metric to 0.0 instead.
        logs = logs or {}
        if logs.get('accuracy', 0.0) > 0.99:
            print("\nim maxed out baby, too goated!")
            self.model.stop_training = True
path = "mnist.npz"  # local cache file for the MNIST download
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data(path=path)
callbacks = myCallback()
# Scale pixel values to [0, 1] and add a trailing channel axis (grayscale):
# 60000 training and 10000 test images of 28x28 pixels.
x_train = x_train / 255.0
x_train = x_train.reshape(60000, 28, 28, 1)
x_test = x_test.reshape(10000, 28, 28, 1)
x_test = x_test / 255.0
model = tf.keras.models.Sequential([
    # convolution part
    # creates a convolution layer with 64 filters of 3 by 3 dimensions;
    # the relu activation drops all negative values
    # input shape is a 28 by 28 array; the trailing 1 denotes that the image
    # is gray-scale (a single color channel)
    tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)),
    # max pooling layer with a 2 by 2 pooling filter:
    # the largest pixel value is kept out of every 4 pixels
    tf.keras.layers.MaxPooling2D(2, 2),
    # a second convolution + pooling pair so the network can learn another
    # set of convolutions; pooling shrinks the maps again, which reduces the
    # number of dense-layer parameters needed
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    # deep neural network part
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')  # one logit per digit class
])
model.summary()  # summary of parameters so we can see the image's journey through the network
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# Compilation/training is much slower now: 64 filters are convolved over each
# image at two layers, so the computation is much heavier.
# BUG FIX: this previously trained on the 10k-image TEST set (x_test/y_test)
# even though the training set was prepared above — fit on the training data.
model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])
print(y_test[:100])  # print the first 100 labels so the sample indices below can be checked
f, axarr = plt.subplots(3,4)
# Indices of the three sample digits to visualize (see printed labels above
# for their actual classes — the original comments' claims were stale).
FIRST_IMAGE=0
SECOND_IMAGE=11
THIRD_IMAGE=26
CONVOLUTION_NUMBER = 1  # which filter's activation map to display
layer_outputs = [layer.output for layer in model.layers]
# Model exposing every intermediate layer's activations for a given input.
activation_model = tf.keras.models.Model(inputs = model.input, outputs = layer_outputs)
# Looking at the effect the convolutions have on our model: one row per
# sample image, one column per layer (first 4 layers).
for x in range(4):
    f1 = activation_model.predict(x_test[FIRST_IMAGE].reshape(1, 28, 28, 1))[x]
    axarr[0,x].imshow(f1[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')
    axarr[0,x].grid(False)
    f2 = activation_model.predict(x_test[SECOND_IMAGE].reshape(1, 28, 28, 1))[x]
    axarr[1,x].imshow(f2[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')
    axarr[1,x].grid(False)
    f3 = activation_model.predict(x_test[THIRD_IMAGE].reshape(1, 28, 28, 1))[x]
    axarr[2,x].imshow(f3[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')
    axarr[2,x].grid(False)
```
| github_jupyter |
```
import numpy as np
from sklearn.linear_model import LogisticRegression
import mlflow
import mlflow.sklearn
if __name__ == "__main__":
    # Tiny 1-D toy dataset: six samples with binary labels.
    features = np.array([-2, -1, 0, 1, 2, 1]).reshape(-1, 1)
    targets = np.array([0, 0, 1, 1, 1, 0])

    # Fit a logistic-regression classifier and report its training accuracy.
    classifier = LogisticRegression()
    classifier.fit(features, targets)
    accuracy = classifier.score(features, targets)
    print("Score: %s" % accuracy)

    # Record the metric and the fitted model in the active MLflow run.
    mlflow.log_metric("score", accuracy)
    mlflow.sklearn.log_model(classifier, "model")
    print("Model saved in run %s" % mlflow.active_run().info.run_uuid)
    mlflow.end_run()
# Wine Quality Sample
def train(in_alpha, in_l1_ratio):
    """Train an ElasticNet model on the wine-quality dataset and log
    parameters, metrics and the fitted model to MLflow.

    :param in_alpha: ElasticNet alpha (regularization strength); None -> 0.5
    :param in_l1_ratio: ElasticNet L1/L2 mixing ratio; None -> 0.5
    :raises Exception: re-raised if the dataset cannot be downloaded
    """
    import os
    import warnings
    import sys

    import pandas as pd
    import numpy as np
    from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
    from sklearn.model_selection import train_test_split
    from sklearn.linear_model import ElasticNet

    import mlflow
    import mlflow.sklearn

    import logging
    logging.basicConfig(level=logging.WARN)
    logger = logging.getLogger(__name__)

    def eval_metrics(actual, pred):
        # Standard regression metrics: root mean squared error,
        # mean absolute error and coefficient of determination.
        rmse = np.sqrt(mean_squared_error(actual, pred))
        mae = mean_absolute_error(actual, pred)
        r2 = r2_score(actual, pred)
        return rmse, mae, r2

    warnings.filterwarnings("ignore")
    np.random.seed(40)  # reproducible train/test split

    # Read the wine-quality csv file from the URL
    csv_url = \
        'http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv'
    try:
        data = pd.read_csv(csv_url, sep=';')
    except Exception as e:
        logger.exception(
            "Unable to download training & test CSV, check your internet connection. Error: %s", e)
        # BUG FIX: execution previously fell through and crashed later on the
        # undefined `data`; re-raise so the caller sees the real cause.
        raise

    # Split the data into training and test sets. (0.75, 0.25) split.
    # (renamed from `train`/`test`, which shadowed this function's own name)
    train_set, test_set = train_test_split(data)

    # The predicted column is "quality" which is a scalar from [3, 9]
    train_x = train_set.drop(["quality"], axis=1)
    test_x = test_set.drop(["quality"], axis=1)
    train_y = train_set[["quality"]]
    test_y = test_set[["quality"]]

    # BUG FIX: the original tested `float(in_alpha) is None`, which can never
    # be True (float() never returns None, and float(None) raises TypeError),
    # so the documented defaults were unreachable. Test the argument itself.
    alpha = 0.5 if in_alpha is None else float(in_alpha)
    l1_ratio = 0.5 if in_l1_ratio is None else float(in_l1_ratio)

    # Useful for multiple runs (only doing one run in this sample notebook)
    with mlflow.start_run():
        # Execute ElasticNet
        lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)
        lr.fit(train_x, train_y)

        # Evaluate Metrics
        predicted_qualities = lr.predict(test_x)
        (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)

        # Print out metrics
        print("Elasticnet model (alpha=%f, l1_ratio=%f):" % (alpha, l1_ratio))
        print(" RMSE: %s" % rmse)
        print(" MAE: %s" % mae)
        print(" R2: %s" % r2)

        # Log parameter, metrics, and model to MLflow
        mlflow.log_param("alpha", alpha)
        mlflow.log_param("l1_ratio", l1_ratio)
        mlflow.log_metric("rmse", rmse)
        mlflow.log_metric("r2", r2)
        mlflow.log_metric("mae", mae)
        mlflow.sklearn.log_model(lr, "model")
train(0.5, 0.5)
train(0.1, 0.1)
!mlflow ui
```
| github_jupyter |
# TV Script Generation
In this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern).
## Get the Data
The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
# test by Lingchen Zhu
print("First 100 characters in text: {}".format(text[0:100]))
words = text.split()
print("First 10 words in text after splitting: {}".format(words[0:10]))
charset = sorted(set(text)) # set up an ordered set of unique characters in text
print("Number of unique characters in text: {}".format(len(charset)))
print(charset)
vocab = sorted(set(words))
print("Number of unique words in text (before pre-processing): {}".format(len(vocab)))
```
## Explore the Data
Play around with `view_sentence_range` to view different parts of the data.
```
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
```
## Implement Preprocessing Functions
The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:
- Lookup Table
- Tokenize Punctuation
### Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call `vocab_to_int`
- Dictionary to go from the id to word, we'll call `int_to_vocab`
Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
```
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Sorting the unique tokens makes the id assignment deterministic.
    unique_words = sorted(set(text))
    vocab_to_int = {}
    int_to_vocab = {}
    for idx, word in enumerate(unique_words):
        vocab_to_int[word] = idx
        int_to_vocab[idx] = word
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
```
### Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".
Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
- Period ( . )
- Comma ( , )
- Quotation Mark ( " )
- Semicolon ( ; )
- Exclamation mark ( ! )
- Question mark ( ? )
- Left Parentheses ( ( )
- Right Parentheses ( ) )
- Dash ( -- )
- Return ( \n )
This dictionary will be used to tokenize the symbols and add the delimiter (space) around each one. This separates each symbol into its own word, making it easier for the neural network to predict the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
```
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Each symbol maps to a delimiter-safe pseudo-word ("||name||") so the
    # whitespace tokenizer treats punctuation as its own token.
    symbols = ['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n']
    names = ["period", "comma", "quotation_mark", "semicolon",
             "exclamation_mark", "question_mark", "left_parentheses",
             "right_parentheses", "dash", "return"]
    return {sym: "||{}||".format(name) for sym, name in zip(symbols, names)}
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
```
## Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
# load text, split text into words, set up vocabulary <-> int lookup tables and save data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
```
# Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
# test by Lingchen Zhu
print("Number of total words in the vocabulary (after-preprocessing): {}".format(len(int_text)))
print("Number of unique words in the vocabulary (after pre-processing): {}".format(len(vocab_to_int)))
```
## Build the Neural Network
You'll build the components necessary to build a RNN by implementing the following functions below:
- get_inputs
- get_init_cell
- get_embed
- build_rnn
- build_nn
- get_batches
### Check the Version of TensorFlow and Access to GPU
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.3'), 'Please use TensorFlow version 1.3 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
```
### Input
Implement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:
- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.
- Targets placeholder
- Learning Rate placeholder
Return the placeholders in the following tuple `(Input, Targets, LearningRate)`
```
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
    # Batch and sequence dims are left as None so any size can be fed.
    word_ids = tf.placeholder(dtype=tf.int32, shape=(None, None), name='input')
    target_ids = tf.placeholder(dtype=tf.int32, shape=(None, None), name='targets')
    alpha = tf.placeholder(dtype=tf.float32, name='learning_rate')
    return word_ids, target_ids, alpha
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
```
### Build RNN Cell and Initialize
Stack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).
- The Rnn size should be set using `rnn_size`
- Initialize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell#zero_state) function
- Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)
Return the cell and initial state in the following tuple `(Cell, InitialState)`
```
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    NUM_LAYERS = 2   # stacked LSTM depth
    KEEP_PROB = 0.6  # dropout keep probability on each layer's outputs
    # Each layer is an LSTM wrapped with output dropout.
    layers = [
        tf.contrib.rnn.DropoutWrapper(
            tf.contrib.rnn.BasicLSTMCell(rnn_size),
            output_keep_prob=KEEP_PROB)
        for _ in range(NUM_LAYERS)
    ]
    Cell = tf.contrib.rnn.MultiRNNCell(layers)
    # Name the zero state so it can be fetched back out of the graph later.
    InitialState = tf.identity(Cell.zero_state(batch_size, tf.float32), name='initial_state')
    return Cell, InitialState
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
```
### Word Embedding
Apply embedding to `input_data` using TensorFlow. Return the embedded sequence.
```
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # embed_sequence both creates the embedding matrix and performs the lookup.
    return tf.contrib.layers.embed_sequence(input_data, vocab_size=vocab_size, embed_dim=embed_dim)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
```
### Build RNN
You created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.
- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn)
- Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)
Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
```
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # dynamic_rnn unrolls the cell over time; dtype must be given because no
    # initial state is supplied here.
    outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=None, dtype=tf.float32)
    # Name the state so it can be retrieved from a restored graph.
    named_state = tf.identity(last_state, name='final_state')
    return outputs, named_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
```
### Build the Neural Network
Apply the functions you implemented above to:
- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.
- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.
- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.
Return the logits and final state in the following tuple (Logits, FinalState)
```
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    # Pipeline: word ids -> embeddings -> RNN -> per-step logits over vocab.
    embedded = get_embed(input_data, vocab_size, embed_dim)
    rnn_out, FinalState = build_rnn(cell, embedded)
    Logits = tf.contrib.layers.fully_connected(
        rnn_out, vocab_size,
        activation_fn=None,  # linear output; softmax is applied separately
        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
        biases_initializer=tf.zeros_initializer())
    return Logits, FinalState
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
```
### Batches
Implement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:
- The first element is a single batch of **input** with the shape `[batch size, sequence length]`
- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`
If you can't fill the last batch with enough data, drop the last batch.
For example, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2)` would return a Numpy array of the following:
```
[
# First Batch
[
# Batch of Input
[[ 1 2], [ 7 8], [13 14]]
# Batch of targets
[[ 2 3], [ 8 9], [14 15]]
]
# Second Batch
[
# Batch of Input
[[ 3 4], [ 9 10], [15 16]]
# Batch of targets
[[ 4 5], [10 11], [16 17]]
]
# Third Batch
[
# Batch of Input
[[ 5 6], [11 12], [17 18]]
# Batch of targets
[[ 6 7], [12 13], [18 1]]
]
]
```
Notice that the last target value in the last batch is the first input value of the first batch. In this case, `1`. This is a common technique used when creating sequence batches, although it is rather unintuitive.
```
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array
    """
    words_per_batch = batch_size * seq_length
    full_batches = len(int_text) // words_per_batch  # drop the ragged tail
    usable = full_batches * words_per_batch
    inputs = np.asarray(int_text[:usable])
    # Targets are the inputs shifted left by one; the very last target wraps
    # around to the first input word.
    targets = np.roll(inputs, -1)
    # Lay each sequence out row-wise, then slice the time axis into batches.
    input_rows = inputs.reshape(batch_size, -1)
    target_rows = targets.reshape(batch_size, -1)
    pairs = [
        (input_rows[:, i * seq_length:(i + 1) * seq_length],
         target_rows[:, i * seq_length:(i + 1) * seq_length])
        for i in range(full_batches)
    ]
    # Shape: (number of batches, 2, batch size, sequence length)
    return np.array(pairs)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
# test by Lingchen Zhu
test_batches = get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2)
print("test_batches.shape = {}".format(test_batches.shape))
print(test_batches)
```
## Neural Network Training
### Hyperparameters
Tune the following parameters:
- Set `num_epochs` to the number of epochs.
- Set `batch_size` to the batch size.
- Set `rnn_size` to the size of the RNNs.
- Set `embed_dim` to the size of the embedding.
- Set `seq_length` to the length of sequence.
- Set `learning_rate` to the learning rate.
- Set `show_every_n_batches` to the number of batches the neural network should print progress.
```
# Number of Epochs
num_epochs = 100
# Batch Size
batch_size = 256
# RNN Size
rnn_size = 1024
# Embedding Dimension Size
embed_dim = 300
# Sequence Length
seq_length = 20
# Learning Rate
learning_rate = 0.005
# Show stats for every n number of batches
show_every_n_batches = 10
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# Checkpoint prefix used by tf.train.Saver in the training cell.
save_dir = './save'
```
### Build the Graph
Build the graph using the neural network you implemented.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
```
## Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
```
## Save Parameters
Save `seq_length` and `save_dir` for generating a new TV script.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
```
# Checkpoint
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
```
## Implement Generate Functions
### Get Tensors
Get tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graph#get_tensor_by_name). Get the tensors using the following names:
- "input:0"
- "initial_state:0"
- "final_state:0"
- "probs:0"
Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
```
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # The ':0' suffix selects the first output of each named operation.
    names = ('input:0', 'initial_state:0', 'final_state:0', 'probs:0')
    return tuple(loaded_graph.get_tensor_by_name(n) for n in names)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
```
### Choose Word
Implement the `pick_word()` function to select the next word using `probabilities`.
```
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Greedy argmax may get the network "stuck" repeating the same word, so
    # instead sample from the top-N most likely words (top-N sampling).
    top_n = 5  # number of candidate words with the highest probabilities
    # Work on a copy: the original implementation zeroed the caller's array
    # in place, which silently mutated the session output it was given.
    probs = np.array(probabilities, dtype=np.float64)
    probs[np.argsort(probs)[:-top_n]] = 0  # suppress all but the top-N to zero
    probs = probs / np.sum(probs)          # renormalize the survivors
    # Draw one index from range(len(int_to_vocab)) according to probs.
    chosen = np.random.choice(len(int_to_vocab), 1, p=probs)[0]
    return int_to_vocab[chosen]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
```
## Generate TV Script
This will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
```
gen_length = 500
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # Load saved model
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)
    # Get Tensors from loaded model
    input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
    # Sentences generation setup
    gen_sentences = [prime_word + ':']
    prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
    # Generate sentences
    for n in range(gen_length):
        # Dynamic Input
        # Feed at most the last seq_length generated words back into the model.
        dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
        dyn_seq_length = len(dyn_input[0])
        # Get Prediction
        probabilities, prev_state = sess.run(
            [probs, final_state],
            {input_text: dyn_input, initial_state: prev_state})
        # Sample the next word from the distribution at the last time step.
        pred_word = pick_word(probabilities[0][dyn_seq_length-1], int_to_vocab)
        gen_sentences.append(pred_word)
    # Remove tokens
    # Map the "||...||" pseudo-words back to their punctuation characters.
    tv_script = ' '.join(gen_sentences)
    for key, token in token_dict.items():
        ending = ' ' if key in ['\n', '(', '"'] else ''
        tv_script = tv_script.replace(' ' + token.lower(), key)
    tv_script = tv_script.replace('\n ', '\n')
    tv_script = tv_script.replace('( ', '(')
    print(tv_script)
```
# The TV Script is Nonsensical
It's ok if the TV script doesn't make any sense. We trained on less than a megabyte of text. In order to get good results, you'll have to use a smaller vocabulary or get more data. Luckily there's more data! As we mentioned in the beginning of this project, this is a subset of [another dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data). We didn't have you train on all the data, because that would take too long. However, you are free to train your neural network on all the data. After you complete the project, of course.
# Submitting This Project
When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_tv_script_generation.ipynb" and save it as a HTML file under "File" -> "Download as". Include the "helper.py" and "problem_unittests.py" files in your submission.
| github_jupyter |
## Importing Libraries & getting Data
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
# Wine-quality dataset: physico-chemical measurements plus a quality score.
data = pd.read_csv("dataset/winequalityN.csv")
data.head()
data.info()
data.describe()
data.columns
columns = ['type', 'fixed acidity', 'volatile acidity', 'citric acid','residual sugar', 'chlorides', 'free sulfur dioxide','total sulfur dioxide', 'density', 'pH', 'sulphates', 'alcohol','quality']
data['type']
# Class balance of the quality score.
sns.countplot(data=data ,x="quality")
```
## Handling Missing Values
```
# Visualize missingness before imputation.
sns.heatmap(data.isnull(), yticklabels=False, cmap="viridis", cbar=False)
data.isnull().values.sum()
# replacing missing values with mean
data = data.fillna(data.mean())
# Re-plot to confirm no missing values remain.
sns.heatmap(data.isnull(), yticklabels=False, cmap="viridis", cbar=False)
# as 'type' is categorical variable ,remove it from the list of our feature columns
labels = data.pop('type')
cat_columns = ['fixed acidity', 'volatile acidity', 'citric acid','residual sugar', 'chlorides', 'free sulfur dioxide','total sulfur dioxide', 'density', 'pH', 'sulphates', 'alcohol','quality']
data.head()
```
## Scaling & Encoding
```
from sklearn.preprocessing import MinMaxScaler , LabelEncoder
def scale_data(data):
    """Min-max scale every feature into [0, 1]; also return the fitted scaler
    so the transform can be inverted later."""
    scaler = MinMaxScaler(feature_range=(0,1))
    features = scaler.fit_transform(np.array(data))
    return features , scaler
def encode_data(labels):
    """Integer-encode the categorical labels; also return the fitted encoder
    so the codes can be mapped back to their original values."""
    le = LabelEncoder()
    encoded = le.fit_transform(np.array(labels))
    return encoded , le
# another way to encode
# labels.type = labels.type.apply(lambda x: 0 if x == "red" else 1)
# Fit scaler/encoder and sanity-check that both transforms invert cleanly.
X , scaler = scale_data(data)
print(X)
print(scaler.inverse_transform(X))
y , le = encode_data(labels)
print(y)
print(le.inverse_transform(y))
```
## EDA
```
# Pairwise feature correlations; annot prints the coefficient in each cell.
plt.figure(figsize=(10,10))
sns.heatmap(data.corr() , annot=True)
plt.show()
```
### For Handling Outliers
```
def univariate(var):
    # Boxplot of a single numeric column (from the notebook-global `data`
    # DataFrame) to eyeball its outliers.
    sns.boxplot(data=data , y=var)
    plt.show()
cat_columns
# One outlier boxplot per numeric feature.
univariate('fixed acidity')
univariate('volatile acidity')
univariate('citric acid')
univariate('pH')
univariate('sulphates')
univariate('alcohol')
univariate('total sulfur dioxide')
univariate('chlorides')
univariate('residual sugar')
```
### Density and pH
```
sns.displot(data ,x="density" ,color='r',col="quality")
sns.displot(data, x="pH", color='g', col="quality")
```
## Bivariate Analysis
```
# Summary statistics of the target variable.
data['quality'].describe()
```
### Numerical variables vs Target variable
```
# For each feature: bar, line, and violin plots against the quality score.
for i in cat_columns:
    fig , ax = plt.subplots(1,3,figsize=(20,5))
    plt.subplots_adjust(hspace=1)
    sns.barplot(data=data , y=i ,x="quality" , ax=ax[0])
    sns.lineplot(data=data, y=i, x="quality", ax=ax[1])
    sns.violinplot(data=data, y=i, x="quality", ax=ax[2])
```
## Model building with Random Forest classifier
```
from sklearn.model_selection import train_test_split
# Hold out a third of the data; fixed seed keeps the split reproducible.
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=42)
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier()
rfc.fit(X_train , y_train)
y_predicted = rfc.predict(X_test)
# Spot-check a few predictions against the ground truth.
y_predicted[:15] , y_test[:15]
```
## Evaluation
```
from sklearn.metrics import accuracy_score ,confusion_matrix
print("Accuracy :" , (accuracy_score(y_predicted , y_test)))
# Confusion matrix heatmap; fmt='.4g' keeps the counts unabbreviated.
sns.heatmap(confusion_matrix(y_predicted ,y_test),annot=True ,cmap='Purples' ,fmt='.4g')
```
| github_jupyter |
```
from fastai import *
from fastai.vision import *
from fastai.callbacks import *
from fastai.utils.mem import *
from fastai.vision.gan import *
from PIL import Image
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data.dataset import TensorDataset
import pdb
# Dataset root: data/horse2zebra, containing 'horse' and 'zebra' image folders.
path = Path()/'data'/'horse2zebra'
```
# Custom DataBunch Object
```
import fastai.vision.image as im
class DoubleImage(ItemBase):
    """An ItemBase holding a pair of images, one from each domain (A, B)."""
    def __init__(self, img1, img2):
        self.img1,self.img2 = img1,img2
        # Rescale pixel data from [0, 1] to [-1, 1] (matches the tanh output).
        self.data = [(-1+2*img1.data),(-1+2*img2.data)]
    def apply_tfms(self, tfms, **kwargs):
        # Apply the same augmentation pipeline to both images, then refresh
        # the [-1, 1] tensors so `data` stays in sync with the transforms.
        self.img1 = self.img1.apply_tfms(tfms, **kwargs)
        self.img2 = self.img2.apply_tfms(tfms, **kwargs)
        self.data = [-1+2*self.img1.data,-1+2*self.img2.data]
        return self
    def __repr__(self)->str: return f'{self.__class__.__name__}'
    def to_one(self):
        # Concatenate the pair side by side and map back to [0, 1] for display.
        tensor = 0.5+torch.cat(self.data,2)/2
        return im.Image(tensor)
class DoubleImageList(ImageList):
    """An ImageList over domain A whose items are each paired with a randomly
    drawn image from domain B (unaligned pairs, CycleGAN-style)."""
    def __init__(self, items, itemsB=None, **kwargs):
        super().__init__(items, **kwargs)
        # itemsB holds the file paths of the second (B) domain.
        self.itemsB = itemsB
        self.copy_new.append('itemsB')
    def get(self, i):
        img1 = super().get(i)
        # Pair the i-th A image with a random B image on every access.
        fn = self.itemsB[random.randint(0, len(self.itemsB)-1)]
        return DoubleImage(img1, open_image(fn))
    def reconstruct(self, t:Tensor): return t
    @classmethod
    def from_folders(cls, path, folderA, folderB, **kwargs):
        # Build the list from folderA; remember folderB's files as itemsB.
        itemsB = ImageList.from_folder(path/folderB).items
        res = super().from_folder(path/folderA, itemsB=itemsB, **kwargs)
        res.path = path
        return res
    def transform(self, tfms:Optional[Tuple[TfmList,TfmList]]=(None,None), **kwargs):
        "Set `tfms` to be applied to the xs of the train and validation set."
        if not tfms: tfms=(None,None)
        assert is_listy(tfms) and len(tfms) == 2, "Please pass a list of two lists of transforms (train and valid)."
        self.train.transform(tfms[0], **kwargs)
        self.valid.transform(tfms[1], **kwargs)
        if self.test: self.test.transform(tfms[1], **kwargs)
        return self
    def show_xys(self, xs, ys, figsize:Tuple[int,int]=(12,6), **kwargs):
        "Show the `xs` and `ys` on a figure of `figsize`. `kwargs` are passed to the show method."
        rows = int(math.sqrt(len(xs)))
        fig, axs = plt.subplots(rows,rows,figsize=figsize)
        for i, ax in enumerate(axs.flatten() if rows > 1 else [axs]):
            # Undo the [-1, 1] scaling before display.
            xs[i] = DoubleImage((xs[i][0]/2+0.5),(xs[i][1]/2+0.5))
            xs[i].to_one().show(ax=ax, **kwargs)
        plt.tight_layout()
    #UNTESTED
    def show_xyzs(self, xs, ys, zs, figsize:Tuple[int,int]=None, **kwargs):
        """Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`.
        `kwargs` are passed to the show method."""
        figsize = ifnone(figsize, (12,3*len(xs)))
        fig,axs = plt.subplots(len(xs), 2, figsize=figsize)
        fig.suptitle('Ground truth / Predictions', weight='bold', size=14)
        for i,(x,z) in enumerate(zip(xs,zs)):
            x.to_one().show(ax=axs[i,0], **kwargs)
            z.to_one().show(ax=axs[i,1], **kwargs)
# Build the paired databunch; bs=1 is typical for CycleGAN-style training.
data = DoubleImageList.from_folders(path, 'horse', 'zebra').split_by_rand_pct(0.2).label_from_folder()
data = ImageDataBunch.create_from_ll(data, bs=1, size=224)
data.show_batch()
```
# MultiUnet Trainer
```
class UnetBlock(nn.Module):
    "A quasi-UNet block, using `PixelShuffle_ICNR upsampling`."
    def __init__(self, up_in_c:int, x_in_c:int, hook:Hook, final_div:bool=True, blur:bool=False, leaky:float=None,
                 self_attention:bool=False):
        super().__init__()
        # `hook` captures the encoder activation used as the skip connection.
        self.hook = hook
        self.shuf = PixelShuffle_ICNR(up_in_c, up_in_c//2, blur=blur, leaky=leaky)
        self.bn = batchnorm_2d(x_in_c)
        # Channels after concatenating the upsampled map with the skip.
        ni = up_in_c//2 + x_in_c
        nf = ni if final_div else ni//2
        self.conv1 = conv_layer(ni, nf, leaky=leaky)
        self.conv2 = conv_layer(nf, nf, leaky=leaky, self_attention=self_attention)
        self.relu = relu(leaky=leaky)
    def forward(self, up_in:Tensor) -> Tensor:
        s = self.hook.stored
        up_out = self.shuf(up_in)
        ssh = s.shape[-2:]
        # Resize when the upsampled map and the skip activation disagree
        # spatially (can happen with odd input sizes).
        if ssh != up_out.shape[-2:]:
            up_out = F.interpolate(up_out, s.shape[-2:], mode='nearest')
        cat_x = self.relu(torch.cat([up_out, self.bn(s)], dim=1))
        return self.conv2(self.conv1(cat_x))
def _get_sfs_idxs(sizes:Sizes) -> List[int]:
    "Get the indexes of the layers where the size of the activation changes."
    # Compare each layer's trailing (spatial) size with the next layer's;
    # a mismatch marks a resolution change.
    feats = np.array([sz[-1] for sz in sizes])
    change_idxs = list(np.where(feats[:-1] != feats[1:])[0])
    # Force index 0 in when the first two sizes differ (same behaviour as
    # the fastai original, which may produce a duplicated leading 0).
    if feats[0] != feats[1]:
        change_idxs = [0] + change_idxs
    return change_idxs
class UpBlock(nn.Module):
    """Decoder upsampling block: pixel-shuffle upsample, optional additive
    skip connection, then a 5x5 conv."""
    def __init__(self, ni, nf):
        super(UpBlock, self).__init__()
        self.bn = batchnorm_2d(nf)
        self.conv = Conv2dBlock(nf, nf, ks=5, stride=1, norm="bn", activation="relu", padding=2)
        self.shuf = PixelShuffle_ICNR(ni, nf, blur=False, leaky=None)
        self.relu = nn.ReLU()
    def forward(self, xb, body=None):
        up_out = self.shuf(xb)
        if(body is not None):
            # Match spatial sizes before adding the encoder activation.
            ssh = body.shape[-2:]
            if ssh != up_out.shape[-2:]:
                up_out = F.interpolate(up_out, body.shape[-2:], mode='nearest')
            # Additive (not concatenated) skip connection.
            up_out = self.relu(up_out+self.bn(body))
        xb = self.conv(up_out)
        return xb
class Conv2dBlock(nn.Module):
    """Zero-pad -> Conv2d -> optional normalization -> optional activation.

    :param ni: input channels
    :param nf: output channels
    :param ks: kernel size
    :param stride: convolution stride
    :param norm: one of 'bn', 'in', 'ln', 'adain', 'none'
    :param activation: one of 'relu', 'lrelu', 'prelu', 'selu', 'tanh', 'none'
    :param padding: zero-padding applied before the convolution
    """
    def __init__(self, ni, nf, ks, stride, norm, activation, padding=1):
        super(Conv2dBlock, self).__init__()
        self.pad = nn.ZeroPad2d(padding)
        norm_dim = nf
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'in':
            #self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True)
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'ln':
            self.norm = LayerNorm(norm_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == 'none':
            self.norm = None
        else:
            # Previously an unknown value left self.norm unset, which only
            # failed later inside forward(); fail fast with a clear message.
            raise ValueError("Unsupported normalization: {}".format(norm))
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            raise ValueError("Unsupported activation: {}".format(activation))
        self.conv = nn.Conv2d(ni, nf, ks, stride)
    def forward(self, x):
        # pad -> conv -> (norm) -> (activation)
        x = self.conv(self.pad(x))
        if self.norm:
            x = self.norm(x)
        if self.activation:
            x = self.activation(x)
        return x
class LayerNorm(nn.Module):
    """Layer normalization over all non-batch dimensions with an optional
    learned per-channel affine transform."""
    def __init__(self, num_features, eps=1e-5, affine=True):
        super(LayerNorm, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        if self.affine:
            # Per-channel scale (uniform-random init) and shift (zero init).
            self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
            self.beta = nn.Parameter(torch.zeros(num_features))
    def forward(self, x):
        stat_shape = [-1] + [1] * (x.dim() - 1)
        if x.size(0) == 1:
            # Single-sample fast path: flatten everything at once.
            flat = x.view(-1)
            mean = flat.mean().view(*stat_shape)
            std = flat.std().view(*stat_shape)
        else:
            per_sample = x.view(x.size(0), -1)
            mean = per_sample.mean(1).view(*stat_shape)
            std = per_sample.std(1).view(*stat_shape)
        x = (x - mean) / (std + self.eps)
        if self.affine:
            affine_shape = [1, -1] + [1] * (x.dim() - 2)
            x = x * self.gamma.view(*affine_shape) + self.beta.view(*affine_shape)
        return x
class ResBlocks(nn.Module):
    """A sequential stack of `num_blocks` residual blocks."""
    def __init__(self, num_blocks, dim, norm='in', activation='relu', padding=1):
        super(ResBlocks, self).__init__()
        blocks = [ResBlock(dim, norm=norm, activation=activation, padding=padding)
                  for _ in range(num_blocks)]
        self.model = nn.Sequential(*blocks)
    def forward(self, x):
        return self.model(x)
class ResBlock(nn.Module):
    """Residual block: two conv blocks plus an identity skip connection."""
    def __init__(self, dim, norm='in', activation='relu', padding=1):
        super(ResBlock, self).__init__()
        self.model = nn.Sequential(
            Conv2dBlock(dim, dim, 3, 1, norm, activation, padding),
            Conv2dBlock(dim, dim, 3, 1, norm, activation, padding),
        )
    def forward(self, x):
        return self.model(x) + x
class MultiUnet(nn.Module):
    """Two-domain UNet (UNIT-style): a private encoder/decoder per domain with
    shared middle layers, producing same-domain reconstructions and
    cross-domain translations in a single forward pass."""
    def __init__(self, arch:Callable, pretrained:bool=True, cut=None):
        super().__init__()
        self.relu = relu(leaky=None)
        # One pretrained encoder body per domain.
        self.bodyA = create_body(arch, pretrained, cut=-3)
        self.bodyB = create_body(arch, pretrained, cut=-3)
        self.sfs_szs = model_sizes(self.bodyA, size=(224,224))
        # Layers where the activation resolution changes: skip-connection points.
        self.sfs_idxs = list(reversed(_get_sfs_idxs(self.sfs_szs)))
        # Hook the skip activations of each body; dummy_eval populates them.
        self.sfsA = hook_outputs([self.bodyA[i] for i in self.sfs_idxs])
        x = dummy_eval(self.bodyA, (224, 224)).detach()
        self.sfsB = hook_outputs([self.bodyB[i] for i in self.sfs_idxs])
        x = dummy_eval(self.bodyB, (224, 224)).detach()
        unet_blocksA = []
        # Dry-run UnetBlocks on a dummy tensor to trace the channel counts.
        x = torch.tensor([])
        x = x.new_full((1, 512, 7, 7), 0)
        up_in_c = []
        x_in_c = []
        for i,idx in enumerate(self.sfs_idxs):
            up_in_c.append(int(x.shape[1]))
            x_in_c.append(int(self.sfs_szs[idx][1]))
            not_final = i!=len(self.sfs_idxs)-1
            block = UnetBlock(int(x.shape[1]), int(self.sfs_szs[idx][1]), self.sfsA[i], final_div=not_final, blur=False, self_attention=False).eval()
            x = block(x)
        #DecoderA
        self.UpBlockA1 = UpBlock(256, 128)
        self.UpBlockA2 = UpBlock(128, 64)
        self.UpBlockA3 = UpBlock(64, 64)
        self.finalDecoderA = nn.Sequential(PixelShuffle_ICNR(64), conv_layer(64, 3))
        self.ResA = ResBlocks(4, 256, 'in', 'relu', padding=1)
        #DecoderB
        self.UpBlockB1 = UpBlock(256, 128)
        self.UpBlockB2 = UpBlock(128, 64)
        self.UpBlockB3 = UpBlock(64, 64)
        self.ResB = ResBlocks(4, 256, 'in', 'relu', padding=1)
        self.finalDecoderB = nn.Sequential(PixelShuffle_ICNR(64), conv_layer(64, 3))
        #Shared Layers
        self.sharedEncoderLayer = conv_layer(256, 512, stride=2)
        self.middleConv = nn.Sequential(nn.BatchNorm2d(512), nn.ReLU(512), conv_layer(512, 512*2, stride=1), nn.Conv2d(512*2, 512, 3, stride=1))
        self.UpShared = UpBlock(512, 256)
        #Tan layer
        self.tanLayer = nn.Tanh()
    def EncoderA(self, xb):
        # Domain-A private encoder.
        result = self.bodyA(xb)
        return result
    def EncoderB(self, xb):
        # Domain-B private encoder.
        result = self.bodyB(xb)
        return result
    def sharedEncoder(self, xb):
        # Shared latent-space encoder, applied to both domains.
        result = self.sharedEncoderLayer(xb)
        return result
    def MiddleConv(self, xb):
        result = self.middleConv(xb)
        return result
    def sharedDecoder(self, xb):
        # Shared first decoding stage (no skip connection).
        return self.UpShared(xb, None)
    def DecoderA(self, xb, body):
        # Domain-A decoder; `body` supplies encoder hooks for the skips.
        xb = self.ResA(xb)
        xb = self.UpBlockA1(xb, body[0].stored)
        xb = self.UpBlockA2(xb, body[1].stored)
        xb = self.UpBlockA3(xb, body[2].stored)
        return self.finalDecoderA(xb)
    def DecoderB(self, xb, body):
        # Domain-B decoder; `body` supplies encoder hooks for the skips.
        xb = self.ResB(xb)
        xb = self.UpBlockB1(xb, body[0].stored)
        xb = self.UpBlockB2(xb, body[1].stored)
        xb = self.UpBlockB3(xb, body[2].stored)
        return self.finalDecoderB(xb)
    def forward(self, a, b, *pred):
        #get initial encodings of both
        a,b = self.EncoderA(a), self.EncoderB(b)
        #put both through shared encoder and middle conv
        a,b = self.sharedEncoder(a), self.sharedEncoder(b)
        a,b = self.middleConv(a), self.middleConv(b)
        #put images through shared decoder
        a,b = self.sharedDecoder(a), self.sharedDecoder(b)
        #Get images that are supposed to be
        # Same-domain reconstructions (A->A, B->B).
        aToA, bToB = self.DecoderA(a, body=self.sfsA),self.DecoderB(b, body=self.sfsB)
        #Get switched images
        # Cross-domain translations (A->B, B->A).
        aToB, bToA = self.DecoderB(a, body=self.sfsA), self.DecoderA(b, body=self.sfsB)
        # Stack all four outputs along the batch dim, squashed to [-1, 1].
        allIm = torch.cat((self.tanLayer(aToA), self.tanLayer(bToB), self.tanLayer(aToB), self.tanLayer(bToA)), 0)
        return allIm
```
# Critic
```
def conv_and_res(ni, nf): return nn.Sequential(res_block(ni), conv_layer(ni, nf, stride=2, bias=True, use_activ=False, leaky=0.1))
class MultiUNITDiscriminator(nn.Module):
    """Convolutional critic shared by both image domains.

    A single conv stack scores each input; `forward` scores the non-switched
    and switched generator outputs with the same weights.
    """

    def __init__(self):
        super(MultiUNITDiscriminator, self).__init__()
        stem = nn.Conv2d(3, 64, 3, 2, 1)
        head = nn.Conv2d(512, 1, 3, stride=1)
        self.convs = nn.Sequential(
            stem,
            conv_and_res(64, 128),
            conv_and_res(128, 256),
            conv_and_res(256, 512),
            head,
            Flatten(),
        )

    def forward(self, not_switched, switched, down=2):
        # `down` is unused but kept for interface compatibility.
        score_plain = self.convs(not_switched)
        score_swapped = self.convs(switched)
        return (score_plain, score_swapped)
class critic_loss(nn.Module):
    """LSGAN critic loss: domain A ("winter") targets 0, domain B ("summer") targets 1."""
    #a is 0 and b is 1 for predictions

    def forward(self, output, garbage):
        # `garbage` is the unused target fastai's Learner always passes.
        pred_winter, pred_summer = output[0], output[1]
        zeros = pred_winter.new_zeros(*pred_winter.size())
        ones = pred_summer.new_ones(*pred_summer.size())
        return F.mse_loss(pred_winter, zeros) + F.mse_loss(pred_summer, ones)
# Build the critic Learner and restore pretrained critic weights.
# NOTE(review): `data` must already be defined (DataBunch from an earlier cell).
critic_learner = Learner(data, MultiUNITDiscriminator(), loss_func=critic_loss(), wd=1e-3)
# Alternate checkpoints for other domain pairs kept for reference:
#critic_learner.fit_one_cycle(4, wd=0.1)
#critic_learner.save('critic')
#critic_learner.load('criticV5-h2z-zfirst')
critic_learner.load('criticV5-sum2win-wfirst')
#critic_learner.load('criticV5-an2la')
```
# Gan Wrapper
```
class GANLearner(Learner):
    "A `Learner` suitable for GANs."

    def __init__(self, data:DataBunch, generator:nn.Module, critic:nn.Module, gen_loss_func:LossFunction,
                 crit_loss_func:LossFunction, n_crit=None, n_gen=None, switcher:Callback=None, gen_first:bool=False, switch_eval:bool=True,
                 show_img:bool=True, clip:float=None, **learn_kwargs):
        print('in GANLearner')
        # Wrap generator + critic in one module and pair the two loss funcs.
        gan = GANModule(generator, critic)
        loss_func = GANLoss(gen_loss_func, crit_loss_func, gan)
        # Default switcher alternates fixed numbers of critic/generator steps.
        switcher = ifnone(switcher, partial(FixedGANSwitcher, n_crit=n_crit, n_gen=n_gen))
        super().__init__(data, gan, loss_func=loss_func, callback_fns=[switcher], **learn_kwargs)
        # The trainer callback owns optimizer switching, clipping, and display.
        trainer = GANTrainer(self, clip=clip, switch_eval=switch_eval, show_img=show_img)
        self.gan_trainer = trainer
        self.callbacks.append(trainer)
class GANModule(nn.Module):
    "Wrapper around a `generator` and a `critic` to create a GAN."

    def __init__(self, generator:nn.Module=None, critic:nn.Module=None, gen_mode:bool=True):
        super().__init__()
        print('in GANModule')
        self.gen_mode = gen_mode
        if generator:
            self.generator, self.critic = generator, critic

    def forward(self, *args):
        # Dispatch to whichever side is currently active.
        active = self.generator if self.gen_mode else self.critic
        return active(*args)

    def switch(self, gen_mode:bool=None):
        "Put the model in generator mode if `gen_mode`, in critic mode otherwise."
        if gen_mode is None:
            self.gen_mode = not self.gen_mode
        else:
            self.gen_mode = gen_mode
class GANLoss(GANModule):
    "Wrapper around `loss_funcC` (for the critic) and `loss_funcG` (for the generator)."

    def __init__(self, loss_funcG:Callable, loss_funcC:Callable, gan_model:GANModule):
        super().__init__()
        print('in GANLoss')
        self.loss_funcG,self.loss_funcC,self.gan_model = loss_funcG,loss_funcC,gan_model

    def generator(self, output, x_a, x_b):
        "Evaluate the `output` with the critic then uses `self.loss_funcG` to combine it with `target`."
        # `output` is the generator's 4-image batch [aToA, bToB, aToB, bToA];
        # split into (reconstructions, translations) chunks of size 2 each.
        output = torch.split(output, 2, dim=0)
        x_a_recon, x_b_recon = torch.split(output[0], 1, dim=0)
        x_ab, x_ba = torch.split(output[1], 1, dim=0)
        # Score reconstructions and translations with the (frozen) critic.
        fake_pred_x_aa, fake_pred_x_bb = self.gan_model.critic(x_a_recon, x_b_recon)
        fake_pred_x_ab, fake_pred_x_ba = self.gan_model.critic(x_ab, x_ba)
        # Cycle pass: translate the translations back toward their sources.
        cycled_output = self.gan_model.generator(x_ba, x_ab)
        # NOTE(review): indices 3/2 select the cycled images given the swapped
        # argument order — confirm against MultiUnet.forward's output layout.
        cycle_a = cycled_output[3]
        cycle_b = cycled_output[2]
        return self.loss_funcG(x_a, x_b, x_a_recon, x_b_recon, cycle_a, cycle_b, fake_pred_x_ab, fake_pred_x_ba)

    def critic(self, real_pred, b, c):
        # Generate fakes without tracking gradients through the generator,
        # then re-enable grad on the result for the critic update.
        fake = self.gan_model.generator(b.requires_grad_(False), c.requires_grad_(False)).requires_grad_(True)
        fake = torch.split(fake, 2, dim=0)
        fake_ns = torch.split(fake[0], 1, dim=0)  # non-switched: aToA, bToB
        fake_s = torch.split(fake[1], 1, dim=0)   # switched: aToB, bToA
        fake_pred_aToA, fake_pred_bToB = self.gan_model.critic(fake_ns[0], fake_ns[1])
        fake_pred_aToB, fake_pred_bToA = self.gan_model.critic(fake_s[0], fake_s[1])
        return self.loss_funcC(real_pred[0], real_pred[1], fake_pred_aToA, fake_pred_bToB, fake_pred_aToB, fake_pred_bToA)
class GANTrainer(LearnerCallback):
    "Handles GAN Training."
    _order=-20  # run before most callbacks so inputs are rewritten early

    def __init__(self, learn:Learner, switch_eval:bool=False, clip:float=None, beta:float=0.98, gen_first:bool=False,
                 show_img:bool=True):
        super().__init__(learn)
        self.switch_eval,self.clip,self.beta,self.gen_first,self.show_img = switch_eval,clip,beta,gen_first,show_img
        self.generator,self.critic = self.model.generator,self.model.critic

    def _set_trainable(self):
        # Only the side being trained gets gradients; optionally flip
        # train/eval modes so the frozen side's norm statistics stay fixed.
        train_model = self.generator if self.gen_mode else self.critic
        loss_model = self.generator if not self.gen_mode else self.critic
        requires_grad(train_model, True)
        requires_grad(loss_model, False)
        if self.switch_eval:
            train_model.train()
            loss_model.eval()

    def on_train_begin(self, **kwargs):
        "Create the optimizers for the generator and critic if necessary, initialize smootheners."
        if not getattr(self,'opt_gen',None):
            self.opt_gen = self.opt.new([nn.Sequential(*flatten_model(self.generator))])
        else: self.opt_gen.lr,self.opt_gen.wd = self.opt.lr,self.opt.wd
        if not getattr(self,'opt_critic',None):
            self.opt_critic = self.opt.new([nn.Sequential(*flatten_model(self.critic))])
        else: self.opt_critic.lr,self.opt_critic.wd = self.opt.lr,self.opt.wd
        self.gen_mode = self.gen_first
        self.switch(self.gen_mode)
        self.closses,self.glosses = [],[]
        # Exponentially-smoothed loss trackers for generator and critic.
        self.smoothenerG,self.smoothenerC = SmoothenValue(self.beta),SmoothenValue(self.beta)
        self.recorder.add_metric_names(['gen_loss', 'disc_loss'])
        self.imgs,self.titles = [],[]

    def on_train_end(self, **kwargs):
        "Switch in generator mode for showing results."
        self.switch(gen_mode=True)

    def on_batch_begin(self, last_input, last_target, **kwargs):
        "Clamp the weights with `self.clip` if it's not None, return the correct input."
        if self.gen_mode:
            self.last_input = last_input
        if self.clip is not None:
            # WGAN-style weight clipping on the critic.
            for p in self.critic.parameters(): p.data.clamp_(-self.clip, self.clip)
        # The loss functions receive the raw inputs as their "target" too.
        test = {'last_input':last_input,'last_target':last_input}
        #print(test)
        return test

    def on_backward_begin(self, last_loss, last_output, **kwargs):
        "Record `last_loss` in the proper list."
        last_loss = last_loss.detach().cpu()
        if self.gen_mode:
            self.smoothenerG.add_value(last_loss)
            self.glosses.append(self.smoothenerG.smooth)
            self.last_gen = last_output.detach().cpu()
            # Score the latest generated images (non-switched and switched).
            last_gen_split = torch.split(self.last_gen, 1, 0)
            self.last_critic_preds_ns = self.gan_trainer.critic(last_gen_split[0].cuda(), last_gen_split[1].cuda())
            self.last_critic_preds_s = self.gan_trainer.critic(last_gen_split[2].cuda(), last_gen_split[3].cuda())
        else:
            self.smoothenerC.add_value(last_loss)
            self.closses.append(self.smoothenerC.smooth)

    def on_epoch_begin(self, epoch, **kwargs):
        "Put the critic or the generator back to eval if necessary."
        self.switch(self.gen_mode)

    def on_epoch_end(self, pbar, epoch, last_metrics, **kwargs):
        "Put the various losses in the recorder and show a sample image."
        if not hasattr(self, 'last_gen') or not self.show_img: return
        data = self.learn.data
        inputBPre = torch.unbind(self.last_input[1], dim=0)  # NOTE(review): unused.
        # Un-normalize from [-1, 1] back to [0, 1] for display.
        aToA = im.Image(self.last_gen[0]/2+0.5)
        bToB = im.Image(self.last_gen[1]/2+0.5)
        aToB = im.Image(self.last_gen[2]/2+0.5)
        bToA = im.Image(self.last_gen[3]/2+0.5)
        self.imgs.append(aToA)
        self.imgs.append(aToB)
        self.imgs.append(bToB)
        self.imgs.append(bToA)
        self.titles.append(f'Epoch {epoch}-A to A')
        self.titles.append(f'Epoch {epoch}-A to B')
        self.titles.append(f'Epoch {epoch}-B to B')
        self.titles.append(f'Epoch {epoch}-B to A')
        pbar.show_imgs(self.imgs, self.titles)
        return add_metrics(last_metrics, [getattr(self.smoothenerG,'smooth',None),getattr(self.smoothenerC,'smooth',None)])

    def switch(self, gen_mode:bool=None):
        "Switch the model, if `gen_mode` is provided, in the desired mode."
        self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode
        # Swap the underlying optimizer so each side keeps its own state.
        self.opt.opt = self.opt_gen.opt if self.gen_mode else self.opt_critic.opt
        self._set_trainable()
        self.model.switch(gen_mode)
        self.loss_func.switch(gen_mode)
class FixedGANSwitcher(LearnerCallback):
    "Switcher to do `n_crit` iterations of the critic then `n_gen` iterations of the generator."

    def __init__(self, learn:Learner, n_crit=5, n_gen=1):
        super().__init__(learn)
        # Bug fix: both counts were hard-coded to 1, silently ignoring the
        # `n_crit`/`n_gen` arguments. Honor them, falling back to 1 when None
        # is forwarded (GANLearner passes its own None defaults through).
        self.n_crit = 1 if n_crit is None else n_crit
        self.n_gen = 1 if n_gen is None else n_gen

    def on_train_begin(self, **kwargs):
        "Initiate the iteration counts."
        self.n_c,self.n_g = 0,0

    def on_batch_end(self, iteration, **kwargs):
        "Switch the model if necessary."
        if self.learn.gan_trainer.gen_mode:
            self.n_g += 1
            n_iter,n_in,n_out = self.n_gen,self.n_c,self.n_g
        else:
            self.n_c += 1
            n_iter,n_in,n_out = self.n_crit,self.n_g,self.n_c
        # `n_iter` may also be a schedule function of the other side's count.
        target = n_iter if isinstance(n_iter, int) else n_iter(n_in)
        if target == n_out:
            self.learn.gan_trainer.switch()
            self.n_c,self.n_g = 0,0
```
# Training
```
class disc_loss(nn.Module):
    """LSGAN-style critic loss for the GAN training loop."""
    #a is 0 and b is 1 for predictions

    def forward(self, real_pred_a, real_pred_b, aToA, bToB, aToB, bToA):
        # NOTE(review): `aToA` and `bToB` are accepted but never used.
        loss = 0
        #Real Image Predictions
        loss += F.mse_loss(real_pred_a, real_pred_a.new_zeros(*real_pred_a.size()))
        # NOTE(review): real B images are also pushed toward 0 here, which
        # contradicts the "a is 0 and b is 1" convention used by critic_loss
        # (real summer/B -> ones). Confirm whether new_zeros is intentional.
        loss += F.mse_loss(real_pred_b, real_pred_b.new_zeros(*real_pred_b.size()))
        #Translated Predictions
        loss += F.mse_loss(aToB, aToB.new_zeros(*aToB.size()))
        loss += F.mse_loss(bToA, bToA.new_ones(*bToA.size()))
        return loss
class gen_loss(nn.Module):
    """Generator loss: LSGAN adversarial terms plus weighted L1 reconstruction
    and cycle-consistency terms.

    Convention: domain A maps to critic label 0, domain B to label 1.
    """

    def content_similar(self, input, target):
        """L1 distance scaled by 10 (content/reconstruction weight)."""
        return F.l1_loss(input, target)*(10)

    def should_look_like_a(self, input_fake_pred):
        """Adversarial term pushing critic predictions toward the A label (0)."""
        target = input_fake_pred.new_zeros(*input_fake_pred.size())
        return F.mse_loss(input_fake_pred, target)

    def should_look_like_b(self, input_fake_pred):
        """Adversarial term pushing critic predictions toward the B label (1)."""
        target = input_fake_pred.new_ones(*input_fake_pred.size())
        return F.mse_loss(input_fake_pred, target)

    def forward(self, x_a, x_b, x_a_recon, x_b_recon, x_a_cycled, x_b_cycled, fake_pred_x_ab, fake_pred_x_ba):
        loss = 0
        # Take the first item of each batch for the reconstruction terms.
        # Bug fix: x_b_recon was previously unbound from x_a_recon (copy-paste),
        # so the B reconstruction loss compared x_b against the A reconstruction.
        x_a, x_b = torch.unbind(x_a, dim=0)[0], torch.unbind(x_b, dim=0)[0]
        x_a_recon, x_b_recon = torch.unbind(x_a_recon, dim=0)[0], torch.unbind(x_b_recon, dim=0)[0]
        loss += self.should_look_like_a(fake_pred_x_ba)
        loss += self.should_look_like_b(fake_pred_x_ab)
        # Reconstruction terms at half weight, cycle terms at full weight.
        loss += self.content_similar(x_a, x_a_recon)*(0.5)
        loss += self.content_similar(x_b, x_b_recon)*(0.5)
        loss += self.content_similar(x_a, x_a_cycled)
        loss += self.content_similar(x_b, x_b_cycled)
        return loss
```
# GAN Training
```
# Assemble the GAN from a fresh generator and the pretrained critic.
generator = MultiUnet(models.resnet34)
multiGan = GANLearner(data,
                      generator=generator,
                      critic=critic_learner.model,
                      gen_loss_func=gen_loss(),
                      crit_loss_func=disc_loss(), opt_func=partial(optim.Adam, betas=(0.5,0.99)))
# NOTE(review): the freshly trained weights are immediately replaced by the
# checkpoint load below — confirm the fit call is still wanted here.
multiGan.fit_one_cycle(100, 1e-4)
multiGan.load('v5-trial1')
```
# Results
```
#Show input images
rows=2
x,y = next(iter(data.train_dl))
# Un-normalize the first image of each domain from [-1, 1] back to [0, 1].
beforeA = torch.unbind(x[0], dim=0)[0].cpu()
beforeA = im.Image(beforeA/2+0.5)
beforeB = torch.unbind(x[1], dim=0)[0].cpu()
beforeB = im.Image(beforeB/2+0.5)
images = [beforeA, beforeB]
fig, axs = plt.subplots(1,2,figsize=(8,8))
for i, ax in enumerate(axs.flatten() if rows > 1 else [axs]):
    images[i].show(ax=ax)
plt.tight_layout()
#Show results
# Generator returns [aToA, bToB, aToB, bToA] stacked on the batch axis.
pred = multiGan.gan_trainer.generator(x[0], x[1], True)
predAA = pred[0]
predBB = pred[1]
predAB = pred[2]
predBA = pred[3]
predAA = im.Image(predAA.detach()/2+0.5)
predBB = im.Image(predBB.detach()/2+0.5)
predAB = im.Image(predAB.detach()/2+0.5)
predBA = im.Image(predBA.detach()/2+0.5)
images = [predAA, predAB, predBB, predBA]
titles = ["A to A", "A to B", "B to B", "B to A"]
fig, axs = plt.subplots(2,2,figsize=(8,8))
for i, ax in enumerate(axs.flatten() if rows > 1 else [axs]):
    images[i].show(ax=ax, title=titles[i])
plt.tight_layout()
```
| github_jupyter |
```
import xgboost as xgb
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use("ggplot")
%matplotlib inline
from xgboost import XGBRegressor
from sklearn import preprocessing
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin
from sklearn.linear_model import ElasticNetCV, LassoLarsCV
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.pipeline import make_pipeline
from sklearn.utils import check_array
from sklearn.decomposition import PCA, FastICA
from sklearn.metrics import r2_score
class StackingEstimator(BaseEstimator):
    """Wrap an estimator so its predictions are prepended to the feature matrix."""

    def __init__(self, estimator):
        self.estimator = estimator

    def fit(self, X, y=None, **fit_params):
        """Fit the wrapped estimator and return self (sklearn convention)."""
        self.estimator.fit(X, y, **fit_params)
        return self

    def transform(self, X):
        """Return X with a leading column of the wrapped estimator's predictions."""
        X = check_array(X)
        preds = np.reshape(self.estimator.predict(X), (-1, 1))
        return np.hstack((preds, np.copy(X)))
# Load train/test CSVs keyed by the ID column.
train = pd.read_csv("data/train.csv", index_col="ID")
test = pd.read_csv("data/test.csv", index_col="ID")
# Label-encode every categorical column; the encoder is fit on the union of
# train and test values so unseen test categories still map cleanly.
for c in train.columns:
    if train[c].dtype == "object":
        lbl = preprocessing.LabelEncoder()
        lbl.fit(list(train[c].values) + list(test[c].values))
        train[c] = lbl.transform(list(train[c].values))
        test[c] = lbl.transform(list(test[c].values))
n_comp = 10  # number of components for both decompositions
# PCA
pca = PCA(n_components=n_comp, random_state=42)
pca2_results_train = pca.fit_transform(train.drop(["y"], axis=1))
pca2_results_test = pca.transform(test)
# ICA
ica = FastICA(n_components=n_comp, random_state=42)
ica2_results_train = ica.fit_transform(train.drop(["y"], axis=1))
ica2_results_test = ica.transform(test)
# NOTE(review): the PCA/ICA features are computed but never joined back onto
# the training matrices in this notebook — confirm they are still wanted.
y_train = train["y"].values
y_mean = np.mean(y_train)  # used as XGBoost's base_score below
# Stacked pipeline: LassoLarsCV and XGB predictions are prepended as extra
# features for the final LassoLarsCV stage.
stacked_pipeline = make_pipeline(
    StackingEstimator(estimator=LassoLarsCV(normalize=True)),
    StackingEstimator(estimator=XGBRegressor(
        n_estimators=700,
        learning_rate=0.005,
        max_depth=3,
        gamma=0.3,
        min_child_weight=6,
        subsample=0.8,
        colsample_bytree=0.65,
        objective="reg:linear",  # NOTE(review): legacy spelling; newer XGBoost uses "reg:squarederror"
        base_score=y_mean)),
    LassoLarsCV()
)
stacked_pipeline.fit(train.drop("y", axis=1), y_train)
# In-sample R^2 — optimistic, as no cross-validation is done here.
y_pred_train_stack = stacked_pipeline.predict(train.drop("y", axis=1))
print(r2_score(y_train, y_pred_train_stack))
y_predict_stacked = stacked_pipeline.predict(test)
y_predict_stacked
```
### XGBoost
```
# XGBRegressor used purely as a container for the parameter set; the native
# xgb.train API below consumes get_params().
xgb_params = XGBRegressor(n_estimators=700,
                          learning_rate=0.005,
                          max_depth=3,
                          gamma=0.3,
                          min_child_weight=6,
                          subsample=0.8,
                          colsample_bytree=0.65,
                          objective="reg:linear",
                          base_score=y_mean)
dtrain = xgb.DMatrix(train.drop("y", axis=1), y_train)
dtest = xgb.DMatrix(test)
# Cross-validation selects the boosting-round count via early stopping.
cv_output = xgb.cv(xgb_params.get_params(), dtrain, num_boost_round=2000, early_stopping_rounds=50,
                   verbose_eval=50, show_stdv=False)
cv_output[["train-rmse-mean", "test-rmse-mean"]].plot()
pass
num_boost_rounds = len(cv_output)
# NOTE(review): `silent` is deprecated in newer XGBoost releases — confirm version.
model = xgb.train(dict(xgb_params.get_params(), silent=0), dtrain, num_boost_round=num_boost_rounds)
y_pred_train = model.predict(dtrain)
r2_score(y_train, y_pred_train)
y_predict_xgb = model.predict(dtest)
y_predict_xgb
```
### Need to do cross-validation for stack and xgb; in terms of r2_score
0.5 * xgb + 0.5 * stacked = test vs. test -> optimization problem; find x1 and x2 where it perfect
```
# Sweep the blend weight between the stacked and xgb train predictions.
x1 = np.arange(0, 1.1, 0.1)
r2_values = [
    r2_score(y_train, w * y_pred_train_stack + (1 - w) * y_pred_train)
    for w in x1
]
r2_values
```
### Visualization
```
# Distribution of the target vs. the two models' train/test predictions.
pd.Series(y_train).hist(bins=100)
pd.Series(y_pred_train_stack).hist(bins=100)
pd.Series(y_predict_stacked).hist(bins=100)
# xgboost normal
pd.Series(y_pred_train).hist(bins=100)
pd.Series(y_train).plot()
```
### Output
```
# Blend the two test-set predictions and write the submission file.
weight = 0.2  # weight on the stacked pipeline; the rest goes to XGBoost
y_predict_combined = weight * y_predict_stacked + (1-weight) * y_predict_xgb
output = pd.DataFrame({"ID": test.index, "y": y_predict_combined})
output.to_csv("submissions_stacked_combined.csv", index=False)
```
| github_jupyter |
# Self-Driving Car Engineer Nanodegree
## Deep Learning
## Project: Build a Traffic Sign Recognition Classifier
In this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary.
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
In addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) that can be used to guide the writing process. Completing the code template and writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/481/view) for this project.
The [rubric](https://review.udacity.com/#!/rubrics/481/view) contains "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the "stand out suggestions", you can include the code in this Ipython notebook and also discuss the results in the writeup file.
>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
---
## Step 0: Load The Data
```
# Load pickled data
import pickle

# Paths to the pre-split German Traffic Sign dataset pickles.
training_file = 'traffic-signs-data/train.p'
validation_file='traffic-signs-data/valid.p'
testing_file = 'traffic-signs-data/test.p'

with open(training_file, mode='rb') as f:
    train = pickle.load(f)
with open(validation_file, mode='rb') as f:
    valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)

# Each pickle is a dict; 'features' holds 32x32 RGB images and 'labels'
# their class ids (see the dataset description above).
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
```
---
## Step 1: Dataset Summary & Exploration
The pickled data is a dictionary with 4 key/value pairs:
- `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).
- `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id.
- `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image.
- `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES**
Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results.
### Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas
```
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results
import numpy as np

# Basic dataset summary computed directly from the arrays.
# TODO: Number of training examples
n_train = X_train.shape[0]
# TODO: Number of validation examples
n_validation = X_valid.shape[0]
# TODO: Number of testing examples.
n_test = X_test.shape[0]
# TODO: What's the shape of an traffic sign image?
image_shape = X_train.shape[1:3]
# TODO: How many unique classes/labels there are in the dataset.
n_classes = len(np.unique(y_train))
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Number of validation examples =", n_validation)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
```
### Include an exploratory visualization of the dataset
Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc.
The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python.
**NOTE:** It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? Are there more examples of some classes than others?
```
### Data exploration visualization code goes here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
# Visualizations will be shown in the notebook.
%matplotlib inline
```
### Plotting random images of the training set
Below is shown a 5x5 grid of randomly chosen images of the dataset.
The images are of dimension 32x32x3, so 32x32 pixel of RGB values.
```
# Show a height x width grid of randomly sampled training images.
width = 5
height = 5
fig, axes = plt.subplots(height, width, figsize=(18, 16))
for i in range(height):
    for j in range(width):
        idx = np.random.choice(n_train, 1)[0]
        axes[i, j].imshow(X_train[idx])
```
To visualize the different datasets (test, train & validation), the amount of each image type in each of the sets, is shown below in a bar plot.
```
import pandas as pd

# Count how often each class id appears in each split.
unique, train_counts = np.unique(y_train, return_counts=True)
unique, test_counts = np.unique(y_test, return_counts=True)
unique, valid_counts = np.unique(y_valid, return_counts=True)
df = pd.DataFrame({'train': train_counts, 'test': test_counts, 'validation': valid_counts}, index=unique)
# One bar chart per split, sharing axes for easy comparison.
axes = df.plot(kind="bar", subplots=True, layout=(2,2), sharey=True, sharex=True, figsize=(18, 16))
fig=axes[0,0].figure
fig.text(0.5,0.04, "Image labels", ha="center", va="center")
fig.text(0.05,0.5, "Number of images", ha="center", va="center", rotation=90)
plt.show()
```
At a first glance the validation set seemingly is much more uniform in its distribution than e.g. the training dataset, which can contain vastly different numbers of each label. An example would be the labels `0` and `2`, where there are almost 10 times (approx. 2000 images) the amount of images of label `2` than that of label `0` (around 220 images).
However, on further inspection, all the datasets seem to have the same "shape" of the distribution across labels, this distribution is just scaled.
This makes sense: if all the datasets are completely randomly sampled from the same pool of images, each subset would contain more or less the same distribution (in an ideal world).
The training, testing and validation sets are divided from the total dataset, as follows:
Training : 34799/51839 * 100 = 67.13 %
Testings : 12630/51839 * 100 = 24.36 %
Validation: 4410/51839 * 100 = 8.50 %
Which means that the network will only ever learn from 75.6 % of the data (training plus validation), and will then be compared to the remainder.
----
## Step 2: Design and Test a Model Architecture
Design and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).
The LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play!
With the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission.
There are various aspects to consider when thinking about this problem:
- Neural network architecture (is the network over or underfitting?)
- Play around preprocessing techniques (normalization, rgb to grayscale, etc)
- Number of examples per label (some have more than others).
- Generate fake data.
Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these.
### Pre-process the Data Set (normalization, grayscale, etc.)
Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project.
Other pre-processing steps are optional. You can try different techniques to see if it improves performance.
Use the code cell (or multiple code cells, if necessary) to implement the first step of your project.
```
from skimage.color import rgb2gray
def rgb2grayscale(rgb):
    """Convert an RGB image to single-channel grayscale (0.299R + 0.587G + 0.114B).

    Bug fix: the parameter was named `rbg` while the body referenced `rgb`,
    so any call raised NameError.

    Returns the input with the channel axis reduced to size 1.
    """
    return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])[..., np.newaxis]
def normalize(img):
    """Scale uint8 pixel values to roughly [-1, 1): (pixel - 128) / 128."""
    centered = img.astype(np.float32) - 128
    return centered / 128
def preprocess_image(img, channels=3):
    """Convert one RGB image to a normalized single-channel array.

    Uses skimage's rgb2gray (values in [0, 1]), rescales to [-1, 1], and
    restores a trailing channel axis so the result is (H, W, 1).

    NOTE(review): `channels` is currently ignored — the grayscale path is
    always taken (the conditional is commented out). Confirm intent.
    """
    out = img
    # if channels == 1:
    #     out = rgb2gray(out)
    #     out = out[:, :, np.newaxis]
    out = rgb2gray(out)
    out = (out - 0.5) * 2.
    out = out[:, :, np.newaxis]
    return out
def preprocess_data(samples, channels=3):
    """Preprocess every image in `samples`; returns a stacked numpy array."""
    processed = [preprocess_image(sample, channels) for sample in samples]
    return np.array(processed)
# Preprocess every split once, up front.
CHANNELS = 1
X_train_proc = preprocess_data(X_train, CHANNELS)  # convert_img(X_train)
X_test_proc = preprocess_data(X_test, CHANNELS)  # convert_img(X_test)
X_valid_proc = preprocess_data(X_valid, CHANNELS)  # convert_img(X_valid)
```
### Model Architecture
```
import tensorflow as tf
from tensorflow.contrib.layers import flatten
from sklearn.utils import shuffle
def convolutional_layer(x, W, b, strides=1, padding='SAME'):
    """Create a convolutional layer

    Returns a full convolutional layer with a RELU activation function at the end.

    Args:
        x (np.array): Input to the layer
        W (np.array): Weights matrix
        b (np.array): Bias vector
        strides (int, optional): The number of pixel between each application of the kernel
        padding (str, optional): Padding of the image ('SAME' or 'VALID').

    Returns:
        tf.nn.conv2d: Convolutional layer
    """
    layer = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding=padding)
    layer = tf.nn.bias_add(layer, b)
    layer = tf.nn.relu(layer)
    return layer
def fully_connected_layer(x, W, b, relu_activation=False):
    """Creates a fully connected linear layer

    Returns a fully connected layer with an optional RELU activation function

    Args:
        x (np.array): Input to the layer
        W (np.array): Weights matrix
        b (np.array): Bias vector
        relu_activation (bool, optional): Apply a RELU after the affine transform.

    Returns:
        tf.Operation: Fully connected layer
    """
    layer = tf.nn.bias_add(tf.matmul(x, W), b)
    if relu_activation:
        layer = tf.nn.relu(layer)
    return layer
def maxpool2d(x, k=2):
    """k x k max-pooling with matching stride and SAME padding."""
    window = [1, k, k, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
```
#### Hyper parameters
```
# Hyper parameters for weight init and training.
MU = 0              # mean for truncated-normal weight initialisation
SIGMA = 0.1         # stddev for truncated-normal weight initialisation
LEARNING_RATE = 0.001
EPOCHS = 10
BATCH_SIZE = 128
KERNEL_SIZE = 5
# Bug fix: KEEP_PROB was 0., which tf.nn.dropout rejects (keep probability
# must be in (0, 1]) and which would zero every activation anyway. 0.5 is the
# conventional training value; evaluation feeds 1.0 explicitly.
KEEP_PROB = 0.5
```
Create dictionaries for the weights and biases for the net
```
# Weight tensors for the LeNet-style network, initialised from a truncated
# normal with mean MU and stddev SIGMA.
WEIGHTS = {
    'conv1': tf.Variable(tf.truncated_normal(shape=(KERNEL_SIZE, KERNEL_SIZE, CHANNELS, 6), mean = MU, stddev = SIGMA)),
    'conv2': tf.Variable(tf.truncated_normal(shape=(KERNEL_SIZE, KERNEL_SIZE, 6, 16), mean = MU, stddev = SIGMA)),
    'fc1': tf.Variable(tf.truncated_normal(shape=(400, 120), mean = MU, stddev = SIGMA)),
    'fc2': tf.Variable(tf.truncated_normal(shape=(120, 84), mean = MU, stddev = SIGMA)),
    'fc3': tf.Variable(tf.truncated_normal(shape=(84, n_classes), mean = MU, stddev = SIGMA)),
}
# Biases start at zero.
BIASES = {
    'conv1': tf.Variable(tf.zeros(6)),
    'conv2': tf.Variable(tf.zeros(16)),
    'fc1': tf.Variable(tf.zeros(120)),
    'fc2': tf.Variable(tf.zeros(84)),
    'fc3': tf.Variable(tf.zeros(n_classes)),
}
# Graph placeholders: image batch, integer labels, and dropout keep prob.
x = tf.placeholder(tf.float32, [None, 32, 32, CHANNELS], name='input')
y = tf.placeholder(tf.int32, (None), name='output')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
one_hot_y = tf.one_hot(y, n_classes)
```
#### Define the network
The network is defined in the function `traffic_sign_net()`.
```
def traffic_sign_net(x, dropout=False):
    """LeNet-5-style network for 32x32xCHANNELS traffic-sign images.

    Args:
        x: Input placeholder of shape (None, 32, 32, CHANNELS).
        dropout (bool): Apply dropout (rate fed via the `keep_prob`
            placeholder) after the flatten and the first two FC layers.

    Returns:
        Logits tensor of shape (None, n_classes).
    """
    # First layer: Convolutional layer - Input is 32x32x1 - output is 28x28x6
    # Maxpooling with a stride of 2, converts output to 14x14x6
    conv1 = convolutional_layer(x, WEIGHTS['conv1'], BIASES['conv1'], strides=1, padding='VALID')
    conv1 = maxpool2d(conv1, k=2)
    # Second layer: Convolutional layer - input is 14x14x6 output is 10x10x16
    # Maxpooling with a stride of 2, converts output to 5x5x16
    conv2 = convolutional_layer(conv1, WEIGHTS['conv2'], BIASES['conv2'], strides=1, padding='VALID')
    conv2 = maxpool2d(conv2, k=2)
    # Flatten output of the convolutional layer: output is 5x5x16 = 400.
    fc0 = flatten(conv2)
    # Bug fix: this dropout was applied unconditionally (its `if dropout:`
    # guard was commented out), inconsistent with the other layers.
    if dropout:
        fc0 = tf.nn.dropout(fc0, keep_prob)
    # Third layer: Fully Connected. Input = 400. Output = 120.
    fc1 = fully_connected_layer(fc0, WEIGHTS['fc1'], BIASES['fc1'], relu_activation=True)
    if dropout:
        fc1 = tf.nn.dropout(fc1, keep_prob)
    # Fourth layer: Fully Connected. Input = 120. Output = 84.
    fc2 = fully_connected_layer(fc1, WEIGHTS['fc2'], BIASES['fc2'], relu_activation=True)
    if dropout:
        fc2 = tf.nn.dropout(fc2, keep_prob)
    # Fifth layer: Fully Connected. Input = 84. Output = n_classes.
    logits = fully_connected_layer(fc2, WEIGHTS['fc3'], BIASES['fc3'], relu_activation=False)
    return logits
# Create a Model: logits, loss, optimizer, and accuracy ops.
logits = traffic_sign_net(x, dropout=True)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = LEARNING_RATE)
training_operation = optimizer.minimize(loss_operation)
# Accuracy: fraction of samples whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Instantiate the saver once (it was previously created twice; the first
# instance was immediately shadowed by the second).
saver = tf.train.Saver()
def evaluate(X_data, y_data):
    """Mean accuracy over the dataset, computed batch-by-batch.

    Dropout is disabled during evaluation by feeding keep_prob = 1.0.
    """
    sess = tf.get_default_session()
    n = len(X_data)
    total = 0
    for start in range(0, n, BATCH_SIZE):
        xb = X_data[start:start+BATCH_SIZE]
        yb = y_data[start:start+BATCH_SIZE]
        acc = sess.run(accuracy_operation, feed_dict={x: xb, y: yb, keep_prob: 1.0})
        # Weight each batch by its size (the last batch may be smaller).
        total += acc * len(xb)
    return total / n
```
### Train, Validate and Test the Model
A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation
sets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting.
```
from datetime import datetime
start_time = datetime.now()
print('Starting at: {}'.format(start_time))
# Train for EPOCHS epochs, shuffling the training set each epoch and
# reporting training/validation accuracy after every epoch.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # NOTE(review): uses len(X_train) but batches X_train_proc — assumes the
    # raw and preprocessed sets have the same length; confirm upstream.
    num_examples = len(X_train)
    print("Training...")
    print()
    for i in range(EPOCHS):
        epoch_start = datetime.now()
        # Reshuffle each epoch so mini-batches differ between epochs.
        X_train_proc, y_train = shuffle(X_train_proc, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train_proc[offset:end], y_train[offset:end]
            # KEEP_PROB < 1.0 enables dropout during training only.
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: KEEP_PROB})
        train_accuracy = evaluate(X_train_proc, y_train)
        validation_accuracy = evaluate(X_valid_proc, y_valid)
        print("EPOCH {} ...".format(i+1))
        print("Training Accuracy = {:.3f}".format(train_accuracy))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print('Epoch took: {}'.format(datetime.now() - epoch_start))
        print()
    # Persist the trained weights as a checkpoint named './lenet'.
    saver.save(sess, './lenet')
    print("Model saved")
print('Total time: {}'.format(datetime.now() - start_time))
```
### Model validation
We can now validate the model's performance by testing it on images it has not seen before.
Using the `test` dataset, we get the following performance:
```
# Evaluate the trained model on the held-out test set.
with tf.Session() as sess:
    # Restore the most recent checkpoint saved during training.
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    test_accuracy = evaluate(X_test_proc, y_test)
    print("Test Set Accuracy = {:.3f}".format(test_accuracy))
```
---
## Step 3: Test a Model on New Images
To give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type.
You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name.
### Load and Output the Images
```
import os
import matplotlib.pyplot as plt

def import_img(path, resize_dim=(32, 32)):
    """Read an image from *path* and resize it to *resize_dim* (width, height)."""
    img = plt.imread(path)
    return cv2.resize(img, resize_dim)

# Load the five web images.
# NOTE(review): the original cell first built X_webdata/y_webdata from every
# .jpg in test_data/ (via import_img and an undefined `normalize` helper) and
# then immediately overwrote both with the hard-coded .png files below, so the
# first computation was dead code and has been removed.
X_webdata = np.array([plt.imread('test_data/web1.png'),
                      plt.imread('test_data/web2.png'),
                      plt.imread('test_data/web3.png'),
                      plt.imread('test_data/web4.png'),
                      plt.imread('test_data/web5.png')])
X_webdata = preprocess_data(X_webdata, channels=CHANNELS)
# Ground-truth class ids for the five images (see signnames.csv).
y_webdata = np.array([18, 12, 13, 11, 4])
print("Actual Sign Types:", y_webdata)
```
### Predict the Sign Type for Each Image
```
# Run the predictions using the model to output the prediction for each image.
# Restore the checkpoint and predict a class id for each web image.
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    # keep_prob = 1.0 disables dropout at inference time.
    predicted = sess.run(tf.argmax(logits, 1), feed_dict={x: X_webdata, y: y_webdata, keep_prob: 1.0})
    print('Predicted Sign Types:', predicted)
    print('Actual labels:', y_webdata)
```
### Analyze Performance
```
# Compare predictions against ground truth and report accuracy.
correct = np.sum(y_webdata == predicted)
total = len(y_webdata)
print('Model estimated {} out of {} images correctly, giving {:.0f} % accuracy'.format(correct, total, correct/total*100))
```
### Output Top 5 Softmax Probabilities For Each Image Found on the Web
```
# Report the top-5 softmax probabilities (and their class ids) per image.
k = 5
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    top_k_softmax, indicies = sess.run(tf.nn.top_k(tf.nn.softmax(logits), k), feed_dict={x: X_webdata, y: y_webdata, keep_prob: 1.0})
    for idx, val in enumerate(top_k_softmax):
        print('Image {} - Top {} probabilites: {}'.format(idx+1, k, val))
        print('Image {} - Corresponding indicies: {}'.format(idx+1, indicies[idx]))
```
### Project Writeup
Once you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file.
> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
---
## Step 4 (Optional): Visualize the Neural Network's State with Test Images
This section is not required to complete but acts as an additional exercise for understanding the output of a neural network's weights. While neural networks can be a great learning device they are often referred to as a black box. We can understand what the weights of a neural network look like better by plotting their feature maps. After successfully training your neural network you can see what its feature maps look like by plotting the output of the network's weight layers in response to a test stimuli image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting. For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol.
Provided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. The inputs to the function should be a stimuli image, one used during training or a new one you provided, and then the tensorflow variable name that represents the layer's state during the training process, for instance if you wanted to see what the [LeNet lab's](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) feature maps looked like for it's second convolutional layer you could enter conv2 as the tf_activation variable.
For an example of what feature map outputs look like, check out NVIDIA's results in their paper [End-to-End Deep Learning for Self-Driving Cars](https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/) in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image.
<figure>
<img src="visualize_cnn.png" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your output should look something like this (above)</p>
</figcaption>
</figure>
<p></p>
```
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1):
    """Plot every feature map of a layer's activation for one input image.

    image_input: preprocessed stimulus image batch fed to placeholder `x`
        (preprocess it the same way the network expects before calling).
    tf_activation: TF tensor for the layer whose activations to visualize;
        if it is "not defined" inside this function, it may be a scoping issue.
    activation_min / activation_max: optional vmin/vmax to control imshow
        contrast; -1 means "let matplotlib pick from the data".
    plt_num: figure number, so multiple layers can be plotted side by side.
    """
    # Relies on the module-level `sess` and placeholder `x`.
    activation = tf_activation.eval(session=sess, feed_dict={x: image_input})
    featuremaps = activation.shape[3]
    plt.figure(plt_num, figsize=(15, 15))
    # Only pass vmin/vmax when the caller actually supplied them.
    # BUG FIX: the original tested `activation_min != -1 & activation_max != -1`;
    # `&` binds tighter than `!=` in Python, so that expression parsed as a
    # chained comparison and did not test "both limits were supplied".
    limits = {}
    if activation_min != -1:
        limits['vmin'] = activation_min
    if activation_max != -1:
        limits['vmax'] = activation_max
    for featuremap in range(featuremaps):
        plt.subplot(6, 8, featuremap + 1)  # 6x8 grid of feature maps per figure
        plt.title('FeatureMap ' + str(featuremap))
        plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", cmap="gray", **limits)
```
| github_jupyter |
# Self-Driving Car Engineer Nanodegree
## Project: **Finding Lane Lines on the Road**
***
In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below.
Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.
In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.
---
Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.
**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**
---
**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Transform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is to piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**
---
<figure>
<img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
</figcaption>
</figure>
<p></p>
<figure>
<img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
</figcaption>
</figure>
**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
## Import Packages
```
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
```
## Read in an Image
```
#reading in an image
# mpimg.imread returns an RGB numpy array (unlike cv2.imread, which is BGR).
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image)  # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
```
## Ideas for Lane Detection Pipeline
**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
`cv2.inRange()` for color selection
`cv2.fillPoly()` for regions selection
`cv2.line()` to draw lines on an image given endpoints
`cv2.addWeighted()` to coadd / overlay two images
`cv2.cvtColor()` to grayscale or change color
`cv2.imwrite()` to output images to file
`cv2.bitwise_and()` to apply a mask to an image
**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
## Helper Functions
Below are some helper functions to help get you started. They should look familiar from the lesson!
```
import math
def grayscale(img):
    """Convert an RGB image to a single-channel grayscale image.

    NOTE: display the result with plt.imshow(gray, cmap='gray'),
    otherwise matplotlib applies a false-color map to the single channel.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return gray
    # Images loaded with cv2.imread() are BGR; for those use instead:
    # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
    """Run Canny edge detection with the given hysteresis thresholds."""
    edges = cv2.Canny(img, low_threshold, high_threshold)
    return edges
def gaussian_blur(img, kernel_size):
    """Smooth the image with a kernel_size x kernel_size Gaussian kernel."""
    ksize = (kernel_size, kernel_size)
    return cv2.GaussianBlur(img, ksize, 0)
def region_of_interest(img, vertices):
    """Black out everything outside the polygon defined by `vertices`.

    Only the region of the image inside the polygon formed from
    `vertices` is kept; the rest of the image is set to black.
    `vertices` should be a numpy array of integer points.
    """
    mask = np.zeros_like(img)
    # The fill value must match the channel count of the input image.
    if len(img.shape) > 2:
        fill_color = (255,) * img.shape[2]  # e.g. 3 or 4 channels
    else:
        fill_color = 255
    # Paint the polygon interior, then keep only pixels under it.
    cv2.fillPoly(mask, vertices, fill_color)
    return cv2.bitwise_and(img, mask)
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """Draw each Hough line segment onto `img` in place (mutates the image).

    This is the function to extend when averaging/extrapolating the detected
    segments into a single left and a single right lane line (as in
    P1_example.mp4): separate segments by slope ((y2-y1)/(x2-x1)) to decide
    which belong to the left vs. right line, average each group's position,
    and extrapolate to the top and bottom of the lane. For semi-transparent
    lines, combine this with the weighted_img() function below.
    """
    for segment in lines:
        for x1, y1, x2, y2 in segment:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """Run the probabilistic Hough transform on a Canny edge image.

    `img` should be the output of a Canny transform. Returns a black RGB
    image of the same height/width with the detected segments drawn on it.
    """
    segments = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                               minLineLength=min_line_len, maxLineGap=max_line_gap)
    height, width = img.shape[0], img.shape[1]
    canvas = np.zeros((height, width, 3), dtype=np.uint8)
    draw_lines(canvas, segments)
    return canvas
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """Blend the line image `img` over `initial_img`.

    `img` is the output of hough_lines(): a blank (all-black) image with
    lines drawn on it. `initial_img` is the image before any processing.
    The result is computed as initial_img * α + img * β + γ.
    NOTE: both images must have the same shape.
    """
    blended = cv2.addWeighted(initial_img, α, img, β, γ)
    return blended
```
## Test Images
Build your pipeline to work on the images in the directory "test_images"
**You should make sure your pipeline works well on these images before you try the videos.**
```
import os
# List the provided test images; the pipeline should handle all of them.
os.listdir("test_images/")
```
## Build a Lane Finding Pipeline
Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
```
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
```
## Test on Videos
You know what's cooler than drawing lanes over images? Drawing lanes over video!
We can test our solution on two provided videos:
`solidWhiteRight.mp4`
`solidYellowLeft.mp4`
**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
**If you get an error that looks like this:**
```
NeedDownloadError: Need ffmpeg exe.
You can download it by calling:
imageio.plugins.ffmpeg.download()
```
**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**
```
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def process_image(image):
    # NOTE: The output you return should be a color image (3 channel) for processing video below
    # TODO: put your pipeline here,
    # you should return the final output (image where lines are drawn on lanes)
    # NOTE(review): `result` is undefined until the pipeline above is
    # implemented; calling this stub as-is raises NameError by design
    # of the project template.
    return result
```
Let's try the one with the solid white lane on the right first ...
```
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
# fl_image applies process_image to every frame of the clip.
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
```
Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
```
# Embed the rendered output video inline in the notebook.
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
```
## Improve the draw_lines() function
**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".**
**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**
Now for the one with the solid yellow lane on the left. This one's more tricky!
```
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
# Process every frame and write the annotated video, then embed it inline.
yellow_clip = clip2.fl_image(process_image)
%time yellow_clip.write_videofile(yellow_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
```
## Writeup and Submission
If you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file.
## Optional Challenge
Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!
```
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
# Process every frame of the harder challenge video, then embed it inline.
challenge_clip = clip3.fl_image(process_image)
%time challenge_clip.write_videofile(challenge_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))
```
| github_jupyter |
```
import os
# Restrict TensorFlow to GPU index 3 on this machine.
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import numpy as np
import tensorflow as tf
import json
# Load the BPE-encoded parallel corpus (lists of token-id sequences).
with open('dataset-bpe.json') as fopen:
    data = json.load(fopen)
train_X = data['train_X']
train_Y = data['train_Y']
test_X = data['test_X']
test_Y = data['test_Y']
# Reserved token ids used by the decoder.
EOS = 2
GO = 1
vocab_size = 32000
# Append the EOS token (id 2) to every target sequence.
train_Y = [i + [2] for i in train_Y]
test_Y = [i + [2] for i in test_Y]
from tensor2tensor.utils import beam_search
def pad_second_dim(x, desired_size):
    """Zero-pad a rank-3 tensor along axis 1 up to `desired_size`."""
    pad_shape = tf.stack([tf.shape(x)[0], desired_size - tf.shape(x)[1], tf.shape(x)[2]], 0)
    zeros = tf.tile([[[0.0]]], pad_shape)
    return tf.concat([x, zeros], 1)
class Translator:
    """Seq2seq translation model: stacked bidirectional-LSTM encoder,
    Luong-attention LSTM decoder, trained with teacher forcing and
    decoded at inference time with beam search (TF 1.x contrib APIs)."""
    def __init__(self, size_layer, num_layers, embedded_size, learning_rate,
                 beam_width = 5):
        # Build a fresh LSTM cell; `reuse` shares variables between the
        # training and inference decoders.
        def cells(size_layer = size_layer, reuse=False):
            return tf.nn.rnn_cell.LSTMCell(size_layer,initializer=tf.orthogonal_initializer(),reuse=reuse)
        # Multi-layer decoder cell wrapped with Luong attention over the
        # encoder outputs.
        def attention(encoder_out, seq_len, reuse=False):
            attention_mechanism = tf.contrib.seq2seq.LuongAttention(num_units = size_layer,
                                                                    memory = encoder_out,
                                                                    memory_sequence_length = seq_len)
            return tf.contrib.seq2seq.AttentionWrapper(
                cell = tf.nn.rnn_cell.MultiRNNCell([cells(reuse=reuse) for _ in range(num_layers)]),
                attention_mechanism = attention_mechanism,
                attention_layer_size = size_layer)
        # Token-id placeholders: [batch, time]. Sequence lengths are
        # recovered from zero padding.
        self.X = tf.placeholder(tf.int32, [None, None])
        self.Y = tf.placeholder(tf.int32, [None, None])
        self.X_seq_len = tf.count_nonzero(self.X, 1, dtype = tf.int32)
        self.Y_seq_len = tf.count_nonzero(self.Y, 1, dtype = tf.int32)
        batch_size = tf.shape(self.X)[0]
        # Single embedding table shared by encoder input and decoder input/output.
        embeddings = tf.Variable(tf.random_uniform([vocab_size, embedded_size], -1, 1))
        encoder_out = tf.nn.embedding_lookup(embeddings, self.X)
        # Stacked bidirectional encoder; each direction uses size_layer // 2
        # units so the concatenated fw/bw output stays size_layer wide.
        for n in range(num_layers):
            (out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
                cell_fw = cells(size_layer // 2),
                cell_bw = cells(size_layer // 2),
                inputs = encoder_out,
                sequence_length = self.X_seq_len,
                dtype = tf.float32,
                scope = 'bidirectional_rnn_%d'%(n))
            encoder_out = tf.concat((out_fw, out_bw), 2)
        # Merge the final fw/bw LSTM states of the last layer and replicate
        # that state as the initial state for every decoder layer.
        bi_state_c = tf.concat((state_fw.c, state_bw.c), -1)
        bi_state_h = tf.concat((state_fw.h, state_bw.h), -1)
        bi_lstm_state = tf.nn.rnn_cell.LSTMStateTuple(c=bi_state_c, h=bi_state_h)
        encoder_state = tuple([bi_lstm_state] * num_layers)
        # Teacher forcing input: drop the last target token, prepend GO.
        main = tf.strided_slice(self.Y, [0, 0], [batch_size, -1], [1, 1])
        decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)
        # Output projection to vocabulary logits, shared by both decoders.
        dense = tf.layers.Dense(vocab_size)
        # --- Training decoder (teacher forcing) ---
        with tf.variable_scope('decode'):
            decoder_cells = attention(encoder_out, self.X_seq_len)
            states = decoder_cells.zero_state(batch_size, tf.float32).clone(cell_state=encoder_state)
            training_helper = tf.contrib.seq2seq.TrainingHelper(
                inputs = tf.nn.embedding_lookup(embeddings, decoder_input),
                sequence_length = self.Y_seq_len,
                time_major = False)
            training_decoder = tf.contrib.seq2seq.BasicDecoder(
                cell = decoder_cells,
                helper = training_helper,
                initial_state = states,
                output_layer = dense)
            training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
                decoder = training_decoder,
                impute_finished = True,
                maximum_iterations = tf.reduce_max(self.Y_seq_len))
            self.training_logits = training_decoder_output.rnn_output
        # --- Inference decoder (beam search; variables reused from training) ---
        with tf.variable_scope('decode', reuse=True):
            # Beam search requires encoder tensors tiled beam_width times.
            encoder_out_tiled = tf.contrib.seq2seq.tile_batch(encoder_out, beam_width)
            encoder_state_tiled = tf.contrib.seq2seq.tile_batch(encoder_state, beam_width)
            X_seq_len_tiled = tf.contrib.seq2seq.tile_batch(self.X_seq_len, beam_width)
            decoder_cell = attention(encoder_out_tiled, X_seq_len_tiled, reuse=True)
            states = decoder_cell.zero_state(batch_size * beam_width, tf.float32).clone(
                cell_state = encoder_state_tiled)
            predicting_decoder = tf.contrib.seq2seq.BeamSearchDecoder(
                cell = decoder_cell,
                embedding = embeddings,
                start_tokens = tf.tile(tf.constant([GO], dtype=tf.int32), [batch_size]),
                end_token = EOS,
                initial_state = states,
                beam_width = beam_width,
                output_layer = dense,
                length_penalty_weight = 0.0)
            # Cap decoding at twice the source length.
            predicting_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
                decoder = predicting_decoder,
                impute_finished = False,
                maximum_iterations = 2 * tf.reduce_max(self.X_seq_len))
            # Keep only the best (first) beam per example.
            self.fast_result = predicting_decoder_output.predicted_ids[:, :, 0]
        # Masked sequence loss: padding positions contribute zero weight.
        masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)
        self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.training_logits,
                                                     targets = self.Y,
                                                     weights = masks)
        self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(self.cost)
        # Token-level accuracy over non-padding positions only.
        y_t = tf.argmax(self.training_logits,axis=2)
        y_t = tf.cast(y_t, tf.int32)
        self.prediction = tf.boolean_mask(y_t, masks)
        mask_label = tf.boolean_mask(self.Y, masks)
        correct_pred = tf.equal(self.prediction, mask_label)
        correct_index = tf.cast(correct_pred, tf.float32)
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Hyperparameters.
size_layer = 512
num_layers = 2
embedded_size = 256
learning_rate = 1e-3
batch_size = 128
epoch = 20
# Build the graph and initialize variables in an interactive session.
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Translator(size_layer, num_layers, embedded_size, learning_rate)
sess.run(tf.global_variables_initializer())
pad_sequences = tf.keras.preprocessing.sequence.pad_sequences
# Smoke-test the graph on the first ten training pairs.
batch_x = pad_sequences(train_X[:10], padding='post')
batch_y = pad_sequences(train_Y[:10], padding='post')
sess.run([model.fast_result, model.cost, model.accuracy],
         feed_dict = {model.X: batch_x, model.Y: batch_y})
import tqdm
# Train for `epoch` epochs; after each epoch, measure loss/accuracy on the
# test set (no parameter updates are made there).
for e in range(epoch):
    pbar = tqdm.tqdm(
        range(0, len(train_X), batch_size), desc = 'minibatch loop')
    train_loss, train_acc, test_loss, test_acc = [], [], [], []
    for i in pbar:
        # min() keeps the final, possibly smaller, batch within range.
        index = min(i + batch_size, len(train_X))
        batch_x = pad_sequences(train_X[i : index], padding='post')
        batch_y = pad_sequences(train_Y[i : index], padding='post')
        feed = {model.X: batch_x,
                model.Y: batch_y}
        # Running model.optimizer performs one Adam update step.
        accuracy, loss, _ = sess.run([model.accuracy,model.cost,model.optimizer],
                                     feed_dict = feed)
        train_loss.append(loss)
        train_acc.append(accuracy)
        pbar.set_postfix(cost = loss, accuracy = accuracy)
    pbar = tqdm.tqdm(
        range(0, len(test_X), batch_size), desc = 'minibatch loop')
    for i in pbar:
        index = min(i + batch_size, len(test_X))
        batch_x = pad_sequences(test_X[i : index], padding='post')
        batch_y = pad_sequences(test_Y[i : index], padding='post')
        feed = {model.X: batch_x,
                model.Y: batch_y,}
        # Evaluation only: the optimizer op is deliberately not run here.
        accuracy, loss = sess.run([model.accuracy,model.cost],
                                  feed_dict = feed)
        test_loss.append(loss)
        test_acc.append(accuracy)
        pbar.set_postfix(cost = loss, accuracy = accuracy)
    print('epoch %d, training avg loss %f, training avg acc %f'%(e+1,
          np.mean(train_loss),np.mean(train_acc)))
    print('epoch %d, testing avg loss %f, testing avg acc %f'%(e+1,
          np.mean(test_loss),np.mean(test_acc)))
from tensor2tensor.utils import bleu_hook
# Decode the test set with beam search and score BLEU against references.
results = []
for i in tqdm.tqdm(range(0, len(test_X), batch_size)):
    index = min(i + batch_size, len(test_X))
    batch_x = pad_sequences(test_X[i : index], padding='post')
    feed = {model.X: batch_x}
    p = sess.run(model.fast_result,feed_dict = feed)
    result = []
    for row in p:
        # Keep only ids > 3, dropping the reserved special tokens
        # (padding 0, GO=1, EOS=2 per the constants above).
        result.append([i for i in row if i > 3])
    results.extend(result)
# Apply the same special-token filter to the reference targets.
rights = []
for r in test_Y:
    rights.append([i for i in r if i > 3])
bleu_hook.compute_bleu(reference_corpus = rights,
                       translation_corpus = results)
```
| github_jupyter |
# Workshop 2: Regression and Neural Networks
https://github.com/Imperial-College-Data-Science-Society/workshops
1. Introduction to Data Science
2. **Regression and Neural Networks**
3. Classifying Character and Organ Images
4. Demystifying Causality and Causal Inference
5. A Primer to Data Engineering
6. Natural Language Processing (NLP) by using Attention
7. Art and Music using GANs
8. Probabilistic Programming in Practice
9. Missing Data in Supervised Learning
## Today ##
You can access the material via:
- Binder
- Local Jupyter Notebook with a suitable virtual environment and dependencies installed
- The PDF slides
- Following my slides on MS Teams

# Projects
Thoughts?

References I used to prepare this session:
- Past ICDSS workshops
- Patrick Rebischini's notes: http://www.stats.ox.ac.uk/~rebeschi/teaching/AFoL/20/index.html
- https://fleuret.org/ee559/
- https://en.wikipedia.org/wiki/Ordinary_least_squares
- https://www.astroml.org/book_figures/chapter9/fig_neural_network.html
- https://github.com/pytorch/examples/blob/master/mnist/main.py
- Lakshminarayanan et al. (2016) http://papers.nips.cc/paper/5234-mondrian-forests-efficient-online-random-forests.pdf
- Garnelo et al. (2018) https://arxiv.org/pdf/1807.01622.pdf
Other recommended reading:
- Regression and Other Stories by Andrew Gelman, Jennifer Hill and Aki Vehtari
- Elements of Statistical Learning
## Introduction
Suppose we have some $(x_1, y_1),\ldots,(x_{100},y_{100})$ that is generated by $y=2x + \text{noise}$.
```
# Synthetic regression data: y = 2x plus Gaussian noise (sigma = 0.1).
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(0, 1, 30)
noise = 0.1*np.random.normal(size=30)
y = 2*x + noise
plt.scatter(x, y)   # noisy observations
plt.plot(x, 2*x)    # true, noise-free line
plt.xlabel("x")
plt.ylabel("y")
```
In practice, we don't know the underlying data-generating process, but rather we can pose **"hypotheses"** as to how the data is generated. For example in the above example:
- **Linear Models:** $y = x\beta + \sigma\mathcal{N}(0,1)$ where $\beta$ represents the gradient of the slope and $\sigma$ is the amplitude of the noise.
- **Nonparametric models:** $y = f(x) + \sigma\mathcal{N}(0,1)$ where $f$ is some function in some hypothesis function space $\mathcal{H}$. E.g. $f(x) = x\beta$
- Neural networks
- Regression trees, random forests
- Gaussian processes
- $y = \sum_{i=1}^T w_i\times f_i(x) + g(x)$ where $w_i$ represent weights, $f_i\in\mathcal{H}$ and $g(x)$ represents the noise for value $x$.
- etc...
Once we have a hypothesis we can **estimate $f$** using many different tools!
## Out of sample prediction##
Given $x_*$, the prediction would be $f_*(x_*)$, where $f_*$ is the estimated function of $f$.
But first, to formulate the hypothesis, we need to scrutinize the data via exploratory data analysis.
For the data above, clearly a linear model (straight line) plus some small Gaussian noise is sufficient. So the task is just to estimate $\beta$ and $\sigma$ so that the line **fits the dots well**.
## General setting
In practice, we have to deal with data coming in different formats and possible generating processes. E.g. from the exponential family:
- **Count data**: $y_i\sim\text{Poisson}(f(x_i))$ or $y_i\sim\text{NegativeBinomial}(f(x_i))$
- Football goals, disease infection counts
- **Binomial or Multinomial**: $y_i\sim\text{Binomial}(n, f(x_i))$, $y_i\sim\text{Multinomial}(f_1(x_i),\ldots, f_k(x_i))$ etc...
- Coin toss outcomes, customer subscription outcome, classifying digits or characters
- **Gamma**: $y_i\sim \text{Gamma}(k, f(x_i))$
- Rainfall
## Gaussian noise regression
For illustration purposes, let's focus on regression in the setting $y=f(x) + \sigma \mathcal{N}(0,1)$ for $f\in\mathcal{H}$ and $\sigma\geq0$.
## Foundations of Statistical Learning
Previously, I mentioned that we need to build a function that **fits the dots well**. There are 2 types of notions in statistical learning: **prediction** and **estimation**. We will use the following notation:
- $n$ training points
- $X_1,\ldots,X_n $ are *features* in a *feature space* $\mathcal{X}$. Could be a mixture of categorial or continuous features.
- $Y_1,\ldots,Y_n $ are labels/response in a space $\mathcal{Y}$ (e.g. $\mathbb{R}$ or $\mathbb{R}^k$)
- **[For your interest:]** Some probability space $(\mathcal{X}, \mathcal{B}, \mathbb{P})$ where we can measure probabilities of events in the set $\mathcal{B}$. e.g. the set of all possible cointoss outcomes is a set $\mathcal{B}$
- **Hypothesis space** $\mathcal{H}\subset \mathcal{C}:=\{f: f:\mathcal{X}\rightarrow\mathcal{Y}\}$: Restriction of the types of functions we want to use. e.g. for a type of neural network, the multilayer perceptron (MLP) with $m$ layers, we have $\mathcal{H}:= \{f:\mathcal{X}\rightarrow\mathcal{Y}: f(\cdot) = f(\cdot; \sigma_1,\ldots,\sigma_m, W_1,\ldots,W_m), \text{ where }\sigma_i, W_i \text{ are the activation functions and weights} \}$.
- **Prediction Loss function** $\ell:\mathcal{H}\times\mathcal{X}\times\mathcal{Y}\rightarrow \mathbb{R}_+$: To define what **fits the dots well** means.
## Prediction
We want to pick $f$ such that it minimises the **expected or population risk** when a new independent datapoint $(X, Y)$ comes in
$$
f_* := \text{argmin}_{f\in\mathcal{C}} \mathbb{E}_{\mathbb{P}}\left[ \ell(f, X, Y) \right] := \text{argmin}_{f\in\mathcal{C}} r(f)
$$
We denote $f_*$ is the **optimum**, which is unknown. We want to construct an approximation to $f_*$ based on the $n$ training points and the hypothesis $\mathcal{H}$ that controls the complexity of $f$. This approximation is close to
$$
f_{**}:= \text{argmin}_{f\in\mathcal{H}} \mathbb{E}_{\mathbb{P}}\left[ \ell(f, X, Y) \right]
$$
Define the **excess risk** as
$$
r(f) - r(f_{**}) = [r(f) - r(f_*)] + [r(f_*) - r(f_{**})],
$$
where $f\in \mathcal{H}$. **The goal of statistical learning for prediction is to minimise the excess risk** with respect to the sample size $n$ and the space of functions $\mathcal{H}$. Note that the decomposition yields an approximation and estimation error.
Difficult to do in practice, so we need **empirical risk minimisation** via the observed training set $(X_i,Y_i)_{i=1}^n$ as a proxy for the expected/population risk:
$$
R(f):= \frac{1}{n}\sum_{i=1}^n \ell(f, X_i, Y_i) ,\quad f_*^R := \text{argmin}_{f\in\mathcal{H}} R(f)
$$
to minimise
$$
r(f) - r(f_{**}).
$$
## Key takeaways and Bigger Picture:##
- It is important to understand the tradeoff between optimisation and statistical errors.
- Optimisation is only 1 part of the inequality, and vice versa for statistical modelling errors.
More details in Rebischini's notes!
## Estimation
We need:
- Some training set of size $n$ generated by $f_*\in\mathcal{H}$
- Loss function $\ell:\mathcal{H}\times\mathcal{H}\rightarrow \mathbb{R}_+$
Return:
- An algorithm that returns an estimate of $f_*$ that minimises and controls $\ell(f,f_*)$ based on the $n$ training points and $\mathcal{H}$.
## Back to Gaussian noise regression
There are lots of ways we can pose this problem. One way is to use
- $\ell(f, X, Y) = ||f(X) - Y||_2^2 = (f(X) - Y)^2$ - the **$\ell_2$ loss**
- $\ell(f, X, Y) = |f(X) - Y|$ - the **$\ell_1$ loss**
- This yields the **mean squared error (MSE)** $R(f) = \frac{1}{n}\sum_{i=1}^n (f(x_i) - y_i)^2$
In theory, these give
$$\ell_2: f_{**}(x) = E[Y|X=x]$$
$$\ell_1: f_{**}(x) = \text{Median}[Y|X=x]$$
Depending on the situation, we can either use approximate gradient-based methods (e.g. gradient descent), Monte Carlo methods or the analytical maximum likelihood estimation (MLE).
## Linear regression
$$
y = X\beta_0 + \sigma\mathcal{N}(0,1)
$$
$\beta_0 = (1,\beta_{0,1},\ldots,\beta_{0,d-1})^T$ - the 1 represents the intercept.
We also call this **ordinary least squares**:
- Assume that $X$ is full rank
$$\hat{\beta} = \text{argmin}_{\beta} ||y- X\beta ||_2^2 \iff X^T(y - X\beta) = 0 \iff \hat{\beta} = X(X^TX)^{-1}X^T y \sim \mathcal{N}(X\beta_0, \sigma^2 X(X^TX)^{-1}X^T)$$
Geometrically: $y - X\hat{\beta} \perp X\beta_0 \iff \hat{\beta}$ minimises $||y-X\beta ||_2^2$
https://en.wikipedia.org/wiki/Ordinary_least_squares:

Can also solve this via gradient descent:
- Remember excess risk <= approximationLoss + statisticalLoss
```
import statsmodels.api as sm
# fit the model
# OLS on (x, y) from the earlier cell; add_constant inserts the intercept column.
m = sm.OLS(y, sm.tools.add_constant(x))
res = m.fit()
# Residual std-dev estimate with n - p = 30 - 2 degrees of freedom.
print(res.summary(), "\n sigma~", np.sqrt(sum(res.resid**2) / (30 - 2)))
```
We can see that our algorithm manages to estimate the parameters of the models pretty well:
- $\hat{\beta}\approx 2$ with $95\%$ confidence intervals [1.945, 2.091]
- $const\approx 0$ with $95\%$ confidence intervals [-0.048, 0.036]
- $\hat{\sigma}^2 \approx 0.01$
- **95% confidence intervals** = if I sample the data infinitely many times and estimate infinitely many confidence intervals, I will expect that 95% of the time the confidence intervals will contain the true, unknown parameter value.
Given $x_*$ as a test point, the prediction would be $\hat{y} = x_*^T \hat{\beta}$.
```
# Fit of the OLS estimator
# Out-of-sample test points from the same process, but on x in [1, 2]
# (outside the training range [0, 1]).
x_test = np.linspace(1, 2, 10)
noise = 0.1*np.random.normal(size=10)
y_test = 2*x_test + noise
plt.figure(figsize=(4,4))
plt.scatter(x, y)
plt.plot(x, res.predict(sm.add_constant(x)))
pred_int_train = res.get_prediction(sm.add_constant(x)).conf_int()
plt.plot(x, pred_int_train[:,0], 'r--', lw=2); plt.plot(x, pred_int_train[:,1], 'r--', lw=2)
# the prediction intervals. Note that they can be larger
plt.scatter(x_test, y_test)
plt.plot(x_test, res.predict(sm.add_constant(x_test)))
pred_int_test = res.get_prediction(sm.add_constant(x_test)).conf_int()
plt.plot(x_test, pred_int_test[:,0], 'r--', lw=2); plt.plot(x_test, pred_int_test[:,1], 'r--', lw=2)
plt.xlabel("x"); plt.ylabel("y")
```
## Other regression methods
- Regression trees: Classification and Regression Trees (CART)
- XGBoost: Tree-boosting algorithm widely used in production pipelines for firms like Amazon
- Random forest: Most popular tree-based algorithm
- Mondrian Forest: Nice statistical and online properties
## Regression tree
A tree is a histogram or step function. $f(x) = \sum_{k=1}^K \beta_k I(x\in \Omega_k)$.
Example of a tree (Lakshminarayanan et al. (2016))

```
from sklearn.tree import DecisionTreeRegressor
# Fit regression model
# Depth-5 tree on the 1-D data; sklearn expects 2-D X, hence expand_dims.
m_tree = DecisionTreeRegressor(max_depth=5)
m_tree.fit(np.expand_dims(x, 1), y)
y_pred = m_tree.predict(np.expand_dims(x_test, 1))
plt.figure(figsize=(4,4))
plt.scatter(x, y)
plt.plot(x, m_tree.predict(np.expand_dims(x, 1)))
# the prediction intervals. Note that they can be larger
plt.scatter(x_test, y_test)
plt.plot(x_test, y_pred)
plt.xlabel("x")
plt.ylabel("y")
```
## XGBoost
```
import xgboost as xgb
num_round = 10  # NOTE(review): unused -- n_estimators below controls the rounds
m_xgb = xgb.XGBRegressor(objective ='reg:squarederror', n_estimators=1000)
m_xgb.fit(np.expand_dims(x, 1), y)
plt.figure(figsize=(4,4))
plt.scatter(x, y)
plt.plot(x, m_xgb.predict(np.expand_dims(x, 1)))
# the prediction intervals. Note that they can be larger
plt.scatter(x_test, y_test)
plt.plot(x_test, m_xgb.predict(np.expand_dims(x_test, 1)))
plt.xlabel("x")
plt.ylabel("y")
```
## Random Forest
This essentially uses bagging:
$$\hat{f}(x) = \frac{1}{T}\sum_{t=1}^T \hat{f}_t(x)$$,
where $\hat{f}_t$ are trained regression trees from randomly sampled (with replacement) sets $\{(x_j, y_j)_j\}_t$ using random feature subsets.
```
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression  # NOTE(review): unused import
m_rf = RandomForestRegressor(max_depth=2, random_state=0)
m_rf.fit(np.expand_dims(x, 1), y)
m_rf.predict(np.expand_dims(x_test, 1))  # NOTE(review): return value discarded
plt.figure(figsize=(4,4))
plt.scatter(x, y)
plt.plot(x, m_rf.predict(np.expand_dims(x, 1)))
# the prediction intervals. Note that they can be larger
plt.scatter(x_test, y_test)
plt.plot(x_test,m_rf.predict(np.expand_dims(x_test, 1)))
plt.xlabel("x")
plt.ylabel("y")
```
## Neural Networks
Neural networks are essentially parametric functions that are composed of **layers of neurons**.
https://www.astroml.org/book_figures/chapter9/fig_neural_network.html

- Multilayer Perceptron (MLP): $f(x) = f_n\circ\cdots f_1(x)$ with $f_j = W_j x + b_j$ with weights $W_j$ and biases $b_j$.
- Can also have other useful layers like max-pooling, batch normalisation, attention and convolution (feature extraction).
- Parameter optimisation via gradient-based methods such as stochastic gradient descent. Using the backpropagation trick, can allow for efficient optimisation. Optimisation speed can be enhanced using multiple GPU or TPU memory.
**Key applications:**
- Image processing: classification, denoising, inpainting, generation
- Function approximations for complex models and algorithms
- Time series, recommendation engines
**Key issues:**
- Overparameterisation: regularisation and sparsity
- Feature engineering
- Vanishing gradient: batch normalisation and dropout
## Image Classification
```
# https://github.com/pytorch/examples/blob/master/mnist/main.py
# Code is in the folder in the main.py script
# Don't run it during the session - might take a while!
# %run main.py
# we will use pretrained models from torchvision
import torch
from torchvision import datasets, transforms
# Normalisation constants commonly used for MNIST-style data -- TODO confirm
# they were also used when the model was trained on KMNIST.
transform=transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
    ])
# https://github.com/rois-codh/kmnist
# We will use the Kuzushiji-MNIST dataset
dataset2 = datasets.KMNIST('../data', train=False,
                           transform=transform, download=True)
test_loader = torch.utils.data.DataLoader(dataset2, batch_size=16)
# Peek at a single batch to check shapes and labels, then stop.
for img_batch, label_batch in test_loader:
    print(img_batch.shape, label_batch)
    break
```

```
from torchvision.utils import make_grid
# Read images into torch.Tensor
all_imgs = img_batch
# Visualize sample on a grid
img_grid = make_grid(all_imgs, nrow=4)
plt.figure(figsize=(5,5))
# make_grid returns (C, H, W); imshow expects (H, W, C)
plt.imshow(img_grid.permute(1, 2, 0).numpy())
from main import Net
model = Net()
# load some model I pretrained on GPU memory into CPU memory
model.load_state_dict(torch.load("kmnist_cnn.pt", map_location=torch.device('cpu')))
model.eval()
# Predicted class per image alongside the true labels (displayed by the notebook)
model(img_batch).argmax(dim=1, keepdim=True), label_batch
```
## Image Inpainting
https://arxiv.org/pdf/1807.01622.pdf

## High-dimensional regression
Overview:
- Classical statistics: $d < \infty$, $n\rightarrow\infty$
- Non-asymptotic: $d<\infty, n<\infty$
- Non-parametric: $d\rightarrow \infty, n<\infty$
- Asymptotic: $d\rightarrow \infty, n\rightarrow\infty$
In the realm of high-dimensional statistics, we usually have $d>n$ or e.g. $d= \mathcal{O}(n^\alpha)$, where $d$ is the number of features and $n$ is the number of data points.
This happens when you have lots of features and the actual data generating features are **sparse**, i.e. $d$ is large but a small $d_0$ is used or are important for the regression. Therefore the usual linear regression assumption that **$X$ is full rank** will not hold. We can, however, introduce regularisation and use the Least-angle regression (LARS; Efron, Hastie, Johnstone and Tibshirani (2004)) algorithm to fit our model. The Lasso
$$
\text{minimise } || y - X\beta||_2^2, \quad \text{subject to } \sum_{j=1}^d |\beta_j| \leq t
$$
We now work with a diabetes dataset:
Suppose we have a large number of features. We want to **select** the ones that can represent the sparsity.
```
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
X, y = datasets.load_diabetes(return_X_y=True)
print("Computing regularization path using the LARS ...")
# Full lasso path via least-angle regression; coefs has one row per feature,
# one column per step along the path.
_, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
# Normalised L1 norm of the coefficients along the path (plot x-axis)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
```
## Important Notice!
We are proposing a constitution change so that we can better deliver quality events to you! Please do participate in our upcoming general meeting (even just coming for a vote will be very helpful!).
Some of the changes:
- Introduction of more official committee roles
- Update of the manifesto e.g. societal goals, motto - we were only founded 3 years ago!
More details to come!
Thank you for your attention!
https://github.com/Imperial-College-Data-Science-Society/workshops
1. Introduction to Data Science
2. **Regression and Neural Networks**
3. Classifying Character and Organ Images
4. Demystifying Causality and Causal Inference
5. A Primer to Data Engineering
6. Natural Language Processing (NLP) by using Attention
7. Art and Music using GANs
8. Probabilistic Programming in Practice
9. Missing Data in Supervised Learning

| github_jupyter |
```
%matplotlib widget
from pathlib import Path
from collections import namedtuple
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import svd
import imageio
from scipy import ndimage
import h5py
import stempy.io as stio
import stempy.image as stim
# Set up Cori paths
ncemhub = Path('/global/cfs/cdirs/ncemhub/4Dcamera/')
scratch = Path('/global/cscratch1/sd/percius/')
# Set up mothership6 paths
hdd1 = Path('/mnt/hdd1')
def com_sparse_iterative(electron_events, scan_dimensions, crop_to=(576,576)):
    """Per-frame centre of mass of sparse electron events (iterative version).

    Each entry of ``electron_events`` holds the flat indices of the events of
    one frame on a 576x576 detector. A first centre-of-mass pass locates the
    pattern; a second pass recomputes it using only events inside a
    ``crop_to`` window around that first estimate. Frames with no events
    yield NaN. Returns a float32 array of shape (2, rows, cols) holding
    (column-COM, row-COM) per scan position.
    """
    n_frames = scan_dimensions[0] * scan_dimensions[1]
    out = np.zeros((2, n_frames), np.float32)
    for idx, events in enumerate(electron_events):
        if len(events) == 0:
            out[:, idx] = (np.nan, np.nan)
            continue
        rows, cols = np.unravel_index(events, (576, 576))
        count = len(events)
        row0 = np.sum(rows) / count
        col0 = np.sum(cols) / count
        # Second pass: restrict to the crop window around the first estimate.
        inside = ((rows > row0 - crop_to[0]) & (rows <= row0 + crop_to[0])
                  & (cols > col0 - crop_to[1]) & (cols <= col0 + crop_to[1]))
        rows = rows[inside]
        cols = cols[inside]
        if len(rows) > 0:
            out[:, idx] = (np.sum(cols) / len(cols), np.sum(rows) / len(rows))
        else:
            # Window emptied the frame; fall back to the first-pass estimate.
            out[:, idx] = (col0, row0)
    return out.reshape((2, scan_dimensions[0], scan_dimensions[1]))
def planeFit(points):
    """Fit a d-dimensional plane to a cloud of points of shape (d, ...).

    Trailing dimensions are flattened so the input becomes a (d, n) array of
    n points. Returns the point-cloud centroid and the plane's unit normal,
    taken as the left-singular vector of the scatter matrix with the
    smallest singular value.
    """
    pts = np.reshape(points, (np.shape(points)[0], -1))  # collapse trailing dimensions
    assert pts.shape[0] <= pts.shape[1], "There are only {} points in {} dimensions.".format(pts.shape[1], pts.shape[0])
    centroid = pts.mean(axis=1)
    centered = pts - centroid[:, np.newaxis]
    scatter = centered @ centered.T  # equivalently np.cov up to scaling
    normal = svd(scatter)[0][:, -1]
    return centroid, normal
# Close all previous windows to avoid too many windows
plt.close('all')
# Load a sparse vacuum 4D camera data set
scan_num = 105
threshold = 4.0
data_dir = Path('2020.11.23')
fname = hdd1 / data_dir / Path('data_scan{}_th{}_electrons.h5'.format(scan_num, threshold))
vacuum_scan = stio.load_electron_counts(fname)
print('File: {}'.format(fname))
print('Initial scan dimensions = {}'.format(vacuum_scan.scan_dimensions))
# Show the summed diffraction pattern
dp = stim.calculate_sum_sparse(vacuum_scan.data, vacuum_scan.frame_dimensions)
fg,ax = plt.subplots(1,1)
ax.imshow(dp)
# Calculate the com iteratively
#com2 = stim.com_sparse(vacuum_scan.data, vacuum_scan.frame_dimensions)
com2 = com_sparse_iterative(vacuum_scan.data, vacuum_scan.scan_dimensions, crop_to=(30, 30))
# These will be removed in a future release
print('Remove the code below in a future release.')
# Nan values to average value (in-place; trailing ';' suppresses notebook echo)
np.nan_to_num(com2[0,],copy=False,nan=np.nanmean(com2[0,]))
np.nan_to_num(com2[1,],copy=False,nan=np.nanmean(com2[1,]));
# NOTE(review): reshape swaps scan axes via [::-1] -- presumably converting
# (rows, cols) to image order; verify against com_sparse_iterative's output shape.
com2 = com2.reshape((2,*vacuum_scan.scan_dimensions[::-1]))
# Remove the outliers by median filtering
com2_filt = np.zeros_like(com2)
com2_filt[0,] = ndimage.median_filter(com2[0,], size=(3,3))
com2_filt[1,] = ndimage.median_filter(com2[1,], size=(3,3))
com2_median = np.median(com2_filt,axis=(1,2))
fg,ax = plt.subplots(1, 2,sharex=True,sharey=True)
ax[0].imshow(com2_filt[0,]-com2_median[0],cmap='bwr',vmin=-25,vmax=25)
ax[1].imshow(com2_filt[1,]-com2_median[1],cmap='bwr',vmin=-25,vmax=25)
# Fit the COMs to planes to smooth it out
YY, XX = np.mgrid[0:com2.shape[1],0:com2.shape[2]]
planeCOM0 = planeFit(np.stack((YY,XX,com2_filt[0,])))
planeCOM1 = planeFit(np.stack((YY,XX,com2_filt[1,])))
print(planeCOM0)
print(planeCOM1)
# Generate points on the plane to fit the dataset size
YY, XX = np.mgrid[0:vacuum_scan.scan_dimensions[1], 0:vacuum_scan.scan_dimensions[0]]
# Evaluate each fitted plane (point, normal) on the scan grid:
# plane equation n . (p - p0) = 0 solved for the z coordinate.
normal = planeCOM0[1]
d = np.dot(-planeCOM0[0], normal)
# calculate corresponding z
z0 = (-normal[0]*YY - normal[1]*XX - d)/normal[2]
normal = planeCOM1[1]
d = np.dot(-planeCOM1[0], normal)
# calculate corresponding z
z1 = (-normal[0]*YY - normal[1]*XX - d)/normal[2]
fg,ax = plt.subplots(2,2)
ax[0,0].imshow(com2_filt[0,],cmap='bwr')
ax[0,1].imshow(z0, cmap='bwr')
ax[1,0].imshow(com2_filt[1,],cmap='bwr')
ax[1,1].imshow(z1, cmap='bwr');
# Test centering on the vacuum scan itself.
# NOTE: namedtuple(...) returns a *class*; attributes are assigned on the
# class object rather than an instance (works, but an instance would be cleaner).
vacuum_scan_centered = namedtuple('ElectronCountedData',
                                  ['data', 'scan_dimensions', 'frame_dimensions'])
vacuum_scan_centered.scan_dimensions = vacuum_scan.scan_dimensions
vacuum_scan_centered.frame_dimensions = vacuum_scan.frame_dimensions
vacuum_scan_centered.data = []
# Integer per-position shifts from the fitted planes, centred on the mean deflection.
z0_round = np.round(z0).astype(np.int32) - int(z0.mean())
z1_round = np.round(z1).astype(np.int32) - int(z1.mean())
for ev, x, y in zip(vacuum_scan.data, z0_round.ravel(), z1_round.ravel()):
    evx, evy = np.unravel_index(ev, (576,576))
    evx_centered = evx - y
    evy_centered = evy - x
    # Drop events pushed off the detector by the shift.
    # Bug fix: the third operator was '*' (elementwise multiply) instead of '&';
    # numerically equivalent for 0/1 booleans, but now consistent with the
    # analogous experiment-centering loop below.
    keep = (evx_centered < 576) & (evx_centered >= 0) & (evy_centered < 576) & (evy_centered >= 0)
    evx_centered = evx_centered[keep]
    evy_centered = evy_centered[keep]
    vacuum_scan_centered.data.append(np.ravel_multi_index((evx_centered,evy_centered), (576,576)))
vacuum_scan_centered.data = np.array(vacuum_scan_centered.data, dtype=object)
# Compare summed diffraction patterns before and after recentering.
dp = stim.calculate_sum_sparse(vacuum_scan.data, vacuum_scan.frame_dimensions)
dp2 = stim.calculate_sum_sparse(vacuum_scan_centered.data, vacuum_scan_centered.frame_dimensions)
fg,ax = plt.subplots(1,2,sharex=True,sharey=True)
ax[0].imshow(dp)
ax[1].imshow(dp2)
# Compare com_filtered to plane fit
# Nan values to average value (in-place)
np.nan_to_num(com2[0,],copy=False,nan=np.nanmean(com2[0,]))
np.nan_to_num(com2[1,],copy=False,nan=np.nanmean(com2[1,]))
# Top row: fitted planes; bottom row: residuals (raw COM minus plane).
fg,ax = plt.subplots(2,2)
ax[0,0].imshow(z0,cmap='bwr')
ax[0,1].imshow(z1,cmap='bwr')
ax[1,0].imshow(com2[0,]-z0,cmap='bwr')
ax[1,1].imshow(com2[1,]-z1,cmap='bwr')
```
# Apply to experiment from a sample
```
# Load a sparse 4D camera data set
scan_num =102
threshold = 4.0
data_dir = Path('2020.11.23')
fname = hdd1 / data_dir / Path('data_scan{}_th{}_electrons.h5'.format(scan_num, threshold))
# NOTE(review): the commented-out alternative below mixes a literal
# '{scan_num}' with positional .format() args -- it would not interpolate
# scan_num if re-enabled.
#fname = Path.home() / Path('data/temp/data_scan{scan_num}_th{}_electrons.h5'.format(scan_num, threshold))
experiment = stio.load_electron_counts(fname)
print('File: {}'.format(fname))
print('Initial scan dimensions = {}'.format(experiment.scan_dimensions))
# Generate points on the plane to fit the dataset size
# Scale factors between the experiment scan grid and the vacuum scan grid.
factor = (experiment.scan_dimensions[0] / vacuum_scan.scan_dimensions[0],
experiment.scan_dimensions[1] / vacuum_scan.scan_dimensions[1])
# Generate positions between vacuum positions
YY, XX = np.mgrid[0:experiment.scan_dimensions[0], 0:experiment.scan_dimensions[1]]
# NOTE(review): YY is divided by factor[1] and XX by factor[0] -- presumably an
# intentional axis swap to match the plane fit's (row, col) order; verify.
YY = YY.astype('<f4') / factor[1]
XX = XX.astype('<f4') / factor[0]
# Evaluate the vacuum-scan planes on the (rescaled) experiment grid.
normal = planeCOM0[1]
d = np.dot(-planeCOM0[0], normal)
# calculate corresponding z
z0 = (-normal[0]*YY - normal[1]*XX - d)/normal[2]
normal = planeCOM1[1]
d = np.dot(-planeCOM1[0], normal)
# calculate corresponding z
z1 = (-normal[0]*YY - normal[1]*XX - d)/normal[2]
# Round to integers
z0_round = np.round(z0 - z0.mean()).astype(np.int64)
z1_round = np.round(z1 - z1.mean()).astype(np.int64)
fg,ax = plt.subplots(2,2)
ax[0,0].imshow(z0,cmap='bwr')
ax[0,1].imshow(z0_round, cmap='bwr')
ax[1,0].imshow(z1,cmap='bwr')
ax[1,1].imshow(z1_round, cmap='bwr');
# Use the fitted plane from the vacuum scan to recenter the events
scan_centered = []
for ev, x, y in zip(experiment.data, z0_round.ravel(), z1_round.ravel()):
    evx, evy = np.unravel_index(ev, (576,576))
    evx_centered = evx - y # need to flip x and y
    evy_centered = evy - x
    # Some events will get pushed off the detector by the shift. Remove them
    keep = (evx_centered < 576) & (evx_centered >= 0) & (evy_centered < 576) & (evy_centered >= 0)
    evx_centered = evx_centered[keep]
    evy_centered = evy_centered[keep]
    scan_centered.append(np.ravel_multi_index((evx_centered,evy_centered), (576,576)))
scan_centered = np.array(scan_centered, dtype=object)
# Create a stempy counted data namedtuple
# (attributes set on the namedtuple *class*, mirroring the vacuum-scan cell)
experiment_centered = namedtuple('ElectronCountedData',
['data', 'scan_dimensions', 'frame_dimensions'])
experiment_centered.data = scan_centered
# NOTE(review): [::1] is a no-op copy -- was [::-1] (axis reversal) intended,
# as used for the com2 reshape earlier? Verify before relying on orientation.
experiment_centered.scan_dimensions = experiment.scan_dimensions[::1]
experiment_centered.frame_dimensions = experiment.frame_dimensions
dp = stim.calculate_sum_sparse(experiment.data, experiment.frame_dimensions)
dp2 = stim.calculate_sum_sparse(experiment_centered.data, experiment_centered.frame_dimensions)
# Log-scale the centred pattern (+0.1 avoids log(0)).
fg,ax = plt.subplots(2,1,sharex=True,sharey=True)
ax[0].imshow(dp)
ax[1].imshow(np.log(dp2+0.1))
# Save to a stempy dataset
out_name = fname.with_name('data_scan{}_th{}_electrons_centered.h5'.format(scan_num,threshold))
stio.save_electron_counts(out_name,experiment_centered)
print(out_name)
```
| github_jupyter |
```
# input
# pmid list: ../../data/ft_info/ft_id_lst.csv
# (ft json file) ../../data/raw_data/ft/
# (ft abs file) ../../data/raw_data/abs/
# result file at ../../data/raw_data/ft/T0 (all section)
# ../../data/raw_data/ft/T1 (no abs), etc
# setp 1 download full-text
import pandas as pd
import pickle
import os
# get pmid list
tar_lst = pd.read_csv("../../data/ft_info/ft_id_lst.csv", dtype=str)
tar_lst.head()
tar_lst.PMID
# _f = os.path.join('../../data/ft_info/', 'PMID_lst')
# with open(_f, 'rb' ) as fp:
# PMID_lst = pickle.load(fp)
# print(len(PMID_lst))
nl = list(tar_lst.PMID.values)
import numpy as np
np.random.shuffle(nl)
nl
# PMID_lst
# if we only have pmid, we can obatin pmcid by:
# # get pmcid
# # !wget https://ftp.ncbi.nlm.nih.gov/pub/pmc/PMC-ids.csv.gz
# # !gzip -d PMC-ids.csv.gz
# _pmc_id_map = pd.read_csv("../../data/ft_info/PMC-ids.csv", dtype=str)
# pmc_id_map = _pmc_id_map[['PMCID', 'PMID']]
# pmc_id_map = pmc_id_map[pmc_id_map.notnull()]
# tar_lst = pmc_id_map[pmc_id_map['PMID'].isin(pmid_l)]
import sys
sys.path.insert(1, '..')
# import download_data
# # downloading full-text
# tar_id_lst = list(tar_lst.PMCID.values)
# tar_dir = '../../data/raw_data/ft/'
# url_prefix = "https://www.ncbi.nlm.nih.gov/research/pubtator-api/publications/export/biocjson?pmcids="
# _type='json'
# cores=3
# hit_rec = download_data.download_from_lst_hd(tar_id_lst, tar_dir, url_prefix, _type, cores)
# # downloading abs (as some full-text have no abstract)
# tar_id_lst = list(tar_lst.PMID.values)
# tar_dir = '../../data/raw_data/abs/'
# url_prefix = "https://www.ncbi.nlm.nih.gov/research/pubtator-api/publications/export/pubtator?pmids="
# _type='abs'
# cores=3
# hit_rec = download_data.download_from_lst_hd(tar_id_lst, tar_dir, url_prefix, _type, cores)
# Build the label file: keep only annotated rows (2nd_label != 0) and the
# (pmid, gene, disease) keys, renaming 2nd_label -> label.
s_df = pd.read_csv('../../data/ft_info/ft_500_n.tsv', sep = '\t')
print('annotated table shape', s_df.shape)
s_df = s_df[s_df['2nd_label'] != 0][['pmid', 'geneId', 'diseaseId', '2nd_label']]
s_df.rename(columns={'2nd_label':'label'}, inplace=True)
s_df.to_csv('../../data/ft_info/labels_n.tsv', sep=',', index=False)
%load_ext autoreload
%autoreload 2
import parse_data
import subprocess
# original all sections
# in_pmid_d = '/mnt/bal31/jhsu/old/data/ptc/raw_ft/abs_off/'
# in_pmcid_d = '/mnt/bal31/jhsu/old/data/ptc/raw_ft/ft/'
# import subprocess
# _lst = list(tar_lst.PMID.values)
# for i in list(_lst):
# cmd = 'cp ' + '/mnt/bal31/jhsu/old/data/ptc/raw_ft/abs_off/' + i + ' ' + '../../data/raw_data/abs/' + i
# subprocess.check_call(cmd, shell=True)
# _lst = list(tar_lst.PMCID.values)
# for i in list(_lst):
# cmd = 'cp ' + '/mnt/bal31/jhsu/old/data/ptc/raw_ft/ft/' + i + ' ' + '../../data/raw_data/ft/' + i
# subprocess.check_call(cmd, shell=True)
# T0: parse full text keeping ALL sections (see the ori_tar list below).
!mkdir -p ../../data/raw_data/ft/T0
out_dir = '../../data/raw_data/ft/T0/'
tar_id_lst = list(tar_lst.PMCID.values)
in_pmid_d = '../../data/raw_data/abs/'
in_pmcid_d = '../../data/raw_data/ft/'
parse_t = 'ft' # 'ft' or 'abs'
if_has_s_f = True # if have section file
ori_tar=['TITLE', 'ABSTRACT', 'INTRO', 'METHODS', 'RESULTS', 'DISCUSS', 'CONCL']
is_SeFi = True
parse_data.parse_data_lst_hd(tar_id_lst, in_pmid_d, in_pmcid_d, parse_t, out_dir, if_has_s_f, ori_tar, is_SeFi)
#optional normalize the annotation
# shell=True on a string built from fixed local paths (no untrusted input here)
cmd = 'python ../normalize_ann.py ' + '--in_f ' + out_dir + 'anns.txt' + ' ' + '--out_f ' + out_dir + 'anns_n.txt'
print(cmd)
subprocess.check_call(cmd, shell=True)
!mkdir -p ../../data/raw_data/ft/T1
out_dir = '../../data/raw_data/ft/T1/'
tar_id_lst = list(tar_lst.PMCID.values)
in_pmid_d = '../../data/raw_data/abs/'
in_pmcid_d = '../../data/raw_data/ft/'
parse_t = 'ft' # 'ft' or 'abs'
if_has_s_f = True # if have section file
ori_tar=['TITLE', '#', 'INTRO', 'METHODS', 'RESULTS', 'DISCUSS', 'CONCL']
is_SeFi = True
parse_data.parse_data_lst_hd(tar_id_lst, in_pmid_d, in_pmcid_d, parse_t, out_dir, if_has_s_f, ori_tar, is_SeFi)
#optional normalize the annotation
cmd = 'python ../normalize_ann.py ' + '--in_f ' + out_dir + 'anns.txt' + ' ' + '--out_f ' + out_dir + 'anns_n.txt'
print(cmd)
subprocess.check_call(cmd, shell=True)
!mkdir -p ../../data/raw_data/ft/T2
out_dir = '../../data/raw_data/ft/T2/'
#ori_tar=['TITLE', 'ABSTRACT', 'INTRO', 'METHODS', 'RESULTS', 'DISCUSS', 'CONCL']
ori_tar=['TITLE', 'ABSTRACT', '#', 'METHODS', 'RESULTS', 'DISCUSS', 'CONCL']
tar_id_lst = list(tar_lst.PMCID.values)
in_pmid_d = '../../data/raw_data/abs/'
in_pmcid_d = '../../data/raw_data/ft/'
parse_t = 'ft' # 'ft' or 'abs'
if_has_s_f = True # if have section file
is_SeFi = True
parse_data.parse_data_lst_hd(tar_id_lst, in_pmid_d, in_pmcid_d, parse_t, out_dir, if_has_s_f, ori_tar, is_SeFi)
#optional normalize the annotation
cmd = 'python ../normalize_ann.py ' + '--in_f ' + out_dir + 'anns.txt' + ' ' + '--out_f ' + out_dir + 'anns_n.txt'
print(cmd)
subprocess.check_call(cmd, shell=True)
!mkdir -p ../../data/raw_data/ft/T3
out_dir = '../../data/raw_data/ft/T3/'
#ori_tar=['TITLE', 'ABSTRACT', 'INTRO', 'METHODS', 'RESULTS', 'DISCUSS', 'CONCL']
ori_tar=['TITLE', 'ABSTRACT', 'INTRO', '#', 'RESULTS', 'DISCUSS', 'CONCL']
tar_id_lst = list(tar_lst.PMCID.values)
in_pmid_d = '../../data/raw_data/abs/'
in_pmcid_d = '../../data/raw_data/ft/'
parse_t = 'ft' # 'ft' or 'abs'
if_has_s_f = True # if have section file
is_SeFi = True
parse_data.parse_data_lst_hd(tar_id_lst, in_pmid_d, in_pmcid_d, parse_t, out_dir, if_has_s_f, ori_tar, is_SeFi)
#optional normalize the annotation
cmd = 'python ../normalize_ann.py ' + '--in_f ' + out_dir + 'anns.txt' + ' ' + '--out_f ' + out_dir + 'anns_n.txt'
print(cmd)
subprocess.check_call(cmd, shell=True)
!mkdir -p ../../data/raw_data/ft/T4
out_dir = '../../data/raw_data/ft/T4/'
#ori_tar=['TITLE', 'ABSTRACT', 'INTRO', 'METHODS', 'RESULTS', 'DISCUSS', 'CONCL']
ori_tar=['TITLE', 'ABSTRACT', 'INTRO', 'METHODS', '#', 'DISCUSS', 'CONCL']
tar_id_lst = list(tar_lst.PMCID.values)
in_pmid_d = '../../data/raw_data/abs/'
in_pmcid_d = '../../data/raw_data/ft/'
parse_t = 'ft' # 'ft' or 'abs'
if_has_s_f = True # if have section file
is_SeFi = True
parse_data.parse_data_lst_hd(tar_id_lst, in_pmid_d, in_pmcid_d, parse_t, out_dir, if_has_s_f, ori_tar, is_SeFi)
#optional normalize the annotation
cmd = 'python ../normalize_ann.py ' + '--in_f ' + out_dir + 'anns.txt' + ' ' + '--out_f ' + out_dir + 'anns_n.txt'
print(cmd)
subprocess.check_call(cmd, shell=True)
!mkdir -p ../../data/raw_data/ft/T5
out_dir = '../../data/raw_data/ft/T5/'
#ori_tar=['TITLE', 'ABSTRACT', 'INTRO', 'METHODS', 'RESULTS', 'DISCUSS', 'CONCL']
ori_tar=['TITLE', 'ABSTRACT', 'INTRO', 'METHODS', 'RESULTS', '#', 'CONCL']
tar_id_lst = list(tar_lst.PMCID.values)
in_pmid_d = '../../data/raw_data/abs/'
in_pmcid_d = '../../data/raw_data/ft/'
parse_t = 'ft' # 'ft' or 'abs'
if_has_s_f = True # if have section file
is_SeFi = True
parse_data.parse_data_lst_hd(tar_id_lst, in_pmid_d, in_pmcid_d, parse_t, out_dir, if_has_s_f, ori_tar, is_SeFi)
#optional normalize the annotation
cmd = 'python ../normalize_ann.py ' + '--in_f ' + out_dir + 'anns.txt' + ' ' + '--out_f ' + out_dir + 'anns_n.txt'
print(cmd)
subprocess.check_call(cmd, shell=True)
# NOTE(review): this cell writes to T5 again (same out_dir as the previous
# cell) but drops CONCL instead of DISCUSS. Following the T0..T5 numbering
# pattern it was probably meant to be T6 -- as written it OVERWRITES the
# previous T5 output. Verify the intended directory.
!mkdir -p ../../data/raw_data/ft/T5
out_dir = '../../data/raw_data/ft/T5/'
#ori_tar=['TITLE', 'ABSTRACT', 'INTRO', 'METHODS', 'RESULTS', 'DISCUSS', 'CONCL']
ori_tar=['TITLE', 'ABSTRACT', 'INTRO', 'METHODS', 'RESULTS', 'DISCUSS', '#']
tar_id_lst = list(tar_lst.PMCID.values)
in_pmid_d = '../../data/raw_data/abs/'
in_pmcid_d = '../../data/raw_data/ft/'
parse_t = 'ft' # 'ft' or 'abs'
if_has_s_f = True # if have section file
is_SeFi = True
parse_data.parse_data_lst_hd(tar_id_lst, in_pmid_d, in_pmcid_d, parse_t, out_dir, if_has_s_f, ori_tar, is_SeFi)
#optional normalize the annotation
cmd = 'python ../normalize_ann.py ' + '--in_f ' + out_dir + 'anns.txt' + ' ' + '--out_f ' + out_dir + 'anns_n.txt'
print(cmd)
subprocess.check_call(cmd, shell=True)
!mkdir -p ../../data/raw_data/ft/T6
out_dir = '../../data/raw_data/ft/T6/'
#ori_tar=['TITLE', 'ABSTRACT', 'INTRO', 'METHODS', 'RESULTS', 'DISCUSS', 'CONCL']
ori_tar=['TITLE', 'ABSTRACT', 'INTRO', '#', 'RESULTS', 'DISCUSS', '#']
tar_id_lst = list(tar_lst.PMCID.values)
in_pmid_d = '../../data/raw_data/abs/'
in_pmcid_d = '../../data/raw_data/ft/'
parse_t = 'ft' # 'ft' or 'abs'
if_has_s_f = True # if have section file
is_SeFi = True
parse_data.parse_data_lst_hd(tar_id_lst, in_pmid_d, in_pmcid_d, parse_t, out_dir, if_has_s_f, ori_tar, is_SeFi)
#optional normalize the annotation
cmd = 'python ../normalize_ann.py ' + '--in_f ' + out_dir + 'anns.txt' + ' ' + '--out_f ' + out_dir + 'anns_n.txt'
print(cmd)
subprocess.check_call(cmd, shell=True)
# import json
# _pmcid = 'PMC7102640'
# _pmid = '32171866'
# abs_f_path = in_pmid_d + _pmid
# print(_pmcid, end=', ')
# with open(in_pmcid_d + _pmcid, encoding='utf-8') as f:
# data = json.load(f)
# rst = parse_data.parse_doc(data, abs_f_path, ori_tar, is_SeFi)
# rst
!mkdir -p ../../data/raw_data/ft/T7
out_dir = '../../data/raw_data/ft/T7/'
ori_tar=['TITLE', 'ABSTRACT', 'INTRO', 'METHODS', 'RESULTS', 'DISCUSS', 'CONCL']
# ori_tar=['TITLE', 'ABSTRACT', 'INTRO', 'METHODS', 'RESULTS', 'DISCUSS', 'CONCL']
# ori_tar=['TITLE', 'ABSTRACT', '#', '#', '#', '#', '#']
tar_id_lst = list(tar_lst.PMCID.values)
in_pmid_d = '../../data/raw_data/abs/'
in_pmcid_d = '../../data/raw_data/ft/'
parse_t = 'ft' # 'ft' or 'abs'
if_has_s_f = True # if have section file
is_SeFi = True
info_l = parse_data.parse_data_lst_hd(tar_id_lst, in_pmid_d, in_pmcid_d, parse_t, out_dir, if_has_s_f, ori_tar, is_SeFi)
# #optional normalize the annotation
# cmd = 'python ../normalize_ann.py ' + '--in_f ' + out_dir + 'anns.txt' + ' ' + '--out_f ' + out_dir + 'anns_n.txt'
# print(cmd)
# subprocess.check_call(cmd, shell=True)
info_l
info_df = pd.DataFrame(info_l, columns=['pmid', 'pmcid', 'ttl_l', 'abs_l', 'par_l', 'txt_l', 'g_#', 'd_#', 'gd_p', 'gd_vp'])
info_df.describe()
# READING Sentences and tokenizer
import argparse
import sys
import os
import pandas as pd
import numpy as np
from raw import load_documents_vis
from raw_handler import init_parser, loading_tokenizer
from IPython.display import display, clear_output, HTML
import ipywidgets as widgets
import sys
sys.argv = ['']
parser = init_parser()
args = parser.parse_args()
args.ori_tokenizer = loading_tokenizer(args)
args.token_voc_l = len(args.ori_tokenizer)
print('tokenizer size %d' % (args.token_voc_l))
# RENET2 input data dir, target GDA file, etc
args.raw_data_dir = out_dir
args.fix_snt_n, args.fix_token_n = 400, 54
print('fix input sentences# %d, tokens# %d, batch size %d' % (args.fix_snt_n, args.fix_token_n, args.batch_size))
args.no_cache_file = True
text_path = os.path.join(args.raw_data_dir, args.file_name_doc)
sentence_path = os.path.join(args.raw_data_dir, args.file_name_snt)
ner_path = os.path.join(args.raw_data_dir, args.file_name_ann)
all_ori_seq, ner_df, session_info, ori_ner, all_sentence, all_session_map = load_documents_vis(text_path, sentence_path, ner_path, args.ori_tokenizer, args)
def get_token_l(snt_l):
    """Return (total token count, sentence count) for the sentences in *snt_l*."""
    n_tokens = 0
    n_sentences = 0
    for sentence in snt_l:
        # tokenize() comes from utils.tokenizer (imported elsewhere in the notebook).
        n_tokens += len(tokenize(sentence))
        n_sentences += 1
    return n_tokens, n_sentences
# all_sentence['16963499']
get_token_l(all_sentence['16963499'])
# Compute corpus-wide average tokens and sentences per document.
t_s, t_t = 0, 0  # running totals: tokens, sentences
for k, v in all_sentence.items():
    _a, _b = get_token_l(v)  # (token count, sentence count) for one document
    t_s += _a
    t_t += _b
# Average per document; derive the divisor from the actual corpus size instead
# of the hard-coded 500 the original scratch cell assumed.
n_docs = max(len(all_sentence), 1)  # guard against an empty corpus
print(t_s / n_docs, t_t / n_docs)
from utils.tokenizer import tokenize  # project-local tokenizer
tokenize(all_sentence['16963499'][0])  # sanity-check tokenization of one sentence
all_sentence['16963499'][0]
# Removed two leftover lines that raised NameError when the cell was run top
# to bottom: `token_s / 500` (variable was renamed to t_s above) and the bare
# identifier `PMID32171866` (defined nowhere).
# 12.4*16.1  -- scratch arithmetic (avg sentences x avg tokens), kept for reference
```
| github_jupyter |
```
from __future__ import print_function
import warnings
warnings.filterwarnings(action='ignore')
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, BatchNormalization
import os
batch_size = 16
num_classes = 10
epochs = 15
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print(y_train)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
y_train
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
zca_epsilon=1e-06, # epsilon for ZCA whitening
rotation_range=60, # randomly rotate images in the range (degrees, 0 to 180)
# randomly shift images horizontally (fraction of total width)
width_shift_range=0.1,
# randomly shift images vertically (fraction of total height)
height_shift_range=0.3,
shear_range=0., # set range for random shear
zoom_range=1.5, # set range for random zoom
channel_shift_range=0., # set range for random channel shifts
# set mode for filling points outside the input boundaries
fill_mode='nearest',
cval=0., # value used for fill_mode = "constant"
horizontal_flip=True, # randomly flip images
vertical_flip=True, # randomly flip images
# set rescaling factor (applied before any other transformation)
rescale=None,
# set function that will be applied on each input
preprocessing_function=None,
# image data format, either "channels_first" or "channels_last"
data_format=None,
# fraction of images reserved for validation (strictly between 0 and 1)
validation_split=0.0)
# Compute quantities required for feature-wise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
filepath = "./savemodels/cifar10-model-{epoch:02d}-{val_accuracy:.2f}.hdf5"
checkpoint = keras.callbacks.ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
# CNN for CIFAR-10: two conv blocks (32 then 64 filters, 3x3 kernels) each
# followed by 2x2 max-pooling and dropout, then a 512-unit dense layer and a
# 10-way softmax classifier.
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.summary()
# NOTE(review): lowercase `rmsprop` with `lr`/`decay` kwargs is the legacy
# Keras API; newer Keras exposes RMSprop(learning_rate=...) -- confirm the
# installed Keras version before porting this cell.
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
# Categorical cross-entropy matches the one-hot labels produced above.
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
# NOTE(review): fit_generator is deprecated in newer Keras/TF in favor of
# model.fit, which accepts generators directly.
history = model.fit_generator(datagen.flow(x_train, y_train,
batch_size=batch_size),
epochs=epochs,
validation_data=(x_test, y_test),
workers=4, callbacks=[checkpoint])
# Plot training vs. validation accuracy per epoch, then evaluate on the test set.
validation_accuracy = history.history['val_accuracy']
training_accuracy = history.history['accuracy']
# Derive the x-axis from the recorded history length instead of a hard-coded 15,
# so the plot stays correct if the epoch count changes or training stops early.
epochs_range = range(len(training_accuracy))
import matplotlib.pyplot as plt
plt.plot(epochs_range, training_accuracy, 'b+', label='training accuracy')
plt.plot(epochs_range, validation_accuracy, 'bo', label='validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')  # the axis carries both curves, not just validation
plt.legend()
plt.show()
# Final held-out evaluation; scores = [loss, accuracy].
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
```
| github_jupyter |
# A Guided Tour of Ray Core: Multiprocessing Pool

© 2019-2022, Anyscale. All Rights Reserved
[*Distributed multiprocessing.Pool*](https://docs.ray.io/en/latest/multiprocessing.html) makes it easy to scale existing Python applications that use [`multiprocessing.Pool`](https://docs.python.org/3/library/multiprocessing.html) by leveraging *actors*. Ray supports running distributed Python programs with the **multiprocessing.Pool** API using Ray Actors, each running on a [worker node](https://docs.ray.io/en/latest/ray-core/actors.html#faq-actors-workers-and-resources), instead of local processes. This makes it easy to scale existing applications that use `multiprocessing.Pool` from a single node to a cluster.
<img src="../images/dist_multi_pool.png" width="70%" height="35%">
First, let's start Ray…
```
import multiprocessing as mp
import time
import logging
import ray
```
## Multiprocessing Pool example
The following is a simple Python function with a slight delay added (to make it behave like a more complex calculation)...
```
# this could be some complicated and compute intensive task
def func(x):
    """Square *x* after a 1.5 s pause that stands in for heavy computation."""
    time.sleep(1.5)
    return x * x
```
Then, use the Ray's drop-in replacement for [multiprocessing pool](https://docs.ray.io/en/latest/multiprocessing.html)
```
ray.init(
ignore_reinit_error=True,
logging_level=logging.ERROR,
)
```
Now we'll create a *Pool* and distribute its tasks across a cluster (or across the available cores on a laptop):
```
%%time
from ray.util.multiprocessing import Pool
pool = Pool()
for result in pool.map(func, range(10)):
print(result)
```
The distributed version has the trade-off of increased overhead, although now it can scale-out horizontally across a cluster. The benefits would be more pronounced with a more computationally expensive calculation.
```
pool.terminate()
```
Let's define a compute-intensive function that does some matrix computation. Consider that this could be a compute-intensive task doing massive tensor transformation or computation.
```
def task(n):
    """Placeholder for a long, compute-intensive job.

    TODO: perform some matrix computation of size *n* and return the result.
    Currently returns None.
    """
    return None
```
Define a Ray remote task that launches task() across a pool of Actors on the cluster. It creates a pool of Ray Actors, each scheduled on a cluster worker.
```
@ray.remote
def launch_long_running_tasks(num_pool):
    """Run `func` over range(1, 50, 10) on a Ray actor pool of *num_pool* workers.

    Returns the list of results once every pooled call has finished.
    """
    worker_pool = Pool(num_pool)
    gathered = list(worker_pool.map(func, range(1, 50, 10)))
    # Release the pooled actors once all results are in.
    worker_pool.terminate()
    return gathered
```
### Create an Actor-like supervisor that launches all these remote tasks
```
@ray.remote
class LaunchDistributedTasks:
    """Supervisor actor that kicks off the pooled remote computation."""
    def __init__(self, limit=5):
        # Number of workers the downstream actor pool will use.
        self._limit = limit
    def launch(self):
        # Launch the remote task and return its ObjectRef (caller must ray.get it).
        return launch_long_running_tasks.remote(self._limit)
```
### Launch our supervisor
```
hdl = LaunchDistributedTasks.remote(5)
print("Launched remote jobs")
```
### Launched remote jobs
```
values = ray.get(ray.get(hdl.launch.remote()))
print(f" list of results :{values}")
print(f" Total results: {len(values)}")
```
Finally, shutdown Ray
```
ray.shutdown()
```
### Exercises
1. Can you convert task() into a complicated function?
2. Use `task()` in pool.map(task,....)
### Homework
1. Write a Python multiprocessing.pool version of task() and compare the timings with the Ray distributed multiprocessing.pool.
2. Do you see a difference in timings?
3. Write a distributed crawler that downloads gifs or pdfs
| github_jupyter |
# <b>Blog project - Airbnb revenue maximization
## <b> What do I want to learn about the data
1. What is the main factor for high revenue?
2. Which neighbourhoods in Boston and Seattle are giving the most revenue and which ones the least?
3. Is it more lucrative to rent out a full apartment/house or individual rooms?
4. How important are the review scores and which one in particular?
5. Which months are yielding the highest revenue in Boston vs. Seattle?
```
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import data_prep as dp
path_abnb="//RT01FS21.DE.BOSCH.COM/KRS1BBH$/udacity/Data_Science/project_1_blogpost/data/"
sns.set(font_scale=1.5)
sns.set_style('ticks')
```
<b>Load AirBnB data from path_abnb:
- the data is organized by city in folders
- in each folder are the files
- listings.csv
- reviews.csv
- calendar.csv
- the files are parsed and in the case of listings only mutual columns are kept in the join of different city files
- the city is then specified in the new column "folder"
```
listings, calendar, review=dp.load_data(path_abnb)
```
## <b>Understanding the data and preparing it for a linear regression analysis
<b>Sum revenue from calendar up for each listing and provide in new dataframe:
- extract month of date column
- calculate revenue by month and year
- pivot the monthly sums by listing_id in "calendar_revenue"
- provide the unpivoted data in "calendar_stats"
```
calendar_revenue, calendar_stats=dp.analyze_calendar(calendar)
```
<b>Preprocessing of listings:
- df_vis is a merge of listings and the calendar_revenue results
- df is the ML ready dataframe after the merge calender_revenue results
- filtering of irrelevant and duplicate data
- dropping of nan-containing rows
- using mode to convert non-numeric columns
- removal of "$" and "%" and conversion into numeric columns
- in order to quantify the influence of written descriptions or amenities I simply counted the number of characters w/o an analysis if it is a good or bad description
```
df_vis, df=dp.preprocess_listings(listings, calendar_revenue)
```
<b>Plot of the revenue_year distribution<b>:
- most of the distribution is between 0 and \\$200000
- there are lots of outliers above
- I chose \\$150000 as a cut-off to allow my linear regression model to find a stabil solution
```
plt.figure(figsize=(20,8))
p2=sns.histplot(data=df_vis, x='revenue_year')
p2.axvline(150000)
plt.show()
```
<b>Plot of the distribution of "accommodates:
- the overall distribution is quite similar between the two cities
- Boston has more single person listings
- after 8 persons there are only few listings, which is why I chose to put a cut-off there
```
plt.figure(figsize=(10,5))
p3=sns.histplot(data=df_vis, x="accommodates", hue="folder", stat="probability", common_norm=False, multiple="dodge")
p3.axvline(8.5)
plt.show()
```
<b>Plot of listings per "neighbourhood_cleansed" and "property_type":
- in the first trials to apply a linear regression model the neighbourhoods with few listings disturbed the modelling
- I defined a cut-off at a minimum of 10 listings for a neighbourhood (and 20 for a property type) to be included
```
neighbourhoods=df_vis['neighbourhood_cleansed'].value_counts().reset_index().rename(columns={'index':'neighbourhood_cleansed', 'neighbourhood_cleansed':'counts'})
plt.figure(figsize=(17,7.5))
p3=sns.barplot(x="neighbourhood_cleansed", y="counts", data=neighbourhoods)
p3.axhline(10)
plt.xticks(rotation=90, fontsize=9)
plt.show()
property_type=df_vis['property_type'].value_counts().reset_index().rename(columns={'index':'property_type', 'property_type':'counts'})
plt.figure(figsize=(17,7.5))
p3=sns.barplot(x="property_type", y="counts", data=property_type)
p3.axhline(20)
plt.xticks(rotation=90, fontsize=9)
plt.show()
```
## <b>Setting up the model and applying it to the data
I now applied a <b>linear modelling</b> to my dataset df to find the strongest influences to yearly revenue
- for that I dropped all columns that are duplicate (e.g., folder) or contain revenue information
- the r-squared is quite alright with about <b>0.73</b> for this
```
target='revenue_year'
drop=['revenue_year',1,2,3,4,5,6,7,8,9,10,11,12,'folder_Boston','folder_Seattle','revenue_month_mean']
lm_model_rev, X_train_rev=dp.regression_model(df,target, drop)
```
First I looked at the features that <b>positively</b> influence my revenue:
The answer is not surprising for any realtor... <b>location, location, location</b>
The top 20 features that <b>negatively</b> influence my revenue are property types and location
## <b> Evalution of the results and their interpretation
```
#Use the function and look at the top 30
coef_df_rev = dp.coef_weights(lm_model_rev.coef_, X_train_rev)
coef_df_rev.nlargest(20,'coefs')
coef_df_rev.nsmallest(20,'coefs')
```
The top influences were all neighbourhoods, but I also wanted to know which of the review scores matters most:
- location is also here the number 1
- cleanliness is number 2
```
coef_df_rev[coef_df_rev['est_int'].str[0:6]=='review'].sort_values('coefs', ascending=False)
```
The description of the listing or the neighbourhood seems rather unimportant. Only the number of listed amenities seems to count. A rather long name seems to be more repelling than inviting.
```
coef_df_rev[coef_df_rev['est_int'].str[-5:]=='count'].sort_values('coefs', ascending=False)
coef_df_rev[coef_df_rev['est_int'].str[0:5]=='prope'].sort_values('coefs', ascending=False)
coef_df_rev[coef_df_rev['est_int'].str[0:5]=='accom'].sort_values('coefs', ascending=False)
```
The plot of the yearly revenue by neighbourhood reflects the results of the linear model
```
df_2=df_plot.groupby(by=['neighbourhood_cleansed','folder'], as_index=False)['revenue_year'].median()
plt.figure(figsize=(15,6))
p6=sns.scatterplot(data=df_2.nlargest(20,'revenue_year'), x="neighbourhood_cleansed", y="revenue_year", hue="folder")
plt.xticks(rotation=90)
plt.show()
plt.figure(figsize=(15,6))
p7=sns.scatterplot(data=df_2.nsmallest(20,'revenue_year'), x="neighbourhood_cleansed", y="revenue_year", hue="folder")
plt.xticks(rotation=90)
plt.show()
```
How do Boston and Seattle compare in the average revenue per month over a year?
```
df_plot_month=df_plot.merge(calendar_stats, left_on=['id','folder'],right_on=['listing_id','folder'])
df_plot_month_2=df_plot_month.groupby(by=['month','folder'], as_index=False)['revenue'].median()
plt.figure(figsize=(10,5))
p8=sns.scatterplot(data=df_plot_month_2, x="month", y="revenue", hue="folder")
plt.show()
```
<b>df_plot</b> is the version of df_vis that has all those previous cut-offs applied
```
df_plot=df_vis[(df_vis['revenue_year']<150000)&(df_vis['accommodates']<8.5)]
neighbourhoods=df_plot['neighbourhood_cleansed'].value_counts().reset_index()
remove_list=neighbourhoods[neighbourhoods['neighbourhood_cleansed']<10]['index'].to_list()
df_plot=df_plot[~df_plot['neighbourhood_cleansed'].isin(remove_list)]
```
In this plot the <b>revenue_year</b> is plotted against the number of people a listing <b>accommodates</b>
- Up to 8 people it is a near linear function
- the slope of it is not 1 though
- therefore you can expect more revenue per person for smaller listings
```
plt.figure(figsize=(12,7))
p5=sns.regplot(data=df_plot.groupby(by=['accommodates'], as_index=False)['revenue_year'].mean(), x="accommodates", y="revenue_year",ci=None)#, hue="folder")
plt.show()
```
| github_jupyter |
# Graphical view of param sweep results (Figure 3A-C, G)
```
%matplotlib inline
from copy import deepcopy as copy
import json
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from disp import set_font_size, set_n_x_ticks, set_n_y_ticks
from replay import analysis
cc = np.concatenate
HMAP_PARAMS = {
'FR_NTRJ_MIN_BLOWUP': 1.5, # Hz
'FR_NTRJ_TRJ_BLOWUP_RATIO': .3, # frac
'MIN_EVTS_ONE_WAY_FRAC_CALC': 5,
'MIN_EVTS_SPD_CALC': 5,
'MIN_SPD_SPD_CALC': 10, # m/s
}
SCT_SIZE = 150
W_SCALE_PLOT = .05/.0005 # T_M_PC/DT
def evt_frq_hmap(f_name, v_mins, v_maxs, x_ticks, c_ticks, hmap_params):
    """Make heatmaps of spontaneous replay event frequency given param sweep file.

    Parameters
    ----------
    f_name : str
        Param-sweep results file, loaded via analysis.make_df.
    v_mins, v_maxs : dict
        Color-scale limits keyed by 'fr_trj', 'fr_ntrj', 'evt_frq'.
    x_ticks : list or None
        Explicit x-tick positions for all panels (None = automatic).
    c_ticks : list
        Colorbar tick positions for the replay-regime panel.
    hmap_params : dict
        Thresholds for blow-up detection and minimum event counts.

    Returns
    -------
    (df, rslts, header) exactly as produced by analysis.make_df.
    """
    df, rslts, header = analysis.make_df(f_name)
    params_varied = header['params_varied']
    assert len(params_varied) == 2
    n_trials = header['sweep_params']['N_TRIALS']
    # Simulation duration over which event counts are converted to frequencies.
    dur = header['s_params']['schedule']['D_SMLN'] - header['a_params']['MIN_START']
    # get data into numpy arrays
    p_0 = np.array(df[params_varied[0]])
    p_1 = np.array(df[params_varied[1]])
    # Synaptic-weight parameters are rescaled into plot units.
    if params_varied[0].startswith('W_PC'):
        p_0 = p_0 * W_SCALE_PLOT
    if params_varied[1].startswith('W_PC'):
        p_1 = p_1 * W_SCALE_PLOT
    p_0_min = np.min(p_0)
    p_0_max = np.max(p_0)
    p_0_range = p_0_max - p_0_min
    p_1_min = np.min(p_1)
    p_1_max = np.max(p_1)
    p_1_range = p_1_max - p_1_min
    fr_trj = np.array(df['FR_TRJ'])
    fr_ntrj = np.array(df['FR_NTRJ'])
    print('Min fr_trj = ', np.nanmin(fr_trj), 'Hz')
    print('Max fr_trj = ', np.nanmax(fr_trj), 'Hz')
    print('Min fr_ntrj = ', np.nanmin(fr_ntrj), 'Hz')
    print('Max fr_ntrj = ', np.nanmax(fr_ntrj), 'Hz')
    evt_ct = np.array(df['EVT_CT'])
    evt_frq = evt_ct / dur
    print('Min evt_ct = ', np.min(evt_ct))
    print('Max evt_ct = ', np.max(evt_ct))
    print('Min evt_frq = ', np.min(evt_frq), 'Hz')
    print('Max evt_frq = ', np.max(evt_frq), 'Hz')
    one_way_ct = np.array(df['ONE_WAY_CT'])
    one_way_frq = one_way_ct / dur
    print('Min one_way_ct', np.min(one_way_ct))
    print('Max one_way_ct', np.max(one_way_ct))
    # make plots: 3x2 grid of square-marker scatter "heatmaps"
    fig, axs = plt.subplots(3, 2, figsize=(12.5, 15), tight_layout=True)
    ## fr trj
    im_00 = axs[0, 0].scatter(p_0, p_1, SCT_SIZE, c=fr_trj, marker='s', vmin=v_mins['fr_trj'], vmax=v_maxs['fr_trj'])
    axs[0, 0].set_title('LTP-IE-tagged firing rates')
    cb_00 = fig.colorbar(im_00, ax=axs[0, 0])
    cb_00.set_label('Firing rate (Hz)', fontsize=16)
    ## fr ntrj
    im_01 = axs[0, 1].scatter(p_0, p_1, SCT_SIZE, c=fr_ntrj, marker='s', vmin=v_mins['fr_ntrj'], vmax=v_maxs['fr_ntrj'])
    axs[0, 1].set_title('Non-LTP-IE-tagged firing rates')
    cb_01 = fig.colorbar(im_01, ax=axs[0, 1])
    cb_01.set_label('Firing rate (Hz)', fontsize=16)
    ## evt frq
    im_10 = axs[1, 0].scatter(p_0, p_1, SCT_SIZE, c=evt_frq, marker='s', vmin=v_mins['evt_frq'], vmax=v_maxs['evt_frq'])
    axs[1, 0].set_title('Event frequency')
    cb_10 = fig.colorbar(im_10, ax=axs[1, 0])
    cb_10.set_label('Event frequency (Hz)', fontsize=16)
    ## replay zones
    ### plot event freq
    im_11 = axs[1, 1].scatter(
        p_0, p_1, SCT_SIZE, c=evt_frq, marker='s', vmin=v_mins['evt_frq'], vmax=v_maxs['evt_frq'], cmap='plasma', zorder=0)
    axs[1, 1].set_title('Replay regimes')
    cb_11 = fig.colorbar(im_11, ax=axs[1, 1], ticks=c_ticks)
    cb_11.set_label('Event frequency (Hz)', fontsize=16)
    ### overlay "blow-up" sweep points (runaway background activity) in black
    mask = \
        (fr_ntrj >= hmap_params['FR_NTRJ_MIN_BLOWUP']) \
        | (fr_ntrj / fr_trj >= hmap_params['FR_NTRJ_TRJ_BLOWUP_RATIO'])
    axs[1, 1].scatter(
        p_0[mask], p_1[mask], SCT_SIZE+2, c='k', marker='s', zorder=2)
    # Label the top colorbar tick as open-ended (values are clipped at vmax).
    c_tick_labels = [str(c_tick) for c_tick in c_ticks]
    c_tick_labels[-1] = '>{}'.format(c_ticks[-1])
    cb_11.ax.set_yticklabels(c_tick_labels)
    ## plot frequency of one-way events
    im_20 = axs[2, 0].scatter(p_0, p_1, SCT_SIZE, c=one_way_frq, marker='s', vmin=v_mins['evt_frq'], vmax=v_maxs['evt_frq'])
    axs[2, 0].set_title('One-way frequency')
    cb_20 = fig.colorbar(im_20, ax=axs[2, 0])
    cb_20.set_label('One-way frequency (Hz)', fontsize=16)
    ## plot fraction of one-way events
    ### put trial-wise total event counts into np array
    evt_cts_trials = np.transpose([np.array(df['EVT_CT_{}'.format(ctr)]) for ctr in range(n_trials)])
    ### put trial-wise one-way event counts into np array
    one_way_cts_trials = np.transpose([np.array(df['ONE_WAY_CT_{}'.format(ctr)]) for ctr in range(n_trials)])
    ### get trial-wise one-way count fractions
    #### set evt cts to nan if less than min evts to use for one-way frac calc
    # BUG FIX: the original line computed the boolean mask
    # `evt_cts_trials[evt_cts_trials < ...]` but discarded the result, so the
    # MIN_EVTS_ONE_WAY_FRAC_CALC threshold was never applied.  Cast to float
    # first so NaN assignment is valid, then mask in place.
    evt_cts_trials = evt_cts_trials.astype(float)
    evt_cts_trials[evt_cts_trials < hmap_params['MIN_EVTS_ONE_WAY_FRAC_CALC']] = np.nan
    #### divide (NaN-masked entries drop out of the trial mean below)
    one_way_fracs_trials = one_way_cts_trials / evt_cts_trials
    #### take mean across trials
    one_way_fracs = np.nanmean(one_way_fracs_trials, axis=1)
    ## plot one-way event fractions
    im_21 = axs[2, 1].scatter(p_0, p_1, SCT_SIZE, c=one_way_fracs, vmin=0, vmax=1, marker='s', cmap='cool')
    axs[2, 1].set_title('Unidirectional replay')
    cb_21 = fig.colorbar(im_21, ax=axs[2, 1])
    cb_21.set_label('Fraction unidirectional', fontsize=16)
    ## overlay the same blow-up mask on the unidirectional panel
    mask = \
        (fr_ntrj >= hmap_params['FR_NTRJ_MIN_BLOWUP']) \
        | (fr_ntrj / fr_trj >= hmap_params['FR_NTRJ_TRJ_BLOWUP_RATIO'])
    axs[2, 1].scatter(
        p_0[mask], p_1[mask], SCT_SIZE+2, c='k', marker='s', zorder=2)
    # Common axis formatting for all six panels.
    for ax in axs.flatten():
        ax.set_xlim(p_0_min - .05*p_0_range, p_0_max + .05*p_0_range)
        ax.set_ylim(p_1_min - .05*p_1_range, p_1_max + .05*p_1_range)
        if x_ticks is not None:
            ax.set_xticks(x_ticks)
        ax.set_xlabel(params_varied[0])
        ax.set_ylabel(params_varied[1])
        ax.set_facecolor((.8, .8, .8))
        set_font_size(ax, 16)
    set_font_size(axs[2, 1], 20)
    for cb in [cb_00, cb_01, cb_10, cb_11]:
        set_font_size(cb.ax, 16)
    set_font_size(cb_21.ax, 20)
    return df, rslts, header
```
## Dynamics vs $\lambda^{PC,PC}$ and $w^{PC,PC}$ (Figure 3B)
```
v_mins = {'fr_trj': 0, 'fr_ntrj': 0, 'evt_frq': 0}
v_maxs = {'fr_trj': 10, 'fr_ntrj': 10, 'evt_frq': 4}
x_ticks = [2, 2.5, 3, 3.5, 4]
c_ticks = [0, 1, 2, 3, 4]
dfs, rslts, header = evt_frq_hmap(
'PARAM_SWEEP_RESULTS_0_W_PC_PC_L_PC_PC.txt',
v_mins, v_maxs, x_ticks, c_ticks,
HMAP_PARAMS)
```
## Dynamics vs $w^{PC,PC}$ and $w^{PC,INH}$ (Figure 3A)
```
v_mins = {'fr_trj': 0, 'fr_ntrj': 0, 'evt_frq': 0}
v_maxs = {'fr_trj': 10, 'fr_ntrj': 10, 'evt_frq': 4}
x_ticks = [0.0, .02, 0.04, 0.06]
c_ticks = [0, 1, 2, 3, 4]
dfs, rslts, header = evt_frq_hmap(
'PARAM_SWEEP_RESULTS_1_W_PC_PC_W_PC_INH.txt',
v_mins, v_maxs, x_ticks, c_ticks,
HMAP_PARAMS)
```
## Additional param sweep (not in manuscript)
```
v_mins = {'fr_trj': 0, 'fr_ntrj': 0, 'evt_frq': 0}
v_maxs = {'fr_trj': 10, 'fr_ntrj': 10, 'evt_frq': 4}
x_ticks = [2, 2.5, 3, 3.5, 4]
c_ticks = [0, 1, 2, 3, 4]
dfs, rslts, header = evt_frq_hmap(
'PARAM_SWEEP_RESULTS_2_W_PC_PC_SGM_MAX.txt',
v_mins, v_maxs, x_ticks, c_ticks,
HMAP_PARAMS)
```
## Additional param sweep (not in manuscript)
```
v_mins = {'fr_trj': 0, 'fr_ntrj': 0, 'evt_frq': 0}
v_maxs = {'fr_trj': 10, 'fr_ntrj': 10, 'evt_frq': 4}
x_ticks = [0.0, .02, 0.04, 0.06]
c_ticks = [0, 1, 2, 3, 4]
dfs, rslts, header = evt_frq_hmap(
'PARAM_SWEEP_RESULTS_3_L_PC_PC_W_PC_INH.txt',
v_mins, v_maxs, x_ticks, c_ticks,
HMAP_PARAMS)
```
## Dynamics vs $r^G$ and $\sigma_{max}$ (Figure 3C)
```
v_mins = {'fr_trj': 0, 'fr_ntrj': 0, 'evt_frq': 0}
v_maxs = {'fr_trj': 10, 'fr_ntrj': 10, 'evt_frq': 4}
x_ticks = None # [0.0, .0002, 0.0004, 0.0006]
c_ticks = [0, 1, 2, 3, 4]
dfs, rslts, header = evt_frq_hmap(
'PARAM_SWEEP_RESULTS_4_R_G_SGM_MAX.txt',
v_mins, v_maxs, x_ticks, c_ticks,
HMAP_PARAMS)
```
## Additional param sweep (not in manuscript)
```
v_mins = {'fr_trj': 0, 'fr_ntrj': 0, 'evt_frq': 0}
v_maxs = {'fr_trj': 10, 'fr_ntrj': 10, 'evt_frq': 6}
x_ticks = None # [0.0, .0002, 0.0004, 0.0006]
c_ticks = [0, 1, 2, 3, 4]
dfs, rslts, header = evt_frq_hmap(
'PARAM_SWEEP_RESULTS_5_R_G_W_PC_INH.txt',
v_mins, v_maxs, x_ticks, c_ticks,
HMAP_PARAMS)
```
## Additional param sweep (not in manuscript)
```
v_mins = {'fr_trj': 0, 'fr_ntrj': 0, 'evt_frq': 0}
v_maxs = {'fr_trj': 10, 'fr_ntrj': 10, 'evt_frq': 6}
x_ticks = None # [0.0, .0002, 0.0004, 0.0006]
c_ticks = [0, 1, 2, 3, 4]
dfs, rslts, header = evt_frq_hmap(
'PARAM_SWEEP_RESULTS_6_R_G_W_PC_G.txt',
v_mins, v_maxs, x_ticks, c_ticks,
HMAP_PARAMS)
```
## Replay speed param sweep (Figure 3G)
```
def plot_speed_hmap(f_name, v_min, v_max, fig_w=6.5, n_x_ticks=5):
    """Plot heatmap of triggered replay speeds."""
    frame, _rslts, hdr = analysis.make_df_ext(f_name)
    fig, ax = plt.subplots(1, 1, figsize=(fig_w, 5.5), tight_layout=True)
    # Keep only sweep points with enough events and a high enough minimum speed.
    keep = (frame['EVT_CT'] >= HMAP_PARAMS['MIN_EVTS_SPD_CALC']) \
        & (frame['SPD_MIN'] >= HMAP_PARAMS['MIN_SPD_SPD_CALC'])
    valid = frame[keep]
    x_name, y_name = hdr['params_varied']
    xs = valid[x_name]
    ys = valid[y_name]
    # Synaptic-weight axes are rescaled into plot units.
    if x_name.startswith('W_PC'):
        xs = xs * W_SCALE_PLOT
    if y_name.startswith('W_PC'):
        ys = ys * W_SCALE_PLOT
    im = ax.scatter(
        xs, ys, c=valid['SPD_MED'], vmin=v_min, vmax=v_max,
        s=SCT_SIZE, marker='s')
    cb = fig.colorbar(im, ax=ax)
    cb.set_label('Speed (m/s)')
    print('PARAMS:', hdr['m_params'])
    ax.set_xlim(xs.min(), xs.max())
    ax.set_ylim(ys.min(), ys.max())
    ax.set_facecolor((.8, .8, .8))
    set_n_x_ticks(ax, n_x_ticks)
    ax.set_xlabel(x_name)
    ax.set_ylabel(y_name)
    ax.set_title('Replay speed')
    set_font_size(ax, 20)
    set_font_size(cb.ax, 20)
plot_speed_hmap('PARAM_SWEEP_RESULTS_EXT_0_W_PC_PC_L_PC_PC.txt', v_min=None, v_max=None, fig_w=6.5)
```
## Additional param sweep for replay speed (not in manuscript)
```
plot_speed_hmap('PARAM_SWEEP_RESULTS_EXT_1_W_PC_PC_W_PC_INH.txt', v_min=10, v_max=None, fig_w=7, n_x_ticks=4)
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# TensorFlow 2 quickstart for beginners
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/quickstart/beginner"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />Lihat di TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/id/tutorials/quickstart/beginner.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Jalankan di Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/id/tutorials/quickstart/beginner.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />Lihat sumber kode di GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/id/tutorials/quickstart/beginner.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Unduh notebook</a>
</td>
</table>
Note: Komunitas TensorFlow kami telah menerjemahkan dokumen-dokumen ini. Tidak ada jaminan bahwa translasi ini akurat, dan translasi terbaru dari [Official Dokumentasi - Bahasa Inggris](https://www.tensorflow.org/?hl=en) karena komunitas translasi ini adalah usaha terbaik dari komunitas translasi.
Jika Anda memiliki saran untuk meningkatkan terjemahan ini, silakan kirim pull request ke [tensorflow/docs](https://github.com/tensorflow/docs) repositori GitHub.
Untuk menjadi sukarelawan untuk menulis atau memeriksa terjemahan komunitas, hubungi
[daftar docs@tensorflow.org](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs).
Panduan singkat ini akan menggunakan [Keras](https://www.tensorflow.org/guide/keras/overview) untuk:
1. Membangun jaringan saraf tiruan yang mengklasifikasikan gambar.
2. Melatih jaringan saraf tiruan tersebut.
3. Dan, pada akhirnya, mengevaluasi keakuratan dari model.
Ini adalah file notebook [Google Colaboratory](https://colab.research.google.com/notebooks/welcome.ipynb). Program python akan dijalankan langsung dari browser — cara yang bagus untuk mempelajari dan menggunakan TensorFlow. Untuk mengikuti tutorial ini, jalankan notebook di Google Colab dengan mengklik tombol di bagian atas halaman ini.
1. Di halaman Colab, sambungkan ke runtime Python: Di menu sebelah kanan atas, pilih * CONNECT *.
2. Untuk menjalankan semua sel kode pada notebook: Pilih * Runtime *> * Run all *.
Download dan instal TensorFlow 2 dan impor TensorFlow ke dalam program Anda:
```
from __future__ import absolute_import, division, print_function, unicode_literals
# Install TensorFlow
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
```
Siapkan [dataset MNIST](http://yann.lecun.com/exdb/mnist/). Ubah sampel dari bilangan bulat menjadi angka floating-point (desimal):
```
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
```
Build model `tf.keras.Sequential` dengan cara menumpuk lapisan layer. Untuk melatih data, pilih fungsi untuk mengoptimalkan dan fungsi untuk menghitung kerugian:
```
# Feed-forward classifier: flatten 28x28 pixels -> 128-unit ReLU layer ->
# 20% dropout -> 10-way softmax over digit classes.
model = tf.keras.models.Sequential([
  tf.keras.layers.Flatten(input_shape=(28, 28)),
  tf.keras.layers.Dense(128, activation='relu'),
  tf.keras.layers.Dropout(0.2),
  tf.keras.layers.Dense(10, activation='softmax')
])
# Sparse categorical cross-entropy: labels stay as integer class ids,
# no one-hot encoding needed.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
```
Melatih dan mengevaluasi model:
```
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=2)
```
Penggolong gambar tersebut, sekarang dilatih untuk akurasi ~ 98% pada dataset ini. Untuk mempelajari lebih lanjut, baca [tutorial TensorFlow](https://www.tensorflow.org/tutorials/).
| github_jupyter |
# Interpreting Neural Network Weights
Neural nets (especially deep neural nets) are some of the most powerful machine learning algorithms available. However, it can be difficult to understand (intuitively) how they work.
In the first part of this notebook, I highlight the connection between neural networks and template matching--a simple technique for classification that is popular in computer vision and signal processing. I find this observation is helpful for intution about how neural nets classify images--I hope you find this useful too!
In the second part of the notebook, I point out that for convolutional neural nets it can be helpful to think of weights as sliding filters (e.g. edge detecting filters) which in the early network layers detect low-level image features .
## Template Matching
[Template matching](https://en.wikipedia.org/wiki/Template_matching) is used in computer vision to compare images. It does this by treating images as vectors and computing their dot product: very similar images give a large dot product, and very disimilar images give a small (or negative) dot product. Why?
Mathematically, if you represent images as vectors, you can compute the difference between two images $I_1$ and $I_2$ like
$$|I_1 - I_2 |^2 = |I_1|^2 + |I_2|^2 - 2 \, I_1 \cdot I_2$$
Note that the dot product $I_1 \cdot I_2$ between two images is largest when the difference $|I_1 - I_2|$ between images is smallest, and vice versa.
For example, here's a template for each digit:
```
# Load the ten digit templates (0-9) as grayscale images, failing loudly
# if any file is missing, then show them in a 2x5 grid of subplots.
import matplotlib.pyplot as plt
import cv2

templates = []
for digit in range(10):
    img = cv2.imread("templates/{}.png".format(str(digit)), cv2.IMREAD_GRAYSCALE)
    if img is None:
        raise Exception("Error: Failed to load image {}".format(digit))
    templates.append(img)

for position, template in enumerate(templates, start=1):
    plt.subplot(2, 5, position)
    plt.imshow(template, cmap=plt.get_cmap('gray'))
plt.show()
```
We can illustrate template matching by computing the dot products between digit 1 and every other digit. To make the results more robust, we compute the normalized dot product
$$ \frac{I_1 \cdot I_2}{|I_1| |I_2|}$$
(It's important to normalize the dot product--otherwise brighter images will give stronger matches than darker ones, and that would not make sense.)
```
# Score digit 1's template against every template using normalized
# cross-correlation (TM_CCORR_NORMED); 1 should match itself most strongly.
img = templates[1]
for i, template in enumerate(templates):
    score = cv2.matchTemplate(img, template, cv2.TM_CCORR_NORMED)[0][0]
    print(" Normalized dot product between 1 and {} = {}".format(i, score))
```
We can see that 1 is more strongly correlated to 1 than any other digit. That's the principle behind template matching--it can measure image similarity.
Unfortunately, template matching is not robust to changes in image shapes, sizes, rotations, or partial occlusion. However, neural nets can be very robust to such image defects--that's why they are more powerful.
## Viewing Neural Network Weights as Templates
In a neuron inside a neural net, inputs $x$ (a vector) are combined with weights $W$ (another vector) to generate an output. Pictorially
<img src="figs/neuron.png" style="width: 250px;">
The output of the neuron is computed by
$$ f( W \cdot x + b) $$
where $f$ is called the *activation function* and b is called the *bias term*. Most important for this discussion, the dot product $W \cdot x$ resembles template matching.
As we will show, in very simple neural nets (sometimes called *linear classifiers*) we can interpret the weights $W$ as templates--the neural net learns how to perform template matching!
We want to make a linear classifier to recognize digits 0-9. We implement a softmax architecture (shown below) with 10 outputs. For example, if digit 7 is recognized, neuron 7 will have an output close to 1 and the remaining neurons will have outputs close to 0. (FYI, this means we will have to one-hot encode the labels before training.)
The input (the image) is $x$, which we draw as a flattened (1d) vector. There are 10 weight vectors $W_0$ - $W_9$ (one for each neuron).
<img src="figs/nnet.png" style="width: 400px;">
We write the $i^{\mathrm th}$ output as
$$ \mathrm{output}_i = f( W_{i} \cdot x + b_i)$$
As we said, we expect each weight vector $W_i$ learned during training will be a template for digit $i$.
Let's train the neural net on the MNIST data set (a set of 70,000 images of hand-written digits 0-9.) We'll use Keras to implement and train the neural net.
```
#Developed with Keras 2.0.2 and the tensorflow backend
#Load the MNIST data set of handwritten digits 0-9:
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils

(x_train, y_train), (x_test, y_test) = mnist.load_data()

#Invert the image colors, convert to float, and normalize values:
#(pixels end up in [0, 1] with dark strokes on a light background)
x_train = (255 - x_train).astype('float')/255.0
x_test = (255 - x_test).astype('float')/255.0

# plot first 5 images
for i in range(5):
    plt.subplot(1, 5, 1+i)
    plt.imshow(x_train[i], cmap=plt.get_cmap('gray'))
plt.show()

#Let's flatten the 28x28 images to 1-d vectors for processing
image_shape = (28, 28)
num_pixels = 28*28
x_train = x_train.reshape(x_train.shape[0], num_pixels)
x_test = x_test.reshape(x_test.shape[0], num_pixels)

#Now let's one-hot encode the target variable before training
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
```
We can see that after 1-hot encoding, each digit label becomes a 10-d vector. For example, for the digit 1, there is a 1 in position 1 and 0's in all other positions of the 10-d vector:
```
y_train[3]
```
Now let us create the neural net and train it
```
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils
from keras.utils import plot_model
# fix random seed for reproducibility
seed = 123
np.random.seed(seed)
# Define the model:
def linear_classifier_model():
    """Build a softmax linear classifier: one Dense layer, 10 output classes."""
    net = Sequential()
    # A single fully-connected layer maps the flattened 784-pixel image
    # directly to the 10 class probabilities; its weights act as templates.
    net.add(Dense(num_classes, kernel_initializer='normal',
                  activation='softmax', input_shape=(num_pixels,)))
    net.compile(loss='categorical_crossentropy', optimizer='adam',
                metrics=['accuracy'])
    return net
#Instantiate model
model = linear_classifier_model()

#Train model (10 epochs, mini-batches of 200 images)
model.fit(x_train, y_train, validation_data=(x_test, y_test),
          epochs=10, batch_size=200, verbose=2)

# Final evaluation of the model: scores[1] is the test-set accuracy
scores = model.evaluate(x_test, y_test, verbose=0)
print("Baseline Error: %.2f%%" % (100-scores[1]*100))
```
Keras lets us easily visualize the model to check that it has the correct architecture
```
plot_model(model, to_file='figs/model.png', show_shapes=True)
```
<img src="figs/model.png">
Finally, let us grab the weights from layer dense_1, unflatten the shape, and graph them:
```
# Grab the trained weights and biases from the single dense layer.
layer_dense_1 = model.get_layer('dense_1')
weights1 = layer_dense_1.get_weights()[0]
bias1 = layer_dense_1.get_weights()[1]
#Unflatten the (784, 10) weight matrix so each class's weights form a 28x28 image
weights1 = weights1.reshape(28, 28, 10)
#Plot the weights for the first 4 digits
for i in range(4):
    plt.subplot(1, 4, 1 + i)
    plt.imshow(weights1[:, :, i], cmap=plt.get_cmap('gray'))
plt.show()
```
We can see that indeed the learned weights resemble digits 0, 1, 2, 3, ... just as we expected of template matching.
For further details, take a look at [http://cs231n.github.io/linear-classify/](http://cs231n.github.io/linear-classify/)
## Filters in Convolutional Neural Nets
In convolutional neural networks, it is common to use small (3x3 or 5x5) sliding convolutional layers instead of large, fully-connected layers. In that case, it may be more helpful to think of the weights as sliding filters to detect low-level features such as edges, textures, and blobs. Indeed, the learned weights often resemble standard image processing filters. Let us try to see this.
First, let us reshape (unflatten) the data so the images are again rectangular:
```
from keras import backend as K
K.set_image_data_format('channels_last') #specify image format
img_shape = (28, 28, 1)
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
```
Now let us define the neural net:
```
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Convolution2D
from keras.layers.convolutional import MaxPooling2D
def covnet_model():
    """Build a small convolutional network for 28x28x1 digit images."""
    net = Sequential()
    # 32 sliding 7x7 filters over the raw single-channel image.
    net.add(Convolution2D(32, kernel_size=7, strides=1, padding='valid',
                          input_shape=(28, 28, 1), activation='relu'))
    # Downsample by 2 in each direction, then regularize with dropout.
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Dropout(0.2))
    # Flatten the 3d feature maps into a single vector.
    net.add(Flatten())
    # 128-neuron fully-connected hidden layer.
    net.add(Dense(128, activation='relu'))
    # Softmax output: one probability per target class.
    net.add(Dense(num_classes, activation='softmax'))
    net.compile(loss='categorical_crossentropy', optimizer='adam',
                metrics=['accuracy'])
    return net
model_covnet = covnet_model()
```
Let us plot the network architecture
```
plot_model(model_covnet, to_file='figs/model2.png', show_shapes=True)
```
<img src="figs/model2.png" style="width: 350px;">
Now let us train the network
```
# Train the covnet (10 epochs, mini-batches of 200 images)
model_covnet.fit(x_train, y_train, validation_data=(x_test, y_test),
                 epochs=10, batch_size=200, verbose=2)

# Final evaluation of the model: scores[1] is the test-set accuracy
scores = model_covnet.evaluate(x_test, y_test, verbose=0)
print("Baseline Error: %.2f%%" % (100-scores[1]*100))
```
Let us now plot 16 convolutional weights (16 filters) learned in the first convolutional layer:
```
# Grab the first convolutional layer's learned parameters.
layer_conv2d_4 = model_covnet.get_layer('conv2d_1')
weights2 = layer_conv2d_4.get_weights()[0]
bias2 = layer_conv2d_4.get_weights()[1]
#Drop the singleton input-channel axis: (7, 7, 1, 32) -> (7, 7, 32)
weights2 = weights2.reshape(7, 7, 32)
#Now plot the weights for the first 16 filters
plt.figure(figsize=(7,5))
for i in range(16):
    plt.subplot(4, 4, 1 + i)
    plt.imshow(weights2[:, :, i], cmap=plt.get_cmap('gray'))
plt.show()
```
For comparison, let's plot Sobel filters which are used in computer vision to detect horizontal and vertical edges. We see that the neural net weights look similar to Sobel filters.
```
# Build the 5x5 Sobel-style kernels separably: a smoothing profile down
# the rows crossed with a central-difference derivative across the columns.
sobelx = np.outer([1, 4, 6, 4, 1],
                  [1, 2, 0, -2, -1]).astype('float32')/12.0
# The vertical-edge detector is just the transpose of the horizontal one.
sobely = sobelx.transpose()
plt.figure(figsize=(2, 2))
plt.subplot(1, 2, 1)
plt.imshow(sobelx, cmap=plt.get_cmap('gray'))
plt.subplot(1, 2, 2)
plt.imshow(sobely, cmap=plt.get_cmap('gray'))
plt.show()
```
| github_jupyter |
# Qcodes example with Alazar ATS 9360
```
# import all necessary things
%matplotlib nbagg
import qcodes as qc
import qcodes.instrument.parameter as parameter
import qcodes.instrument_drivers.AlazarTech.ATS9360 as ATSdriver
import qcodes.instrument_drivers.AlazarTech.ATS_acquisition_controllers as ats_contr
```
First lets list all the Alazar boards connected to this machine. In most cases this will probably only be a single one
```
ATSdriver.AlazarTech_ATS.find_boards()
ats_inst = ATSdriver.AlazarTech_ATS9360(name='Alazar1')
# Print all information about this Alazar card
ats_inst.get_idn()
```
The Alazar is unusual compared to other instruments as it works together with an acquisition controller. The acquisition controller takes care of post processing and the driver takes care of the communication with the card. At the moment QCoDeS only ships with some very basic acquisition controllers. Here we use a controller that allows us to perform a demodulation of the signal acquired
```
# Instantiate an acquisition controller (In this case we are doing a simple DFT) and
# provide the name of the name of the alazar card that this controller should control
acquisition_controller = ats_contr.Demodulation_AcquisitionController(name='acquisition_controller',
demodulation_frequency=10e6,
alazar_name='Alazar1')
```
The parameters on the Alazar card are set in a slightly unusual way. As communicating with the card is slow and multiple parameters need to be set with the same command, we use a context manager that takes care of syncing all the parameters to the card after we set them.
```
# Set all card parameters in one batch: the syncing() context manager
# collects the writes and pushes them to the Alazar when the block exits.
with ats_inst.syncing():
    # Clocking: 1 GS/s from the internal reference, sampling on rising edges.
    ats_inst.clock_source('INTERNAL_CLOCK')
    ats_inst.sample_rate(1_000_000_000)
    ats_inst.clock_edge('CLOCK_EDGE_RISING')
    ats_inst.decimation(1)
    # Analog front end: both channels DC-coupled, 0.4 V range, 50 Ohm input.
    ats_inst.coupling1('DC')
    ats_inst.coupling2('DC')
    ats_inst.channel_range1(.4)
    ats_inst.channel_range2(.4)
    ats_inst.impedance1(50)
    ats_inst.impedance2(50)
    # Triggering: engine J armed on the external input's positive slope;
    # engine K disabled.
    ats_inst.trigger_operation('TRIG_ENGINE_OP_J')
    ats_inst.trigger_engine1('TRIG_ENGINE_J')
    ats_inst.trigger_source1('EXTERNAL')
    ats_inst.trigger_slope1('TRIG_SLOPE_POSITIVE')
    ats_inst.trigger_level1(160)
    ats_inst.trigger_engine2('TRIG_ENGINE_K')
    ats_inst.trigger_source2('DISABLE')
    ats_inst.trigger_slope2('TRIG_SLOPE_POSITIVE')
    ats_inst.trigger_level2(128)
    # External trigger input: DC-coupled, 2.5 V range, no delay or timeout.
    ats_inst.external_trigger_coupling('DC')
    ats_inst.external_trigger_range('ETR_2V5')
    ats_inst.trigger_delay(0)
    ats_inst.timeout_ticks(0)
    ats_inst.aux_io_mode('AUX_IN_AUXILIARY') # AUX_IN_TRIGGER_ENABLE for seq mode on
    ats_inst.aux_io_param('NONE') # TRIG_SLOPE_POSITIVE for seq mode on
```
This command is specific to this acquisition controller. The kwargs provided here are being forwarded to instrument acquire function
This way, it becomes easy to change acquisition specific settings from the ipython notebook
```
acquisition_controller.update_acquisitionkwargs(#mode='NPT',
samples_per_record=1024,
records_per_buffer=70,
buffers_per_acquisition=1,
#channel_selection='AB',
#transfer_offset=0,
#external_startcapture='ENABLED',
#enable_record_headers='DISABLED',
#alloc_buffers='DISABLED',
#fifo_only_streaming='DISABLED',
#interleave_samples='DISABLED',
#get_processed_data='DISABLED',
allocated_buffers=1,
#buffer_timeout=1000
)
```
Getting the value of the parameter `acquisition` of the instrument `acquisition_controller` performs the entire acquisition protocol. This again depends on the specific implementation of the acquisition controller
```
acquisition_controller.acquisition()
# make a snapshot of the 'ats_inst' instrument
ats_inst.snapshot()
```
Finally show that this instrument also works within a loop
```
dummy = parameter.ManualParameter(name="dummy")
data = qc.Loop(dummy[0:50:1]).each(acquisition_controller.acquisition).run(name='AlazarTest')
qc.MatPlot(data.acquisition_controller_acquisition)
```
| github_jupyter |
# *tridesclous* example with olfactory bulb dataset
```
%matplotlib inline
import time
import numpy as np
import matplotlib.pyplot as plt
import tridesclous as tdc
from tridesclous import DataIO, CatalogueConstructor, Peeler
```
# DataIO = define datasource and working dir
*tridesclous* provides some datasets that can be downloaded.
Note this dataset contains 2 trials in 2 different files. (The original contains more!)
Each file is considered a *segment*. *tridesclous* automatically deals with this.
These 2 files are in **RawData** format; this means a binary format with interleaved channels.
```
#download dataset (cached locally after the first run)
localdir, filenames, params = tdc.download_dataset(name='olfactory_bulb')
print(filenames)
print(params)
print()

#create a DataIO with a fresh working directory
import os, shutil
dirname = 'tridesclous_olfactory_bulb'
if os.path.exists(dirname):
    #remove the working dir if it already exists, for a clean start
    shutil.rmtree(dirname)
dataio = DataIO(dirname=dirname)

# feed DataIO: raw binary source files plus one group of 14 channels
dataio.set_data_source(type='RawData', filenames=filenames, **params)
dataio.add_one_channel_group(channels=range(14))
print(dataio)
```
# CatalogueConstructor
```
cc = CatalogueConstructor(dataio=dataio)
print(cc)
```
## Use automatic parameters and apply the whole chain
tridesclous propose an automatic parameters choice and can apply in one function all the steps.
```
from pprint import pprint
params = tdc.get_auto_params_for_catalogue(dataio, chan_grp=0)
pprint(params)
```
## apply all catalogue steps
```
tdc.apply_all_catalogue_steps(cc, params, verbose=True)
print(cc)
```
## Open CatalogueWindow for visual check
At the end we can save the catalogue.
```
%gui qt5
import pyqtgraph as pg
app = pg.mkQApp()
win = tdc.CatalogueWindow(catalogueconstructor)
win.show()
app.exec_()
cc.make_catalogue_for_peeler()
```
# Peeler
Use automatic parameters.
```
# Configure the Peeler with automatic parameters and the saved catalogue.
peeler_params = tdc.get_auto_params_for_peelers(dataio, chan_grp=0)
pprint(peeler_params)

catalogue = dataio.load_catalogue()
peeler = Peeler(dataio)
peeler.change_params(catalogue=catalogue, **peeler_params)

# Run the spike-sorting pass and report its wall-clock duration.
t1 = time.perf_counter()
peeler.run()
t2 = time.perf_counter()
print('peeler.run', t2-t1)
print()

# Summarize the detected spikes, segment by segment.
for seg_num in range(dataio.nb_segment):
    spikes = dataio.get_spikes(seg_num)
    print('seg_num', seg_num, 'nb_spikes', spikes.size)
    print(spikes[:3])
```
## Open PeelerWindow for visual checking
```
%gui qt5
import pyqtgraph as pg
app = pg.mkQApp()
win = tdc.PeelerWindow(dataio=dataio, catalogue=initial_catalogue)
win.show()
app.exec_()
```
Here is a snapshot of the PeelerWindow
<img src="../doc/img/snapshot_peelerwindow.png">
| github_jupyter |
[//]: #
<img src="idaes_icon.png" width="100">
<h1><center>Welcome to the IDAES Stakeholder Workshop</center></h1>
Welcome and thank you for taking the time to attend today's workshop. Today we will introduce you to the fundamentals of working with the IDAES process modeling toolset, and we will demonstrate how these tools can be applied for optimization applications.
Today's workshop will be conducted using Jupyter Notebooks which provide an online, interactive Python environment for you to use (without the need for installing anything).
Before we get started on some actual examples, let's make sure that everything is working correctly. The cell below contains a command to run a simple test script that will test that everything we will need for today is working properly.
You can execute a cell by pressing `Shift+Enter`.
```
run "notebook_test_script.py"
```
If everything worked properly, you should see a message saying `All good!` and a summary of all the checks that were run. If you don't see this, please contact someone for assistance.
## Outline of Workshop
Today's workshop is divided into four modules which will take you through the steps of setting up a flowsheet within the IDAES framework.
Welcome Module (this one):
* Introduction to Jupyter notebooks and Python
* Introduction to Pyomo
Module 1 will cover:
* how to import models from the core IDAES model library,
* how to create a model for a single unit operation,
* how to define feed and operating conditions,
* how to initialize and solve a single unit model,
* some ways we can manipulate the model and examine the results.
Module 2 will demonstrate:
* how to combine unit models together to form flowsheets,
* tools to initialize and solve flowsheets with recycle loops,
* how to optimize process operating conditions to meet product specifications.
Module 3 will demonstrate:
* how to build new unit models using the IDAES tools,
* how to include new unit models into flowsheets.
## Introduction to Jupyter Notebooks and Python
In this short notebook, we will briefly describe the uses of Jupyter notebooks like this one, and provide you with the necessary background in Python for this workshop. We will cover `if` statements, looping, array-like containers called lists and dictionaries, as well as the use of some external packages for working with data.
There are many additional tutorials online to learn more about the Python syntax.
In Python, variables do not need to be declared before they are used. You can simply define a new variable using `x = 5`.
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
In the cell below, assign a value of 5 to the variable x. Don't forget to type Shift+Enter to execute the line.</div>
You can easily see the value of a variable using the built-in `print` function. For example, to print the value of `x` use `print(x)`.
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
Write the code to print the value of x. Don't forget to hit Shift+Enter to execute the cell.
</div>
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
Now change the value of the x variable to 8 and execute the cell.
</div>
### Jupyter notebooks and execution order
<div class="alert alert-block alert-warning">
<b>Note:</b>
When using Jupyter notebooks, it is very important to know that the cells can be executed out of order (intentionally or not). The state of the environment (e.g., values of variables, imports, etc.) is defined by the execution order.
</div>
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
To see this concept, select the cell above that contained the print statement and execute the cell again using Shift+Enter.
</div>
You should see that the value `8` is now printed. This may seem problematic if you are used to programming in environments where the state is linked to the order of the commands as *written*, not as *executed*.
**Again, notice that the state of the environment is determined by the execution order.**
Note also that the square brackets to the left of the cell show the order that cells were executed. If you scroll to the top, you should see that the code cells show an execution order of `[1]`, `[2]`, `[5]`, and `[4]`, indicating the actual execution order.
There are some useful menu commands at the top of the Jupyter notebook to help with these problems and make sure you retain the execution order as expected.
Some important commands to remember:
* You can clear the current state with the menu item `Kernel | Restart & Clear Output`
* It is often useful to clear the state using the menu command just described, and then execute all the lines **above the currently selected cell** using `Cell | Run All Above`.
* You can clear all the state and re-run the entire notebook using `Kernel | Restart & Run All`.
To show the use of these commands, complete the following.
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
<ul>
<li>Clear the current state (using Kernel | Restart & Clear Output). You should notice that the square brackets that listed the execution order are all now empty.</li>
<li>Select the cell immediately below this text
<li>Re-run all the code up to this point (Cell | Run All Above). You should now see that the square brackets indicate the expected execution order.</li>
<li>Print the value of x again using the print function. You should see the value 8 printed, while the earlier cell printing x shows the value of 5 as expected.</li>
</ul>
</div>
```
print(x)
```
### Python `if` statements
In the code below, we show an example of an `if` statement in Python.
```python
temp = 325
# some other code
if temp > 320:
    print('temperature is too high')
elif temp < 290:  # fix: compare temp, not the unrelated variable x
    print('temperature is too low')
else:
    print('temperature is just right')
```
<div class="alert alert-block alert-warning">
<b>Note:</b>
You will notice that there are no braces to separate blocks in the if-else tree. In Python, indentation is used to delineate blocks of code throughout Python (e.g., if statements, for loops, functions, etc.). The indentation in the above example is not only to improve legibility of the code. It is necessary for the code to run correctly. As well, the number of spaces required to define the indentation is arbitrary, but it must be consistent throughout the code. For example, we could use 3 spaces (instead of the 4 used in the example above, but we could not use 3 for one of the blocks and 4 for another).
</div>
Using the syntax above for the `if` statement, write the following code.
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
<ul>
<li>set the value of the variable T_degC to 20</li>
<li>convert this from degrees Celsius to degrees Fahrenheit (use variable name T_degF)</li>
<li>write an `if` statement that prints a message if the degrees Fahrenheit are below 70</li>
</ul>
</div>
```
T_degC = 20
# some other code
T_degF = (T_degC * 9.0/5.0) + 32.0
# Todo: put the if statement here
```
### Python list containers
Now we will illustrate the use of lists in Python. Lists are similar to vectors or arrays in other languages. A list in Python is indexed by integers from 0 up to the length of the array minus 1. The list can contain standard types (int, float, string), or other objects.
In the next inline exercise, we will create a list that contains the values from 0 to 50 by steps of 5 using a for loop. Note that the python function `range(n)` can be used to iterate from 0 to (n-1) in a for loop. Also note that lists have an `append` method which adds an entry to the end of the list (e.g., if the list `l` currently has 5 elements, then `l.append('temp')` will add the string "temp" as the sixth element). Print the new list after the for loop. If this is done correctly, you should see:
`[0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50]` printed after the cell.
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
Complete the code block below to create the desired list and print the result.
</div>
```
# Create a list with the values 0 to 50 with steps of 5.
xlist = list()
for i in range(11):
# Todo: use the append method of list to append the correct value
print(xlist) # Todo: print the value of xlist to verify the results
```
Python provides a short-hand notation for building lists called *list comprehensions*. An example of a list comprehension that creates all even numbers from 0 to 40 is:
```python
values = [q*2 for q in range(21)]
```
Note also that list comprehensions can include if clauses. For example, we could also implement the above example with the following code:
```python
values = [q for q in range(41) if q % 2 == 0]
```
Note that `%` is the modulus operator (it returns the remainder of the division). Therefore, in the above code, `q % 2` returns 0 if the value in `q` is exactly divisible by 2 (i.e., an even number).
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
In the cell below, create the same xlist that we created previously, but use the list comprehension notation. Verify that this result is correct by printing it.
</div>
```
# Todo: define the list comprehension
print(xlist)
```
You can easily check the length of a list using the python `len(l)` function.
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
Print the length of `xlist`. It should be 11.
</div>
```
# Todo: print the len of the list
```
If you have a list of values or objects, it is easy to iterate through that list in a for loop. In the next inline exercise, we will create another list, `ylist` where each of the values is equal to the corresponding value in `xlist` squared. That is, $y_i = x_i^2$.
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
Modify the code below to create ylist as described above. Print the values in ylist to check the result.
</div>
```
ylist = list()
# Todo: define the for loop to add elements to ylist using the values in xlist
print(ylist)
```
This same task could have been done with a list comprehension (using much less code).
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
Write the list comprehension to compute the values of ylist. Print the values in ylist to check the result.
</div>
```
# Todo: create ylist using a list comprehension and print the result
```
### Python dictionary containers
Another valuable data structure in Python are *dictionaries*. Dictionaries are an associative array; that is, a map from keys to values or objects. The keys can be *almost* anything, including floats, integers, and strings. The code below shows an example of creating a dictionary (here, to store the areas of some of the states).
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
Execute the lines below to see the areas dictionary.
</div>
```
areas = dict()
areas['South Dakota'] = 199742
areas['Oklahoma'] = 181035
print(areas)
```
Dictionaries can contain mixed types (i.e., it is valid to add `areas['Texas'] = 'Really big!'`) but this may lead to unpredictable behavior if the different types are unexpected in other parts of the code.
You can loop through dictionaries in different ways. For example,
```python
d = {'A': 2, 'B': 4, 'D': 16}
for k in d.keys():
# loop through the keys in the dictionary
# access the value with d[k]
print('key=', k, 'value=', d[k])
for v in d.values():
# loop through the values in the dictionary, ignoring the keys
print('value=', v)
for k,v in d.items():
# loop through the entries in the dictionary, retrieving both
# the key and the value
print('key=', k, 'value=', v)
```
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
The areas listed above for the two states are in square kilometers. Modify the loop below to create a new dictionary that contains the areas in square miles. Print the new dictionary to verify the correct behavior. Note that 1 kilometer is equal to 0.62137 miles.
</div>
```
areas_mi = dict()
for state_name, area in areas.items():
# Todo: convert the area to sq. mi and assign to the areas_mi dict.
print(areas_mi)
```
Python also supports dictionary comprehensions much like list comprehensions. For example:
```python
d = {'A': 2, 'B': 4, 'D': 16}
d2 = {k:v**2 for k,v in d.items()}
```
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
Redo the conversion from square kilometers to square miles using a dictionary comprehension.
</div>
```
# Todo: define areas_mi using a dictionary comprehension and print the result
```
### Matplotlib for generating figures
We will now briefly explore the use of the `matplotlib` package to generate figures. Before we do this, we will introduce some other helpful tools.
Another effective way to create a list of evenly spaced numbers (e.g., for plotting or other computation) is to use the `linspace` function from the `numpy` package (more information [here](https://www.numpy.org/devdocs/)). Let's import the `numpy` package and use linspace function to create a list of 15 evenly spaced intervals (that is, 16 points) from 0 to 50 and store this in `xlist`. We will also create the `ylist` that corresponds to the square of the values in `xlist`. Note, we must first import the `numpy` package.
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
Execute the next two cells to see the output.
</div>
```
import numpy as np
xlist = list(np.linspace(0,50,16))
ylist = [x**2 for x in xlist]
print(xlist)
print(ylist)
```
This printed output is not a very effective way to communicate these results. Let's use matplotlib to create a figure of x versus y. A full treatment of the `matplotlib` package is beyond the scope of this tutorial, and further documentation can be found [here](https://matplotlib.org/). For now, we will import the plotting capability and show how to generate a straightforward figure. You can consult the documentation for matplotlib for further details.
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
Execute the next two cells to see the output.
</div>
```
import matplotlib.pyplot as plt
plt.plot(xlist, ylist)
plt.title('Embedded x vs y figure')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(['data'])
plt.show()
```
Next, we will use what you have learned so far to create a plot of `sin(x)` for `x` from 0 to $2 \pi$ with 100 points. Note, you can get the `sin` function and the value for $\pi$ from the `math` package.
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
Execute the import statement in the next cell, and then complete the missing code in the following cell to create the figure discussed above.
</div>
```
import math
x = list(np.linspace(0,2*math.pi, 100))
# Todo: create the list for y
# Todo: Generate the figure
```
### Importing and exporting data using Pandas
Often, it is useful to output the data in a general format so it can be imported into other tools or presented in a familiar application. Python makes this easy with many great packages already available. The next code shows how to use the `pandas` package to create a dataframe and export the data to a csv file that we can import to excel. You could also consult [pandas documentation](http://pandas.pydata.org/pandas-docs/stable/) to see how to export the data directly to excel.
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
Execute the code below that shows how to import some data into a DataFrame from the Pandas package and then export this data to a csv file.
</div>
```
import pandas as pd
df_sin = pd.DataFrame({'x': x, 'sin(x) (radians)': y})
print(df_sin)
df_sin.to_csv('sin_data.csv')
```
If you go back to the browser tab that showed all the Jupyter notebook files and refresh, you will now see that there is a csv file with the x and y data. You can consult the Pandas documentation to learn about the many data analysis and statistical features of the `pandas` package.
### Further Information
Further information of the packages mentioned above can be found using the following links:
* [numpy](https://www.numpy.org/devdocs/)
* [matplotlib](https://matplotlib.org/)
* [pandas](http://pandas.pydata.org/pandas-docs/stable/)
## Introduction to Pyomo
[Pyomo](www.pyomo.org) is an object-oriented, python-based package for equation-oriented (or *algebraic*) modeling and optimization, and the IDAES framework is built upon the Pyomo package. IDAES extends the Pyomo package and defines a class hierarchy for flowsheet based modeling, including definition of property packages, unit models, and flowsheets.
The use of IDAES does not require extensive knowledge about Pyomo, however, it can be beneficial to have some familiarity with the Pyomo package for certain tasks:
* IDAES models are open, and you can interrogate the underlying Pyomo model to view the variables, constraints, and objective functions defined in the model.
* You can use Pyomo components to define your objective function or to create additional constraints.
* Since IDAES models **are** Pyomo models, any advanced meta-algorithms or analysis tools that can be developed and/or used on a Pyomo model can also be used on an IDAES model.
A full tutorial on Pyomo is beyond the scope of this workshop, however in this section we will briefly cover the commands required to specify an objective function or add a constraint to an existing model.
In the next cell, we will create a Pyomo model, and add a couple of variables to that model. When using IDAES, you will define a flowsheet and the addition of variables and model equations will be handled by the IDAES framework.
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
Execute the following cell to create a Pyomo model with some variables that will be used later.
</div>
```
from pyomo.environ import ConcreteModel, Var
model = ConcreteModel()
model.x = Var()
model.y = Var()
```
The Pyomo syntax to define a scalar objective function is shown below. This defines the objective function as $x^2$. By default Pyomo models (and IDAES models) seek to *minimize* the objective function.
```python
model.obj = Objective(expr=model.x**2)
```
To maximize a quantity, include the keyword argument `sense=maximize` as in the following:
```python
model.obj = Objective(expr=model.y, sense=maximize)
```
Note that `Objective` and `maximize` would need to be imported from `pyomo.environ`.
The Pyomo syntax to define a scalar constraint is shown below. This code defines the equality constraint $x^2 + y^2 = 1$.
```python
model.on_unit_circle_con = Constraint(expr=model.x**2 + model.y**2 == 1)
```
Pyomo also supports inequalities. For example, the code for the inequality constraint $x^2 + y^2 \le 1$ is given as the following.
```python
model.inside_unit_circle_con = Constraint(expr=model.x**2 + model.y**2 <= 1)
```
Note that, as before, we would need to include the appropriate imports. In this case `Constraint` would need to be imported from `pyomo.environ`.
Using the syntax shown above, we will now add the objective function: $\min x^2 + y^2$ and the constraint $x + y = 1$.
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
Complete the missing code in the cell below. If this is done correctly, after executing the cell, you should see the log output from the solver and the printed solution should show that x, y, and the objective value are all equal to 0.5.
</div>
```
from pyomo.environ import Objective, Constraint, value, SolverFactory
# Todo: add the objective function here
# Todo: add the constraint here
# now solve the problem
status = SolverFactory('ipopt').solve(model, tee=True) # tee=True shows the solver log
# print the values of x, y, and the objective function at the solution
# Note that the results are automatically stored in the model variables
print('x =', value(model.x))
print('y =', value(model.y))
print('obj =', value(model.obj))
```
Notice that the code above also imported the `value` function. This is a Pyomo function that should be used to retrieve the value of variables in Pyomo (or IDAES) models. Note that you can display the complete list of all variables, objectives, and constraints (with their expressions) using `model.pprint()`. The `display` method is similar to the `pprint` method except that it shows the *values* of the constraints and objectives instead of the underlying expressions. The `pprint` and `display` methods can also be used on individual components.
<div class="alert alert-block alert-info">
<b>Inline Exercise:</b>
Execute the lines of code below to see the output from pprint and display for a Pyomo model.
</div>
```
print('*** Output from model.pprint():')
model.pprint()
print()
print('*** Output from model.display():')
model.display()
```
| github_jupyter |
#### Copyright 2017 Google LLC.
```
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Feature Crosses
**Learning Objectives:**
* Improve a linear regression model with the addition of additional synthetic features (this is a continuation of the previous exercise)
* Use an input function to convert pandas `DataFrame` objects to `Tensors` and invoke the input function in `fit()` and `predict()` operations
* Use the FTRL optimization algorithm for model training
* Create new synthetic features through one-hot encoding, binning, and feature crosses
## Setup
First, as we've done in previous exercises, let's define the input and create the data-loading code.
```
from __future__ import print_function
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
california_housing_dataframe = pd.read_csv("https://storage.googleapis.com/mledu-datasets/california_housing_train.csv", sep=",")
california_housing_dataframe = california_housing_dataframe.reindex(
np.random.permutation(california_housing_dataframe.index))
def preprocess_features(california_housing_dataframe):
  """Prepares input features from California housing data set.

  Args:
    california_housing_dataframe: A Pandas DataFrame expected to contain data
      from the California housing data set.
  Returns:
    A DataFrame that contains the features to be used for the model, including
    synthetic features.
  """
  feature_names = [
      "latitude",
      "longitude",
      "housing_median_age",
      "total_rooms",
      "total_bedrooms",
      "population",
      "households",
      "median_income",
  ]
  processed_features = california_housing_dataframe[feature_names].copy()
  # Synthetic feature: average number of rooms per person in the block.
  processed_features["rooms_per_person"] = (
      california_housing_dataframe["total_rooms"]
      / california_housing_dataframe["population"])
  return processed_features
def preprocess_targets(california_housing_dataframe):
  """Prepares target features (i.e., labels) from California housing data set.

  Args:
    california_housing_dataframe: A Pandas DataFrame expected to contain data
      from the California housing data set.
  Returns:
    A DataFrame that contains the target feature.
  """
  # Scale the target to be in units of thousands of dollars.
  scaled_values = california_housing_dataframe["median_house_value"] / 1000.0
  return pd.DataFrame({"median_house_value": scaled_values})
# Choose the first 12000 (out of 17000) examples for training.
training_examples = preprocess_features(california_housing_dataframe.head(12000))
training_targets = preprocess_targets(california_housing_dataframe.head(12000))
# Choose the last 5000 (out of 17000) examples for validation.
validation_examples = preprocess_features(california_housing_dataframe.tail(5000))
validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))
# Double-check that we've done the right thing.
print("Training examples summary:")
display.display(training_examples.describe())
print("Validation examples summary:")
display.display(validation_examples.describe())
print("Training targets summary:")
display.display(training_targets.describe())
print("Validation targets summary:")
display.display(validation_targets.describe())
def construct_feature_columns(input_features):
  """Construct the TensorFlow Feature Columns.

  Args:
    input_features: The names of the numerical input features to use.
  Returns:
    A set of feature columns
  """
  # One numeric feature column per input feature name.
  feature_columns = set()
  for feature_name in input_features:
    feature_columns.add(tf.feature_column.numeric_column(feature_name))
  return feature_columns
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
  """Trains a linear regression model.

  Args:
    features: pandas DataFrame of features
    targets: pandas DataFrame of targets
    batch_size: Size of batches to be passed to the model
    shuffle: True or False. Whether to shuffle the data.
    num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
  Returns:
    Tuple of (features, labels) for next data batch
  """
  # Convert pandas data into a dict of np arrays.
  feature_arrays = {}
  for column_name, column_values in dict(features).items():
    feature_arrays[column_name] = np.array(column_values)

  # Construct a dataset, and configure batching/repeating.
  ds = Dataset.from_tensor_slices((feature_arrays, targets))  # warning: 2GB limit
  ds = ds.batch(batch_size).repeat(num_epochs)

  # Shuffle the data, if specified.
  if shuffle:
    ds = ds.shuffle(10000)

  # Return the next batch of data.
  features, labels = ds.make_one_shot_iterator().get_next()
  return features, labels
```
## FTRL Optimization Algorithm
High dimensional linear models benefit from using a variant of gradient-based optimization called FTRL. This algorithm has the benefit of scaling the learning rate differently for different coefficients, which can be useful if some features rarely take non-zero values (it also is well suited to support L1 regularization). We can apply FTRL using the [FtrlOptimizer](https://www.tensorflow.org/api_docs/python/tf/train/FtrlOptimizer).
```
def train_model(
    learning_rate,
    steps,
    batch_size,
    feature_columns,
    training_examples,
    training_targets,
    validation_examples,
    validation_targets):
  """Trains a linear regression model.

  In addition to training, this function also prints training progress information,
  as well as a plot of the training and validation loss over time.

  Args:
    learning_rate: A `float`, the learning rate.
    steps: A non-zero `int`, the total number of training steps. A training step
      consists of a forward and backward pass using a single batch.
    batch_size: A non-zero `int`, the number of examples in a single training batch.
    feature_columns: A `set` specifying the input feature columns to use.
    training_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for training.
    training_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for training.
    validation_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for validation.
    validation_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for validation.

  Returns:
    A `LinearRegressor` object trained on the training data.
  """
  # Train in 10 chunks ("periods") so loss can be reported as training progresses.
  periods = 10
  steps_per_period = steps / periods

  # Create a linear regressor object.
  # FTRL adapts the learning rate per coefficient, which suits sparse,
  # high-dimensional inputs such as bucketized/crossed feature columns.
  my_optimizer = tf.train.FtrlOptimizer(learning_rate=learning_rate)
  # Clip gradients to norm 5.0 to guard against exploding updates.
  my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
  linear_regressor = tf.estimator.LinearRegressor(
      feature_columns=feature_columns,
      optimizer=my_optimizer
  )

  # Input functions: shuffled/repeating for training; single-pass and
  # unshuffled for prediction so outputs line up with the target rows.
  training_input_fn = lambda: my_input_fn(training_examples,
                                          training_targets["median_house_value"],
                                          batch_size=batch_size)
  predict_training_input_fn = lambda: my_input_fn(training_examples,
                                                  training_targets["median_house_value"],
                                                  num_epochs=1,
                                                  shuffle=False)
  predict_validation_input_fn = lambda: my_input_fn(validation_examples,
                                                    validation_targets["median_house_value"],
                                                    num_epochs=1,
                                                    shuffle=False)

  # Train the model, but do so inside a loop so that we can periodically assess
  # loss metrics.
  print("Training model...")
  print("RMSE (on training data):")
  training_rmse = []
  validation_rmse = []
  for period in range (0, periods):
    # Train the model, starting from the prior state.
    linear_regressor.train(
        input_fn=training_input_fn,
        steps=steps_per_period
    )
    # Take a break and compute predictions.
    training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
    training_predictions = np.array([item['predictions'][0] for item in training_predictions])
    validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
    validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
    # Compute training and validation loss.
    training_root_mean_squared_error = math.sqrt(
        metrics.mean_squared_error(training_predictions, training_targets))
    validation_root_mean_squared_error = math.sqrt(
        metrics.mean_squared_error(validation_predictions, validation_targets))
    # Occasionally print the current loss.
    print(" period %02d : %0.2f" % (period, training_root_mean_squared_error))
    # Add the loss metrics from this period to our list.
    training_rmse.append(training_root_mean_squared_error)
    validation_rmse.append(validation_root_mean_squared_error)
  print("Model training finished.")

  # Output a graph of loss metrics over periods.
  plt.ylabel("RMSE")
  plt.xlabel("Periods")
  plt.title("Root Mean Squared Error vs. Periods")
  plt.tight_layout()
  plt.plot(training_rmse, label="training")
  plt.plot(validation_rmse, label="validation")
  plt.legend()

  return linear_regressor
_ = train_model(
learning_rate=1.0,
steps=500,
batch_size=100,
feature_columns=construct_feature_columns(training_examples),
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
```
## One-Hot Encoding for Discrete Features
Discrete (i.e. strings, enumerations, integers) features are usually converted into families of binary features before training a logistic regression model.
For example, suppose we created a synthetic feature that can take any of the values `0`, `1` or `2`, and that we have a few training points:
| # | feature_value |
|---|---------------|
| 0 | 2 |
| 1 | 0 |
| 2 | 1 |
For each possible categorical value, we make a new **binary** feature of **real values** that can take one of just two possible values: 1.0 if the example has that value, and 0.0 if not. In the example above, the categorical feature would be converted into three features, and the training points now look like:
| # | feature_value_0 | feature_value_1 | feature_value_2 |
|---|-----------------|-----------------|-----------------|
| 0 | 0.0 | 0.0 | 1.0 |
| 1 | 1.0 | 0.0 | 0.0 |
| 2 | 0.0 | 1.0 | 0.0 |
## Bucketized (Binned) Features
Bucketization is also known as binning.
We can bucketize `population` into the following 3 buckets (for instance):
- `bucket_0` (`< 5000`): corresponding to less populated blocks
- `bucket_1` (`5000 - 25000`): corresponding to mid populated blocks
- `bucket_2` (`> 25000`): corresponding to highly populated blocks
Given the preceding bucket definitions, the following `population` vector:
[[10001], [42004], [2500], [18000]]
becomes the following bucketized feature vector:
[[1], [2], [0], [1]]
The feature values are now the bucket indices. Note that these indices are considered to be discrete features. Typically, these will be further converted in one-hot representations as above, but this is done transparently.
To define feature columns for bucketized features, instead of using `numeric_column`, we can use [`bucketized_column`](https://www.tensorflow.org/api_docs/python/tf/feature_column/bucketized_column), which takes a numeric column as input and transforms it to a bucketized feature using the bucket boundaries specified in the `boundaries` argument. The following code defines bucketized feature columns for `households` and `longitude`; the `get_quantile_based_boundaries` function calculates boundaries based on quantiles, so that each bucket contains an equal number of elements.
```
def get_quantile_based_boundaries(feature_values, num_buckets):
  """Return num_buckets - 1 quantile-based bucket boundaries for a Series."""
  # Interior quantile points: 1/n, 2/n, ..., (n-1)/n.
  quantile_points = np.arange(1.0, num_buckets) / num_buckets
  return list(feature_values.quantile(quantile_points))
# Divide households into 7 buckets.
households = tf.feature_column.numeric_column("households")
bucketized_households = tf.feature_column.bucketized_column(
households, boundaries=get_quantile_based_boundaries(
california_housing_dataframe["households"], 7))
# Divide longitude into 10 buckets.
longitude = tf.feature_column.numeric_column("longitude")
bucketized_longitude = tf.feature_column.bucketized_column(
longitude, boundaries=get_quantile_based_boundaries(
california_housing_dataframe["longitude"], 10))
```
## Task 1: Train the Model on Bucketized Feature Columns
**Bucketize all the real valued features in our example, train the model and see if the results improve.**
In the preceding code block, two real valued columns (namely `households` and `longitude`) have been transformed into bucketized feature columns. Your task is to bucketize the rest of the columns, then run the code to train the model. There are various heuristics to find the range of the buckets. This exercise uses a quantile-based technique, which chooses the bucket boundaries in such a way that each bucket has the same number of examples.
```
def construct_feature_columns():
  """Construct the TensorFlow Feature Columns.

  Returns:
    A set of feature columns
  """
  households = tf.feature_column.numeric_column("households")
  longitude = tf.feature_column.numeric_column("longitude")
  latitude = tf.feature_column.numeric_column("latitude")
  housing_median_age = tf.feature_column.numeric_column("housing_median_age")
  median_income = tf.feature_column.numeric_column("median_income")
  rooms_per_person = tf.feature_column.numeric_column("rooms_per_person")

  # Divide households into 7 buckets.
  bucketized_households = tf.feature_column.bucketized_column(
    households, boundaries=get_quantile_based_boundaries(
      training_examples["households"], 7))

  # Divide longitude into 10 buckets.
  bucketized_longitude = tf.feature_column.bucketized_column(
    longitude, boundaries=get_quantile_based_boundaries(
      training_examples["longitude"], 10))

  #
  # YOUR CODE HERE: bucketize the following columns, following the example above:
  #
  # NOTE: the four assignments below are intentionally left incomplete as part
  # of this exercise; the cell will not run until they are filled in (see the
  # Solution section further down).
  bucketized_latitude = 
  bucketized_housing_median_age = 
  bucketized_median_income =
  bucketized_rooms_per_person =

  feature_columns = set([
    bucketized_longitude,
    bucketized_latitude,
    bucketized_housing_median_age,
    bucketized_households,
    bucketized_median_income,
    bucketized_rooms_per_person])

  return feature_columns
_ = train_model(
learning_rate=1.0,
steps=500,
batch_size=100,
feature_columns=construct_feature_columns(),
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
```
### Solution
Click below for a solution.
You may be wondering how to determine how many buckets to use. That is of course data-dependent. Here, we just selected arbitrary values so as to obtain a not-too-large model.
```
def construct_feature_columns():
  """Construct the TensorFlow Feature Columns.

  Returns:
    A set of feature columns
  """
  # Quantile-based bucket count per feature. The counts were chosen somewhat
  # arbitrarily so as to obtain a not-too-large model.
  bucket_counts = {
      "households": 7,
      "longitude": 10,
      "latitude": 10,
      "housing_median_age": 7,
      "median_income": 7,
      "rooms_per_person": 7,
  }

  feature_columns = set()
  for feature_name, num_buckets in bucket_counts.items():
    numeric = tf.feature_column.numeric_column(feature_name)
    feature_columns.add(
        tf.feature_column.bucketized_column(
            numeric,
            boundaries=get_quantile_based_boundaries(
                training_examples[feature_name], num_buckets)))

  return feature_columns
_ = train_model(
learning_rate=1.0,
steps=500,
batch_size=100,
feature_columns=construct_feature_columns(),
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
```
## Feature Crosses
Crossing two (or more) features is a clever way to learn non-linear relations using a linear model. In our problem, if we just use the feature `latitude` for learning, the model might learn that city blocks at a particular latitude (or within a particular range of latitudes since we have bucketized it) are more likely to be expensive than others. Similarly for the feature `longitude`. However, if we cross `longitude` by `latitude`, the crossed feature represents a well defined city block. If the model learns that certain city blocks (within range of latitudes and longitudes) are more likely to be more expensive than others, it is a stronger signal than two features considered individually.
Currently, the feature columns API only supports discrete features for crosses. To cross two continuous values, like `latitude` or `longitude`, we can bucketize them.
If we cross the `latitude` and `longitude` features (supposing, for example, that `longitude` was bucketized into `2` buckets, while `latitude` has `3` buckets), we actually get six crossed binary features. Each of these features will get its own separate weight when we train the model.
## Task 2: Train the Model Using Feature Crosses
**Add a feature cross of `longitude` and `latitude` to your model, train it, and determine whether the results improve.**
Refer to the TensorFlow API docs for [`crossed_column()`](https://www.tensorflow.org/api_docs/python/tf/feature_column/crossed_column) to build the feature column for your cross. Use a `hash_bucket_size` of `1000`.
```
def construct_feature_columns():
  """Construct the TensorFlow Feature Columns.

  Returns:
    A set of feature columns
  """
  households = tf.feature_column.numeric_column("households")
  longitude = tf.feature_column.numeric_column("longitude")
  latitude = tf.feature_column.numeric_column("latitude")
  housing_median_age = tf.feature_column.numeric_column("housing_median_age")
  median_income = tf.feature_column.numeric_column("median_income")
  rooms_per_person = tf.feature_column.numeric_column("rooms_per_person")

  # Divide households into 7 buckets.
  bucketized_households = tf.feature_column.bucketized_column(
    households, boundaries=get_quantile_based_boundaries(
      training_examples["households"], 7))

  # Divide longitude into 10 buckets.
  bucketized_longitude = tf.feature_column.bucketized_column(
    longitude, boundaries=get_quantile_based_boundaries(
      training_examples["longitude"], 10))

  # Divide latitude into 10 buckets.
  bucketized_latitude = tf.feature_column.bucketized_column(
    latitude, boundaries=get_quantile_based_boundaries(
      training_examples["latitude"], 10))

  # Divide housing_median_age into 7 buckets.
  bucketized_housing_median_age = tf.feature_column.bucketized_column(
    housing_median_age, boundaries=get_quantile_based_boundaries(
      training_examples["housing_median_age"], 7))

  # Divide median_income into 7 buckets.
  bucketized_median_income = tf.feature_column.bucketized_column(
    median_income, boundaries=get_quantile_based_boundaries(
      training_examples["median_income"], 7))

  # Divide rooms_per_person into 7 buckets.
  bucketized_rooms_per_person = tf.feature_column.bucketized_column(
    rooms_per_person, boundaries=get_quantile_based_boundaries(
      training_examples["rooms_per_person"], 7))

  # YOUR CODE HERE: Make a feature column for the long_x_lat feature cross
  # NOTE: this assignment is intentionally left incomplete as part of the
  # exercise; the cell will not run until it is filled in (see the Solution).
  long_x_lat = 

  feature_columns = set([
    bucketized_longitude,
    bucketized_latitude,
    bucketized_housing_median_age,
    bucketized_households,
    bucketized_median_income,
    bucketized_rooms_per_person,
    long_x_lat])

  return feature_columns
_ = train_model(
learning_rate=1.0,
steps=500,
batch_size=100,
feature_columns=construct_feature_columns(),
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
```
### Solution
Click below for the solution.
```
def construct_feature_columns():
  """Construct the TensorFlow Feature Columns.

  Returns:
    A set of feature columns
  """
  def bucketize(feature_name, num_buckets):
    # Helper: wrap a numeric column in quantile-based buckets.
    numeric = tf.feature_column.numeric_column(feature_name)
    return tf.feature_column.bucketized_column(
        numeric,
        boundaries=get_quantile_based_boundaries(
            training_examples[feature_name], num_buckets))

  bucketized_households = bucketize("households", 7)
  bucketized_longitude = bucketize("longitude", 10)
  bucketized_latitude = bucketize("latitude", 10)
  bucketized_housing_median_age = bucketize("housing_median_age", 7)
  bucketized_median_income = bucketize("median_income", 7)
  bucketized_rooms_per_person = bucketize("rooms_per_person", 7)

  # Feature cross of bucketized longitude x latitude: each hash bucket gets
  # its own weight, letting the linear model learn per-city-block effects.
  long_x_lat = tf.feature_column.crossed_column(
    set([bucketized_longitude, bucketized_latitude]), hash_bucket_size=1000)

  return set([
      bucketized_longitude,
      bucketized_latitude,
      bucketized_housing_median_age,
      bucketized_households,
      bucketized_median_income,
      bucketized_rooms_per_person,
      long_x_lat])
_ = train_model(
learning_rate=1.0,
steps=500,
batch_size=100,
feature_columns=construct_feature_columns(),
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
```
## Optional Challenge: Try Out More Synthetic Features
So far, we've tried simple bucketized columns and feature crosses, but there are many more combinations that could potentially improve the results. For example, you could cross multiple columns. What happens if you vary the number of buckets? What other synthetic features can you think of? Do they improve the model?
| github_jupyter |
# Lazy Mode and Logging
So far, we have seen Ibis in interactive mode. Interactive mode (also known as eager mode) makes Ibis return the
results of an operation immediately.
In most cases, instead of using interactive mode, it makes more sense to use the default lazy mode.
In lazy mode, Ibis won't be executing the operations automatically, but instead, will generate an
expression to be executed at a later time.
Let's see this in practice, starting with the same example as in previous tutorials - the geography database.
```
!curl -LsS -o $TEMPDIR/geography.db 'https://storage.googleapis.com/ibis-tutorial-data/geography.db'
import os
import tempfile
import ibis
connection = ibis.sqlite.connect(os.path.join(tempfile.gettempdir(), 'geography.db'))
countries = connection.table('countries')
```
In previous tutorials, we set interactive mode to `True`, and we obtained the result
of every operation.
```
ibis.options.interactive = True
countries['name', 'continent', 'population'].limit(3)
```
But now let's see what happens if we leave the `interactive` option to `False` (the default),
and we operate in lazy mode.
```
ibis.options.interactive = False
countries['name', 'continent', 'population'].limit(3)
```
What we find is the graph of the expressions that would return the desired result instead of the result itself.
Let's analyze the expressions in the graph:
- We query the `countries` table (all rows and all columns)
- We select the `name`, `continent` and `population` columns
- We limit the results to only the first `3` rows
Now consider that the data is in a database, possibly in a different host than the one executing Ibis.
Also consider that the results returned to the user need to be moved to the memory of the host executing Ibis.
When using interactive (or eager) mode, if we perform one operation at a time, we would do:
- We would move all the rows and columns from the backend (database, big data system, etc.) into memory
- Once in memory, we would discard all the columns but `name`, `continent` and `population`
- After that, we would discard all the rows, except the first `3`
This is not very efficient. If you consider that the table can have millions of rows, backed by a
big data system like Spark or Impala, this may not even be possible (not enough memory to load all the data).
The solution is to use lazy mode. In lazy mode, instead of obtaining the results after each operation,
we build an expression (a graph) of all the operations that need to be done. After all the operations
are recorded, the graph is sent to the backend which will perform the operation in an efficient way - only
moving to memory the required data.
You can think of this as writing a shopping list and requesting someone to go to the supermarket and buy
everything you need once the list is complete. As opposed to getting someone to bring all the products of
the supermarket to your home and then return everything you don't want.
Let's continue with our example, save the expression in a variable `countries_expression`, and check its type.
```
countries_expression = countries['name', 'continent', 'population'].limit(3)
type(countries_expression)
```
The type is an Ibis `TableExpr`, since the result is a table (in a broad way, you can consider it a dataframe).
Now we have our query instructions (our expression, fetching only 3 columns and 3 rows) in the variable `countries_expression`.
At this point, nothing has been requested from the database. We have defined what we want to extract, but we didn't
request it from the database yet. We can continue building our expression if we haven't finished yet. Or once we
are done, we can simply request it from the database using the method `.execute()`.
```
countries_expression.execute()
```
We can build other types of expressions, for example, one that instead of returning a table,
returns a column.
```
population_in_millions = (countries['population'] / 1_000_000).name('population_in_millions')
population_in_millions
```
If we check its type, we can see how it is a `FloatingColumn` expression.
```
type(population_in_millions)
```
We can combine the previous expression to be a column of a table expression.
```
countries['name', 'continent', population_in_millions].limit(3)
```
Since we are in lazy mode (not interactive), those expressions don't request any data from the database
unless explicitly requested with `.execute()`.
## Logging queries
For SQL backends (and for others when it makes sense), the query sent to the database can be logged.
This can be done by setting the `verbose` option to `True`.
```
ibis.options.verbose = True
countries['name', 'continent', population_in_millions].limit(3).execute()
```
By default, the logging is done to the terminal, but we can process the query with a custom function.
This allows us to save executed queries to a file, save to a database, send them to a web service, etc.
For example, to save queries to a file, we can write a custom function that given a query, saves it to a
log file.
```
import os
import datetime
import tempfile
from pathlib import Path
def log_query_to_file(query: str) -> None:
    """Append *query* as a single line to ``tutorial_queries.log``.

    The log file lives in the system temporary directory (the original
    docstring claimed ``data/tutorial_queries.log`` and a timestamp, neither
    of which matched the code). Each line of the file holds one query; line
    breaks inside the query are encoded as the literal two-character string
    '\\n' so that a query never spans multiple lines.

    Args:
        query: The SQL query text to log.
    """
    log_path = Path(tempfile.gettempdir()) / 'tutorial_queries.log'
    # Collapse the query onto one line so each log line is one query.
    query_in_a_single_line = query.replace('\n', r'\n')
    # Append (not overwrite) so successive queries accumulate; utf-8 avoids
    # platform-dependent default encodings.
    with log_path.open(mode='a', encoding='utf-8') as f:
        f.write(f'{query_in_a_single_line}\n')
```
Then we can set the `verbose_log` option to the custom function, execute one query,
wait one second, and execute another query.
```
import time
ibis.options.verbose_log = log_query_to_file
countries.execute()
time.sleep(1.)
countries['name', 'continent', population_in_millions].limit(3).execute()
```
This has created a log file `tutorial_queries.log` (in the system temporary directory, as defined in `log_query_to_file`) where the executed queries have been logged.
```
!cat -n data/tutorial_queries.log
```
| github_jupyter |
## Mutual information
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import mutual_info_classif, mutual_info_regression
from sklearn.feature_selection import SelectKBest, SelectPercentile
```
## Read Data
```
data = pd.read_csv('../UNSW_Train.csv')
data.shape
data.head()
```
### Train - Test Split
```
# separate train and test sets
X_train, X_test, y_train, y_test = train_test_split(
data.drop(labels=['is_intrusion'], axis=1),
data['is_intrusion'],
test_size=0.2,
random_state=0)
X_train.shape, X_test.shape
```
### Determine Mutual Information
```
# Estimate mutual information on the TRAINING set only; computing it on the
# test set leaks information into feature selection and biases evaluation.
mi = mutual_info_classif(X_train, y_train)
mi
# 1. Let's capture the above array in a pandas series
# 2. Add the variable names in the index
# 3. Sort the features based on their mutual information value
# 4. And make a bar plot
mi = pd.Series(mi)
mi.index = X_train.columns
mi.sort_values(ascending=False).plot.bar(figsize=(20, 6))
plt.ylabel('Mutual Information')
```
### Select top k features based on MI
```
# select the top 15 features based on their mutual information value,
# fitting on the TRAINING set only to avoid test-set leakage
sel_ = SelectKBest(mutual_info_classif, k=15).fit(X_train, y_train)
# display the selected features
X_train.columns[sel_.get_support()]
# to remove the rest of the features:
X_train = sel_.transform(X_train)
X_test = sel_.transform(X_test)
X_train.shape, X_test.shape
```
## Standardize Data
```
from sklearn.preprocessing import StandardScaler
# Fit the scaler on the training data only, then apply the SAME transform
# to both sets. The original cell left X_test unscaled even though every
# classifier below predicts on it, skewing all reported metrics.
scaler = StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
```
## Classifiers
```
from sklearn import linear_model
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from catboost import CatBoostClassifier
```
## Metrics Evaluation
```
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, f1_score
from sklearn import metrics
from sklearn.model_selection import cross_val_score
```
### Logistic Regression
```
%%time
clf_LR = linear_model.LogisticRegression(n_jobs=-1, random_state=42, C=25).fit(X_train, y_train)
pred_y_test = clf_LR.predict(X_test)
print('Accuracy:', accuracy_score(y_test, pred_y_test))
f1 = f1_score(y_test, pred_y_test)
print('F1 Score:', f1)
fpr, tpr, thresholds = roc_curve(y_test, pred_y_test)
print('FPR:', fpr[1])
print('TPR:', tpr[1])
```
### Naive Bayes
```
%%time
clf_NB = GaussianNB(var_smoothing=1e-08).fit(X_train, y_train)
pred_y_testNB = clf_NB.predict(X_test)
print('Accuracy:', accuracy_score(y_test, pred_y_testNB))
f1 = f1_score(y_test, pred_y_testNB)
print('F1 Score:', f1)
fpr, tpr, thresholds = roc_curve(y_test, pred_y_testNB)
print('FPR:', fpr[1])
print('TPR:', tpr[1])
```
### Random Forest
```
%%time
clf_RF = RandomForestClassifier(random_state=0,max_depth=100,n_estimators=1000).fit(X_train, y_train)
pred_y_testRF = clf_RF.predict(X_test)
print('Accuracy:', accuracy_score(y_test, pred_y_testRF))
f1 = f1_score(y_test, pred_y_testRF, average='weighted', zero_division=0)
print('F1 Score:', f1)
fpr, tpr, thresholds = roc_curve(y_test, pred_y_testRF)
print('FPR:', fpr[1])
print('TPR:', tpr[1])
```
### KNN
```
%%time
clf_KNN = KNeighborsClassifier(algorithm='ball_tree',leaf_size=1,n_neighbors=5,weights='uniform').fit(X_train, y_train)
pred_y_testKNN = clf_KNN.predict(X_test)
print('accuracy_score:', accuracy_score(y_test, pred_y_testKNN))
f1 = f1_score(y_test, pred_y_testKNN)
print('f1:', f1)
fpr, tpr, thresholds = roc_curve(y_test, pred_y_testKNN)
print('fpr:', fpr[1])
print('tpr:', tpr[1])
```
### CatBoost
```
%%time
clf_CB = CatBoostClassifier(random_state=0,depth=7,iterations=50,learning_rate=0.04).fit(X_train, y_train)
pred_y_testCB = clf_CB.predict(X_test)
print('Accuracy:', accuracy_score(y_test, pred_y_testCB))
f1 = f1_score(y_test, pred_y_testCB, average='weighted', zero_division=0)
print('F1 Score:', f1)
fpr, tpr, thresholds = roc_curve(y_test, pred_y_testCB)
print('FPR:', fpr[1])
print('TPR:', tpr[1])
```
## Model Evaluation
```
data = pd.read_csv('../UNSW_Test.csv')
data.shape
# Create feature matrix X and target vector y
y_eval = data['is_intrusion']
X_eval = data.drop(columns=['is_intrusion'])
X_eval = sel_.transform(X_eval)
# Apply the scaling fitted on the training data; evaluating models trained
# on standardized features against raw features is inconsistent.
X_eval = scaler.transform(X_eval)
X_eval.shape
```
### Model Evaluation - Logistic Regression
```
modelLR = linear_model.LogisticRegression(n_jobs=-1, random_state=42, C=25)
modelLR.fit(X_train, y_train)
# Predict on the new unseen test data
y_evalpredLR = modelLR.predict(X_eval)
y_predLR = modelLR.predict(X_test)
train_scoreLR = modelLR.score(X_train, y_train)
test_scoreLR = modelLR.score(X_test, y_test)
print("Training accuracy is ", train_scoreLR)
print("Testing accuracy is ", test_scoreLR)
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
print('Performance measures for test:')
print('--------')
print('Accuracy:', test_scoreLR)
print('F1 Score:',f1_score(y_test, y_predLR))
print('Precision Score:',precision_score(y_test, y_predLR))
print('Recall Score:', recall_score(y_test, y_predLR))
print('Confusion Matrix:\n', confusion_matrix(y_test, y_predLR))
```
### Cross validation - Logistic Regression
```
from sklearn.model_selection import cross_val_score
from sklearn import metrics
accuracy = cross_val_score(modelLR, X_eval, y_eval, cv=10, scoring='accuracy')
print("Accuracy: %0.5f (+/- %0.5f)" % (accuracy.mean(), accuracy.std() * 2))
f = cross_val_score(modelLR, X_eval, y_eval, cv=10, scoring='f1')
print("F1 Score: %0.5f (+/- %0.5f)" % (f.mean(), f.std() * 2))
precision = cross_val_score(modelLR, X_eval, y_eval, cv=10, scoring='precision')
print("Precision: %0.5f (+/- %0.5f)" % (precision.mean(), precision.std() * 2))
recall = cross_val_score(modelLR, X_eval, y_eval, cv=10, scoring='recall')
print("Recall: %0.5f (+/- %0.5f)" % (recall.mean(), recall.std() * 2))
```
### Model Evaluation - Naive Bayes
```
modelNB = GaussianNB(var_smoothing=1e-08)
modelNB.fit(X_train, y_train)
# Predict on the new unseen test data
y_evalpredNB = modelNB.predict(X_eval)
y_predNB = modelNB.predict(X_test)
train_scoreNB = modelNB.score(X_train, y_train)
test_scoreNB = modelNB.score(X_test, y_test)
print("Training accuracy is ", train_scoreNB)
print("Testing accuracy is ", test_scoreNB)
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
print('Performance measures for test:')
print('--------')
print('Accuracy:', test_scoreNB)
print('F1 Score:',f1_score(y_test, y_predNB))
print('Precision Score:',precision_score(y_test, y_predNB))
print('Recall Score:', recall_score(y_test, y_predNB))
print('Confusion Matrix:\n', confusion_matrix(y_test, y_predNB))
```
### Cross validation - Naive Bayes
```
from sklearn.model_selection import cross_val_score
from sklearn import metrics
accuracy = cross_val_score(modelNB, X_eval, y_eval, cv=10, scoring='accuracy')
print("Accuracy: %0.5f (+/- %0.5f)" % (accuracy.mean(), accuracy.std() * 2))
f = cross_val_score(modelNB, X_eval, y_eval, cv=10, scoring='f1')
print("F1 Score: %0.5f (+/- %0.5f)" % (f.mean(), f.std() * 2))
precision = cross_val_score(modelNB, X_eval, y_eval, cv=10, scoring='precision')
print("Precision: %0.5f (+/- %0.5f)" % (precision.mean(), precision.std() * 2))
recall = cross_val_score(modelNB, X_eval, y_eval, cv=10, scoring='recall')
print("Recall: %0.5f (+/- %0.5f)" % (recall.mean(), recall.std() * 2))
```
### Model Evaluation - Random Forest
```
modelRF = RandomForestClassifier(random_state=0,max_depth=100,n_estimators=1000)
modelRF.fit(X_train, y_train)
# Predict on the new unseen test data
y_evalpredRF = modelRF.predict(X_eval)
y_predRF = modelRF.predict(X_test)
train_scoreRF = modelRF.score(X_train, y_train)
test_scoreRF = modelRF.score(X_test, y_test)
print("Training accuracy is ", train_scoreRF)
print("Testing accuracy is ", test_scoreRF)
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
print('Performance measures for test:')
print('--------')
print('Accuracy:', test_scoreRF)
print('F1 Score:', f1_score(y_test, y_predRF, average='weighted', zero_division=0))
print('Precision Score:', precision_score(y_test, y_predRF, average='weighted', zero_division=0))
print('Recall Score:', recall_score(y_test, y_predRF, average='weighted', zero_division=0))
print('Confusion Matrix:\n', confusion_matrix(y_test, y_predRF))
```
### Cross validation - Random Forest
```
from sklearn.model_selection import cross_val_score
from sklearn import metrics
accuracy = cross_val_score(modelRF, X_eval, y_eval, cv=10, scoring='accuracy')
print("Accuracy: %0.5f (+/- %0.5f)" % (accuracy.mean(), accuracy.std() * 2))
f = cross_val_score(modelRF, X_eval, y_eval, cv=10, scoring='f1')
print("F1 Score: %0.5f (+/- %0.5f)" % (f.mean(), f.std() * 2))
precision = cross_val_score(modelRF, X_eval, y_eval, cv=10, scoring='precision')
print("Precision: %0.5f (+/- %0.5f)" % (precision.mean(), precision.std() * 2))
recall = cross_val_score(modelRF, X_eval, y_eval, cv=10, scoring='recall')
print("Recall: %0.5f (+/- %0.5f)" % (recall.mean(), recall.std() * 2))
```
### Model Evaluation - KNN
```
modelKNN = KNeighborsClassifier(algorithm='ball_tree',leaf_size=1,n_neighbors=5,weights='uniform')
modelKNN.fit(X_train, y_train)
# Predict on the new unseen test data
y_evalpredKNN = modelKNN.predict(X_eval)
y_predKNN = modelKNN.predict(X_test)
train_scoreKNN = modelKNN.score(X_train, y_train)
test_scoreKNN = modelKNN.score(X_test, y_test)
print("Training accuracy is ", train_scoreKNN)
print("Testing accuracy is ", test_scoreKNN)
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
print('Performance measures for test:')
print('--------')
print('Accuracy:', test_scoreKNN)
print('F1 Score:', f1_score(y_test, y_predKNN))
print('Precision Score:', precision_score(y_test, y_predKNN))
print('Recall Score:', recall_score(y_test, y_predKNN))
print('Confusion Matrix:\n', confusion_matrix(y_test, y_predKNN))
```
### Cross validation - KNN
```
from sklearn.model_selection import cross_val_score
from sklearn import metrics
accuracy = cross_val_score(modelKNN, X_eval, y_eval, cv=10, scoring='accuracy')
print("Accuracy: %0.5f (+/- %0.5f)" % (accuracy.mean(), accuracy.std() * 2))
f = cross_val_score(modelKNN, X_eval, y_eval, cv=10, scoring='f1')
print("F1 Score: %0.5f (+/- %0.5f)" % (f.mean(), f.std() * 2))
precision = cross_val_score(modelKNN, X_eval, y_eval, cv=10, scoring='precision')
print("Precision: %0.5f (+/- %0.5f)" % (precision.mean(), precision.std() * 2))
recall = cross_val_score(modelKNN, X_eval, y_eval, cv=10, scoring='recall')
print("Recall: %0.5f (+/- %0.5f)" % (recall.mean(), recall.std() * 2))
```
### Model Evaluation - CatBoost
```
modelCB = CatBoostClassifier(random_state=0,depth=7,iterations=50,learning_rate=0.04)
modelCB.fit(X_train, y_train)
# Predict on the new unseen test data
y_evalpredCB = modelCB.predict(X_eval)
y_predCB = modelCB.predict(X_test)
train_scoreCB = modelCB.score(X_train, y_train)
test_scoreCB = modelCB.score(X_test, y_test)
print("Training accuracy is ", train_scoreCB)
print("Testing accuracy is ", test_scoreCB)
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
print('Performance measures for test:')
print('--------')
print('Accuracy:', test_scoreCB)
print('F1 Score:',f1_score(y_test, y_predCB, average='weighted', zero_division=0))
print('Precision Score:',precision_score(y_test, y_predCB, average='weighted', zero_division=0))
print('Recall Score:', recall_score(y_test, y_predCB, average='weighted', zero_division=0))
print('Confusion Matrix:\n', confusion_matrix(y_test, y_predCB))
```
### Cross validation - CatBoost
```
from sklearn.model_selection import cross_val_score
from sklearn import metrics
accuracy = cross_val_score(modelCB, X_eval, y_eval, cv=10, scoring='accuracy')
f = cross_val_score(modelCB, X_eval, y_eval, cv=10, scoring='f1')
precision = cross_val_score(modelCB, X_eval, y_eval, cv=10, scoring='precision')
recall = cross_val_score(modelCB, X_eval, y_eval, cv=10, scoring='recall')
print("Accuracy: %0.5f (+/- %0.5f)" % (accuracy.mean(), accuracy.std() * 2))
print("F1 Score: %0.5f (+/- %0.5f)" % (f.mean(), f.std() * 2))
print("Precision: %0.5f (+/- %0.5f)" % (precision.mean(), precision.std() * 2))
print("Recall: %0.5f (+/- %0.5f)" % (recall.mean(), recall.std() * 2))
```
| github_jupyter |
# Plotting
In this notebook, I'll develop a function to plot subjects and their labels.
```
from astropy.coordinates import SkyCoord
import astropy.io.fits
import astropy.wcs
import h5py
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import numpy
import skimage.exposure
import sklearn.neighbors
import sklearn.pipeline
import sklearn.preprocessing
CROWDASTRO_H5_PATH = 'data/crowdastro.h5'
PATCH_DIAMETER = 200
FITS_CONVENTION = 1
ARCMIN = 1 / 60
IMAGE_SIZE = 200 * 200
NORRIS_DAT_PATH = 'data/norris_2006_atlas_classifications_ra_dec_only.dat'
TRAINING_H5_PATH = 'data/training.h5'
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
N_ASTRO = 5 if f_h5.attrs['ir_survey'] == 'wise' else 6
%matplotlib inline
```
## Displaying radio images
Radio images look pretty terrible, so let's run a filter over them to make them a little easier to see. I'll use skimage and try a few different ones.
Let's get an example and look at the basic output.
```
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
image = f_h5['/atlas/cdfs/numeric'][250, 2 : 2 + PATCH_DIAMETER ** 2].reshape((PATCH_DIAMETER, PATCH_DIAMETER))
plt.imshow(image, cmap='gray')
plt.show()
```
It's hard to make out any features. Now, let's run some filters on it.
```
fig = plt.figure(figsize=(18, 27))
def subplot_imshow_hist(i, fig, im, title):
    """Show greyscale image *im* in subplot slot *i* of *fig*, and a 256-bin
    histogram of its pixel values one row (three slots) below it."""
    image_ax = fig.add_subplot(6, 3, i)
    image_ax.imshow(im, cmap='gray')
    image_ax.set_title(title)
    image_ax.axis('off')
    hist_ax = fig.add_subplot(6, 3, i + 3)
    hist_ax.hist(im.ravel(), bins=256, histtype='step', color='black')
    hist_ax.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
subplot_imshow_hist(1, fig, image, 'Default')
subplot_imshow_hist(2, fig, skimage.exposure.equalize_adapthist(image, clip_limit=0.01), 'Adaptive equalisation')
subplot_imshow_hist(3, fig, skimage.exposure.equalize_hist(image), 'Histogram equalisation')
subplot_imshow_hist(7, fig, skimage.exposure.rescale_intensity(image, in_range=tuple(numpy.percentile(image, (0.75, 99.25)))),
'Constant stretching 0.75 - 99.25')
subplot_imshow_hist(8, fig, skimage.exposure.rescale_intensity(image, in_range=tuple(numpy.percentile(image, (1, 99)))),
'Constant stretching 1 - 99')
subplot_imshow_hist(9, fig, skimage.exposure.rescale_intensity(image, in_range=tuple(numpy.percentile(image, (2, 98)))),
'Constant stretching 2 - 98')
subplot_imshow_hist(13, fig, numpy.sqrt(image - image.min()), 'Square root')
subplot_imshow_hist(14, fig, numpy.log(image - image.min() + 1e-5), 'Logarithm + 1e-5')
subplot_imshow_hist(15, fig, numpy.log(image + 1), 'Logarithm + 1')
```
Square root looks good, so let's blitz that over some random images and see how it looks.
```
fig = plt.figure(figsize=(18, 25))
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
indices = numpy.arange(f_h5['/atlas/cdfs/numeric'].shape[0])
numpy.random.seed(10000)
numpy.random.shuffle(indices)
for j, i in enumerate(indices[:3]):
image = f_h5['/atlas/cdfs/numeric'][i, 2 : 2 + PATCH_DIAMETER ** 2].reshape((PATCH_DIAMETER, PATCH_DIAMETER))
subplot_imshow_hist(j + 1, fig, numpy.sqrt(image - image.min()), str(i))
```
## Plotting IR objects
This is an extremely unpleasant operation: We have to find the pixel coordinates of each IR location, which are all specified in RA/DEC.
```
from crowdastro.config import config
with astropy.io.fits.open(config['data_sources']['atlas_image'],
ignore_blank=True) as atlas_image:
wcs = astropy.wcs.WCS(atlas_image[0].header).dropaxis(3).dropaxis(2)
def ra_dec_to_pixels(subject_coords, coords):
    """Convert RA/DEC *coords* into pixel coordinates within the 80x80 patch
    centred on *subject_coords* (itself RA/DEC)."""
    offset, = wcs.all_world2pix([subject_coords], FITS_CONVENTION)
    # The coords are of the middle of the subject.
    coords = wcs.all_world2pix(coords, FITS_CONVENTION)
    coords -= offset
    # Scale mosaic pixels down to patch pixels; 424 / 200 is presumably the
    # mosaic-to-patch resolution ratio — TODO confirm against crowdastro config.
    coords[:, 0] /= config['surveys']['atlas']['mosaic_scale_x'] * 424 / 200
    coords[:, 1] /= config['surveys']['atlas']['mosaic_scale_y'] * 424 / 200
    # Shift so the subject sits at the centre of the 80x80 crop.
    coords += [40, 40]
    return coords
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
i = 296
image = f_h5['/atlas/cdfs/numeric'][i, 2 : 2 + PATCH_DIAMETER ** 2].reshape(
(PATCH_DIAMETER, PATCH_DIAMETER))[60:140, 60:140]
radio_coords = f_h5['/atlas/cdfs/numeric'][i, :2]
nearby = f_h5['/atlas/cdfs/numeric'][i, 2 + PATCH_DIAMETER ** 2:] < ARCMIN
ir_coords = f_h5['/swire/cdfs/numeric'][nearby, :2]
ir_coords = ra_dec_to_pixels(radio_coords, ir_coords)
plt.imshow(numpy.sqrt(image - image.min()), cmap='gray')
plt.scatter(ir_coords[:, 0], ir_coords[:, 1])
```
## Displaying classifications
The simplest thing we can do is to just highlight the host galaxies, so let's load up the Norris et al. classifications and have a look.
```
# Load labels.
with h5py.File(TRAINING_H5_PATH, 'r') as training_h5:
crowdsourced_labels = training_h5['labels'].value
with h5py.File(CROWDASTRO_H5_PATH, 'r') as crowdastro_h5:
ir_names = crowdastro_h5['/swire/cdfs/string'].value
ir_positions = crowdastro_h5['/swire/cdfs/numeric'].value[:, :2]
ir_tree = sklearn.neighbors.KDTree(ir_positions)
with open(NORRIS_DAT_PATH, 'r') as norris_dat:
norris_coords = [r.strip().split('|') for r in norris_dat]
norris_labels = numpy.zeros((len(ir_positions)))
for ra, dec in norris_coords:
# Find a neighbour.
skycoord = SkyCoord(ra=ra, dec=dec, unit=('hourangle', 'deg'))
ra = skycoord.ra.degree
dec = skycoord.dec.degree
((dist,),), ((ir,),) = ir_tree.query([(ra, dec)])
if dist < 0.1:
norris_labels[ir] = 1
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
i = 250
image = f_h5['/atlas/cdfs/numeric'][i, 2 : 2 + PATCH_DIAMETER ** 2].reshape(
(PATCH_DIAMETER, PATCH_DIAMETER))[60:140, 60:140]
radio_coords = f_h5['/atlas/cdfs/numeric'][i, :2]
nearby = f_h5['/atlas/cdfs/numeric'][i, 2 + PATCH_DIAMETER ** 2:] < ARCMIN
ir_coords = f_h5['/swire/cdfs/numeric'][nearby, :2]
ir_coords = ra_dec_to_pixels(radio_coords, ir_coords)
plt.imshow(numpy.sqrt(image - image.min()), cmap='gray')
plt.scatter(ir_coords[:, 0], ir_coords[:, 1])
labels = norris_labels[nearby].astype(bool)
nearby_hosts = ir_coords[labels]
plt.scatter(nearby_hosts[:, 0], nearby_hosts[:, 1], c='red')
```
What about displaying classifications from my classifier?
```
from crowdastro.classifier import RGZClassifier
from sklearn.ensemble import RandomForestClassifier
with h5py.File(TRAINING_H5_PATH, 'r') as f_h5:
classifier = RGZClassifier(f_h5['features'].value, N_ASTRO)
classifier.train(numpy.arange(f_h5['features'].shape[0]), norris_labels)
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
i = 250
image = f_h5['/atlas/cdfs/numeric'][i, 2 : 2 + PATCH_DIAMETER ** 2].reshape(
(PATCH_DIAMETER, PATCH_DIAMETER))[60:140, 60:140]
radio_coords = f_h5['/atlas/cdfs/numeric'][i, :2]
nearby = f_h5['/atlas/cdfs/numeric'][i, 2 + PATCH_DIAMETER ** 2:] < ARCMIN
ir_coords = f_h5['/swire/cdfs/numeric'][nearby, :2]
ir_coords = ra_dec_to_pixels(radio_coords, ir_coords)
vec = f_h5['/atlas/cdfs/numeric'][i, :]
probs = classifier.predict_probabilities(vec)[nearby]
nearby_norris = ir_coords[norris_labels[nearby].astype('bool')]
nearby_rgz = ir_coords[crowdsourced_labels[nearby].astype('bool')]
plt.figure(figsize=(15, 15))
base_size = 200
plt.imshow(numpy.sqrt(image - image.min()), cmap='gray')
plt.scatter(ir_coords[:, 0], ir_coords[:, 1], s=probs * base_size, c=probs, marker='o', cmap='cool')
plt.scatter(nearby_norris[:, 0], nearby_norris[:, 1], s=base_size, c='green', marker='*')
plt.axis('off')
# plt.scatter(nearby_rgz[:, 0], nearby_rgz[:, 1], s=50, c='cyan', marker='x', alpha=0.5)
plt.xlim((0, 80))
plt.ylim((0, 80))
```
## Plotting a committee
If we have multiple classifiers, how should we output their predictions?
```
with h5py.File(TRAINING_H5_PATH, 'r') as f_h5:
classifiers = [RGZClassifier(f_h5['features'], N_ASTRO) for _ in range(10)]
for classifier in classifiers:
subset = numpy.arange(f_h5['features'].shape[0])
numpy.random.shuffle(subset)
subset = subset[:len(subset) // 50]
subset = sorted(subset)
classifier.train(list(subset), norris_labels[subset])
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
i = 250
image = f_h5['/atlas/cdfs/numeric'][i, 2 : 2 + PATCH_DIAMETER ** 2].reshape(
(PATCH_DIAMETER, PATCH_DIAMETER))[60:140, 60:140]
radio_coords = f_h5['/atlas/cdfs/numeric'][i, :2]
nearby = f_h5['/atlas/cdfs/numeric'][i, 2 + PATCH_DIAMETER ** 2:] < ARCMIN
ir_coords = f_h5['/swire/cdfs/numeric'][nearby, :2]
ir_coords = ra_dec_to_pixels(radio_coords, ir_coords)
vec = f_h5['/atlas/cdfs/numeric'][i, :]
probs = [classifier.predict_probabilities(vec)[nearby] for classifier in classifiers]
# Set all but the top n predictions to zero.
n = 1
for probs_ in probs:
top_n = sorted(probs_, reverse=True)[:n]
for j, prob in enumerate(probs_):
if prob not in top_n:
probs_[j] = 0
plt.figure(figsize=(10, 10))
base_size = 200
plt.imshow(numpy.sqrt(image - image.min()), cmap='gray')
colours = cm.rainbow(numpy.linspace(0, 1, 10))
for colour, probs_ in zip(colours, probs):
plt.scatter(ir_coords[:, 0] + numpy.random.normal(size=ir_coords.shape[0], scale=0.5),
ir_coords[:, 1] + numpy.random.normal(size=ir_coords.shape[0], scale=0.5),
s=probs_ * base_size, marker='x', c=colour, alpha=1)
plt.axis('off')
plt.xlim((0, 80))
plt.ylim((0, 80))
```
These classifiers have really low diversity because of the way I divided up the data, but this should work fine.
```
def plot_points_on_background(points, background, noise=False, base_size=200):
    """Scatter *points* (pixel coordinates) over the greyscale *background*
    image, one colour per point.

    If *noise* is set, small Gaussian jitter is added so coincident points
    remain distinguishable.
    """
    plt.imshow(background, cmap='gray')
    colours = cm.rainbow(numpy.linspace(0, 1, len(points)))
    for colour, (x, y) in zip(colours, points):
        if noise:
            x += numpy.random.normal(scale=0.5)
            y += numpy.random.normal(scale=0.5)
        plt.scatter(x, y, marker='o', c=colour, s=base_size)
    plt.axis('off')
    # NOTE(review): shape[0] is the row count (height), so for non-square
    # backgrounds these axis limits look transposed; harmless for the
    # square 80x80 patches used in this notebook — confirm before reuse.
    plt.xlim((0, background.shape[0]))
    plt.ylim((0, background.shape[1]))
def plot_classifications(atlas_vector, ir_matrix, labels, base_size=200):
    """Plot the positively-labelled IR objects near one ATLAS subject.

    *atlas_vector* is a crowdastro numeric row: (ra, dec), then the
    flattened radio patch, then per-IR-object values compared to ARCMIN
    (presumably angular distances — TODO confirm). *labels* is a binary
    vector over all IR objects; only nearby objects labelled 1 are drawn.
    """
    image = atlas_vector[2 : 2 + PATCH_DIAMETER ** 2].reshape((PATCH_DIAMETER, PATCH_DIAMETER)
            )[60:140, 60:140]
    radio_coords = atlas_vector[:2]
    nearby = atlas_vector[2 + PATCH_DIAMETER ** 2:] < ARCMIN
    labels = labels[nearby]
    ir_coords = ir_matrix[nearby, :2][labels.astype(bool)]
    ir_coords = ra_dec_to_pixels(radio_coords, ir_coords)
    plot_points_on_background(ir_coords, image, base_size=base_size)
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
i = 250
atlas_vector = f_h5['/atlas/cdfs/numeric'][i, :]
ir_coords = f_h5['/swire/cdfs/numeric']
plot_classifications(atlas_vector, ir_coords, norris_labels)
```
## Bringing it all together
We want to plot classifications, RGZ labels, and Norris labels in the same row.
```
def plot_classifications_row(atlas_vector, ir_matrix, classifier_labels, rgz_labels, norris_labels, base_size=200):
    """Plot classifier, RGZ, and Norris labels side by side for one subject."""
    panels = [
        ('Classifier', classifier_labels),
        ('RGZ', rgz_labels),
        ('Norris', norris_labels),
    ]
    for position, (title, labels) in enumerate(panels, start=1):
        plt.subplot(1, 3, position)
        plt.title(title)
        plot_classifications(atlas_vector, ir_matrix, labels, base_size=base_size)
with h5py.File(TRAINING_H5_PATH, 'r') as f_h5:
classifier = RGZClassifier(f_h5['features'].value, N_ASTRO)
classifier.train(numpy.arange(f_h5['features'].shape[0]), norris_labels)
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5:
i = 250
vec = f_h5['/atlas/cdfs/numeric'][i, :]
mat = f_h5['/swire/cdfs/numeric']
probs = classifier.predict_probabilities(vec)
labels = numpy.zeros(probs.shape)
labels[probs.argmax()] = 1
plt.figure(figsize=(20, 10))
plot_classifications_row(vec, mat, labels, crowdsourced_labels, norris_labels, base_size=200)
```
| github_jupyter |
# Particle Filtering for Sequential Bayesian Inference
## Introduction
In this notebook, we produce an implementation of the particle filtering methods discussed in the accompanying article. In particular, we introduce the *bearings-only tracking* problem, a nonlinear non-Gaussian sequential inference problem. Next, we implement sequential importance sampling (SIS) to solve bearings-only tracking and observe the method's limitations. Finally, we achieve a respectable solution to our problem using the bootstrap filter.
## The Problem
### Statement
The bearings-only tracking problem is as follows. Suppose a plane begins at $x_0 = (0.01, 0.95)$ with a velocity $v_0=(0.02, -0.013)$ and for $50$ time steps moves according to the following dynamical system.
\begin{align}
v_{t+1} &\sim \mathcal{N}(v_t, \Sigma_v)\\
x_{t+1} & = x_t + v_t
\end{align}
where $\Sigma_v = 10^{-6}I_2$. At each time $t$, we receive a noisy measurement of the plane's bearing, that is
\begin{equation} y_t = \mathrm{angle}(x_t) + w_t \end{equation}
where $w_t \sim \mathcal{N}(0, \sigma=0.005)$.
Our goal is to infer $p(x_{0:50}, v_{0:50}| y_{1:50})$.
### Code
First, we do our imports and set a random seed.
```
import numpy as np
import scipy.stats as sp_stats
import scipy.special as sp_special
import matplotlib.pyplot as plt
import matplotlib.patheffects as pe
import math
np.random.seed(3)
```
Next, we parametrize our prior.
```
# number of time steps
num_samples = 50
# number of particles
num_particles = 100
# Parameters of the prior
x0 = [0.01, 0.95]
v0 = [0.02, -0.013]
vx_var, vy_var = (1.0/1e6, 1.0/1e6)
v_cov = [[vx_var, 0], [0, vy_var]]
measurement_noise = 0.005
```
The `BearingsData` class performs the following functions
1. Simulates a ground-truth state $(x_{0:50}, v_{0:50})$ and accompanying measurements $y_{1:50}$.
2. Provides plotting functions.
3. Gives us a `__getitem__` function, which allows us to sequentially retrieve datapoints.
```
class BearingsData(object):
    """Simulates one run of the bearings-only tracking problem.

    On construction, draws a ground-truth trajectory (positions ``self.x``
    and velocities ``self.v``) plus the corresponding noisy bearing
    measurements ``self.y``. Also provides plotting helpers and
    ``__getitem__`` for sequential access to ``(x_t, v_t, y_t)`` triples.
    """
    def __init__(self, num_samples):
        # Simulates velocities: a Gaussian random walk started at the prior
        # mean v0 with per-step covariance v_cov.
        v_matrix = np.zeros((num_samples, 2))
        v_matrix[0, :] = np.random.multivariate_normal(v0, v_cov)
        for i in range(1, num_samples):
            v_matrix[i, :] = np.random.multivariate_normal(v_matrix[i-1, :], v_cov)
        self.v = v_matrix
        # Calculates positions
        x = np.cumsum(v_matrix, axis=0) + x0 - v_matrix # subtract v_matrix because x_t depends on v_{t-1}
        self.x = x
        # Calculates bearings: the angle of each position plus i.i.d.
        # Gaussian measurement noise. np.arctan (not arctan2) is adequate
        # while the trajectory stays in the first quadrant.
        noise_vector = np.random.multivariate_normal(np.zeros(num_samples),
                                                     (measurement_noise**2) * np.identity(num_samples))
        y = np.arctan(x[:, 1]/x[:, 0]) + noise_vector
        self.y = y
    """Plots trajectory and bearings"""
    def plot(self):
        plt.scatter(self.x[:, 0], self.x[:, 1])
        # NOTE(review): this noise-free bearing array is computed but never
        # used below.
        y = np.arctan(self.x[:, 1]/self.x[:, 0])
        for yt in self.y:
            # Draw each measured bearing as a length-0.5 ray from the origin.
            dx = math.cos(yt) * .5
            dy = math.sin(yt) * .5
            plt.plot([0., dx], [0., dy], color="red", alpha=0.3)
        plt.show()
    """Plots inferences overlayed on ground-truth trajectory and measurements"""
    def plot_inferences(self, inferences):
        # Ground truth as a black line with a white halo for contrast.
        plt.plot(self.x[:, 0], self.x[:, 1], color="black", lw=2,
                 path_effects=[pe.Stroke(linewidth=5, foreground='white'), pe.Normal()])
        for yt in self.y:
            dx = math.cos(yt) * .5
            dy = math.sin(yt) * .5
            plt.plot([0., dx], [0., dy], color="red", alpha=0.3)
        # Each inference is an (n, 2) array of positions.
        for x in inferences:
            plt.scatter(x[:, 0], x[:, 1])
        plt.show()
    """Retrieves position, velocity, and measurement at time t"""
    def __getitem__(self, idx):
        if idx < self.x.shape[0]:
            xt = self.x[idx, :]
            vt = self.v[idx, :]
            yt = self.y[idx]
            return xt, vt, yt
        else:
            # Raising IndexError ends iteration when the object is used as
            # `for xt, vt, yt in data`.
            raise IndexError('high enough')
```
Here, we simulate one run of the data-generating process.
```
data = BearingsData(num_samples)
data.plot()
```
## SIS
In this section, we implement SIS in order to infer the hidden-state given the measurements of the data we generated in the last section.
Throughout our calculations, we use log probabilities for numerical stability.
The following defines the Markov transition kernel from state $(x_t, v_t)$ to $(x_{t+1}, v_{t+1})$. Note that the kernel only takes velocities as input: we assume that the deterministic relation $x_{t+1} = x_t + v_t$ is satisfied.
```
"""Markov transition kernel"""
def transition_kernel(v1, v2):
    """Log-density of stepping from velocity *v1* to *v2* under the model's
    Gaussian random walk with covariance ``v_cov``."""
    return sp_stats.multivariate_normal(mean=v1, cov=v_cov).logpdf(v2)
```
Next, we define the measurement likelihood $p(y_t| x_t, v_t)$. Note that $p(y_t|x_t, v_t)$ does not depend on $v_t$.
```
"""Probability of measurement y given state xt, vt"""
def measurement_prob(yt, xt):
    """Log-likelihood of observing bearing *yt* from position *xt*.

    NOTE(review): ``np.arctan(y/x)`` ignores the quadrant; adequate here
    because the simulated trajectory stays in the first quadrant, but
    ``np.arctan2`` would be more robust — confirm before reusing elsewhere.
    """
    predicted_bearing = np.arctan(xt[1] / xt[0])
    return sp_stats.norm.logpdf(predicted_bearing - yt, scale=measurement_noise)
```
We represent our particles as a dictionary with keys {"locations", "velocities", "weight", "prior_prob"}. We store all the particles in a list.
The following function computes a particle filter step, outputting particles with unnormalized weights. It takes as input particles $\{(x_{0:t}^{(i)}, v_{0:t}^{(i)})\}$, draws each $(x_{t+1}^{(i)}, v_{t+1}^{(i)})$, and updates weights and prior probabilities accordingly. Since we reuse this function when implementing the bootstrap filter, we add the `selection` argument that modifies how the weights are updated.
```
def particle_filter_step(particles, yt, selection=False):
    """Advance every particle one time step given measurement *yt*.

    Each particle is a dict with keys "locations", "velocities", "weight"
    (log) and "prior_prob" (log). Returns new particles with UNNORMALIZED
    log-weights. With ``selection=True`` (bootstrap filter, just after
    resampling) the weight is reset to the current measurement
    log-likelihood instead of being accumulated.
    """
    new_particles = []
    for i in range(len(particles)):
        particle = particles[i]
        prev_x = np.squeeze(particle["locations"][-1:, :])
        prev_v = np.squeeze(particle["velocities"][-1:, :])
        # Propose the next velocity from the prior random walk.
        vt = np.random.multivariate_normal(prev_v, v_cov)
        # x_{t+1} = x_t + v_t: the position update uses the PREVIOUS
        # velocity, matching the data-generating process.
        xt = prev_x + prev_v
        new_particle = {}
        new_particle["prior_prob"] = particle["prior_prob"] + transition_kernel(prev_v, vt)
        if selection:
            new_particle["weight"] = measurement_prob(yt, xt)
        else:
            new_particle["weight"] = particle["weight"] + measurement_prob(yt, xt)
        new_particle["locations"] = np.append(particle["locations"], np.expand_dims(xt, axis=0), axis=0)
        new_particle["velocities"] = np.append(particle["velocities"], np.expand_dims(vt, axis=0), axis=0)
        new_particles.append(new_particle)
    return new_particles
```
The following function normalizes particle weights. Note that we smooth the probability distribution induced by the weights by a factor of $10^{-10}$. If we do not do so, our algorithm attempts to compute $\log(0)$.
```
def normalize_weights(particles):
    """Normalize particle log-weights in place and return the particle list.

    The log-weights go through a softmax, are smoothed by 1e-10 so the
    subsequent log never sees an exact zero, then renormalized with a
    second softmax.
    """
    raw = np.array([p["weight"] for p in particles])
    smoothed = np.log(sp_special.softmax(raw) + 1e-10)
    renormalized = np.log(sp_special.softmax(smoothed))
    for particle, new_weight in zip(particles, renormalized):
        particle['weight'] = new_weight
    return particles
```
The following step combines the functions we have defined so far in order to perform the SIS algorithm.
```
# Initialise every particle at the known starting state with uniform
# (log) weights and zero log prior probability.
particles = [{
    "locations": np.expand_dims(x0, axis=0),
    "velocities": np.expand_dims(v0, axis=0),
    "weight": np.log(1.0/num_particles),
    "prior_prob": 0
} for _ in range(num_particles)]
# Sequential importance sampling: propagate each particle through the
# prior at every step, accumulating measurement log-likelihoods.
for _, _, yt in data:
    particles = particle_filter_step(particles, yt)
    particles = normalize_weights(particles)
```
Let's first visualize all 100 particles and then the highest-likelihood particle. The ground-truth trajectory is the black line and each colored trajectory is a particle.
```
data.plot_inferences([particle['locations'] for particle in particles])
best_idx = np.argmax([particle['weight'] * particle['prior_prob'] for particle in particles])
print('particle weight: ', np.exp(particles[best_idx]['weight']))
data.plot_inferences([particles[best_idx]['locations']])
```
We can see that SIS correctly identifies a particle that is quite close to the ground-truth trajectory. However, we can also see the two limitations of SIS.
1. Since we are sampling trajectories from the prior, we end up with many particles that are obviously inconsistent with our measurements. These are a waste of computation.
2. All but one of the importance weights converge to 0. This means that we effectively only have drawn one sample from $p(x_{0:t}|y_{1:t})$, meaning we only have a very low-resolution picture of the distribution.
In the next section, we see that the bootstrap filter addresses these issues.
## The Bootstrap Filter
The following function defines the selection step from the bootstrap filter algorithm.
```
def selection(particles):
    """Resample the population in proportion to its (linear-space) weights.

    Each slot of the new population is filled by one multinomial draw over
    the particle weights, so heavy particles get duplicated and light ones
    die out. The survivors are the same dict objects, not copies.
    """
    probs = [np.exp(p["weight"]) for p in particles]
    return [
        particles[int(np.argmax(np.random.multinomial(1, probs)))]
        for _ in range(len(particles))
    ]
```
The following code snippet performs the bootstrap filter algorithm on our data. We have made one modification from the bootstrap filter algorithm described in the accompanying article: instead of applying the selection step at every time step, we only apply it every 10 time steps. The rationale for this is that in our particular case, doing selection at every time step still results in all but one of the importance weights becoming $0$. However, doing selection only every $\Delta t$ steps scales the variance of the log-importance weights at selection time by $1/{\Delta t}$. Setting $\Delta t=10$ is sufficient to preserve the diversity of particles.
```
# Bootstrap filter: same initialisation as SIS above.
better_particles = [{
    "locations": np.expand_dims(x0, axis=0),
    "velocities": np.expand_dims(v0, axis=0),
    "weight": np.log(1.0/num_particles),  # uniform initial log-weight
    "prior_prob": 0
} for _ in range(num_particles)]
for t, (_, _, yt) in enumerate(data):
    # Resample only every 10th time step to preserve particle diversity
    # (see the discussion above this cell).
    if t%10==0:
        better_particles = selection(better_particles)
        better_particles = particle_filter_step(better_particles, yt, selection=True)
        better_particles = normalize_weights(better_particles)
    else:
        better_particles = particle_filter_step(better_particles, yt, selection=False)
        better_particles = normalize_weights(better_particles)
data.plot_inferences([particle['locations'] for particle in better_particles])
```
As we can see, the bootstrap filter resolves the problems associated with SIS. Namely,
1. All particles have equal weight.
2. Since all our particles are informative, we have rich information about $p(x_{0:t}|y_{1:t})$.
| github_jupyter |
# Single Beam
This notebook will run the ISR simulator with a set of data created from a function that makes test data. The results along with error bars are plotted below.
```
%matplotlib inline
import matplotlib.pyplot as plt
import os,inspect
from SimISR import Path
import scipy as sp
from SimISR.utilFunctions import readconfigfile,makeconfigfile
from SimISR.IonoContainer import IonoContainer,MakeTestIonoclass
from SimISR.runsim import main as runsim
from SimISR.analysisplots import analysisdump
import seaborn as sns
```
## Set up Config Files
Setting up a configuration file and the directory needed to run the simulation. The simulator assumes that for each simulation there is a dedicated directory to save out data along the different processing stages. The simulator also assumes that there is a configuration file, which is created in the following cell using a default one that comes with the code base. The only parameter the user should have to set is the number of pulses.
```
# set the number of pulses
npulses = 2000
# Locate this notebook's directory and create a dedicated output directory
# for this simulation run.
curloc = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
testpath = os.path.join(os.path.split(curloc)[0],'Testdata','Notebookexample1')
if not os.path.isdir(testpath):
    os.mkdir(testpath)
# Start from the default configuration shipped with the code base.
defaultpath = os.path.join(os.path.split(curloc)[0],'Test')
defcon = os.path.join(defaultpath,'statsbase.ini')
(sensdict,simparams) = readconfigfile(defcon)
# Scale integration and fit intervals so the experiment covers npulses
# pulses at the configured inter-pulse period (IPP).
tint = simparams['IPP']*npulses
ratio1 = tint/simparams['Tint']
simparams['Tint']=ratio1 * simparams['Tint']
simparams['Fitinter'] = ratio1 * simparams['Fitinter']
simparams['TimeLim'] = tint
# File holding the fitter's starting parameter values (made below).
simparams['startfile']='startfile.h5'
makeconfigfile(os.path.join(testpath,'stats.ini'),simparams['Beamlist'],sensdict['Name'],simparams)
```
## Make Input Data
This section will create a set of input parameters that can be used to create ISR Data. It uses a function MakeTestIonoclass which will create a set of plasma parameters that varies with altitude depending on the function inputs. This data is put into an ionocontainer class, which is used as a container class to move data between the radarData class, fitter class and plotting modules. It has a standard format so any radar data or plasma parameters for the simulator can be saved in this.
A start file is also made which will be used as the starting parameter values used in the fitter. The starting points for the fitter use a nearest neighbor in space to what is found in the start file.
```
# Directory for the "ground truth" input plasma parameters.
finalpath = os.path.join(testpath,'Origparams')
if not os.path.isdir(finalpath):
    os.mkdir(finalpath)
# Altitude grid: 120 points from 50 km upward in 5 km steps, on the z-axis.
z = (50.+sp.arange(120)*5.)
nz = len(z)
coords = sp.column_stack((sp.zeros((nz,2)),z))
# Input parameters with altitude-varying temperature, plus a simpler start
# file used as the fitter's initial guess (nearest neighbour in space).
Icont1=MakeTestIonoclass(testv=False,testtemp=True,N_0=1e11,z_0=250.0,H_0=50.0,coords=coords,times =sp.array([[0,1e6]]))
Icontstart = MakeTestIonoclass(testv=False,testtemp=False,N_0=1e11,z_0=250.0,H_0=50.0,coords=coords,times =sp.array([[0,1e6]]))
finalfile = os.path.join(finalpath,'0 stats.h5')
Icont1.saveh5(finalfile)
Icontstart.saveh5(os.path.join(testpath,'startfile.h5'))
```
## Run Simulation
The simulation is run through the submodule runsim and its main function, renamed in this as runsim. This function will call all of the necessary classes and functions to run the simulator. It will save out the data based off of an internal set of file names.
This function must get a configuration file and a list of functionalities it is to perform. Below the runsim function will create spectra from the plasma parameters, create radar data and then fit it.
```
# Run the full pipeline: make spectra, synthesise radar data, then fit it.
functlist = ['spectrums','radardata','fitting']
config = os.path.join(testpath,'stats.ini')
runsim(functlist,testpath,config,True)
```
## Plotting
The data is plotted along with error bars derived from the fitter.
```
sns.set_style("whitegrid")
sns.set_context("notebook")
# One panel per fitted parameter (Ne, Te, Ti), sharing the altitude axis.
fig1,axmat =plt.subplots(1,3,figsize = (16,7),sharey=True)
axvec = axmat.flatten()
fittedfile = os.path.join(testpath,'Fitted','fitteddata.h5')
fitiono = IonoContainer.readh5(fittedfile)
paramlist = ['Ne','Te','Ti']
# Column indices of each parameter and of its error estimate ('n' + name).
indlist =[sp.argwhere(ip==fitiono.Param_Names)[0][0] for ip in paramlist]
n_indlist =[sp.argwhere(('n'+ip)==fitiono.Param_Names)[0][0] for ip in paramlist]
altin =Icont1.Cart_Coords[:,2]
altfit = fitiono.Cart_Coords[:,2]
# Index pairs of the corresponding input parameters in Icont1.Param_List.
in_ind=[[1,0],[1,1],[0,1]]
# Per-panel x-axis limits.
pbounds = [[1e10,1.2e11],[200.,3000.],[200.,2500.],[-100.,100.]]
for i,iax in enumerate(axvec):
    iinind = in_ind[i]
    ifitind = indlist[i]
    n_ifitind = n_indlist[i]
    #plot input
    indata = Icont1.Param_List[:,0,iinind[0],iinind[1]]
    iax.plot(indata,altin)
    #plot fitted data
    fitdata = fitiono.Param_List[:,0,ifitind]
    fit_error = fitiono.Param_List[:,0,n_ifitind]
    ploth=iax.plot(fitdata,altfit)[0]
    iax.set_xlim(pbounds[i])
    # Error bars come from the fitter's parameter uncertainty estimates.
    iax.errorbar(fitdata,altfit,xerr=fit_error,fmt='-o',color=ploth.get_color())
    iax.set_title(paramlist[i])
```
| github_jupyter |
## Monte Carlo on policy evaluation
Monte Carlo on policy evaluation is an important model free policy evaluation algorithm which uses the popular computational method called the Monte Carlo method.
It is important since it is usually the first model free algorithm studied in reinforcement learning.
Model free algorithms are the ones that do not need a full knowledge of all states and transition dynamics.
This makes Monte Carlo on policy evaluation very important since it can be applied into a wide range of real-world scenarios.
It is also agnostic to the Markov Decision Process setting, i.e. it can be applied into reinforcement learning problems that do not follow the MDP setting.
It is guaranteed to converge to a global optimum.
Monte Carlo on policy evaluation can be implemented in three versions, which differ on how they calculate multiple visits of the same state given an episodic (terminating) history: first visit MC, every visit MC and incremental MC.
### Characteristics of Monte Carlo on policy evaluation:
##### Model free
Monte Carlo methods are model free, i.e. they do not require full knowledge of all states or transition dynamics.
##### Finite and discrete state and action spaces
In order for Monte Carlo on policy evaluation to work, the environment has to have a finite state and action space, because it saves state values in a dictionary internally.
The aforementioned is only possible if the state and action spaces are finite and discrete.
##### On policy / Off policy
- On policy methods attempt to evaluate or improve the policy that is used to make decisions.
- Off policy methods evaluate or improve a policy different from that used to generate the data.
Something very positive for off policy methods is that they can figure out the optimal policy **regardless** of the agent’s actions and motivation.
The Monte Carlo methods we will see here are on policy.
Nevertheless there are also off policy versions of Monte Carlo which we will not show here.
##### Convergence
Monte Carlo policy evaluation converges to a *global optimum* value function due to the law of large numbers.
##### Sample based
Monte Carlo methods are sample based.
Monte Carlo samples many histories for many trajectories which frees us from using a model.
As there is no bootstrapping and we need to calculate the return of a state until the end of an episode, one sample in the case of Monte Carlo methods is the full episode.
This means that the update rule for the state values only happens after the current episode has been completely sampled.
##### Unbiased estimation of state values
Because we are taking an average over the true distribution of returns in Monte Carlo, we obtain an unbiased estimator of the state value at each state.
##### Finite Horizon
Monte Carlo methods can only be used in a finite horizon setting, i.e. with episodic (terminating) domains only.
This is inherent from the fact that Monte Carlo update rule for the state value function only happens at the end of each episode, i.e. they are sample based.
##### Epsilon greedy policy
Epsilon greedy policies determine how often will the agent explore and how often will the agent exploit.
Furthermore, we want the epsilon greedy policy to be **greedy in the limit of exploration (GLIE)**.
- all state-action pairs are visited an infinite number of times
- $\epsilon_{t} \to 0$ as $ t \to \infty $, i.e. the exploration rate decays to $0$ and the policy becomes greedy in the limit
In our case, the update rule after each step for our epsilon is the following:
$ \epsilon \gets 1 / ( c_{\epsilon} \times f_{\epsilon})$, where $ c_{\epsilon} $ is a counter that increments after each episode has ended, whereas $ f_{\epsilon} $ is a constant factor.
##### Markov Decision Process agnostic
Monte Carlo methods can be applied in non-MDP settings, i.e. they are MDP agnostic.
##### Discount factor
The discount factor must take a value in the range $[0...1]$.
- setting it to $1$ means that we put as much value to future states as the current state.
- setting it to $0$ means that we do not value future states at all, only the current state.
##### Initialization
For Monte Carlo on policy evaluation we keep track of the following:
- state value functions, initially set to $0$
- for internal calculations we keep track of total reward up to a specific state as well as the number of times that state was visited
- samples array contains the latest sampled episode. It is initially set to an empty array and it is cleared after each episode.
- `self.discount_factor` is set to $0.9$.
- we set `self.decaying_epsilon_mul_factor` to a value of $0.2$.
This is done to allow the agent to explore longer.
`self.epsilon` starts from $5$ and decreases with each episode.
```
import numpy as np
import random
from collections import defaultdict
from environment import Env
# Monte Carlo Agent which learns every episode from the sample
class MCAgent:
    def __init__(self, actions):
        # Grid World board dimensions (5x5).
        self.width = 5
        self.height = 5
        # Action indices available to the agent.
        self.actions = actions
        # Discount factor applied to future rewards.
        self.discount_factor = 0.9
        # Epsilon decay: epsilon = 1 / (counter * mul_factor). The counter
        # advances once per finished episode, so epsilon starts at 5 and
        # shrinks towards 0 (GLIE).
        self.decaying_epsilon_counter = 1
        self.decaying_epsilon_mul_factor = 0.2
        # Set on the first call to update_epsilon().
        self.epsilon = None
        # Transitions of the episode currently being sampled.
        self.samples = []
        # Maps str(state) -> VisitState bookkeeping record.
        self.value_table = defaultdict(VisitState)
# class containing information for visited states
class VisitState:
    """Per-state statistics used by the Monte Carlo agents.

    Attributes:
        total_G: cumulative return accumulated over all visits.
        N: number of visits across episodes.
        V: current state-value estimate.
    """

    def __init__(self, total_G = 0, N = 0, V = 0):
        self.total_G = total_G
        self.N = N
        self.V = V

    def __repr__(self):
        # Debug-friendly representation; useful when inspecting value_table.
        return f"VisitState(total_G={self.total_G}, N={self.N}, V={self.V})"
```
### Monte Carlo on policy evaluation
Monte Carlo methods sample an episode *first* and only after that do they update the V value function.
The class `MCAgent` is a parent class for the three versions of Monte Carlo on policy evaluation: first visit Monte Carlo, every visit Monte Carlo and incremental Monte Carlo.
##### Calculating the discounted returns
At the end of an episode, we start by calculating the discounted returns for each visited state.
We implement the method `preprocess_visited_states()` that calculates the discounted future sum of rewards $G_t$ for each state.
Notice that the calculation of $G_t$ for each visited state is a common process for any version of Monte Carlo methods.
During the calculations, the sample is reversed since it simplifies the calculations, i.e. the discount factor can be applied more easily to the $G_t$ sums in reverse and we do not need to calculate high powers of the discount factor.
In the end it returns the states and their discounted sums in the correct order.
```
class MCAgent(MCAgent):
    # for each episode calculate discounted returns and return info
    def preprocess_visited_states(self):
        """Return [state_name, G_t] pairs for the sampled episode, in visit order.

        The episode is traversed backwards so each discounted return can be
        accumulated incrementally (G = r + gamma * G) without computing
        powers of the discount factor.
        """
        visited = []
        running_return = 0
        for sample in reversed(self.samples):
            state_key = str(sample[0])
            running_return = sample[1] + self.discount_factor * running_return
            visited.append([state_key, running_return])
        visited.reverse()
        # One more episode has finished: advance the epsilon-decay counter.
        self.decaying_epsilon_counter = self.decaying_epsilon_counter + 1
        return visited
```
##### Abstract methods
We define the following two abstract methods:
- `mc()`
- `update_global_value_table()`
These have to be implemented from the specific version of Monte Carlo method.
```
class MCAgent(MCAgent):
    # to be defined in children classes
    def mc(self):
        """Episode-level value update; implemented by each MC variant."""

    # update visited states for first visit or every visit MC
    def update_global_value_table(self, state_name, G_t):
        """Fold one (state, return) pair into the value table; see subclasses."""
```
#### First Visit Monte Carlo
First visit Monte Carlo is a Monte Carlo method that considers only the first visits to a state *in one episode*.
Notice that we can consider multiple visits to a state, but not on the same episode.
We define a child class for the First Visit Monte Carlo agent.
- in the method `mc()` we first call the `preprocess_visited_states()` method that will give us an array of visited states and their returns.
- we make sure to check whether a state has already been visited or not.
If it had been visited, we do not consider that state, we do not update the V values with it.
- in the method `update_global_value_table()` we update the V values according to textbook update formulas.
Notice that the visited states are saved in a dictionary.
##### Update rule
The update rule for V values in the First Visit Monte Carlo is the following:
$ V^{\pi}(s_t) \gets G_{total}(s_t) / N(s_t) $ where:
- $ N(s_t) $ - the number of times the state has been visited during multiple episodes.
Notice that although we are in the first visit case, the number of times a state has been visited can be more than 1.
That same state could have been visited multiple times in *different episodes*.
- $ G_{total}(s_t) $ - cumulative return of multiple visits to that state
```
from mc_agent import MCAgent, VisitState
from environment import Env
class FVMCAgent(MCAgent):
    """First-visit Monte Carlo: within one episode only a state's first
    occurrence contributes to its value estimate."""

    def __init__(self, actions):
        super(FVMCAgent, self).__init__(actions)

    # for every episode, update V values of visited states
    def mc(self):
        all_states = super(FVMCAgent, self).preprocess_visited_states()
        already_seen = set()
        for state_name, G_t in all_states:
            if state_name in already_seen:
                # Later visits within the same episode are ignored.
                continue
            already_seen.add(state_name)
            self.update_global_value_table(state_name, G_t)

    # update V values of visited states for first visit or every visit MC
    def update_global_value_table(self, state_name, G_t):
        if state_name in self.value_table:
            # Known state: V(s) = total_G(s) / N(s).
            entry = self.value_table[state_name]
            entry.total_G = entry.total_G + G_t
            entry.N = entry.N + 1
            entry.V = entry.total_G / entry.N
        else:
            # First ever visit across all episodes.
            self.value_table[state_name] = VisitState(total_G=G_t, N=1, V=G_t)
```
#### Every Visit Monte Carlo
Every Visit Monte Carlo is a Monte Carlo method that does not differentiate if the state has been visited multiple times or not during an episode.
We define a child class for the Every Visit Monte Carlo agent.
- in the method `mc()` we first call the `preprocess_visited_states()` method that will give us an array of visited states and their returns.
- this time we do not check whether that state has already been visited or not. We update our V values with every state in the array.
- in the method `update_global_value_table()` we update the V values according to textbook update formulas.
Notice that the visited states are saved in a dictionary.
##### Update rule
The update rule for V values in the Every Visit Monte Carlo is the following:
$ V^{\pi}(s_t) = G_{total}(s_t) / N(s_t) $ where:
- $ N(s_t) $ - the number of times the state has been visited during multiple episodes.
One state can be visited multiple times in the same episode or in different episodes.
- $ G_{total}(s_t) $ - cumulative return of multiple visits to that state.
```
from mc_agent import MCAgent, VisitState
from environment import Env
class EVMCAgent(MCAgent):
    """Every-visit Monte Carlo: each occurrence of a state in an episode
    contributes to its value estimate."""

    def __init__(self, actions):
        super(EVMCAgent, self).__init__(actions)

    # for every episode, update V values of visited states
    def mc(self):
        for state_name, G_t in super(EVMCAgent, self).preprocess_visited_states():
            self.update_global_value_table(state_name, G_t)

    # update V values of visited states for first visit or every visit MC
    def update_global_value_table(self, state_name, G_t):
        if state_name in self.value_table:
            # Known state: V(s) = total_G(s) / N(s).
            entry = self.value_table[state_name]
            entry.total_G = entry.total_G + G_t
            entry.N = entry.N + 1
            entry.V = entry.total_G / entry.N
        else:
            # First ever visit across all episodes.
            self.value_table[state_name] = VisitState(total_G=G_t, N=1, V=G_t)
```
#### Incremental Monte Carlo
Incremental Monte Carlo is a Monte Carlo method that introduces a new update rule. It has the following key characteristics:
- most importantly, it introduces the notion of a **learning rate**, which we will see below.
- it can take two versions: Incremental First Visit Monte Carlo and Incremental Every Visit Monte Carlo.
We will see the latter one, although the first one can be easily derived.
We define a child class for the Incremental Monte Carlo agent.
- in the method `mc()` we first call the `preprocess_visited_states()` method that will give us an array of visited states and their returns.
- We do not check whether that state has already been visited or not. We update our V values with every state in the array.
- in the method `update_global_value_table()` we update the V values according to textbook update formulas.
Notice that the visited states are saved in a dictionary.
- `update_global_value_table()` is different for Incremental Monte Carlo.
##### Update rule
The update rule for V values in the Incremental Monte Carlo is the following:
$ V^{\pi}(s_t) \gets V^{\pi}(s_t) + \alpha [ G(s_t) - V^{\pi}(s_t) ] $ where:
- $V^{\pi}(s_t)$ - state value of current state following the policy $\pi$
- $ \alpha $ - it is called the **learning rate**.
In our case, we use a **decaying, step-based learning rate** which takes the value of $ \alpha = 0.5 * 1 / N(s_t) $
- $ N(s_t) $ - the number of times the state has been visited during multiple episodes.
Notice that although we are in the first visit case, the number of times a state has been visited can be more than 1.
That same state could have been visited multiple times in *different episodes*.
- $ G(s_t) $ - return until the end of the episode of current state.
##### Setting the learning rate
Incremental Monte Carlo can be thought of as a general case of the previous two methods.
- setting $\alpha = 1 / N(s_t)$ recovers the original Monte Carlo on policy evaluation algorithms.
- setting $\alpha < 1 / N(s_t)$ gives a higher weight to older data
- setting $\alpha > 1 / N(s_t)$ gives a higher weight to newer data, which can help learning in non-stationary domains.
If we are in a truly Markovian domain, Every Visit Monte Carlo will be more data efficient, because we update our average return for a state every time we visit the state.
```
from mc_agent import MCAgent, VisitState
from environment import Env
class IMCAgent(MCAgent):
    """Incremental (every-visit) Monte Carlo with a decaying learning rate."""

    def __init__(self, actions):
        super(IMCAgent, self).__init__(actions)

    # for every episode, update V values of visited states
    def mc(self):
        for state_name, G_t in super(IMCAgent, self).preprocess_visited_states():
            self.update_global_visit_state(state_name, G_t)

    # redefined V value update of visited states for incremental MC
    def update_global_visit_state(self, state_name, G_t):
        if state_name in self.value_table:
            entry = self.value_table[state_name]
            entry.N = entry.N + 1
            # Decaying, step-based learning rate: alpha = 0.5 / N(s).
            learning_rate = 0.5 * 1 / entry.N
            # V(s) <- V(s) + alpha * (G_t - V(s)).
            entry.V = entry.V + learning_rate * (G_t - entry.V)
        else:
            # First ever visit across all episodes.
            self.value_table[state_name] = VisitState(total_G=G_t, N=1, V=G_t)
```
### Other methods
##### Helper methods
```
class MCAgent(MCAgent):
    # get action for the state according to the v function table
    # agent pick action of epsilon-greedy policy
    def get_action(self, state):
        """Choose an action epsilon-greedily w.r.t. the value table."""
        self.update_epsilon()
        if np.random.rand() < self.epsilon:
            # Explore: uniformly random action.
            return int(np.random.choice(self.actions))
        # Exploit: greedy action over the neighbouring states' values.
        neighbour_values = self.possible_next_state(state)
        return int(self.arg_max(neighbour_values))

    def update_epsilon(self):
        # Decaying epsilon: 1 / (episode counter * constant factor).
        self.epsilon = 1 / (self.decaying_epsilon_counter * self.decaying_epsilon_mul_factor)
class MCAgent(MCAgent):
    # append sample to memory(state, reward, done)
    def save_sample(self, state, reward, done):
        """Record one (state, reward, done) transition of the current episode."""
        self.samples.append([state, reward, done])

    # compute arg_max if multiple candidates exit, pick one randomly
    @staticmethod
    def arg_max(next_state):
        """Index of the maximum value; ties are broken uniformly at random."""
        best = max(next_state)
        tied = [index for index, value in enumerate(next_state) if value == best]
        return random.choice(tied)
class MCAgent(MCAgent):
    # get the possible next states
    def possible_next_state(self, state):
        """Return the V values of the four neighbour states (up, down, left,
        right).

        A move that would leave the grid falls back to the current state's
        own value. Looking up a never-visited key inserts a default
        VisitState, because value_table is a defaultdict.
        """
        col, row = state
        # (candidate neighbour, move stays on the board?) in action order:
        # up, down, left, right.
        candidates = [
            ([col, row - 1], row != 0),
            ([col, row + 1], row != self.height - 1),
            ([col - 1, row], col != 0),
            ([col + 1, row], col != self.width - 1),
        ]
        values = []
        for neighbour, on_board in candidates:
            key = str(neighbour) if on_board else str(state)
            values.append(self.value_table[key].V)
        return values
```
##### Main loop
Since all Monte Carlo methods are closely related, we define a common function called `mainloop()` in the parent class `MCAgent`.
All children MC agents inherit this method and can execute it in their static main functions.
```
class MCAgent(MCAgent):
    # to be called in a main loop
    def mainloop(self, env, verbose=False):
        """Sample episodes with the epsilon-greedy policy and run the MC
        update (self.mc()) once each episode terminates.

        env must provide reset(), render(), step(action) and the terminal
        flag `done`; `verbose` prints a few tracked state values per episode.
        """
        for episode in range(1000):
            state = env.reset()
            action = self.get_action(state)
            while True:
                env.render()
                # forward to next state. reward is number and done is boolean
                next_state, reward, done = env.step(action)
                self.save_sample(next_state, reward, done)
                # get next action
                action = self.get_action(next_state)
                # at the end of each episode, update the v function table
                if done:
                    if(verbose):
                        print("episode : ", episode, "\t[3, 2]: ", round(self.value_table["[3, 2]"].V, 2),
                              "\t[2, 3]:", round(self.value_table["[2, 3]"].V, 2),
                              "\t[2, 2]:", round(self.value_table["[2, 2]"].V, 2),
                              "\t\tepsilon: ", round(self.epsilon, 2))
                    self.mc()
                    # Clear the sample buffer for the next episode.
                    self.samples.clear()
                    break
```
Implementing the main functions for the three Monte Carlo agents is pretty straightforward now.
##### First Visit Monte Carlo agent
```
if __name__ == "__main__":
    # Run the first-visit MC agent in the Grid World environment.
    env = Env()
    agent = FVMCAgent(actions=list(range(env.n_actions)))
    try:
        agent.mainloop(env)
    # Best-effort run: swallow Ctrl-C and environment teardown errors, but —
    # unlike the previous bare `except:` — let SystemExit/GeneratorExit
    # propagate instead of being silently hidden.
    except (KeyboardInterrupt, Exception):
        pass
```
##### Every Visit Monte Carlo agent
```
if __name__ == "__main__":
    # Run the every-visit MC agent in the Grid World environment.
    env = Env()
    agent = EVMCAgent(actions=list(range(env.n_actions)))
    try:
        agent.mainloop(env)
    # Best-effort run: swallow Ctrl-C and environment teardown errors, but —
    # unlike the previous bare `except:` — let SystemExit/GeneratorExit
    # propagate instead of being silently hidden.
    except (KeyboardInterrupt, Exception):
        pass
```
##### Incremental Monte Carlo agent
```
if __name__ == "__main__":
    # Run the incremental MC agent in the Grid World environment.
    env = Env()
    agent = IMCAgent(actions=list(range(env.n_actions)))
    try:
        agent.mainloop(env)
    # Best-effort run: swallow Ctrl-C and environment teardown errors, but —
    # unlike the previous bare `except:` — let SystemExit/GeneratorExit
    # propagate instead of being silently hidden.
    except (KeyboardInterrupt, Exception):
        pass
```
### Results
All Monte Carlo agents will converge to an optimal policy usually within 40 iterations.
The most effective agents to solve our problem seem to be the following:
- First Visit Monte Carlo - In First Visit Monte Carlo, we discard states visited multiple times inside an episode that have high returns in their late visits.
Basically, we only consider the first return of that state, which is of course much less (more discounted) than the returns of late visits.
This in turn seems to encourage our agent not to waste time going back and forth in order to avoid being penalized by the triangles.
Very important for making Incremental Monte Carlo on policy evaluation converge to an optimal policy in Grid World is the **decaying learning rate**, that decays with increasing number of episodes.
| github_jupyter |
# Monte Carlo Simulation
Today, we will work with the Lennard Jones equation.
$$ U(r) = 4 \epsilon \left[\left(\frac{\sigma}{r}\right)^{12} -\left(\frac{\sigma}{r}\right)^{6} \right] $$
Reduced Units:
$$ U^*\left(r_{ij} \right) = 4 \left[\left(\frac{1}{r^*_{ij}}\right)^{12} -\left(\frac{1}{r^*_{ij}}\right)^{6} \right] $$
```
import math
import os
def calculate_LJ(r_ij):
    """
    The LJ interaction energy between two particles.

    Computes the pairwise Lennard Jones interaction energy based on the
    separation distance in reduced units.

    Parameters
    ----------
    r_ij : float
        The distance between the particles in reduced units.

    Returns
    -------
    pairwise_energy : float
        The pairwise Lennard Jones interaction energy in reduced units.
    """
    inv_r6 = math.pow(1 / r_ij, 6)    # (1/r*)^6
    inv_r12 = math.pow(inv_r6, 2)     # (1/r*)^12
    return 4 * (inv_r12 - inv_r6)
#Defining a function for the tail correction
def calculate_tail_correction(particles, volume, r_cutoff):
    """
    Calculate the standard Lennard Jones tail (long-range) correction.

    Parameters
    ----------
    particles : int
        The number of particles in the sample
    volume : float
        The volume of the sample
    r_cutoff : float
        The cutoff radius

    Returns
    -------
    tail_correction_factor : float
        The tail correction factor in reduced units
    """
    inv_rc3 = math.pow(1 / r_cutoff, 3)
    inv_rc9 = math.pow(inv_rc3, 3)
    # Prefactor 8*pi*N^2 / (3*V), applied to the integrated LJ tail.
    prefactor = (8 * math.pi * (particles ** 2)) / (3 * volume)
    return prefactor * ((1/3) * inv_rc9 - inv_rc3)
# Sanity checks for the functions defined above.
tail_correction_factor = calculate_tail_correction(800, 10**3, 3)
assert math.isclose(tail_correction_factor, -1.9849E+02, rel_tol=0.00001)
# At r* = 1 the LJ potential is 0; at the minimum r* = 2^(1/6) it is -1.
calculate_LJ(1)
calculate_LJ(math.pow(2,(1/6)))
assert 1 == 1
#If we assert something that is not True (otherwise known as False), we will get an assert error
# assert 1 == 0
assert calculate_LJ(1) == 0
assert calculate_LJ(math.pow(2, (1/6))) == -1
def calculate_distance(coord1, coord2):
    """
    Calculate the distance between two 3D coordinates.

    Parameters
    ----------
    coord1, coord2 : list
        The atomic coordinates [x, y, z]

    Returns
    -------
    distance : float
        The distance between the two atoms
    """
    squared_diffs = (
        (coord1[axis] - coord2[axis]) ** 2 for axis in range(len(coord1))
    )
    return math.sqrt(sum(squared_diffs))
# Unit separation along x: distance should be exactly 1.
point_1 = [0, 0, 0]
point_2 = [1, 0, 0]
dist1 = calculate_distance(point_1, point_2)
assert dist1 == 1
# Unit separation along both y and z: distance should be sqrt(2).
point_3 = [0, 0, 0]
point_4 = [0, 1, 1]
dist2 = calculate_distance(point_3, point_4)
assert math.sqrt(2) == dist2
# Three atoms spaced at the LJ minimum distance 2^(1/6) along y.
atomic_coordinates = [[0, 0, 0], [0, math.pow(2, 1/6), 0], [0, 2*math.pow(2, 1/6), 0]]
def calculate_total_energy(coordinates, cutoff=math.inf):
    """
    Calculate the total Lennard Jones energy of a system of particles.

    Parameters
    ----------
    coordinates : list
        Nested list containing particle coordinates.
    cutoff : float, optional
        Pair distances at or beyond this value are skipped. Defaults to
        ``math.inf`` (no cutoff), which keeps callers that omit the
        argument working.

    Returns
    -------
    total_energy : float
        The total pairwise Lennard Jones energy of the system of particles.
    """
    total_energy = 0
    num_atoms = len(coordinates)
    # Loop over unique pairs (i < j) so each interaction is counted once.
    for i in range(num_atoms):
        for j in range(i+1, num_atoms):
            dist_ij = calculate_distance(coordinates[i], coordinates[j])
            if dist_ij < cutoff:
                total_energy += calculate_LJ(dist_ij)
    return total_energy
def read_xyz(filepath):
    """
    Reads coordinates from an xyz file.

    Parameters
    ----------
    filepath : str
        The path to the xyz file to be processed.

    Returns
    -------
    atomic_coordinates : list
        A two dimensional list containing atomic coordinates
    box_length : float
        The box length parsed from the first token of the first line.
    """
    with open(filepath) as xyz_file:
        # First line: box length (first token); second line: atom count.
        box_length = float(xyz_file.readline().split()[0])
        num_atoms = float(xyz_file.readline())  # header count; not used below
        raw_lines = xyz_file.readlines()
    atomic_coordinates = []
    for raw_line in raw_lines:
        tokens = raw_line.split()
        # Drop the leading atom label; keep the numeric x, y, z columns.
        atomic_coordinates.append([float(token) for token in tokens[1:]])
    return atomic_coordinates, box_length
import os

# Working directory must contain the sample-configuration folder.
os.getcwd()
#os.chdir('/home/jasminehakim/msse-bootcamp/team1-project/homework/day_2')
file_path = os.path.join('lj_sample_configurations', 'lj_sample_config_periodic1.txt')
coordinates, box_length = read_xyz(file_path)
box_length
len(coordinates)
# Bug fix: calculate_total_energy requires a cutoff argument, so calling it
# without one raised a TypeError. Use the same cutoff (3) as the tail
# correction computed above.
calculate_total_energy(coordinates, 3)
# Bug fix: exact float equality against the rounded literal -1.9849E+02
# always fails; compare with a tolerance, as done earlier in the notebook.
assert math.isclose(tail_correction_factor, -1.9849E+02, rel_tol=0.00001)
```
| github_jupyter |
```
# Install missing dependencies on the fly (e.g. on a fresh Colab kernel).
try:
    import tinygp
except ImportError:
    %pip install -q tinygp
try:
    import jaxopt
except ImportError:
    %pip install -q jaxopt
```
(mixture)=
# Mixture of Kernels
It can be useful to model a dataset using a mixture of GPs.
For example, the data might have both systematic effects and a physical signal that can be modeled using a GP.
I know of a few examples where this method has been used in the context of time series analysis for the discovery of transiting exoplanets (for example, [Aigrain et al. 2016](https://arxiv.org/abs/1603.09167) and [Luger et al. 2016](https://arxiv.org/abs/1607.00524)), but I'm sure that these aren't the earliest references.
The idea is pretty simple: if your model is a mixture of two GPs (with covariance matrices $K_1$ and $K_2$ respectively), this is equivalent to a single GP where the kernel is the sum of two kernels, one for each component ($K = K_1 + K_2$).
In this case, the equation for the predictive mean conditioned on a dataset $\boldsymbol{y}$ is
$$
\boldsymbol{\mu} = (K_1 + K_2)\,(K_1 + K_2 + N)^{-1} \, \boldsymbol{y}
$$
where $N$ is the (possibly diagonal) matrix describing the measurement uncertainties.
It turns out that the equation for computing the predictive mean for component 1 is simply
$$
\boldsymbol{\mu}_1 = K_1\,(K_1 + K_2 + N)^{-1} \, \boldsymbol{y}
$$
and the equivalent expression can be written for component 2.
This can be implemented in `tinygp` using the new `kernel` keyword argument in the `predict` method.
To demonstrate this, let's start by generating a synthetic dataset.
Component 1 is a systematic signal that depends on two input parameters ($t$ and $\theta$ following Aigrain) and component 2 is a quasiperiodic oscillation that is the target of our analysis.
```
import jax
import jax.numpy as jnp
import numpy as np
import matplotlib.pyplot as plt
from tinygp import GaussianProcess, kernels, transforms
from jax.config import config
# Enable 64-bit floats in JAX; GP computations are fragile in single precision.
config.update("jax_enable_x64", True)

# Synthetic inputs: N irregularly spaced times t in [0, 10] and a second
# input theta in [-pi, pi], stacked into an (N, 2) design matrix X.
random = np.random.default_rng(123)
N = 256
t = np.sort(random.uniform(0, 10, N))
theta = random.uniform(-np.pi, np.pi, N)
X = np.vstack((t, theta)).T
def build_gp(params):
    """Build a two-component GP over the module-level inputs X.

    params holds log-valued hyperparameters so that an optimizer can work in
    an unconstrained space; they are exponentiated here.
    """
    # Component 1: Matern-3/2 with a Linear input transform over both
    # input dimensions (the systematics model).
    kernel1 = jnp.exp(params["log_amp1"]) * transforms.Linear(
        jnp.exp(params["log_scale1"]), kernels.Matern32()
    )
    # Component 2: quasiperiodic kernel (squared-exponential times
    # exp-sine-squared) restricted to input axis 0 (time) via Subspace.
    kernel2 = jnp.exp(params["log_amp2"]) * transforms.Subspace(
        0,
        kernels.ExpSquared(jnp.exp(params["log_scale2"]))
        * kernels.ExpSineSquared(
            scale=jnp.exp(params["log_period"]),
            gamma=jnp.exp(params["log_gamma"]),
        ),
    )
    # A mixture of GPs is a single GP whose kernel is the sum of the parts.
    kernel = kernel1 + kernel2
    return GaussianProcess(kernel, X, diag=jnp.exp(params["log_diag"]))
# Ground-truth hyperparameters (log-space) used to simulate the data.
true_params = {
    "log_amp1": np.log(2.0),
    "log_scale1": np.log([2.0, 0.8]),
    "log_amp2": np.log(2.0),
    "log_scale2": np.log(3.5),
    "log_period": np.log(2.0),
    "log_gamma": np.log(10.0),
    "log_diag": np.log(0.5),
}
# Draw one realisation from the GP prior as the synthetic dataset.
gp = build_gp(true_params)
y = gp.sample(jax.random.PRNGKey(5678))
plt.plot(t, y, ".k")
plt.ylim(-6.5, 6.5)
plt.xlim(0, 10)
plt.xlabel("t")
plt.ylabel("y");
```
The physical (oscillatory) component is not obvious in this dataset because it is swamped by the systematics.
Now, we'll find the maximum likelihood hyperparameters by numerically minimizing the negative log-likelihood function.
```
import jaxopt
@jax.jit
def loss(params):
return -build_gp(params).log_probability(y)
solver = jaxopt.ScipyMinimize(fun=loss)
soln = solver.run(true_params)
print("Maximum likelihood parameters:")
print(soln.params)
```
Now let's use the trick from above to compute the prediction of component 1 and remove it to see the periodic signal.
```
# Compute the predictive means - note the "kernel" argument
gp = build_gp(soln.params)
mu1 = gp.condition(y, kernel=gp.kernel.kernel1).gp.loc
mu2 = gp.condition(y, kernel=gp.kernel.kernel2).gp.loc
plt.plot(t, y, ".k", mec="none", alpha=0.3)
plt.plot(t, y - mu1, ".k")
plt.plot(t, mu2)
plt.ylim(-6.5, 6.5)
plt.xlim(0, 10)
plt.xlabel("t")
plt.ylabel("y");
```
In this plot, the original dataset is plotted in light gray points and the "de-trended" data with component 1 removed is plotted as black points.
The prediction of the GP model for component 2 is shown as a blue line.
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
from xentropy import dihedrals
from astropy import units as au
```
# single Gaussian distro
## create artificial data
```
data= np.random.randn(100000)*30
```
## perform kde
```
dih_ent = dihedrals.dihedralEntropy(data=data,verbose=True)
dih_ent.calculate()
```
## plot normalized histogram and kde
```
f,axs = plt.subplots(ncols=2,figsize=(12,6))
axs[0].hist(data,180,density=True, label="histogram")
xs, ys = dih_ent.pdf_x_deg,dih_ent.pdf_deg
axs[0].plot(xs,ys, lw=5,alpha=.7, label="XEntropy KDE\nS = {:.3f} J/(mol*K)".format(dih_ent.entropy))
axs[0].set(xlabel="artif. dihedrals / degree", ylabel="prob. density / degree$^{-1}$")
axs[1].hist(data/180*np.pi,180,density=True, label="histogram")
xs, ys = dih_ent.pdf_x,dih_ent.pdf
axs[1].plot(xs,ys, lw=5,alpha=.7, label="XEntropy KDE\nS = {:.3f} J/(mol*K)".format(dih_ent.entropy))
axs[1].set(xlabel="artif. dihedrals / radian", ylabel="prob. density / radian$^{-1}$")
for ax in axs:
ax.legend(loc="upper right")
```
# Gaussians of variable width
## create artificial data
```
data= [np.random.randn(100000)*20,
np.random.randn(100000)*30,
np.random.randn(100000)*40,
np.random.randn(100000)*50]
```
## perform kde
```
dih_ent = dihedrals.dihedralEntropy(data=data,verbose=True,input_unit="degree")
dih_ent.calculate()
```
## plot normalized histogram and kde
```
f,axs = plt.subplots(2,2,figsize=(12,12),sharex=True,sharey=True)
for ax,dat,xs,ys,S in zip(axs.flatten(),data,dih_ent.pdf_x_deg,dih_ent.pdf_deg, dih_ent.entropy):
ax.hist(dat,180,density=True, label="histogram")
ax.plot(xs,ys, lw=5,alpha=.7, label="XEntropy KDE\nS = {:.3f} J/(mol*K)".format(S))
ax.set(xlabel="artificial dihedrals", ylabel="probability density")
ax.legend()
f.tight_layout()
```
# binodal distributions
## create artificial data
```
def binodal_data(n_samples=1001, w1=10, w2=10, c1=-90, c2=90):
    """Draw a two-mode Gaussian mixture of artificial dihedral angles.

    Half of the samples (rounded down) come from N(c1, w1**2) and the
    remainder from N(c2, w2**2), so the result always has exactly
    ``n_samples`` entries. The mode centers, previously hard-coded at
    -90 and +90 degrees, are now parameters with those defaults.
    """
    n1 = n_samples // 2
    n2 = n_samples - n1
    mode1 = np.random.randn(n1) * w1 + c1
    mode2 = np.random.randn(n2) * w2 + c2
    return np.concatenate([mode1, mode2])
data= [binodal_data(100000,5,25),
binodal_data(100000,15,25),
binodal_data(100000,25,25),
binodal_data(100000,35,25)]
```
## perform kde
```
dih_ent = dihedrals.dihedralEntropy(data=data, verbose=False, input_unit="degree")
dih_ent.calculate()
```
## plot normalized histogram and kde
```
f,axs = plt.subplots(2,2,figsize=(12,12),sharex=True,sharey=True)
for ax,dat,xs,ys,S in zip(axs.flatten(), data,dih_ent.pdf_x_deg, dih_ent.pdf_deg, dih_ent.entropy):
ax.hist(dat,180,density=True, label="histogram")
ax.plot(xs,ys, lw=5,alpha=.7, label="XEntropy KDE\nS = {:.3f} J/(mol*K)".format(S))
ax.set(xlabel="artificial dihedrals", ylabel="probability density")
ax.legend()
f.tight_layout()
```
# shifted binodal distributions
## create artificial data
```
def binodal_data(n_samples=1001, w1=10, w2=10):
    """Two Gaussian modes centered at 0 and 180 degrees, wrapped back
    into the interval [-180, 180)."""
    half = n_samples // 2
    lower = np.random.randn(half) * w1
    upper = np.random.randn(n_samples - half) * w2 + 180
    samples = np.concatenate([lower, upper])
    # x % 360 lands in [0, 360); shifting by -180 wraps into [-180, 180).
    return samples % 360 - 180
data= [binodal_data(100000,5,25),
binodal_data(100000,15,25),
binodal_data(100000,25,25),
binodal_data(100000,35,25)]
```
## perform kde
```
dih_ent = dihedrals.dihedralEntropy(data=data, verbose=False, input_unit="degree")
dih_ent.calculate()
```
## plot normalized histogram and kde
```
f,axs = plt.subplots(2,2,figsize=(12,12),sharex=True,sharey=True)
for ax,dat,xs,ys,S in zip(axs.flatten(),data,dih_ent.pdf_x_deg,dih_ent.pdf_deg, dih_ent.entropy):
ax.hist(dat,180,density=True, label="histogram")
ax.plot(xs,ys, lw=5,alpha=.7, label="XEntropy KDE\nS = {:.3f} J/(mol*K)".format(S))
ax.set(xlabel="artificial dihedrals", ylabel="probability density")
ax.legend()
f.tight_layout()
```
# trinodal distributions (butane-like)
## create artificial data
```
def trinodal_data(n_samples=1001, w1=20, w2=20, w3=20):
    """Butane-like torsion distribution: an anti mode at 0 degrees
    (2/5 of the samples) plus two gauche modes at -120 and +120 degrees
    (the remainder, split evenly).

    Always returns exactly ``n_samples`` values.
    """
    # Integer (floor) arithmetic instead of int(float) truncation keeps
    # the group sizes exact and guarantees n1 + n2 + n3 == n_samples.
    n1 = n_samples * 2 // 5
    n2 = (n_samples - n1) // 2
    n3 = n_samples - n1 - n2
    anti = np.random.randn(n1) * w1
    gauche_minus = np.random.randn(n2) * w2 - 120
    gauche_plus = np.random.randn(n3) * w3 + 120
    return np.concatenate([anti, gauche_minus, gauche_plus])
data= trinodal_data(100000)
```
## perform kde
```
dih_ent = dihedrals.dihedralEntropy(data=data, verbose=False, input_unit="degree")
dih_ent.calculate()
```
## plot normalized histogram and kde
```
f,axs = plt.subplots()
xs, ys = dih_ent.pdf_x_deg,dih_ent.pdf_deg
axs.hist(data,180,density=True, label="histogram")
axs.plot(xs,ys, lw=5,alpha=.7, label="XEntropy KDE\nS = {:.3f} J/(mol*K)".format(dih_ent.entropy))
axs.set(xlabel="artificial dihedrals", ylabel="probability density")
axs.legend()
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.inspection import permutation_importance
import multiprocessing
labels = pd.read_csv('../csv/train_labels.csv')
labels.head()
values = pd.read_csv('../csv/train_values.csv')
values.head(10).T
values.isnull().values.any()
labels.isnull().values.any()
values.dtypes
values["building_id"].count() == values["building_id"].drop_duplicates().count()
values.info()
to_be_categorized = ["land_surface_condition", "foundation_type", "roof_type",\
"position", "ground_floor_type", "other_floor_type",\
"plan_configuration", "legal_ownership_status"]
for row in to_be_categorized:
values[row] = values[row].astype("category")
values.info()
# Downcast integer columns to the smallest dtype that can hold them, to
# shrink the DataFrame's memory footprint.
datatypes = dict(values.dtypes)
for row in values.columns:
    if datatypes[row] != "int64" and datatypes[row] != "int32" and \
       datatypes[row] != "int16" and datatypes[row] != "int8":
        continue
    # Size the dtype from BOTH the min and max observed values: the
    # previous version inspected only the max, which mis-cast columns
    # containing negatives (e.g. min < -128 with max <= 127 -> int8
    # overflow) and silently forced values >= 2**31 into int16.
    col_min = values[row].min()
    col_max = values[row].max()
    if np.iinfo(np.int8).min <= col_min and col_max <= np.iinfo(np.int8).max:
        values[row] = values[row].astype(np.int8)
    elif np.iinfo(np.int16).min <= col_min and col_max <= np.iinfo(np.int16).max:
        values[row] = values[row].astype(np.int16)
    elif np.iinfo(np.int32).min <= col_min and col_max <= np.iinfo(np.int32).max:
        values[row] = values[row].astype(np.int32)
    # Otherwise the column stays at its original (wider) integer dtype.
values.info()
labels.info()
labels["building_id"] = labels["building_id"].astype(np.int32)
labels["damage_grade"] = labels["damage_grade"].astype(np.int8)
labels.info()
```
# Primer Modelo
```
important_values = values\
.merge(labels, on="building_id")
important_values.drop(columns=["building_id"], inplace = True)
important_values["geo_level_1_id"] = important_values["geo_level_1_id"].astype("category")
X_train, X_test, y_train, y_test = train_test_split(important_values.drop(columns = 'damage_grade'),
important_values['damage_grade'], test_size = 0.2, random_state = 123)
def encode_and_bind(original_dataframe, feature_to_encode):
    """Return a copy of ``original_dataframe`` with ``feature_to_encode``
    replaced by its one-hot (dummy) indicator columns.

    The dummy columns are named ``<feature>_<value>`` and appended at the
    end of the frame; the original column is dropped. The input frame is
    not modified.
    """
    dummies = pd.get_dummies(original_dataframe[[feature_to_encode]])
    encoded = pd.concat([original_dataframe, dummies], axis=1)
    return encoded.drop(columns=[feature_to_encode])
features_to_encode = ["geo_level_1_id", "land_surface_condition", "foundation_type", "roof_type",\
"position", "ground_floor_type", "other_floor_type",\
"plan_configuration", "legal_ownership_status"]
for feature in features_to_encode:
X_train = encode_and_bind(X_train, feature)
rf_model = RandomForestClassifier(n_estimators = 150, max_depth = 15, criterion = "gini", verbose=True)
rf_model.fit(X_train, y_train)
rf_model.score(X_train, y_train)
rf_model.feature_importances_
fig, ax = plt.subplots(figsize = (20,20))
plt.bar(X_train.columns, rf_model.feature_importances_)
plt.xlabel("Features")
plt.xticks(rotation = 90)
plt.ylabel("Importance")
plt.show()
```
# Obtenemos un grafico que indica la importancia de cada columna para el modelo recien generado.
```
important_columns = [col for col in values.columns if col.startswith('has_superstructure')]
important_values = values[important_columns + \
['building_id', 'geo_level_1_id', 'foundation_type', 'ground_floor_type', 'age', 'area_percentage']]\
.merge(labels, on="building_id")
important_values.drop(columns=["building_id"], inplace = True)
important_values["geo_level_1_id"] = important_values["geo_level_1_id"].astype("category")
important_values
X_train, X_test, y_train, y_test = train_test_split(important_values.drop(columns = 'damage_grade'),
important_values['damage_grade'], test_size = 0.2, random_state = 123)
def encode_and_bind(original_dataframe, feature_to_encode):
    """One-hot encode ``feature_to_encode`` and splice the dummy columns
    onto the frame, dropping the source column."""
    one_hot = pd.get_dummies(original_dataframe[[feature_to_encode]])
    combined = pd.concat([original_dataframe, one_hot], axis=1)
    combined = combined.drop([feature_to_encode], axis=1)
    return combined
features_to_encode = ["geo_level_1_id", "foundation_type", "ground_floor_type"]
for feature in features_to_encode:
X_train = encode_and_bind(X_train, feature)
X_test = encode_and_bind(X_test, feature)
rf_model = RandomForestClassifier(n_estimators = 100, max_depth = 15, max_features = 45, criterion = "gini", verbose=True)
rf_model.fit(X_train, y_train)
rf_model.score(X_train, y_train)
y_preds = rf_model.predict(X_test)
f1_score(y_test, y_preds, average='micro')
rf_model.feature_importances_
test_values = pd.read_csv('../csv/test_values.csv', index_col = "building_id")
test_values
important_columns = [col for col in test_values.columns if col.startswith('has_superstructure')]
test_values_subset = test_values[important_columns + \
['geo_level_1_id', 'foundation_type', 'ground_floor_type', 'age', 'area_percentage']]
test_values_subset["geo_level_1_id"] = test_values_subset["geo_level_1_id"].astype("category")
test_values_subset
def encode_and_bind(original_dataframe, feature_to_encode):
    """Replace ``feature_to_encode`` with its dummy-indicator columns
    (named ``<feature>_<value>``), leaving the other columns in place."""
    with_dummies = original_dataframe.join(
        pd.get_dummies(original_dataframe[[feature_to_encode]])
    )
    return with_dummies.drop([feature_to_encode], axis=1)
features_to_encode = ["geo_level_1_id", "foundation_type", "ground_floor_type"]
for feature in features_to_encode:
test_values_subset = encode_and_bind(test_values_subset, feature)
test_values_subset
preds = rf_model.predict(test_values_subset)
submission_format = pd.read_csv('../csv/submission_format.csv', index_col = "building_id")
my_submission = pd.DataFrame(data=preds,
columns=submission_format.columns,
index=submission_format.index)
```
| github_jupyter |
##### Copyright 2018 The TF-Agents Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Environments
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/agents/tutorials/2_environments_tutorial">
<img src="https://www.tensorflow.org/images/tf_logo_32px.png" />
View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/agents/blob/master/docs/tutorials/2_environments_tutorial.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/agents/blob/master/docs/tutorials/2_environments_tutorial.ipynb">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/agents/docs/tutorials/2_environments_tutorial.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
## Introduction
The goal of Reinforcement Learning (RL) is to design agents that learn by interacting with an environment. In the standard RL setting, the agent receives an observation at every time step and chooses an action. The action is applied to the environment and the environment returns a reward and a new observation. The agent trains a policy to choose actions to maximize the sum of rewards, also known as return.
In TF-Agents, environments can be implemented either in Python or TensorFlow. Python environments are usually easier to implement, understand, and debug, but TensorFlow environments are more efficient and allow natural parallelization. The most common workflow is to implement an environment in Python and use one of our wrappers to automatically convert it into TensorFlow.
Let us look at Python environments first. TensorFlow environments follow a very similar API.
## Setup
If you haven't installed tf-agents or gym yet, run:
```
!pip install --pre tf-agents[reverb]
!pip install 'gym==0.10.11'
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow as tf
import numpy as np
from tf_agents.environments import py_environment
from tf_agents.environments import tf_environment
from tf_agents.environments import tf_py_environment
from tf_agents.environments import utils
from tf_agents.specs import array_spec
from tf_agents.environments import wrappers
from tf_agents.environments import suite_gym
from tf_agents.trajectories import time_step as ts
tf.compat.v1.enable_v2_behavior()
```
## Python Environments
Python environments have a `step(action) -> next_time_step` method that applies an action to the environment, and returns the following information about the next step:
1. `observation`: This is the part of the environment state that the agent can observe to choose its actions at the next step.
2. `reward`: The agent is learning to maximize the sum of these rewards across multiple steps.
3. `step_type`: Interactions with the environment are usually part of a sequence/episode. e.g. multiple moves in a game of chess. step_type can be either `FIRST`, `MID` or `LAST` to indicate whether this time step is the first, intermediate or last step in a sequence.
4. `discount`: This is a float representing how much to weight the reward at the next time step relative to the reward at the current time step.
These are grouped into a named tuple `TimeStep(step_type, reward, discount, observation)`.
The interface that all python environments must implement is in `environments/py_environment.PyEnvironment`. The main methods are:
```
class PyEnvironment(object):
  """Abstract interface for Python RL environments.

  Subclasses override the underscored hooks `_reset()` and `_step()`;
  the public `reset()` and `step()` wrappers cache the resulting
  `TimeStep` so it can be re-read through `current_time_step()`.
  """

  def reset(self):
    """Return initial_time_step."""
    self._current_time_step = self._reset()
    return self._current_time_step

  def step(self, action):
    """Apply action and return new time_step."""
    # Auto-reset: stepping before the first reset starts a new episode.
    if self._current_time_step is None:
      return self.reset()
    self._current_time_step = self._step(action)
    return self._current_time_step

  def current_time_step(self):
    return self._current_time_step

  def time_step_spec(self):
    """Return time_step_spec."""

  @abc.abstractmethod
  def observation_spec(self):
    """Return observation_spec."""

  @abc.abstractmethod
  def action_spec(self):
    """Return action_spec."""

  @abc.abstractmethod
  def _reset(self):
    """Return initial_time_step."""

  @abc.abstractmethod
  def _step(self, action):
    """Apply action and return new time_step."""
```
In addition to the `step()` method, environments also provide a `reset()` method that starts a new sequence and provides an initial `TimeStep`. It is not necessary to call the `reset` method explicitly. We assume that environments reset automatically, either when they get to the end of an episode or when step() is called the first time.
Note that subclasses do not implement `step()` or `reset()` directly. They instead override the `_step()` and `_reset()` methods. The time steps returned from these methods will be cached and exposed through `current_time_step()`.
The `observation_spec` and the `action_spec` methods return a nest of `(Bounded)ArraySpecs` that describe the name, shape, datatype and ranges of the observations and actions respectively.
In TF-Agents we repeatedly refer to nests which are defined as any tree like structure composed of lists, tuples, named-tuples, or dictionaries. These can be arbitrarily composed to maintain structure of observations and actions. We have found this to be very useful for more complex environments where you have many observations and actions.
### Using Standard Environments
TF Agents has built-in wrappers for many standard environments like the OpenAI Gym, DeepMind-control and Atari, so that they follow our `py_environment.PyEnvironment` interface. These wrapped environments can be easily loaded using our environment suites. Let's load the CartPole environment from the OpenAI gym and look at the action and time_step_spec.
```
environment = suite_gym.load('CartPole-v0')
print('action_spec:', environment.action_spec())
print('time_step_spec.observation:', environment.time_step_spec().observation)
print('time_step_spec.step_type:', environment.time_step_spec().step_type)
print('time_step_spec.discount:', environment.time_step_spec().discount)
print('time_step_spec.reward:', environment.time_step_spec().reward)
```
So we see that the environment expects actions of type `int64` in [0, 1] and returns `TimeSteps` where the observations are a `float32` vector of length 4 and discount factor is a `float32` in [0.0, 1.0]. Now, let's try to take a fixed action `(1,)` for a whole episode.
```
action = np.array(1, dtype=np.int32)
time_step = environment.reset()
print(time_step)
while not time_step.is_last():
time_step = environment.step(action)
print(time_step)
```
### Creating your own Python Environment
For many clients, a common use case is to apply one of the standard agents (see agents/) in TF-Agents to their problem. To do this, they have to frame their problem as an environment. So let us look at how to implement an environment in Python.
Let's say we want to train an agent to play the following (Black Jack inspired) card game:
1. The game is played using an infinite deck of cards numbered 1...10.
2. At every turn the agent can do 2 things: get a new random card, or stop the current round.
3. The goal is to get the sum of your cards as close to 21 as possible at the end of the round, without going over.
An environment that represents the game could look like this:
1. Actions: We have 2 actions. Action 0: get a new card, and Action 1: terminate the current round.
2. Observations: Sum of the cards in the current round.
3. Reward: The objective is to get as close to 21 as possible without going over, so we can achieve this using the following reward at the end of the round:
sum_of_cards - 21 if sum_of_cards <= 21, else -21
```
class CardGameEnv(py_environment.PyEnvironment):
  """Simplified blackjack over an infinite deck of cards numbered 1-10.

  Action 0 draws a card, action 1 ends the round. The observation is the
  running card sum; the terminal reward is sum - 21 when sum <= 21, and
  -21 on a bust (sum > 21).
  """

  def __init__(self):
    # Scalar action in {0, 1}: 0 = draw a card, 1 = end the round.
    self._action_spec = array_spec.BoundedArraySpec(
        shape=(), dtype=np.int32, minimum=0, maximum=1, name='action')
    # Observation: the non-negative sum of cards drawn so far.
    self._observation_spec = array_spec.BoundedArraySpec(
        shape=(1,), dtype=np.int32, minimum=0, name='observation')
    self._state = 0
    self._episode_ended = False

  def action_spec(self):
    return self._action_spec

  def observation_spec(self):
    return self._observation_spec

  def _reset(self):
    # Start a fresh round with an empty hand.
    self._state = 0
    self._episode_ended = False
    return ts.restart(np.array([self._state], dtype=np.int32))

  def _step(self, action):
    if self._episode_ended:
      # The last action ended the episode. Ignore the current action and start
      # a new episode.
      return self.reset()

    # Make sure episodes don't go on forever.
    if action == 1:
      self._episode_ended = True
    elif action == 0:
      # Draw a card uniformly from 1..10 (infinite deck).
      new_card = np.random.randint(1, 11)
      self._state += new_card
    else:
      raise ValueError('`action` should be 0 or 1.')

    if self._episode_ended or self._state >= 21:
      # Terminal transition: distance below 21, or -21 on a bust.
      reward = self._state - 21 if self._state <= 21 else -21
      return ts.termination(np.array([self._state], dtype=np.int32), reward)
    else:
      # Intermediate transition: zero reward, undiscounted.
      return ts.transition(
          np.array([self._state], dtype=np.int32), reward=0.0, discount=1.0)
```
Let's make sure we did everything correctly defining the above environment. When creating your own environment you must make sure the observations and time_steps generated follow the correct shapes and types as defined in your specs. These are used to generate the TensorFlow graph and as such can create hard to debug problems if we get them wrong.
To validate our environment we will use a random policy to generate actions and we will iterate over 5 episodes to make sure things are working as intended. An error is raised if we receive a time_step that does not follow the environment specs.
```
environment = CardGameEnv()
utils.validate_py_environment(environment, episodes=5)
```
Now that we know the environment is working as intended, let's run this environment using a fixed policy: ask for 3 cards and then end the round.
```
get_new_card_action = np.array(0, dtype=np.int32)
end_round_action = np.array(1, dtype=np.int32)
environment = CardGameEnv()
time_step = environment.reset()
print(time_step)
cumulative_reward = time_step.reward
for _ in range(3):
time_step = environment.step(get_new_card_action)
print(time_step)
cumulative_reward += time_step.reward
time_step = environment.step(end_round_action)
print(time_step)
cumulative_reward += time_step.reward
print('Final Reward = ', cumulative_reward)
```
### Environment Wrappers
An environment wrapper takes a python environment and returns a modified version of the environment. Both the original environment and the modified environment are instances of `py_environment.PyEnvironment`, and multiple wrappers can be chained together.
Some common wrappers can be found in `environments/wrappers.py`. For example:
1. `ActionDiscretizeWrapper`: Converts a continuous action space to a discrete action space.
2. `RunStats`: Captures run statistics of the environment such as number of steps taken, number of episodes completed etc.
3. `TimeLimit`: Terminates the episode after a fixed number of steps.
#### Example 1: Action Discretize Wrapper
InvertedPendulum is a PyBullet environment that accepts continuous actions in the range `[-2, 2]`. If we want to train a discrete action agent such as DQN on this environment, we have to discretize (quantize) the action space. This is exactly what the `ActionDiscretizeWrapper` does. Compare the `action_spec` before and after wrapping:
```
env = suite_gym.load('Pendulum-v0')
print('Action Spec:', env.action_spec())
discrete_action_env = wrappers.ActionDiscretizeWrapper(env, num_actions=5)
print('Discretized Action Spec:', discrete_action_env.action_spec())
```
The wrapped `discrete_action_env` is an instance of `py_environment.PyEnvironment` and can be treated like a regular python environment.
## TensorFlow Environments
The interface for TF environments is defined in `environments/tf_environment.TFEnvironment` and looks very similar to the Python environments. TF Environments differ from python envs in a couple of ways:
* They generate tensor objects instead of arrays
* TF environments add a batch dimension to the tensors generated when compared to the specs.
Converting the python environments into TFEnvs allows tensorflow to parallelize operations. For example, one could define a `collect_experience_op` that collects data from the environment and adds to a `replay_buffer`, and a `train_op` that reads from the `replay_buffer` and trains the agent, and run them in parallel naturally in TensorFlow.
```
class TFEnvironment(object):
  """Abstract interface for in-graph (TensorFlow) environments.

  Mirrors `PyEnvironment`, but methods accept and return batched tensors
  instead of numpy arrays. Subclasses implement the underscored hooks.
  """

  def time_step_spec(self):
    """Describes the `TimeStep` tensors returned by `step()`."""

  def observation_spec(self):
    """Defines the `TensorSpec` of observations provided by the environment."""

  def action_spec(self):
    """Describes the TensorSpecs of the action expected by `step(action)`."""

  def reset(self):
    """Returns the current `TimeStep` after resetting the Environment."""
    return self._reset()

  def current_time_step(self):
    """Returns the current `TimeStep`."""
    return self._current_time_step()

  def step(self, action):
    """Applies the action and returns the new `TimeStep`."""
    return self._step(action)

  @abc.abstractmethod
  def _reset(self):
    """Returns the current `TimeStep` after resetting the Environment."""

  @abc.abstractmethod
  def _current_time_step(self):
    """Returns the current `TimeStep`."""

  @abc.abstractmethod
  def _step(self, action):
    """Applies the action and returns the new `TimeStep`."""
```
The `current_time_step()` method returns the current time_step and initializes the environment if needed.
The `reset()` method forces a reset in the environment and returns the current time_step.
If the `action` doesn't depend on the previous `time_step` a `tf.control_dependency` is needed in `Graph` mode.
For now, let us look at how `TFEnvironments` are created.
### Creating your own TensorFlow Environment
This is more complicated than creating environments in Python, so we will not cover it in this colab. An example is available [here](https://github.com/tensorflow/agents/blob/master/tf_agents/environments/tf_environment_test.py). The more common use case is to implement your environment in Python and wrap it in TensorFlow using our `TFPyEnvironment` wrapper (see below).
### Wrapping a Python Environment in TensorFlow
We can easily wrap any Python environment into a TensorFlow environment using the `TFPyEnvironment` wrapper.
```
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
print(isinstance(tf_env, tf_environment.TFEnvironment))
print("TimeStep Specs:", tf_env.time_step_spec())
print("Action Specs:", tf_env.action_spec())
```
Note the specs are now of type: `(Bounded)TensorSpec`.
### Usage Examples
#### Simple Example
```
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
# reset() creates the initial time_step after resetting the environment.
time_step = tf_env.reset()
num_steps = 3
transitions = []
reward = 0
for i in range(num_steps):
action = tf.constant([i % 2])
# applies the action and returns the new TimeStep.
next_time_step = tf_env.step(action)
transitions.append([time_step, action, next_time_step])
reward += next_time_step.reward
time_step = next_time_step
np_transitions = tf.nest.map_structure(lambda x: x.numpy(), transitions)
print('\n'.join(map(str, np_transitions)))
print('Total reward:', reward.numpy())
```
#### Whole Episodes
```
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
time_step = tf_env.reset()
rewards = []
steps = []
num_episodes = 5
for _ in range(num_episodes):
episode_reward = 0
episode_steps = 0
while not time_step.is_last():
action = tf.random.uniform([1], 0, 2, dtype=tf.int32)
time_step = tf_env.step(action)
episode_steps += 1
episode_reward += time_step.reward.numpy()
rewards.append(episode_reward)
steps.append(episode_steps)
time_step = tf_env.reset()
num_steps = np.sum(steps)
avg_length = np.mean(steps)
avg_reward = np.mean(rewards)
print('num_episodes:', num_episodes, 'num_steps:', num_steps)
print('avg_length', avg_length, 'avg_reward:', avg_reward)
```
| github_jupyter |
```
"""
This notebook contains codes to run hyper-parameter tuning using a genetic algorithm.
Use another notebook if you wish to use *grid search* instead.
# Under development.
"""
import os, sys
import numpy as np
import pandas as pd
import tensorflow as tf
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib
import matplotlib.pyplot as plt
from pprint import pprint
from typing import Dict, List
import datetime
import sys
sys.path.append("../")
# If this notebook file is not placed under in /notebook/ directory,
# adding directory "../" might not correly add the project directory.
# If adding "../" does not solve the importing problem, we need to setup
# the directory mannually.
try:
import constants
except ModuleNotFoundError:
core_dir = input("Directory of core files >>> ")
if not core_dir.endswith("/"):
core_dir += "/"
sys.path.append(core_dir)
import constants
from core.tools.metrics import *
import core.tools.visualize as visualize
from core.tools.time_series import *
from core.tools.data_import import *
import core.tools.rnn_prepare as rnn_prepare
import core.tools.param_set_generator as param_set_generator
import core.ga.genetic_hpt as genetic_hpt
import core.models.stacked_lstm as stacked_lstm
import core.training.hps_methods as hps_methods
# data preparation phase.
pprint(constants.DATA_DIR)
choice = None
while choice is None or choice not in constants.DATA_DIR.keys():
if choice is not None:
print("Invalid data location received, try again...")
choice = input("Select Dataset >>> ")
# choice = "a"
FILE_DIR = constants.DATA_DIR[choice]
print(f"Dataset chosen: {FILE_DIR}")
print("Avaiable configuration files found: ")
for cf in os.listdir("../hps_configs"):
if cf.endswith("config.py"):
print("\t" + cf)
config_name = input("Select config file >>> ")
if config_name.endswith(".py"):
config_name = config_name[:-3]
# config_name = "mac_config"
exec(f"import hps_configs.{config_name} as config")
# print("Reading configuration file...")
# for att in dir(config):
# if att.endswith("_config"):
# print(f"\tLoading: {att}")
# exec(f"globals().update(config.{att})")
def obj_func(param) -> float:
    """Fitness function for the genetic hyper-parameter search.

    Builds the dataset and a stacked-LSTM model from one candidate
    parameter dict, trains it, and returns the mean validation-set MSE
    over the final 5% of epochs (lower is better; the GA runs in
    ``mode="min"``).
    """
    df_ready = rnn_prepare.prepare_dataset(
        file_dir=FILE_DIR,
        periods=int(param["PERIODS"]),
        order=int(param["ORDER"]),
        remove=None,
        verbose=False
    )
    # Split dataset.
    (X_train, X_val, X_test,
     y_train, y_val, y_test) = rnn_prepare.split_dataset(
        raw=df_ready,
        train_ratio=param["TRAIN_RATIO"],
        val_ratio=param["VAL_RATIO"],
        lags=param["LAGS"]
    )
    # The gross dataset excluding the test set.
    # Excluding the test set for isolation purpose.
    data_feed = {
        "X_train": X_train,
        "X_val": X_val,
        "y_train": y_train,
        "y_val": y_val,
    }
    ep = param["epochs"]
    ckpts = range(int(ep * 0.95), ep)  # Take the final 5% epochs.
    # Fresh graph per candidate so successive GA evaluations don't
    # accumulate variables in the default TF1 graph.
    tf.reset_default_graph()
    model = stacked_lstm.StackedLSTM(
        param=param,
        prediction_checkpoints=ckpts,
        verbose=False
    )
    # NOTE(review): assumes model.fit returns {"mse_val": {epoch: mse}}
    # for the checkpoint epochs -- confirm against StackedLSTM.fit.
    ret_pack = model.fit(data=data_feed, ret=["mse_val"])
    return float(np.mean(list(ret_pack["mse_val"].values())))
total_gen = 30
init_size = 10
ignore_set = (
"PERIODS", "ORDER", "TRAIN_RATIO", "VAL_RATIO", "num_outputs", "num_inputs", "report_periods",
"tensorboard_path", "model_path", "fig_path"
)
optimizer = genetic_hpt.GeneticHPT(
gene_pool=config.main,
pop_size=init_size,
eval_func=obj_func,
mode="min",
retain=0.5,
shot_prob=0.05,
mutate_prob=0.05,
verbose=False,
ignore=ignore_set
)
# sample_param = {'LAGS': 6,
# 'ORDER': 1,
# 'PERIODS': 1,
# 'TRAIN_RATIO': 0.8,
# 'VAL_RATIO': 0.1,
# 'clip_grad': None,
# 'epochs': 500,
# 'fig_path': '/Volumes/Intel/debug/model_figs/',
# 'learning_rate': 0.1,
# 'model_path': '/Volumes/Intel/debug/saved_models/',
# 'num_inputs': 1,
# 'num_neurons': (32, 64),
# 'num_outputs': 1,
# 'num_time_steps': None,
# 'report_periods': 10,
# 'tensorboard_path': '/Volumes/Intel/debug/tensorboard/'}
class HiddenPrints:
    """Context manager that temporarily silences ``print`` output by
    redirecting ``sys.stdout`` to the null device for the duration of
    the ``with`` block."""

    def __enter__(self):
        self._saved_stdout = sys.stdout
        self._devnull = open(os.devnull, 'w')
        sys.stdout = self._devnull

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore the real stdout first, then release the null handle.
        sys.stdout = self._saved_stdout
        self._devnull.close()
# ---- Genetic-search training loop ----
start_time = datetime.datetime.now()
# Per-generation fitness history: validation MSE of the best / worst entity.
best_rec = []
worst_rec = []

print("Initial evaluation gen=0...")
optimizer.evaluate(verbose=True)
# Population is kept sorted by fitness: index 0 = best, index -1 = worst.
# (Typo fixes: "validatiton"/"valudation" -> "validation" in messages below.)
print(f"\nBest fitted entity validation MSE: {optimizer.population[0][1]: 0.7f}"
      f"\nWorst fitted entity validation MSE: {optimizer.population[-1][1]: 0.7f}")

for gen in range(total_gen):
    print(f"Generation: [{gen + 1}/{total_gen}]")
    optimizer.select()
    optimizer.evolve()
    optimizer.evaluate(verbose=True)
    print(f"\nBest fitted entity validation MSE: {optimizer.population[0][1]: 0.7f}"
          f"\nWorst fitted entity validation MSE: {optimizer.population[-1][1]: 0.7f}")
    best_rec.append(optimizer.population[0][1])
    worst_rec.append(optimizer.population[-1][1])

# Report the winning hyperparameter set and its fitness.
print(f"Final generation best fitted entity: {optimizer.population[0][0]}"
      f"\nwith validation set MSE (fitness): {optimizer.population[0][1]}")
end_time = datetime.datetime.now()
print(f"Time taken: {end_time - start_time}")
```
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Demo-of-RISE-for-slides-with-Jupyter-notebooks-(Python)" data-toc-modified-id="Demo-of-RISE-for-slides-with-Jupyter-notebooks-(Python)-1"><span class="toc-item-num">1 </span>Demo of RISE for slides with Jupyter notebooks (Python)</a></span><ul class="toc-item"><li><span><a href="#Title-2" data-toc-modified-id="Title-2-1.1"><span class="toc-item-num">1.1 </span>Title 2</a></span><ul class="toc-item"><li><span><a href="#Title-3" data-toc-modified-id="Title-3-1.1.1"><span class="toc-item-num">1.1.1 </span>Title 3</a></span><ul class="toc-item"><li><span><a href="#Title-4" data-toc-modified-id="Title-4-1.1.1.1"><span class="toc-item-num">1.1.1.1 </span>Title 4</a></span></li></ul></li></ul></li><li><span><a href="#Text" data-toc-modified-id="Text-1.2"><span class="toc-item-num">1.2 </span>Text</a></span></li><li><span><a href="#Maths" data-toc-modified-id="Maths-1.3"><span class="toc-item-num">1.3 </span>Maths</a></span></li><li><span><a href="#And-code" data-toc-modified-id="And-code-1.4"><span class="toc-item-num">1.4 </span>And code</a></span></li></ul></li><li><span><a href="#More-demo-of-Markdown-code" data-toc-modified-id="More-demo-of-Markdown-code-2"><span class="toc-item-num">2 </span>More demo of Markdown code</a></span><ul class="toc-item"><li><span><a href="#Lists" data-toc-modified-id="Lists-2.1"><span class="toc-item-num">2.1 </span>Lists</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Images" data-toc-modified-id="Images-2.1.0.1"><span class="toc-item-num">2.1.0.1 </span>Images</a></span></li><li><span><a href="#And-Markdown-can-include-raw-HTML" data-toc-modified-id="And-Markdown-can-include-raw-HTML-2.1.0.2"><span class="toc-item-num">2.1.0.2 </span>And Markdown can include raw HTML</a></span></li></ul></li></ul></li></ul></li><li><span><a href="#End-of-this-demo" data-toc-modified-id="End-of-this-demo-3"><span class="toc-item-num">3 </span>End of this 
demo</a></span></li></ul></div>
# Demo of RISE for slides with Jupyter notebooks (Python)
- This document is an example of a slideshow, written in a [Jupyter notebook](https://www.jupyter.org/) with the [RISE extension](https://github.com/damianavila/RISE).
> By [Lilian Besson](http://perso.crans.org/besson/), Sept.2017.
---
## Title 2
### Title 3
#### Title 4
##### Title 5
##### Title 6
## Text
With text, *emphasis*, **bold**, ~~striked~~, `inline code` and
> *Quote.*
>
> -- By a guy.
## Maths
With inline math $\sin(x)^2 + \cos(x)^2 = 1$ and equations:
$$\sin(x)^2 + \cos(x)^2 = \left(\frac{\mathrm{e}^{ix} - \mathrm{e}^{-ix}}{2i}\right)^2 + \left(\frac{\mathrm{e}^{ix} + \mathrm{e}^{-ix}}{2}\right)^2 = \frac{-\mathrm{e}^{2ix}-\mathrm{e}^{-2ix}+2+\mathrm{e}^{2ix}+\mathrm{e}^{-2ix}+2}{4} = 1.$$
## And code
In Markdown:
```python
from sys import version
print(version)
```
And in an executable cell (with a Python 3 kernel):
```
from sys import version
print(version)
```
# More demo of Markdown code
## Lists
- Unordered
- lists
- are easy.
And
1. and ordered also ! Just
2. start lines by `1.`, `2.` etc
3. or simply `1.`, `1.`, ...
#### Images
With a HTML `<img/>` tag or the `` Markdown code:
<img width="100" src="agreg/images/dooku.jpg"/>

```
# https://gist.github.com/dm-wyncode/55823165c104717ca49863fc526d1354
"""Embed a YouTube video via its embed url into a notebook."""
from functools import partial
from IPython.display import display, IFrame
width, height = (560, 315, )
def _iframe_attrs(embed_url):
"""Get IFrame args."""
return (
('src', 'width', 'height'),
(embed_url, width, height, ),
)
def _get_args(embed_url):
    """Build the (name, bases, namespace) triple passed to type().

    The resulting class exposes a display() attribute that renders the
    embedded IFrame via IPython's display machinery.
    """
    names, values = _iframe_attrs(embed_url)
    iframe_kwargs = dict(zip(names, values))
    namespace = {'display': partial(display, IFrame(**iframe_kwargs))}
    return ('YouTubeVideo', (object,), namespace)
def youtube_video(embed_url):
    """Embed a YouTube video into a notebook.

    Place this module into the same directory as the notebook.

    >>> from embed import youtube_video
    >>> youtube_video(url).display()
    """
    # Dynamically create a class with a bound display() and instantiate it.
    video_cls = type(*_get_args(embed_url))
    return video_cls()
```
#### And Markdown can include raw HTML
<center><span style="color: green;">This is a centered span, colored in green.</span></center>
Iframes are disabled by default, but by using the IPython internals we can include let say a YouTube video:
```
youtube_video("https://www.youtube.com/embed/FNg5_2UUCNU").display()
print(2**2021)
```
# End of this demo
- See [here for more notebooks](https://github.com/Naereen/notebooks/)!
- This document, like my other notebooks, is distributed [under the MIT License](https://lbesson.mit-license.org/).
| github_jupyter |
```
import numpy as np
import pickle
import time
from src.data.make_dataset import generate_dataset
from src.models.train_model import BO_loop, grid_search, dist_loop
from src.models.acquisition import Random, MaxVariance
from functools import partial
# run trig basis tests
iters = 5
rng = np.random.default_rng(seed = 42)
data_seeds = rng.integers(low=0, high=10000, size=iters)
ds_cfg = {'n_cpts': 5, 'supply_truth':False, 'basis':'trig'}
bo_cfg = {'pca_cpts': 4, 'bo_iters':200}
mv_bo_loop = partial(BO_loop, acq_func=MaxVariance )
mv_bo_loop.__name__='mv_bo_loop'
rand_bo_loop = partial(BO_loop, acq_func=Random )
rand_bo_loop.__name__='rand_bo_loop'
loops = [grid_search, dist_loop, mv_bo_loop, rand_bo_loop]
for s in data_seeds:
face_image, data, x_vals = generate_dataset(n_cpts=ds_cfg['n_cpts'],
seed=s, supply_truth=ds_cfg['supply_truth'],
xanes=ds_cfg['basis'])
for loop in [mv_bo_loop]:
# run test
ts = time.time()
_, varis, errs, info_dict = loop(data, n_cpts=bo_cfg['pca_cpts'],
n_iters=bo_cfg['bo_iters'])
# construct results dictionary and save
results = {'max_variances': varis,
'errors': errs,
'info_dict': info_dict,
'bo_cfg': bo_cfg,
'ds_cfg': ds_cfg,
'loop_type': loop.__name__,
'start_time': ts
}
break
# with open(f'results_{ds_cfg["basis"]}_{s}_{loop.__name__}.pkl', 'wb') as f:
# pickle.dump(results, f)
break
plot_component_comp(data, info_dict['curr_cpt_weights'],
torch.Tensor(info_dict['train_x']), info_dict['test_x'],
varis, errs['spec_mse_avg'])
# reading and coallating data
from pathlib import Path
pkl_path = Path.cwd().parent / 'reports' / 'results_xanes_400'
print(pkl_path)
newdist = {n.__name__:[] for n in loops}
for p in (pkl_path.parent / 'results_newdist_400').glob('*.pkl'):
with open(p, 'rb') as f:
a = pickle.load(f)
newdist[a['loop_type']].append(a['errors']['spec_mse_avg'])
spec_mse_err = {n.__name__:[] for n in loops}
for p in pkl_path.glob('*.pkl'):
with open(p, 'rb') as f:
a = pickle.load(f)
spec_mse_err[a['loop_type']].append(a['errors']['spec_mse_avg'])
plt.figure(figsize=(5,5))
new_name = {'grid_search': 'grid search', 'mv_bo_loop': 'max variance',
'rand_bo_loop': 'random search', 'dist_loop': 'distance' }
color_code = ['k', 'r', 'b', 'g']
for i, loop_name in enumerate(['grid_search', 'rand_bo_loop', 'dist_loop', 'mv_bo_loop']):
if loop_name == 'dist_loop':
continue
arr = np.array(spec_mse_err[loop_name])
mean = np.mean(arr, axis=0).flatten()
std = np.std(np.log(arr), axis=0).flatten()
plt.plot(np.log(mean), color_code[i], label=new_name[loop_name])
# plt.plot(arr.T)
plt.fill_between(range(len(mean)), np.log(mean)-std,
np.log(mean)+std, color=color_code[i], alpha=0.2)
arr = np.array(newdist['dist_loop'])
mean = np.mean(arr, axis=0).flatten()
std = np.std(np.log(arr), axis=0).flatten()
plt.plot(np.log(mean), 'b', label='distance' )
plt.fill_between(range(len(mean)), np.log(mean)-std, np.log(mean)+std, color='b', alpha=0.2)
plt.legend()
# plt.ylim(0.000005, 0.001)
plt.ylabel('Log(Reconstruction Error)')
plt.xlabel('iteration')
print(f'statistics over {arr.shape[0]} tests')
plt.savefig('xanes_recon_err.png')
np.shape(spec_mse_err['grid_search'])
from src.visualization.visualize import plot_component_comp
import torch
import re
pickle_list = list(pkl_path.glob('*.pkl'))
i=40
fp = pickle_list[i]
with open(fp, 'rb') as f:
print(fp)
a = pickle.load(f)
# grab numbers
s = int(re.search('\d+',fp.name).group())
print(s)
face_image, data, x_vals = generate_dataset(n_cpts=a['ds_cfg']['n_cpts'],
seed=s, supply_truth=a['ds_cfg']['supply_truth'],
xanes='trig')
plot_component_comp(data, a['info_dict']['curr_cpt_weights'],
torch.Tensor(a['info_dict']['train_x']), a['info_dict']['test_x'],
a['max_variances'], a['errors']['spec_mse_avg'])
xanes = 'xanes'
if (xanes is None) or (xanes == 'xanes'):
print('xanes detected')
if xanes == 'trig':
print('trig detected')
```
| github_jupyter |
# Unit conversion for the valve coefficient
## Friction losses in energy balance
The contribution of friction losses is considered as a head loss in the energy balance.
EB   $0~m=\frac{\Delta p}{\rho g}+\Delta z+\frac{\Delta w^2}{2 g}+\Delta H_{v}+\frac{\dot Q}{\rho g \dot V}+\frac{C_v\Delta T}{g}+\frac{-\dot W_{Welle}}{\rho g \dot V}$
Anlagenkennlinie $H_{Anlage}=\underbrace{\Delta z+\frac{p_{aus}-p_{ein}}{\rho g}}_{\text{stat. Förderhöhe, }f(\Delta z)}+\underbrace{\frac{w_{aus}^2-w_{ein}^2}{2g}+\sum H_{Verluste}}_{\text{dyn. Förderhöhe, }f(\dot V)}$
Pumpenkennlinie $H_{Pumpe}=H(\dot V)$
Where $p$ is pressure, $\rho$ is density, $g$ is the local acceleration due to gravity, $z$ is elevation, $w$ is linear velocity, $H_{Verluste}$ are head losses due to friction, $\dot Q$ is heat flow into the system, $\dot V$ is volume flow, $C_v\Delta T$ is change in internal energy due solely to temperature change of incompressible fluid, $\dot W_{Welle}$ is power given by shaft work. $H$ are pressure heads as heights.
The pump curve gives a decreasing dynamic head $H_{Pumpe}$ with increasing volume flow $\dot V$. The plant curve $H_{Anlage}$ consists of a static head dependent on plant conditions like height, inlet and outlet pressure (independent of flow), and a dynamic head increasing with volume flow $\dot V$. The pump must be able to meet the dynamic + static head of the plant at a given flow.
## Definition of valve flow coefficients
See [1] and [2]. Note that below, as in ref. [1] volume flow is $Q$.
### Resistance coefficient $\zeta$ - dimensionless
Friction loss attributable to a pipe, valve, fitting, etc. in the pipeline, in terms of velocity head or velocity pressure. aka Widerstandsbeiwert.
$\Delta H=\zeta \frac{w^2}{2g}$
Expressed as pressure loss, $\Delta H=\frac{\Delta p_{Verlust}}{\rho g}$, or $\Delta p_{Verlust}=\zeta \frac{\rho w^2}{2}$.
The total head losses are composed of the sum of all resistances given by pipes and fittings:
Losses $\sum H_{Verlust}=\zeta_{ges}\cdot\frac{w^2}{2g}$
$\zeta_{ges}$ is the total resistance coefficient.
$\zeta_{ges}=\zeta_{Rohr}+\zeta_{Ventile}+\zeta_{Einbauten}+...$
For a pipe, the resistance coefficient is related to the length (L) to diameter (d) ratio, to give the *Widerstandszahl* (aka Darcy-Weisbach friction factor $f_D=\lambda$).
$\lambda=\zeta\frac{d}{L}=\frac{\Delta p}{\rho g}\frac{2g}{w^2}\frac{d}{L}$
More precisely, the pressure loss through the pipe is proportional to the length and inversely proportional to the diameter, by the Darcy-Weisbach friction factor.
$\frac{\Delta p}{L}=\lambda\frac{\rho}{d}\frac{w^2}{2}$
The Nikuradse-diagram (Nikuradse-Colebrook-Moody) shows $\lambda$ as a function of the Reynolds-number $Re=\frac{w d_{i}\rho}{\eta}=\frac{w d_i}{\nu}$ and the surface relative roughness $k_s/d_i$ (or $\epsilon/d_i$). For laminar flow ($Re<3000$), the friction factor converges to the Poiseuille analytical solution of the momentum balance.
$\lambda_{laminar}=\frac{64}{Re}$
In turbulent flow, an approximate calculation is given by Blasius [2], not considering roughness.
$\lambda_{turb}\approx(100 Re)^{-1/4} \qquad 3000<Re<10^5$
or by Prandtl and von Kármán, considering roughness [2,3].
$\frac{1}{\sqrt{\lambda}}=2\cdot ln\left(\frac{d_i}{k}\right)+1,14$
### *US* Flow coefficient $C_v$ - dimensions $\frac{US_{gal}}{min}$
$C_v=Q\sqrt{\frac{\Delta p_0}{\Delta p}\frac{\rho}{\rho_0}}$
Q    flow in US gal/min
$\Delta p_0$   reference differential pressure = 1lb/in²
$\Delta p$   operating differential pressure in lb/in²
$\rho_0$     density of reference fluid water = 62.4 lb/ft³
$\rho$     density of operating fluid in lb/ft³
### *Metric* flow coefficient $K_v$ - dimensions $\frac{m^3}{h}$
$K_v=Q\sqrt{\frac{\Delta p_0}{\Delta p}\frac{\rho}{\rho_0}}$
Q    flow in m³/h
$\Delta p_0$   reference differential pressure = 1bar
$\Delta p$   operating differential pressure in bar
$\rho_0$     density of reference fluid water = 1000 kg/m³
$\rho$     density of operating fluid in kg/m³
### *Metric* flow coefficient Av - dimensions $m^2$
$A_v=Q\sqrt{\frac{\rho}{\Delta p}}$
Q    flow in m³/s
$\Delta p$   operating differential pressure in Pa
$\rho$     density of Newtonian liquid in kg/m³
This coefficient is derived from the dimensionless resistance coefficient $\zeta$, following from its definition in pressure loss form:
$\Delta p_{Verlust}=\zeta \frac{\rho w^2}{2}$
$\Rightarrow Q=A\cdot w=\underbrace{A\cdot \sqrt{\frac{2}{\zeta}}}_{A_v}\cdot\sqrt{\frac{\Delta p}{\rho}}$
The definition of $A_v$ then follows, as a coefficient proportional to the orifice diameter cross section at complete opening $A$, by the factor $\left(\frac{2}{\zeta}\right)^{1/2}$.
## Interconversions
Due to the prevailing historic use of the *imperial* unit system [4], it is unavoidable to carry out unit interconversions. In order to build the relationship between flow coefficients with units, it is necessary to do this in dimensionless form, as follows:
$\begin{align}
\frac{C_v/(US_{gal}/min)}{K_v/(m^3/h)}&=\frac{\left(\frac{Q}{US_{gal}/min}\right)}{\left(\frac{Q}{m^3/h}\right)}\cdot\frac{\sqrt{\frac{1\frac{lb_f}{in^2}}{\Delta p}\cdot\frac{\rho}{62.4\frac{lb_m}{ft^3}}}}{\sqrt{\frac{1~bar}{\Delta p}\cdot\frac{\rho}{1000\frac{kg}{m^3}}}}\\
&=\frac{m^3/h}{US_{gal}/min}\cdot\sqrt{\frac{1\frac{lb_f}{in^2}}{1~bar}\cdot\frac{1000\frac{kg}{m^3}}{62.4\frac{lb_m}{ft^3}}}\cdot\frac{\frac{1~h}{60~min}}{3.7854\cdot 10^{-3}\frac{m^3}{US_{gal}}}\\
&=4.4029\cdot\sqrt{\frac{1\frac{lb_f}{in^2}\cdot\frac{4.4482~N}{lb_f}\cdot\left(\frac{1~in}{2.54\cdot 10^{-2}~m}\right)^2}{1~bar\cdot\frac{10^5~N/m^2}{bar}}\cdot\frac{1000\frac{kg}{m^3}}{62.4\frac{lb_m}{ft^3}\cdot\frac{1~kg}{2.2042~lb_m}\cdot\left(\frac{1~ft}{30.48\cdot 10^{-2}~m}\right)^3}}\\
&=4.4029\cdot\sqrt{\frac{6894.72}{10^5}\cdot \frac{1000}{999.74}}\\
&=1.1562
\end{align}$
This result is equal to $1/(865\cdot10^{-3})$, which is listed in ref. [1] as "$K_v/C_v=865 \times 10^{-3}$".
Similarly, the relationship of $C_v$ to $A_v$ is built in dimensionless form.
$\begin{align}
\frac{A_v/(m^2)}{C_v/(US_{gal}/min)}&=\frac{\left(\frac{Q}{m^3/s}\right)}{\left(\frac{Q}{US_{gal}/min}\right)}\cdot\frac{\sqrt{\frac{\rho}{\Delta p}}}{\sqrt{\frac{1\frac{lb_f}{in^2}}{\Delta p}\cdot\frac{\rho}{62.4\frac{lb_m}{ft^3}}}}\\
&=\frac{US_{gal}/min}{m^3/s}\cdot\sqrt{\frac{62.4\frac{lb_m}{ft^3}}{1\frac{lb_f}{in^2}}}\cdot 3.7854\cdot 10^{-3}\frac{m^3}{US_{gal}}\frac{1~min}{60~s}\\
&=6.309\cdot 10^{-5}\sqrt{\frac{62.4\frac{lb_m}{ft^3}}{1\frac{lb_f}{in^2}}\cdot\frac{\frac{1~kg/m^3}{2.2042\cdot(30.48\cdot 10^{-2})^3 lb_m/ft^3}}{\frac{\frac{4.4482}{(2.54\cdot 10^{-2})^2}\frac{kg m}{s^2 m^2}}{lb_f/in^2}}}\\
&=6.309\cdot 10^{-5}\cdot\sqrt{\frac{999.744}{6894.72}}\frac{s}{m}\\
&=24.027\cdot 10^{-6}\frac{s}{m}
\end{align}$
This is listed in ref. [1] as $A_v/C_v=23.8\times 10^{-6}$.
From these two, the relationship from $A_v$ to $K_v$ is:
$\frac{A_v/(m^2)}{K_v/(m^3/h)}=\frac{A_v/(m^2)}{C_v/(US_{gal}/min)}\cdot\frac{C_v/(US_{gal}/min)}{K_v/(m^3/h)}=1.1562\cdot 24.027\cdot 10^{-6}\frac{s}{m}=27.78\cdot 10^{-6}\frac{s}{m}$
References:
1. Smith, P., & Zappe, R. W. (2004). Valve selection handbook: engineering fundamentals for selecting the right valve design for every industrial flow application. Elsevier.
2. PAT notes
3. Stephan, P. u.a. (2013). Vdi-Wärmeatlas. 11., bearb. und erw. Aufl. Berlin, Heidelberg: Springer-Verlag.
4. Christen, D. S. (2009). Praxiswissen der chemischen verfahrenstechnik: handbuch für chemiker und verfahrensingenieure. Springer-Verlag.
| github_jupyter |
ERROR: type should be string, got "https://towardsdatascience.com/a-production-ready-multi-class-text-classifier-96490408757\n\n```\nimport os\nimport re\n\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.svm import LinearSVC\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.multiclass import OneVsRestClassifier\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\ndata_path = 'data'\n\nrows = []\nfor root, _, file in os.walk(data_path):\n for filename in file:\n if '.txt' in filename:\n cuisine = os.path.splitext(filename)[0]\n text_file = open(os.path.join(data_path, filename), \"r\")\n lines = text_file.readlines()\n for line in lines:\n row = {\n 'cuisine': cuisine,\n 'ingredients': line\n }\n rows.append(row)\n text_file.close()\n\ndf = pd.DataFrame.from_dict(rows)\ndf = df.sample(frac=1).reset_index(drop=True)\ndf.head()\ndf.shape\ndf.groupby('cuisine').count()\n#pre-processing\nimport re \ndef clean_str(string):\n \"\"\"\n Tokenization/string cleaning for dataset\n Every dataset is lower cased except\n \"\"\"\n string = re.sub(r\"\\n\", \"\", string) \n string = re.sub(r\"\\r\", \"\", string) \n string = re.sub(r\"[0-9]\", \"digit\", string)\n string = re.sub(r\"\\'\", \"\", string) \n string = re.sub(r\"\\\"\", \"\", string) \n return string.strip().lower()\nX = []\nfor i in range(df.shape[0]):\n X.append(clean_str(df.iloc[i][1]))\ny = np.array(df[\"cuisine\"])\ny.size\n#train test split\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=5)\n#pipeline of feature engineering and model\n\nmodel = Pipeline([\n ('vectorizer', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', OneVsRestClassifier(LinearSVC(class_weight=\"balanced\")))\n])\n#the class_weight=\"balanced\" option tries to remove the biasedness of model towards 
majority sample\n#parameter selection\nfrom sklearn.model_selection import GridSearchCV\nparameters = {'vectorizer__ngram_range': [(1, 1), (1, 2),(2,2)],\n 'tfidf__use_idf': (True, False)}\ngs_clf_svm = GridSearchCV(model, parameters, n_jobs=-1)\ngs_clf_svm = gs_clf_svm.fit(X, y)\nprint(gs_clf_svm.best_score_)\nprint(gs_clf_svm.best_params_)\n#preparing the final pipeline using the selected parameters\nmodel = Pipeline([('vectorizer', CountVectorizer(ngram_range=(1,2))),\n ('tfidf', TfidfTransformer(use_idf=True)),\n ('clf', OneVsRestClassifier(LinearSVC(class_weight=\"balanced\")))])\n#fit model with training data\nmodel.fit(X_train, y_train)\n#evaluation on test data\npred = model.predict(X_test)\nmodel.classes_\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nconfusion_matrix(pred, y_test)\naccuracy_score(y_test, pred)\n#save the model\nfrom sklearn.externals import joblib\njoblib.dump(model, 'model_cuisine_ingredients.pkl', compress=1)\nfrom sklearn.externals import joblib\nmodel = joblib.load('model_cuisine_ingredients.pkl')\ntest_recipe = \"1 2 1/2 to 3 pound boneless pork shoulder or butt, trimmed and cut in half 1 small butternut squash (about 1 1/2 pounds)—peeled, seeded, and cut into 1 inch pieces 1 14.5 ounce can diced tomatoes 1 jalapeño pepper, seeded and chopped 2 cloves garlic, chopped 1 tablespoon chili powder kosher salt 4 6 inch corn tortillas, cut into 1/2 inch wide strips 1 tablespoon canola oil sliced radishes, cilantro sprigs, and lime wedges, for serving\"\nmodel.predict([test_recipe])[0]\nsteak_hache = \"1 tbsp vegetable oil 4 shallots , very finely chopped 600g freshly ground beef (ask the butcher for something with roughly 15% fat - we used chuck) 8 thyme sprigs, leaves picked and chopped 2 tsp Dijon mustard 2 tbsp plain flour 200ml crème fraîche 1 egg yolk 6 tarragon sprigs, leaves picked and finely chopped dressed green salad, to serve\"\nmodel.predict([steak_hache])[0]\ntoad_in_the_hole = \"140g plain flour 3 eggs 300ml 
milk 2 tsp Dijon mustard 2 tbsp vegetable oil 8 Cumberland sausages 8 sage leaves 4 rosemary sprigs\"\nmodel.predict([toad_in_the_hole])[0]\n```\n\n" | github_jupyter |
# Problem statement
We have data from a Portuguese bank on details of customers related to selling a term deposit
The objective of the project is to help the marketing team identify potential customers who are relatively more likely to subscribe to the term deposit and thus increase the hit ratio
# Data dictionary
**Bank client data**
* 1 - age
* 2 - job : type of job
* 3 - marital : marital status
* 4 - education
* 5 - default: has credit in default?
* 6 - housing: has housing loan?
* 7 - loan: has personal loan?
* 8 - balance in account
**Related to previous contact**
* 8 - contact: contact communication type
* 9 - month: last contact month of year
* 10 - day_of_week: last contact day of the week
* 11 - duration: last contact duration, in seconds*
**Other attributes**
* 12 - campaign: number of contacts performed during this campaign and for this client
* 13 - pdays: number of days that passed by after the client was last contacted from a previous campaign
* 14 - previous: number of contacts performed before this campaign and for this client
* 15 - poutcome: outcome of the previous marketing campaign
**Output variable (desired target):has the client subscribed a term deposit?**
```
# To enable plotting graphs in Jupyter notebook
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
# calculate accuracy measures and confusion matrix
from sklearn import metrics
#Load the file from local directory using pd.read_csv which is a special form of read_table
#while reading the data, supply the "colnames" list
bank_df = pd.read_csv("bank-full.csv")
bank_df.head()
bank_df.info()
#### this attribute highly affects the output target (e.g., if duration=0 then y='no'). Yet, the duration is not known before a call is performed. Also, after the end of the call y is obviously known. Thus, this input should only be included for benchmark purposes and should be discarded if the intention is to have a realistic predictive model
bank_df.drop(['duration'], inplace=True, axis=1)
```
#### Certain variables are more relevant if they are categorical variable than numerical variables. We will convert such categorical variables to numeric variabes
```
bank_df['day']=bank_df['day'].astype('category')
bank_df['Target']=bank_df['Target'].astype('category')
```
# Exploratory data analysis
## Univariate analysis - boxplot / histogram for numerical variables
```
sns.boxplot(x=bank_df['age'], data=bank_df)
```
**Age column has some outliers. The median age is about 40 years. There are some customers above 90 years of age. This data might have to be checked**
```
#histograms from the pair plots
sns.pairplot(bank_df)
```
**The distribution of all numerical variables other than age is highly skewed - hence we might want to transform or bin some of these variables**
**On similar lines, please perform univariate analysis of other numerical variables**
## Univariate analysis - countplot / value count for categorical variables
```
bank_df['job'].value_counts()
sns.countplot(bank_df['marital'])
sns.countplot(bank_df['education'])
sns.countplot(bank_df['default'])
```
**default - yes is a very small % - we can consider deleting this column**
```
sns.countplot(bank_df['housing'])
sns.countplot(bank_df['loan'])
sns.countplot(bank_df['contact'])
sns.countplot(bank_df['poutcome'])
sns.countplot(bank_df['Target'])
bank_df['Target'].value_counts(normalize=True)
```
### The response rate is only 11.6%. Hence the Y variable has a high class imbalance. Hence accuracy will not be a reliable model performance measure.
### FN is very critical for this business case because a false negative is a customer who will potentially subscribe for a loan but who has been classified as 'will not subscribe'. Hence the most relevant model performance measure is recall
## Bivariate analysis
```
#Group numerical variables by mean for the classes of Y variable
np.round(bank_df.groupby(["Target"]).mean() ,1)
```
#### The mean balance is higher for customers who subscribe to the term deposit compared to those who dont
#### number of days that passed by after the client was last contacted from a previous campaign is higher for people who have subscribed
#### number of contacts performed before this campaign is also higher for customers who subscribe
### All of the above facts indicate that customers with a higher balance and those who have been contacted frequently before the campaign tend to subscribe for the term deposit
### Bivariate analysis using crosstab
```
pd.crosstab(bank_df['job'], bank_df['Target'], normalize='index').sort_values(by='yes',ascending=False )
```
#### The highest conversion is for students (28%) and lowest is for blue-collar (7%)
```
pd.crosstab(bank_df['marital'], bank_df['Target'], normalize='index').sort_values(by='yes',ascending=False )
pd.crosstab(bank_df['education'], bank_df['Target'], normalize='index').sort_values(by='yes',ascending=False )
print(pd.crosstab(bank_df['default'], bank_df['Target'], normalize='index').sort_values(by='yes',ascending=False ))
print(bank_df['default'].value_counts(normalize=True))
```
### Since default - yes is only 2% of the data and the conversion is also comparitively lower for default - yes, we can remove this column
```
bank_df.drop(['default'], axis=1, inplace=True)
bank_df.columns
pd.crosstab(bank_df['housing'], bank_df['Target'], normalize='index').sort_values(by='yes',ascending=False )
pd.crosstab(bank_df['loan'], bank_df['Target'], normalize='index').sort_values(by='yes',ascending=False )
pd.crosstab(bank_df['contact'], bank_df['Target'], normalize='index').sort_values(by='yes',ascending=False )
pd.crosstab(bank_df['day'], bank_df['Target'], normalize='index').sort_values(by='yes',ascending=False )[0:10]
pd.crosstab(bank_df['month'], bank_df['Target'], normalize='index').sort_values(by='yes',ascending=False )
```
### List out the high level findings from bivariate analysis that could provide pointers to feature selection
```
#Binning:
def binning(col, cut_points, labels=None):
    """Bin a numeric pandas Series into categories.

    Bin edges are [col.min()] + cut_points + [col.max()], with the lowest
    edge inclusive. When labels is falsy, bins are labelled 0..n-1.
    """
    edges = [col.min()] + cut_points + [col.max()]
    if not labels:
        labels = range(len(cut_points) + 1)
    return pd.cut(col, bins=edges, labels=labels, include_lowest=True)
#Binning balance
cut_points = [0,500,1000, 1500,2000]
labels = ["very low","low","medium","high", "very high", "highest"]
bank_df['balance_range'] = binning(bank_df['balance'], cut_points, labels)
bank_df['balance_range'].value_counts()
#Binning campaign
cut_points = [2,3,4]
labels = ["<=2","3","4",">4"]
bank_df['campaign_range'] = binning(bank_df['campaign'], cut_points, labels)
bank_df['campaign_range'].value_counts()
bank_df.drop(['balance', 'campaign'], axis=1, inplace=True)
bank_df.columns
X = bank_df.drop("Target" , axis=1)
y = bank_df["Target"] # select all rows and the 17 th column which is the classification "Yes", "No"
X = pd.get_dummies(X, drop_first=True)
test_size = 0.30 # taking 70:30 training and test set
seed = 7 # Random numbmer seeding for reapeatability of the code
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=seed)
X_train.shape,X_test.shape
#instantiating decision tree as the default model
dt_model = DecisionTreeClassifier()
dt_model.fit(X_train, y_train)
# Is the model an overfit model?
y_pred = dt_model.predict(X_test)
print(dt_model.score(X_train, y_train))
print(dt_model.score(X_test , y_test))
# Note: - Decision Tree is a non-parametric algorithm and hence prone to overfitting easily. This is evident from the difference
# in scores in training and testing
# In ensemble techniques, we want multiple instances (each different from the other) and each instance to be overfit!!!
# hopefully, the different instances will do different mistakes in classification and when we club them, their
# errors will get cancelled out giving us the benefit of lower bias and lower overall variance errors.
#Confusion matrix
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, recall_score
print(confusion_matrix(y_test, y_pred))
print(accuracy_score(y_test, y_pred))
print(recall_score(y_test, y_pred,average="binary", pos_label="yes"))
```
#### The recall score is relatively low and this has to be improved in the model
```
clf_pruned = DecisionTreeClassifier(criterion = "entropy", random_state = 100, max_depth=3, min_samples_leaf=5)
clf_pruned.fit(X_train, y_train)
```
## Visualizing the tree
```
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from IPython.display import Image
import pydotplus
import graphviz
feature_cols = X_train.columns
dot_data = StringIO()
export_graphviz(clf_pruned, out_file=dot_data,
filled=True, rounded=True,
special_characters=True,feature_names = feature_cols,class_names=['0','1'])
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png('bank_pruned.png')
Image(graph.create_png())
## Calculating feature importance
feat_importance = clf_pruned.tree_.compute_feature_importances(normalize=False)
feat_imp_dict = dict(zip(feature_cols, clf_pruned.feature_importances_))
feat_imp = pd.DataFrame.from_dict(feat_imp_dict, orient='index')
feat_imp.sort_values(by=0, ascending=False)[0:10] #Top 10 features
preds_pruned = clf_pruned.predict(X_test)
preds_pruned_train = clf_pruned.predict(X_train)
acc_DT = accuracy_score(y_test, preds_pruned)
recall_DT = recall_score(y_test, preds_pruned, average="binary", pos_label="yes")
#Store the accuracy results for each model in a dataframe for final comparison
resultsDf = pd.DataFrame({'Method':['Decision Tree'], 'accuracy': acc_DT, 'recall': recall_DT})
resultsDf = resultsDf[['Method', 'accuracy', 'recall']]
resultsDf
```
### Overfitting is reduced after pruning, but recall has drastically reduced
```
## Apply the Random forest model and print the accuracy of Random forest Model
from sklearn.ensemble import RandomForestClassifier
rfcl = RandomForestClassifier(n_estimators = 50)
rfcl = rfcl.fit(X_train, y_train)
pred_RF = rfcl.predict(X_test)
acc_RF = accuracy_score(y_test, pred_RF)
recall_RF = recall_score(y_test, pred_RF, average="binary", pos_label="yes")
# Append this model's scores to the running comparison table.
tempResultsDf = pd.DataFrame({'Method':['Random Forest'], 'accuracy': [acc_RF], 'recall': [recall_RF]})
resultsDf = pd.concat([resultsDf, tempResultsDf])
resultsDf = resultsDf[['Method', 'accuracy', 'recall']]
resultsDf
# NOTE(review): duplicated display line below is redundant.
resultsDf
## Apply Adaboost Ensemble Algorithm for the same data and print the accuracy.
from sklearn.ensemble import AdaBoostClassifier
abcl = AdaBoostClassifier( n_estimators= 200, learning_rate=0.1, random_state=22)
abcl = abcl.fit(X_train, y_train)
pred_AB =abcl.predict(X_test)
acc_AB = accuracy_score(y_test, pred_AB)
recall_AB = recall_score(y_test, pred_AB, pos_label='yes')
# Append this model's scores to the running comparison table.
tempResultsDf = pd.DataFrame({'Method':['Adaboost'], 'accuracy': [acc_AB], 'recall':[recall_AB]})
resultsDf = pd.concat([resultsDf, tempResultsDf])
resultsDf = resultsDf[['Method', 'accuracy', 'recall']]
resultsDf
# NOTE(review): duplicated display line below is redundant.
resultsDf
## Apply Bagging Classifier Algorithm and print the accuracy
from sklearn.ensemble import BaggingClassifier
# oob_score=True tracks out-of-bag accuracy on the 70%-sample bootstraps.
bgcl = BaggingClassifier(n_estimators=100, max_samples= .7, bootstrap=True, oob_score=True, random_state=22)
bgcl = bgcl.fit(X_train, y_train)
pred_BG =bgcl.predict(X_test)
acc_BG = accuracy_score(y_test, pred_BG)
recall_BG = recall_score(y_test, pred_BG, pos_label='yes')
tempResultsDf = pd.DataFrame({'Method':['Bagging'], 'accuracy': [acc_BG], 'recall':[recall_BG]})
resultsDf = pd.concat([resultsDf, tempResultsDf])
resultsDf = resultsDf[['Method', 'accuracy', 'recall']]
resultsDf
# NOTE(review): duplicated display line below is redundant.
resultsDf
# Gradient boosting with the same train/test split, appended to the comparison table.
from sklearn.ensemble import GradientBoostingClassifier
gbcl = GradientBoostingClassifier(n_estimators = 200, learning_rate = 0.1, random_state=22)
gbcl = gbcl.fit(X_train, y_train)
pred_GB =gbcl.predict(X_test)
acc_GB = accuracy_score(y_test, pred_GB)
recall_GB = recall_score(y_test, pred_GB, pos_label='yes')
tempResultsDf = pd.DataFrame({'Method':['Gradient Boost'], 'accuracy': [acc_GB], 'recall':[recall_GB]})
resultsDf = pd.concat([resultsDf, tempResultsDf])
resultsDf = resultsDf[['Method', 'accuracy', 'recall']]
resultsDf
# NOTE(review): duplicated display line below is redundant.
resultsDf
```
### Bagging gives overall best model performance. However, please note that the recall is still very low and will have to be improved
| github_jupyter |
```
# Translate a held-out test set with the fl->en NMT model and compute BLEU
# (per-sentence CSV plus a manually accumulated corpus-level score).
import pickle
import math
from nltk import word_tokenize
from nltk.translate.bleu_score import modified_precision, closest_ref_length, brevity_penalty, SmoothingFunction, sentence_bleu
from collections import Counter
from fractions import Fraction
from modules.sentence import tokenizer, read, detokenize
from modules.model import NMT
nbest = 0
weights = (0.25, 0.25, 0.25, 0.25)  # uniform 1..4-gram weights (standard BLEU-4)
from models import fl_en
model = fl_en
source_tokenizer = tokenizer(model.config['source_tokenizer'], lowercase=model.config['source_lowercase'])
source_eval = read('models/fl_en/source.data.test', source_tokenizer, model.config['backwards'])
target_tokenizer = tokenizer('word', lowercase=model.config['target_lowercase'])
# NOTE(review): references are read from source.data.test — the same file as the
# source sentences. For BLEU this presumably should be the target-side test file; confirm.
references = read('models/fl_en/source.data.test', target_tokenizer, model.config['backwards'])
print(references)
output_file = open('models/fl_en/result.data.test', 'w', encoding='utf-8')
hypotheses = []
# Translate every source sentence, persisting raw output and tokenizing for scoring.
for i, sent in enumerate(model.translate(source_eval, encode=True, nbest=nbest)):
    print(sent, file=output_file, flush=True)
    hypotheses.append(word_tokenize(sent))
output_file.close()
evaluation_file = open('models/fl_en/scores.data.eval.csv','w', encoding='utf-8')
p_numerators = Counter()    # per-n-gram-order clipped match counts
p_denominators = Counter()  # per-n-gram-order total counts
hyp_lengths, ref_lengths = 0, 0
for reference, hypothesis in zip(references, hypotheses):
    hyp_len = len(hypothesis)
    ref_len = closest_ref_length([reference], hyp_len)
    hyp_lengths += hyp_len
    ref_lengths += ref_len
    set_data = '%d,%d' % (ref_len, hyp_len)
    for i, _ in enumerate(weights, start=1):
        # NOTE(review): nltk's modified_precision expects a *list* of references;
        # `reference` is passed bare here (cf. sentence_bleu below) — confirm.
        p_i = modified_precision(reference, hypothesis, i)
        p_numerators[i] += p_i.numerator
        p_denominators[i] += p_i.denominator
        set_data += ',%d,%d' % (p_i.numerator, p_i.denominator)
    set_data += ',%f' % sentence_bleu([reference], hypothesis)
    print(set_data, file=evaluation_file, flush=True)
evaluation_file.close()
# Corpus-level BLEU assembled by hand from the accumulated counts.
bp = brevity_penalty(ref_lengths, hyp_lengths)
print(bp)
p_n = [Fraction(p_numerators[i], p_denominators[i], _normalize=False)
       for i, _ in enumerate(weights, start=1)]
smoothing_function = SmoothingFunction().method0  # method0 = no smoothing
p_n = smoothing_function(p_n, references=references, hypothesis=hypothesis,
                         hyp_len=hyp_len, emulate_multibleu=False)
s = (w * math.log(p_i) for i, (w, p_i) in enumerate(zip(weights, p_n)))
print(math.exp(math.fsum(s)))
from nltk.translate.bleu_score import corpus_bleu
corpus_bleu(references, hypotheses)
```
| github_jupyter |
# EDA
```
%load_ext autoreload
%autoreload 2
import sys
sys.path.append("../src")
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import scipy
from collections import Counter
```
## Data Preparation
```
# Load the MovieLens (ml-latest-small) tables.
ds_links = pd.read_csv("../ml-latest-small/links.csv")
ds_movies = pd.read_csv("../ml-latest-small/movies.csv")
ds_ratings = pd.read_csv("../ml-latest-small/ratings.csv")
ds_tags = pd.read_csv("../ml-latest-small/tags.csv")
```
## Ratings
```
ds_movies
# Per-movie mean rating and rating count, flattened to single-level column names.
ratings_mean_count = ds_ratings.groupby("movieId").agg({"rating": ["mean", "count"]})
ratings_mean_count.columns = ["ratings_mean", "ratings_count"]
ratings_mean_count
```
## User-Item Matrix
```
def create_user_item_matrix(ratings) -> pd.DataFrame:
    """Return a binary user-item interaction matrix.

    Rows are userId, columns are movieId; cells are 1.0 where the user
    rated the movie and 0.0 otherwise.
    """
    pivoted = ratings.pivot(index="userId", columns="movieId", values="rating")
    # Mark every observed rating as 1, then fill the unrated gaps with 0.
    pivoted[pivoted.notna()] = 1
    return pivoted.fillna(0)
def create_user_item_ranking_matrix(ratings) -> pd.DataFrame:
    """Pivot ratings into a users x movies matrix of raw rating values (NaN = unrated)."""
    matrix = ratings.pivot(index="userId", columns="movieId", values="rating")
    return matrix
# Build the raw-rating user-item matrix and preview it.
user_item_matrix = create_user_item_ranking_matrix(ds_ratings)
user_item_matrix.head()
```
유저마다 시간별로 rating 매긴걸 정렬한 다음
일정 비율로 나눠서 이전에 매긴 rating을 바탕으로 이후에 매긴 rating을 예측할 수 있는
모델을 만들고자 함
```
def create_user_item_matrix_train(ds_ratings, train_size=0.5):
    """Per user, keep the chronologically earliest `train_size` fraction of ratings.

    For each user, ratings are sorted by timestamp and those at or below the
    `train_size` quantile (nearest interpolation) of the timestamps are kept.
    Returns the concatenation of all per-user training slices.
    """
    per_user_slices = []
    for user_id in ds_ratings.userId.unique():
        user_ratings = ds_ratings[ds_ratings.userId == user_id].sort_values("timestamp")
        cutoff = user_ratings.timestamp.quantile(q=train_size, interpolation='nearest')
        per_user_slices.append(user_ratings[user_ratings.timestamp <= cutoff])
    return pd.concat(per_user_slices, axis=0)
# Chronological 50% split per user; persist X (train ratings) and Y (full binary matrix).
train_set = create_user_item_matrix_train(ds_ratings, 0.5)
train_set = create_user_item_ranking_matrix(train_set)
train_set
train_set.to_csv("../data/user_item_matrix_X_0.5.csv")
create_user_item_matrix(ds_ratings).to_csv("../data/user_item_matrix_Y_0.5.csv")
```
## Tag 정보 EDA
```
# Show only tags used at least 5 times.
tag_counts = ds_tags.tag.value_counts()
tag_counts[tag_counts >= 5]
```
## Movie EDA
```
from sklearn.feature_extraction.text import CountVectorizer
# ds_movies.genres.str.split("|")
# One-hot/count encode the pipe-separated genre strings into a genre matrix.
cv = CountVectorizer()
genres = cv.fit_transform(ds_movies.genres)
genres = pd.DataFrame(
    genres.toarray(),
    # Column order follows the vocabulary indices assigned by CountVectorizer.
    columns=list(sorted(cv.vocabulary_.keys(), key=lambda x: cv.vocabulary_[x]))
)
genres
```
title 에서 연도를 지울 경우 중복되는게 있는지 확인해봤고, 282개 있음
```
# Strip the trailing " (YYYY)" year suffix and look for duplicate titles.
titles = ds_movies.title.str[:-7]
titles.value_counts()
ds_movies[ds_movies.title.str.contains("Hamlet")]
```
## 학습에 필요한 결과물 저장
```
# Join the one-hot genres with the rating aggregates, keyed by movieId.
movie_genre_matrix = pd.concat(
    [ds_movies.drop(columns="genres"), genres],
    axis=1
).drop(columns="movieId")
movie_genre_matrix.index = ds_movies.movieId
movie_genre_matrix = pd.concat([movie_genre_matrix, ratings_mean_count], axis=1)
# Extract the 4-digit year from the "Title (YYYY)" suffix; 0 when absent/non-numeric.
movie_genre_matrix["years"] = movie_genre_matrix.title.str.strip().str[-5:-1]
movie_genre_matrix.years = movie_genre_matrix.years.apply(lambda x: int(x) if x.isdigit() else 0)
movie_genre_matrix.to_csv("../data/movies.csv")
movie_genre_matrix
movie_genre_matrix.columns[1:25]
```
영화 정보와 평점 정보를 pandas를 이용해서 불러옵니다
```
# Reload the raw tables (movies indexed by movieId via index_col=0).
ds_movies = pd.read_csv("../ml-latest-small/movies.csv", index_col=0)
ds_ratings = pd.read_csv("../ml-latest-small/ratings.csv")
```
영화 평점과 평가 수를 집계한 후 영화 제목을 데이터프레임에 추가합니다.
```
# Aggregate rating count/mean per movie, then attach titles (aligned on the movieId index).
movie_ratings = ds_ratings.groupby("movieId") \
    .agg({"rating": ["count", "mean"]})
movie_ratings["title"] = ds_movies.title
movie_ratings
```
평점이 50개 이상인 영화 중 가장 평점이 좋은 영화 10개를 가져옵니다.<br/>
평점이 같다면 평가 수가 많은 영화 순으로 정렬합니다.
```
# Top-10 movies with >= 50 ratings, best mean rating first, ties broken by rating count.
movie_ratings[movie_ratings[("rating", "count")] >= 50] \
    .sort_values([("rating", "mean"), ("rating", "count")], ascending=[False, False]) \
    .head(10)
```
| github_jupyter |
# Composipy for strength analysis of a laminate
In this example we will use the exercise 6-7 from *Analysis and Performance of Fiber Composites by B. Agarwal* pg. 244.
```
from composipy import Ply, Laminate, Load, Strength
```
First, let's consider the following laminate
$[45_{ply1}/0_{ply2}/45_{ply1}]$
Where $ply_1$ is $3 mm$ thick and $ply_2$ is $6mm$ thick with the following Elastic and strength characteristics
- E1 = 19745 MPa
- E2 = 1980 MPa
- $\nu_{12}$ = 0.35
- G = 700 MPa
- $\sigma_1^T$ = 400 Mpa
- $\sigma_1^C$ = -250 Mpa
- $\sigma_2^T$ = 150 Mpa
- $\sigma_2^C$ = -200 Mpa
- $\tau_{12}$ = 25 Mpa
### Setting the laminate
```
# Both plies share elastic/strength properties; they differ only in thickness (3 mm vs 6 mm).
ply_1 = Ply(19745 , 1980, 0.35, 700, 3, 400, -250, 150, -200, 25, name='3mm Unidirection Fiber Composite')
ply_2 = Ply(19745 , 1980, 0.35, 700, 6, 400, -250, 150, -200, 25, name='6mm Unidirection Fiber Composite')
```
Now, defining the laminate
```
# Stacking sequence [45 / 0 / 45] as (orientation in degrees, ply) tuples.
layup_1 = [
    (45, ply_1),
    (0, ply_2),
    (45, ply_1)
]
laminate_1 = Laminate(layup_1)
print(ply_1,'\n',ply_2)
print('==============================================')
print(laminate_1)
```
The $ABD'$ matrix can be called by the `ABD_p` attribute
```
laminate_1.ABD_p
```
### Setting the loads
In the example, the element is subjected to the following loads
- Nx = 1000 N/mm
- Ny = 200 N/m
- Nxy = 0
- Mx = 0
- My = 0
- Mxy = 0
We will input the loads by using the `Load` class.
```
# Membrane loads only: Nx=1000, Ny=200; no shear or bending moments.
loads_1 = Load(1000,200,0,0,0,0)
loads_1
```
### Checking the strength and elastic parameters of the laminate under load
Now we can analyse the strain and stress in the laminate using the `Strength` class.
```
analysis = Strength(laminate_1,loads_1)
```
Let's see the mid-plane strain in the load direction (*xy*) by calling the `mid_strain_xy` attribute
```
analysis.mid_strain_xy
```
Or, in the laminate coordinates (*12*), by calling `mid_strain_12`.
```
analysis.mid_strain_12
```
Just like that, we can get the stress in all plies for both *xy* and *12* directions by calling `stress_xy` or `stress_12`.
```
analysis.stress_xy  # per-ply stresses in the load (xy) axes
analysis.stress_12  # per-ply stresses in the laminate (12) axes
```
From the stresses in the laminate coordinates, we can calculate the Tsai-Wu failure index for all plies by calling `TW_i`.
```
analysis.TW_i
```
Because the Tsai-Wu index is below 1 for all plies, the laminate is still undamaged.
We can discover in what load the first ply will fail based on the Tsai-Wu failure criterion by using the `FPF` parameter.
```
analysis.FPF
```
By this, we now know that the second ply will fail under $Nx = 2548.06 N/mm; Ny=509.61N/mm$ and that the Strength Ratio for the FPF is $2.57$.
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
import time
import os
import copy
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torch.utils.data import DataLoader, Dataset
from torchvision.utils import make_grid
% matplotlib inline
warnings.filterwarnings('ignore')
path=os.getcwd()
# df: test pairs, df1: train pairs (twin-face verification dataset).
df=pd.read_csv(path+'/twins_aligned_cropped_resized/df/pairs_test.csv')
df1=pd.read_csv(path+'/twins_aligned_cropped_resized/df/pairs_train.csv')
def check_overlap(df1, df):
    """Count pairs from df1 that also appear in df, in either (id_1, id_2) order.

    Prints the sizes of both frames and the overlap count, then returns the count.
    FIX: the original reversed-pair test compared df1's id_2 against df's id_1
    twice (both clauses identical), so a pair sharing only one id was counted
    as an overlap; the swapped-pair check now compares both ids crosswise.
    """
    t = 0
    print(df1.shape[0])
    print(df.shape[0])
    for i in range(df1.shape[0]):
        for j in range(df.shape[0]):
            same_order = (df1['id_1'].iloc[i] == df['id_1'].iloc[j]
                          and df1['id_2'].iloc[i] == df['id_2'].iloc[j])
            swapped = (df1['id_1'].iloc[i] == df['id_2'].iloc[j]
                       and df1['id_2'].iloc[i] == df['id_1'].iloc[j])
            if same_order or swapped:
                t += 1
    print(t)
    return t
check_overlap(df1,df)
```
## Splitting
```
# Reload the full pairs table and inspect the label distribution.
df=pd.read_csv(path+'/twins_aligned_cropped_resized/df/pairs.csv')
df['label'].unique()
df.groupby(['label']).count()
df[(df['label']=='Sibling') ]
#{'id_1':[0],'id_2':[0],'label':[0]}
train=pd.DataFrame({})
test=pd.DataFrame({})
# Partition pairs by label family: same-person, twin/sibling, triplet.
same=df[df['label']=='Same']
ident=df[(df['label']=='Identical')| (df['label']=='IdenticalMirror')
         | (df['label']=='UnknownTwinType') |(df['label']=='Fraternal')|(df['label']=='Sibling')]
other=df[(df['label']=='IdenticalTriplet') ]#| (df['label']=='Sibling')
print(same.shape)
print(ident.shape)
print(other.shape)
# Triplet pairs all go to the training split.
train=train.append(other)
def inverse(a):
    """Swap the id_1/id_2 fields of a pair record (stored as strings) and return it.

    NOTE(review): this mutates the passed record in place as well as returning
    it — confirm callers expect the side effect.
    """
    snapshot = a.copy()
    a['id_1'], a['id_2'] = str(snapshot['id_2']), str(snapshot['id_1'])
    return a
# Randomly assign each twin pair (plus its swapped copy) to test (~3/11) or train (~8/11),
# then remove the pair from the remaining pool.
while ident.shape[0]>0:
    prob=np.random.randint(0,11)
    t=ident.iloc[np.random.randint(ident.shape[0])]
    if prob>=8:
        test=test.append(t)
        test=test.append(inverse(t))
    else:
        train=train.append(t)
        train=train.append(inverse(t))
    # NOTE(review): str(ident['id_2']) stringifies the whole Series, so that clause is
    # effectively always True and the first filter falls back to id_1 only — confirm.
    # Also, inverse(t) mutates t in place, so t's ids are already swapped here.
    ident=ident[(ident['id_1']!=t['id_1']) & (str(ident['id_2'])!=str(t['id_2'])) ]
    ident=ident[(ident['id_1']!=inverse(t)['id_1']) &(ident['id_2']!=inverse(t)['id_2']) ]
print(test.shape)
print(train.shape)
check_overlap(test,train)
# Same-person pairs: keep an identity with the train split if already seen there, else test.
while same.shape[0]>0:
    t=same.iloc[np.random.randint(same.shape[0])]
    if t['id_1'] in train['id_1'].tolist():
        train=train.append(t)
    else:
        test=test.append(t)
    # Remove the processed pair from the pool.
    same=same[(same['id_1']!=t['id_1']) &(same['id_2']!=t['id_2']) ]
print(test.shape)
print(train.shape)
check_overlap(test,train)
print(test.shape)
print(train.shape)
check_overlap(test,train)
# Persist the final splits.
train.to_csv(path+'/twins_aligned_cropped_resized/df/pairs_train_fedor.csv',index =False)
test.to_csv(path+'/twins_aligned_cropped_resized/df/pairs_test_fedor.csv',index =False)
```
## Read
```
# Reload the saved splits and normalize the id columns to int.
train=pd.read_csv(path+'/twins_aligned_cropped_resized/df/pairs_train_fedor.csv')
test=pd.read_csv(path+'/twins_aligned_cropped_resized/df/pairs_test_fedor.csv')
test.head()
test['id_1']=test['id_1'].apply(int)
test['id_2']=test['id_2'].apply(int)
train['id_1']=train['id_1'].apply(int)
train['id_2']=train['id_2'].apply(int)
test.head()
```
| github_jupyter |
# <font color='cyan'>AI Chatbot Test</font>
```
import nltk
import numpy as np
import random
import string # process standard python strings
```
### Read the raw txt file
```
# Load the chatbot corpus, lowercased; undecodable bytes are ignored.
# NOTE(review): the file handle is never closed — consider a `with` block.
f = open('chatbot.txt', 'r', errors = 'ignore')
raw = f.read()
raw = raw.lower()
# 1st time use only
# nltk.download('punkt')
# nltk.download('wordnet')
```
### Tokenize the raw data
```
# converts to list of sentences
sent_tokens = nltk.sent_tokenize(raw)
# converts to list of words
word_tokens = nltk.word_tokenize(raw)
# Notebook previews of the tokenized corpus.
sent_tokens
word_tokens[:2]
```
### Pre-processing the Raw Text
```
'''
define function called LemTokens which will
take in the tokens as inputs
& return normalised tokens
'''
# WordNet is a semantically-oriented dictionary of English bundled with NLTK.
lemmer = nltk.stem.WordNetLemmatizer()

def LemTokens(tokens):
    """Lemmatize each token with the WordNet lemmatizer."""
    return list(map(lemmer.lemmatize, tokens))

# Translation table mapping every ASCII punctuation character to None
# so str.translate strips it.
remove_punct_dict = {ord(punct): None for punct in string.punctuation}

def LemNormalize(text):
    """Lowercase, strip punctuation, tokenize, and lemmatize `text`."""
    cleaned = text.lower().translate(remove_punct_dict)
    return LemTokens(nltk.word_tokenize(cleaned))
```
### Keyword Matching
```
'''
define function for greeting by bot
Eg. If user input greeting, bot reply with greeting
ELIZA uses simple keyword matching for greetings
'''
# Keyword lists for simple ELIZA-style greeting detection.
GREETING_INPUTS = ('hello', 'hi', 'greetings', 'sup', "what's up", 'hey')
GREETING_RESPONSES = ['hi', 'hey', '*nods', 'hi there', 'hello',
                      "I'm glad! You're talking to me!"]
def greeting(sentence):
    """Return a random canned greeting when `sentence` contains one, else None."""
    lowered = (token.lower() for token in sentence.split())
    if any(token in GREETING_INPUTS for token in lowered):
        return random.choice(GREETING_RESPONSES)
```
### Generating Response
```
'''
To generate response from our bot for input qns,
concept of codument similarity used
We'll begin by importing the necessary modules
'''
# From scikit learn libary, import TFidf vectorizer to convert collection of
# raw documents to matrix of TF-IDF features
from sklearn.feature_extraction.text import TfidfVectorizer
# import cosine similary module from scikit learn lib
from sklearn.metrics.pairwise import cosine_similarity
# used to find similarity between words entered by user & owrds in corpus
# This is the simplest implementation of a chatbot
# define function response which searches user's utterance for >1 known keywords
# & returns several possible responses
# if no matches, return "I'm sorry! Don't understand you!"
def response(user_resp):
    """Return the corpus sentence most similar (TF-IDF cosine) to `user_resp`.

    The user's utterance is appended to the module-level `sent_tokens` so it
    can be vectorized alongside the corpus; the caller removes it afterwards.
    Falls back to an apology when nothing in the corpus matches at all.
    """
    sent_tokens.append(user_resp)
    vectorizer = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')
    matrix = vectorizer.fit_transform(sent_tokens)
    # Similarity of the user's sentence (last row) against every sentence.
    similarities = cosine_similarity(matrix[-1], matrix)
    best_idx = similarities.argsort()[0][-2]  # [-1] is the utterance itself
    ranked = similarities.flatten()
    ranked.sort()
    best_score = ranked[-2]
    if best_score == 0:
        return "Sorry! Don't understand"
    return sent_tokens[best_idx]
# feed lines we want our bot to say while starting & ending conversation
# depending on user's input
flag = True
print('BOI: Howdy. Ask me Anything :)')
robo_text: str = ''
# Main REPL: 'bye'/'exit' quits, 'thanks' ends politely, greetings get a canned
# reply, anything else goes through the TF-IDF response() lookup.
while flag:
    user_resp = input('>>>').lower()
    if user_resp not in ['bye', 'exit']:
        if user_resp in ['thanks', 'thank you']:
            flag = False
            robo_text = 'No Problem'
        else:
            if greeting(user_resp) != None:
                # NOTE(review): robo_text is set here but only printed after the
                # loop exits — a per-turn print may have been intended; confirm.
                robo_text = greeting(user_resp)
            else:
                print(response(user_resp))
                # Remove the utterance appended to sent_tokens inside response().
                sent_tokens.remove(user_resp)
    else:
        flag = False
        robo_text = 'Cya!'
print(f"BOI: {robo_text}")
```
| github_jupyter |
```
import os
import matplotlib.pyplot as plt
import numpy as np
import qiskit.ignis.mitigation.measurement as mc
from dotenv import load_dotenv
from numpy import pi
from qiskit import (IBMQ, Aer, ClassicalRegister, QuantumCircuit,
QuantumRegister, transpile)
from qiskit.ignis.verification.tomography import (StateTomographyFitter,
state_tomography_circuits)
from qiskit.providers.aer import AerSimulator, noise
from qiskit.providers.ibmq.managed import IBMQJobManager
from qiskit.quantum_info import partial_trace
from qutip import Bloch, Bloch3d, Qobj
from tqdm import tqdm
from tqdm.contrib import tenumerate
from sync_calcs import dm2bloch, spin_husimi_qfunc, spin_S_measure
from sync_gates import add_tomography
from sync_plots import *
load_dotenv()  # pull IBMQ credentials/config from a local .env file
import logging
# Silence noisy IBMQ provider logging.
logging.getLogger('qiskit.providers.ibmq').setLevel(logging.ERROR)
backend = Aer.get_backend('statevector_simulator')
```
# Parameters
```
# Simulation Steps
SIM_STEPS = 5000
# Qubits of physical device to use
# Ordered from zero
main = 0
ancilla = 1
# Dissipation rates; detuning and drive strength are scaled by the weaker rate.
gain_amp = 1
loss_amp = 0.1
m = min(loss_amp, gain_amp)
detuning = 3 * m
signal_strength = 2 * m
dt = 0.5
# Plotting constants
n = 20 # angle array size scale
theta = np.linspace(0, np.pi, n)
phi = np.linspace(-np.pi, np.pi, 2 * n).reshape(-1, 1) # 1D vector -> 2D column vector
```
# Circuit
```
def generate_circuit(time_steps, detuning, signal_strength, steps, main, ancilla):
    """Build the repeated drive + loss + gain circuit on a main/ancilla qubit pair.

    time_steps may be a scalar dt or an iterable of per-step dt values.
    NOTE(review): the loss/gain rotation angles use the module-level
    `loss_amp` and `gain_amp` globals rather than parameters — confirm intended.
    """
    qreg_q = QuantumRegister(max(main, ancilla) + 1, "q")
    creg_c = ClassicalRegister(2, "c")
    qc = QuantumCircuit(qreg_q, creg_c)
    for i in range(steps):
        # Per-step time increment: indexed when iterable, constant otherwise.
        if hasattr(time_steps, "__iter__"):
            dt = time_steps[i]
        else:
            dt = time_steps
        # Rotation angles for loss (theta_d), gain (theta_g) and the coherent drive.
        theta_d = 2 * np.arcsin(np.sqrt(loss_amp * dt))
        theta_g = 2 * np.arcsin(np.sqrt(gain_amp * dt))
        theta = signal_strength * dt
        phi = pi - detuning * dt / 2
        lam = pi - detuning * dt / 2
        # Coherent drive with detuning on the main qubit.
        qc.u(theta, phi, lam, qreg_q[main])
        # Loss CU
        # qc.cu(theta_d, 0, 0, 0, qreg_q[main], qreg_q[ancilla])
        # qc.cx(qreg_q[ancilla], qreg_q[main])
        # qc.measure(qreg_q[ancilla], creg_c[0])
        # qc.reset(qreg_q[ancilla])
        # Loss 2xCX
        qc.u(pi / 2, -pi, 0, qreg_q[main])
        qc.u(-theta_d / 2, -pi / 2, pi, qreg_q[ancilla])
        qc.cx(qreg_q[ancilla], qreg_q[main])
        qc.u(pi / 2, -pi / 2, 0, qreg_q[main])
        qc.u(-theta_d / 2, pi, pi / 2, qreg_q[ancilla])
        qc.cx(qreg_q[ancilla], qreg_q[main])
        qc.u(0, 0, -pi / 2, qreg_q[main])
        qc.u(0, 0, -pi / 2, qreg_q[ancilla])
        # Measure and reset the ancilla so the "environment" is discarded each step.
        qc.measure(qreg_q[ancilla], creg_c[0])
        qc.reset(qreg_q[ancilla])
        # # Gain
        qc.u(-pi, 0, 0, qreg_q[main])
        qc.cx(qreg_q[main], qreg_q[ancilla])
        qc.cu(theta_g, 0, 0, 0, qreg_q[main], qreg_q[ancilla])
        qc.cx(qreg_q[main], qreg_q[ancilla])
        qc.u(pi, 0, 0, qreg_q[main])
        qc.measure(qreg_q[ancilla], creg_c[0])
        qc.reset(qreg_q[ancilla])
    return qc
# Sweep the detuning and record the synchronization measure S(phi) for each value.
gain_amp=0.1
loss_amp=1
DR = 1
detunings = np.linspace(-DR, DR, n)
S = np.zeros([2*n, n])
for i, D in enumerate(detunings):
    # NOTE(review): loss_amp is passed as the signal_strength argument — confirm.
    circuit = generate_circuit(dt, D, loss_amp, SIM_STEPS, main, ancilla)
    circuit = transpile(circuit, backend)
    job = backend.run(circuit)
    prod_state = job.result().get_statevector(0)
    # Trace out the ancilla, keeping the main qubit's reduced density matrix.
    state = partial_trace(prod_state, [1])
    s = state.data
    # Swap the basis ordering of the 2x2 density matrix before wrapping as a Qobj.
    s[0,0], s[1,1] = s[1,1], s[0,0]
    s[0,1], s[1,0] = s[1,0], s[0,1]
    state = Qobj(s)
    Q = spin_husimi_qfunc(state, theta, phi)
    s = spin_S_measure(theta, Q)
    S[:, i] = s
# Single-run debug path (kept for reference):
# circuit = generate_circuit(dt, 0.1, loss_amp, 1000, main, ancilla)
# circuit = transpile(circuit, backend)
# job = backend.run(circuit)
# prod_state = job.result().get_statevector(0)
# state = partial_trace(prod_state, [1])
# s = state.data
# # s[0,0], s[1,1] = s[1,1], s[0,0]
# # s[0,1], s[1,0] = s[1,0], s[0,1]
# state = Qobj(s)
# state
# Contour plot of S over (phi, detuning / loss rate).
fig, ax = plt.subplots(figsize=set_size(DOCUMENT_WIDTH * 2))
DETUNING, PHI = np.meshgrid(detunings / loss_amp, phi)
cquad = ax.contourf(PHI, DETUNING, S, 100)
for c in cquad.collections:
    c.set_edgecolor("face")  # avoid white seams between filled contour levels
ax.set_xlabel(r'$\varphi$')
ax.set_ylabel(r'$\Delta/\Gamma_d$')
cbar = fig.colorbar(cquad)
cbar.set_label(r"$S(\varphi\,|\,\hat\rho)$", rotation=270, labelpad=20)
cbar.ax.yaxis.set_major_locator(MultipleLocator(base=0.02))
cbar.ax.yaxis.set_major_formatter(FuncFormatter(lambda v, pos: f"{v:.2f}"))
angle_xaxis(ax)
# Husimi-Q / S plots and a Bloch-sphere view of the last swept state.
calc_and_plot_Q_and_S(Qobj(state.data))
b = Bloch()
b.add_states(state)
b.show()
```
| github_jupyter |
```
# If you run on colab uncomment the following line
#!pip install git+https://github.com/clementchadebec/benchmark_VAE.git
import torch
import torchvision.datasets as datasets
%load_ext autoreload
%autoreload 2
# MNIST: last 10k images held out for evaluation, pixel values scaled to [0, 1].
mnist_trainset = datasets.MNIST(root='../../data', train=True, download=True, transform=None)
train_dataset = mnist_trainset.data[:-10000].reshape(-1, 1, 28, 28) / 255.
eval_dataset = mnist_trainset.data[-10000:].reshape(-1, 1, 28, 28) / 255.
from pythae.models import VAEGAN, VAEGANConfig
from pythae.trainers import CoupledOptimizerAdversarialTrainer, CoupledOptimizerAdversarialTrainerConfig
from pythae.pipelines.training import TrainingPipeline
from pythae.models.nn.benchmarks.mnist import Encoder_VAE_MNIST, Decoder_AE_MNIST, LayeredDiscriminator_MNIST
# Trainer hyper-parameters for the adversarial (VAE-GAN) training loop.
config = CoupledOptimizerAdversarialTrainerConfig(
    output_dir='my_model',
    learning_rate=1e-4,
    batch_size=100,
    num_epochs=100,
)
# VAEGAN model: 16-dim latent space over 1x28x28 inputs.
model_config = VAEGANConfig(
    input_dim=(1, 28, 28),
    latent_dim=16,
    adversarial_loss_scale=0.8,
    reconstruction_layer= 3,
    margin=0.4,
    equilibrium= 0.68
)
model = VAEGAN(
    model_config=model_config,
    encoder=Encoder_VAE_MNIST(model_config),
    decoder=Decoder_AE_MNIST(model_config)
)
pipeline = TrainingPipeline(
    training_config=config,
    model=model
)
# Run training; checkpoints are written under my_model/.
pipeline(
    train_data=train_dataset,
    eval_data=eval_dataset
)
import os
# Pick the most recent training run and load its final model.
last_training = sorted(os.listdir('my_model'))[-1]
trained_model = VAEGAN.load_from_folder(os.path.join('my_model', last_training, 'final_model'))
from pythae.samplers import NormalSampler
# create normal sampler (draws latents from a standard normal prior)
normal_samper = NormalSampler(
    model=trained_model
)
# sample
gen_data = normal_samper.sample(
    num_samples=25
)
import matplotlib.pyplot as plt
# show results with normal sampler: 5x5 grid of generated digits
fig, axes = plt.subplots(nrows=5, ncols=5, figsize=(10, 10))
for i in range(5):
    for j in range(5):
        axes[i][j].imshow(gen_data[i*5 +j].cpu().squeeze(0), cmap='gray')
        axes[i][j].axis('off')
plt.tight_layout(pad=0.)
from pythae.samplers import GaussianMixtureSampler, GaussianMixtureSamplerConfig
# set up gmm sampler config (10-component mixture fitted on training latents)
gmm_sampler_config = GaussianMixtureSamplerConfig(
    n_components=10
)
# create gmm sampler
gmm_sampler = GaussianMixtureSampler(
    sampler_config=gmm_sampler_config,
    model=trained_model
)
# fit the sampler
gmm_sampler.fit(train_dataset)
# sample
gen_data = gmm_sampler.sample(
    num_samples=25
)
# show results with gmm sampler: 5x5 grid of generated digits
fig, axes = plt.subplots(nrows=5, ncols=5, figsize=(10, 10))
for i in range(5):
    for j in range(5):
        axes[i][j].imshow(gen_data[i*5 +j].cpu().squeeze(0), cmap='gray')
        axes[i][j].axis('off')
plt.tight_layout(pad=0.)
```
## ... the other samplers work the same
| github_jupyter |
# **PointRend - Image Segmentation as Rendering**
**Authors: Alexander Kirillov, Yuxin Wu, Kaiming He, Ross Girshick - Facebook AI Research (FAIR)**
**Official Github**: https://github.com/facebookresearch/detectron2/tree/main/projects/PointRend
---
**Edited By Su Hyung Choi (Key Summary & Code Practice)**
If you have any issues on this scripts, please PR to the repository below.
**[Github: @JonyChoi - Computer Vision Paper Reviews]** https://github.com/jonychoi/Computer-Vision-Paper-Reviews
Edited Jan 10 2022
---
### **Abstract**
<table>
<tbody>
<tr>
<td>
<p>
<i>We present a new method for efficient high-quality
image segmentation of objects and scenes. By analogizing
classical computer graphics methods for efficient rendering
with over- and undersampling challenges faced in pixel
labeling tasks, we develop a unique perspective of image
segmentation as a rendering problem. From this vantage,
we present the PointRend (Point-based Rendering) neural
network module: a module that performs point-based
segmentation predictions at adaptively selected locations
based on an iterative subdivision algorithm. PointRend
can be flexibly applied to both instance and semantic
segmentation tasks by building on top of existing state-ofthe-art models. While many concrete implementations of
the general idea are possible, we show that a simple design
already achieves excellent results. Qualitatively, PointRend
outputs crisp object boundaries in regions that are oversmoothed by previous methods. Quantitatively, PointRend
yields significant gains on COCO and Cityscapes, for both
instance and semantic segmentation. PointRend’s efficiency
enables output resolutions that are otherwise impractical
in terms of memory or computation compared to existing
approaches. Code has been made available at https://
github.com/facebookresearch/detectron2/
tree/master/projects/PointRend.</i>
</p>
</td>
</tr>
</tbody>
</table>
### **Introduction**
<table>
<tbody>
<tr>
<td>
<p>
Image segmentation tasks involve mapping pixels sampled on a regular grid to a label map, or a set of label maps,
on the same grid. For semantic segmentation, the label map
indicates the predicted category at each pixel. In the case of
instance segmentation, a binary foreground vs. background
map is predicted for each detected object. The modern tools
of choice for these tasks are built on convolutional neural
networks (CNNs) [27, 26].
</p>
<table>
<tbody>
<tr>
<td>
<img src="./imgs/figure1.png" width="300" />
</td>
<td>
<img src="./imgs/figure1_description.png" width="350" />
</td>
</tr>
</tbody>
</table>
<p>
CNNs for image segmentation typically operate on regular grids: the input image is a regular grid of pixels, their
hidden representations are feature vectors on a regular grid,
and their outputs are label maps on a regular grid. Regular grids are convenient, but not necessarily computationally ideal for image segmentation. The label maps predicted by these networks should be mostly smooth, i.e.,
neighboring pixels often take the same label, because highfrequency regions are restricted to the sparse boundaries between objects. A regular grid will unnecessarily oversample
the smooth areas while simultaneously undersampling object boundaries. The result is excess computation in smooth
regions and blurry contours (Fig. 1, upper-left). Image segmentation methods often predict labels on a low-resolution
regular grid, e.g., 1/8-th of the input [35] for semantic segmentation, or 28×28 [19] for instance segmentation, as a
compromise between undersampling and oversampling.
</p>
<p>
Analogous sampling issues have been studied for
decades in computer graphics. For example, a renderer
maps a model (e.g., a 3D mesh) to a rasterized image, i.e. a regular grid of pixels. While the output is on a regular grid,
computation is not allocated uniformly over the grid. Instead, a common graphics strategy is to compute pixel values at an irregular subset of adaptively selected points in the
image plane. The classical subdivision technique of [48], as
an example, yields a quadtree-like sampling pattern that efficiently renders an anti-aliased, high-resolution image.
</p>
<p>
The central idea of this paper is to view image segmentation as a rendering problem and to adapt classical
ideas from computer graphics to efficiently “render” highquality label maps (see Fig. 1, bottom-left). We encapsulate this computational idea in a new neural network
module, called PointRend, that uses a subdivision strategy
to adaptively select a non-uniform set of points at which
to compute labels. PointRend can be incorporated into
popular meta-architectures for both instance segmentation
(e.g., Mask R-CNN [19]) and semantic segmentation (e.g.,
FCN [35]). Its subdivision strategy efficiently computes
high-resolution segmentation maps using an order of magnitude fewer floating-point operations than direct, dense
computation.
</p>
<img src="./imgs/figure2.png" />
<p>
PointRend is a general module that admits many possible implementations. Viewed abstractly, a PointRend
module accepts one or more typical CNN feature maps
f(xi, yi) that are defined over regular grids, and outputs
high-resolution predictions p(x0i, y0i) over a finer grid. Instead of making excessive predictions over all points on the
output grid, PointRend makes predictions only on carefully
selected points. To make these predictions, it extracts a
point-wise feature representation for the selected points by
interpolating f, and uses a small point head subnetwork to
predict output labels from the point-wise features. We will
present a simple and effective PointRend implementation.
</p>
<p>
We evaluate PointRend on instance and semantic segmentation tasks using the COCO [29] and Cityscapes [9]
benchmarks. Qualitatively, PointRend efficiently computes
sharp boundaries between objects, as illustrated in Fig. 2
and Fig. 8. We also observe quantitative improvements even
though the standard intersection-over-union based metrics
for these tasks (mask AP and mIoU) are biased towards
object-interior pixels and are relatively insensitive to boundary improvements. PointRend improves strong Mask R-CNN and DeepLabV3 [5] models by a significant margin.
</p>
</td>
</tr>
</tbody>
</table>
### **2. Related Work**
<table>
<tbody>
<tr>
<td>
<p>
<strong>Rendering</strong> algorithms in computer graphics output a regular grid of pixels. However, they usually compute these
pixel values over a non-uniform set of points. Efficient procedures like subdivision [48] and adaptive sampling [38, 42]
refine a coarse rasterization in areas where pixel values
have larger variance. Ray-tracing renderers often use oversampling [50], a technique that samples some points more
densely than the output grid to avoid aliasing effects. Here,
we apply classical subdivision to image segmentation.
</p>
<p>
Non-uniform grid representations. Computation on regular grids is the dominant paradigm for 2D image analysis, but this is not the case for other vision tasks. In 3D
shape recognition, large 3D grids are infeasible due to cubic scaling. Most CNN-based approaches do not go beyond coarse 64×64×64 grids [12, 8]. Instead, recent works
consider more efficient non-uniform representations such as
meshes [47, 14], signed distance functions [37], and octrees [46]. Similar to a signed distance function, PointRend
can compute segmentation values at any point.
</p>
<p>
Recently, Marin et al. [36] propose an efficient semantic
segmentation network based on non-uniform subsampling
of the input image prior to processing with a standard semantic segmentation network. PointRend, in contrast, focuses on non-uniform sampling at the output. It may be
possible to combine the two approaches, though [36] is currently unproven for instance segmentation.
</p>
<p>
<strong>Instance segmentation</strong> methods based on the Mask RCNN meta-architecture [19] occupy top ranks in recent
challenges [32, 3]. These region-based architectures typically predict masks on a 28×28 grid irrespective of object size. This is sufficient for small objects, but for large
objects it produces undesirable “blobby” output that oversmooths the fine-level details of large objects (see Fig. 1,
top-left). Alternative, bottom-up approaches group pixels
to form object masks [31, 1, 25]. These methods can produce more detailed output, however, they lag behind regionbased approaches on most instance segmentation benchmarks [29, 9, 40]. TensorMask [7], an alternative slidingwindow method, uses a sophisticated network design to
predict sharp high-resolution masks for large objects, but
its accuracy also lags slightly behind. In this paper, we
show that a region-based segmentation model equipped
with PointRend can produce masks with fine-level details
while improving the accuracy of region-based approaches.
</p>
<p>
<strong>Semantic segmentation.</strong> Fully convolutional networks
(FCNs) [35] are the foundation of modern semantic segmentation approaches. They often predict outputs that have
lower resolution than the input grid and use bilinear upsampling to recover the remaining 8-16× resolution. Results
may be improved with dilated/atrous convolutions that replace some subsampling layers [4, 5] at the expense of more
memory and computation.
</p>
<table>
<tbody>
<tr>
<td>
<img src="./imgs/figure3.png" width="300" />
</td>
<td>
<img src="./imgs/figure3_description.png" width="290" />
</td>
</tr>
</tbody>
</table>
<p>
Alternative approaches include encoder-decoder architectures [6, 24, 44, 45] that subsample the grid representation
in the encoder and then upsample it in the decoder, using
skip connections [44] to recover filtered details. Current
approaches combine dilated convolutions with an encoderdecoder structure [6, 30] to produce output on a 4× sparser
grid than the input grid before applying bilinear interpolation. In our work, we propose a method that can efficiently
predict fine-level details on a grid as dense as the input grid.
</p>
</tr>
</tbody>
</table>
### **3. Method**
<table>
<tbody>
<tr>
<td>
<p>
We analogize image segmentation (of objects and/or
scenes) in computer vision to image rendering in computer
graphics. Rendering is about displaying a model (e.g., a
3D mesh) as a regular grid of pixels, i.e., an image. While
the output representation is a regular grid, the underlying
physical entity (e.g., the 3D model) is continuous and its
physical occupancy and other attributes can be queried at
any real-value point on the image plane using physical and
geometric reasoning, such as ray-tracing.
</p>
<p>
Analogously, in computer vision, we can think of an image segmentation as the occupancy map of an underlying
continuous entity, and the segmentation output, which is a
regular grid of predicted labels, is “rendered” from it. The
entity is encoded in the network’s feature maps and can be
accessed at any point by interpolation. A parameterized
function, that is trained to predict occupancy from these interpolated point-wise feature representations, is the counterpart to physical and geometric reasoning.
</p>
<p>
Based on this analogy, we propose PointRend (Point-based Rendering) as a methodology for image segmentation using point representations. A PointRend module accepts one or more typical CNN feature maps of C channels f ∈ ℝ<sup>C×H×W</sup>, each defined over a regular grid (that
is typically 4× to 16× coarser than the image grid), and outputs predictions for the K class labels p ∈ ℝ<sup>K×H′×W′</sup>
over a regular grid of different (and likely higher) resolution. A PointRend module consists of three main components: (i) A point selection strategy chooses a small number
of real-value points to make predictions on, avoiding excessive computation for all pixels in the high-resolution output
grid. (ii) For each selected point, a point-wise feature representation is extracted. Features for a real-value point are
computed by bilinear interpolation of f, using the point’s 4
nearest neighbors that are on the regular grid of f. As a result, it is able to utilize sub-pixel information encoded in the
channel dimension of f to predict a segmentation that has
higher resolution than f. (iii) A point head: a small neural network trained to predict a label from this point-wise
feature representation, independently for each point.
</p>
<p>
The PointRend architecture can be applied to instance
segmentation (e.g., on Mask R-CNN [19]) and semantic
segmentation (e.g., on FCNs [35]) tasks. For instance segmentation, PointRend is applied to each region. It computes masks in a coarse-to-fine fashion by making predictions over a set of selected points (see Fig. 3). For semantic segmentation, the whole image can be considered as a
single region, and thus without loss of generality we will
describe PointRend in the context of instance segmentation.
We discuss the three main components in more detail next.
</p>
</td>
</tr>
</tbody>
</table>
### **3.1. Point Selection for Inference and Training**
<table>
<thead>
<tr>
<th>
Point Selection for Inference and Training
</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<p>
At the core of our method is the idea of flexibly and
adaptively selecting points in the image plane at which to
predict segmentation labels. Intuitively, these points should
be located more densely near high-frequency areas, such as
object boundaries, analogous to the anti-aliasing problem in
ray-tracing. We develop this idea for inference and training.
</p>
<p>
<strong>Inference.</strong> Our selection strategy for inference is inspired
by the classical technique of adaptive subdivision [48] in
computer graphics. The technique is used to efficiently render high resolutions images (e.g., via ray-tracing) by computing only at locations where there is a high chance that
the value is significantly different from its neighbors; for all
other locations the values are obtained by interpolating already computed output values (starting from a coarse grid).
</p>
<p>
For each region, we iteratively “render” the output mask
in a coarse-to-fine fashion. The coarsest level prediction is
made on the points on a regular grid (e.g., by using a standard coarse segmentation prediction head). In each iteration, PointRend upsamples its previously predicted segmentation using bilinear interpolation and then selects the N
most uncertain points (e.g., those with probabilities closest
to 0.5 for a binary mask) on this denser grid. PointRend then
computes the point-wise feature representation (described
shortly in §3.2) for each of these N points and predicts their
labels. This process is repeated until the segmentation is upsampled to a desired resolution. One step of this procedure is illustrated on a toy example in Fig. 4.
</p>
<table>
<tbody>
<tr>
<td>
<img src="./imgs/figure4.png" width="400"/>
</td>
<td>
<img src="./imgs/figure5.png" width="380"/>
</td>
</tr>
</tbody>
</table>
<p>
With a desired output resolution of M×M pixels and a
starting resolution of M<sub>0</sub>×M<sub>0</sub>, PointRend requires no more
than N log<sub>2</sub>(M/M<sub>0</sub>) point predictions. This is much smaller
than M×M, allowing PointRend to make high-resolution
predictions much more effectively. For example, if M<sub>0</sub> is
7 and the desired resolution is M=224, then 5 subdivision
steps are performed. If we select N=28<sup>2</sup> points at each
step, PointRend makes predictions for only 28<sup>2</sup>·4.25 points,
which is 15 times smaller than 224<sup>2</sup>. Note that fewer than
N log<sub>2</sub>(M/M<sub>0</sub>) points are selected overall because in the first
subdivision step only 14<sup>2</sup> points are available.
</p>
<p>
<strong>Training.</strong> During training, PointRend also needs to select
points at which to construct point-wise features for training the point head. In principle, the point selection strategy
can be similar to the subdivision strategy used in inference.
However, subdivision introduces sequential steps that are
less friendly to training neural networks with backpropagation. Instead, for training we use a non-iterative strategy
based on random sampling.
</p>
<p>
The sampling strategy selects N points on a feature map to train on.1
It is designed to bias selection towards uncertain regions, while also retaining
some degree of uniform coverage, using three principles.
(i) Over generation: we over-generate candidate points by randomly sampling kN points (k>1) from a uniform distribution. (ii) Importance sampling: we focus on points with
uncertain coarse predictions by interpolating the coarse
prediction values at all kN points and computing a taskspecific uncertainty estimate (defined in §4 and §5). The
most uncertain βN points (β ∈ [0, 1]) are selected from
the kN candidates. (iii) Coverage: the remaining (1 − β)N
points are sampled from a uniform distribution. We illustrate this procedure with different settings, and compare it
to regular grid selection, in Fig. 5.
</p>
<p>
At training time, predictions and loss functions are only
computed on the N sampled points (in addition to the coarse
segmentation), which is simpler and more efficient than
backpropagation through subdivision steps. This design is
similar to the parallel training of RPN + Fast R-CNN in a
Faster R-CNN system [13], whose inference is sequential.
</p>
</td>
</tr>
</tbody>
</table>
### **3.2. Point-wise Representation and Point Head**
<table>
<tbody>
<tr>
<td>
<p>
PointRend constructs point-wise features at selected
points by combining (e.g., concatenating) two feature types,
fine-grained and coarse prediction features, described next.
</p>
<p>
<strong>Fine-grained features.</strong> To allow PointRend to render fine
segmentation details we extract a feature vector at each sampled point from CNN feature maps. Because a point is a
real-value 2D coordinate, we perform bilinear interpolation
on the feature maps to compute the feature vector, following standard practice [22, 19, 10]. Features can be extracted
from a single feature map (e.g., res2 in a ResNet); they can
also be extracted from multiple feature maps (e.g., res2 to
res5, or their feature pyramid [28] counterparts) and concatenated, following the Hypercolumn method [17].
</p>
<p>
<strong>Coarse prediction features.</strong> The fine-grained features enable resolving detail, but are also deficient in two regards.
First, they do not contain region-specific information and
thus the same point overlapped by two instances’ bounding boxes will have the same fine-grained features. Yet, the
point can only be in the foreground of one instance. Therefore, for the task of instance segmentation, where different
regions may predict different labels for the same point, additional region-specific information is needed.
</p>
<p>
Second, depending on which feature maps are used for
the fine-grained features, the features may contain only relatively low-level information (e.g., we will use res2 with
DeepLabV3). In this case, a feature source with more contextual and semantic information can be helpful. This issue
affects both instance and semantic segmentation.
</p>
<p>
Based on these considerations, the second feature type is
a coarse segmentation prediction from the network, i.e., a
K-dimensional vector at each point in the region (box) representing a K-class prediction. The coarse resolution, by
design, provides more globalized context, while the channels convey the semantic classes. These coarse predictions are similar to the outputs made by the existing architectures,
and are supervised during training in the same way as existing models. For instance segmentation, the coarse prediction can be, for example, the output of a lightweight 7×7
resolution mask head in Mask R-CNN. For semantic segmentation, it can be, for example, predictions from a stride
16 feature map.
</p>
<p>
<strong>Point head.</strong> Given the point-wise feature representation
at each selected point, PointRend makes point-wise segmentation predictions using a simple multi-layer perceptron (MLP). This MLP shares weights across all points (and
all regions), analogous to a graph convolution [23] or a
PointNet [43]. Since the MLP predicts a segmentation label for each point, it can be trained by standard task-specific
segmentation losses (described in §4 and §5).
</p>
</td>
</tr>
</tbody>
</table>
### **4. Experiments: Instance Segmentation**
<table>
<tbody>
<tr>
<td>
<p>
<strong>Datasets.</strong> We use two standard instance segmentation
datasets: COCO [29] and Cityscapes [9]. We report the
standard mask AP metric [29] using the median of 3 runs
for COCO and 5 for Cityscapes (it has higher variance).
</p>
<p>
COCO has 80 categories with instance-level annotation.
We train on train2017 (∼118k images) and report results
on val2017 (5k images). As noted in [16], the COCO
ground-truth is often coarse and AP for the dataset may not
fully reflect improvements in mask quality. Therefore we
supplement COCO results with AP measured using the 80
COCO category subset of LVIS [16], denoted by AP*.
</p>
<p>
The LVIS annotations have significantly higher quality. Note
that for AP* we use the same models trained on COCO
and simply re-evaluate their predictions against the higher-quality LVIS annotations using the LVIS evaluation API.
Cityscapes is an ego-centric street-scene dataset with
8 categories, 2975 train images, and 500 validation images. The images are higher resolution compared to COCO
(1024×2048 pixels) and have finer, more pixel-accurate
ground-truth instance segmentations.
</p>
<p>
<strong>Architecture.</strong> Our experiments use Mask R-CNN with a
ResNet-50 [20] + FPN [28] backbone. The default mask
head in Mask R-CNN is a region-wise FCN, which we denote by “4× conv”.2 We use this as our baseline for comparison. For PointRend, we make appropriate modifications
to this baseline, as described next.
</p>
<p>
<strong>Lightweight, coarse mask prediction head.</strong> To compute
the coarse prediction, we replace the 4× conv mask head
with a lighter weight design that resembles Mask R-CNN’s
box head and produces a 7×7 mask prediction. Specifically, for each bounding box, we extract a 14×14 feature map from the P2 level of the FPN using bilinear interpolation. The features are computed on a regular grid inside the
bounding box (this operation can be seen as a simple version of
RoIAlign). Next, we use a stride-two 2×2 convolution layer
with 256 output channels followed by ReLU [39], which
reduces the spatial size to 7×7. Finally, similar to Mask
R-CNN’s box head, an MLP with two 1024-wide hidden
layers is applied to yield a 7×7 mask prediction for each of
the K classes. ReLU is used on the MLP’s hidden layers
and the sigmoid activation function is applied to its outputs.
</p>
<p>
<strong>PointRend.</strong> At each selected point, a K-dimensional feature vector is extracted from the coarse prediction head’s
output using bilinear interpolation. PointRend also interpolates a 256-dimensional feature vector from the P2 level of
the FPN. This level has a stride of 4 w.r.t. the input image.
These coarse prediction and fine-grained feature vectors are
concatenated. We make a K-class prediction at selected
points using an MLP with 3 hidden layers with 256 channels. In each layer of the MLP, we supplement the 256 output channels with the K coarse prediction features to make
the input vector for the next layer. We use ReLU inside the
MLP and apply sigmoid to its output.
</p>
<p>
<strong>Training.</strong> We use the standard 1× training schedule and
data augmentation from Detectron2 [49] by default (full details are in the appendix). For PointRend, we sample 14<sup>2</sup>
points using the biased sampling strategy described in
§3.1 with k=3 and β=0.75. We use the distance between
0.5 and the probability of the ground truth class interpolated from the coarse prediction as the point-wise uncertainty measure. For a predicted box with ground-truth class
c, we sum the binary cross-entropy loss for the c-th MLP
output over the 14<sup>2</sup> points. The lightweight coarse prediction head uses the average cross-entropy loss for the mask
predicted for class c, i.e., the same loss as the baseline 4×
conv head. We sum all losses without any re-weighting.
</p>
<p>
During training, Mask R-CNN applies the box and mask
heads in parallel, while during inference they run as a cascade. We found that training as a cascade does not improve
the baseline Mask R-CNN, but PointRend can benefit from
it by sampling points inside more accurate boxes, slightly
improving overall performance (∼0.2% AP, absolute).
</p>
<p>
<strong>Inference.</strong> For inference on a box with predicted class c,
unless otherwise specified, we use the adaptive subdivision
technique to refine the coarse 7×7 prediction for class c to
a 224×224 resolution in 5 steps. At each step, we select and update
(at most) the N=28<sup>2</sup> most uncertain points based on the
absolute difference between the predictions and 0.5.
</p>
</td>
</tr>
</tbody>
</table>
### **4.1. Main Results**
<table>
<tbody>
<tr>
<td>
<table>
<tbody>
<tr>
<td>
<img src="./imgs/table1.png" width="520" />
</td>
<td>
<img src="./imgs/figure6.png" width="470" />
</td>
</tr>
</tbody>
</table>
<table width="500">
<tbody>
<tr>
<td>
<img src="./imgs/table2.png" width="500" />
<img src="./imgs/table2_description.png" width="500" />
</td>
</tr>
</tbody>
</table>
<p>
We compare PointRend to the default 4× conv head in
Mask R-CNN in Table 1. PointRend outperforms the default head on both datasets. The gap is larger when evaluating the COCO categories using the LVIS annotations (AP*)
and for Cityscapes, which we attribute to the superior annotation quality in these datasets. Even with the same output
resolution PointRend outperforms the baseline. The difference between 28×28 and 224×224 is relatively small because AP uses intersection-over-union [11] and, therefore,
is heavily biased towards object-interior pixels and less sensitive to the boundary quality. Visually, however, the difference in boundary quality is obvious, see Fig. 6.
</p>
<p>
Subdivision inference allows PointRend to yield a high
resolution 224×224 prediction using more than 30 times
less compute (FLOPs) and memory than the default 4×
conv head needs to output the same resolution (based on
taking a 112×112 RoIAlign input), see Table 2. PointRend
makes high resolution output feasible in the Mask R-CNN
framework by ignoring areas of an object where a coarse prediction is sufficient (e.g., in the areas far away from object boundaries). In terms of wall-clock runtime, our unoptimized implementation outputs 224×224 masks at ∼13 fps,
which is roughly the same frame-rate as a 4× conv head
modified to output 56×56 masks (by doubling the default
RoIAlign size), a design that actually has lower COCO AP
compared to the 28×28 4× conv head (34.5% vs. 35.2%).
</p>
<table>
<tbody>
<tr>
<td>
<img src="./imgs/table3.png" width="400" />
</td>
<td>
<img src="./imgs/figure7.png" width="390" />
</td>
</tr>
</tbody>
</table>
<p>
Table 3 shows PointRend subdivision inference with different output resolutions and number of points selected at
each subdivision step. Predicting masks at a higher resolution can improve results. Though AP saturates, visual
improvements are still apparent when moving from lower
(e.g., 56×56) to higher (e.g., 224×224) resolution outputs,
see Fig. 7. AP also saturates with the number of points sampled in each subdivision step because points are selected in
the most ambiguous areas first. Additional points may make
predictions in the areas where a coarse prediction is already
sufficient. For objects with complex boundaries, however,
using more points may be beneficial.
</p>
<table>
<tbody>
<tr>
<td>
<img src="./imgs/table4.png" width="400" />
</td>
<td>
<img src="./imgs/table5.png" width="400" />
</td>
</tr>
</tbody>
</table>
</td>
</tr>
</tbody>
</table>
### **4.2. Ablation Experiments**
<table>
<tbody>
<tr>
<td>
<p>
We conduct a number of ablations to analyze PointRend.
In general we note that it is robust to the exact design of the
point head MLP. Changes of its depth or width do not show
any significant difference in our experiments.
</p>
<p>
<strong>Point selection during training.</strong> During training we select
14<sup>2</sup> points per object following the biased sampling strategy (§3.1). Sampling only 14<sup>2</sup> points makes training computationally and memory efficient and we found that using
more points does not improve results. Surprisingly, sampling only 49 points per box still maintains AP, though we
observe an increased variance in AP.
</p>
<p>
Table 4 shows PointRend performance with different selection strategies during training. Regular grid selection
achieves similar results to uniform sampling. Whereas biasing sampling toward ambiguous areas improves AP. However, a sampling strategy that is biased too heavily towards
boundaries of the coarse prediction (k>10 and β close to
1.0) decreases AP. Overall, we find a wide range of parameters 2<k<5 and 0.75<β<1.0 delivers similar results.
</p>
<p>
<strong>Larger models, longer training.</strong> Training ResNet-50 +
FPN (denoted R50-FPN) with the 1× schedule under-fits
on COCO. In Table 5 we show that the PointRend improvements over the baseline hold with both longer training
schedule and larger models (see the appendix for details).
</p>
<img src="./imgs/figure8.png" />
</td>
</tr>
</tbody>
</table>
### **5. Experiments: Semantic Segmentation**
<table>
<tbody>
<tr>
<td>
<p>
PointRend is not limited to instance segmentation and
can be extended to other pixel-level recognition tasks. Here,
we demonstrate that PointRend can benefit two semantic
segmentation models: DeeplabV3 [5], which uses dilated
convolutions to make prediction on a denser grid, and SemanticFPN [24], a simple encoder-decoder architecture.
</p>
<p>
<strong>Dataset.</strong> We use the Cityscapes [9] semantic segmentation
set with 19 categories, 2975 training images, and 500 validation images. We report the median mIoU of 5 trials.
</p>
<p>
<strong>Implementation details.</strong> We reimplemented DeeplabV3
and SemanticFPN following their respective papers. SemanticFPN uses a standard ResNet-101 [20], whereas
DeeplabV3 uses the ResNet-103 proposed in [5].3 We follow the original papers’ training schedules and data augmentation (details are in the appendix).
</p>
<p>
We use the same PointRend architecture as for instance segmentation. Coarse prediction features come from
the (already coarse) output of the semantic segmentation
model. Fine-grained features are interpolated from res2 for
DeeplabV3 and from P2 for SemanticFPN. During training
we sample as many points as there are on a stride 16 feature map of the input (2304 for deeplabV3 and 2048 for SemanticFPN). We use the same k=3, β=0.75 point selection
strategy. During inference, subdivision uses N=8096 (i.e.,
the number of points in the stride 16 map of a 1024×2048
image) until reaching the input image resolution. To measure prediction uncertainty we use the same strategy during training and inference: the difference between the most
confident and second most confident class probabilities.
</p>
<table>
<tbody>
<tr>
<td>
<img src="./imgs/table6.png" width="500" />
</td>
<td>
<img src="./imgs/figure9.png" width="400" />
</td>
</tr>
</tbody>
</table>
<table>
<tbody>
<tr>
<td>
<img src="./imgs/table7.png" width="500" />
</td>
</tr>
</tbody>
</table>
<p>
<strong>DeeplabV3.</strong> In Table 6 we compare DeepLabV3 to
DeeplabV3 with PointRend. The output resolution can also
be increased by 2× at inference by using dilated convolutions in res4 stage, as described in [5]. Compared to both, PointRend has higher mIoU. Qualitative improvements are
also evident, see Fig. 8. By sampling points adaptively,
PointRend reaches 1024×2048 resolution (i.e. 2M points)
by making predictions for only 32k points, see Fig. 9.
</p>
<p>
<strong>SemanticFPN.</strong> Table 7 shows that SemanticFPN with
PointRend improves over both 8× and 4× output stride
variants without PointRend.
</p>
</td>
</tr>
</tbody>
</table>
### **Appendix A. Instance Segmentation Details**
<table>
<thead>
<tr>
<th>
Appendix A. Instance Segmentation Details
</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<p>
We use SGD with 0.9 momentum; a linear learning rate
warmup [15] over 1000 updates starting from a learning rate
of 0.001 is applied; weight decay 0.0001 is applied; horizontal flipping and scale train-time data augmentation; the
batch normalization (BN) [21] layers from the ImageNet
pre-trained models are frozen (i.e., BN is not used); no testtime augmentation is used.
</p>
<p>
<strong>COCO [29]:</strong> 16 images per mini-batch; the training schedule is 60k / 20k / 10k updates at learning rates of 0.02 / 0.002 / 0.0002 respectively; training images are resized randomly
to a shorter edge from 640 to 800 pixels with a step of 32
pixels and inference images are resized to a shorter edge
size of 800 pixels.
</p>
<p>
<strong>Cityscapes [9]:</strong> 8 images per mini-batch; the training
schedule is 18k / 6k updates at learning rates of 0.01 /
0.001 respectively; training images are resized randomly to
a shorter edge from 800 to 1024 pixels with a step of 32 pixels and inference images are resized to a shorter edge size
of 1024 pixels.
</p>
<p>
<strong>Longer schedule:</strong> The 3× schedule for COCO is 210k /
40k / 20k updates at learning rates of 0.02 / 0.002 / 0.0002,
respectively; all other details are the same as the setting described above.
</p>
</td>
</tr>
</tbody>
</table>
### **Appendix B. Semantic Segmentation Details**
<table>
<tbody>
<tr>
<td>
<p>
<strong>DeeplabV3 [5]:</strong> We use SGD with 0.9 momentum with 16
images per mini-batch cropped to a fixed 768×768 size;
the training schedule is 90k updates with a poly learning
rate [34] update strategy, starting from 0.01; a linear learning rate warmup [15] over 1000 updates starting from a
learning rate of 0.001 is applied; the learning rate for ASPP
and the prediction convolution are multiplied by 10; weight
decay of 0.0001 is applied; random horizontal flipping and
scaling of 0.5× to 2.0× with a 32 pixel step is used as training data augmentation; BN is applied to 16 images minibatches; no test-time augmentation is used;
</p>
<p>
<strong>SemanticFPN [24]:</strong> We use SGD with 0.9 momentum
with 32 images per mini-batch cropped to a fixed 512×1024
size; the training schedule is 40k / 15k / 10k updates at
learning rates of 0.01 / 0.001 / 0.0001 respectively; a linear
learning rate warmup [15] over 1000 updates starting from
a learning rate of 0.001 is applied; weight decay 0.0001 is
applied; horizontal flipping, color augmentation [33], and
crop bootstrapping [2] are used during training; scale traintime data augmentation resizes an input image from 0.5×
to 2.0× with a 32 pixel step; BN layers are frozen (i.e., BN
is not used); no test-time augmentation is used.
</p>
</td>
</tr>
</tbody>
</table>
### **Appendix C. AP* Computation**
<table>
<tbody>
<tr>
<td>
<p>
The first version (v1) of this paper on arXiv has an error in COCO mask AP evaluated against the LVIS annotations [16] (AP*
). The old version used an incorrect list of
the categories not present in each evaluation image, which
resulted in lower AP* values.
</p>
</td>
</tr>
</tbody>
</table>
### **References**
- [1] Anurag Arnab and Philip HS Torr. Pixelwise instance
segmentation with a dynamically instantiated network. In
CVPR, 2017. 3
- [2] Samuel Rota Bulo, Lorenzo Porzi, and Peter Kontschieder. `
In-place activated batchnorm for memory-optimized training
of DNNs. In CVPR, 2018. 9
- [3] Kai Chen, Jiangmiao Pang, Jiaqi Wang, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jianping Shi,
Wanli Ouyang, et al. Hybrid task cascade for instance segmentation. In CVPR, 2019. 3
- [4] Liang-Chieh Chen, George Papandreou, Iasonas Kokkinos,
Kevin Murphy, and Alan L Yuille. DeepLab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected CRFs. PAMI, 2018. 3
- [5] Liang-Chieh Chen, George Papandreou, Florian Schroff, and
Hartwig Adam. Rethinking atrous convolution for semantic
image segmentation. arXiv:1706.05587, 2017. 2, 3, 8, 9
- [6] Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian
Schroff, and Hartwig Adam. Encoder-decoder with atrous
separable convolution for semantic image segmentation. In
ECCV, 2018. 3
- [7] Xinlei Chen, Ross Girshick, Kaiming He, and Piotr Dollar. ´
TensorMask: A foundation for dense object segmentation. In
ICCV, 2019. 3
- [8] Christopher B Choy, Danfei Xu, JunYoung Gwak, Kevin
Chen, and Silvio Savarese. 3D-R2N2: A unified approach
for single and multi-view 3D object reconstruction. In
ECCV, 2016. 3
- [9] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo
Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe
Franke, Stefan Roth, and Bernt Schiele. The Cityscapes
dataset for semantic urban scene understanding. In CVPR, 2016. 2, 3, 5, 8, 9
- [10] Jifeng Dai, Haozhi Qi, Yuwen Xiong, Yi Li, Guodong
Zhang, Han Hu, and Yichen Wei. Deformable convolutional networks. In ICCV, 2017. 5
- [11] Mark Everingham, SM Ali Eslami, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. The
PASCAL visual object classes challenge: A retrospective.
IJCV, 2015. 6
- [12] Rohit Girdhar, David F Fouhey, Mikel Rodriguez, and Abhinav Gupta. Learning a predictable and generative vector
representation for objects. In ECCV, 2016. 3
- [13] Ross Girshick. Fast R-CNN. In ICCV, 2015. 5
- [14] Georgia Gkioxari, Jitendra Malik, and Justin Johnson. Mesh
R-CNN. In ICCV, 2019. 3
9
- [15] Priya Goyal, Piotr Dollar, Ross Girshick, Pieter Noord- ´
huis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch,
Yangqing Jia, and Kaiming He. Accurate, large minibatch
sgd: Training imagenet in 1 hour. arXiv:1706.02677, 2017.
9
- [16] Agrim Gupta, Piotr Dollar, and Ross Girshick. LVIS: A
dataset for large vocabulary instance segmentation. In ICCV, 2019. 5, 6, 7, 9
- [17] Bharath Hariharan, Pablo Arbelaez, Ross Girshick, and Ji- ´
tendra Malik. Hypercolumns for object segmentation and
fine-grained localization. In CVPR, 2015. 5
- [18] Kaiming He, Ross Girshick, and Piotr Dollar. Rethinking ´
imagenet pre-training. In ICCV, 2019. 7
- [19] Kaiming He, Georgia Gkioxari, Piotr Dollar, and Ross Gir- ´
shick. Mask R-CNN. In ICCV, 2017. 1, 2, 3, 4, 5, 6
- [20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun.
Deep residual learning for image recognition. In CVPR, 2016. 2, 5, 8
- [21] Sergey Ioffe and Christian Szegedy. Batch normalization:
Accelerating deep network training by reducing internal covariate shift. In ICML, 2015. 9
- [22] Max Jaderberg, Karen Simonyan, Andrew Zisserman, and
Koray Kavukcuoglu. Spatial transformer networks. In NIPS, 2015. 5
- [23] Thomas N Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. ICLR, 2017. 5
- [24] Alexander Kirillov, Ross Girshick, Kaiming He, and Piotr
Dollár. Panoptic feature pyramid networks. In CVPR, 2019.
3, 8, 9
- [25] Alexander Kirillov, Evgeny Levinkov, Bjoern Andres, Bogdan Savchynskyy, and Carsten Rother. InstanceCut: from
edges to instances with multicut. In CVPR, 2017. 3
- [26] Alex Krizhevsky, Ilya Sutskever, and Geoff Hinton. ImageNet classification with deep convolutional neural networks. In NIPS, 2012. 1
- [27] Yann LeCun, Bernhard Boser, John S Denker, Donnie
Henderson, Richard E Howard, Wayne Hubbard, and
Lawrence D Jackel. Backpropagation applied to handwritten zip code recognition. Neural computation, 1989. 1
- [28] Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, ´
Bharath Hariharan, and Serge Belongie. Feature pyramid
networks for object detection. In CVPR, 2017. 2, 5
- [29] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays,
Pietro Perona, Deva Ramanan, Piotr Dollar, and C Lawrence ´
Zitnick. Microsoft COCO: Common objects in context. In
ECCV, 2014. 2, 3, 5, 9
- [30] Chenxi Liu, Liang-Chieh Chen, Florian Schroff, Hartwig
Adam, Wei Hua, Alan L Yuille, and Li Fei-Fei. Autodeeplab: Hierarchical neural architecture search for semantic
image segmentation. In CVPR, 2019. 3
- [31] Shu Liu, Jiaya Jia, Sanja Fidler, and Raquel Urtasun. SGN:
Sequential grouping networks for instance segmentation. In
CVPR, 2017. 3
- [32] Shu Liu, Lu Qi, Haifang Qin, Jianping Shi, and Jiaya Jia.
Path aggregation network for instance segmentation. In
CVPR, 2018. 3
- [33] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian
Szegedy, Scott Reed, Cheng-Yang Fu, and Alexander C
Berg. SSD: Single shot multibox detector. In ECCV, 2016.
9
- [34] Wei Liu, Andrew Rabinovich, and Alexander C Berg.
Parsenet: Looking wider to see better. arXiv:1506.04579, 2015. 9
- [35] Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully
convolutional networks for semantic segmentation. In
CVPR, 2015. 1, 2, 3, 4
- [36] Dmitrii Marin, Zijian He, Peter Vajda, Priyam Chatterjee,
Sam Tsai, Fei Yang, and Yuri Boykov. Efficient segmentation: Learning downsampling near semantic boundaries. In
ICCV, 2019. 3
- [37] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks:
Learning 3D reconstruction in function space. In CVPR, 2019. 3
- [38] Don P Mitchell. Generating antialiased images at low sampling densities. ACM SIGGRAPH Computer Graphics, 1987. 2
- [39] Vinod Nair and Geoffrey E Hinton. Rectified linear units
improve restricted boltzmann machines. In ICML, 2010. 6
- [40] Gerhard Neuhold, Tobias Ollmann, Samuel Rota Bulo, and `
Peter Kontschieder. The mapillary vistas dataset for semantic
understanding of street scenes. In CVPR, 2017. 3
- [41] Paphio. Jo-Wilfried Tsonga - [19]. CC BY-NC-SA
2.0. https://www.flickr.com/photos/paphio/
2855627782/, 2008. 1
- [42] Matt Pharr, Wenzel Jakob, and Greg Humphreys. Physically
based rendering: From theory to implementation, chapter 7.
Morgan Kaufmann, 2016. 2
- [43] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas.
PointNet: Deep learning on point sets for 3D classification
and segmentation. In CVPR, 2017. 5
- [44] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. UNet: Convolutional networks for biomedical image segmentation. In MICCAI, 2015. 3
- [45] Ke Sun, Yang Zhao, Borui Jiang, Tianheng Cheng, Bin Xiao,
Dong Liu, Yadong Mu, Xinggang Wang, Wenyu Liu, and
Jingdong Wang. High-resolution representations for labeling
pixels and regions. arXiv:1904.04514, 2019. 3
- [46] Maxim Tatarchenko, Alexey Dosovitskiy, and Thomas Brox.
Octree generating networks: Efficient convolutional architectures for high-resolution 3D outputs. In ICCV, 2017. 3
- [47] Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei
Liu, and Yu-Gang Jiang. Pixel2Mesh: Generating 3D mesh
models from single RGB images. In ECCV, 2018. 3
- [48] Turner Whitted. An improved illumination model for shaded
display. In ACM SIGGRAPH Computer Graphics, 1979. 2, 4
- [49] Yuxin Wu, Alexander Kirillov, Francisco Massa, Wan-Yen
Lo, and Ross Girshick. Detectron2. https://github.
com/facebookresearch/detectron2, 2019. 6
- [50] Kun Zhou, Qiming Hou, Rui Wang, and Baining Guo. Realtime kd-tree construction on graphics hardware. In ACM
Transactions on Graphics (TOG), 2008. 2
| github_jupyter |
<div style='background-image: url("share/baku.jpg") ; padding: 0px ; background-size: cover ; border-radius: 15px ; height: 250px; background-position: 0% 80%'>
<div style="float: right ; margin: 50px ; padding: 20px ; background: rgba(255 , 255 , 255 , 0.9) ; width: 50% ; height: 150px">
<div style="position: relative ; top: 50% ; transform: translatey(-50%)">
<div style="font-size: xx-large ; font-weight: 900 ; color: rgba(0 , 0 , 0 , 0.9) ; line-height: 100%">ObsPy Tutorial</div>
<div style="font-size: large ; padding-top: 20px ; color: rgba(0 , 0 , 0 , 0.7)">Handling Event Metadata</div>
</div>
</div>
</div>
image: User:Abbaszade656 / Wikimedia Commons / <a href="http://creativecommons.org/licenses/by-sa/4.0/">CC-BY-SA-4.0</a>
## Workshop for the "Training in Network Management Systems and Analytical Tools for Seismic"
### Baku, October 2018
Seismo-Live: http://seismo-live.org
##### Authors:
* Lion Krischer ([@krischer](https://github.com/krischer))
* Tobias Megies ([@megies](https://github.com/megies))
---

```
%matplotlib inline
import matplotlib.pyplot as plt
# use the ggplot-like style sheet and a 12x8 inch default figure size
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = 12, 8
```
- for event metadata, the de-facto standard is [QuakeML (an xml document structure)](https://quake.ethz.ch/quakeml/)
- QuakeML files can be read using **`read_events()`**
```
import obspy
# read_events() parses the QuakeML file into a Catalog object
catalog = obspy.read_events("./data/south_napa_with_some_aftershocks.xml")
print(catalog)
```
- **`read_events()`** function returns a **`Catalog`** object, which is
a collection of **`Event`** objects.
```
# A Catalog is a collection of Event objects; index it like a list.
print(type(catalog))
print(type(catalog[0]))
event = catalog[0]
print(event)
```
- Event objects are again collections of other resources.
- the nested ObsPy Event class structure (Catalog/Event/Origin/Magnitude/FocalMechanism/...) is closely modelled after QuakeML
<img src="images/Event.svg" width=90%>
```
# An Event in turn holds lists of Origin and Magnitude child objects,
# mirroring the nested QuakeML structure shown in the figure above.
print(type(event.origins))
print(type(event.origins[0]))
print(event.origins[0])
print(type(event.magnitudes))
print(type(event.magnitudes[0]))
print(event.magnitudes[0])
# try event.<Tab> to get an idea what "children" elements event has
```
- The Catalog object contains some convenience methods to make
working with events easier.
- for example, the included events can be filtered with various keys.
```
# filter() returns a new Catalog with only events matching the condition
largest_magnitude_events = catalog.filter("magnitude >= 4.0")
print(largest_magnitude_events)
```
- There is a basic preview plot using the matplotlib basemap module.
```
# quick map preview; the trailing semicolon suppresses the notebook's
# extra text representation of the returned figure
catalog.plot(projection="local", resolution="i", label="magnitude");
```
- a (modified) Catalog can be output to file in a number of different formats.
```
# write the filtered catalog back out as QuakeML, then verify on disk
largest_magnitude_events.write("/tmp/large_events.xml", format="QUAKEML")
!ls -l /tmp/large_events.xml
```
- the event type classes can be used to build up Events/Catalogs/Picks/.. from scratch in custom processing work flows and to share them with other researchers in the de facto standard format QuakeML
```
from obspy import UTCDateTime
from obspy.core.event import Catalog, Event, Origin, Magnitude
from obspy.geodetics import FlinnEngdahl
# Build a toy catalog from scratch using the ObsPy event classes.
cat = Catalog()
cat.description = "Just a fictitious toy example catalog built from scratch"
e = Event()
# "not existing" is one of the controlled event-type strings allowed here
e.event_type = "not existing"
o = Origin()
o.time = UTCDateTime(2014, 2, 23, 18, 0, 0)
o.latitude = 47.6
o.longitude = 12.0
# depth of 10000 here - presumably meters (i.e. 10 km); confirm against
# the QuakeML convention used downstream
o.depth = 10000
o.depth_type = "operator assigned"
o.evaluation_mode = "manual"
o.evaluation_status = "preliminary"
# look up the Flinn-Engdahl geographic region name for this epicenter
o.region = FlinnEngdahl().get_region(o.longitude, o.latitude)
m = Magnitude()
m.mag = 7.2
m.magnitude_type = "Mw"
m2 = Magnitude()
m2.mag = 7.4
m2.magnitude_type = "Ms"
# also included could be: custom picks, amplitude measurements, station magnitudes,
# focal mechanisms, moment tensors, ...
# make associations, put everything together
cat.append(e)
e.origins = [o]
e.magnitudes = [m, m2]
# tie each magnitude to the origin it was computed from via its resource id
m.origin_id = o.resource_id
m2.origin_id = o.resource_id
print(cat)
cat.write("/tmp/my_custom_events.xml", format="QUAKEML")
!cat /tmp/my_custom_events.xml
```
| github_jupyter |
# Simple Analysis with Pandas and Numpy
***ABSTRACT***
* If a donor gives aid for a project that the recipient government would have undertaken anyway, then the aid is financing some expenditure other than the intended project. The notion that aid in this sense may be "fungible," while long recognized, has recently been receiving some empirical support. The paper "What Does Aid to Africa Finance?" focuses on Sub-Saharan Africa—the region with the largest GDP share of aid—and presents results that indicate that aid may be partially fungible, and suggests some reasons why.
This database contains data used for the analysis.
#### Import Libraries & Load the data
```
import pandas as pd
import numpy as np
print('OK')
# Load the aid-fungibility dataset (schema not shown here - inspect via df.info()).
df = pd.read_csv('data.csv')
df.head(-5)
df.info()
# Split the data into four random, non-overlapping subsets.
# NOTE: frac=0.25 is taken from the *remaining* rows each time, so the
# subsets shrink: df1 = 25% of all rows, df2 = 25% of the remaining 75%, etc.
df_new = df.copy()
df1 = df_new.sample(frac = 0.25, random_state = 0)
df_new = df_new.drop(df1.index)
df1.head(3)
df2 = df_new.sample(frac = 0.25, random_state = 0)
df_new = df_new.drop(df2.index)
df2.head(3)
df3 = df_new.sample(frac = 0.25, random_state = 0)
df3.head(3)
df4 = df_new.drop(df3.index) # since all subsets' indexes were dropped
df4.head(3)
```
### Missing Values
* **Interpolation** is a type of estimation, a method of constructing new data points within the range of a discrete set of known data points while **imputation** is replacing the missing data of the mean of the column.
```
# Count missing values per column, then fill the missing 'popn' entries.
df3.isnull().sum()
# Rows where 'popn' is missing (`.isnull()` already yields booleans,
# so the redundant `== True` comparison was dropped).
df3[df3['popn'].isnull()]
# Imputation: replace NaN with the column mean.
# Assigning the result back replaces the old
# `df3['popn'].fillna(..., inplace=True)` call, which operates on a
# column selection (chained assignment) - a pattern pandas deprecates
# and which may silently fail to modify df3.
df3['popn'] = df3['popn'].fillna(df3['popn'].mean())
df3.isnull().sum()
df1.isna().sum()
# Interpolation: fill NaN from neighbouring row values instead of the mean.
df1['popn'] = df1['popn'].fillna(df1['popn'].interpolate())
df1.isna().sum()
```
##### When to use interpolation or imputation?
* Data has linear relationship = Interpolation otherwise imputation.
### Combine Data
```
# join() aligns on the index by default; lsuffix renames overlapping
# column names coming from the left frame (df1).
df5 = df1.join(df2, lsuffix = '_left') # _left indicates columns from left hand side
df5 # NaN = df1 is larger than df2
# Concat
# concat stacks the two frames vertically (axis=0 appends rows).
df6 = pd.concat([df1,df2], axis = 0) # 0 indicates rows
df6
```
#### Inner Join
<img src="https://cdn.sqltutorial.org/wp-content/uploads/2016/03/SQL-INNER-JOIN.png"/>
```
# Inner join on 'year' only: every df1 row is paired with every df2 row
# sharing the same year (a many-to-many match); other shared column
# names get _x / _y suffixes automatically.
df7 = pd.merge(df1,df2, on = 'year')
df7
```
#### Full Outer Inclusive Join
<img src="https://cdn.sqltutorial.org/wp-content/uploads/2016/07/SQL-FULL-OUTER-JOIN.png"/>
```
# Without 'on', merge joins on all columns common to both frames;
# how='outer' keeps rows from both sides, filling gaps with NaN.
df8 = pd.merge(df1,df2, how = 'outer')
df8
```
#### Left Inclusive Join
<img src="https://cdn.sqltutorial.org/wp-content/uploads/2016/03/SQL-LEFT-JOIN.png"/>
```
# Left join: keep every df1 row, matching df2 rows where possible.
df9 = pd.merge(df1,df2, how = 'left')
df9
```
#### Right Inclusive Join
<img src="https://www.dofactory.com/img/sql/sql-right-join.png"/>
```
# Right join: keep every df2 row, matching df1 rows where possible.
df10 = pd.merge(df1,df2, how = 'right')
df10.head(5)
```
### Sorting Data
```
# NOTE(review): sort_values returns a *new* sorted frame; without
# inplace=True (or assigning the result) df1 itself stays unsorted,
# so the bare `df1` below still displays the original order.
df1.sort_values(by = ['agrgdp'], ascending = True)
df1
# sort_index orders rows by their index labels (again returning a copy)
df1.sort_index(axis = 0, ascending =True)
```
### Selecting and Slicing Data
```
# Label-based column selection, then positional slicing:
# iloc[:, 1:8] keeps all rows and the 2nd through 8th columns.
df1[['countryc', 'year']]
df1.iloc[:,1:8].head()
```
### Grouping & Aggregating
```
# Group rows and aggregate each group.
# The string 'mean' replaces np.mean: passing NumPy callables to agg is
# deprecated by pandas (they are internally mapped to 'mean' anyway).
df1.groupby(['year', 'infmort']).agg('mean')
# Mapping of each 'schsec' value to the row labels in that group.
df1.groupby(['schsec']).groups
```
| github_jupyter |
# Word vectors (FastText) for Baseline
#### Create Spacy model from word vectors
```bash
python -m spacy init-model en output/cord19_docrel/spacy/en_cord19_fasttext_300d --vectors-loc output/cord19_docrel/cord19.fasttext.w2v.txt
python -m spacy init-model en output/acl_docrel/spacy/en_acl_fasttext_300d --vectors-loc output/acl_docrel/acl.fasttext.w2v.txt
```
```
import gensim
import json
import os
import requests
import pickle
import pandas as pd
import logging
from pathlib import Path
from tqdm import tqdm_notebook as tqdm
from smart_open import open
from nlp import load_dataset
import nlp
import acl.utils
from trainer_cli import ExperimentArguments
```
## CORD19
```
# Build a plain-text token file from the CORD-19 document corpus and
# train FastText word vectors on it.
data_dir = Path('./output/cord19_docrel')
experiment_args = ExperimentArguments(
    nlp_dataset='./datasets/cord19_docrel/cord19_docrel.py',
    nlp_cache_dir='./data/nlp_cache',
    doc_id_col='doi',
    doc_a_col='from_doi',
    doc_b_col='to_doi',
    cv_fold=1,
)
# Load the 'docs' split of the dataset.
docs_ds = load_dataset(experiment_args.nlp_dataset,
                       name='docs',
                       cache_dir=experiment_args.nlp_cache_dir,
                       split=nlp.Split('docs'))
# Extract tokens from each document and create token file.
# One whitespace-separated line of lower-cased tokens per document.
tokens_count = 0
with open(data_dir / 'tokens.txt', 'w') as f:
    for idx, doc in docs_ds.data.to_pandas().iterrows():
        text = acl.utils.get_text_from_doc(doc)
        for token in gensim.utils.simple_preprocess(text, min_len=2, max_len=15):
            f.write(token + ' ')
            tokens_count += 1
        f.write('\n')
print(f'Total tokens: {tokens_count:,}')
import fasttext
model = fasttext.train_unsupervised(str(data_dir / 'tokens.txt'),
                                    model='skipgram',
                                    lr=0.05, # learning rate [0.05]
                                    dim=300, # size of word vectors [100]
                                    ws=5, # size of the context window [5]
                                    epoch=5, # number of epochs [5]
                                    thread=4, # number of threads [number of cpus]
                                    )
model.save_model(str(data_dir / 'cord19.fasttext.bin'))
# Convert the fastText binary model to word2vec text format so it can be
# loaded by `spacy init-model --vectors-loc` (see the shell cell above).
from gensim.models.wrappers import FastText
ft_model = FastText.load_fasttext_format(str(data_dir / 'cord19.fasttext.bin'))
ft_model.wv.save_word2vec_format(data_dir / 'cord19.fasttext.w2v.txt')
# Unset
# Free memory before processing the next corpus (ACL cell below).
del ft_model
del model
del docs_ds
del experiment_args
del data_dir
```
## ACL
```
# Build a plain-text token file from the ACL document corpus and train
# FastText word vectors on it (same pipeline as the CORD-19 cell above).
data_dir = Path('./output/acl_docrel')
experiment_args = ExperimentArguments(
    nlp_dataset='./datasets/acl_docrel/acl_docrel.py',
    nlp_cache_dir='./data/nlp_cache',
    doc_id_col='s2_id',
    doc_a_col='from_s2_id',
    doc_b_col='to_s2_id',
    cv_fold=1,
)
docs_ds = load_dataset(experiment_args.nlp_dataset,
                       name='docs',
                       cache_dir=experiment_args.nlp_cache_dir,
                       split=nlp.Split('docs'))
# Extract tokens from each document and create token file.
tokens_count = 0
with open(data_dir / 'tokens.txt', 'w') as f:
    for idx, doc in docs_ds.data.to_pandas().iterrows():
        text = acl.utils.get_text_from_doc(doc)
        for token in gensim.utils.simple_preprocess(text, min_len=2, max_len=15):
            f.write(token + ' ')
            tokens_count += 1
        f.write('\n')
# Total tokens: 2,194,010
print(f'Total tokens: {tokens_count:,}')
import fasttext
model = fasttext.train_unsupervised(str(data_dir / 'tokens.txt'),
                                    model='skipgram',
                                    lr=0.05, # learning rate [0.05]
                                    dim=300, # size of word vectors [100]
                                    ws=5, # size of the context window [5]
                                    epoch=5, # number of epochs [5]
                                    thread=4, # number of threads [number of cpus]
                                    )
model.save_model(str(data_dir / 'acl.fasttext.bin'))
# Convert the binary model to word2vec text format for spaCy (see top cell).
from gensim.models.wrappers import FastText
ft_model = FastText.load_fasttext_format(str(data_dir / 'acl.fasttext.bin'))
ft_model.wv.save_word2vec_format(data_dir / 'acl.fasttext.w2v.txt')
```
| github_jupyter |
# workbook C: lists and strings
This activity builds on the Python you have become familiar with in
* *Chapter 2 Python Lists*
* *Chapter 3 Functions and packages*
from the
[DataCamp online course *Intro to Python for Data Science*](https://www.datacamp.com/courses/intro-to-python-for-data-science). Here we will look at lists and explore how strings and lists are related.
> ### Reminder: saving your work
>
> As you work through the work book it is important to regularly save your work. Notice that as you have made changes the Jupyter window top line will warn you there are `(unsaved changes)` in small text. To save your work in this notebook by either select menu item `File` `Save` or by hit the save button:
>
> 
>
>
> ### Reminder: getting help
> Please see the page:
> [Help with programming](https://canvas.anglia.ac.uk/courses/12178/pages/help-with-programming)
> on ARU Canvas.
## Python Lists: defining lists
Why do we need lists? Lists are useful for keeping a collection of things in order.
So suppose we wanted to store information about the number
of chromosomes for a few selected species:
| Organism | Scientific name | Chromosome number |
| -------- | :--------------: | :---------------: |
| fruit fly | Drosophila melanogaster | 8 |
| rice | Oryza sativa | 24 |
| mouse | Mus musculus | 40 |
| wheat | Triticum aestivum | 42 |
| human | Homo sapiens | 46 |
| potato | Solanum tuberosum | 48 |
| great white shark | Carcharodon carcharias | 82 |
| carp | Carassius carassius | 100 |
> *information from https://en.wikipedia.org/wiki/List_of_organisms_by_chromosome_count*
We can store the list of organisms in a Python list:
```
# short common names, in the same order as the table above
organisms = ['fly', 'rice', 'mouse', 'wheat', 'human', 'potato', 'shark', 'carp']
# Instruction: add Python command to print organisms list
### your line here!
```
Notice the use of square brackets `[]` in both the list definition and when the list is the printed.
**Now its your turn.** Create a list `chromosomes` that contains information from the table above about the number of chromosomes for each species.
```
# Instruction: define a Python list 'chromosomes' with
# the number of chromosomes from table above (as integers)
### your line here!
# Instruction: now print your chromosomes list
### your line here!
```
Now run the following Python that checks the chromosomes list is as it is expected (do not worry about understanding it - for now).
```
# Instruction: run this cell to check your chromosomes list
# python code to check chromosomes list as described above
# do not worry about understanding it yet - it just shows what can be done
# Each check narrows down what could be wrong: defined -> is a list ->
# right length -> right contents (via the expected sum, 390).
if 'chromosomes' not in locals():
    print('Error: chromosomes variable has not been defined! Try again!')
elif not isinstance(chromosomes, list):
    print('Error: chromosomes is not a list! Try again!')
elif len(chromosomes) != 8:
    print('Error: chromosomes list does not have 8 elements! Try again!')
elif sum(chromosomes) != 390:
    print('Error: chromosomes list does not have expected sum! Try again!')
else:
    # success message typo fixed: 'chomosomes' -> 'chromosomes'
    print('Well done chromosomes list checks out!')
```
### Appending items to a list
You can append extra items to a list.
So to define a list of scientific names we could start with an empty list and append the names one by one:
```
scientific = [] # empty list
# append() adds one item at a time to the end of the list
scientific.append('Drosophila melanogaster')
scientific.append('Oryza sativa')
scientific.append('Mus musculus')
scientific.append('Triticum aestivum')
# concatenating with + joins two lists into a new list
scientific = scientific + ['Homo sapiens',
                           'Solanum tuberosum',
                           'Carcharodon carcharias',
                           'Carassius carassius']
# Instruction: add Python command to print scientific
### your line here!
```
Notice that the last four scientific names have been added by concatenating (joining) two lists together - the result is a new list.
The concatentation command above could be more simplified by using the `+=` operator:
```
scientific += ['Homo sapiens', 'Solanum tuberosum',
'Carcharodon carcharias', 'Carassius carassius']
```
**Now it is your turn.** From https://en.wikipedia.org/wiki/List_of_organisms_by_chromosome_count The `Red king crab` (`Paralithodes camtschaticus`) has `208` chromosomes. Lets add this information to the lists.
```
# Instruction: append 'king crab' to the list organisms
### your line here!
# following lines check that 'king crab' is somewhere in the list.
if not 'king crab' in organisms:
print('Error: you have not added "king crab" to organisms! Try again!')
else:
print('Well done organisms now has a king crab in it:\n', organisms)
```
Lets add the king crab's number of chromosomes to list `chromosomes`. This time using concatenation rather than `.append()`
```
# Instruction append 208 to the list chromosomes using concatenation
### your line here!
print(chromosomes)
```
> *Hint: remember that you can only concentate two lists together so:*
> ```
> primes = [2, 3, 5]
>primes += [7]
> ```
> *works fine whereas trying concentenate a list and an integer
> will result in an error. So:*
> ```
> primes = [2, 3, 5]
> primes += 7
> ```
> **will not work**
Now lets you should add the red kings crab scientific name `Paralithodes camtschaticus` to the `scientific` list.
```
# Instruction append 'Paralithodes camtschaticus' to the list scientific
### your line here!
print(scientific)
```
### Finding number of items in a list using the `len()` function
Reminder in exercise we saw that we use the `len()` function to find the number of letters in a string.
This function will also return the number of items in a list.
Try running the next cell
```
# Instruction: run this cell to see how len() works with lists.
primes = [2, 3, 5]
# len() counts items in a list, just as it counts characters in a string
print(len(primes))
```
**Now its your turn**
```
# Instruction: print out the number of items in the a_zero, empty and three_nones lists
a_zero = ['a', 0]
empty = []
three_nones = [None, None, None]
### your line here!
```
Notice that a list can be empty `[]`, this is a list with no items in it. Lists can also have `None` in them!
Now print out the number of items in each of the `organisms`, `scientific` and `chromosomes` lists that have be defined above (they should be in the kernel memory).
### Getting an item from a list by using `list_name[index]`
> *If this is unfamiliar, go back and look at the
> [DataCamp course](https://www.datacamp.com/courses/intro-to-python-for-data-science)
> particularly Chapter 2, "Subsetting lists" video.*
In the DataCamp course, you have seen that you get a specific item in a list by using `list_name[index]`. So to get the first item of a list `a_list` you can specify `a_list[0]`, the second item of the list by `a_list[1]`:
```
# Instruction: run this cell to demonstrate getting a
# specific item of a list by its index
fruits = ['apple', 'banana', 'cherry', 'date', 'elderberry']
# same three lines of output as before, driven by a table of
# (index, ordinal-word) pairs and an f-string instead of + concatenation
for position, ordinal in [(0, 'first'), (1, 'second'), (2, 'third')]:
    print(f'{ordinal} item of list fruits is fruits[{position}]={fruits[position]}')
```
furthermore, you have seen you can get the last item of a list by using an index of `-1`:
```
# Instruction: run this cell to demonstration getting a
# specific item of a list by its index
# negative indices count from the end: -1 is the last item, -2 the one before
print('last item of list fruits is fruits[-1]=' + fruits[-1])
print('penultimate item of a list is -2, fruits[-2]=' + fruits[-2])
```
**Now its your turn** write python to print out the first, 2nd and last item of the `organisms` list
```
# Instruction: print out the first, 2nd and last item of the `organisms` list
### your line here!
### your line here!
### your line here!
```
### Getting a character from a string by using string_name[index]
We have already seen that strings and lists have some features in common as the `len()` function can be used to find the number of characters in a string or the number of items in a list. You will not be surprised to know that you can get individual characters from a string using the index just like for lists.
```
# Instruction: run this cell to demonstrate getting a
# specific character of a string by its index
my_string = 'A stitch in time, saves 9'
print('my string=' + my_string)
print('1st character is my_string[0]=' + my_string[0])
# label fixed: this line prints my_string[2], not my_string[1]
print('3rd character is my_string[2]=' + my_string[2])
# label fixed: this line prints my_string[4], not my_string[1]
print('5th character is my_string[4]=' + my_string[4])
print('last character is my_string[-1]=' + my_string[-1])
```
**Now its your turn** write Python:
```
seq_a = 'ATGGGGCATGCATGC'
# Instruction: print out the first character of seq_a
### your line here!
# Instruction: print out the 2nd character of seq_a
### your line here!
# Instruction: print out the 7th character of seq_a
### your line here!
# Instruction: print out the last character of seq_a
### your line here!
# Instruction: print out the penultimate (one before last) character of seq_a
### your line here!
```
write Python to print out the first and last character of the first item of the fruits list.
> *Hint the first items of fruits is* `fruits[0]`
```
# Instruction: print out the first item of the fruits list defined above
### your line here!
# now print out the first character of the first fruit
### your line here!
# now print out the last character of the first fruit
### your line here!
```
### Slicing lists by using list_name[start:end]
> *If this is unfamiliar, go back and look at the
> [DataCamp course](https://www.datacamp.com/courses/intro-to-python-for-data-science)
> particularly Chapter 2, "Subsetting lists" video.*
In the DataCamp course, you have seen that as well as getting specific characters from a list one can **slice** of parts of lists:
```
list_name[ start : end ]
inclusive : exclusive
```
* Remember Python indexes start at zero.
* Remember the `end` index is exclusive - so the slice will not include that element, ending on the one before
* If you leave out `start` and/or `end` the slice will be from the start of the list and/or to the end of the list.
So lets try this out in practice
```
# Instruction: run this cell to demonstrate list slicing
fruits = ['apple', 'banana', 'cherry', 'date', 'elderberry', 'fig', 'grape']
# index      0         1         2       3          4          5       6
# Slice the first two fruits
print('first two fruits - fruits[0:2]=', fruits[0:2])
# the previous slice started at the beginning so can leave out the zero
print('first two fruits - leaving out initial zero, fruits[:2]=', fruits[:2])
# create a new list with the first three fruits
first_3_fruits = fruits[:3]
print('new list first_3_fruits=', first_3_fruits)
# the second to the 5th fruit:
print('the second to the 5th fruit - fruits[1:5]=', fruits[1:5])
# all the fruits but the first one:
print('all the fruits but the first one - fruits[1:]=', fruits[1:])
# all the fruits but the last one:
# label fixed: this slice is fruits[:-1], not fruits[1:]
print('all the fruits but the last one - fruits[:-1]=', fruits[:-1])
veggies = ['aubergine ', 'broccoli', 'cabbage', 'daikon', 'endive', 'fennel', 'gherkin']
# Instruction: print out the first four veggies
### your line here!
# Instruction: print out the 5th to the last veggies
### your line here!
# Instruction: print out all the veggies except the first
### your line here!
# Instruction: print out all the veggies except the last
### your line here!
# Instruction: create a new list called 'select_v' with all the veggies except the first and last
### your line here!
# Instruction: print out the new check your select_v list
### your line here!
# Instruction: run this cell to check your select_v list
# python code to check your select_v list
# do not worry about understanding it yet - it just shows what can be done
if 'select_v' not in locals():
    print('Error: check your select_v list has not been defined! Try again!')
elif not isinstance(select_v, list):
    print('Error: check your select_v list is not a list! Try again!')
elif len(select_v) != 5 or select_v[-1] != 'fennel':
    print('Error: your select_v list is not correct!')
else:
    print('Well done your select_v list checks out!')
```
In the next section we will answer the question: what is the **type** of a slice from a list?
```
num_list = [0, 1, 2]
print('type of num_list is', type(num_list))
# Instruction: print out the slice containing the first two element of num_list
### your line here!
# what type is the slice in the previous line?
### In the next line replace **** with your predictions
print('I predict the type of the slice from a list to be ******')
# now write Python using type() to check your prediction.
### your line here!
```
### Slicing strings by using strings[start:end]
Guess what? You can slice strings in exactly the same way as lists. You have a go
```
b_seq = 10*'A' + 10*'G' + 10*'T'
# Instruction: what are the first 5 characters of b_seq
### your line here!
# Instruction: what are the last 11 characters of b_seq
### your line here!
# Instruction: what are the 10th to 19th characters of b_seq
### your line here!
# Instruction: what is the last character of b_seq
### your line here!
```
### Advanced slicing with increment list_name[start:end:increment]
By default a slice will include all elements between the indices `start` and the `end-1`.
If you specify an increment of two then you can select, every second element:
```
# Instruction: run this cell to demonstrate list increment
fruits = ['apple', 'banana', 'cherry', 'date', 'elderberry', 'fig', 'grape']
# index      0         1         2       3          4          5       6
# [::2] keeps every second item starting from index 0 (the 1st, 3rd, 5th, ...)
odd_fruits = fruits[::2]
print(odd_fruits)
veggies = ['aubergine ', 'broccoli', 'cabbage', 'daikon', 'endive', 'fennel', 'gherkin']
# Instruction: create list odd_veggies that has the 1st, 3rd, 5th veggies.
### your line here!
# Instruction: create list even_veggies that has the 2nd, 4th, 6th veggies
### your line here!
# Instruction: print the two lists
### your line here!
### your line here!
```
### Reversing a list (or string) using an increment of minus -1
try this out:
```
# Instruction: run this cell to demonstrate reversing a list
fruits = ['apple', 'banana', 'cherry', 'date', 'elderberry', 'fig', 'grape']
# reversed() yields the items back-to-front and list() materialises them,
# giving exactly the same result as the slice fruits[::-1]
reverse_fruits = list(reversed(fruits))
print(reverse_fruits)
```
exactly the same syntax can be use to reverse a string.
```
seq_c = 'AATGCC'
# Instruction: create a string seq_c_backwards that is the reverse of seq_c
### your line here!
# Instruction: print out seq_c_backwards
### your line here!
```
### Replacing list elements by `list_name[index] = new_value`
>
> *Advanced material: If you are confused or time is short go on to [homework B](./ex_B_homework.ipynb)*
As you have seen in the Data Camp Coures, it is easy to replace a list element:
```
# Instruction: run this cell to demonstrate replacing list elements
fruits = ['apple', 'banana', 'cherry', 'date', 'elderberry', 'fig', 'grape']
# indexed assignment overwrites a slot in place; tuple assignment
# replaces the second item (index 1) and the last item (index -1) at once
fruits[1], fruits[-1] = 'blueberry', 'grapefruit'
print(fruits)
```
**Now your turn:** Our table of organism and Chromosomes:
| Organism | Scientific name | Chromosome number |
| -------- | :--------------: | :---------------: |
| fruit **fli** | Drosophila melanogaster | 8 |
| rice | Oryza sativa | **20** |
| mouse | Mus musculus | **30** |
| wheat | Triticum aestivum | 42 |
| human | **Hom** sapiens | 46 |
| potato | Solanum tuberosum | 48 |
| **grape** white shark | Carcharodon carcharias | 82 |
| carp | Carassius carassius | **101** |
> *information from https://en.wikipedia.org/wiki/List_of_organisms_by_chromosome_count*
now has a number of mistakes shown in **bold**:
* **fli** should be *fly*
* **grape white shark** should be *great white shark*
* **hom sapiens** should be *Homo sapiens*
* **20** the number of chromosomes for rice should be *24*
* **30** the number of chromosomes for mouse should be *40*
* **101** the number of chromosomes for carp should be *100*
```
organisms = ['fruit fli', 'rice', 'mouse', 'wheat', 'human', 'potato', 'grape white shark', 'carp']
chromosomes = [8, 20, 30, 42, 46, 48, 82, 101]
# NOTE(review): chromosomes[2] (mouse, 30) also differs from the original
# table value of 40 but is not in the listed mistakes - confirm intended
scientific = ['Drosophila melanogaster',
              'Oryza sativa',
              'Mus musculus',
              'Triticum aestivum',
              'Hom sapiens',
              'Solanum tuberosum',
              'Carcharodon carcharias',
              'Carassius carassius']
# Instruction: correct the three lists by replacing the erroneous entries
### your lines here!
# Instruction: print the three lists after correction to check your work
### your lines here!
# Using Python you will soon learn it is possible to print out the three lists as a table with rows
# Instruction: run this cell to check your corrections
# zip pairs up the i-th entry of each list, giving one tuple per organism
for row in zip(organisms, chromosomes, scientific):
    print(row)
```
### Deleting list elements by `del(list_name[index])`
>
> *Advanced material: If you are confused or time is short go on to [homework B](./ex_B_homework.ipynb)*
As you have seen in the Data Camp Course, it is easy to delete a list element using `del()`:
```
# Instruction: run this cell to demonstrate deleting list elements
fruits = ['apple', 'banana', 'cherry', 'date', 'elderberry', 'fig', 'grape']
# delete the first fruits item
del(fruits[0])
# delete the last fruits item
del(fruits[-1])
print('after deleting first and last:', fruits)
```
### You **cannot** use `del()` on strings
>
> *Advanced material: If you are confused or time is short go on to [homework B](./ex_B_homework.ipynb)*
Although lists and strings are similar note that you cannot reassign specific string characters
in the same way as you can lists:
```
# Instruction: run this cell to demonstrate that you cannot reassign specific string items
# N.B. running this cell will cause a failure!
my_list = ['a', 'b']
my_list[0] = 'c' # works my_list now ['c', 'b']
my_string = 'ab'
my_string[0] = 'c' # Fails with TypeError: 'str' object does not support item assignment
```
Similarly, you cannot use del() on string characters:
```
# Instruction: run this cell to demonstrate that you cannot del specific string items
# N.B. running this cell will cause a failure!
my_list = ['a', 'b']
del(my_list[0]) # works my_list now ['b']
my_string = 'ab'
del(my_string[0]) # Fails with TypeError: 'str' object doesn't support item deletion
```
the reason is that in Python strings are *immutable* - once defined they cannot be altered.
If you want to perform this kind of manipulation on a string you can convert to a list, manipulate it and convert it back:
```
# Instruction: run this cell to demonstrate converting
# a string to a list, manipulation and back conversion
my_str = 'happily'
my_list = list(my_str)
my_list[0] ='H'
del(my_list[-3:-1]) # remove the 'il'
my_str = ''.join(my_list) # replace my_str with the manipulated one
print(my_str)
```
### Lists of lists
>
> *Advanced material: If you are confused or time is short go on to* **homework_C.ipynb**
In the Data camp course you saw how it makes sense to group data by using lists that contain lists in them:
```
# Instruction: run this cell to see simplified DataCamp List of lists example:
# house information as list of lists
house = [["hallway", 32],
["kitchen", 75.],
["living room", 33.]]
print(type(house))
print(house)
```
Let's return to the table:
| Organism | Scientific name | Chromosome number |
| -------- | :--------------: | :---------------: |
| fruit fly | Drosophila melanogaster | 8 |
| rice | Oryza sativa | 20 |
| mouse | Mus musculus | 40 |
| wheat | Triticum aestivum | 42 |
| human | Homo sapiens | 46 |
above we have stored the information in three separate lists. But this approach means that information could easily become out of sync. We could store the information in a single list:
```
table_single = ['fruit fly', 'Drosophila melanogaster', 8, 'rice', 'Oryza sativa', 24, ]
```
but this means that accessing/modifying the data is pretty cumbersome
* the organism is stored in indices 0, 3, 6, ...
* the Chromosome number in indices 2, 5, 8
Instead storing as in a list of lists makes sense:
```
# Instruction: run this cell to see start of table as a list of lists
table_lol = [['fruit fly', 'Drosophila melanogaster', 8],
['rice', 'Oryza sativa', 22],
['mouse', 'Mus musculus', 40]]
print('full table', table_lol)
print('first row of table: ', table_lol[0])
print('first organism: ', table_lol[0][0])
print('first Chromosome number : ', table_lol[0][-1])
# Instruction: use append or concatenation to complete
# table_lol with the data for wheat and human
print('completed table', table_lol)
```
You decide that you want to remove the `'fruit fly'` data from the table and notice there has been a mistake in the number of chromosomes for rice, which should be 24. Finally print the table to check your work.
```
# Instruction: make corrections to table_lol
### your lines here!
```
>
> *Advanced Python Hint: there are other, possibly better choices to store the table information:*
> * *Could have a list of named tuples or*
> * *An ordered dictionary of tuples*
## Homework C
Now go on and use strings and the string functions and methods introduced here to complete the **homework_C.ipynb** book
| github_jupyter |
# 2.3 KL divergence and cross-entropy
```
from IPython.display import IFrame
IFrame(src="https://cdnapisec.kaltura.com/p/2356971/sp/235697100/embedIframeJs/uiconf_id/41416911/partner_id/2356971?iframeembed=true&playerId=kaltura_player&entry_id=1_1x5pta90&flashvars[streamerType]=auto&flashvars[localizationCode]=en&flashvars[leadWithHTML5]=true&flashvars[sideBarContainer.plugin]=true&flashvars[sideBarContainer.position]=left&flashvars[sideBarContainer.clickToClose]=true&flashvars[chapters.plugin]=true&flashvars[chapters.layout]=vertical&flashvars[chapters.thumbnailRotator]=false&flashvars[streamSelector.plugin]=true&flashvars[EmbedPlayer.SpinnerTarget]=videoHolder&flashvars[dualScreen.plugin]=true&flashvars[hotspots.plugin]=1&flashvars[Kaltura.addCrossoriginToIframe]=true&&wid=1_l1sjg1vv" ,width='800', height='500')
```
Cross-entropy minimization is frequently used in optimization and
rare-event probability estimation. When comparing a distribution against
a fixed reference distribution, cross-entropy and KL divergence are
identical up to an additive constant. See more details in
[@murphy2012machine; @kullback1951information; @kullback1997information]
and the references therein.
The KL(Kullback--Leibler) divergence defines a special distance between
two discrete probability distributions
$
p=\left( \begin{array}{ccc}
p_1\\
\vdots \\
p_k
\end{array} \right),\quad q=\left( \begin{array}{ccc}
q_1\\
\vdots \\
q_k
\end{array} \right
)
$
with $
0\le p_i, q_i\le1$
and
$\sum_{i=1}^{k}p_i=\sum_{i=1}^{k}q_i=1$ by $
D_{\rm KL}(q,p)= \sum_{i=1}^k q_i\log \frac{q_i}{p_i}.$
```{admonition} Lemma
$D_{\rm KL}(q,p)$ works like a "distance" without the symmetry:
1. $D_{\rm KL}(q,p)\ge0$;
2. $D_{\rm KL}(q,p)=0$ if and only if $p=q$;
```
```{admonition} Proof
*Proof.* We first note that the elementary inequality
$\log x \le x - 1, \quad\mathrm{for\ any\ }x\ge0,$ and the equality
holds if and only if $x=1$.
$-D_{\rm KL}(q,p) = - \sum_{i=1}^k q_i\log \frac{q_i}{p_i} = \sum_{i=1}^k q_i\log \frac{p_i}{q_i} \le \sum_{i=1}^k q_i( \frac{p_i}{q_i} - 1) = 0.$
And the equality holds if and only if
$\frac{p_i}{q_i} = 1 \quad \forall i = 1:k.$
```
Define cross-entropy for distribution $p$ and $q$ by
$
H(q,p) = - \sum_{i=1}^k q_i \log p_i,$ and the entropy for distribution
$q$ by $
H(q) = - \sum_{i=1}^k q_i \log q_i.$ Note that
$D_{\rm KL}(q,p)= \sum_{i=1}^k q_i\log \frac{q_i}{p_i} = \sum_{i=1}^k q_i \log q_i - \sum_{i=1}^k q_i \log p_i$
Thus,
$$
H(q,p) = H(q) + D_{\rm KL}(q,p).
$$ (rel1)
It follows from the [relation](rel1) that
$$
\mathop{\arg\min}_p D_{\rm KL}(q,p)=\mathop{\arg\min}_p H(q,p).
$$ (rel2)
The concept of cross-entropy can be used to define a loss function in
machine learning and optimization. Let us assume $y_i$ is the true label
for $x_i$, for example $y_i = e_{k_i}$ if $x_i \in A_{k_i}$. Consider
the predicted distribution
$p(x;\theta) = \frac{1}{\sum\limits_{i=1}^k e^{w_i x+b_i}}
\begin{pmatrix}
e^{w_1 x+b_1}\\
e^{w_2 x+b_2}\\
\vdots\\
e^{w_k x+b_k}
\end{pmatrix}
= \begin{pmatrix}
p_1(x; \theta) \\
p_2(x; \theta) \\
\vdots \\
p_k(x; \theta)
\end{pmatrix}$
for any data $x \in A$. By the [relation](rel2), the minimization of KL divergence is
equivalent to the minimization of the cross-entropy, namely
$\mathop{\arg\min}_{\theta} \sum_{i=1}^N D_{\rm KL}(y_i, p(x_i;\theta)) = \mathop{\arg\min}_{\theta} \sum_{i=1}^N H(y_i, p(x_i; \theta)).$
Recall that we have all data
$D = \{(x_1,y_1),(x_2,y_2),\cdots, (x_N, y_N)\}$. Then, it is natural to
consider the loss function as following:
$\sum_{j=1}^N H(y_j, p(x_j; \theta)),$ which measures the
distance between the real label and predicted one for all data. In the
meantime, we can check that
$\begin{aligned}
\sum_{j=1}^N H(y_j, p(x_j; \theta))&=-\sum_{j=1}^N y_j \cdot \log p(x_j; \theta )\\
&=-\sum_{j=1}^N \log p_{i_j}(x_j; \theta) \quad (\text{because}~y_j = e_{i_j}~\text{for}~x_j \in A_{i_j})\\
&=-\sum_{i=1}^k \sum_{x\in A_i} \log p_{i}(x; \theta) \\
&=-\log \prod_{i=1}^k \prod_{x\in A_i} p_{i}(x; \theta)\\
& = L(\theta)
\end{aligned}$ with $L(\theta)$
defined in as
$L( \theta) = - \sum_{i=1}^k \sum_{x\in A_i} \log p_{i}(x; \theta).$
That is to say, the logistic regression loss function defined by
likelihood earlier is exactly the loss function defined by measuring
the distance between real label and predicted one via cross-entropy. We
can note $\label{key}
\min_{ \theta} L_\lambda( \theta) \Leftrightarrow \min_{ \theta} \sum_{j=1}^N H(y_j, p(x_j; \theta)) + \lambda R(\| \theta\|)
\Leftrightarrow \min_{ \theta} \sum_{j=1}^N D_{\rm KL}(y_j, p(x_j; \theta)) + \lambda R(\| \theta\|).$
| github_jupyter |
```
!pip install tf-nightly-2.0-preview
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
def plot_series(time, series, format="-", start=0, end=None):
    """Plot the slice [start:end] of *series* against *time* on the current axes."""
    window_t = time[start:end]
    window_v = series[start:end]
    plt.plot(window_t, window_v, format)
    plt.xlabel("Time")
    plt.ylabel("Value")
    plt.grid(False)
def trend(time, slope=0):
    """Linear trend: slope * time (elementwise when *time* is an array)."""
    return time * slope
def seasonal_pattern(season_time):
    """Just an arbitrary pattern, you can change it if you wish"""
    early = np.cos(season_time * 6 * np.pi)
    late = 2 / np.exp(9 * season_time)
    return np.where(season_time < 0.1, early, late)


def seasonality(time, period, amplitude=1, phase=0):
    """Repeats the same pattern at each period"""
    cycle_position = ((time + phase) % period) / period
    return amplitude * seasonal_pattern(cycle_position)
def noise(time, noise_level=1, seed=None):
    """Gaussian noise with one sample per element of *time*, scaled by *noise_level*."""
    generator = np.random.RandomState(seed)
    samples = generator.randn(len(time))
    return samples * noise_level
# Synthetic daily series: ten years of data, one point per day.
time = np.arange(10 * 365 + 1, dtype="float32")
baseline = 10
# NOTE(review): this `series` assignment and the duplicate `baseline = 10`
# below are dead code - both are overwritten before use.
series = trend(time, 0.1)
baseline = 10
amplitude = 40
slope = 0.005
noise_level = 3
# Create the series
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
# Update with noise
series += noise(time, noise_level, seed=51)
# Train/validation split on the time axis.
split_time = 3000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
# Hyper-parameters for the windowed dataset pipeline.
window_size = 20
batch_size = 32
shuffle_buffer_size = 1000
plot_series(time, series)
def windowed_dataset(series, window_size, batch_size, shuffle_buffer):
    """Build a shuffled tf.data pipeline of (window, next_value) training pairs."""
    ds = tf.data.Dataset.from_tensor_slices(series)
    ds = ds.window(window_size + 1, shift=1, drop_remainder=True)
    ds = ds.flat_map(lambda w: w.batch(window_size + 1))
    ds = ds.shuffle(shuffle_buffer)
    ds = ds.map(lambda w: (w[:-1], w[-1]))
    return ds.batch(batch_size).prefetch(1)
# Reset graph state and seed both TF and NumPy for reproducibility.
tf.keras.backend.clear_session()
tf.random.set_seed(51)
np.random.seed(51)
tf.keras.backend.clear_session()
dataset = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)
# Bidirectional-LSTM model; the first Lambda adds a channel axis, the last
# Lambda rescales the output (helps SGD cope with the series' magnitude).
model = tf.keras.models.Sequential([
    tf.keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1),
                           input_shape=[None]),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
    tf.keras.layers.Dense(1),
    tf.keras.layers.Lambda(lambda x: x * 10.0)
])
# Learning-rate finder: grow the LR exponentially each epoch, then
# plot loss vs LR to pick a good fixed rate for the real training run.
lr_schedule = tf.keras.callbacks.LearningRateScheduler(
    lambda epoch: 1e-8 * 10**(epoch / 20))
optimizer = tf.keras.optimizers.SGD(lr=1e-8, momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(),
              optimizer=optimizer,
              metrics=["mae"])
history = model.fit(dataset, epochs=100, callbacks=[lr_schedule])
# Log-x plot of loss against learning rate.
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-8, 1e-4, 0, 30])
# Re-seed and rebuild for the real training run.
tf.keras.backend.clear_session()
tf.random.set_seed(51)
np.random.seed(51)
tf.keras.backend.clear_session()
dataset = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)
# Same architecture as the LR-finder model but with a larger output scale.
model = tf.keras.models.Sequential([
    tf.keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1),
                           input_shape=[None]),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
    tf.keras.layers.Dense(1),
    tf.keras.layers.Lambda(lambda x: x * 100.0)
])
# NOTE(review): `lr=` is the legacy Keras argument (newer releases use
# `learning_rate=`); kept as-is for the tf-nightly-2.0 environment pinned above.
model.compile(loss="mse", optimizer=tf.keras.optimizers.SGD(lr=1e-5, momentum=0.9),metrics=["mae"])
history = model.fit(dataset,epochs=500,verbose=1)
# Predict one step ahead for every window of the full series, then keep
# only the predictions that fall inside the validation range.
forecast = []
results = []
# FIX: the original loop used `time` as its counter, clobbering the `time`
# axis array defined earlier; a dedicated name avoids the shadowing.
for step in range(len(series) - window_size):
    forecast.append(model.predict(series[step:step + window_size][np.newaxis]))
forecast = forecast[split_time - window_size:]
results = np.array(forecast)[:, 0, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, results)
# Validation MAE of the forecast.
tf.keras.metrics.mean_absolute_error(x_valid, results).numpy()
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
#-----------------------------------------------------------
# Retrieve a list of list results on training and test data
# sets for each training epoch
#-----------------------------------------------------------
mae = history.history['mae']
loss = history.history['loss']
epochs = range(len(loss))  # Get number of epochs
#------------------------------------------------
# Plot MAE and Loss
#------------------------------------------------
plt.plot(epochs, mae, 'r')
plt.plot(epochs, loss, 'b')
plt.title('MAE and Loss')
plt.xlabel("Epochs")
# FIX: the curves are MAE and loss, not accuracy - label the axis accordingly.
plt.ylabel("MAE / Loss")
plt.legend(["MAE", "Loss"])
plt.figure()
# Zoom in on the epochs after the initial transient.
epochs_zoom = epochs[200:]
mae_zoom = mae[200:]
loss_zoom = loss[200:]
#------------------------------------------------
# Plot Zoomed MAE and Loss
#------------------------------------------------
plt.plot(epochs_zoom, mae_zoom, 'r')
plt.plot(epochs_zoom, loss_zoom, 'b')
plt.title('MAE and Loss')
plt.xlabel("Epochs")
plt.ylabel("MAE / Loss")
plt.legend(["MAE", "Loss"])
plt.figure()
```
| github_jupyter |
<table>
<tr><td align="right" style="background-color:#ffffff;">
<img src="../images/logo.jpg" width="20%" align="right">
</td></tr>
<tr><td align="right" style="color:#777777;background-color:#ffffff;font-size:12px;">
Abuzer Yakaryilmaz | April 30, 2019 (updated)
</td></tr>
<tr><td align="right" style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;">
This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros.
</td></tr>
</table>
$ \newcommand{\bra}[1]{\langle #1|} $
$ \newcommand{\ket}[1]{|#1\rangle} $
$ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
$ \newcommand{\dot}[2]{ #1 \cdot #2} $
$ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
$ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
$ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
$ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
$ \newcommand{\mypar}[1]{\left( #1 \right)} $
$ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
$ \newcommand{\onehalf}{\frac{1}{2}} $
$ \newcommand{\donehalf}{\dfrac{1}{2}} $
$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
$ \newcommand{\vzero}{\myvector{1\\0}} $
$ \newcommand{\vone}{\myvector{0\\1}} $
$ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $
$ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
$ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
$ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
<h2> <font color="blue"> Solutions for </font>Rotation Automata</h2>
<a id="task1"></a>
<h3> Task 1 </h3>
Do the same task given above by using different angles.
Test at least three different angles.
Please modify the code above.
<h3>Solution</h3>
Any odd multiple of $ \frac{\pi}{16} $ works: $ i \frac{\pi}{16} $, where $ i \in \{1,3,5,7,\ldots\} $
<a id="task2"></a>
<h3> Task 2 </h3>
Let $ \mathsf{p} = 11 $.
Determine an angle of rotation such that when the length of stream is a multiple of $ \sf p $, then we observe only state $ 0 $, and we can also observe state $ 1 $, otherwise.
Test your rotation by using a quantum circuit. Execute the circuit for all streams of lengths from 1 to 11.
<h3>Solution</h3>
We can pick any angle $ k\frac{2\pi}{11} $ for $ k \in \{1,\ldots,10\} $.
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi
from random import randrange

# the angle of rotation: a random non-zero multiple of 2*pi/11
r = randrange(1, 11)
print("the picked angle is", r, "times of 2pi/11")
print()
theta = r * 2 * pi / 11

# we read streams of length from 1 to 11
for i in range(1, 12):
    # quantum circuit with one qubit and one bit
    qreg = QuantumRegister(1)
    creg = ClassicalRegister(1)
    mycircuit = QuantumCircuit(qreg, creg)
    # the stream of length i
    for j in range(i):
        mycircuit.ry(2 * theta, qreg[0])  # apply one rotation for each symbol
    # we measure after reading the whole stream
    mycircuit.measure(qreg[0], creg[0])
    # execute the circuit 1000 times
    job = execute(mycircuit, Aer.get_backend('qasm_simulator'), shots=1000)
    counts = job.result().get_counts(mycircuit)
    # FIX: corrected "lenght" -> "length" in the printed message
    print("stream of length", i, "->", counts)
```
<a id="task3"></a>
<h3> Task 3 </h3>
List down 10 possible different angles for Task 2, where each angle should be between 0 and $2\pi$.
<h3>Solution</h3>
Any angle $ k\frac{2\pi}{11} $ for $ k \in \{1,\ldots,10\} $.
<a id="task4"></a>
<h3> Task 4 </h3>
For each stream of length from 1 to 10, experimentally determine the best angle of rotation by using your circuit.
<h3>Solution</h3>
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi
from random import randrange

# for each stream of length from 1 to 10
for i in range(1, 11):
    # we try each angle of the form k*2*pi/11 for k=1,...,10
    # we try to find the best k for which we observe 1 the most
    number_of_one_state = 0
    best_k = 1
    all_outcomes_for_i = "length " + str(i) + "-> "
    for k in range(1, 11):
        theta = k * 2 * pi / 11
        # quantum circuit with one qubit and one bit
        qreg = QuantumRegister(1)
        creg = ClassicalRegister(1)
        mycircuit = QuantumCircuit(qreg, creg)
        # the stream of length i
        for j in range(i):
            mycircuit.ry(2 * theta, qreg[0])  # apply one rotation for each symbol
        # we measure after reading the whole stream
        mycircuit.measure(qreg[0], creg[0])
        # execute the circuit 10000 times
        job = execute(mycircuit, Aer.get_backend('qasm_simulator'), shots=10000)
        counts = job.result().get_counts(mycircuit)
        # FIX: '1' is absent from counts when it is never observed; the
        # original `counts['1']` raised KeyError in that case.
        ones = int(counts.get('1', 0))
        all_outcomes_for_i = all_outcomes_for_i + str(k) + ":" + str(ones) + " "
        if ones > number_of_one_state:
            number_of_one_state = ones
            best_k = k
    print(all_outcomes_for_i)
    print("for length", i, ", the best k is", best_k)
    print()
```
<a id="task5"></a>
<h3> Task 5 </h3>
Let $ \mathsf{p} = 31 $.
Create a circuit with three quantum states and three classical states.
Rotate the qubits with angles $ 3\frac{2\pi}{31} $, $ 7\frac{2\pi}{31} $, and $ 11\frac{2\pi}{31} $, respectively.
Execute your circuit for all streams of lengths from 1 to 30. Check whether the number of state $ \ket{000} $ is less than half or not.
<i>Note that whether a key is in dictionary or not can be checked as follows:</i>
if '000' in counts.keys():
c = counts['000']
else:
c = 0
<h3>Solution</h3>
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi
from random import randrange

# the angles of rotations
theta1 = 3*2*pi/31
theta2 = 7*2*pi/31
theta3 = 11*2*pi/31

# we read streams of length from 1 to 30
for stream_len in range(1, 31):
    # quantum circuit with three qubits and three bits
    qreg = QuantumRegister(3)
    creg = ClassicalRegister(3)
    mycircuit = QuantumCircuit(qreg, creg)
    # read the stream: apply each qubit's rotation once per symbol
    for _ in range(stream_len):
        for qubit_index, angle in enumerate((theta1, theta2, theta3)):
            mycircuit.ry(2 * angle, qreg[qubit_index])
    # we measure after reading the whole stream
    mycircuit.measure(qreg, creg)
    # execute the circuit N times
    N = 1000
    job = execute(mycircuit, Aer.get_backend('qasm_simulator'), shots=N)
    counts = job.result().get_counts(mycircuit)
    print(counts)
    # dict.get covers the case where '000' was never observed
    c = counts.get('000', 0)
    print('000 is observed', c, 'times out of', N)
    percentange = round(c/N*100, 1)
    print("the ratio of 000 is ", percentange, "%")
    print()
```
<a id="task6"></a>
<h3> Task 6 </h3>
Let $ \mathsf{p} = 31 $.
Create a circuit with three quantum states and three classical states.
Rotate the qubits with random angles of the form $ k\frac{2\pi}{31}, $ where $ k
\in \{1,\ldots,30\}.$
Execute your circuit for all streams of lengths from 1 to 30.
Calculate the maximum percentage of observing the state $ \ket{000} $.
Repeat this task for a few times.
<i>Note that whether a key is in dictionary or not can be checked as follows:</i>
if '000' in counts.keys():
c = counts['000']
else:
c = 0
<h3>Solution</h3>
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi
from random import randrange

# Task 6: rotate three qubits by random multiples of 2*pi/31 per symbol and
# track the maximum observed frequency of outcome '000' over stream lengths 1..30.

# randomly picked angles of rotations
k1 = randrange(1,31)
theta1 = k1*2*pi/31
k2 = randrange(1,31)
theta2 = k2*2*pi/31
k3 = randrange(1,31)
theta3 = k3*2*pi/31
print("k1 =",k1,"k2 =",k2,"k3 =",k3)
print()

max_percentange = 0  # largest percentage of '000' seen over all stream lengths

# we read streams of length from 1 to 30
for i in range(1,31):
    # quantum circuit with three qubits and three bits
    qreg = QuantumRegister(3)
    creg = ClassicalRegister(3)
    mycircuit = QuantumCircuit(qreg,creg)
    # the stream of length i
    for j in range(i):
        # apply rotations for each symbol
        mycircuit.ry(2*theta1,qreg[0])
        mycircuit.ry(2*theta2,qreg[1])
        mycircuit.ry(2*theta3,qreg[2])
    # we measure after reading the whole stream
    mycircuit.measure(qreg,creg)
    # execute the circuit N times
    N = 1000
    job = execute(mycircuit,Aer.get_backend('qasm_simulator'),shots=N)
    counts = job.result().get_counts(mycircuit)
    # print(counts)
    # '000' is absent from counts when it was never observed
    if '000' in counts.keys():
        c = counts['000']
    else:
        c = 0
    # print('000 is observed',c,'times out of',N)
    percentange = round(c/N*100,1)
    if max_percentange < percentange: max_percentange = percentange
    # print("the ratio of 000 is ",percentange,"%")
    # print()
print("max percentage is",max_percentange)
```
<a id="task7"></a>
<h3> Task 7 </h3>
Repeat Task 6 by using four and five qubits.
<h3>Solution</h3>
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi
from random import randrange

# Task 7: repeat the Task 6 experiment with four (or five) qubits.
number_of_qubits = 4
#number_of_qubits = 5

# randomly picked angles of rotations, one per qubit
theta = []
for i in range(number_of_qubits):
    k = randrange(1,31)
    print("k",str(i),"=",k)
    theta += [k*2*pi/31]
# print(theta)

# build the all-zeros outcome string, e.g. '0000' for four qubits
zeros = ''
for i in range(number_of_qubits):
    zeros = zeros + '0'
print("zeros = ",zeros)
print()

max_percentange = 0  # largest percentage of the all-zeros outcome seen so far

# we read streams of length from 1 to 30
for i in range(1,31):
    # quantum circuit with qubits and bits
    qreg = QuantumRegister(number_of_qubits)
    creg = ClassicalRegister(number_of_qubits)
    mycircuit = QuantumCircuit(qreg,creg)
    # the stream of length i
    for j in range(i):
        # apply rotations for each symbol
        for k in range(number_of_qubits):
            mycircuit.ry(2*theta[k],qreg[k])
    # we measure after reading the whole stream
    mycircuit.measure(qreg,creg)
    # execute the circuit N times
    N = 1000
    job = execute(mycircuit,Aer.get_backend('qasm_simulator'),shots=N)
    counts = job.result().get_counts(mycircuit)
    # print(counts)
    # the all-zeros key is absent from counts when it was never observed
    if zeros in counts.keys():
        c = counts[zeros]
    else:
        c = 0
    # print('000 is observed',c,'times out of',N)
    percentange = round(c/N*100,1)
    if max_percentange < percentange: max_percentange = percentange
    # print("the ratio of 000 is ",percentange,"%")
    # print()
print("max percentage is",max_percentange)
```
| github_jupyter |
# Classification of UK Charities
In this notebook, data from several sources has been used to classify UK charities
```
from google.colab import drive
drive.mount('/content/drive/')

import json
import pandas as pd
import networkx as nx
from numpy.core.numeric import NaN

# Load the charity-trustee table: one record per (trustee, charity) pairing.
# utf-8-sig strips the BOM that the Charity Commission export carries.
with open('drive/My Drive/UK_Data/json/publicextract.charity_trustee.json', encoding="utf-8-sig") as f:
    data = json.load(f)
print(len(data))
print(json.dumps(data[0],indent=4))
```
- In the charity_trustee table we have all combinations of trustees and charities.
- Website https://register-of-charities.charitycommission.gov.uk/sector-data/sector-overview
```
# Group the trusteeship records two ways: by trustee and by organisation.
members = {}  # trustee_id -> list of trusteeship records
orgs = {}     # organisation_number -> list of trusteeship records
for trustee in data:
    tid = trustee['trustee_id']
    oid = trustee['organisation_number']
    # setdefault replaces the original insert-or-append if/else pairs
    members.setdefault(tid, []).append(trustee)
    orgs.setdefault(oid, []).append(trustee)
print("Total Trustees : ",len(members))
print("Total Organizations: ",len(orgs))

# Load the main charity table.
with open('drive/My Drive/UK_Data/json/publicextract.charity.json', encoding="utf-8-sig") as f:
    charity_data = json.load(f)
print(len(charity_data))
print(json.dumps(charity_data[3],indent=4))

# Count registered main charities, registered linked charities and removed
# charities; nc counts registered main charities with no activities text.
# NOTE(review): indentation was reconstructed from the flattened notebook
# export - confirm the nc branch nests under the "main" case as intended.
total_registered_main = 0
total_registered_linked = 0
total_removed = 0
nc = 0
for i in charity_data:
    if(i['charity_registration_status']!="Removed"):
        if(i['linked_charity_number']!=0):
            total_registered_linked+=1
        else:
            total_registered_main+=1
            if(i['charity_activities']==None):
                nc+=1
    else:
        total_removed+=1
print("Total Registered Main : ",total_registered_main)
print("Total Registered Linked: ",total_registered_linked)
print("Total Removed : ",total_removed)
print(nc)

# Spot-check the record(s) carrying registered charity number 200027.
for i in charity_data:
    if(i['registered_charity_number']==200027):
        print(i)
# print(charity_data[0])
```
* For charity classification we have charity_activities field in the charity table which has a description(one line summary) of how the charity spends its money, might be used for classification. This field is not None only for the parent organisations, i.e. orgs with linked charity no. = 0.
* charity_governing_document table has a charitable_objects field which is a paragraph description. Might be used for charity classification.
* there seems to be a discrepancy in the charity table - charity no. 200027 points to POTTERNE MISSION ROOM AND TRUST but when searched online on the website 200027 points to RURAL MINISTRIES => RESOLVED - the latter seems to be the main organisation and the former is a linked charity which was removed
* there are many cases where a single charity is classified into various types i.e. it has various "what" descriptions in the charity classification table. same goes for other fields - "WHO" and "HOW". Therefore, the provided classification will create ambiguity if directly used.
# Charities with income > 10 Million
```
#Lets try figuring out charities with income over 10 Million Pounds
charity10m = []
count10m = 0
for charity in charity_data:
    if(charity['charity_registration_status']!="Removed" and
       charity['latest_income']!= None and
       charity['latest_income'] >= 10000000):
        count10m+=1
        charity10m.append(charity)
print(count10m)

#Sort charity10m in descending order of income
# FIX: replaced the hand-rolled O(n^2) bubble sort with list.sort, which is
# O(n log n) and stable, so equal incomes keep their original relative order
# exactly as the bubble sort (strict-less swap) did.
charity10m.sort(key=lambda ch: ch['latest_income'], reverse=True)

# charity10m[:10]
for i in charity10m[0:10]:
    print(i['charity_name'],"|",i['registered_charity_number'],"|",i['organisation_number'])
print(json.dumps(charity10m[0],indent=3))

# create a dict for accessing charity information through organisation number
charity = {}
for ch in charity_data:
    charity[ch['organisation_number']] = ch
# charity

# Load the governing-document table.
with open('drive/My Drive/UK_Data/json/publicextract.charity_governing_document.json', encoding="utf-8-sig") as f:
    charity_governing_document = json.load(f)
print(len(charity_governing_document))

# create a dict for accessing governing document info using organisation number
charity_governing_document_data = {}
for i in charity_governing_document:
    charity_governing_document_data[i['organisation_number']] = i

# check if all charities have data in the charitable object column
# count: parent orgs with empty objects; c2: linked charities with empty objects
count = 0
c2=0
empty_objects = []
for i in charity_governing_document:
    if(i['charitable_objects'] == None):
        empty_objects.append(i)
        # print(i)
        if(i['linked_charity_number']!=0):
            c2+=1
        else:
            count+=1
print(count)
print(c2)
```
There are 27 parent orgs and 13 linked charities in the charity_governing_document table which have an empty charitable_objects column.
```
# check how many charities with empty charitable objects are still registered
registered = 0
removed = 0
for i in empty_objects:
    # look up the charity's current status via its organisation number
    if(charity[i['organisation_number']]['charity_registration_status'] == "Removed"):
        removed+=1
    else:
        registered+=1
        # print(i)
print("Registered = ",registered)
print("Removed = ",removed)
```
Out of 40 (13+27) charities that have empty charitable objects, 24 are still registered and only 16 are removed. However we can still simply ignore these charities since they only contribute to an extremely small fraction of all charities in the dataset.
```
# Load the charity classification table (tab-separated text export).
charity_classification = pd.read_csv("drive/My Drive/UK_Data/text/publicextract.charity_classification.txt",sep="\t")
# refer to the data definition file on the website to understand detailed description of each table and the columns
print(len(charity_classification))
%load_ext google.colab.data_table
charity_classification.head(10)

from collections import Counter
Counter(charity_classification['classification_description'])

# we are only concerned with the descriptions corresponding to 'What' classification type
filtered = charity_classification[charity_classification['classification_type'] == 'What']
print(filtered.shape)
filtered.head(10)
Counter(filtered['classification_description'])

# count the number of classifications for each organisation
classification_count = Counter(filtered['organisation_number'])
pc=0
# show the first 10 (organisation_number, classification count) pairs
for i in classification_count.items():
    print(i)
    pc+=1
    if pc==10:
        break

# we only want to work with charities that are still registered
ar = []
for i in charity_data:
    if(i['charity_registration_status']!="Removed"):
        ar.append(i['organisation_number'])
len(set(ar))

# check the number of charities from registered charities that have a classification provided in the classification table
final = set(ar).intersection(set(classification_count.keys()))
len(final)

# counting the number of charities having n classifications
count_num_type = {}
for i in classification_count:
    if(i in final):
        idx = classification_count[i]
        if(idx in count_num_type):
            count_num_type[idx]+=1
        else:
            count_num_type[idx] = 1
print(sum(count_num_type.values()))
count_num_type
```
- a lot of charities (71916) have just 1 classification
- around 100,000 charities are classified into more than 1 category
```
# lets work with charities that have a unique classification - classified only into a single category
# (i.e. registered charities with exactly one 'What' classification row)
unique_category = []
for i in classification_count:
    if(classification_count[i]==1 and (i in final) ):
        unique_category.append(i)
unique_category[:10]

# filtering data for charities with unique category
filtered_single = filtered[filtered['organisation_number'].isin(unique_category) ]
filtered_single.shape

# view classification of charities, it will not include overlapping results since each charity is only assigned a single category
Counter(filtered_single['classification_description'])
```
# Non Profit Classifier
- Mapping UK charities to US charity classification
- https://github.com/ma-ji/npo_classifier
- based on NTEE classification - https://en.wikipedia.org/wiki/National_Taxonomy_of_Exempt_Entities#:~:text=The%20National%20Taxonomy%20of%20Exempt%20Entities%20%28NTEE%29%20is,case%20when%20the%20organization%20is%20recognized%20as%20tax-exempt
- NTEE code structure - https://nccs.urban.org/project/national-taxonomy-exempt-entities-ntee-codes#overview
```
!pip install transformers
import requests
# SECURITY NOTE(review): this downloads and exec()s code straight from the
# repository's master branch; pin a specific commit or vendor the file so the
# executed code cannot change underneath you.
exec(requests.get('https://raw.githubusercontent.com/ma-ji/npo_classifier/master/API/npoclass.py').text)
!pip3 install pickle5
import pickle5 as pickle
```
- refer to the documentation on github website to learn more about using the ML classifier API
- API => npoclass(inputs, gpu_core=True, model_path=None, ntee_type='bc', n_jobs=4, backend='multiprocessing', batch_size_dl=64, verbose=1)
```
# testing out the classifier
# make sure you have uploaded the model data to your drive so that it can be accessed by the API
# positional arguments follow the documented npoclass signature:
# inputs, gpu_core, model_path, ntee_type
out = npoclass("helping poor individuals",
               True,
               "drive/My Drive/UK_Data/npoclass_model_bc/",
               'bc')
print(out)
```
It takes more time than expected to classify a single charity by the API, so it might take a huge amount of time if we want to classify all 170k charities this way.
```
# NTEE broad-category roman numerals mapped to their human-readable labels.
category_map = {
    "I": "Arts, Culture, and Humanities",
    "II": "Education",
    "III": "Environment and Animals",
    "IV": "Health",
    "V": "Human Services",
    "VI": "International, Foreign Affairs",
    "VII": "Public, Societal Benefit",
    "VIII": "Religion Related",
    "IX": "Mutual/Membership Benefit",
    "X": "Unknown, Unclassified",
}
# Empty, typed frame that will accumulate the ML-classifier results.
# More columns can be added later if required.
_result_columns = {
    'organisation_number': 'int',
    'registered_charity_number': 'int',
    'charity_name': 'str',
    'classification_code_us': 'str',
    'classification_description_us': 'str',
    'confidence': 'float',
}
classification_us_data = pd.DataFrame(
    {name: pd.Series(dtype=kind) for name, kind in _result_columns.items()})
# 'final' holds the organisation numbers of all registered charities that
# also have classification data; the charity_activities text for each will
# be fed to the classifier
parent_list = list(final)
cc = 0
# zip compression settings reused by the checkpoint writes below
compression_opts = dict(method='zip',
                        archive_name='classification_us_data.csv')
# A list of charity descriptions can be sent to the API in one call, which
# saves time versus one request per charity.  Classification is done in
# chunks so that, if the notebook disconnects or the code errors mid-run,
# everything fetched so far is already saved in the output file.  To resume,
# download/reload the output file and restart the loop from where it broke,
# adjusting the range start/stop/step as required.
CHUNK_SIZE = 20  # number of charities processed per API call
for i in range(630, 650, CHUNK_SIZE):  # adjust the range to the ids you need
    # Organisation numbers for this chunk.
    chunk_list = parent_list[i:i + CHUNK_SIZE]
    print(chunk_list)
    # Activity descriptions for the chunk; charities with no description are
    # skipped here and back-filled as "X" (unknown) after prediction.
    input_list = []
    for j in chunk_list:
        if charity[j]['charity_activities'] is not None:
            input_list.append(charity[j]['charity_activities'])
    # Get predictions for the whole chunk in one API call.
    pred = npoclass(input_list,
                    True,
                    "drive/My Drive/UK_Data/npoclass_model_bc/",
                    'bc',
                    batch_size_dl=128)
    # Re-insert placeholder results so pred lines up index-for-index with
    # chunk_list again.
    for jj in range(len(chunk_list)):
        if charity[chunk_list[jj]]['charity_activities'] is None:
            pred.insert(jj, {"recommended": "X"})
    # Collect the chunk's rows and append them with one concat call
    # (DataFrame.append was deprecated in pandas 1.4 and removed in 2.0).
    rows = []
    for org_num, element in zip(chunk_list, pred):
        code = element['recommended']
        rows.append({'organisation_number': int(org_num),
                     'registered_charity_number': int(charity[org_num]['registered_charity_number']),
                     'charity_name': charity[org_num]['charity_name'],
                     'classification_code_us': code,
                     'classification_description_us': category_map[code],
                     'confidence': 0 if code == 'X' else round(element['probabilities'][code], 2)})
    classification_us_data = pd.concat([classification_us_data, pd.DataFrame(rows)],
                                       ignore_index=True)
    # Overwrite the checkpoint file every chunk so no progress is lost.
    classification_us_data.to_csv('classification_us_data.zip', index=False,
                                  compression=compression_opts)
classification_us_data[:10]
# predict the classification using the charitable object text instead of
# charity activities (this could be integrated into the block above as well)
# NOTE(review): this is plain assignment, not a copy — until the rename()
# below rebinds the name, classification_us_data1 is the same object as
# classification_us_data
classification_us_data1 = classification_us_data
activities = []
cobjects = []
codes = []
descp = []
confd = []
ct = 0
for i in classification_us_data1["organisation_number"]:
    print(ct)
    # charitable object text for this organisation
    obj = charity_governing_document_data[i]["charitable_objects"]
    cobjects.append(obj)
    activities.append(charity[i]["charity_activities"])
    if(obj==None):
        # no text to classify: mark as unknown/unclassified with confidence 0
        codes.append("X")
        descp.append("Unknown, Unclassified")
        confd.append(0)
    else:
        # one API call per charity; [0] takes the single result
        pred = npoclass(obj,
                True,
                "drive/My Drive/UK_Data/npoclass_model_bc/",
                'bc',
                batch_size_dl=128)[0]
        codes.append(pred['recommended'])
        descp.append(category_map[pred['recommended']])
        confd.append(round(pred['probabilities'][pred['recommended']],2))
    ct+=1
# rename the activity-based columns so the object-based results from this
# pass can sit alongside them as *2 columns
classification_us_data1 = classification_us_data1.rename(columns={"classification_code_us":'classification_code1',
                                                                  "classification_description_us":'classification_description1',
                                                                  "confidence":"confidence1"})
classification_us_data1['charity_activities'] = activities
classification_us_data1['charitable_objects'] = cobjects
classification_us_data1['classification_code2'] = codes
classification_us_data1['classification_description2'] = descp
classification_us_data1['confidence2'] = confd
classification_us_data1.head()
# save the data frame to csv
classification_us_data1.to_csv('classification_us_data_WHOLE.zip',
                               index=False,
                               compression=compression_opts)
# count how often the activity-based and object-based passes disagree
dc=0 # difference count
for index, row in classification_us_data1.iterrows():
    if(row['classification_code1'] !=row['classification_code2']):
        dc+=1
print(dc)
```
This might not be an appropriate classification approach for us, since it does not produce the same output when given the charity activity versus the charitable object. Moreover, fetching a classification from the API takes some time, so classifying all the charities this way would take a long time. This could be explored further in future, but for now the focus shifts to a new classification found below.
### Classification by charityclassification.org.uk - UKCAT - Rules Based Classification
- src - https://charityclassification.org.uk
- refer to the information provided on classification on the mentioned website
- Classification data is already provided, simply download the CSV file from the website and upload it to drive for accessibility.
```
# UKCAT classification data downloaded from charityclassification.org.uk
classification = pd.read_csv('drive/My Drive/UK_Data/Classification/charities_active-ukcat.csv')
classification.head()
# org_id has three '-'-separated parts: island, country register, org number
#   CHC - charities in England and Wales
#   NIC - charities in Northern Ireland
#   SC  - charities in Scotland
classification[['Island', 'Country', 'organisation_number']] = classification['org_id'].str.split('-', expand=True)
classification.head()
classification.drop("org_id", axis=1, inplace=True)
classification = classification.reindex(
    columns=['Island', 'Country', 'organisation_number', 'ukcat_code'])
print(classification.shape)
print(classification.head())
Counter(classification['Island'])
# every organisation is inside Great Britain
Counter(classification['Country'])
# CHC - England and Wales
# NIC - Northern Ireland
# SC - Scotland
# focus on charities in England and Wales
classification_england = classification[classification['Country']=="CHC"]
classification_england.shape
classification_england = classification_england.rename(columns={'ukcat_code':'Code'})
classification_england.head()
# the website also provides a detailed description for every classification code
codes = pd.read_csv('drive/My Drive/UK_Data/Classification/ukcat-codes.csv')
codes.head()
# attach the code descriptions
classification_england = classification_england.merge(codes, on="Code", how="left")
classification_england.head()
# drop columns that will not be needed
classification_england.drop(['Regular expression','Exclude regular expression'], axis=1, inplace=True)
classification_england.head()
Counter(classification_england['Category'])
# add some more data to the classification dataframe such as no. of trustees,
# latest income, expenditure and registration status
n_trustees = []
latest_income = []
latest_expenditure = []
registration_status = []
ctr = 0
for i, row in classification_england.iterrows():
    if ctr % 20000 == 0:
        print(ctr, end=" ")  # coarse progress indicator
    # renamed from `id` so the builtin id() is not shadowed
    org_num = int(row['organisation_number'])
    # trustee count; None when the organisation has no trustee records
    # (narrowed from a bare `except:` so Ctrl-C etc. are not swallowed)
    try:
        n_trustees.append(len(orgs[org_num]))
    except Exception:
        n_trustees.append(None)
    # Fetch all three fields first, then append, so that a failure on a later
    # lookup cannot leave the lists misaligned (the original appended
    # latest_income *before* the remaining lookups could still fail, which
    # double-appended income in the except branch).
    try:
        income = charity[org_num]['latest_income']
        expenditure = charity[org_num]['latest_expenditure']
        status = charity[org_num]['charity_registration_status']
    except Exception:
        income, expenditure, status = 0, 0, "Unknown"
    latest_income.append(income)
    latest_expenditure.append(expenditure)
    registration_status.append(status)
    ctr += 1
# update the dataframe
classification_england['num_trustees'] = n_trustees
classification_england['latest_income'] = latest_income
classification_england['latest_expenditure'] = latest_expenditure
classification_england['registration_status'] = registration_status
classification_england.head()
Counter(classification_england['registration_status'])
# keep only charities whose registration status is "Registered"
# (drops both "Removed" and "Unknown"); .copy() makes an independent frame so
# the column assignment below cannot trigger SettingWithCopy ambiguity
classification_england_filtered = classification_england[
    classification_england['registration_status'] == 'Registered'].copy()
print(classification_england_filtered.shape)
# constant 1 per row so groupby-sum yields a count of organisations
# (scalar assignment broadcasts; no need to build a [1]*n list)
classification_england_filtered['total_organisations'] = 1
classification_england_filtered.head()
# aggregation spec shared by the tag- and category-level summaries
agg_spec = {'total_organisations': 'sum',
            'num_trustees': 'sum',
            'latest_income': 'sum',
            'latest_expenditure': 'sum'}
# aggregate all the data into tags
groupedbyTag = classification_england_filtered.groupby('tag').agg(agg_spec)
groupedbyTag.head()
# aggregate all data into categories
groupedbyCategory = classification_england_filtered.groupby('Category').agg(agg_spec)
groupedbyCategory.head()
# save the dataframes as CSVs
# (archive member names keep the original "gropuedby" spelling so existing
# downstream readers are unaffected)
compression_opts1 = dict(method='zip', archive_name='gropuedbyTag.csv')
compression_opts2 = dict(method='zip', archive_name='gropuedbyCategory.csv')
groupedbyTag.to_csv('groupedbyTag.zip', index=True, compression=compression_opts1)
groupedbyCategory.to_csv('groupedbyCategory.zip', index=True, compression=compression_opts2)
compression_opts = dict(method='zip',
                        archive_name='classification_england_filtered.csv')
classification_england_filtered.to_csv('classification_england_filtered.zip', index=False, compression=compression_opts)
compression_opts = dict(method='zip',
                        archive_name='classification_england_whole.csv')
classification_england.to_csv('classification_england_whole.zip', index=False, compression=compression_opts)
# REMOVING CATEGORY DUPLICATES FOR AN ORGANISATION
# some rows share the same organisation_number and Category while differing
# only in tag / sub-category; they must be collapsed before grouping by
# Category or the counts would be inflated
classification_england_filtered_category = classification_england_filtered.drop_duplicates(
    subset=["organisation_number", "Category"], keep="first", inplace=False)
print(classification_england_filtered_category.shape)
classification_england_filtered_category.head()
# NOTE(review): compression_opts here is whatever the previous cell last set
classification_england_filtered_category.to_csv(
    'classification_england_filtered_RemovedCategoryDuplicates.zip',
    index=False, compression=compression_opts)
# group by category again, now without the duplicates
groupedbyCategory_filtered = classification_england_filtered_category.groupby('Category').agg(
    {'total_organisations': 'sum',
     'num_trustees': 'sum',
     'latest_income': 'sum',
     'latest_expenditure': 'sum'})
groupedbyCategory_filtered.head()
compression_opts111 = dict(method='zip', archive_name='gropuedbyCategory_filtered.csv')
groupedbyCategory_filtered.to_csv('groupedbyCategory_filtered.zip', index=True, compression=compression_opts111)
# arranged with the largest organisation counts first
groupedbyCategory_filtered.sort_values(by=['total_organisations'], ascending=False)
```
We have 24 different categories with highest number of organisations in the 'Beneficiary Group' category followed by Education, Associations, Charity and VCS support etc.
### Classification by charityclassification.org.uk - ICNPTSO - ML Classifier
This is something that can be explored in future if required
```
```
### Creating Clean Dataset in Graph Object Form
```
import networkx as nx
classification_england_filtered_category.reset_index(drop=True, inplace=True)
print(classification_england_filtered_category.shape)
classification_england_filtered_category.head(5)
# category-level classification with tag/sub-category duplicates removed;
# a single organisation can still carry several categories, and all of them
# are kept in the final classification
final_classification = {}
for _, row in classification_england_filtered_category.iterrows():
    org_num = row['organisation_number']
    final_classification.setdefault(org_num, []).append(row['Category'])
# the categories for an organisation are looked up by its organisation
# number; show the first ten entries
for position, entry in enumerate(final_classification.items()):
    if position == 10:
        break
    print(entry)
# keep the full metadata of every charity whose registration status is
# "Registered"; keys are the same keys used by the `charity` dict
final_charity = {}
for key in charity.keys():
    if charity[key]['charity_registration_status'] == "Registered":
        final_charity[key] = charity[key]
print(len(final_charity.keys()))
print(json.dumps(list(final_charity.items())[0], indent=3))
# how many registered charities are missing from the
# charityclassification.org dataset?
ne = []
for key in final_charity.keys():
    if str(key) not in final_classification:
        ne.append(key)
print(len(final_charity.keys()))
print(len(ne))
```
- 117681 organisations seem to have no classification data from charityclassification.org
- 185736 - 117681 = 68055 charities have classifications
- need to figure it out, why are the classifications missing?
```
# for every organisation, collect its trustees as (trustee_id, trustee_name)
# pairs in a list
orgs_trustees = {}
for org_num in orgs:
    for trustee in orgs[org_num]:
        pair = (trustee['trustee_id'], trustee['trustee_name'])
        orgs_trustees.setdefault(org_num, []).append(pair)
list(orgs_trustees.items())[0]
# for every trustee, collect their organisations as
# (organisation_number, charity_name) pairs in a list
trustee_orgs = {}
for trustee_id in members:
    for membership in members[trustee_id]:
        org_num = membership['organisation_number']
        trustee_orgs.setdefault(trustee_id, []).append(
            (org_num, charity[org_num]['charity_name']))
list(trustee_orgs.items())[0]
trusteeList = members.keys()
orgList = orgs.keys()
print(len(trusteeList), len(set(trusteeList)))
print(len(orgList), len(set(orgList)))
# ids used both as a trustee id and as an organisation id
common = set(trusteeList).intersection(set(orgList))
print(len(common))
```
This might create conflict since we are adding nodes to the network by their ids and if trustees and orgs share some common ids then it will create conflicts and data will be lost.
```
# creating the clean graph-object dataset: one node per organisation plus
# one node per trustee, with attributes copied from the metadata dicts
G = nx.MultiGraph()
count = 0
for i in orgs:
    count+=1
    G.add_node(i)
    # copy metadata
    # NOTE(review): assumes every key of `orgs` is present in `final_charity`
    # (i.e. is a registered charity) — a missing key would raise KeyError here
    attribute_dict = final_charity[i].copy()
    try:
        attribute_dict['Classification'] = final_classification[str(i)]
    except:
        # add an empty list if no classification is found
        attribute_dict['Classification'] = []
    # add no. of trustees
    attribute_dict['num_trustee'] = len(orgs[i])
    attribute_dict['type'] = 0 #denoting the node type as organisation
    # add trustee list as attribute to the org node
    # NOTE(review): orgs_trustees only has keys for orgs with at least one
    # trustee record — confirm no org in `orgs` has an empty trustee list
    attribute_dict['trustees'] = orgs_trustees[i]
    # node id is the org number
    # setting attributes for org i
    attrs = {i:attribute_dict}
    nx.set_node_attributes(G, attrs)
    # add trustee data
    for j in orgs_trustees[i]:
        # if the trustee id collides with an organisation id
        if(j[0] in common):
            # add T at the end of the id so that it can be added as a separate node
            node_id = str(j[0])+'T'
        else:
            node_id = j[0]
        # add node to the network
        G.add_node(node_id)
        nattrs = {node_id:{"Name":j[1],"type":1,"organisations":trustee_orgs[j[0]]}} #1 denotes node type as trustee
        nx.set_node_attributes(G, nattrs)
    # coarse progress indicator
    if(count%10000==0):
        print(count,end=" ")
G.number_of_nodes()
# Adding edges: one edge per (trustee, organisation) board membership.
for p in members.keys():
    # trustees whose id collides with an organisation id were added with a
    # 'T' suffix, so look them up the same way
    if p in common:
        node_id = str(p) + 'T'
    else:
        node_id = p
    # Fixed: the original tested membership via `G.nodes(0)`, which passed 0
    # as the `data` argument of NodeView — almost certainly a typo for
    # `G.nodes()`.  Plain `in G` is the idiomatic membership test.
    if node_id in G:
        boards = []
        for j in members[p]:
            if j['organisation_number'] in G:
                boards.append(j['organisation_number'])
        res = [(node_id, sub) for sub in boards]
        G.add_edges_from(res)
G.nodes[23760]
G.nodes[3123875]
import pickle
# export the graph data to a pickle file
with open("dataset.pickle", 'wb') as f:
    pickle.dump(G, f)
# this is how data can be loaded from the saved pickle file for future use;
# the object comes back as the networkx multigraph it was exported as.
# Fixed: the original used pickle.load(open(...)), which leaked the file
# handle — a context manager closes it promptly.
with open("dataset.pickle", "rb") as f:
    data = pickle.load(f)
type(data)
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
%matplotlib inline
# normalised general data on the isolates
isolados = pd.read_csv('data/01 - geral_normalizada.csv')
isolados.sample(5)
# reaction results: year, CCR number, compound id, result
df = pd.read_csv('data/02 - reacoes_normalizada.csv', names=['Ano','CCR','Composto','Resultado'], header=None, index_col=0)
df.sample(5)
# join the terms into a single identifier for the isolate:
df["Isolado"] = "UFT."+df['Ano'].astype(str)+"(L)"+df['CCR'].astype(str)
# remove the columns that are no longer useful:
del df['Ano']
del df['CCR']
# set the new index column:
df.set_index(['Isolado'], inplace=True)
# compostos = []
# for i in range(1,81):
#     compostos.append(i)
# compostos
df.sample(5)
# pivot the result rows into one column per compound:
df = df.pivot(columns='Composto')
# save the current state of the dataframe to a CSV file:
df.to_csv('03 - reacoes_formatadas.csv')
df.sample(5)
```
### Análise Exploratória de Dados:
1. Informações Gerais
2. Tratamento de valores Nulos
3.
Questions to answer:
* How many features do you have?
* How many observations do you have?
* What is the data type of each feature?
```
# dimensions - rows and columns
df.shape
# miscellaneous information:
# df.info()
# summary description of the data under analysis:
df.describe(include='object')
# show missing values within a sample of the data:
sns.heatmap(df.isnull())
plt.show()
# plot each categorical feature (only those with < 10 distinct values):
for column in df.select_dtypes(include='object'):
    if df[column].nunique() < 10:
        sns.countplot(y=column, data=df)
        plt.show()
```
Are there so many missing values for a variable that you should drop that variable from your dataset?
```
# removal of columns where most values are null:
#df.isnull().sum()
# percentage of nulls for every column that has any
df[df.columns[df.isnull().any()]].isnull().sum() * 100 / df.shape[0]
# show columns whose null count exceeds 50% of the possible records:
total = df.isnull().sum().sort_values(ascending=False)
percent = (df.isnull().sum()/df.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Missing Percent'])
missing_data['Missing Percent'] = missing_data['Missing Percent'].apply(lambda x: x * 100)
missing_data.loc[missing_data['Missing Percent'] > 50]
df.sample(5)
# same dataframe; the "Resultado" identifier was removed manually and the
# file header adjusted:
data = pd.read_csv('data/03.1 - reacoes_formatadas.csv', index_col=0)
data.sample(5)
from pandas_profiling import ProfileReport
relatorio_inicial = ProfileReport(data, title="Reações - Relatorio Inicial", correlations={"cramers": {"calculate": False}})
relatorio_inicial.to_widgets()
relatorio_inicial.to_file("01 - relatorio-inicial_reacoes.html")
```
Deleta (arbitrariamente) colunas com menos de 50% de preenchimento dos dados:
As colunas com mais de 50% de valores nulos são, respectivamente:
1, 39, 40, 45, 46, 48, 64, 67, 68, 69, 78 e 79;
Logo, ...
```
# TODO: automate/improve this:
# compounds with more than 50% missing values, listed in the markdown above
colunas_excluir = [ '1', '39', '40', '45', '46', '48', '64', '67', '68', '69', '78', '79' ]
# for coluna in colunas_excluir:
data.drop(columns=colunas_excluir, axis=0, inplace=True)
# current shape, without the 12 compounds removed for their high share of
# blank values:
data.shape
```
Verificar colunas com baixa cardinalidade (com cardinalidade igual a 1). Colunas cujos valores não possuem variação não contribuem para algoritmo de agrupamento;
```
# only one column had cardinality == 1 (the one for compound 48), and it was
# already removed in the previous operation for having 92% missing data
# save the current state of the dataframe to a CSV file:
data.to_csv('03.1 - reacoes_col_removidas.csv')
# we still have null values — let's look at them:
# show columns whose null count exceeds 40% of the possible records:
total = data.isnull().sum().sort_values(ascending=False)
percent = (data.isnull().sum()/data.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Missing Percent'])
missing_data['Missing Percent'] = missing_data['Missing Percent'].apply(lambda x: x * 100)
missing_data.loc[missing_data['Missing Percent'] > 40]
```
### 4 - Como tratar essa grande qtd de valores faltantes???
1. Ignorar, tratando os valores faltantes com o vetor de zeros do **Dummy Encoding**;
2. Remover todos e tratar registros como linhas de uma matriz 'banguela';
3. Converter esses valores para uma representação qualquer: fill in the missing values --converter e depois remover;
4. Usar interpolação (**Impute**): decidir, de maneira inteligente, o que deverá substituir o espaço em branco --geralmente escolhe-se o valor com maior frequência;
```
# 4.1. Ignore null values and handle everything with One-Hot Encoding:
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
data.columns
data.head()
# columns to transform: all except the index column (column zero)
# data[:-1]
# dummy_na=False drops NaN instead of giving it its own indicator column
data_encoded = pd.get_dummies(data, prefix=data.columns, prefix_sep=' ', dummy_na=False)
# the difference is drop_first=True:
# that parameter keeps the dataframe leaner by generating k-1 indicator
# columns per feature; with drop_first the encoding is true dummy encoding
# (without it, the method performs one-hot encoding)
# data_encoded2 = pd.get_dummies(data, prefix=data.columns, prefix_sep=' ', dummy_na=False, drop_first=True)
data_encoded.head(7)
data_encoded.shape # .shape is a property, not a method .shape()
# save the current state of the dataframe to a CSV file:
data_encoded.to_csv('04.1 - reacoes_one-hot_encoded.csv')
# generate the dataframe resulting from true dummy encoding:
data_encoded2 = pd.get_dummies(data, prefix=data.columns, prefix_sep=' ', dummy_na=False, drop_first=True)
# save the current state of the dataframe to a CSV file:
data_encoded2.to_csv('04.2 - reacoes_dummy_encoded.csv')
data_encoded2.head()
# 4.2 - TODO: build the "gap-toothed" (sparse) matrix:
data.sample(6)
# 4.3 - TODO:
# 4.4 - TODO:
# generate the Pandas Profiling report again:
relatorio_final = ProfileReport(data_encoded2, title="Reações - Relatorio Final", correlations={"cramers": {"calculate": False}})
relatorio_final.to_widgets()
```
| github_jupyter |
```
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
from pyspark.sql import *
from pyspark.sql.types import *
from pyspark.sql.functions import udf
from pyspark.sql.functions import *
from pyspark.sql.window import Window
NoneType = type(None)
import os
import socket
import hashlib
import string
import time
from osgeo import ogr
import geopandas as gpd
from pyspark.sql import SparkSession
from sedona.register import SedonaRegistrator
from sedona.utils import SedonaKryoRegistrator, KryoSerializer
def createMd5(text):
    """Return the hex MD5 digest of *text* (UTF-8 encoded)."""
    digest = hashlib.md5(text.encode('utf-8'))
    return digest.hexdigest()
# Spark UDF wrapper so createMd5 can be applied to string columns
md5Udf= udf(lambda z: createMd5(z),StringType())
def clean_lower(text):
    """Lower-case *text*, strip punctuation/special characters, and collapse
    runs of whitespace into single spaces."""
    punctuation = '!"#$%&\'()*+,./:;<=>?@[\\]^`{|}~-_”“«»‘'
    stripped = text.translate(str.maketrans('', '', punctuation)).lower()
    return " ".join(stripped.split())
# Spark UDF wrapper so clean_lower can be applied to string columns
cleanLowerUdf= udf(lambda z: clean_lower(z),StringType())
def get_site_from_url(text):
    """Return the host portion of a URL of the form 'scheme://host/...'
    (i.e. the third '/'-separated token)."""
    parts = text.split("/")
    return parts[2]
# Spark UDF wrapper so get_site_from_url can be applied to url columns
getUrl= udf(lambda z: get_site_from_url(z),StringType())
minio_ip = socket.gethostbyname('minio')
# Spark session with Kryo serialization, Sedona registration hooks, and the
# S3A filesystem pointed at the MinIO service.
# Fixed: the result-size config key was misspelled 'spark.dirver.maxResultSize'
# in the original, so the 5g limit was silently ignored by Spark.
spark = SparkSession. \
    builder. \
    appName("Python Spark S3"). \
    config("spark.serializer", KryoSerializer.getName). \
    config("spark.executor.memory", "80g"). \
    config("spark.driver.memory", "80g"). \
    config('spark.driver.maxResultSize', '5g'). \
    config("spark.kryo.registrator", SedonaKryoRegistrator.getName). \
    config('spark.hadoop.fs.s3a.endpoint', 'http://'+minio_ip+':9000'). \
    config("spark.hadoop.fs.s3a.access.key", "minio-access-key"). \
    config("spark.hadoop.fs.s3a.secret.key", "minio-secret-key"). \
    config('spark.hadoop.fs.s3a.impl', 'org.apache.hadoop.fs.s3a.S3AFileSystem'). \
    config('spark.jars.packages',
           'org.apache.sedona:sedona-python-adapter-3.0_2.12:1.0.0-incubating,org.datasyslab:geotools-wrapper:geotools-24.0'). \
    getOrCreate()
SedonaRegistrator.registerAll(spark)
# explicit schema for the scraped news JSON documents
st= StructType([
    StructField("abstract", StringType()),
    StructField("authors", StringType()),
    StructField("image", StringType()),
    StructField("metadata", StringType()),
    StructField("publish_date", TimestampType()),
    StructField("text", StringType()),
    StructField("title", StringType()),
    StructField("url", StringType()),
])
# load every article JSON from the MinIO 'news' bucket, parsing
# publish_date with the dd-MM-yyyy timestamp format
df_news_covid_mexico = spark \
    .read.schema(st).option("timestampFormat", "dd-MM-yyyy") \
    .json("s3a://news/Samuel_Garcia/*.json")
df_news_covid_mexico.count()
df_news_covid_mexico.printSchema()
df_news_covid_mexico.show(10)
# derive md5(url) as article_id, a cleaned lower-cased text column, and the
# site host from the url; drop articles whose text is shorter than 2 chars
df_news_covid_mexico_date_text = df_news_covid_mexico.select(md5Udf("url").alias("article_id"),"title","url","publish_date",cleanLowerUdf("text").alias("clean_text"),getUrl("url").alias("site")).filter("length(text) >= 2")
df_news_covid_mexico_date_text.show(15)
df_news_covid_mexico_date_text.count()
df_news_covid_mexico_date_text.select("title").show(15,False)
# JDBC settings for the shared Postgres database; password comes from the
# SHARED_PASSWORD environment variable
url = "jdbc:postgresql://postgres/shared"
mode="overwrite"
properties = {
    "user": "shared",
    "password": os.environ['SHARED_PASSWORD']
}
# persist the cleaned articles table
df_news_covid_mexico_date_text.write.jdbc(url=url, table="tb_news_covid_mexico_date_text", mode=mode, properties=properties)
# explode the cleaned text into one row per word, keeping words longer than 1 char
df_news_covid_mexico_palabras = df_news_covid_mexico_date_text.select("article_id","publish_date",explode(split(df_news_covid_mexico_date_text.clean_text, "\s")).alias("palabra")).where(length('palabra') > 1)
df_news_covid_mexico_palabras.show(30)
#https://sigdelta.com/blog/word-count-in-spark-with-a-pinch-of-tf-idf/
# raw per-article word counts
df_news_covid_mexico_palabras.groupBy('article_id', 'palabra','publish_date')\
    .count()\
    .orderBy('count', ascending=False)\
    .show(25)
#https://sigdelta.com/blog/word-count-in-spark-with-a-pinch-of-tf-idf-continued/
# term frequency per article: n_w = occurrences of the word in the article,
# n_d = total words in the article (window over article_id), tf = n_w / n_d
w = Window.partitionBy(df_news_covid_mexico_palabras['article_id'])
article_tf = df_news_covid_mexico_palabras.groupBy('article_id', 'palabra', 'publish_date')\
    .agg(count('*').alias('n_w'),sum(count('*')).over(w).alias('n_d'),(count('*')/sum(count('*')).over(w)).alias('tf'))\
    .orderBy('n_w', ascending=False)\
    .cache()
article_tf.show(truncate=15)
# inverse document frequency: c_d = number of distinct articles,
# i_d = number of (word, article) groups per word, idf = log(c_d / i_d)
w = Window.partitionBy('palabra')
c_d = df_news_covid_mexico_palabras.select('article_id').distinct().count()
article_idf = df_news_covid_mexico_palabras.groupBy('palabra', 'article_id','publish_date').agg(
    lit(c_d).alias('c_d'),
    count('*').over(w).alias('i_d'),
    log(lit(c_d)/count('*').over(w)).alias('idf')
)\
    .orderBy('idf', ascending=False)\
    .cache()
article_idf.show(150, truncate=15)
# combine both tables: tf_idf = tf * idf
article_tfidf = article_tf.join(article_idf, ['article_id', 'palabra', 'publish_date'])\
    .withColumn('tf_idf', col('tf') * col('idf'))\
    .cache()
article_tfidf.orderBy('tf_idf', ascending=False).show(150,truncate=12)
# keep the 15 highest-tf_idf words per article
w = Window.partitionBy('article_id').orderBy(col('tf_idf').desc())
article_tfidf_top_15=article_tfidf.withColumn('rank', rank().over(w))\
    .where('rank <= 15')\
    .drop('rank')\
    .orderBy('article_id', 'tf_idf','n_w')\
    .select('article_id','publish_date','palabra','n_w','tf_idf')
article_tfidf_top_15.show(truncate=12, n=30)
# attach the site host back to the top-15 table and persist it to Postgres
article_tfidf_top_15_site = article_tfidf_top_15.join(df_news_covid_mexico_date_text, ['article_id','publish_date']).select('article_id','publish_date','site','palabra','n_w','tf_idf')
article_tfidf_top_15_site.show(15)
article_tfidf_top_15_site.write.jdbc(url=url, table="tb_news_covid_mexico_palabras_top_tfidf", mode=mode, properties=properties)
```
| github_jupyter |
# GSD: Rpb1 orthologs in 1011 genomes collection
This collects Rpb1 gene and protein sequences from a collection of natural isolates of sequenced yeast genomes from [Peter et al 2017](https://www.ncbi.nlm.nih.gov/pubmed/29643504), and then estimates the count of the heptad repeats. It builds directly on the notebook [here](GSD%20Rpb1_orthologs_in_PB_genomes.ipynb), which descends from [Searching for coding sequences in genomes using BLAST and Python](../Searching%20for%20coding%20sequences%20in%20genomes%20using%20BLAST%20and%20Python.ipynb). It also builds on the notebooks shown [here](https://nbviewer.jupyter.org/github/fomightez/cl_sq_demo-binder/blob/master/notebooks/GSD/GSD%20Add_Supplemental_data_info_to_nt_count%20data%20for%201011_cerevisiae_collection.ipynb) and [here](https://github.com/fomightez/patmatch-binder).
Reference for sequence data:
[Genome evolution across 1,011 Saccharomyces cerevisiae isolates. Peter J, De Chiara M, Friedrich A, Yue JX, Pflieger D, Bergström A, Sigwalt A, Barre B, Freel K, Llored A, Cruaud C, Labadie K, Aury JM, Istace B, Lebrigand K, Barbry P, Engelen S, Lemainque A, Wincker P, Liti G, Schacherer J. Nature. 2018 Apr;556(7701):339-344. doi: 10.1038/s41586-018-0030-5. Epub 2018 Apr 11. PMID: 29643504](https://www.ncbi.nlm.nih.gov/pubmed/29643504)
-----
## Overview

## Preparation
Get scripts and sequence data necessary.
**DO NOT 'RUN ALL'. AN INTERACTION IS NECESSARY AT CELL FIVE. AFTER THAT INTERACTION, THE REST BELOW IT CAN BE RUN.**
(Caveat: right now this is written for genes with no introns. Only a few hundred have in yeast and that is the organism in this example. Intron presence would only become important when trying to translate in late stages of this workflow.)
```
# target gene and the expected length (nt) of its coding sequence
gene_name = "RPB1"
size_expected = 5202
# set True to fetch the gene FASTA from the link below instead of a local file
get_seq_from_link = False
link_to_FASTA_of_gene = "https://gist.githubusercontent.com/fomightez/f46b0624f1d8e3abb6ff908fc447e63b/raw/625eaba76bb54e16032f90c8812350441b753a0c/uz_S288C_YOR270C_VPH1_coding.fsa"
#**Possible future enhancement would be to add getting the FASTA of the gene from Yeastmine with just systematic id**
```
Get the `blast_to_df` script by running this commands.
```
import os
# Fetch the helper script that converts BLAST tabular output into a pandas
# dataframe, unless a local copy already exists.
file_needed = "blast_to_df.py"
if not os.path.isfile(file_needed):
    !curl -O https://raw.githubusercontent.com/fomightez/sequencework/master/blast-utilities/blast_to_df.py
import pandas as pd
```
**Now to get the entire collection or a subset of the 1011 genomes, the next cell will need to be edited.** I'll probably leave it with a small set for typical running purposes. However, to make it run fast, try the 'super-tiny' set with just two.
```
# Method to get ALL the genomes. TAKES A WHILE!!!
# (ca. 1 hour and 15 minutes to download alone? + Extracting is a while.)
# Easiest way to monitor extracting step is to open terminal, cd to
# `GENOMES_ASSEMBLED`, & use `ls | wc -l` to count files extracted.
#!curl -O http://1002genomes.u-strasbg.fr/files/1011Assemblies.tar.gz
#!tar xzf 1011Assemblies.tar.gz
#!rm 1011Assemblies.tar.gz
# Small development set (a subset of the 1011 genomes; the default here)
!curl -OL https://www.dropbox.com/s/f42tiygq9tr1545/medium_setGENOMES_ASSEMBLED.tar.gz
!tar xzf medium_setGENOMES_ASSEMBLED.tar.gz
# Tiny development set (just two genomes; fastest for trying the workflow)
#!curl -OL https://www.dropbox.com/s/txufq2jflkgip82/tiny_setGENOMES_ASSEMBLED.tar.gz
#!tar xzf tiny_setGENOMES_ASSEMBLED.tar.gz
#!mv tiny_setGENOMES_ASSEMBLED GENOMES_ASSEMBLED
# define directory with genomes; later cells glob `*.re.fa` files from here
genomes_dirn = "GENOMES_ASSEMBLED"
```
Before processing the list of all of them, fix one that has a file name mismatch with what the description lines have.
Specifically, the assembly file name is `CDH.re.fa`, but the FASTA-entries inside begin `CDH-3`.
Simple file name mismatch. So next cell will change that file name to match.
```
import os
import sys
# The CDH assembly file name does not match its internal FASTA description
# lines (which begin `CDH-3`); rename the file so downstream strain-id
# parsing works.
file_with_issues = "CDH.re.fa"
if os.path.isfile("GENOMES_ASSEMBLED/"+file_with_issues):
    sys.stderr.write("\nFile with name non-matching entries ('{}') observed and"
        " fixed.".format(file_with_issues))
    !mv GENOMES_ASSEMBLED/CDH.re.fa GENOMES_ASSEMBLED/CDH_3.re.fa
    #pause and then check if file with original name is there still because
    # it means this was attempted too soon and need to start over.
    import time
    time.sleep(12) #12 seconds
    if os.path.isfile("GENOMES_ASSEMBLED/"+file_with_issues):
        sys.stderr.write("\n***PROBLEM. TRIED THIS CELL BEFORE FINISHED UPLOADING.\n"
            "DELETE FILES ASSOCIATED AND START ALL OVER AGAIN WITH UPLOAD STEP***.")
else:
    # file absent entirely: either already fixed or genomes not yet extracted
    sys.stderr.write("\nFile '{}' not seen and so nothing done"
        ". Seems wrong.".format(file_with_issues))
    sys.exit(1)
# Get SGD gene sequence in FASTA format to search for best matches in the genomes
import sys
gene_filen = gene_name + ".fsa"
if get_seq_from_link:
    # download the gene FASTA from the link configured above
    !curl -o {gene_filen} {link_to_FASTA_of_gene}
else:
    # make an empty placeholder and stop so the user can paste the sequence in
    !touch {gene_filen}
    sys.stderr.write("\nEDIT THE FILE '{}' TO CONTAIN "
        "YOUR GENE OF INTEREST (FASTA-FORMATTED)"
        ".".format(gene_filen))
    sys.exit(0)
```
**I PUT CONTENTS OF FILE `S288C_YDL140C_RPO21_coding.fsa` downloaded from [here](https://www.yeastgenome.org/locus/S000002299/sequence) as 'RPB1.fsa'.**
Now you are prepared to run BLAST to search each PacBio-sequenced genomes for the best match to a gene from the Saccharomyces cerevisiae strain S288C reference sequence.
## Use BLAST to search the genomes for matches to the gene in the reference genome at SGD
SGD is the [Saccharomyces cerevisiae Genome Database site](http://yeastgenome.org) and the reference genome is from S288C.
This is going to go through each genome and make a database so it is searchable and then search for matches to the gene. The information on the best match will be collected. One use for that information will be collecting the corresponding sequences later.
Import the script that allows sending BLAST output to Python dataframes so that we can use it here.
```
from blast_to_df import blast_to_df

# Gather the assembly FASTA files (ending in `re.fa`) into `genomes`,
# skipping BLAST database artifacts (`.nhr`/`.nin`/`.nsq`) left over from
# earlier development runs, as well as hidden `._*` files.
fn_to_check = "re.fa"
genomes = []
import os
import fnmatch
for entry in os.listdir(genomes_dirn):
    if not fnmatch.fnmatch(entry, '*'+fn_to_check):
        continue
    if entry.endswith(".nhr") or entry.endswith(".nin") or entry.endswith(".nsq"):
        continue
    if entry.startswith("._"):
        continue
    genomes.append(entry)
len(genomes)
```
Using the trick of putting `%%capture` on first line from [here](https://stackoverflow.com/a/23692951/8508004) to suppress the output from BLAST for many sequences from filling up cell.
(You can monitor the making of files ending in `.nhr` for all the FASTA files in `GENOMES_ASSEMBLED` to monitor progress.)
```
%%time
%%capture
# BLAST the SGD reference gene against every genome; keep only the single
# best hit (first row) per genome.
SGD_gene = gene_filen
dfs = []
for genome in genomes:
    # build a nucleotide BLAST database for this assembly, then search it
    !makeblastdb -in {genomes_dirn}/{genome} -dbtype nucl
    result = !blastn -query {SGD_gene} -db {genomes_dirn}/{genome} -outfmt "6 qseqid sseqid stitle pident qcovs length mismatch gapopen qstart qend sstart send qframe sframe frames evalue bitscore qseq sseq" -task blastn
    from blast_to_df import blast_to_df
    blast_df = blast_to_df(result.n)
    dfs.append(blast_df.head(1))
# merge the dataframes in the list `dfs` into one dataframe
df = pd.concat(dfs)
#Save the df
filen_prefix = gene_name + "_orthologBLASTdf"
df.to_pickle(filen_prefix+".pkl")
df.to_csv(filen_prefix+'.tsv', sep='\t',index = False)
#df
```
Computationally check if any genomes missing from the BLAST results list?
```
# Computationally check whether any genomes are missing from the BLAST results.
# The strain id is everything before the first dash in each BLAST subject id.
subjids = [s.split("-")[0] for s in df.sseqid.tolist()]
# plus one to account for the period that sits between the strain id and
# `fn_to_check`, e.g. `SK1.genome.fa`
len_genome_fn_end = len(fn_to_check) + 1
genome_ids = [g[:-len_genome_fn_end] for g in genomes]
a = set(genome_ids)   # strain ids we started with
print ("initial:",len(a))
r = set(subjids)      # strain ids that produced a BLAST hit
print("results:",len(r))
print ("missing:",len(a-r))
if a - r:
    print("\n")
    print("ids missing:",a-r)
```
Sanity check: Report on how expected size compares to max size seen?
```
# Sanity check: compare the expected gene size against the BLAST match sizes.
# (Fix: the original assigned `size_seen = df.length.max(0)` but never used it,
# then recomputed the same max for printing; compute it once and use it.)
max_match_size = df.length.max(0)
print ("Expected size of gene:", size_expected)
print ("Most frequent size of matches:", df.length.mode()[0])
print ("Maximum size of matches:", max_match_size)
```
## Collect the identified, raw sequences
Get the expected size centered on the best match, plus a little flanking each because they might not exactly cover the entire open reading frame. (Although, the example here all look to be full size.)
```
# Get the script for extracting based on position (and install dependency pyfaidx)
import os
file_needed = "extract_subsequence_from_FASTA.py"
if not os.path.isfile(file_needed):
    !curl -O https://raw.githubusercontent.com/fomightez/sequencework/master/Extract_from_FASTA/extract_subsequence_from_FASTA.py
# pyfaidx is required by the extraction script for indexed FASTA access
!pip install pyfaidx
```
For the next cell, I am going to use the trick of putting `%%capture` on first line from [here](https://stackoverflow.com/a/23692951/8508004) to suppress the output from the entire set making a long list of output.
For ease just monitor the progress in a launched terminal with the following code run in the directory where this notebook will be because the generated files only moved into the `raw` directory as last step of cell:
ls seq_extracted* | wc -l
(**NOTE: WHEN RUNNING WITH THE FULL SET, THIS CELL BELOW WILL REPORT AROUND A DOZEN `FileNotFoundError:`/Exceptions. HOWEVER, THEY DON'T CAUSE THE NOTEBOOK ITSELF TO CEASE TO RUN. SO DISREGARD THEM FOR THE TIME BEING.** )
```
%%capture
# Settings for extracting the expected-size region centered on each BLAST hit.
size_expected = size_expected # use value from above, or alter at this point.
#size_expected = df.length.max(0) #bp length of SGD coding sequence; should be equivalent and that way not hardcoded?
extra_add_to_start = 51 #to allow for 'fuzziness' at starting end
extra_add_to_end = 51 #to allow for 'fuzziness' at far end
# file-name suffix shared by all assembly FASTA files
genome_fn_end = "re.fa"
def midpoint(items):
    '''
    Return the midpoint (as an integer) of the first and second values of
    an iterable. Truncates toward zero, matching int() conversion.
    '''
    first = int(items[0])
    second = int(items[1])
    return int((first + second) / 2)
def determine_pos_to_get(match_start,match_end):
    '''
    Take the start and end of the matched region.
    Calculate midpoint between those and then
    center expected size on that to determine
    preliminary start and preliminary end to get.
    Add the extra basepairs to get at each end
    to allow for fuzziness/differences of actual
    gene ends for orthologs.
    Return the final start and end positions to get.

    Relies on module-level settings `size_expected`,
    `extra_add_to_start`, `extra_add_to_end` and the
    `midpoint()` helper defined above.
    '''
    center_of_match = midpoint((match_start,match_end))
    # half of the expected size, rounded up for odd expected sizes
    half_size_expected = int(size_expected/2.0)
    if size_expected % 2 != 0:
        half_size_expected += 1
    start_pos = center_of_match - half_size_expected
    end_pos = center_of_match + half_size_expected
    start_pos -= extra_add_to_start
    end_pos += extra_add_to_end
    # Because of getting some flanking sequences to account for 'fuzziness', it
    # is possible the start and end can exceed possible. 'End' is not a problem
    # because the `extract_subsequence_from_FASTA.py` script will get as much as
    # it from the indicated sequence if a larger than possible number is
    # provided. However,'start' can become negative and because the region to
    # extract is provided as a string the dash can become a problem. Dealing
    # with it here by making sequence positive only.
    # Additionally, because I rely on center of match to position where to get,
    # part being cut-off due to absence on sequence fragment will shift center
    # of match away from what is actually center of gene and to counter-balance
    # add twice the amount to the other end. (Actually, I feel I should adjust
    # the start end likewise if the sequence happens to be shorter than portion
    # I would like to capture but I don't know length of involved hit yet and
    # that would need to be added to allow that to happen!<--TO DO)
    if start_pos < 0:
        raw_amount_missing_at_start = abs(start_pos)# for counterbalancing; needs
        # to be collected before `start_pos` adjusted
        start_pos = 1
        end_pos += 2 * raw_amount_missing_at_start
    return start_pos, end_pos
# go through the dataframe using information on each to come up with sequence file,
# specific identifier within sequence file, and the start and end to extract
# store these values as a list in a dictionary with the strain identifier as the key.
extracted_info = {}
start,end = 0,0
for row in df.itertuples():
    #print (row.length)
    start_to_get, end_to_get = determine_pos_to_get(row.sstart, row.send)
    posns_to_get = "{}-{}".format(start_to_get, end_to_get)
    record_id = row.sseqid
    strain_id = row.sseqid.split("-")[0]
    seq_fn = strain_id + "." + genome_fn_end
    extracted_info[strain_id] = [seq_fn, record_id, posns_to_get]
# Use the dictionary to get the sequences
for id_ in extracted_info:
    #%run extract_subsequence_from_FASTA.py {*extracted_info[id_]} #unpacking doesn't seem to work here in `%run`
    %run extract_subsequence_from_FASTA.py {genomes_dirn}/{extracted_info[id_][0]} {extracted_info[id_][1]} {extracted_info[id_][2]}
#package up the retrieved sequences
archive_file_name = gene_name+"_raw_ortholog_seqs.tar.gz"
# make list of extracted files using fnmatch
fn_part_to_match = "seq_extracted"
collected_seq_files_list = []
import os
import sys
import fnmatch
for file in os.listdir('.'):
    if fnmatch.fnmatch(file, fn_part_to_match+'*'):
        #print (file)
        collected_seq_files_list.append(file)
!tar czf {archive_file_name} {" ".join(collected_seq_files_list)} # use the list for archiving command
sys.stderr.write("\n\nCollected RAW sequences gathered and saved as "
    "`{}`.".format(archive_file_name))
# move the collected raw sequences to a folder in preparation for
# extracting encoding sequence from original source below
!mkdir raw
!mv seq_extracted*.fa raw
```
That archive should contain the "raw" sequence for each gene, even if the ends are a little different for each. At minimum the entire gene sequence needs to be there at this point; extra at each end is preferable at this point.
You should inspect them as soon as possible and adjust the extra sequence to add higher or lower depending on whether the ortholog genes vary more or less, respectively. The reason they don't need to be perfect yet though is because next we are going to extract the longest open reading frame, which presumably demarcates the entire gene. Then we can return to use that information to clean up the collected sequences to just be the coding sequence.
## Collect protein translations of the genes and then clean up "raw" sequences to just be coding
We'll assume the longest translatable frame in the collected "raw" sequences encodes the protein sequence for the gene orthologs of interest. We'll base these steps on the [section '20.1.13 Identifying open reading frames'](http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc299) in the present version of the [Biopython Tutorial and Cookbook](http://biopython.org/DIST/docs/tutorial/Tutorial.html) (Last Update – 18 December 2018; Biopython 1.73).
(First run the next cell to get a script needed for dealing with the strand during the translation and gathering of the encoding sequence.)
```
import os
# Fetch the helper script used to reverse-complement a FASTA file (needed
# below when the featured ORF lies on the minus strand), unless already present.
file_needed = "convert_fasta_to_reverse_complement.py"
if not os.path.isfile(file_needed):
    !curl -O https://raw.githubusercontent.com/fomightez/sequencework/master/ConvertSeq/convert_fasta_to_reverse_complement.py
```
Now to perform the work described in the header to this section...
For the next cell, I am going to use the trick of putting `%%capture` on first line from [here](https://stackoverflow.com/a/23692951/8508004) to suppress the output from the entire set making a long list of output.
For ease just monitor the progress in a launched terminal with the following code run in the directory where this notebook will be:
ls *_ortholog_gene.fa | wc -l
```
%%capture
# find the featured open reading frame and collect presumed protein sequences
# Collect the corresponding encoding sequence from the original source
# (`%%capture` suppresses the long per-strain output; monitor progress from a
# terminal instead, as described above.)
def len_ORF(items):
    '''Sort key: length of the ORF translation, the fourth element of each
    (start, end, strand, protein) tuple.'''
    orf_translation = items[3]
    return len(orf_translation)
def find_orfs_with_trans(seq, trans_table, min_protein_length):
    '''
    adapted from the present section '20.1.13 Identifying open reading frames'
    http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc299 in the
    present version of the Biopython Tutorial and Cookbook at
    http://biopython.org/DIST/docs/tutorial/Tutorial.html
    (Last Update – 18 December 2018 (Biopython 1.73)
    Same as there except altered to sort on the length of the
    open reading frame.

    seq: Biopython Seq object (nucleotide).
    trans_table: NCBI translation table number.
    min_protein_length: minimum length (aa) for an ORF to be kept.
    Returns a list of (start, end, strand, protein_string) tuples,
    sorted longest ORF first.
    '''
    answer = []
    seq_len = len(seq)
    # scan both strands in all three reading frames
    for strand, nuc in [(+1, seq), (-1, seq.reverse_complement())]:
        for frame in range(3):
            trans = str(nuc[frame:].translate(trans_table))
            trans_len = len(trans)
            aa_start = 0
            aa_end = 0
            while aa_start < trans_len:
                # next stop codon ('*') ends the current candidate ORF
                aa_end = trans.find("*", aa_start)
                if aa_end == -1:
                    aa_end = trans_len
                if aa_end-aa_start >= min_protein_length:
                    # map protein coordinates back to nucleotide positions
                    # on the forward numbering of the original sequence
                    if strand == 1:
                        start = frame+aa_start*3
                        end = min(seq_len,frame+aa_end*3+3)
                    else:
                        start = seq_len-frame-aa_end*3-3
                        end = seq_len-frame-aa_start*3
                    answer.append((start, end, strand,
                        trans[aa_start:aa_end]))
                aa_start = aa_end+1
    # longest ORF first; `len_ORF` (defined above) is the sort key
    answer.sort(key=len_ORF, reverse = True)
    return answer
def generate_rcoutput_file_name(file_name,suffix_for_saving = "_rc"):
    '''
    from https://github.com/fomightez/sequencework/blob/master/ConvertSeq/convert_fasta_to_reverse_complement.py

    Build the output file name for the reverse-complement conversion by
    inserting `suffix_for_saving` between the base name and the extension.

    Specific example
    =================
    Calling function with
        ("sequence.fa", "_rc")
    returns
        "sequence_rc.fa"

    A name containing no period at all gets the suffix appended plus a
    default ".fa" extension.
    '''
    base_name, extension = os.path.splitext(file_name)
    if '.' not in file_name:
        # no extension present: append suffix and default to FASTA extension
        return file_name + suffix_for_saving + ".fa"
    return base_name + suffix_for_saving + extension
def add_strand_to_description_line(file,strand="-1"):
    '''
    Takes a file and edits description line to add
    strand info at end.
    Saves the fixed file

    file: path of a FASTA file whose `>` description line(s) get the note.
    strand: text appended as "; {strand} strand".
    '''
    import sys
    output_file_name = "temp.txt"
    # prepare output file for saving so it will be open and ready
    with open(output_file_name, 'w') as output_file:
        # read in the input file
        with open(file, 'r') as input_handler:
            # prepare to give feeback later or allow skipping to certain start
            lines_processed = 0
            for line in input_handler:
                lines_processed += 1
                if line.startswith(">"):
                    # description line: append the strand note
                    new_line = line.strip() + "; {} strand\n".format(strand)
                else:
                    new_line = line
                # Send text to output
                output_file.write(new_line)
    # replace the original file with edited
    !mv temp.txt {file}
    # Feedback
    sys.stderr.write("\nIn {}, strand noted.".format(file))
# Settings for the ORF identification / translation step.
table = 1 #sets translation table to standard nuclear, see
# https://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi
min_pro_len = 80 #cookbook had the standard `100`. Feel free to adjust.
prot_seqs_info = {} #collect as dictionary with strain_id as key. Values to
# be list with source id as first item and protein length as second and
# strand in source seq as third item, and start and end in source sequence as fourth and fifth,
# and file name of protein and gene as sixth and seventh.
# Example key and value pair: 'YPS138':['<source id>','<protein length>',-1,52,2626,'<gene file name>','<protein file name>']
gene_seqs_fn_list = []
prot_seqs_fn_list = []
from Bio import SeqIO
for raw_seq_filen in collected_seq_files_list:
    #strain_id = raw_seq_filen[:-len_genome_fn_end] #if was dealing with source seq
    strain_id = raw_seq_filen.split("-")[0].split("seq_extracted")[1]
    record = SeqIO.read("raw/"+raw_seq_filen,"fasta")
    raw_seq_source_fn = strain_id + "." + genome_fn_end
    raw_seq_source_id = record.description.split(":")[0]
    # the longest ORF (first after sorting) is presumed to encode the ortholog
    orf_list = find_orfs_with_trans(record.seq, table, min_pro_len)
    orf_start, orf_end, strand, prot_seq = orf_list[0] #longest ORF seq for protein coding
    location_raw_seq = record.description.rsplit(":",1)[1] #get to use in calculating
    # the start and end position in original genome sequence.
    raw_loc_parts = location_raw_seq.split("-")
    start_from_raw_seq = int(raw_loc_parts[0])
    end_from_raw_seq = int(raw_loc_parts[1])
    length_extracted = len(record) #also to use in calculating relative original
    #Fix negative value. (Somehow Biopython can report negative value when hitting
    # end of sequence without encountering stop codon and negatives messes up
    # indexing later it seems.)
    if orf_start < 0:
        orf_start = 0
    # Trim back to the first Methionine, assumed to be the initiating MET.
    # (THIS MIGHT BE A SOURCE OF EXTRA 'LEADING' RESIDUES IN SOME CASES & ARGUES
    # FOR LIMITING THE AMOUNT OF FLANKING SEQUENCE ADDED TO ALLOW FOR FUZINESS.)
    try:
        amt_resi_to_trim = prot_seq.index("M")
    except ValueError:
        sys.stderr.write("**ERROR**When searching for initiating methionine,\n"
            "no Methionine found in the traslated protein sequence.**ERROR**")
        sys.exit(1)
    prot_seq = prot_seq[amt_resi_to_trim:]
    len_seq_trimmed = amt_resi_to_trim * 3
    # Calculate the adjusted start and end values for the untrimmed ORF
    adj_start = start_from_raw_seq + orf_start
    adj_end = end_from_raw_seq - (length_extracted - orf_end)
    # Adjust for trimming for appropriate strand.
    if strand == 1:
        adj_start += len_seq_trimmed
        #adj_end += 3 # turns out stop codon is part of numbering biopython returns
    elif strand == -1:
        adj_end -= len_seq_trimmed
        #adj_start -= 3 # turns out stop codon is part of numbering biopython returns
    else:
        sys.stderr.write("**ERROR**No strand match option detected!**ERROR**")
        sys.exit(1)
    # Collect the sequence for the actual gene encoding region from
    # the original sequence. This way the original numbers will
    # be put in the file.
    start_n_end_str = "{}-{}".format(adj_start,adj_end)
    %run extract_subsequence_from_FASTA.py {genomes_dirn}/{raw_seq_source_fn} {raw_seq_source_id} {start_n_end_str}
    # rename the extracted subsequence a more distinguishing name and notify
    g_output_file_name = strain_id +"_" + gene_name + "_ortholog_gene.fa"
    !mv {raw_seq_filen} {g_output_file_name} # because the sequence saved happens to
    # be same as raw sequence file saved previously, that name can be used to
    # rename new file.
    gene_seqs_fn_list.append(g_output_file_name)
    sys.stderr.write("\n\nRenamed gene file to "
        "`{}`.".format(g_output_file_name))
    # Convert extracted sequence to reverse complement if translation was on negative strand.
    if strand == -1:
        %run convert_fasta_to_reverse_complement.py {g_output_file_name}
        # replace original sequence file with the produced file
        produced_fn = generate_rcoutput_file_name(g_output_file_name)
        !mv {produced_fn} {g_output_file_name}
        # add (after saved) onto the end of the description line for that `-1 strand`
        # No way to do this in my current version of convert sequence. So editing descr line.
        add_strand_to_description_line(g_output_file_name)
    #When settled on actual protein encoding sequence, fill out
    # description to use for saving the protein sequence.
    prot_descr = (record.description.rsplit(":",1)[0]+ " "+ gene_name
        + "_ortholog"+ "| " +str(len(prot_seq)) + " aas | from "
        + raw_seq_source_id + " "
        + str(adj_start) + "-"+str(adj_end))
    if strand == -1:
        prot_descr += "; {} strand".format(strand)
    # save the protein sequence as FASTA
    chunk_size = 70 #<---amino acids per line to have in FASTA
    prot_seq_chunks = [prot_seq[i:i+chunk_size] for i in range(
        0, len(prot_seq),chunk_size)]
    prot_seq_fa = ">" + prot_descr + "\n"+ "\n".join(prot_seq_chunks)
    p_output_file_name = strain_id +"_" + gene_name + "_protein_ortholog.fa"
    with open(p_output_file_name, 'w') as output:
        output.write(prot_seq_fa)
    prot_seqs_fn_list.append(p_output_file_name)
    sys.stderr.write("\n\nProtein sequence saved as "
        "`{}`.".format(p_output_file_name))
    # at end store information in `prot_seqs_info` for later making a dataframe
    # and then text table for saving summary
    #'YPS138':['<source id>',<protein length>,-1,52,2626,'<gene file name>','<protein file name>']
    prot_seqs_info[strain_id] = [raw_seq_source_id,len(prot_seq),strand,adj_start,adj_end,
        g_output_file_name,p_output_file_name]
    sys.stderr.write("\n******END OF A SET OF PROTEIN ORTHOLOG "
        "AND ENCODING GENE********")
# use `prot_seqs_info` for saving a summary text table (first convert to dataframe?)
table_fn_prefix = gene_name + "_orthologs_table"
table_fn = table_fn_prefix + ".tsv"
pkl_table_fn = table_fn_prefix + ".pkl"
import pandas as pd
info_df = pd.DataFrame.from_dict(prot_seqs_info, orient='index',
    columns=['descr_id', 'length', 'strand', 'start','end','gene_file','prot_file']) # based on
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.from_dict.html and
# note from Python 3.6 that `pd.DataFrame.from_items` is deprecated;
#"Please use DataFrame.from_dict"
info_df.to_pickle(pkl_table_fn)
info_df.to_csv(table_fn, sep='\t') # keep index is default
sys.stderr.write("Text file of associated details saved as '{}'.".format(table_fn))
# pack up archive of gene and protein sequences plus the table
seqs_list = gene_seqs_fn_list + prot_seqs_fn_list + [table_fn,pkl_table_fn]
archive_file_name = gene_name+"_ortholog_seqs.tar.gz"
!tar czf {archive_file_name} {" ".join(seqs_list)} # use the list for archiving command
sys.stderr.write("\nCollected gene and protein sequences"
    " (plus table of details) gathered and saved as "
    "`{}`.".format(archive_file_name))
```
Save the tarballed archive to your local machine.
-----
## Estimate the count of the heptad repeats
Make a table of the estimate of heptad repeats for each orthologous protein sequence.
```
# get the 'patmatch results to dataframe' script
# (converts PatMatch pattern-search output into a pandas dataframe)
!curl -O https://raw.githubusercontent.com/fomightez/sequencework/master/patmatch-utilities/patmatch_results_to_df.py
```
Using the trick of putting `%%capture` on first line from [here](https://stackoverflow.com/a/23692951/8508004) to suppress the output from `patmatch_results_to_df` function from filling up cell.
```
%%time
%%capture
# Go through each protein sequence file and look for matches to heptad pattern
# LATER POSSIBLE IMPROVEMENT. Translate pasted gene sequence and add SGD REF S228C as first in list `prot_seqs_fn_list`. Because
# although this set of orthologs includes essentially S228C, other lists won't and best to have reference for comparing.
heptad_pattern = "[YF]SP[TG]SP[STAGN]" # will catch repeats#2 through #26 of S288C according to Corden, 2013 PMID: 24040939
from patmatch_results_to_df import patmatch_results_to_df
sum_dfs = []
raw_dfs = []
for prot_seq_fn in prot_seqs_fn_list:
    # PatMatch needs single-line FASTA; unjustify makes `<fn>.prepared`
    !perl ../../patmatch_1.2/unjustify_fasta.pl {prot_seq_fn}
    output = !perl ../../patmatch_1.2/patmatch.pl -p {heptad_pattern} {prot_seq_fn}.prepared
    os.remove(os.path.join(prot_seq_fn+".prepared")) #delete file made for PatMatch
    raw_pm_df = patmatch_results_to_df(output.n, pattern=heptad_pattern, name="CTD_heptad")
    raw_pm_df.sort_values('hit_number', ascending=False, inplace=True)
    # top row per FASTA id = total hit count for that sequence
    sum_dfs.append(raw_pm_df.groupby('FASTA_id').head(1))
    raw_dfs.append(raw_pm_df)
sum_pm_df = pd.concat(sum_dfs, ignore_index=True)
sum_pm_df.sort_values('hit_number', ascending=False, inplace=True)
sum_pm_df = sum_pm_df[['FASTA_id','hit_number']]
#make protein length into dictionary with ids as keys to map to FASTA_ids in
# order to add protein length as a column in summary table
length_info_by_id= dict(zip(info_df.descr_id,info_df.length))
sum_pm_df['prot_length'] = sum_pm_df['FASTA_id'].map(length_info_by_id)
sum_pm_df = sum_pm_df.reset_index(drop=True)
raw_pm_df = pd.concat(raw_dfs, ignore_index=True)
```
Because of use of `%%capture` to suppress output, need a separate cell to see results summary. (Only showing parts here because will add more useful information below.)
```
# Because `%%capture` suppressed output above, peek at the summary here.
sum_pm_df.head() # don't show all yet since lots and want to make this dataframe more useful below
sum_pm_df.tail() # don't show all yet since lots and want to make this dataframe more useful below
```
I assume that '+ 2' should be added to the hit_number for each based on S288C according to [Corden, 2013](https://www.ncbi.nlm.nih.gov/pubmed/24040939) (or `+1` like [Hsin and Manley, 2012](https://www.ncbi.nlm.nih.gov/pubmed/23028141)); however, that is something that could be explored further.
WHAT ONES MISSING NOW?
Computationally check if any genomes missing from the list of orthologs?
```
subjids = df.sseqid.tolist()
#print (subjids)
#print (subjids[0:10])
subjids = [x.split("-")[0] for x in subjids]
#print (subjids)
#print (subjids[0:10])
len_genome_fn_end = len(fn_to_check) + 1 # plus one to accound for the period that will be
# between `fn_to_check` and strain_id`, such as `SK1.genome.fa`
genome_ids = [x[:-len_genome_fn_end] for x in genomes]
#print (genome_ids[0:10])
ortholg_ids = sum_pm_df.FASTA_id.tolist()
ortholg_ids = [x.split("-")[0] for x in ortholg_ids]
a = set(genome_ids)
#print (a)
print ("initial:",len(a))
r = set(subjids)
print("BLAST results:",len(r))
print ("missing from BLAST:",len(a-r))
if len(a-r):
#print("\n")
print("ids missing in BLAST results:",a-r)
#a - r
print ("\n\n=====POST-BLAST=======\n\n")
o = set(ortholg_ids)
print("orthologs extracted:",len(o))
print ("missing post-BLAST:",len(r-o))
if len(r-o):
print("\n")
print("ids lost post-BLAST:",r-o)
#r - o
print ("\n\n\n=====SUMMARY=======\n\n")
if len(a-r) and len(r-o):
print("\nAll missing in end:",(a-r) | (r-o))
```
## Make the Summarizing Dataframe more informative
Add information on whether a stretch of 'N's is present. Making the data suspect and fit to be filtered out. Distinguish between cases where it is in what corresponds to the last third of the protein vs. elsewhere, if possible. Plus whether stop codon is present at end of encoding sequence because such cases also probably should be filtered out.
Add information from the supplemental data table so possible patterns can be assessed more easily.
#### Add information about N stretches and stop codon
```
# Collect following information for each gene sequence:
# N stretch of at least two or more present in first 2/3 of gene sequence
# N stretch of at least two or more present in last 1/3 of gene sequence
# stop codon encoded at end of sequence?
import re
min_number_Ns_in_row_to_collect = 2
# The doubled braces escape `str.format`, so this compiles the
# case-insensitive regex "N{2,}" (two or more Ns in a row).
pattern_obj = re.compile("N{{{},}}".format(min_number_Ns_in_row_to_collect), re.I) # adapted from
# code worked out in `collapse_large_unknown_blocks_in_DNA_sequence.py`, which relied heavily on
# https://stackoverflow.com/a/250306/8508004
def longest_stretch2ormore_found(string, pattern_obj):
    '''
    Scan `string` with `pattern_obj` (runs of two or more Ns) and return
    the length of the longest matching stretch, or zero when nothing
    matches.
    Based on https://stackoverflow.com/a/1155805/8508004 and
    GSD Assessing_ambiguous_nts_in_nuclear_PB_genomes.ipynb
    '''
    match_lengths = (len(m.group()) for m in pattern_obj.finditer(string))
    return max(match_lengths, default=0)
def chunk(xs, n):
    '''Split the list, xs, into n chunks;
    from http://wordaligned.org/articles/slicing-a-list-evenly-with-python'''
    total = len(xs)
    assert 0 < n <= total
    size, leftover = divmod(total, n)
    pieces = [xs[start:start + size] for start in range(0, total, size)]
    # fold any remainder into the final chunk so exactly n pieces remain
    pieces[n - 1:] = [xs[-leftover - size:]]
    return pieces
# Per-gene results keyed by the FASTA id parsed from each description line.
n_stretch_last_third_by_id = {}
n_stretch_first_two_thirds_by_id = {}
stop_codons = ['TAA','TAG','TGA']
stop_codon_presence_by_id = {}
for fn in gene_seqs_fn_list:
    # read in sequence without using pyfaidx because small and not worth making indexing files
    lines = []
    with open(fn, 'r') as seqfile:
        for line in seqfile:
            lines.append(line.strip())
    descr_line = lines[0]
    seq = ''.join(lines[1:])
    gene_seq_id = descr_line.split(":")[0].split(">")[1]#first line parsed for all in front of ":" and without caret
    # determine first two-thirds and last third
    chunks = chunk(seq,3)
    assert len(chunks) == 3, ("The sequence must be split in three parts'.")
    first_two_thirds = chunks[0] + chunks[1]
    last_third = chunks[-1]
    # Examine each part for runs of Ns (ambiguous bases)
    n_stretch_last_third_by_id[gene_seq_id] = longest_stretch2ormore_found(last_third,pattern_obj)
    n_stretch_first_two_thirds_by_id[gene_seq_id] = longest_stretch2ormore_found(first_two_thirds,pattern_obj)
    #print(gene_seq_id)
    #print (seq[-3:] in stop_codons)
    #stop_codon_presence_by_id[gene_seq_id] = seq[-3:] in stop_codons
    stop_codon_presence_by_id[gene_seq_id] = "+" if seq[-3:] in stop_codons else "-"
# Add collected information to sum_pm_df
sum_pm_df['NstretchLAST_THIRD'] = sum_pm_df['FASTA_id'].map(n_stretch_last_third_by_id)
sum_pm_df['NstretchELSEWHERE'] = sum_pm_df['FASTA_id'].map(n_stretch_first_two_thirds_by_id)
sum_pm_df['stop_codon'] = sum_pm_df['FASTA_id'].map(stop_codon_presence_by_id)
# Safe to ignore any warnings about copy. I think because I swapped columns in and out
# of sum_pm_df earlier perhaps.
```
#### Add details on strains from the published supplemental information
This section is based on [this notebook entitled 'GSD: Add Supplemental data info to nt count data for 1011 cerevisiae collection'](https://github.com/fomightez/cl_sq_demo-binder/blob/master/notebooks/GSD/GSD%20Add_Supplemental_data_info_to_nt_count%20data%20for%201011_cerevisiae_collection.ipynb).
```
# Fetch the published supplemental strain table (Peter et al. 2018) and load
# it; xlrd is needed by pandas to read the .xls file.
!curl -OL https://static-content.springer.com/esm/art%3A10.1038%2Fs41586-018-0030-5/MediaObjects/41586_2018_30_MOESM3_ESM.xls
!pip install xlrd
import pandas as pd
#sum_pm_TEST_df = sum_pm_df.copy()
supp_df = pd.read_excel('41586_2018_30_MOESM3_ESM.xls', sheet_name=0, header=3, skipfooter=31)
# drop the 'SACE_' prefix so names match the strain id tags used here
supp_df['Standardized name'] = supp_df['Standardized name'].str.replace('SACE_','')
suppl_info_dict = supp_df.set_index('Standardized name').to_dict('index')
#Make new column with simplified strain_id tags to use for relating to supplemental table
def add_id_tags(fasta_fn):
    """Return the three-character strain id tag that starts a FASTA file name."""
    tag_length = 3
    return fasta_fn[:tag_length]
# Relate supplemental-table details to the summary dataframe via a temporary
# three-character strain id tag column.
sum_pm_df["id_tag"] = sum_pm_df['FASTA_id'].apply(add_id_tags)
# Per-strain lookup dicts pulled out of the supplemental table.
ploidy_dict_by_id = {x:suppl_info_dict[x]['Ploidy'] for x in suppl_info_dict}
aneuploidies_dict_by_id = {x:suppl_info_dict[x]['Aneuploidies'] for x in suppl_info_dict}
eco_origin_dict_by_id = {x:suppl_info_dict[x]['Ecological origins'] for x in suppl_info_dict}
clade_dict_by_id = {x:suppl_info_dict[x]['Clades'] for x in suppl_info_dict}
sum_pm_df['Ploidy'] = sum_pm_df.id_tag.map(ploidy_dict_by_id) #Pandas docs has `Index.map` (uppercase `I`) but only lowercase works.
sum_pm_df['Aneuploidies'] = sum_pm_df.id_tag.map(aneuploidies_dict_by_id)
sum_pm_df['Ecological origin'] = sum_pm_df.id_tag.map(eco_origin_dict_by_id)
sum_pm_df['Clade'] = sum_pm_df.id_tag.map(clade_dict_by_id)
# remove the `id_tag` column added for relating details from supplemental to summary df
# FIX: `drop('id_tag', 1)` relied on the positional `axis` argument, which was
# deprecated and removed in pandas 2.0; use the explicit keyword form.
sum_pm_df = sum_pm_df.drop(columns='id_tag')
# use following two lines when sure want to see all and COMMENT OUT BOTTOM LINE
#with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# display(sum_pm_df)
sum_pm_df
```
I assume that '+ 2' should be added to the hit_number for each based on S288C according to [Corden, 2013](https://www.ncbi.nlm.nih.gov/pubmed/24040939) (or `+1` like [Hsin and Manley, 2012](https://www.ncbi.nlm.nih.gov/pubmed/23028141)); however, that is something that could be explored further.
## Filter collected set to those that are 'complete'
For plotting and summarizing with a good set of information, best to remove any where the identified ortholog gene has stretches of 'N's or lacks a stop codon.
(Keep unfiltered dataframe around though.)
```
# Keep an unfiltered copy around for the comparison and archiving cells below.
sum_pm_UNFILTEREDdf = sum_pm_df.copy()
#subset to those where both columns for Nstretch assessment are zero
sum_pm_df = sum_pm_df[(sum_pm_df[['NstretchLAST_THIRD','NstretchELSEWHERE']] == 0).all(axis=1)] # based on https://codereview.stackexchange.com/a/185390
#remove any where there isn't a stop codon
sum_pm_df = sum_pm_df.drop(sum_pm_df[sum_pm_df.stop_codon != '+'].index)
```
Computationally summarize result of filtering in comparison to previous steps:
```
# Tally how many strains survive each stage: initial genomes -> BLAST hits ->
# extracted orthologs -> filtered set. `df`, `genomes`, and `fn_to_check` come
# from earlier cells of this notebook.
subjids = df.sseqid.tolist()
#print (subjids)
#print (subjids[0:10])
subjids = [x.split("-")[0] for x in subjids]
#print (subjids)
#print (subjids[0:10])
len_genome_fn_end = len(fn_to_check) + 1 # plus one to account for the period that will be
# between `fn_to_check` and strain_id`, such as `SK1.genome.fa`
genome_ids = [x[:-len_genome_fn_end] for x in genomes]
#print (genome_ids[0:10])
ortholg_ids = sum_pm_UNFILTEREDdf.FASTA_id.tolist()
ortholg_ids = [x.split("-")[0] for x in ortholg_ids]
filtered_ids = sum_pm_df.FASTA_id.tolist()
filtered_ids =[x.split("-")[0] for x in filtered_ids]
# Set arithmetic gives the ids lost at each stage.
a = set(genome_ids)
#print (a)
print ("initial:",len(a))
r = set(subjids)
print("BLAST results:",len(r))
print ("missing from BLAST:",len(a-r))
if len(a-r):
    #print("\n")
    print("ids missing in BLAST results:",a-r)
#a - r
print ("\n\n=====POST-BLAST=======\n\n")
o = set(ortholg_ids)
print("orthologs extracted:",len(o))
print ("missing post-BLAST:",len(r-o))
if len(r-o):
    print("\n")
    print("ids lost post-BLAST:",r-o)
#r - o
print ("\n\n\n=====PRE-FILTERING=======\n\n")
print("\nNumber before filtering:",len(sum_pm_UNFILTEREDdf))
if len(a-r) and len(r-o):
    print("\nAll missing in unfiltered:",(a-r) | (r-o))
print ("\n\n\n=====POST-FILTERING SUMMARY=======\n\n")
f = set(filtered_ids)
print("\nNumber left in filtered set:",len(sum_pm_df))
print ("Number removed by filtering:",len(o-f))
if len(a-r) and len(r-o) and len(o-f):
    print("\nAll missing in filtered:",(a-r) | (r-o) | (o-f))
# use following two lines when sure want to see all and COMMENT OUT BOTTOM LINE
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    display(sum_pm_df)
#sum_pm_df
```
I assume that '+ 2' should be added to the hit_number for each based on S288C according to [Corden, 2013](https://www.ncbi.nlm.nih.gov/pubmed/24040939) (or `+1` like [Hsin and Manley, 2012](https://www.ncbi.nlm.nih.gov/pubmed/23028141)); however, that is something that could be explored further.
#### Archive the 'Filtered' set of sequences
Above I saved all the gene and deduced protein sequences of the orthologs in a single archive. It might be useful to just have an archive of the 'filtered' set.
```
# pack up archive of gene and protein sequences for the 'filtered' set.
# Include the summary table too.
# This is different than the other sets I made because this 'filtering' was
# done using the dataframe and so I don't have the file associations. The file names
# though can be generated using the unfiltered file names for the genes and proteins
# and sorting which ones don't remain in the filtered set using 3-letter tags at
# the beginning of the entries in `FASTA_id` column to relate them.
# Use the `FASTA_id` column of sum_pm_df to make a list of tags that remain in filtered set
tags_remaining_in_filtered = [x[:3] for x in sum_pm_df.FASTA_id.tolist()]
# Go through the gene and protein sequence list and collect those where the first
# three letters match the tag
gene_seqs_FILTfn_list = [x for x in gene_seqs_fn_list if x[:3] in tags_remaining_in_filtered]
prot_seqs_FILTfn_list = [x for x in prot_seqs_fn_list if x[:3] in tags_remaining_in_filtered]
# Save the files in those two lists along with the sum_pm_df (as tabular data and pickled form)
patmatchsum_fn_prefix = gene_name + "_orthologs_patmatch_results_summary"
patmatchsum_fn = patmatchsum_fn_prefix + ".tsv"
pklsum_patmatch_fn = patmatchsum_fn_prefix + ".pkl"
import pandas as pd
sum_pm_df.to_pickle(pklsum_patmatch_fn)
sum_pm_df.to_csv(patmatchsum_fn, sep='\t') # keep index is default
# Bundle sequences plus both summary-table forms into one tarball.
FILTEREDseqs_n_df_list = gene_seqs_FILTfn_list + prot_seqs_FILTfn_list + [patmatchsum_fn,pklsum_patmatch_fn]
archive_file_name = gene_name+"_ortholog_seqsFILTERED.tar.gz"
!tar czf {archive_file_name} {" ".join(FILTEREDseqs_n_df_list)} # use the list for archiving command
sys.stderr.write("\nCollected gene and protein sequences"
                 " (plus table of details) for 'FILTERED' set gathered and saved as "
                 "`{}`.".format(archive_file_name))
```
Download the 'filtered' sequences to your local machine.
## Summarizing with filtered set
Plot distribution.
```
%matplotlib inline
# Distribution of heptad-repeat counts across the filtered strain set,
# with a percentage annotation above each bar.
import math
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
#Want an image file of the figure saved?
saveplot = True
saveplot_fn_prefix = 'heptad_repeat_distribution'
#sns.distplot(sum_pm_df["hit_number"], kde=False, bins = max(sum_pm_df["hit_number"]));
p= sns.countplot(sum_pm_df["hit_number"],
                 order = list(range(sum_pm_df.hit_number.min(),sum_pm_df.hit_number.max()+1)),
                 color="C0", alpha= 0.93)
#palette="Blues"); # `order` to get those categories with zero
# counts to show up from https://stackoverflow.com/a/45359713/8508004
p.set_xlabel("heptad repeats")
#add percent above bars, based on code in middle of https://stackoverflow.com/a/33259038/8508004
ncount = len(sum_pm_df)
for pat in p.patches:
    x=pat.get_bbox().get_points()[:,0]
    y=pat.get_bbox().get_points()[1,1]
    # note that this check on the next line was necessary to add when I went back to cases where there's
    # no counts for certain categories and so `y` was coming up `nan` for those and causing error
    # about needing positive value for the y value; `math.isnan(y)` based on https://stackoverflow.com/a/944733/8508004
    if not math.isnan(y):
        p.annotate('{:.1f}%'.format(100.*y/(ncount)), (x.mean(), y), ha='center', va='bottom', size = 9, color='#333333')
if saveplot:
    fig = p.get_figure() #based on https://stackoverflow.com/a/39482402/8508004
    fig.savefig(saveplot_fn_prefix + '.png', bbox_inches='tight')
    fig.savefig(saveplot_fn_prefix + '.svg');
```
However, with the entire 1011 collection, those at the bottom cannot really be seen. The next plot shows this by limiting the y-axis to 103.
It should be possible to make a broken y-axis plot for this eventually but not right now as there is no automagic way. So for now will need to composite the two plots together outside.
(Note that adding percents annotations makes height of this plot look odd in the notebook cell for now.)
```
%matplotlib inline
# Same count plot as above but with the y-axis capped at 103 so the
# low-count categories are visible.
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
#Want an image file of the figure saved?
saveplot = True
saveplot_fn_prefix = 'heptad_repeat_distributionLIMIT103'
#sns.distplot(sum_pm_df["hit_number"], kde=False, bins = max(sum_pm_df["hit_number"]));
p= sns.countplot(sum_pm_df["hit_number"],
                 order = list(range(sum_pm_df.hit_number.min(),sum_pm_df.hit_number.max()+1)),
                 color="C0", alpha= 0.93)
#palette="Blues"); # `order` to get those categories with zero
# counts to show up from https://stackoverflow.com/a/45359713/8508004
p.set_xlabel("heptad repeats")
plt.ylim(0, 103)
#add percent above bars, based on code in middle of https://stackoverflow.com/a/33259038/8508004
ncount = len(sum_pm_df)
for pat in p.patches:
    x=pat.get_bbox().get_points()[:,0]
    y=pat.get_bbox().get_points()[1,1]
    # note that this check on the next line was necessary to add when I went back to cases where there's
    # no counts for certain categories and so `y` was coming up `nan` for those and causing error
    # about needing positive value for the y value; `math.isnan(y)` based on https://stackoverflow.com/a/944733/8508004
    if not math.isnan(y):
        p.annotate('{:.1f}%'.format(100.*y/(ncount)), (x.mean(), y), ha='center', va='bottom', size = 9, color='#333333')
if saveplot:
    fig = p.get_figure() #based on https://stackoverflow.com/a/39482402/8508004
    # NOTE(review): unlike the plot above, the PNG here is saved without
    # `bbox_inches='tight'` -- confirm whether that difference is intended.
    fig.savefig(saveplot_fn_prefix + '.png')
    fig.savefig(saveplot_fn_prefix + '.svg');
```
I assume that '+ 2' should be added to the hit_number for each based on S288C according to [Corden, 2013](https://www.ncbi.nlm.nih.gov/pubmed/24040939) (or `+1` like [Hsin and Manley, 2012](https://www.ncbi.nlm.nih.gov/pubmed/23028141)); however, that is something that could be explored further.
```
%matplotlib inline
# above line works for JupyterLab which I was developing in. Try `%matplotlib notebook` for when in classic.
# Visualization
# Per-strain strip plot of heptad-repeat counts, colored by Clade.
# This is loosely based on my past use of seaborn when making `plot_sites_position_across_chromosome.py` and related scripts.
# For example, see `GC-clusters relative mito chromosome and feature` where I ran
# `%run plot_sites_position_across_chromosome.py GC_df_for_merging.pkl -o strand_ofGCacross_mito_chrom`
# add the strain info for listing that without chr info & add species information for coloring on that
chromosome_id_prefix = "-"
def FASTA_id_to_strain(FAid):
    '''
    use FASTA_id column value to convert to strain_id
    and then return the strain_id
    '''
    return FAid.split(chromosome_id_prefix)[0]
sum_pm_df_for_plot = sum_pm_df.copy()
sum_pm_df_for_plot['strain'] = sum_pm_df['FASTA_id'].apply(FASTA_id_to_strain)
# sum_pm_df['species'] = sum_pm_df['FASTA_id'].apply(strain_to_species) # since need species for label plot strips
# it is easier to add species column first and then use map instead of doing both at same with one `apply`
# of a function or both separately, both with `apply` of two different function.
# sum_pm_df['species'] = sum_pm_df['strain'].apply(strain_to_species)
sum_pm_df_for_plot['species'] = 'cerevisiae'
#Want an image file of the figure saved?
saveplot = True
saveplot_fn_prefix = 'heptad_repeats_by_strain'
import matplotlib.pyplot as plt
# Very tall figure when there are many strains so every y-axis label fits.
if len(sum_pm_df) > 60:
    plt.figure(figsize=(8,232))
else:
    plt.figure(figsize=(8,12))
import seaborn as sns
sns.set()
# Simple look - Comment out everything below to the next two lines to see it again.
p = sns.stripplot(x="hit_number", y="strain", data=sum_pm_df_for_plot, marker="h", size=7.5, alpha=.98, palette="tab20b")
p = sns.stripplot(x="hit_number", y="strain", data=sum_pm_df_for_plot, marker="D", size=9.5, alpha=.98, hue="Clade")
# NOTE CANNOT JUST USE ONE WITH `hue` by 'Clade' because several don't have Clades assigned in the supplemental data
# and so those would be left off. This overlays the two and doesn't cause artifacts when size of first marker smaller.
p.set_xlabel("heptad repeats")
#p.set_xticklabels([" ","23"," ","24", " ", "25"]) # This was much easier than all the stuff I tried for `Adjusted` look below
# and the only complaint I have with the results is that what I assume are the `minor` tick lines show up; still ended up
# needing this when added `xticks = p.xaxis.get_major_ticks()` in order to not show decimals for ones I kept
#p.set(xticks=[]) # this works to remove the ticks entirely; however, I want to keep major ticks
'''
xticks = p.xaxis.get_major_ticks() #based on https://stackoverflow.com/q/50820043/8508004
for i in range(len(xticks)):
#print (i) # WAS FOR DEBUGGING
keep_ticks = [1,3,5] #harcoding essentially again, but at least it works
if i not in keep_ticks:
xticks[i].set_visible(False)
'''
'''
# Highly Adjusted look - Comment out default look parts above. Ended up going with simple above because still couldn't get
# those with highest number of repeats with combination I could come up with.
sum_pm_df_for_plot["repeats"] = sum_pm_df_for_plot["hit_number"].astype(str) # when not here (use `x="hit_number"` in plot) or
# tried `.astype('category')` get plotting of the 0.5 values too
sum_pm_df_for_plot.sort_values('hit_number', ascending=True, inplace=True) #resorting again was necessary when
# added `sum_pm_df["hit_number"].astype(str)` to get 'lower' to 'higher' as left to right for x-axis; otherwise
# it was putting the first rows on the left, which happened to be the 'higher' repeat values
#p = sns.catplot(x="repeats", y="strain", hue="species", data=sum_pm_df, marker="D", size=10, alpha=.98) #marker size ignored in catplot?
p = sns.stripplot(x="repeats", y="strain", hue="species", data=sum_pm_df, marker="D", size=10, alpha=.98)
#p = sns.stripplot(x="repeats", y="strain", hue="species", order = list(species_dict.keys()), data=sum_pm_df_for_plot, marker="D",
# size=10, alpha=.98) # not fond of essentially harcoding to strain order but makes more logical sense to have
# strains with most repeats at the top of the y-axis; adding `order` makes `sort` order be ignored
p.set_xlabel("heptad repeats")
sum_pm_df_for_plot.sort_values('hit_number', ascending=False, inplace=True) #revert to descending sort for storing df;
'''
if saveplot:
    fig = p.get_figure() #based on https://stackoverflow.com/a/39482402/8508004
    fig.savefig(saveplot_fn_prefix + '.png', bbox_inches='tight')
    fig.savefig(saveplot_fn_prefix + '.svg');
```
(Hexagons are used for those without an assigned clade in [the supplemental data Table 1](https://www.nature.com/articles/s41586-018-0030-5) in the plot above.)
I assume that '+ 2' should be added to the hit_number for each based on S288C according to [Corden, 2013](https://www.ncbi.nlm.nih.gov/pubmed/24040939) (or `+1` like [Hsin and Manley, 2012](https://www.ncbi.nlm.nih.gov/pubmed/23028141)); however, that is something that could be explored further.
```
%matplotlib inline
# above line works for JupyterLab which I was developing in. Try `%matplotlib notebook` for when in classic.
# Visualization
# Variant of the previous strip plot: points are colored by protein length
# ('prot_length') instead of Clade.
# This is loosely based on my past use of seaborn when making `plot_sites_position_across_chromosome.py` and related scripts.
# For example, see `GC-clusters relative mito chromosome and feature` where I ran
# `%run plot_sites_position_across_chromosome.py GC_df_for_merging.pkl -o strand_ofGCacross_mito_chrom`
# add the strain info for listing that without chr info & add species information for coloring on that
chromosome_id_prefix = "-"
def FASTA_id_to_strain(FAid):
    '''
    use FASTA_id column value to convert to strain_id
    and then return the strain_id
    '''
    return FAid.split(chromosome_id_prefix)[0]
sum_pm_df_for_plot = sum_pm_df.copy()
sum_pm_df_for_plot['strain'] = sum_pm_df['FASTA_id'].apply(FASTA_id_to_strain)
# sum_pm_df['species'] = sum_pm_df['FASTA_id'].apply(strain_to_species) # since need species for label plot strips
# it is easier to add species column first and then use map instead of doing both at same with one `apply`
# of a function or both separately, both with `apply` of two different function.
# sum_pm_df['species'] = sum_pm_df['strain'].apply(strain_to_species)
sum_pm_df_for_plot['species'] = 'cerevisiae'
#Want an image file of the figure saved?
saveplot = True
saveplot_fn_prefix = 'heptad_repeats_by_proteinlen'
import matplotlib.pyplot as plt
# Very tall figure when there are many strains so every y-axis label fits.
if len(sum_pm_df) > 60:
    plt.figure(figsize=(8,232))
else:
    plt.figure(figsize=(8,12))
import seaborn as sns
sns.set()
# Simple look - Comment out everything below to the next two lines to see it again.
#p = sns.stripplot(x="hit_number", y="strain", data=sum_pm_df_for_plot, marker="h", size=7.5, alpha=.98, palette="tab20b")
p = sns.stripplot(x="hit_number", y="strain", data=sum_pm_df_for_plot, marker="D", size=9.5, alpha=.98, hue="prot_length")
# NOTE: here a single stripplot with `hue="prot_length"` suffices (every row has
# a protein length), unlike the Clade version above which needed an overlay.
p.set_xlabel("heptad repeats")
#p.set_xticklabels([" ","23"," ","24", " ", "25"]) # This was much easier than all the stuff I tried for `Adjusted` look below
# and the only complaint I have with the results is that what I assume are the `minor` tick lines show up; still ended up
# needing this when added `xticks = p.xaxis.get_major_ticks()` in order to not show decimals for ones I kept
#p.set(xticks=[]) # this works to remove the ticks entirely; however, I want to keep major ticks
'''
xticks = p.xaxis.get_major_ticks() #based on https://stackoverflow.com/q/50820043/8508004
for i in range(len(xticks)):
#print (i) # WAS FOR DEBUGGING
keep_ticks = [1,3,5] #harcoding essentially again, but at least it works
if i not in keep_ticks:
xticks[i].set_visible(False)
'''
'''
# Highly Adjusted look - Comment out default look parts above. Ended up going with simple above because still couldn't get
# those with highest number of repeats with combination I could come up with.
sum_pm_df_for_plot["repeats"] = sum_pm_df_for_plot["hit_number"].astype(str) # when not here (use `x="hit_number"` in plot) or
# tried `.astype('category')` get plotting of the 0.5 values too
sum_pm_df_for_plot.sort_values('hit_number', ascending=True, inplace=True) #resorting again was necessary when
# added `sum_pm_df["hit_number"].astype(str)` to get 'lower' to 'higher' as left to right for x-axis; otherwise
# it was putting the first rows on the left, which happened to be the 'higher' repeat values
#p = sns.catplot(x="repeats", y="strain", hue="species", data=sum_pm_df, marker="D", size=10, alpha=.98) #marker size ignored in catplot?
p = sns.stripplot(x="repeats", y="strain", hue="species", data=sum_pm_df, marker="D", size=10, alpha=.98)
#p = sns.stripplot(x="repeats", y="strain", hue="species", order = list(species_dict.keys()), data=sum_pm_df_for_plot, marker="D",
# size=10, alpha=.98) # not fond of essentially harcoding to strain order but makes more logical sense to have
# strains with most repeats at the top of the y-axis; adding `order` makes `sort` order be ignored
p.set_xlabel("heptad repeats")
sum_pm_df_for_plot.sort_values('hit_number', ascending=False, inplace=True) #revert to descending sort for storing df;
'''
if saveplot:
    fig = p.get_figure() #based on https://stackoverflow.com/a/39482402/8508004
    fig.savefig(saveplot_fn_prefix + '.png', bbox_inches='tight')
    fig.savefig(saveplot_fn_prefix + '.svg');
```
I assume that '+ 2' should be added to the hit_number for each based on S288C according to [Corden, 2013](https://www.ncbi.nlm.nih.gov/pubmed/24040939) (or `+1` like [Hsin and Manley, 2012](https://www.ncbi.nlm.nih.gov/pubmed/23028141)); however, that is something that could be explored further.
## Make raw and summary data available for use elsewhere
All the raw data is there for each strain in `raw_pm_df`. For example, the next cell shows how to view the data associated with the summary table for isolate ADK_8:
```
# Example: the raw per-hit rows behind the summary entry for isolate ADK_8,
# ordered by hit number. `raw_pm_df` was built in the data-collection cells.
ADK_8_raw = raw_pm_df[raw_pm_df['FASTA_id'] == 'ADK_8-20587'].sort_values('hit_number', ascending=True).reset_index(drop=True)
ADK_8_raw
```
The summary and raw data will be packaged up into one file in the cell below. One of the forms will be a tabular text data ('.tsv') files that can be opened in any spreadsheet software.
```
# save summary and raw results for use elsewhere (or use `.pkl` files for reloading the pickled dataframe into Python/pandas)
patmatch_fn_prefix = gene_name + "_orthologs_patmatch_results"
patmatchsum_fn_prefix = gene_name + "_orthologs_patmatch_results_summary"
patmatchsumFILTERED_fn_prefix = gene_name + "_orthologs_patmatch_results_summaryFILTERED"
patmatch_fn = patmatch_fn_prefix + ".tsv"
pkl_patmatch_fn = patmatch_fn_prefix + ".pkl"
patmatchsumUNF_fn = patmatchsumFILTERED_fn_prefix + ".tsv"
pklsum_patmatchUNF_fn = patmatchsumFILTERED_fn_prefix + ".pkl"
patmatchsum_fn = patmatchsum_fn_prefix + ".tsv"
pklsum_patmatch_fn = patmatchsum_fn_prefix + ".pkl"
import pandas as pd
sum_pm_df.to_pickle(pklsum_patmatch_fn)
sum_pm_df.to_csv(patmatchsum_fn, sep='\t') # keep index is default
sys.stderr.write("Text file of summary details after filtering saved as '{}'.".format(patmatchsum_fn))
sum_pm_UNFILTEREDdf.to_pickle(pklsum_patmatchUNF_fn)
sum_pm_UNFILTEREDdf.to_csv(patmatchsumUNF_fn, sep='\t') # keep index is default
sys.stderr.write("\nText file of summary details before filtering saved as '{}'.".format(patmatchsumUNF_fn))
raw_pm_df.to_pickle(pkl_patmatch_fn)
raw_pm_df.to_csv(patmatch_fn, sep='\t') # keep index is default
sys.stderr.write("\nText file of raw details saved as '{}'.".format(patmatchsum_fn))
# pack up archive dataframes
pm_dfs_list = [patmatch_fn,pkl_patmatch_fn,patmatchsumUNF_fn,pklsum_patmatchUNF_fn, patmatchsum_fn,pklsum_patmatch_fn]
archive_file_name = patmatch_fn_prefix+".tar.gz"
!tar czf {archive_file_name} {" ".join(pm_dfs_list)} # use the list for archiving command
sys.stderr.write("\nCollected pattern matching"
" results gathered and saved as "
"`{}`.".format(archive_file_name))
```
Download the tarballed archive of the files to your computer.
For now that archive doesn't include the figures generated from the plots because with a lot of strains they can get large. Download those if you want them. (Look for `saveplot_fn_prefix` settings in the code to help identify file names.)
----
```
import time
def executeSomething():
    '''Print a heartbeat dot, then sleep; keeps the notebook session alive.'''
    #code here
    print ('.')
    time.sleep(480) # 480 seconds = 8 minutes between heartbeats
while True:
    executeSomething()
```
| github_jupyter |
```
#pip install seaborn
```
# Import Libraries
```
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
```
# Read the CSV and Perform Basic Data Cleaning
```
# Raw dataset drop NA
df = pd.read_csv("../resources/train_predict.csv")
# Drop the null columns where all values are null
df1 = df.dropna(axis='columns', how='all')
df1.head()
#Reviewing the % of null values
100*df1.isnull().sum()/df.shape[0]
# Drop the null rows data cleaning, making all column headers lowercase
# NOTE(review): rows are dropped from the raw `df`, not the column-cleaned
# `df1` -- `df1` is only inspected above; confirm that is intended.
loan_df = df.dropna()
loan_df.columns=df.columns.str.lower()
loan_df.head()
#Update column names
loan_df.columns=['loan_id', 'gender', 'married', 'dependents', 'education','self_employed'
                 , 'income', 'co_income'
                 , 'loan_amount', 'loan_term', 'credit_history', 'property_area', 'loan_status']
#Test data_df after drop NAN
loan_df.dtypes
loan_df.shape
#Reviewing data
loan_df['dependents'].unique()
#Reviewing data
loan_df['self_employed'].unique()
#Reviewing data
loan_df['loan_term'].unique()
#Reviewing data
loan_df['credit_history'].unique()
loan_df.describe()
```
# Select your features (columns)
```
# Set features. This will also be used as your x values. Removed 'loan_id', 'property_area'
loan_features_df = loan_df[['gender', 'married', 'dependents', 'education','self_employed'
                            , 'income', 'co_income'
                            , 'loan_amount', 'loan_term', 'credit_history', 'loan_status']]
loan_features_df.head()
# Quick views of approval status (`loan_status`) against key categoricals.
sns.countplot(y='gender', hue ='loan_status',data =loan_features_df)
sns.countplot(y='married', hue ='loan_status',data =loan_features_df)
sns.countplot(y='credit_history', hue ='loan_status',data =loan_features_df)
sns.countplot(y='loan_term', hue ='loan_status',data =loan_features_df)
```
# Create a Train Test Split
Use `loan_status` for the y values
```
#code to numeric Hold-> 'Urban': 3, 'Semiurban': 2, 'Rural': 1,
code_numeric = {'Female': 1, 'Male': 2,'Yes': 1, 'No': 2,
                'Graduate': 1, 'Not Graduate': 2, 'Y': 1, 'N': 0, '3+': 3}
# FIX: encode BEFORE splitting. The original mapped to numeric codes only
# AFTER train_test_split, so X_train/X_test/y_train/y_test still held raw
# strings and the later StandardScaler/to_categorical steps would fail.
loan_features_df = loan_features_df.applymap(lambda s: code_numeric.get(s) if s in code_numeric else s)
loan_features_df.info()
# Target is loan approval; everything else is a feature.
y = loan_features_df[["loan_status"]]
X = loan_features_df.drop(columns=["loan_status"])
print(X.shape, y.shape)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, stratify=y)
```
# Pre-processing
Scale the data and perform some feature selection
```
# Scale Data
from sklearn.preprocessing import StandardScaler
# Create a StandardScaler model and fit it to the training data
X_scaler = StandardScaler().fit(X_train)
# FIX: actually apply the fitted scaler -- later cells reference
# `X_train_scaled`/`X_test_scaled` but they were never defined.
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
#y_scaler = StandardScaler().fit(y_train)
# to_categorical(y)
# StandardScaler().fit(X)
# Preprocessing: one-hot encode the (numeric) labels for the Keras model.
#from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.utils import to_categorical
# label_encoder = LabelEncoder()
# label_encoder.fit(y_train)
# encoded_y_train = label_encoder.transform(y_train)
# encoded_y_test = label_encoder.transform(y_test)
y_train_categorical = to_categorical(y_train)
y_test_categorical = to_categorical(y_test)
```
# Train the Model
```
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# Dense classifier: 10 input features -> 500 relu units -> 2-way softmax.
model = Sequential()
model.add(Dense(units=500, activation='relu', input_dim=10))
# model.add(Dense(units=100, activation='relu'))
model.add(Dense(units=2, activation='softmax'))
model.summary()
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Fit the model to the training data
# FIX: the original passed `X_scaled`, which is never defined anywhere in the
# notebook; scale the training features with the fitted X_scaler instead.
model.fit(
    X_scaler.transform(X_train),
    y_train_categorical,
    epochs=100,
    shuffle=True,
    verbose=2
)
# Second model: linear-kernel SVC on the scaled features (rebinds `model`).
from sklearn.svm import SVC
model = SVC(kernel='linear')
# Define the scaled splits locally so this cell does not rely on names that
# were never created elsewhere.
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
model.fit(X_train_scaled, y_train.values.ravel())
print(f"Training Data Score: {model.score(X_train_scaled, y_train)}")
print(f"Testing Data Score: {model.score(X_test_scaled, y_test)}")
from sklearn.metrics import classification_report
# FIX: predict on the *scaled* test features -- the SVC was trained on scaled
# data, so the original `model.predict(X_test)` mixed scaled and raw inputs.
predictions = model.predict(X_test_scaled)
print(classification_report(y_test, predictions))
```
# Hyperparameter Tuning
Use `GridSearchCV` to tune the model's parameters
```
# Create the GridSearchCV model
from sklearn.model_selection import GridSearchCV
# Candidate regularization strengths and kernel widths.
# NOTE(review): `gamma` has no effect on a linear-kernel SVC -- confirm intended.
param_grid = {'C': [1, 2, 10, 50],
              'gamma': [0.0001, 0.0005, 0.001, 0.005]}
grid = GridSearchCV(model, param_grid, verbose=3)
# Train the model with GridSearch
# NOTE(review): this fits on unscaled X_train while the SVC above used scaled
# features -- confirm which input is intended.
grid.fit(X_train, y_train.values.ravel())
#print params, scores
print(grid.best_params_)
print(grid.best_score_)
```
# Save the Model
```
import joblib
# save your model by updating "your_name" with your name
# and "your_model" with your model variable
# be sure to turn this in to BCS
# if joblib fails to import, try running the command to install in terminal/git-bash
filename = 'finalized_Plant_model1.sav'
joblib.dump(model, filename)
#To be done later
# load the model from disk
loaded_model = joblib.load(filename)
# FIX: `model` at this point is the SVC, whose `.score` expects 1-D class
# labels and the same (scaled) feature space it was trained on; the original
# passed raw X_test and the one-hot `y_test_categorical`, which raises.
result = loaded_model.score(X_scaler.transform(X_test), y_test)
print(result)
```
| github_jupyter |
## Dependencies
```
import glob
import numpy as np
import pandas as pd
from transformers import TFDistilBertModel
from tokenizers import BertWordPieceTokenizer
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, Dropout, GlobalAveragePooling1D, Concatenate
# Auxiliary functions
# Transformer inputs
def preprocess_test(text, context, tokenizer, max_seq_len):
    '''Build [input_ids, attention_mask, token_type_ids] for one QA-style example.

    The sentiment word (`context`) acts as the "question" and `text` as the
    passage. Assumes `context` encodes to exactly one wordpiece, so the
    prepended prefix is 3 ids: [CLS] + context + [SEP] -- TODO confirm.
    Returns a list of three int32 numpy arrays, each of length max_seq_len.
    '''
    # Encode the context and strip its own [CLS]/[SEP] special tokens.
    context_encoded = tokenizer.encode(context)
    context_encoded = context_encoded.ids[1:-1]
    encoded = tokenizer.encode(text)
    # Pad/trim the passage to max_seq_len before prefixing.
    encoded.pad(max_seq_len)
    encoded.truncate(max_seq_len)
    input_ids = encoded.ids
    attention_mask = encoded.attention_mask
    # Segment ids: the 3-token prefix is segment 0, the passage segment 1.
    token_type_ids = ([0] * 3) + ([1] * (max_seq_len - 3))
    # 101/102 are BERT's [CLS]/[SEP] vocabulary ids.
    input_ids = [101] + context_encoded + [102] + input_ids
    # update input ids and attention masks size (drop 3 to stay at max_seq_len)
    input_ids = input_ids[:-3]
    attention_mask = [1] * 3 + attention_mask[:-3]
    x = [np.asarray(input_ids, dtype=np.int32),
         np.asarray(attention_mask, dtype=np.int32),
         np.asarray(token_type_ids, dtype=np.int32)]
    return x
def get_data_test(df, tokenizer, MAX_LEN):
    '''Vectorize every row of `df` into model-ready input arrays.

    Returns [input_ids, attention_masks, token_type_ids], each a 2-D numpy
    array with one row per dataframe row, built via `preprocess_test`.
    '''
    ids_rows, mask_rows, segment_rows = [], [], []
    for row in df.itertuples():
        ids, mask, segments = preprocess_test(
            getattr(row, "text"), getattr(row, "sentiment"), tokenizer, MAX_LEN)
        ids_rows.append(ids)
        mask_rows.append(mask)
        segment_rows.append(segments)
    return [np.asarray(ids_rows), np.asarray(mask_rows), np.asarray(segment_rows)]
def decode(pred_start, pred_end, text, tokenizer):
offset = tokenizer.encode(text).offsets
if pred_end >= len(offset):
pred_end = len(offset)-1
decoded_text = ""
for i in range(pred_start, pred_end+1):
decoded_text += text[offset[i][0]:offset[i][1]]
if (i+1) < len(offset) and offset[i][1] < offset[i+1][0]:
decoded_text += " "
return decoded_text
```
# Load data
```
# Load the competition test set and preview it.
test = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/test.csv')
print('Test samples: %s' % len(test))
display(test.head())
```
# Model parameters
```
# Maximum token length for all model inputs.
MAX_LEN = 128
# Pretrained DistilBERT (SQuAD-distilled) weights and config.
base_path = '/kaggle/input/qa-transformers/distilbert/'
base_model_path = base_path + 'distilbert-base-uncased-distilled-squad-tf_model.h5'
config_path = base_path + 'distilbert-base-uncased-distilled-squad-config.json'
# Fine-tuned fold checkpoints and vocabulary from the training kernel.
input_base_path = '/kaggle/input/7-tweet-train-distilbert-lower-lower-v2/'
tokenizer_path = input_base_path + 'vocab.txt'
model_path_list = glob.glob(input_base_path + '*.h5')
model_path_list.sort()
print('Models to predict:')
print(*model_path_list, sep = "\n")
```
# Tokenizer
```
# WordPiece tokenizer from the training run's vocabulary; lowercase to match training.
tokenizer = BertWordPieceTokenizer(tokenizer_path , lowercase=True)
```
# Pre process
```
# Fill missing tweets with empty strings and lowercase to match the lowercased tokenizer.
test['text'].fillna('', inplace=True)
test["text"] = test["text"].apply(lambda x: x.lower())
x_test = get_data_test(test, tokenizer, MAX_LEN)
```
# Model
```
def model_fn():
    '''Build the span-prediction model: DistilBERT encoder + pooled dense heads.

    Produces two MAX_LEN-wide sigmoid vectors scoring each token position as
    the answer start and end, respectively.
    '''
    ids_in = Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
    mask_in = Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')
    segments_in = Input(shape=(MAX_LEN,), dtype=tf.int32, name='token_type_ids')
    encoder = TFDistilBertModel.from_pretrained(base_model_path, config=config_path, name="base_model")
    # First element of the encoder output is the last hidden-state sequence.
    hidden_states = encoder({'input_ids': ids_in, 'attention_mask': mask_in, 'token_type_ids': segments_in})[0]
    pooled = GlobalAveragePooling1D()(hidden_states)
    start_scores = Dense(MAX_LEN, activation='sigmoid', name='y_start')(pooled)
    end_scores = Dense(MAX_LEN, activation='sigmoid', name='y_end')(pooled)
    return Model(inputs=[ids_in, mask_in, segments_in], outputs=[start_scores, end_scores])
```
# Make predictions
```
# Accumulate ensemble-averaged start/end probability matrices across all checkpoints.
NUM_TEST_IMAGES = len(test)  # NOTE(review): name looks like a leftover from an image kernel; these are tweets
test_start_preds = np.zeros((NUM_TEST_IMAGES, MAX_LEN))
test_end_preds = np.zeros((NUM_TEST_IMAGES, MAX_LEN))
for model_path in model_path_list:
    print(model_path)
    model = model_fn()
    model.load_weights(model_path)
    test_preds = model.predict(x_test)
    # Equal-weight average over the model checkpoints.
    test_start_preds += test_preds[0] / len(model_path_list)
    test_end_preds += test_preds[1] / len(model_path_list)
```
# Post process
```
# Convert per-position probabilities to the most likely start/end token indices.
test['start'] = test_start_preds.argmax(axis=-1)
test['end'] = test_end_preds.argmax(axis=-1)
# Map token indices back to a substring of the original tweet text.
test['selected_text'] = test.apply(lambda x: decode(x['start'], x['end'], x['text'], tokenizer), axis=1)
```
# Test set predictions
```
# Write predictions into the sample submission (row order matches `test`).
submission = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/sample_submission.csv')
submission['selected_text'] = test["selected_text"]
submission.to_csv('submission.csv', index=False)
submission.head(10)
```
| github_jupyter |
MNIST
Aproximate error rate BEFORE training is 90.7 %
Aproximate error rate during iteration 0 is 80.8 %
Aproximate error rate during iteration 100 is 6.6 %
Aproximate error rate during iteration 200 is 4.6 %
Aproximate error rate during iteration 300 is 3.4 %
Aproximate error rate during iteration 400 is 3.4 %
Aproximate error rate during iteration 500 is 2.6 %
Aproximate error rate during iteration 600 is 3.7 %
Aproximate error rate during iteration 700 is 2.6 %
Aproximate error rate during iteration 800 is 2.2 %
Aproximate error rate during iteration 900 is 3.3 %
Total training time: 90.77 seconds
Final aproximate training error is 2.4 %
Final test error is 2.79 %
```
# Load the saved MNIST evaluation images plus the per-layer filter indices,
# activations, deconvolved activations, and maximally-activating image indices
# produced by the training/visualization run.
import numpy as np
import random
import matplotlib.pyplot as plt
#
# read original images
#
eval_data_mnist = np.load("tmp/MNIST/eval_data_mnist.npy")
eval_data_mnist.shape
#
# get filter numbers for each of the layers
#
filtersLayer1 = np.load("tmp/MNIST/RandomFiltersIndexMnist_Layer1_Features.npy")
filtersLayer2 = np.load("tmp/MNIST/RandomFiltersIndexMnist_Layer2_Features.npy")
filtersLayer2
#
# read activations
#
# layer 1
Activations1 = np.load("tmp/MNIST/ActivationsMnist_Layer1_Features.npy")
# layer 2
Activations2 = np.load("tmp/MNIST/ActivationsMnist_Layer2_Features.npy")
print(Activations1.shape)
print(Activations2.shape)
#
# read deconvolved activations
#
# layer 1
DeconvActivations1 = np.load("tmp/MNIST/ActivationsMnist_Layer1.npy")
# layer 2
DeconvActivations2 = np.load("tmp/MNIST/ActivationsMnist_Layer2.npy")
print(DeconvActivations1.shape)
print(DeconvActivations2.shape)
#
# get index of images that maximally activates each the specific nodes of each layer
#
maxIndexesLayer1 = np.load("tmp/MNIST/BestImagesMnist_Layer1.npy")
maxIndexesLayer2 = np.load("tmp/MNIST/BestImagesMnist_Layer2.npy")
print(maxIndexesLayer1)
print(maxIndexesLayer2)
```
# Best Nodes Layer 1
## Node 16
```
# node index within this layer's selected-filter list
iNode = 0
indexNode1 = filtersLayer1[0]
# row 1: the three input images that most strongly activate this node
for col in range(3):
    plt.subplot(3, 3, 1 + col)
    plt.imshow(eval_data_mnist[int(maxIndexesLayer1[iNode, col]), :].reshape(28, 28),
               interpolation="nearest", cmap="gray")
# row 2: the node's raw feature-map activations for those images
for col in range(3):
    plt.subplot(3, 3, 4 + col)
    plt.imshow(Activations1[iNode, col, :, :, 0], cmap="gray")
# row 3: the deconvolved activations for those images
for col in range(3):
    plt.subplot(3, 3, 7 + col)
    plt.imshow(DeconvActivations1[iNode, col, :, :, 0], cmap="gray")
plt.savefig('../img/node{}_layer1_mnist'.format(indexNode1))
plt.show()
```
## Node 22
```
# index
iNode = 1
indexNode1 = filtersLayer1[1]
# images
plt.subplot(331)
plt.imshow( eval_data_mnist[int( maxIndexesLayer1[iNode, 0] ),:].reshape(28,28) , interpolation="nearest", cmap = "gray" )
plt.subplot(332)
plt.imshow( eval_data_mnist[int( maxIndexesLayer1[iNode, 1] ),:].reshape(28,28) , interpolation="nearest", cmap = "gray" )
plt.subplot(333)
plt.imshow( eval_data_mnist[int( maxIndexesLayer1[iNode, 2] ),:].reshape(28,28) , interpolation="nearest", cmap = "gray" )
# feature map visualization
plt.subplot(334)
plt.imshow( Activations1[iNode,0,:,:,0], cmap = "gray" )
plt.subplot(335)
plt.imshow( Activations1[iNode,1,:,:,0], cmap = "gray" )
plt.subplot(336)
plt.imshow( Activations1[iNode,2,:,:,0], cmap = "gray" )
# feature map visualization
plt.subplot(337)
plt.imshow( DeconvActivations1[iNode,0,:,:,0], cmap = "gray" )
plt.subplot(338)
plt.imshow( DeconvActivations1[iNode,1,:,:,0], cmap = "gray" )
plt.subplot(339)
plt.imshow( DeconvActivations1[iNode,2,:,:,0], cmap = "gray" )
plt.savefig('../img/node{}_layer1_mnist'.format(indexNode1))
plt.show()
```
## Node 1
```
# node index within this layer's selected-filter list
iNode = 2
indexNode1 = filtersLayer1[2]
# row 1: the three input images that most strongly activate this node
for col in range(3):
    plt.subplot(3, 3, 1 + col)
    plt.imshow(eval_data_mnist[int(maxIndexesLayer1[iNode, col]), :].reshape(28, 28),
               interpolation="nearest", cmap="gray")
# row 2: the node's raw feature-map activations for those images
for col in range(3):
    plt.subplot(3, 3, 4 + col)
    plt.imshow(Activations1[iNode, col, :, :, 0], cmap="gray")
# row 3: the deconvolved activations for those images
for col in range(3):
    plt.subplot(3, 3, 7 + col)
    plt.imshow(DeconvActivations1[iNode, col, :, :, 0], cmap="gray")
plt.savefig('../img/node{}_layer1_mnist'.format(indexNode1))
plt.show()
```
# Best Nodes Layer 2
## Node 32
```
# node index within this layer's selected-filter list
iNode = 0
indexNode1 = filtersLayer2[0]
# row 1: the three input images that most strongly activate this node
for col in range(3):
    plt.subplot(3, 3, 1 + col)
    plt.imshow(eval_data_mnist[int(maxIndexesLayer2[iNode, col]), :].reshape(28, 28),
               interpolation="nearest", cmap="gray")
# row 2: the node's raw feature-map activations for those images
for col in range(3):
    plt.subplot(3, 3, 4 + col)
    plt.imshow(Activations2[iNode, col, :, :, 0], cmap="gray")
# row 3: the deconvolved activations for those images
for col in range(3):
    plt.subplot(3, 3, 7 + col)
    plt.imshow(DeconvActivations2[iNode, col, :, :, 0], cmap="gray")
plt.savefig('../img/node{}_layer2_mnist'.format(indexNode1))
plt.show()
```
## Node 45
```
# node index within this layer's selected-filter list
iNode = 1
indexNode1 = filtersLayer2[1]
# row 1: the three input images that most strongly activate this node
for col in range(3):
    plt.subplot(3, 3, 1 + col)
    plt.imshow(eval_data_mnist[int(maxIndexesLayer2[iNode, col]), :].reshape(28, 28),
               interpolation="nearest", cmap="gray")
# row 2: the node's raw feature-map activations for those images
for col in range(3):
    plt.subplot(3, 3, 4 + col)
    plt.imshow(Activations2[iNode, col, :, :, 0], cmap="gray")
# row 3: the deconvolved activations for those images
for col in range(3):
    plt.subplot(3, 3, 7 + col)
    plt.imshow(DeconvActivations2[iNode, col, :, :, 0], cmap="gray")
plt.savefig('../img/node{}_layer2_mnist'.format(indexNode1))
plt.show()
```
## Node 3
```
# node index within this layer's selected-filter list
iNode = 2
indexNode1 = filtersLayer2[2]
# row 1: the three input images that most strongly activate this node
for col in range(3):
    plt.subplot(3, 3, 1 + col)
    plt.imshow(eval_data_mnist[int(maxIndexesLayer2[iNode, col]), :].reshape(28, 28),
               interpolation="nearest", cmap="gray")
# row 2: the node's raw feature-map activations for those images
for col in range(3):
    plt.subplot(3, 3, 4 + col)
    plt.imshow(Activations2[iNode, col, :, :, 0], cmap="gray")
# row 3: the deconvolved activations for those images
for col in range(3):
    plt.subplot(3, 3, 7 + col)
    plt.imshow(DeconvActivations2[iNode, col, :, :, 0], cmap="gray")
plt.savefig('../img/node{}_layer2_mnist'.format(indexNode1))
plt.show()
```
| github_jupyter |
# Art(ists)(work)
(Milestone 3 - Task 1: Address project feedback)
## Introduction
### Information About the Data
The data I am working on is from the Museum of Modern Art (MoMA) and it consists of two datasets: Artists and Artworks.
Artists' columns are Artist ID, Name, Nationality, Gender, Birth Year, and Death Year. These features are self explanatory.
The Artworks dataset is more complex than Artists in terms of features due to the uniqueness of each artwork. Artworks' columns are Artwork ID, Title, Artist ID, Name, Date, Medium, Dimensions, Acquisition Date, Credit, Catalogue, Classification, Object Number, Diameter (cm), Circumference (cm), Height (cm), Length (cm), Width (cm), Depth (cm), Weight (kg), and Duration (s). I will be focusing on the Classification because the physical attributes and dates are extremely variable in format. To clarify, Title is the title of the artwork, and Name is the name of the artist who created the work. The physical attributes are Diameter, Circumference, Height, Length, Width, Depth, Weight, and Duration. In particular, duration is for performance art or videos and such.
### Research Questions
<!-- (Explain the importance of those questions) -->
#### What is the Gender Gap between Female and Male Artists within the Artists dataset? Does this ratio differ in American Artists?
This question is important because it addresses the gender disparity in artists that have their work shown in museums. The most commonly known famed artists are men. When we get asked to name an artist, we name men: "Da Vinci, Michelangelo, Andy Warhol, Van Gogh, Rembrandt, Monet, Picasso, Pollock, Dalí." This question seeks not to discredit all great male artists, but pokes at how we can only see and remember men for their work.

Fig 1. "Do Women STILL Have to be Naked to Get Into the Met. Museum?", Guerilla Girls
The subquestion seeks to define a difference in the gender ratio in American artists instead of the whole dataset. I speculate that there would be a higher, albeit still low, female to male ratio because an American Museum would want to display more American female artists.
#### What are the top five Nationalities in the Artists Dataset?
This dataset is from the Museum of Modern Art in America, but what other countries are the artists from? I want to discover what the top five nationalities in artists tells us about the museum itself. In short, the importance of this question lies in the importance of artist nationality.
#### What is the average age of death of Artists within MoMA?
Many great artists do not seem to live very long, perhaps due to [mental](https://en.wikipedia.org/wiki/Creativity_and_mental_health) or physical illnesses. I do not mean to link creativity to mental illness, but I would like to point out that art is a form of self-expression. Smoking and drinking were very also prevalent in the mid century. I speculate that the average death of artists in the Artists dataset was lower than it is [now](https://en.wikipedia.org/wiki/List_of_countries_by_life_expectancy).
#### What are the 5 most popular Classifications?
I want to find out which category of Classification holds the greatest quantity because it could show how some Classifications are more "modern" than others. Maybe the most quantified methods will also tell us how artists prefer one method over another.
## Analysis
<!-- (why exactly you chose those visualization methods) -->
```
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
import seaborn as sns
# functions
def load_and_process(path):
    """Load the CSV at *path* and drop every row containing missing values."""
    return pd.read_csv(path).dropna()
def clean_date(row):
    """Return the start year when ``row.Date`` is a range like '1990-1995'.

    Some dates are ranges; anything longer than 4 characters is split on '-'
    and ``row.Date`` is overwritten in place with the first piece.
    """
    date_str = row.Date
    if len(date_str) > 4:
        row.Date = date_str.split('-')[0]
    return row.Date
def clean_df_artworks(artworks):
    """Return a copy of *artworks* whose Date column keeps only range start years."""
    cleaned = artworks.copy()
    cleaned['Date'] = cleaned.apply(clean_date, axis=1)
    return cleaned
# Raw MoMA CSV locations.
path_artists = "../../data/raw/artists.csv"
path_artworks = "../../data/raw/artworks.csv"
# dropna() keeps only artists with a Death Year, so all these artists are dead.
df_artists = load_and_process(path_artists)
df_artworks = pd.read_csv(path_artworks)
# Keep artworks with a Date and normalize date ranges to their start year.
df_artworks = clean_df_artworks(df_artworks[df_artworks['Date'].notna()])
# cleaning
df_artists= df_artists[df_artists.Gender != 'male'] # drop one mismarked lowercase-'male' row
df_artists = df_artists[df_artists.Nationality != 'Nationality unknown'] # drop artists with unknown nationality
```
### Gender
```
# Count plot of artist Gender, plus the overall male:female ratio.
gender = sns.countplot(y='Gender',data=df_artists).set_title('Gender Ratio')
male_female_ratio = (df_artists[df_artists.Gender == 'Male'].count()/df_artists[df_artists.Gender == 'Female'].count())
```
Fig 1. We can see that for every female artist in MoMA, there are 7.82 males.
I choose a count plot to visualize the gap between the number of female and male artists because it is simple and easy to understand. Blue represents male and orange represents female.
#### American Gender Ratio
Let's see the American male and female chart:
```
# Same gender breakdown, restricted to artists with American nationality.
df_am_gender = df_artists[df_artists['Nationality']=='American']
gender_am = sns.countplot(y='Gender',data=df_am_gender).set_title('American Gender Ratio')
am_male_female_ratio = (df_am_gender[df_am_gender.Gender == 'Male'].count()/df_am_gender[df_am_gender.Gender == 'Female'].count())
am_male_female_ratio
```
Fig 2. The American male to female ratio is 5.75.
### Nationality
I want to see which Nationality is the most represented among the MoMA artists
```
# Nationalities sorted by artist count (most common first); enlarge the figure for readability.
nationality=sns.countplot(y='Nationality',data=df_artists,order = df_artists['Nationality'].value_counts().index).set_title('Sorted Number of Nationalities')
fig = plt.gcf()
fig.set_size_inches(8, 12)
```
Fig 3.Top 5 Nationalities (by count): American, French, German, British, Italian
I used a count plot and ordered the nationalities that hold the most artists. It is easy to see that America is in the lead here, not that it is a competition.
### Age
I am going to see if there is anything in Birth Year and Death Year (EDA)
```
hexbin=sns.jointplot(x='Birth Year',y='Death Year',data=df_artists,kind="hex")
```
Fig 4. EDA. This HexBin plot shows more information than a plain scatter plot. There is a high concentration of artists born in the late 1800's - early 1900's and dying in the late 1990's and early 2000's. I can see that the definition of "Modern" works are created by artists born around the late 1800's. So "Modern" most popularly means post-1900's.
I used a joint plot with hexagonal bins to show the pattern between Death Year and Birth Year. The darkest bins show the highest concentration of artists born and dying in two particular years. This plot is also very useful because the bar plots on the side show where the highest concentration is independent of the other variable.
```
# Age at death = Death Year - Birth Year; plot its distribution and report the mean.
df_artists['age']=(df_artists['Death Year']-df_artists['Birth Year'])
age_line = sns.displot(df_artists['age'])
age_line.fig.suptitle("Age of Death")
df_artists['age'].mean()
# got rid of boxplot
```
Fig 5. Artists in this dataset died at the average age of 72.8.
I got rid of the box plot and although this bar plot does not show the average age of death, it does show how the quantities in each bin can more or less make up the average.
### Artwork
What are the 5 most popular Classifications?
```
# Count each Classification and plot them sorted by frequency.
# FIX: corrected the chart-title typo 'Classsifications' -> 'Classifications'.
plot_class = (
    sns.countplot(
        y='Classification',
        data=df_artworks,
        order=df_artworks['Classification']
        .value_counts()
        .index)
    .set_title('Popular Classifications')
)
fig = plt.gcf()
fig.set_size_inches(8, 12)
```
Fig 6. Top 5 Classifications: Print, Photograph, Illustrated Book, Drawing, and Design.
I count and sort the amounts of each Classification's artworks. The warmer colours on the plot indicate higher quantities of a Classification in Artworks.
## Conclusion
I think these visualizations answered all my questions. If I had more experience in data cleaning I would ask more interesting questions.
#### What is the Gender Gap between Female and Male Artists within the Artists dataset? Does this ratio differ in American Artists?
For every female artist in this dataset, there are 7.82 males. For artists with American Nationality, there are 5.75 males to each female. It makes sense that the ratio is better in terms of gender equality, because an American Museum would want to represent more American artists, and therefore more American _female_ artists.
#### What are the top five Nationalities in the Artists Dataset?
The top five Nationalities in this dataset are American, French, German, British and Italian. This clearly shows how Eurocentric MoMA is.
#### What is the average age of death of Artists within MoMA?
The average age of death of artists in this dataset is 72.8. It is definitely lower than it is [now](https://en.wikipedia.org/wiki/List_of_countries_by_life_expectancy).
#### What are the 5 most popular Classifications?
The top 5 Classifications of Artworks are Print, Photograph, Illustrated Book, Drawing, and Design. I think that there are much more prints and photographs than any other Classification because they are on paper, and are therefore more accessible and perhaps easier to store and digitize. This may be why a museum has more prints and photographs in the Artworks dataset. Artists may prefer to experiment on paper due to its accessibility and flexibility.
#### References
[Guerilla Girls Image](https://www.guerrillagirls.com/naked-through-the-ages)
[Creativity and mental health](https://en.wikipedia.org/wiki/Creativity_and_mental_health)
[Life expectancy](https://en.wikipedia.org/wiki/List_of_countries_by_life_expectancy)
| github_jupyter |
```
import astropy.coordinates as coord
import astropy.table as at
import astropy.units as u
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from scipy.spatial import cKDTree
from scipy.stats import binned_statistic
from scipy.interpolate import interp1d
# gala
import gala.coordinates as gc
import gala.dynamics as gd
import gala.integrate as gi
import gala.potential as gp
from gala.units import galactic
from totoro.config import galcen_frame
from totoro.data import load_apogee_sample
from totoro.potentials import potentials, galpy_potentials
t, c = load_apogee_sample('../data/apogee-parent-sample.fits')
t = t[np.argsort(t['APOGEE_ID'])]
galcen = c.transform_to(galcen_frame)
w0s = gd.PhaseSpacePosition(galcen.data)
from galpy.actionAngle import estimateDeltaStaeckel, actionAngleStaeckel
from totoro.config import rsun as ro, vcirc as vo
from totoro.galpy_helpers import gala_to_galpy_orbit
```
### Compute Staeckel delta on a grid:
```
# (R, z) grid: R spans 8 kpc +/- 2.5 kpc, z spans +/- 2.5 kpc, 50 pc spacing.
Rz_grids = (np.arange(8-2.5, 8+2.5 + 1e-3, 0.05),
            np.arange(-2.5, 2.5 + 1e-3, 0.05))
Rz_grid = np.stack(list(map(np.ravel, np.meshgrid(*Rz_grids)))).T
# Estimate the Staeckel focal distance (delta) at every grid point for three
# potential variants, and plot each delta map over the (R, z) plane.
for pot_name in ['0.4', '1.0', '1.6']:
    pot = galpy_potentials[pot_name]
    delta_staeckels = []
    for i in range(Rz_grid.shape[0]):
        # galpy expects positions in units of ro
        R = (Rz_grid[i, 0] * u.kpc).to_value(ro)
        z = (Rz_grid[i, 1] * u.kpc).to_value(ro)
        delta_staeckels.append(estimateDeltaStaeckel(
            pot, R, z))
    plt.figure()
    plt.title(pot_name)
    plt.scatter(Rz_grid[:, 0], Rz_grid[:, 1],
                c=delta_staeckels,
                vmin=2, vmax=6, s=8, marker='s')
from scipy.interpolate import NearestNDInterpolator
# Recompute delta on the grid for the '1.0' potential (NOTE(review): this
# duplicates the loop above for that case) and build a nearest-neighbour
# interpolator over (R, z).
pot = galpy_potentials['1.0']
delta_staeckels = []
for i in range(Rz_grid.shape[0]):
    R = (Rz_grid[i, 0] * u.kpc).to_value(ro)
    z = (Rz_grid[i, 1] * u.kpc).to_value(ro)
    delta_staeckels.append(estimateDeltaStaeckel(
        pot, R, z))
delta_interp = NearestNDInterpolator(Rz_grid,
                                     delta_staeckels)
# def fast_actions():
# Interpolate delta at each star's (R, z), convert the phase-space positions
# to a galpy orbit, and compute Staeckel actions (rescaled to physical units).
deltas = delta_interp(w0s.cylindrical.rho.to_value(u.kpc),
                      w0s.z.to_value(u.kpc))
o = gala_to_galpy_orbit(w0s)
aAS = actionAngleStaeckel(pot=pot, delta=deltas)
actions = np.squeeze(aAS(o)).T * ro * vo
```
### Compare to Sanders & Binney actions
```
# Load Sanders & Binney action-angle results and align rows with `t` by APOGEE_ID
# (both tables are sorted on APOGEE_ID, then checked to match exactly).
sb_aaf = at.Table.read('../cache_new_zsun/aaf-1.0.fits')
sb_aaf = sb_aaf[np.isin(sb_aaf['APOGEE_ID'], t['APOGEE_ID'])]
assert len(sb_aaf) == len(t)
sb_aaf = sb_aaf[np.argsort(sb_aaf['APOGEE_ID'])]
assert np.all(t['APOGEE_ID'] == sb_aaf['APOGEE_ID'])
sb_actions = sb_aaf['actions']
actions.shape, sb_actions.shape
from scipy.stats import binned_statistic
from astropy.stats import median_absolute_deviation
k = 0  # NOTE(review): dead assignment -- immediately overwritten by the loop below
# Compare action components k=0 and k=2 (presumably J_R and J_z -- confirm):
# scatter the relative difference vs the S&B value, with a robust
# 1.5*MAD spread computed in log-spaced bins.
for k in [0, 2]:
    sb_J = sb_actions[:, k]
    J = actions[:, k]
    # keep only rows where both estimates are finite
    mask = np.isfinite(sb_J) & np.isfinite(J)
    sb_J = sb_J[mask]
    J = J[mask]
    stat = binned_statistic(np.log10(sb_J),
                            (J - sb_J) / sb_J,
                            statistic=lambda x: 1.5 * median_absolute_deviation(x),
                            bins=np.arange(-1, 3, 0.1))
    # bin centers back in linear units
    bincen = 0.5 * (10 ** stat.bin_edges[:-1] + 10 ** stat.bin_edges[1:])
    fig = plt.figure()
    plt.plot(sb_J, (J - sb_J) / sb_J,
             alpha=0.1, ls='none', ms=2, mew=0)
    plt.plot(bincen, stat.statistic)
    plt.xscale('log')
    plt.xlim(0.1, 2000)
    plt.ylim(-1, 1)
    fig.set_facecolor('w')
```
---
| github_jupyter |
## Importing necessary library
```
import snscrape.modules.twitter as sntwitter
import pandas as pd
import itertools
import plotly.graph_objects as go
from datetime import datetime
```
## Creating a data frame called "df" for storing the data to be scraped. Here, "2019 Elections" was the search keyword"
```
# Scrape up to 5,000,000 tweets matching the exact phrase "2019 elections".
df = pd.DataFrame(itertools.islice(sntwitter.TwitterSearchScraper(
    '"2019 elections"').get_items(), 5000000))
```
## Reading the column names from the dataframe to check the attributes
```
df.columns
```
## Calculate the time for scraping the 5000000 tweets
Here our search parameters are modified to search for tweets around Abuja within __2017-01-01 to 2021-10-23__ using the keyword __2019 elections__.
__NB:__ we set the result to be returned to __5000000__ so we can get as much as possible results (tweets).
```
# Set start time
start_time = datetime.now()
# Scrape tweets containing the exact phrase "2019 elections", posted near
# Abuja between 2017-01-01 and 2021-10-23 (up to 5,000,000 results).
# FIX: only the keyword may be wrapped in double quotes (exact-phrase match);
# the near:/since:/until: operators must sit OUTSIDE the quotes -- inside them
# Twitter treats the whole string as one literal phrase and the filters are ignored.
data = pd.DataFrame(itertools.islice(sntwitter.TwitterSearchScraper(
    '"2019 elections" near:Abuja since:2017-01-01 until:2021-10-23').get_items(), 5000000))
# Set end time
end_time = datetime.now()
# Print the time duration for scraping these tweets
print('Duration: {}'.format(end_time - start_time))
# Keep only date, id, content, username, and url, stored into dataframe 'df'
df = data[['date', 'id', 'content', 'username', 'url']]
# If you don't have transformers library installed before, kindly install it using the command:
# !pip install transformers.
# PS: Remember to remove the leading # in front of "pip install transformers"
#Importing the pipeline from Transformers.
from transformers import pipeline
# Hugging Face sentiment pipeline; each call returns [{'label': ..., 'score': ...}].
sentiment_classifier = pipeline('sentiment-analysis')
#Taking only 1000000 (20%) records and creating new dataframe called df1
# Take only the first 1,000,000 (20%) records as a new dataframe df1
df1 = df.head(1000000)
# Run each tweet through the sentiment pipeline, then unpack the
# single-element result list into separate 'label' and 'score' columns.
df1 = (df1.assign(sentiment = lambda x: x['content'].apply(lambda s: sentiment_classifier(s)))
          .assign(
              label = lambda x: x['sentiment'].apply(lambda s: (s[0]['label'])),
              score = lambda x: x['sentiment'].apply(lambda s: (s[0]['score']))))
df1.head()
# Spot-check the 1000th tweet against its sentiment label ("positive"/"negative")
df1['content'][1000]
# Visualizing the sentiments
# Visualizing the sentiments of the 20% sample as a horizontal bar chart.
fig = go.Figure()
fig.add_trace(go.Bar(x = df1["score"],
                     y = df1["label"],
                     orientation = "h")) # horizontal orientation flips the x and y-axis
fig.update_layout(plot_bgcolor = "white")
fig.show()
# Taking the entire 5000000 (100%) records and creating new dataframe called df1
df2 = df
# Passing the tweets into the sentiment pipeline and extracting the sentiment score and label
df2 = (df2.assign(sentiment = lambda x: x['content'].apply(lambda s: sentiment_classifier(s)))
.assign(
label = lambda x: x['sentiment'].apply(lambda s: (s[0]['label'])),
score = lambda x: x['sentiment'].apply(lambda s: (s[0]['score']))))
df2.head()
# Visualizing the sentiments for the full dataset.
# FIX: the sentiment columns created above are named 'score' and 'label';
# 'Sentiment score'/'Sentiment label' do not exist and raised a KeyError.
fig1 = go.Figure()
fig1.add_trace(go.Bar(x = df2["score"],
                      y = df2["label"],
                      orientation = "h")) # horizontal orientation flips the x and y-axis
fig1.update_layout(plot_bgcolor = "white")
fig1.show()
# Persist both sentiment tables to CSV.
df2.to_csv('Abj-Elect-Tweets-Sentiment.csv', index=True)
df1.to_csv('Abj-Elect-Tweets-Sentiment1.csv', index=True)
```
| github_jupyter |
# 머신 러닝 교과서 3판
# HalvingGridSearchCV
### 경고: 이 노트북은 사이킷런 0.24 이상에서 실행할 수 있습니다.
```
# When running on Colab, install the latest scikit-learn (>= 0.24 is required).
!pip install --upgrade scikit-learn
import pandas as pd
# Breast Cancer Wisconsin (diagnostic) data from the UCI repository; no header row.
df = pd.read_csv('https://archive.ics.uci.edu/ml/'
                 'machine-learning-databases'
                 '/breast-cancer-wisconsin/wdbc.data', header=None)
from sklearn.preprocessing import LabelEncoder
# Column 1 holds the string class label (integer-encoded below); columns 2+ are features.
X = df.loc[:, 2:].values
y = df.loc[:, 1].values
le = LabelEncoder()
y = le.fit_transform(y)
from sklearn.model_selection import train_test_split
# Stratified 80/20 split keeps the class balance in both sets.
X_train, X_test, y_train, y_test = \
    train_test_split(X, y,
                     test_size=0.20,
                     stratify=y,
                     random_state=1)
```
비교를 위해 `GridSearchCV` 실행 결과를 출력합니다.
```
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
import numpy as np
# Baseline: exhaustive grid search over an SVC pipeline, for comparison
# with HalvingGridSearchCV below.
pipe_svc = make_pipeline(StandardScaler(),
                         SVC(random_state=1))
param_range = [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]
# Search C for a linear kernel, and (C, gamma) for an RBF kernel.
param_grid = [{'svc__C': param_range,
               'svc__kernel': ['linear']},
              {'svc__C': param_range,
               'svc__gamma': param_range,
               'svc__kernel': ['rbf']}]
gs = GridSearchCV(estimator=pipe_svc,
                  param_grid=param_grid,
                  cv=10,
                  n_jobs=-1)  # use all CPU cores
gs = gs.fit(X_train, y_train)
print(gs.best_score_)
print(gs.best_params_)
# Total mean fit time across all candidates, used for the timing comparison.
print(np.sum(gs.cv_results_['mean_fit_time']))
```
사이킷런 0.24 버전에서 추가된 `HalvingGridsearchCV`는 모든 파라미터 조합에 대해 제한된 자원으로 실행한 다음 가장 좋은 후보를 골라서 더 많은 자원을 투여하는 식으로 반복적으로 탐색을 수행합니다. 이런 방식을 SH(Successive Halving)이라고 부릅니다. `HalvingGridsearchCV`의 `resource` 매개변수는 반복마다 늘려갈 자원을 정의합니다. 기본값은 `'n_samples'`로 샘플 개수입니다. 이 외에도 탐색 대상 모델에서 양의 정수 값을 가진 매개변수를 지정할 수 있습니다. 예를 들면 랜덤 포레스트의 `n_estimators`가 가능합니다.
`factor` 매개변수는 반복마다 선택할 후보의 비율을 지정합니다. 기본값은 3으로 후보 중에서 성능이 높은 1/3만 다음 반복으로 전달합니다. `max_resources` 매개변수는 각 후보가 사용할 최대 자원을 지정합니다. 기본값은 `'auto'`로 `resources='n_samples'`일 때 샘플 개수가 됩니다.
`min_resources`는 첫 번째 반복에서 각 후보가 사용할 최소 자원을 지정합니다. `resources='n_samples'`이고 `min_resources='smallest'`이면 회귀일 때 `cv` $\times$ 2가 되고 분류일 때는 `cv` $\times$ 클래스개수 $\times$ 2가 됩니다. 그외에는 1입니다. `min_resources='exhaust'`이면 앞에서 계산한 값과 `max_resources`를 `factor`\*\*`n_required_iterations`으로 나눈 몫 중 큰 값입니다. 기본값은 `'exhaust'`입니다(`n_required_iterations`는 $ \text{log}_{factor}(전체 후보 갯수) + 1$ 입니다).
마지막으로 `aggressive_elimination` 매개변수를 `True`로 지정하면 마지막 반복에서 `factor`만큼 후보가 남을 수 있도록 자원을 늘리지 않고 초기에 반복을 여러 번 진행합니다. 기본값은 `False`입니다.
`HalvingGridsearchCV` 아직 실험적이기 때문에 `sklearn.experimental` 패키지 아래에 있는 `enable_halving_search_cv`을 임포트해야 사용할 수 있습니다. `verbose=1`로 지정하면 각 반복 과정을 자세히 살펴 볼 수 있습니다.
```
from sklearn.experimental import enable_halving_search_cv  # noqa: F401 -- required to enable the experimental API
from sklearn.model_selection import HalvingGridSearchCV
# Successive-halving search over the same parameter grid; verbose=1
# prints the candidates/resources used at each iteration.
hgs = HalvingGridSearchCV(estimator=pipe_svc,
                          param_grid=param_grid,
                          cv=10,
                          n_jobs=-1, verbose=1)
hgs = hgs.fit(X_train, y_train)
print(hgs.best_score_)
print(hgs.best_params_)
```
출력 결과를 보면 첫 번째 반복(iter: 0)에서 72개의 후보를 40개의 샘플로 교차 검증을 수행합니다. 여기에서 72/3 = 24개의 후보를 뽑아 두 번째 반복(iter: 1)을 수행합니다. 두 번째 반복에서는 40 * 3 = 120개의 샘플을 사용합니다. 같은 방식으로 세 번째 반복(iter: 2)에서는 8개의 후보가 360개의 샘플로 평가됩니다. 최종 결과는 98.3%로 `GridSearchCV` 보다 조금 낮습니다. 찾은 매개변수 조합도 달라진 것을 볼 수 있습니다.
3번의 반복 동안 `HalvingGridSearchCV`가 수행한 교차 검증 횟수는 모두 104번입니다. 각 교차 검증에 걸린 시간은 `cv_results_` 속성의 `mean_fit_time`에 저장되어 있습니다. 이를 `GridSearchCV`와 비교해 보면 5배 이상 빠른 것을 볼 수 있습니다.
```
print(np.sum(hgs.cv_results_['mean_fit_time']))
```
각 반복 단계에서 사용한 샘플 개수와 후보 개수는 각각 `n_resources_` 속성과 `n_candidates_` 속성에 저장되어 있습니다.
```
# Per-iteration sample budgets ("resource list") and surviving candidate counts ("candidate list").
print('자원 리스트:', hgs.n_resources_)
print('후보 리스트:', hgs.n_candidates_)
```
| github_jupyter |
# RadiusNeighborsRegressor with MinMaxScaler & Polynomial Features
**This Code template is for the regression analysis using a RadiusNeighbors Regression and the feature rescaling technique MinMaxScaler along with Polynomial Features as a feature transformation technique in a pipeline**
### Required Packages
```
import warnings as wr
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler,PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.neighbors import RadiusNeighborsRegressor
from sklearn.metrics import mean_squared_error, r2_score,mean_absolute_error
wr.filterwarnings('ignore')
```
### Initialization
Filepath of CSV file
```
#filepath
file_path= ""
```
List of features which are required for model training .
```
#x_values
features=[]
```
Target feature for prediction.
```
#y_value
target=''
```
### Data Fetching
Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.
```
# Load the dataset and take a first look at its shape and columns.
df=pd.read_csv(file_path) #reading file
df.head()#displaying initial entries
print('Number of rows are :',df.shape[0], ',and number of columns are :',df.shape[1])
df.columns.tolist()
```
### Data Preprocessing
Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes.
```
def NullClearner(df):
    """Fill missing values of a Series in place: mean for numeric, mode otherwise.

    Non-Series inputs are returned unchanged.
    """
    if not isinstance(df, pd.Series):
        return df
    if df.dtype in ["float64", "int64"]:
        fill_value = df.mean()
    else:
        fill_value = df.mode()[0]
    df.fillna(fill_value, inplace=True)
    return df
def EncodeX(df):
    """One-hot encode the categorical columns of *df* via pandas dummies."""
    encoded = pd.get_dummies(df)
    return encoded
```
#### Correlation Map
In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
```
# Correlation heatmap over all numeric features (upper triangle masked out).
plt.figure(figsize = (15, 10))
corr = df.corr()
mask = np.triu(np.ones_like(corr, dtype = bool))
sns.heatmap(corr, mask = mask, linewidths = 1, annot = True, fmt = ".2f")
plt.show()
# Correlation of every feature with the target column.
correlation = df[df.columns[1:]].corr()[target][:]
correlation
```
### Feature Selections
It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
We will assign all the required input features to X and target/outcome to Y.
```
# Split the dataframe into the feature matrix X and the target vector Y.
X=df[features]
Y=df[target]
```
Calling preprocessing functions on the feature and target set.
```
# Impute missing values column-by-column, then one-hot encode categoricals.
x=X.columns.to_list()
for i in x:
    X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=NullClearner(Y)
X.head()
```
### Data Splitting
The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
```
# We can choose random_state and test_size as per requirement; 80/20 split here.
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 1) #performing datasplitting
```
### Data Scaling
**Used MinMaxScaler**
* Transform features by scaling each feature to a given range.
* This estimator scales and translates each feature individually such that it is in the given range on the training set, e.g. between zero and one.
### Feature Transformation
**PolynomialFeatures :**
* Generate polynomial and interaction features.
* Generate a new feature matrix consisting of all polynomial combinations of the features with degree less than or equal to the specified degree.
## Model
**RadiusNeighborsRegressor**
RadiusNeighborsRegressor implements learning based on the neighbors within a fixed radius of the query point, where is a floating-point value specified by the user.
**Tuning parameters :-**
* **radius:** Range of parameter space to use by default for radius_neighbors queries.
* **algorithm:** Algorithm used to compute the nearest neighbors:
* **leaf_size:** Leaf size passed to BallTree or KDTree.
* **p:** Power parameter for the Minkowski metric.
* **metric:** the distance metric to use for the tree.
* **outlier_label:** label for outlier samples
* **weights:** weight function used in prediction.
```
#training the RadiusNeighborsRegressor
model = make_pipeline(MinMaxScaler(),PolynomialFeatures(),RadiusNeighborsRegressor(radius=1.5))
model.fit(X_train,y_train)
```
#### Model Accuracy
For a regressor, the score() method returns the coefficient of determination (R²) of the prediction on the given test data and labels.
An R² of 1.0 indicates perfect prediction; lower values indicate a poorer fit.
```
# NOTE(review): Pipeline.score delegates to the final regressor, which returns
# R^2 for regression models — not classification accuracy as the label implies.
print("Accuracy score {:.2f} %\n".format(model.score(X_test,y_test)*100))
#prediction on testing set
prediction=model.predict(X_test)
```
### Model evaluation
**r2_score:** The r2_score function computes the proportion of the variability in the target that is explained by the model (1.0 is a perfect fit; lower values are worse).
**MAE:** The mean absolute error function calculates the total error as the average absolute distance between the real data and the predicted data.
**MSE:** The mean squared error function squares the error(penalizes the model for large errors) by our model.
```
# Regression error metrics on the held-out test set.
print('Mean Absolute Error:', mean_absolute_error(y_test, prediction))
print('Mean Squared Error:', mean_squared_error(y_test, prediction))
print('Root Mean Squared Error:', np.sqrt(mean_squared_error(y_test, prediction)))
print("R-squared score : ",r2_score(y_test,prediction))
# Scatter every 5th of the first 80 predictions (red) vs actuals (green).
red = plt.scatter(np.arange(0,80,5),prediction[0:80:5],color = "red")
green = plt.scatter(np.arange(0,80,5),y_test[0:80:5],color = "green")
plt.title("Comparison of Regression Algorithms")
plt.xlabel("Index of Candidate")
plt.ylabel("target")
plt.legend((red,green),('RadiusNeighborsRegressor', 'REAL'))
plt.show()
```
### Prediction Plot
First, we make use of a plot to plot the actual observations, with x_train on the x-axis and y_train on the y-axis. For the regression line, we will use x_train on the x-axis and then the predictions of the x_train observations on the y-axis.
```
# Overlay actual (green) vs predicted (red) values for the first 20 test rows.
plt.figure(figsize=(10,6))
plt.plot(range(20),y_test[0:20], color = "green")
plt.plot(range(20),model.predict(X_test[0:20]), color = "red")
plt.legend(["Actual","prediction"])
plt.title("Predicted vs True Value")
plt.xlabel("Record number")
plt.ylabel(target)
plt.show()
```
#### Creator: Vipin Kumar , Github: [Profile](https://github.com/devVipin01)
| github_jupyter |
# Read Washington Medicaid Fee Schedules
The Washington state Health Care Authority website for fee schedules is [here](http://www.hca.wa.gov/medicaid/rbrvs/Pages/index.aspx).
* Fee schedules come in Excel format
* Fee schedules are *usually* biannual (January and July)
* Publicly available fee schedules go back to January 2011
However, Washington's Medicaid fee schedules are a pain in the ass.
They are publicly available as Microsoft Excel files but...
* File names are not systematic
* They do not read directly into R nicely (using either the `readxl` or `xlsx` packages)
* Data lines start at different rows
All these issues makes codifying difficult.
As a workaround, the following steps were taken.
1. Excel files are saved locally
2. Excel files are converted to CSV
3. CSV files are version controlled in this repository (since they are not large)
4. CSV files are read into R
The first 3 steps were done manually.
The SHA for the commit of the CSV files is 5bde7f3e33e0c83bdace0ed0cf04553a41a8efb1 (5/5/2016).
Step 4 is below.
```
# List the fee-schedule CSVs checked into Data/, then prefix each file name
# with the relative "Data/" path so they can be read directly.
files <- list.files(file.path(getwd(), "Data"))
files
files <- paste("Data", files, sep="/")
```
## Physician-Related/Professional Services
```
library(data.table)
# Read one Washington Medicaid fee-schedule CSV into a data.table.
#   f:    path to the CSV file
#   skip: number of non-data header rows to skip (varies between files)
# The schedule's effective year/month are inferred from the file name itself.
readFS <- function (f, skip) {
require(data.table, quietly=TRUE)
# Year: a two-digit 11-16 immediately before ".csv" maps to 2011-2016.
for (i in 11:16) {if (grepl(sprintf("%d\\.csv", i), f)) {year <- as.numeric(sprintf("20%d", i))}}
# Month: match either a zero-padded "_MM" token or the month's English name.
for (i in 1:12) {
monname <- format(as.Date(sprintf("%d-%d-01", year, i)), format="%B")
if (grepl(sprintf("_%02d", i), f) | grepl(tolower(monname), f, ignore.case=TRUE)) {
mm <- i
}
}
# Read all nine columns as character; type cleanup happens downstream.
colClasses <- rep("character", 9)
D <- data.table(read.csv(f, header=FALSE, colClasses=colClasses, skip=skip, na.strings=c(""), strip.white=TRUE))
old <- names(D)
# Canonical names for the first eight columns of every schedule.
keep <- c("code_status_indicator",
"code",
"mod",
"nfs_maximum_allowable",
"fs_maximum_allowable",
"pa_required",
"global_days",
"comments")
# Some files carry extra trailing columns; keep those original names.
if (length(old) > length(keep)) {new <- c(keep, old[(length(keep) + 1):length(old)])}
else {new <- keep}
setnames(D, old, new)
# Stamp each row with the schedule's effective date (first of the month).
D <- D[, effective_date := as.Date(sprintf("%d-%d-01", year, mm))]
# Return only the canonical columns plus the effective date.
D[, c(keep, "effective_date"), with=FALSE]
}
# Combine all physician/professional-services schedules into one table.
# Each file needs its own `skip` because data rows start at different lines.
fs <- rbindlist(list(readFS(file.path(getwd(), "Data/HCA_PREOH_January_1_2013.csv"), 9),
readFS(file.path(getwd(), "Data/physician_010114.csv"), 9),
readFS(file.path(getwd(), "Data/physician_010115.csv"), 9),
readFS(file.path(getwd(), "Data/physician_010116.csv"), 10),
readFS(file.path(getwd(), "Data/physician_040115.csv"), 9),
readFS(file.path(getwd(), "Data/physician_040116.csv"), 10),
readFS(file.path(getwd(), "Data/physician_070114.csv"), 9),
readFS(file.path(getwd(), "Data/physician_070115.csv"), 10),
readFS(file.path(getwd(), "Data/physician_100115.csv"), 10),
readFS(file.path(getwd(), "Data/preoh_010112.csv"), 6),
readFS(file.path(getwd(), "Data/preoh_01012011.csv"), 6),
readFS(file.path(getwd(), "Data/preoh_070112.csv"), 9),
readFS(file.path(getwd(), "Data/preoh_070113.csv"), 9),
readFS(file.path(getwd(), "Data/preoh_07012011.csv"), 6)))
str(fs)
# Sanity check: row counts by effective date, in chronological order.
fs[, .N, effective_date][order(effective_date)]
head(fs)
tail(fs)
```
Rename object
```
# Keep the physician schedules under a descriptive name before `fs` is reused.
fsPhysician <- fs
```
## Ambulance Transportation
```
library(data.table)
f <- file.path(getwd(), "Data/ambulance_transportation_022016.csv")
D <- data.table(read.csv(f, header=TRUE, na.strings=c(""), strip.white=TRUE, stringsAsFactors=FALSE))
old <- names(D)
new <- c("code_status_indicator",
"code",
"description",
"fs_maximum_allowable",
"limits")
setnames(D, old, new)
# Strip everything except digits and "." (e.g. "$", commas), then convert to numeric.
D <- D[, fs_maximum_allowable := as.numeric(gsub("[^0-9\\.]", "", fs_maximum_allowable))]
# NOTE(review): effective date is hard-coded to 2006-07-01 even though the file
# name says 022016 — confirm this is intentional.
D <- D[, effective_date := as.Date("2006-07-01")]
str(D)
D
```
| github_jupyter |
# Convolutional Neural Networks
---
In this notebook, we train a **CNN** to classify images from the CIFAR-10 database.
The images in this database are small color images that fall into one of ten classes; some example images are pictured below.

### Test for [CUDA](http://pytorch.org/docs/stable/cuda.html)
Since these are larger (32x32x3) images, it may prove useful to speed up your training time by using a GPU. CUDA is a parallel computing platform and CUDA Tensors are the same as typical Tensors, only they utilize GPU's for computation.
```
import torch
import numpy as np

# Detect CUDA support once; the rest of the notebook branches on this flag.
train_on_gpu = torch.cuda.is_available()

message = (
    'CUDA is available! Training on GPU ...'
    if train_on_gpu
    else 'CUDA is not available. Training on CPU ...'
)
print(message)
```
---
## Load the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)
Downloading may take a minute. We load in the training and test data, split the training data into a training and validation set, then create DataLoaders for each of these sets of data.
```
from torchvision import datasets
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler

# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# percentage of training set to use as validation
valid_size = 0.2

# convert data to a normalized torch.FloatTensor
# (per-channel mean/std of 0.5 maps pixel values into [-1, 1])
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

# choose the training and test datasets (downloaded on first run)
train_data = datasets.CIFAR10('data', train=True,
                              download=True, transform=transform)
test_data = datasets.CIFAR10('data', train=False,
                             download=True, transform=transform)

# obtain training indices that will be used for validation:
# shuffle all indices, then carve off the first valid_size fraction
num_train = len(train_data)
indices = list(range(num_train))
np.random.shuffle(indices)
split = int(np.floor(valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]

# define samplers for obtaining training and validation batches
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)

# prepare data loaders (combine dataset and sampler)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
    sampler=train_sampler, num_workers=num_workers)
valid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
    sampler=valid_sampler, num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
    num_workers=num_workers)

# specify the image classes (index = CIFAR-10 label id)
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck']
```
### Visualize a Batch of Training Data
```
import matplotlib.pyplot as plt
%matplotlib inline
# helper function to un-normalize and display an image
def imshow(img):
img = img / 2 + 0.5 # unnormalize
plt.imshow(np.transpose(img, (1, 2, 0))) # convert from Tensor image
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy() # convert images to numpy for display
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
# display 20 images
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
imshow(images[idx])
ax.set_title(classes[labels[idx]])
```
### View an Image in More Detail
Here, we look at the normalized red, green, and blue (RGB) color channels as three separate, grayscale intensity images.
```
# Show the three normalized color channels of one image as grayscale maps,
# annotating every pixel with its value.
rgb_img = np.squeeze(images[3])
channels = ['red channel', 'green channel', 'blue channel']

fig = plt.figure(figsize = (36, 36))
for idx in np.arange(rgb_img.shape[0]):
    ax = fig.add_subplot(1, 3, idx + 1)
    img = rgb_img[idx]
    ax.imshow(img, cmap='gray')
    ax.set_title(channels[idx])
    width, height = img.shape
    # threshold chooses white vs black text for contrast against the pixel
    thresh = img.max()/2.5
    for x in range(width):
        for y in range(height):
            val = round(img[x][y],2) if img[x][y] !=0 else 0
            ax.annotate(str(val), xy=(y,x),
                        horizontalalignment='center',
                        verticalalignment='center', size=8,
                        color='white' if img[x][y]<thresh else 'black')
```
---
## Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html)
This time, you'll define a CNN architecture. Instead of an MLP, which used linear, fully-connected layers, you'll use the following:
* [Convolutional layers](https://pytorch.org/docs/stable/nn.html#conv2d), which can be thought of as stack of filtered images.
* [Maxpooling layers](https://pytorch.org/docs/stable/nn.html#maxpool2d), which reduce the x-y size of an input, keeping only the most _active_ pixels from the previous layer.
* The usual Linear + Dropout layers to avoid overfitting and produce a 10-dim output.
A network with 2 convolutional layers is shown in the image below and in the code, and you've been given starter code with one convolutional and one maxpooling layer.

#### TODO: Define a model with multiple convolutional layers, and define the feedforward network behavior.
The more convolutional layers you include, the more complex patterns in color and shape a model can detect. It's suggested that your final model include 2 or 3 convolutional layers as well as linear layers + dropout in between to avoid overfitting.
It's good practice to look at existing research and implementations of related models as a starting point for defining your own models. You may find it useful to look at [this PyTorch classification example](https://github.com/pytorch/tutorials/blob/master/beginner_source/blitz/cifar10_tutorial.py) or [this, more complex Keras example](https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py) to help decide on a final structure.
#### Output volume for a convolutional layer
To compute the output size of a given convolutional layer we can perform the following calculation (taken from [Stanford's cs231n course](http://cs231n.github.io/convolutional-networks/#layers)):
> We can compute the spatial size of the output volume as a function of the input volume size (W), the kernel/filter size (F), the stride with which they are applied (S), and the amount of zero padding used (P) on the border. The correct formula for calculating how many neurons define the output_W is given by `(W−F+2P)/S+1`.
For example for a 7x7 input and a 3x3 filter with stride 1 and pad 0 we would get a 5x5 output. With stride 2 we would get a 3x3 output.
```
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    """Three conv+pool stages followed by a two-layer classifier head.

    Input:  (N, 3, 32, 32) CIFAR-10 batch.
    Output: (N, 10) raw class scores (logits; no softmax applied —
            nn.CrossEntropyLoss expects logits).
    """

    def __init__(self):
        super(Net, self).__init__()
        # convolutional layer (sees 32x32x3 image tensor)
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        # convolutional layer (sees 16x16x16 tensor)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        # max pooling layer, shared by all three conv stages
        # (the original assigned self.pool twice; one definition suffices)
        self.pool = nn.MaxPool2d(2, 2)
        # convolutional layer (sees 8x8x32 tensor)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        # linear layer (64 * 4 * 4 -> 500)
        self.fc1 = nn.Linear(64 * 4 * 4, 500)
        # linear layer (500 -> 10)
        self.fc2 = nn.Linear(500, 10)
        # dropout layer (p=0.25) to reduce overfitting
        self.dropout = nn.Dropout(0.25)

    def forward(self, x):
        # add sequence of convolutional and max pooling layers
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        # flatten image input to (N, 64*4*4)
        x = x.view(-1, 64 * 4 * 4)
        # add dropout layer
        x = self.dropout(x)
        # add 1st hidden layer, with relu activation function
        x = F.relu(self.fc1(x))
        # add dropout layer
        x = self.dropout(x)
        # final layer produces the 10 class logits
        x = self.fc2(x)
        return x
# create a complete CNN and show its layer structure
model = Net()
print(model)

# move tensors to GPU if CUDA is available
if train_on_gpu:
    model.cuda()
```
### Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html)
Decide on a loss and optimization function that is best suited for this classification task. The linked code examples from above, may be a good starting point; [this PyTorch classification example](https://github.com/pytorch/tutorials/blob/master/beginner_source/blitz/cifar10_tutorial.py) or [this, more complex Keras example](https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py). Pay close attention to the value for **learning rate** as this value determines how your model converges to a small error.
#### TODO: Define the loss and optimizer and see how these choices change the loss over time.
```
import torch.optim as optim

# specify loss function: cross-entropy combines log-softmax and NLL loss,
# which is why the network outputs raw logits
criterion = torch.nn.CrossEntropyLoss()

# specify optimizer
# NOTE(review): lr=0.005 is on the high side for Adam (default 1e-3);
# lower it if training diverges
optimizer = optim.Adam(model.parameters(), lr=0.005)
```
---
## Train the Network
Remember to look at how the training and validation loss decreases over time; if the validation loss ever increases it indicates possible overfitting.
```
# number of epochs to train the model
n_epochs = 15 # you may increase this number to train a final model

valid_loss_min = np.Inf # track change in validation loss

# per-epoch average losses, recorded for plotting learning curves later
train_losses, valid_losses = [], []

for epoch in range(1, n_epochs+1):

    # keep track of training and validation loss
    train_loss = 0.0
    valid_loss = 0.0

    ###################
    # train the model #
    ###################
    model.train()
    for data, target in train_loader:
        # move tensors to GPU if CUDA is available
        if train_on_gpu:
            data, target = data.cuda(), target.cuda()
        # clear the gradients of all optimized variables
        optimizer.zero_grad()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the batch loss
        loss = criterion(output, target)
        # backward pass: compute gradient of the loss with respect to model parameters
        loss.backward()
        # perform a single optimization step (parameter update)
        optimizer.step()
        # update training loss (weighted by batch size)
        train_loss += loss.item()*data.size(0)

    ######################
    # validate the model #
    ######################
    model.eval()
    with torch.no_grad():  # no gradient bookkeeping needed for validation
        for data, target in valid_loader:
            # move tensors to GPU if CUDA is available
            if train_on_gpu:
                data, target = data.cuda(), target.cuda()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # update average validation loss
            valid_loss += loss.item()*data.size(0)

    # calculate average losses over the samples each loader actually visited
    # (the samplers cover only 80%/20% of the training set, so dividing by the
    # full dataset size would understate both averages)
    train_loss = train_loss/len(train_sampler)
    valid_loss = valid_loss/len(valid_sampler)
    # record per-epoch averages (the original divided a second time and
    # referenced an undefined `test_loss`, which raised a NameError)
    train_losses.append(train_loss)
    valid_losses.append(valid_loss)

    # print training/validation statistics
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch, train_loss, valid_loss))

    # save model if validation loss has decreased
    if valid_loss <= valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            valid_loss))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = valid_loss
```
### Load the Model with the Lowest Validation Loss
```
# Reload the checkpoint saved at the lowest validation loss.
model.load_state_dict(torch.load('model_cifar.pt'))
```
---
## Test the Trained Network
Test your trained model on previously unseen data! A "good" result will be a CNN that gets around 70% (or more, try your best!) accuracy on these test images.
```
# track test loss
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))

model.eval()
# iterate over test data
with torch.no_grad():  # inference only; gradients are not needed
    for data, target in test_loader:
        # move tensors to GPU if CUDA is available
        if train_on_gpu:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the batch loss
        loss = criterion(output, target)
        # update test loss
        test_loss += loss.item()*data.size(0)
        # convert output probabilities to predicted class
        _, pred = torch.max(output, 1)
        # compare predictions to true label
        correct_tensor = pred.eq(target.data.view_as(pred))
        correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
        # calculate test accuracy for each object class; use the actual batch
        # size so a short final batch cannot index past the data
        for i in range(data.size(0)):
            label = target.data[i]
            class_correct[label] += correct[i].item()
            class_total[label] += 1

# average test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))

for i in range(10):
    if class_total[i] > 0:
        print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
            classes[i], 100 * class_correct[i] / class_total[i],
            np.sum(class_correct[i]), np.sum(class_total[i])))
    else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))

print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
    100. * np.sum(class_correct) / np.sum(class_total),
    np.sum(class_correct), np.sum(class_total)))
```
### Question: What are your model's weaknesses and how might they be improved?
**Answer**: (double-click to edit and add an answer)
### Visualize Sample Test Results
```
# obtain one batch of test images
dataiter = iter(test_loader)
# builtin next(): DataLoader iterators no longer expose a .next() method
images, labels = next(dataiter)
# (the original also called images.numpy() and discarded the result — a no-op;
# the tensor form is needed below for .cuda()/.cpu())

# move model inputs to cuda, if GPU available
if train_on_gpu:
    images = images.cuda()

# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds_tensor = torch.max(output, 1)
preds = np.squeeze(preds_tensor.numpy()) if not train_on_gpu else np.squeeze(preds_tensor.cpu().numpy())

# plot the images in the batch, along with predicted and true labels
# (titles are green when the prediction matches the label, red otherwise)
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
    imshow(images.cpu()[idx])
    ax.set_title("{} ({})".format(classes[preds[idx]], classes[labels[idx]]),
                 color=("green" if preds[idx]==labels[idx].item() else "red"))
```
| github_jupyter |
# **CatBoost**
### За основу взят ноутбук из вебинара "CatBoost на больших данных", канал Karpov.Courses, ведущий вебинара Александр Савченко
Репозиторий с исходником: https://github.com/AlexKbit/pyspark-catboost-example
```
%%capture
!pip install pyspark==3.0.3
from pyspark.ml import Pipeline
from pyspark.ml.feature import VectorAssembler, StringIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.sql import SparkSession
from pyspark.sql import DataFrame
from pyspark.sql.functions import col
from pyspark.sql.types import StructField, StructType
# Local Spark session with the CatBoost-for-Spark package on the classpath;
# 2 cores per executor/task and modest 2g memory settings for a single machine.
spark = SparkSession.builder\
    .master('local[*]')\
    .appName('CatBoostWithSpark')\
    .config("spark.jars.packages", "ai.catboost:catboost-spark_3.0_2.12:1.0.3")\
    .config("spark.executor.cores", "2")\
    .config("spark.task.cpus", "2")\
    .config("spark.driver.memory", "2g")\
    .config("spark.driver.memoryOverhead", "2g")\
    .config("spark.executor.memory", "2g")\
    .config("spark.executor.memoryOverhead", "2g")\
    .getOrCreate()
spark
# catboost_spark is importable only after the package above has been loaded.
import catboost_spark
# Explicit schema: two string (categorical) columns, three doubles, int target.
schema_dataset = "col1 String, col2 String, col3 Double, col4 Double, col5 Double, target Integer"
df = spark.read.csv('/content/data.csv',sep=',',header=True,schema = schema_dataset)
df.printSchema()
print(df.describe().show())
print(df.show(7))
TARGET_LABEL = 'target'
# Multiclass F1 is the single evaluation metric used throughout the notebook.
evaluator = MulticlassClassificationEvaluator(
    labelCol=TARGET_LABEL,
    predictionCol="prediction",
    metricName='f1')
# 75/25 random train/test split.
train_df, test_df = df.randomSplit([0.75, 0.25])
```
### Train CatBoost with Pool
```
col1_indexer = StringIndexer(inputCol='col1', outputCol="col1_index")
col2_indexer = StringIndexer(inputCol='col2', outputCol="col2_index")
features = ["col1_index", "col2_index", "col3", "col4", "col5"]
assembler = VectorAssembler(inputCols=features, outputCol='features')

def prepare_vector(df: DataFrame)-> DataFrame:
    # Index the two string columns on *df*, then assemble all five features
    # into a single 'features' vector column.
    result_df = col1_indexer.fit(df).transform(df)
    result_df = col2_indexer.fit(result_df).transform(result_df)
    result_df = assembler.transform(result_df)
    return result_df

train = prepare_vector(train_df)
test = prepare_vector(test_df)
print(train.show(7))
# Wrap the training frame in a CatBoost Pool (feature vector + label column).
train_pool = catboost_spark.Pool(train.select(['features', TARGET_LABEL]))
train_pool.setLabelCol(TARGET_LABEL)
train_pool.setFeaturesCol('features')
classifier = catboost_spark.CatBoostClassifier(featuresCol='features', labelCol=TARGET_LABEL)
classifier.setIterations(50)
classifier.setDepth(5)
model = classifier.fit(train_pool)
predict = model.transform(test)
print(f'Model F1 = {evaluator.evaluate(predict)}')
print(predict.show(7))
# Persist both as a native CatBoost model and as a Spark ML model.
model.saveNativeModel('catboost_native')
model.write().overwrite().save('catboost_spark')
```
### Pipeline model with CatBoost
```
# Same stages as above, but chained in one Spark ML Pipeline so the indexers,
# assembler and classifier are fitted and applied together.
col1_indexer = StringIndexer(inputCol='col1', outputCol="col1_index")
col2_indexer = StringIndexer(inputCol='col2', outputCol="col2_index")
features = ["col1_index", "col2_index", "col3", "col4", "col5"]
assembler = VectorAssembler(inputCols=features, outputCol='features')
classifier = catboost_spark.CatBoostClassifier(featuresCol='features', labelCol=TARGET_LABEL)
classifier.setIterations(50)
classifier.setDepth(5)
pipeline = Pipeline(stages=[col1_indexer, col2_indexer, assembler, classifier])
p_model = pipeline.fit(train_df)
print(test_df.show(7))
predictions = p_model.transform(test_df)
print(predictions.show(7))
print(f'Model F1 = {evaluator.evaluate(predictions)}')
type(p_model)
# Persist the fitted pipeline (indexers + model) as a single artifact.
p_model.write().overwrite().save('catboost_pipeline')
```
| github_jupyter |
```
import numpy as np
from numpy import format_float_scientific

def sci(number):
    """Print *number* in scientific notation with three decimal places."""
    print(f"{number:0.3e}")
```
### Synapse Firings as FLOPS
The brain is a massive computational substrate that performs countless computations per second.
Many people have speculated about the raw computational power of the brain in terms of actual bit calculations. But I believe a better analogy, would be computation in terms of FLOPS.
This is because of the way we run digital neural networks.
We run large amounts of matrix computations in which float vector *inputs* are multiplied by *weight* matrices of floats, to produce a vector *output* of floats, which is then ran through an *activation function*.
Each input element in a vector, being multiplied by a weight in the matrix, is analogous to an impulse firing into a synapse, from a neuron.
Each element in the input & output vectors, can be thought of as individual neurons, and each weight in the matrix, is the connection between one input neuron and another output neuron. So each floating point multiplication of an input, by a weight, is analogous to the firing of a synapse from one neuron to another.
This allows us to actually compare quantitatively, the computational power of biological and digital networks, if we weigh a synapse firing, as the ONE floating point operation which simulates/represents it in a digital neural network.
#### This idea leads to some interesting math below:
There are ~85 billion *neurons_per_brain*, $( \frac{neurons}{brain} )$
```
# ~85 billion neurons per human brain (neurons/brain), per the text above.
neurons_per_brain = 85*1e9
sci(neurons_per_brain)
```
There are ~7000 *synapses_per_neuron* $ (\frac{synapse}{neuron}) $
```
# ~7000 synaptic connections per neuron (synapses/neuron).
synapses_per_neuron = 7000
```
The number of synapses *synapses_per_brain* is therefore 85 billion x 7000 = 5.95e14 synapes.
```
# Total synapse count: 85e9 neurons x 7000 synapses each = 5.95e14.
synapses_per_brain = neurons_per_brain*synapses_per_neuron
sci(synapses_per_brain)
```
According to [the best research I've found](https://aiimpacts.org/rate-of-neuron-firing/#:~:text='But%20generally%2C%20the%20range%20for,'),
The **average** firing rate per neuron is between .3 & 1.8 firings per second
The commonly cited figures for neuron firing rate are ~200Hz, but these are considered to be outdated/inaccurate, because they are measurements of INDIVIDUAL neurons in highly active parts of the brain.
The majority of neurons are *likely much quieter* than this (see footnote). for global calculation on brain performace, we want the *average* firing rate per neuron.
This is calculated in the above link, by brain energy consumption per volume, neuron density & the [metabolic cost](https://www.nature.com/articles/s41598-019-43460-8) of each individual neuron firing.
$$ .3Hz < FiringRate < 1.8Hz $$
This variable is set here as 1Hz & can be updated when better info is found.
(footnote from above) The AVERAGE firing rate throughout the brain, is likely much lower than the firing rate of neurons in highly active areas, because much of the brain is most likely long term memory & information that isn't constantly being accessed.
```
# Average neuron firing rate in Hz; estimated range is 0.3-1.8, 1 used here.
firing_rate = 1
```
If we then make the approximation, that a *Synapse* between two neurons, is equivalent to a weight in a digital neural network,
then a single synapse performs ONE floating point operation per firing of it's parent neuron.
Therefore, the FLOPS (floating point operations per second) rate of a neuron is ~7000 $\frac{operations}{second}$
We can then calculate the global, computational power of a human brain, as *neurons_per_brain* x *synapses_per_neuron* x *firing_rate* $\frac{operations}{second}$ or FLOPS
as 5.95e14 or 595 TeraFlops
```
# Brain compute estimate, treating each synapse firing as one FLOP per second.
brain_flops = neurons_per_brain*synapses_per_neuron*firing_rate
sci(brain_flops)
```
Therefore, the human brain has a computational power of
*number of neurons* X *synapses per neuron* X *average firing rate* = $ 5.95x10^{14} $ FLOPS Or
595 TFLOPS (teraflops)
In comparison, the NVIDIA GeForce 3090 (A new, modern GPU) has a theoretical performance of 35.6 TFLOPS (or
3.56e13 Flops) in float32 computations.
& 69 TFLOPS in float16 computations. [source](https://www.tomshardware.com/news/nvidia-geforce-rtx-3090-ga102-everything-we-know)
I have no idea, how *precise* a firing synapse in a neuron is, and if so, what precision it could be analogous to in terms of bits. #ResearchThis
```
# RTX 3090 float32 throughput (35.6 TFLOPS) for comparison.
sci(35.6*1e12)
```
IF all the above calculations are true, then computational power is actually approaching that of the human brain.
The reason the human brain is still more powerful is likely its MASSIVE (possibly *maximal*) parallelization, due to being a physical neural network.
IF we can build better algorithms, & implement parallelization on a MASSIVE scale, we can build more intelligent software NOW!
The brain is a sparse, VERY slow (1Hz) MASSIVELY distributed computing system. Could probably be better be thought of,
as 85 Billion Networked computers, each operating at 1Hz with 7000 compuational (cores?)
Massive distributed nework of 85 billion 7000 FLOP computers LOL.
---
The computers we build in sillicon are performing calculations at speeds like ~3GigaHz (3e9 Hz)
In comparison to the ~1Hz firing rate of neurons in the brain, this is insane.
The brain is still more powerful because 85 billion massively parrallel computers is nothing to sneeze at, even if they are small and slow. The massively parrallel, asyncronous nature of that many computing "cores" is the reason that their slow speed ends up not mattering that much in practice.
---
**BUT**
WE can potentially do the same thing with sillicon computers, which are operating at 3GHz & sending signals at close to the speed of light, vs the human brains 1Hz, 200 mph signal propagation.
### So we're close to there in terms of raw computational. What are the roadblocks?
First of all, let's look at the actual memory storage in the synapses of the human brain:
A synapse in a bioligical neural network is analagous to a weight (between neurons) in a digital neural network.
A single layer in a neural network, is represnted as a Matrix tranform, usually followed by an activation function.
Neural network computation is simulated by multiplying an input vector (representing the set of input neurons), then multiplying this vector by the matrix of weights. Each weight, multiplied by an element of the input vector, then summed as part of one of the output vectors, represents the impulse from one neuron to another. Therefore, that single, floating point operation, of multiplying an element in the input vector by a weight, represents a synapse firing.
The output vector is then ran through an activation function which attempts to map it to an output in the next layer in a sensible way.
The compuationally **hard** part of this, is the actual matrix computation, because it is $ O(n^2) $ space complexity to store a matrix representing weights from n input, to n output neurons. & the time complexity of multiplying the input vector by the matrix is also $O(n^2)$. This can be parrallelized, but still is a bottleneck.
---
In this method of representing a neural network digitally, information is stored in the float values of the weights. So the theoretical, ACTUAL amount of information in a human neural network, is $ 5.95x10^{14} $ synapse weights or floats.
It's unclear how accurate each of these floats needs to be, as the actualy physical property they represent, is (#double check this) the ease with which a signal propagates through the synapse. This is likely a messy biological process (voltage front, propagating through a partially insulated tube of ionized liquid), & not precise in practice. So a lower accuracy float is probably sufficient to simulate it accurately.
For the sake of getting a ballpark figure, we'll say that a weight can be accurately represented by a 32 bit float, then the amount of data in a brain is 4bytes x $ 5.95x10^{14} $ = 2380 TB of floats. This is a MASSIVE amount of data, and too much to store in RAM needed to run matrix computations.
HOWEVER,
The majority of this data is not accessed often/i.e. the neurons fire very rarely and the data does not need to be quickly accessible.
The more important parts of the brain (i.e. regions of neurons which ARE firing at ~200Hz) will nescessarily be less than the 2380TB total, though how much less is a big unknown.
The sets of weights representing THESE regions in a brain, would need to be held in some sort of RAM like memory, in order to quickly pass weights to GPU's in order to crunch large numbers of floats.
The addtional ~2380 TB of rarely accessed data could be stored in some sort of distributed database which pulled in sections of memory as needed.
---
This is all an extremely hard technical challenge.
But if my math is right (& please double check it, let me know if you see issues or am wrong), we're ACTUALLY THERE in terms of raw computational power.
600 TFLOPS is doable with 4 or 5 modern, high performance GPUs. The remaining roadblock is no longer computational power, but information retrieval/distributed massive databases, LARGE amounts of RAM, and structuring it all in a form that actually mimics the complex architecture of the human brain.
| github_jupyter |
---
### Universidad de Costa Rica
#### IE0405 - Modelos Probabilísticos de Señales y Sistemas
---
# `Py4` - *Librerías de manipulación de datos*
> **Pandas**, en particular, es una útil librería de manipulación de datos que ofrece estructuras de datos para el análisis de tablas numéricas y series de tiempo. Esta es una introducción al objeto `DataFrame` y otras características básicas.
---
## Librería Pandas
Para trabajar con una gran cantidad de datos, es deseable un conjunto de herramientas que nos permitan efectuar operaciones comunes de forma intuitiva y eficiente. Pandas, es la solución por defecto para hacerlo en Python.
Esta guía está basada en ["10 minutes to pandas"](https://pandas.pydata.org/docs/getting_started/10min.html).
```
import numpy as np
import pandas as pd
import datetime
```
---
## 4.1 - `Series`
En Python, las `Series` corresponden a un arreglo de una dimensión que admite diversos tipos de datos (números enteros, palabras, números flotantes, objetos de Python, etc.) que además están etiquetados mediante un índice que el usuario puede definir o permitir que Python lo cree por defecto. De manera que para crear una lista de valores y dejando que Python los etiquete, se utiliza el siguiente comando:
```
s = pd.Series([1, 3, 5, np.nan, "modelos", 8.5])
print(s)
```
Utilizando el comando de numpy `random.randn` para generar datos aleatorios para la lista, si se desea agregar índices distintos a los numéricos se utiliza el siguiente comando:
```
s = pd.Series(np.random.randn(5), index = ['a', 'b', 'c', 'd', 'e'])
s
```
Una vez creada la `Serie` se pueden ejecutar operaciones vectoriales con la misma o agregar atributos como un nombre, como se muestra a continuación:
```
d= pd.Series(s+s, name = 'suma')
d
```
---
## 4.2 - `DataFrame`
En Python, la asignación de `DataFrames` corresponde a un arreglo de 2 dimensiones etiquetado, semejante a concatenar varias `Series` y de igual forma admite varios tipos de datos, algo así como una hoja de cálculo o una tabla SQL. De igual forma la asignación de las etiquetas puede ser decidida por el usuario y Python hará coincidir los valores, en caso de diferencias en los tamaños de las listas agregadas, rellenará esos espacios siguiendo reglas de sentido común. A continuación un ejemplo de dos `Series` de diferentes tamaños:
```
d = {'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
'two': pd.Series([1., 2., 3., 4.], index=['a', 'c', 'd', 'b'])}
df1 = pd.DataFrame(d)
df1
```
Estos indices también pueden indicar una estampa de tiempo, tal como se muestra en el siguiente ejemplo:
```
dates = pd.date_range('20200501', periods=6)
df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list('ABCD'))
df
```
De igual forma que las `Series`, los `DataFrame` pueden asignarse como diccionarios, utilizando diferentes tipos de datos en cada columna, como se muestra a continuación:
```
df2 = pd.DataFrame({'A': 1.,
'B': pd.Timestamp('20200521'),
'C': pd.Series(1, index=list(range(4)), dtype='float32'),
'D': np.array([3] * 4, dtype='int32'),
'E': pd.Categorical(["ceviche", "pizza", "nachos", "chifrijo"]),
'F': 'foo'})
df2
```
Una vez inicializada, se pueden ejecutar acciones como extraer, eliminar e insertar de la misma manera que los diccionarios. A continuación un ejemplo:
```
df2['E']
del df2['C']
df2
df2['A']=pd.Series(np.random.randn(4), index=list(range(4)))
df2['mayorA1']=df2['A']>1
df2
```
---
## 4.3 - Visualizar datos
En Python, la visualización de datos permite decidir cuáles datos se quieren ver, por ejemplo del `DataFrame` llamado `df`, para ver las primeras filas de datos se utiliza el comando `head`:
```
df.head(2)
```
Pero si sólo se desea visualizar las últimas tres líneas se utiliza el comando `tail`:
```
df.tail(3)
```
Si bien solo se desean visualizar los indices, se utiliza:
```
df.index
```
Además, en el caso de un `DataFrame` con elementos del mismo tipo de datos, se puede transformar en un dato compatible con Numpy:
```
df.to_numpy()
```
Incluso si el `DataFrame` tiene diversos tipos de datos, también se puede transferir los datos a un arreglo de Numpy:
```
df2.to_numpy()
```
Sin embargo, si todos los elementos son del mismo tipo, se pueden ejecutar más funciones como una rápida revisión de las principales características estadísticas de cada columna:
```
df.describe()
```
O también reordenar los datos con alguna columna de referencia:
```
df.sort_values(by='B')
```
---
## 4.4 - Seleccionar datos
En Python, la selección de datos utilizando Pandas es más eficiente que las expresiones para seleccionar y obtener datos en Numpy. Por ejemplo, para ubicar una fila de datos, se puede utilizar el comando `loc`:
```
df2.loc[2]
```
También se pueden seleccionar un rango de columnas al mismo tiempo:
```
df[0:3]
```
Para obtener una posición en específico, se debe indicar la fila y la columna mediante el comando `at`:
```
df.at[dates[2], 'A']
```
De igual forma se puede ubicar ese mismo elemento por medio de la posición en lugar de los indices, utilizando el comando `iloc`:
```
df.iloc[2, 0]
```
De igual manera se pueden ubicar los datos que cumplan con cierta condición booleana:
```
df[df['A']>0]
```
---
## 4.5 - Operaciones sobre datos
En Python, las operaciones se ejecutan sobre todos los datos arrojando el valor de salida por filas o columnas, por ejemplo para calcular la media estadística de los datos de cada columna, se utiliza el comando `mean` de la siguiente manera:
```
df.mean()
```
Si en cambio se desea conocer la media de los valores por filas, se utiliza la siguiente variación:
```
df.mean(1)
```
También se pueden aplicar operaciones tales como el conteo sobre dichos datos:
```
f = pd.Series(np.random.randint(0, 7, size=10))
f
f.value_counts()
```
También existen operaciones que se pueden aplicar sobre `Series` de palabras:
```
g = pd.Series(['ARbOL', 'BLanCO', 'AvE', 'BuRRo', np.nan])
g.str.lower()
```
---
## 4.6 - Fusionar datos
En Python, para concatenar datos se utiliza el comando `concat()` de la siguiente forma:
```
df = pd.DataFrame(np.random.randn(10,2))
df2 = pd.DataFrame(np.random.randn(10,2))
pieces = [df[:], df2[:]]
pd.concat(pieces)
```
---
## 4.7 - Agrupar datos
En Python, la agrupación se refiere a:
- Separar los datos en grupos basandose en un criterio.
- Aplicar una función a cada grupo independientemente.
- Combinar los resultados en una estructura de datos.
A continuación un ejemplo de agrupación aplicando una suma a los datos:
```
df = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar', 'foo',
'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two',
'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
df
df.groupby('A').sum()
df.groupby(['A', 'B']).sum()
```
---
## 4.8 - Reacomodar datos
En Python, una forma de reacomodar los datos es comprimiendolos mediante el comando `stack`:
```
stacked = df.stack()
stacked
```
También se puede cambiar la forma de ordenar los datos como tablas de pivot:
```
df=pd.DataFrame({'A': ['one', 'one', 'two', 'three']*3,
'B': ['A', 'B', 'C']*4,
'C': ['foo', 'foo', 'foo', 'bar', 'bar', 'bar']*2,
'D': np.random.randn(12),
'E': np.random.randn(12)})
df
pd.pivot_table(df, values='D', index=['A', 'B'], columns=['C'])
```
---
## 4.9 - Series de tiempo
En Python, la asignación de series de tiempo permite generar secuencias con una frecuencia fija y un lapso de tiempo, como por ejemplo:
```
dti = pd.date_range('1-5-2020', periods=3, freq='H')
dti
```
Cuya hora se puede convertir a una zona horaria diferente, como Central Time:
```
dti = dti.tz_localize('UTC')
dti
```
O el Pacífico de los Estados Unidos:
```
dti.tz_convert('US/Pacific')
```
También se pueden convertir una serie de tiempo a una frecuencia particular:
```
idx = pd.date_range('2020-05-01', periods=5, freq='H')
ts = pd.Series(range(len(idx)), index=idx)
ts
ts.resample('2H').mean()
```
---
## 4.10 - Gráficas
En Python, se utiliza la asignación estándar para utilizar los comandos del API de `matplotlib`, con el cuál se puede graficar una `Serie` de datos:
```
import matplotlib.pyplot as plt
plt.close('all')
ts = pd.Series(np.random.randn(1000),
index=pd.date_range('1/5/2020', periods=1000))
ts = ts.cumsum()
ts.plot()
```
También se pueden graficar arreglos del tipo `DataFrame` de manera que se grafican varias curvas en una misma gráfica como se muestra a continuación:
```
df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index,
columns=['A', 'B', 'C', 'D'])
df=df.cumsum()
plt.figure()
df.plot()
plt.legend(loc='best')
```
---
## 4.11 - Importar y exportar datos
En Python, se puede escribir en un archivo CSV mediante el siguiente comando:
```
df.to_csv('modelos')
```
Cuyo contenido se puede llamar desde python utilizando el comando:
```
pd.read_csv('modelos')
```
---
### Más información
* [Página web](https://www.google.com/)
* Libro o algo
* Tutorial [w3schools](https://www.w3schools.com/python/)
---
---
**Universidad de Costa Rica**
Facultad de Ingeniería
Escuela de Ingeniería Eléctrica
---
| github_jupyter |
```
import os
import sys
import numpy as np
import cv2
from data_loader import *
from fbs_config import TrainFBSConfig, InferenceFBSConfig
from fbs_dataset import FBSDataset
from mrcnn import model as modellib
from datahandler import DataHandler
from sklearn.metrics import f1_score
from scipy.ndimage import _ni_support
from scipy.ndimage.morphology import distance_transform_edt, binary_erosion,\
generate_binary_structure
from tqdm import tqdm
from medpy.io import save
from math import ceil, floor
import skimage.color
from skimage.morphology import cube, binary_closing
from skimage.measure import label
# Repo root is three levels up; appended to sys.path so the project
# modules (mrcnn, fbs_config, fbs_dataset, ...) resolve when this script
# runs from its subdirectory.
ROOT_DIR = os.path.abspath('../../../')
sys.path.append(ROOT_DIR)
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, 'logs')
DEFAULT_MODEL_DIR = os.path.join(DEFAULT_LOGS_DIR, 'mask_rcnn/kfold')
# 5x5 structuring element used by cv2.dilate on each predicted 2-D slice.
kernel = np.ones((5,5),np.uint8)
# Shared helper for reading image volumes and their headers.
dh = DataHandler()
def destiny_directory(dice_score, post_processing=False):
    """Return the output directory for a predicted mask, bucketed by Dice score.

    Parameters
    ----------
    dice_score : int
        Dice score scaled to the 0-100 range (e.g. ``floor(dice * 100)``).
    post_processing : bool
        When True, use the post-processed evaluation tree instead of the raw one.

    Returns
    -------
    str
        Relative directory path ending in '/'.
    """
    if post_processing:
        pre = './data/eval_pp/mask_rcnn/'
    else:
        pre = './data/eval/mask_rcnn/'
    # (lower bound, bucket name) pairs, highest bucket first.  Trailing
    # slashes added to 'dice_85_88' and 'dice_less_60' for consistency
    # with every other bucket.
    buckets = [
        (98, 'dice_98_100/'),
        (96, 'dice_96_98/'),
        (94, 'dice_94_96/'),
        (92, 'dice_92_94/'),
        (90, 'dice_90_92/'),
        (88, 'dice_88_90/'),
        (85, 'dice_85_88/'),
        (80, 'dice_80_85/'),
        (70, 'dice_70_80/'),
        (60, 'dice_60_70/'),
    ]
    for lower_bound, bucket in buckets:
        if dice_score >= lower_bound:
            return pre + bucket
    return pre + 'dice_less_60/'
def getFileName(fname):
    """Return the base name of *fname* with everything from the first '.' removed.

    Raises ValueError when the base name has no '.', matching the original
    ``str.index`` behaviour.
    """
    base_name = fname.rsplit('/', 1)[-1]
    return base_name[:base_name.index('.')]
# Pair every image with its mask and build the 10-fold split once;
# kfold_indices[i] holds the train/val index arrays for fold i.
image_files, mask_files = load_data_files('data/kfold_data/')
skf = getKFolds(image_files, mask_files, n=10)
kfold_indices = []
for train_index, test_index in skf.split(image_files, mask_files):
    kfold_indices.append({'train': train_index, 'val': test_index})
def getDataset(val_index):
    """Build and prepare an FBSDataset for the given validation index/indices.

    ``np.take`` accepts either a single integer index or an array of
    indices into the module-level image/mask file lists.
    """
    val_images = np.take(image_files, val_index)
    val_masks = np.take(mask_files, val_index)
    dataset_val = FBSDataset()
    # load_data expects ([images], [masks]); its return value (the dataset
    # length) is not needed here.
    dataset_val.load_data(([val_images], [val_masks]))
    dataset_val.prepare()
    return dataset_val
def getDiceScore(ground_truth, prediction):
    """Return the Dice coefficient between two binary masks.

    Both inputs are binarised and flattened first.  For binary data the
    Dice coefficient equals the F1 score the original computed via
    sklearn, so this drops that dependency and also fixes the use of
    ``np.bool``, which was removed in NumPy >= 1.24 (AttributeError).

    Returns 0.0 when both masks are empty, matching ``f1_score``'s
    behaviour in that case.
    """
    ground_truth = np.asarray(ground_truth, dtype=bool).flatten()
    prediction = np.asarray(prediction, dtype=bool).flatten()
    denominator = ground_truth.sum() + prediction.sum()
    if denominator == 0:
        # Two empty masks: sklearn's f1_score returns 0.0 (with a warning).
        return 0.0
    intersection = np.logical_and(ground_truth, prediction).sum()
    return 2.0 * intersection / denominator
def hd(result, reference, voxelspacing=None, connectivity=1):
    """Symmetric Hausdorff distance between two binary objects.

    Takes the maximum over the surface distances in both directions so the
    measure does not depend on argument order.
    """
    forward = __surface_distances(result, reference, voxelspacing, connectivity)
    backward = __surface_distances(reference, result, voxelspacing, connectivity)
    return max(forward.max(), backward.max())
def hd95(result, reference, voxelspacing=None, connectivity=1):
    """95th-percentile symmetric Hausdorff distance between two binary objects.

    More robust to outlier surface voxels than the plain Hausdorff
    distance; pools the surface distances from both directions before
    taking the percentile.
    """
    forward = __surface_distances(result, reference, voxelspacing, connectivity)
    backward = __surface_distances(reference, result, voxelspacing, connectivity)
    pooled = np.hstack((forward, backward))
    return np.percentile(pooled, 95)
def __surface_distances(result, reference, voxelspacing=None, connectivity=1):
    """Distances from each surface voxel of *result* to the surface of *reference*.

    Parameters
    ----------
    result, reference : array_like
        Binary volumes; anything truthy counts as foreground.
    voxelspacing : None, scalar, or sequence
        Physical spacing per axis; a scalar is broadcast to every axis.
    connectivity : int
        Connectivity passed to ``generate_binary_structure``.

    Raises
    ------
    RuntimeError
        If either volume contains no foreground voxel.

    Fixes over the original:
    * ``np.bool`` (removed in NumPy >= 1.24) replaced with builtin ``bool``.
    * The private helper ``scipy.ndimage._ni_support._normalize_sequence``
      is replaced by equivalent inline normalisation.
    """
    result = np.atleast_1d(result.astype(bool))
    reference = np.atleast_1d(reference.astype(bool))
    if voxelspacing is not None:
        voxelspacing = np.asarray(voxelspacing, dtype=np.float64)
        if voxelspacing.ndim == 0:
            # Scalar spacing: identical spacing along every axis.
            voxelspacing = np.full(result.ndim, float(voxelspacing))
        elif voxelspacing.size != result.ndim:
            raise RuntimeError('voxelspacing must be a scalar or provide one '
                               'value per image dimension')
        if not voxelspacing.flags.contiguous:
            voxelspacing = voxelspacing.copy()
    footprint = generate_binary_structure(result.ndim, connectivity)
    if 0 == np.count_nonzero(result):
        raise RuntimeError('The first supplied array does not contain any binary object.')
    if 0 == np.count_nonzero(reference):
        raise RuntimeError('The second supplied array does not contain any binary object.')
    # The surface is the object minus its erosion.
    result_border = result ^ binary_erosion(result, structure=footprint, iterations=1)
    reference_border = reference ^ binary_erosion(reference, structure=footprint, iterations=1)
    # Distance of every voxel to the nearest reference-surface voxel, then
    # sampled at the result-surface voxels.
    dt = distance_transform_edt(~reference_border, sampling=voxelspacing)
    return dt[result_border]
def evaluateMask(gt_mask, pred_mask):
    """Score a predicted mask against ground truth.

    Returns a (dice, hausdorff, hausdorff95) tuple.
    """
    dice = getDiceScore(gt_mask, pred_mask)
    hausdorff = hd(gt_mask, pred_mask)
    hausdorff95 = hd95(gt_mask, pred_mask)
    return dice, hausdorff, hausdorff95
import random
def prepareForSaving(image):
    """Move the slice axis (axis 0) to the end, the layout expected by medpy's save."""
    trailing_axes = list(range(1, image.ndim))
    return np.transpose(image, axes=trailing_axes + [0])
def predictAll(inferenceFBSConfig, val_indices, post_processing=False):
    """Run Mask R-CNN inference on every validation volume, score and save masks.

    Parameters
    ----------
    inferenceFBSConfig : InferenceFBSConfig
        Config used both to build the model and by ``load_image_gt``.
    val_indices : sequence of int
        Indices into the module-level ``image_files`` list.
    post_processing : bool
        When True, apply binary closing and largest-connected-component
        filtering before scoring, and save under the ``eval_pp`` tree.

    Returns
    -------
    (dice_scores, hd_scores, hd95_scores, names) : four parallel lists.

    Fixes over the original version:
    * ``int_dice_score`` was referenced before assignment in the
      ``dice_score == 0`` branch (NameError on the first all-background
      prediction), and that branch saved the last 2-D slice ``pred``
      instead of the assembled 3-D ``pred_mask``.
    * Hausdorff distances are skipped (sentinel 200) when the Dice score
      is 0, since ``__surface_distances`` raises on empty masks.
    * Bare ``except:`` narrowed to ``except Exception:``.
    * The duplicated save logic is unified.
    """
    model = modellib.MaskRCNN(mode='inference', config=inferenceFBSConfig,
                              model_dir=DEFAULT_MODEL_DIR)
    inferenceFBSConfig.display()
    print(DEFAULT_MODEL_DIR)
    weights_path = model.find_last()
    print('Loading weights from %s' % weights_path)
    model.load_weights(weights_path, by_name=True)

    dice_scores = []
    hd_scores = []
    hd95_scores = []
    names = []
    for image_index in tqdm(val_indices):
        fname = getFileName(image_files[image_index])
        # The header is kept so the saved mask preserves the original geometry.
        not_used_full_image, hdr = dh.getImageData(image_files[image_index])
        dataset = getDataset(image_index)

        prediction = []
        gt_mask = []
        for img_id in dataset.image_ids:
            image, image_meta, class_ids, bbox, mask = modellib.load_image_gt(
                dataset, inferenceFBSConfig, img_id, use_mini_mask=False)
            results = model.detect([image], verbose=0)
            pred = results[0]['masks']
            # An empty detection comes back with a zero-sized channel axis;
            # replace it with an all-background single-channel mask.
            if len(pred.shape) > 2 and pred.shape[2] == 0:
                pred = np.zeros((pred.shape[0], pred.shape[1], 1))
            if mask.shape[2] == 0:
                mask = np.zeros((pred.shape[0], pred.shape[1], 1))
            # Binarise, then dilate each slice with the module-level 5x5 kernel.
            pred[pred >= 0.5] = 1
            pred[pred < 0.5] = 0
            pred = np.asarray(pred, dtype=np.uint8)
            pred = cv2.dilate(pred, kernel, iterations=1)
            prediction.append(pred)
            gt_mask.append(mask)

        pred_mask = np.squeeze(np.asarray(prediction))
        gt_mask = np.squeeze(np.asarray(gt_mask))

        if post_processing:
            pred_mask = binary_closing(pred_mask, cube(2))
            try:
                # Keep only the largest connected component (skip background).
                labels = label(pred_mask)
                pred_mask = (labels ==
                             np.argmax(np.bincount(labels.flat)[1:]) + 1).astype(int)
            except Exception:
                # e.g. no foreground at all -> keep the mask unchanged.
                pass
        pred_mask = np.array(pred_mask, dtype=np.uint16)

        dice_score = getDiceScore(gt_mask, pred_mask)
        int_dice_score = floor(dice_score * 100)
        if dice_score == 0:
            # Sentinel distances for failed predictions; also avoids the
            # RuntimeError __surface_distances raises on empty masks.
            hd_score, hd95_score = 200, 200
        else:
            hd_score = hd(gt_mask, pred_mask)
            hd95_score = hd95(gt_mask, pred_mask)
        names.append(fname)
        dice_scores.append(dice_score)
        hd_scores.append(hd_score)
        hd95_scores.append(hd95_score)

        out_mask = prepareForSaving(pred_mask)
        save_path = os.path.join(
            ROOT_DIR, destiny_directory(int_dice_score,
                                        post_processing=post_processing))
        save(out_mask, os.path.join(save_path, fname + '_mask_rcnn_'
                                    + str(int_dice_score) + '.nii'), hdr)
    return dice_scores, hd_scores, hd95_scores, names
# Aggregate metrics across every fold, for both evaluation passes.
all_dice = []
all_hd = []
all_hd95 = []
all_names = []
for post_processing in [False, True]:
    for i in range(10):#len(kfold_indices)):
        # Same hyper-parameters for train and inference configs; kfold_i
        # selects which fold's weights model.find_last() will pick up.
        configParams = {'da': True,'tl': True, 'mask_dim': 28, 'wl': True, 'kfold_i': i}
        trainFBSConfig = TrainFBSConfig(**configParams)
        inferenceFBSConfig = InferenceFBSConfig(**configParams)
        # NOTE(review): display() appears to print and return None, so this
        # line also prints "None" — confirm against the config class.
        print(inferenceFBSConfig.display())
        dice_scores, hd_scores, hd95_scores, names = predictAll(inferenceFBSConfig,
                                            kfold_indices[i]['val'],
                                            post_processing = post_processing)
        print('Finished K%d'%i)
        all_dice += dice_scores
        all_hd += hd_scores
        all_hd95 += hd95_scores
        all_names.extend(names)
    # NOTE(review): the all_* lists are never reset between the two passes,
    # so the post-processing report also contains the raw-pass rows and its
    # means mix both passes — confirm this accumulation is intended.
    if post_processing:
        report_name = 'data/eval_pp/mask_rcnn/mask_rcnn_report.txt'
    else:
        report_name = 'data/eval/mask_rcnn/mask_rcnn_report.txt'
    report_name = os.path.join(ROOT_DIR, report_name)
    # One row per volume: name, dice, hd, hd95 — followed by the means.
    with open(report_name, 'w+') as f:
        for i in range(len(all_dice)):
            f.write("%s, %f, %f, %f\n"%(all_names[i],
                                        all_dice[i],
                                        all_hd[i],
                                        all_hd95[i]))
        f.write('\n')
        f.write('Final results for mask_rcnn\n')
        f.write('dice %f\n'%np.mean(all_dice))
        f.write('hd %f\n'%np.mean(all_hd))
        f.write('hd95 %f\n'%np.mean(all_hd95))
# Dump each metric list to stdout for quick copy/paste.
print('dice')
for score in all_dice:
    print(score)
print()
print('hd')
for score in all_hd:
    print(score)
print()
print('hd95')
for score in all_hd95:
    print(score)
```
| github_jupyter |
Lambda School Data Science
*Unit 2, Sprint 2, Module 3*
---
# Cross-Validation
## Assignment
- [ ] [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.
- [ ] Continue to participate in our Kaggle challenge.
- [ ] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.
- [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)
- [ ] Commit your notebook to your fork of the GitHub repo.
**You can't just copy** from the lesson notebook to this assignment.
- Because the lesson was **regression**, but the assignment is **classification.**
- Because the lesson used [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html), which doesn't work as-is for _multi-class_ classification.
So you will have to adapt the example, which is good real-world practice.
1. Use a model for classification, such as [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)
2. Use hyperparameters that match the classifier, such as `randomforestclassifier__ ...`
3. Use a metric for classification, such as [`scoring='accuracy'`](https://scikit-learn.org/stable/modules/model_evaluation.html#common-cases-predefined-values)
4. If you’re doing a multi-class classification problem — such as whether a waterpump is functional, functional needs repair, or nonfunctional — then use a categorical encoding that works for multi-class classification, such as [OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html) (not [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html))
## Stretch Goals
### Reading
- Jake VanderPlas, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation
- Jake VanderPlas, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107)
- Ron Zacharski, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation
- Sebastian Raschka, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb)
- Peter Worcester, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85)
### Doing
- Add your own stretch goals!
- Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/). See the previous assignment notebook for details.
- In additon to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). Experiment with these alternatives.
- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:
> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...
The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines?
### BONUS: Stacking!
Here's some code you can use to "stack" multiple submissions, which is another form of ensembling:
```python
import pandas as pd
# Filenames of your submissions you want to ensemble
files = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']
target = 'status_group'
submissions = (pd.read_csv(file)[[target]] for file in files)
ensemble = pd.concat(submissions, axis='columns')
majority_vote = ensemble.mode(axis='columns')[0]
sample_submission = pd.read_csv('sample_submission.csv')
submission = sample_submission.copy()
submission[target] = majority_vote
submission.to_csv('my-ultimate-ensemble-submission.csv', index=False)
```
```
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
import pandas as pd
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
import numpy as np
def wrangle(X):
    """Clean and feature-engineer a waterpump feature frame.

    Operates on a copy of *X*.  NOTE: the top-49 funder list is computed
    from the module-level ``train`` frame on purpose, so the train and
    test sets share the same funder categories.
    """
    X = X.copy()  # avoid SettingWithCopyWarning on the caller's frame

    # A latitude of -2e-08 is a bogus placeholder; fold it into the
    # zero -> NaN treatment below.
    X['latitude'] = X['latitude'].replace(-2.000000e-08, 0)
    for zero_means_missing in ['longitude', 'latitude', 'construction_year', 'public_meeting']:
        X[zero_means_missing] = X[zero_means_missing].replace(0, np.nan)

    # Date features from the recording timestamp.
    X['date_recorded'] = pd.to_datetime(X['date_recorded'])
    recorded = pd.DatetimeIndex(X['date_recorded'])
    X['day_recorded'] = recorded.day
    X['month_recorded'] = recorded.month
    X['year_recorded'] = recorded.year
    X['years_since_construction'] = X['year_recorded'] - X['construction_year']

    # Reduce cardinality: keep the 49 most frequent funders (computed on
    # the training frame), lump everything else into 'other'.
    top49 = train['funder'].value_counts()[:49].index
    X.loc[~X['funder'].isin(top49), 'funder'] = 'other'

    # Binary flag: is there enough water at the pump?
    X['enough'] = np.where(X['quantity'] == 'enough', 1, 0)

    # Water amount per person served (population 0 yields inf; the caller
    # replaces +/-inf with NaN afterwards).
    X['pop_per_water'] = X['amount_tsh'] / X['population']

    # Drop duplicated/near-duplicate columns and ones with heavy missingness.
    redundant = ['quantity_group', 'date_recorded', 'payment_type', 'num_private',
                 'source_class', 'source_type', 'waterpoint_type_group', 'quality_group',
                 'extraction_type_group', 'extraction_type_class', 'management_group',
                 'scheme_management']
    return X.drop(columns=redundant)
train = wrangle(train)
test = wrangle(test)
#select features
target = 'status_group'
#get a data frame with all features
train_features = train.drop(columns = [target,'id', 'recorded_by'])
#get numeric features
numeric = train_features.select_dtypes(include='number').columns.tolist()
#get cardinality of categorical colummns
cardinality = train_features.select_dtypes(exclude = 'number').nunique()
#exlude columns with cardinality more than 50
categorical = cardinality[cardinality <= 50 ].index.tolist()
#combine
features = categorical + numeric
print(features)
X_train = train[features]
y_train = train[target]
X_train['quantity'].value_counts()
y_train
X_train.head()
X_train = X_train.replace([np.inf, -np.inf], np.nan)
# imports
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.model_selection import cross_val_score
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='mean'),
#StandardScaler(),
#SelectKBest(f_regression, k = 40),
RandomForestClassifier(random_state=0, n_jobs=-1)
)
k = 5
scores = cross_val_score(pipeline, X_train, y_train, cv=k,
scoring='accuracy')
print(f'Accuracy for {k} folds:', scores)
print('Model Hyperparameters:')
print(pipeline.named_steps['randomforestclassifier'])
print(pipeline.named_steps['ordinalencoder'])
print(pipeline.named_steps['simpleimputer'])
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import randint, uniform
param_distributions = {
#'simpleimputer__strategy' : ['mean', 'median'],
'randomforestclassifier__max_depth': [5, 10, 15, 20, 30, 40, None],
'randomforestclassifier__n_estimators': randint(50, 500),
'randomforestclassifier__max_features': uniform(0, 1),
'randomforestclassifier__min_samples_leaf': [10,20,30,40,50],
}
search = RandomizedSearchCV(
pipeline,
param_distributions= param_distributions,
n_iter = 10,
cv = 5,
scoring = 'accuracy',
verbose=10,
return_train_score=True,
n_jobs=-1
)
search.fit(X_train, y_train);
print('Best hyperparameters: ', search.best_params_)
print('Best accuracy: ', search.best_score_)
pipeline = search.best_estimator_
X_test = test[features]
X_test = X_test.replace([np.inf, -np.inf], np.nan)
y_pred = pipeline.predict(X_test)
print(len(test))
print(len(y_pred))
test.index
sample_submission.head()
submission=pd.DataFrame(y_pred, columns=['status_group'])
submission['id']=sample_submission.id
submission=submission[['id', 'status_group']]
submission.tail()
submission.to_csv('submission_file.csv',index=False)
```
| github_jupyter |
<a name="top"></a>
<div style="width:1000 px">
<div style="float:right; width:98 px; height:98px;">
<img src="https://raw.githubusercontent.com/Unidata/MetPy/master/src/metpy/plots/_static/unidata_150x150.png" alt="Unidata Logo" style="height: 98px;">
</div>
<h1>Intermediate NumPy</h1>
<h3>Unidata Python Workshop</h3>
<div style="clear:both"></div>
</div>
<hr style="height:2px;">
<div style="float:right; width:250 px"><img src="http://www.contribute.geeksforgeeks.org/wp-content/uploads/numpy-logo1.jpg" alt="NumPy Logo" style="height: 250px;"></div>
### Questions
1. How do we work with the multiple dimensions in a NumPy Array?
1. How can we extract irregular subsets of data?
1. How can we sort an array?
### Objectives
1. <a href="#indexing">Using axes to slice arrays</a>
1. <a href="#boolean">Index arrays using true and false</a>
1. <a href="#integers">Index arrays using arrays of indices</a>
<a name="indexing"></a>
## 1. Using axes to slice arrays
The solution to the last exercise in the Numpy Basics notebook introduces an important concept when working with NumPy: the axis. This indicates the particular dimension along which a function should operate (provided the function does something taking multiple values and converts to a single value).
Let's look at a concrete example with `sum`:
```
# Convention for import to get shortened namespace
import numpy as np
# Create an array for testing
a = np.arange(12).reshape(3, 4)
a
# This calculates the total of all values in the array
np.sum(a)
# Keep this in mind:
a.shape
# Instead, take the sum across the rows:
np.sum(a, axis=0)
# Or do the same and take the some across columns:
np.sum(a, axis=1)
```
<div class="alert alert-success">
<b>EXERCISE</b>:
<ul>
<li>Finish the code below to calculate advection. The trick is to figure out
how to do the summation.</li>
</ul>
</div>
```
# Synthetic data
temp = np.random.randn(100, 50)
u = np.random.randn(100, 50)
v = np.random.randn(100, 50)
# Calculate the gradient components
gradx, grady = np.gradient(temp)
# Turn into an array of vectors:
# axis 0 is x position
# axis 1 is y position
# axis 2 is the vector components
grad_vec = np.dstack([gradx, grady])
print(grad_vec.shape)
# Turn wind components into vector
wind_vec = np.dstack([u, v])
# Calculate advection, the dot product of wind and the negative of gradient
# DON'T USE NUMPY.DOT (doesn't work). Multiply and add.
```
<div class="alert alert-info">
<b>SOLUTION</b>
</div>
```
# %load solutions/advection.py
```
<a href="#top">Top</a>
<hr style="height:2px;">
<a name="boolean"></a>
## 2. Indexing Arrays with Boolean Values
Numpy can easily create arrays of boolean values and use those to select certain values to extract from an array
```
# Create some synthetic data representing temperature and wind speed data
np.random.seed(19990503) # Make sure we all have the same data
temp = (20 * np.cos(np.linspace(0, 2 * np.pi, 100)) +
50 + 2 * np.random.randn(100))
spd = (np.abs(10 * np.sin(np.linspace(0, 2 * np.pi, 100)) +
10 + 5 * np.random.randn(100)))
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot(temp, 'tab:red')
plt.plot(spd, 'tab:blue');
```
By doing a comparison between a NumPy array and a value, we get an
array of values representing the results of the comparison between
each element and the value
```
temp > 45
```
We can take the resulting array and use this to index into the
NumPy array and retrieve the values where the result was true
```
print(temp[temp > 45])
```
So long as the size of the boolean array matches the data, the boolean array can come from anywhere
```
print(temp[spd > 10])
# Make a copy so we don't modify the original data
temp2 = temp.copy()
# Replace all places where spd is <10 with NaN (not a number) so matplotlib skips it
temp2[spd < 10] = np.nan
plt.plot(temp2, 'tab:red')
```
Can also combine multiple boolean arrays using the syntax for bitwise operations. **MUST HAVE PARENTHESES** due to operator precedence.
```
print(temp[(temp < 45) & (spd > 10)])
```
<div class="alert alert-success">
<b>EXERCISE</b>:
<ul>
<li>Heat index is only defined for temperatures >= 80F and relative humidity values >= 40%. Using the data generated below, use boolean indexing to extract the data where heat index has a valid value.</li>
</ul>
</div>
```
# Here's the "data"
np.random.seed(19990503) # Make sure we all have the same data
temp = (20 * np.cos(np.linspace(0, 2 * np.pi, 100)) +
80 + 2 * np.random.randn(100))
rh = (np.abs(20 * np.cos(np.linspace(0, 4 * np.pi, 100)) +
50 + 5 * np.random.randn(100)))
# Create a mask for the two conditions described above
# good_heat_index =
# Use this mask to grab the temperature and relative humidity values that together
# will give good heat index values
# temp[] ?
# BONUS POINTS: Plot only the data where heat index is defined by
# inverting the mask (using `~mask`) and setting invalid values to np.nan
```
<div class="alert alert-info">
<b>SOLUTION</b>
</div>
```
# %load solutions/heat_index.py
```
<a href="#top">Top</a>
<hr style="height:2px;">
<a name="integers"></a>
## 3. Indexing using arrays of indices
You can also use a list or array of indices to extract particular values--this is a natural extension of the regular indexing. For instance, just as we can select the first element:
```
print(temp[0])
```
We can also extract the first, fifth, and tenth elements:
```
print(temp[[0, 4, 9]])
```
One of the ways this comes into play is trying to sort numpy arrays using `argsort`. This function returns the indices of the array that give the items in sorted order. So for our temp "data":
```
inds = np.argsort(temp)
print(inds)
```
We can use this array of indices to pass into temp to get it in sorted order:
```
print(temp[inds])
```
Or we can slice `inds` to only give the 10 highest temperatures:
```
ten_highest = inds[-10:]
print(temp[ten_highest])
```
There are other numpy arg functions that return indices for operating:
```
np.*arg*?
```
<a href="#top">Top</a>
<hr style="height:2px;">
| github_jupyter |
```
def dig_pow(n, p):
    """Return k such that the digits of n raised to consecutive powers
    p, p+1, ... sum to k * n, or -1 when no such integer k exists.

    e.g. dig_pow(46288, 3) == 51 because
    4**3 + 6**4 + 2**5 + 8**6 + 8**7 == 2360688 == 46288 * 51.

    (The original draft split the number with modulo arithmetic, which
    walked the digits in the wrong order with the wrong exponents and
    never matched the expected result stated in its own comment.)
    """
    total = sum(int(digit) ** (p + offset)
                for offset, digit in enumerate(str(n)))
    quotient, remainder = divmod(total, n)
    return quotient if remainder == 0 else -1
dig_pow(46288,3) # SHOULD RETURN 51 as 4³ + 6⁴+ 2⁵ + 8⁶ + 8⁷ = 2360688 = 46288 * 51
def dig_pow(n, p):
    """Return k such that the sum of n's digits raised to consecutive
    powers p, p+1, ... equals k * n, or -1 if no such integer k exists.

    Improvement over the draft: the per-iteration debug prints are
    removed so the function no longer pollutes stdout.
    """
    total = 0
    power = int(p)
    for digit in str(n):
        # Raise each decimal digit to an increasing power, starting at p.
        total += int(digit) ** power
        power += 1
    # total is a multiple of n exactly when a valid k exists.
    return total // n if total % n == 0 else -1
dig_pow(46288,3)
def openOrSenior(data):
    """Categorize club members as "Senior" or "Open".

    Args:
        data: iterable of [age, handicap] pairs.

    Returns:
        A list of category strings in input order: "Senior" when
        age >= 55 and handicap > 7, otherwise "Open".

    The original draft only printed the pairs; the categorization and
    return were commented-out stubs.
    """
    return ["Senior" if age >= 55 and handicap > 7 else "Open"
            for age, handicap in data]
openOrSenior([[45, 12],[55,21],[19, -2],[104, 20]])
def dirReduc(arr):
    """Remove adjacent opposite directions until none remain.

    e.g. ["NORTH", "SOUTH", "SOUTH", "EAST", "WEST", "NORTH", "WEST"]
    reduces to ["WEST"].

    Bug fix: the original indexed arr[i+1] (IndexError on the last
    element), never removed anything, and returned arr unchanged.
    A stack lets cancellations cascade correctly.
    """
    opposite = {"NORTH": "SOUTH", "SOUTH": "NORTH",
                "EAST": "WEST", "WEST": "EAST"}
    stack = []
    for step in arr:
        # Cancel against the previous surviving step when opposite.
        if stack and stack[-1] == opposite[step]:
            stack.pop()
        else:
            stack.append(step)
    return stack
a = ["NORTH", "SOUTH", "SOUTH", "EAST", "WEST", "NORTH", "WEST"]
dirReduc(a) # West
#Check if North and South are adjacent,
def dirReduc(arr):
    # NOTE(review): broken draft — it never removes cancelled pairs, never
    # returns anything, and compares strings with `is` / `is not`, which
    # tests object identity rather than equality (it only happens to work
    # for short interned literals in CPython and is not reliable).
    newList=[]
    for i, element in enumerate(arr):
        # Look one element back; None at the start of the list.
        previous_element = arr[i-1] if i > 0 else None
        current_element = element
        if current_element is "NORTH" and previous_element is not "SOUTH":
            newList.append("NORTH")
        elif current_element is "SOUTH" and previous_element is not "NORTH":
            newList.append("SOUTH")
        elif current_element is "EAST" and previous_element is not "WEST":
            newList.append("EAST")
        elif current_element is "WEST" and previous_element is not "EAST":
            newList.append("WEST")
        #if dir== "NORTH" and newList[i-1]
        #newList[i].append(dir)
        #if dir[i] == "NORTH" and dir[i+1] == "SOUTH":
        #    dir.pop(i)
    print (newList)
def dirReduc(arr):
    """Reduce a route by cancelling adjacent opposite directions.

    Replaces a syntactically invalid sketch: `enumerate[arr]` (subscript
    instead of a call), a missing colon after `if`, and an unterminated
    string literal that broke the whole cell.
    """
    opposite = {"NORTH": "SOUTH", "SOUTH": "NORTH",
                "EAST": "WEST", "WEST": "EAST"}
    reduced = []
    for step in arr:
        # A step cancels the previous surviving step when they oppose.
        if reduced and opposite[reduced[-1]] == step:
            reduced.pop()
        else:
            reduced.append(step)
    return reduced
a = ["NORTH", "SOUTH", "SOUTH", "EAST", "WEST", "NORTH", "WEST"]
#dirReduc(a) # West
arr=a
dir = [("NORTH","SOUTH"),("EAST","WEST"),("SOUTH","NORTH"),("WEST","EAST")]
def tup(a):
    """Pair up consecutive elements: [1, 2, 3, 4] -> [(1, 2), (3, 4)].

    For odd-length input the unmatched final element is appended as-is:
    [1, 2, 3] -> [(1, 2), 3].

    Bug fix: the even-length branch of the original read the *global*
    `arr` instead of the parameter `a`, so calls with other sequences
    silently paired the wrong data.
    """
    pairs = list(zip(a[::2], a[1::2]))
    if len(a) % 2 != 0:
        # zip drops the unmatched trailing element; keep it explicitly.
        pairs.append(a[-1])
    return pairs
#i=iter(arr)
#if i in dir and i.next()==
def dirReduc(arr):
    # NOTE(review): broken draft — `dir in tuplist` asks whether the whole
    # list-of-pairs is an element of tuplist (always False here), `new` is
    # a generator object (printed as such), and the recursion has no base
    # case if the while body ever runs.
    dir = [("NORTH","SOUTH"),("EAST","WEST"),("SOUTH","NORTH"),("WEST","EAST")]
    def tup(a):
        # Pair consecutive elements; odd-length input keeps its tail element.
        if len(a) % 2 != 0:
            listTup= list(zip(a[::2],a[+1::2]))
            listTup.append(a[-1])
        else:
            # NOTE(review): reads the outer `arr`, not the parameter `a`.
            listTup= list(zip(arr[::2],arr[+1::2]))
        return listTup
    new=[]
    tuplist=tup(arr)
    print (tuplist)
    if dir in tuplist :
        print("y")
    else:
        print("nah")
    print (dir)
    while dir in tuplist:
        new= (x for x in tuplist if x not in dir)
        tuplist =dirReduc(new)
        print(new)
    #l3 = [x for x in l1 if x not in l2]
    #else:
    #    pass
    #print(a)
a = ["NORTH", "SOUTH", "SOUTH", "EAST", "WEST", "NORTH", "WEST"]
dirReduc(a) # West
def dirReduc(arr):
    def tup(a):
        # Pair consecutive elements; odd-length input keeps its tail element.
        if len(a) % 2 != 0:
            listTup= list(zip(a[::2],a[+1::2]))
            listTup.append(a[-1])
        else:
            # NOTE(review): reads the outer `arr`, not the parameter `a`.
            listTup= list(zip(arr[::2],arr[+1::2]))
        return listTup
    new= tup(arr)
    old=[]
    dir = [("NORTH","SOUTH"),("EAST","WEST"),("SOUTH","NORTH"),("WEST","EAST")]
    #recursive
    # NOTE(review): loop guard looks inverted — `old` starts empty while
    # `new` starts non-empty, so len(old) == len(new) is initially False
    # for non-trivial input and the body never runs; when it does run,
    # tup(new) re-pairs tuples that are already pairs.
    while len(old)== len(new):
        old= tup(new)
        new= [element for element in old if element not in dir ]
        new= [x for t in new for x in t if len(x)>1]
        #and element for x in new fro element in x
        print (list(new))
        #old=new
        #print (old)
    #print (new)
a = ["NORTH", "SOUTH", "SOUTH", "EAST", "WEST", "NORTH", "WEST"]
dirReduc(a) # West
def dirReduc(arr):
    def tup(a):
        # Pair consecutive elements; odd-length input keeps its tail element.
        if len(a) % 2 != 0:
            listTup= list(zip(a[::2],a[+1::2]))
            listTup.append(a[-1])
        else:
            # NOTE(review): reads the outer `arr`, not the parameter `a`.
            listTup= list(zip(arr[::2],arr[+1::2]))
        return listTup
    a=arr
    old= tup(a)
    new=[]
    dir = [("NORTH","SOUTH"),("EAST","WEST"),("SOUTH","NORTH"),("WEST","EAST")]
    #recursive?
    # NOTE(review): performs one filtering pass only — opposite pairs that
    # straddle a pairing boundary are never cancelled, and the result is
    # printed rather than returned.
    if len(old) != len(new):
        #old= tup(new)
        new= [element for element in old if element not in dir ]
        print(new)
        #new= [element for tup in new for element in new ]
        #new= [element for t in new for element in t if len(element)>1]
        #and element for x in new fro element in x
        print (list(new))
        #old=new
        #print (old)
    else:
        print (new)
a = ["NORTH", "SOUTH", "SOUTH", "EAST", "WEST", "NORTH", "WEST"]
dirReduc(a) # West
```
if arr[index]=="NORTH" and tempList[index+1]!="SOUTH" :#or arr[index]=="SOUTH" and tempList[index+1]!="NORTH" :
newList.append(arr[index])
#elif arr[index]=="EAST" and tempList[index+1]!="WEST" or arr[index]=="WEST" and tempList[index+1]!="EAST" :
newList.append(arr[index])
else:
pass
#while len(newList)==len(tempList):
#print(newList)
#dirReduc(newList)
print(newList)
```
def dirReduc(arr):
    # NOTE(review): the membership test below wraps a list of zipped pairs
    # in another list and checks it against a list containing a fresh
    # generator object, so `not in` is effectively always True and every
    # element after the first is appended — nothing is ever cancelled, and
    # the result is printed instead of returned.
    tempList= arr
    newList=[]
    #print(arr)
    dir = [("NORTH","SOUTH"),("EAST","WEST"),("SOUTH","NORTH"),("WEST","EAST")]
    #print (dir)
    for index in range(len(arr)-1):
        #for i in range(len(dir)):
        #if [(zip(arr[::],tempList[1::]))] not in dir[:]:
        #newList.append(arr[index+1])
        #print(list(zip(arr[::],tempList[+1::])))
        if [list(zip(arr[index:],tempList[index+1:]))] not in [(x for x in dir)]:
            newList.append(arr[index+1])
        #else:
        #pass
    print(newList)
dirReduc(["NORTH","SOUTH","SOUTH","EAST","WEST"])
dirReduc(["NORTH","SOUTH","SOUTH","EAST","WEST"])
def dirReduc(arr):
    # NOTE(review): this draft only counts directions not immediately
    # followed by their opposite; the counts cannot reconstruct the reduced
    # path, the final element of arr is never inspected as a current step,
    # and the result is printed instead of returned.
    turtle ={'N':0,'S':0,'E':0,'W':0}
    # NOTE(review): aliases turtle — this is not a copy, and it is unused.
    turtleNew=turtle
    # a is arr shifted left by one, i.e. a[i] == arr[i+1].
    a=arr[1:]
    for i in range(0,len(arr)-1):
        if arr[i] == "NORTH" and a[i] != "SOUTH":
            turtle['N']+=1
            #turtle['S']-=1
        elif arr[i] == "SOUTH" and a[i] != "NORTH":
            turtle['S']+=1
            #turtle['N']-=1
        elif arr[i] == "EAST" and a[i] != "WEST":
            turtle['E']+=1
            #turtle['W']-=1
        elif arr[i] == "WEST" and a[i] != "EAST":
            turtle['W']+=1
            #turtle['E']-=1
    print(turtle)
a = ["NORTH", "SOUTH", "SOUTH", "EAST", "WEST", "NORTH", "WEST"]
dirReduc(a)
def dirReduc(arr):
    # NOTE(review): `new` aliases arr (not a copy), and the entire cancel
    # logic sits inside a triple-quoted string — a no-op expression — so
    # this draft just prints arr once per (element, element) pair.
    new=arr
    for l in arr:
        for dir in new:
            """
            if new[:-2] =="NORTH" and l == "SOUTH":
                new.pop()
                new.pop()
                print(new)
            elif new[:-2]=="SOUTH" and l == "NORTH":
                new.pop()
                new.pop()
                print(new)
            elif new[:-2]=="EAST" and l == "WEST":
                new.pop()
                new.pop()
                print(new)
            elif new[:-2]=="WEST" and l == "EAST":
                new.pop()
                new.pop()
                print(new)
            #else:
            #break
            # elif new[:-1]==False:
            # new.append(l)
            """
            print(new)
a = ["NORTH", "SOUTH", "SOUTH", "EAST", "WEST", "NORTH", "WEST"]
dirReduc(a)
```
| github_jupyter |
<h1> 2c. Refactoring to add batching and feature-creation </h1>
In this notebook, we continue reading the same small dataset, but refactor our ML pipeline in two small, but significant, ways:
<ol>
<li> Refactor the input to read data in batches.
<li> Refactor the feature creation so that it is not one-to-one with inputs.
</ol>
The Pandas function in the previous notebook also batched the data, but only after it had read the whole dataset into memory -- on a large dataset, this won't be an option.
```
import tensorflow as tf
import numpy as np
import shutil
print(tf.__version__)
```
<h2> 1. Refactor the input </h2>
Read data created in Lab1a, but this time make it more general and performant. Instead of using Pandas, we will use TensorFlow's Dataset API.
```
CSV_COLUMNS = ['fare_amount', 'pickuplon','pickuplat','dropofflon','dropofflat','passengers', 'key']
LABEL_COLUMN = 'fare_amount'
DEFAULTS = [[0.0], [-74.0], [40.0], [-74.0], [40.7], [1.0], ['nokey']]
def read_dataset(filename, mode, batch_size = 512):
    """Build a TensorFlow 1.x Estimator input_fn that streams CSV rows.

    Args:
        filename: glob pattern for the CSV file(s) to read.
        mode: tf.estimator.ModeKeys value; TRAIN shuffles and repeats
            indefinitely, any other mode makes a single ordered pass.
        batch_size: number of rows per batch (default 512).

    Returns:
        A zero-argument input_fn returning a (features dict, label) pair
        of tensors, as expected by tf.estimator models.

    Relies on module-level CSV_COLUMNS, DEFAULTS, and LABEL_COLUMN.
    """
    def _input_fn():
        def decode_csv(value_column):
            # Parse one CSV line into per-column tensors, then split off
            # the label from the feature dict.
            columns = tf.decode_csv(value_column, record_defaults = DEFAULTS)
            features = dict(zip(CSV_COLUMNS, columns))
            label = features.pop(LABEL_COLUMN)
            return features, label
        # Create list of files that match pattern
        file_list = tf.gfile.Glob(filename)
        # Create dataset from file list
        dataset = tf.data.TextLineDataset(file_list).map(decode_csv)
        if mode == tf.estimator.ModeKeys.TRAIN:
            num_epochs = None # indefinitely
            dataset = dataset.shuffle(buffer_size = 10 * batch_size)
        else:
            num_epochs = 1 # end-of-input after this
        # repeat(None) loops forever; repeat(1) yields a single epoch.
        dataset = dataset.repeat(num_epochs).batch(batch_size)
        return dataset.make_one_shot_iterator().get_next()
    return _input_fn
def get_train():
    """Training input_fn: shuffled batches, repeats indefinitely."""
    return read_dataset('./taxi-train.csv', mode = tf.estimator.ModeKeys.TRAIN)

def get_valid():
    """Validation input_fn: single ordered pass, no shuffling."""
    return read_dataset('./taxi-valid.csv', mode = tf.estimator.ModeKeys.EVAL)

def get_test():
    """Test input_fn: single ordered pass, no shuffling."""
    return read_dataset('./taxi-test.csv', mode = tf.estimator.ModeKeys.EVAL)
```
<h2> 2. Refactor the way features are created. </h2>
For now, pass these through (same as previous lab). However, refactoring this way will enable us to break the one-to-one relationship between inputs and features.
```
INPUT_COLUMNS = [
tf.feature_column.numeric_column('pickuplon'),
tf.feature_column.numeric_column('pickuplat'),
tf.feature_column.numeric_column('dropofflat'),
tf.feature_column.numeric_column('dropofflon'),
tf.feature_column.numeric_column('passengers'),
]
def add_more_features(feats):
    """Feature-engineering hook: currently passes the columns through.

    Exists so derived feature columns can later be added in one place
    without changing any caller.
    """
    # Nothing to add (yet!)
    return feats
feature_cols = add_more_features(INPUT_COLUMNS)
```
<h2> Create and train the model </h2>
Note that we train for num_steps * batch_size examples.
```
tf.logging.set_verbosity(tf.logging.INFO)
OUTDIR = 'taxi_trained'
shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time
model = tf.estimator.LinearRegressor(
feature_columns = feature_cols, model_dir = OUTDIR)
model.train(input_fn = get_train(), steps = 100); # TODO: change the name of input_fn as needed
```
<h3> Evaluate model </h3>
As before, evaluate on the validation data. We'll do the third refactoring (to move the evaluation into the training loop) in the next lab.
```
def print_rmse(model, name, input_fn):
    """Evaluate `model` on one batch from `input_fn`; print and return RMSE.

    Args:
        model: an estimator-like object exposing .evaluate(input_fn, steps)
            that returns a metrics dict containing 'average_loss' (MSE).
        name: dataset label used in the printed message.
        input_fn: input function forwarded to model.evaluate().

    Returns:
        The RMSE as a float.  (Previously the value was only printed;
        returning it lets callers reuse it, and existing callers that
        ignore the return value are unaffected.)
    """
    metrics = model.evaluate(input_fn = input_fn, steps = 1)
    rmse = np.sqrt(metrics['average_loss'])
    print('RMSE on {} dataset = {}'.format(name, rmse))
    return rmse
print_rmse(model, 'validation', get_valid())
```
Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| github_jupyter |
# Food Image Classifier
This is part of the Manning Live Project - https://liveproject.manning.com/project/210 . In synopsis: by working on this project, I will be classifying 101 varieties of food. The full dataset is already publicly available, but we will start with a subset of it for the classifier.
## Dataset
As a general best practice, ALWAYS start with a subset of the dataset rather than the full one. There are two reasons for this:
1. As you experiment with the model, you don't want to run over the entire dataset, which would slow down iteration.
2. You would end up wasting a lot of GPU resources well before finding the best model for the job.
In the case of this live project, the authors have already shared a subset of the data, so we can use it for the baseline model.
```
#!wget https://lp-prod-resources.s3-us-west-2.amazonaws.com/other/Deploying+a+Deep+Learning+Model+on+Web+and+Mobile+Applications+Using+TensorFlow/Food+101+-+Data+Subset.zip
#!unzip Food+101+-+Data+Subset.zip
import torch
from torchvision import datasets,models
import torchvision.transforms as tt
import numpy as np
import matplotlib.pyplot as plt
from torchvision.utils import make_grid
from torch.utils.data import DataLoader,random_split,Dataset
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from fastprogress.fastprogress import master_bar, progress_bar
stats = ((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
train_tfms = tt.Compose([tt.RandomHorizontalFlip(),
tt.Resize([224,224]),
tt.ToTensor(),
tt.Normalize(*stats,inplace=True)])
valid_tfms = tt.Compose([tt.Resize([224,224]),tt.ToTensor(), tt.Normalize(*stats)])
```
Create a Pytorch dataset from the image folder. This will allow us to create a Training dataset and validation dataset
```
ds = datasets.ImageFolder('food-101-subset/images/')
class CustomDataset(Dataset):
    """Wrap an (image, label) dataset and apply a transform on access."""

    def __init__(self, ds, transformer):
        # ds: any indexable dataset yielding (image, label) pairs.
        # transformer: callable applied to the image each time it is read.
        self.ds = ds
        self.transform = transformer

    def __getitem__(self, idx):
        image, label = self.ds[idx]
        return self.transform(image), label

    def __len__(self):
        # Bug fix: the original returned len(ds) — the *global* ImageFolder
        # — so a wrapped subset reported the full dataset's length.
        return len(self.ds)
train_len=0.8*len(ds)
val_len = len(ds) - train_len
int(train_len),int(val_len)
train_ds,val_ds = random_split(dataset=ds,lengths=[int(train_len),int(val_len)],generator=torch.Generator().manual_seed(42))
t_ds = CustomDataset(train_ds.dataset,train_tfms)
v_ds = CustomDataset(val_ds.dataset,valid_tfms)
batch_size = 32
train_dl = DataLoader(t_ds, batch_size, shuffle=True, pin_memory=True)
valid_dl = DataLoader(v_ds, batch_size, pin_memory=True)
for x,yb in train_dl:
print(x.shape)
break;
def show_batch(dl):
for images, labels in dl:
fig, ax = plt.subplots(figsize=(12, 12))
ax.set_xticks([]); ax.set_yticks([])
ax.imshow(make_grid(images[:64], nrow=8).permute(1, 2, 0))
break
```
# Create a ResNet Model with default Parameters
```
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, x):
        # Equivalent to torch.flatten(x, 1): keep dim 0, merge the rest.
        return x.flatten(start_dim=1)
class FoodImageClassifer(nn.Module):
    """ResNet-34 backbone with a small head classifying 3 food classes."""

    def __init__(self):
        super().__init__()
        resnet = models.resnet34(pretrained=True)
        # Backbone: everything except ResNet's final avgpool + fc layers.
        self.body = nn.Sequential(*list(resnet.children())[:-2])
        # Head: global average pool -> flatten -> linear classifier.
        self.head = nn.Sequential(nn.AdaptiveAvgPool2d(1),
                                  Flatten(),
                                  nn.Linear(resnet.fc.in_features, 3))

    def forward(self, x):
        x = self.body(x)
        return self.head(x)

    def freeze(self):
        """Freeze the backbone so only the head receives gradients.

        Bug fix: the original set requires_grad = True, which *unfroze*
        the backbone instead of freezing it.
        """
        for param in self.body.parameters():
            param.requires_grad = False
def fit(epochs,model,train_dl,valid_dl,loss_fn,opt):
    """Run a train/eval loop for `epochs` epochs, logging metrics per epoch.

    Trains on train_dl, evaluates on valid_dl with gradients disabled, and
    writes epoch / train_loss / valid_loss / train_acc / val_acc rows to a
    fastprogress master_bar table.  Relies on a module-level `device`.
    """
    mb = master_bar(range(epochs))
    mb.write(['epoch','train_loss','valid_loss','trn_acc','val_acc'],table=True)
    for i in mb:
        trn_loss,val_loss = 0.0,0.0
        trn_acc,val_acc = 0,0
        trn_n,val_n = len(train_dl.dataset),len(valid_dl.dataset)
        model.train()
        for xb,yb in progress_bar(train_dl,parent=mb):
            xb,yb = xb.to(device), yb.to(device)
            out = model(xb)
            opt.zero_grad()
            loss = loss_fn(out,yb)
            # Accumulate accuracy/loss before the backward pass.
            _,pred = torch.max(out.data, 1)
            trn_acc += (pred == yb).sum().item()
            trn_loss += loss.item()
            loss.backward()
            opt.step()
        # mb.child.total is the number of batches, so losses are
        # averaged per batch while accuracies are averaged per sample.
        trn_loss /= mb.child.total
        trn_acc /= trn_n
        model.eval()
        with torch.no_grad():
            for xb,yb in progress_bar(valid_dl,parent=mb):
                xb,yb = xb.to(device), yb.to(device)
                out = model(xb)
                loss = loss_fn(out,yb)
                val_loss += loss.item()
                _,pred = torch.max(out.data, 1)
                val_acc += (pred == yb).sum().item()
        val_loss /= mb.child.total
        val_acc /= val_n
        mb.write([i,f'{trn_loss:.6f}',f'{val_loss:.6f}',f'{trn_acc:.6f}',f'{val_acc:.6f}'],table=True)
```
# Making the Resnet as a Feature Extractor and training model
```
model = FoodImageClassifer()
criterion = nn.CrossEntropyLoss()
optimizer_ft = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
#model.freeze()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
fit(10,model=model,train_dl=train_dl,valid_dl=valid_dl,loss_fn=criterion,opt=optimizer_ft)
```
# Freeze the layers
```
model = FoodImageClassifer()
criterion = nn.CrossEntropyLoss()
optimizer_ft = optim.Adam(model.parameters(), lr=1e-4)
model.freeze()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
fit(10,model=model,train_dl=train_dl,valid_dl=valid_dl,loss_fn=criterion,opt=optimizer_ft)
torch.save(model.state_dict,'resnet.pth')
```
| github_jupyter |
```
import datetime
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from skimage import color, exposure
from sklearn.metrics import accuracy_score
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
from tensorflow.keras.utils import to_categorical
from hyperopt import hp, STATUS_OK, tpe, Trials, fmin
sns.set()
%load_ext tensorboard
HOME = '/content/drive/My Drive/Colab Notebooks/matrix/dw_matrix_road_signs'
%cd $HOME
train_db = pd.read_pickle('data/train.p')
test_db = pd.read_pickle('data/test.p')
X_train, y_train = train_db['features'], train_db['labels']
X_test, y_test = test_db['features'], test_db['labels']
sign_names = pd.read_csv('data/dw_signnames.csv')
sign_names.head()
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
input_shape = X_train.shape[1:]
cat_num = y_train.shape[1]
def get_cnn_v1(input_shape, cat_num, verbose=False):
    """Baseline CNN: one conv layer, flatten, softmax classifier.

    Args:
        input_shape: image shape, e.g. (32, 32, 3).
        cat_num: number of output classes.
        verbose: when True, print the Keras model summary.

    Returns:
        An uncompiled tf.keras Sequential model.
    """
    model = Sequential([Conv2D(filters=64, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
                        Flatten(),
                        Dense(cat_num, activation='softmax')])
    if verbose:
        model.summary()
    return model
cnn_v1 = get_cnn_v1(input_shape, cat_num, True)
def train_model(model, X_train, y_train, params_fit=None):
    """Compile and fit `model`, logging to a timestamped TensorBoard dir.

    Args:
        model: an uncompiled tf.keras model.
        X_train, y_train: training data (labels one-hot encoded).
        params_fit: optional dict of overrides — batch_size (128),
            epochs (5), verbose (1), validation_data (defaults to the
            training set itself; NOTE(review): that only monitors fit on
            the training data, not generalization — confirm intent).

    Returns:
        The fitted model.
    """
    if params_fit is None:
        # Avoid a shared mutable default argument (original used dict()).
        params_fit = {}
    logdir = os.path.join('logs', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))
    tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
    model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
    model.fit(X_train,
              y_train,
              batch_size=params_fit.get('batch_size', 128),
              epochs=params_fit.get('epochs', 5),
              verbose=params_fit.get('verbose', 1),
              validation_data=params_fit.get('validation_data', (X_train, y_train)),
              callbacks=[tensorboard_callback])
    return model
model_trained = train_model(cnn_v1, X_train, y_train)
def predict(model, X_test, y_test, scoring=accuracy_score):
    """Score `model` on X_test against one-hot y_test (default: accuracy).

    Collapses one-hot labels and per-class probability predictions to
    class indices before applying the scoring function.
    """
    y_test_norm = np.argmax(y_test, axis=1)
    y_pred_prob = model.predict(X_test)
    y_pred = np.argmax(y_pred_prob, axis=1)
    return scoring(y_test_norm, y_pred)
def get_cnn(input_shape, cat_num):
    """Deeper CNN: two conv blocks (32 and 64 filters) with max-pooling
    and dropout, then two 1024-unit dense layers and a softmax output.

    Returns an uncompiled tf.keras Sequential model.
    """
    model = Sequential([Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
                        Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same'),
                        MaxPool2D(),
                        Dropout(0.3),
                        Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'),
                        Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
                        MaxPool2D(),
                        Dropout(0.3),
                        Flatten(),
                        Dense(1024, activation='relu'),
                        Dropout(0.3),
                        Dense(1024, activation='relu'),
                        Dropout(0.3),
                        Dense(cat_num, activation='softmax')])
    return model
def train_and_predict(model, X_train, y_train, X_test, y_test):
    """Train `model` on the training set and return its test-set score."""
    model_trained = train_model(model, X_train, y_train)
    return predict(model_trained, X_test, y_test)
# train_and_predict(get_cnn(input_shape, cat_num), X_train, y_train, X_test, y_test)
def get_model(input_shape, cat_num, params):
    """Three-conv-block CNN whose dropout rates come from `params`.

    `params` supplies the five dropout probabilities searched by hyperopt:
    'dropout_cnn_0'..'dropout_cnn_2' and 'dropout_dense_0'/'dropout_dense_1'.
    Returns an uncompiled tf.keras Sequential model.
    """
    model = Sequential([Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
                        Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same'),
                        MaxPool2D(),
                        Dropout(params['dropout_cnn_0']),
                        Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'),
                        Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
                        MaxPool2D(),
                        Dropout(params['dropout_cnn_1']),
                        Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same'),
                        Conv2D(filters=128, kernel_size=(3, 3), activation='relu'),
                        MaxPool2D(),
                        Dropout(params['dropout_cnn_2']),
                        Flatten(),
                        Dense(1024, activation='relu'),
                        Dropout(params['dropout_dense_0']),
                        Dense(1024, activation='relu'),
                        Dropout(params['dropout_dense_1']),
                        Dense(cat_num, activation='softmax')])
    return model
def func_obj(params):
    """Hyperopt objective: build/train a CNN with `params`, return -accuracy.

    Reads X_train/y_train/X_test/y_test and input_shape/cat_num from module
    scope.  Returns the dict shape hyperopt expects; 'loss' is minimized,
    so test accuracy is negated.
    """
    model = get_model(input_shape, cat_num, params)
    model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
    model.fit(X_train,
              y_train,
              # hp.quniform yields floats; Keras needs an int batch size.
              batch_size=int(params.get('batch_size', 128)),
              epochs=params.get('epochs', 5),
              verbose=params.get('verbose', 0)
              )
    score = model.evaluate(X_test, y_test, verbose=0)
    # score is [loss, accuracy] per the compiled metrics.
    accuracy = score[1]
    print(f'params={params}')
    print(f'accuracy={accuracy}')
    return {'loss': -accuracy, 'status': STATUS_OK, 'model': model}
space = {
'batch_size': hp.quniform('batch_size', 100, 200, 10),
'dropout_cnn_0': hp.uniform('dropout_cnn_0', 0.3, 0.5),
'dropout_cnn_1': hp.uniform('dropout_cnn_1', 0.3, 0.5),
'dropout_cnn_2': hp.uniform('dropout_cnn_2', 0.3, 0.5),
'dropout_dense_0': hp.uniform('dropout_dense_0', 0.3, 0.7),
'dropout_dense_1': hp.uniform('dropout_dense_1', 0.3, 0.7),
}
best = fmin(
func_obj,
space,
tpe.suggest,
30,
Trials()
)
```
| github_jupyter |
```
import torch
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import numpy as np
import pickle
from collections import namedtuple
from tqdm import tqdm
import torch
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from adabound import AdaBound
import matplotlib.pyplot as plt
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.MNIST(root='./data_mnist', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=200,
shuffle=True, num_workers=4)
testset = torchvision.datasets.MNIST(root='./data_mnist', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=200,
shuffle=False, num_workers=4)
device = 'cuda:0'
optim_configs = {
'1e-4': {
'optimizer': optim.Adam,
'kwargs': {
'lr': 1e-4,
'weight_decay': 0,
'betas': (0.9, 0.999),
'eps': 1e-08,
'amsgrad': False
}
},
'5e-3': {
'optimizer': optim.Adam,
'kwargs': {
'lr': 5e-3,
'weight_decay': 0,
'betas': (0.9, 0.999),
'eps': 1e-08,
'amsgrad': False
}
},
'1e-2': {
'optimizer': optim.Adam,
'kwargs': {
'lr': 1e-2,
'weight_decay': 0,
'betas': (0.9, 0.999),
'eps': 1e-08,
'amsgrad': False
}
},
'1e-3': {
'optimizer': optim.Adam,
'kwargs': {
'lr': 1e-3,
'weight_decay': 0,
'betas': (0.9, 0.999),
'eps': 1e-08,
'amsgrad': False
}
},
'5e-4': {
'optimizer': optim.Adam,
'kwargs': {
'lr': 5e-4,
'weight_decay': 0,
'betas': (0.9, 0.999),
'eps': 1e-08,
'amsgrad': False
}
},
}
class MLP(nn.Module):
    """Two-layer fully connected network for flattened 28x28 MNIST digits."""

    def __init__(self, hidden_size=256):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(28 * 28, hidden_size)
        self.fc2 = nn.Linear(hidden_size, 10)

    def forward(self, x):
        # Flatten each image to a 784-vector, apply ReLU hidden layer,
        # and return raw class logits (no softmax — CrossEntropyLoss).
        flat = x.view(-1, 28 * 28)
        hidden = F.relu(self.fc1(flat))
        return self.fc2(hidden)
criterion = nn.CrossEntropyLoss()
hidden_sizes = [256, 512, 1024, 2048]
for h_size in hidden_sizes:
Stat = namedtuple('Stat', ['losses', 'accs'])
train_results = {}
test_results = {}
for optim_name, optim_config in optim_configs.items():
torch.manual_seed(0)
np.random.seed(0)
train_results[optim_name] = Stat(losses=[], accs=[])
test_results[optim_name] = Stat(losses=[], accs=[])
net = MLP(hidden_size=h_size).to(device)
optimizer = optim_config['optimizer'](net.parameters(), **optim_config['kwargs'])
print(optimizer)
for epoch in tqdm(range(100)): # loop over the dataset multiple times
train_stat = {
'loss': .0,
'correct': 0,
'total': 0
}
test_stat = {
'loss': .0,
'correct': 0,
'total': 0
}
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
inputs = inputs.to(device)
labels = labels.to(device)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).sum()
# calculate
train_stat['loss'] += loss.item()
train_stat['correct'] += c.item()
train_stat['total'] += labels.size()[0]
train_results[optim_name].losses.append(train_stat['loss'] / (i + 1))
train_results[optim_name].accs.append(train_stat['correct'] / train_stat['total'])
with torch.no_grad():
for i, data in enumerate(testloader, 0):
inputs, labels = data
inputs = inputs.to(device)
labels = labels.to(device)
outputs = net(inputs)
loss = criterion(outputs, labels)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).sum()
test_stat['loss'] += loss.item()
test_stat['correct'] += c.item()
test_stat['total'] += labels.size()[0]
test_results[optim_name].losses.append(test_stat['loss'] / (i + 1))
test_results[optim_name].accs.append(test_stat['correct'] / test_stat['total'])
# Save stat!
stat = {
'train': train_results,
'test': test_results
}
with open(f'adam_stat_mlp_{h_size}.pkl', 'wb') as f:
pickle.dump(stat, f)
# Plot loss
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 5))
for optim_name in optim_configs:
if 'Bound' in optim_name:
ax1.plot(train_results[optim_name].losses, '--', label=optim_name)
else:
ax1.plot(train_results[optim_name].losses, label=optim_name)
ax1.set_ylabel('Training Loss')
ax1.set_xlabel('# of Epcoh')
ax1.legend()
for optim_name in optim_configs:
if 'Bound' in optim_name:
ax2.plot(test_results[optim_name].losses, '--', label=optim_name)
else:
ax2.plot(test_results[optim_name].losses, label=optim_name)
ax2.set_ylabel('Test Loss')
ax2.set_xlabel('# of Epcoh')
ax2.legend()
plt.suptitle(f'Training Loss and Test Loss for MLP({h_size}) on MNIST', y=1.01)
plt.tight_layout()
plt.show()
# Plot accuracy
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 5))
for optim_name in optim_configs:
if 'Bound' in optim_name:
ax1.plot(train_results[optim_name].accs, '--', label=optim_name)
else:
ax1.plot(train_results[optim_name].accs, label=optim_name)
ax1.set_ylabel('Training Accuracy %')
ax1.set_xlabel('# of Epcoh')
ax1.legend()
for optim_name in optim_configs:
if 'Bound' in optim_name:
ax2.plot(test_results[optim_name].accs, '--', label=optim_name)
else:
ax2.plot(test_results[optim_name].accs, label=optim_name)
ax2.set_ylabel('Test Accuracy %')
ax2.set_xlabel('# of Epcoh')
ax2.legend()
plt.suptitle(f'Training Accuracy and Test Accuracy for MLP({h_size}) on MNIST', y=1.01)
plt.tight_layout()
plt.show()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/WittmannF/udemy-deep-learning-cnns/blob/main/assignment_cnn_preenchido.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Assignment: Fashion MNIST
Now it is your turn! You are going to use the same methods presented in the previous video in order to classify clothes from a black and white dataset of images (image by Zalando, MIT License):

The class labels are:
```
0. T-shirt/top
1. Trouser
2. Pullover
3. Dress
4. Coat
5. Sandal
6. Shirt
7. Sneaker
8. Bag
9. Ankle boot
```
### 1. Preparing the input data
Let's first import the dataset. It is available on [tensorflow.keras.datasets](https://keras.io/datasets/):
```
import tensorflow
fashion_mnist = tensorflow.keras.datasets.fashion_mnist
(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
print("Shape of the training set: {}".format(X_train.shape))
print("Shape of the test set: {}".format(X_test.shape))
# TODO: Normalize the training and testing set using standardization
def normalize(x, m, s):
    """Standardize x: subtract the mean m, then divide by the std s."""
    centered = x - m
    return centered / s
train_mean = X_train.mean()
train_std = X_train.std()
X_train = normalize(X_train, train_mean, train_std)
X_test = normalize(X_test, train_mean, train_std)
print(f'Training Mean after standardization {X_train.mean():.3f}')
print(f'Training Std after standardization {X_train.std():.3f}')
print(f'Test Mean after standardization {X_test.mean():.3f}')
print(f'Test Std after standardization {X_test.std():.3f}')
```
### 2. Training with fully connected layers
```
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense
model = Sequential([
Flatten(),
Dense(512, activation='relu'),
Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(X_train, y_train, epochs=2, validation_data=(X_test, y_test))
```
### 3. Extending to CNNs
Now your goal is to develop an architecture that can reach a test accuracy higher than 0.85.
```
X_train.shape
# TODO: Reshape the dataset in order to add the channel dimension
X_train = X_train.reshape(-1, 28, 28, 1)
X_test = X_test.reshape(-1, 28, 28, 1)
from tensorflow.keras.layers import Conv2D, MaxPooling2D
model = Sequential([
Conv2D(6, kernel_size=(3,3), activation='relu', input_shape=(28,28,1)),
MaxPooling2D(),
Conv2D(16, kernel_size=(3,3), activation='relu'),
MaxPooling2D(),
Flatten(),
Dense(512, activation='relu'),
Dense(10, activation='softmax')
])
model.summary()
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
hist=model.fit(X_train, y_train, epochs=10, validation_data=(X_test, y_test))
import pandas as pd
pd.DataFrame(hist.history).plot()
```
### 4. Visualizing Predictions
```
import numpy as np
import matplotlib.pyplot as plt
label_names = {0:"T-shirt/top",
1:"Trouser",
2:"Pullover",
3:"Dress",
4:"Coat",
5:"Sandal",
6:"Shirt",
7:"Sneaker",
8:"Bag",
9:"Ankle boot"}
# Index to be visualized
for idx in range(5):
plt.imshow(X_test[idx].reshape(28,28), cmap='gray')
out = model.predict(X_test[idx].reshape(1,28,28,1))
plt.title("True: {}, Pred: {}".format(label_names[y_test[idx]], label_names[np.argmax(out)]))
plt.show()
```
| github_jupyter |
```
# Load library
import nltk
import os
from nltk import tokenize
from nltk.tokenize import sent_tokenize,word_tokenize
os.getcwd()
# Read the Data
raw=open("C:\\Users\\vivek\\Desktop\\NLP Python Practice\\Labeled Dateset.txt").read()
```
# Tokenize and make the Data into the Lower Case
```
# Change the Data in lower
raw=raw.lower()
# tokenize the data
docs=sent_tokenize(raw)
docs
# Split the Data into the label and review
docs=docs[0].split("\n")
docs
```
# Pre-processing punctuation
```
from string import punctuation as punc
# Bug fix: str.replace returns a *new* string and the original loop
# discarded that return value, so `docs` was never actually modified.
# Rebuild each document without punctuation characters instead.
docs = ["".join(ch for ch in d if ch not in punc) for d in docs]
```
# removing Stop word and stemming
```
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
from nltk.stem import PorterStemmer
ps=PorterStemmer()
# Bug fix: as with the punctuation pass, the str.replace results were
# discarded, so stop-word removal and stemming never took effect.
# Rebuild each document from its stemmed, non-stop-word tokens.
docs = [" ".join(ps.stem(token)
                 for token in word_tokenize(d)
                 if token not in ENGLISH_STOP_WORDS)
        for d in docs]
```
# Ask from the user for test Data
```
for i in range(len(docs)):
print("D"+str(i)+":"+docs[i])
test=input("Enter your text:")
docs.append(test+":")
## Seperating the document into the label,striping off the unwanted white space
x,y=[],[]
for d in docs:
x.append(d[:d.index(":")].strip())
y.append(d[d.index(":")+1:].strip())
# vectorizer using Tfidf
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer=TfidfVectorizer()
vec=vectorizer.fit_transform(x)
# trainning KNN Classifier
from sklearn.neighbors import KNeighborsClassifier
knn=KNeighborsClassifier(1)
knn.fit(vec[:6],y[:6])
print("Label: ",knn.predict(vec[6]))
# Sntiment Analysis
from nltk.corpus import wordnet
test_tokens=test.split(" ")
good=wordnet.synsets("good")
bad=wordnet.synsets("evil")
score_pos=0
score_neg=0
for token in test_tokens:
t=wordnet.synsets(token)
if len(t)>0:
sim_good=wordnet.wup_similarity(good[0],t[0])
sim_bad=wordnet.wup_similarity(bad[0],t[0])
if(sim_good is not None):
score_pos =score_pos + sim_good
if(sim_bad is not None):
score_neg =score_neg + sim_bad
if((score_pos - score_neg)>0.1):
print("Subjective Statement, Positive openion of strength: %.2f" %score_pos)
elif((score_neg - score_pos)>0.1):
print("Subjective Statement, Negative openion of strength: %.2f" %score_neg)
else:
print("Objective Statement, No openion Showed")
# Nearest Document
from sklearn.neighbors import NearestNeighbors
nb=NearestNeighbors(n_neighbors=2)
nb.fit(vec[:6])
closest_docs=nb.kneighbors(vec[6])
print("Recomended document with IDs ",closest_docs[1])
print("hiving distance ",closest_docs[0])
```
| github_jupyter |
<img alt="Colaboratory logo" height="45px" src="https://colab.research.google.com/img/colab_favicon.ico" align="left" hspace="10px" vspace="0px">
<h1>Welcome to Colaboratory!</h1>
Colaboratory is a free Jupyter notebook environment that requires no setup and runs entirely in the cloud.
With Colaboratory you can write and execute code, save and share your analyses, and access powerful computing resources, all for free from your browser.
## Running code
Code cells can be executed in sequence by pressing Shift-ENTER. Try it now.
```
import math
import tensorflow as tf
from matplotlib import pyplot as plt
print("Tensorflow version " + tf.__version__)
a=1
b=2
a+b
```
## Hidden cells
Some cells contain code that is necessary but not interesting for the exercise at hand. These cells will typically be collapsed to let you focus at more interesting pieces of code. If you want to see their contents, double-click the cell. Wether you peek inside or not, **you must run the hidden cells for the code inside to be interpreted**. Try it now, the cell is marked **RUN ME**.
```
#@title "Hidden cell with boring code [RUN ME]"
def display_sinusoid():
X = range(180)
Y = [math.sin(x/10.0) for x in X]
plt.plot(X, Y)
display_sinusoid()
```
Did it work ? If not, run the collapsed cell marked **RUN ME** and try again!
## Accelerators
Colaboratory offers free GPU and TPU (Tensor Processing Unit) accelerators.
You can choose your accelerator in *Runtime > Change runtime type*
The cell below is the standard boilerplate code that enables distributed training on GPUs or TPUs in Keras.
```
# Detect hardware: prefer an attached TPU, otherwise fall back to a
# mirrored strategy that covers single/multi GPU and plain CPU.
try: # detect TPUs
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect() # TPU detection
    strategy = tf.distribute.TPUStrategy(tpu)
except ValueError: # detect GPUs
    strategy = tf.distribute.MirroredStrategy() # for GPU or multi-GPU machines (works on CPU too)
    #strategy = tf.distribute.get_strategy() # default strategy that works on CPU and single GPU
    #strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() # for clusters of multi-GPU machines

# How many accelerators do we have ?
print("Number of accelerators: ", strategy.num_replicas_in_sync)

# To use the selected distribution strategy:
# with strategy.scope():
#    # --- define your (Keras) model here ---
#
# For distributed computing, the batch size and learning rate need to be adjusted:
# global_batch_size = BATCH_SIZE * strategy.num_replicas_in_sync  # num replicas is 8 on a single TPU or N when running on N GPUs.
# learning_rate = LEARNING_RATE * strategy.num_replicas_in_sync
```
## License
---
author: Martin Gorner<br>
twitter: @martin_gorner
---
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
---
This is not an official Google product but sample code provided for an educational purpose
| github_jupyter |
# PyTorch
# Intro to Neural Networks
Lets use some simple models and try to match some simple problems
```
import numpy as np
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
import matplotlib.pyplot as plt
```
### Data Loading
Before we dive deep into the neural net, let's take a brief aside to discuss data loading.
PyTorch provides a Dataset class which is fairly easy to inherit from. We need only implement two methods for our data loader:
1. __len__(self) -> return the size of our dataset
2. __getitem__(self, idx) -> return the data at a given index.
The *real* benefit of implementing a Dataset class comes from using the DataLoader class.
For data sets which are too large to fit into memory (or more likely, GPU memory), the DataLoader class gives us two advantages:
1. Efficient shuffling and random sampling for batches
2. Data is loaded in a separate *process*.
Number (2) above is *important*. The Python interpreter is single threaded only, enforced with a GIL (Global Interpreter Lock). Without (2), we waste valuable (and potentially expensive) processing time shuffling and sampling and building tensors.
So let's invest a little time to build a Dataset and use the DataLoader.
In our example below, we are going to mock a dataset with a simple function, this time:
y = sin(x) + 0.01 * x^2
```
# Mock dataset: y = sin(x) + 0.01 * x^2 sampled on [-3, 3].
def fun(x):
    """The target function the network will learn to approximate."""
    return np.sin(x) + 0.01 * x * x

X = np.linspace(-3, 3, 100)
Y = fun(X)

plt.figure(figsize=(7, 7))
# BUG FIX: the scatter had no label, so plt.legend() warned
# "No handles with labels found" and drew an empty legend.
plt.scatter(X, Y, label='sin(x) + 0.01*x^2')
plt.legend()
plt.show()
```
### Our First Neural Net
Lets now build our first neural net.
In this case, we'll take a classic approach with 2 fully connected hidden layers and a fully connected output layer.
```
class FirstNet(nn.Module):
    """A minimal fully connected network: Linear -> ReLU -> Linear.

    Accepts a flat batch of scalars (or an (N, 1) tensor) and emits one
    value per input sample.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        super(FirstNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # Reshape any incoming batch into rows of a single feature each;
        # -1 lets the batch dimension be inferred.
        column = x.view(-1, 1)
        hidden = self.relu(self.fc1(column))
        return self.fc2(hidden)
# Instantiate the net: 1 input feature, 64 hidden units, 1 output value.
net = FirstNet(input_size=1, hidden_size=64, num_classes=1)
print(net)
```
Lets look at a few key features of our net:
1) We have 2 fully connected layers, defined in our init function.
2) We define a *forward pass* method which is the prediction of the neural net given an input X
3) Note that we make a *view* of our input array. In our simple model, we expect a 1D X value, and we output a 1D Y value. For efficiency, we may wish to pass in *many* X values, particularly when training. Thus, we need to set up a *view* of our input array: Many 1D X values. -1 in this case indicates that the first dimension (number of X values) is inferred from the tensor's shape.
### Logging and Visualizing to TensorboardX
Lets track the progress of our training and visualize in tensorboard (using tensorboardX). We'll also add a few other useful functions to help visualize things.
To view the output, run:
`tensorboard --logdir nb/run`
```
tbwriter = SummaryWriter()
```
### Graph Visualization and Batching
We will begin by adding a graph visualization to tensorboard. To do this, we need a valid input to our network.
Our network is simple - floating point in, floating point out. *However*, pytorch expects us to *batch* our inputs - therefore it expects an *array* of inputs instead of a single input. There are many ways to work around this, I like "unsqueeze".
```
# A single-sample batch used only to trace the network for graph visualization.
X = torch.FloatTensor([0.0])
tbwriter.add_graph(net, X)
```
### Cuda
IF you have a GPU available, your training will run much faster.
Moving data back and forth between the CPU and the GPU is fairly straightforward - although it can be easy to forget.
```
# Move the model to the GPU when one is available.
use_cuda = torch.cuda.is_available()
if use_cuda:
    net = net.cuda()
def makeFig(iteration):
    """Plot the net's current prediction over [-3, 3] and return the figure.

    Uses the module-level `net` and `use_cuda`; moves data to/from the GPU
    as needed so the numpy conversion always happens on the CPU.
    """
    X = np.linspace(-3, 3, 100, dtype=np.float32)
    X = torch.FloatTensor(X)
    if use_cuda:
        Y = net.forward(X.cuda()).cpu()
    else:
        Y = net.forward(X)
    fig = plt.figure()
    plt.plot(X.data.numpy(), Y.data.numpy())
    # FIX: title previously read "Prediciton".
    plt.title('Prediction at iter: {}'.format(iteration))
    return fig
def showFig(iteration):
    """Render the prediction figure for *iteration* on screen, then close it."""
    figure = makeFig(iteration)
    plt.show()
    plt.close()
def logFig(iteration):
    """Render the prediction figure and log it as an image to TensorBoard."""
    fig = makeFig(iteration)
    fig.canvas.draw()
    # FIX: np.fromstring on binary data is deprecated and warns; frombuffer
    # is the supported (zero-copy) replacement for raw RGB bytes.
    raw = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    # Canvas reports (width, height); the image array is (height, width, 3).
    raw = raw.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    tbwriter.add_image('Prediction at iter: {}'.format(iteration), raw)
    plt.close()

showFig(0)
```
Ok, we have a ways to go. Lets use our data loader and do some training. Here we will use MSE loss (mean squared error) and SGD optimizer.
```
%%time
learning_rate = 0.01
num_epochs = 4000

if use_cuda:
    net = net.cuda()

# Mean squared error loss with plain SGD.
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate)

net.train()

# Training data: the target function sampled on a uniform grid.
X = np.linspace(-3, 3, 100)
Y = fun(X)
X = torch.FloatTensor(X)
Y = torch.FloatTensor(Y).view(-1, 1)
if use_cuda:
    X = X.cuda()
    Y = Y.cuda()

for epoch in range(num_epochs):
    pred = net.forward(X)
    loss = criterion(pred, Y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # FIX: loss.data[0] was removed in PyTorch 0.4+; item() extracts the
    # Python scalar. Also pass the epoch as global_step so TensorBoard
    # plots a curve instead of overwriting one point.
    tbwriter.add_scalar("Loss", loss.item(), epoch)
    if (epoch % 100 == 99):
        print("Epoch: {:>4} Loss: {}".format(epoch, loss.item()))
        for name, param in net.named_parameters():
            tbwriter.add_histogram(name, param.clone().cpu().data.numpy(), epoch)
        logFig(epoch)

net.eval()
showFig(0)
```
## Conclusions
We've written our first network, take a moment and play with some of our models here.
Try inputting a different function into the functional dataset, such as:
dataset = FunctionalDataset(lambda x: 1.0 if x > 0 else -1.0
Try experimenting with the network - change the number of neurons in the layer, or add more layers.
Try changing the learning rate (and probably the number of epochs).
And lastly, try disabling cuda (if you have a gpu).
#### How well does the prediction match our input function?
#### How long does it take to train?
One last note: we are absolutely *over-fitting* our dataset here. In this example, that's ok. For real work, we will need to be more careful.
Speaking of real work, lets do some real work identifying customer cohorts.
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0.5, 10, 0.001)
y1 = np.log(x)
y2 = 5 * np.sin(x) / x
plt.style.use('seaborn-darkgrid') # Define o fundo do gráfico
plt.figure(figsize=(8,5)) # Define o tamanho do gráfico
# Estipula os parametros das letras do titulo, eixo x e eixo y
plt.title('Dois Gráficos Aleatórios', fontsize=16, fontweight='bold', fontstyle='italic', fontfamily='serif', color='grey')
plt.xlabel('Valores do eixo x', fontsize=13, fontfamily='serif')
plt.ylabel('Valores do eixo y', fontsize=13, fontfamily='serif')
plt.tight_layout()
# Estipula como vai ser as linhas (uma linha sobreposta a outra com largura e opacidade diferente)
plt.plot(x, y1, label='log(x)', color='blue')
plt.plot(x, y1, color='blue', linewidth=10, alpha=0.1)
plt.plot(x, y2, label='sen(x)/x', color='red')
plt.plot(x, y2, color='red', linewidth=10, alpha=0.1)
# Estipula a estilização da legenda
plt.legend(fontsize=13, frameon=True, framealpha=0.2, facecolor='grey')
import numpy as np

x = np.arange(0, 10, 0.1)
z = np.arange(0.5, 10)
y1 = np.log(z)
y2 = 5 * np.sin(z) / z
y3 = np.sin(z)
y4 = np.tan(z)

plt.style.use('seaborn-darkgrid')

# 2x2 grid of subplots under one overall title.
fig1, f1_axes = plt.subplots(ncols=2, nrows=2, figsize=(15, 10))
fig1.suptitle("Vários Gráficos Em Uma Mesma Figura", fontsize=30, fontweight='bold', fontstyle='italic', fontfamily='serif')

box1 = f1_axes[0, 0]
box2 = f1_axes[0, 1]
box3 = f1_axes[1, 0]
box4 = f1_axes[1, 1]

# Box 1: sine and cosine, each with a translucent "glow" overlay.
box1.set_title('Caixa 1', fontsize=15, fontweight='bold')
box1.set_xlabel('Caixa 1 - Eixo x', fontsize=13, fontfamily='serif')
box1.set_ylabel('Caixa 1 - Eixo y', fontsize=13, fontfamily='serif')
box1.plot(np.sin(x), label='sen(x)', color= 'red')
box1.plot(np.sin(x), color='red', linewidth=10, alpha=0.1)
box1.plot(np.cos(x), label='cos(x)', color= 'darkturquoise')
box1.plot(np.cos(x), color='darkturquoise', linewidth=10, alpha=0.1)
box1.legend(fontsize=13, frameon=True, framealpha=0.2, facecolor='grey')

# Box 2: log(z) and sin(z)/z against z.
box2.set_title('Caixa 2', fontsize=15, fontweight='bold')
box2.set_xlabel('Caixa 2 - Eixo x', fontsize=13, fontfamily='serif')
box2.set_ylabel('Caixa 2 - Eixo y', fontsize=13, fontfamily='serif')
box2.plot(z, y1, label='log(x)', color='darkslategrey')
box2.plot(z, y1, color='blue', linewidth=10, alpha=0.1)
box2.plot(z, y2, label='sen(x)/x', color='coral')
box2.plot(z, y2, color='red', linewidth=10, alpha=0.1)
box2.legend(fontsize=13, frameon=True, framealpha=0.2, facecolor='grey')

# Box 3: sine and tangent.
box3.set_title('Caixa 3', fontsize=15, fontweight='bold')
box3.set_xlabel('Caixa 3 - Eixo x', fontsize=13, fontfamily='serif')
box3.set_ylabel('Caixa 3 - Eixo y', fontsize=13, fontfamily='serif')
box3.plot(np.sin(x), label='sen(x)', color= 'black')
box3.plot(np.sin(x), color='black', linewidth=10, alpha=0.1)
# BUG FIX: this curve is tan(x) but was labelled 'cos(x)' in the legend.
box3.plot(np.tan(x), label='tan(x)', color= 'blue')
box3.plot(np.tan(x), color='blue', linewidth=10, alpha=0.1)
box3.legend(fontsize=13, frameon=True, framealpha=0.2, facecolor='grey')

# Box 4: sin(z) and tan(z) against z.
box4.set_title('Caixa 4', fontsize=15, fontweight='bold')
box4.set_xlabel('Caixa 4 - Eixo x', fontsize=13, fontfamily='serif')
box4.set_ylabel('Caixa 4 - Eixo y', fontsize=13, fontfamily='serif')
box4.plot(z, y3, label='sen(z)', color='purple')
box4.plot(z, y3, color='blue', linewidth=10, alpha=0.1)
box4.plot(z, y4, label='tan(z)', color='lime')
box4.plot(z, y4, color='red', linewidth=10, alpha=0.1)
box4.legend(fontsize=13, frameon=True, framealpha=0.2, facecolor='grey')
```
| github_jupyter |
<a href="https://colab.research.google.com/github/WuilsonEstacio/Procesamiento-de-lenguaje-natural/blob/main/codigo_para_abrir_y_contar_palabras_de_archivos.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
# Read a file and print it line by line
archivo = open('/content/Hash.txt','r')
for linea in archivo:
    print(linea)
archivo.close()

archivo="/content/Hash.txt"
with open(archivo) as f:
    text=f.read()
# Per-letter frequency report.
# NOTE(review): count_char is only defined in a later cell — running the
# notebook top-to-bottom raises NameError here; confirm intended cell order.
# NOTE(review): the alphabet literal contains a stray extra 'r' ("...qrsrt...").
for char in "abcdefghijklmnopqrsrtuvwxyz":
    perc=100*count_char(text, char)/len(text)
    print("{0}-{1}%".format(char, round(perc, 2)))
# Which of the following is the correct regular expression to extract all the phone numbers from the following chunk of text:
import re

# Raw string avoids DeprecationWarning for the \d and \s escapes.
patter = r'[(]\d{3}[)]\s\d{3}[-]\d{4}'
print(patter)
# BUG FIX: findall must search the file's contents (`text`, read in the
# cell above); `archivo` holds the file *path* string at this point.
re.findall(patter, text)
# Helper to count character occurrences in a text.
import numpy as np
import pandas as pd

def count_char(text, char):
    """Return the number of times *char* occurs in *text*."""
    return sum(1 for c in text if c == char)
# Overwrite the contents of Hash.txt with new text and save it
file =open("/content/Hash.txt","w")
file.write(""" Usted puede interponer demanda ante los jueces civiles del
circuito que conocen en primera instancia de los
procesos contenciosos de mayor cuantía por responsabilidad médica.
Pretendiendo el pago de los perjuicios materiales """)
file.close()
filename="20-12-2020.txt"

# Re-read the file and print each letter's relative frequency.
# NOTE(review): the alphabet literal contains a stray extra 'r' ("...qrsrt...").
with open('/content/Hash.txt') as f:
    text=f.read()
for char in "abcdefghijklmnopqrsrtuvwxyz":
    perc=100*count_char(text, char)/len(text)
    print("{0}-{1}%".format(char, round(perc, 2)))
import numpy as np
import pandas as pd

# Ask the user for a file name and read its full contents.
filename=input("ingrese el nombre del archivo: ")
with open( filename ) as f:
    text=f.read()

# Print the dated file line by line.
filename = open("20-12-2020.txt","r")
for linea in filename.readlines():
    #str=filename.read()
    #print(len(str))
    print(linea)
filename.close()
# Import NLP libraries
import nltk
nltk.download('cess_esp') # Spanish corpus used to pre-train the taggers
from nltk.corpus import cess_esp as cess
from nltk import UnigramTagger as ut # unigram tagger
from nltk import BigramTagger as bt # bigram tagger

# https://www.delftstack.com/es/howto/python-pandas/how-to-load-data-from-txt-with-pandas/#read_csv-m%25C3%25A9todo-para-cargar-los-datos-del-archivo-de-texto
# One way to read the file with pandas
import pandas as pd
df = pd.read_csv(
    '/content/Hash.txt', sep=" ",header=None)
print(df)
# Read the file and print it line by line
import pandas as pd
import numpy as np
archivo = open('/content/Hash.txt','r')
for linea in archivo:
    print(linea)
archivo.close()

# pip install win_unicode_console
# Used to display characters correctly in the console
import codecs
import win_unicode_console
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize

# Open the file with an explicit UTF-8 decoder
archivo = codecs.open('/content/Hash.txt', 'r', encoding='utf-8')
texto = ""

# Accumulate the whole text in a single string
for linea in archivo:
    linea = linea.strip()
    texto = texto + " " + linea

text = word_tokenize(texto)
nltk.pos_tag(text) # part-of-speech tagging applied to the tokens
# Tokenize each sentence of the text with sent_tokenize()
# tokens = sent_tokenize(texto)
```
# **Test**
1.
Si tenemos un dataset etiquetado donde la categoría adjetivo (ADJ) aparece un total de 500 veces entre todos los tokens, y de esas veces solamente la palabra "noble" le corresponde 200 veces, entonces podemos decir que:
La probabilidad de emisión P(noble|ADJ) = 40%
2.
El proceso mediante el cual un Modelo Markoviano Latente determina la secuencia de etiquetas más probable para una secuencia de palabras es:
Usando el algoritmo de Viterbi para obtener la categoría más probable, palabra por palabra.
3.
Dada una cadena de texto text en español, el procedimiento para asignar las etiquetas gramaticales con Stanza es a partir de un objeto nlp(text), donde:
nlp = stanza.Pipeline('es', processors='tokenize,pos')
4.
La ingeniería de atributos se usa para:
Construir atributos particulares de palabras y textos que permitan dar un input más apropiado a un modelo de clasificación.
5.
El problema de clasificación de texto pertenece a la categoría de Machine Learning supervisado porque:
Durante el entrenamiento, el modelo tiene conocimiento de las etiquetas correctas que debería predecir.
6.
En un modelo de clasificación por categorías gramaticales, el algoritmo de Viterbi se usa para:
El proceso de decodificación: encontrar la secuencia de etiquetas más probable.
7.
En un Modelo Markoviano Latente se necesitan los siguientes ingredientes:
Matrices de transición, emisión y distribución inicial de estados.
8.
En un problema de clasificación de emails entre SPAM y HAM, la métrica de recall tiene la siguiente interpretación:
De todos los correos que realmente son SPAM, la fracción que el modelo logró identificar.
9.
Para entrenar un clasificador de Naive Bayes en NLTK, se escribe en Python:
nltk.NaiveBayesClassifier.train(data)
10.
Si tienes un modelo de clasificación binaria que luego de entrenarlo, obtienes que el número de verdaderos positivos es 200 y el número de falsos positivos es 120, entonces la métrica de precisión de dicho modelo tiene un valor de:
200/320
11.
Un algoritmo general de clasificación de texto:
Es un algoritmo de Machine Learning supervisado.
12.
El tokenizador por defecto en NLTK para el idioma inglés es:
punkt
13.
En una cadena de Markov se necesitan los siguientes elementos:
Matriz de transiciones y distribución inicial de estados.
14.
Entrenar un Modelo Markoviano Latente significa:
Calcular las matrices de probabilidad de transición y emisión con un corpus de textos.
15.
Una de las siguientes no es una categoría de ambigüedades del lenguaje:
Vectorial
16.
El suavizado de Laplace se usa en un algoritmo de clasificación con el objetivo de:
Evitar probabilidades nulas y denominadores iguales a cero.
17.
El clasificador de Naive Bayes es:
Un clasificador probabilístico que hace uso de la regla de Bayes.
18.
En la frase: "mi hermano es muy noble", la palabra noble hace referencia a:
Un adjetivo
19.
Con Naive Bayes preferimos hacer cálculos en espacio logarítmico para:
Evitar productos de números demasiado pequeños para la precisión de máquina.
20.
En un modelo MEMM:
El proceso de decodificación es similar al de un HMM, y por lo tanto se puede usar un tipo de algoritmo de Viterbi.
21.
El accuracy de entrenamiento de un modelo se calcula como:
(número de veces que el modelo predice la categoría correcta) / (total de datos usados para entrenamiento)
22.
Si tenemos una cadena de Markov para describir las probabilidades de transición en cuanto al clima de un dia para otro, y observamos la siguiente secuencia de estados día tras día: (frío, frío, caliente, frío, tibio, caliente, tibio, frío), entonces la probabilidad de transición P(caliente|frío) es:
50%
23.
En un Modelo Markoviano Latente, el problema de calcular la secuencia de etiquetas más probable se expresa con la siguiente expresión matemática:
$${\arg \max}_{(t^n)}\prod_i P(w_i \vert t_i)P(t_i \vert t_{i-1})$$
24.
Para un modelo de clasificación de palabras con Naive Bayes en NLTK, debemos entrenar el algoritmo usando:
nltk.NaiveBayesClassifier.train(train_set) donde usamos una funcion que extrae atributos llamada atributos() y:
train_set = [(atributos(palabra), categoría de la palabra), ...]
25.
Dada una cadena de texto text en inglés, el procedimiento para asignar las etiquetas gramaticales con NLTK es:
nltk.pos_tag(word_tokenize(text))
| github_jupyter |
```
import os
import sys
import glob
import numpy as np
from parse import load_ps
import matplotlib.pyplot as plt
def split_num(s):
    """Split *s* into (prefix, trailing digits), e.g. 'clip12' -> ('clip', '12')."""
    cut = len(s.rstrip('0123456789'))
    return s[:cut], s[cut:]

def files_in_order(folderpath):
    """Return the files in *folderpath* ordered by (name prefix, numeric suffix)."""
    names = np.array(os.listdir(folderpath))
    stems = [os.path.splitext(name)[0] for name in names]
    parts = np.array([split_num(stem) for stem in stems])
    # np.lexsort sorts by the LAST key first: alphabetical prefix, then
    # the numeric suffix compared as an integer (so "10" sorts after "2").
    order = np.lexsort((parts[:, 1].astype(int), parts[:, 0]))
    return names[order]
files = files_in_order(os.path.join('poses_compressed', 'frontraise'))
print(files)

# Analyze each recorded front-raise clip.
for filename in files:
    print("="*30)
    print("Starting:", filename)
    ps = load_ps("poses_compressed/frontraise/" + filename)
    poses = ps.poses

    # Decide which arm performs the exercise: the side whose
    # shoulder/elbow/wrist keypoints were detected in more frames.
    right_present = [1 for pose in poses
            if pose.rshoulder.exists and pose.relbow.exists and pose.rwrist.exists]
    left_present = [1 for pose in poses
            if pose.lshoulder.exists and pose.lelbow.exists and pose.lwrist.exists]
    right_count = sum(right_present)
    left_count = sum(left_present)
    side = 'right' if right_count > left_count else 'left'
    # print('Exercise arm detected as: {}.'.format(side))

    # Per frame, collect (shoulder, elbow, wrist, hip, neck) for the chosen side.
    if side == 'right':
        joints = [(pose.rshoulder, pose.relbow, pose.rwrist, pose.rhip, pose.neck) for pose in poses]
    else:
        joints = [(pose.lshoulder, pose.lelbow, pose.lwrist, pose.lhip, pose.neck) for pose in poses]

    # filter out data points where a part does not exist
    joints = [joint for joint in joints if all(part.exists for part in joint)]
    joints = np.array(joints)

    # Neck to hip
    back_vec = np.array([(joint[4].x - joint[3].x, joint[4].y - joint[3].y) for joint in joints])
    # back_vec = np.array([(joint[3].x, joint[3].y) for joint in joints])
    # Check range of motion of the back
    # Straining back
    back_vec_range = np.max(back_vec, axis=0) - np.min(back_vec, axis=0)
    # print("Range of motion for back: %s" % back_vec_range)
    # threshold the x difference at 0.3: less is good, more is too much straining and movement of the back.

    # Shoulder to hip
    torso_vecs = np.array([(joint[0].x - joint[3].x, joint[0].y - joint[3].y) for joint in joints])
    # Arm (shoulder to wrist)
    arm_vecs = np.array([(joint[0].x - joint[2].x, joint[0].y - joint[2].y) for joint in joints])

    # normalize vectors
    torso_vecs = torso_vecs / np.expand_dims(np.linalg.norm(torso_vecs, axis=1), axis=1)
    arm_vecs = arm_vecs / np.expand_dims(np.linalg.norm(arm_vecs, axis=1), axis=1)

    # Check if raised all the way up: angle between torso and arm per frame;
    # the dot product is clipped to keep arccos in its valid domain.
    angles = np.degrees(np.arccos(np.clip(np.sum(np.multiply(torso_vecs, arm_vecs), axis=1), -1.0, 1.0)))
    print("Max angle: ", np.max(angles))
```
| github_jupyter |
```
# Floating-point pitfall demo: 36/1000 + 6/1000 may not compare as
# expected against 42/1000 because of binary rounding.
fuelNeeded = 42/1000
tank1 = 36/1000
tank2 = 6/1000
tank1 + tank2 >= fuelNeeded

# Decimal built from a float inherits the float's binary rounding error,
# so this comparison illustrates the same pitfall rather than fixing it.
from decimal import Decimal
fN = Decimal(fuelNeeded)
t1 = Decimal(tank1)
t2 = Decimal(tank2)
t1 + t2 >= fN
class Rational(object):
    """An exact fraction stored as a numerator/denominator pair."""

    def __init__ (self, num, denom):
        self.numerator = num
        self.denominator = denom

    def add(self, other):
        """Return a new (unreduced) Rational equal to self + other."""
        top = self.numerator * other.denominator + self.denominator * other.numerator
        bottom = self.denominator * other.denominator
        return Rational(top, bottom)
r1 = Rational(36, 1000)
r2 = Rational(6, 1000)

import numpy as np
from mayavi import mlab
# Render mayavi scenes inline in the notebook.
mlab.init_notebook()
s = mlab.test_plot3d()
s

# Build a parametric surface on a fine (phi, theta) grid.
from numpy import pi, sin, cos, mgrid
dphi, dtheta = pi/250.0, pi/250.0
[phi,theta] = mgrid[0:pi+dphi*1.5:dphi,0:2*pi+dtheta*1.5:dtheta]
m0 = 4; m1 = 3; m2 = 2; m3 = 3; m4 = 6; m5 = 2; m6 = 6; m7 = 4;
r = sin(m0*phi)**m1 + cos(m2*phi)**m3 + sin(m4*theta)**m5 + cos(m6*theta)**m7
x = r*sin(phi)*cos(theta)
y = r*cos(phi)
z = r*sin(phi)*sin(theta)
# Visualize this data as a 3D surface
s = mlab.mesh(x, y, z)
s
mlab.savefig('example.png')
import numpy as np
from mayavi import mlab

# Animation callback: every 100 ms, move the ball one step around the
# unit circle in the xy-plane.
@mlab.animate(delay = 100)
def updateAnimation():
    t = 0.0
    while True:
        ball.mlab_source.set(x = np.cos(t), y = np.sin(t), z = 0)
        t += 0.1
        yield

ball = mlab.points3d(np.array(1.), np.array(0.), np.array(0.))
updateAnimation()
mlab.show()
import numpy
from mayavi import mlab
def lorenz(x, y, z, s=10., r=28., b=8. / 3.):
    """The Lorenz system: velocity (u, v, w) of the flow at point (x, y, z)."""
    du = s * (y - x)        # coupling between x and y
    dv = r * x - y - x * z  # growth driven by r, damped by z
    dw = x * y - b * z      # z fed by x*y, decaying at rate b
    return du, dv, dw
# Sample the space in an interesting region.
x, y, z = numpy.mgrid[-50:50:100j, -50:50:100j, -10:60:70j]
u, v, w = lorenz(x, y, z)
fig = mlab.figure(size=(400, 300), bgcolor=(0, 0, 0))

# Plot the flow of trajectories with suitable parameters.
f = mlab.flow(x, y, z, u, v, w, line_width=3, colormap='Paired')
f.module_manager.scalar_lut_manager.reverse_lut = True
f.stream_tracer.integration_direction = 'both'
f.stream_tracer.maximum_propagation = 200
# Uncomment the following line if you want to hide the seed:
#f.seed.widget.enabled = False

# Extract the z-velocity from the vectors and plot the 0 level set
# hence producing the z-nullcline.
src = f.mlab_source.m_data
e = mlab.pipeline.extract_vector_components(src)
e.component = 'z-component'
zc = mlab.pipeline.iso_surface(e, opacity=0.5, contours=[0, ],
                               color=(0.6, 1, 0.2))
# When using transparency, hiding 'backface' triangles often gives better
# results
zc.actor.property.backface_culling = True

# A nice view of the plot.
mlab.view(140, 120, 113, [0.65, 1.5, 27])
mlab.savefig('example.png')

import numpy as np
import mayavi.mlab as mlab
import moviepy.editor as mpy
```
| github_jupyter |
# Week 7 worksheet: Spherically symmetric parabolic PDEs
This worksheet contains a number of exercises covering only the numerical aspects of the course. Some parts, however, still require you to solve the problem by hand, i.e. with pen and paper. The rest requires you to write Python code. It should usually be obvious which parts require which.
#### Suggested reading
You will see lists of links to further reading and resources throughout the worksheets, in sections titled **Learn more:**. These will include links to the Python documentation on the topic at hand, or links to relevant book sections or other online resources. Unless explicitly indicated, these are not mandatory reading, although of course we strongly recommend that you consult them!
#### Displaying solutions
Solutions will be released after the workshop, as a new `.txt` file in the same GitHub repository. After pulling the file to Noteable, **run the following cell** to create clickable buttons under each exercise, which will allow you to reveal the solutions.
## Note:
This workbook expects to find a directory called figures in the same folder as well as the scripts folder. Please make sure you download figures (and the files it contains) from the GitHub.
```
%run scripts/create_widgets.py W07
```
*How it works: You will see cells located below each exercise, each containing a command starting with `%run scripts/show_solutions.py`. You don't need to run those yourself; the command above runs a script which automatically runs these specific cells for you. The commands in each of these cells each create the button for the corresponding exercise. The Python code to achieve this is contained in `scripts/show_solutions.py`, and relies on [IPython widgets](https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20Basics.html) --- feel free to take a look at the code if you are curious.*
```
%%javascript
MathJax.Hub.Config({
TeX: { equationNumbers: { autoNumber: "AMS" } }
});
```
## Exercise 1
$$
\newcommand{\vect}[1]{\bm #1}
\newcommand{\grad}{\nabla}
\newcommand{\pderiv}[2]{\frac{\partial #1}{\partial #2}}
\newcommand{\pdderiv}[2]{\frac{\partial^2 #1}{\partial #2^2}}
$$
Consider the spherically symmetric form of the heat conduction equation
$$
\pdderiv{u}{r} + \frac{2}{r}\pderiv{u}{r} = \frac1\kappa\pderiv{u}{t}
$$
### Part a)
Define
$$
v(r,t) = r u(r,t)
$$
and show that $v$ satisfies the standard one-dimensional heat conduction equation.
What can we expect of a solution as $r\to\infty$?
**Remarks:**
- The worksheet requires understanding of the material from Analytical methods Part 6: Spherical coordinates
- The material is applied in Analytical methods Example 5: Radially symmetric heat conduction example 9.24b
```
%run scripts/show_solutions.py W07_ex1_parta
```
### Part b)
Solve the equation in the annulus $a\le r\le b$ subject to the boundary conditions
\begin{align*}
u(a,t) &= T_0, \quad & t>0 \\
u(b,t) &= 0, \quad & t>0 \\
u(r,0) &= 0, & a\le r\le b
\end{align*}
Show that the solution has the form
$$
T(r,t) = \frac{a T_0}{r} \left[\frac{b-r}{b-a} - \sum_{N=1}^\infty A_N e^{-\kappa\lambda^2 t} \sin\left(\frac{r-a}{b-a}N\pi\right) \right]
$$
where $\lambda(b-a)=N\pi$. Evaluate the Fourier coefficients $A_N$.
```
%run scripts/show_solutions.py W07_ex1_partb
```
### Part c)
Modify the 1D solver from the Explicit-Parabolic Solver workbook so that it is solving the spherically symmetric form of the heat conduction equation,
$$
\pdderiv{u}{r} + \frac{2}{r}\pderiv{u}{r} = \frac1\kappa\pderiv{u}{t}.
$$
Remember that you will need to discretise the first derivative $\pderiv{u}{r}$ using the central 2nd order finite difference approximation and will then need to find the coefficients for the spherical form of the FTCS scheme.
Use this solver to solve the problem on an annulus where $a=0.1$, $b=1$ and $T_0=100$ Celsius. Compare your solution with the analytical solution from part (b).
```
%run scripts/show_solutions.py W07_ex1_partc
```
| github_jupyter |
---
_You are currently looking at **version 1.5** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-machine-learning/resources/bANLa) course resource._
---
# Assignment 2
In this assignment you'll explore the relationship between model complexity and generalization performance, by adjusting key parameters of various supervised learning models. Part 1 of this assignment will look at regression and Part 2 will look at classification.
## Part 1 - Regression
First, run the following block to set up the variables needed for later sections.
```
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

# Synthetic 1-D regression data: 15 noisy samples of sin(x) + x/6.
np.random.seed(0)
n = 15
x = np.linspace(0,10,n) + np.random.randn(n)/5
y = np.sin(x)+x/6 + np.random.randn(n)/10

X_train, X_test, y_train, y_test = train_test_split(x, y, random_state=0)

# You can use this function to help you visualize the dataset by
# plotting a scatterplot of the data points
# in the training and test sets.
#def part1_scatter():
#    import matplotlib.pyplot as plt
#    %matplotlib notebook
#    plt.figure()
#    plt.scatter(X_train, y_train, label='training data')
#    plt.scatter(X_test, y_test, label='test data')
#    plt.legend(loc=4);

# NOTE: Uncomment the function below to visualize the data, but be sure
# to **re-comment it before submitting this assignment to the autograder**.
# part1_scatter()
```
### Question 1
Write a function that fits a polynomial LinearRegression model on the *training data* `X_train` for degrees 1, 3, 6, and 9. (Use PolynomialFeatures in sklearn.preprocessing to create the polynomial features and then fit a linear regression model) For each model, find 100 predicted values over the interval x = 0 to 10 (e.g. `np.linspace(0,10,100)`) and store this in a numpy array. The first row of this array should correspond to the output from the model trained on degree 1, the second row degree 3, the third row degree 6, and the fourth row degree 9.
<img src="readonly/polynomialreg1.png" style="width: 1000px;"/>
The figure above shows the fitted models plotted on top of the original data (using `plot_one()`).
<br>
*This function should return a numpy array with shape `(4, 100)`*
```
def answer_one():
    """Fit polynomial linear regressions of degree 1, 3, 6 and 9 on the
    training data and return their predictions over np.linspace(0, 10, 100).

    Returns a numpy array of shape (4, 100); row i holds the predictions
    of the degree [1, 3, 6, 9][i] model.
    """
    from sklearn.linear_model import LinearRegression
    from sklearn.preprocessing import PolynomialFeatures

    grid = np.linspace(0, 10, 100).reshape(-1, 1)
    result = []
    for degree in [1, 3, 6, 9]:  # FIX: dropped the unused enumerate index
        poly = PolynomialFeatures(degree=degree)
        X_poly = poly.fit_transform(X_train.reshape(-1, 1))
        linreg = LinearRegression().fit(X_poly, y_train)
        # FIX: use transform (not fit_transform) so the feature map fitted
        # on the training data is reused on the prediction grid.
        result.append(linreg.predict(poly.transform(grid)))
    return np.array(result)

answer_one()
# feel free to use the function plot_one() to replicate the figure
# from the prompt once you have completed question one
#def plot_one(degree_predictions):
# import matplotlib.pyplot as plt
# %matplotlib notebook
# plt.figure(figsize=(10,5))
# plt.plot(X_train, y_train, 'o', label='training data', markersize=10)
# plt.plot(X_test, y_test, 'o', label='test data', markersize=10)
# for i,degree in enumerate([1,3,6,9]):
# plt.plot(np.linspace(0,10,100), degree_predictions[i], alpha=0.8, lw=2, label='degree={}'.format(degree))
# plt.ylim(-1,2.5)
# plt.legend(loc=4)
#
#plot_one(answer_one())
```
### Question 2
Write a function that fits a polynomial LinearRegression model on the training data `X_train` for degrees 0 through 9. For each model compute the $R^2$ (coefficient of determination) regression score on the training data as well as the the test data, and return both of these arrays in a tuple.
*This function should return one tuple of numpy arrays `(r2_train, r2_test)`. Both arrays should have shape `(10,)`*
```
def answer_two():
    """Compute train/test R^2 for polynomial LinearRegression, degrees 0-9.

    Returns:
        tuple (r2_train, r2_test) of numpy arrays, each of shape (10,).
    """
    from sklearn.linear_model import LinearRegression
    from sklearn.preprocessing import PolynomialFeatures
    # FIX: dropped `from sklearn.metrics.regression import r2_score` — that
    # module path was removed from sklearn and the import was unused anyway
    # (LinearRegression.score already returns R^2).

    r2_train = np.zeros(10)
    r2_test = np.zeros(10)
    for degree in range(10):
        poly = PolynomialFeatures(degree=degree)
        poly_train = poly.fit_transform(X_train.reshape(-1, 1))
        linreg = LinearRegression().fit(poly_train, y_train)
        r2_train[degree] = linreg.score(poly_train, y_train)
        # FIX: transform (not fit_transform) — reuse the training-fitted
        # feature expansion on the test data.
        poly_test = poly.transform(X_test.reshape(-1, 1))
        r2_test[degree] = linreg.score(poly_test, y_test)
    return (r2_train, r2_test)
answer_two()
```
### Question 3
Based on the $R^2$ scores from question 2 (degree levels 0 through 9), what degree level corresponds to a model that is underfitting? What degree level corresponds to a model that is overfitting? What choice of degree level would provide a model with good generalization performance on this dataset?
Hint: Try plotting the $R^2$ scores from question 2 to visualize the relationship between degree level and $R^2$. Remember to comment out the import matplotlib line before submission.
*This function should return one tuple with the degree values in this order: `(Underfitting, Overfitting, Good_Generalization)`. There might be multiple correct solutions, however, you only need to return one possible solution, for example, (1,2,3).*
```
def answer_three():
    """Pick (underfitting, overfitting, good-generalization) degree levels
    from the R^2 scores computed by answer_two()."""
    r2_train, r2_test = answer_two()
    scores = pd.DataFrame({'training_score': r2_train,
                           'test_score': r2_test})
    scores['mean'] = scores.mean(axis=1)
    scores['diff'] = scores['training_score'] - scores['test_score']
    # Good generalization: highest average of train/test R^2.
    good = scores['mean'].idxmax()
    # Overfitting: largest train-minus-test gap.
    ofit = scores['diff'].idxmax()
    # Underfitting: lowest training R^2.
    ufit = scores['training_score'].idxmin()
    return (ufit, ofit, good)
answer_three()
```
### Question 4
Training models on high degree polynomial features can result in overly complex models that overfit, so we often use regularized versions of the model to constrain model complexity, as we saw with Ridge and Lasso linear regression.
For this question, train two models: a non-regularized LinearRegression model (default parameters) and a regularized Lasso Regression model (with parameters `alpha=0.01`, `max_iter=10000`) both on polynomial features of degree 12. Return the $R^2$ score for both the LinearRegression and Lasso model's test sets.
*This function should return one tuple `(LinearRegression_R2_test_score, Lasso_R2_test_score)`*
```
def answer_four():
    """Degree-12 polynomial regression: plain vs Lasso-regularized.

    Returns:
        tuple (LinearRegression_R2_test_score, Lasso_R2_test_score).
    """
    from sklearn.preprocessing import PolynomialFeatures
    from sklearn.linear_model import Lasso, LinearRegression
    # FIX: dropped the unused import from the removed module
    # `sklearn.metrics.regression`; .score() already returns R^2.

    poly = PolynomialFeatures(degree=12)
    X_train_poly = poly.fit_transform(X_train.reshape(-1, 1))
    # FIX: transform (not fit_transform) — reuse the training-fitted expansion.
    X_test_poly = poly.transform(X_test.reshape(-1, 1))

    linreg = LinearRegression().fit(X_train_poly, y_train)
    LinearRegression_R2_test_score = linreg.score(X_test_poly, y_test)

    lasso = Lasso(alpha=0.01, max_iter=10000).fit(X_train_poly, y_train)
    Lasso_R2_test_score = lasso.score(X_test_poly, y_test)
    return (LinearRegression_R2_test_score, Lasso_R2_test_score)
answer_four()
```
## Part 2 - Classification
Here's an application of machine learning that could save your life! For this section of the assignment we will be working with the [UCI Mushroom Data Set](http://archive.ics.uci.edu/ml/datasets/Mushroom?ref=datanews.io) stored in `readonly/mushrooms.csv`. The data will be used to train a model to predict whether or not a mushroom is poisonous. The following attributes are provided:
*Attribute Information:*
1. cap-shape: bell=b, conical=c, convex=x, flat=f, knobbed=k, sunken=s
2. cap-surface: fibrous=f, grooves=g, scaly=y, smooth=s
3. cap-color: brown=n, buff=b, cinnamon=c, gray=g, green=r, pink=p, purple=u, red=e, white=w, yellow=y
4. bruises?: bruises=t, no=f
5. odor: almond=a, anise=l, creosote=c, fishy=y, foul=f, musty=m, none=n, pungent=p, spicy=s
6. gill-attachment: attached=a, descending=d, free=f, notched=n
7. gill-spacing: close=c, crowded=w, distant=d
8. gill-size: broad=b, narrow=n
9. gill-color: black=k, brown=n, buff=b, chocolate=h, gray=g, green=r, orange=o, pink=p, purple=u, red=e, white=w, yellow=y
10. stalk-shape: enlarging=e, tapering=t
11. stalk-root: bulbous=b, club=c, cup=u, equal=e, rhizomorphs=z, rooted=r, missing=?
12. stalk-surface-above-ring: fibrous=f, scaly=y, silky=k, smooth=s
13. stalk-surface-below-ring: fibrous=f, scaly=y, silky=k, smooth=s
14. stalk-color-above-ring: brown=n, buff=b, cinnamon=c, gray=g, orange=o, pink=p, red=e, white=w, yellow=y
15. stalk-color-below-ring: brown=n, buff=b, cinnamon=c, gray=g, orange=o, pink=p, red=e, white=w, yellow=y
16. veil-type: partial=p, universal=u
17. veil-color: brown=n, orange=o, white=w, yellow=y
18. ring-number: none=n, one=o, two=t
19. ring-type: cobwebby=c, evanescent=e, flaring=f, large=l, none=n, pendant=p, sheathing=s, zone=z
20. spore-print-color: black=k, brown=n, buff=b, chocolate=h, green=r, orange=o, purple=u, white=w, yellow=y
21. population: abundant=a, clustered=c, numerous=n, scattered=s, several=v, solitary=y
22. habitat: grasses=g, leaves=l, meadows=m, paths=p, urban=u, waste=w, woods=d
<br>
The data in the mushrooms dataset is currently encoded with strings. These values will need to be encoded to numeric to work with sklearn. We'll use pd.get_dummies to convert the categorical variables into indicator variables.
```
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
# Load the mushroom data set.  NOTE(review): the prompt above refers to
# 'readonly/mushrooms.csv' — confirm this relative path matches the runtime cwd.
mush_df = pd.read_csv('mushrooms.csv')
# One-hot encode every categorical column (including the class label).
mush_df2 = pd.get_dummies(mush_df)
# Features: all indicator columns after the first two (the class_* dummies).
X_mush = mush_df2.iloc[:,2:]
# Target: column 1 of the encoded frame — presumably the class_p (poisonous)
# indicator; verify against the column order of pd.get_dummies.
y_mush = mush_df2.iloc[:,1]
# use the variables X_train2, y_train2 for Question 5
X_train2, X_test2, y_train2, y_test2 = train_test_split(X_mush, y_mush, random_state=0)
# For performance reasons in Questions 6 and 7, we will create a smaller version of the
# entire mushroom dataset for use in those questions. For simplicity we'll just re-use
# the 25% test split created above as the representative subset.
#
# Use the variables X_subset, y_subset for Questions 6 and 7.
X_subset = X_test2
y_subset = y_test2
```
### Question 5
Using `X_train2` and `y_train2` from the preceding cell, train a DecisionTreeClassifier with default parameters and random_state=0. What are the 5 most important features found by the decision tree?
As a reminder, the feature names are available in the `X_train2.columns` property, and the order of the features in `X_train2.columns` matches the order of the feature importance values in the classifier's `feature_importances_` property.
*This function should return a list of length 5 containing the feature names in descending order of importance.*
*Note: remember that you also need to set random_state in the DecisionTreeClassifier.*
```
def answer_five():
    """Train a decision tree on the mushroom data and rank its features.

    Returns:
        list of 5 feature-name strings, most important first.
    """
    from sklearn.tree import DecisionTreeClassifier

    # BUG FIX: the assignment explicitly requires random_state=0; it was
    # missing, making the tree (and thus the ranking) non-deterministic.
    dt_clf = DecisionTreeClassifier(random_state=0).fit(X_train2, y_train2)
    # feature_importances_ is ordered like X_train2.columns, so argsort gives
    # column indices; reverse for descending importance and keep the top 5.
    top5 = np.argsort(dt_clf.feature_importances_)[::-1][:5]
    return X_train2.columns[top5].tolist()
answer_five()
```
### Question 6
For this question, we're going to use the `validation_curve` function in `sklearn.model_selection` to determine training and test scores for a Support Vector Classifier (`SVC`) with varying parameter values. Recall that the validation_curve function, in addition to taking an initialized unfitted classifier object, takes a dataset as input and does its own internal train-test splits to compute results.
**Because creating a validation curve requires fitting multiple models, for performance reasons this question will use just a subset of the original mushroom dataset: please use the variables X_subset and y_subset as input to the validation curve function (instead of X_mush and y_mush) to reduce computation time.**
The initialized unfitted classifier object we'll be using is a Support Vector Classifier with radial basis kernel. So your first step is to create an `SVC` object with default parameters (i.e. `kernel='rbf', C=1`) and `random_state=0`. Recall that the kernel width of the RBF kernel is controlled using the `gamma` parameter.
With this classifier, and the dataset in X_subset, y_subset, explore the effect of `gamma` on classifier accuracy by using the `validation_curve` function to find the training and test scores for 6 values of `gamma` from `0.0001` to `10` (i.e. `np.logspace(-4,1,6)`). Recall that you can specify what scoring metric you want validation_curve to use by setting the "scoring" parameter. In this case, we want to use "accuracy" as the scoring metric.
For each level of `gamma`, `validation_curve` will fit 3 models on different subsets of the data, returning two 6x3 (6 levels of gamma x 3 fits per level) arrays of the scores for the training and test sets.
Find the mean score across the three models for each level of `gamma` for both arrays, creating two arrays of length 6, and return a tuple with the two arrays.
e.g.
if one of your array of scores is
array([[ 0.5, 0.4, 0.6],
[ 0.7, 0.8, 0.7],
[ 0.9, 0.8, 0.8],
[ 0.8, 0.7, 0.8],
[ 0.7, 0.6, 0.6],
[ 0.4, 0.6, 0.5]])
it should then become
array([ 0.5, 0.73333333, 0.83333333, 0.76666667, 0.63333333, 0.5])
*This function should return one tuple of numpy arrays `(training_scores, test_scores)` where each array in the tuple has shape `(6,)`.*
```
def answer_six():
    """Mean train/test accuracy of an RBF-kernel SVC over six gamma values
    (np.logspace(-4, 1, 6)), via sklearn's validation_curve.

    Returns:
        tuple (training_scores, test_scores) of shape-(6,) numpy arrays,
        each entry averaged over the cross-validation fits for that gamma.
    """
    from sklearn.svm import SVC
    from sklearn.model_selection import validation_curve

    gamma_range = np.logspace(-4, 1, 6)
    estimator = SVC(kernel='rbf', C=1, random_state=0)
    train_scores, test_scores = validation_curve(
        estimator, X_subset, y_subset,
        param_name='gamma', param_range=gamma_range, scoring='accuracy')
    # Rows are gamma levels, columns are CV fits: average over the columns.
    return (np.mean(train_scores, axis=1), np.mean(test_scores, axis=1))
answer_six()
```
### Question 7
Based on the scores from question 6, what gamma value corresponds to a model that is underfitting (and has the worst test set accuracy)? What gamma value corresponds to a model that is overfitting (and has the worst test set accuracy)? What choice of gamma would be the best choice for a model with good generalization performance on this dataset (high accuracy on both training and test set)?
Hint: Try plotting the scores from question 6 to visualize the relationship between gamma and accuracy. Remember to comment out the import matplotlib line before submission.
*This function should return one tuple with the degree values in this order: `(Underfitting, Overfitting, Good_Generalization)` Please note there is only one correct solution.*
```
def answer_seven():
    """Pick (underfitting, overfitting, good-generalization) gamma values
    from the validation-curve scores of answer_six()."""
    train_scores, test_scores = answer_six()
    scores = pd.DataFrame({'training_score': train_scores,
                           'test_score': test_scores})
    scores['mean'] = scores.mean(axis=1)
    scores['diff'] = scores['training_score'] - scores['test_score']
    gamma = np.logspace(-4, 1, 6)
    # Underfitting: lowest training accuracy.
    ufit = gamma[scores['training_score'].idxmin()]
    # Overfitting: largest train-minus-test accuracy gap.
    ofit = gamma[scores['diff'].idxmax()]
    # Good generalization: highest mean accuracy, rounded to one decimal.
    good = round(gamma[scores['mean'].idxmax()], 1)
    return (ufit, ofit, good)
answer_seven()
```
| github_jupyter |
```
from pymongo import MongoClient
import pandas as pd
import datetime
# Open Database and find history data collection
client = MongoClient()
db = client.test_database
shdaily = db.indexdata
# KDJ calculation formula
def KDJCalculation(K1, D1, high, low, close):
    """Compute one step of the KDJ stochastic oscillator.

    Args:
        K1, D1: previous day's K and D values.
        high, low: highest / lowest price over the look-back window.
        close: current closing price.

    Returns:
        (K1, D1, K2, D2, J2): the inputs K1/D1 echoed back, followed by
        today's K, D and J values.
    """
    # Smoothing factors for K and D; conventionally fixed at 1/3.
    smooth_k = 1.0/3
    smooth_d = 1.0/3

    # Raw Stochastic Value: position of the close inside the high/low range,
    # scaled to 0..100.  A flat range (high == low) is treated as neutral.
    if high != low:
        RSV = (close - low) / (high - low) * 100
    else:
        RSV = 50

    # Exponentially smooth K from RSV, D from K, then derive J.
    K2 = (1 - smooth_k) * K1 + smooth_k * RSV
    D2 = (1 - smooth_d) * D1 + smooth_d * K2
    J2 = 3 * K2 - 2 * D2
    return K1, D1, K2, D2, J2
# Put the first dataset in
# List the data
# Initial K/D seed values for the first day (standard KDJ convention).
K1 = 50
D1 = 50
# For each day, compute KDJ from the last 9 rows and attach it to the doc.
# NOTE(review): only the first 10 documents are processed, and the results
# are printed but never written back (no shdaily.update/save call) —
# presumably a dry run; confirm before relying on stored KDJ values.
for d in shdaily.find()[:10]:
    date = d['date']
    # All rows up to and including `date`, newest first.
    datalist = pd.DataFrame(list(shdaily.find({'date': {"$lte": date}}).sort('date', -1)))
    data = datalist[:9]  # 9-day look-back window
    # Previous day's KDJ values (row 1 = the day before `date`).
    # FIX: DataFrame.ix was removed from pandas; on the default RangeIndex
    # label 1 and position 1 coincide, so .iloc[1] is the safe replacement.
    K1 = data.iloc[1]['KDJ_K']
    D1 = data.iloc[1]['KDJ_D']
    high = data['high'].values
    low = data['low'].values
    close = data[:1]['close'].values
    K1, D1, K2, D2, J2 = KDJCalculation(K1, D1, max(high), min(low), close)
    # `close` is a length-1 array, so K2/D2/J2 come back as arrays too.
    d['KDJ_K'] = K2[0]
    d['KDJ_D'] = D2[0]
    d['KDJ_J'] = J2[0]
    # K1 = K2
    # D1 = D2
    # FIX: Python 2 print statement -> Python 3 print() call.
    print(d)

#datalist = pd.DataFrame(list(shdaily.find().sort('date', -1)))
#date1 = datetime.strptime("01/01/16", "%d/%m/%y")
# List out the data before or equal a specific date
#list(shdaily.find({'date':{"$lte":'2016-02-08'}}).sort('date', -1))

# Get last day KDJ data from database
datalist = pd.DataFrame(list(shdaily.find({'date': {"$lte": '2016-02-10'}}).sort('date', -1)))
# FIX: .ix removed from pandas; positional lookup of the second-newest row.
data = datalist.iloc[1]
data['KDJ_K']

# Save data to db
# data = datalist[:9]
# data
# K1 = 50
# D1 = 50
# high = data['high'].values
# low = data['low'].values
# close = data[:1]['close'].values
# K1,D1,K2,D2,J2 = KDJCalculation(K1,D1,max(high),min(low),close)
# Another KDJ Calculation based on dataframe
def CalculateKDJ(stock_data):
    """Compute KDJ oscillator columns for a price DataFrame.

    Args:
        stock_data: DataFrame with 'HighPx', 'LowPx' and 'ClosingPx' columns.
            Mutated in place: KDJ_K, KDJ_D and KDJ_J columns are added.

    Returns:
        DataFrame view containing just the KDJ_K, KDJ_D and KDJ_J columns.
    """
    # FIX: dropped the unused `endday = pd.datetime.today()` line —
    # pd.datetime was removed from pandas and the value was never used
    # (its only consumer, the get_price call, is commented out).
    N1 = 9   # look-back window for the high/low range
    N2 = 3   # K smoothing (ewm center of mass)
    N3 = 3   # D smoothing (ewm center of mass)

    # FIX: pd.rolling_min/rolling_max/expanding_min/expanding_max and
    # pd.ewma were removed from pandas.  rolling(N1, min_periods=1)
    # reproduces the old "rolling window backfilled with the expanding
    # min/max" behaviour in a single call.
    low_list = stock_data['LowPx'].rolling(N1, min_periods=1).min()
    high_list = stock_data['HighPx'].rolling(N1, min_periods=1).max()

    # NOTE(review): the textbook RSV uses the N1-day range (low_list /
    # high_list, as in the commented line below); this code uses the
    # same-day range instead, leaving low_list/high_list unused.  Kept
    # as-is to preserve behaviour — confirm which formula is intended.
    #rsv = (stock_data['ClosingPx'] - low_list) / (high_list - low_list) * 100
    rsv = (stock_data['ClosingPx'] - stock_data['LowPx']) / (stock_data['HighPx'] - stock_data['LowPx']) * 100

    # pd.ewma(s, com=c) -> s.ewm(com=c).mean() (same adjust=True default).
    stock_data['KDJ_K'] = rsv.ewm(com=N2).mean()
    stock_data['KDJ_D'] = stock_data['KDJ_K'].ewm(com=N3).mean()
    stock_data['KDJ_J'] = 3 * stock_data['KDJ_K'] - 2 * stock_data['KDJ_D']
    KDJ = stock_data[['KDJ_K', 'KDJ_D', 'KDJ_J']]
    return KDJ
```
| github_jupyter |
```
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from keras.utils import to_categorical
from keras.datasets import mnist
import numpy as np
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import matplotlib.cm as cm
%matplotlib inline
from sklearn.metrics import confusion_matrix
import pandas as pd
import seaborn as sns
```
Load the mnist training and test data sets
```
(X_train, y_train), (X_test, y_test) = mnist.load_data()
```
Display the first five images and the labels
```
def plot_gray_image(img, title, ax):
    """Render `img` in grayscale on axes `ax`, titled `title`, with both
    coordinate axes hidden."""
    for axis in (ax.get_xaxis(), ax.get_yaxis()):
        axis.set_visible(False)
    ax.imshow(img, cmap=cm.gray)
    ax.set_title(title)
# Show the first five training images, using each label as the title.
fig, ax_list = plt.subplots(nrows=1, ncols=5)
for idx, ax in enumerate(ax_list):
    plot_gray_image(X_train[idx], y_train[idx], ax)
```
Flatten the two dimensional input data and center it around zero
```
# Flatten each 28x28 image into a 784-long vector and scale pixels to [0, 1].
img_size = X_train.shape[1] * X_train.shape[2]
X_train_flat = X_train.reshape(-1, img_size)
X_test_flat = X_test.reshape(-1, img_size)
X_train_flat = X_train_flat/255
X_test_flat = X_test_flat/255
# One-hot encode the digit labels (10 classes).
num_classes = 10
y_train_cat = to_categorical(y_train, num_classes)
y_test_cat = to_categorical(y_test, num_classes)
batch_size = 128
epochs = 10
# Fully-connected net: two 512-unit ReLU layers with 20% dropout each,
# softmax output over the 10 digit classes.
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(img_size,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])
model.summary()
# NOTE(review): the test set is reused as validation data here, so the
# per-epoch val_* metrics are not an independent estimate — confirm intended.
history = model.fit(X_train_flat, y_train_cat,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(X_test_flat, y_test_cat))
score = model.evaluate(X_test_flat, y_test_cat, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# NOTE(review): Sequential.predict_classes was removed in newer Keras/TF;
# np.argmax(model.predict(X_test_flat), axis=1) is the replacement —
# confirm the installed version before running.
y_predict = model.predict_classes(X_test_flat)
```
Display numbers where the prediction is wrong
```
# Indices of test samples the model classified incorrectly.
err_idx = np.where(y_test != y_predict)[0]
err_plot_size = 5
# Plot the first five misclassified digits with actual/predicted labels.
fig, ax_list = plt.subplots(nrows=1, ncols=err_plot_size)
fig.set_size_inches(w=6, h=2)
fig.suptitle('a - actual, p - predicted')
for idx, ax in enumerate(ax_list):
    data_idx = err_idx[idx]
    msg = 'a {}, p {}'.format(y_test[data_idx], y_predict[data_idx])
    plot_gray_image(X_test[data_idx], msg, ax)
# Confusion matrix over all test predictions (rows: actual, cols: predicted).
cmatrix = confusion_matrix(y_test, y_predict)
df_cm = pd.DataFrame(cmatrix)
df_cm
# Heatmap view of the confusion matrix with integer cell annotations.
fig, ax = plt.subplots(figsize=(8, 6))
sns.heatmap(df_cm, annot=True, fmt='.0f', ax=ax)
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.