code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **[Python Home Page](https://www.kaggle.com/learn/python)**
#
# ---
#
# # Try It Yourself
#
# Functions are powerful. Try writing some yourself.
#
# As before, don't forget to run the setup code below before jumping into question 1.
# + _kg_hide-input=true _kg_hide-output=true
# SETUP. You don't need to worry for now about what this code does or how it works.
from learntools.core import binder; binder.bind(globals())
from learntools.python.ex2 import *
print('Setup complete.')
# -
# # Exercises
# ## 1.
#
# Complete the body of the following function according to its docstring.
#
# HINT: Python has a built-in function `round`.
# +
def round_to_two_places(num):
    """Round *num* to two decimal places and return the result.

    >>> round_to_two_places(3.14159)
    3.14
    """
    rounded = round(num, 2)
    return rounded
# Check your answer
q1.check()
# +
# Uncomment the following for a hint
#q1.hint()
# Or uncomment the following to peek at the solution
#q1.solution()
# -
# <hr/>
#
# ## 2.
#
# The help for `round` says that `ndigits` (the second argument) may be negative.
# What do you think will happen when it is? Try some examples in the following cell.
#
# Can you think of a case where this would be useful?
# Put your test code here
num = 3141.59
# A negative ndigits rounds to the LEFT of the decimal point: ndigits=-3 rounds
# to the nearest thousand, so 3141.59 -> 3000.0.  Handy for reporting rough
# magnitudes (populations, budgets) without spurious precision.
round(num, -3)
# Check your answer (Run this code cell to receive credit!)
q2.solution()
# <hr/>
#
# ## 3.
#
# In a previous programming problem, the candy-sharing friends Alice, Bob and Carol tried to split candies evenly. For the sake of their friendship, any candies left over would be smashed. For example, if they collectively bring home 91 candies, they'll take 30 each and smash 1.
#
# Below is a simple function that will calculate the number of candies to smash for *any* number of total candies.
#
# Modify it so that it optionally takes a second argument representing the number of friends the candies are being split between. If no second argument is provided, it should assume 3 friends, as before.
#
# Update the docstring to reflect this new behaviour.
# +
def to_smash(total_candies, num_of_friends=3):
    """Return how many leftover candies must be smashed.

    The candies are split evenly between ``num_of_friends`` friends
    (3 by default); whatever cannot be divided evenly is the remainder
    to smash.

    >>> to_smash(91)
    1
    """
    leftovers = total_candies % num_of_friends
    return leftovers
# Check your answer
q3.check()
# +
#q3.hint()
# +
#q3.solution()
# -
# <hr/>
#
# ## 4. (Optional)
#
# It may not be fun, but reading and understanding error messages will be an important part of your Python career.
#
# Each code cell below contains some commented-out buggy code. For each cell...
#
# 1. Read the code and predict what you think will happen when it's run.
# 2. Then uncomment the code and run it to see what happens. (**Tip**: In the kernel editor, you can highlight several lines and press `ctrl`+`/` to toggle commenting.)
# 3. Fix the code (so that it accomplishes its intended purpose without throwing an exception)
#
# <!-- TODO: should this be autochecked? Delta is probably pretty small. -->
# +
def ruound_to_two_places(num):
    """Round ``num`` to two decimal places.

    NOTE: the misspelled name is part of the exercise text; the call below
    matches the definition, so the cell runs without error.
    """
    value = round(num, 2)
    return value

ruound_to_two_places(9.9999)
# -
x = -10
y = 5
# # Which of the two variables above has the smallest absolute value?
# abs() drops the sign, so min() compares magnitudes only: |x| = 10, |y| = 5 -> 5.
smallest_abs = min(abs(x), abs(y))
smallest_abs
# +
def f(x):
    """Return the absolute value of x."""
    return abs(x)

print(f(5))
# -
# # Keep Going
#
# Nice job with the code. Next up, you'll learn about *conditionals*, which you'll need to write interesting programs. Keep going **[here](https://www.kaggle.com/colinmorris/booleans-and-conditionals)**
# ---
# **[Python Home Page](https://www.kaggle.com/learn/python)**
#
#
#
#
#
# *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum) to chat with other Learners.*
|
python/exercise/functions-and-getting-help.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Part 1 - Building the CNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
# Initialising the CNN as a plain layer-by-layer stack
classifier = Sequential()
# +
# Step 1 - Convolution: 32 filters of size 3x3 over 64x64 RGB input.
# NOTE(review): Convolution2D(32, 3, 3) is the Keras 1.x positional form;
# Keras 2 uses Conv2D(32, (3, 3)) — confirm the installed Keras version.
classifier.add(Convolution2D(32, 3, 3, input_shape = (64, 64, 3), activation = 'relu'))
# Step 2 - Pooling: halve each spatial dimension
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Adding a second convolutional layer (input_shape inferred from previous layer)
classifier.add(Convolution2D(32, 3, 3, activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Step 3 - Flattening: feature maps -> 1-D vector for the dense layers
classifier.add(Flatten())
# Step 4 - Full connection; sigmoid output for binary classification.
# NOTE(review): output_dim is the Keras 1.x name for units — confirm version.
classifier.add(Dense(output_dim = 128, activation = 'relu'))
classifier.add(Dense(output_dim = 1, activation = 'sigmoid'))
# -
# Compiling the CNN: binary cross-entropy matches the single sigmoid output
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# +
# Part 2 - Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
# Training-time augmentation (shear/zoom/flip); pixel values rescaled to [0, 1]
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)
# Test images are only rescaled — no augmentation at evaluation time
test_datagen = ImageDataGenerator(rescale = 1./255)
# Directory layout: one sub-folder per class; class_mode='binary' -> 0/1 labels
training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size = (64, 64),
                                                 batch_size = 32,
                                                 class_mode = 'binary')
test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size = (64, 64),
                                            batch_size = 32,
                                            class_mode = 'binary')
# NOTE(review): fit_generator / samples_per_epoch / nb_epoch / nb_val_samples
# are Keras 1.x names (Keras 2 uses fit / steps_per_epoch / epochs /
# validation_steps) — confirm the installed version before running.
classifier.fit_generator(training_set,
                         samples_per_epoch = 8000,
                         nb_epoch = 25,
                         validation_data = test_set,
                         nb_val_samples = 2000)
# -
|
Day 21 - Convonutional Neural Network/CNN.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Pumpkin Pricing
#
# Load up required libraries and dataset. Convert the data to a dataframe containing a subset of the data:
#
# - Only get pumpkins priced by the bushel
# - Convert the date to a month
# - Calculate the price to be an average of high and low prices
# - Convert the price to reflect the pricing by bushel quantity
# +
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Load the raw USDA pumpkin price data (relative path; run from the notebook dir)
pumpkins = pd.read_csv('../data/US-pumpkins.csv')
pumpkins.head()
# +
# Keep only rows priced by the bushel (case-sensitive substring match)
pumpkins = pumpkins[pumpkins['Package'].str.contains('bushel', case=True, regex=True)]
new_columns = ['Package', 'Variety', 'City Name', 'Month', 'Low Price', 'High Price', 'Date', 'City Num', 'Variety Num']
# Drop every column NOT in the keep-list above
pumpkins = pumpkins.drop([c for c in pumpkins.columns if c not in new_columns], axis=1)
# Price = midpoint of the reported low/high range; month extracted from Date
price = (pumpkins['Low Price'] + pumpkins['High Price']) / 2
month = pd.DatetimeIndex(pumpkins['Date']).month
new_pumpkins = pd.DataFrame({'Month': month, 'Variety': pumpkins['Variety'], 'City': pumpkins['City Name'], 'Package': pumpkins['Package'], 'Low Price': pumpkins['Low Price'],'High Price': pumpkins['High Price'], 'Price': price})
# Normalize to per-bushel pricing.
# NOTE(review): a "1 1/9 bushel" package divides by 1.1 here; the exact factor
# would be 10/9 ~= 1.111 — confirm the approximation is intentional.
new_pumpkins.loc[new_pumpkins['Package'].str.contains('1 1/9'), 'Price'] = price/1.1
new_pumpkins.loc[new_pumpkins['Package'].str.contains('1/2'), 'Price'] = price*2
new_pumpkins.head()
# -
# A basic scatterplot reminds us that we only have month data from August through December. We probably need more data to be able to draw conclusions in a linear fashion.
import matplotlib.pyplot as plt
# Quick look: months on x, price on y (column names resolved via data=)
plt.scatter('Month','Price',data=new_pumpkins)
from sklearn.preprocessing import LabelEncoder
# Label-encode every column except the last ('Price') so correlations can be
# computed on numeric values.  NOTE(review): this also re-encodes 'Month',
# replacing month numbers with arbitrary label codes — confirm that is intended.
new_pumpkins.iloc[:, 0:-1] = new_pumpkins.iloc[:, 0:-1].apply(LabelEncoder().fit_transform)
print(new_pumpkins['City'].corr(new_pumpkins['Price']))
print(new_pumpkins['Package'].corr(new_pumpkins['Price']))
# Drop rows with missing values before modeling
new_pumpkins.dropna(inplace=True)
new_pumpkins.info()
# Keep only the feature/target pair used for the simple linear model
new_columns = ['Package', 'Price']
lin_pumpkins = new_pumpkins.drop([c for c in new_pumpkins.columns if c not in new_columns], axis='columns')
lin_pumpkins
# X = encoded Package (2-D, n x 1); y = Price (2-D, n x 1) — positions assume
# the two remaining columns are in [Package, Price] order
X = lin_pumpkins.values[:, :1]
y = lin_pumpkins.values[:, 1:2]
# +
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from sklearn.model_selection import train_test_split
# Hold out 20% for testing; fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
pred = lin_reg.predict(X_test)
# NOTE: LinearRegression.score returns the R^2 coefficient of determination on
# the TRAINING data here, not a classification accuracy — the variable name
# (which also shadows sklearn's accuracy_score) is misleading.
accuracy_score = lin_reg.score(X_train, y_train)
print('Model Accuracy: ', accuracy_score)
# +
# Plot held-out points against the fitted regression line
plt.scatter(X_test, y_test, color='black')
plt.plot(X_test, pred, color='blue', linewidth=3)
plt.xlabel('Package')
plt.ylabel('Price')
plt.show()
# +
# Wider feature set for the polynomial model
new_columns = ['Variety', 'Package','City', 'Month', 'Price']
poly_pumpkins = new_pumpkins.drop([c for c in new_pumpkins.columns if c not in new_columns], axis='columns')
poly_pumpkins
# -
# Pairwise correlations, rendered as a heat-mapped table
corr = poly_pumpkins.corr()
corr.style.background_gradient(cmap='coolwarm')
# Positional selection: column 3 as the single feature, column 4 as the target
# (assumes the column order established above)
X = poly_pumpkins.iloc[:, 3:4].values
y = poly_pumpkins.iloc[:, 4:5].values
# +
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
# Degree-4 polynomial expansion feeding a linear regression
pipeline = make_pipeline(PolynomialFeatures(4), LinearRegression())
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
pipeline.fit(np.array(X_train), y_train)
y_pred = pipeline.predict(X_test)
# +
# Sort predictions by x so the curve plots left-to-right without zig-zags
df = pd.DataFrame({'x': X_test[:,0], 'y': y_pred[:,0]})
df.sort_values(by='x', inplace=True)
points = pd.DataFrame(df).to_numpy()
plt.plot(points[:, 0], points[:, 1], color='blue', linewidth=3)
plt.xlabel('Package')
plt.ylabel('Price')
plt.scatter(X, y, color='black')
plt.show()
# -
# Again R^2 on the training data, despite the 'Accuracy' label
accuracy_score = pipeline.score(X_train,y_train)
print('Model Accuracy: ', accuracy_score)
|
2-Regression/3-Linear/notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The Unscented Kalman Filter (UKF) for Nonlinear Estimation Problems
#
#
# ## The Kalman Filter
#
# The Kalman filter is an algorithm that uses the past measurements of the state of a system (also taking into account statistical noise and other inaccuracies) to predict the future state of the system. Its first use was on the Apollo missions to the Moon, but today Kalman filtering is extensively used in a vast array of applications in fields ranging from robotics to econometrics.
#
# The Kalman filter was originally invented to model linear systems. However, extensions of the method to deal with nonlinear systems have also been developed, such as the extended Kalman filter (EKF) and the unscented Kalman filter (UKF).
#
# In the **unscented Kalman filter (UKF)**, the state distribution is approximated by Gaussian random variables (GRVs) as in the extended Kalman filter (EKF). However the two methods differ in the way GRVs are propagated through the system dynamics: While the EKF propagates GRVs analytically through a first-order linearization of the non-linear system, the UKF uses a deterministic sampling approach, in which a minimal set of sample points (so-called *sigma points*) that capture the true mean and covariance of the GRV is propagated through the *true* nonlinear system. While the posterior mean and the covariance of the EKF achieves only first-order accuracy (Taylor expansion), which often yields sub-optimal performance, the UKF accuracy is in the 2nd order; thus, the UKF presents a superior alternative to the EKF. Remarkably, this performance advantage does not come at an extra computational cost.
#
# ## Problem Definition
# In this notebook, I use the Kalman filter approach (UKF) as a design tool to construct a dynamical system with a desired type of behavior. Qualitatively, (attractive) dynamical systems can exhibit three distinct types of dynamical behavior in the limit $t \rightarrow \infty$: *fixed points*, *oscillations* and *chaos*.
# Accordingly a dynamical system with unknown parameters, can be designed (i.e. its parameters can be inferred) so that it displays a desired dynamical behavior. In such a problem, the UKF method is used in the context of *parameter estimation*.
#
# The parameter estimation (inference) problem is formulated as a state-space model in which a nonlinear mapping,
#
# $$
# \begin{eqnarray*}
# y_k & = & g(x_k, \theta_k)
# \end{eqnarray*}
# $$
#
#
# with the input $x_k$, the output $y_k$, and the parameters to be inferred, $\theta_k$, is reformulated in the state-space representation:
#
#
# $$
# \begin{eqnarray*}
# \theta_k & = & \theta_{k-1} + \nu_k \\
# y_k & = & g(x_k, \theta_k) + u_k.
# \end{eqnarray*}
# $$
#
# Above $u_k \sim N(0, Q_k)$ represents the measurement noise and $\nu_k \sim N(0, R_k)$ is the artificial process noise which drives the system.
#
# In the context of the current problem, that is, designing a dynamical system of desired behavior, the nonlinear mapping $g(.)$ will be a nonlinear numerical routine that outputs the dynamical behavior of the system. In the dynamical systems theory, this behavior is encoded in the maximum of the Lyapunov exponents ($\lambda_{max}$) of the system. Hence, in our problem
#
# $$
# \begin{eqnarray*}
# \theta_k & = & \theta_{k-1} + \nu_k \\
# \lambda_{max} & = & \mathfrak{L}(\theta_k, y_0; f) + u_k,
# \end{eqnarray*}
# $$
#
# where $\mathfrak{L}(.)$ is the nonlinear mapping from the system parameters to the target dynamical behavior encoded by $\lambda_{max}$. Above, $y_0$ is the initial condition for the (n-dimensional) dynamical system, which evolves in time according to
#
# $$
# \frac{dy}{dt} = f(y; \theta).
# $$
#
# where $f$ is a gradient field. Also, notice that the role of our filter in this context will be a *smoother*, since at each step of the time series, the observed 'data' remains the same: $(\lambda_{max}, \lambda_{max}, \lambda_{max}, \ldots)$.
#
# **To summarize, given a dynamical system described by a gradient field $f$, its initial conditions $y_0$, and a set of undetermined parameters $\theta$, we seek to infer the values of the parameters to drive the system to produce the desired behavior as encoded by $\lambda_{max}$.**
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import scipy
from scipy import integrate
import sympy
from IPython.display import display, Math, Latex
sympy.init_printing(use_latex='mathjax')
# %matplotlib inline
# ### The Dynamical System
# Here I apply the UKF technique for the purpose of parameter estimation in the Lorenz model which serves as a good toy model for many applications involving chaos. The Lorenz system is given by the following autonomous dynamical system equations:
#
# $$\begin{align*}
# \dot{x} &= \sigma(y-x), \\
# \dot{y} &= x(\rho-z)-y, \\
# \dot{z} &= xy-\beta z.
# \end{align*}$$
#
# In his original paper [E. N. Lorenz, J. Atmos. Sci. 20, 130 (1963)], Lorenz used the parameters $\sigma = 10$, $\rho = 28$, and $\beta = 8/3$ for which the trajectories produce a strange attractor (i.e. chaotic dynamics). In our problem, this dynamical behavior (more accurately, the maximal Lyapunov exponent of the Lorenz system with this choice of the parameter values) will be our target. We will initialize the parameter values so that the Lorenz model produces non-chaotic (say, fixed point) dynamics. The unscented Kalman filter will then be utilized to drive the system from this non-chaotic behavior to the chaotic behavior defined by the target value of the maximal Lyapunov exponent; in the process the parameter values will be updated iteratively.
class LorenzSystem:
    """The Lorenz ODE system with parameters sigma, rho and beta.

    Defaults are the classic chaotic parameter set (sigma=10, rho=28,
    beta=8/3).
    """

    def __init__(self, sigma = 10, rho = 28, beta = 8./3.):
        self.sigma = sigma
        self.rho = rho
        self.beta = beta

    def dx_dt(self, x, t = None):
        """Return the time derivative of the state vector x (t is unused)."""
        x0, x1, x2 = x[0], x[1], x[2]
        return np.array([
            self.sigma * (x1 - x0),
            x0 * (self.rho - x2) - x1,
            x0 * x1 - self.beta * x2,
        ])
# ### ODE Solver
#
# In order to integrate the dynamical system equations using given values of initial points, we can use several standard routines available in `scipy.integrate`. However, here I prefer to use an explicit implementation utilizing a 4th order Runge-Kutta step.
# +
class ODESolver:
    """Base class for explicit fixed-step ODE integrators.

    Subclasses implement `advance`, which returns the next state from the
    stored trajectory.  Usage: construct with the RHS f(x, t) and step dt,
    call `set_initial_condition`, then `solve`.
    """
    def __init__(self, f, dt):
        """ f is function in the form f=f(x,t) """
        self.f = f
        self.dt = dt
    def advance(self):
        """Advance solution one time step (implemented by subclasses)."""
        raise NotImplementedError
    def set_initial_condition(self, u0, t0=0.):
        # Trajectory is accumulated in Python lists and converted to arrays
        # only once, at the end of solve().
        self.u = [] # u[k] is solution at time t[k]
        self.t = [] # time levels in the solution process
        self.u.append(u0)
        self.t.append(t0)
        self.k = 0 # time level counter
    def solve(self, T, terminate=None):
        """ Advance solution from t = t0 to t <= T, steps of dt
        as long as terminate(u, t, k) is False.
        terminate(u, t, k) is a user-given function returning True or False.
        By default, a terminate function which always returns False is used """
        if terminate is None:
            terminate = lambda u, t, k: False
        self.k = 0
        tnew = 0
        # Note: the loop tests the time of the *previous* step, so the final
        # stored time can slightly exceed T by at most one dt.
        while tnew <= T and not terminate(self.u, self.t, self.k):
            unew = self.advance()
            self.u.append(unew)
            tnew = self.t[-1] + self.dt
            self.t.append(tnew)
            self.k += 1
        return np.array(self.u), np.array(self.t)
class RungeKutta4(ODESolver):
    """Classic explicit fourth-order Runge-Kutta time stepper."""

    def advance(self):
        """Take one RK4 step from the latest stored state and return the new state."""
        step = self.dt
        rhs = self.f
        n = self.k
        state = self.u[n]
        t_now = self.t[-1]
        half_step = step / 2.0
        s1 = step * rhs(state, t_now)
        s2 = step * rhs(state + 0.5 * s1, t_now + half_step)
        s3 = step * rhs(state + 0.5 * s2, t_now + half_step)
        s4 = step * rhs(state + s3, t_now + step)
        # Weighted average of the four slope estimates
        return state + (1./6.) * (s1 + 2 * s2 + 2 * s3 + s4)
# -
# ### Calculation of the Lyapunov Spectra
#
# The concept of Lyapunov exponents was introduced in the dynamical systems theory for the purpose of measuring the sensitivity of the system to initial conditions. In an n-dimensional dynamical system, we can consider an initially orthonormal axes of n vectors in the tangent space at $y_0$. As the dynamical system evolves in time, the volume defined by the initially orthonormal axes gets distorted to form an n-dimensional ellipsoid in the tangent space at each successive point on the trajectory. An algorithm due to Benettin et al. (see Parker and Chua, 1989 in references) computes the average rate of growth (during the time evolution of the system) of the ith principal axis of the ellipsoid as $\lambda_i$. These quantities, sorted as $\lambda_1 \ge \lambda_2, \ldots, \lambda_n$ are called the Lyapunov exponents. The sign of the maximum of these exponents, determines the asymptotic dynamical behavior of the dynamical system. In particular, dynamical systems with $\lambda_{max} < 0 $ converge to the same stable fixed point in the phase space. Systems with $\lambda_{max} = 0 $ converge to an oscillatory solution defined by a limit-cycle and systems with $\lambda_{max} > 0 $ converge to a limit set of solutions defined by a strange (i.e. chaotic) attractor.
#
#
# In the Benettin algorithm, the Lyapunov spectra are obtained iteratively and directly from the dynamical system equations---unlike various other methods using time series. Since the calculation of Lyapunov exponents is computationally very expensive, in the following I implemented the Benettin algorithm using the `SymPy` library in order to compute the derivatives (hence the Jacobian matrix) in a reliable and fast approach using symbolic computation.
# +
def sym_to_np(x, t, params, xdot):
    """Build a NumPy-callable RHS for the combined state + perturbation system.

    Given the symbolic state vector ``x``, time symbol ``t``, parameter
    vector ``params`` and symbolic dynamics ``xdot``, augments the system
    with the variational (tangent-space) equations eta' = Df(x) * eta and
    lambdifies the result.  The returned function maps a flat array of
    length dim*dim + dim (state followed by the flattened perturbation
    matrix) to its time derivative of the same shape.
    """
    dim = xdot.shape[0]
    eta = sympy.Matrix(sympy.symarray('eta', (dim,dim))) # perturbation matrix
    # Jacobian of the dynamics drives the linearized perturbation evolution
    Df = xdot.jacobian(x)
    etadot = Df * eta
    # Stack state and flattened perturbation into one combined vector z
    z = x.col_join(eta.reshape(dim*dim,1))
    zdot = xdot.col_join(etadot.reshape(dim*dim,1))
    # lambdification to obtain a NumPy-aware function to compute zdot:
    mat2array = [{'ImmutableMatrix': np.array}, 'numpy']
    zdot_ = sympy.lambdify((z, t, params), zdot, modules=mat2array)
    # Flatten the (dim*dim+dim, 1) matrix output to a 1-D array for the solvers
    zdot_np = lambda z, t, params: zdot_(z, t, params).reshape((dim*dim + dim,))
    return zdot_np
def update_state_and_perturbation(zdot_np, params, x, u, tstart=0, tstop=1.0, dt=0.01, integrator='RK4'):
    """Evolve the state and its tangent-space perturbation over one interval.

    Integrates the combined system returned by ``sym_to_np`` from tstart to
    tstop and returns ``(x_new, dx)`` where ``x_new`` is the final state and
    ``dx = Phi @ u`` is the perturbation matrix applied to the orthonormal
    directions ``u``.

    Parameters:
        zdot_np    -- callable z, t, params -> dz/dt for the combined system
        params     -- parameter array forwarded to zdot_np
        x          -- initial state, shape (dim,)
        u          -- perturbation directions, shape (dim, dim)
        integrator -- 'RK4' (explicit Runge-Kutta) or 'ODEINT' (scipy)

    Raises:
        ValueError -- for an unrecognized ``integrator`` (previously this
                      fell through and crashed with a NameError on ``z``).
    """
    dim = x.shape[0]
    Phi0 = np.eye(dim)                # initial value for perturbation matrix Phi
    Phi0_ = Phi0.reshape((dim*dim,))
    z0 = np.concatenate((x, Phi0_))   # initial value for combined {x, Phi}
    if integrator == 'RK4':
        z_solver = RungeKutta4(lambda z, t: zdot_np(z, t, params), dt)
        z_solver.set_initial_condition(z0, tstart)
        z, t = z_solver.solve(tstop)
    elif integrator == 'ODEINT':
        t = np.arange(tstart, tstop, dt)  # time measurements
        z = integrate.odeint(lambda z, t: zdot_np(z, t, params), z0, t)
    else:
        # Fail fast with a clear message instead of a NameError below.
        raise ValueError("integrator must be 'RK4' or 'ODEINT', got {!r}".format(integrator))
    # Unpack the final combined vector: state first, then the flattened Phi
    x = z[-1, 0:dim]
    Phi = z[-1, dim:dim*dim+dim].reshape((dim,dim))
    dx = np.dot(Phi,u)
    return x, dx
def lyapunov_gram_schimidt(zdot_np, params, x, T=0.2, dt=0.01, Er=1.e-4, Ea=1.e-4, kmax=1000,
                           integrator='RK4', complete=True, debug=False):
    """
    Compute Lyapunov exponents with the Benettin / Gram-Schmidt algorithm.

    Repeatedly evolves the state together with a set of perturbation vectors
    for a period T, re-orthonormalizes the perturbations, and accumulates the
    log growth rates.  Iterates until the exponent estimates change by less
    than the mixed tolerance Er*||lyaps|| + Ea, or kmax iterations.

    Returns a (nums,) array of exponents (nums = dim if complete else 1), or
    the full per-iteration history when debug is True.

    Remarks:
    1) Make sure the initial state array, x, is already on the attractor for
    a chaotic system. It is best to externally evolve the system until the trajectory
    is settled on the strange attractor, then an arbitrary value of the state can be taken
    as an initial value for this program.
    2) How to choose a suitable value for T: Too small a value would mean loss of accuracy
    due to excessive orthogonalization, too large a value would build up too large numbers hence
    loss of numerical precision. Best values are system-dependent (see Parker and Chua).
    """
    dim = x.shape[0]  # dimension of state space
    u = np.eye(dim)   # initial perturbation matrix, Remark 2, p.79, Parker and Chua.
    if complete is True:  # True for computing all, False for computing maximum Lyapunov exp.
        nums = dim
    else:
        nums = 1
    lyaps = np.zeros((nums,))  # initialize lyapunov exponents
    sum_ = np.zeros((nums,))   # running sum of log growth factors per direction
    if debug is True:  # True for storing Lyapunov exponent at each iteration
        lyaps_evol = []
    k = 0
    while True:
        k += 1
        if (k == kmax):
            print("lyapunov_gram_schimidt: no convergence!")
            break
        lyaps_old = lyaps.copy()
        # dx is the linearized, tangent-space perturbation. We evolve the trajectory x
        # and the perturbation together for a period of T. We output the result to perform a
        # Gram-Schimidt orthonormalization.
        x, dx = update_state_and_perturbation(zdot_np, params, x, u, tstart=0, tstop=T, dt=dt,
                                              integrator=integrator)
        # perform the orthonormalization and update Lyapunov exponents:
        # each direction is projected off the previous ones, its norm is the
        # growth factor, and the exponent is the time-averaged log growth.
        for i in range(nums):
            vi = dx[:, i]
            for j in range(i):
                vi = vi - np.dot(vi, u[:,j]) * u[:,j]
            norm = np.linalg.norm(vi)
            u[:,i] = vi / norm
            sum_[i] += np.log(norm)
            lyaps[i] = sum_[i] / (k * T)
            #print("iter={:d} LE[{:d}] = {:6.3f}".format(k, i, lyaps[i]))
        if debug is True:
            lyaps_evol.append(np.copy(lyaps))
        # converged when the change is small relative to the current estimate
        if (np.linalg.norm(lyaps_old - lyaps) < Er * np.linalg.norm(lyaps) + Ea):
            break
    if debug is True:
        return np.array(lyaps_evol)
    else:
        return lyaps
# -
# In the following we will use the Benettin algorithm to compute only the maximal Lyapunov exponent, which solely determines the qualitative behaviour of the system. Before moving on to the UKF method and its application, let's compute the maximal Lyapunov exponent of the Lorenz system for the choice of system parameters $\sigma=10., \rho=28.,$ and $\beta=8./3$:
# +
# Define the Lorenz system, SymPy way:
u = sympy.Matrix(sympy.symarray('u', 3))  # dynamic variable symbols
t, sigma, rho, beta = sympy.symbols("t, sigma, rho, beta")  # time and system parameter symbols
params = sympy.Matrix([sigma, rho, beta])  # parameter vector (symbolic, rebound to numbers below)
lorenz = sympy.Matrix([sigma * (u[1] - u[0]), u[0] * (rho - u[2]) - u[1], u[0] * u[1] - beta * u[2]])
# Produce the Numpy-aware function (state + perturbation) for the Lorenz system
lorenz_np = sym_to_np(u, t, params, lorenz)
# Now calculate the Lyapunov exponents of the Lorenz System for the chosen parameter set below.
x0 = np.array([ 2, 0.5, 10])         # initial condition for state
params = np.array([10., 28., 8./3.]) # Lorenz system parameters (classic chaotic set)
# complete=False -> only the maximal exponent is computed
lyap = lyapunov_gram_schimidt(zdot_np=lorenz_np, params=params, x=x0, T=1.0, dt=0.01, Er=1.e-4,Ea=1.e-4, kmax=10000,
                              integrator="RK4", complete=False, debug=False)
# NOTE(review): float() on a shape-(1,) array is deprecated in recent NumPy;
# lyap[0] would be the forward-compatible form — confirm NumPy version.
print("lambda_max = {:.3f}".format(float(lyap)))
# -
# As a numerical check of the accuracy of the Benettin algorithm, compare the value we obtained above, $\lambda_{max} = 0.883$, to the reported value of $\lambda_{max} = 0.906$ (Sprott, 1997). We can improve the accuracy by using smaller $dt$ or trying different values for $T$ parameter in the Benettin algorithm (see reference Parker and Chua), but we want to be able to run the Benettin algorithm fast enough, since we will call it many times in the filter.
# ### The Unscented Kalman Filter
#
#
# The main idea behind the Unscented Kalman Filter (UKF) is to produce several sampling points (the sigma points) around the current state estimate based on its covariance. Once the sigma points are chosen, we propagate these points through the nonlinear mapping to get a more accurate estimation of the mean and the covariance. The application of the filter is therefore an iterative procedure. Each iteration is composed of a prediction step and an update step.
#
# In the "prediction step", we perturb the current parameter estimate by the driving process noise $\nu_k$ to obtain *a priori* estimates of the mean ($\hat{\theta}_k^{pr}$) and the covariance ($P_k^{pr}$) of the parameters, which are conditional on all but the current observation. In the "update step", we use the current observation ($y_k$) to calculate the *a posteriori* estimates of the mean ($\hat{\theta}_k^{po}$) and the covariance ($P_k^{po}$).
#
#
# *Initialization*
#
# $$\begin{eqnarray*}
# \theta_0^{po} & = & E(\theta) \\
# P_0^{po} & = & E( (\hat{\theta_0} - \theta) (\hat{\theta_0} - \theta)^T)
# \end{eqnarray*}$$
#
# *Prediction*
#
# $$\begin{eqnarray*}
# \hat{\theta}_k^{pr} & = & E(\theta | y_{i \leq k-1}) \\
# & = & \hat{\theta}_{k-1}^{po} \\
# P_k^{pr} & = & P_{k-1}^{po} + R_{k-1}
# \end{eqnarray*}$$
#
# *Update*
#
# $$\begin{eqnarray*}
# \hat{\theta}_k^{po} & = & \hat{\theta}_k^{pr} + K_k(y_k - \hat{y}_k)\\
# P_k^{po} & = & P_k^{pr} - K_k P_{\hat{y}_k} K_k^T
# \end{eqnarray*}$$
#
# where
#
# $$\begin{eqnarray*}
# Y_k & = & g(x_k, \Theta_k) \\
# \hat{y}_k & = & \sum_{i=0}^{2L} \omega_i^m [Y_k]_i \\
# P_{\hat{y}_k} & = & \sum_{i=0}^{2L} \omega_i^c ([Y_k]_i -\hat{y}_k ) ([Y_k]_i -\hat{y}_k )^T + Q_k \\
# P_{\hat{\theta}^{pr}_k \hat{y}_k} & = & \sum_{i=0}^{2L} \omega_i^c ([\Theta_k]_i - \hat{\theta}_k^{pr}) ([Y_k]_i -\hat{y}_k )^T \\
# K_k & = & P_{\hat{\theta}^{pr}_k \hat{y}_k} P_{\hat{y}_k}^{-1}
# \end{eqnarray*}$$
#
# The set of sigma points $\Theta_k$ and the corresponding weights are given by
#
# $$\begin{align*}
# [\Theta_k]_0 & = \hat{\theta}_k^{pr} & & & \omega_0^m & = \frac{\lambda}{L+\lambda} & i & =0 \\
# [\Theta_k]_i & = \hat{\theta}_k^{pr} + \left[\sqrt{ (L+\lambda) P_k^{pr}}\right]_i & i & = 1, \ldots, L & \omega_0^c & = \frac{\lambda}{L + \lambda} + (1-\alpha^2 + \beta) & i & = 0 \\
# [\Theta_k]_i & = \hat{\theta}_k^{pr} - \left[\sqrt{ (L+\lambda) P_k^{pr}}\right]_i & i & = L+1, \ldots, 2L & \omega_i^c & = \omega_i^m = \frac{1}{2(L + \lambda)} & i & = 1, \ldots, 2L
# \end{align*}$$
#
# +
class UKF:
    """Unscented Kalman filter specialized for parameter estimation.

    The "state" being filtered is the parameter vector theta; the nonlinear
    observation func maps (x0, *theta) to an observation that is driven
    toward the fixed target y_target at every iteration (i.e. the filter acts
    as a smoother).
    """
    def __init__(self, y_target, func, x0, theta0, P0, alpha, beta, kappa, Q, R):
        """func must return a numpy array"""
        self.y_target = y_target
        self.func = func       # external function y=f(x0, *args). [ *list(theta0) -> *args ]
        self.N, = y_target.shape  # dimension of the observation space, y=f(x)
        self.x0 = x0           # initial signal, numpy array of (M,)
        self.theta0 = theta0   # initial mean-values of parameters, numpy array of (L,)
        self.P0 = P0           # initial covariance matrix, numpy array of (L,L)
        self.L, = theta0.shape # dimension of parameter space
        self.M, = x0.shape     # dimension of signal space
        self.alpha = alpha     # UKF parameter (sigma-point spread)
        self.beta = beta       # UKF parameter (prior-distribution knowledge)
        self.kappa = kappa     # UKF parameter (secondary scaling)
        self.Q = Q             # measurement noise covariance in the state-space model
        self.R = R             # artificial process noise covariance in the state-space model
        # Composite scaling factor lambda and the 2L+1 sigma-point weights
        self.lambda_ = self.alpha**2 * (self.L + self.kappa) - self.L
        self.num_sigmas = 2*self.L + 1  # UKF parameter, number of sigma points
        Wc = np.full(self.num_sigmas, 1./(2*(self.L + self.lambda_)))
        Wm = Wc.copy()
        Wm[0] = self.lambda_ / (self.L + self.lambda_)
        Wc[0] = Wm[0] + (1. - self.alpha**2 + self.beta)
        self.Wc = Wc  # covariance weights
        self.Wm = Wm  # mean weights
        # store values (filled in by estimate())
        self.all_theta = None
        self.all_P = None
        self.all_y = None
    def sigma_points(self, theta, P):
        #returns sigma points for a distribution of parameters (Theta) and
        #for distribution of measurements (Y)
        # calculate the sigma points for the parameters
        Theta = np.zeros((self.num_sigmas, self.L))
        # rows of the Cholesky factor provide the +/- offsets around the mean
        U = scipy.linalg.cholesky((self.L + self.lambda_) * P)
        Theta[0] = theta
        for k in range(self.L):
            Theta[k + 1] = theta + U[k]
            Theta[self.L + k + 1] = theta - U[k]
        # calculate the sigma-points for the measurements by propagating each
        # parameter sigma point through the (expensive) nonlinear observable
        Y = np.empty((self.num_sigmas,self.N))
        for i in range(self.num_sigmas):
            Y[i] = self.func(self.x0, *list(Theta[i]))
        return Theta, Y
    def unscented_transform(self, Y):
        # use unscented transform to get the mean and covariance for observations, y and Py
        y = np.dot(self.Wm, Y)
        Py = np.zeros((self.N, self.N))
        for i in range(self.num_sigmas):
            Py += self.Wc[i] * np.outer(Y[i] - y, Y[i] - y)
        return y, Py
    def update(self, theta_pr, P_pr):
        #predicted values of mean and covariance are updated
        Theta, Y = self.sigma_points(theta_pr, P_pr)
        # mean and covariance of prediction passed through UT
        y, Py = self.unscented_transform(Y)
        Py += self.Q  # add measurement noise covariance
        # compute cross variance of the state and the measurements
        Pty = np.zeros((self.L, self.N))
        for i in range(self.num_sigmas):
            Pty += self.Wc[i] * np.outer(Theta[i] - theta_pr, Y[i] - y)
        # compute Kalman gain and the posterior mean/covariance
        K = np.dot(Pty, scipy.linalg.inv(Py))
        theta = theta_pr + np.dot(K, self.y_target - y)
        P = P_pr - K.dot(Py).dot(K.T)
        return theta, P
    def estimate(self, x0, iter_):
        # Run iter_ predict/update cycles, storing the full history
        self.all_theta = np.zeros((iter_, self.L))
        self.all_P = np.zeros((iter_, self.L, self.L))
        self.all_y = np.zeros((iter_, self.N))
        theta = self.theta0  # initial value of mean.
        P = self.P0          # initial value of cov.
        self.all_theta[0,:] = self.theta0
        self.all_P[0, :, :] = P
        for i in range(1, iter_):
            # predict step: mean carries over, covariance inflated by process noise
            theta_pr = theta
            P_pr = P + self.R
            # update step
            theta, P = self.update(theta_pr, P_pr)
            self.all_theta[i, :] = theta
            self.all_P[i, :, :] = P
            self.all_y[i, :] = self.func(x0, *list(theta))
            if i % 50 == 0 :  # let's print updated values of the parameters at certain intervals
                print("iteration = {:4d}: sigma = {:.2f}, rho = {:.2f}, beta = {:.2f}".format(i, *list(theta) ))
# -
# ## Application: Chaotification of the Lorenz System
#
# The Lorenz system that we defined above will serve as a suitable toy model for a parameter estimation application. Let us initialize the Lorenz system in a non-chaotic regime (specifically, a stable fixed point regime). Our goal will be to drive the system to a chaotic regime. In particular, we will conceive a positive real value for the maximal Lyapunov exponent as a target. The UKF then will drive the system to achieve the chaotic dynamics encoded by the maximal Lyapunov exponent by updating the system parameters.
# ### A Technical Side Note: Constraining the Parameters in the UKF
#
# In the formulation of the UKF approach, there are no constraints for any of the system parameters. Therefore, in general, each parameter can take any value in $(-\infty, +\infty)$. However, like many other physical systems, the Lorenz model parameters need to be constrained to positive real numbers. Although this can be achieved through modifying the UKF equations, I opt to improvise a quick-and-dirty approach to achieve the same end. The idea here is to introduce an extra dimension in the observable function (i.e. the nonlinear mapping $g(.)$ in the general formulation) for every parameter to be constrained. These extra "observables" can then be used to assign penalties (such as large positive values) any time the UKF updates the parameters to values lying outside the allowed window, which may be bounded in either or both sides by a finite value. As long as the parameter values remain within the boundaries, no penalty would be given.
# +
def penalty(x, lb=0, ub=1):
    """Exponential penalty for values of x outside the window [lb, ub].

    Returns 0 inside the window (inclusive); outside, the penalty grows as
    100 * exp(d**2) where d is the distance to the violated bound.
    """
    const = 100.
    if lb <= x <= ub:
        return 0.
    violated_bound = lb if x < lb else ub
    return const * np.exp(abs(x - violated_bound)**2)
def g(x, sigma, rho, beta):
    """
    Non-linear observable mapping for the UKF.

    x : (3,) numpy array holding the (constant) initial Lorenz state
    sigma, rho, beta : parameter values currently proposed by the filter

    Returns a (4,) array: the maximal Lyapunov exponent followed by one
    penalty value per parameter (non-zero only outside [0.5, 30]).
    """
    # Integrate the Lorenz system forward from x for 10 time units.
    t_begin, t_end, step = 0., 10., 0.01
    model = LorenzSystem(sigma, rho, beta)
    solver = RungeKutta4(model.dx_dt, step)
    solver.set_initial_condition(x, t_begin)
    trajectory, _ = solver.solve(t_end)
    # The trajectory end point seeds the Lyapunov-exponent computation below.
    x0 = trajectory[-1, :]
    # OBSERVABLES TO EXPORT
    # maximal Lyapunov exponent
    lyap = lyapunov_gram_schimidt(zdot_np=lorenz_np,
                                  params=np.array([sigma, rho, beta]),
                                  x=x0, T=1.0, dt=0.01, Er=1.e-4, Ea=1.e-4,
                                  kmax=10000, integrator="RK4",
                                  complete=False, debug=False)
    # Soft constraints keeping each parameter inside [0.5, 30].
    penalties = np.array([penalty(p, lb=0.5, ub=30)
                          for p in (sigma, rho, beta)])
    return np.concatenate((lyap, penalties))
# -
# Note that we modified the mapping $g(.)$, which normally would calculate the maximal Lyapunov exponent as $\lambda_{max} = \mathfrak{L}(\theta_k, y_0; f)$, to return a NumPy array of shape (4,) because we include three penalty values. Above $x$ represents the initial condition, $y_0$, for the Lorenz model's 3-dimensional phase space. In our problem, $x$ will always remain a constant, so every time we update the parameters to obtain a new Lorenz model, the time evolution will always start with this initial condition.
#
#
# +
# initialize state vector in Lorenz model
x0 = np.array([2.0, 0.5, 1.0]) # arbitrarily chosen
# initialize parameters (sigma, rho, beta):
theta0 = np.array([10.0, 10.0, 10.0]) # initial mean values of the parameters
P0 = 0.01*np.diag([1, 1, 1]) # initial state covariance
# initialize measurement and noise covariances
# Q is 4x4 because g(.) returns 4 observables: lambda_max plus one penalty per parameter
Q = 0.01*np.diag([1, 1, 1, 1]) # measurement noise covariance
R = 0.01*np.diag([1, 1, 1]) # process noise covariance
# initialize UKF parameters (see reference Labbe for explanations)
alpha = 0.8
beta = 2.
kappa = 0.
no_iterations=300 # number of iterations
# set the target dynamics
# the trailing zeros are the targets for the three parameter penalties
y_target = np.array([0.883, 0., 0., 0.]) # target value of maximal Lyapunov exponent (calculated above)
# create an instance of the filter for our problem
ukf = UKF(y_target, g, x0, theta0, P0, alpha, beta, kappa, Q, R)
# -
# ### Parameter Estimation (Inference) Using the UKF
#
# Now we are ready to run the filter to infer the Lorenz model's parameter values that would produce the chaotic dynamics encoded by the maximal Lyapunov exponent value of 0.883.
# Run the filter: iteratively update the parameter estimates so that
# g(x0, theta) approaches y_target.
ukf.estimate(x0, no_iterations)
iterations = np.arange(no_iterations)
# Let's print out initial and final values of the relevant quantities:
# +
print("Maximal Lyapunov exponent: target value vs achieved value")
print("y_initial = {:.3f}".format(g(x0, *list(theta0))[0]))
print("y_target = {:.3f}".format(y_target[0]))
print("y last = {:.3f}\n".format( ukf.all_y[-1,0]))
print("Inferred parameters: initial and achieved values (mean)")
print("sigma = {:.2f}, rho = {:.2f}, beta = {:.2f}".format(*list(ukf.theta0) ))
print("sigma = {:.2f}, rho = {:.2f}, beta = {:.2f}\n".format(*list(ukf.all_theta[-1,:]) ))
print("Inferred parameters: initial and achieved values (covariance)")
print("P init=", P0)
print("P last=", ukf.all_P[-1,:,:])
# -
# In order to be able to plot how the UKF drives the system to produce the desired output, and infer the values of the system parameters, we need to integrate the Lorenz model equations for the parameter values we started with, and also for the parameter values we obtained at the end.
# +
# integration of the Lorenz model (using the parameter values we started with)
tstart, tstop, dt = 0., 10., 0.01
x0 = np.array([2.0, 2.5, 5.0]) # initial values for Lorenz model state variables (x,y,z)--we keep this constant
params = list(ukf.all_theta[0,:]) # initial values for sigma, rho, beta
lorenz_model = LorenzSystem(*params)
lorenz_model_derivatives = lorenz_model.dx_dt
lorenz_solver = RungeKutta4(lorenz_model_derivatives, dt)
lorenz_solver.set_initial_condition(x0, tstart)
x_ti, t = lorenz_solver.solve(tstop)
# integration of the Lorenz model (using the parameter values obtained at the end)
# since we do not change x0, below we use the same x0 as above.
params = list(ukf.all_theta[-1,:]) # final values for sigma, rho, beta
lorenz_model = LorenzSystem(*params)
lorenz_model_derivatives = lorenz_model.dx_dt
lorenz_solver = RungeKutta4(lorenz_model_derivatives, dt)
lorenz_solver.set_initial_condition(x0, tstart)
x_tj, t = lorenz_solver.solve(tstop)
# +
# Main panel: lambda_max at each UKF iteration; the insets show the
# trajectories integrated with the initial (left) and final (right) parameters.
fig, ax0 = plt.subplots()
fig.set_size_inches(13,5)
ax0.plot(iterations[1:], ukf.all_y[1:,0], lw=3)
#ax0.annotate('', fontsize = 12, xy = (3, -2), xycoords = 'data', \
#    xytext=(40, -1.5), arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=0.1"))
#ax0.annotate('fixed point dynamics', xy = (40, -0.6), xycoords = 'data', fontsize = 12)
ax0.axhline(y=0, c="k")  # lambda_max = 0: boundary between regimes
ax0.axhline(y=y_target[0], c="r")  # the target lambda_max
ax0.set_ylim(ymax=3)
ax0.set_xlabel("iterations", fontsize = 20)
ax0.set_ylabel(r"$\lambda_{max}$", fontsize = 20)
ax1 = fig.add_axes([0.20, 0.15, 0.08, 0.25], projection='3d') # x,y, width, height (all relative to 1)
ax1.axis("on")
ax1.xaxis.set_major_locator(plt.NullLocator())
ax1.yaxis.set_major_locator(plt.NullLocator())
ax1.zaxis.set_major_locator(plt.NullLocator())
ax1.plot(x_ti[:,0], x_ti[:,1], x_ti[:,2], 'r-', lw=2.0)
ax1.view_init(30, 20) # altitude and azimuth in degrees
ax2 = fig.add_axes([0.80, 0.62, 0.08, 0.25], projection='3d') # x,y, width, height (all relative to 1)
ax2.axis("on")
ax2.xaxis.set_major_locator(plt.NullLocator())
ax2.yaxis.set_major_locator(plt.NullLocator())
ax2.zaxis.set_major_locator(plt.NullLocator())
ax2.plot(x_tj[:,0], x_tj[:,1], x_tj[:,2], 'r-', lw=0.5)
ax2.view_init(30, 120) # altitude and azimuth in degrees
ax0.annotate('fixed point dynamics', xy = (60, -1.5), xycoords = 'data', fontsize = 12)
ax0.annotate('chaotic dynamics', xy = (220, 1.8), xycoords = 'data', fontsize = 12)
ax0.annotate(r'$\lambda_{max}^{target}$', xy = (1.02, 0.6), xycoords = 'axes fraction', fontsize = 20, color = "r")
# -
# The plot above shows how the UKF drives the system from an initial configuration in the fixed-point regime to the chaotic regime (the boundary between these two dynamical regimes is the $\lambda_{max}=0$ line, on which the dynamical behavior is limit cycles). Parameters of the Lorenz model kept getting updated in the chaotic regime until the $\lambda_{max}$ of the model reached $\lambda_{max}^{target}$.
# +
# Trace of each inferred parameter value (mean) across UKF iterations.
fig, ax = plt.subplots(1,3)
fig.subplots_adjust(wspace=0.5)
fig.set_size_inches(15, 3)
ax[0].set_xlabel("iterations", fontsize = 20)
ax[0].set_ylabel(r"$\sigma$", fontsize = 20)
ax[0].plot(iterations, ukf.all_theta[:,0])
ax[1].set_xlabel("iterations", fontsize = 20)
ax[1].set_ylabel(r"$\rho$", fontsize = 20)
ax[1].plot(iterations, ukf.all_theta[:,1])
ax[2].set_xlabel("iterations", fontsize = 20)
ax[2].set_ylabel(r"$\beta$", fontsize = 20)
ax[2].plot(iterations, ukf.all_theta[:,2])
# -
# Plots above show the inferred values of the parameters $\sigma$, $\rho$, and $\beta$ at successive UKF iterations.
# ## References:
#
# 1) <NAME>. (2018). *Kalman and Bayesian Filters in Python*. (available online) <br>
# 2) <NAME>. and <NAME>. (1989). *Practical Numerical Algorithms for Chaotic Systems*. New York, USA: Springer-Verlag. <br>
# 3) <NAME>, *Designing attractive models via automated identification of chaotic and oscillatory dynamical regimes*, Nature Communications, (2011) 2:489. <br>
# 4) <NAME>. (1997). *Lyapunov Exponent and Dimension of the Lorenz Attractor*, http://sprott.physics.wisc.edu/chaos/lorenzle.htm
|
.ipynb_checkpoints/kalman-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.3 64-bit (''base'': conda)'
# metadata:
# interpreter:
# hash: bcf6d6f1e0171d0d91abd06fa98fe319d27d921fb762534eb067c2306250a62d
# name: python3
# ---
# # Profanity Filter
# +
# download datasets
# # !wget https://github.com/vzhou842/profanity-check/raw/master/profanity_check/data/clean_data.csv
# +
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.calibration import CalibratedClassifierCV
from sklearn.svm import LinearSVC
# NOTE: `from sklearn.externals import joblib` was removed here: that import
# path was dropped in scikit-learn 0.23 and raises ImportError, and the only
# uses (joblib.dump below) are commented out. Use `import joblib` if needed.
import pickle
# Read in data
data = pd.read_csv('clean_data.csv')
texts = data['text'].astype(str)
y = data['is_offensive']
# Vectorize the text: bag-of-words counts, English stop words removed,
# tokens kept only if they appear in at least 0.01% of documents
vectorizer = CountVectorizer(stop_words='english', min_df=0.0001)
X = vectorizer.fit_transform(texts)
# Train the model
# model = LinearSVC(class_weight="balanced", dual=False, tol=1e-2, max_iter=1e5)
model = LinearSVC()
#cclf = CalibratedClassifierCV(base_estimator=model)
# cclf.fit(X, y)
model.fit(X,y)
# Save the fitted vectorizer and model so they can be reloaded for inference
# joblib.dump(vectorizer, 'vectorizer.joblib')
# joblib.dump(cclf, 'model.joblib')
with open('vectorizer_profanity.pickle', 'wb') as f:
    pickle.dump(vectorizer, f)
with open('model_profanity.pickle', 'wb') as f:
    pickle.dump(model, f)
|
notebooks/profanity_filter/profanity_filter.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.6 64-bit
# name: python3
# ---
# # Image classification on CINIC-10
# Dataset source: [https://github.com/BayesWatch/cinic-10](https://github.com/BayesWatch/cinic-10)
#
# License: [MIT](https://github.com/BayesWatch/cinic-10/blob/master/LICENSE)
# ## Install dependencies and import
# !pip3 install ipyplot # display images
# !pip3 install crowd-kit==0.0.5
# !pip3 install toloka-kit==0.1.13
# +
import datetime
import time
import pandas as pd
import numpy as np
import ipyplot
from sklearn.metrics import balanced_accuracy_score
import os
import logging
import sys
import toloka.client as toloka
import toloka.client.project.template_builder as tb
from crowdkit.aggregation import DawidSkene
# %matplotlib inline
# -
# Route INFO-level log records (including toloka-kit's) to stdout so that
# progress is visible inside the notebook.
logging.basicConfig(
    format='[%(levelname)s] %(name)s: %(message)s',
    level=logging.INFO,
    stream=sys.stdout,
)
# # Load the dataset
# Number of images to sample from the CINIC-10 test split for annotation.
N_ROWS = 1000
# +
def sample_stratified(df, label_column, n_rows):
    """Draw roughly n_rows rows from df while preserving the class distribution.

    Each class in label_column contributes a share proportional to its
    frequency; the result is shuffled and re-indexed from 0.
    """
    total = len(df)

    def take_share(group):
        # per-class quota, rounded to the nearest integer
        quota = int(np.rint(n_rows * len(group) / total))
        return group.sample(quota)

    stratified = df.groupby(label_column, group_keys=False).apply(take_share)
    return stratified.sample(frac=1).reset_index(drop=True)
# CINIC-10 test split hosted on S3; img_path entries are relative to base_url.
base_url = 'https://tlk.s3.yandex.net/ext_dataset/CINIC-10'
df = pd.read_csv(os.path.join(base_url, 'test.csv'))
df['img_url'] = df.img_path.apply(lambda p: os.path.join(base_url, p))
df = sample_stratified(df, 'label', n_rows=N_ROWS)
df.head()
# -
# Show one representative image per class.
ipyplot.plot_class_representations(images=df.img_url, labels=df.label, img_width=70)
# # Setup the project
# Authenticate against the production Toloka environment with an OAuth token.
toloka_client = toloka.TolokaClient(input("Enter your token:"), 'PRODUCTION')
# ## Create project
# Project container: public metadata shown to workers plus a requester-only comment.
project = toloka.Project(
    public_name='Small images classification',
    public_description='Classify small images into 10 categories',
    private_comment='OOTB: CINIC-10'
)
# Each task takes an image URL as input and returns a string label.
input_specification = {'image': toloka.project.UrlSpec()}
output_specification = {'result': toloka.project.StringSpec()}
CINIC_LABELS = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
len(CINIC_LABELS)
# ## Annotator interface
# +
# Square image viewer bound to the 'image' input field.
image_viewer = tb.ImageViewV1(tb.InputData('image'),
    ratio=[1, 1],
    popup=False,
)
# One radio button per CINIC-10 class; an answer is required before submit.
label_buttons = [tb.GroupFieldOption(l, l.capitalize()) for l in CINIC_LABELS]
radio_group_field = tb.ButtonRadioGroupFieldV1(
    tb.OutputData('result'),
    label_buttons,
    validation=tb.RequiredConditionV1(),
)
# Keep the task column narrow (300 px) so the small images are not blown up.
task_width_plugin = tb.TolokaPluginV1(
    'scroll',
    task_width=300,
)
# Keyboard shortcuts 1..0 map to the ten labels in CINIC_LABELS order.
hot_keys_plugin = tb.HotkeysPluginV1(
    key_1=tb.SetActionV1(tb.OutputData('result'), 'airplane'),
    key_2=tb.SetActionV1(tb.OutputData('result'), 'automobile'),
    key_3=tb.SetActionV1(tb.OutputData('result'), 'bird'),
    key_4=tb.SetActionV1(tb.OutputData('result'), 'cat'),
    key_5=tb.SetActionV1(tb.OutputData('result'), 'deer'),
    key_6=tb.SetActionV1(tb.OutputData('result'), 'dog'),
    key_7=tb.SetActionV1(tb.OutputData('result'), 'frog'),
    key_8=tb.SetActionV1(tb.OutputData('result'), 'horse'),
    key_9=tb.SetActionV1(tb.OutputData('result'), 'ship'),
    key_0=tb.SetActionV1(tb.OutputData('result'), 'truck'),
)
project_interface = toloka.project.TemplateBuilderViewSpec(
    config=tb.TemplateBuilder(
        view=tb.ListViewV1([image_viewer, radio_group_field]),
        plugins=[task_width_plugin, hot_keys_plugin],
    )
)
# Attach the input/output specification and the interface to the project.
project.task_spec = toloka.project.task_spec.TaskSpec(
    input_spec=input_specification,
    output_spec=output_specification,
    view_spec=project_interface,
)
# -
# Worker-facing instructions (HTML) shown in the task interface.
# Typos fixed: "Chose" -> "Choose", "can not" -> "cannot".
project.public_instructions = """
In this task, you will see images from 10 different classes.<br/>
Your task is to classify these images.<br/>
<b>Some images are blurry and hard to label</b>. That's the nature of the task, so just assign whatever label seems most appropriate.
How to complete the task:
<ul>
<li>Look at the picture.</li>
<li>Click on the image to resize it. You can rotate the image if it's in the wrong orientation.</li>
<li>Choose one of the possible answers. If the picture is unavailable or you have any other technical difficulty, please write us about it.</li>
<li>If you think that you cannot classify the image correctly, choose the most appropriate label in your opinion.</li>
<li>You can use keyboard shortcuts (numbers from 1 to 0) to pick labels.</li>
</ul>
""".strip()
# Register the project on the Toloka platform.
project = toloka_client.create_project(project)
# ## Create training tasks
# Training pool: suites of 10 known-answer tasks; one suite must be passed.
training_pool = toloka.training.Training(project_id=project.id,
    private_name='Training pool',
    training_tasks_in_task_suite_count=10,
    task_suites_required_to_pass=1,
    may_contain_adult_content=False,
    inherited_instructions=True,
    assignment_max_duration_seconds=60*5,
    retry_training_after_days=1,
    mix_tasks_in_creation_order=True,
    shuffle_tasks_in_task_suite=True,
)
training_pool = toloka_client.create_training(training_pool)
# One example image per class becomes a training task with a known solution.
label_examples = {label: df[df.label == label].head(1).img_url.item() for label in CINIC_LABELS}
tasks = [
    toloka.Task(input_values={'image': url},
        known_solutions=[toloka.task.BaseTask.KnownSolution(output_values={'result': label})],
        message_on_unknown_solution=f'Incorrect label! The actual label is: {label}',
        infinite_overlap=True,
        pool_id=training_pool.id)
    for label, url in label_examples.items()
]
toloka_client.create_tasks(tasks, allow_defaults=True)
# ## Create task Pool
# Main annotation pool: $0.01 per assignment, 5-minute limit, open for a year.
pool = toloka.Pool(
    project_id=project.id,
    private_name='Pool',
    may_contain_adult_content=False,
    reward_per_assignment=0.01,
    assignment_max_duration_seconds=60*5,
    will_expire=datetime.datetime.utcnow() + datetime.timedelta(days=365),
)
# Each task is labeled by 5 different workers (overlap = 5).
pool.defaults = toloka.pool.Pool.Defaults(
    default_overlap_for_new_tasks=5,
    default_overlap_for_new_task_suites=0,
)
# 10 real tasks per task suite.
pool.set_mixer_config(
    real_tasks_count=10,
)
# Only workers who indicated English.
pool.filter = toloka.filter.Languages.in_('EN')
# +
# Workers must have passed the training pool with a skill value of at least 30.
pool.quality_control.training_requirement = toloka.quality_control.QualityControl.TrainingRequirement(
    training_pool_id=training_pool.id,
    training_passing_skill_value=30,
)
# Rule 1: ban (1 day, project scope) workers who disagree with the majority
# vote on more than 30% of their recent answers.
pool.quality_control.add_action(
    collector=toloka.collectors.MajorityVote(
        answer_threshold=4,
        history_size=5,
    ),
    conditions=[
        toloka.conditions.TotalAnswersCount >= 5,
        toloka.conditions.IncorrectAnswersRate > 30,
    ],
    action=toloka.actions.RestrictionV2(
        scope='PROJECT',
        duration=1,
        duration_unit='DAYS',
        private_comment='Wrong on over 30% cases',
    ),
)
# Rule 2: ban workers who submit 3 of their last 5 assignments in under 15 s.
pool.quality_control.add_action(
    collector=toloka.collectors.AssignmentSubmitTime(history_size=5, fast_submit_threshold_seconds=15),
    conditions=[
        toloka.conditions.TotalSubmittedCount >= 5,
        toloka.conditions.FastSubmittedCount >= 3],
    action=toloka.actions.RestrictionV2(
        scope='PROJECT',
        duration=1,
        duration_unit='DAYS',
        private_comment='Answering too fast',
    ),
)
# Rule 3: ban workers who skip 3 assignments in a row.
pool.quality_control.add_action(
    collector=toloka.collectors.SkippedInRowAssignments(),
    conditions=[toloka.conditions.SkippedInRowCount >= 3],
    action=toloka.actions.RestrictionV2(
        scope=toloka.user_restriction.UserRestriction.PROJECT,
        duration=1,
        duration_unit='DAYS',
        private_comment='Lazy performer',
    )
)
# -
pool = toloka_client.create_pool(pool)
# ## Create tasks from dataset
# One annotation task per sampled image.
tasks = [
    toloka.Task(input_values={'image': url}, pool_id=pool.id)
    for url in df.img_url
]
toloka_client.create_tasks(tasks, allow_defaults=True)
# # Start annotation
# Opening the pools makes the tasks available to workers.
training_pool = toloka_client.open_pool(training_pool.id)
pool = toloka_client.open_pool(pool.id)
# +
pool_id = pool.id
def wait_pool_for_close(pool_id, minutes_to_wait=0.5):
    """Block until the pool is closed, printing its completion % every poll.

    minutes_to_wait: polling interval in minutes between analytics requests.
    """
    sleep_time = 60 * minutes_to_wait
    pool = toloka_client.get_pool(pool_id)
    while not pool.is_closed():
        # Ask Toloka analytics for the pool's completion percentage.
        op = toloka_client.get_analytics([toloka.analytics_request.CompletionPercentagePoolAnalytics(subject_id=pool.id)])
        op = toloka_client.wait_operation(op)
        percentage = op.details['value'][0]['result']['value']
        print(
            f'    {datetime.datetime.now().strftime("%H:%M:%S")}\t'
            f'Pool {pool.id} - {percentage}%'
        )
        time.sleep(sleep_time)
        # Refresh pool status before re-testing the loop condition.
        pool = toloka_client.get_pool(pool.id)
    print('Pool was closed.')
wait_pool_for_close(pool_id)
# -
# Close the training pool now that the main pool is done.
training_pool = toloka_client.close_pool(training_pool.id)
# # Extract results
# Download all submitted assignments and rename columns to what crowd-kit expects.
answers_df = toloka_client.get_assignments_df(pool_id)
answers_df = answers_df.rename(columns={
    'INPUT:image': 'task',
    'OUTPUT:result': 'label',
    'ASSIGNMENT:worker_id': 'performer',
})
# # Aggregate results
# Resolve the 5 overlapping labels per image with Dawid-Skene aggregation.
aggregated_answers = DawidSkene(n_iter=100).fit_predict(answers_df)
aggregated_answers = aggregated_answers.reset_index()
aggregated_answers.columns = ['img_url', 'pred_label']
# Join the aggregated predictions back to the ground-truth labels.
aggregated_answers = aggregated_answers.merge(df, on='img_url')
aggregated_answers.head()
# # View results
# +
# Show 10 random images with their true vs. aggregated labels.
sample = aggregated_answers.sample(10)
captions = [f'True: {row.label}\nPred: {row.pred_label}' for row in sample.itertuples()]
ipyplot.plot_images(
    images=sample.img_url.values,
    labels=captions,
    max_images=10,
    img_width=100,
)
# -
# # View mistakes
# +
# Show a sample of images where aggregation disagreed with ground truth.
wrong_answers = aggregated_answers[aggregated_answers.pred_label != aggregated_answers.label]
sample = wrong_answers.sample(12)
captions = [f'True: {row.label}\nPred: {row.pred_label}' for row in sample.itertuples()]
ipyplot.plot_images(
    images=sample.img_url.values,
    labels=captions,
    max_images=10,
    img_width=100,
)
# -
# # Obtain accuracy
# Balanced accuracy averages per-class recall, so class imbalance cannot inflate it.
accuracy = balanced_accuracy_score(aggregated_answers.label, aggregated_answers.pred_label)
print(f'Accuracy: {accuracy:.2f}')
print(f'Error: {1-accuracy:.2f}')
|
examples/benchmarks/image_classification_cinic10.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Quick Draw GAN
#
# A Generative Adversarial Network trained of the Google Quickdraw dataset found here: https://github.com/googlecreativelab/quickdraw-dataset#preprocessed-dataset
#
# Created in tf.keras
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import tensorflow.keras as K
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Dense, Dropout, Activation,Reshape
from tensorflow.keras.layers import BatchNormalization, Conv2D,Flatten, Conv2DTranspose, UpSampling2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint,TensorBoard
from time import time
# ## Helper Functions
from helpers import get_np,imshow,get_callbacks
# ## Load the Data
# Quick Draw 'apple' bitmaps loaded into a numpy array via the project helper.
inp_path = 'full_numpy_bitmap_apple.npy'
data = get_np(inp_path)
img_w,img_h = data.shape[1:3]  # image width/height used for the discriminator input
imshow(data,1040)  # display one sample — presumably index 1040; imshow is a project helper
data.shape
# # Discriminator
def discriminator_builder(width = 64,p=0.4):
    """Build the GAN discriminator.

    Four Conv2D+Dropout stages followed by a sigmoid head that outputs the
    probability that the input image is real.

    width : base number of filters (multiplied by 1, 2, 4, 8 per stage)
    p     : dropout rate applied after every convolution
    """
    # Input is a single-channel image at the dataset's native size.
    inputs = Input((img_w, img_h, 1))
    x = inputs
    # (filter multiplier, stride) for each of the four convolutional stages.
    for mult, stride in ((1, 2), (2, 2), (4, 2), (8, 1)):
        x = Conv2D(width * mult, 5, strides=stride, padding='same',
                   activation='relu')(x)
        x = Dropout(p)(x)
    x = Flatten()(x)
    output = Dense(1, activation='sigmoid')(x)
    # Assemble the model and print its architecture.
    discriminator = Model(inputs, output)
    discriminator.summary()
    return discriminator
discriminator = discriminator_builder()
# Binary real/fake classification -> binary cross-entropy loss, Adam optimizer.
discriminator.compile(loss='binary_crossentropy',
                      optimizer=Adam(),
                      metrics = ['acc'])
|
Discriminator.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Notebook 05: Simple ML Regression
#
# ### Goal: Basic training a ML using a single feature/predictor/input and a single ML model
#
# #### Reminder of Problem Statement
#
# Before we jump into the ML, I want to remind you of the ML task we want to accomplish in the paper.
#
# 1. Does this image contain a thunderstorm? <-- Classification
# 2. How many lightning flashes are in this image? <-- Regression
#
# #### Background
#
# Training a regression model is basically the same as training a classifier. We will still use the same steps as the previous notebook, just with a small change to the labels ```y```.
#
# #### Step 1 & 2: Import packages and load data for Regression
# We only want 1 feature again to make things simple, which is feature 0. We also will need to change ```class_labels``` to false.
# +
#needed packages
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#plot parameters that I personally like, feel free to make these your own.
import matplotlib
matplotlib.rcParams['axes.facecolor'] = [0.9,0.9,0.9] #makes a grey background to the axis face
matplotlib.rcParams['axes.labelsize'] = 14 #fontsize in pts
matplotlib.rcParams['axes.titlesize'] = 14
matplotlib.rcParams['xtick.labelsize'] = 12
matplotlib.rcParams['ytick.labelsize'] = 12
matplotlib.rcParams['legend.fontsize'] = 12
matplotlib.rcParams['legend.facecolor'] = 'w'
matplotlib.rcParams['savefig.transparent'] = False
#make default resolution of figures much higher (i.e., High definition)
# %config InlineBackend.figure_format = 'retina'
#import some helper functions for our other directory.
import sys
sys.path.insert(1, '../scripts/')
from aux_functions import load_n_combine_df
# class_labels=False -> regression labels (flash counts); keep feature 0 only
(X_train,y_train),(X_validate,y_validate),(X_test,y_test) = load_n_combine_df(path_to_data='../datasets/sevir/',features_to_keep=np.arange(0,1,1),class_labels=False)
# -
# Let's check to make sure the labels are indeed decimal numbers instead of 0's and 1's.
# Histogram of the regression labels (flash counts per image).
plt.hist(y_train,bins=100)
# typo fixed: 'flahses' -> 'flashes'
plt.xlabel('number of flashes')
# Great, it is indeed more than just 0's and 1's. But one thing you'll notice right away: there are a lot of no-flash images. If we plot the number of flashes as a function of the minimum brightness temperature, you will see that it might be very difficult to fit a linear method (i.e., linear regression) to the data
# +
#this is something to help make the ticks show up where I want them
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
                               AutoMinorLocator)
#color I like. The order of ratios is [Red,Green,Blue]
r = [255/255,127/255,127/255]
#make figure
fig = plt.figure(figsize=(5,5))
#set background color to white so we can copy paste out of the notebook if we want
fig.set_facecolor('w')
#get axis for drawing
ax = plt.gca()
#plot flash count against the single feature (minimum brightness temperature)
ax.scatter(X_train[:,0],y_train,color=r,s=1,marker='+')
#set limits
ax.set_xlim([-95,30])
#label axes
ax.set_xlabel('Minimum Brightness Temperature, [$\degree$C]')
ax.set_ylabel('Number of flashes')
# -
# For now, lets try it anyway
#
# #### Step 3: Initialize model
#
# Same as with classification, we can use the ```()``` after the model name to initialize a ML model.
# +
#load model from sklearn
from sklearn.linear_model import LinearRegression
#initialize an untrained ordinary-least-squares regression model
model = LinearRegression()
print(model)
# -
# #### Step 4: Train your ML model!
# Fit the linear regression on the single-feature training set.
model = model.fit(X_train,y_train)
# #### Step 5: Evaluate your ML model
#
# As a sanity check, we will first look at the *one-to-one* plot where the x-axis is the predicted number of flashes, and the y-axis is the true number of flashes. A perfect prediction will be directly along the diagonal.
# +
#get predictions on the validation set
yhat = model.predict(X_validate)
#make figure
fig = plt.figure(figsize=(5,5))
#set background color to white so we can copy paste out of the notebook if we want
fig.set_facecolor('w')
#get axis for drawing
ax = plt.gca()
#plot predicted vs. observed counts; the diagonal line marks a perfect prediction
ax.scatter(yhat,y_validate,color=r,s=1,marker='+')
ax.plot([0,3500],[0,3500],'-k')
ax.set_xlabel('ML Prediction, [$number of flashes$]')
#bugfix: this line called set_xlabel twice, so the y-axis label was never set
ax.set_ylabel('GLM measurement, [$number of flashes$]')
# -
# As you can see, there is not a great correspondence between the ML model predicted flashes and the true number of flashes. One work around of this issue is to train on instances where there is already more than 1 lightning flash. While this might seem like cheating, based on our > 90% accurate classification model, we could use the two ML models in tandem. In other words, we could use the regression model only on images classified to contain flashes from the classification model. So let's drop the zeros out of the dataset and give it a try.
# +
# dropzeros=True removes images with zero flashes from all three splits.
(X_train,y_train),(X_validate,y_validate),(X_test,y_test) = load_n_combine_df(path_to_data='../datasets/sevir/',features_to_keep=np.arange(0,1,1),class_labels=False,dropzeros=True)
#remake scatter plot from Step 2
#make figure
fig = plt.figure(figsize=(5,5))
#set background color to white so we can copy paste out of the notebook if we want
fig.set_facecolor('w')
#get axis for drawing
ax = plt.gca()
#plot data
ax.scatter(X_train[:,0],y_train,color=r,s=1,marker='+')
#set limits
ax.set_xlim([-95,30])
#label axes
ax.set_xlabel('Minimum Brightness Temperature, [$\degree$C]')
ax.set_ylabel('Number of flashes')
# confirm that no zero-flash samples remain in the labels
print(np.min(y_train))
# -
# Good, there are no 0s in the label vector (```y```). Now let's re-train the model
# Re-train the regression on the zero-free training set.
model = model.fit(X_train,y_train)
# +
#get predictions on the validation set
yhat = model.predict(X_validate)
#make figure
fig = plt.figure(figsize=(5,5))
#set background color to white so we can copy paste out of the notebook if we want
fig.set_facecolor('w')
#get axis for drawing
ax = plt.gca()
#plot predicted vs. observed counts; the diagonal line marks a perfect prediction
ax.scatter(yhat,y_validate,color=r,s=1,marker='+')
ax.plot([0,3500],[0,3500],'-k')
ax.set_xlabel('ML Prediction, [$number of flashes$]')
#bugfix: this line called set_xlabel twice, so the y-axis label was never set
ax.set_ylabel('GLM measurement, [$number of flashes$]')
# -
# It's better, but it still doesn't look great. Given the relatively non-linear relationship between the minimum brightness temperature and the number of flashes, though, this is probably to be expected. Let's calculate some metrics to quantitatively evaluate the trained ML model.
#
# The metrics for regression are a bit different than for classification. Common metrics are the mean bias, mean absolute error (MAE), Root Mean Square Error (RMSE) and the coefficient of determination (R^2). Mathematically, these metrics are defined:
#
# $$ \mathrm{Bias} = \frac{1}{N} \sum_{j=1}^{N} (y_j - \hat{y}_j) $$
#
# $$ \mathrm{MAE} = \frac{1}{N} \sum_{j=1}^{N} |y_j - \hat{y}_j| $$
#
# $$ \mathrm{RMSE} = \sqrt{\frac{1}{N} \sum_{j=1}^{N} (y_j - \hat{y}_j)^{2}} $$
#
# $$ \mathrm{R^{2}} = 1 - \frac{\sum_{j=1}^{N} (y_j - \hat{y}_j)^{2}}{\sum_{j=1}^{N} (y_j - \bar{y})^{2}} $$
#
# We have included all of these metrics again in the ```gewitter_functions.py``` script.
# +
from gewitter_functions import get_mae,get_rmse,get_bias,get_r2
# Evaluate the regression on the validation set with four standard metrics.
yhat = model.predict(X_validate)
mae = get_mae(y_validate,yhat)
rmse = get_rmse(y_validate,yhat)
bias = get_bias(y_validate,yhat)
r2 = get_r2(y_validate,yhat)
#print them out so we can see them
print('MAE:{} flashes, RMSE:{} flashes, Bias:{} flashes, Rsquared:{}'.format(np.round(mae,2),np.round(rmse,2),np.round(bias,2),np.round(r2,2)))
# -
# There we go! We have a simple linear regression predicting the number of flashes in an image. While these results don’t look great (missing on average by 30 flashes) we will show in the more advanced ML model (using more features) that we can get a more accurate model.
# #### Step 6: Save your trained model
#
# Sometimes loading the training dataset and re-training the model each time can be cumbersome. There is a way to save the trained models. We will use the python ```pickle``` package, I know kinda an odd name, to do this.
import pickle
# Persist the trained model so it can be reloaded without retraining.
name = 'LnR.pkl'
start_path = '../datasets/sklearnmodels/regression/onefeature/'
# use a context manager so the file is flushed and closed even on error
# (the original open() handle was never closed)
with open(start_path + name, 'wb') as savefile:
    pickle.dump(model, savefile)
# #### Step 7: Load a saved model
#
# Now that you have it saved, if you need to load it do the following:
# +
import pickle
name = 'LnR.pkl'
start_path = '../datasets/sklearnmodels/regression/onefeature/'
#notice the change from wb to rb
# context manager closes the handle after loading (the original never closed it)
with open(start_path + name, 'rb') as savefile:
    #notice the change from dump to load
    model = pickle.load(savefile)
print(model)
# -
# In the next notebook we will look at training a ML model with all 36 predictors. Notebook 6 is for classification, while Notebook 7 is for regression.
|
jupyter_notebooks/Notebook05_SimpleMLRegression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import netCDF4 as nc
from salishsea_tools import tidetools, nc_tools
import matplotlib.pyplot as plt
import numpy as np
import os
import re
# %matplotlib inline
# directory holding the 2D SalishSea/SOG run files used below
resultsDir='/data/eolson/MEOPAR/SS2DSOGruns/'
# -
# Open the template dataset whose variable metadata is copied below.
TS=nc.Dataset(os.path.join(resultsDir,'TS_uniform.nc'))
# read z, T, S into CTD (columns 1, 2, 8 of the .sog cast file)
CTD=np.loadtxt('/data/eolson/SOG/SOG-initial/ctd/SG-S3-2004-10-19.sog',skiprows=12,usecols=(1, 2, 8))
# add z=0 at first row, repeating T,S values from next level
data=np.vstack((CTD[0,:],CTD))
data[0,0]=0.
# interpolate to T-points (midpoints of adjacent levels)
data_t=(data[0:101,:]+data[1:102,:])/2
# create temp and sal arrays with correct dimensions
# NOTE: tiling a (101,1,1) column with a 4-tuple of reps prepends a time
# axis, giving shape (1, 101, 10, 1100) = (t, deptht, y, x).
vecTem=np.reshape(data_t[:,1],(101,1,1))
data_Tem=np.tile(vecTem,(1,1,10,1100))
vecSal=np.reshape(data_t[:,2],(101,1,1))
data_Sal=np.tile(vecSal,(1,1,10,1100))
print data_Sal.shape
# +
# Create the output file and its dimensions (y=10, x=1100, 101 depth
# levels, unlimited time), then echo both files' dimensions for comparison.
new_TS=nc.Dataset('/data/eolson/MEOPAR/SS2DSOGruns/TS_1100x10_SG-S3-2014-10-19.nc','w')
new_TS.createDimension('y', 10)
new_TS.createDimension('x', 1100)
new_TS.createDimension('deptht', 101)
new_TS.createDimension('time_counter', None)
nc_tools.show_dimensions(TS)
nc_tools.show_dimensions(new_TS)
# +
# time_counter: metadata copied from the template, data copied verbatim.
new_tc=new_TS.createVariable('time_counter',float,('time_counter'),zlib=True)
new_tc.setncattr('units',TS.variables['time_counter'].units)
new_tc.setncattr('long_name',TS.variables['time_counter'].long_name)
new_tc[:]=TS.variables['time_counter']
print TS.variables['time_counter']
print new_TS.variables['time_counter']
# +
# deptht: metadata copied from the template, data from the CTD T-points.
new_z=new_TS.createVariable('deptht',float,('deptht'),zlib=True)
new_z.setncattr('units',TS.variables['deptht'].units)
new_z.setncattr('long_name',TS.variables['deptht'].long_name)
new_z.setncattr('positive',TS.variables['deptht'].positive)
new_z[:]=data_t[:,0]
print TS.variables['deptht']
print new_TS.variables['deptht']
# +
new_x=new_TS.createVariable('x',float,('y','x'),zlib=True)
new_x.setncattr('units',TS.variables['x'].units)
new_x.setncattr('long_name',TS.variables['x'].long_name)
new_x=TS.variables['x']
print TS.variables['x']
print new_TS.variables['x']
# +
new_y=new_TS.createVariable('y',float,('y','x'),zlib=True)
new_y.setncattr('units',TS.variables['y'].units)
new_y.setncattr('long_name',TS.variables['y'].long_name)
new_y=TS.variables['y']
print TS.variables['y']
print new_TS.variables['y']
# -
# Temperature field: metadata from the template, data from the tiled CTD
# profile (shape (1, 101, 10, 1100) matches (time, deptht, y, x)).
new_Tem=new_TS.createVariable('votemper',float,('time_counter','deptht','y','x'),zlib=True)
new_Tem.setncattr('units',TS.variables['votemper'].units)
new_Tem.setncattr('long_name',TS.variables['votemper'].long_name)
new_Tem.setncattr('coordinates',TS.variables['votemper'].coordinates)
new_Tem[:,:,:,:]=data_Tem
print TS.variables['votemper']
print new_TS.variables['votemper']
# Salinity field: same treatment as temperature.
new_Sal=new_TS.createVariable('vosaline',float,('time_counter','deptht','y','x'),zlib=True)
new_Sal.setncattr('units',TS.variables['vosaline'].units)
new_Sal.setncattr('long_name',TS.variables['vosaline'].long_name)
new_Sal.setncattr('coordinates',TS.variables['vosaline'].coordinates)
new_Sal[:,:,:,:]=data_Sal
print TS.variables['vosaline']
print new_TS.variables['vosaline']
# Global attributes documenting provenance, then close both files.
new_TS.title="""SS2DSOG 1100x10 T+S initialization"""
new_TS.institution="""
Dept of Earth, Ocean & Atmospheric Sciences, University of British Columbia"""
new_TS.comment= """
Based on SG-S3-2004-10-19.sog"""
new_TS.reference= """
eolson: TS_SS2DSOG5x5_SG-S3-2004-10-19-2d.ipynb"""
nc_tools.show_dataset_attrs(new_TS)
new_TS.close()
TS.close()
# Re-open the file just written and plot a depth/y slice of each field as
# a visual sanity check.
B=nc.Dataset('/data/eolson/MEOPAR/SS2DSOGruns/TS_1100x10_SG-S3-2014-10-19.nc')
tem=B.variables['votemper']
sal=B.variables['vosaline']
plt.pcolormesh(tem[0,:,:,3]); plt.colorbar()
# +
plt.pcolormesh(sal[0,:,:,3]); plt.colorbar()
# -
print B.variables
B.close()
|
Elise/plotResults/TS_SS2DSOG5x5_SG-S3-2004-10-19-2d.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (cs109b)
# language: python
# name: cs109b
# ---
# # <img style="float: left; padding-right: 10px; width: 45px" src="https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/iacs.png"> APCOMP 295 Advanced Practical Data Science
# ## Homework 1 - Google Cloud Setup, Docker & Flask
#
#
#
# **Harvard University**<br/>
# **Fall 2020**<br/>
# **Instructors**: <NAME>
#
#
# <hr style="height:2pt">
# **Instructions**:
#
# - Each assignment is graded out of 5 points.
# - Submit this homework individually at Canvas.
#
# <hr style="height:2pt">
#
#
# We illustrated two very simple applications of containers during the demo in class and demonstrated them step by step. Now we should learn how to deploy a more complex application on the cloud. Real-world applications are often composed of many different components. Putting it altogether comprises many steps, which we walk you through below. It is not unusual that the tutorial instructions available on the internet, or in this case the assignment instructions, do not match exactly with what you will face - this is due to the quick development cycles of the libraries and user interfaces. Therefore, it is one of the learning objectives of this homework to learn how to become more comfortable with all the components, follow tutorials and how all connect together. You should use all resources available, including the class forum, or other forums and do not hesitate to ask the teaching staff.
#
# We will be using Google Cloud, and one of the goals of this assignment is to get you set up with Google Cloud.
#
# The final goal is to run your first Docker application with Flask on the cloud !
#
#
# ### <font color=red>Remember to delete your virtual machines/clusters.</font>
# ## Question 1: Create Accounts at Cloud Services (0.5 point)
#
# If you have not already done so, create accounts at the following cloud services we will be using in the course:
# - GitHub: www.github.com
# - Google Cloud: https://cloud.google.com/
# (new users may be offered \$300 free trial - valid for 3 months - See this for more https://cloud.google.com/free/docs/gcp-free-tier )
# - DockerHub: https://hub.docker.com/
#
# #### Submit:
# 1. your username for GitHub and Google Cloud
# 2. a screenshot of the “profile” page of your GitHub account and the Google Cloud console
# 3. A screenshot of the “Account settings” (top half) page of Docker Hub account.
#
# #### Example Submission:
# GitHub username = rashmigb
#
# 
# Google username = <EMAIL> (Your screenshot may look different if you are an existing user)
#
# 
# Docker Hub:
#
# 
# # 1.2 Submission
#
# Github Username: simonwarchol
# 
#
# Google username = <EMAIL>
# 
# Dockerhub username = simonwarchol
# 
#
# ## Question 2: Create project on Google Cloud and Install Google cloud sdk (1 point)
#
# 1. Redeem your Google Cloud credit <font color=red> (You will need @g.harvard.edu email account, look for announcement on Ed before proceeding) </font>
# (this should create a billing account “AC295 Advance Practical Data Science”)
# Every project on Google cloud must be associated with a billing account.
#
# 2. Create a project on Google Cloud Console (top left -> “Select a project” -> New project OR Dashboard -> Create Project) (if you are using .harvard you may select the .harvard organization).
# Project name: ac295-data-science
#
# 3. We will be using gcloud command line. Please follow the instructions on this page to install https://cloud.google.com/sdk/docs/quickstarts .
#
#
# #### Submit:
# 1. Screen shot of your new project
# 2. output from `gcloud config list`.
#
# #### Example Submission:
#
#
# 
#
# ```
# (base) Rashmis-MBP-2:~ rashmi$ gcloud config list
# [compute]
# region = us-east1
# zone = us-east1-b
# [core]
# account = <EMAIL>
# disable_usage_reporting = False
# project = ac295datascience
#
# Your active configuration is: [default]
# (base) Rashmis-MBP-2:~ rashmi$
# ```
#
#
#
# # 2.1 Submission
# 
# 
#
# ## Question 3: Set up SSH Keys (0.5 points)
#
# Many tasks in deploying apps to the cloud are done at the command line. SSH is an essential tool for secure use of command line interfaces to remote systems. SSH is also more convenient than password authentication once you’ve set it up.
# If you have not already done so, create a default SSH key on your main computer.
# Configure GitHub so that you can access your repositories using SSH.
#
# You can follow this tutorial:
# https://docs.github.com/en/github/authenticating-to-github/adding-a-new-ssh-key-to-your-github-account .Try to clone one of your repositories at the command line using the SSH interface.
#
#
# #### Submit:
# 1. a screenshot of your GitHub account showing at least one SSH key.
# 2. a screenshot of a terminal where you clone any repo with the SSH.
#
# Please note that it is always safe to share the public part of your SSH key as in submission 1, but you should **never** share a private RSA key with anyone!
#
# As an example of cloning by SSH, when I type: ` git clone git@github.com:Harvard-IACS/2020F-AC295-private.git` this command works "by magic" on a computer that has one of the SSH keys I’ve configured on GitHub in the default key location `~/.ssh/id_rsa` (linux and Mac) or `C:\Users\<username>\.ssh\id_rsa` (Windows 10)
#
#
#
# #### Example Submission:
#
# 
#
#
# 
# # 3.1 Submission
# 
# 
# ## Flask
#
# Flask is a Python-based web framework. There are many others, e.g. Django and FastAPI, and next month there will be more. We'll stick to Flask in this course for many of the examples we will be doing. Though this class is not about teaching web development, it is advisable to familiarize yourself with it. In the next question we will be using a very simple Flask app
# echo.py. We found the following video to be very instructive (aka a little long):
#
# Youtube - https://youtu.be/Z1RJmh_OqeA
#
#
#
#
#
#
#
#
#
#
#
# ## Question 4: Your First Docker App on Google Cloud (2 points)
# We are going to create an echo service, which displays back what we send. We will do this on Google Cloud and also create a docker image on Google Cloud.
#
# Follow these steps (watch the demo video https://youtu.be/aI6jTjwxWVI and replicate yourself):
# - Add a firewall rule (the Compute Engine API must first be enabled before firewall rules can be created). If you do not see the firewall at the top of the pull down menu, look for "Networking".
# - Create a Virtual Machine on google cloud.
# - Install pip3, pandas, flask.
# - Copy echo.py and Dockerfile from https://github.com/bgweber/StartupDataScience/tree/master/containers/echo to the VM
# - Install Docker => https://docs.docker.com/engine/install/debian/
# - Create Dockerfile, Docker Image and run
# - **Must delete VM after you are done.**
#
#
#
#
# Submit (i) screenshot displaying a running VM on google cloud (ii) screenshot of the browser where echo service displays results (with url) (iii) screenshot of docker container running and docker images. (iv) log/notes of commands used.
# Also, don’t hesitate to post questions on the Ed class discussion board. One of your classmates or the teaching staff can help.
#
#
#
# Embed the walkthrough video for this question in the notebook output.
from IPython.display import YouTubeVideo
YouTubeVideo('aI6jTjwxWVI') #https://youtu.be/aI6jTjwxWVI
# # 4.2 Submission
# 
# 
# 
#
#
# ## Question 5: Echo Translation (1 point)
# Now that we have learned all the mechanics, let's create a similar system as in Question 4, but this time the system should display the input text translated to a language of your choice.
#
# **Submit:** Modified echo.py i.e. copy paste the code in a cell below (Please use cell -> Raw NBConvert) and (i) screenshot displaying a running VM on google cloud (ii) screenshot of the browser where echo service displays results (with url) (iii) screenshot of docker container running and docker images. (iv) log/notes of commands used.
#
#
# **Hints:**
# - Make sure you can display characters that are not ASCII. Try different languages.
# - Hint: `pip install googletrans`
#
#
# 
# ## 5.1 Submission
# 
# 
# 
#
# + active=""
# # load Flask
# import flask
# from googletrans import Translator
#
# app = flask.Flask(__name__)
#
#
# # define a predict function as an endpoint
# @app.route("/predict", methods=["GET", "POST"])
# def predict():
# data = {"success": False}
#
# # get the request parameters
# params = flask.request.json
# if (params == None):
# params = flask.request.args
#
# # if parameters are found, echo the msg parameter
# if (params != None):
# translator = Translator()
# data["language"] = "Spanish"
# data["response"] = params.get("msg")
# # converting from english to spanish
# translated_text = translator.translate(data["response"], src='en', dest="es")
# data["translated_text"] = translated_text.text
# data["success"] = True
#
# # return a response in json format
# return flask.jsonify(data)
#
#
# # This allows for unicode characters in the response
# app.config['JSON_AS_ASCII'] = False
#
# # start the flask app, allow remote connections
# app.run(host='0.0.0.0')
#
# -
#
# ### <font color=red>Remember to delete your virtual machines/clusters.</font>
|
content/exercises/exercise1/exercise1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
import pandas as pd
import pyarrow as pa
import numpy as np
import json
from datetime import date, datetime
with_json = False
def json_serial(obj):
    """JSON serializer for objects json.dump cannot handle natively.

    Returns the Unix timestamp (seconds, local time) of a date/datetime
    as an int. Fix: the original used ``obj.strftime("%s")``, which is a
    non-portable glibc extension (it fails on Windows and is undocumented
    in Python); ``datetime.timestamp()`` gives the same local-time epoch
    seconds portably.
    """
    if isinstance(obj, datetime):
        return int(obj.timestamp())
    if isinstance(obj, date):
        # Promote a bare date to local midnight, matching strftime("%s").
        return int(datetime(obj.year, obj.month, obj.day).timestamp())
    raise TypeError("Type %r not serializable" % type(obj))
# +
# Load the raw flights CSV; the time/date columns are read as strings so
# the '2400' midnight encoding can be normalised before parsing.
data = pd.read_csv('flights-10m.csv', encoding='utf-8', dtype={'FlightDate': 'str', 'ArrTime': 'str', 'DepTime': 'str'})
data = data.dropna()
data.head()
# +
# Rename columns to the UPPER_SNAKE names used downstream, parse dates,
# and map the '2400' midnight encoding back to '0000'.
renamed = data.rename(index=str, columns={"FlightDate": "FL_DATE", "DepTime": "DEP_TIME", "ArrTime": "ARR_TIME", "Distance": "DISTANCE", "AirTime": "AIR_TIME", "DepDelay": "DEP_DELAY", "ArrDelay": "ARR_DELAY"})
renamed['FL_DATE'] = pd.to_datetime(renamed.FL_DATE, format='%Y-%m-%d').dt.date
renamed['DEP_TIME'] = renamed.DEP_TIME.replace('2400', '0000')
renamed['ARR_TIME'] = renamed.ARR_TIME.replace('2400', '0000')
def toTime(col):
    """Convert an HHMM-encoded Series (e.g. '1230') to fractional hours (12.5)."""
    hhmm = pd.to_numeric(col)
    hours = np.floor(hhmm / 100)
    minutes = hhmm.mod(100) / 60.
    return hours + minutes
# Convert HHMM columns to fractional hours, drop route columns if present,
# and downcast to compact dtypes before writing Arrow files.
renamed['DEP_TIME'] = toTime(renamed['DEP_TIME'])
renamed['ARR_TIME'] = toTime(renamed['ARR_TIME'])
if 'ORIGIN' in renamed.columns:
    renamed = renamed.drop(['ORIGIN', 'DEST'], axis=1)
cleaned = renamed.dropna()
right_types = cleaned.astype({
    'DEP_DELAY': 'int16',
    'ARR_DELAY': 'int16',
    'AIR_TIME': 'int16',
    'DISTANCE': 'int16',
    'DEP_TIME': 'float32',
    'ARR_TIME': 'float32'
})
# -
right_types.head()
# Write size-graded prefixes of the cleaned frame as Arrow files (and
# optionally JSON, when with_json is set at the top of the notebook).
for size, name in [(10000, 'flights-10k'), (200000, 'flights-200k'), (500000, 'flights-500k'), (1000000, 'flights-1m'), (3000000, 'flights-3m'), (10000000, 'flights-10m')]:
    print(name)
    smaller = right_types[:size+1]
    table = pa.Table.from_pandas(smaller)
    if with_json:
        # Column-oriented JSON; dates serialised via json_serial.
        d = {}
        for column in smaller.columns:
            d[column]=list(smaller[column])
        with open(f'{name}.json', 'w') as f:
            json.dump(d, f, default=json_serial, separators=(',', ':'))
    # table = table.column('ARRIVAL').cast(pa.TimestampValue, True)
    writer = pa.RecordBatchFileWriter(f'{name}.arrow', table.schema)
    writer.write(table)
    writer.close()
# !ls -lah
|
data/convert_flights.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Asg-3
#
#
# * Build your own calculator
# - The code is adapted slightly from https://www.dabeaz.com/ply/example.html
#
# - further seal knowledge of lexers (RE based) and parsers (CFG based) in your minds
# + run_control={"frozen": false, "read_only": false}
from lex import *
from yacc import *
# + run_control={"frozen": false, "read_only": false}
# PLY token declarations; each t_<NAME> string below is the token's regex.
tokens = (
    'NAME','NUMBER',
    'PLUS','MINUS','TIMES','DIVIDE','EQUALS',
    'LPAREN','RPAREN', 'SUCC' # ADD token'SUCC'
    )
# Tokens
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_EQUALS = r'='
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
# Successor operator for the exercise, spelled '!'.
t_SUCC = r'\!'
# Add definition for t_SUCC imitating t_PLUS for example
# PLY token rule: the raw-string docstring below is the token's regex.
def t_NUMBER(t):
    r'\d+'
    try:
        t.value = int(t.value)
    except ValueError:
        # Bug fix: the original passed t.value as a second argument to
        # print(), emitting the literal "%d" followed by the value;
        # use %-formatting so the message interpolates correctly.
        print("Integer value too large %s" % t.value)
        t.value = 0
    return t
# Ignored characters (spaces and tabs between tokens)
t_ignore = " \t"
# Track line numbers for error reporting.
def t_newline(t):
    r'\n+'
    t.lexer.lineno += t.value.count("\n")
# Report and skip any character the lexer does not recognise.
def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
# Parsing rules
# Operator precedence, lowest first; UMINUS is a fictitious token used
# only via %prec for unary minus.
precedence = (
    ('left','PLUS','MINUS'),
    ('left','TIMES','DIVIDE'),
    ('right','UMINUS' ), # Add SUCC, imitating UMINUS
    )
# dictionary of names (calculator variable store)
names = { }
# Assignment statement: store the expression value under the name.
# (The single-quoted strings below are PLY grammar rules, not docs.)
def p_statement_assign(t):
    'statement : NAME EQUALS expression'
    names[t[1]] = t[3]
# Bare expression statement: evaluate and print the result.
def p_statement_expr(t):
    'statement : expression'
    print(t[1])
def p_expression_binop(t):
    '''expression : expression PLUS expression
                  | expression MINUS expression
                  | expression TIMES expression
                  | expression DIVIDE expression'''
    # The docstring above is the PLY grammar specification.
    # Dispatch on the operator token instead of an if/elif chain; the
    # grammar guarantees t[2] is one of these four symbols.
    apply_op = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
    }
    t[0] = apply_op[t[2]](t[1], t[3])
# Unary minus; %prec UMINUS gives it higher precedence than binary minus.
def p_expression_uminus(t):
    'expression : MINUS expression %prec UMINUS'
    t[0] = -t[2]
# Add the SUCC rule imitating the p_expression_uminus rule
# Successor operator: '!e' evaluates to e + 1.
def p_expression_succ(t):
    'expression : SUCC expression'
    t[0] = t[2] + 1
# Parenthesised expression passes its value through.
def p_expression_group(t):
    'expression : LPAREN expression RPAREN'
    t[0] = t[2]
# Literal number.
def p_expression_number(t):
    'expression : NUMBER'
    t[0] = t[1]
# Variable reference; undefined names warn and evaluate to 0.
def p_expression_name(t):
    'expression : NAME'
    try:
        t[0] = names[t[1]]
    except LookupError:
        print("Undefined name '%s'" % t[1])
        t[0] = 0
# Parser error hook: report the offending token.
def p_error(t):
    print("Syntax error at '%s'" % t.value)
# Build the lexer/parser from the rules above, then run a simple REPL
# until EOF or the literal input 'END'.
calclexer = lex()
calcparser = yacc()
while True:
    try:
        s = input('calc > ') # Use raw_input on Python 2
    except EOFError:
        break
    if (s=='END'):
        break
    calcparser.parse(s, lexer=calclexer)
# + [markdown] run_control={"frozen": false, "read_only": false}
# Your output must resemble the above.
# -
|
asgjove/asg3/J3_U1067292_Calculator.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Deep Learning activation functions examined below include ReLU, Leaky ReLU Sigmoid, tanh
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
z = np.linspace(-5,5,num=1000)
# ### Create plot drawing function
def draw_activation_plot(a,quadrants=2,y_ticks=[0],y_lim=[0,5]):
    """Plot activation values ``a`` against the module-level input grid ``z``.

    quadrants=2 keeps the x-axis at the bottom; quadrants=4 centres both
    axes for functions with negative outputs. y_ticks/y_lim control the
    y-axis labelling. (The list defaults are never mutated, so they are
    safe despite being mutable.)
    """
    #Create figure and axis
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    #Move left axis
    ax.spines['left'].set_position('center')
    # Remove top and right axes
    ax.spines['right'].set_color('none')
    ax.spines['top'].set_color('none')
    #Set x and y labels
    plt.xlabel('z')
    plt.ylabel('a')
    #Set ticks
    plt.xticks([])
    plt.yticks(y_ticks)
    #Set ylim
    plt.ylim(y_lim)
    #4 Quadrant conditions
    if quadrants==4:
        #Move bottom axis to the vertical centre as well
        ax.spines['bottom'].set_position('center')
        #Move x and y labels off the crossing point
        ax.yaxis.set_label_coords(.48,.75)
        ax.xaxis.set_label_coords(.75,.48)
    plt.plot(z,a);
# ## ReLU
# Great default choice for hidden layers. It is frequently used in industry and is almost always adequate to solve a problem.
# ReLU: max(z, 0), plotted with the default 2-quadrant layout.
relu = np.maximum(z,0)
draw_activation_plot(relu)
# +
# Fixed 2-quadrant variant of draw_activation_plot (y-axis centred,
# x-axis at the bottom, ylim pinned to [0, 5]).
def draw_2_quad_plot(a):
    #Create figure and axis
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    #Move bottom and left axes
    ax.spines['left'].set_position('center')
    #ax.spines['bottom'].set_position('center')
    # Remove top and right axes
    ax.spines['right'].set_color('none')
    ax.spines['top'].set_color('none')
    #Set x and y labels
    plt.xlabel('z')
    plt.ylabel('a')
    #Set ticks
    plt.xticks([])
    plt.yticks([0])
    plt.ylim([0,5])
    plt.plot(z,a);
relu = np.maximum(z,0)
draw_2_quad_plot(relu)
# -
# ## Leaky ReLU
# Can help by providing differentiable point at 0.
# +
leaky_ReLU = np.maximum(0.01*z,z)
# Bug fix: the original called draw_4_quad_plot(tanh), which plots the
# wrong curve AND references a helper (and the tanh variable) that are
# only defined in a later cell, so this cell failed when run in order.
# Plot the leaky-ReLU curve with the general plotter defined at the top,
# using 4 quadrants so the small negative tail is visible.
draw_activation_plot(leaky_ReLU, quadrants=4, y_ticks=[0], y_lim=[-1, 5])
# -
# ## tanh
# Usually strictly better than sigmoid
# +
# 4-quadrant variant: both spines centred, for activations that produce
# negative outputs (tanh ranges over (-1, 1)).
def draw_4_quad_plot(a):
    #Create figure and axis
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    #Move bottom and left axes
    ax.spines['left'].set_position('center')
    ax.spines['bottom'].set_position('center')
    # Remove top and right axes
    ax.spines['right'].set_color('none')
    ax.spines['top'].set_color('none')
    #Set x and y labels
    plt.xlabel('z')
    plt.ylabel('a')
    #Move x and y labels off the crossing point
    ax.yaxis.set_label_coords(.48,.75)
    ax.xaxis.set_label_coords(.75,.48)
    #Set ticks
    plt.xticks([])
    plt.yticks([-1,0,1])
    plt.plot(z,a);
tanh = (np.exp(z)-np.exp(-z))/(np.exp(z)+np.exp(-z))
draw_4_quad_plot(tanh)
# -
# ## sigmoid
# Almost never used except in output layer when dealing with binary classification.
# +
# Sigmoid: 1 / (1 + e^-z), range (0, 1).
sigmoid = 1/(1+np.exp(-z))
draw_2_quad_plot(sigmoid)
# -
# +
#Create z and sigma
sigma = 1/(1+np.exp(-z))
#Draw prediction cut-off line at the usual 0.5 decision threshold
plt.axhline(0.5, color='black',ls='--')
#Label axis
plt.xlabel('z')
plt.ylabel(r'$\hat{y}$')
#Plot graph
plt.tick_params(axis='x',bottom='off',labelbottom='off')
plt.plot(z,sigma,'-',lw=3);
# -
|
content/deep-learning/.ipynb_checkpoints/Activation Functions-Back-up-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# <a href="https://colab.research.google.com/github/vasudevgupta7/boilerplate/blob/main/notebooks/colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="pJbYXou6chZf" outputId="1670f201-ef28-4fe2-fb8e-be06dfa4f642"
# !nvidia-smi
# + id="H7jLn0sy3Yem"
# !pip install huggingface_hub && huggingface-cli login
# -
# !sudo apt install git-lfs && git config --global user.email "<EMAIL>" && git config --global user.name "<NAME>"
# !pip install -r ../requirements.txt
# cd /content/drive/My Drive/<project-directory>
# ls
# !python train.py
|
notebooks/colab.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
import numpy as np
import netCDF4 as nc
import xarray as xr
from scipy.interpolate import griddata, interp1d
from salishsea_tools import nc_tools,viz_tools
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from fancyimpute import KNN
# +
# Open the concatenated JP model T-grid output and its mesh mask.
fname = '/home/ssahu/saurav/JP_BC/cat_42_days_T.nc';
gridT = xr.open_dataset(fname);
# -
gridT.keys
# +
#### Load JP's mask, sliced to the WCVI subdomain [55:400, 446:701]
mask = nc.Dataset('/data/mdunphy/NEP036-N30-OUT/INV/mesh_mask.nc');
tmask_JP = mask.variables['tmask'][0,:,55:400,446:701];
umask_JP = mask.variables['umask'][0,:,55:400,446:701];
vmask_JP = mask.variables['vmask'][0,:,55:400,446:701];
print(tmask_JP.shape, umask_JP.shape, vmask_JP.shape)
# +
#### Slice out our domain of WCVI from JP and mask it at the same time saying the zero values as NAN
# NOTE(review): assigning a masked_array into a plain ndarray slice drops
# the mask, so these masked_array lines look like no-ops -- confirm intent.
votemper_JP = np.array(gridT['votemper'][0,:,55:400,446:701]);
votemper_JP[...] = np.ma.masked_array(votemper_JP[...], mask = tmask_JP[...]);
# Fix: use np.nan directly instead of the string ['Nan'] (which only
# worked because NumPy parses the string into a float NaN).
votemper_JP[votemper_JP == 0] = np.nan;
vosaline_JP = np.array(gridT['vosaline'][0,:,55:400,446:701]);
vosaline_JP[...] = np.ma.masked_array(vosaline_JP[...], mask = tmask_JP[...]);
vosaline_JP[vosaline_JP == 0] = np.nan;
glamt_bc_JP = np.array(gridT['nav_lon'][55:400,446:701]);
gphit_bc_JP = np.array(gridT['nav_lat'][55:400,446:701]);
deptht_JP = np.array(gridT['deptht'][:]);
# -
# Quick sanity checks: remaining zeros and the salinity minimum.
vosaline_JP[:,0,0]
np.where(vosaline_JP == 0)
np.nanmin(vosaline_JP)
# +
#### Load the WCVI points
fname_wcvi = '/ocean/ssahu/CANYONS/wcvi/grid/coordinates.nc'
with nc.Dataset(fname_wcvi, 'r') as coord:
    gphit_wcvi = coord.variables['gphit'][0,...];
    glamt_wcvi = coord.variables['glamt'][0,...];
print((glamt_bc_JP[0,0],gphit_bc_JP[0,0]), (glamt_wcvi[0,0],gphit_wcvi[0,0]))
# +
# Horizontally interpolate each depth level of the JP fields from the JP
# lon/lat points onto the WCVI grid with scipy's unstructured griddata.
X = glamt_bc_JP.flatten();
Y = gphit_bc_JP.flatten();
points = (X[:],Y[:]);
xi = (glamt_wcvi.flatten(), gphit_wcvi.flatten());
votemper_ic = np.zeros((votemper_JP.shape[0], glamt_wcvi.shape[0], glamt_wcvi.shape[1]));
vosaline_ic = np.zeros((vosaline_JP.shape[0], glamt_wcvi.shape[0], glamt_wcvi.shape[1]));
for i,j in enumerate(votemper_JP[:,...]):
    votemper_ic[i,...] = np.reshape(griddata(points, votemper_JP[i,...].flatten(), xi, method= 'linear'), glamt_wcvi.shape)
    vosaline_ic[i,...] = np.reshape(griddata(points, vosaline_JP[i,...].flatten(), xi, method= 'linear'), glamt_wcvi.shape)
# +
# for i in np.arange(votemper_ic.shape[0]):
# for j in np.arange(votemper_ic.shape[1]):
# for k in np.arange(votemper_ic.shape[2]):
# if votemper_ic[i,j,k] == 0:
# votemper_ic[i,j,k] == ['Nan']
# else:
# continue
# for i in np.arange(votemper_ic.shape[0]):
# for j in np.arange(votemper_ic.shape[1]):
# for k in np.arange(votemper_ic.shape[2]):
# if vosaline_ic[i,j,k] == 0:
# vosaline_ic[i,j,k] == ['Nan']
# else:
# continue
# Fix: use np.nan directly rather than the string ['Nan'] (NumPy parses
# the string to a float NaN, but the explicit constant is clearer).
vosaline_ic[vosaline_ic == 0] = np.nan;
votemper_ic[votemper_ic == 0] = np.nan;
# +
# for i in np.arange(votemper_ic.shape[0]):
# for j in np.arange(votemper_ic.shape[1]):
# for k in np.arange(votemper_ic.shape[2]):
# if votemper_ic[i,j,k] == 0:
# votemper_ic[i,j,k] == ['Nan']
# else:
# continue
# for i in np.arange(votemper_ic.shape[0]):
# for j in np.arange(votemper_ic.shape[1]):
# for k in np.arange(votemper_ic.shape[2]):
# if votemper_ic[i,j,k] == 0:
# votemper_ic[i,j,k] == ['Nan']
# else:
# continue
# -
vosaline_ic[:,0,0]
np.where(vosaline_ic[...] == 0)
# +
# for i,j in enumerate(votemper_ic[:,...]):
# votemper_ic[i,...] = KNN(k=3).complete(votemper_ic[i,...]);
# vosaline_ic[i,...] = KNN(k=3).complete(vosaline_ic[i,...]);
# +
# vosaline_ic[np.where(np.isnan(vosaline_ic))]=0;
# votemper_ic[np.where(np.isnan(votemper_ic))]=0;
# +
# for i in np.arange(votemper_ic.shape[0]):
# for j in np.arange(votemper_ic.shape[1]):
# for k in np.arange(votemper_ic.shape[2]):
# if np.isnan(votemper_ic[i,j,k]):
# votemper_ic[i,j,k] == votemper_ic[i-1,j,k]
# else:
# continue
# for i in np.arange(votemper_ic.shape[0]):
# for j in np.arange(votemper_ic.shape[1]):
# for k in np.arange(votemper_ic.shape[2]):
# if np.isnan(vosaline_ic[i,j,k]):
# vosaline_ic[i,j,k] == vosaline_ic[i-1,j,k]
# else:
# continue
# +
# Surface temperature on the original JP (sliced) grid.
fig, ax = plt.subplots(1, 1, figsize=(10, 8))
viz_tools.set_aspect(ax)
mesh = ax.pcolormesh(glamt_bc_JP, gphit_bc_JP,votemper_JP[0,...], cmap =cm.ocean)
fig.colorbar(mesh)
plt.title('Temperature JP sliced to WCVI and masked')
plt.show()
# +
# Surface temperature after interpolation onto the WCVI grid.
fig, ax = plt.subplots(1, 1, figsize=(10, 8))
viz_tools.set_aspect(ax)
mesh = ax.pcolormesh(glamt_wcvi, gphit_wcvi, votemper_ic[0,...], cmap =cm.ocean)
fig.colorbar(mesh)
plt.title('Temperature WCVI KNN imputed and after interpolation')
plt.show()
# +
# for i,j in enumerate(votemper_ic[:,...]):
# for p,q in enumerate(votemper_ic[i,:,...]):
# for l,m in enumerate(votemper_ic[i,p,:]):
# if votemper_ic[i,p,l] == 0:
# votemper_ic[i,p,l] = votemper_ic[i-1,p,l]
# else:
# continue
# for i,j in enumerate(vosaline_ic[:,...]):
# for p,q in enumerate(vosaline_ic[i,:,...]):
# for l,m in enumerate(vosaline_ic[i,p,:]):
# if vosaline_ic[i,p,l] == 0:
# vosaline_ic[i,p,l] = vosaline_ic[i-1,p,l]
# else:
# continue
# +
# Write the interpolated T/S initial-condition file for the WCVI domain.
file_temp = nc.Dataset('/ocean/ssahu/CANYONS/wcvi/initial_conditions/West_coast_temperature_salinity_nomask_JP.nc', 'w', zlib=True)
# dataset attributes
nc_tools.init_dataset_attrs(
    file_temp,
    title='Temperature and salinity Initial Condition',
    notebook_name='Making_IC_from_JP',
    nc_filepath='/ocean/ssahu/CANYONS/wcvi/initial_conditions/West_coast_temperature_salinity_nomask_JP.nc',
    comment='Temperature and salinity from JP Model, preliminary_grid; used at all grid points')
# Dimensions sized from the interpolated array: (deptht, yb, xb).
file_temp.createDimension('xb', votemper_ic.shape[2]);
file_temp.createDimension('yb', votemper_ic.shape[1]);
file_temp.createDimension('deptht', votemper_ic.shape[0]);
file_temp.createDimension('time_counter', None);
# Coordinate variables.
nav_lat = file_temp.createVariable('nav_lat', 'float32', ('yb','xb'));
nav_lat.long_name = 'Latitude';
nav_lat.units = 'degrees_north';
nav_lon = file_temp.createVariable('nav_lon', 'float32', ('yb','xb'));
nav_lon.long_name = 'Longitude';
nav_lon.units = 'degrees_east';
deptht = file_temp.createVariable('deptht', 'float32', ('deptht'));
deptht.long_name = 'Vertical T Levels';
deptht.units = 'm';
deptht.positive = 'down';
time_counter = file_temp.createVariable('time_counter', 'float32', ('time_counter'));
time_counter.units = 's';
time_counter.long_name = 'time';
time_counter.calendar = 'noleap';
# Data variables.
votemper = file_temp.createVariable('votemper', 'float32', ('time_counter','deptht','yb','xb'));
votemper.units = 'degC'
votemper.long_name = 'Temperature';
votemper.grid = 'WCVI';
vosaline = file_temp.createVariable('vosaline', 'float32', ('time_counter','deptht','yb','xb'));
vosaline.units = 'PSU';
vosaline.long_name = 'Practical Salinity';
vosaline.grid = 'WCVI';
# Fill coordinates and the single (time=0) snapshot of each field.
nav_lat[:] = gphit_wcvi[:];
nav_lon[:] = glamt_wcvi[:];
deptht[:] = deptht_JP[:];
time_counter[0] = 1;
votemper[0,:] = votemper_ic[:]
vosaline[0,:]= vosaline_ic[:]
file_temp.close()
# -
# Post-write sanity checks on the in-memory arrays.
votemper_ic[0,...]
np.where(votemper_ic[...] == 0)
deptht.shape
print(votemper_ic.shape)
# ### FIx the zero data points in IC file
# +
# for i in np.arange(len(votemper_ic[:,...])):
# votemper_ic[np.where(votemper_ic[i,...] == 0)] = votemper_ic[i-1,...];
# -
vosaline_ic[0,...]
votemper_ic[0,...]
|
grid/Making_IC_from_JP.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import warnings
warnings.simplefilter("ignore")
# Load the symptoms table and drop the index column pandas re-exported.
df=pd.read_csv("Symptoms.csv")
df.head()
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
df.head()
df["Disease"].unique()
# we have a set of data in disease.csv which holds the output disease labels
df.isnull().values.any()
# we need to create a dictionary to map the data into the format of disease.csv
disease=pd.read_csv("./Train_Test_data/disease.csv")
disease.head()
def creation_of_map(x):
    """Map each disease name to a sequential integer index (input order preserved)."""
    return {disease_name: index for index, disease_name in enumerate(x)}
map_data=creation_of_map(disease["Disease"])
print(map_data)
def funtion(x):
    """Trim whitespace and normalise one misspelled disease label."""
    label = x.strip()
    if label == "Dimorphic hemmorhoids(piles)":
        return "Dimorphic hemorrhoids(piles)"
    return label
# Normalise labels, encode them to the integer ids from disease.csv, then
# split and standardise features.
df["Disease"]=df["Disease"].apply(funtion)
# Look up the integer id for a disease name in the map built above.
def convert_to_int(x):
    return int(map_data[x])
df["Disease"]=df["Disease"].apply(convert_to_int)
df.head()
df["Disease"].unique()
x = df.drop(["Disease"],axis=1)
y = df["Disease"]
# Splitting the dataset into training and test set.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test= train_test_split(x, y, test_size= 0.1, random_state=0)
#feature Scaling (fit on train only; test uses the train statistics)
from sklearn.preprocessing import StandardScaler
st_x= StandardScaler()
x_train= st_x.fit_transform(x_train)
x_test= st_x.transform(x_test)
from sklearn.neighbors import KNeighborsClassifier
knn_clf=KNeighborsClassifier()
knn_clf.fit(x_train,y_train)
ypred=knn_clf.predict(x_test)
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
result = confusion_matrix(y_test, ypred)
print("Confusion Matrix:")
print(result)
result1 = classification_report(y_test, ypred)
print("Classification Report:",)
print (result1)
result2 = accuracy_score(y_test,ypred)
print("Accuracy:",result2)
print(ypred)
import seaborn as sn
df_cm = pd.DataFrame(result, range(41),range(41))
plt.figure(figsize = (10,7))
sn.heatmap(df_cm, annot=True)
|
KNN_Algorithm.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.1 64-bit (''ex_design_analysis'': pipenv)'
# metadata:
# interpreter:
# hash: d93d3809a412eeca67f3d81705e284a9fa16a5e112e379b94b99b867ad05122c
# name: python3
# ---
# # Two Paired Samples
# Alternative of paired sample t-test
# +
# Enable the commands below when running this program on Google Colab.
# # !pip install arviz==0.7
# # !pip install pymc3==3.8
# # !pip install Theano==1.0.4
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
import pymc3 as pm
import theano.tensor as tt
import statistics
import math
plt.style.use('seaborn-darkgrid')
np.set_printoptions(precision=3)
pd.set_option('display.precision', 3)
# -
# Weight of 20 women before/after the diet program.
# The two lists are paired: index i is the same participant before and after.
WEIGHT_BEFORE = [53.1, 51.5, 45.5, 55.5, 49.6, 50.1, 59.2, 54.7, 53.0, 48.6, 55.3, 52.6, 51.7, 48.6, 56.4, 42.9, 50.3, 42.4, 51.2, 39.1]
WEIGHT_AFTER = [48.3, 45.2, 46.6, 56.6, 41.2, 44.6, 51.9, 55.5, 45.4, 47.6, 50.6, 54.5, 49.0, 43.9, 53.8, 40.1, 52.8, 35.3, 55.6, 38.0]
# +
# Descriptive statistics; pstdev/pvariance are the *population* (n-divisor)
# versions, not the sample (n-1) estimators.
# before
print('[before]')
print('mean: {:.3f}'.format(statistics.mean(WEIGHT_BEFORE)))
print('standard deviation: {:.3f}'.format(statistics.pstdev(WEIGHT_BEFORE)))
print('variance: {:.3f}'.format(statistics.pvariance(WEIGHT_BEFORE)))
print('25, 50, 75%: {}'.format(np.percentile(WEIGHT_BEFORE, [25, 50, 75])))
print()
# after
print('[after]')
print('mean: {:.3f}'.format(statistics.mean(WEIGHT_AFTER)))
print('standard deviation: {:.3f}'.format(statistics.pstdev(WEIGHT_AFTER)))
print('variance: {:.3f}'.format(statistics.pvariance(WEIGHT_AFTER)))
print('25, 50, 75%: {}'.format(np.percentile(WEIGHT_AFTER, [25, 50, 75])))
# -
# Visualize the data (boxplot)
plt.boxplot([WEIGHT_BEFORE, WEIGHT_AFTER], labels=['Before', 'After'])
plt.ylabel('Weight')
plt.show()
# +
# Visualize the data (correlation)
# Scatter of paired weights; the y=x line separates participants who lost
# weight (below the line) from those who gained (above).
fig, ax = plt.subplots()
ax.scatter(WEIGHT_BEFORE, WEIGHT_AFTER)
plt.xlabel('before (kg)')
plt.ylabel('after (kg)')
lims = [
    np.min([ax.get_xlim(), ax.get_ylim()]),
    np.max([ax.get_xlim(), ax.get_ylim()])
]
ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
ax.set_aspect('equal')
plt.show()
# -
# Summary
data = pd.DataFrame([WEIGHT_BEFORE, WEIGHT_AFTER], index=['Before', 'After']).transpose()
data.describe()
# +
# mean deviation data (each observation minus its group mean)
before_mean = sum(WEIGHT_BEFORE) / len(WEIGHT_BEFORE)
mdd_before = list(map(lambda x: x - before_mean, WEIGHT_BEFORE))
after_mean = sum(WEIGHT_AFTER) / len(WEIGHT_AFTER)
mdd_after = list(map(lambda x: x - after_mean, WEIGHT_AFTER))
# covariance (population, n divisor)
s = sum(list(b * a for b, a in zip(mdd_before, mdd_after))) / len(mdd_after)
print('Covariance: {:.3f}'.format(s))
# correlation coefficient (Pearson r, via standardized deviations)
weight_before_std = list(map(lambda x: x / statistics.pstdev(WEIGHT_BEFORE), mdd_before))
weight_after_std = list(map(lambda x: x / statistics.pstdev(WEIGHT_AFTER), mdd_after))
r = sum(list(b * a for b, a in zip(weight_before_std, weight_after_std))) / len(weight_before_std)
print('Correlation coefficient: {:.3f}'.format(r))
# 2x2 population covariance matrix used below for reference
v_before = statistics.pvariance(WEIGHT_BEFORE)
v_after = statistics.pvariance(WEIGHT_AFTER)
cov = np.array([[v_before, s], [s, v_after]])
print('Covariance matrix:\n', cov)
# -
# ## Bayesian analysis
# Bivariate-normal model of (before, after) with an LKJ prior on the
# correlation; this is the Bayesian alternative to a paired t-test.
with pm.Model() as mv_model:
    # Prior distribution
    mu = pm.Normal('mu', 0, 100, shape=2)
    sigma = pm.Uniform('sigma', 0, 100, shape=2)
    # Build the 2x2 covariance matrix from the LKJ correlation and sigma.
    # https://stackoverflow.com/questions/45534752/model-multivariate-normal-with-separate-means-dimension-mismatch-error
    C_triu = pm.LKJCorr('omega', n=2, p=2)
    C = tt.fill_diagonal(C_triu[np.zeros((2, 2), dtype=np.int64)], 1)
    sigma_diag = tt.nlinalg.diag(sigma)
    cov = tt.nlinalg.matrix_dot(sigma_diag, C, sigma_diag)
    # Likelihood: each row of the observed matrix is one participant's pair.
    y_pred = pm.MvNormal('y_pred', mu=mu, cov=cov, observed=np.stack((WEIGHT_BEFORE, WEIGHT_AFTER)).T)
    # Difference of average values (before minus after; positive = weight loss)
    delta_mu = pm.Deterministic('mu1 - mu2', mu[0] - mu[1])
    trace = pm.sample(21000, chains=5)
# Discard the first 1000 draws of each chain as burn-in.
chain = trace[1000:]
pm.traceplot(chain)
plt.show()
pm.summary(chain)
pm.plot_posterior(chain['mu1 - mu2'], credible_interval=0.95, point_estimate='mode')
plt.xlabel(r'$\mu$1 - $\mu$2')
plt.show()
# ### RQ1: Probability that the population mean weight after the program is lower than the mean weight before
#
# mu[:,0] is "before", mu[:,1] is "after"; this is the same quantity as the
# stored deterministic 'mu1 - mu2' (see commented alternative below).
print('p(mu1 - mu2 > 0) = {:.3f}'.format((chain['mu'][:,0] - chain['mu'][:,1] > 0).mean()))
# print('p(mu1 - mu2 > 0) = {:.3f}'.format((chain['mu1 - mu2'] > 0).mean()))
# ### RQ2: By how many kg do the mean weights differ between after- and before-program participants, and within what range can we be confident about the loss? Answer with 95% credibility.
print('Point estimation (difference of mean values): {:.3f}kg'.format(chain['mu1 - mu2'].mean()))
print('Point estimation (standard deviation): {:.3f}kg'.format(chain['mu1 - mu2'].std()))
hpd_0025 = np.quantile(chain['mu1 - mu2'], 0.025)
hpd_0975 = np.quantile(chain['mu1 - mu2'], 0.975)
print('Credible Interval (95%): ({:.3f}, {:.3f})'.format(hpd_0025, hpd_0975))
# ### RQ3: At least how much weight difference is there — and at most how much can be expected — between after- and before-program participants? Answer with 95% credibility (one-sided quantiles).
hpd_005 = np.quantile(chain['mu1 - mu2'], 0.05)
hpd_095 = np.quantile(chain['mu1 - mu2'], 0.95)
print('At most (95%): {:.3f}kg'.format(hpd_095))
print('At least (95%): {:.3f}kg'.format(hpd_005))
# ### RQ4: One wants to participate if the probability of losing more than 2kg (difference of means) exceeds 70%. Should one participate or pass?
print('p(mu1 - mu2 > 2kg) = {:.3f}'.format((chain['mu1 - mu2'] > 2).mean()))
|
src/bayes/mean/two/two_paired_samples.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Ruby 2.3.3
# language: ruby
# name: ruby
# ---
# load gemfile ruby_circuits.rb
# NOTE(review): the required path spells 'ruby_ciruits' while this comment says
# 'ruby_circuits' — confirm which spelling matches the file on disk before "fixing".
require '../../../../../lib/ruby_ciruits'
|
examples/jupyter_notebook/digital_circuits/circuit_helpers/connectors/Connector.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pointCollection as pc
import matplotlib.pyplot as plt
import os
# %load_ext autoreload
# %autoreload 2
# The simplest way to create a data object is from a dictionary, using the from_dict() method:
x=np.arange(10)
y=x**2
z=y
D = pc.data().from_dict({'x':x,'y':y,'z':z})
print(D)
plt.figure(1); plt.scatter(D.x, D.y, c=D.z)
# It is also possible to create a data object from a list of other data objects using the from_list() method. The resulting object will have all of the fields from all of the items in the list, with missing fields filled in with NaNs
# D1 deliberately carries an extra field 't' that D lacks, to demonstrate the NaN fill.
D1 = pc.data().from_dict({'x':-x,'y':-y,'z':z,'t':x-2})
D2 = pc.data().from_list([D, D1])
print(D2)
print("D2.t is:")
print(D2.t)
plt.figure(2); plt.scatter(D2.x, D2.y, c=D2.z)
# Data structures can also be sliced using bracket notation, which is an alternative for the copy_subset() method. If the data() structure has the columns keyword set, then a 1-d slice index will return a data structure with dimensions n x N_cols, otherwise it will be 1-D.
D3=D2[np.abs(D2.x)< 4]
print(D3)
plt.figure(3); plt.scatter(D3.x, D3.y, c=D3.z)
# _data_ objects can be written to hdf5 files with the to_h5() method, and the destination group is specified with the group keyword. Reading from a file uses the from_h5() method.
# Round-trip demo: write D2 into group 'dummy', read it back as D4.
D2.to_h5('data_test.h5', replace=True, group='dummy')
D4=pc.data().from_h5('data_test.h5', group='dummy')
print("wrote data:")
print(D2)
print("read data:")
print(D4)
# Data types other than the basic _data_ class are added by introducing subclasses into the pointCollection module. One example is the ATL06 data type:
ATL06_file='../test_data/ATL06_20190205041106_05910210_209_01.h5'
D_06=pc.ATL06.data().from_h5(ATL06_file)
print(D_06)
# _data_ objects have a get_xy() method that creates x and y fields based on latitude and longitude fields. It takes an argument specifying the coordinate system, which can either be a proj4 string (as a keyword or non-keyword argument) or an EPSG number (as a keyword)
# EPSG 3031 = Antarctic Polar Stereographic.
D_06.get_xy(EPSG=3031)
# equivalent to D_06.get_xy(+proj=stere +lat_0=-90 +lat_ts=-71 +lon_0=0 +k=1 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs')
plt.figure()
plt.plot(D_06.x, D_06.y, '.'); plt.xlabel('x'); plt.ylabel('y')
plt.axis('equal');
# ### Existing formats
#
# Currently, subclasses of data include:
#
# ATL06: ICESat-2 elevation data
#
# ATM_Qfit: scanning laser altimetry data
#
# indexed_H5: a generic class for storing data in hdf files that include small tiles that can be read and written quickly
#
# grid: gridded data that can handle hdf5 grids and geotifs
#
# Each format defines the default fields to be read, adds keyword options to the from_h5() method, and (sometimes) adds methods to the data class.
# ### Choosing fields to be read
#
# If you don't want to read all of the fields in the defaults defined by data subclasses, and just want to read data from and HDF file, you can specify the fields using a dictionary in the field_dict keyword, where the keys to the dictionary define the groups to be read and the (list) entries give the fields.
Dsub=pc.data().from_h5(ATL06_file, field_dict={'gt1l/land_ice_segments/':['h_li', 'segment_id', 'atl06_quality_summary']})
# Keep only segments flagged as good quality (summary flag == 0).
Dsub.index(Dsub.atl06_quality_summary==0)
plt.plot(Dsub.segment_id, Dsub.h_li,'.')
|
notebooks/point_demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:qiskit]
# language: python
# name: conda-env-qiskit-py
# ---
# # Task 1
# +
import qiskit
import numpy as np
from numpy import pi
from qiskit import BasicAer
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute
from qiskit.quantum_info import Statevector
from qiskit.visualization import plot_bloch_multivector, plot_histogram
# %matplotlib inline
# -
# Local noise-free QASM simulator used for all shot-based runs in this notebook.
backend = BasicAer.get_backend('qasm_simulator')
# ## Part 1
# #### Provide a variational (also called parametric) circuit which is able to generate the most general 1 qubit state. By most general 1 qubit state we mean that there exists a set of the parameters in the circuit such that any point in the Bloch sphere can be reached. Check that the circuit works correctly by showing that by varying randomly the parameters of your circuit you can reproduce correctly the Bloch sphere.
#
# In the function below, r_x and r_z are parameters to the circuit. Varying r_x and r_z changes the rotation on the x-axis and z-axis respectively which allows any point in the Bloch sphere to be reached.
def generate_qubit_state(rx_radians: float = None, rz_radians: float = None) -> Statevector:
    """Generate a general single-qubit state using Rx and Rz gates.

    Generates a random qubit state for any parameter left as None.

    Args:
        rx_radians: Rotation about the X-axis in radians. Defaults to a
            random value in [-pi, pi].
        rz_radians: Rotation about the Z-axis in radians. Defaults to a
            random value in [-pi, pi].

    Returns:
        Statevector of the prepared single-qubit state.
    """
    # FIX: use `is None` rather than truthiness — the original `if not rx_radians:`
    # treated an explicit 0.0 rotation as "missing" and replaced it with a random
    # angle, which silently randomized the (0, 0) starting point passed to the
    # optimizers in Part 2.
    if rx_radians is None:
        rx_radians = np.random.uniform(-1, 1) * pi
    if rz_radians is None:
        rz_radians = np.random.uniform(-1, 1) * pi
    # create circuit
    qreg_q = QuantumRegister(1, 'q')
    circuit = QuantumCircuit(qreg_q)
    # can also use a single U gate
    circuit.rx(rx_radians, qreg_q[0])
    circuit.rz(rz_radians, qreg_q[0])
    return Statevector.from_instruction(circuit)
# generate 5 random qubit states and visualize bloch spheres
# (tensor them together so a single 5-qubit statevector can be plotted)
state = generate_qubit_state()
for _ in range(4):
    state = state.tensor(generate_qubit_state())
plot_bloch_multivector(state)
# ## Part 2
# #### Use the circuit built in step 1) and, using the SWAP test, find the best choice of your parameters to reproduce a randomly generated quantum state made with 1 qubit.
def swap_test(state_1: Statevector, state_2: Statevector, backend=backend, shots=1024) -> float:
    """Swap test: if state_1 and state_2 are orthogonal Pr(q[0]=0) = 1/2, otherwise if q[1] and q[2] are equal Pr(q[0]=0) = 1

    Args:
        state_1, state_2: single qubit states to be compared
        backend: backend to execute on (defaults to the module-level simulator)
        shots: number of measurement shots

    Returns:
        Estimated Pr(q[0]=0), i.e. (counts of '0') / shots.
    """
    assert state_1.num_qubits == 1 and state_2.num_qubits == 1
    qreg_q = QuantumRegister(3, 'q')
    creg_c = ClassicalRegister(1, 'c')
    circuit = QuantumCircuit(qreg_q, creg_c)
    # q[1], q[2] hold the two states under comparison; q[0] is the ancilla.
    circuit.initialize(state_1.data, 1)
    circuit.initialize(state_2.data, 2)
    circuit.h(qreg_q[0])
    circuit.cswap(qreg_q[0], qreg_q[1], qreg_q[2])
    circuit.h(qreg_q[0])
    circuit.measure(qreg_q[0], creg_c[0])
    results = execute(circuit, backend=backend, shots=shots).result()
    answer = results.get_counts()
    # FIX: use .get so a counts dict without a '0' key (possible when every
    # shot happened to measure '1') returns 0.0 instead of raising KeyError.
    return answer.get('0', 0) / shots
# Sanity check: compare a random state against |0>.
random_state = generate_qubit_state()
swap_test(random_state, Statevector([1,0]))
# To reproduce a given qubit state using the 2 parameter variational circuit, I cast this as a minimization problem over rx_radians and rz_radians where to goal is to minimize the function to 0. At first I thought about running scalar optimization / binary search one variable at a time, but it may not always work since the optimal value for one parameter may not correspond to the optimal pair of parameters.
from scipy import optimize
def minimize_function_wrapper(params: np.ndarray, reference_state, shots=256):
    """Optimizer objective: 0 when the parametrized state matches reference_state.

    params[0]/params[1] are the Rx/Rz rotation angles fed to the variational
    circuit; the swap test's Pr(0) is 1 for identical states, so 1 - Pr(0)
    is minimized at a perfect match.
    """
    rx_angle, rz_angle = params[0], params[1]
    candidate = generate_qubit_state(rx_angle, rz_angle)
    return 1 - swap_test(reference_state, candidate, shots=shots)
# First, I try the methods in scikit.optimize.minimize. Powell seems to work the best from my limited testing, but still requires 100-200 function evaluations and doesn't always succeed. Using bounds with Powell method requires scipy >= 1.5
# Powell direction-set minimization from the (0, 0) starting point.
# (bounds with method='Powell' require scipy >= 1.5, as noted above)
results = optimize.minimize(minimize_function_wrapper, (0, 0), args=(random_state, 256),
                            method='Powell', bounds=((-pi, pi), (-pi, pi)), options={'disp': True, 'ftol': 1e-9})
# Next, I try differential evolution. This takes a lot of function evaluations (500+) but pretty much always succeeds with 100% accuracy. Setting maxiter=3 will bring down the function evaluations to about 150 and still retain reasonable accuracy.
results = optimize.differential_evolution(minimize_function_wrapper, args=(random_state, 256), bounds=((-pi, pi), (-pi, pi)))
results
# Lastly, I try minimizing the function using bayesian optimization, which has high (but not perfect accuracy) and only requires less than 50 function calls, but seems to require more shots per function evaluation to be accurate.
from skopt import gp_minimize
results = gp_minimize(lambda x: minimize_function_wrapper(x, random_state, 1024), ((-pi, pi), (-pi, pi)), n_calls=32)
# Rebuild the best-found state and compare it with the target visually and via swap test.
reproduced_state = generate_qubit_state(results.x[0], results.x[1])
plot_bloch_multivector(random_state.tensor(reproduced_state))
swap_test(random_state, reproduced_state)
# ## Part 3
# #### Suppose you are given with a random state, made by N qubits, for which you only know that it is a product state and each of the qubits are in the state | 0 > or | 1>. Perform a qubit by qubit SWAP test to reconstruct the state. This part of the problem can be solved via a simple grid search.
# I am a bit confused why we can't simply measure the individual qubits if they are in |0> or |1> state. But lets attempt the question as given and first create a function that generates a random product state of n qubits in |0> or |1> state. If the state is in any product state, then we can use the previous routine in part 2 to determine the correct choice of parameters to reconstruct the state as well.
def generate_random_computational_product_state(num_qubits: int) -> Statevector:
    """Build a random product state whose qubits are each |0> or |1>.

    Each qubit is drawn independently and uniformly from the computational
    basis, then tensored together into a single Statevector.
    """
    computational_basis = ([1, 0], [0, 1])
    draws = [Statevector(computational_basis[np.random.randint(0, 2)])
             for _ in range(num_qubits)]
    product_state = draws[0]
    for qubit_state in draws[1:]:
        product_state = product_state.tensor(qubit_state)
    return product_state
# Random 5-qubit |0>/|1> product state used as the "unknown" input below.
given_state = generate_random_computational_product_state(num_qubits=5)
plot_bloch_multivector(given_state)
def reconstruct_computational_product_state(given_state: Statevector, shots=1024) -> Statevector:
    """Initialize given state of n qubits in product state of |0> and |1>s in quantum circuit,
    and perform qubit by qubit swap test to reconstruct the state

    Args:
        given_state: product state of |0>/|1> qubits to reconstruct
        shots: measurement shots per per-qubit swap test

    Returns:
        Statevector equal to the reconstructed product state.
    """
    qreg_q = QuantumRegister(given_state.num_qubits + 2, 'q')
    creg_c = ClassicalRegister(1, 'c')
    circuit = QuantumCircuit(qreg_q, creg_c)
    # initialize first n qubits to the given state
    circuit.initialize(given_state.data, list(range(given_state.num_qubits)))
    # The last two qubits are the swap-test ancilla and the |0> comparison qubit.
    measurement_qubit_index = given_state.num_qubits
    comparison_qubit_index = given_state.num_qubits + 1
    reconstructed_state: Statevector = None
    for i in range(given_state.num_qubits):
        # qubit by qubit swap test
        circuit.h(qreg_q[measurement_qubit_index])
        circuit.cswap(qreg_q[measurement_qubit_index], qreg_q[i], qreg_q[comparison_qubit_index])
        circuit.h(qreg_q[measurement_qubit_index])
        circuit.measure(qreg_q[measurement_qubit_index], creg_c[0])
        results = execute(circuit, backend=backend, shots=shots).result()
        answer = results.get_counts()
        # FIX: .get avoids a KeyError when no shot measured '0'; FIX: compare
        # reconstructed_state against None explicitly instead of relying on the
        # truthiness of a Statevector object.
        if answer.get('0', 0) == shots:  # if Pr(q[measurement_qubit]=0) = 1, q[i] and q[comparison_qubit] are equal
            if reconstructed_state is None:
                reconstructed_state = Statevector([1, 0])
            else:
                reconstructed_state = Statevector([1, 0]).tensor(reconstructed_state)
        else:  # qubit has to be in |1> state as given in the question
            if reconstructed_state is None:
                reconstructed_state = Statevector([0, 1])
            else:
                reconstructed_state = Statevector([0, 1]).tensor(reconstructed_state)
        # reset ancilla and comparison qubit to |0> before the next iteration
        circuit.initialize([1, 0], measurement_qubit_index)
        circuit.initialize([1, 0], comparison_qubit_index)
    return reconstructed_state
# Reconstruct the hidden product state and visualize it for comparison.
reconstructed_state = reconstruct_computational_product_state(given_state, shots=1024)
plot_bloch_multivector(reconstructed_state)
|
swap_test.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# +
#Summarize, explore, plot Zooplankton data from SoG surveys
# by <NAME>
# Last modified Jan 20 2022
# to-do:
# section 1
# supplemental table 1 from
# https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0245941#sec023
# MODIFIED/Perry2021_Supp1_Tab1.csv
# section 2
# look at the dominant species of each functional group
#raw downloaded from: https://open.canada.ca/data/en/dataset/2822c11d-6b6c-437e-ad65-85f584522adc
#<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2021). Zooplankton variability in the Strait of Georgia, Canada, and relationships with the marine survivals of Chinook and Coho salmon. PLoS ONE, 16(1 January), e0245941. https://doi.org/10.1371/journal.pone.0245941
# to do: should adjust for seasonal bias as did Perry et al (2021) by determining season
# calculating seasonal average biomass and then calculating average annual biomass from those.
# Their cutoff for inclusion in seasonal stats were
# +
library(tidyverse)
library(dplyr)
library(ggplot2)
library(stats)
library(treemapify)
library(zoo)
library(RColorBrewer)
#install.packages("treemapify", repos='http://cran.us.r-project.org')
# -
# # Section 1 - Pre-Aggregated Data Analysis
# ### using zoop groups as defined in Perry et al (2021)
# supplemental table 1 from
# <br> https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0245941#sec023
# <br>
# <br>MODIFIED/Perry2021_Supp1_Tab1.csv
#
# +
# note no bottom depths < 60 m in these data (already filtered)
path = "C://Users//Greig//Sync//6. SSMSP Model//Model Greig//Data//4. Zooplankton//Zoop_Perryetal_2021//MODIFIED//"
file = "Perry2021_Supp2_Tab1.csv"
path_out_TS = "C://Users//Greig//Sync//6. SSMSP Model//Model Greig//Data//4. Zooplankton//Zoop_Perryetal_2021//MODIFIED//"
a1996_2018_supp2_df <- read.csv(paste(path, file,sep=""))
# create survey depth difference field for density calc following Perry et al pers. comm. Nov 2021
# (used later to depth-integrate mg m-3 concentrations into g m-2 densities)
a1996_2018_supp2_df['depthrange'] = abs(a1996_2018_supp2_df['startz.m'] - a1996_2018_supp2_df['endz.m'])
#print(colnames(a1996_2018_supp2_df))
head(a1996_2018_supp2_df[21:46])
#calc WW biomass density g m-2 from concentration as per Perry et al (pers comm)
# using assumption that all zoop are 85% water (mult by 6.66) and jelly as 95% water
# Group-specific dry-weight to wet-weight multipliers, with literature sources:
DW_to_WW_zoop = 6.66 # default
DW_to_WW_scyphozoa = 25 # Mean from Larson, 1986 (tab 2): 5% DW; Acuna, Lopez-Urrutia, Colin 2011 - avg of aurelia (52) and cyanea (34) WW:DW
DW_to_WW_medusae = 25 # Mean from Larson, 1986 (Tab 2): 4% DW; Acuna, Lopez-Urrutia, Colin 2011 & Larson, 19- avg of Aequorea (54), WW:DW
DW_to_WW_ctenop = 25 # Based on Larson, 1986, Pleurobrachia brachei 96 percent water
DW_to_WW_fish = 5 # Omori 1969 Tab
DW_to_WW_lgcope = 5 # avg from Omori of several lg copepods
DW_to_WW_medsmcope = 7 # Omori 1969, avg across several
DW_to_WW_chaetognaths = 20 # Nakamura et al, 2007
DW_to_WW_mysiids = 5.5 # Omori 1969
DW_to_WW_amphipod = 4 # Omori 1969 (avg of gammarid / hyperiid)
DW_to_WW_insect = 3.6 # Omori 1969 - water strider
DW_to_WW_larvacean = 25 # guesstimate
DW_to_WW_decapods = 10 # Larson, 1986 in Moriarty, 2009
DW_to_WW_euphs = 7.5 # avg of Omori 1969 (4.7) and Moriarty, 2009 (10)
DW_to_WW_ostrac = 4.1 #Nakamura et al, 2007
DW_to_WW_siponophora = 28 #Moriarty, 2009
DW_to_WW_others = 10 #others
mg_to_g = 0.001
# col indices for first and last indiv groups
col_beg = 21
col_end = 44
########################## WW B dens ####################################
###################################
# convert from DW to WW and to B per square m (depth int following Perry et al)
# gelatinous groups (index)
# Siphonophorae (44), Scyphozoa (43), Ctenophora (30), Larvacea (33), Medusae (34)
# NOTE(review): the column indices below are positional; they assume the
# supplemental CSV column order is unchanged — confirm if the file is updated.
WW_B_conc = a1996_2018_supp2_df %>%
  mutate(across(c(21:22), ~(as.numeric(.) * DW_to_WW_amphipod))) %>%
  mutate(across(c(24), ~(as.numeric(.) * DW_to_WW_lgcope))) %>%
  mutate(across(c(25:26,37), ~(as.numeric(.) * DW_to_WW_medsmcope))) %>%
  mutate(across(c(28), ~(as.numeric(.) * DW_to_WW_chaetognaths))) %>%
  mutate(across(c(30), ~(as.numeric(.) * DW_to_WW_ctenop))) %>%
  mutate(across(c(31), ~(as.numeric(.) * DW_to_WW_euphs))) %>%
  mutate(across(c(32), ~(as.numeric(.) * DW_to_WW_fish))) %>%
  mutate(across(c(33), ~(as.numeric(.) * DW_to_WW_larvacean))) %>%
  mutate(across(c(34), ~(as.numeric(.) * DW_to_WW_medusae))) %>%
  mutate(across(c(35), ~(as.numeric(.) * DW_to_WW_mysiids))) %>%
  mutate(across(c(38), ~(as.numeric(.) * DW_to_WW_ostrac))) %>%
  mutate(across(c(43), ~(as.numeric(.) * DW_to_WW_scyphozoa))) %>%
  mutate(across(c(44), ~(as.numeric(.) * DW_to_WW_siponophora))) %>%
  mutate(across(c(23,27,29,36,39,40:42), ~(as.numeric(.) * DW_to_WW_others)))
# convert from WW mg m-3 to WW g m-2
#WW_Bdens_df = a1996_2018_supp2_df %>%
#  mutate(across(.cols = c(col_beg:col_end),
#                .fns = ~. * depthrange * mg_to_g)) %>%
#  mutate(Total.Biomass = rowSums(. [col_beg:col_end]))
WW_Bdens_df = WW_B_conc %>%
  # convert mg to g
  mutate(across(.cols = c(col_beg:col_end),
                .fns = ~. * mg_to_g)) %>%
  # convert g m-3 to g m-2
  mutate(across(.cols = c(col_beg:col_end),
                .fns = ~. * depthrange)) %>%
  mutate(Total.Biomass = rowSums(. [col_beg:col_end])) %>%
  mutate(Total.Biomass.NoEuphs = rowSums(. [c(col_beg:30,32:col_end)])) %>%
  # crustaceans and other hard bodied groups (no fish, euphs, gelat)
  mutate(Total.Biomass.Hard = rowSums(. [c(21,22,24,25,26,28,29,35,36,37,38,40,41,42)])) %>%
  # soft-bodied groups (no fish, euphs, gelat)
  mutate(Total.Biomass.Soft = rowSums(. [c(27,30,33,34,39,43,44)]))
# +
# problematic outlier? for euphausiids lead to potentially nutty WW estimates (2500 mt / km +)
badones = WW_Bdens_df %>% filter(Total.Biomass > 1000)
badones[c(1:5,31:47)]
# dry weight mg / m-3 estimate that's probably a mistake
# NOTE(review): 'ï..key' is the first CSV column name mangled by a UTF-8 BOM
# on read.csv; confirm this still matches if the file is re-exported.
bighaul = a1996_2018_supp2_df %>% filter(ï..key == "<KEY>")
bighaul["euphsWW_B_mt_km2"] = bighaul["Euphs"] * mg_to_g * DW_to_WW_euphs * bighaul["depthrange"]
bighaul[c(31,45:47)]
# -
# +
# set seasons following Perry et al 2021
WW_Bdens_df = WW_Bdens_df %>%
  mutate(season = case_when(mon == 12 | mon == 1 | mon == 2 ~ "winter",
                            mon == 3 | mon == 4 | mon == 5 ~ "spring",
                            mon == 6 | mon == 7 | mon == 8 ~ "summer",
                            mon == 9 | mon == 10 | mon == 11 ~ "fall"))
#head(a1996_2018_supp2_df)
#nrow(a1996_2018_supp2_df)
#colnames(a1996_2018_supp2_df)
head(WW_Bdens_df[21:45])
########################## WW B Stats ####################################
###################################
# Overall descriptive stats of the depth-integrated wet-weight biomass.
print("Statistics for total.B (transformed to WW)")
summary(WW_Bdens_df$Total.Biomass)
sd(WW_Bdens_df$Total.Biomass)
print("Statistics for total.B (no Euphs)")
summary(WW_Bdens_df$Total.Biomass.NoEuphs)
sd(WW_Bdens_df$Total.Biomass.NoEuphs)
# by season
print("stats of total B by season - not for B param should 'stratify' as in next bit")
WW_Bdens_df %>%
  group_by(season) %>%
  mutate(meanB = mean(Total.Biomass)) %>%
  mutate(stdev = sd(Total.Biomass)) %>%
  mutate(meanlogB = mean(log(Total.Biomass))) %>%
  mutate(stddevlogB = sd(log(Total.Biomass))) %>%
  dplyr::select(season, stdev, meanB, meanlogB, stddevlogB) %>%
  summarise(meanB_ = mean(round(meanB, digits=2)),
            stdev = mean(round(stdev, digits=2)),
            meanlogB_ = mean(round(meanlogB, digits=2)),
            stddevlogB_ = mean(round(stddevlogB, digits=2)),
            n_= n()) %>%
  # back-transform log means; logmean_B1 applies the lognormal bias correction
  mutate(logmean_B1 = exp(meanlogB_ + stddevlogB_^2/2)) %>%
  mutate(logmean_B2 = exp(meanlogB_ ))
print("Summarize mean B season and year")
WW_Bdens_df %>%
  group_by(season,yr) %>%
  mutate(meanB = mean(Total.Biomass),
         stdev = sd(Total.Biomass),
         meanlogB = mean(log(Total.Biomass)),
         stddevlogB = sd(log(Total.Biomass)),
         meanB_hard = mean(Total.Biomass.Hard),
         stdev_hard = sd(Total.Biomass.Hard),
         meanB_soft = mean(Total.Biomass.Soft),
         stdev_soft = sd(Total.Biomass.Soft),
         ) %>%
  # Total.Biomass.NoEuphs Total.Biomass.Hard Total.Biomass.Soft
  #mutate(meanB_NoEuphs = ) %>%
  dplyr::select(season, stdev, meanB, meanlogB, stddevlogB, meanB_hard, stdev_hard, meanB_soft, stdev_soft) %>%
  summarise(meanB_ = mean(round(meanB, digits=2)),
            stdev = mean(round(stdev, digits=2)),
            meanlogB_ = mean(round(meanlogB, digits=2)),
            stddevlogB_ = mean(round(stddevlogB, digits=2)),
            meanB_hard_ = mean(round(meanB_hard, digits=2)),
            stdev_hard_ = mean(round(stdev_hard, digits=2)),
            meanB_soft_ = mean(round(meanB_soft, digits=2)),
            stdev_soft_ = mean(round(stdev_soft, digits=2)),
            n_= n()) %>%
  mutate(logmean_B1 = exp(meanlogB_ + stddevlogB_^2/2)) %>%
  mutate(logmean_B2 = exp(meanlogB_ )) %>%
  ungroup() %>%
  # average the season-year means across years to "stratify" by season
  group_by(season) %>%
  mutate(meanB_acrossyrs = mean(meanB_),
         meanB_hard_acrosssyrs = mean(meanB_hard_),
         meanB_soft_acrosssyrs = mean(meanB_soft_)) %>%
  summarise(meanB_acrossyrs_ = mean(round(meanB_acrossyrs, digits=2)),
            meanB_hard_acrosssyrs = mean(round(meanB_hard_acrosssyrs, digits=2)),
            meanB_soft_acrosssyrs = mean(round(meanB_soft_acrosssyrs, digits=2)))
# get mean of each group by season and year
print("Summarize mean B by group, season and year")
seasonal_B_summary = WW_Bdens_df %>%
  dplyr::select(,c(9,21:45,47:50)) %>%
  group_by(season,yr) %>%
  summarise_all(.funs = funs(B = round(mean(., na.rm=TRUE),digits=2)))
#seasonal_B_summary
# WW_Bdens_df[47:49]
#print("investigate single group: ")
#seasonal_B_summary$Euphs_B
#get mean of each group by year
# (averaging the seasonal means rather than raw hauls reduces seasonal
#  sampling bias, per the to-do note at the top of this notebook)
print("summarize mean B by group and year")
B_yr_summary = seasonal_B_summary %>%
  group_by(yr) %>%
  dplyr::select(-season) %>%
  summarise_all(.funs = funs(B = round(mean(., na.rm=TRUE),digits=2))) %>%
  pivot_longer(2:29, names_to = "group", values_to = "mean_B")
#seasonal_B_yr_summary
# seasonal_B_yr_summary can be used as EwE time series
#print("Investigate single group")
#filter(seasonal_B_yr_summary, group == "Euphs_B_B")
# Average B across years
print("Average B by group across years for model B param")
# FIX: the result column is named mean_B and is used below (treemaps) as the
# across-year *average* biomass, but the original code computed sd() here;
# use mean() so the value matches its name and the stated purpose.
yearround_B_avg = B_yr_summary %>%
  group_by(group) %>%
  summarise(mean_B = round(mean(mean_B, na.rm=TRUE),digits=2))
yearround_B_avg
# -
# TS by yr and mo
print("Summarize mean B by group, season and year")
yrmon_mean_df = WW_Bdens_df %>%
  dplyr::select(,c(9,10,21:45,47:49)) %>%
  group_by(yr,mon) %>%
  summarise_all(.funs = funs(B = round(mean(., na.rm=TRUE),digits=2)))
head(yrmon_mean_df)
# haven't got this visual working yet
# NOTE(review): the mutate() below contains Python/pandas code
# (pd.to_datetime / .assign) pasted into R, and it references
# seasonal_B_yr_summary which is not defined (B_yr_summary is) —
# this chunk will error as written; flagged by the author above.
yrmon_mean_df %>%
  mutate(pd.to_datetime(df[['yr', 'mon']].assign(DAY=1))) +
  ggplot( aes(x=yr, y=mean_B, group=group, color=group)) +
  theme(legend.position="bottom") +
  geom_line() +
  scale_x_continuous(breaks = (seq(min(seasonal_B_yr_summary$yr), max(seasonal_B_yr_summary$yr), by = 2)))
# Annual mean biomass of selected groups.
# NOTE(review): scale_x_continuous references seasonal_B_yr_summary, which is
# not defined in this notebook (B_yr_summary is) — confirm before running.
B_yr_summary %>%
  #filter(group != "Total.Biomass_B_B") %>%
  filter(group == 'Total.Biomass.Hard_B_B' | group == 'Total.Biomass.Soft_B_B' |
         group == 'Euphs_B_B' | group == 'Fish_B_B' | group == 'Chaetognatha_B_B') %>%
  ggplot( aes(x=yr, y=mean_B, group=group, color=group)) +
  theme(legend.position="bottom") +
  geom_line() +
  scale_x_continuous(breaks = (seq(min(seasonal_B_yr_summary$yr), max(seasonal_B_yr_summary$yr), by = 2)))
# Same plot for ichthyoplankton only.
B_yr_summary %>%
  #filter(group != "Total.Biomass_B_B") %>%
  filter(group == 'Fish_B_B') %>%
  ggplot( aes(x=yr, y=mean_B, group=group, color=group)) +
  theme(legend.position="bottom") +
  geom_line() +
  scale_x_continuous(breaks = (seq(min(seasonal_B_yr_summary$yr), max(seasonal_B_yr_summary$yr), by = 2)))
# +
# Build a proper Date column (first of the month) for time-series plotting.
yrmon_mean_df$yrmondt = as.Date(paste(yrmon_mean_df$yr, yrmon_mean_df$mon,"01",sep="-"))
# Monthly hard-bodied biomass time series.
yrmon_mean_df %>%
  ungroup() %>%
  select(c("Total.Biomass.Hard_B","Total.Biomass.Soft_B", "yrmondt")) %>%
  ggplot(aes(x=yrmondt, y=Total.Biomass.Hard_B)) +
  theme(legend.position="bottom") +
  geom_line()
# #+
#  scale_x_continuous(breaks = (seq(min(seasonal_B_yr_summary$yr), max(seasonal_B_yr_summary$yr), by = 2)))
# +
library(zoo) # moving averages
# 3/6/12-month centered rolling means of selected groups; only the 6-month
# series are pivoted long and plotted below.
# to make this a nice chart remove / add groups
yrmon_mean_df %>%
  ungroup() %>%
  arrange(yrmondt) %>%
  select(c("Euphs_B","Fish_B","Total.Biomass.Hard_B","Total.Biomass.Soft_B", "yrmondt")) %>%
  dplyr::mutate(TBH_03mo = zoo::rollmean(Total.Biomass.Hard_B, k = 3, fill = NA),
                TBH__06mo = zoo::rollmean(Total.Biomass.Hard_B, k = 6, fill = NA),
                TBH__12mo = zoo::rollmean(Total.Biomass.Hard_B, k = 12, fill = NA),
                TBS__03mo = zoo::rollmean(Total.Biomass.Soft_B, k = 3, fill = NA),
                TBS__06mo = zoo::rollmean(Total.Biomass.Soft_B, k = 6, fill = NA),
                TBS__12mo = zoo::rollmean(Total.Biomass.Soft_B, k = 12, fill = NA),
                Eu__03mo = zoo::rollmean(Euphs_B, k = 3, fill = NA),
                Eu__06mo = zoo::rollmean(Euphs_B, k = 6, fill = NA),
                Eu__12mo = zoo::rollmean(Euphs_B, k = 12, fill = NA),
                Fi__03mo = zoo::rollmean(Fish_B, k = 3, fill = NA),
                Fi__06mo = zoo::rollmean(Fish_B, k = 6, fill = NA),
                Fi__12mo = zoo::rollmean(Fish_B, k = 12, fill = NA)) %>%
  pivot_longer(names_to = "rolling_mean_key",
               values_to = "rolling_mean_value",
               cols = c(TBH__06mo,
                        TBS__06mo,
                        Eu__06mo,
                        Fi__06mo,
                        )) %>%
  ggplot(aes(x=yrmondt, y = rolling_mean_value, color = rolling_mean_key)) +
  theme(legend.position="bottom") +
  geom_line()
# -
# Rolling means for the fish (ichthyoplankton) group only; the 3-month
# series is the one plotted.
yrmon_mean_df %>%
  ungroup() %>%
  arrange(yrmondt) %>%
  select(c("Fish_B","yrmondt")) %>%
  dplyr::mutate(Fi__03mo = zoo::rollmean(Fish_B, k = 3, fill = NA),
                Fi__06mo = zoo::rollmean(Fish_B, k = 6, fill = NA),
                Fi__12mo = zoo::rollmean(Fish_B, k = 12, fill = NA)) %>%
  pivot_longer(names_to = "rolling_mean_key",
               values_to = "rolling_mean_value",
               cols = c(Fi__03mo,
                        )) %>%
  ggplot(aes(x=yrmondt, y = rolling_mean_value, color = rolling_mean_key)) +
  theme(legend.position="bottom") +
  geom_point()
# +
# to do: map to explore some distributions
# aggregate to match groups in EwE for TS
# create TS and export
# Annual means of the two amphipod groups (gammarid and hyperiid).
B_yr_summary %>%
  filter(group == "AmphiGam_B_B" | group == "AmphiHyp_B_B") %>%
  ggplot(aes(x=yr, y = mean_B, color = group)) +
  theme(legend.position="bottom") +
  geom_line()
# +
# pivot wider and export for EwE time series
# Map the taxonomic groups onto the Ecopath-with-Ecosim (EwE) model groups.
ewe_ts = B_yr_summary %>%
  pivot_wider(names_from=group,values_from=mean_B) %>%
  mutate(Z3_Amphipods_B = AmphiGam_B_B + AmphiHyp_B_B) %>%
  mutate(Z5_CalCops_B = CalCops.med_B_B + CalCops.smal_B_B) %>%
  mutate(Z2_Euphausiids = Euphs_B_B) %>%
  mutate(Z4_CopeLg = CalCops.larg_B_B) %>%
  mutate(Z1_ichthyomero = Fish_B_B) %>%
  mutate(G3_Ctenophora = Ctenophora_B_B) %>%
  mutate(G2_Medusae = Medusae_B_B) %>%
  mutate(G1_Scyphozoa = Scyphozoa_B_B)
write.csv(ewe_ts,paste(path_out_TS, "Perry2021_meanB_yr.csv", sep=""),row.names=FALSE)
#amphi_B_yr = B_yr_summary %>%
#  filter(group == "AmphiGam_B_B" | group == "AmphiHyp_B_B") %>%
#  group_by(yr) %>%
#  summarise(amphi_mean_B = round(sum(mean_B, na.rm=TRUE),digits=2))
# create time series for EwE
# monthly is possible but requires interpolating between missing months and adjusting
# for months with too few samples
#ts_f_amphi = "amphi_meanB_yr.csv"
#write.csv(amphi_B_yr,paste(path_out_TS,ts_f_amphi,sep=""),row.names=FALSE)
# -
# Treemap of mean year-round biomass by group, with the aggregate totals
# excluded. (!is.na + !%in% reproduces the original chained != filter exactly,
# including its dropping of NA groups.)
yearround_B_avg %>%
  filter(!is.na(group),
         !group %in% c("Total.Biomass_B_B", "Total.Biomass.NoEuphs_B_B",
                       "Total.Biomass.Soft_B_B", "Total.Biomass.Hard_B_B")) %>%
  ggplot(aes(area = mean_B, fill = group,
             label = paste(str_sub(group, 1, nchar(group) - 4), mean_B, sep = "\n"))) +
  geom_treemap() +
  geom_treemap_text(colour = "white", place = "center", size = 15) +
  theme(legend.position = "bottom")
# Treemap comparing the soft- vs hard-bodied biomass totals only.
yearround_B_avg %>%
  filter(group %in% c("Total.Biomass.Soft_B_B", "Total.Biomass.Hard_B_B")) %>%
  ggplot(aes(area = mean_B, fill = group,
             label = paste(str_sub(group, 1, nchar(group) - 4), mean_B, sep = "\n"))) +
  geom_treemap() +
  geom_treemap_text(colour = "white", place = "center", size = 15) +
  theme(legend.position = "bottom")
# basic meta stats on survey # etc
# FIX: rows() is not a base-R/dplyr function for counting rows — use nrow().
nrow(WW_Bdens_df)
# sample count per year
WW_Bdens_df %>%
  group_by(yr) %>%
  summarise(n = n())
# # Section 2 - Un-aggregated Data Analysis
# ### explore dominant species of each functional group
# <br> raw downloaded from: https://open.canada.ca/data/en/dataset/2822c11d-6b6c-437e-ad65-85f584522adc
# <br> <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2021). Zooplankton variability in the Strait of Georgia, Canada, and relationships with the marine survivals of Chinook and Coho salmon. PLoS ONE, 16(1 January), e0245941. https://doi.org/10.1371/journal.pone.0245941
#
#
# +
# Boldt et al 2018 data shows a mediocre year in 2000 for herring,
# but Perry et al 2021 data show a big year?? Which species??
path = "C://Users//Greig//Sync//6. SSMSP Model//Model Greig//Data//4. Zooplankton//Zoop_Perryetal_2021//ORIGINAL//"
file = "Zoopl_SofG_1996-2018_EN.csv"
df_2 <- read.csv(paste0(path, file))
head(df_2)
# correspondence with <NAME> and <NAME> Nov 16 2021:
# - the mg m-2 is calculated using the tow start - end depth
# Depth range swept by each tow (metres).
df_2$tow_depth_range <- abs(df_2$Tow_start_depth.m.) - abs(df_2$Tow_end_depth.m.)
summary(df_2['tow_depth_range'])
## convert DW to WW using same method as previous section
# create a season field
# NOTE(review): mg_to_g must be a mg->g conversion factor defined earlier in
# the notebook; it is not visible in this cell — confirm before rerunning.
df_2 = df_2 %>%
mutate(B_mg_m3_DW = Abundance.m3. * Average_biomass_per_individual.mg.) %>% # convert to mg m-3
mutate(B_g_m2_DW = B_mg_m3_DW * tow_depth_range * mg_to_g) %>% # mg m-3 to g m-2
# not converting to WW B yet
mutate(season = case_when(Month == 12 | Month == 1 | Month == 2 ~ "winter",
Month == 3 | Month == 4 | Month == 5 ~ "spring",
Month == 6 | Month == 7 | Month == 8 ~ "summer",
Month == 9 | Month == 10 | Month == 11 ~ "fall"))
# row count before the tow-coverage filter
nrow(df_2)
# Perry et al selected start depths with gt 150 m and those with start depths <150 where
# the tow depth range was gt 70% of the water column
df_2 = df_2 %>%
mutate(towrange_as_prop_watercol = tow_depth_range / Bottom_depth.m.) %>%
filter(towrange_as_prop_watercol >= 0.7 | Tow_start_depth.m. >= 150)
# row count after the filter (identical -> filter was applied upstream already)
nrow(df_2)
# quick look at the largest biomass-density records
arrange(df_2, desc(B_g_m2_DW), .by_group = FALSE)
# seems like the filter above was already applied
# +
# mean biomass density by group calculation
# (altered in the group_by line)
# n_surveys used to avoid biasing density estimates using these presence-only data
# (absent species are not reported in these data)
n_surveys = n_distinct(df_2[c('Index')])
# table of surveys conducted per year: distinct survey Index values per Year
# (equivalent to the original two-step group_by/summarise, but simpler)
n_surveys_yr = df_2 %>%
group_by(Year) %>%
summarise(count_surveys = n_distinct(Index))
# FIX: inner_join() takes its key via `by=`; the original `cols = "Year"` was
# swallowed by `...` and the join silently fell back to the common columns.
df_2 = inner_join(df_2, n_surveys_yr, by = "Year")
# Per-taxon summary: biomass is SUMMED (not averaged) within each taxon and
# then divided by the overall survey count, to avoid the presence-only bias.
df_2_summary = df_2 %>%
group_by(Phylum,Class,Order,Family,Genus,Species,Genus_species) %>%
#group_by(Class,Order) %>%
# summing below because of the bias issue with presence-only
summarise(B_sum_g_m2_DW = sum(round(B_g_m2_DW, digits=2)),
Body_size_mg_mean = mean(round(Average_biomass_per_individual.mg., digits=2)),
n = n_surveys,
n_occurrences = n()) %>%
mutate(B_mean_g_m2_DW = round(B_sum_g_m2_DW, digits=2) / n) %>%
arrange(desc(B_mean_g_m2_DW), .by_group = FALSE)
# +
# fish Phylum Chordata
#df_2 %>% filter(Phylum=="Chordata")
# Keep fish (Chordata minus larvaceans) above a small biomass threshold.
df_2_summary_fish = df_2_summary %>%
  filter(Phylum == "Chordata",
         Class != "Appendicularia",
         B_mean_g_m2_DW >= 2.034211e-05) %>%
  arrange(desc(B_mean_g_m2_DW))
# Bar plot of mean dry-weight biomass per species, coloured by Order.
p <- ggplot(data = df_2_summary_fish,
            aes(x = reorder(Genus_species, B_mean_g_m2_DW),
                y = B_mean_g_m2_DW,
                fill = Order)) +
  geom_bar(stat = "identity") +
  scale_fill_brewer(palette = "Paired") +
  labs(title = "Fish", x = "Genus_species", y = "DW 1996-2018 mt km-2")
# note https://www.r-graph-gallery.com/38-rcolorbrewers-palettes.html
# Flip to a horizontal bar plot for readable species labels.
p + coord_flip()
# Leuroglossus schmidti = Northern Smoothtongue
# Bathylagus milleri = deep sea smelt / owlfish /
# Merluccius productus = hake
# +
# Summary by year mean
# Annual per-taxon summary: biomass is summed within Year x taxonomy and then
# divided by that year's survey count to give a mean areal density.
# count_surveys is constant within a Year group, so mean() just extracts it.
df_2_yr_mean = df_2 %>%
group_by(Year,Phylum,Class,Order,Family,Genus,Species,Genus_species) %>%
#group_by(Class,Order) %>%
# summing here because of presence-only bias
summarise(B_sum_g_m2_DW = sum(round(B_g_m2_DW, digits=2)),
Body_size_mg_mean = mean(round(Average_biomass_per_individual.mg., digits=2)),
n_surv_yr = mean(count_surveys), # FIX! Not relevant by year
n_occurrences = n()) %>%
mutate(B_mean_g_m2_DW = round(B_sum_g_m2_DW, digits=2) / n_surv_yr) %>%
arrange(desc(B_mean_g_m2_DW), .by_group = FALSE)
# +
# Fish only (exclude larvaceans), then a single yearly series summed across
# all fish species.
df_2_yr_mean_fish = df_2_yr_mean %>%
  filter(Phylum == "Chordata" & Class != "Appendicularia")
df_2_yr_mean_allfishtogether = df_2_yr_mean_fish %>%
  group_by(Year) %>%
  summarise(B_DW_sum = sum(round(B_mean_g_m2_DW, digits = 6)))
# Yearly total fish dry-weight biomass.
ggplot(data = df_2_yr_mean_allfishtogether, aes(x = Year, y = B_DW_sum)) +
  theme(legend.position = "bottom") +
  geom_line()
# -
# Stacked yearly biomass by species, for the more abundant fish only.
df_2_yr_mean_fish %>%
  filter(B_mean_g_m2_DW > 0.01) %>%
  ggplot(aes(x = Year, y = B_mean_g_m2_DW, fill = Genus_species)) +
  geom_bar(position = "stack", stat = "identity") +
  theme(legend.position = "bottom")
# Same plot with the dominant unresolved Bathylagus records removed.
df_2_yr_mean_fish %>%
  filter(Genus_species != "Bathylagus.*sp.") %>%
  ggplot(aes(x = Year, y = B_mean_g_m2_DW, fill = Genus_species)) +
  geom_bar(position = "stack", stat = "identity") +
  theme(legend.position = "bottom")
# Inspect the Bathylagus records on their own.
df_2_yr_mean_fish %>% filter(Genus_species == "Bathylagus.*sp.")
# +
# grab just herring to compare to Boldt's stats
# FIX: the original passed x=/y= directly to ggplot() outside aes(), where
# they were silently ignored; aesthetic mappings must be wrapped in aes().
df_2_yr_mean_fish %>%
  filter(Genus_species == "Clupeiformes.*sp." | Genus_species == "Clupea.pallasii") %>%
  group_by(Year) %>%
  arrange(Year) %>%
  summarize(B_mean_clup_WW = sum(B_mean_g_m2_DW * 5)) %>% # DW -> WW via x5
  ggplot(aes(x = Year, y = B_mean_clup_WW)) +
  geom_line() +
  theme(legend.position = "bottom")
#
# +
# grab hake as separate TS
# FIX: the original cell ended with a dangling `%>%`, which made the next
# statement parse as the pipe's right-hand side and fail; complete the
# pipeline with the hake (Merluccius) filter the comment describes.
df_2_yr_mean_fish %>%
  filter(Genus == "Merluccius")
# -
df_2_yr_mean_fish %>% filter(Genus == "Clupeiformes" | Genus == "Clupea")
|
notebooks/.ipynb_checkpoints/Data Prep - Zooplankton Time Series (R)-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Step 4: Exploratory data analysis (Python)
# +
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats as st
# contains: taxi company name and the trips_amount for each company in November 2017
taxi_data = pd.read_csv('project_sql_result_01.csv')
# contains: dropoff location name and the average number of rides that ended in each neighborhood
end_trip_data = pd.read_csv('project_sql_result_04.csv')
# contains: pickup date, time, weather condition and the trip duration
weather_data = pd.read_csv('project_sql_result_07.csv')
# -
taxi_data.head(10)  # first look at the data
taxi_data.info()  # checking for missing values and data types
taxi_data.describe()  # checking for outliers
taxi_data.sort_values('trips_amount').head(10)  # checking for low-end outliers
taxi_data.sort_values('trips_amount').tail(10)  # checking for high-end outliers
taxi_data['company_name'].unique()  # checking the company name column for anomalies
taxi_data.duplicated().sum()  # checking for duplicates
# # conclusion:
# The taxi_data table has no missing values or duplicates, and the data types correspond to the values in the data. There are some outliers: several companies had only a couple of rides in those two days of November, while one company had almost 20 thousand rides — nearly twice as many as the second-biggest company that month. This difference is worth keeping in mind when working with the data. I assume the difference is due to company size: the companies with few rides are probably one-person operations with a single taxi driver.
end_trip_data.head(10)  # first look at the data
end_trip_data.info()  # checking for missing values and data types
end_trip_data.describe()  # checking for outliers
end_trip_data.duplicated().sum()  # checking for duplicates
# # conclusion:
# The end_trip_data table has no missing values or duplicates, and the data types correspond to the values in the data. Although there are some outliers and the standard deviation is fairly large, it is logical that some neighborhoods attract more taxi trips than others — for example, the city center and entertainment districts.
# # top 10 neighborhoods
# Ten drop-off neighborhoods with the highest average trip counts.
top_10_hoods = end_trip_data.sort_values('average_trips',ascending = False).head(10)
top_10_hoods
# plotting a bar chart of trips per company
taxi_data.plot(x = 'company_name', figsize = (17, 10), kind = 'bar')
plt.title('company distribution')
plt.ylabel('trips_amount')
# # conclusion:
# We can see that almost half of the companies have very few rides compared with the thousands of rides of the other companies. In the lower part of the chart, company names contain numbers and often personal names; I assume those are car plates plus the driver's name and that they work alone or with a partner, which explains the small ride counts. The bigger companies probably have many more cars at their disposal; this could be verified if we could count the number of cars per company.
top_10_hoods.plot(x = 'dropoff_location_name', figsize = (10, 6), kind = 'bar')
plt.title('neighborhood distribution')
plt.ylabel('average_trips')
# # Conclusion
# We can see that there are two clearly most popular neighborhoods ('Loop' and 'River North') and two moderately popular ones ('Streeterville' and 'West Loop'), while the other six have a couple of thousand rides per month.
# # Step 5. Testing hypotheses (Python)
# Checking the basics of the data frame:
#
weather_data.head()  # first look at the data
weather_data.info()  # checking for missing values and data types
# there are no missing values, but the date column is not a datetime type; that isn't relevant for this hypothesis
weather_data['weather_conditions'].unique()  # checking the unique weather condition values
# H0: There is no difference between the duration of rides from the 'Loop' neighborhood to O'Hare Airport on rainy Saturdays
# H1: There is a difference between the duration of rides from the 'Loop' neighborhood to O'Hare Airport on rainy Saturdays
# +
# creating slices by weather condition
good_weather = weather_data.query('weather_conditions == "Good"')
bad_weather = weather_data.query('weather_conditions == "Bad"')
# testing the hypothesis with an independent two-sample t-test
# NOTE(review): the hypotheses above are two-sided, so the full two-sided
# p-value should be compared against alpha; halving it (below) is only valid
# for a one-sided test — confirm which was intended.
results = st.ttest_ind(good_weather['duration_seconds'], bad_weather['duration_seconds'])
results.pvalue/2
# -
# We reject the null hypothesis in favour of the alternative: there is a difference between the duration of rides from the 'Loop' neighborhood to O'Hare Airport on rainy Saturdays. The significance level used is 0.025, because we have no hypothesis about the direction of the difference.
# checking the direction of the difference
weather_data.pivot_table(index = 'weather_conditions', values = 'duration_seconds', aggfunc = ['mean', 'count'])
# # conclusion:
# Rides from the 'Loop' neighborhood to O'Hare Airport are longer on rainy Saturdays than on dry ones. We can also see that there are far fewer rides in 'Bad' weather than in 'Good' weather. If we could check November 2017 and the proportion of good- and bad-weather days in it, we could test whether people take taxis more often in good weather.
#
|
Project 5.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
import random
import time
import torch
from x_transformers.x_transformers import XTransformer
import torch
from run_experiment import *
from generate_data import *
# -
# ## Variables
# +
from sklearn.model_selection import ParameterGrid

# --- Experiment configuration ---------------------------------------------
TAG = '8+30_close2paper'
TASK_NAME = 'retrieval_15'  # FIX: was assigned twice with the same value; kept once
# dataset sizes
TRAIN_SIZE = 100_000
VAL_SIZE = 10_000
TEST_SIZE = 20_000
NUM_INITS = 3  # independent re-initialisations per hyper-parameter setting
# optimisation
NUM_BATCHES = int(1.5e5)
BATCH_SIZE = 32
LEARNING_RATE = 3e-4
GENERATE_EVERY = NUM_BATCHES // 10  # evaluation/generation cadence
# vocabulary / sequence geometry (26+10+1: presumably letters + digits +
# one special token — TODO confirm against generate_data)
ENC_NUM_TOKENS = 26+10+1
DEC_NUM_TOKENS = 10+1
ENC_SEQ_LEN = 9
DEC_SEQ_LEN = 1
INPUT_LEN = 9
# hyper-parameter grid swept in the run loop below; 'depth,heads' is a
# coupled pair unpacked into enc/dec kwargs before model construction
model_parameters = ParameterGrid({'dim': [20, 50, 100],
                                  'tie_token_embeds': [True],
                                  'return_tgt_loss': [True],
                                  'enc_num_tokens': [ENC_NUM_TOKENS],
                                  'depth,heads': [(1,1), (2,4)],
                                  'enc_max_seq_len': [5, 10, 15],
                                  'dec_num_tokens': [DEC_NUM_TOKENS],
                                  'dec_max_seq_len': [DEC_SEQ_LEN],
                                  'enc_num_memory_tokens': [0, 2, 4, 8, 16, 32]})
print('Total runs: ', NUM_INITS * len(model_parameters))
# +
# for i, p in enumerate(model_parameters):
# print(i, p)
# -
# #### Generate data
# +
class retrieval_generator:
    """Infinite batch generator for the key-value retrieval task.

    Each ``next()`` call yields ``(src, tgt, src_mask, tgt_mask)`` where
    ``src`` is a ``(BATCH_SIZE, ENC_SEQ_LEN)`` int tensor of inputs produced
    by ``create_sequence`` and ``tgt`` is ``(BATCH_SIZE, DEC_SEQ_LEN + 1)``
    with a start token (10) in column 0 followed by the answer tokens.
    Relies on module-level BATCH_SIZE / ENC_SEQ_LEN / DEC_SEQ_LEN.
    """

    def __init__(self, K=4):
        # Masks are all-ones (no padding) and reused for every batch.
        self.src_mask = torch.ones(BATCH_SIZE, ENC_SEQ_LEN).bool()
        self.tgt_mask = torch.ones(BATCH_SIZE, DEC_SEQ_LEN+1).bool()
        self.K = K  # number of key-value pairs per generated sequence

    def __iter__(self):
        # FIX: the class implemented only __next__, so it satisfied next()
        # but not the iterator protocol (unusable in for-loops / zip).
        # Returning self completes the protocol; existing callers of
        # next(gen) are unaffected.
        return self

    def __next__(self):
        X = np.zeros([BATCH_SIZE, ENC_SEQ_LEN]).astype(int)
        y = np.zeros([BATCH_SIZE, DEC_SEQ_LEN+1]).astype(int)
        y[:, 0] = 10  # decoder start token
        for i in range(BATCH_SIZE):
            X[i], y[i, 1:] = create_sequence(one_hot=False, K=self.K)
        return torch.tensor(X), torch.tensor(y), self.src_mask, self.tgt_mask
# generator = retrieval_generator(4)
# generate_data(generator, task_name='retrieval_4', train_size=TRAIN_SIZE, test_size=TEST_SIZE, val_size=VAL_SIZE)
# ENC_SEQ_LEN = 31
# generator = retrieval_generator(15)
# generate_data(generator, task_name='retrieval_15', train_size=TRAIN_SIZE, test_size=TEST_SIZE, val_size=VAL_SIZE)
# +
# s,t, _, _ = next(generator)
# s[0], t[0]
# -
# ### Run
# +
# Build data loaders over the pre-generated train/val/test splits.
gen_train = data_loader(task_name=f'{TASK_NAME}_train', batch_size=BATCH_SIZE, enc_seq_len=INPUT_LEN, dec_seq_len=DEC_SEQ_LEN)
gen_val = data_loader(task_name=f'{TASK_NAME}_val', batch_size=VAL_SIZE, enc_seq_len=INPUT_LEN, dec_seq_len=DEC_SEQ_LEN)
gen_test = data_loader(task_name=f'{TASK_NAME}_test', batch_size=TEST_SIZE, enc_seq_len=INPUT_LEN, dec_seq_len=DEC_SEQ_LEN)
t = time.time()
# NOTE(review): indentation was lost in this copy of the notebook; the nesting
# below (per-init outer loop, per-grid-entry inner loop, timing per run) is
# the reconstruction consistent with the prints — confirm against the .ipynb.
with torch.cuda.device(0):
    for init_num in range(NUM_INITS):
        print('\n\n\nInit number ', init_num)
        for i, param in enumerate(list(model_parameters)):
            print(param)
            # Unpack the coupled 'depth,heads' grid entry into the four
            # encoder/decoder kwargs XTransformer expects.
            param['enc_depth'], param['enc_heads'] = param['depth,heads']
            param['dec_depth'], param['dec_heads'] = param['depth,heads']
            param.pop('depth,heads')
            print(i / len(model_parameters) * 100, '%')
            model = XTransformer(**param).cuda()
            # Model name encodes the hyper-parameters plus the init index.
            model_name = f"{TASK_NAME}_dim{param['dim']}d{param['enc_depth']}h{param['enc_heads']}M{param['enc_num_memory_tokens']}l{param['enc_max_seq_len']}_v{init_num}"
            optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
            train_validate_model(model,
                                 train_generator=gen_train,
                                 val_generator=gen_val,
                                 optim=optim,
                                 model_name=model_name,
                                 dec_seq_len=DEC_SEQ_LEN,
                                 num_batches=NUM_BATCHES,
                                 generate_every=GENERATE_EVERY)
            test_model(model, gen_test, model_name, param, TASK_NAME, tag=TAG, dec_seq_len=param['dec_max_seq_len'])
            print('Total time: ', time.time() - t)
            t = time.time()
# -
# ### Test!
# +
TASK_NAME = 'retrieval_15'
# +
# Re-create the loaders and a single model (grid entry 5) for interactive
# inspection of the encoder.
init_num = 0
gen_train = data_loader(task_name=f'{TASK_NAME}_train', batch_size=BATCH_SIZE, enc_seq_len=ENC_SEQ_LEN, dec_seq_len=DEC_SEQ_LEN)
gen_val = data_loader(task_name=f'{TASK_NAME}_val', batch_size=VAL_SIZE, enc_seq_len=ENC_SEQ_LEN, dec_seq_len=DEC_SEQ_LEN)
gen_test = data_loader(task_name=f'{TASK_NAME}_test', batch_size=TEST_SIZE, enc_seq_len=ENC_SEQ_LEN, dec_seq_len=DEC_SEQ_LEN)
param = list(model_parameters)[5]
print(param)
# Same 'depth,heads' unpacking as in the sweep loop.
param['enc_depth'], param['enc_heads'] = param['depth,heads']
param['dec_depth'], param['dec_heads'] = param['depth,heads']
param.pop('depth,heads')
model = XTransformer(**param).cuda()
model_name = f"{TASK_NAME}_dim{param['dim']}d{param['enc_depth']}h{param['enc_heads']}M{param['enc_num_memory_tokens']}l{param['enc_max_seq_len']}_v{init_num}"
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
src, tgt, _, _ = next(gen_train)
print(model.encoder.max_seq_len, model.encoder.num_memory_tokens)
# Sanity check: run the encoder on a doubled batch and inspect output shape.
model.encoder(torch.cat((src, src)), return_embeddings=True).shape
# -
src[0], tgt[0]  # peek at one (input, target) pair
|
run_retrieval30.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="97RbG14vUw1e"
# # **Parte I do Treinamento**
# ## Criando alunos(as) com JSON
# + [markdown] id="b8Pmm9iQU5ld"
# ### Criando os nomes dos Alunos
# + id="me9meYOFU-7e" executionInfo={"status": "ok", "timestamp": 1620682007003, "user_tz": 180, "elapsed": 760, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# Importando a biblioteca
import pandas as pd
# + id="wf-ATHIrTx3v" executionInfo={"status": "ok", "timestamp": 1620682008875, "user_tz": 180, "elapsed": 2622, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# Use the most common Brazilian names, queried from the IBGE census API
nomes_m = pd.read_json("https://servicodados.ibge.gov.br/api/v1/censos/nomes/ranking?qtd=200&sexo=m")
nomes_f = pd.read_json("https://servicodados.ibge.gov.br/api/v1/censos/nomes/ranking?qtd=200&sexo=f")
# In this case we are reading JSON (top-200 male and top-200 female names)
# + colab={"base_uri": "https://localhost:8080/"} id="LTCWNf0nVZKY" executionInfo={"status": "ok", "timestamp": 1620682008875, "user_tz": 180, "elapsed": 2618, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="34404b98-03aa-4dc5-d5bd-d49b6bd17871"
# Total number of candidate names (female + male)
print('Quantidade de nomes: ', str(len(nomes_f) + len(nomes_m)))
# + colab={"base_uri": "https://localhost:8080/"} id="QeMJ6TQfViU8" executionInfo={"status": "ok", "timestamp": 1620682008876, "user_tz": 180, "elapsed": 2614, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="2285565d-a76e-473d-d760-282f17a425ec"
# Build a list holding the two name DataFrames
frames = [nomes_f, nomes_m]
type(frames)
# + colab={"base_uri": "https://localhost:8080/"} id="JsCzrlx-V5EJ" executionInfo={"status": "ok", "timestamp": 1620682008877, "user_tz": 180, "elapsed": 2611, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="baa7d147-114a-4fc8-ec3b-20324bd17c69"
frames  # display the two DataFrames
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="maLgFSEsV6t3" executionInfo={"status": "ok", "timestamp": 1620682008878, "user_tz": 180, "elapsed": 2607, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="44ac00c6-bf94-4690-a372-e66f2ac80eb4"
# Concatenate the list into a single one-column DataFrame of names
nomes = pd.concat(frames)['nome'].to_frame()
# Use sample() to show a few names at random
nomes.sample(5)
# + [markdown] id="3IszjBLZX8d6"
# ### Incluindo ID dos alunos
# + id="REvCOcjbYTUY" executionInfo={"status": "ok", "timestamp": 1620682008881, "user_tz": 180, "elapsed": 2604, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# Import numpy and fix the RNG seed so the generated data is reproducible
import numpy as np
np.random.seed(123)
# + colab={"base_uri": "https://localhost:8080/"} id="tuL9PFfaXOwW" executionInfo={"status": "ok", "timestamp": 1620682008882, "user_tz": 180, "elapsed": 2600, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="44811e05-f099-4989-e86f-def6dc505723"
# Store the total number of students in a variable
total_alunos = len(nomes)
total_alunos
# + id="ymmmAXmjbDku" executionInfo={"status": "ok", "timestamp": 1620682008882, "user_tz": 180, "elapsed": 2595, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# Randomly generate student ids (a permutation of 1..total_alunos)
nomes["id_aluno"] = np.random.permutation(total_alunos) + 1
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="YKSZg-WMbOF8" executionInfo={"status": "ok", "timestamp": 1620682008883, "user_tz": 180, "elapsed": 2591, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="f0f74ded-9849-43da-c778-e3182c38c292"
# Show a few students at random
nomes.sample(5)
# + id="XNdCrAZcbWke" executionInfo={"status": "ok", "timestamp": 1620682008883, "user_tz": 180, "elapsed": 2584, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# Create e-mail domains for the students
# NOTE(review): '@<EMAIL>' looks like an anonymisation placeholder — restore
# the real domain string before rerunning.
dominios = ['@<EMAIL>', '@servicodoemail.com']
nomes['dominio'] = np.random.choice(dominios, total_alunos)
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="kn8eDXiebWvm" executionInfo={"status": "ok", "timestamp": 1620682008884, "user_tz": 180, "elapsed": 2580, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="dbb6a20c-e9b4-4e99-876b-c894baa5ed35"
# Show a few students at random
nomes.sample(5)
# + id="mWabI93lbkla" executionInfo={"status": "ok", "timestamp": 1620682008884, "user_tz": 180, "elapsed": 2575, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# Create the e-mail field as lowercase name + domain
nomes['email'] = nomes.nome.str.cat(nomes.dominio).str.lower()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="KrbQ6yVAblNf" executionInfo={"status": "ok", "timestamp": 1620682009370, "user_tz": 180, "elapsed": 3053, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="daf9d240-b543-4a53-a38f-43bc0b96c6a7"
# Show a few students at random
nomes.sample(5)
# + [markdown] id="2Llme42DZd_Z"
# # **Parte II do Treinamento**
# ## Criando cursos lendo HTML
# + [markdown] id="vFNKu2MnZ4df"
# ### Criando a tabela cursos
# + colab={"base_uri": "https://localhost:8080/"} id="vzzCjtanZjhh" executionInfo={"status": "ok", "timestamp": 1620682014461, "user_tz": 180, "elapsed": 8137, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="c195cf05-a0ad-4b7d-e039-87091541d01a"
# Install external libraries (notebook shell commands)
# !pip3 install html5lib
# !pip3 install lxml
# + id="fuk_HTaBcfrI" executionInfo={"status": "ok", "timestamp": 1620682014462, "user_tz": 180, "elapsed": 8134, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# Importando as biblioteca para uso
import html5lib
# + colab={"base_uri": "https://localhost:8080/"} id="ntIZau75cf0N" executionInfo={"status": "ok", "timestamp": 1620682014462, "user_tz": 180, "elapsed": 8129, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="2e98031b-56b7-45b4-b78c-846e8264f2bd"
# Fetch and parse all HTML tables from the URL
url = 'http://tabela-cursos.herokuapp.com/index.html'
cursos = pd.read_html(url)
cursos
# + colab={"base_uri": "https://localhost:8080/"} id="620tgLF1c0wR" executionInfo={"status": "ok", "timestamp": 1620682014462, "user_tz": 180, "elapsed": 8125, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="c453d6e7-68b9-4f65-e77e-1bee9f093a75"
# Show the object's type (read_html returns a list of DataFrames)
type(cursos)
# + colab={"base_uri": "https://localhost:8080/"} id="WWr-QGRNc2LW" executionInfo={"status": "ok", "timestamp": 1620682014463, "user_tz": 180, "elapsed": 8122, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="89e3072d-f9bc-43c7-ac1d-e90290e9cee0"
# Take element 0 of the list — the courses table — so cursos is a DataFrame
cursos = cursos[0]
type(cursos)
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="zq3Ay7ppc2Rq" executionInfo={"status": "ok", "timestamp": 1620682014463, "user_tz": 180, "elapsed": 8118, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="bf0f09b7-373a-4657-cbf2-09e03a589442"
cursos.head()  # first rows of the courses table
# + [markdown] id="42kgy7AadXuK"
# ### Alterando o index dos cursos
# + id="nAnfL6BmdUSu" executionInfo={"status": "ok", "timestamp": 1620682014464, "user_tz": 180, "elapsed": 8112, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# Rename the course-name column to a code-friendly identifier
cursos = cursos.rename(columns={'Nome do curso' : 'nome_do_curso'})
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="p3XDKXqIddbs" executionInfo={"status": "ok", "timestamp": 1620682014464, "user_tz": 180, "elapsed": 8108, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="de3d9165-70b1-471c-90b5-9de9949ec28f"
cursos.head(2)  # verify the rename
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="5AwXns1idoc9" executionInfo={"status": "ok", "timestamp": 1620682014465, "user_tz": 180, "elapsed": 8103, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="1f1a9d9b-824a-44fc-a331-0a4469a24aa8"
# Create a sequential 1-based id column
cursos['id'] = cursos.index + 1
cursos.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="ZbhnINXxdpzw" executionInfo={"status": "ok", "timestamp": 1620682014965, "user_tz": 180, "elapsed": 8598, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="5303e22e-b1f2-4ada-ac31-b27512265b55"
# Set the id column as the index
cursos = cursos.set_index('id')
cursos.head()
# + [markdown] id="T40fhPxX5j5x"
# ### Matriculando os alunos nos cursos
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="6XNN0oQV5i3J" executionInfo={"status": "ok", "timestamp": 1620682014971, "user_tz": 180, "elapsed": 8598, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="4756a460-2400-4e35-ea4c-f31727bb35c9"
nomes.sample(5)  # spot-check the student table before enrolment
# + id="2HGI2WgeeT8O" colab={"base_uri": "https://localhost:8080/", "height": 0} executionInfo={"status": "ok", "timestamp": 1620682014972, "user_tz": 180, "elapsed": 8593, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="b900d6c1-ce64-4249-d074-0cd094d84618"
# Draw each student's number of course enrolments from an exponential
# distribution (scaled, rounded up), so most students take only 1-2 courses
nomes['matriculas'] = np.ceil(np.random.exponential(size=total_alunos) * 1.5).astype(int)
nomes.sample(5)
# + colab={"base_uri": "https://localhost:8080/"} id="BHIenQFx6X0M" executionInfo={"status": "ok", "timestamp": 1620682014974, "user_tz": 180, "elapsed": 8589, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="b49c1700-771c-43c3-8131-a32c095df68f"
# Overall statistics of enrolments per student
nomes.matriculas.describe()
# + id="4vncCh6q6it4" executionInfo={"status": "ok", "timestamp": 1620682014975, "user_tz": 180, "elapsed": 8586, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
import seaborn as sns
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="Cw5g7jL46mJ3" executionInfo={"status": "ok", "timestamp": 1620682014975, "user_tz": 180, "elapsed": 8582, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="8511bffc-c9e2-421f-c436-bb7e31b32617"
sns.distplot(nomes.matriculas)
# + colab={"base_uri": "https://localhost:8080/"} id="RH8BkTmX6tcD" executionInfo={"status": "ok", "timestamp": 1620682014976, "user_tz": 180, "elapsed": 8575, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="f103127a-6b44-42ec-b6af-1c341d292dd0"
# Quantidade de alunos inscritos em 1 ou mais cursos
nomes.matriculas.value_counts()
# + [markdown] id="oNxQgE-l7IAO"
# ### Selecionando os cursos
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="74wTb20z6tv0" executionInfo={"status": "ok", "timestamp": 1620682014977, "user_tz": 180, "elapsed": 8572, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="6781fc56-0316-4217-99e5-afb2de4e181b"
nomes.sample(3)
# + id="VZtEqzqX6uBB" executionInfo={"status": "ok", "timestamp": 1620682014977, "user_tz": 180, "elapsed": 8565, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# Accumulator for (student, course) enrollment pairs, plus a random probability
# vector over the 20 courses (normalized to sum to 1) used for weighted sampling.
todas_matriculas = []
x = np.random.rand(20)
prob = x / sum(x)
# + id="Tf0_BJfh7asx" executionInfo={"status": "ok", "timestamp": 1620682015491, "user_tz": 180, "elapsed": 9074, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# Randomly assign each student to courses, one row per enrollment.
# Relies on notebook state built above: `nomes` (students, with `id_aluno` and
# `matriculas` columns), `cursos` (course catalog indexed by course id),
# `prob` (per-course sampling weights) and the `todas_matriculas` list.
for _, row in nomes.iterrows():
    aluno_id = row.id_aluno          # was `id`, which shadowed the builtin
    qtd_matriculas = row.matriculas  # number of enrollments for this student
    for _ in range(qtd_matriculas):
        # Draw one course id, weighted by `prob`, and record the enrollment.
        todas_matriculas.append([aluno_id, np.random.choice(cursos.index, p=prob)])
matriculas = pd.DataFrame(todas_matriculas, columns=['id_aluno', 'id_curso'])
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="cADg4lcI7a0G" executionInfo={"status": "ok", "timestamp": 1620682015492, "user_tz": 180, "elapsed": 9074, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="cf77cd43-ea8e-4310-e0fc-70052821559b"
matriculas.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="BLVszHtm_eZW" executionInfo={"status": "ok", "timestamp": 1620682015494, "user_tz": 180, "elapsed": 9072, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="daf77368-7f3e-4f34-9ded-c24c7f347405"
matriculas.groupby('id_curso').count().join(cursos['nome_do_curso']).rename(columns={'id_aluno':'quantidade_de_alunos'})
# + [markdown] id="hc5kS2kH_2vf"
# #### DataFrames que temos até agora...
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="bRKRj0k__ehk" executionInfo={"status": "ok", "timestamp": 1620682015495, "user_tz": 180, "elapsed": 9067, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="a4215f42-b1ae-44ad-cdfe-84b637306445"
nomes.sample(3)
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="WKfbBmzX7a9O" executionInfo={"status": "ok", "timestamp": 1620682015495, "user_tz": 180, "elapsed": 9060, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="aef3d16f-2498-4462-b203-c2644b6a5a0f"
cursos.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="3SUq39uC_7Di" executionInfo={"status": "ok", "timestamp": 1620682015496, "user_tz": 180, "elapsed": 9055, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="0f607c3f-2658-4b08-9df3-4d99250c197e"
matriculas.head()
# + id="Fi12vuHM__RF" executionInfo={"status": "ok", "timestamp": 1620682015496, "user_tz": 180, "elapsed": 9049, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
matriculas_por_curso = matriculas.groupby('id_curso').count().join(cursos['nome_do_curso']).rename(columns={'id_aluno':'quantidade_de_alunos'})
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="vfFcYPw1AE1q" executionInfo={"status": "ok", "timestamp": 1620682015497, "user_tz": 180, "elapsed": 9045, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="733dcda2-d1f3-4135-b411-799935e80498"
matriculas_por_curso.head()
# + [markdown] id="PPhwdmWPcFsZ"
# # **Parte III do Treinamento**
# ## Escrevendo CSV, JSON e HTML
# + [markdown] id="Twjbe65qAM0P"
# ### Saída em diferentes formatos
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="qkyK9qqWAPEZ" executionInfo={"status": "ok", "timestamp": 1620682015497, "user_tz": 180, "elapsed": 9039, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="41c324db-54e8-4379-94d7-9aacc68560a0"
matriculas_por_curso.head(3)
# + id="_hgTMdWWAPRt" executionInfo={"status": "ok", "timestamp": 1620682015497, "user_tz": 180, "elapsed": 9033, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# Save the DataFrame as CSV.
# NOTE(review): index=False drops the id_curso index, so the course ids are
# not written to the file — confirm that is intended.
matriculas_por_curso.to_csv('matriculas_por_curso.csv', index=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="XqmyFkeWAPZc" executionInfo={"status": "ok", "timestamp": 1620682015498, "user_tz": 180, "elapsed": 9030, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="ed0bb957-8e43-4a25-cfab-f197e91f41ab"
pd.read_csv('matriculas_por_curso.csv')
# + id="h9gDJXNdAPgT" executionInfo={"status": "ok", "timestamp": 1620682015498, "user_tz": 180, "elapsed": 9023, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# Salvando o DataFrame em formato JSON
matriculas_json = matriculas_por_curso.to_json()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="fHqrSQMFAlVa" executionInfo={"status": "ok", "timestamp": 1620682015499, "user_tz": 180, "elapsed": 9018, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="f99060ff-5ef7-4716-dc59-8d6ae752d51f"
matriculas_json
# + id="q9XPSmpzAlZ4" executionInfo={"status": "ok", "timestamp": 1620682015499, "user_tz": 180, "elapsed": 9013, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# Render the DataFrame as an HTML table (kept in memory, not written to disk).
matriculas_html = matriculas_por_curso.to_html()
# + colab={"base_uri": "https://localhost:8080/"} id="cvvpM_ipAtNJ" executionInfo={"status": "ok", "timestamp": 1620682015500, "user_tz": 180, "elapsed": 9009, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="b62e01ae-be83-41ae-c848-9045e9050aa9"
print(matriculas_html)
# + id="lawaMG2V6XJk" executionInfo={"status": "ok", "timestamp": 1620682015500, "user_tz": 180, "elapsed": 9007, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# + id="owq2PqhbcS5x" executionInfo={"status": "ok", "timestamp": 1620682015501, "user_tz": 180, "elapsed": 9004, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# + id="K_gRH5gzczpb" executionInfo={"status": "ok", "timestamp": 1620682015501, "user_tz": 180, "elapsed": 9000, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# + [markdown] id="h8NYI2bncTly"
# # **Parte IV do Treinamento**
# ## Lendo e escrevendo SQL
#
#
# > O SQL Lite vem disponível juntamente com o Pandas quando realizamos a instalação e o import dessa biblioteca, sendo necessário somente a instalação do pacote referente a manipulação com a linguagem SQL. Por esse motivo se torna mais fácil e simples a criação e consulta nesse banco de dados.
#
#
# + [markdown] id="oQI3paDRDSer"
# ### Criando o banco SQL
# + id="ZV6HCJ3xcYzI" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1620682018146, "user_tz": 180, "elapsed": 11642, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="5a94c1b8-3f47-4b3f-c530-a6ea9664e278"
# Instalando a biblioteca de manipulação SQL
# !pip install sqlalchemy
# + id="c2DiWAuPBGMC" executionInfo={"status": "ok", "timestamp": 1620682018147, "user_tz": 180, "elapsed": 11639, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
from sqlalchemy import create_engine, MetaData, Table
# + id="mM8MeGewBGTr" executionInfo={"status": "ok", "timestamp": 1620682018147, "user_tz": 180, "elapsed": 11635, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
'''
Criando o banco de dados SQLite com o parâmetro memory fazemos ele ser
salvo em memória local
'''
# Create an in-memory SQLite database: the ':memory:' URL keeps the whole
# database in RAM, so nothing is persisted to disk after the session ends.
engine = create_engine('sqlite:///:memory:')
# + colab={"base_uri": "https://localhost:8080/"} id="2ejcia6wBGZw" executionInfo={"status": "ok", "timestamp": 1620682018148, "user_tz": 180, "elapsed": 11632, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="ec748fe7-f9fd-4733-a8cb-eb61680643d0"
engine
# + colab={"base_uri": "https://localhost:8080/"} id="7Jv1ADoRBGgX" executionInfo={"status": "ok", "timestamp": 1620682018148, "user_tz": 180, "elapsed": 11628, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="73a23661-00f2-4db7-84b3-381866f54651"
type(engine)
# + id="2r7qTLEEBGnd" executionInfo={"status": "ok", "timestamp": 1620682018149, "user_tz": 180, "elapsed": 11625, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# Write the DataFrame into the SQL database as table 'matriculas'
# (first argument: table name, second: the engine created above).
matriculas_por_curso.to_sql('matriculas', engine)
# + colab={"base_uri": "https://localhost:8080/"} id="69ow1BIZBGwi" executionInfo={"status": "ok", "timestamp": 1620682018149, "user_tz": 180, "elapsed": 11621, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="1bc5a2d0-671f-4f72-8679-8a85d7efbecf"
print(engine.table_names())
# + [markdown] id="ZstHZNsbGhKu"
# ### Buscando no banco de dados
# + id="fMRynccsBHdy" executionInfo={"status": "ok", "timestamp": 1620682018149, "user_tz": 180, "elapsed": 11618, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
query = 'select * from matriculas where quantidade_de_alunos < 20'
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="3C-ja-5FGrVb" executionInfo={"status": "ok", "timestamp": 1620682018150, "user_tz": 180, "elapsed": 11612, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="2addad1e-4c24-4399-8875-271f4e6fc495"
# Realizando um select no banco de dados usando a função query() do Pandas
pd.read_sql(query, engine)
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="pnNH_gmSGrei" executionInfo={"status": "ok", "timestamp": 1620682018151, "user_tz": 180, "elapsed": 11608, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="fd1194a3-cd62-44e1-cfea-96bec673f7b8"
# Lendo uma tabela inteira
pd.read_sql_table('matriculas', engine, columns=['nome_do_curso', 'quantidade_de_alunos'])
# + id="8BKQ80jlGrjL" executionInfo={"status": "ok", "timestamp": 1620682018154, "user_tz": 180, "elapsed": 11606, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
muitas_matriculas = pd.read_sql_table('matriculas', engine, columns=['nome_do_curso', 'quantidade_de_alunos'])
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="zadFk_4tJeY6" executionInfo={"status": "ok", "timestamp": 1620682018156, "user_tz": 180, "elapsed": 11603, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="c8640529-7448-4c94-cef7-fd083e2a4f48"
muitas_matriculas = muitas_matriculas.query('quantidade_de_alunos > 70')
muitas_matriculas
# + [markdown] id="2dS8jE08KIMw"
# ### Insert no banco de dados
# + id="Eeiz-8SNJehk" executionInfo={"status": "ok", "timestamp": 1620682018158, "user_tz": 180, "elapsed": 11600, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
muitas_matriculas.to_sql('muitas_matriculas', con=engine)
# + colab={"base_uri": "https://localhost:8080/"} id="2GhAzP4KKL3I" executionInfo={"status": "ok", "timestamp": 1620682018158, "user_tz": 180, "elapsed": 11596, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="f7dcd0c6-0e3e-4051-bbd1-2bac5ff47a29"
print(engine.table_names())
# + [markdown] id="beqOdG3fcZaI"
# # **Parte V do Treinamento**
# ## Lendo e escrevendo Excel
# + [markdown] id="keoq5JrNK6_d"
# ### Nomes dos alunos(as) da próxima turma
# + id="85OYuwhgcb_o" colab={"base_uri": "https://localhost:8080/", "height": 693} executionInfo={"status": "ok", "timestamp": 1620682018159, "user_tz": 180, "elapsed": 11592, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="c952befe-6ad1-4a64-cbd5-684eed1c15e6"
matriculas_por_curso
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="GqMfzwdKKipz" executionInfo={"status": "ok", "timestamp": 1620682165793, "user_tz": 180, "elapsed": 546, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="b80fe71c-08fa-4f28-a9e4-eb1024889124"
# Select every enrollment of course 5 — the roster of the next class.
id_curso = 5
proxima_turma = matriculas.query('id_curso == {}'.format(id_curso))
proxima_turma
# + colab={"base_uri": "https://localhost:8080/"} id="1fYMM2RBKitq" executionInfo={"status": "ok", "timestamp": 1620682324905, "user_tz": 180, "elapsed": 1084, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="12db0a63-a9c5-46cb-d677-de3b3de00dda"
# Join the two DataFrames on id_aluno to look up each enrolled student's name.
# The result is a pandas Series (one name per enrollment).
proxima_turma.set_index('id_aluno').join(nomes.set_index('id_aluno'))['nome']
# + id="J41jJuIfKi3j" executionInfo={"status": "ok", "timestamp": 1620682532927, "user_tz": 180, "elapsed": 540, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# Same join as the cell above, but converted into a single-column DataFrame.
proxima_turma = proxima_turma.set_index('id_aluno').join(nomes.set_index('id_aluno'))['nome'].to_frame()
# + colab={"base_uri": "https://localhost:8080/"} id="Lw8-MATtMeuw" executionInfo={"status": "ok", "timestamp": 1620682536048, "user_tz": 180, "elapsed": 550, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="9872a6c7-4048-4225-cd7d-bd83d7396e2d"
nome_curso = cursos.loc[id_curso]
nome_curso
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="jfjElE2qMpsb" executionInfo={"status": "ok", "timestamp": 1620682537395, "user_tz": 180, "elapsed": 548, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="a4412e1e-4f75-46a2-8825-d53314fa5237"
nome_curso = nome_curso.nome_do_curso
nome_curso
# + id="LZ1mSCBHMpy4" executionInfo={"status": "ok", "timestamp": 1620682661262, "user_tz": 180, "elapsed": 570, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
# Rename the column so the header shows which course the roster belongs to
# (the header text itself is runtime output and stays in Portuguese).
proxima_turma = proxima_turma.rename(columns={'nome':'Alunos do curso {}'.format(nome_curso)})
# + colab={"base_uri": "https://localhost:8080/", "height": 233} id="yxnChSV5NQKU" executionInfo={"status": "ok", "timestamp": 1620682675820, "user_tz": 180, "elapsed": 639, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="aac63425-83dc-4d50-c029-13eeec583a87"
proxima_turma.sample(5)
# + [markdown] id="Gr0AXJn1N8R7"
# ### Gerando arquivo Excel
# + id="M4kx03dlNuu1" executionInfo={"status": "ok", "timestamp": 1620682772409, "user_tz": 180, "elapsed": 1201, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}}
proxima_turma.to_excel('proxima_turma.xlsx', index=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="0jf5iBTxOIrI" executionInfo={"status": "ok", "timestamp": 1620682882650, "user_tz": 180, "elapsed": 559, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GggHt8_mbkDnXZff-k4M3yYcwgljM73TJaOFcf82g=s64", "userId": "04361221466185006768"}} outputId="1aa53520-5433-43b8-e49e-e0bc69bb3eee"
pd.read_excel('proxima_turma.xlsx')
# + id="7d1VqE1aOjvi"
|
FormacaoPythonParaDataScience/PandasFormatosDiferentesInputOutput/PandasDiferentesIO.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import nltk
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# ### Mapping STTS nach Universal Tagset
# +
# Mapping STTS nach Universal Tagset
# Das Mapping wurde grundsätzlich aus folgender Quelle übernommen:
# https://raw.githubusercontent.com/slavpetrov/universal-pos-tags/master/de-tiger.map
# und mit dieser Quelle überprüft.
# https://pdfs.semanticscholar.org/ed2c/c779c7eb0004bd6dd50538a2cafca092c94f.pdf
#
# Aufgrund von TIGER_scheme-syntax.pfd Seite 122ff. wurden folgende Anpassungen vorgenommen:
# NNE gelöscht, da nicht Teil des Tagsets
#
# Folgende drei Tags wurden hinzugefügt, obwohl diese nicht im Trainingset vorkommen,
# diese könnten jedoch im Testset des Dozenten auftauchen.
#
# PAV PRON (Gleichbedeutend wie PROAV)
# PIDAT PRON
# SGML X
# SPELL X
# STTS → Universal POS tagset lookup table. Provenance and the manual
# adjustments (NNE removed; PAV/PIDAT/SGML/SPELL added for tags absent from
# the training data but possible in unseen test data) are documented in the
# comments of this cell above.
stts_to_universal = {
    "$(":".",
    "$,":".",
    "$.":".",
    "ADJA":"ADJ",
    "ADJD":"ADJ",
    "ADV":"ADV",
    "APPO":"ADP",
    "APPR":"ADP",
    "APPRART":"ADP",
    "APZR":"ADP",
    "ART":"DET",
    "CARD":"NUM",
    "FM":"X",
    "ITJ":"X",
    "KOKOM":"CONJ",
    "KON":"CONJ",
    "KOUI":"CONJ",
    "KOUS":"CONJ",
    "NE":"NOUN",
    "NN":"NOUN",
    "PDAT":"PRON",
    "PDS":"PRON",
    "PIAT":"PRON",
    "PIS":"PRON",
    "PPER":"PRON",
    "PPOSAT":"PRON",
    "PPOSS":"PRON",
    "PRELAT":"PRON",
    "PRELS":"PRON",
    "PRF":"PRON",
    "PAV":"PRON",
    "PROAV":"PRON",
    "PTKA":"PRT",
    "PTKANT":"PRT",
    "PTKNEG":"PRT",
    "PTKVZ":"PRT",
    "PTKZU":"PRT",
    "PWAT":"PRON",
    "PWAV":"PRON",
    "PWS":"PRON",
    "TRUNC":"X",
    "VAFIN":"VERB",
    "VAIMP":"VERB",
    "VAINF":"VERB",
    "VAPP":"VERB",
    "VMFIN":"VERB",
    "VMINF":"VERB",
    "VMPP":"VERB",
    "VVFIN":"VERB",
    "VVIMP":"VERB",
    "VVINF":"VERB",
    "VVIZU":"VERB",
    "VVPP":"VERB",
    "XY":"X",
    "PIDAT":"PRON",
    "SGML":"X",
    "SPELL":"X"
}
# -
# ### Hilfsfunktionen
# +
def get_tagged_sentences(raw_lines):
    """Parse raw corpus lines into sentences of (word, tag) tuples.

    Each non-empty line holds "word/tag" tokens joined by " ; "; the last
    character of the line is a trailing delimiter and is dropped. The tag is
    split off at the LAST slash, so words containing '/' stay intact.
    Empty lines are skipped.
    """
    sentences = []
    for raw in raw_lines:
        if not raw:
            continue
        tokens = raw.strip()[:-1].split(" ; ")
        parsed = []
        for token in tokens:
            parsed.append(tuple(token.strip().rsplit('/', 1)))
        sentences.append(parsed)
    return sentences
def create_index(sentences, special_values=()):
    """Build value→int and int→value lookup tables for a corpus.

    Each distinct token in `sentences` (first-occurrence order) is numbered
    starting at len(special_values); the special values occupy indices
    0..len(special_values)-1.

    Args:
        sentences: iterable of iterables of hashable tokens (words or tags).
        special_values: reserved entries (e.g. "<PAD>", "<UNK>") that must
            receive the lowest indices. Default changed from a mutable list
            to a tuple; the function never mutated it, so behavior is
            unchanged.

    Returns:
        (index, reverse_index): dicts mapping value→int and int→value.
        As in the original, a special value that also occurs in `sentences`
        keeps only its reserved low index.
    """
    # nltk.FreqDist was used only for its keys — the unique tokens in
    # first-occurrence order. dict.fromkeys gives exactly that with the
    # standard library alone (and without building the unused counts).
    unique_tokens = dict.fromkeys(
        token for sentence in sentences for token in sentence
    )
    offset = len(special_values)
    index = {token: i + offset for i, token in enumerate(unique_tokens)}
    for i, value in enumerate(special_values):
        index[value] = i
    reverse_index = {number: token for token, number in index.items()}
    return index, reverse_index
def translate(text, dictionnary, backup_value):
    """Map every element of `text` through `dictionnary`; unknown elements
    fall back to `backup_value`. Returns a numpy array of the results."""
    mapped = [dictionnary.get(item, backup_value) for item in text]
    return np.array(mapped)
def split_tuples(arrays_of_tuples):
    """Split sequences of 2-tuples into two parallel lists of numpy arrays.

    For input sentences of (word, tag) pairs, firsts[i] holds the words of
    sentence i and seconds[i] its tags, each as a numpy array in order.
    """
    firsts, seconds = [], []
    for pairs in arrays_of_tuples:
        first_items, second_items = zip(*pairs)
        firsts.append(np.asarray(first_items))
        seconds.append(np.asarray(second_items))
    return firsts, seconds
def to_categorical_reverse(categorical_sents):
    """Invert keras.utils.to_categorical: collapse one-hot rows back to
    integer class ids via argmax, sentence by sentence.

    Keras ships to_categorical but no inverse operation; approach adapted
    from
    https://stackoverflow.com/questions/47380663/numpy-reverse-keras-to-categorical
    """
    decoded = [
        np.array([np.argmax(one_hot, axis=None, out=None) for one_hot in sent])
        for sent in categorical_sents
    ]
    return np.array(decoded)
def split_too_long_sentences(sentences, max_length):
    """Chop any sentence longer than max_length into consecutive chunks of
    at most max_length tokens, preserving order; shorter sentences pass
    through unchanged. Every split performed is logged to stdout.
    """
    chunked = []
    for position, sent in enumerate(sentences):
        while len(sent) > max_length:
            print("Too long sentence", len(sent), "at index", position,
                  "splitting into", len(sent[:max_length]), "and", len(sent[max_length:]))
            chunked.append(sent[:max_length])
            sent = sent[max_length:]
        chunked.append(sent)
    return chunked
def remove_padding(sequences_padded, sequences_target_length):
    """Trim each padded sequence back to its original length.

    sequences_target_length provides, position by position, the reference
    (unpadded) sequence whose length the padded one is cut down to.
    """
    return [
        sequences_padded[i][:len(reference)]
        for i, reference in enumerate(sequences_target_length)
    ]
def plot_history(h):
    """Plot training/validation loss and accuracy curves from a keras History.

    Copied from TensorFlow_Intro.ipynb and lightly documented here.
    NOTE(review): reads h.history['acc'] / ['val_acc'] — older keras key
    names; newer versions use 'accuracy' / 'val_accuracy'. Confirm against
    the installed keras version.
    """
    history_dict = h.history
    history_dict.keys()  # no-op; leftover from the notebook original
    acc = h.history['acc']
    val_acc = h.history['val_acc']
    loss = h.history['loss']
    val_loss = h.history['val_loss']
    plt.rcParams['figure.figsize'] = (12.0, 4.0)
    epochs = range(1, len(acc) + 1)
    # Loss curves: dots for training, solid line for validation.
    plt.plot(epochs, loss, 'bo', label='Training loss') # "bo" is for "blue dot"
    plt.plot(epochs, val_loss, 'b' , label='Validation loss') # b is for "solid blue line"
    plt.title('Training and validation loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    axes = plt.gca()
    axes.set_ylim([0.0,0.2])  # zoom in on the low-loss region
    plt.show()
    plt.clf() # clear figure
    acc_values = history_dict['acc']      # unused; kept byte-identical
    val_acc_values = history_dict['val_acc']  # unused; kept byte-identical
    # Accuracy curves on a zoomed-in y-axis.
    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b' , label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend()
    axes = plt.gca()
    axes.set_ylim([0.92,1])
    plt.show()
# -
# ### Files öffnen
# +
# Load both corpora and parse them into (word, tag) sentences.
# Context managers guarantee the file handles are closed — the original left
# them open for the whole session. Encoding is pinned to UTF-8 for the German
# umlauts; NOTE(review): confirm the corpus files really are UTF-8 encoded.
with open("POS_German_train.txt", "r", encoding="utf-8") as train_text_file:
    train_lines = train_text_file.read().split('\n')
with open("POS_German_minitest.txt", "r", encoding="utf-8") as test_text_file:
    test_lines = test_text_file.read().split('\n')
# create sents with (word, tag) tuples
train_tagged_sents = get_tagged_sentences(train_lines)
test_tagged_sents = get_tagged_sentences(test_lines)
# max sentence length based on the longest train sentence (test sentences are
# treated as unknown at this point)
train_max_sent_length = max(len(sentence) for sentence in train_tagged_sents)
print("Max Sent Length ", train_max_sent_length)
# split too-long test sentences; usually this would happen in a separate data
# cleansing step
number_of_test_sents_orig = len(test_tagged_sents)
test_tagged_sents = split_too_long_sentences(test_tagged_sents, train_max_sent_length)
print("Length Test orig ", number_of_test_sents_orig, "and after split", len(test_tagged_sents))
# split the (word, tag) pairs into separate word and tag sentence arrays
train_sents_words, train_sents_tags_stts = split_tuples(train_tagged_sents)
test_sents_words, test_sents_tags_stts = split_tuples(test_tagged_sents)
# map STTS tags to the Universal tagset
train_sents_tags = [translate(t, stts_to_universal, "tag not found") for t in train_sents_tags_stts]
test_sents_tags = [translate(t, stts_to_universal, "tag not found") for t in test_sents_tags_stts]
# Show one example sentence from each corpus at every processing stage.
print("Raw Line: ", train_lines[1][:80])
print("Tagged sentence ", train_tagged_sents[1][:5])
print("Words ", train_sents_words[1][:5])
print("Tags STTS ", train_sents_tags_stts[1][:5])
print("Tags Universal ", train_sents_tags[1][:5])
print()
print("Raw Line: ", test_lines[1][:80])
print("Tagged sentence ", test_tagged_sents[1][:5])
print("Words ", test_sents_words[1][:5])
print("Tags STTS ", test_sents_tags_stts[1][:5])
print("Tags Universal ", test_sents_tags[1][:5])
# -
# #### Test Mapping STTS nach Universal Tagset
# +
def test_stts_to_universal_mapping(sentences_tags_stts):
    """Print each distinct STTS tag of the corpus next to its Universal-tagset
    translation, so unmapped tags show up as "error, tag not found".

    Args:
        sentences_tags_stts: iterable of sentences, each an iterable of STTS
            tag strings.
    """
    # nltk.FreqDist was used only for its keys; sorted(set(...)) yields the
    # same sorted unique tags with the standard library alone.
    distinct_tags = sorted({tag for sentence in sentences_tags_stts for tag in sentence})
    universal_tags = translate(distinct_tags, stts_to_universal, "error, tag not found")
    for stts_tag, universal_tag in zip(distinct_tags, universal_tags):
        print(stts_tag, universal_tag)
test_stts_to_universal_mapping(train_sents_tags_stts)
# -
# ### Zusätzliches Test/Dev Set erstellen
# Es wird mit vier Sets gearbeitet
#
# - Train
# - Test
#
# für möglichst hohe accuracy im POS_German_minitest.txt und
#
# - Train Partial (90% des Train sets)
# - Dev (10% des Train sets)
#
# während der Entwicklung zur Erarbeitung möglichst guter Hyperparameter.
#
#
# ### Mapping Word>Index, Tag>Index und umgekehrt anlegen
#
# Auch hier werden Mappings mit gesamtem Train Set erstellt und auch mit 90% der Trainings Daten zum Testen des Dev Sets.
#
# +
# Split (and shuffle) the full training set once more into a 90% partial
# training set and a 10% dev set, used for hyper-parameter tuning during
# development.
(train_sents_words_partial,
dev_sents_words,
train_sents_tags_partial,
dev_sents_tags) = train_test_split(
train_sents_words,
train_sents_tags,
test_size=0.1)
# create index with 100% of train_sentences
word_to_index, index_to_word = create_index(train_sents_words, ["<PAD>","<UNK>"] )
tag_to_index, index_to_tag = create_index(train_sents_tags, ["<PAD>"])
# create index only with 90% of data (train_sents_words_partial)
word_to_index_dev, index_to_word_dev = create_index(train_sents_words_partial, ["<PAD>","<UNK>"] )
tag_to_index_dev, index_to_tag_dev = create_index(train_sents_tags_partial, ["<PAD>"])
# Sanity-check the set sizes and a few sample mappings of every index built above.
print("Length Train / Test sents: ", len(train_sents_words), len(test_sents_words))
print("Length Train Partial / Dev sents: ", len(train_sents_words_partial), len(dev_sents_words))
print("Length Word / Tag Index: ", len(word_to_index), len(tag_to_index))
print("Length Dev Word / Tag Index: ", len(word_to_index_dev), len(tag_to_index_dev))
print("Word to index: ", list(word_to_index.items())[-4:])
print("Index to word: ", list(index_to_word.items())[-4:])
print("Word to index Dev: ", list(word_to_index_dev.items())[-4:])
print("Index to word Dev: ", list(index_to_word_dev.items())[-4:])
# Print the full tag index side by side with its reverse mapping.
for (a, b) in zip(tag_to_index.items(), index_to_tag.items()):
    print(a,b)
# -
# ### Sätze zu Integer übersetzen
# +
# Translate every corpus into integer sequences; unknown words fall back to
# the <UNK> index of the corresponding word index.
train_sents_words_int = [translate(s, word_to_index, word_to_index["<UNK>"]) for s in train_sents_words]
train_sents_tags_int = [translate(s, tag_to_index, tag_to_index["X"]) for s in train_sents_tags]
# the backup value "X" is never used because all tags are known
test_sents_words_int = [translate(s, word_to_index, word_to_index["<UNK>"]) for s in test_sents_words]
test_sents_tags_int = [translate(s, tag_to_index, tag_to_index["X"]) for s in test_sents_tags]
# The partial/dev sets use the indexes built from only 90% of the data.
train_sents_words_partial_int = [translate(s, word_to_index_dev, word_to_index_dev["<UNK>"]) for s in train_sents_words_partial]
train_sents_tags_partial_int = [translate(s, tag_to_index_dev, tag_to_index_dev["X"]) for s in train_sents_tags_partial]
dev_sents_words_int = [translate(s, word_to_index_dev, word_to_index_dev["<UNK>"]) for s in dev_sents_words]
dev_sents_tags_int = [translate(s, tag_to_index_dev, tag_to_index_dev["X"]) for s in dev_sents_tags]
# Spot-check the word→int→word and tag→int→tag round trip on sentence 15 of
# each of the four sets.
print("Train Set")
print(train_sents_words[15][:7])
print(translate(train_sents_words_int[15],index_to_word,"<UNK>")[:7])
print(train_sents_words_int[15][:7])
print(train_sents_tags[15][:7])
print(translate(train_sents_tags_int[15][:7],index_to_tag,"X"))
print(train_sents_tags_int[15][:7])
print()
print("Test Set. Might contain <UNK> words, because translate-index was created only with train set.")
print(test_sents_words[15][:7])
print(translate(test_sents_words_int[15],index_to_word,"<UNK>")[:7])
print(test_sents_words_int[15][:7])
print(test_sents_tags[15][:7])
print(translate(test_sents_tags_int[15][:7],index_to_tag,"X"))
print(test_sents_tags_int[15][:7])
print()
print("Train Set Partial")
print(train_sents_words_partial[15][:7])
print(translate(train_sents_words_partial_int[15],index_to_word_dev,"<UNK>")[:7])
print(train_sents_words_partial_int[15][:7])
print(train_sents_tags_partial[15][:7])
print(translate(train_sents_tags_partial_int[15][:7],index_to_tag_dev,"X"))
print(train_sents_tags_partial_int[15][:7])
print()
print("Dev Set. Might contain <UNK> words, because translate-index was created only with train set partial.")
print(dev_sents_words[15][:7])
print(translate(dev_sents_words_int[15],index_to_word_dev,"<UNK>")[:7])
print(dev_sents_words_int[15][:7])
print(dev_sents_tags[15][:7])
print(translate(dev_sents_tags_int[15][:7],index_to_tag_dev,"X"))
print(dev_sents_tags_int[15][:7])
# -
# ### Sätze padden
# +
# Pad every integer sequence (post-padding) to the length of the longest
# training sentence so all inputs share one fixed shape.
# NOTE(review): the tag sequences and the partial/dev sets are padded with
# word_to_index["<PAD>"] instead of tag_to_index["<PAD>"] /
# word_to_index_dev["<PAD>"]. This only works because "<PAD>" is index 0 in
# every index built above — confirm before changing index construction.
train_sents_words_int = keras.preprocessing.sequence.pad_sequences(train_sents_words_int, value=word_to_index["<PAD>"], padding='post',maxlen=train_max_sent_length)
train_sents_tags_int = keras.preprocessing.sequence.pad_sequences(train_sents_tags_int, value=word_to_index["<PAD>"], padding='post',maxlen=train_max_sent_length)
test_sents_words_int = keras.preprocessing.sequence.pad_sequences(test_sents_words_int, value=word_to_index["<PAD>"], padding='post',maxlen=train_max_sent_length)
test_sents_tags_int = keras.preprocessing.sequence.pad_sequences(test_sents_tags_int, value=word_to_index["<PAD>"], padding='post',maxlen=train_max_sent_length)
train_sents_words_partial_int = keras.preprocessing.sequence.pad_sequences(train_sents_words_partial_int, value=word_to_index["<PAD>"], padding='post',maxlen=train_max_sent_length)
train_sents_tags_partial_int = keras.preprocessing.sequence.pad_sequences(train_sents_tags_partial_int, value=word_to_index["<PAD>"], padding='post',maxlen=train_max_sent_length)
dev_sents_words_int = keras.preprocessing.sequence.pad_sequences(dev_sents_words_int, value=word_to_index["<PAD>"], padding='post',maxlen=train_max_sent_length)
dev_sents_tags_int = keras.preprocessing.sequence.pad_sequences(dev_sents_tags_int, value=word_to_index["<PAD>"], padding='post',maxlen=train_max_sent_length)
# Visual check: sentence 15 of each set, padded, with its back-translation.
print(train_sents_words_int[15])
print(translate(train_sents_words_int[15],index_to_word,index_to_word[1]))
print(train_sents_tags_int[15])
print(translate(train_sents_tags_int[15],index_to_tag,index_to_tag[1]))
print(test_sents_words_int[15])
print(translate(test_sents_words_int[15],index_to_word,index_to_word[1]))
print(test_sents_tags_int[15])
print(translate(test_sents_tags_int[15],index_to_tag,index_to_tag[1]))
print(train_sents_words_partial_int[15])
print(translate(train_sents_words_partial_int[15],index_to_word_dev,index_to_word_dev[1]))
print(train_sents_tags_partial_int[15])
print(translate(train_sents_tags_partial_int[15],index_to_tag_dev,index_to_tag_dev[1]))
print(dev_sents_words_int[15])
print(translate(dev_sents_words_int[15],index_to_word_dev,index_to_word_dev[1]))
print(dev_sents_tags_int[15])
print(translate(dev_sents_tags_int[15],index_to_tag_dev,index_to_tag_dev[1]))
# -
# ### Model erstellen
# +
from keras.models import Sequential
from keras.layers import Dense, LSTM, SimpleRNN, GRU, Bidirectional, TimeDistributed, Embedding
from keras.optimizers import Adam
def create_model(n_words_longest_sentence, n_distinct_words, n_distinct_tags):
    """Build and compile the BiLSTM POS-tagging network.

    Architecture: Embedding (masked padding) -> Bidirectional LSTM ->
    time-distributed Dense -> softmax Dense over the tag set.

    Args:
        n_words_longest_sentence: padded sentence length (number of timesteps).
        n_distinct_words: vocabulary size (Embedding input_dim is this + 1).
        n_distinct_tags: number of output tag classes.

    Returns:
        A compiled keras Sequential model (categorical crossentropy, Adam).
    """
    stack = [
        # input_dim must be vocabulary size + 1; mask_zero=True makes the
        # downstream layers skip the padding index entirely.
        Embedding(n_distinct_words + 1, 128, mask_zero=True,
                  input_shape=(n_words_longest_sentence, )),
        # return_sequences=True is required so TimeDistributed receives one
        # vector per timestep. 128 units gave the best results in experiments:
        # SimpleRNN 97.1-97.2, GRU 97.2-97.7, LSTM 97.4-97.8 (best with batch
        # size 2); go_backwards=True hurt performance.
        Bidirectional(LSTM(128, return_sequences=True)),
        TimeDistributed(Dense(n_distinct_tags)),
        # Multi-class, single-label output: softmax + categorical crossentropy.
        Dense(n_distinct_tags, activation='softmax'),
    ]
    model = Sequential()
    for layer in stack:
        model.add(layer)
    # Adam performed better than rmsprop in this setup.
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(0.001),
                  metrics=['accuracy'])
    return model
def fit_model(m, sents_words_int, sents_tags_int, validation_split):
    """Train model *m* on padded word/tag id sequences.

    Tags are one-hot encoded on the fly. A batch-size sweep (256 down to 1)
    found batch_size=2 best (val_acc ~0.9771 after 2 epochs, ~3400s/epoch on
    a 4-core 2.4GHz CPU), so only two epochs are trained: tiny batches adjust
    the parameters after nearly every sentence and converge very quickly.

    Returns:
        The keras History object from fit().
    """
    one_hot_tags = keras.utils.to_categorical(sents_tags_int)
    return m.fit(sents_words_int,
                 one_hot_tags,
                 epochs=2,
                 batch_size=2,
                 validation_split=validation_split,
                 verbose=1)
# -
# ### Hilfsfunktionen
# +
def predict_tags(trained_model, sentences_to_predict, sentences_to_predict_int, true_tags, dictionnary, log_sent=None):
    """Predict tag sequences for padded sentences and map them back to strings.

    Pipeline: model output -> argmax over classes -> strip padding using the
    true tag lengths -> translate indices to tag strings via *dictionnary*.
    If *log_sent* is a truthy sentence index, diagnostics for that sentence
    are printed.

    Returns:
        A list of per-sentence lists of predicted tag strings.
    """
    raw = trained_model.predict(sentences_to_predict_int)
    padded = to_categorical_reverse(raw)
    unpadded = remove_padding(padded, true_tags)
    tag_sequences = [translate(seq, dictionnary,"error") for seq in unpadded]
    if log_sent:
        print("Predictions Categorical ", raw[log_sent])
        print("Predictions with Padding", padded[log_sent])
        print("Predictions no Padding", unpadded[log_sent])
        print("Orig words: ", len(sentences_to_predict[log_sent]),
              "True tags: ", len(true_tags[log_sent]),
              "Predicted tags: ", len(tag_sequences[log_sent]))
        for (true_tag, predicted_tag, word) in zip(true_tags[log_sent], tag_sequences[log_sent], sentences_to_predict[log_sent]):
            print(true_tag, predicted_tag, word)
    return tag_sequences
def calculate_custom_accuracy_without_padding(predicted_tags, true_tags):
    """Tag accuracy over sentences whose padding has already been removed.

    Both arguments are lists of per-sentence tag lists; they are flattened
    before scoring so every tag contributes equally.
    """
    flat_true = [t for sent in true_tags for t in sent]
    flat_pred = [t for sent in predicted_tags for t in sent]
    return accuracy_score(flat_true, flat_pred)
def run_model(_train_max_sent_length,
              _index_to_word,
              _index_to_tag,
              _train_sents_words_int,
              _train_sents_tags_int,
              _test_sents_words,
              _test_sents_words_int,
              _test_sents_tags,
              _test_sents_tags_int,
              _validation_split):
    """End-to-end cycle: build the tagger, train it, predict, and report.

    Prints both the keras accuracy (which includes padding positions) and a
    custom accuracy computed after manually stripping the padding.
    """
    net = create_model(_train_max_sent_length, len(_index_to_word), len(_index_to_tag))
    net.summary()
    # The History object is not plotted: with only two epochs a curve adds nothing.
    fit_model(net, _train_sents_words_int, _train_sents_tags_int, _validation_split)
    predicted = predict_tags(net,
                             _test_sents_words,
                             _test_sents_words_int,
                             _test_sents_tags,
                             _index_to_tag,
                             15)
    keras_eval = net.evaluate(_test_sents_words_int, keras.utils.to_categorical(_test_sents_tags_int, len(_index_to_tag)))
    print("Accuracy Keras: ", keras_eval[1] * 100)
    custom_acc = calculate_custom_accuracy_without_padding(predicted, _test_sents_tags)
    print("Accuracy Custom (manually ignoring padding): ", custom_acc * 100)
# -
# ### Modell trainieren mit 90% der Trainingsdaten (davon 10% zur Validierung) und mit 10% der Trainingsdaten testen
# Training Time ~2 hours using CPU
# Train on 90% of the training data (10% of that held for validation) and
# test on the 10% dev split.
run_model(train_max_sent_length,
          index_to_word_dev,
          index_to_tag_dev,
          train_sents_words_partial_int,
          train_sents_tags_partial_int,
          dev_sents_words,
          dev_sents_words_int,
          dev_sents_tags,
          dev_sents_tags_int,
          0.1)
# ### Train the model on 100% of the training data and test on POS_German_minitest.txt
run_model(train_max_sent_length,
          index_to_word,
          index_to_tag,
          train_sents_words_int,
          train_sents_tags_int,
          test_sents_words,
          test_sents_words_int,
          test_sents_tags,
          test_sents_tags_int,
          0.0)
|
part_of_speech_tagger/pos_tagger.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Importing the necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from sklearn import model_selection
from sklearn import preprocessing
from sklearn.model_selection import train_test_split, cross_validate
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn import ensemble
# +
# Importing the rent dataset
dataset_df = pd.read_csv('lagosrent.csv')
dataset_df.head()
# -
# ## Exploratory Data Analysis
# +
# Viewing the summary of the dataset (dtypes and non-null counts per column)
dataset_df.info()
# -
# The dataset has null values
dataset_df.describe()
# +
# Viewing the number of unique values in the imported dataset
dataset_df.nunique()
# -
# Bar charts of value frequencies for each feature.
dataset_df['rent'].value_counts().plot(kind='bar', figsize = (12,5))
plt.title('Value Counts of Rent')
plt.xlabel('Rent')
plt.ylabel('Value Counts')
# From the above plot, we can see a rent of 1200, which is impossible. The rows containing 'rent' = 1200 will be handled later.
dataset_df['location'].value_counts().plot(kind='bar', figsize = (12,5))
plt.title('Value Counts of Location')
plt.xlabel('Location')
plt.ylabel('Value Counts')
dataset_df['bedroom'].value_counts().plot(kind='bar', figsize = (12,5))
plt.title('Value Counts of bedroom')
plt.xlabel('Bedroom')
plt.ylabel('Value Counts')
dataset_df['bathroom'].value_counts().plot(kind='bar', figsize = (12,5))
plt.title('Value Counts of Bathrooms')
plt.xlabel('Bathroom')
plt.ylabel('Value Counts')
# 120 bathrooms looks unusual for a residential home. This will be handled later.
dataset_df['toilet'].value_counts().plot(kind='bar', figsize = (12,5))
plt.title('Value Counts of Toilets')
plt.xlabel('Toilet')
plt.ylabel('Value Counts')
dataset_df['parking'].value_counts().plot(kind='bar', figsize = (12,5))
plt.title('Value Counts of Parking Spaces')
plt.xlabel('Parking Spaces')
plt.ylabel('Value Counts')
# +
# Checking for null values in the dataset
dataset_df.isnull().sum()
# +
# Null counts per feature, reported as a percentage of all rows.
null_bedroom = dataset_df["bedroom"].isnull().sum()
null_bathroom = dataset_df["bathroom"].isnull().sum()
null_toilet = dataset_df["toilet"].isnull().sum()
null_parking = dataset_df["parking"].isnull().sum()
num_entries = len(dataset_df)
print(f"Percentage of null bedrooms: {round(null_bedroom/num_entries*100)}%")
print(f"Percentage of null bathrooms: {round(null_bathroom/num_entries*100)}%")
print(f"Percentage of null toilets: {round(null_toilet/num_entries*100)}%")
print(f"Percentage of null parking spaces: {round(null_parking/num_entries*100)}%")
# +
# Checking the 'location' feature
dataset_df.groupby('location').count()
# -
# 'Banana Island Ikoyi Lagos' and 'Ikotun Lagos' have null values all through, and will be dropped
# +
# Checking the 'bedroom' feature
dataset_df.groupby('bedroom').count()
# +
# Checking the 'toilet' feature
dataset_df.groupby('toilet').count()
# +
# Checking the 'bathroom' feature
dataset_df.groupby('bathroom').count()
# +
# Checking the 'parking' feature
parking_grouped = dataset_df.groupby('parking').count()
parking_grouped
# +
# Checking for outliers in the features
dataset_df[["bedroom", "bathroom", "toilet", "parking"]].boxplot(figsize = (12,6))
plt.show()
# -
# ## Data Cleaning
# +
# Making a copy of the original dataset
data_df = dataset_df.copy()
# +
# Dropping the locations 'Banana Island Ikoyi Lagos' and 'Ikotun Lagos' which have null values in all the features
# (note: the labels carry a leading space, matching the raw data)
data_df = data_df.set_index("location")
data_df.drop([" Banana Island Ikoyi Lagos", " Ikotun Lagos"], inplace=True)
data_df.head()
# +
# Dropping the rows with 'bathroom' == 120
data_df = data_df[data_df.bathroom != 120]
# +
data_df.reset_index(inplace=True)
data_df.head()
# -
# Rechecking the number of null values
data_df.isnull().sum()
data_df.shape
# +
# Sorted list of the distinct rent values; used below for rent-band imputation.
rent_list = list(data_df.rent.unique())
rent_list = sorted(rent_list)
print(rent_list)
# -
def null_filler(data, col):
    """Conditional mode imputation for *col*, bucketed by rent band.

    Rows are grouped into fixed rent bands and missing values in *col* are
    replaced with that band's mode, so imputation respects the price level.

    Fixes over the previous version:
      * the per-band work is done once instead of once per unique rent value
        (the old loop over the global ``rent_list`` recomputed identical
        fillers repeatedly, and the global dependency is gone);
      * an empty rent band no longer raises NameError at the final concat
        (df1..df7 could be unassigned);
      * rows whose rent falls outside every band (<= 200,000 or > 18,000,000)
        were silently dropped before — they are now kept and filled with the
        overall mode of *col*.

    Args:
        data: DataFrame with a numeric 'rent' column.
        col: name of the column whose NaNs should be imputed.

    Returns:
        A new DataFrame (index reset) with NaNs in *col* imputed.
    """
    bands = [(200_000, 600_000), (600_000, 1_000_000), (1_000_000, 3_000_000),
             (3_000_000, 5_000_000), (5_000_000, 8_000_000),
             (8_000_000, 12_000_000), (12_000_000, 18_000_000)]
    pieces = []
    covered = pd.Series(False, index=data.index)
    for low, high in bands:
        in_band = (data['rent'] > low) & (data['rent'] <= high)
        covered |= in_band
        subset = data[in_band]
        if subset.empty:
            continue
        band_modes = subset[col].mode()
        if not band_modes.empty:  # an all-NaN band is left as-is rather than crashing
            subset = subset.fillna({col: band_modes[0]})
        pieces.append(subset)
    # Keep (and impute) rows outside every band instead of dropping them.
    leftover = data[~covered]
    if not leftover.empty:
        overall_modes = data[col].mode()
        if not overall_modes.empty:
            leftover = leftover.fillna({col: overall_modes[0]})
        pieces.append(leftover)
    if not pieces:
        return data.reset_index(drop=True)
    return pd.concat(pieces, axis=0).reset_index(drop=True)
# +
# Filling the null values in the 'bedroom' column
non_null_data = null_filler(data_df, 'bedroom')
non_null_data.isnull().sum()
# +
# Filling the null values in the 'bathroom' column
non_null_data = null_filler(non_null_data, 'bathroom')
non_null_data.isnull().sum()
# +
# Filling the null values in the 'toilet' column
non_null_data = null_filler(non_null_data, 'toilet')
non_null_data.isnull().sum()
# +
# Filling the null values in the 'parking' column
non_null_data = null_filler(non_null_data, 'parking')
non_null_data.isnull().sum()
# +
# All imputations done: keep the result under a clearer name.
cleaned_data = non_null_data
cleaned_data.head()
# -
cleaned_data.shape
# +
# Summary statistics of the cleaned data
cleaned_data.describe()
# +
# Re-check the feature distributions after imputation and outlier removal.
cleaned_data[["bedroom", "bathroom", "toilet", "parking"]].boxplot(figsize = (12,10))
plt.show()
# -
# The plot above reveals that the outliers have been taken care of. Twelve parking spaces are plausible for a residential property and cannot be interpreted as an outlier.
# ### Feature Engineering
# +
# Merge duplicated spellings of the same location into one canonical label.
location_dict = {'Bogije Bogije Ibeju Lekki Lagos':'Bogije Ibeju_Lekki Lagos',
                 'Eleko Eleko Ibeju Lekki Lagos':'Eleko Ibeju_Lekki Lagos',
                 'Beechwood Estate Bogije Ibeju Lekki Lagos':'Beechwood Estate Bogije Ibeju_Lekki Lagos'}
cleaned_data.replace(location_dict, inplace=True)
# +
# Creating a column for the Districts in Lagos
def get_district(location):
    """Return the district: the second-to-last space-separated token.

    Location strings end in '<district> Lagos', so splitting off the final
    two tokens and taking the first of them yields the district name.
    """
    return location.rsplit(' ', 2)[-2]
cleaned_data['district'] = cleaned_data['location'].apply(lambda x: get_district(x))
# -
cleaned_data.head()
cleaned_data['district'].value_counts()
# +
# Renaming 'VI' to Victoria Island
cleaned_data.replace({'(VI)': 'Victoria Island'}, inplace=True)
cleaned_data['district'].value_counts()
# +
# Creating a column for island districts
district_dict1 = {'Ajah':'yes', 'Gbagada':'no', 'Ikeja':'no', 'Ikoyi':'yes', 'Isolo':'no',
                  'Lekki':'yes', 'Maryland':'no', 'Surulere':'no', 'Victoria Island':'yes', 'Yaba':'no'}
cleaned_data['island'] = cleaned_data['district'].replace(district_dict1)
cleaned_data.head()
# -
cleaned_data['island'].value_counts()
# +
# Generating a column for industrial districts
district_dict2 = {'Ajah':'no', 'Gbagada':'yes', 'Ikeja':'yes', 'Ikoyi':'no', 'Isolo':'yes',
                  'Lekki':'no', 'Maryland':'no', 'Surulere':'yes', 'Victoria Island':'no', 'Yaba':'no'}
cleaned_data['industrial'] = cleaned_data['district'].replace(district_dict2)
cleaned_data.head()
# +
# Shuffling the cleaned dataset (a full-length sample is a random permutation)
cleaned_data = cleaned_data.sample(cleaned_data.shape[0]).reset_index(drop=True)
cleaned_data.head()
# -
# Persist the cleaned dataset; to_csv returns None, the variable is just a marker.
project_data_cleaned = cleaned_data.to_csv('project_data_cleaned.csv', index=False)
# +
# Encoding the non-numeric labels
from sklearn.preprocessing import LabelEncoder
label_encode = LabelEncoder()
cleaned_data['district'] = label_encode.fit_transform(cleaned_data['district'])
cleaned_data['island'] = label_encode.fit_transform(cleaned_data['island'])
cleaned_data['industrial'] = label_encode.fit_transform(cleaned_data['industrial'])
# -
cleaned_data.head()
# ### Normalizing the dataset
# +
# Drop the free-text 'location' column; keep only numeric features.
num_data = cleaned_data.drop(columns=['location'])
num_data.head()
# +
# Scaling the features with MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
min_max_scaler = MinMaxScaler()
min_max_data = pd.DataFrame(min_max_scaler.fit_transform(num_data), columns=num_data.columns)
# -
min_max_data.head()
# +
# Scaling the features with StandardScaler
from sklearn.preprocessing import StandardScaler
std_scaler = StandardScaler()
std_scaled_data = pd.DataFrame(std_scaler.fit_transform(num_data), columns=num_data.columns)
# -
std_scaled_data.head()
# ### Feature Selection
# +
plt.figure(figsize=(12, 6))
# Upper-triangle mask so each correlation appears once.
# Fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24 (it raised
# AttributeError); the builtin bool is the documented replacement.
mask = np.triu(np.ones_like(cleaned_data.corr(), dtype=bool))
heatmap = sns.heatmap(cleaned_data.corr(), mask=mask, vmin=-1, vmax=1, annot=True, cmap='BrBG')
heatmap.set_title('Correlation Heatmap', fontdict={'fontsize':18}, pad=16)
# +
from sklearn.feature_selection import RFE
def rfe_selector(df, model):
    """Rank features for predicting 'rent' via recursive feature elimination.

    Fits RFE (keeping the 4 strongest features) on an 80/20 train split of
    *df* and returns a list of (feature_name, selected?, rank) tuples.

    Args:
        df: DataFrame containing a 'rent' target column plus feature columns.
        model: estimator with coef_/feature_importances_ for RFE to use.
    """
    x = df.drop(columns=['rent'])
    y = df['rent']
    # Only the train split is used; RFE should not see the test rows.
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 42)
    # Fix: pass n_features_to_select by keyword — the positional form was
    # deprecated in scikit-learn 0.24 and removed in 1.2.
    rfe = RFE(model, n_features_to_select=4).fit(x_train, y_train)
    return list(zip(x_train.columns, rfe.support_, rfe.ranking_))
# -
# RFE ranking with a linear model on the MinMax-scaled data.
rfe_selector(min_max_data, LinearRegression())
# Features flagged by RFE above; reused for the 'selected features' training runs.
rfe_list = ['bedroom', 'toilet', 'parking', 'island']
# ### Model Training with all features normalized by MinMaxScaler
def model_fitter(df, model):
    """Fit *model* on *df* (target column 'rent') and print evaluation metrics.

    Uses a fixed 80/20 train/test split (random_state=42) and reports the
    train/test scores from model.score plus r2, MAE, RSS and RMSE on the
    held-out test set. The model is fitted in place; nothing is returned.

    Args:
        df: DataFrame with a 'rent' target column plus feature columns.
        model: any sklearn-style regressor with fit/predict/score.
    """
    # separating the dataset into 'features' and 'targets'
    x = df.drop(columns=['rent'])
    y = df['rent']
    # splitting the 'features' and 'target' into train and test data
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 42)
    # fit the model on the train variables (dead `model_init`/`model_fit`
    # aliases from the previous version removed)
    model.fit(x_train, y_train)
    # make predictions and evaluate the model on several metrics
    prediction = model.predict(x_test)
    train_score = model.score(x_train,y_train)
    test_score = model.score(x_test,y_test)
    R2_score = round(r2_score(y_test, prediction), 2)
    MAE = round(mean_absolute_error(y_test, prediction), 2)
    RSS = round(np.sum(np.square(y_test - prediction)), 2)
    rmse = round(np.sqrt(mean_squared_error(y_test, prediction)), 2)
    print(f'Training score on train data: {train_score}')
    print(f'Training score on test data: {test_score}')
    print(f'r2_score: {R2_score}')
    print(f'MAE score: {MAE}')
    print(f'RSS score: {RSS}')
    print(f'RMSE score: {rmse}')
# +
# Linear Regression Model on MinMax scaled data
model_fitter(min_max_data, LinearRegression())
# +
# Ridge Regression Model on MinMax scaled data
model_fitter(min_max_data, Ridge(alpha=1.0))
# +
# Lasso Regression Model on MinMax scaled data
model_fitter(min_max_data, Lasso(alpha=0.001))
# +
# Gradient Boosting Regressor on MinMax scaled data
# NOTE(review): loss='ls' was renamed 'squared_error' in scikit-learn 1.0 and
# removed in 1.2 — update this if running on a modern sklearn.
gb_reg = ensemble.GradientBoostingRegressor(n_estimators=400, max_depth =5, min_samples_split=3,
                                            learning_rate=0.003, loss='ls')
model_fitter(min_max_data, gb_reg)
# +
# Random Forest Regressor on MinMax scaled data
randf_reg = ensemble.RandomForestRegressor(n_estimators=400, random_state=0)
model_fitter(min_max_data, randf_reg)
# +
# Hand-copied metrics from the runs above ('Rigde' is a typo in the label,
# kept as-is so the tables stay consistent across sections).
result1 = {'Model':['r2score', 'mae', 'rss', 'rmse'],
           'Linear regression':[0.46, 0.14, 104.01, 0.19],
           'Rigde regression':[0.46, 0.14, 104.04, 0.19],
           'LASSO regression':[0.43, 0.13, 109.93, 0.19],
           'Gradient boosting regressor':[0.91, 0.06, 18.16, 0.08],
           'Random forest regressor':[0.99, 0.0, 1.09, 0.02]}
result_df = pd.DataFrame(result1)
result_df
# -
# ### Model Training with all features normalized by Standard Scaler
# +
# Linear Regression Model on Standard scaled data
model_fitter(std_scaled_data, LinearRegression())
# +
# Ridge Regression Model on Standard scaled data
model_fitter(std_scaled_data, Ridge(alpha=1.0))
# +
# Lasso Regression Model on Standard scaled data
model_fitter(std_scaled_data, Lasso(alpha=0.001))
# +
# Gradient Boosting Regressor on Standard scaled data
gb_reg = ensemble.GradientBoostingRegressor(n_estimators=400, max_depth =5, min_samples_split=3,
                                            learning_rate=0.003, loss='ls')
model_fitter(std_scaled_data, gb_reg)
# +
# Random Forest Regressor on Standard scaled data
randf_reg = ensemble.RandomForestRegressor(n_estimators=400, random_state=0)
model_fitter(std_scaled_data, randf_reg)
# +
result2 = {'Model':['r2score', 'mae', 'rss', 'rmse'],
           'Linear regression':[0.46, 0.54, 1583.58, 0.73],
           'Rigde regression':[0.46, 0.54, 1583.58, 0.73],
           'LASSO regression':[0.46, 0.54, 1584.25, 0.73],
           'Gradient boosting regressor':[0.91, 0.22, 276.5, 0.3],
           'Random forest regressor':[0.99, 0.01, 16.63, 0.07]}
result2_df = pd.DataFrame(result2)
result2_df
# -
# ### Model Training with selected features
rfe_list
selected_data = min_max_data[['rent'] + rfe_list]
# +
# Linear Regression Model on selected data
model_fitter(selected_data, LinearRegression())
# +
# Ridge Regression Model on selected data
model_fitter(selected_data, Ridge(alpha=1.0))
# +
# Lasso Regression Model on selected data
model_fitter(selected_data, Lasso(alpha=0.001))
# +
# Gradient Boosting Regressor on selected data
gb_reg = ensemble.GradientBoostingRegressor(n_estimators=400, max_depth =5, min_samples_split=3,
                                            learning_rate=0.003, loss='ls')
model_fitter(selected_data, gb_reg)
# +
# Random Forest Regressor on selected data
randf_reg = ensemble.RandomForestRegressor(n_estimators=500, random_state=0)
model_fitter(selected_data, randf_reg)
# +
# NOTE(review): the LASSO rmse of 86.29 below looks like a copy error (it
# repeats the GBR rss value); presumably ~0.19 — verify against the run output.
result3 = {'Model':['r2score', 'mae', 'rss', 'rmse'],
           'Linear regression':[0.45, 0.14, 106.68, 0.19],
           'Rigde regression':[0.45, 0.14, 106.69, 0.19],
           'LASSO regression':[0.42, 0.13, 111.11, 86.29],
           'Gradient boosting regressor':[0.55, 0.11, 86.29, 0.17],
           'Random forest regressor':[0.6, 0.09, 76.6, 0.16]}
result3_df = pd.DataFrame(result3)
result3_df
# -
def model_summ(df):
    """Compare five regressors on *df* with 10-fold cross-validated r2.

    Prints one 'name: mean (std)' line per model. Nothing is returned.

    Fixes over the previous version: the train/test split and the
    `results`/`names` accumulators were computed but never used
    (cross_val_score operates on the full x/y), so they are removed.

    Args:
        df: DataFrame with a 'rent' target column plus feature columns.
    """
    # separating the dataset into 'features' and 'targets'
    x = df.drop(columns=['rent'])
    y = df['rent']
    # NOTE(review): loss='ls' was removed in scikit-learn 1.2 ('squared_error').
    gb_reg = ensemble.GradientBoostingRegressor(n_estimators=400, max_depth =5, min_samples_split=3,
                                                learning_rate=0.003, loss='ls')
    models = [
        ('LR', LinearRegression()),
        ('Ridge', Ridge(alpha=0.4)),
        ('Lasso', Lasso(alpha=0.001)),
        ('RFR', RandomForestRegressor(n_estimators=400, random_state=0)),
        ('GBR', gb_reg),
    ]
    scoring = 'r2'
    for name, model in models:
        kfold = model_selection.KFold(n_splits=10, random_state=42, shuffle = True)
        cv_results = model_selection.cross_val_score(model, x, y, cv=kfold, scoring=scoring)
        print("%s: %f (%f)" % (name, cv_results.mean(), cv_results.std()))
# Cross-validated comparison on both scalings of the data.
model_summ(min_max_data)
model_summ(std_scaled_data)
# ### Model Improvement
# +
# Checking for imbalance in the categorical features
min_max_data['island'].value_counts()
# -
min_max_data['industrial'].value_counts()
# From the above, it is obvious that the dataset is imbalanced. Undersampling will be used to handle the situation.
def sampler(df, col, n):
    """Undersample the class where df[col] == 1.

    Keeps n randomly chosen rows with col == 1, every row with col == 0,
    and returns the combination in shuffled order.
    """
    positives = df[df[col] == 1].sample(n)
    negatives = df[df[col] == 0]
    combined = pd.concat([positives, negatives], axis=0)
    return combined.sample(len(combined))
# Smoke-test the sampler, then undersample island == 1 down to 4100 rows.
sampler(min_max_data, 'island', 5000).head()
sampled_data = sampler(min_max_data, 'island', 4100)
sampled_data['island'].value_counts()
model_fitter(sampled_data, LinearRegression())
model_fitter(sampled_data, Ridge(alpha=1.0))
model_fitter(sampled_data, Lasso(alpha=0.001))
# +
gb_reg = ensemble.GradientBoostingRegressor(n_estimators=400, max_depth =5, min_samples_split=3,
                                            learning_rate=0.003, loss='ls')
model_fitter(sampled_data, gb_reg)
# +
randf_reg = ensemble.RandomForestRegressor(n_estimators=500, random_state=0)
model_fitter(sampled_data, randf_reg)
# +
result4 = {'Model':['r2score', 'mae', 'rss', 'rmse'],
           'Linear regression':[0.51, 0.11, 44.16, 0.16],
           'Rigde regression':[0.51, 0.11, 44.16, 0.16],
           'LASSO regression':[0.51, 0.11, 44.8, 0.17],
           'Gradient boosting regressor':[0.9, 0.06, 8.87, 0.07],
           'Random forest regressor':[1.0, 0.0, 0.42, 0.02]}
result4_df = pd.DataFrame(result4)
result4_df
# -
# From the above, the GradientBoosting Regressor on the sampled data performed better than others. It will be selected for the task.
# +
# Final model: train the chosen GBR on the undersampled data.
x = sampled_data.drop(columns=['rent'])
y = sampled_data['rent']
# splitting the 'features' and 'target' into train and test data
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 42)
# initiate and fit the model on the train variables
model = ensemble.GradientBoostingRegressor(n_estimators=400, max_depth =5, min_samples_split=3,
                                           learning_rate=0.003, loss='ls')
model_fit = model.fit(x_train, y_train)
# make predictions of and evaluate the model, based on some metrics
prediction = model.predict(x_test)
prediction
# +
# Side-by-side table of actual vs predicted rent on the test split.
pred_df = pd.DataFrame(prediction, columns=['predicted rent'])
y_test_df = y_test.to_frame().reset_index(drop=True)
pred_df = pd.concat([y_test_df, pred_df], axis=1)
# -
pred_df
|
datalab_project_notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="7f6c3fcefecc375f4996f9c41351452c97920dc0"
# ## What is Attention?
# * **Attention** is ***simply a vector, often the outputs of dense layer using softmax function.***
# * Before Attention mechanism, ***translation relies on reading a complete sentence and compress all information into a fixed-length vector***, as you can image, a sentence with hundreds of words represented by several words will surely lead to information loss, inadequate translation, etc.
#
# ## Attention Architecture with Idea Behind it.
#
# * The **basic idea:** each time the **model predicts an output word, it only uses parts of an input where the most relevant information is concentrated instead of an entire sentence.** In ***other words, it only pays attention to some input words. Let’s investigate how this is implemented.***
#
# 
#
# * Encoder works as usual, and the **difference is only on the decoder’s part.** As you can see from the picture, ***the decoder’s hidden state is computed with a context vector, the previous output and the previous hidden state. But now we use not a single context vector c, but a separate context vector c_i for each target word.***
# * These context vectors are **computed as a weighted sum of annotations generated by the encoder.** In **Bahdanau’s paper, they use a Bidirectional LSTM, so these annotations are concatenations of hidden states in forward and backward directions.**
# * The weight of each annotation is computed by an alignment model which scores how well the **inputs and the output match.** An alignment model is a **feedforward neural network**, for instance. In general, it can be any other model as well.
# * As a result, the **alphas — the weights of hidden states when computing a context vector — show how important a given annotation is in deciding the next state and generating the output word. These are the attention scores.**
#
# ## Why Attention?
#
# * The **core of Probabilistic Language Model** is to **assign a probability to a sentence by Markov Assumption.** Due to the nature of sentences that consist of different numbers of words, RNN is naturally introduced to model the **conditional probability among words.**
# 
#
# **Vanilla RNN (the classic one) often gets trapped when modeling:**
#
# * ***Structure Dilemma:*** in real world, **the length of outputs and inputs can be totally different**, while **Vanilla RNN** can only **handle fixed-length problem which is difficult for the alignment.** Consider an ***EN-FR translation examples: “he doesn’t like apples” → “Il n’aime pas les pommes”.***
# * ***Mathematical Nature:*** it suffers from **Gradient Vanishing/Exploding** which means ***it is hard to train when sentences are long enough (maybe at most 4 words).***
# * ***Translation often requires arbitrary input length and output length; to deal with the deficits above, an encoder-decoder model is adopted and the basic RNN cell is changed to a GRU or LSTM cell, and the hyperbolic tangent activation is replaced by ReLU. We use GRU cells here.***
#
# 
#
# * **Embedding layer** maps **discrete words into dense vectors for computational efficiency**. Then **embedded word vectors are fed into encoder**, aka ***GRU cells sequentially.*** What happened during **encoding?** Information flows from left to right and **each word vector** is **learned according to not only current input but also all previous words.** When **the sentence is completely read, encoder generates an output and a hidden state at timestep 4 for further processing.** For ***encoding part, decoder (GRUs as well) grabs the hidden state from encoder, trained by teacher forcing (a mode that previous cell’s output as current input), then generate translation words sequentially.***
#
# * It seems amazing as this model can be applied to **N-to-M sequence**, yet there still is **one main deficit left unsolved: is one hidden state really enough?**
#
# ## How does attention work?
#
# 
#
# * Similar to the **basic encoder-decoder architecture,** this fancy mechanism **plug a context vector into the gap between encoder and decoder.** According to the schematic above, **blue represents encoder** and **red represents decoder;** and we could see that **context vector takes all cells’ outputs as input to compute the probability distribution of source language words for each single word decoder wants to generate.** By utilizing this mechanism, **it is possible for decoder to capture somewhat global information rather than solely to infer based on one hidden state.**
# * And to **build context vector is fairly simple.** For a **fixed target word**, ***first***, we **loop over all encoders’ states to compare target** and **source states to generate scores for each state in encoders.** Then we could **use softmax to normalize all scores, which generates the probability distribution conditioned on target states.** At last, the ***weights are introduced to make context vector easy to train. That’s it. Math is shown below:***
#
# 
#
# **To understand the seemingly complicated math, we need to keep three key points in mind:**
# * ***During decoding,context vectors are computed for every output word.*** So we will have a **2D matrix whose size is # of target words multiplied by # of source words.** Equation **(1) demonstrates how to compute a single value given one target word and a set of source word.**
# * **Once context vector is computed, attention vector** could be computed by **context vector, target word, and attention function f.**
# * We need **attention mechanism to be trainable**. According to equation **(4), both styles offer the trainable weights (W in Luong’s, W1 and W2 in Bahdanau’s). Thus, different styles may result in different performance.**
#
# ## Attention Scoring
# ### Inputs to the scoring function
# Let's start by looking at the inputs we'll give to the scoring function. We will assume we're in the first step in the decoding phase. The first input to the scoring function is the hidden state of the decoder (assuming a toy RNN with three hidden nodes -- not usable in real life, but easier to illustrate):
# + _uuid="94674cfb193c3f311d3689c02859d99d3fe978a6"
# Toy decoder hidden state (3 units) used throughout the scoring demo.
dec_hidden_state = [5,1,20]
# + [markdown] _uuid="99ceea530ce3ab9771ae36d3cf6305efe5036321"
# Let's visualize this vector:
# + _uuid="37561214e9b4dda80487afafca91cc09c810525f"
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Let's visualize our decoder hidden state (transposed so it draws as a column)
plt.figure(figsize=(1.5, 4.5))
sns.heatmap(np.transpose(np.matrix(dec_hidden_state)), annot=True, cmap=sns.light_palette("purple", as_cmap=True), linewidths=1)
# + [markdown] _uuid="a8f136f39ed24145145f1cb62215f34d84da4687"
# Our first scoring function will score a single annotation (encoder hidden state), which looks like this:
# + _uuid="4bf3cbb8dabe711887881ed14d6c3a31aab58e89"
annotation = [3,12,45] #e.g. Encoder hidden state
# + _uuid="5dce227099522428a7d2dc101c5773ad1b424ab2"
# Let's visualize the single annotation
plt.figure(figsize=(1.5, 4.5))
sns.heatmap(np.transpose(np.matrix(annotation)), annot=True, cmap=sns.light_palette("orange", as_cmap=True), linewidths=1)
# + [markdown] _uuid="d70d2be3873409f45565e88db40b8c69405f0217"
# ### IMPLEMENT: Scoring a Single Annotation
# Let's calculate the dot product of a single annotation. Numpy's [dot()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) is a good candidate for this operation
# + _uuid="9c502458c6fe9063c5f8bbfdbf39889b1b91b6e8"
def single_dot_attention_score(dec_hidden_state, enc_hidden_state):
    """Score one encoder annotation against the decoder hidden state.

    Returns the scalar dot product of the two vectors -- the "dot"
    scoring function from Luong-style attention.
    """
    score = np.dot(dec_hidden_state, enc_hidden_state)
    return score
# Demo: 5*3 + 1*12 + 20*45 = 927
single_dot_attention_score(dec_hidden_state, annotation)
# + [markdown] _uuid="f9d8f5b04498e4e052aa15ad926352d8e351367f"
#
# ### Annotations Matrix
# Let's now look at scoring all the annotations at once. To do that, here's our annotation matrix:
# + _uuid="71356443c44bd4e943b35e6c0d603b652e5217ba"
# Four encoder annotations stacked as columns (one column per encoder time step)
annotations = np.transpose([[3,12,45], [59,2,5], [1,43,5], [4,3,45.3]])
# + [markdown] _uuid="d1a291801967f085998926fe9f4c98c7d4eb6047"
# And it can be visualized like this (each column is a hidden state of an encoder time step):
# + _uuid="d51d0c5f882a94d8f6ae0d87be87a04fb2aa5d3a"
# Let's visualize our annotation (each column is an annotation)
ax = sns.heatmap(annotations, annot=True, cmap=sns.light_palette("orange", as_cmap=True), linewidths=1)
# + [markdown] _uuid="0b4f44ea4813f1c80d3042e0745681b17e1d3467"
# ### IMPLEMENT: Scoring All Annotations at Once
# Let's calculate the scores of all the annotations in one step using matrix multiplication. Let's continue to use the dot scoring method
#
# <img src="http://yaox023.com/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0/RNN/attention%E7%90%86%E8%AE%BA/Attention_python%E6%BC%94%E7%A4%BA/images/scoring_functions.png" />
#
# To do that, we'll have to transpose `dec_hidden_state` and [matrix multiply](https://docs.scipy.org/doc/numpy/reference/generated/numpy.matmul.html) it with `annotations`.
# + _uuid="8a46ae1d2a8b4b209ea48edc3d57d30ceecd2ff7"
def dot_attention_score(dec_hidden_state, annotations):
    """Score every annotation (column of ``annotations``) in one shot.

    Computes dec_hidden_state^T @ annotations, yielding one raw score
    per encoder time step.
    """
    decoder_row = np.transpose(dec_hidden_state)
    return np.matmul(decoder_row, annotations)
# Raw (pre-softmax) scores, one per encoder time step
attention_weights_raw = dot_attention_score(dec_hidden_state, annotations)
attention_weights_raw
# + [markdown] _uuid="892c876f4cb0d17da2dc115ff2518852fda95408"
# Looking at these scores, can you guess which of the four vectors will get the most attention from the decoder at this time step?
#
# ## Softmax
# Now that we have our scores, let's apply softmax:
# <img src="http://yaox023.com/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0/RNN/attention%E7%90%86%E8%AE%BA/Attention_python%E6%BC%94%E7%A4%BA/images/softmax.png" />
# + _uuid="ce5d157db831947caaf360ac003f601a916c1c1d"
def softmax(x):
    """Numerically stable softmax along axis 0.

    The raw attention scores can be large (hundreds here), so we subtract the
    per-column max before exponentiating; exp() then never overflows and the
    platform-dependent np.float128 (unavailable on e.g. Windows/ARM builds of
    NumPy) is no longer needed.
    """
    x = np.asarray(x, dtype=np.float64)
    # shifting by the max leaves the result unchanged: softmax(x) == softmax(x - c)
    e_x = np.exp(x - x.max(axis=0))
    return e_x / e_x.sum(axis=0)
# Normalize the raw scores into weights that sum to 1
attention_weights = softmax(attention_weights_raw)
attention_weights
# + [markdown] _uuid="d847c3590c023ef5bd70529ddbe60e1ce0d2e580"
# Even when knowing which annotation will get the most focus, it's interesting to see how drastic softmax makes the end score become. The first and last annotation had the respective scores of 927 and 929. But after softmax, the attention they'll get is 0.12 and 0.88 respectively.
#
# # Applying the scores back on the annotations
# Now that we have our scores, let's multiply each annotation by its score to proceed closer to the attention context vector. This is the multiplication part of this formula (we'll tackle the summation part in the latter cells)
#
# <img src="http://yaox023.com/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0/RNN/attention%E7%90%86%E8%AE%BA/Attention_python%E6%BC%94%E7%A4%BA/images/Context_vector.png" />
# + _uuid="291f4b057a14a02adfee3e3ea74940f6ab7dfaec"
def apply_attention_scores(attention_weights, annotations):
    """Scale each annotation (column) by its attention weight.

    NumPy broadcasting multiplies column j of ``annotations`` by
    ``attention_weights[j]``; the summation into the context vector
    happens in a later step.
    """
    weighted_annotations = attention_weights * annotations
    return weighted_annotations
# Each annotation column scaled by its softmax weight
applied_attention = apply_attention_scores(attention_weights, annotations)
applied_attention
# + [markdown] _uuid="35576668ac1a7eacbdc62a95620ca7367703125c"
# Let's visualize how the context vector looks now that we've applied the attention scores back on it:
# + _uuid="7ab7a984f5351c7a721af40717bb5503f31d701a"
# Let's visualize our annotations after applying attention to them
ax = sns.heatmap(applied_attention, annot=True, cmap=sns.light_palette("orange", as_cmap=True), linewidths=1)
# + [markdown] _uuid="4a231097683190f2826b7095ad6bfc88b592fcf8"
# Contrast this with the raw annotations visualized earlier in the notebook, and we can see that the second and third annotations (columns) have been nearly wiped out. The first annotation maintains some of its value, and the fourth annotation is the most pronounced.
#
# # Calculating the Attention Context Vector
# All that remains to produce our attention context vector now is to sum up the four columns to produce a single attention context vector
#
# + _uuid="d646dcb81626f800119d4b5bda976643125a6adf"
def calculate_attention_vector(applied_attention):
    """Collapse the weighted annotations into a single context vector.

    Sums across the time-step axis (axis=1), i.e. across columns.
    """
    context_vector = np.sum(applied_attention, axis=1)
    return context_vector
# The context vector: weighted sum of annotations for this decoding step
attention_vector = calculate_attention_vector(applied_attention)
attention_vector
# + _uuid="5f036492764ee5916cdf9745374369770f45c800"
# Let's visualize the attention context vector
plt.figure(figsize=(1.5, 4.5))
sns.heatmap(np.transpose(np.matrix(attention_vector)), annot=True, cmap=sns.light_palette("Blue", as_cmap=True), linewidths=1)
# + [markdown] _uuid="866be2125e8174fc17bfa92a035d3a4ed375348f"
# Now that we have the context vector, we can concatinate it with the hidden state and pass it through a hidden layer to produce the the result of this decoding time step.
#
# ### References :
#
# [1] Vinyals, Oriol, et al. Show and tell: A neural image caption generator. arXiv:1411.4555 (2014).
# [2] Bahdanau, Dzmitry, Kyunghyun Cho, and Yoshua Bengio. Neural machine translation by jointly learning to align and translate. arXiv:1409.0473 (2014).
# [3] Cho, Kyunghyun, Aaron Courville, and Yoshua Bengio. Describing Multimedia Content using Attention-based Encoder–Decoder Networks. arXiv:1507.01053 (2015)
# [4] Xu, Kelvin, et al. Show, attend and tell: Neural image caption generation with visual attention. arXiv:1502.03044 (2015).
# [5] Sukhbaatar, Sainbayar, Jason Weston, and Rob Fergus. End-to-end memory networks. Advances in Neural Information Processing Systems. (2015).
# [6] Joulin, Armand, and Tomas Mikolov. Inferring Algorithmic Patterns with Stack-Augmented Recurrent Nets. arXiv:1503.01007 (2015).
# [7] Hermann, Karl Moritz, et al. Teaching machines to read and comprehend. Advances in Neural Information Processing Systems. (2015).
# [8] Raffel, Colin, and Daniel P. W. Ellis. Feed-Forward Networks with Attention Can Solve Some Long-Term Memory Problems. arXiv:1512.08756 (2015).
# [9] Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., & Gomez, A. N. et al. Attention Is All You Need. arXiv: 1706.03762 (2017).
|
3 mu sigma using news to predict news/attension-layer-basic-for-nlp.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Análise das Aposentadorias por Idade
#
# Este documento apresenta uma descrição dos dados de aposentadoria por idade do Regime Geral de Previdência Social (RGPS) no período de 1995 a 2016.
# ## Filtrando os dados
#
# Inicialmente, vamos obter todos os dados de aposentadoria por idade do banco de dados.
#
# Nos dados obtidos, existem vários tipos de aposentadoria por idade, conforme apresentado abaixo:
# +
# Load the variables from the data-dictionary file (defines codigos_especie)
# %run util/dicionario_dados.py
print("CÓDIGO\t BENEFÍCIO")
print("-"*20)
# Collects the benefit codes for old-age retirement ("aposentadoria por idade")
codigos_apos_id = []
# codigos_especie (created by "%run util/dicionario_dados.py") is a dictionary of code -> benefit-name pairs
# Iterate over the dictionary entries, keeping only old-age retirement benefits
for chave, valor in codigos_especie.items():
    if 'idade' in valor and 'Apos' in valor:
        print("{}\t {}".format(chave,valor))
        codigos_apos_id.append(chave)
# -
# Assim, temos 6 tipos de aposentadorias no banco de dados.
#
# Agora vamos verificar quantos registros existem para cada um dos tipos de aposentadoria.
# +
# importa os pacotes que serão utilizados
import pandas as pd
import sqlite3
# Função que executa as queries no banco de dados
def run_query(q):
    """Execute SQL query ``q`` against the RGPS microdata SQLite database.

    Returns the result set as a pandas DataFrame.
    """
    database = 'microdados/sqlite3/microdadosRGPS.db'
    with sqlite3.connect(database) as conn:
        frame = pd.read_sql(q, conn)
    return frame
# -
# Count the records for each retirement type (codes 7, 8, 41, 52, 78, 81)
run_query('SELECT ESPECIE as CODIGO, COUNT(ESPECIE) as QTD_REGISTROS FROM apos_aux WHERE ESPECIE IN (7,8,41,52,78,81) GROUP BY ESPECIE')
# Conforme observado, no banco de dados só existem registros do tipo 41 (Aposentadoria por idade). Esse fato também é apresentado no documento *analise-aposentadoria-idade.ipynb* deste projeto.
# ### Limpeza dos dados
#
# A primeira etapa da limpeza é verificar a quantidade de benefícios por situação (ATIVO, CESSADO, SUSPENSO etc.).
# Run the query against the database
reg_situacao = run_query("SELECT SITUACAO AS CODIGO, COUNT(SITUACAO) AS QUANTIDADE FROM apos_aux WHERE ESPECIE=41 GROUP BY SITUACAO ORDER BY QUANTIDADE DESC")
# Replace the SITUACAO codes with their corresponding text labels
reg_situacao['SITUACAO'] = reg_situacao['CODIGO'].apply(lambda x: codigos_situacao[x])
# Add a column with each situation's share of the total, in percent
reg_situacao['PROPORCAO (%)'] = 100*(reg_situacao['QUANTIDADE']/reg_situacao['QUANTIDADE'].sum())
reg_situacao
# Pelos os dados expostos na tabela acima, 83% dos benefícios estão ativos e cerca de 16% estão cessados.
#
# Para facilitar as análises seguintes, os seguintes agrupamentos serão realizados:
#
# * CESSADO PELA INSPETORIA, CESSADO PELA AUDITORIA, CESSADO PELO SISOBI e CESSADO serão do tipo **CESSADO**
# * SUSPENSO, SUSPENSO PELO CONPAG, SUSPENSO PELO SISOBI e SUSPENSO PELA AUDITORIA serão do tipo **SUSPENSO**
#
# Os benefícios abaixo serão desconsiderados:
# * CANCELADO PELA AUDITORIA
# * CESSADO REVISAO RURAL/95
# * SUSPENSO POR MARCA DE ERRO
#
# Por fim, nosso dados terão somente 3 tipos de situações: **ATIVOS, CESSADOS e SUSPENSOS.**
# +
# Load the data (all old-age retirement records, ESPECIE=41)
apos_id = run_query("SELECT * FROM apos_aux WHERE ESPECIE=41")
# Convert some columns to the "category" dtype to reduce memory use
# (drops the frame from about 1.18 GB to about 0.8 GB)
colunas = ['ESPECIE', 'MOT_CESSACAO', 'CLIENTELA', 'SEXO', 'SITUACAO']
for c in colunas:
    apos_id[c] = apos_id[c].astype('category')
# +
# Drop rows whose situation is 'CANCELADO PELA AUDITORIA', 'CESSADO REVISAO RURAL/95' or 'SUSPENSO POR MARCA DE ERRO'
for cod_situacao in [24,22,4]:
    apos_id = apos_id[apos_id['SITUACAO'] != cod_situacao]
# Group the values of the SITUACAO column
# NOTE(review): the line below is a comparison (==), not an assignment -- it has
# no effect.  Per the markdown above, the intent appears to be remapping the
# CESSADO*/SUSPENSO* variants into single CESSADO/SUSPENSO categories; confirm
# and replace with the actual mapping.
apos_id['SITUACAO'] == ''
# -
# NOTE(review): code 4 was already removed by the loop above, so this filter
# looks redundant; its result is also displayed but never assigned.
apos_id[apos_id['SITUACAO'] != 4]
# ## Quantidade de concessões por ano
#
# O próximo passo na análise é verificar a quantidade de aposentadorias por idade concedidas a cada ano.
# +
# Idade média de aposentadoria por ano
|
analise-aposentadoria-idade.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Ensemble Learning
#
# ## Initial Imports
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
from pathlib import Path
from collections import Counter
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import confusion_matrix
from imblearn.metrics import classification_report_imbalanced
# ## Read the CSV and Perform Basic Data Cleaning
# +
# Load the data
file_path = Path('Resources/LoanStats_2019Q1.csv')
loanstats = pd.read_csv(file_path)
# Preview the data
loanstats.head()
# -
# ## Split the Data into Training and Testing
# +
# Create our features
X = pd.get_dummies(loanstats.drop('loan_status', axis = 1))
# Create our target
y = loanstats['loan_status']
# -
X.describe()
# Check the balance of our target values
y.value_counts()
# +
# Split the X and y into X_train, X_test, y_train, y_test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
X_train.shape
# -
# ## Data Pre-Processing
#
# Scale the training and testing data using the `StandardScaler` from `sklearn`. Remember that when scaling the data, you only scale the features data (`X_train` and `X_testing`).
# Create the StandardScaler instance
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# Fit the Standard Scaler with the training data
# When fitting scaling functions, only train on the training dataset
X_scaler = scaler.fit(X_train)
# Scale the training and testing data
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# ## Ensemble Learners
#
# In this section, you will compare two ensemble algorithms to determine which algorithm results in the best performance. You will train a Balanced Random Forest Classifier and an Easy Ensemble classifier. For each algorithm, be sure to complete the following steps:
#
# 1. Train the model using the training data.
# 2. Calculate the balanced accuracy score from sklearn.metrics.
# 3. Display the confusion matrix from sklearn.metrics.
# 4. Generate a classification report using the `imbalanced_classification_report` from imbalanced-learn.
# 5. For the Balanced Random Forest Classifier only, print the feature importance sorted in descending order (most important feature to least important) along with the feature score
#
# Note: Use a random state of 1 for each algorithm to ensure consistency between tests
# ### Balanced Random Forest Classifier
# Resample the training data with the BalancedRandomForestClassifier
# (each tree is fit on a bootstrap sample balanced by under-sampling the majority class)
from imblearn.ensemble import BalancedRandomForestClassifier
random_forest_model = BalancedRandomForestClassifier(n_estimators = 100, random_state = 1)
random_forest_model = random_forest_model.fit(X_train_scaled, y_train)
y_pred = random_forest_model.predict(X_test_scaled)
# Calculate the balanced accuracy score (mean of per-class recall)
bal_acc = balanced_accuracy_score(y_test, y_pred)
bal_acc
# Display the confusion matrix
con_mat = confusion_matrix(y_test, y_pred)
con_mat
# Print the imbalanced classification report
print(classification_report_imbalanced(y_test, y_pred))
# List the features sorted in descending order by feature importance
importances = random_forest_model.feature_importances_
# Reuse the importances computed above instead of re-reading the attribute
# (the original computed `importances` but never used it)
feature_list = sorted(zip(importances, X.columns), reverse=True)
feature_list
# ### Easy Ensemble Classifier
# Train the Easy Ensemble classifier (boosted learners fit on balanced bootstrap samples)
from imblearn.ensemble import EasyEnsembleClassifier
ensemble_classifier_model = EasyEnsembleClassifier(n_estimators = 100, random_state = 1)
ensemble_classifier_model = ensemble_classifier_model.fit(X_train_scaled, y_train)
y_pred = ensemble_classifier_model.predict(X_test_scaled)
# Calculate the balanced accuracy score
bal_acc = balanced_accuracy_score(y_test, y_pred)
bal_acc
# Display the confusion matrix
con_mat = confusion_matrix(y_test, y_pred)
con_mat
# Print the imbalanced classification report
print(classification_report_imbalanced(y_test, y_pred))
# ### Final Questions
#
# 1. Which model had the best balanced accuracy score?
#
# The easy ensembler classifier model had the best balanced accuracy score.
#
# 2. Which model had the best recall score?
#
# The easy ensembler classifier model had the best recall score.
#
# 3. Which model had the best geometric mean score?
#
# The easy ensembler classifier model had the best geometric mean score.
#
# 4. What are the top three features?
#
# The top three features are the total rec prncp, total pymnt, and total pymnt inv.
|
credit_risk_ensemble.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Example notebook to display sparkmeasure APIs for Python#
#
# References:
# [https://github.com/LucaCanali/sparkMeasure](https://github.com/LucaCanali/sparkMeasure)
# sparkmeasure Python docs: [docs/Python_shell_and_Jupyter](https://github.com/LucaCanali/sparkMeasure/blob/master/docs/Python_shell_and_Jupyter.md)
#
# <EMAIL>, July 2018
# Dependencies:
# - This notebook assumes you have an active Spark sessions called "spark"
# - That you have sparkmeasure jar in the driver classpath
# - That you have installed the Python wrapper API package sparkmeasure
#
# This is some example code to help you on that:
#
# ```
# # Note install sparkmeasure.py Python wrapper package if not already done:
# pip install sparkmeasure
#
# export PYSPARK_DRIVER_PYTHON=jupyter-notebook
# export PYSPARK_DRIVER_PYTHON_OPTS="--ip=`hostname` --no-browser"
# # run PySpark
# bin/pyspark --packages ch.cern.sparkmeasure:spark-measure_2.11:0.13
# ```
# +
# Load the Python API from the sparkmeasure package
# and attach the sparkMeasure listener for stage metrics to the active Spark session
from sparkmeasure import StageMetrics
stagemetrics = StageMetrics(spark)
# +
# Define cell and line magic to wrap the instrumentation
from IPython.core.magic import (register_line_magic, register_cell_magic, register_line_cell_magic)
@register_line_cell_magic
def sparkmeasure(line, cell=None):
    """Run and measure a Spark workload. Use: %sparkmeasure or %%sparkmeasure.

    The line/cell source is executed while sparkMeasure stage metrics are
    collected, then a summary report is printed.
    """
    source = cell if cell is not None else line
    stagemetrics.begin()
    try:
        # exec (not eval) so multi-statement cells work, not just a single expression
        exec(source)
    finally:
        # always stop metric collection, even if the workload raises
        stagemetrics.end()
    stagemetrics.print_report()
# -
# %%sparkmeasure
spark.sql("select count(*) from range(1000) cross join range(1000) cross join range(1000)").show()
# Print additional metrics from accumulables
stagemetrics.print_accumulables()
# +
# You can also explicitly Wrap your Spark workload into stagemetrics instrumentation
# as in this example
stagemetrics.begin()
spark.sql("select count(*) from range(1000) cross join range(1000) cross join range(1000)").show()
stagemetrics.end()
# Print a summary report
stagemetrics.print_report()
# +
# Another way to encapsulate code and instrumentation in a compact form
stagemetrics.runandmeasure(locals(), """
spark.sql("select count(*) from range(1000) cross join range(1000) cross join range(1000)").show()
""")
# -
# ## Example of collecting using Task Metrics
# Collecting Spark task metrics at the granularity of each task completion has additional overhead
# compare to collecting at the stage completion level, therefore this option should only be used if you need data with this finer granularity, for example because you want
# to study skew effects, otherwise consider using stagemetrics aggregation as preferred choice.
#
# +
from sparkmeasure import TaskMetrics
taskmetrics = TaskMetrics(spark)
taskmetrics.begin()
spark.sql("select count(*) from range(1000) cross join range(1000) cross join range(1000)").show()
taskmetrics.end()
taskmetrics.print_report()
# -
|
examples/SparkMeasure_Jupyer_Python_getting_started.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
# our own stuff
import sampling
# -
# %matplotlib inline
# # Parameters of a Gaussian distribution
# True parameters of the Gaussian distribution we will sample from
mean = -5
standard_deviation = 100
# # Inferring the **mean** of the distribution
@interact(n_samples=(2, 100_000, 100))
def plot_actual_and_estimate(n_samples: int = 2) -> None:
    """Draw n_samples from the Gaussian and plot the sample mean vs. the true mean."""
    figure = plt.figure()
    ax = plt.axes()
    # show three standard deviations on either side of the true mean
    ax.set_xlim(mean - 3 * standard_deviation, mean + 3 * standard_deviation)
    ax.axvline(mean, linewidth=2, color='red')
    draws = sampling.gaussian_samples(n_samples, mean, standard_deviation)
    mean_estimate = draws.mean()
    ax.axvline(mean_estimate, linestyle='dashed', linewidth=3, color='blue')
    print(f'estimate = {mean_estimate} (difference = {np.abs(mean - mean_estimate)})')
|
teaching/notebooks/law of large numbers.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Based on parcel-level EIR zoningmods master file and EIR Alt1 strategies, update s24 zoningmods lookup table to s26.
# +
import pandas as pd
import numpy as np
import time
today = time.strftime('%Y%m%d')
# +
# bring in zoningmods fields from FBP as place holders
# read these fields in s24
lookup_fbp = pd.read_csv(r'C:\Users\ywang\Documents\GitHub\bayarea_urbansim\data\zoning_mods_24.csv',
usecols = ['fbpzoningmodcat', 'add_bldg', 'drop_bldg', 'dua_up', 'far_up',
'dua_down', 'far_down', 'subsidy', 'notes', 'res_rent_cat', 'job_out_cat'])
print('zoning_mods_24 has {} unique fbpzoningmodcat'.format(lookup_fbp.shape[0]))
display(lookup_fbp.head())
#print(list(lookup_fbp))
print('dua_up has the following values: {}'.format(list(lookup_fbp.dua_up.unique())))
print('dua_down has the following values: {}'.format(list(lookup_fbp.dua_down.unique())))
print('far_up has the following values: {}'.format(list(lookup_fbp.far_up.unique())))
print('far_down has the following values: {}'.format(list(lookup_fbp.far_down.unique())))
print('add_bldg has the following values: {}'.format(list(lookup_fbp.add_bldg.unique())))
print('drop_bldg has the following values: {}'.format(list(lookup_fbp.drop_bldg.unique())))
# +
# read parcel-level EIR zoningmods master file
p10_pba50_EIR_attr = pd.read_csv('C:\\Users\\ywang\\Box\\Modeling and Surveys\\Urban Modeling\\Bay Area UrbanSim\\PBA50\\Policies\\Zoning Modifications\\p10_pba50_EIR_attr_20210224.csv')
for i in ['eir_gg_id', 'eir_tra_id', 'eir_sesit_', 'eir_coc_id',
'eir_ppa_id', 'eir_exp202', 'ex_res_bldg']:
p10_pba50_EIR_attr.loc[p10_pba50_EIR_attr[i].isnull(), i] = 'NA'
# display(p10_pba50_EIR_attr[i].unique())
p10_pba50_EIR_attr_modcat = p10_pba50_EIR_attr.merge(lookup_fbp,
left_on='fbpzoningm',
right_on='fbpzoningmodcat', how='left')
print('p10_pba50_EIR_attr_modcat has {} rows'.format(p10_pba50_EIR_attr_modcat.shape[0]))
# +
# collapsed to lookup table based on 'eirzoningm' and EIR geography fields, 'fbpzoningmodcat'
# was kept to inherent Final Blueprint values
EIR_modcat_df = p10_pba50_EIR_attr_modcat[['ACRES', 'fbpzoningmodcat', 'eirzoningm', 'juris',
'eir_gg_id', 'eir_tra_id', 'eir_sesit_', 'eir_coc_id',
'eir_ppa_id', 'eir_exp202', 'ex_res_bldg',
'add_bldg', 'drop_bldg', 'dua_up', 'far_up',
'dua_down', 'far_down', 'subsidy', 'res_rent_cat', 'job_out_cat']]
EIR_modcat_df = EIR_modcat_df[['eirzoningm', 'juris',
'eir_gg_id', 'eir_tra_id', 'eir_sesit_', 'eir_coc_id',
'eir_ppa_id', 'eir_exp202', 'ex_res_bldg',
'add_bldg', 'drop_bldg', 'dua_up', 'far_up',
'dua_down', 'far_down', 'subsidy','res_rent_cat', 'job_out_cat']].drop_duplicates()
# rename columns
EIR_modcat_df.rename(columns = {'eir_gg_id': 'gg_id',
'eir_tra_id': 'tra_id',
'eir_sesit_': 'sesit_id',
'eir_coc_id': 'coc_id',
'eir_ppa_id': 'ppa_id',
'eir_exp202': 'exp2020_id'}, inplace=True)
# add 'manual_county' column
juris_county = pd.read_csv(r'C:\Users\ywang\Documents\GitHub\petrale\zones\jurisdictions\juris_county_id.csv',
usecols = ['juris_name_full', 'county_id'])
juris_county.columns = ['juris','manual_county']
EIR_modcat_df = EIR_modcat_df.merge(juris_county, on='juris', how='left')
# +
## Update "dua_up" and "add_bldg" for residential - EIR Alt1 H3 strategy
EIR_modcat_alt1 = EIR_modcat_df.copy()
# first, clear the Final Blueprint residential upzoning values
EIR_modcat_alt1.dua_up = np.nan
EIR_modcat_alt1.loc[(EIR_modcat_alt1.add_bldg == 'HM') | (EIR_modcat_alt1.add_bldg == 'HS'), 'add_bldg'] = np.nan
# Alt1 residential density (dua_up) by Transit-Rich-Area tier, applied to
# Growth Geography parcels outside PPAs.  A single table-driven loop replaces
# the five near-identical copy-pasted blocks, which were easy to get out of sync.
dua_up_by_tra = {'tra1': 300, 'tra2a': 250, 'tra2b': 200, 'tra2c': 150, 'tra3': 100}
for tra, dua in dua_up_by_tra.items():
    tier_idx = (EIR_modcat_alt1.gg_id == 'GG') & (
        EIR_modcat_alt1.tra_id == tra) & (
        EIR_modcat_alt1.ppa_id != 'ppa')
    EIR_modcat_alt1.loc[tier_idx, 'dua_up'] = dua
    EIR_modcat_alt1.loc[tier_idx, 'add_bldg'] = 'HM'
# +
## if within PPA, residential mid-rise may not be added: drop_bldg is HM
EIR_modcat_alt1.loc[EIR_modcat_alt1.ppa_id == 'ppa', 'drop_bldg'] = 'HM'
# +
## Update "dua_down" for residential - EIR Alt1 UGB strategy
no_dua_down_fbp = EIR_modcat_alt1.loc[EIR_modcat_alt1.dua_down.isnull()]
display(no_dua_down_fbp.exp2020_id.value_counts())
# zoningmods with 'inun' in the 'exp2020_id' field should have 'dua_down = 0'
EIR_modcat_alt1.loc[EIR_modcat_alt1.exp2020_id == 'inun', 'dua_down'] = 0
# finally, where dua_down==0, dua_up should be nan, add_bldg should be nan
EIR_modcat_alt1.loc[EIR_modcat_alt1.dua_down == 0, 'dua_up'] = np.nan
EIR_modcat_alt1.loc[EIR_modcat_alt1.dua_down == 0, 'add_bldg'] = np.nan
# -
EIR_modcat_alt1.juris.unique()
# +
# Update "far_up" and "add_bldg" for non-residential - EIR Alt1 H3 strategy
# first, set to nan
EIR_modcat_alt1.far_up = np.nan
EIR_modcat_alt1.loc[EIR_modcat_alt1.add_bldg == 'IW', 'add_bldg'] = np.nan
# update values
EIR_modcat_alt1.loc[(EIR_modcat_alt1.gg_id=='GG') & (EIR_modcat_alt1.tra_id=='tra1'), 'far_up'] = 12
EIR_modcat_alt1.loc[(EIR_modcat_alt1.gg_id=='GG') & (EIR_modcat_alt1.tra_id=='tra1') & (
(EIR_modcat_alt1.juris == 'san_francisco') | (
EIR_modcat_alt1.juris == 'oakland') | (
EIR_modcat_alt1.juris == 'daly_city') | (
EIR_modcat_alt1.juris == 'san_leandro')), 'far_up'] = 15
EIR_modcat_alt1.loc[(EIR_modcat_alt1.gg_id=='GG') & (
EIR_modcat_alt1.ppa_id=='ppa') & (
EIR_modcat_alt1.tra_id != 'tra1'), 'far_up'] = 2
EIR_modcat_alt1.loc[(EIR_modcat_alt1.gg_id=='GG') & (
EIR_modcat_alt1.ppa_id=='ppa') & (
EIR_modcat_alt1.tra_id != 'tra1'), 'add_bldg'] = 'IW'
# +
# Update "far_down" for non-residential - EIR Alt1 UGB strategy
no_far_down_fbp = EIR_modcat_alt1.loc[EIR_modcat_alt1.far_down.isnull()]
display(no_far_down_fbp.exp2020_id.value_counts())
# zoningmods with 'inun' in the 'exp2020_id' field should have 'far_down = 0'
# (comment fixed: previously said 'dua_down = 0', a copy-paste leftover)
EIR_modcat_alt1.loc[EIR_modcat_alt1.exp2020_id == 'inun', 'far_down'] = 0
# finally, where far_down==0, far_up should be nan
EIR_modcat_alt1.loc[EIR_modcat_alt1.far_down == 0, 'far_up'] = np.nan
# +
# drop duplicates
EIR_modcat_alt1 = EIR_modcat_alt1.drop_duplicates()
print('EIR_modcat_alt1 has {} rows'.format(EIR_modcat_alt1.shape[0]))
# add 'FREQUENCE', 'SUM_ACRES' columns
EIR_modcat_stats = p10_pba50_EIR_attr_modcat.groupby('eirzoningm').agg({'ACRES': ['count','sum']}).reset_index()
EIR_modcat_stats.columns = ['eirzoningm', 'FREQUENCY', 'SUM_ACRES']
print('EIR_modcat_stats has {} rows'.format(EIR_modcat_stats.shape[0]))
EIR_modcat_alt1 = EIR_modcat_alt1.merge(EIR_modcat_stats, on='eirzoningm', how='left')
print('p10_pba50_EIR_modcat_df has {} rows'.format(EIR_modcat_alt1.shape[0]))
# add 'modcat_id' column
EIR_modcat_alt1['modcat_id'] = EIR_modcat_alt1.index + 1
# recoder the fields
EIR_modcat_alt1 = EIR_modcat_alt1[['eirzoningm', 'modcat_id', 'FREQUENCY', 'SUM_ACRES', 'manual_county', 'juris',
'gg_id', 'tra_id', 'sesit_id', 'coc_id', 'ppa_id', 'exp2020_id', 'ex_res_bldg',
'add_bldg', 'drop_bldg', 'dua_up', 'far_up', 'dua_down', 'far_down', 'subsidy', 'res_rent_cat', 'job_out_cat']]
# +
# check
# PPA parcels should have no dua_up
ppa_chk = EIR_modcat_alt1.loc[EIR_modcat_alt1.ppa_id == 'ppa']
display(ppa_chk.dua_up.unique()) # should only contain nan
# PPA parcels should have drop_bldg = HM
display(ppa_chk.drop_bldg.unique()) # should only contain 'HM'
# only parcels with value 'in' in field 'exp2020_id' don't have dua_down=0
display(EIR_modcat_alt1.loc[EIR_modcat_alt1.dua_down.isnull()].exp2020_id.unique()) # should only contain 'in'
# only parcels with value 'in' in field 'exp2020_id' don't have far_down=0
display(EIR_modcat_alt1.loc[EIR_modcat_alt1.far_down.isnull()].exp2020_id.unique()) # should only contain 'in'
# there should be no overlap between dua_up and dua_down, between far_up and far_down
dua_chk = EIR_modcat_alt1.loc[(EIR_modcat_alt1.dua_down == 0) &
EIR_modcat_alt1.dua_up.notnull()]
print(dua_chk.shape[0]) # should be 0
far_chk = EIR_modcat_alt1.loc[(EIR_modcat_alt1.far_down == 0) &
EIR_modcat_alt1.far_up.notnull()]
print(far_chk.shape[0]) # should be 0
# parcels outside of 'in' in field 'exp2020_id' should not have add_blg
add_bldg_chk = EIR_modcat_alt1.loc[EIR_modcat_alt1.exp2020_id != 'in'] # should only contain nan
display(add_bldg_chk.add_bldg.unique())
# -
# export the s26 zoningmods lookup table, date-stamped with today's date
EIR_modcat_alt1.rename(columns={'eirzoningm': 'eirzoningmodcat'}, inplace=True)
print('export zoning_mods lookup table of {} rows'.format(EIR_modcat_alt1.shape[0]))
EIR_modcat_alt1.to_csv('C:\\Users\\ywang\\Box\\Modeling and Surveys\\Urban Modeling\\Bay Area UrbanSim\\PBA50\\Policies\\Zoning Modifications\\BAUS input files\\zoning_mods_26_{}.csv'.format(today), index=False)
|
policies/plu/update_EIR_zoningmods_lookup_Alt1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tflearn]
# language: python
# name: conda-env-tflearn-py
# ---
# # Sentiment analysis with TFLearn
#
# In this notebook, we'll continue Andrew Trask's work by building a network for sentiment analysis on the movie review data. Instead of a network written with Numpy, we'll be using [TFLearn](http://tflearn.org/), a high-level library built on top of TensorFlow. TFLearn makes it simpler to build networks just by defining the layers. It takes care of most of the details for you.
#
# We'll start off by importing all the modules we'll need, then load and prepare the data.
import pandas as pd
import numpy as np
import tensorflow as tf
import tflearn
from tflearn.data_utils import to_categorical
# ## Preparing the data
#
# Following along with Andrew, our goal here is to convert our reviews into word vectors. The word vectors will have elements representing words in the total vocabulary. If the second position represents the word 'the', for each review we'll count up the number of times 'the' appears in the text and set the second position to that count. I'll show you examples as we build the input data from the reviews data. Check out Andrew's notebook and video for more about this.
# ### Read the data
#
# Use the pandas library to read the reviews and postive/negative labels from comma-separated files. The data we're using has already been preprocessed a bit and we know it uses only lower case characters. If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like `The`, `the`, and `THE`, all the same way.
reviews = pd.read_csv('reviews.txt', header=None)
labels = pd.read_csv('labels.txt', header=None)
reviews.head()
# ### Counting word frequency
#
# To start off we'll need to count how often each word appears in the data. We'll use this count to create a vocabulary we'll use to encode the review data. This resulting count is known as a [bag of words](https://en.wikipedia.org/wiki/Bag-of-words_model). We'll use it to select our vocabulary and build the word vectors. You should have seen how to do this in Andrew's lesson. Try to implement it here using the [Counter class](https://docs.python.org/2/library/collections.html#collections.Counter).
#
# > **Exercise:** Create the bag of words from the reviews data and assign it to `total_counts`. The reviews are stores in the `reviews` [Pandas DataFrame](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html). If you want the reviews as a Numpy array, use `reviews.values`. You can iterate through the rows in the DataFrame with `for idx, row in reviews.iterrows():` ([documentation](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.iterrows.html)). When you break up the reviews into words, use `.split(' ')` instead of `.split()` so your results match ours.
# +
from collections import Counter

# Bag of words: map each word to the number of times it appears across
# all reviews.  Split on single spaces (not bare .split()) so the counts
# match the lesson's reference output.
total_counts = Counter()
for _, row in reviews.iterrows():
    # row[0] holds the raw review text for this row.
    total_counts.update(row[0].split(' '))
print("Total words in data set: ", len(total_counts))
# -
# Let's keep the first 10000 most frequent words. As Andrew noted, most of the words in the vocabulary are rarely used so they will have little effect on our predictions. Below, we'll sort `vocab` by the count value and keep the 10000 most frequent words.
vocab = sorted(total_counts, key=total_counts.get, reverse=True)[:10000]
print(vocab[:60])
# What's the last word in our vocabulary? We can use this to judge if 10000 is too few. If the last word is pretty common, we probably need to keep more words.
print(vocab[-1], ': ', total_counts[vocab[-1]])
# The last word in our vocabulary shows up in 30 reviews out of 25000. I think it's fair to say this is a tiny proportion of reviews. We are probably fine with this number of words.
#
# **Note:** When you run, you may see a different word from the one shown above, but it will also have the value `30`. That's because there are many words tied for that number of counts, and the `Counter` class does not guarantee which one will be returned in the case of a tie.
#
# Now for each review in the data, we'll make a word vector. First we need to make a mapping of word to index, pretty easy to do with a dictionary comprehension.
#
# > **Exercise:** Create a dictionary called `word2idx` that maps each word in the vocabulary to an index. The first word in `vocab` has index `0`, the second word has index `1`, and so on.
word2idx = ## create the word-to-index dictionary here
# ### Text to vector function
#
# Now we can write a function that converts some text to a word vector. The function will take a string of words as input and return a vector with the words counted up. Here's the general algorithm to do this:
#
# * Initialize the word vector with [np.zeros](https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html), it should be the length of the vocabulary.
# * Split the input string of text into a list of words with `.split(' ')`. Again, if you call `.split()` instead, you'll get slightly different results than what we show here.
# * For each word in that list, increment the element in the index associated with that word, which you get from `word2idx`.
#
# **Note:** Since all words aren't in the `vocab` dictionary, you'll get a key error if you run into one of those words. You can use the `.get` method of the `word2idx` dictionary to specify a default returned value when you make a key error. For example, `word2idx.get(word, None)` returns `None` if `word` doesn't exist in the dictionary.
def text_to_vector(text):
    """Convert a string of space-separated words into a word-count vector.

    Returns a 1-D numpy integer array the length of the vocabulary, where
    element ``i`` holds how many times vocabulary word ``i`` occurs in
    ``text``.  Words outside the vocabulary are ignored.
    """
    word_vector = np.zeros(len(vocab), dtype=np.int_)
    # Split on single spaces to match how the vocabulary was counted.
    for word in text.split(' '):
        idx = word2idx.get(word, None)
        if idx is not None:
            word_vector[idx] += 1
    return word_vector
# If you do this right, the following code should return
#
# ```
# text_to_vector('The tea is for a party to celebrate '
# 'the movie so she has no time for a cake')[:65]
#
# array([0, 1, 0, 0, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0,
# 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0])
# ```
text_to_vector('The tea is for a party to celebrate '
'the movie so she has no time for a cake')[:65]
# Now, run through our entire review data set and convert each review to a word vector.
word_vectors = np.zeros((len(reviews), len(vocab)), dtype=np.int_)
for ii, (_, text) in enumerate(reviews.iterrows()):
word_vectors[ii] = text_to_vector(text[0])
# Printing out the first 5 word vectors
word_vectors[:5, :23]
# ### Train, Validation, Test sets
#
# Now that we have the word_vectors, we're ready to split our data into train, validation, and test sets. Remember that we train on the train data, use the validation data to set the hyperparameters, and at the very end measure the network performance on the test data. Here we're using the function `to_categorical` from TFLearn to reshape the target data so that we'll have two output units and can classify with a softmax activation function. We actually won't be creating the validation set here, TFLearn will do that for us later.
# +
# Binary targets: 1 for a positive review, 0 for a negative one.
Y = (labels=='positive').astype(np.int_)
records = len(labels)

# Shuffle the record indices so the split is random.
shuffle = np.arange(records)
np.random.shuffle(shuffle)
# Fraction of records used for TRAINING (the original name `test_fraction`
# was misleading); the remaining 10% becomes the test set.
train_fraction = 0.9
split_at = int(records * train_fraction)

train_split, test_split = shuffle[:split_at], shuffle[split_at:]
# to_categorical one-hot encodes the 0/1 targets into two output columns.
trainX, trainY = word_vectors[train_split,:], to_categorical(Y.values[train_split], 2)
testX, testY = word_vectors[test_split,:], to_categorical(Y.values[test_split], 2)
# -
trainY
# ## Building the network
#
# [TFLearn](http://tflearn.org/) lets you build the network by [defining the layers](http://tflearn.org/layers/core/).
#
# ### Input layer
#
# For the input layer, you just need to tell it how many units you have. For example,
#
# ```
# net = tflearn.input_data([None, 100])
# ```
#
# would create a network with 100 input units. The first element in the list, `None` in this case, sets the batch size. Setting it to `None` here leaves it at the default batch size.
#
# The number of inputs to your network needs to match the size of your data. For this example, we're using 10000 element long vectors to encode our input data, so we need 10000 input units.
#
#
# ### Adding layers
#
# To add new hidden layers, you use
#
# ```
# net = tflearn.fully_connected(net, n_units, activation='ReLU')
# ```
#
# This adds a fully connected layer where every unit in the previous layer is connected to every unit in this layer. The first argument `net` is the network you created in the `tflearn.input_data` call. It's telling the network to use the output of the previous layer as the input to this layer. You can set the number of units in the layer with `n_units`, and set the activation function with the `activation` keyword. You can keep adding layers to your network by repeated calling `net = tflearn.fully_connected(net, n_units)`.
#
# ### Output layer
#
# The last layer you add is used as the output layer. Therefore, you need to set the number of units to match the target data. In this case we are predicting two classes, positive or negative sentiment. You also need to set the activation function so it's appropriate for your model. Again, we're trying to predict if some input data belongs to one of two classes, so we should use softmax.
#
# ```
# net = tflearn.fully_connected(net, 2, activation='softmax')
# ```
#
# ### Training
# To set how you train the network, use
#
# ```
# net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy')
# ```
#
# Again, this is passing in the network you've been building. The keywords:
#
# * `optimizer` sets the training method, here stochastic gradient descent
# * `learning_rate` is the learning rate
# * `loss` determines how the network error is calculated. In this example, with the categorical cross-entropy.
#
# Finally you put all this together to create the model with `tflearn.DNN(net)`. So it ends up looking something like
#
# ```
# net = tflearn.input_data([None, 10]) # Input
# net = tflearn.fully_connected(net, 5, activation='ReLU') # Hidden
# net = tflearn.fully_connected(net, 2, activation='softmax') # Output
# net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy')
# model = tflearn.DNN(net)
# ```
#
# > **Exercise:** Below in the `build_model()` function, you'll put together the network using TFLearn. You get to choose how many layers to use, how many hidden units, etc.
# Network building
def build_model(learning_rate=0.1):
    """Build and return a TFLearn DNN for binary sentiment classification.

    Architecture: a 10000-unit input layer (matching the word-vector
    length), two ReLU hidden layers, and a 2-unit softmax output trained
    with SGD and categorical cross-entropy.

    :param learning_rate: SGD learning rate (default 0.1).
    :return: a compiled ``tflearn.DNN`` model.
    """
    # This resets all parameters and variables, leave this here
    tf.reset_default_graph()

    net = tflearn.input_data([None, 10000])                      # Input
    net = tflearn.fully_connected(net, 200, activation='ReLU')   # Hidden 1
    net = tflearn.fully_connected(net, 25, activation='ReLU')    # Hidden 2
    net = tflearn.fully_connected(net, 2, activation='softmax')  # Output
    net = tflearn.regression(net, optimizer='sgd',
                             learning_rate=learning_rate,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net)
    return model
# ## Intializing the model
#
# Next we need to call the `build_model()` function to actually build the model. In my solution I haven't included any arguments to the function, but you can add arguments so you can change parameters in the model if you want.
#
# > **Note:** You might get a bunch of warnings here. TFLearn uses a lot of deprecated code in TensorFlow. Hopefully it gets updated to the new TensorFlow version soon.
model = build_model()
# ## Training the network
#
# Now that we've constructed the network, saved as the variable `model`, we can fit it to the data. Here we use the `model.fit` method. You pass in the training features `trainX` and the training targets `trainY`. Below I set `validation_set=0.1` which reserves 10% of the data set as the validation set. You can also set the batch size and number of epochs with the `batch_size` and `n_epoch` keywords, respectively. Below is the code to fit our the network to our word vectors.
#
# You can rerun `model.fit` to train the network further if you think you can increase the validation accuracy. Remember, all hyperparameter adjustments must be done using the validation set. **Only use the test set after you're completely done training the network.**
# Training
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=128, n_epoch=10)
# ## Testing
#
# After you're satisfied with your hyperparameters, you can run the network on the test set to measure its performance. Remember, *only do this after finalizing the hyperparameters*.
predictions = (np.array(model.predict(testX))[:,0] >= 0.5).astype(np.int_)
test_accuracy = np.mean(predictions == testY[:,0], axis=0)
print("Test accuracy: ", test_accuracy)
# ## Try out your own text!
# Helper function that uses your model to predict sentiment
def test_sentence(sentence):
positive_prob = model.predict([text_to_vector(sentence.lower())])[0][1]
print('Sentence: {}'.format(sentence))
print('P(positive) = {:.3f} :'.format(positive_prob),
'Positive' if positive_prob > 0.5 else 'Negative')
# +
sentence = "Moonlight is by far the best movie of 2016."
test_sentence(sentence)
sentence = "It's amazing anyone could be talented enough to make something this spectacularly awful"
test_sentence(sentence)
|
intro-to-tflearn/.ipynb_checkpoints/Sentiment Analysis with TFLearn-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Bias and variance of polynomial fit
#
#
# Demo overfitting, underfitting, and validation and learning curves with
# polynomial regression.
#
# Fit polynomials of different degrees to a dataset: for too small a degree,
# the model *underfits*, while for too large a degree, it overfits.
#
#
#
# +
import numpy as np
import matplotlib.pyplot as plt
def generating_func(x, err=0.5):
    """Sample noisy targets: mean 10 - 1/(x + 0.1), Gaussian std `err`."""
    mean = 10 - 1. / (x + 0.1)
    return np.random.normal(mean, err)
# -
# A polynomial regression
#
#
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
# A simple figure to illustrate the problem
#
#
# +
# Small, noisy training set: 8 points spaced logarithmically on [0.01, 1].
n_samples = 8
np.random.seed(0)
x = 10 ** np.linspace(-2, 0, n_samples)
y = generating_func(x)
# Dense grid (slightly beyond the data range) for drawing the fitted curves.
x_test = np.linspace(-0.2, 1.2, 1000)
titles = ['d = 1 (under-fit; high bias)',
          'd = 2',
          'd = 6 (over-fit; high variance)']
degrees = [1, 2, 6]
fig = plt.figure(figsize=(9, 3.5))
fig.subplots_adjust(left=0.06, right=0.98, bottom=0.15, top=0.85, wspace=0.05)
# One panel per polynomial degree: fit on the 8 points, plot the curve.
for i, d in enumerate(degrees):
    ax = fig.add_subplot(131 + i, xticks=[], yticks=[])
    ax.scatter(x, y, marker='x', c='k', s=50)
    # Pipeline expands x into polynomial features of degree d, then fits OLS.
    model = make_pipeline(PolynomialFeatures(d), LinearRegression())
    model.fit(x[:, np.newaxis], y)
    ax.plot(x_test, model.predict(x_test[:, np.newaxis]), '-b')
    ax.set_xlim(-0.2, 1.2)
    ax.set_ylim(0, 12)
    ax.set_xlabel('house size')
    if i == 0:
        ax.set_ylabel('price')
    ax.set_title(titles[i])
# -
# Generate a larger dataset
#
#
# +
from sklearn.model_selection import train_test_split
n_samples = 200
test_size = 0.4
error = 1.0
# randomly sample the data
np.random.seed(1)
x = np.random.random(n_samples)
y = generating_func(x, error)
# split into training, validation, and testing sets.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_size)
# show the training and validation sets
plt.figure(figsize=(6, 4))
plt.scatter(x_train, y_train, color='red', label='Training set')
plt.scatter(x_test, y_test, color='blue', label='Test set')
plt.title('The data')
plt.legend(loc='best')
# -
# Plot a validation curve
#
#
# +
from sklearn.model_selection import validation_curve
degrees = np.arange(1, 21)
model = make_pipeline(PolynomialFeatures(), LinearRegression())
# The parameter to vary is the "degrees" on the pipeline step
# "polynomialfeatures"
train_scores, validation_scores = validation_curve(
model, x[:, np.newaxis], y,
param_name='polynomialfeatures__degree',
param_range=degrees)
# Plot the mean train error and validation error across folds
plt.figure(figsize=(6, 4))
plt.plot(degrees, validation_scores.mean(axis=1), lw=2,
label='cross-validation')
plt.plot(degrees, train_scores.mean(axis=1), lw=2, label='training')
plt.legend(loc='best')
plt.xlabel('degree of fit')
plt.ylabel('explained variance')
plt.title('Validation curve')
plt.tight_layout()
# -
# Learning curves
# ###########################################################
#
# Plot train and test error with an increasing number of samples
#
#
# +
# A learning curve for d=1, 5, 15
for d in [1, 5, 15]:
model = make_pipeline(PolynomialFeatures(degree=d), LinearRegression())
from sklearn.model_selection import learning_curve
train_sizes, train_scores, validation_scores = learning_curve(
model, x[:, np.newaxis], y,
train_sizes=np.logspace(-1, 0, 20))
# Plot the mean train error and validation error across folds
plt.figure(figsize=(6, 4))
plt.plot(train_sizes, validation_scores.mean(axis=1),
lw=2, label='cross-validation')
plt.plot(train_sizes, train_scores.mean(axis=1),
lw=2, label='training')
plt.ylim(ymin=-.1, ymax=1)
plt.legend(loc='best')
plt.xlabel('number of train samples')
plt.ylabel('explained variance')
plt.title('Learning curve (degree=%i)' % d)
plt.tight_layout()
plt.show()
|
_downloads/plot_bias_variance.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (data-gov)
# language: python
# name: data-gov
# ---
# # World Bank Data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("GFDDData.csv")
df.head()
def transform(data: pd.DataFrame) -> pd.DataFrame:
    """Pivot the raw GFDD layout so years become rows and indicators columns.

    The input has one row per (country, indicator) with one column per
    year; the output has one row per (country, year) with one column per
    indicator, plus `Year`, `Country Name` and `Country Code` columns.
    """
    tables = []
    for country in data["Country Name"].unique():
        country_rows = data.loc[data["Country Name"] == country]
        name = country_rows["Country Name"].iloc[0]
        code = country_rows["Country Code"].iloc[0]

        # Transpose so the year columns become the row index and the
        # indicator names become the columns.
        pivoted = (country_rows
                   .drop(columns=["Country Code",
                                  "Country Name",
                                  "Indicator Code"])
                   .set_index("Indicator Name")
                   .T)
        pivoted["Year"] = pivoted.index
        pivoted["Country Name"] = name
        pivoted["Country Code"] = code
        pivoted = pivoted.reset_index(drop=True)
        pivoted.columns.name = None

        # Keep a year only if at least one indicator value is present
        # (Year / Country Name / Country Code already supply 3 non-nulls).
        pivoted = pivoted.dropna(thresh=4)
        # Drop indicator columns missing in more than 80% of the kept years.
        pivoted = pivoted.dropna(thresh=int(pivoted.shape[0] * .2), axis=1)
        tables.append(pivoted)

    return pd.concat(tables)
df = transform(df)
df.head()
|
worldbank-gfdd-2020-10-27/gfdd_eda.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3-azureml
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# # Language Understanding
#
# Increasingly, we expect computers to be able to use AI in order to understand spoken or typed commands in natural language. For example, you might want to implement a home automation system that enables you to control devices in your home by using voice commands such as "switch on the light" or "put the fan on", and have an AI-powered device understand the command and take appropriate action.
#
# 
#
# ## Create Authoring and Prediction Resources
#
# Microsoft cognitive services includes the Language Understanding service, which enables you to define *intents* that are applied to *entities* based on *utterances*. You can use either a **Language Understanding** or **Cognitive Services** resource to *publish* a Language Understanding app, but you must create a separate **Language Understanding** resource for *authoring* the app.
#
# 1. In another browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), signing in with your Microsoft account.
# 2. Click **+ Create a resource**, and search for *Language Understanding*.
# 3. In the list of services, click **Language Understanding**.
# 4. In the **Language Understanding** blade, click **Create**.
# 5. In the **Create** blade, enter the following details and click **Create**
# - **Create option**: Both
# - **Name**: *A unique name for your service*
# - **Subscription**: *Select your Azure subscription*
# - **Resource Group**: *Select an existing resource group or create a new one*
# - **Authoring location**: *Select any available location*
# - **Authoring pricing tier**: F0
# - **Runtime location**: *Same as authoring location*
# - **Runtime pricing tier**: F0
# 6. Wait for the resources to be created, and note that two Language Understanding resources are provisioned; one for authoring, and another for prediction. You can view these by navigating to the resource group where you created them.
#
# ### Create a Language Understanding App
#
# To implement natural language understanding with Language Understanding, you create an app; and then add entities, intents, and utterances to define the commands you want the app to understand:
#
# 1. In a new browser tab, open the Language Understanding portal at [https://www.luis.ai](https://www.luis.ai), and sign in using the Microsoft account associated with your Azure subscription. If this is the first time you have signed into the Language Understanding portal, you may need to grant the app some permissions to access your account details. Then complete the *Welcome* steps by selecting the existing Language Understanding authoring resource you just created in your Azure subscription.
# 2. Open the **My Apps** page, and select your subscription and Language Understanding authoring resource. Then create a new app for conversation with the following settings:
# - **Name**: Home Automation
# - **Culture**: English
# - **Description**: Simple home automation
# - **Prediction resource**: *Your Language Understanding prediction resource*
# 3. If a panel with tips for creating an effective Language Understanding app is displayed, close it.
#
# ### Create an Entity
#
# An *entity* is a thing that your language model can identify and do something with. In this case, your Language Understanding app will be used to control various *devices* in the office, such as lights or fans; so you'll create a *device* entity that includes a list of the types of device that you want the app to work with. For each device type, you'll create a sublist that identifies the name of the device (for example *light*) and any synonyms that might be used to refer to this type of device (for example *lamp*).
#
# 1. In the Language Understanding page for your app, in the pane on the left, click **Entities**. Then click **Create**, and create a new entity named **device**, select the **List** type, and click **Create**.
# 2. In the **List items** page, under **Normalized Values**, type **light**, then press ENTER.
# 3. After the **light** value has been added, under **Synonyms**, type **lamp** and press ENTER.
# 4. Add a second list item named **fan** with the synonym **AC**.
#
# ### Create Intents
#
# An *intent* is an action you want to perform on one or more entities - for example, you might want to switch a light on, or turn a fan off. In this case, you'll define two intents: one to switch a device on, and another to switch a device off. For each intent, you'll specify sample *utterances* that indicate the kind of language used to indicate the intent.
#
# 1. In the pane on the left, click **Intents**. Then click **Create**, and add an intent with the name **switch_on** and click **Done**.
# 2. Under the **Examples** heading and the **Example user input** subheading, type the utterance ***turn the light on*** and press **Enter** to submit this utterance to the list.
# 3. In the *turn the light on* utterance, click the word "light", and assign it to the **device** entity's **light** value.
# 4. Add a second utterance to the **switch_on** intent, with the phrase ***turn the fan on***. Then assign the word "fan" to the **device** entity's **fan** value.
# 5. In the pane on the left, click **Intents** and click **Create**, to add a second intent with the name **switch_off**.
# 6. In the **Utterances** page for the **switch_off** intent, add the utterance ***turn the light off*** and assign the word "light" to the **device** entity's **light** value.
# 7. Add a second utterance to the **switch_off** intent, with the phrase ***turn the fan off***. Then connect the word "fan" to the **device** entity's **fan** value.
#
# ### Train and Test the Language Model
#
# Now you're ready to use the data you've provided in the form of entities, intents, and utterances to train the language model for your app.
#
# 1. At the top of the Language Understanding page for your app, click **Train** to train the language model
# 2. When the model is trained, click **Test**, and use the Test pane to view the predicted intent for the following phrases:
# * *switch the light on*
# * *turn off the fan*
# * *turn the lamp off*
# * *switch on the AC*
# 3. Close the Test pane.
#
# ### Publish the Model and Configure Endpoints
#
# To use your trained model in a client application, you must publish it as an endpoint to which the client applications can send new utterances; from which intents and entities will be predicted.
#
# 1. At the top of the Language Understanding page for your app, click **Publish**. Then select **Production slot** and click **Done**.
# 2. After the model has been published, at the top of the Language Understanding page for your app, click **Manage**. Then on the **Application Information** tab, note the **Application ID** for your app. Copy this and paste it in the code below to replace **YOUR_LU_APP_ID** (if CTRL+V doesn't paste, try SHIFT+CTRL+V).
# 3. On the **Azure Resources** tab, note the **Primary key** and **Endpoint URL** for your prediction resource. Copy these and paste them into the code below, replacing **YOUR_LU_KEY** and **YOUR_LU_ENDPOINT**.
# 4. Run the cell below by clicking its **Run cell** (▷) button (to the left of the cell), and when prompted, enter the text *turn the light on*. The text is interpreted by your Language Understanding model and an appropriate image is displayed.
#
# + tags=[] gather={"logged": 1599696381331}
from python_code import luis
import matplotlib.pyplot as plt
from PIL import Image
import os
# %matplotlib inline
# Prompt for a typed command, send it to the published LUIS app, and show
# an image named after the predicted intent.  Any failure (bad keys,
# network error, missing image) is caught and printed rather than raised.
try:
    # Set up API configuration (replace the placeholders with your app's
    # ID, prediction key and endpoint from the LUIS portal).
    luis_app_id = 'YOUR_LU_APP_ID'
    luis_key = 'YOUR_LU_KEY'
    luis_endpoint = 'YOUR_LU_ENDPOINT'
    # prompt for a command
    command = input('Please enter a command: \n')
    # get the predicted intent and entity (code in python_code.home_auto.py)
    action = luis.get_intent(luis_app_id, luis_key, luis_endpoint, command)
    # display an appropriate image — the file name is derived from the
    # predicted action (e.g. "light-on" -> data/luis/light-on.jpg);
    # presumably one image exists per intent/entity combination — verify.
    img_name = action + '.jpg'
    img = Image.open(os.path.join("data", "luis" ,img_name))
    plt.axis('off')
    plt. imshow(img)
except Exception as ex:
    print(ex)
# -
# Re-run the cell above, trying the following phrases:
#
# * *turn on the light*
# * *put the lamp off*
# * *switch the fan on*
# * *switch the light on*
# * *switch off the light*
# * *turn off the fan*
# * *switch the AC on*
#
# > **Note**: If you're curious about the code used to retrieve the intents and entitites from your Language Understanding app, look at the **luis.py** file in the **python_code** folder.
# ## Add Voice Control
#
# So far, we've seen how analyze text; but increasingly AI systems enable humans to communicate with software services through speech recognition. To support this, the **Speech** cognitive service provides a simple way to transcribe spoken language into text.
#
# ### Create a Cognitive Services Resource
#
# If you don't already have one, use the following steps to create a **Cognitive Services** resource in your Azure subscription:
#
# 1. In another browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), signing in with your Microsoft account.
# 2. Click the **+Create a resource** button, search for *Cognitive Services*, and create a **Cognitive Services** resource with the following settings:
# - **Name**: *Enter a unique name*.
# - **Subscription**: *Your Azure subscription*.
# - **Location**: *Any available location*.
# - **Pricing tier**: S0
# - **Resource group**: *Create a resource group with a unique name*.
# 3. Wait for deployment to complete. Then go to your cognitive services resource, and on the **Quick start** page, note the keys and endpoint. You will need these to connect to your cognitive services resource from client applications.
#
# ### Get the Key and Endpoint for your Cognitive Services Resource
#
# To use your cognitive services resource, client applications need its endpoint and authentication key:
#
# 1. In the Azure portal, on the **Keys and Endpoint** page for your cognitive service resource, copy the **Key1** for your resource and paste it in the code below, replacing **YOUR_COG_KEY**.
# 2. Copy the **Endpoint** for your resource and paste it in the code below, replacing **YOUR_COG_ENDPOINT**.
# 3. Copy the **Location** for your resource and paste it in the code below, replacing **YOUR_COG_REGION** .
# 4. Run the code in the cell below.
# + tags=[] gather={"logged": 1599696409914}
cog_key = 'YOUR_COG_KEY'
cog_endpoint = 'YOUR_COG_ENDPOINT'
cog_region = 'YOUR_COG_REGION'
print('Ready to use cognitive services in {} using key {}'.format(cog_region, cog_key))
# + [markdown] nteract={"transient": {"deleting": false}}
# To use the Speech service in your Cognitive Services resource, you'll need to install the Azure Cognitive Services Speech SDK.
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# !pip install azure.cognitiveservices.speech
# -
# Now run the cell below to transcribe speech from an audio file, and use it as a command for your Language Understanding app.
# + tags=[] gather={"logged": 1599696420498}
from python_code import luis
import os
import IPython
import os
from azure.cognitiveservices.speech import SpeechConfig, SpeechRecognizer, AudioConfig
try:
# Get spoken command from audio file
file_name = 'light-on.wav'
audio_file = os.path.join('data', 'luis', file_name)
# Configure speech recognizer
speech_config = SpeechConfig(cog_key, cog_region)
audio_config = AudioConfig(filename=audio_file) # Use file instead of default (microphone)
speech_recognizer = SpeechRecognizer(speech_config, audio_config)
# Use a one-time, synchronous call to transcribe the speech
speech = speech_recognizer.recognize_once()
# Get the predicted intent and entity (code in python_code.home_auto.py)
action = luis.get_intent(luis_app_id, luis_key, luis_endpoint, speech.text)
# Get the appropriate image
img_name = action + '.jpg'
# Play audio and display image
IPython.display.display(IPython.display.Audio(audio_file, autoplay=True),
IPython.display.Image(data=os.path.join("data", "luis" ,img_name)))
except Exception as ex:
print(ex)
# -
# Try modifying the cell above to use the **light-off.wav** audio file.
#
# ## Learn More
#
# Learn more about Language Understanding in the [service documentation](https://docs.microsoft.com/azure/cognitive-services/luis/)
|
02d - Language Understanding.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # lists - part 2
# # Modifying lists
a = []
print(a)
# append adds a single element to the end of the list
a.append('hi')
print(a)
a.append('there')
print(a)
# extend appends each element of another iterable, one by one
a.extend(['everyone','how','are','you'])
print(a)
# insert(i, x) places x before index i, shifting later elements right
a.insert(1,'oh')
print(a)
a.insert(2,'and greetings')
print(a)
# # remove (and return) last element from list
b = a.pop()
print(b)
print(a)
# # just delete an element
del a[2]
print(a)
# # or a slice
del a[0:4]
print(a)
# # concatenate lists
b = ['some','more','things']
# + builds a new list; a and b are left unchanged
c = a+b
print(c)
# # reverse and sort
# reverse() and sort() mutate the list in place and return None
a.reverse()
print(a)
b.sort()
print(b)
# # map a function to every element of a list
a = ['3.141','1.21','-3.45','46']
print(a)
# in Python 3, map returns a lazy iterator; list() materializes it
b = map(float,a)
print(list(b))
# # list comprehensions - <br>
# # another way to do the same thing, but more general
b = [2*float(x) + 10 for x in a]
print(b)
|
notebooks/06.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="tYIM4g9_Gpb-"
# #**Instalando a biblioteca PySpark**
#
# Obs: Toda vez que for trabalhar com o PySpark no Colab tem que fazer a instalação do PySpark
# + colab={"base_uri": "https://localhost:8080/"} id="Tu3mvwNkGgCS" outputId="fd165ee2-862d-45ce-f7f1-dc2e75dbd3c8"
pip install pyspark
# + [markdown] id="6QxZdujaIjRU"
# #**Importando Bibliotecas**
# + id="vZY8w9UzH3SN"
from pyspark.sql import SparkSession
# + [markdown] id="-_aYfuRtIqnT"
# #**Configurando a SparkSession**
# + colab={"base_uri": "https://localhost:8080/", "height": 213} id="2l5OJcHIIlh7" outputId="1b2fa8c2-b3ce-47ed-e5ec-afb7763a5045"
# Start (or reuse) a local SparkSession for this notebook.
# spark.ui.port=4050 pins the Web UI port — presumably so it can be
# tunnelled out of Colab; confirm if running elsewhere.
spark = (SparkSession.builder\
         .master("local")\
         .appName("mini_projeto_final")\
         .config("spark.ui.port", "4050")\
         .getOrCreate())
spark
# + [markdown] id="AIGk4B05ItV7"
# #**Importando o arquivo .csv**
# + id="ArUsuugGI0aj"
# Read the marketing-campaign CSV and register it as the SQL temp view
# "campanha_mkt" used by every spark.sql() cell below.
# BUG FIX: the original assigned the whole chain — ending in
# .createOrReplaceTempView(), which returns None — to `df`, so `df` was
# always None. Assign the DataFrame first, then register the view.
df = (spark.read.format("csv")
      .option("inferSchema", "true")
      .option("header", "true")
      .option("sep", ",")
      .load("/content/drive/MyDrive/dados/df_pyspark_campanhaMKT.csv")
      )
df.createOrReplaceTempView("campanha_mkt")
# + [markdown] id="COWT_68EKaCq"
# # **Fazendo consultas utilizando o SparkSQL**
# + colab={"base_uri": "https://localhost:8080/"} id="UwUrIIvCKC0r" outputId="46c6f044-debf-4b1a-860d-966c7a547b75"
# Query to view all the data
spark.sql("SELECT * FROM campanha_mkt").show()
# + colab={"base_uri": "https://localhost:8080/"} id="4QZqEIshKWyt" outputId="9f546c6f-5580-4ade-9283-211dc740b7bf"
# Query comparing education, marital status and annual income
spark.sql("SELECT graduacao, estado_civil, renda_anual FROM campanha_mkt").show()
# + colab={"base_uri": "https://localhost:8080/"} id="-Saycjx0LTE1" outputId="e506ee19-5bf1-45b1-ad36-9a7fa62ee9e3"
# Query for the number of children per marital status
# NOTE(review): COUNT(a, b) counts rows where both columns are non-null;
# it does not add up the children — confirm SUM(a + b) was not intended.
spark.sql("SELECT estado_civil, COUNT(criancas_em_casa, adolescentes_em_casa) AS qtd_filhos FROM campanha_mkt GROUP BY estado_civil").show()
# + colab={"base_uri": "https://localhost:8080/"} id="83rr_V8ZMELj" outputId="759022c1-6564-46c1-dfda-a15fe64f5a57"
# Query for the yearly product-consumption count for customers older than 30
spark.sql("SELECT idade_cliente, COUNT(soma_qtd_produtos) AS consumo_idade FROM campanha_mkt GROUP BY idade_cliente HAVING idade_cliente > 30").show()
# + colab={"base_uri": "https://localhost:8080/"} id="cFMFpBynNR_v" outputId="05293988-eb0b-4385-e037-fe15bb58085a"
# Query for the sum, minimum, maximum and average wine consumption
# NOTE(review): the original comment said "per annual income", but the
# query groups by customer age (idade_cliente) — confirm intent.
spark.sql("SELECT idade_cliente, SUM(qts_vinhos) AS soma_qts_vinhos, MIN(qts_vinhos) AS min_qts_vinhos, MAX(qts_vinhos) AS max_qts_vinhos, AVG(qts_vinhos) AS media_qts_vinhos FROM campanha_mkt GROUP BY idade_cliente ORDER BY idade_cliente").show()
# + colab={"base_uri": "https://localhost:8080/"} id="z3_Y_VlGThaN" outputId="70d89f3d-3903-4efa-d3c5-fb9c004ffbc9"
# Query for the number of monthly web visits per age
spark.sql("SELECT idade_cliente, COUNT(num_visitas_web_mes) AS visitas_web FROM campanha_mkt GROUP BY idade_cliente ORDER BY idade_cliente").show()
# + colab={"base_uri": "https://localhost:8080/"} id="_Usc42V4V_0N" outputId="c3ebe9ec-af86-4225-c02f-5711392a118e"
# Query for age, education, annual income and meat-product consumption
spark.sql("SELECT idade_cliente, graduacao, renda_anual, qts_produtos_carne FROM campanha_mkt").show()
# + colab={"base_uri": "https://localhost:8080/"} id="Inac1BTQX-FU" outputId="f7b80ac2-d227-4586-a950-3805a98ca5dc"
# Query for sweets consumption and the number of children at home
spark.sql("SELECT qts_produtos_doces, SUM(criancas_em_casa) AS qts_filhos FROM campanha_mkt GROUP BY qts_produtos_doces").show()
# + colab={"base_uri": "https://localhost:8080/"} id="Pgv40cGGeNhd" outputId="2f67d910-2bab-4ea7-d026-74c8420bc41c"
# Query for the education level of customers who accepted the marketing campaign
spark.sql("SELECT graduacao, SUM(resposta_alvo) AS aceitaram_campanha FROM campanha_mkt GROUP BY graduacao").show()
# + colab={"base_uri": "https://localhost:8080/"} id="DWaNHssve6O_" outputId="f3946768-255a-42d2-924d-935fd95aa9eb"
# Query for customer age and fish-product consumption
spark.sql("SELECT idade_cliente, qts_produtos_peixe FROM campanha_mkt").show()
# + id="nI26-DwHhnUe"
|
Mini_Projeto_Final_PySpark_SQL.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <br>
# <br>
# <font size='6'><u><b>CSV File Minifier</b></u></font>
# <br>
#
# ##### Written by <NAME>
#
# We are trying to reduce .csv file size.
#
# ___
# # Setup
import pandas as pd
# ## Reduce .csv file size
# +
def reduce_size(file_path, new_path, step=5):
    """Shrink a .csv file by keeping only one out of every `step` data rows.

    Args:
        file_path: Path of the input .csv file.
        new_path: Path the reduced .csv file is written to. May be the same
            as `file_path`, in which case the input is overwritten.
        step: Keep one row out of every `step` file lines. Defaults to 5,
            matching the original behaviour (the old comment claimed "one
            every ten rows" but the code always used five).
    """
    original = pd.read_csv(file_path)
    print("Input rows #: {0}".format(len(original)))
    # skiprows is applied to raw file line numbers, so line 0 (the header)
    # is always kept along with every `step`-th line after it.
    mini = pd.read_csv(file_path, skiprows=lambda x: x % step != 0)
    print("Output rows #: {0}".format(len(mini)))
    # index=False: the original wrote the pandas index as an extra column,
    # which grows the file and injects a spurious "Unnamed: 0" column on
    # every subsequent pass over the same file.
    mini.to_csv(new_path, index=False)
# NOTE(review): new_path == file_path, so this overwrites the input file in
# place — running the cell repeatedly keeps shrinking the same file.
reduce_size("nbdata/MW_xyz_data.csv", "nbdata/MW_xyz_data.csv")
# -
# ## Reduce Sims folder size
#
# We reduce the size of 'Sims' folder by deleting unused .txt files.
#
# Slider's step in the notebook is 10... but files in folder's step was 2... there were several unused files which could be deleted to save space.
# +
from os import listdir, remove
from os.path import isfile, join
from pathlib import Path
# List all filenames (files only, not directories) in the simulation folder.
onlyfiles = [f for f in listdir("nbdata/Sims/M33_VLowRes/") if isfile(join("nbdata/Sims/M33_VLowRes/", f))]
#print(onlyfiles)
print("Number of files: ", len(onlyfiles))
# Delete every snapshot whose trailing 3-digit number is not a multiple of
# 10, because the notebook slider only steps through every 10th snapshot.
# NOTE(review): assumes every filename ends in exactly three digits
# (M33_###.txt); any other file in the folder makes int(snap) raise.
for file in onlyfiles:
    snap = (Path(file).stem)[-3:]
    if (int(snap) % 10 != 0):
        remove("nbdata/Sims/M33_VLowRes/M33_%s.txt"%snap)
# Re-list to report how many files survived the purge.
onlyfiles = [f for f in listdir("nbdata/Sims/M33_VLowRes/") if isfile(join("nbdata/Sims/M33_VLowRes/", f))]
print("Number of files: ", len(onlyfiles))
# -
|
06_EPO/e-TeenAstronomyCafe/04_Galactic_Neighborhood/miniCSV.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: kaggle-mri
# language: python
# name: kaggle-mri
# ---
# +
import argparse
import os
import time
import numpy as np
import pandas as pd
import torch
import torchio as tio
import torch.nn.functional as F
from torch.utils.data.dataloader import DataLoader
from sklearn.model_selection import train_test_split
# -
from dataset import RsnaDataset
from main import get_train_val_loaders
# +
from easydict import EasyDict as edict
# Hyper-parameters and paths bundled in an attribute-access dict, mimicking
# an argparse Namespace so main.get_train_val_loaders can be reused here.
args = edict({
    'data_path': '/home/asheesh/Documents/Github/kaggle-rsna-miccai/data/rsna-miccai-brain-tumor-radiogenomic-classification',
    'epochs': 10,
    'batch_size': 4,
    'validation_pct': 0.2,
    'device': torch.device("cuda:0" if torch.cuda.is_available() else "cpu"),
    'data_dir': 'train',
    'sequence_length': 30,
})
# -
# Per-scan preprocessing: pad/crop to 3x256x256, rescale intensities into
# [0, 1], then z-normalize.
transform = tio.Compose((
    tio.CropOrPad((3, 256, 256)),
    tio.RescaleIntensity((0, 1)),
    tio.ZNormalization()
))
train_loader, val_loader = get_train_val_loaders(args, transform=transform)
# Pull a single batch to eyeball shapes and images.
data = next(iter(train_loader))
x, y = data['X'], data['y']
x.shape
# +
import matplotlib.pyplot as plt
# Show four channels of slice 19 of the first sample.
# NOTE(review): the x[0][19][...] indexing assumes a (batch, slice,
# channel, H, W)-like layout — confirm against RsnaDataset.
plt.figure(figsize=(16, 6))
for i in range(4):
    plt.subplot(1, 4, i + 1)
    plt.imshow(x[0][19].numpy()[i], cmap="gray")
# -
# Inspect one channel and its intensity range (after ZNormalization above).
image = x[0][19][2]
plt.imshow(image.numpy(), cmap='gray')
print(image.max())
print(image.min())
|
code/test_notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Reverse Geocode Places Interim Data
#
# The [places interim dataframe](../data/interim/places.csv) consists of many **places** with a **latitude** and a **longitude** and some with only a **country** defined. Futhermore, there are some places which are actually nationalities and have none of these defined. Our goal here is obtain identifiable [ISO 3166-1 alpha 2 country codes](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2), [ISO 3166-1 alpha 3 country codes](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3) and **continent codes** for places, which can be used during feature construction. The process of mapping from a latitude and longitude to a location is known as [reverse geocoding](https://en.wikipedia.org/wiki/Reverse_geocoding). We will use the python library [reverse-geocoder](https://github.com/thampiman/reverse-geocoder) to help me with this task.
#
# As mentioned, some places do not have a latitude or longitude, but do have a country defined. For places of this type we will use the python library [pycountry-convert](https://github.com/TuneLab/pycountry-convert) to convert between the **country name** and **country codes**. This will fail in some instances due to some free form text in the country variable. In such cases, we will resort to [named entity recognition](https://en.wikipedia.org/wiki/Named-entity_recognition), to extract [geopolicatal entities](https://en.wiktionary.org/wiki/geopolitical_entity). For this task we will use the excellent natural language processing library [spacy](https://spacy.io/usage/linguistic-features#section-named-entities).
#
# For places which are actually nationalities, we will convert them to country codes by [normalizing nationalities via an ISO 3166-1 alpha 2 country codes list](https://t2a.io/blog/normalising-nationalities-via-a-good-iso-3166-country-list/). It is important to note that some of the places do not have a latitude, longitude, country or nationality defined. In such cases, we will have to get creative! More on that later. OK enough babble for now. Time to go on a mapping frenzy!
# ## Setting up the Environment
#
# A few initialization steps are needed to setup the environment:
# - The locale needs to be set for all categories to the user’s default setting (typically specified in the LANG environment variable) to enable correct sorting of words with accents.
# - Load `en_core_web_sm` which is the default English language model in `spacy`.
# +
import locale
import spacy
# Use the user's default locale so locale.strxfrm sorts accented names
# correctly later on.
locale.setlocale(locale.LC_ALL, '')
# Default small English model; used below for GPE named entity recognition.
nlp = spacy.load('en_core_web_sm')
# +
import numpy as np
import pandas as pd
from pycountry_convert import convert_continent_code_to_continent_name
from pycountry_convert import country_alpha2_to_continent_code
from pycountry_convert import country_alpha2_to_country_name
from pycountry_convert import country_name_to_country_alpha2
from pycountry_convert import country_name_to_country_alpha3
import reverse_geocoder as rg
from src.data.country_utils import nationality_to_alpha2_code
# -
# ## Reading in the Places Data
#
# First let's read the places data into a dataframe and take a look at the columns of interest for the first few entries.
places = pd.read_csv('../data/interim/places.csv')
# Columns inspected repeatedly throughout this notebook.
place_cols = ['fullName', 'lat', 'long', 'country']
places.head(20)[place_cols]
# Already, it's obvious to see that there are places with latitudes, longitudes and countries and some with none of these defined. Exactly how many though?
print('Number of places: ', len(places))
print('Number with lat / long: ', (places.lat.notna() & places.long.notna()).sum())
# Latitude and longitude are expected to be missing together.
assert(places.lat.isna().sum() == places.long.isna().sum())
print('Number with country: ', (places.country.notna()).sum())
print('Number with neither: ', (places.lat.isna() & places.long.isna() & places.country.isna()).sum())
# There are two reasons why it is clearly better to start with the latitude and longitude first before using the country:
#
# - There are more values in the dataframe for latitude and longitude than country.
# - The latitude and longitude values are more precise than the country values since there is free form text in the latter field.
# ## Reverse Geocoding
#
# OK let's perform the reverse geocoding to obtain the alpha 2 country code and take a look at the first few places.
def reverse_geocode(places):
    """Reverse geocode the places dataframe.

    Use latitude and longitudes to find ISO 3166-1 alpha-2 country codes.

    Args:
        places (pandas.DataFrame): Dataframe of places data.

    Returns:
        pandas.DataFrame: Dataframe containing ISO 3166-1 alpha-2 country codes.
            Identical to `places` except that it contains an extra column for ISO
            3166-1 alpha-2 country codes when latitude and longitude are present.
    """
    rg_places = places.copy()
    # Rows that have both coordinates; only these can be reverse geocoded.
    has_coords = (places.lat.notna() & places.long.notna()).values
    coords = [(lat, long) for lat, long, ok in
              zip(places.lat, places.long, has_coords) if ok]
    # rg.search returns one result per query coordinate in the same order,
    # so consuming them through an iterator re-aligns them with the rows.
    # (Replaces the original list.pop(0) loop, which was O(n^2).)
    results = iter(rg.search(coords))
    rg_places['countryAlpha2Code'] = [
        next(results)['cc'] if ok else np.nan for ok in has_coords]
    return rg_places
places = reverse_geocode(places)
# Every row with coordinates must now have a country code, and vice versa.
assert(places.lat.isna().sum() == places.countryAlpha2Code.isna().sum())
place_cols.append('countryAlpha2Code')
places.head(20)[place_cols]
# `reverse_geocoder` seems to be quite accurate, but we can see that there is one error here. Adelaide is not in Japan (JP)! Let's investigate this further.
rg.search([(34.929001, 138.600998)])
# The above confirms the value in the dataframe above and matches with the [lat](http://www.w3.org/2003/01/geo/wgs84_pos#lat) and [long](http://www.w3.org/2003/01/geo/wgs84_pos#long) values in the [source JSON file](http://dbpedia.org/data/Adelaide.json). So what's wrong? A little trial and error reveals that there is an input error in the source. The latitude value is missing a minus sign.
rg.search([(-34.929001, 138.600998)])
# OK nice to know it's not a reverse geocoding error. However, it does further raise some questions as to the accuracy of DBpedia data. Athough not particularly rigorous, a quick scan through the data reveals that this type of issue is rare. Time to move on now and check how many places have values for the country but not a country alpha 2 code.
(places.country.notna() & places.countryAlpha2Code.isna()).sum()
# Not too many, but time to take care of them nonetheless.
# ## Converting Countries to Alpha-2 Country Codes
#
# We are now going to convert the remaining places with only countries to their associated alpha-2 country codes.
def country_to_alpha2_code(text):
    """Create ISO 3166-1 alpha-2 country codes from countries.

    Use the country to find ISO 3166-1 alpha-2 country codes. This
    function should only be called for the subset of the places
    dataframe where country is defined but latitude / longitude
    (equivalently the ISO 3166-1 alpha-2 country code) is not.

    Args:
        text (str): Text containing countries, pipe separated.

    Returns:
        `str` or `numpy.nan`: Pipe separated list of ISO 3166-1
            alpha-2 country codes if found, otherwise numpy.nan.
    """
    codes = set()
    for name in text.split('|'):
        try:
            codes.add(country_name_to_country_alpha2(name))
        except KeyError:
            # Free form text: fall back to named entity recognition and
            # try every geopolitical entity (GPE) spacy can find in it.
            for ent in nlp(name).ents:
                if ent.label_ != 'GPE':
                    continue
                try:
                    codes.add(country_name_to_country_alpha2(ent.text))
                except KeyError:
                    pass
    if not codes:
        return np.nan
    # Locale-aware sort keeps accented names ordered consistently.
    return '|'.join(sorted(codes, key=locale.strxfrm))
# Rows that still lack a country code but do have free form country text.
places_countries = places[places.countryAlpha2Code.isna() &
                          places.country.notna()][['country', 'countryAlpha2Code']]
places.loc[places_countries.index, 'countryAlpha2Code'] = (
    places_countries.country.apply(country_to_alpha2_code))
places.loc[places_countries.index][place_cols]
# ## Converting Nationalities to Alpha-2 Country Codes
#
# Looking at the dataframe, it is clear that some of the remaining places are nationalities.
places[places.countryAlpha2Code.isna()][place_cols]
# We will now read in the nationality list that will help us to convert these nationalities to their associated alpha-2 country codes. It's important at this point to turn off the default behavior of pandas which is to treat the string literal 'NA' as a missing value. In the dataset, 'NA' is the ISO 3166 alpha-2 country code of Namibia. We then have to remember to impute the missing values since pandas replaces them with the empty string.
# +
try:
    # NB: I have manually fixed the csv to have 'NA' as the country code
    # for Namibia. The author of the file clearly did not realize that by
    # default 'NA' in a field is treated as NAN by pandas.
    # keep_default_na=False preserves the literal 'NA'; genuinely empty
    # cells are then converted back into real NaNs.
    nationalities = pd.read_csv('../data/external/Countries-List.csv', keep_default_na=False)
    nationalities = nationalities.replace('', np.nan)
except FileNotFoundError:
    # First run: download the list and cache it locally for next time.
    nationalities = pd.read_csv('https://t2a.io/blog/wp-content/uploads/2014/03/Countries-List.csv',
                                encoding = 'ISO-8859-1')
    nationalities.to_csv('../data/external/Countries-List.csv', index=False)
# Guard against the Namibia 'NA' pitfall described above.
assert(nationalities[nationalities.Name == 'Namibia']['ISO 3166 Code'].values == 'NA')
nationalities
# -
# We will now manually add some commonly used names and demonyms to the dataframe. Despite these being neither countries or nationalities, they either are or were in common use.
# Commonly used historic / constituent-country names and demonyms that are
# missing from the external ISO 3166 list. Each row mirrors the five
# columns of `nationalities`.
other_nationalities = pd.DataFrame(
    [
        ['GB', 'England', 'English', np.nan, np.nan],
        ['CI', 'Ivory Coast', 'Ivorian', np.nan, np.nan],
        ['GB', 'Northern Ireland', 'Northern Irish', np.nan, np.nan],
        ['IR', 'Persia', 'Persian', np.nan, np.nan],
        ['DE', 'Prussia', 'Prussian', np.nan, np.nan],
        ['IE', 'Republic of Ireland', 'Irish', np.nan, np.nan],
        ['GB', 'Scotland', 'Scottish', 'Scot', np.nan],
        # Fixed: this row previously had only four elements — one short of
        # the column count — and relied on pandas silently padding it.
        ['RU', 'Soviet Union', 'Soviet', np.nan, np.nan],
        ['US', 'United States', 'American', np.nan, np.nan],
        ['GB', 'Wales', 'Welsh', np.nan, np.nan]
    ],
    columns=nationalities.columns
)
# pd.concat replaces DataFrame.append (deprecated and later removed in
# pandas 2.0); the result is identical.
nationalities = pd.concat(
    [nationalities, other_nationalities],
    ignore_index=True).sort_values(by='ISO 3166 Code')
# The external ISO 3166 list contributes exactly 249 rows.
assert(len(nationalities) - len(other_nationalities) == 249)
nationalities
# Now let's convert the remaining places which are nationalities to their associated alpha-2 country codes.
# Remaining places without a code: try interpreting the place name itself
# as a nationality (demonym), e.g. 'Albanians' -> 'AL'.
places_nationalities = places[places.countryAlpha2Code.isna()][['fullName', 'countryAlpha2Code']]
places.loc[places_nationalities.index, 'countryAlpha2Code'] = (
    places_nationalities.fullName.apply(nationality_to_alpha2_code, args=(nationalities,)))
places[places.lat.isna() & places.country.isna() & places.countryAlpha2Code.notna()][place_cols]
# Please take note that although this process is very accurate, it is not perfect, as it can result in a few false positives. For instance, *Scottish Church Collegiate School* is actually in India and not Scotland and *Petit Luxembourg* is a hotel in Paris and not in Luxembourg. However, since the quantity of true positives far outweighs the false positives, we will go with it. Now we are left with just the following places without a country code. They are a mix of companies, educational institutions, cities and some plain random stuff.
places[places.countryAlpha2Code.isna()][place_cols]
# Interestingly, examining the *categories* column of the false positives above gives us the idea of applying the `nationality_to_alpha2_code` function to it also, since the correct information is available there.
print(places[places.fullName == 'Scottish Church Collegiate School']['categories'].values)
print()
print(places[places.fullName == 'Petit Luxembourg']['categories'].values)
# However, rather than blindly applying the function to all nationalities in the places dataframe which would give many false positives, such as the following:
print(places[places.fullName == 'Albanians']['categories'].values)
print()
print(places[places.fullName == '<NAME>']['categories'].values)
# We will be conservative and only apply it to the *categories* column of the remaining places without a country code. That is to the mix of companies, educational institutions, cities and plain random stuff shown in the dataframe above.
# Last resort: scan the free-text `categories` column for nationality
# words — conservatively, only for rows that still have no country code.
places_others = places[places.countryAlpha2Code.isna()][['fullName', 'categories', 'countryAlpha2Code']]
places.loc[places_others.index, 'countryAlpha2Code'] = (
    places_others.categories.apply(nationality_to_alpha2_code, args=(nationalities,)))
places.loc[places_others.index][place_cols]
# Again the success rate is so high that it is definitely sufficient to proceed with this. However, as usual there are few false positives. A clear example of this is *Cape Canaveral* which of course is located in the United States and not India. This is due to the fact that it is situated near the *Indian* River Lagoon.
display(places[places.fullName == '<NAME>'][place_cols])
print(places[places.fullName == '<NAME>'].categories.values)
# Let's check to see how many places remain without a country code.
places[places.countryAlpha2Code.isna()][place_cols]
# Very few indeed. In fact many of these are not even **places**. We have managed to map nearly all of the places to country codes, so it's time to move on.
# Summary: fraction of places successfully mapped to a country code.
print('Percentage of places mapped to country codes:',
      100 * round(places.countryAlpha2Code.notna().sum() /
                  len(places), 2), '%')
# ## Mapping Alpha-2 Country Codes to Other Codes and Names
#
# Finally, we can now use `pycountry-convert` to map from all the alpha-2 country codes to alpha-3 country codes, continent codes, country names and continent names.
# +
def alpha2_to_codes_names(places):
    """Create other codes and names from ISO 3166-1 alpha-2 country codes.

    Use ISO 3166-1 alpha-2 country codes to find country name, ISO 3166-1
    alpha-3 country codes, continent code and continent name.

    Args:
        places (pandas.DataFrame): Dataframe of places data.

    Returns:
        pandas.DataFrame: Dataframe containing the extra fields mentioned
            above. Identical to `places` except for those extra columns.
    """
    codes_names_places = places.copy()
    # Each new column is derived from an existing one; order matters,
    # because later conversions read columns created by earlier ones.
    conversions = (
        ('countryName', 'countryAlpha2Code', country_alpha2_to_country_name),
        ('countryAlpha3Code', 'countryName', country_name_to_country_alpha3),
        ('continentCode', 'countryAlpha2Code', country_alpha2_to_continent_code),
        ('continentName', 'continentCode', convert_continent_code_to_continent_name),
    )
    for new_col, source_col, converter in conversions:
        codes_names_places[new_col] = codes_names_places[source_col].apply(
            _text_to_loc_or_codes, args=(converter,))
    return codes_names_places
def _text_to_loc_or_codes(text, rg_function):
    """Apply `rg_function` to every pipe separated token of `text`.

    NaN input (a float) is returned unchanged. Converted values are
    de-duplicated and joined with '|', sorted with locale-aware collation.
    Returns numpy.nan when no token could be converted.
    """
    # Missing values arrive as float('nan') from pandas.
    if isinstance(text, float):
        return text
    # Exclude French Southern Territories and Vatican City when
    # converting to continents since they are not recognized.
    excluded = ('TF', 'VA')
    converted = {rg_function(token)
                 for token in text.split('|') if token not in excluded}
    if not converted:
        return np.nan
    return '|'.join(sorted(converted, key=locale.strxfrm))
# -
places = alpha2_to_codes_names(places)
assert((places.countryAlpha2Code.isna() & places.country.notna()).sum() == 0)
place_cols = place_cols + ['countryAlpha3Code', 'countryName', 'continentCode', 'continentName']
places[place_cols]
# ## Persisting the Data
#
# Now we have the places and nationalities dataframes, we will persist them for future use in feature construction.
# Sort columns alphabetically for a stable on-disk layout, then persist
# both dataframes for use during feature construction.
places = places.reindex(sorted(places.columns), axis='columns')
places.head(20)
places.to_csv('../data/processed/places.csv', index=False)
nationalities.to_csv('../data/processed/Countries-List.csv', index=False)
|
nobel_physics_prizes/notebooks/2.4-reverse-geocode-places-interim-data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# Import the required libraries, installing any that are missing.
# BUG FIX: the original cells used a bare ``except:`` with
# ``ModuleNotFoundError`` as a no-op expression statement on the next
# line, which silently caught *every* exception; catching
# ModuleNotFoundError explicitly is what was intended.
try:
    import jax
    import jax.numpy as jnp
except ModuleNotFoundError:
    # %pip install jax jaxlib
    import jax
    import jax.numpy as jnp

try:
    import blackjax
except ModuleNotFoundError:
    # %pip install blackjax
    import blackjax

try:
    import optax
except ModuleNotFoundError:
    # %pip install optax
    import optax

try:
    from rich import print
    from rich.table import Table
except ModuleNotFoundError:
    # %pip install rich
    from rich import print
    from rich.table import Table

import matplotlib.pyplot as plt
import seaborn as sns
import warnings
# Silence library warnings notebook-wide (deliberate best-effort choice).
warnings.filterwarnings('ignore')
# -
from jax import random, jit, vmap, grad
# Synthetic 1-D linear regression data: y = 4x + 5 + unit Gaussian noise.
num_points = 100
X = jnp.linspace(-5., 5., num_points)
# Design matrix with a bias column (built here but not used below).
Phi = jnp.c_[jnp.ones(num_points)[:, None], X]
f = 4*X + 5
key = random.PRNGKey(0)
eps = random.normal(key, (num_points,))
y = f + eps
plt.scatter(X, y, alpha=0.5)
plt.plot(X, f, lw = 2, color='k')
sns.despine()
def ll(theta):
    # NOTE(review): this function is broken/unfinished as written:
    # `random.multivariate_normal()` is called without its required
    # arguments, and `tfp` is never imported anywhere in this file.
    rv = random.multivariate_normal()
    dist = tfp.distributions
# # Creating a dataset
#
# Let us create a dataset. We will assume the coin toss to be given as per the Bernoulli distribution. We will assume that $\theta = p(H)
# = 0.75$ and generate 10 samples. We will fix the random seeds for reproducibility.
#
# We will be encoding Heads as 1 and Tails as 0.
key = jax.random.PRNGKey(0)
key
# +
# Draw 100 Bernoulli(0.75) samples (heads=1, tails=0) as the dataset.
# NOTE(review): `dist` is not defined at module scope in this checkpoint
# (it is only assigned inside the broken `ll` above, and tfp is never
# imported), so this cell raises NameError as written.
distribution = dist.Bernoulli(probs=0.75)
dataset_100 = distribution.sample(seed=key, sample_shape=(100))
# -
dataset_100
# ### MLE
#
# #### Obtaining MLE analytically
#
# As per the principal of MLE, the best estimate for $\theta = p(H) = \dfrac{n_h}{n_h+n_t}$
# Analytical MLE for a Bernoulli: fraction of heads in the sample.
mle_estimate = dataset_100.sum()/100
mle_estimate
# We will now verify if we get the same result using jax+TFP using optimization. But, first, we can create a graphical model for our problem.
# +
# Graphical model: theta -> obs_i, with an observation plate of size N.
# NOTE(review): `daft` is never imported in this checkpoint file — this
# cell raises NameError as written.
pgm = daft.PGM([4, 3], origin=[0, 0])
pgm.add_node(daft.Node("theta", r"$\theta$", 1, 2.5, aspect=1.8))
pgm.add_node(daft.Node("obs", r"$obs_i$", 1, 1, aspect=1.2, observed=True))
pgm.add_edge("theta", "obs")
pgm.add_plate([0, 0.5, 2, 1.0], label=r"$N$", shift=-0.1)
_ = pgm.render(dpi=150)
# -
def neg_log_likelihood(theta, dataset):
    """Negative log-likelihood of `dataset` under Bernoulli(theta)."""
    bernoulli = dist.Bernoulli(probs=theta)
    log_probs = bernoulli.log_prob(dataset)
    return -log_probs.sum()
# We can find the likelihood for different thetas.
neg_log_likelihood(0.2, dataset_100), neg_log_likelihood(0.6, dataset_100)
# We can also use `vmap` to compute the likelihood over a range of thetas.
# None for the second in_axes entry: the dataset is broadcast, not mapped over.
neg_log_likelihood_vmap = jax.vmap(neg_log_likelihood, in_axes=(0, None))
theta_array = jnp.linspace(0.01, 0.99, 100)
nll_array = neg_log_likelihood_vmap(theta_array, dataset_100)
# Plot the NLL curve over theta with the true value marked.
plt.plot(theta_array, nll_array)
sns.despine()
plt.axvline(0.75, linestyle='--', color='k', label=r'True $\theta$')
plt.legend()
plt.ylabel("Negative Log Likelihood")
_ = plt.xlabel(r"$\theta$")
# ### Learning MLE parameters via gradient descent
# jax.grad differentiates with respect to argument 0 (theta) by default,
# which is exactly what we need here.
grad_loss = jax.grad(neg_log_likelihood)
grad_loss(0.5, dataset_100)
grad_loss(0.8, dataset_100)
# We can see that the gradient values starting with $\theta = 0.5$ will push towards increasing $\theta$ and vice versa starting with $\theta = 0.8$
# +
# Plain SGD on the negative log-likelihood, starting from theta = 0.1.
optimizer = optax.sgd(learning_rate=0.001)
theta = jnp.array(0.1).round(2)
opt_state = optimizer.init(theta)
# +
# Pretty-print the optimisation trace with a rich table.
table = Table(title="MLE Convergence")
table.add_column("Iteration", justify="right", style="cyan", no_wrap=True)
table.add_column("Loss", justify="right", style="magenta")
table.add_column("Theta", justify="right", style="green")
for i in range(10):
    cost_val = neg_log_likelihood(theta, dataset_100)
    table.add_row(str(i), f"{cost_val:0.2f}", f"{theta:0.2f}")
    grad_theta_val = grad_loss(theta, dataset_100)
    updates, opt_state = optimizer.update(grad_theta_val, opt_state)
    theta = optax.apply_updates(theta, updates)
print(table)
# -
# ### MAP
# +
# Graphical model for the MAP setting: Beta(alpha, beta) prior on theta.
pgm = daft.PGM([4, 4], origin=[0, 0])
pgm.add_node(daft.Node("alpha", r"$\alpha$", 0.5, 3.5, aspect=1.8))
pgm.add_node(daft.Node("beta", r"$\beta$", 1.5, 3.5, aspect=1.8))
pgm.add_node(daft.Node("theta", r"$\theta$", 1, 2.5, aspect=2))
pgm.add_node(daft.Node("obs", r"$obs_i$", 1, 1, aspect=1.2, observed=True))
pgm.add_edge("theta", "obs")
pgm.add_edge("alpha", "theta")
pgm.add_edge("beta", "theta")
pgm.add_plate([0, 0.5, 2, 1.0], label=r"$N$", shift=-0.1)
_ = pgm.render(dpi=110)
# -
# Symmetric Beta(10, 10) prior, centred at 0.5.
prior_alpha = 10.0
prior_beta = 10.0
prior_dist = dist.Beta(concentration1=prior_alpha, concentration0=prior_beta)
# Our prior will give us samples on $\theta$. Let us draw a 100 samples and draw their histogram.
prior_samples = prior_dist.sample(sample_shape=(100), seed=key)
sns.kdeplot(prior_samples, bw_adjust=2)
sns.despine()
# Now, given a $\theta$, we can evaluate the log prior and log likelihood and optimize their sum them to obtain the MAP estimate.
def neg_log_prior(theta, prior_dist):
    """Return the negative log-density of `theta` under `prior_dist`."""
    log_density = prior_dist.log_prob(theta)
    return -log_density
neg_log_prior(0.1, prior_dist)
neg_log_prior(0.5, prior_dist)
# Clearly, we are much more likely to sample $\theta = 0.5$ from our prior in comparison to $\theta = 0.1$.
def joint_neg_log_prior_log_likelihood(theta, dataset, prior_dist):
    """Negative unnormalised log posterior: -(log prior + log likelihood)."""
    return neg_log_prior(theta, prior_dist) + neg_log_likelihood(theta, dataset)
# +
# MAP: gradient descent on the negative (log prior + log likelihood).
grad_loss = jax.grad(joint_neg_log_prior_log_likelihood)
optimizer = optax.sgd(learning_rate=0.001)
theta = jnp.array(0.1).round(2)
opt_state = optimizer.init(theta)
# +
# Optimisation trace, printed as a rich table.
table = Table(title="MAP Convergence")
table.add_column("Iteration", justify="right", style="cyan", no_wrap=True)
table.add_column("Loss", justify="right", style="magenta")
table.add_column("Theta", justify="right", style="green")
for i in range(10):
    cost_val = joint_neg_log_prior_log_likelihood(theta, dataset_100, prior_dist)
    table.add_row(str(i), f"{cost_val:0.2f}", f"{theta:0.2f}")
    grad_theta_val = grad_loss(theta, dataset_100, prior_dist)
    updates, opt_state = optimizer.update(grad_theta_val, opt_state)
    theta = optax.apply_updates(theta, updates)
print(table)
# -
# Closed-form check against the optimised value: (heads + 10) / 120 is the
# posterior *mean* of Beta(h + 10, t + 10).
# NOTE(review): the MAP of a Beta posterior is (h + alpha - 1) /
# (n + alpha + beta - 2), i.e. (h + 9) / 118 here — confirm which value
# the optimisation result is meant to match.
f"{(dataset_100.sum()+10)/(120):0.2f}"
# ### Analytical Posterior
#
# $P(\theta|Data) \sim Beta(\#Heads~in~Data + \alpha, \#Tails~in~Data + \beta)$
analytical_posterior = dist.Beta(dataset_100.sum() + prior_alpha, 100.-dataset_100.sum() + prior_beta)
analytical_posterior.concentration1, analytical_posterior.concentration0
# +
# Posterior vs prior densities, with the true theta marked.
plt.plot(theta_array, analytical_posterior.prob(theta_array), label='Posterior')
plt.plot(theta_array, prior_dist.prob(theta_array), label='Prior')
plt.axvline(0.75, linestyle='--', color='k', label=r'True $\theta$')
sns.despine()
_ = plt.xlabel(r"$\theta$")
plt.legend()
# +
# Same comparison on the log-density scale.
plt.plot(theta_array, analytical_posterior.log_prob(theta_array), label='Log Posterior')
plt.plot(theta_array, prior_dist.log_prob(theta_array), label='Log Prior')
plt.axvline(0.75, linestyle='--', color='k', label=r'True $\theta$')
sns.despine()
_ = plt.xlabel(r"$\theta$")
plt.legend()
# -
# ### MCMC
#
# Implementation of Metropolis algorithm from scratch
@jax.jit
def next_sample(cur_sample, key):
    # Gaussian random-walk proposal centred on the current sample.
    # NOTE(review): `dist` (tfp.distributions) is never imported in this
    # checkpoint file, so this raises NameError as written.
    return dist.Normal(loc = cur_sample, scale=0.1,
                       validate_args=False).sample(seed=key, sample_shape=() )
key = jax.random.PRNGKey(5)
_, k = jax.random.split(key, 2)
next_sample(1.0, k)
@jax.jit
def lp(theta):
    # Unnormalised log posterior: log prior + log likelihood.
    return -joint_neg_log_prior_log_likelihood(theta, dataset_100, prior_dist)
lp(0.1)
import numpy as onp
# +
# Metropolis sampling: random-walk proposals accepted with probability
# min(1, posterior ratio); acceptance uses pre-drawn log-uniforms `lu`.
x_start = 0.5
num_iter = 5000
xs = onp.empty(num_iter)
xs[0] = x_start
lu = jnp.log(dist.Uniform(0, 1).sample(sample_shape=[num_iter], seed=k))
keys = jax.random.split(k, num_iter)
for i in range(1, num_iter):
    ns = next_sample(xs[i-1], keys[i])
    # NOTE(review): clamping proposals into [0.01, 0.99] makes the
    # proposal asymmetric at the boundaries, which slightly biases the
    # chain — consider reflection or a logit reparameterisation instead.
    if ns > 0.99:
        ns = 0.99
    if ns < 0.01:
        ns = 0.01
    xs[i] = ns
    la = lp(xs[i]) - lp(xs[i-1])
    if lu[i] > la:
        xs[i] = xs[i-1]
# -
# Trace plot, then sample density vs the analytical Beta posterior.
plt.plot(jnp.array(xs).reshape(-1, 1))
sns.kdeplot(jnp.array(xs), label='MCMC estimate density', lw=2)
plt.hist(onp.array(xs), density=True, label='MCMC histogram', alpha=0.2, bins=10)
plt.plot(theta_array, analytical_posterior.prob(theta_array), label='Posterior', lw=2, linestyle='--')
plt.legend()
sns.despine()
# TODO
#
# 1. remove the warnings
# 2. check if can replace TFP with distrax here or pure jax.scipy.?
# 3. VI from scratch?
# 4. Document (and maybe create Class for MH sampling)
# 5. Better way for MCMC sampling when parameters are constrained? (like theta between 0 and 1?)
# 6. where other can we jit?
#
|
notebooks/introduction/.ipynb_checkpoints/simpson-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="J_nQixLUYpPf"
# # Collect the Dataset
# + [markdown] id="0NxnqJsbbgX5"
# As an example, we use the dataset presented in the article. If you want to train a model on your own data, simply substitute your dataset here. Likewise, to use the article's dataset with a different model, you only need to change the model.
# + id="3gdX0jFkYyF9"
# !wget https://raw.githubusercontent.com/adailtonaraujo/app_review_analysis/master/Sentiment/Datasets/Dataset_Sentiment_Word-Embedding.zip
# + [markdown] id="CH4tOlUAzjok"
# # Word-Embeddings
# + [markdown] id="EgVWx1qY1ePW"
# ## Install - Instanciando Framework ABSA
# + id="1TvGWlw3moeK"
# !git clone https://github.com/songyouwei/ABSA-PyTorch
# !mv ABSA-PyTorch/* .
# !pip install -r requirements.txt
# + [markdown] id="lbWOL1OeerOc"
# ## Adjustments to the train.py file
#
# Replace line 254 of the train.py file with the excerpt below (app review dataset).
#
# },
# 'app_review': {
# 'train': './app_review/treinamento.csv',
# 'test': './app_review/teste.csv'
# }
# + [markdown] id="7XjycOEtg2H7"
# # Train
# + id="WwPs4pLWztKd"
#parm definition
epc=1
appName='Spotify'
# + id="4qJ0f6jzmzVf"
# !cp app_review/train_except_{appName}.csv app_review/treinamento.csv
# !cp app_review/test_data_{appName}.txt app_review/teste.csv
# !python train.py --model_name bert_spc --dataset app_review --num_epoch $epc
|
Sentiment/Codes/SentimentAnalysis_Train_Word_Embedding.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="nBvojspHGWrc"
# <h1><Center> Smart open-domain Chatbot using deep learning Attention mechanism </Center></h1>
#
#
#
# + [markdown] id="zgEIq_RCHqNe"
# # Lesson Goals
# This lesson is the continuation of big-data natural language text processing, where we took three datasets from Answers.com, Yahoo Answers, and the Quora API. In this lesson, I will show you how to develop a smart open-domain chatbot using a deep learning attention mechanism.
# # Prerequisites
#
# * if your system has GPU you can run this example on your system but if not you can run it in GCP.
#
#
# + [markdown] id="Q8pz2aaCJZQa"
# # Getting Started
# Import modules:
# + id="VlFctlhgJdDS"
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import os
import time
# + id="my6BV0KtJkAS"
# + [markdown] id="o_VzstOtJs-R"
# ## Data Preparation:
# # Load up the data
# + id="c6kgd7vyJ3wL"
def load_dataset(path):
    """Read question/answer pairs from a CSV file.

    Arguments:
        path: a string, path to a CSV file with `Question` and `Answer` columns.
    Returns:
        dataset: a shuffled Pandas DataFrame with the two columns.
        X: a NumPy array of `Question` strings wrapped in <start>/<end> tokens.
        Y: a NumPy array of `Answer` strings wrapped in <start>/<end> tokens.
    """
    # Read only the two columns we need.
    dataset = pd.read_csv(path, usecols=['Question', 'Answer'])
    # Force every cell to str; some cells hold bare numbers and would
    # otherwise come back as ints/floats.
    dataset = dataset.astype(str)
    # Shuffle the rows and reset the index so it stays contiguous.
    dataset = dataset.sample(frac=1).reset_index(drop=True)
    X = np.asarray(dataset['Question'])
    Y = np.asarray(dataset['Answer'])
    # Wrap each sentence in the <start>/<end> markers the decoder relies on.
    # (Plain comprehensions replace the original np.apply_along_axis calls,
    # which relied on element-wise `+` working on an object-dtype array —
    # an accident of dtype rather than a supported string operation.)
    X = np.array(['<start> ' + sen + ' <end>' for sen in X])
    Y = np.array(['<start> ' + sen + ' <end>' for sen in Y])
    return dataset, X, Y
# + colab={"base_uri": "https://localhost:8080/"} id="bhxQkXbIW3NB" outputId="e9d83235-1411-4707-9fee-3de821130a07"
os
# + colab={"base_uri": "https://localhost:8080/"} id="OixKyY2piyJB" outputId="5055b38b-d0c2-4870-d4c2-872e8e466e42"
os.getcwd
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="ybteJilpW8mY" outputId="94286cc4-73aa-4963-c8b6-d40e52337c08"
os.getcwd()
# + id="JQwL1XO-X5Ke"
# !mkdir path
# + colab={"base_uri": "https://localhost:8080/"} id="WLvoeGBDYFCI" outputId="7694edcf-028e-46b5-d776-5ccaa16e0ad4"
# cd path/
# + colab={"base_uri": "https://localhost:8080/", "height": 72, "resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "headers": [["content-type", "application/javascript"]], "ok": true, "status": 200, "status_text": ""}}} id="P0SdSQK4YI0-" outputId="dd236cbb-a781-4b80-f5be-e68729b37cbf"
from google.colab import files
uploaded = files.upload()
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="33Z0V9_2YsoM" outputId="1cd73020-37ae-40d2-aecc-d858452c602d"
files_path = '/content/path/'
qa_dataframe, X, Y = load_dataset(files_path + 'dataset.csv')
qa_dataframe.head()
# + colab={"base_uri": "https://localhost:8080/"} id="ge5qWhphaYGP" outputId="8c7e53bc-b7f5-4c0b-e8c2-baa67b85f93d"
print(f"Number of question-answer pairs in the dataset: {len(qa_dataframe)}")
# + id="nfue4BGiarvq"
# + [markdown] id="8tAMWya1b6zN"
# Cache all hyperparameters into this dictionary:
# + id="jxRAiIxlb8El"
hyperparameters = dict()
# + [markdown] id="10tosrvqcc_7"
# **Choose the size of vocabulary**:
#
# This is a hyperparameter that you can play with.
# + id="EpLvCpzMchWa"
VOCAB_SIZE = 10000
# + id="eab_AKFsc5Q5"
# + [markdown] id="9mq5XOsQd_-z"
# Now, let's tokenize the data, by converting each sentence (question or answer) to a sequence of integers which represent indices of their embeddings.
# + id="fSLTe8EQeBPa"
def tokenize(sentences, vocab_size):
    """Convert sentences into padded sequences of vocabulary indices.

    Fits a Keras ``Tokenizer`` on ``sentences``, maps each sentence to the
    sequence of its tokens' integer indices, and right-pads every sequence
    with zeros to a common length.

    Arguments:
        sentences: a list or NumPy array of strings.
        vocab_size: an int, the number of most-frequent words to keep.
    Returns:
        padded: a NumPy ndarray of shape (num_sentences, max_seq_len) holding
            the padded index sequences.
        tokenizer: the fitted Tokenizer (exposes word_index / index_word).
    """
    tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=vocab_size,
                                                      filters='')
    tokenizer.fit_on_texts(sentences)
    sequences = tokenizer.texts_to_sequences(sentences)
    # padding='post': zeros go after the sentence, not before it.
    padded = tf.keras.preprocessing.sequence.pad_sequences(sequences,
                                                           padding='post')
    return padded, tokenizer
# + id="BMAkgHRZj1nE"
import tensorflow as tf
# + colab={"base_uri": "https://localhost:8080/"} id="sInSx-SkjKWe" outputId="cdd8f491-cad8-43a2-bd85-c3b9bb6683f7"
# all sentences (Questions & Answers)
# we need to fit the tokenizer on all sentences; to create word_index and
# index_word mapper dictionaries for the most frequent VOCAB_SIZE= 10,000 words.
texts = np.concatenate((X, Y))
# tokenize the data
tensor, text_tokenizer = tokenize(texts, VOCAB_SIZE)
print(f"texts[0]= {texts[0]}")
print(f"tensor[0]= {tensor[0]}")
# + [markdown] id="lmtkW2j6kIGw"
# As we see, each sentence has been converted to an array with indices that will be used to map words to their embedding vectors.
#
# Now, let's extract back X (for questions) and Y (for answers) arrays from tensor:
# + colab={"base_uri": "https://localhost:8080/"} id="8IwWWzIij6x1" outputId="26ea69b1-ddb5-4ff8-cf07-d216adc62201"
# extract questions and answers back from tensor
X, Y = tensor[:len(X)], tensor[len(X):]
print(f"Shape of X (questions): {X.shape}")
print(f"Shape of Y (answers): {Y.shape}")
# + [markdown] id="vvTrZyQnk3jv"
# Get the maximum sequence length for both input and target tensors:
# + id="fhxA6nfKk5mt"
def max_seq_length(tensor):
    """Return the common (padded) sequence length of all rows in `tensor`.

    Every row is expected to have been padded to the same length; this is
    verified before the length is returned.

    Arguments:
        tensor: an iterable of equal-length sequences (rows of token indices).
    Returns:
        The shared row length as an integer.
    """
    row_lengths = [len(row) for row in tensor]
    longest = max(row_lengths)
    # Padding should have equalized every row; fail loudly otherwise.
    assert all(length == longest for length in row_lengths)
    return longest
# + colab={"base_uri": "https://localhost:8080/"} id="_lspQyaDlpnW" outputId="6b4885dd-d3a8-4e84-edec-4c6a351356cc"
# Get max_seq_length of input and target tensors
max_length_inp, max_length_targ = max_seq_length(X), max_seq_length(Y)
print(f"Maximum sequence length for input (questions) tensor: {max_length_inp}")
print(f"Maximum sequence length for target (answers) tensor: {max_length_targ}")
# + id="OqZSW-HxmOGr"
# + [markdown] id="I9hiXIPFmTKy"
# Save the arrays as npy files:
# + id="jEIj7r3JmUVU"
# Persist the tokenized question/answer tensors so later runs can skip
# the preprocessing steps above.
data_arrays_path = os.path.join(files_path, 'data_arrays')
# create the folder if it does not exist
if not os.path.exists(data_arrays_path):
    os.makedirs(data_arrays_path)
np.save(os.path.join(data_arrays_path, 'X.npy'), X)
np.save(os.path.join(data_arrays_path, 'Y.npy'), Y)
# + id="eqkuDXpdmsG5"
def read_glove_vectors(glove_file):
    """Read GloVe vectors from a .txt file into a word → vector dictionary.

    Each line of the file is expected to look like: `<word> <v1> <v2> ...`.

    Arguments:
        glove_file: a string path to the GloVe word-embeddings file.
    Returns:
        word_to_vec: a dict mapping each word to its float64 NumPy vector.
    """
    word_to_vec = {}
    # The file is UTF-8 encoded; one word per line.
    with open(glove_file, 'r', encoding="utf-8") as f:
        for line in f:
            # First token is the word itself; the rest are vector components.
            parts = line.strip().split()
            word_to_vec[parts[0]] = np.array(parts[1:], dtype=np.float64)
    # (The original also accumulated a `words` set that was never used,
    # and re-imported numpy locally; both removed.)
    return word_to_vec
# + [markdown] id="r2ElvRTHoPWq"
# ## Download GloVe Vectors
# + colab={"base_uri": "https://localhost:8080/"} id="cLdQisZzoLhC" outputId="ae7d0831-46eb-4b83-9f45-42b7da1fdb05"
# !wget http://nlp.stanford.edu/data/glove.6B.zip
# + colab={"base_uri": "https://localhost:8080/"} id="QkzYZnMFoWe5" outputId="151e19ed-5883-4e6e-b9e3-22ed76a6a0ff"
# !unzip glove.6B.zip
# + colab={"base_uri": "https://localhost:8080/"} id="h7ujxU3jqABS" outputId="780e1588-f341-4ea5-c2d6-eb49430a25d5"
# !ls
# !pwd
# + id="_7tMpvoaqOSt"
glove_file = "/content/path/glove.6B.200d.txt"
word_to_vec = read_glove_vectors(glove_file)
# + id="_tBwaBBHqeZD"
# + [markdown] id="CVh6S9nBqsOc"
# # The Model
# **Choosing Hyperparameters:**
# + id="jL0Sa5t3q1IY"
BUFFER_SIZE = X.shape[0]      # shuffle buffer = full dataset size (perfect shuffle)
BATCH_SIZE = 128
steps_per_epoch = BUFFER_SIZE//BATCH_SIZE
embedding_dim = 200           # matches the 200-d GloVe vectors loaded above
units = 512                   # LSTM units per direction in the encoder
vocab_size = VOCAB_SIZE + 1   # +1 because Keras token indices start at 1 (0 is padding)
# + colab={"base_uri": "https://localhost:8080/"} id="kZx_YyGgs1uj" outputId="4eef287f-cde2-4db8-905e-3dbb13b4f32c"
print(f"Buffer size: {BUFFER_SIZE}, Batch size: {BATCH_SIZE}, Steps per epoch: {steps_per_epoch}")
print(f"Embedding size: {embedding_dim}, # of units: {units}")
print(f"Vocab size: {vocab_size}")
# + id="-X5NDv5ktT-B"
# cache these values into the hyperparameters dictionary
hyperparameters['buffer_size'] = BUFFER_SIZE
hyperparameters['batch_size'] = BATCH_SIZE
hyperparameters['steps_per_epoch'] = steps_per_epoch
hyperparameters['embedding_dim'] = embedding_dim
hyperparameters['units'] = units
hyperparameters['vocab_size'] = vocab_size
# + id="bT8XjR-kuc_N"
# + [markdown] id="xCM6FDiRumX_"
# Create the embedding matrix for words in the vocabulary:
# + id="wkEhofmluo-Q"
def create_embedding_matrix(words, word_to_vec, vocab_size, emb_dim):
    """Build the embedding matrix for the words in the vocabulary.

    Arguments:
        words: a list of vocabulary words; row i of the matrix corresponds
            to words[i].
        word_to_vec: a dict mapping words to their embedding vectors.
        vocab_size: an int, the number of rows in the matrix.
        emb_dim: an int, the dimension of the word embeddings.
    Returns:
        embedding_matrix: a NumPy array of shape (vocab_size, emb_dim).
            Rows for words missing from `word_to_vec` (and any rows beyond
            len(words)) remain all-zero.
    """
    # Start from zeros so out-of-vocabulary words need no explicit handling
    # (the original re-assigned zeros in an else-branch, which was a no-op).
    embedding_matrix = np.zeros((vocab_size, emb_dim), dtype=np.float64)
    for i, word in enumerate(words):
        # Direct `in` membership on the dict instead of `.keys()`.
        if word in word_to_vec:
            embedding_matrix[i, :] = word_to_vec[word]
    return embedding_matrix
# + id="HXxhqXqhwDGd"
# since Tensorflow tokenizer word_index still have all words even when
# vocab_size is passed while defining the Tokenizer.
# So, we need to grab the first 10,000 words.
words = list(text_tokenizer.word_index.keys())[:vocab_size]
embedding_matrix = create_embedding_matrix(words, word_to_vec, vocab_size,
embedding_dim)
# + colab={"base_uri": "https://localhost:8080/"} id="DYiK-GDEwSD4" outputId="6de4867a-f79b-460f-c1c2-e2aa38da33b0"
print(f"Embedding matrix shape: {embedding_matrix.shape}")
# + id="LbcZP8jkwYIQ"
# save the embedding matrix
np.save(os.path.join(data_arrays_path, 'embedding_matrix.npy'), embedding_matrix)
# + id="aMAGfbLRwhD1"
# + [markdown] id="rGh7vksnwul5"
# Create a tf.data dataset for X and Y with buffer size equal to BUFFER_SIZE and batch size equal to BATCH_SIZE, we will use this dataset to generate batches while training:
# + id="oOfB0cnJwvx3"
# Build the tf.data pipeline: shuffle buffer equals the dataset size,
# so each epoch sees a full reshuffle, then group into training batches.
dataset = tf.data.Dataset.from_tensor_slices((X, Y)).shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE)
# + id="3FeLjHWOxskF"
# + [markdown] id="HadyHRWvxz1G"
# Example of an input batch that the model will receive while iterating over dataset batches:
# + colab={"base_uri": "https://localhost:8080/"} id="7WfYzk3Ox0sb" outputId="1681fb8d-47da-4740-c3d2-35eea2b9de9b"
example_input_batch, example_target_batch = next(iter(dataset))
example_input_batch.shape, example_target_batch.shape
# + id="AvC3I9MBx_Jr"
# + [markdown] id="2GP0bcRIyIL1"
# # Model Architecture
# I'm going to use an encoder-decoder model with Bahdanau's attention, which has been described in detail in this paper:
#
# [Effective Approaches to Attention-based Neural Machine Translation](https://)
#
# Some properties of the model:
#
# The encoder will be a bidirectional encoder with 512 hidden units for each direction, resulting in 1024 cells.
#
# Both encoder & decoder will use LSTM as the cell.
#
# The decoder will be unidirectional with 1024 hidden units.
#
# **The Encoder**
# Define the bidirectional encoder architecture which will consist of the following:
#
# 1. An Embedding layer that will map input sentences to their embeddings. The vocabulary size is equal to VOCAB_SIZE=10,000 and the dimension of embedding is 200 (Note: although a power of 2 embedding size would be more suitable to speed up training time by increasing cache utilization during data movement, thus reducing bottlenecks, but GloVe embeddings do not come with a power of 2 embedding size).
#
# 2. A forward & backward (bidirectional) LSTM layer with 512 units for each.
#
# The input to the encoder has a shape of (batch_size, ), which is the input array of sentences (questions).
#
# The hidden state arrays of the encoder are all of the shape (batch_size, n_units).
#
# The output of the encoder has a shape of (batch_size, max_sequence_length, n_units x2) ;the hidden size of the output is equal to double the size of units since the encoder is bidirectional.
# + id="SdxXkt5zyo8e"
class Encoder(tf.keras.Model):
    """Bidirectional LSTM encoder.

    Embeds input token indices with a GloVe-initialized (and trainable)
    embedding layer and runs them through a bidirectional LSTM. The forward
    and backward states are concatenated, so the effective hidden size the
    decoder sees is enc_units * 2.
    """
    def __init__(self, vocab_size, embedding_dim, embedding_matrix, enc_units,
                 batch_sz):
        super(Encoder, self).__init__()
        self.batch_sz = batch_sz      # batch size used to build the zero initial states
        self.enc_units = enc_units    # LSTM units per direction
        # Embedding weights come from the pre-built GloVe matrix and are
        # fine-tuned during training (trainable=True).
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim,
                                                   weights=[embedding_matrix],
                                                   trainable=True)
        self.bi_lstm = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(self.enc_units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform'))
    def call(self, x, hidden):
        """Run `x` through the bidirectional encoder.

        Arguments:
            x: a tensor of token indices, shape (batch_size, max_seq_length).
            hidden: four tensors used as initial states — the first two feed
                the forward LSTM, the last two the backward one (each as
                [h, c]), every tensor of shape (batch_size, enc_units).
        Returns:
            output: encoder outputs, shape (batch_size, max_seq_length, units*2).
            state_h: concatenated forward/backward hidden state,
                shape (batch_size, units*2).
            state_c: concatenated forward/backward cell state,
                shape (batch_size, units*2).
        """
        x = self.embedding(x)
        # The bidirectional LSTM returns the sequence output plus the
        # (hidden, cell) states of each direction.
        output, fstate_h, fstate_c, bstate_h, bstate_c = self.bi_lstm(x, initial_state = hidden)
        # Fuse the two directions into single state vectors for the decoder.
        state_h = tf.keras.layers.Concatenate()([fstate_h, bstate_h])
        state_c = tf.keras.layers.Concatenate()([fstate_c, bstate_c])
        return output, state_h, state_c
    def initialize_hidden_state(self):
        # Zero (h, c) states for both directions, ordered as
        # (forward_h, forward_c, backward_h, backward_c).
        return (tf.zeros((self.batch_sz, self.enc_units)), tf.zeros((self.batch_sz, self.enc_units)),
                tf.zeros((self.batch_sz, self.enc_units)), tf.zeros((self.batch_sz, self.enc_units)))
# + id="P7Nguxho111f"
import tensorflow as tf
# + [markdown] id="Inyjb2reZONk"
# Define the encoder:
# + colab={"base_uri": "https://localhost:8080/"} id="saSLZxRrZPik" outputId="86158968-f67d-4c09-ff28-e8163ea4747e"
encoder = Encoder(vocab_size, embedding_dim, embedding_matrix, units,
BATCH_SIZE)
# usage example
sample_hidden = encoder.initialize_hidden_state()
sample_output, sample_h, sample_c = encoder(example_input_batch, sample_hidden)
print (f'Encoder output shape: (batch size, sequence length, units*2) {sample_output.shape}')
print (f'Encoder Hidden state shape: (batch size, units*2) {sample_h.shape}')
print (f'Encoder Memory state shape: (batch size, units*2) {sample_c.shape}')
# + [markdown] id="NZLjE5w0qmoJ"
# Bahdanau Attention Mechanism
# The following diagram shows that each input words is assigned a weight by the attention mechanism which is then used by the decoder to predict the next word in the sentence. The below picture and formulas are an example of attention mechanism from Effective Approaches to Attention-based Neural Machine Translation.
# To learn more about Bahdanau's attention, you can refer to this paper: [Neural Machine Translation by Jointly Learning to Align and Translate](https://)
# + id="-uqAKPwnqqx1"
class BahdanauAttention(tf.keras.layers.Layer):
    """Additive (Bahdanau-style) attention layer.

    Scores every encoder timestep against the decoder query with a small
    feed-forward network, softmaxes the scores over the time axis, and
    returns the weighted sum of the encoder values as the context vector.
    """
    def __init__(self, units):
        super(BahdanauAttention, self).__init__()
        # Attribute names W1/W2/V are kept: object-based checkpoints track
        # layer weights through these attribute names.
        self.W1 = tf.keras.layers.Dense(units*2)
        self.W2 = tf.keras.layers.Dense(units*2)
        self.V = tf.keras.layers.Dense(1)
    def call(self, query, values):
        """Compute (context_vector, attention_weights) for one decoder step.

        query:  decoder hidden state, shape (batch_size, hidden_size).
        values: encoder outputs, shape (batch_size, max_len, hidden_size).
        """
        # Add a time axis so the query broadcasts against every timestep:
        # (batch_size, 1, hidden_size).
        expanded_query = tf.expand_dims(query, 1)
        # Additive score per timestep; self.V collapses the feature axis,
        # giving shape (batch_size, max_len, 1).
        alignment = self.V(tf.nn.tanh(self.W1(expanded_query) + self.W2(values)))
        # Normalize over the time axis (axis=1) to obtain the weights.
        weights = tf.nn.softmax(alignment, axis=1)
        # Weighted sum over time yields the context: (batch_size, hidden_size).
        context = tf.reduce_sum(weights * values, axis=1)
        return context, weights
# + id="XsfWisphs8LI"
# + [markdown] id="4Sbg0HfKuEs5"
# Example of using Bahdanau's attention:
# + colab={"base_uri": "https://localhost:8080/"} id="fAomKEoeuGQ4" outputId="63d24d85-8ae1-434a-8537-b845348af42f"
# usage example
attention_layer = BahdanauAttention(10)
attention_result, attention_weights = attention_layer(sample_h, sample_output)
print(f"Attention result shape: (batch size, units*2) {attention_result.shape}")
print(f"Attention weights shape: (batch_size, sequence_length, 1) {attention_weights.shape}")
# + id="x9dIEFkfuRlc"
# + [markdown] id="iKZBrXBZvtGG"
# # The Decoder
# Define the decoder architecture which will consist of the following:
#
# 1. Bahdanau's attention which is applied to the output of the encoder to get the context which will be passed, along with the embeddings of the decoder input, to the LSTM layer of the decoder.
#
# 2. An Embedding layer with a vocabulary size of VOCAB_SIZE=10,000 words and an embedding dimension of 200. The embedding takes the decoder input which is of the shape (batch_size, ) and outputs a tensor of the shape (batch_size, 1, embedding_dim).
#
# 3. A forward (unidirectional) LSTM layer with 1024 units for each.
#
# The shape of the decoder's output is (batch_size, vocab_size) and the decoder hidden state size is of the shape (batch_size, units x2).
# + id="cEE9qNkbwCYR"
class Decoder(tf.keras.Model):
    """Unidirectional LSTM decoder with Bahdanau attention.

    At each timestep the decoder attends over the encoder outputs, prepends
    the resulting context vector to the current input embedding, runs one
    LSTM step, and projects the output to vocabulary logits.
    """
    def __init__(self, vocab_size, embedding_dim, embedding_matrix, dec_units,
                 batch_sz):
        super(Decoder, self).__init__()
        self.batch_sz = batch_sz
        self.dec_units = dec_units    # decoder hidden size
        # Embedding initialized from the GloVe matrix, fine-tuned in training.
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim,
                                                   weights=[embedding_matrix],
                                                   trainable=True)
        self.lstm = tf.keras.layers.LSTM(self.dec_units,
                                         return_sequences=True,
                                         return_state=True,
                                         recurrent_initializer='glorot_uniform')
        # Projects the LSTM output to unnormalized vocabulary logits.
        self.fc = tf.keras.layers.Dense(vocab_size)
        self.attention = BahdanauAttention(self.dec_units)
    def call(self, x, hidden, enc_output):
        """Run one decoding step.

        Arguments:
            x: a tensor with shape (batch_size, 1), the decoder input token
                at the current timestep.
            hidden: a tensor, the decoder's previous hidden state (initially
                the encoder's concatenated hidden state), shape
                (batch_size, units*2).
            enc_output: the encoder's outputs, shape
                (batch_size, sequence_length, units*2).
        Returns:
            x: a tensor of vocabulary logits, shape (batch_size, vocab_size).
            state: the decoder's new hidden state, shape (batch_size, units*2).
            attention_weights: a tensor of shape (batch_size, sequence_length, 1).
        """
        # enc_output shape == (batch_size, max_length, hidden_size)
        context_vector, attention_weights = self.attention(hidden, enc_output)
        # x shape after passing through embedding == (batch_size, 1, embedding_dim)
        x = self.embedding(x)
        # Prepend the context vector to the embedded input:
        # (batch_size, 1, embedding_dim + hidden_size)
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
        # One LSTM step over the single-timestep input.
        output, state_h, state_c = self.lstm(x)
        # Only the hidden state is fed back; the cell state is discarded here.
        state = state_h
        # Drop the time axis: (batch_size * 1, hidden_size)
        output = tf.reshape(output, (-1, output.shape[2]))
        # Project to logits: (batch_size, vocab_size)
        x = self.fc(output)
        return x, state, attention_weights
# + id="BG35BcgowQyQ"
# + [markdown] id="WAzhl9JW5FUF"
# Define the decoder:
# + colab={"base_uri": "https://localhost:8080/"} id="Zi4sRGFI5GcR" outputId="af89cb85-f5ed-4981-adc2-969500f10954"
decoder = Decoder(vocab_size, embedding_dim, embedding_matrix, units*2,
BATCH_SIZE)
# usage example
sample_decoder_output, dec_h, _ = decoder(tf.random.uniform((BATCH_SIZE, 1)),
sample_h, sample_output)
print(f'Decoder output shape: (batch_size, vocab size) {sample_decoder_output.shape}')
print(f'Decoder hidden state shape: (batch_size, units*2) {dec_h.shape}')
# + id="rR1o5CFa5LLO"
# + [markdown] id="95qhVlR25YiD"
# # Defining Optimizer, Loss Function and Metric
# Define the loss function:
# + id="4tkdI4ko5epc"
# Adam with default hyperparameters.
optimizer = tf.keras.optimizers.Adam()
# from_logits=True: the decoder's Dense layer outputs unnormalized logits.
# reduction='none': keep per-example losses so padding can be masked below.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True,
                                                            reduction='none')
def compute_loss(real, pred):
    """Masked cross-entropy loss for one decoding timestep.

    Arguments:
        real: the target token ids at this timestep, a tensor of
            shape (batch_size,) — a slice targ[:, t] at the call site.
        pred: the model's logits at this timestep, a tensor of
            shape (batch_size, vocab_size).
    Returns:
        A scalar tensor: the mean cross-entropy over the batch with
        padding positions (token id 0) zeroed out.
    """
    # Padding tokens have id 0; exclude them from the loss.
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    loss_ = loss_object(real, pred)
    mask = tf.cast(mask, dtype=loss_.dtype)
    loss_ *= mask
    return tf.reduce_mean(loss_)
# + id="NBiady7P56EC"
# + [markdown] id="j7Jxosdx5_bN"
# Define the perplexity metric:
#
# **Note**: I was going to incorporate BLEU score as another metric, but to compute BLEU score on a batch of data, it turns out that we need access the data to count the number of n-grams. In order to do that, we can't use Tensorflow's AutoGraph feature tf.function. Anyways, I tried it and using tf.function was around 2.5 times faster than dropping this feature only to compute BLEU score. In case you want to use BLEU score, you can use nmt's open source implementation which you can find [here](https://), or you can use NLTK's implementation.
# + id="BLo6Sikl6ELS"
def compute_perplexity(real, pred):
    """Masked perplexity (e ** mean cross-entropy) for one decoding timestep.

    Arguments:
        real: the target token ids at this timestep, a tensor of
            shape (batch_size,) — a slice targ[:, t] at the call site.
        pred: the model's logits at this timestep, a tensor of
            shape (batch_size, vocab_size).
    Returns:
        A scalar tensor holding the perplexity.
    """
    # Same padding mask as compute_loss: token id 0 marks padding.
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    loss_ = loss_object(real, pred)
    mask = tf.cast(mask, dtype=loss_.dtype)
    loss_ *= mask
    # NOTE(review): the mean below runs over all positions, including the
    # masked (zeroed) ones, which deflates perplexity on heavily padded
    # batches — confirm whether dividing by the mask sum was intended.
    return tf.cast(tf.pow(math.e, tf.keras.backend.mean(loss_, axis=-1)),
                   dtype=tf.keras.backend.floatx())
# + id="ExRXFbR56g6_"
# + [markdown] id="5y_Cijoc7BrL"
# # Checkpoints (Object-based saving)
# + id="EQPRj8j77D4x"
# Directory that holds the training checkpoints.
checkpoint_dir = '/content/path/training_checkpoints'
# create the folder if it does not exist
if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
# Object-based checkpoint: the optimizer and both networks are tracked
# by these attribute names, so restore() can rebuild the exact state.
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
                                 encoder=encoder,
                                 decoder=decoder)
# + id="sHIopPWj7fpM"
# + [markdown] id="KxkRQDXz7pYe"
# # TensorBoard
# + id="lmJ4GSiu7s_V"
# Load the TensorBoard notebook extension
# %load_ext tensorboard
# + id="I-12nHE57xTu"
# Timestamped log directory so every run gets its own TensorBoard trace.
log_file_name =f"/content/path/logs/metrics_{int(time.time())}"
# create the folder if it does not exist
if not os.path.exists(log_file_name):
    os.makedirs(log_file_name)
# Writer used by the training loop to emit per-epoch scalars.
summary_writer = tf.summary.create_file_writer(log_file_name)
# + id="c68Q00cF8Efg"
# + [markdown] id="9XE9zfxW8MA4"
# # Define training step:
# + id="UUOSmhOM8PLo"
@tf.function
def train_step(inp, targ, enc_hidden):
    """Run one teacher-forced training step on a batch.

    Arguments:
        inp: a tensor of question token ids,
            shape (batch_size, encoder_max_seq_len).
        targ: a tensor of answer token ids used for teacher forcing,
            shape (batch_size, decoder_max_seq_len).
        enc_hidden: a tuple of four tensors — the encoder's initial
            (forward_h, forward_c, backward_h, backward_c) states, each of
            shape (batch_size, enc_units).
    Returns:
        batch_loss: masked cross-entropy averaged over timesteps.
        batch_ppl: perplexity averaged over timesteps.
    """
    loss = 0
    ppl = 0
    with tf.GradientTape() as tape:
        # Encode the whole input batch once.
        enc_output, enc_h, enc_c = encoder(inp, enc_hidden)
        # The encoder's concatenated hidden state seeds the decoder.
        dec_hidden = enc_h
        # First decoder input: the '<start>' token for every sentence.
        dec_input = tf.expand_dims([text_tokenizer.word_index['<start>']]
                                   * BATCH_SIZE, 1)
        # Teacher forcing - feeding the target as the next input,
        # looping over timesteps (t=0 is the '<start>' token itself).
        for t in range(1, targ.shape[1]):
            # One decoder step: predictions (batch_size, vocab_size),
            # dec_hidden (batch_size, units*2); attention weights discarded.
            predictions, dec_hidden, _ = decoder(dec_input,
                                                 dec_hidden,
                                                 enc_output)
            # Accumulate masked loss and perplexity for this timestep.
            loss += compute_loss(targ[:, t], predictions)
            ppl += compute_perplexity(targ[:, t], predictions)
            # Teacher forcing: feed the ground-truth token, not the prediction.
            dec_input = tf.expand_dims(targ[:, t], 1)
    # Per-timestep averages are for reporting only; gradients below use the
    # raw summed `loss` (a constant scale does not change the direction).
    batch_loss = (loss / int(targ.shape[1]))
    batch_ppl = (ppl / int(targ.shape[1]))
    variables = encoder.trainable_variables + decoder.trainable_variables
    # Backpropagate through every decoding step recorded on the tape.
    gradients = tape.gradient(loss, variables)
    optimizer.apply_gradients(zip(gradients, variables))
    return batch_loss, batch_ppl
# + id="guTsnoOF8aBq"
# Nicely formatted time string
def hms_string(sec_elapsed):
    """Format an elapsed-seconds value as an `H:M:S` string.

    Arguments:
        sec_elapsed: elapsed time in seconds (int or float).
    Returns:
        A string like "1:5:23.4"; seconds are rounded to one decimal.
    """
    hours, remainder = divmod(sec_elapsed, 3600)
    minutes, seconds = divmod(remainder, 60)
    return f"{int(hours)}:{int(minutes)}:{round(seconds, 1)}"
# + id="ir_y3gxj_QZM"
# + [markdown] id="GNW0yWb9_Wal"
# ## Training the model:
# + colab={"base_uri": "https://localhost:8080/"} id="FwvI1LJT_YWE" outputId="9d54da1f-bfb7-4569-f063-5b752a785591"
print(hyperparameters)
# + id="ObaDUvlZ_eP4"
# + [markdown] id="4SARxgmL_6Sv"
# Let's train the model:
# + colab={"base_uri": "https://localhost:8080/"} id="sJ42PyUl_7p8" outputId="e45b5516-b9d1-48d0-d7b5-3f33e69f1d04"
EPOCHS = 10
start_time = time.time()
# to cache loss and perplexity over epochs
cache = dict({'train_loss': [], 'train_ppl':[]})
# Pin execution to the first GPU.
with tf.device('/device:GPU:0'):
    for epoch in range(EPOCHS):
        start_epoch = time.time()
        # Initialize encoder hidden state (zeros) once per epoch.
        enc_hidden = encoder.initialize_hidden_state()
        total_loss = 0
        total_ppl = 0
        # Training the model using the training data
        for (batch, (inp, targ)) in enumerate(dataset.take(steps_per_epoch)):
            # Train the model on the current batch.
            (batch_loss, batch_ppl) = train_step(inp, targ, enc_hidden)
            total_loss += batch_loss
            total_ppl += batch_ppl
            # Print loss and perplexity every 400 batches.
            if batch % 400 == 0:
                print(f"Epoch {epoch + 1}/{EPOCHS} - "
                      f"batch: {batch}/{steps_per_epoch} - "
                      f"loss: {batch_loss.numpy()} - ppl: {batch_ppl}")
        # Average loss and perplexity over the epoch's batches.
        total_loss = total_loss / steps_per_epoch
        total_ppl = total_ppl / steps_per_epoch
        # Log loss and perplexity to TensorBoard for the current epoch.
        with summary_writer.as_default():
            tf.summary.scalar('training_loss', total_loss, step=epoch)
            tf.summary.scalar('training_perplexity', total_ppl, step=epoch)
        # Save (checkpoint) the model every 15 epochs.
        # NOTE(review): with EPOCHS=10 this condition never fires, so no
        # checkpoint is ever written — confirm the intended save interval.
        if ((epoch+1) > 1) and ((epoch+1) % 15 == 0):
            checkpoint.save(file_prefix = checkpoint_prefix)
            print(f'Saved checkpoint for epoch {epoch+1}/{EPOCHS} to: {checkpoint_prefix}')
        # Cache the epoch's metrics for plotting later.
        cache['train_loss'].append(total_loss)
        cache['train_ppl'].append(total_ppl)
        # Print loss and perplexity for the current epoch.
        print(f"Epoch {epoch + 1}/{EPOCHS} - loss: {total_loss} - "
              f"ppl: {total_ppl}")
        print(f"Time taken for epoch ({epoch + 1}): "
              f"{time.time() - start_epoch} sec\n")
execution_time = (time.time() - start_time)
print(f'Elapsed time: {hms_string(execution_time)}')
# + [markdown] id="VWcQLRstauOw"
# Plot metrics over time:
# + id="Fe-lcqrlAYcE"
def plot_metrics(cache):
    """Plot the training loss and perplexity over epochs.

    Arguments:
        cache: a dict with keys 'train_loss' and 'train_ppl', each a list of
            scalar tensors (one entry per epoch).
    """
    # The cached items are tensors; pull out their Python scalar values.
    loss = [tensor.numpy() for tensor in cache['train_loss']]
    ppl = [tensor.numpy() for tensor in cache['train_ppl']]
    # Plot the loss
    plt.figure()
    plt.plot(loss, label='Training Loss')
    plt.title('Loss')
    plt.xlabel('Epochs')
    plt.legend()  # without this call the line label is never displayed
    plt.show()
    # Plot the perplexity (label fixed: it previously said 'Training Accuracy')
    plt.figure()
    plt.plot(ppl, label='Training Perplexity')
    plt.title('Perplexity')
    plt.xlabel('Epochs')
    plt.legend()
    plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 573} id="48tczVrMbQFw" outputId="55c8821a-433d-42fa-c75a-323177d0777f"
plot_metrics(cache)
# + [markdown] id="eGWHf6eWbsVC"
# # Inference Mode
# Now, it's time to try the model and ask it a few questions :)
#
# Helper function: to clean the text the user has entered.
# + id="F25u2_84bxbj"
def clean_text(text):
    """Normalize raw user text for the model.

    Applies, in order: quote/bracket normalization, spacing around
    punctuation, removal of disallowed characters, lower-casing, and
    whitespace collapsing.

    Arguments:
        text: any value; it is coerced to str first.
    Returns:
        The cleaned string.
    """
    import re
    text = str(text)
    # Normalize every quote variant to a single apostrophe.
    for quote in ('\"', '“', '”', '’'):
        text = text.replace(quote, '\'')
    # Normalize square/curly brackets to parentheses.
    for old, new in (('[', '('), (']', ')'), ('{', '('), ('}', ')')):
        text = text.replace(old, new)
    # Pad punctuation with spaces so each mark becomes its own token.
    text = re.sub("([?.!,:;'?!+\\-*/=%$@&()])", r" \1 ", text)
    # Drop any character outside the allowed alphanumeric/punctuation set.
    text = re.sub('[^a-zA-Z0-9_\\.\\,\\:\\;\\\'\\?\\!\\+\\-\\*\\/\\=\\%\\$\\@\\&\\(\\)]',
                  ' ', text)
    # Lower-case, collapse runs of spaces, and trim the ends.
    text = text.lower()
    text = re.sub(' +', ' ', text)
    return text.strip()
# + id="894XvPzNb7vH"
# + [markdown] id="7LVX2z55b_kv"
# The following function passes a sentence that has been entered by the user to the model and returns its answer along with the input.
#
#
# + id="0rUgD4MecAdz"
def evaluate(sentence):
    """
    Run the seq2seq chatbot on one input sentence, decoding greedily.

    Arguments:
        sentence: a string (the user's question).
    Returns:
        result: a string with the model's answer (space-separated tokens,
            ending with '<end>' when the model emits it).
        sentence: the cleaned, start/end-tagged version of the input.
    """
    # Normalize the question and wrap it with the markers the tokenizer
    # was trained with.
    sentence = '<start> ' + clean_text(sentence) + ' <end>'
    # Tokenize, then right-pad with zeros up to the encoder's maximum
    # input length, and move everything onto a tensor.
    token_ids = [text_tokenizer.word_index[word] for word in sentence.split(' ')]
    padded = tf.keras.preprocessing.sequence.pad_sequences([token_ids],
                                                           maxlen=max_length_inp,
                                                           padding='post')
    encoder_input = tf.convert_to_tensor(padded)
    # Fresh (zeroed) hidden state for the encoder.
    initial_state = (tf.zeros((1, units)),
                     tf.zeros((1, units)),
                     tf.zeros((1, units)),
                     tf.zeros((1, units)))
    enc_output, enc_hidden, enc_c = encoder(encoder_input, initial_state)
    dec_hidden = enc_hidden
    dec_input = tf.expand_dims([text_tokenizer.word_index['<start>']], 0)
    result = ''
    # Greedy decoding, capped at max_length_targ (=32) output tokens.
    for _ in range(max_length_targ):
        predictions, dec_hidden, _attention = decoder(dec_input,
                                                      dec_hidden,
                                                      enc_output)
        predicted_id = tf.argmax(predictions[0]).numpy()
        word = text_tokenizer.index_word[predicted_id]
        result += word + ' '
        if word == '<end>':
            break
        # the predicted ID is fed back into the model as the next input
        dec_input = tf.expand_dims([predicted_id], 0)
    return result, sentence
# + id="swhPdcLhcNuV"
def answer(sentence):
    """
    Print the chatbot's reply to *sentence* together with the cleaned
    version of the user's input.

    Arguments:
        sentence: a string.
    """
    reply, cleaned = evaluate(sentence)
    print(f'INPUT: {cleaned}')
    print(f'CHATBOT ANSWER: {reply}')
# + [markdown] id="52LmYigxcYo4"
# Let's try to talk to the chatbot:
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="YHPv7nbKcZo3" outputId="b585d000-ad25-4928-8af5-982bd63f4e37"
# Sample conversation: probe the chatbot with a variety of questions.
answer('tell me something funny')
answer('What is it that you want in life?')
answer("you're so fun")
answer("do you like annoying people?")
answer('are you my friend?')
answer("what is your favorite movie?")
answer("how do you sleep at night?")
answer("what will happen if you went inside a black hole?")
answer('If we count sheep to fall asleep, what do they count?')
answer("Why do people order double cheese burgers, large fries, and a diet coke?")
answer("What is always coming, but never arrives?")
answer("who is the most stupid person you know?")
answer("knock knock")
answer("tell me a joke")
answer("Do you exercise?")
answer("How do you keep a clear mind during hard times?")
answer("who is your best friend?")
answer("what do you have in mind?")
answer("who is you favorite singer?")
answer("Are you enjoying this conversation?")
answer("Do you know Google assistant?")
answer("what do you like to talk about the most?")
answer("Why do you like to talk about fire?")
answer("What was the best thing before sliced bread?")
answer("What is better than the FOUNTAIN OF YOUTH?")
answer("Are you lazy?")
answer("How do you know that you'll never quit smoking?")
answer("When will you quit smoking?")
answer("Who talks the most?")
answer("Who lies the most?")
answer("What do you know about computers?")
answer("Where do like to travel?")
answer("Which country do you want to visit?")
answer("Which type of music do you like?")
answer("what's your job?")
answer("Are you an artificial intelligence model?")
answer("How are you doing?")
answer("How are you?")
answer("How was your day?")
answer("Where do you vacation?")
answer("Thanks for your time.")
answer("What is your least favorite food?")
answer("What is the most hilarious childhood memory you can think of?")
answer("Are you funny?")
answer("Beethoven or Bach?")
answer("How do you like your coffee?")
answer("What is your favorite sport?")
answer("What is your favorite car?")
# + id="BrwjZ7qEgT7H"
# + [markdown] id="oWm7WQsygXmA"
# Start a conversation with the chatbot:
#
# The following function takes an input sentence from the user and prints the model's answer to it until the user enters exit() to finish the conversation:
# + id="8WZs-gXkgYl4"
def chat():
    """
    Interactive loop: read user input and print the chatbot's answer
    until the user enters `exit()`.
    """
    print("Start your conversation with the chatbot!")
    print("If you want to end this conversation, enter: exit()\nHave fun!")
    while True:
        # take user input (input() already returns a str)
        user_input = input('>>')
        if user_input == 'exit()':
            break
        try:
            # generate an answer for the user's question.
            chatbot_answer, _ = evaluate(user_input)
            print(chatbot_answer)
        except KeyError:
            # A word outside the chatbot's vocabulary raises KeyError in
            # the tokenizer lookup.  The previous bare `except:` swallowed
            # every exception (including KeyboardInterrupt), hiding bugs.
            print("Oops! can't help you there! Try different words or restructure your sentence.")
# + colab={"base_uri": "https://localhost:8080/"} id="_ovK5NNJgk9m" outputId="ff0eb0ed-0cff-4ac6-f399-492df74302f4"
chat()
# + [markdown] id="9nUclib2hlTy"
# # Conclusion
# As we can see, not all the answers generated by the model are good or valid, and there is a lot to improve here. For example, some answers contain repeated blocks of words. Also, some answers are not valid responses to the questions and do not relate to the topic proposed in the question. This level of performance is expected for a deep-learning chatbot trained on such a small dataset, but it is acceptable for a demo project that can be improved.
# + id="gP1oXOSpgokr"
# + [markdown] id="lOnirwdPhsSH"
# # Next
# ## What can we do to improve performance:
# **More data**: if we want to have good performance with somewhat impressive responses, we need to train on a lot more data, maybe millions of pairs. However, training on a large scale dataset requires more computational power (maybe a more powerful GPU) and a more sophisticated model with more layers and units, let alone finding and acquiring such dataset.
#
# **DISCLAIMER**: the model has been trained on 175,671 question-answer pairs which have been gathered from different sources like reddit, and it turns out that some of the model's answers are impolite, just something to be aware of.
#
#
#
#
# ## Further Reading:
# - Effective Approaches to Attention-based Neural Machine Translation
#
# - Neural machine translation with attention
#
# - Neural Machine Translation by Jointly Learning to Align and Translate
#
# - Neural Machine Translation (seq2seq) Tutorial
# + id="y8xtTTOSiKfI"
# !ls {checkpoint_dir}
# + id="f28Ce-DuotAv"
|
Smart_bot.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def second_lowest_names(records):
    """Return the alphabetically sorted names whose grade equals the
    second-lowest *distinct* grade in records ([[name, grade], ...])."""
    # De-duplicate grades first so ties at the minimum don't hide the
    # true second-lowest value, then take the second distinct grade.
    second_lowest = sorted({grade for _, grade in records})[1]
    return sorted(name for name, grade in records if grade == second_lowest)


if __name__ == '__main__':
    # First line: number of students; then alternating name/grade lines.
    n = int(input())
    nlist = [[input(), float(input())] for _ in range(n)]
    # Print the matching names, one per line, in alphabetical order.
    print('\n'.join(second_lowest_names(nlist)))
# `n = int(input())` is the first value of `5` entered above, which indicates how many students, along with their total scores, are listed. `nlist = [[input(), float(input())] for _ in range(n)]` iterates through students/points items entered. `[input(), float(input())]` sets the format of each nested list, e.g. `['Harry', 37.21]`. `nlist` stores the nested lists: `[['Harry', 37.21], ['Berry', 37.21], ['Tina', 37.2], ['Akriti', 41.0], ['Harsh', 39.0]]`. `second_lowest = sorted(list(set([marks for name, marks in nlist])))[1]` gets the second lowest score. The `set()` function removes the extra `37.21` value so that the second lowest value can be indexed, e.g. `[1]`. Finally, `'\n'.join([a for a, b in sorted(nlist) if b == second_lowest])` is for iterating through the identical second lowest scores, sorting them, and printing them line by line, similar to format.
|
python-note/python_notebooks-master/types/nested-list-comprehension.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 17} colab_type="code" executionInfo={"elapsed": 891, "status": "ok", "timestamp": 1530189153873, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-lEB64EIQ2g8/AAAAAAAAAAI/AAAAAAAAALs/KdUALeppBj8/s50-c-k-no/photo.jpg", "userId": "104166334908365009478"}, "user_tz": -330} id="hKsW3roGKPBV" outputId="d06eb8f7-0ba3-4c56-e563-91bb36970d6c"
from __future__ import print_function
import numpy as np
from PIL import Image
from matplotlib.pyplot import imshow
import os
from keras.models import model_from_yaml
from keras.applications.vgg16 import VGG16
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 236} colab_type="code" executionInfo={"elapsed": 888, "status": "error", "timestamp": 1530190596404, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-lEB64EIQ2g8/AAAAAAAAAAI/AAAAAAAAALs/KdUALeppBj8/s50-c-k-no/photo.jpg", "userId": "104166334908365009478"}, "user_tz": -330} id="jJTrzzJlKXDC" outputId="e591b803-1170-48b9-b620-5c6370e7e4f8"
# Rebuild the trained Keras model from its YAML architecture file and
# saved weights, and count the available test images.
test_path = '../test-jpg/'
test_count = len(os.listdir(test_path))
# Per-channel means subtracted from images before prediction -- these are
# the VGG16/ImageNet BGR means; presumably matching the training
# preprocessing (TODO confirm against the training pipeline).
norm = [103.939, 116.779, 123.68]
yaml_file = open('./model.yaml', 'r')
load_model = yaml_file.read()
yaml_file.close()
model = model_from_yaml(load_model)
model.load_weights('../checkpoints/weights.hdf5')
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="tbIhqcMmdg3v"
# Pick a random test image, preprocess it, and print every label whose
# predicted probability exceeds 0.5 (multi-label classification).
img_resize = (224,224)
rand_id = np.random.randint(test_count, size = 1)
img = Image.open(os.path.join(test_path,'test_'+str(rand_id[0])+'.jpg'))
img.thumbnail(img_resize)
img_array = np.asarray(img.convert("RGB"), dtype=np.float32)
imshow(img_array)
# Mean-subtract each channel, matching the `norm` values defined above.
for ch in range(img_array.shape[2]):
    img_array[:, :, ch] -= norm[ch]
# Index -> label mapping for the model's 17 output classes.
y_map = {0: 'agriculture', 1: 'artisinal_mine', 2: 'bare_ground', 3: 'blooming', 4: 'blow_down', 5: 'clear', 6: 'cloudy',
       7: 'conventional_mine', 8: 'cultivation', 9: 'habitation', 10: 'haze', 11: 'partly_cloudy', 12: 'primary', 13: 'road', 14: 'selective_logging', 15: 'slash_burn', 16: 'water'}
# Add the batch dimension expected by model.predict.
img_array_reshape = np.reshape(img_array, (1, 224, 224, 3))
pred = model.predict(img_array_reshape)
scores = np.where(pred>0.5)[1]
for item in scores:
    print(y_map[item])
# -
|
demo/.ipynb_checkpoints/demo-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Welcome to the geoapps
#
# Our collection of open-source applications for geosciences.
#
# See the [**Online Documentation**](https://geoapps.readthedocs.io/en/latest/content/applications.html) for details.
#
# Check for updates on the Release Page.
# ## Workspace
#
#
# |[<img align="left" width="200px" src="./images/export_thumbnail.png">](Export_to.ipynb)|
# |:---:|
# | [Export](./Export_to.ipynb) |
#
# ## Object/Data Utilities
# | [<img align="left" width="250px" src="./images/object_selection_thumbnail.png">](./Calculator.ipynb) | [<img align="left" width="200px" src="./images/contours_thumbnail.png">](./Create_Contours.ipynb) | [<img align="left" width="200px" src="./images/coordinate_transform_thumbnail.png">](./Coordinate_Transformation.ipynb) |
# |:---:|:---:|:---:|
# | [Calculator](./Calculator.ipynb) | [Contouring](./Create_Contours.ipynb) | [Coordinate Transformation](Coordinate_Transformation.ipynb) |
#
# | [<img align="left" width="300px" src="./images/data_interp_thumbnail.png">](./Data_Transfer.ipynb) | [<img align="left" width="300px" src="./images/model_surface.png">](./Create_Surfaces.ipynb) | [<img align="left" width="300px" src="./images/iso_surface.png">](./Create_Iso_Surfaces.ipynb)|
# |:---:|:---:|:---:|
# | [Data Transfer](./Data_Transfer.ipynb) | [Create 2.5D Surfaces](./Create_Surfaces.ipynb) | [Create Isosurfaces](./Create_Iso_Surfaces.ipynb)|
#
# | [<img align="left" width="200px" src="./images/octree_mesh_creation.png">](./Octree_Mesh_Creation.ipynb) |
# |:---:|
# | [Octree Mesh Creation](./Octree_Mesh_Creation.ipynb) |
# ## Machine Learning
# | [<img align="center" width="250px" src="./images/peak_finder_parameters.png">](./Peak_Finder.ipynb)| [<img align="center" width="250px" src="./images/cluster_thumbnail.png">](./Data_Clustering.ipynb)| [<img align="center" width="250px" src="./images/edge_detection_thumbnail.png">](./Edge_Detection.ipynb)|
# |:---:|:---:|:---:|
# |[Peak Finder](./Peak_Finder.ipynb)|[Data Clustering](./Data_Clustering.ipynb)| [Edge Detection](./Edge_Detection.ipynb)|
# ## Geophysical Inversion
# | [<img align="center" width="250px" src="./images/inversion_grav_mag_thumbnail.png">](./Inversion_Grav_Mag_app.ipynb)| [<img align="center" width="250px" src="./images/inversion_em1d_thumbnail.png">](./Inversion_EM1D_app.ipynb)|[<img align="center" width="250px" src="./images/inversion_dcip_thumbnail.png">](./Inversion_DCIP_app.ipynb)|
# |:---:|:---:|:---:|
# |[Gravity and Magnetics](./Inversion_Grav_Mag_app.ipynb)|[EM-1D (Time/Frequency)](./Inversion_EM1D_app.ipynb)|[DC&IP 3D](./Inversion_DCIP_app.ipynb)|
# ## Visualization
#
# |[<img align="center" width="300px" src="./images/scatter_thumbnail.png">](./scatter_plots.ipynb)|
# |:---:|
# | [Scatter Plots](./scatter_plots.ipynb) |
#
# Need help? Contact us at <EMAIL>
|
geoapps/applications/Index.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (mlenv)
# language: python
# name: mlenv
# ---
# +
import os
import sys
import matplotlib.pyplot as plt
import IPython.display as ipd
import pandas as pd
import re
import subprocess
import numpy as np
import math
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
sys.path.append('../src')
# -
# Directory containing the per-fold prediction CSVs for each submission.
pred_path = './fusion/'
# +
# Model descriptors: CSV filename prefix and number of CV folds.
subm2 = {
    'name': 'subm2',
    'folds': 4,
}
subm3_resnet = {
    'name': 'subm3(resnet)',
    'folds': 4,
}
subm3_svm = {
    'name': 'subm3(svm)',
    'folds': 4,
}
subm4 = {
    'name': 'subm4',
    'folds': 10,
}
subm4_rv3 = {
    'name': 'subm4(rv3)',
    'folds': 10,
}
subm4_rv4 = {
    'name': 'subm4(rv4)',
    'folds': 10,
}
# +
def plot_confusion_matrix(cm, classes, title='Confusion Matrix', color_map=plt.cm.Blues, fig_path=None):
    """
    Print and plot a confusion matrix.

    Arguments:
        cm: square numpy array of integer counts (rows = true labels,
            columns = predicted labels).
        classes: sequence of class labels used for the tick marks.
        title: figure title (falls back to 'Confusion matrix' if falsy).
        color_map: matplotlib colormap for the image.
        fig_path: if given, save the figure there (dpi=300) instead of
            showing it interactively.
    """
    if not title:
        title = 'Confusion matrix'
    # Row-normalize so each cell can also show the per-class percentage.
    norm_cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    # Print both views with reduced float precision, then restore the
    # caller's numpy print options.  (The previous version hard-coded
    # precision=6 afterwards, clobbering whatever was configured before.)
    saved_printoptions = np.get_printoptions()
    np.set_printoptions(precision=3)
    print('Confusion matrix with normalization')
    print(norm_cm)
    print('Confusion matrix without normalization')
    print(cm)
    np.set_printoptions(**saved_printoptions)
    fig, ax = plt.subplots(figsize=(8, 6))
    im = ax.imshow(cm, interpolation='nearest', cmap=color_map)
    ax.figure.colorbar(im, ax=ax)
    # Show all ticks, labelled with the class names.
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the x tick labels and set their alignment for readability.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Annotate each cell with its count and row percentage, choosing a
    # text colour that contrasts with the cell background.
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, '{0:d}\n{1:.2f}%'.format(cm[i, j], norm_cm[i, j] * 100),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    if fig_path:
        plt.savefig(fig_path, dpi = 300)
    else:
        plt.show(block=False)
# +
from sklearn.metrics import recall_score, confusion_matrix

# Per-fold alpha-weighted fusion of two models' prediction scores.
# For every fold, grid-search the weight alpha in {0, 1/40, ..., 1} that
# maximizes development-set UAR of alpha*model1 + (1-alpha)*model2, then
# apply the chosen alpha to that fold's test predictions.
uars = []
models = [subm4_rv3, subm4_rv4]
all_preds = []
all_targets = []
alphas = [0 for i in range(0, models[0]['folds'])]
all_test_preds = []
for i in range(0, models[0]['folds']):
    all_devel = []
    all_test = []
    for model_i in range(len(models)):
        # CSV layout: column 0 = prediction score, column 2 = true label.
        devel_data = np.loadtxt(open(os.path.join(pred_path, "{}_devel_preds_{}.csv".format(models[model_i]['name'], i)), "rb"),
                                delimiter=",", skiprows=0)
        test_data = np.loadtxt(open(os.path.join(pred_path, "{}_test_preds_{}.csv".format(models[model_i]['name'], i)), "rb"),
                               delimiter=",", skiprows=0)
        all_devel.append((devel_data[:,0], devel_data[:,2]))
        all_test.append(test_data[:, 0])
    uar_max = 0
    # Grid-search the fusion weight on the development predictions.
    for alpha in range(0, 41):
        alpha = alpha / 40
        preds = alpha * all_devel[0][0] + (1 - alpha) * all_devel[1][0]
        # Scores > .5 map to class 0, otherwise class 1.
        uar = recall_score(all_devel[0][1], np.asarray([0 if p > .5 else 1 for p in preds]), average='macro')
        if uar > uar_max:
            uar_max = uar
            alphas[i] = alpha
    preds = alphas[i] * all_devel[0][0] + (1 - alphas[i]) * all_devel[1][0]
    all_preds.append([0 if p > .5 else 1 for p in preds])
    all_targets.append(all_devel[0][1])
    print('Fold: {}. Alpha: {}'.format(i, alphas[i]))
    uars.append(uar_max)
    print('Best UAR: {}'.format(uar_max))
    print('Model 1 Uar: {}'.format(recall_score(all_devel[0][1], np.asarray([0 if p > .5 else 1 for p in all_devel[0][0]]), average='macro')))
    print('Model 2 Uar: {}'.format(recall_score(all_devel[1][1], np.asarray([0 if p > .5 else 1 for p in all_devel[1][0]]), average='macro')))
    test_preds = alphas[i] * all_test[0] + (1 - alphas[i]) * all_test[1]
    all_test_preds.append(test_preds)
print('Mean UAR: {}'.format(recall_score(np.concatenate(all_targets), np.concatenate(all_preds), average='macro')))
# BUG FIX: `labels` is keyword-only in scikit-learn >= 1.0; passing the
# label list positionally raises a TypeError there.
cm = confusion_matrix(np.concatenate(all_targets), np.concatenate(all_preds), labels=[0, 1])
res_name = 'Weighted'
plot_confusion_matrix(cm=cm, classes=[0, 1], title='{}.png'.format(res_name), fig_path=None)
# +
# Build the submission file: average the fused test predictions across
# folds and map each score to its class label.
pred_file_name = 'ComParE2020_Mask.{0}.test.IIAS_5.csv'.format('mel_64')
# File order is taken from the previous submission to keep rows aligned.
file_names = pd.read_csv('ComParE2020_Mask.mel_64.test.IIAS_4.csv')['file_name'].values
testing_predicts = np.mean(np.stack(all_test_preds), axis=0)
# Scores > .5 map to 'clear', otherwise 'mask' (same polarity as above).
submission_df = pd.DataFrame.from_dict({'file_name': [f for f in file_names],
                                        'prediction': ['clear' if pred > .5 else 'mask' for pred in testing_predicts]})
submission_df.to_csv(pred_file_name, index=False)
# +
# Compare the new submission against earlier ones.  The D=/T= comments
# record development / test UAR percentages for each file.
# D=63.4 | T=70.8
test_top_1 = pd.read_csv('../src/applications/ComParE2020_Mask.DeepSpectrum_resnet50.test.baseline_1.csv')
# D=77.232 | T=74.0
subm1 = pd.read_csv('ComParE2020_Mask.mel_64.test.IIAS_1.csv')
# D=79.127 | T=75.1
subm2 = pd.read_csv('ComParE2020_Mask.mel_64.test.IIAS_2.csv')
# D=82.17 | T=75.3
subm4 = pd.read_csv('ComParE2020_Mask.mel_64.test.IIAS_4.csv')
new_subm = pd.read_csv('ComParE2020_Mask.mel_64.test.IIAS_5.csv')
# +
# Mark, per row, whether each submission's prediction agrees with the
# top baseline's prediction.
test_top_1['predMatch_subm1?'] = np.where(test_top_1['prediction'] == subm1['prediction'], 'True', 'False')
test_top_1['predMatch_subm2?'] = np.where(test_top_1['prediction'] == subm2['prediction'], 'True', 'False')
test_top_1['predMatch_subm4?'] = np.where(test_top_1['prediction'] == subm4['prediction'], 'True', 'False')
test_top_1['predMatch_subm5?'] = np.where(test_top_1['prediction'] == new_subm['prediction'], 'True', 'False')
# -
test_top_1.describe()
subm4['predMatch_new_subm?'] = np.where(subm4['prediction'] == new_subm['prediction'], 'True', 'False')
subm4.describe()
|
Mask/notebooks/1.0-Maxim-Fusion.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/stephenbeckr/randomized-algorithm-class/blob/master/Demos/demo08_higherAccuracyRegression.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="pcXAkDfs6OJD"
# # High-accuracy sketched least-squares
#
# Demo of the
# 1. Iterative Hessian Sketch (IHS) cf. Pilanci and Wainwright; and of the
# 2. preconditioned approaches (BLENDENPIK, LSRN)
#
# These are two methods to get high-accuracy l2 regression
#
# The goal is to approximate the solution of
# $$ \min_{x} \| Ax-b \|_2^2 $$
# where $A$ is $M \times N$ and we are assuming $M \gg N$.
#
# Code: <NAME>, Oct 2021
#
# References:
# - "Iterative Hessian Sketch: Fast and Accurate Solution
# Approximation for Constrained Least-Squares" (Pilanci, Wainwright; JMLR 2016
# http://www.jmlr.org/papers/volume17/14-460/14-460.pdf )
# - "Blendenpik: Supercharging LAPACK's Least-Squares Solver" (Avron et al. 2010, https://epubs.siam.org/doi/abs/10.1137/090767911);
# - "LSRN: A Parallel Iterative Solver for Strongly Over- or Underdetermined Systems" (Meng et al. 2014, https://epubs.siam.org/doi/abs/10.1137/120866580 )
# + id="D-WX9lSc9XBx"
import numpy as np
import numpy.linalg
from numpy.linalg import norm
from numpy.random import default_rng
rng = default_rng()
from matplotlib import pyplot as plt
import scipy.linalg
# Download sketching code
# !wget -q https://raw.githubusercontent.com/stephenbeckr/randomized-algorithm-class/master/Code/sketch.py
import sketch as sk
# + [markdown] id="ydwuiw9T4CsM"
# Setup some problem data
# + id="WmciMdGD9q_v"
# Problem sizes: M samples, N unknowns (M >> N, overdetermined).
M, N = int(14e4), int(5e2)
# M, N = int(8e4), int(5e2)
# Deliberately ill-conditioned design matrix: Gaussian columns scaled by
# a logspaced spectrum, mixed through a near-singular square matrix.
A = rng.standard_normal( (M,N) )@np.diag(np.logspace(0,3,N))@(
    rng.standard_normal((N,N) ) + 0.1*np.eye(N) )
x = rng.standard_normal( (N,1) )
b = A@x
b += 0.3*norm(b)/np.sqrt(M)*rng.standard_normal( (M,1) ) # add noise
# (The larger the noise, the worse sketch-to-solve will perform )
# + [markdown] id="oAU-PEWV4Exa"
# #### Solve via standard direct solver, nothing randomized
# + colab={"base_uri": "https://localhost:8080/"} id="TTaJVxq2Ipmv" outputId="68423543-d4fd-42d8-8487-95884294c4aa"
print("Solving via classical dense method")
# %time xLS, residLS, rank, singVals = np.linalg.lstsq(A,b,rcond=None)
print(f'Condition number of A is {singVals[0]/singVals[-1]:.3e}')
AxLS = A@xLS
# print(f'Relative residual ||Ax-b||/||b|| is {norm(AxLS-b)/norm(b):.2f}')
print(f'Relative residual ||Ax-b||/||b|| is {np.sqrt(residLS[0])/norm(b):.2f}')
# and use this to create error metrics
def errors(x):
    """Return three relative error metrics for a candidate solution x,
    measured against the direct least-squares solution (uses the
    notebook globals A, b, xLS, AxLS):
    (objective-value error, relative error in x, IHS-analysis error)."""
    # ravel() everything up front: subtracting an (n,1) array from an
    # (n,) array would broadcast to (n,n), which is not what we want.
    Ax = np.ravel(A @ x)
    b_flat = np.ravel(b)
    AxLS_flat = np.ravel(AxLS)
    objective_err = norm(Ax - b_flat) / norm(AxLS_flat - b_flat) - 1
    solution_err = norm(np.ravel(x) - np.ravel(xLS)) / norm(xLS)
    ihs_err = norm(Ax - AxLS_flat) / norm(AxLS)
    return objective_err, solution_err, ihs_err
# + [markdown] id="tIhugSMX29EV"
# ### Choose a sketch to use
# Usually choose FJLT, but could choose Gaussian (if problem isn't too big) or CountSketch (if problem is huge)
# + colab={"base_uri": "https://localhost:8080/"} id="sTWGiD0gZa4t" outputId="590ce13c-ada7-49ea-b735-25d291712a6b"
# %%time
m = 40*N # sketch size
print(f"m is {m}, M is {M}, N is {N}")
# Pick a sketching operator.  The `and False` / `elif False` guards are
# manual toggles: flip them to try the Gaussian or Count sketch instead
# of the FJLT default.
if M < 1e4 and False:
    # This runs out of memory if M is too large
    S = sk.Gaussian( (m,M) )
    print('Using a Gaussian sketch')
elif False:
    # == Use a count-sketch:
    S = sk.Count( (m,M) )
    print('Using a Count sketch')
else:
    # == ... or try a FJLT ...
    S = sk.FJLT( (m,M) )
    print('Using a FJLT sketch')
SA = S@A
Sb = S@b
print(f'||Sb||/||b|| is {norm(Sb)/norm(b):.4f}')
# + id="5eLg0IALaPIa"
def full_sketch(SA, Sb, cond=1e-12, columnVec=True):
    """Sketch-to-solve least squares: solve min_x || S(Ax-b) ||_2.

    SA should be S@A and Sb should be S@b.  Singular values below the
    relative threshold `cond` are treated as zero.  When `columnVec` is
    True the result is reshaped to (n,1); otherwise it follows Sb's
    shape convention ((n,1) for an (m,1) Sb, (n,) for an (m,) Sb).
    """
    solution, *_ = scipy.linalg.lstsq(SA, Sb, cond=cond, lapack_driver='gelsd')
    return np.reshape(solution, (-1, 1)) if columnVec else solution
def partial_sketch(SA, Atb, printOutput=False, solver=0, reg=0, columnVec=True):
    """ SA should be S@A and Atb should be A.T@b
    Solves min_x ||SAx||_2^2 - 2<x,A^T b>,
        i.e., x = ( (SA)^T SA )^{-1} A^T b
    Solver choices:
        solver=0 uses scipy.linalg.solve on (SA)^T(SA), which is fast
          but less accurate since it squares the condition number of SA;
          recommended for all but the most ill-conditioned problems.
          Set reg>0 (e.g., reg=1e-10) to add a small amount of
          regularization (relative to the largest singular value).
        solver=1 uses a pivoted QR decomposition and is more appropriate
          when the matrix is ill-conditioned, but a bit slower.
          `reg` has no effect.
        solver=2 uses an unpivoted QR decomposition and is a bit faster
          than solver=1. `reg` has no effect.
    If columnVec, the result is reshaped to (n,1); otherwise it keeps
    the same shape convention as Atb.
    """
    # Number of unknowns.  (BUG FIX: this used to read the notebook-global
    # N, which silently breaks if the function is reused on a problem of
    # a different size.)
    n = SA.shape[1]
    if solver == 0:
        if reg is None or reg == 0:
            # Plain normal equations; fails if SA is ill-conditioned.
            x = scipy.linalg.solve(SA.T@SA, Atb, assume_a='pos')
        else:
            # Slightly better for ill-conditioned problems (still not
            # great): add relative Tikhonov regularization to the Gram
            # matrix before solving.
            G = SA.T@SA
            normG = norm(G, ord=2)
            if printOutput:
                print(f"||G|| is {normG:.2e} and has condition number {np.linalg.cond(G):.2e}")
            x = scipy.linalg.solve(G + reg*normG*np.eye(n), Atb, assume_a='pos')
    elif solver == 1:
        # Factor SA = QR (column-pivoted); then G = P R^T R P^T and we can
        # solve via two triangular back-substitutions, never forming G.
        R, perm = scipy.linalg.qr(SA, mode='r', pivoting=True)
        # In mode='r', R is rectangular, not square ('economic' mode
        # would give the square factor but is slower).
        R = R[:n, :]
        y = scipy.linalg.solve_triangular(R, Atb[perm], trans='T')
        x = np.zeros_like(y)
        x[perm] = scipy.linalg.solve_triangular(R, y, trans='N')
    elif solver == 2:
        # Same as solver==1 but unpivoted; numpy's qr returns the thin R
        # directly (numpy does not support pivoting).
        R = numpy.linalg.qr(SA, mode='r')
        y = scipy.linalg.solve_triangular(R, Atb, trans='T')
        x = scipy.linalg.solve_triangular(R, y, trans='N')
    if printOutput:
        res = norm(SA.T@(SA@x) - Atb)/norm(Atb)
        print(f'Relative residual ||(SA)^T (SA)x - A^T b||/||A^T b|| is {res:.2e}')
    if columnVec:
        return np.reshape(x, (-1, 1))  # make sure it is (n,1), not (n,)
    else:
        return x
# + [markdown] id="ueJ8T0iC3WVy"
# # IHS (Iterative Hessian Sketch) demo
# #### Start solving regression problems with the sketches
#
# The "full sketch" is the standard "sketch-to-solve" which is our baseline method. We don't expect it to be that good in $\|\hat{x}-x_\text{LS}\|$ unless the data $b$ is almost entirely in the column space of $A$.
# + colab={"base_uri": "https://localhost:8080/"} id="_chnpN663Vth" outputId="140c5a5f-c4d3-4413-bddc-0986f6a8b544"
# Compare the two sketch-and-solve baselines against the direct solution.
print(f'\nFull sketch')
# NOTE(review): the `%time` lines are notebook magics; xFull and
# xPartial are only defined when run under IPython/Jupyter.
# %time xFull = full_sketch( SA, Sb )
err1, err2, err3 = errors(xFull)
print( f'\n\tErrors are {err1:.1e}, {err2:.1e} and {err3:.1e}' )
print(f'\nPartial sketch')
# %time xPartial = partial_sketch( SA, A.T@b, printOutput=True, solver=0)
err1, err2, err3 = errors(xPartial)
print( f'\n\tErrors are {err1:.1e}, {err2:.1e} and {err3:.1e}' )
# + colab={"base_uri": "https://localhost:8080/"} id="cwRmxpmWeBOD" outputId="01afdfa0-fe28-4a68-82e8-222107b3f754"
k = 5 # number of iterations for Iterative Hessian Sketch
def IHS(k=5):
    """Run k iterations of the Iterative Hessian Sketch.

    The m sketched rows of SA are split into k disjoint blocks; each
    iteration solves a partially-sketched subproblem on one block and adds
    the resulting correction to the running estimate.
    """
    rows_per_block = m // k
    x_hat = np.zeros((N, 1))   # running solution estimate
    b_hat = b.copy()           # residual right-hand side; copy so b is untouched
    print(f'Iterative Hessian Sketch, dividing {m} total rows into {k} blocks of {rows_per_block}')
    for j in range(k):
        sketch_block = SA[j * rows_per_block:(j + 1) * rows_per_block, :]
        step = partial_sketch(np.sqrt(m / rows_per_block) * sketch_block, A.T @ b_hat)
        # Contraction factor measured against the true LS solution, using the
        # estimate *before* this step is applied.
        gap = A @ (xLS - x_hat)
        rho = norm(A @ step - gap) / norm(gap)
        x_hat += step
        b_hat -= A @ step
        e1, e2, e3 = errors(x_hat)
        print(f'  Iter {j+1:2d}, contraction factor {rho:.2f}, errors {e1:5.2e}, {e2:5.2e}, {e3:5.2e}')
    print(f'\n\n')
# Sweep the number of blocks: fewer iterations = bigger sketch per step.
IHS(1)
IHS(5)
IHS(8)
IHS(10)
IHS(20)
# + [markdown] id="_FECNYlw-BDi"
# ### What happens if we re-use the same sketch in the iterative part?
#
# Our theory doesn't hold since the problem data $b$ is no longer a constant (it's a random variable that is dependent on the sketch $S$)
#
# But maybe it will work??
# - actually, this idea (or a variant) is in [Faster Least Squares Optimization
# ](https://arxiv.org/abs/1911.02675) by <NAME> Pilanci, 2019
# - See also this journal version [Optimal Randomized First-Order Methods for Least-Squares Problems](http://proceedings.mlr.press/v119/lacotte20a.html) by <NAME> Pilanci, ICML 2020
# + colab={"base_uri": "https://localhost:8080/"} id="eHYnljAkvyCf" outputId="bcce2450-1a26-41c6-c43b-c5513747bcd2"
k = 10 # number of iterations for Iterative Hessian Sketch
xHat= np.zeros((N,1))
bHat= b.copy() # important!!! copy, so the loop below doesn't mutate b itself
print('Iterative Hessian Sketch, RE-USING OLD SKETCHES!! This is off-label usage')
for i in range(k):
    # Every iteration re-uses the SAME sketched matrix SA (off-label).
    xx = partial_sketch( SA, A.T@bHat ) # full SA matrix
    rho = norm( A@xx-A@(xLS-xHat) )/norm(A@(xLS-xHat) )
    xHat += xx
    bHat -= A@xx
    # Recompute the residual from scratch to avoid accumulating round-off.
    bHat = b.copy() - A@xHat # if you're worried about accumulating error
    err1, err2, err3 = errors(xHat)
    print(f'  Iter {i+1:2d}, contraction factor {rho:.2f}, errors {err1:5.2e}, {err2:5.2e}, {err3:5.2e}')
# + [markdown] id="ErWmWB6xjZGZ"
# # BLENDENPIK/LSRN Sketch-to-precondition
# + [markdown] id="o9wcVYl4uZHS"
# Let's start by using a standard linear solver for least squares, [`lsqr`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lsqr.html)
# + id="Cr-pHiwmVycp" colab={"base_uri": "https://localhost:8080/"} outputId="013bb63c-811b-46de-bf40-f7bb439a00c4"
from scipy.sparse.linalg import lsqr
from scipy.sparse.linalg import LinearOperator, aslinearoperator
# NOTE(review): the name `iter` below shadows the builtin; kept as-is.
# %time xHat, flag, iter, nrm = lsqr( A, b, show=True, iter_lim=int(1e2))[:4]
err1, err2, err3 = errors(xHat)
print( f'\n\tErrors are {err1:.1e}, {err2:.1e} and {err3:.1e}' )
# + [markdown] id="k0lj8P7fvFur"
# Now let's precondition. We use the `R` from the thin `QR` decomposition of the *sketched* matrix $SA$.
#
# Then, we want to solve the system
# $$
# \min_z || AR^{-1}z - b ||^2
# $$
# where we've done the change-of-variables $x=R^{-1}z$
# so after solving the system for $z$, we do one final conversion back to $x$.
#
# We need to give `scipy` a linear operator that can multiply $x\mapsto AR^{-1}x$, which is easy using the `LinearOperator` class.
# + colab={"base_uri": "https://localhost:8080/"} id="4xfF4DSLkf3i" outputId="812d7375-32ff-4081-8bdd-364429a98fd9"
# %time R = numpy.linalg.qr( SA, mode='r')
# R^{-1} and R^{-T} applied via triangular solves (never form the inverse).
Rinv_f = lambda x : scipy.linalg.solve_triangular( R, x)
Rinv_t = lambda x : scipy.linalg.solve_triangular( R, x, trans='T')
Rinv = LinearOperator((N,N), matvec = Rinv_f, rmatvec = Rinv_t)
AR = aslinearoperator(A)@Rinv
AR.shape
# + [markdown] id="5fWI1c5yvkFL"
# ### Solving the preconditioned system
# Now we solve via `lsqr` and see if it converges more quickly
# + colab={"base_uri": "https://localhost:8080/"} id="isALA4mvlQfg" outputId="711c6fc7-d42c-4470-daa8-bd758e5eb0b1"
# %time zHat, flag, iter, nrm = lsqr( AR, b, show=True, atol=1e-16,btol=1e-16, iter_lim=10)[:4]
# Undo the change of variables: x = R^{-1} z.
xHat = Rinv_f(zHat)
err1, err2, err3 = errors(xHat)
print( f'\n\tErrors are {err1:.1e}, {err2:.1e} and {err3:.1e}' )
print( f'\tLSQR took {iter} iterations')
# + id="xcXc5MkmlRfZ" colab={"base_uri": "https://localhost:8080/"} outputId="09487409-3d39-4dce-b5bc-fb9fbd682152"
# Find the condition number. This may be slow...
# (AR @ identity materializes the preconditioned matrix column by column.)
AR_explicit = AR@np.eye(N)
cnd = np.linalg.cond( AR_explicit )
print(f'Condition number of AR^{-1} is {cnd:.2e}')
# + [markdown] id="q85GaHeI9aZY"
# ### Repeat for using the Count Sketch
# Let's see how fast we are
# + id="PbD5XTJk4a6q" colab={"base_uri": "https://localhost:8080/"} outputId="a955d0c4-3c3d-4cef-c62e-7d0972ead6e1"
# %%time
S = sk.Count( (m,M) )
R = numpy.linalg.qr( S@A, mode='r')
Rinv_f = lambda x : scipy.linalg.solve_triangular( R, x)
Rinv_t = lambda x : scipy.linalg.solve_triangular( R, x, trans='T')
Rinv = LinearOperator((N,N), matvec = Rinv_f, rmatvec = Rinv_t)
AR = aslinearoperator(A)@Rinv
zHat, flag, iter, nrm = lsqr( AR, b, show=False,iter_lim=7)[:4]
xHat = Rinv_f(zHat)
err1, err2, err3 = errors(xHat)
print( f'\n\tErrors are {err1:.1e}, {err2:.1e} and {err3:.1e}' )
print( f'\tLSQR took {iter} iterations')
# + id="xvpP99qz9xsK"
|
Demos/demo08_higherAccuracyRegression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W2D2_LinearSystems/W2D2_Tutorial4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# -
# # Tutorial 4: Autoregressive models
# **Week 2, Day 2: Linear Systems**
#
# **By Neuromatch Academy**
#
# **Content Creators**: <NAME>, <NAME>
#
# **Content Reviewers**: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**
#
# <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>
# ---
# # Tutorial Objectives
#
# *Estimated timing of tutorial: 30 minutes*
#
# The goal of this tutorial is to use the modeling tools and intuitions developed in the previous few tutorials and use them to _fit data_. The concept is to flip the previous tutorial -- instead of generating synthetic data points from a known underlying process, what if we are given data points measured in time and have to learn the underlying process?
#
# This tutorial is in two sections.
#
# **Section 1** walks through using regression of data to solve for the coefficient of an OU process from Tutorial 3. Next, **Section 2** generalizes this auto-regression framework to high-order autoregressive models, and we will try to fit data from monkeys at typewriters.
# + cellView="form"
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
# Embed the slide deck (hosted on OSF) in an iframe inside the notebook.
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/snv4m/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
# -
# ---
# # Setup
# +
# Imports
import numpy as np
import matplotlib.pyplot as plt
# + cellView="form"
#@title Figure settings
import ipywidgets as widgets  # interactive display
# %config InlineBackend.figure_format = 'retina'
# Use the shared NMA matplotlib style for all figures in this tutorial.
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# + cellView="form"
# @title Plotting Functions
def plot_residual_histogram(res):
    """Show a histogram of model residuals, titled with their standard
    deviation (helper function for Exercise 4A)."""
    plt.figure()
    plt.hist(res)
    plt.xlabel('error in linear model')
    plt.title('stdev of errors = {std:.4f}'.format(std=res.std()))
    plt.show()
def plot_training_fit(x1, x2, p):
    """Scatter the AR model's training predictions against the observed data.

    Helper function for Exercise 4B. A little horizontal jitter is added to
    the observed values so that overlapping points remain visible.

    Args:
        x1 (numpy array of floats): predictor matrix, size [r+1, n-r]
        x2 (numpy array of floats): observed next-step values, length n-r
        p (numpy array of floats): fitted AR coefficients, length r+1
    """
    # Derive the model order from the coefficient vector instead of reading a
    # global variable `r` as before (which raised NameError when undefined and
    # could silently use a stale value). len(p) == r + 1 by construction.
    order = len(p) - 1
    fig = plt.figure()
    plt.scatter(x2 + np.random.standard_normal(len(x2))*0.02,
                np.dot(x1.T, p), alpha=0.2)
    plt.title('Training fit, order {r:d} AR model'.format(r=order))
    plt.xlabel('x')
    plt.ylabel('estimated x')
    plt.show()
# + cellView="form"
# @title Helper Functions
def ddm(T, x0, xinfty, lam, sig):
    '''
    Samples a trajectory of a drift-diffusion model.

    args:
      T (integer): length of time of the trajectory
      x0 (float): position at time 0
      xinfty (float): equilibrium position
      lam (float): process param
      sig: standard deviation of the normal distribution

    returns:
      t (numpy array of floats): time steps from 0 to T sampled every 1 unit
      x (numpy array of floats): position at every time step
    '''
    t = np.arange(0, T, 1.)
    x = np.zeros_like(t)
    x[0] = x0
    for k in range(len(t) - 1):
        # Draw the noise as a scalar (standard_normal() with no size argument)
        # rather than a size-1 array: assigning a 1-element array to the
        # scalar slot x[k+1] is deprecated since NumPy 1.25 and raises an
        # error under NumPy 2. The sampled value is identical either way.
        x[k + 1] = xinfty + lam * (x[k] - xinfty) + sig * np.random.standard_normal()
    return t, x
def build_time_delay_matrices(x, r):
    """Construct predictor/target matrices for an order-r autoregression.

    Args:
        x (numpy array of floats): time series to be auto-regressed
        r (scalar): order of the autoregression model

    Returns:
        (numpy array of floats): predictors "x1" of size [r+1, n-r]
        (numpy array of floats): targets "x2" (values to predict), length n-r
    """
    n_cols = len(x) - r
    # First predictor row is all ones (intercept term), then the raw series.
    rows = [np.ones(n_cols), x[0:-r]]
    shifted = x
    # Each additional row is the series advanced by one more time step.
    for _ in range(r - 1):
        shifted = np.roll(shifted, -1)
        rows.append(shifted[0:-r])
    x1 = np.vstack(rows)
    x2 = x[r:]
    return x1, x2
def AR_prediction(x_test, p):
    """Predict +1/-1 for each step of x_test using AR coefficients p.

    Args:
        x_test (numpy array of floats): test series to be predicted
        p (numpy array of floats): AR coefficients (length r+1) obtained by
            solving the order-r autoregression problem on training data

    Returns:
        (numpy array of floats): predictions — +1 where the regression output
        is positive, -1 where it is negative.
    """
    order = len(p) - 1
    predictors, _ = build_time_delay_matrices(x_test, order)
    # The linear model produces a real number; its sign is the model's guess.
    return np.sign(np.dot(predictors.T, p))
def error_rate(x_test, p):
    """Fraction of mismatched AR-model predictions on x_test.

    Args:
        x_test (numpy array of floats): series to be predicted
        p (numpy array of floats): AR coefficients (length r+1) obtained by
            solving the order-r autoregression problem on training data

    Returns:
        (float): number of mismatches divided by number of test points.
    """
    _, targets = build_time_delay_matrices(x_test, len(p) - 1)
    mismatches = np.count_nonzero(targets - AR_prediction(x_test, p))
    return mismatches / len(targets)
# -
# # Section 1: Fitting data to the OU process
#
#
# + cellView="form"
# @title Video 1: Autoregressive models
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
    from IPython.display import IFrame
    # Bilibili mirror: IFrame subclass that builds the embed URL from a
    # video id and page number.
    class BiliVideo(IFrame):
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id = id
            src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id="BV1fK4y1s7AQ", width=854, height=480, fs=1)
    print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id="VdiVSTPbJ7I", width=854, height=480, fs=1, rel=0)
    print('Video available at https://youtube.com/watch?v=' + video.id)
    display(video)

# Tab widget: one tab per video host.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
# To see how this works, let's continue the previous example with the drift-diffusion (OU) process. Our process had the following form:
#
# $x_{k+1} = x_{\infty} + \lambda(x_k - x_{\infty}) + \sigma \eta$
#
# where $\eta$ is sampled from a standard normal distribution.
#
# For simplicity, we set $x_\infty = 0$. Let's plot a trajectory for this process again below. Take note of the parameters of the process because they will be important later.
# + cellView="form"
#@markdown Execute to simulate the drift diffusion model
np.random.seed(2020) # set random seed for reproducibility
# parameters
T = 200
x0 = 10
xinfty = 0
lam = 0.9
sig = 0.2
# drift-diffusion model from tutorial 3
t, x = ddm(T, x0, xinfty, lam, sig)
fig = plt.figure()
plt.title('$x_0=%d, x_{\infty}=%d, \lambda=%0.1f, \sigma=%0.1f$' % (x0, xinfty, lam, sig))
plt.plot(t, x, 'k.')
plt.xlabel('time')
plt.ylabel('position x')
plt.show()
# -
# What if we were given these positions $x$ as they evolve in time as data, how would we get back out the dynamics of the system $\lambda$?
#
# Since a little bird told us that this system takes on the form
#
# $x_{k+1} = \lambda x_k + \eta$,
#
# where $\eta$ is noise from a normal distribution, our approach is to solve for $\lambda$ as a **regression problem**.
#
# As a check, let's plot every pair of points adjacent in time ($x_{k+1}$ vs. $x_k$) against each other to see if there is a linear relationship between them.
# + cellView="form"
# @markdown Execute to visualize X(k) vs. X(k+1)
# make a scatter plot of every data point in x
# at time k versus time k+1
fig = plt.figure()
plt.scatter(x[0:-2], x[1:-1], color='k')
# Identity line for reference: points above/below show drift toward xinfty.
plt.plot([0, 10], [0, 10], 'k--', label='$x_{k+1} = x_k$ line')
plt.xlabel('$x_k$')
plt.ylabel('$x_{k+1}$')
plt.legend()
plt.show()
# -
# Hooray, it's a line! This is evidence that the _dynamics that generated the data_ are **linear**. We can now reformulate this task as a regression problem.
#
# Let $\mathbf{x_1} = x_{0:T-1}$ and $\mathbf{x_2} = x_{1:T}$ be vectors of the data indexed so that they are shifted in time by one. Then, our regression problem is
#
# $$\mathbf{x}_2 = \lambda \mathbf{x}_1$$
#
# This model is **autoregressive**, where _auto_ means self. In other words, it's a regression of the time series on itself from the past. The equation as written above is only a function of itself from _one step_ in the past, so we can call it a _first order_ autoregressive model.
#
# Now, let's set up the regression problem below and solve for $\lambda.$ We will plot our data with the regression line to see if they agree.
# + cellView="form"
#@markdown Execute to solve for lambda through autoregression
# build the two data vectors from x
x1 = x[0:-2]
# Broadcasting against the exponents [0, 1] turns the vector into a 2-column
# matrix: a column of ones (intercept) and the raw values (linear term).
x1 = x1[:, np.newaxis]**[0, 1]
x2 = x[1:-1]
# solve for an estimate of lambda as a linear regression problem
p, res, rnk, s = np.linalg.lstsq(x1, x2, rcond=None)
# here we've artificially added a vector of 1's to the x1 array,
# so that our linear regression problem has an intercept term to fit.
# we expect this coefficient to be close to 0.
# the second coefficient in the regression is the linear term:
# that's the one we're after!
lam_hat = p[1]
# plot the data points
fig = plt.figure()
plt.scatter(x[0:-2], x[1:-1], color='k')
plt.xlabel('$x_k$')
plt.ylabel('$x_{k+1}$')
# plot the 45 degree line
plt.plot([0, 10], [0, 10], 'k--', label='$x_{k+1} = x_k$ line')
# plot the regression line on top
xx = np.linspace(-sig*10, max(x), 100)
yy = p[0] + lam_hat * xx
plt.plot(xx, yy, 'r', linewidth=2, label='regression line')
mytitle = 'True $\lambda$ = {lam:.4f}, Estimate $\lambda$ = {lam_hat:.4f}'
plt.title(mytitle.format(lam=lam, lam_hat=lam_hat))
plt.legend()
plt.show()
# -
# Pretty cool! So now we have a way to predict $x_{k+1}$ if given any data point $x_k$. Let's take a look at how accurate this one-step prediction might be by plotting the residuals.
#
# ## Coding Exercise 1: Residuals of the autoregressive model
#
# *Referred to as exercise 4A in video*
#
# Plot a histogram of residuals of our autoregressive model, by taking the difference between the _data_ $\mathbf{x_2}$ and the _model_ prediction. Do you notice anything about the standard deviation of these residuals and the equations that generated this synthetic dataset?
# + cellView="code"
##############################################################################
## Insert your code here take to compute the residual (error)
raise NotImplementedError('student exercise: compute the residual error')
##############################################################################
# compute the predicted values using the autoregressive model (lam_hat), and
# the residual is the difference between x2 and the prediction
res = ...
# Visualize
plot_residual_histogram(res)
# +
# to_remove solution
# compute the predicted values using the autoregressive model (lam_hat), and
# the residual is the difference between x2 and the prediction
res = x2 - (lam_hat * x[0:-2])
# Visualize
with plt.xkcd():
plot_residual_histogram(res)
# -
# ---
# # Section 2: Higher order autoregressive models
#
# *Estimated timing to here from start of tutorial: 15 min*
# + cellView="form"
# @title Video 2: Monkey at a typewriter
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
    from IPython.display import IFrame
    # Bilibili mirror: IFrame subclass that builds the embed URL from a
    # video id and page number.
    class BiliVideo(IFrame):
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id = id
            src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id="BV1si4y1V7Ru", width=854, height=480, fs=1)
    print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id="f2z0eopWB8Y", width=854, height=480, fs=1, rel=0)
    print('Video available at https://youtube.com/watch?v=' + video.id)
    display(video)

# Tab widget: one tab per video host.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
# Now that we have established the autoregressive framework, generalizing for dependence on data points from the past is straightforward. **Higher order** autoregression models a future time point based on _more than one point in the past_.
#
# In one dimension, we can write such an order-$r$ model as
#
# $x_{k+1} = \alpha_0 + \alpha_1 x_k + \alpha_2 x_{k-1} + \alpha_3 x_{k-2} + \dots + \alpha_{r+1} x_{k-r}$,
#
# where the $\alpha$'s are the $r+1$ coefficients to be fit to the data available.
# These models are useful to account for some **history dependence** in the trajectory of timeseries. This next part of the tutorial will explore one such timeseries, and you can do an experiment on yourself!
#
# In particular, we will explore a binary random sequence of 0's and 1's that would occur if you flipped a coin and jotted down the flips.
#
# The difference is that, instead of actually flipping a coin (or using code to generate such a sequence), you -- yes you, human -- are going to generate such a random Bernoulli sequence as best as you can by typing in 0's and 1's. We will then build higher-order AR models to see if we can identify predictable patterns in the time-history of digits you generate.
# **But first**, let's try this on a sequence with a simple pattern, just to make sure the framework is functional. Below, we generate an entirely predictable sequence and plot it.
# + cellView="both"
# this sequence is entirely predictable, so an AR model should work
monkey_at_typewriter = '1010101010101010101010101010101010101010101010101'

# Bonus: this sequence is also predictable, but does an order-1 AR model work?
#monkey_at_typewriter = '100100100100100100100100100100100100100'

# function to turn chars to numpy array,
# coding it this way makes the math easier
# '0' -> -1
# '1' -> +1
def char2array(s):
    """Convert a string of '0'/'1' characters to a numpy array of -1/+1."""
    bits = np.array([int(ch) for ch in s])
    return bits * 2 - 1
# Encode the digit string as -1/+1 values and plot it as a step function.
x = char2array(monkey_at_typewriter)

fig = plt.figure()
plt.step(x, '.-')
plt.xlabel('time')
plt.ylabel('random variable')
plt.show()
# -
# Now, let's set up our regression problem (order 1 autoregression like above) by defining $\mathbf{x_1}$ and $\mathbf{x_2}$ and solve it.
# +
# build the two data vectors from x
x1 = x[0:-2]
# Broadcasting against exponents [0, 1] yields [ones-column, values-column].
x1 = x1[:, np.newaxis]**[0, 1]
x2 = x[1:-1]

# solve for an estimate of lambda as a linear regression problem
p, res, rnk, s = np.linalg.lstsq(x1, x2, rcond=None)
# -
# take a look at the resulting regression coefficients
print('alpha_0 = {a0:.2f}, alpha_1 = {a1:.2f}'.format(a0=p[0], a1=p[1]))
# ## Think! 2: Understanding autoregressive parameters
#
# Do the values we got for $\alpha_0$ and $\alpha_1$ make sense? Write down the corresponding autoregressive model and convince yourself that it gives the alternating 0's and 1's we asked it to fit as data.
# to_remove explanation
"""
The corresponding autoregressive model is:
x_{k+1} = 0 - x_{k}
""";
# Truly random sequences of numbers have no structure and should not be predictable by an AR or any other models.
#
# However, humans are notoriously terrible at generating random sequences of numbers! (Other animals are no better...)
#
# To test out an application of higher-order AR models, let's use them to **model a sequence of 0's and 1's that a human tried to produce at random**. In particular, I convinced my 9-yr-old monkey to sit at a typewriter (my laptop) and enter some digits as randomly as he is able. The digits he typed in are in the code, and we can plot them as a timeseries of digits here.
#
# If the digits really have no structure, then we expect our model to do about as well as guessing, producing an error rate of 0.5. Let's see how well we can do!
# + cellView="both"
# data generated by 9-yr-old JAB:
# we will be using this sequence to train the data
monkey_at_typewriter = '10010101001101000111001010110001100101000101101001010010101010001101101001101000011110100011011010010011001101000011101001110000011111011101000011110000111101001010101000111100000011111000001010100110101001011010010100101101000110010001100011100011100011100010110010111000101'

# we will be using this sequence to test the data
test_monkey = '00100101100001101001100111100101011100101011101001010101000010110101001010100011110'

x = char2array(monkey_at_typewriter)
test = char2array(test_monkey)

## testing: machine generated randint should be entirely unpredictable
## uncomment the lines below to try random numbers instead
# np.random.seed(2020) # set random seed
# x = char2array(np.random.randint(2, size=500))
# test = char2array(np.random.randint(2, size=500))

fig = plt.figure()
plt.step(x, '.-')
plt.show()
# -
# ## Coding Exercise 2: Fitting AR models
#
# *Referred to in video as exercise 4B*
#
# Fit a order-5 ($r=5$) AR model to the data vector $x$. To do this, we have included some helper functions, including ``AR_model``.
#
# We will then plot the observations against the trained model. Note that this means we are using a sequence of the previous 5 digits to predict the next one.
#
# Additionally, output from our regression model are continuous (real numbers) whereas our data are scalar (+1/-1). So, we will take the sign of our continuous outputs (+1 if positive and -1 if negative) as our predictions to make them comparable with data. Our error rate will simply be the number of mismatched predictions divided by the total number of predictions.
# + cellView="form"
# @markdown Execute this cell to get helper function `AR_model`
def AR_model(x, r):
    """Fit an order-r autoregression to the series x by least squares.

    Args:
        x (numpy array of floats): data to be auto-regressed
        r (scalar): order of the autoregression model

    Returns:
        (numpy array of floats): predictors "x1" of size [r+1, n-r]
        (numpy array of floats): targets "x2" of length n-r
        (numpy array of floats): fitted coefficients "p" of length r+1
    """
    x1, x2 = build_time_delay_matrices(x, r)
    # Solve the over-determined linear system x1.T @ p ~= x2.
    p, res, rnk, s = np.linalg.lstsq(x1.T, x2, rcond=None)
    return x1, x2, p
help(AR_model)
# + cellView="code"
##############################################################################
## TODO: Insert your code here for fitting the AR model
raise NotImplementedError('student exercise: fit AR model')
##############################################################################
# define the model order, and use AR_model() to generate the model and prediction
r = ...
x1, x2, p = AR_model(...)

# Plot the Training data fit
# Note that this adds a small amount of jitter to horizontal axis for visualization purposes
plot_training_fit(x1, x2, p)
# +
# to_remove solution
# define the model order, and use AR_model() to generate the model and prediction
r = 5 # remove later
x1, x2, p = AR_model(x, r)

# Plot the Training data fit
# Note that this adds a small amount of jitter to horizontal axis for visualization purposes
with plt.xkcd():
    plot_training_fit(x1, x2, p)
# -
# Let's check out how the model does on the test data that it's never seen before!
# + cellView="form"
# @markdown Execute to see model performance on test data
# Evaluate the order-r model on the held-out test sequence.
x1_test, x2_test = build_time_delay_matrices(test, r)
fig = plt.figure()
plt.scatter(x2_test+np.random.standard_normal(len(x2_test))*0.02,
            np.dot(x1_test.T, p), alpha=0.5)
mytitle = 'Testing fit, order {r:d} AR model, err = {err:.3f}'
plt.title(mytitle.format(r=r, err=error_rate(test, p)))
plt.xlabel('test x')
plt.ylabel('estimated x')
# -
# Not bad! We're getting errors that are smaller than 0.5 (what we would have gotten by chance).
#
# Let's now try **AR models of different orders** systematically, and plot the test error of each.
#
# _Remember_: The model has never seen the test data before, and random guessing would produce an error of $0.5$.
# + cellView="form"
# @markdown Execute to visualize errors for different orders
# range of r's to try
r = np.arange(1, 21)
# Multiplying by 1.0 promotes the array to float so fractional error rates
# are stored without truncation.
err = np.ones_like(r) * 1.0

for i, rr in enumerate(r):
    # fitting the model on training data
    x1, x2, p = AR_model(x, rr)
    # computing and storing the test error
    test_error = error_rate(test, p)
    err[i] = test_error

fig = plt.figure()
plt.plot(r, err, '.-')
plt.plot([1, r[-1]], [0.5, 0.5], c='r', label='random chance')
plt.xlabel('Order r of AR model')
plt.ylabel('Test error')
plt.xticks(np.arange(0,25,5))
plt.legend()
plt.show()
# -
# Notice that there's a sweet spot in the test error! The 6th order AR model does a really good job here, and for larger $r$'s, the model starts to overfit the training data and does not do well on the test data.
#
# In summary:
#
# "**I can't believe I'm so predictable!**" - JAB
# ---
# # Summary
#
# *Estimated timing of tutorial: 30 minutes*
#
# In this tutorial, we learned:
#
# * How learning the parameters of a linear dynamical system can be formulated as a regression problem from data.
# * Time-history dependence can be incorporated into the regression framework as a multiple regression problem.
# * That humans are no good at generating random (not predictable) sequences. Try it on yourself!
|
tutorials/W2D2_LinearSystems/W2D2_Tutorial4.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
import math
import timeit
import numpy as np
import torch
from torchvision import transforms
from PIL import Image
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from mpl_toolkits.mplot3d import Axes3D
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
from datasets.NYUMultiDataset import NYUMultiDataset
from model.DepthGenLM import DepthGenLM
# -
def plot_hands(ax, points, color, linewidth='3'):
    """Draw a 22-joint hand skeleton on a 3D matplotlib axis.

    Args:
        ax: 3D axis to draw on
        points: per-joint 3D coordinates, indexable as points[i] -> (x, y, z)
        color: matplotlib color for the bone segments
        linewidth: line width forwarded to ax.plot
    """
    # Joint-index pairs defining the bones: the thumb chain off the wrist
    # (index 0), then four finger chains rooted at the palm joint (index 21).
    bones = [
        (0, 1), (1, 2), (2, 3), (3, 4),
        (0, 21),
        (21, 5), (5, 6), (6, 7), (7, 8),
        (21, 9), (9, 10), (10, 11), (11, 12),
        (21, 13), (13, 14), (14, 15), (15, 16),
        (21, 17), (17, 18), (18, 19), (19, 20),
    ]
    for start, end in bones:
        segment = np.stack([points[start], points[end]])
        ax.plot(segment[:, 0], segment[:, 1], segment[:, 2],
                c=color, linewidth=linewidth)
# +
# Preprocessing pipeline applied to each depth frame before the network.
tsfms = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize(224, Image.NEAREST),
    transforms.ToTensor(),
])

data_path = "/home/alex/Data/nyu/dataset/test/"
dataset = NYUMultiDataset(data_path, tsfms, tsfms, 1024, True, False)
# +
# Paths to the libhand mesh/skeleton assets and the pretrained checkpoint.
mesh_path = "/home/alex/dev/projects/libhand-public/nyu_synth/ogre/hand.mesh.xml"
skeleton_path = "/home/alex/dev/projects/libhand-public/nyu_synth/ogre/hand.skeleton.xml"
checkpoint_path = "/home/alex/dev/projects/pytorch-depthgen/saved/resnet_multiview_synth_pretrain.ckpt"

model = DepthGenLM.load_from_checkpoint(checkpoint_path)
model.cuda()
model.eval()
# +
# Pick a random test sample and run the model on it.
idx = torch.randint(0, len(dataset), [1]).item()
sample1, sample2 = dataset[idx]
sample, target, kps, kps14, center, norm_size, bbox, padding = sample1
# Indices selecting the 22 annotated joints out of the model's joint set
# (order matches the `bones` connectivity in plot_hands).
joint_idxs = [1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 14, 15, 17, 18, 19, 20, 22, 23, 24, 25, 6]
# Default output
# x = torch.zeros(1, 17, 4, device=torch.device('cuda'))
# x[:, :, 0] = 1
# preds = model.depth_gen(x)
preds, _ = model(sample.unsqueeze(0).cuda())
# preds[0]: predicted point cloud; preds[2]: predicted joint coordinates
# (assumed from the indexing below — TODO confirm against DepthGenLM).
points = preds[0]
coords = preds[2]
points_vis = points[0].detach().cpu()
coords_vis = coords[0, joint_idxs].detach().cpu()
kp_vis = kps.cpu()

# Normalization length: average bone length over the finger segments below.
segments = np.array([
    [1, 2],
    [2, 3],
    [3, 4],
    [5, 6],
    [6, 7],
    [7, 8],
    [9, 10],
    [10, 11],
    [11, 12],
    [13, 14],
    [14, 15],
    [15, 16],
    [17, 18],
    [18, 19],
    [19, 20]
])
total_length = 0
# NOTE(review): this loop variable shadows the sample index `idx` above.
for idx, val in enumerate(segments):
    l = np.linalg.norm(kp_vis[val[0]] - kp_vis[val[1]])
    total_length += l
mean_length = total_length / segments.shape[0]
print("Mean length = {}".format(mean_length))

# %matplotlib notebook
# Input depth image.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(sample[0])

# Skeleton overlay: ground-truth keypoints (red) vs. predictions (blue),
# with joint-index labels on both.
kp_fig = plt.figure()
kp_ax = kp_fig.add_subplot(111, projection='3d')
kp_ax.view_init(90, -90)
kp_ax.set_xlim([-2.25, 2.25])
kp_ax.set_ylim([-2.5, 2.0])
kp_ax.set_zlim([-2.25, 2.25])
plot_hands(kp_ax, kp_vis, color='r')
plot_hands(kp_ax, coords_vis, color='b')
annotations = [str(i) for i in range(22)]
for i, anno in enumerate(annotations):
    kp_ax.text(kps[i, 0], kps[i, 1], kps[i, 2], anno)
    kp_ax.text(coords_vis[i, 0], coords_vis[i, 1], coords_vis[i, 2], anno)

# Point clouds: target (red) vs. model output (blue).
pc_fig = plt.figure()
pc_ax = pc_fig.add_subplot(111, projection='3d')
pc_ax.view_init(90, -90)
pc_ax.scatter(target[:, 0], target[:, 1], target[:, 2], alpha=0.4, c='r')
pc_ax.scatter(points_vis[:, 0], points_vis[:, 1], points_vis[:, 2], alpha=0.4, c='b')
pc_ax.set_xlim([-2.25, 2.25])
pc_ax.set_ylim([-2.5, 2.0])
pc_ax.set_zlim([-2.25, 2.25])
|
notebooks/nyu_multi_dataset.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from sagas.ofbiz.services import OfService, MetaService
from sagas.ofbiz.entities import MetaEntity

ms = OfService()
print(ms._name)
# Print the service descriptor; False presumably disables verbose output —
# TODO confirm against MetaService.desc.
MetaService('getInventoryAvailableByFacility').desc(False)
# -
# Quick substring check: "t_id" occurs inside "product_id", so this prints True.
print("t_id" in "product_id")
# +
from sagas.ofbiz.runtime_context import platform
oc = platform.oc
finder = platform.finder

# List the entity group names for the "default" delegator, then the entities
# in the tenant group.
group_reader = oc.delegator.getModelGroupReader()
names = group_reader.getGroupNames("default")
print(names)
result_set = group_reader.getEntityNamesByGroup("default", 'org.apache.ofbiz.tenant')
print(len(result_set))
print(result_set)
# +
def search_entity(name_filter):
    """Print every entity name that contains name_filter (case-insensitive)."""
    needle = name_filter.lower()
    model_reader = oc.delegator.getModelReader()
    for entity_name in model_reader.getEntityNames():
        if needle in entity_name.lower():
            print(entity_name)
# Example: list every entity whose name contains "facility".
search_entity('facility')
# +
# Look up one product and two facilities by primary key, printing audit stamps.
entity = MetaEntity("Product")
rec = entity.find_one(productId='GZ-2644')
print(rec['productId'], rec['lastUpdatedTxStamp'])

entity = MetaEntity("Facility")
rec = entity.find_one(facilityId='WebStoreWarehouse')
print(rec['facilityId'], rec['lastUpdatedTxStamp'])

# This id does not exist: find_one returns None, so nothing is printed.
entity = MetaEntity("Facility")
rec = entity.find_one(facilityId='WebStore_not_exists')
if rec is not None:  # idiomatic form of the original `not rec is None`
    print(rec['facilityId'], rec['lastUpdatedTxStamp'])
# -
# Query inventory levels for one product at the web-store warehouse.
ms = OfService()
ok, ret = ms.getInventoryAvailableByFacility(productId='GZ-2644', facilityId='WebStoreWarehouse')
if ok:
    print(ret['quantityOnHandTotal'], ret['availableToPromiseTotal'])

search_entity('inventoryItem')
# +
def record(entity, id_val):
    """Fetch one record of `entity` by its single primary-key value."""
    meta = MetaEntity(entity)
    pk_name = meta.model.getOnlyPk().getName()
    key_map = oc.j.HashMap()
    key_map.put(pk_name, id_val)
    return oc.delegator.findOne(meta.name, key_map, True)
record("InventoryItem", '9024')
# -
# Record a physical-inventory variance marking item 9024 as lost.
OfService().createPhysicalInventoryAndVariance(inventoryItemId='9024',
                                               varianceReasonId='VAR_LOST')
|
notebook/procs-ofbiz-inventory.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from statsmodels.tsa.filters.hp_filter import hpfilter
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# %matplotlib inline
# Load the EXINUS exchange-rate series, indexed and parsed by date.
# NOTE(review): the raw path begins with a bare backslash, so on Windows it
# resolves against the current drive root -- confirm the intended location.
df = pd.read_excel(r'\Data\India_Exchange_Rate_Dataset.xls',
index_col=0,parse_dates=True)
# Hodrick-Prescott filter: decompose EXINUS into cyclical + trend components.
EXINUS_cycle,EXINUS_trend = hpfilter(df['EXINUS'], lamb=1600)
df['cycle'] =EXINUS_cycle
df['trend'] =EXINUS_trend
# Plot only the cyclical component, tight on the x axis.
df[['cycle']].plot(figsize=(15,6)).autoscale(axis='x',tight=True)
plt.title('Extracting Cyclic Variations', fontsize=16)
plt.xlabel('Year')
plt.ylabel('EXINUS exchange rate')
plt.show()
|
hands-on-time-series-analylsis-python/Chapter 1/8.Detecting cyclical variation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/RoyMillamis/CPEN-1-2/blob/main/Demo1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="KN7TwgvV7hm2"
# ##Introduction to Python
#
# + colab={"base_uri": "https://localhost:8080/"} id="rdGUxVrq7wW3" outputId="ae1c2792-bc4c-46c0-f440-ef5e33a57002"
# Python indentation (fixed typo: "Indention") is syntactic: the indented
# line below forms the body of the if-statement.
if 5>2:
    print("five is greater than two!")
# + colab={"base_uri": "https://localhost:8080/"} id="C1x9BLXm9kBr" outputId="c52fe225-b491-4049-b013-5486aa98c26b"
x = 1 # This is a single variable with single value
x,y = 1,2 # these are two variables with two different values
x,y,z= 1,2,3
print(x)
print(y)
print(z)
# + id="HuWb7y6p-Na6"
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="2Z_eRgYJ-NkE" outputId="f3eaa478-7ef6-4917-b355-1cbdc90d2ff2"
x,y="four",2
x
y
x
# + [markdown] id="uOXjg9xZ-rzG"
# ### Casting
# + colab={"base_uri": "https://localhost:8080/"} id="J2gta0JU-uv9" outputId="9a46e49d-0acf-4601-ae4d-234f467a02f4"
b = int(4)
b
c= float(4)
c
# + [markdown] id="MGbzicPd_And"
# ### Type Function
# + colab={"base_uri": "https://localhost:8080/"} id="7XdMRs6K_Ehc" outputId="f089de76-38c2-44a9-a93a-0179af012db5"
x=5
y= "John" # This is a type of string
h= "ana"
H='Ana'
print(type(x))
print(type(y))
print(h)
print(H)
# + [markdown] id="dIzMdWz4ACxX"
# ## One Value to Multiple Variables
# + colab={"base_uri": "https://localhost:8080/"} id="lXUqWc27AGng" outputId="ddb193b8-8c97-478c-d850-8fa03d76bbc6"
x = y = z = 'four'
print(x)
print(y)
print(z)
# + colab={"base_uri": "https://localhost:8080/"} id="Fzf6TTa6AeUk" outputId="fad3c9fb-204d-4317-dccb-db522e832ae7"
x = "enjoying"
print("Python Programming is" " " + x)
# + colab={"base_uri": "https://localhost:8080/"} id="lz8tZrErA-2B" outputId="6ee025a4-bf20-4bda-d720-60a652c9418a"
x = 11
y = 12
z = 13
print(x+y+z)
# + colab={"base_uri": "https://localhost:8080/"} id="tV9YKMcIBOIH" outputId="7c6dbd67-8a91-4f07-a8f4-00d83cd77c96"
x+=3 #This is the same as x = x + 3 (augmented assignment)
print(x)
# + colab={"base_uri": "https://localhost:8080/"} id="vxtPJVm6-XSr" outputId="ef9b390d-5e78-45c5-9e8c-e4d6df3a0a15"
y+=5
print(y)
# + colab={"base_uri": "https://localhost:8080/"} id="YVYEtRv4BqDE" outputId="b0c4ea88-016f-48b1-b420-2951919b767a"
x<y and x!=x # `and` is False if either operand is False (x!=x is always False)
# + colab={"base_uri": "https://localhost:8080/"} id="EoXtpYi6_B_d" outputId="76bd1d91-4882-4e74-d1e6-1e97a4c3741d"
x>y or not y==z # `or` is True if at least one operand is True
# + colab={"base_uri": "https://localhost:8080/"} id="QRRVsPDY_awB" outputId="21154849-cfb0-4ea6-c5da-02928a5e608b"
not(print(x>y))
# + colab={"base_uri": "https://localhost:8080/"} id="MCXvXW07_4oN" outputId="c649995b-fac7-4d1e-cd07-c236c85bf8e0"
#Identity operations
print (x is y)
print (x is not z)
|
Demo1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# +
import os
import torch
import pandas as pd
#from skimage import io, transform
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torchvision.transforms.functional as Ft
import torch.utils.data as data
from torch.utils.data.sampler import SubsetRandomSampler
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.models as models
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
# -
# Fix NumPy and PyTorch RNG seeds so runs are reproducible.
seed = 42
np.random.seed(seed)
torch.manual_seed(seed)
## This class is part of the transforms pipeline used by the DataLoader and generates
## a number of crops according to the list of coord given at initialization
class MyCrop(object):
    """Transform that cuts a fixed-size square patch at each of several
    pre-defined coordinates out of the input image.

    Part of the torchvision transforms pipeline used by the DataLoader:
    ``__call__`` returns a list with one crop per (row, col) pair in *coord*
    (``Ft.crop`` treats each pair as the crop's (top, left) corner).
    """

    def __init__(self, size, coord, padding=None, pad_if_needed=False, fill=0, padding_mode='constant'):
        self.size = size            # side length of each square crop
        self.padding = padding      # optional padding applied before cropping
        # anchor points of the crops
        self.coord = coord
        self.pad_if_needed = pad_if_needed
        self.fill = fill
        self.padding_mode = padding_mode

    @staticmethod
    def _get_image_size(img):
        # BUGFIX: was defined without `self`, so calling it on an instance
        # raised TypeError; it is a pure helper, so make it a staticmethod.
        return img.shape

    def __call__(self, img):
        if self.padding is not None:
            img = Ft.pad(img, self.padding, self.fill, self.padding_mode)
        # pad the width if needed (PIL-style img.size = (width, height)).
        # NOTE(review): the call sites in this notebook pass an int for
        # `size`, so `self.size[1]` would fail if pad_if_needed were ever
        # True -- confirm before enabling padding.
        if self.pad_if_needed and img.size[0] < self.size[1]:
            img = Ft.pad(img, (self.size[1] - img.size[0], 0), self.fill, self.padding_mode)
        # pad the height if needed
        if self.pad_if_needed and img.size[1] < self.size[0]:
            img = Ft.pad(img, (0, self.size[0] - img.size[1]), self.fill, self.padding_mode)
        crops = []
        for coord in self.coord:
            crops.append(Ft.crop(img, coord[0], coord[1], self.size, self.size))
        return crops

    def __repr__(self):
        return self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding)
# +
# Training hyper-parameters.
opts = {}
opts['epochs'] = 100
opts['batch_size'] = 64
opts['lr'] = 0.003
opts['nr_classes'] = 2 # NOTE(review): CNN2 below outputs 3 logits -- confirm
# Fixed crop anchors; coord_flipped holds the mirrored (swapped) positions
# used for the test images -- TODO confirm the flip convention of that data.
coord = [(155,321),(201,419),(45,460),(121,158)]
coord_flipped = [(321,155),(419,201),(460,45),(158,121)]
TRAIN_DATA_PATH = "./train_data_3"
TEST_DATA_PATH = "./test_data_3"
# Each image becomes 4 fixed-position 64x64 crops stacked into one tensor.
TRANSFORM_IMG = transforms.Compose([
    #transforms.CenterCrop(64),
    MyCrop(64,coord),
    transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
    #transforms.Resize(256),
    #transforms.ToTensor(),
    #transforms.Normalize(mean=[0.485, 0.456, 0.406],
    # std=[0.229, 0.224, 0.225] )
    ])
TRANSFORM_IMG_TEST = transforms.Compose([
    MyCrop(64,coord_flipped),
    transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
    ])
writer = SummaryWriter()
train_data = torchvision.datasets.ImageFolder(root=TRAIN_DATA_PATH, transform=TRANSFORM_IMG)
test_data = torchvision.datasets.ImageFolder(root=TEST_DATA_PATH, transform=TRANSFORM_IMG_TEST)
## here, there are two ways of splitting the data into training and test sets (additional validation
## set for hyperparameter tuning may also be of use). One is to generate two directories of
## preprocessed images (train_data and test_data) and use two data loaders and the other is
## to split the sets on the fly. In the latter case, the issue is the strong time correlation
## between subsequent images which leads to overoptimistic test scores (can be alleviated by
## commenting out the np.random.shuffle(indices) line)
print(len(train_data.imgs))
# split into (train,val,test)
dataset_size = len(train_data)
indices = list(range(dataset_size))
test_percentage = 0.10
split = int(np.floor(test_percentage * dataset_size))
#np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
# NOTE(review): these samplers are dead code with the active loaders below
# (which use shuffle=True / the separate test_data); kept for the on-the-fly
# split strategy described above.
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)
#train_data_loader = data.DataLoader(train_data, batch_size=opts['batch_size'], sampler=train_sampler, num_workers=4)
#valid_data_loader = data.DataLoader(train_data, batch_size=opts['batch_size'], sampler=valid_sampler, num_workers=4)
train_data_loader = data.DataLoader(train_data, batch_size=opts['batch_size'], shuffle=True, num_workers=4)
valid_data_loader = data.DataLoader(test_data, batch_size=opts['batch_size'], shuffle=False, num_workers=4)
#train_iter = iter(train_data_loader)
#test_data = torchvision.datasets.ImageFolder(root=TEST_DATA_PATH, transform=TRANSFORM_IMG)
#test_data_loader = data.DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
# #### display the images as a sanity check (do for both train and valid data loader)
for batch_idx, sample in enumerate(train_data_loader):
    images, labels = sample
    print(images[0].shape)
    #labels_q = np.digitize(labels,bins)
    # Show the 4 crops of the first image of the first batch (CHW -> HWC).
    for i in range(4):
        plt.imshow( sample[0][0][i].permute(1, 2, 0) )
        plt.show()
    break;
# ## Models
# +
class BinaryCNN(torch.nn.Module):
    """Small two-stage convolutional classifier.

    Two conv/ReLU/max-pool stages (3 -> 32 -> 64 channels, each pool halving
    the spatial size), dropout, then two fully connected layers ending in 4
    logits.  The flattened size 7*7*64 implies 28x28 inputs -- TODO confirm;
    this class does not appear to be used by the training loop below.
    """

    def __init__(self):
        super(BinaryCNN, self).__init__()
        # stage 1: 3 -> 32 channels, 5x5 kernels, pooling halves H and W
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # stage 2: 32 -> 64 channels, halved again
        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.drop_out = nn.Dropout()
        self.fc1 = nn.Linear(7 * 7 * 64, 1000)
        self.fc2 = nn.Linear(1000, 4)

    def forward(self, x):
        features = self.layer2(self.layer1(x))
        flat = features.reshape(features.size(0), -1)
        return self.fc2(self.fc1(self.drop_out(flat)))
class CNN2(nn.Module):
    """Four-layer strided CNN over 12-channel 64x64 inputs.

    Each 4x4 stride-2 conv halves the spatial size (64 -> 32 -> 16 -> 8 -> 4),
    so the flattened feature size is d*8*4*4.  The 12 input channels match
    the torch.reshape(inputs, [-1, 12, 64, 64]) stacking of 4 RGB crops done
    in the training loop below -- TODO confirm.  'classification' mode ends
    in 3 logits, any other mode in a single regression output.
    """
    # initializers
    def __init__(self, d=32, mode='classification'):
        super(CNN2, self).__init__()
        self.conv1 = nn.Conv2d(12, d, 4, 2, 1)
        self.conv1_bn = nn.BatchNorm2d(d)
        self.conv2 = nn.Conv2d(d, d*2, 4, 2, 1)
        self.conv2_bn = nn.BatchNorm2d(d*2)
        self.conv3 = nn.Conv2d(d*2, d*4, 4, 2, 1)
        self.conv3_bn = nn.BatchNorm2d(d*4)
        self.conv4 = nn.Conv2d(d*4, d*8, 4, 2, 1)
        self.conv4_bn = nn.BatchNorm2d(d*8)
        #print(self.conv4_bn)
        if mode=='classification':
            self.linear = nn.Linear(d*8*4*4,3)
        else:
            self.linear = nn.Linear(d*8*4*4,1)
        #self.conv5 = nn.Conv2d(d*8, 4, 4, 1, 0)
    # weight_init
    def weight_init(self, mean, std):
        # Relies on the module-level helper normal_init defined below.
        for m in self._modules:
            normal_init(self._modules[m], mean, std)
    # forward method
    def forward(self, input):
        # BatchNorm + LeakyReLU(0.2) after every strided conv.
        x = F.leaky_relu(self.conv1_bn(self.conv1(input)), 0.2)
        x = F.leaky_relu(self.conv2_bn(self.conv2(x)), 0.2)
        x = F.leaky_relu(self.conv3_bn(self.conv3(x)), 0.2)
        x = F.leaky_relu(self.conv4_bn(self.conv4(x)), 0.2)
        x = x.reshape(x.size(0), -1)
        #x = F.sigmoid(self.conv5(x))
        x = self.linear(x)
        return x
def normal_init(m, mean, std):
    """Re-initialise a conv/transposed-conv module in place:
    weights ~ N(mean, std), bias zeroed.  Other module types are untouched."""
    if isinstance(m, (nn.ConvTranspose2d, nn.Conv2d)):
        m.weight.data.normal_(mean, std)
        m.bias.data.zero_()
# -
# ## Training loop
# +
import torch.optim as optim
# Train CNN2 on GPU with RMSprop; log train/val loss and accuracy to
# TensorBoard and checkpoint the weights to models/best.th.
net = CNN2()
net = net.cuda()
# NOTE(review): this class-weight tensor is built but never passed to the
# loss (CrossEntropyLoss(weight=weight)) -- confirm intent.
weight = torch.tensor([1/0.63,1/0.21,1/0.15]).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.RMSprop(net.parameters(), lr=opts['lr'])
# Bins for the (currently commented-out) label quantisation below.
bins = np.array([0,91,182,273,366])
nr_batches = len(train_data_loader)
for epoch in range(opts['epochs']):
    running_loss = 0.0
    print("Epoch ", epoch)
    for i, sample in tqdm(enumerate(train_data_loader, 0)):
        inputs, labels = sample
        # Stack the 4 crops of each image into 12 channels (4 x RGB).
        inputs_stack = torch.reshape(inputs,[-1,12,64,64]).cuda()
        labels = labels.cuda()
        # quantize labels to 4 bins
        #labels_q = torch.tensor(np.digitize(labels,bins))
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs_stack)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999: # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
        # tensorboard
        writer.add_scalar('Loss/train', loss, epoch*nr_batches + i)
    # run on test set (valid_data_loader wraps test_data; see the cell above)
    test_losses = []
    # NOTE(review): best_loss is re-initialised every epoch and never updated
    # after a save, so the checkpoint below is overwritten whenever
    # avg_test_loss < 10 once epoch > 5 -- probably not the intent.
    best_loss = 10
    correct = 0
    total = 0
    for i, test_sample in tqdm(enumerate(valid_data_loader, 0)):
        test_inputs, test_labels = test_sample
        test_inputs_stack = torch.reshape(test_inputs,[-1,12,64,64]).cuda()
        test_labels = test_labels.cuda()
        # forward
        test_outputs = net(test_inputs_stack)
        test_loss = criterion(test_outputs, test_labels)
        _, predicted = torch.max(test_outputs.data, 1)
        total += test_labels.size(0)
        correct += (predicted == test_labels).sum().item()
        test_losses.append(test_loss)
    print("Test accuracy: ", correct/total)
    avg_test_loss = torch.mean(torch.stack(test_losses))
    writer.add_scalar('Loss/test', avg_test_loss, epoch)
    writer.add_scalar('Loss/acc', correct/total, epoch)
    if(avg_test_loss < best_loss and epoch > 5):
        torch.save(net.state_dict(), os.path.join('models','best.th'))
print('Finished Training')
# -
# ## Load trained model and test it visually on a batch
# Re-create the network (on CPU) and load the best checkpoint.
# NOTE(review): training ran on CUDA; loading a CUDA-saved state dict on a
# CPU-only machine needs torch.load(..., map_location='cpu') -- confirm.
model = CNN2()
model.load_state_dict(torch.load('models/best.th'))
for batch_idx, sample in enumerate(valid_data_loader):
    images, labels = sample
    # 4 crops per image stacked into 12 channels (4 x RGB) per sample.
    test_images_stack = torch.reshape(images,[-1,12,64,64])
    test_labels = model(test_images_stack)
    softmaxlbl = F.softmax(test_labels)  # NOTE(review): no dim= (deprecated form)
    for i in range(opts['batch_size']):
        print("Predicted label: ", torch.argmax(softmaxlbl[i]))
        print("True label: ", labels[i])
        # Show the 4 crops of sample i side by side (CHW -> HWC for imshow).
        f, axarr = plt.subplots(1,4)
        axarr[0].imshow(images[i][0].permute(1, 2, 0))
        axarr[1].imshow(images[i][1].permute(1, 2, 0))
        axarr[2].imshow(images[i][2].permute(1, 2, 0))
        axarr[3].imshow(images[i][3].permute(1, 2, 0))
        plt.show()
        #for j in range(4):
        #    plt.imshow( sample[0][0][j].permute(1, 2, 0) )
        #    plt.show()
        input("Press key for next sample")
# ## Misc
# +
# Scan the whole training set for the smallest / largest label value.
max_val = 0
min_val = 100
for i, sample in tqdm(enumerate(train_data_loader, 0)):
    max_tmp = torch.max(sample[1])  # sample[1] holds the batch labels
    min_tmp = torch.min(sample[1])
    if max_tmp > max_val:
        max_val = max_tmp
    if min_tmp < min_val:
        min_val = min_tmp
print(max_val)
print(min_val)
# -
# NOTE(review): resnet18 is instantiated but never used below.
resnet18 = models.resnet18()
# Same sanity-check visualisation as earlier: first batch, first image, 4 crops.
for batch_idx, sample in enumerate(train_data_loader):
    images, labels = sample
    print(images[0].shape)
    #labels_q = np.digitize(labels,bins)
    for i in range(4):
        plt.imshow( sample[0][0][i].permute(1, 2, 0) )
        plt.show()
    break;
|
ideas/water_detection/run_classifier.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/RichLuarkie/stamp-prices/blob/master/stamp_prices.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="e97Hb-2xpQuM" colab_type="code" colab={}
# + [markdown] id="28A1ynkRpjKO" colab_type="text"
# # Plot US Stamp Prices Over Time
# + id="6r-uOUnvp675" colab_type="code" colab={}
# Import the libraries that we need
import pandas as pd
import matplotlib.pyplot as plt
# + [markdown] id="SNFRGPm9qzxx" colab_type="text"
# This dataset is from https://vincentarelbundock.github.io/Rdatasets/datasets.html
# + id="RUcyNDY1tCvB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c41218dc-8cde-4923-f9ae-ba2059ea8232"
# pd.read_csv will turn a csv file into a pandas dataframe
# (fetched directly over HTTP from the Rdatasets mirror)
stamp_prices = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# type() will return the type of an object
type(stamp_prices)
# + id="HxkKsqT7uSVa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="fcbaf29a-fd19-482a-8c88-4d9c48f2f161"
# head() will return the column name and the first few rows from the data frame
stamp_prices.head()
# + id="8zc9zExJvO03" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 298} outputId="ce70c2a0-2859-4ff6-d9c6-d999fc489586"
# Plot the year vs the price
# First set a plot title
plt.title('Stamp Prices Over Time in the US')
# Set the columns to plot
plt.scatter(stamp_prices['Year'], stamp_prices['Price'])
# + [markdown] id="Dzobtzkqs0_3" colab_type="text"
#
# + id="kMrr6JkQw2Nq" colab_type="code" colab={}
|
stamp_prices.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://colab.research.google.com/github/Tessellate-Imaging/Monk_Object_Detection/blob/master/application_model_zoo/Example%20-%20Trimodal%20People%20Segmentation%20Dataset.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # Table of contents
#
#
# ## 1. Installation Instructions
#
#
#
# ## 2. Use trained model to segment people in images
#
#
#
# ## 3. How to train a custom segmenter using "AAU VAP Trimodal People Segmentation"
# # About the networks
#
#
# 1. UNet
# - https://arxiv.org/abs/1505.04597
# - https://towardsdatascience.com/understanding-semantic-segmentation-with-unet-6be4f42d4b47
# - https://towardsdatascience.com/unet-line-by-line-explanation-9b191c76baf5
#
#
# 2. FPN
# - http://openaccess.thecvf.com/content_cvpr_2017/papers/Lin_Feature_Pyramid_Networks_CVPR_2017_paper.pdf
# - https://towardsdatascience.com/review-fpn-feature-pyramid-network-object-detection-262fc7482610
# - https://medium.com/@jonathan_hui/understanding-feature-pyramid-networks-for-object-detection-fpn-45b227b9106c
#
#
# 3. PSPNet
# - https://arxiv.org/abs/1612.01105
# - https://towardsdatascience.com/review-pspnet-winner-in-ilsvrc-2016-semantic-segmentation-scene-parsing-e089e5df177d
# - https://developers.arcgis.com/python/guide/how-pspnet-works/
#
#
# 4. Linknet
# - https://arxiv.org/pdf/1707.03718.pdf
# - https://neptune.ai/blog/image-segmentation-tips-and-tricks-from-kaggle-competitions
# # Installation
#
# - Run these commands
#
# - git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git
#
# - cd Monk_Object_Detection/9_segmentation_models/installation
#
# - Select the right requirements file and run
#
# - cat requirements_cuda9.0.txt | xargs -n 1 -L 1 pip install
# ! git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git
# +
# For colab use the command below
# ! cd Monk_Object_Detection/9_segmentation_models/installation && cat requirements_colab.txt | xargs -n 1 -L 1 pip install
# For Local systems and cloud select the right CUDA version
# #! cd Monk_Object_Detection/9_segmentation_models/installation && cat requirements_cuda10.0.txt | xargs -n 1 -L 1 pip install
# -
# # Use already trained model for demo
import os
import sys
sys.path.append("Monk_Object_Detection/9_segmentation_models/lib/");
from infer_segmentation import Infer
# Inference wrapper for a binary (background/person) segmentation model
# working on 256x256 inputs.
gtf = Infer();
classes_dict = {
    'background': 0,
    'person': 1
};
classes_to_train = ['background', 'person'];
gtf.Data_Params(classes_dict, classes_to_train, image_shape=[256, 256])
# +
# Download trained model
# -
# ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1GsY5Dw6xhcEPTSrIM9F62HnUcEU3lir7' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1GsY5Dw6xhcEPTSrIM9F62HnUcEU3lir7" -O seg_trimodal_trained.zip && rm -rf /tmp/cookies.txt
# ! unzip -qq seg_trimodal_trained.zip
# Load the pretrained Unet (EfficientNet-B3 backbone) weights downloaded above.
gtf.Model_Params(model="Unet", backbone="efficientnetb3", path_to_model='seg_trimodal_trained/best_model.h5')
gtf.Setup();
# NOTE(review): this `cp` is missing its destination argument and fails if run.
# ! cp seg_trimodal_trained/test/4.png
# Segment the bundled sample images and visualise the predicted masks.
gtf.Predict("seg_trimodal_trained/test/1.png", vis=True);
gtf.Predict("seg_trimodal_trained/test/2.png", vis=True);
gtf.Predict("seg_trimodal_trained/test/3.png", vis=True);
gtf.Predict("seg_trimodal_trained/test/4.png", vis=True);
# # Train you own detector
# # Monk Format
#
# ## Dataset Directory Structure
#
# root_dir
# |
# |
# |
# |----train_img_dir
# | |
# | |---------img1.jpg
# | |---------img2.jpg
# | |---------..........(and so on)
# |
# |----train_mask_dir
# | |
# | |---------img1.jpg
# | |---------img2.jpg
# | |---------..........(and so on)
# |
# |----val_img_dir (optional)
# | |
# | |---------img1.jpg
# | |---------img2.jpg
# | |---------..........(and so on)
# |
# |----val_mask_dir (optional)
# | |
# | |---------img1.jpg
# | |---------img2.jpg
# | |---------..........(and so on)
#
#
#
# # Sample Dataset Credits
#
# credits: https://www.kaggle.com/aalborguniversity/trimodal-people-segmentation
# ! pip install kaggle
# ! kaggle datasets download aalborguniversity/trimodal-people-segmentation
# ! unzip -qq trimodal-people-segmentation.zip
# ! mkdir dataset
# ! mkdir dataset/images
# ! mkdir dataset/masks
# ! mv TrimodalDataset/Scene\ 1 TrimodalDataset/scene_1
# ! mv TrimodalDataset/Scene\ 2 TrimodalDataset/scene_2
# ! mv TrimodalDataset/Scene\ 3 TrimodalDataset/scene_3
import os
import cv2
import numpy as np
from tqdm import tqdm
# +
# Export scene 1: binarise each RGB mask (non-zero -> class 1) and copy the
# matching RGB frame, renaming both with a shared sequential index.
num = 0;
img_list = os.listdir("TrimodalDataset/scene_1/rgbMasks");
for i in tqdm(range(len(img_list))):
    try:
        mask = cv2.imread("TrimodalDataset/scene_1/rgbMasks/" + img_list[i]);
        mask[mask > 0] = 1;
        cv2.imwrite("dataset/masks/scene_1_" + str(num) + ".png", mask);
        img = cv2.imread("TrimodalDataset/scene_1/SyncRGB/" + img_list[i].split(".")[0] + ".jpg")
        cv2.imwrite("dataset/images/scene_1_" + str(num) + ".png", img)
        num += 1;
    except:
        # cv2.imread yields None for unreadable files; log the mask path, skip.
        print("TrimodalDataset/scene_1/rgbMasks/" + img_list[i])
# Scene 2: same export as scene 1 (num keeps counting across scenes).
img_list = os.listdir("TrimodalDataset/scene_2/rgbMasks");
for i in tqdm(range(len(img_list))):
    try:
        mask = cv2.imread("TrimodalDataset/scene_2/rgbMasks/" + img_list[i]);
        mask[mask > 0] = 1;
        cv2.imwrite("dataset/masks/scene_2_" + str(num) + ".png", mask);
        img = cv2.imread("TrimodalDataset/scene_2/SyncRGB/" + img_list[i].split(".")[0] + ".jpg")
        cv2.imwrite("dataset/images/scene_2_" + str(num) + ".png", img)
        num += 1;
    except:
        print("TrimodalDataset/scene_2/rgbMasks/" + img_list[i])
# Scene 3: same export as scenes 1 and 2 (num keeps counting across scenes).
img_list = os.listdir("TrimodalDataset/scene_3/rgbMasks");
for i in tqdm(range(len(img_list))):
    try:
        mask = cv2.imread("TrimodalDataset/scene_3/rgbMasks/" + img_list[i]);
        mask[mask > 0] = 1;
        cv2.imwrite("dataset/masks/scene_3_" + str(num) + ".png", mask);
        # BUGFIX: the RGB frame was read from scene_1/SyncRGB while the mask
        # came from scene_3, pairing images with the wrong masks.
        img = cv2.imread("TrimodalDataset/scene_3/SyncRGB/" + img_list[i].split(".")[0] + ".jpg")
        cv2.imwrite("dataset/images/scene_3_" + str(num) + ".png", img)
        num += 1;
    except:
        print("TrimodalDataset/scene_3/rgbMasks/" + img_list[i])
# ## Training
import os
import sys
sys.path.append("Monk_Object_Detection/9_segmentation_models/lib/");
from train_segmentation import Segmenter
gtf = Segmenter();
# +
# NOTE(review): train and validation point at the same directories, so the
# reported validation metrics are measured on training images -- confirm.
train_img_dir = "dataset/images";
train_mask_dir = "dataset/masks";
val_img_dir = "dataset/images";
val_mask_dir = "dataset/masks";
# -
classes_dict = {
    'background': 0,
    'person': 1
};
classes_to_train = ['background', 'person'];
gtf.Train_Dataset(train_img_dir, train_mask_dir, classes_dict, classes_to_train)
gtf.Val_Dataset(val_img_dir, val_mask_dir)
gtf.List_Backbones();
# Unet + EfficientNet-B3 backbone, 256x256 inputs, lr 1e-4, 300 epochs.
gtf.Data_Params(batch_size=2, backbone="efficientnetb3", image_shape=[256, 256])
gtf.List_Models();
gtf.Model_Params(model="Unet")
gtf.Train_Params(lr=0.0001)
gtf.Setup();
gtf.Train(num_epochs=300);
gtf.Visualize_Training_History();
# # Inference
import os
import sys
sys.path.append("Monk_Object_Detection/9_segmentation_models/lib/");
from infer_segmentation import Infer
gtf = Infer();
# Same class setup as during training; load the freshly trained checkpoint
# and segment one exported frame.
classes_dict = {
    'background': 0,
    'person': 1
};
classes_to_train = ['background', 'person'];
gtf.Data_Params(classes_dict, classes_to_train, image_shape=[256, 256])
gtf.Model_Params(model="Unet", backbone="efficientnetb3", path_to_model='best_model.h5')
gtf.Setup();
gtf.Predict("dataset/images/scene_2_3502.png", vis=True);
|
application_model_zoo/Example - Trimodal People Segmentation Dataset.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import nuscenes
import numpy as np
# Load the full trainval split of the nuScenes dataset from local disk.
nusc = nuscenes.NuScenes(version='v1.0-trainval', dataroot='/data/Datasets/nuScenes', verbose=True)
sample = nusc.sample[13]
# Visualise the top lidar (aggregating 5 sweeps) as a sanity check.
nusc.render_sample_data(nusc.get('sample_data', sample['data']['LIDAR_TOP'])['token'], nsweeps=5)
lidar_path, boxes, _ = nusc.get_sample_data(nusc.get('sample_data', sample['data']['LIDAR_TOP'])['token'])
lidar_path
# Raw point cloud: 5 floats per point; keep the first 4 (x, y, z, intensity)
# and transpose to shape (4, N).
points = np.fromfile(lidar_path, dtype=np.float32).reshape([-1, 5])[:, :4].T
points.shape
from nuscenes.utils.geometry_utils import transform_matrix
from pyquaternion import Quaternion
curr_sd_rec = nusc.get('sample_data', nusc.get('sample', sample['token'])['data']['LIDAR_TOP'])
ego_pose = nusc.get('ego_pose', curr_sd_rec['ego_pose_token'])
calibrated_sensor = nusc.get('calibrated_sensor', curr_sd_rec['calibrated_sensor_token'])
# +
# Build the sensor -> ego ("car_from_current") and ego -> global
# ("global_from_car") transforms and compose them.
global_from_car = transform_matrix(
    ego_pose['translation'],
    Quaternion(ego_pose['rotation']),
    inverse=False)
car_from_current = transform_matrix(
    calibrated_sensor['translation'],
    Quaternion(calibrated_sensor['rotation']),
    inverse=False)
tm = np.dot(global_from_car, car_from_current)
# -
# Apply the homogeneous transform to the xyz rows, then back to (N, 4).
nbr_points = points.shape[1]
points[:3, :] = tm.dot(np.vstack((points[:3, :], np.ones(nbr_points))))[:3, :]
points = points.T
print(points.shape)
# Per-column (x, y, z, intensity) min/max of the transformed cloud.
for i in range(points.shape[1]):
    print(points[:, i].min(), points[:, i].max())
from matplotlib import pyplot as plt
plt.scatter(points[:, 0],points[:, 1])
plt.show()
# Fetch the binary map mask of the scene this sample belongs to.
scene = nusc.get('scene', nusc.get('sample', sample['token'])['scene_token'])
scene_token = scene['token']
scene_record = nusc.get('scene', scene_token)
log_record = nusc.get('log', scene_record['log_token'])
map_record = nusc.get('map', log_record['map_token'])
map_mask = map_record['mask']
# NOTE(review): hard-coded pixel window -- specific to this particular map.
cropped = map_mask.mask()[8940:10695, 5599: 6969]
plt.imshow(cropped)
ego_pose['translation']
# Ego position converted from metres to map-mask pixel coordinates.
map_pose = np.concatenate(map_mask.to_pixel_coords(ego_pose['translation'][0], ego_pose['translation'][1]))
map_pose.shape
map_pose
# Keep only the points that fall on the map mask.
filtered = map_mask.is_on_mask(x=points[:, 0], y=points[:, 1])
filtered_points = points[filtered]
filtered_points.shape
plt.scatter(filtered_points[:, 0],filtered_points[:, 1])
plt.show()
# 50.4 m half-window around the ego; the /0.1 presumably converts metres to
# pixels at a 0.1 m/px mask resolution -- confirm against map_mask.resolution.
x_min = map_pose[0] - int(50.4/0.1)
x_max = map_pose[0] + int(50.4/0.1)
y_min = map_pose[1] - int(50.4/0.1)
y_max = map_pose[1] + int(50.4/0.1)
cropped2 = map_mask.mask()[y_min:y_max, x_min:x_max, ]
plt.imshow(cropped2)
|
det3d/visualization/map_mask.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 64-bit
# name: python3
# ---
# +
# Import modules
import datetime
import spiceypy
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# Load the SPICE kernels via a meta file
spiceypy.furnsh('kernel_meta.txt')
# Create an initial date-time object that is converted to a string
datetime_utc = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
# Convert to Ephemeris Time (ET) using the SPICE function utc2et
datetime_et = spiceypy.utc2et(datetime_utc)
# +
# We want to compute the coordinates for different Solar System bodies as seen
# from our planet. First, a pandas dataframe is set that is used to append the
# computed data
solsys_df = pd.DataFrame()
# Add the ET and the corresponding UTC date-time string
solsys_df.loc[:, 'ET'] = [datetime_et]
solsys_df.loc[:, 'UTC'] = [datetime_utc]
# Set a dictionary that lists some body names and the corresponding NAIF ID
# code. Mars has the ID 499, however the loaded kernels do not contain the
# positional information. We use the Mars barycentre instead
SOLSYS_DICT = {'SUN': 10, 'VENUS': 299, 'MOON': 301, 'MARS': 4}
# Each body shall have an individual color; set a list with some colors
BODY_COLOR_ARRAY = ['y', 'tab:orange', 'tab:gray', 'tab:red']
# -
# Now we want the coordinates in equatorial J2000. For this purpose we
# iterate through all celestial bodies
for body_name in SOLSYS_DICT:
# First, compute the directional vector of the body as seen from Earth in
# J2000
solsys_df.loc[:, f'dir_{body_name}_wrt_earth_equ'] = solsys_df['ET'] \
.apply(lambda x: spiceypy.spkezp(targ=SOLSYS_DICT[body_name], \
et=x, \
ref='J2000', \
abcorr='LT+S', \
obs=399)[0])
# Compute the longitude and latitude values in equatorial J2000
# coordinates
solsys_df.loc[:, f'{body_name}_long_rad_equ'] = solsys_df[f'dir_{body_name}_wrt_earth_equ'] \
.apply(lambda x: spiceypy.recrad(x)[1])
solsys_df.loc[:, f'{body_name}_lat_rad_equ'] = solsys_df[f'dir_{body_name}_wrt_earth_equ'] \
.apply(lambda x: spiceypy.recrad(x)[2])
# Apply the same logic as shown before to compute the longitudes for the
# matplotlib figure
solsys_df.loc[:, f'{body_name}_long_rad4plot_equ'] = \
solsys_df[f'{body_name}_long_rad_equ'] \
.apply(lambda x: -1*((x % np.pi) - np.pi) if x > np.pi \
else -1*x)
# +
# Before we plot the data, let's add the Ecliptic plane for the visualisation.
# In ECLIPJ2000 the Ecliptic plane is the equator line (see corresponding
# figure. The latitude is 0 degrees.
# First, we create a separate dataframe for the ecliptic plane
eclip_plane_df = pd.DataFrame()
# Add the ecliptic longitude and latitude values for the plane. Note: we set
# pi/2 (90 degrees) here because the SPICE function applied below expects
# spherical coordinates with a CO-latitude (angle measured from the pole),
# and the Ecliptic equator lies at a co-latitude of 90 degrees
eclip_plane_df.loc[:, 'ECLIPJ2000_long_rad'] = np.linspace(0, 2*np.pi, 100)
eclip_plane_df.loc[:, 'ECLIPJ2000_lat_rad'] = np.pi / 2.0
# Compute the directional vectors of the ecliptic plane for the different
# longitude values (the latitude is constant). Apply the SPICE function sphrec
# to transform the spherical coordinates to vectors. r=1 is the distance,
# here in our case: normalised distance
eclip_plane_df.loc[:, 'ECLIPJ2000_direction'] = \
eclip_plane_df \
.apply(lambda x: spiceypy.sphrec(r=1, \
colat=x['ECLIPJ2000_lat_rad'], \
lon=x['ECLIPJ2000_long_rad']), \
axis=1)
# +
# Compute a transformation matrix between ECLIPJ2000 and J2000 for a fixed
# date-time. Since both coordinate system are inertial (not changing in time)
# the resulting matrix is the same for different ETs
ecl2equ_mat = spiceypy.pxform(fromstr='ECLIPJ2000', \
tostr='J2000', \
et=datetime_et)
# Compute the direction vectors of the Ecliptic plane in J2000 using the
# transformation matrix
eclip_plane_df.loc[:, 'j2000_direction'] = \
eclip_plane_df['ECLIPJ2000_direction'].apply(lambda x: ecl2equ_mat.dot(x))
# Compute now the longitude (and matplotlib compatible version) and the
# latitude values using the SPICE function recrad
eclip_plane_df.loc[:, 'j2000_long_rad'] = \
eclip_plane_df['j2000_direction'].apply(lambda x: spiceypy.recrad(x)[1])
eclip_plane_df.loc[:, 'j2000_long_rad4plot'] = \
eclip_plane_df['j2000_long_rad'] \
.apply(lambda x: -1*((x % np.pi) - np.pi) if x > np.pi \
else -1*x)
eclip_plane_df.loc[:, 'j2000_lat_rad'] = \
eclip_plane_df['j2000_direction'].apply(lambda x: spiceypy.recrad(x)[2])
# +
# We plot now the data in equatorial J2000. Again with a dark background and
# the same properties as before
plt.style.use('dark_background')
plt.figure(figsize=(12, 8))
plt.subplot(projection="aitoff")
plt.title(f'{datetime_utc} UTC', fontsize=10)
# Iterate through the celestial bodies and plot them
for body_name, body_color in zip(SOLSYS_DICT, BODY_COLOR_ARRAY):
plt.plot(solsys_df[f'{body_name}_long_rad4plot_equ'], \
solsys_df[f'{body_name}_lat_rad_equ'], \
color=body_color, marker='o', linestyle='None', markersize=12, \
label=body_name.capitalize())
# Plot the Ecliptic plane as a blue dotted line
plt.plot(eclip_plane_df['j2000_long_rad4plot'], \
eclip_plane_df['j2000_lat_rad'], color='tab:blue', linestyle='None', \
marker='o', markersize=2)
# Convert the longitude values finally in right ascension hours
plt.xticks(ticks=np.radians(np.arange(-150, 180, 30)),
labels=['10 h', '8 h', '6 h', '4 h', '2 h', '0 h', \
'22 h', '20 h', '18 h', '16 h', '14 h'])
# Plot the labels
plt.xlabel('Right ascension in hours')
plt.ylabel('Declination in deg.')
# Create a legend and grid
plt.legend()
plt.grid(True)
# Save the figure
plt.savefig('j2000_sky_map.png', dpi=300)
|
[10]-Equatorial-Coordinates/equatorial_coordinates.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ulisess45/daa_2021_1/blob/master/20Enero.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="Nqxt3psyMpYR"
class NodoArbol:
    """Binary-tree node: a payload plus left/right child links.

    `hijo_izq` / `hijo_der` default to None, meaning an empty child slot.
    """

    def __init__(self, dato, hijo_izq=None, hijo_der=None):
        # Bind payload and both children in one tuple assignment.
        self.dato, self.left, self.right = dato, hijo_izq, hijo_der
# + [markdown] id="hbyKFdm1NJqX"
# #Arbol Binario de Busqueda
# Los nodos a la izq son menores a la raiz y los nodos a la derecha son mayores a la raiz. Pueden recorrerse en: in-orden, pre-orden y post-orden
#
#
# + id="BM_10uqvNlb8"
class BinarySearchTree:
    """Binary search tree: values smaller than a node live in its left
    subtree, larger values in its right subtree; duplicates are ignored.

    Traversals print the values separated by commas (no trailing newline).
    """

    def __init__(self):
        # Root node; None while the tree is empty.
        self.__root = None

    def insert(self, value):
        """Insert *value* into the tree (duplicates are silently dropped)."""
        if self.__root is None:
            self.__root = NodoArbol(value, None, None)
        else:
            # Descend recursively until a free child slot is found.
            self.__insert_nodo__(self.__root, value)

    def __insert_nodo__(self, nodo, value):
        # Recursive insertion helper.
        if nodo.dato == value:
            pass  # duplicate: nothing to do
        elif value < nodo.dato:  # smaller values go left
            if nodo.left is None:
                nodo.left = NodoArbol(value, None, None)
            else:
                self.__insert_nodo__(nodo.left, value)
        else:  # larger values go right
            if nodo.right is None:
                nodo.right = NodoArbol(value, None, None)
            else:
                # BUG FIX: the original passed the misspelled name 'vlue',
                # raising NameError whenever the right child was occupied.
                self.__insert_nodo__(nodo.right, value)

    def buscar(self, value):
        """Return the stored value if *value* is in the tree, else None."""
        if self.__root is None:
            return None
        # Recursive search from the root.
        return self.__busca_nodo(self.__root, value)

    def __busca_nodo(self, nodo, value):
        # Recursive search helper: standard BST descent.
        if nodo is None:
            return None
        elif nodo.dato == value:
            return nodo.dato
        elif value < nodo.dato:
            return self.__busca_nodo(nodo.left, value)
        else:
            return self.__busca_nodo(nodo.right, value)

    def transversal(self, format="inorden"):
        """Print the tree in the requested order.

        *format* is one of "inorden", "preorden" or "posorden"; anything
        else prints an error message.
        """
        if format == "inorden":
            self.__recorrido_in(self.__root)
        elif format == "preorden":
            self.__recorrido_pre(self.__root)
        elif format == "posorden":
            self.__recorrido_pos(self.__root)
        else:
            print("Formato de recorrido no valido")

    def __recorrido_pre(self, nodo):
        # Pre-order: node, left subtree, right subtree.
        if nodo is not None:
            print(nodo.dato, end=",")
            self.__recorrido_pre(nodo.left)
            self.__recorrido_pre(nodo.right)

    def __recorrido_in(self, nodo):
        # In-order: left subtree, node, right subtree.
        # BUG FIX: the original recursed via __recorrido_pre, so the
        # subtrees were printed in pre-order instead of in-order.
        if nodo is not None:
            self.__recorrido_in(nodo.left)
            print(nodo.dato, end=",")
            self.__recorrido_in(nodo.right)

    def __recorrido_pos(self, nodo):
        # Post-order: left subtree, right subtree, node.
        # BUG FIX: same misdirected recursion as __recorrido_in.
        if nodo is not None:
            self.__recorrido_pos(nodo.left)
            self.__recorrido_pos(nodo.right)
            print(nodo.dato, end=",")
# + id="3wHWeMefT_J-" colab={"base_uri": "https://localhost:8080/"} outputId="ae7c7e55-cb46-4ac0-edea-889655956953"
# Build a small tree and exercise search plus the three traversal orders.
bst = BinarySearchTree()
bst.insert(50)
bst.insert(30)
bst.insert(20)
res = bst.buscar(30)  # returns the stored value (30) when found, None otherwise
print("Dato: " + str(res))
print(bst.buscar(40))
print("Recorrido pre:")
bst.transversal(format = "preorden")
print("\n Recorrido in:")
bst.transversal(format = "inorden")
print("\n Recorrido pos:")
bst.transversal(format = "posorden")
|
20Enero.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.8 64-bit
# language: python
# name: python3
# ---
# +
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
# Paste one image (a face) into the top-left corner of another and show the
# two originals plus the composite side by side.
img_face = cv.imread('assets/gavFace.png')
plt.figure(figsize=(10, 6))
# BGR to RGB (OpenCV loads BGR; matplotlib expects RGB)
img_face_rgb = img_face[:, :, ::-1]
plt.subplot(1, 3, 1)
plt.title('My Face')
plt.imshow(img_face_rgb)
img_ca = cv.imread('assets/ca.jpg')
img_ca_rgb = img_ca[:, :, ::-1]
plt.subplot(1, 3, 2)
plt.title('CA')
plt.imshow(img_ca_rgb)
h, w, c = img_face.shape
h2, w2, c2 = img_ca.shape
# NOTE(review): this branch ENLARGES img_face by 2x when it does not fit in
# img_ca, and the slice below still uses the pre-resize h, w -- if taken, the
# assignment would raise a shape mismatch. Presumably fx=fy=0.5 (shrink) was
# intended; confirm against the actual asset sizes.
if h >= h2 or w >= w2:
    img_face = cv.resize(img_face, None, fx=2, fy=2,
                         interpolation=cv.INTER_AREA)
# In-place paste into the top-left corner of the background image.
img_ca[:h, :w] = img_face
imgRGB = img_ca[:, :, ::-1]
plt.subplot(1, 3, 3)
plt.title('VR')
plt.imshow(imgRGB)
plt.show()
print(f'img_face.shape: {img_face.shape}')
print(f'img_ca.shape: {img_ca.shape}')
# +
# Import required packages:
import cv2 as cv
from matplotlib import pyplot as plt
# Detect ORB keypoints in a query image and a scene image, brute-force match
# their descriptors, and draw the 20 best matches.
# Create the dimensions of the figure and set title:
fig = plt.figure(figsize=(8, 6))
plt.suptitle("ORB descriptors and Brute-Force (BF) matcher", fontsize=14, fontweight='bold')
fig.patch.set_facecolor('silver')
# Load the 'query' and 'scene' image:
image_query = cv.imread('assets/gavFace.png')
image_scene = cv.imread('assets/scene.png')
# Initiate ORB detector:
orb = cv.ORB_create()
# Detect the keypoints and compute the descriptors with ORB:
keypoints_1, descriptors_1 = orb.detectAndCompute(image_query, None)
keypoints_2, descriptors_2 = orb.detectAndCompute(image_scene, None)
# Create BFMatcher object
# First parameter sets the distance measurement (by default it is cv.NORM_L2);
# NORM_HAMMING is the metric recommended for binary descriptors such as ORB.
# The second parameter crossCheck (which is False by default) can be set to True in order to return only
# consistent pairs in the matching process (the two features in both sets should match each other)
bf_matcher = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
# Match descriptors:
bf_matches = bf_matcher.match(descriptors_1, descriptors_2)
# Sort the matches in the order of their distance (best matches first):
bf_matches = sorted(bf_matches, key=lambda x: x.distance)
# Draw first 20 matches:
result = cv.drawMatches(image_query, keypoints_1, image_scene, keypoints_2, bf_matches[:20], None,
                        matchColor=(255, 255, 0), singlePointColor=(255, 0, 255), flags=0)
# Plot the images:
# Convert BGR image to RGB
imgRGB = result[:, :, ::-1]
plt.imshow(imgRGB)
plt.title("matches between the two images")
# show
plt.show()
# +
"""
QR code detection
"""
# Import required packages:
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
# Load input image:
# img = cv.imread("assets/qrcode2.png")
img = cv.imread("assets/image_with_qr.png")
# Create QR code detector:
qr_code_detector = cv.QRCodeDetector()
# Detect and decode the QR code using qr_code_detector.detectAndDecode()
# This function returns the data, the array of vertices of the found QR code quadrangle and
# the image containing the rectified binarized QR code:
data, vertices, rectified_qr_code_binarized = qr_code_detector.detectAndDecode(img)
if len(data) > 0:
print("Decoded Data: '{}'".format(data))
# Show the detection in the image:
pts = np.int32(vertices).reshape(-1, 2)
for j in range(pts.shape[0]):
cv.line(img, tuple(pts[j]), tuple(pts[(j + 1) % pts.shape[0]]), (255, 0, 0), 5)
for j in range(pts.shape[0]):
cv.circle(img, tuple(pts[j]), 10, (255, 0, 255), -1)
# Convert BGR image to RGB
imgRGB = img[:, :, ::-1]
plt.imshow(imgRGB)
plt.title("original: " + data)
# Show the Figure:
plt.show()
# Convert binarized image to uint8:
rectified_image = np.uint8(rectified_qr_code_binarized)
rectified_image = cv.cvtColor(rectified_image, cv.COLOR_GRAY2BGR)
# Plot the images:
# Convert BGR image to RGB
imgRGB = rectified_image[:, :, ::-1]
plt.imshow(imgRGB)
plt.title("detected QR code")
plt.show()
else:
print("QR Code not detected")
# +
"""
Snapchat-based augmented reality OpenCV moustache overlay
"""
# Import required packages:
import cv2 as cv
# Load cascade classifiers for face and nose detection:
face_cascade = cv.CascadeClassifier("../samples/data/haarcascade_frontalface_default.xml")
nose_cascade = cv.CascadeClassifier("../samples/data/haarcascade_mcs_nose.xml")
# Load moustache image. The parameter -1 reads also de alpha channel
# Open 'moustaches.sgv' to see more moustaches that can be used
# Therefore, the loaded image has four channels (Blue, Green, Red, Alpha):
img_moustache = cv.imread('../samples/data/moustache.png', -1)
# Create the mask for the moustache:
img_moustache_mask = img_moustache[:, :, 3]
# cv.imshow("img moustache mask", img_moustache_mask)
# You can use a test image to adjust the ROIS:
# test_face = cv.imread("../samples/data/face_test.png")
# test_face = cv.imread("assets/numan_face.png")
# test_face = cv.resize(test_face, None, fx=2, fy=2, interpolation=cv.INTER_AREA)
# Convert moustache image to BGR (eliminate alpha channel):
img_moustache = img_moustache[:, :, 0:3]
# +
"""
Snapchat-based augmented reality OpenCV glasses overlay
"""
# Import required packages:
import cv2 as cv
# Load cascade classifiers for face and eyepair detection:
face_cascade = cv.CascadeClassifier("haarcascade_frontalface_default.xml")
eyepair_cascade = cv.CascadeClassifier("haarcascade_mcs_eyepair_big.xml")
nose_cascade = cv.CascadeClassifier("haarcascade_mcs_nose.xml")
# Load glasses image. The parameter -1 reads also de alpha channel (if exists)
# Open 'glasses.sgv' to see more glasses that can be used
# Therefore, the loaded image has four channels (Blue, Green, Red, Alpha):
img_glasses = cv.imread('assets/glasses.png', -1)
# Create the mask for the glasses:
img_glasses_mask = img_glasses[:, :, 3]
# cv.imshow("img glasses mask", img_glasses_mask)
# Convert glasses image to BGR (eliminate alpha channel):
img_glasses = img_glasses[:, :, 0:3]
# Load moustache image. The parameter -1 reads also de alpha channel
# Open 'moustaches.sgv' to see more moustaches that can be used
# Therefore, the loaded image has four channels (Blue, Green, Red, Alpha):
img_moustache = cv.imread('assets/moustache.png', -1)
# Create the mask for the moustache:
img_moustache_mask = img_moustache[:, :, 3]
# cv.imshow("img moustache mask", img_moustache_mask)
# Convert moustache image to BGR (eliminate alpha channel):
img_moustache = img_moustache[:, :, 0:3]
# You can use a test image to adjust the ROIS:
# test_face = cv.imread("../samples/data/face_test.png")
test_face = cv.imread("assets/numan_face.png")
# Create VideoCapture object to get images from the webcam:
video_capture = cv.VideoCapture(0)
while True:
# Capture frame from the VideoCapture object:
ret, frame = video_capture.read()
# Just for debugging purposes and to adjust the ROIS:
# frame = test_face.copy()
# Convert frame to grayscale:
gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
# Detect faces using the function 'detectMultiScale()'
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
# Iterate over each detected face:
for (x, y, w, h) in faces:
# Draw a rectangle to see the detected face (debugging purposes):
# cv.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)
# Create the ROIS based on the size of the detected face:
roi_gray = gray[y:y + h, x:x + w]
roi_color = frame[y:y + h, x:x + w]
# Detect the eyepair inside the detected face:
eyepairs = eyepair_cascade.detectMultiScale(roi_gray)
# Detects a nose inside the detected face:
noses = nose_cascade.detectMultiScale(roi_gray)
# Iterate over the detected eyepairs (inside the face):
for (ex, ey, ew, eh) in eyepairs:
# Draw a rectangle to see the detected eyepair (debugging purposes):
# cv.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (255, 0, 255), 2)
# Calculate the coordinates where the glasses will be placed:
x1 = int(ex - ew / 10)
x2 = int((ex + ew) + ew / 10)
y1 = int(ey)
y2 = int(ey + eh + eh / 2)
if x1 < 0 or x2 < 0 or x2 > w or y2 > h:
continue
# Draw a rectangle to see where the glasses will be placed (debugging purposes):
# cv.rectangle(roi_color, (x1, y1), (x2, y2), (0, 255, 255), 2)
# Calculate the width and height of the image with the glasses:
img_glasses_res_width = int(x2 - x1)
img_glasses_res_height = int(y2 - y1)
# Resize the mask to be equal to the region were the glasses will be placed:
mask = cv.resize(img_glasses_mask, (img_glasses_res_width, img_glasses_res_height))
# Create the invert of the mask:
mask_inv = cv.bitwise_not(mask)
# Resize img_glasses to the desired (and previously calculated) size:
img = cv.resize(img_glasses, (img_glasses_res_width, img_glasses_res_height))
# Take ROI from the BGR image:
roi = roi_color[y1:y2, x1:x2]
# Create ROI background and ROI foreground:
roi_bakground = cv.bitwise_and(roi, roi, mask=mask_inv)
roi_foreground = cv.bitwise_and(img, img, mask=mask)
# Show both roi_bakground and roi_foreground (debugging purposes):
# cv.imshow('roi_bakground', roi_bakground)
# cv.imshow('roi_foreground', roi_foreground)
# Add roi_bakground and roi_foreground to create the result:
res = cv.add(roi_bakground, roi_foreground)
# Set res into the color ROI:
roi_color[y1:y2, x1:x2] = res
break
for (nx, ny, nw, nh) in noses:
# Draw a rectangle to see the detected nose (debugging purposes):
# cv.rectangle(roi_color, (nx, ny), (nx + nw, ny + nh), (255, 0, 255), 2)
# Calculate the coordinates where the moustache will be placed:
x3 = int(nx - nw / 2)
x4 = int(nx + nw / 2 + nw)
y3 = int(ny + nh / 2 + nh / 8)
y4 = int(ny + nh + nh / 4 + nh / 6)
if x3 < 0 or x4 < 0 or x3 > w or y3 > h:
continue
# Draw a rectangle to see where the moustache will be placed (debugging purposes):
# cv.rectangle(roi_color, (x1, y1), (x2, y2), (255, 0, 0), 2)
# Calculate the width and height of the image with the moustache:
img_moustache_res_width = int(x4 - x3)
img_moustache_res_height = int(y4 - y3)
# Resize the mask to be equal to the region were the glasses will be placed:
mask = cv.resize(img_moustache_mask, (img_moustache_res_width, img_moustache_res_height))
# Create the invert of the mask:
mask_inv = cv.bitwise_not(mask)
# Resize img_glasses to the desired (and previously calculated) size:
img = cv.resize(img_moustache, (img_moustache_res_width, img_moustache_res_height))
# Take ROI from the BGR image:
roi = roi_color[y3:y4, x3:x4]
# Create ROI background and ROI foreground:
roi_bakground = cv.bitwise_and(roi, roi, mask=mask_inv)
roi_foreground = cv.bitwise_and(img, img, mask=mask)
# Show both roi_bakground and roi_foreground (debugging purposes):
# cv.imshow('roi_bakground', roi_bakground)
# cv.imshow('roi_foreground', roi_foreground)
# Add roi_bakground and roi_foreground to create the result:
res = cv.add(roi_bakground, roi_foreground)
# Set res into the color ROI:
roi_color[y3:y4, x3:x4] = res
break
# Display the resulting frame
cv.imshow('Snapchat-based OpenCV glasses filter', frame)
# Press any key to exit
if cv.waitKey(1) & 0xFF == ord('q'):
break
# Release everything:
video_capture.release()
cv.destroyAllWindows()
# -
|
Assignments/a24/a24.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Next-Generation File Formats (NGFF)
# ## ELMI 2021 Workshop
#
#
# The presentation and a PDF version of the workshop are available at:
#
# https://downloads.openmicroscopy.org/presentations/2021/ELMI
#
#
# ## Outline
#
# 0. Some basics (this notebook)
# 1. [Benefits of NGFFs to you](1_Viewing.ipynb)
# 2. [What is the "Cloud"?](2_Minio.ipynb)
# 3. [Getting started with data publishing](3_Conversion.ipynb)
# 4. [Advanced usage: analysis](4_Analysis.ipynb)
#
# <!--
# ## ELMI Poster
#
# <img src="images/poster.png" title="Screenshot of poster" width="400px"/>
#
# Figshare URL
#
# ## Poster Diagram
#
# 
# -->
# ## 0.1 Software versions used for this workshop:
#
#
# ### 0.1.1 Running now
#
# * jupyter 1.0.0 ([external guide](https://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/))
# * conda 4.8.4 ([external guide](https://docs.conda.io/projects/conda/en/latest/user-guide/index.html))
#
# ### 0.1.2 Core tools
# * **bioformats2raw 0.2.5** ([install locally](https://github.com/glencoesoftware/bioformats2raw/releases/latest); requires Java)
# * **minio-client 2020.11.17** ([install locally](https://docs.min.io/docs/minio-client-complete-guide.html))
#
# ### 0.1.3 Other
#
# * awscli 1.18.219
# * dask 2021.1.0
# * fsspec 0.8.5
# * napari 0.4.3
# * numpy 1.19.5
# * ome-zarr 0.0.17
# * omero-cli-zarr 0.0.9
# * omero-py 5.8.2
# * openjdk 11.0.8
# * tifffile 2021.1.14
# * zarr 2.6.1
# * vizarr 0.1.2
#
# ***
#
# ## 0.2. Notebook reminders
#
# This notebook is somewhat unusual in that we use a lot of command-line tools. Each of the lines beginning with an exclamation mark (`!`) is run in a terminal.
#
# For example, you can use standard bash commands like `pwd` to see what directory you are in. When using mybinder, this will start with `/home/jovyan`. If you are interested in _why_, see [What is a Jovyan?](https://jupyter.readthedocs.io/en/latest/community/content-community.html#what-is-a-jovyan)
#
# !pwd
# The `binder/` directory contains a Conda [environment.yml](https://github.com/ome/NGFF-GBI-2021-Workshop/blob/main/binder/environment.yml) file. You can use it to download all the tools we are using here.
# !ls binder/
# !cat binder/environment.yml
# You can perform those actions on your own system _without_ a Jupyter notebook. For that, you will need to install the correct tools like `bioformats2raw` or run everything via `repo2docker`.
#
# See instructions under https://github.com/ome/NGFF-ELMI-2021-Workshop/blob/main/README.md
#
# ****
# ### License (BSD 2-Clause)
#
# Copyright (c) 2021, University of Dundee All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
0_Intro.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Understanding Category Errors
from hist import Hist
# Histogram with one regular (numeric) axis and two growable string-category
# axes; Int64 storage keeps integer bin counts.
mass_hist = (Hist.new
             # Raw string fixes the invalid "\l" escape sequence in the
             # LaTeX label (DeprecationWarning in modern Python); the
             # resulting label text is unchanged.
             .Reg(60, 60, 180, name='mass', label=r'$m_{4\ell}$ [GeV]')
             .StrCat([], name='dataset', label='Cut Type', growth=True)
             .StrCat([], name='channel', label='Channel', growth=True)
             .Int64()
             )
# Fill a single entry; the trailing semicolon suppresses notebook output.
mass_hist.fill(
    mass=140.0,
    dataset='data1',
    channel='eemumu'
);
# Slicing with a list of categories that were actually filled works:
mass_hist[:,['data1'],:]
# Including a category that was never filled ('data2') is the error case
# this notebook demonstrates.
mass_hist[:,['data1', 'data2'],:]
|
notebooks/hist-demo-cat-clice-errors.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <img src="https://upload.wikimedia.org/wikipedia/commons/4/47/Logo_UTFSM.png" width="200" alt="utfsm-logo" align="left"/>
#
# # MAT281
# ### Aplicaciones de la Matemática en la Ingeniería
# + [markdown] slideshow={"slide_type": "slide"}
# ## Módulo 02
# ## Clase 06: Desarrollo de Algoritmos
# + [markdown] slideshow={"slide_type": "slide"}
# ## Objetivos
#
# * Conocer las etapas al desarrollar un algoritmo
# * Aprender a empaquetar funciones y objetos.
# * Conocer las funciones más eficientes
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Contenidos
# * [Desarrollo de Software](#soft_dev)
# * [Módulos en Python](#modules)
# * [Fast-Pandas](#fast-pandas)
# + [markdown] slideshow={"slide_type": "slide"}
# <a id='soft_dev'></a>
# ## Desarrollo de Software
#
# A la hora de desarrollar software (de todo tipo, ya sea, aplicaciones móviles, aplicaciones web, librerías, etc.) se dispone de una serie de etapas para una correcta puesta en marcha. Estas etapas típicamente están caracterizadas por el ambiente en el que se ejecutan.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Ejemplo
#
# Considera que estás desarrollando un algoritmo de reconocimiento facial que luego agranda tus ojos y agrega pecas, todo con la finalidad de crear un _nuevo_ filtro para Instagram. Los ambientes de desarrollo serían:
#
# * __Local:__ Tu propio computador, donde desarrollas el algoritmo y haces las primeras pruebas, con un par de fotos o videos, nada muy costoso computacionalmente.
# * __Development:__ Servidor con acceso a los desarrolladores, ya sea de _backend_, _frontend_, _data base_, _integration_, _algorithmic_. En esta etapa ya es necesario que todas las partes del producto se comuniquen entre ellas. Por ejemplo, que al hacer click en el filtro el algoritmo se ejecute. Aquí se pueden hacer pruebas sin miedo a que el producto deje de funcionar por cambios en el código.
# * __Test:__ Un servidor con acceso solo para tu equipo de trabajo, en el que harán pruebas de estress, ya sea para el algoritmo como para la aplicación.
# * __QA:__ En ocasiones se disponibiliza un servidor con acceso al usuario final, con tal que el pueda realizar pruebas. Esto con la finalidad de evitar el sesgo del equipo que desarrolló el producto.
# * __Production:__ Servidor final, donde todos los usuarios utilizan el producto. Debe ser capaz de soportar todas las pruebas anteriores y la cantidad de usuarios para la que fue desarrollado. Por ejemplo, debe soportar que millones de personas utilicen el filtro al mismo tiempo.
# + [markdown] slideshow={"slide_type": "slide"}
# <a id='modules'></a>
# ## Módulos en Python
#
# Como matemático, es muy probable que el día de mañana debas desarrollar un algoritmo, en `Python` la forma de reutilizar y compartir código es a partir de _Modules_ y _Packages_. De la [documentación oficial](https://docs.python.org/3/tutorial/modules.html) tenemos que:
#
# * __Module__: A module is a file containing Python definitions and statements. The file name is the module name with the suffix `.py` appended.
# * __Package__: Packages are a way of structuring Python’s module namespace by using “dotted module names”.
#
# Un ejemplo de módulo es el archivo `Benchmarker.py` en el directorio `fast_pandas`. En el que se define una clase `Benchmarker` que cuenta con métodos para realizar _benchmarks_ de funciones que se aplican a dataframes de `pandas`. Veámoslo!
#
# Un ejemplo de paquete es pandas, al tenerlo instalado es posible acceder a él y a la colección de módulos que posee.
#
# A continuación veremos un directorio de un proyecto, en el que se crea un paquete con la posibilidad de instalarlo.
# + [markdown] slideshow={"slide_type": "slide"}
# <a id='modules'></a>
# ## Fast-Pandas
#
# Esta sección está basada en el repositorio https://github.com/mm-mansour/Fast-Pandas, donde se analizan diferentes tareas realizadas de diferente manera, en muchas ocasiones se comparan las funciones por defecto de `pandas` vs `numpy`.
# -
import os
import numpy as np
import pandas as pd
# Utilizaremos como ejemplo un dataset de gasto fiscal neto en Chile, obtenidos de una [datathon de DataCampfire](https://datacampfire.com/datathon/).
# Net fiscal spending dataset (semicolon-separated CSV); the columns are
# described in the markdown cell below.
gasto = pd.read_csv(os.path.join("data", "gasto_fiscal.csv"), sep=";")
gasto.head()
# Las variables corresponden a:
#
# * anio: Año del periodo
# * mes: Mes del periodo
# * partida: Ministerio
# * capítulo: Servicio Público
# * programa: Programa
# * subtitulo: Primer nivel de clasificación para el presupuesto
# * monto: Cantidad de millones de pesos chilenos (CLP) actualizada según inflación.
# ### Eliminar filas duplicadas
# Por ejemplo, si deseamos eliminar filas duplicadas según la columna programa, tenemos varias alternativas, veamos cuanto demoran.
# %%timeit
# Option 1: boolean-mask out rows whose 'programa' value was already seen.
gasto[~gasto["programa"].duplicated(keep="first")].reset_index(drop=True)
# %%timeit
# Option 2: built-in drop_duplicates, keeping the first occurrence.
gasto.drop_duplicates(subset="programa", keep="first").reset_index(drop=True)
# %%timeit
# Option 3: group by 'programa' and take the first row of each group.
gasto.groupby(gasto["programa"], as_index=False, sort=False).first()
# La pregunta natural que surge es:
#
# __¿Podemos sacar conclusiones de rendimiento solo con un dataset?__
#
# La respuesta, por increíble que parezca, es __NO__. Por lo que utilizaremos la clase `Benchmarker` para realizar las pruebas con distintos tamaños y además entregar información gráfica.
# # # %load fast_pandas/benchmark_drop.py
# from fast_pandas.Benchmarker import Benchmarker ## Editado
#
#
# def duplicated(df):
# return df[~df["A"].duplicated(keep="first")].reset_index(drop=True)
#
#
# def drop_duplicates(df):
# return df.drop_duplicates(subset="A", keep="first").reset_index(drop=True)
#
#
# def group_by_drop(df):
# return df.groupby(df["A"], as_index=False, sort=False).first()
#
# params = {
# "df_generator": 'pd.DataFrame(np.random.randint(1, df_size, (df_size, 2)), columns=list("AB"))',
# "functions_to_evaluate": [duplicated, drop_duplicates, group_by_drop],
# "title": "Benchmark for dropping duplicate rows",
# "user_df_size_powers": [2, 3, 4] # Editado manualmente con tal de acelear el proceso
# }
#
# benchmark = Benchmarker(**params)
# benchmark.benchmark_all()
# benchmark.print_results()
# benchmark.plot_results()
|
m02_data_analysis/m02_c06_lab/m02_c06_development.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] Collapsed="false"
# <img src='./img/acam_banner.png' alt='Logo EU Copernicus EUMETSAT' align='centre' width='70%'></img>
# + [markdown] Collapsed="false"
# <br>
# + [markdown] Collapsed="false"
# # 4th ACAM Training School
# -
# Jointly organized by [EUMETSAT](https://www.eumetsat.int/website/home/index.html), [CAMS-ECMWF](https://atmosphere.copernicus.eu/) and [ICIMOD](https://www.icimod.org/).
# + [markdown] Collapsed="false"
# The `4th ACAM Training School` focuses on Atmospheric Chemistry and Aerosols in the Asian Monsoon region using satellite- and model-based data.
#
# The course is a combination of lectures and practical sessions introducing you to different satellite- and model-based data for wildfire monitoring and highlighting the Asian summer monsoon anticyclone. The `practical` sessions have the following outline:
#
# * 22 June | 8-9:30 (UTC): **Practical session: Asian summer monsoon anticyclone + Indonesian fires 2020 workflow**
# * 23 June | 8-9:30 (UTC): **Practical session: Indonesian fires 2015 workflow**
# * 24 June | 8-9:30 (UTC): **Practical session: Model-based data**
#
#
# -
# <br>
# + [markdown] Collapsed="false"
# ## Featured data
# + [markdown] Collapsed="false"
# This course features the following data:
#
# * **Satellite-based data products**
# * MLS/Aura Level 3 data: [Carbon Monoxide](./01_asian_monsoon_anticyclone/11_MLS_CO_L3_ACAM.ipynb)
# * AC-SAF Metop-A/B GOME-2 Level-2 data: [Formaldehyde (HCHO)](./02_fire_monitoring/11_AC-SAF_Metop-AB_GOME-2_HCHO.ipynb)
# * AC-SAF Metop-A/B GOME-2 Level-2 data: [Absorbing Aerosol Index (AAI)](./02_fire_monitoring/12_AC-SAF_Metop-AB_GOME-2_AAI.ipynb) and [Absorbing Aerosol Height (AAH)](./02_fire_monitoring/13_AC-SAF_Metop-B_GOME-2_AAH.ipynb)
# * Metop-A/B IASI Level-2 data: [Carbon Monoxide](./02_fire_monitoring/14_Metop-AB_IASI_CO.ipynb)
# * Copernicus Sentinel-5P TROPOMI Level 2 data: [Ultraviolet Aerosol Index](./23_Sentinel-5P_TROPOMI_UVAI.ipynb) and [Carbon Monoxide](./22_Sentinel-5P_TROPOMI_CO.ipynb)
# * Copernicus Sentinel-3 OLCI Level-1B data: [Red-Green-Blue (RGB) radiances](./21_Sentinel-3_OLCI_RGB.ipynb)
#
#
# * **Model-based data products**
# * Copernicus Atmosphere Monitoring Service (CAMS) | Global atmospheric composition forecasts: [Aerosol Optical Depth](./03_cams/3.1_CAMSDataAccess_ADS_GlobalAtmosphericCompositionForecast.ipynb)
# * Copernicus Atmosphere Monitoring Service (CAMS) | Global reanalyses (EAC4): [Aerosol Optical Depth](./03_cams/3.2_CAMSDataAccess_ADS_GlobalAtmosphericCompositionReanalysis.ipynb)
# * Copernicus Atmosphere Monitoring Service (CAMS) | Global Fire Assimilation System (GFAS): [Fire Radiative Power](./03_cams/3.3_CAMSDataAccess_FireEmissions.ipynb)
# * Copernicus Atmosphere Monitoring Service (CAMS) | Greenhouse gas fluxes: [Posterior land surface upward flux](./3.4_CAMSDataAccess_GHGFluxInversions.ipynb)
#
#
# -
# <br>
# + [markdown] Collapsed="false"
# ## Course material
# + [markdown] Collapsed="false"
# The course outline is as follows
#
# * **00 - Introduction to Python and data overview**
# * [Introduction to Python and Project Jupyter](./01_introduction_to_python_and_jupyter.ipynb)
# * [Overview of data and data access systems](./02_atmospheric_composition_overview.ipynb)
#
#
# * **01 - Asian Summer Monsoon Anticyclone**
# * [1.0 MLS/Aura Carbon Monoxide Level 3](./01_asian_summer_monsoon_anticyclone/11_MLS_CO_L3_ACAM.ipynb)
#
#
# * **02 - Satellite-based data for wildfire monitoring**
# * [2.0 Indonesian fires 2015 workflow](./02_fire_monitoring/10_workflow_indonesia_fires_2015.ipynb)
# * [2.1 Indonesian fires 2020 workflow](./02_fire_monitoring/20_workflow_indonesia_fires_2020.ipynb)
#
#
# * **03 - Copernicus Atmosphere Monitoring Service (CAMS) data**
# * [3.0 CAMS data introduction](./03_cams/3.0_CAMSDataAccess_Introduction.ipynb)
# * [3.1 CAMS Global Atmospheric Composition Forecast](./03_cams/3.1_CAMSDataAccess_ADS_GlobalAtmosphericCompositionForecast.ipynb)
# * [3.2 CAMS Global Atmospheric Composition Reanalysis](./03_cams/3.2_CAMSDataAccess_ADS_GlobalAtmosphericCompositionReanalysis.ipynb)
# * [3.3 CAMS Fire Emissions](./03_cams/3.3_CAMSDataAccess_FireEmissions.ipynb)
# * [3.4 CAMS Greenhouse Gas fluxes](./03_cams/3.4_CAMSDataAccess_GHGFluxInversions.ipynb)
#
#
# <br>
#
# **NOTE:** Throughout the course, general functions to `load`, `re-shape`, `process` and `visualize` the datasets are defined. These functions are re-used when applicable. The [functions notebook](./functions.ipynb) gives you an overview of all the functions defined and used for the course.
#
# -
# <br>
# + [markdown] Collapsed="false"
# ## Learning outcomes
# + [markdown] Collapsed="false"
# The course is designed for `medium-level users`, who have basic Python knowledge and understanding of Fire monitoring data.
#
# After the course, you should have:
# * an idea about **different datasets on Fire Monitoring** and **atmospheric composition and the asian monsoon**,
# * knowledge about the most useful **Python packages** to handle, process and visualise large volumes of Earth Observation data
# * an idea about how the **data can help to detect and monitor fire events** as well as **the asian monsoon**.
# + [markdown] Collapsed="false"
# <hr>
# + [markdown] Collapsed="false"
# ## Access to the `JupyterHub`
# + [markdown] Collapsed="false"
# The course material is made available on a JupyterHub instance, a pre-defined environment that gives learners direct access to the data and Python packages required for following the course.
#
# The `JupyterHub` can be accessed as follows:
# + [markdown] Collapsed="false"
# * Web address: [https://training.ltpy.adamplatform.eu](https://training.ltpy.adamplatform.eu)
# * Create an account: [https://login.ltpy.adamplatform.eu/](https://login.ltpy.adamplatform.eu/)
# * Log into the `JupyterHub` with your account created.
# + [markdown] Collapsed="false"
# <hr>
# + [markdown] Collapsed="false"
# <img src='./img/copernicus_logo.png' alt='Logo EU Copernicus' align='right' width='20%'><br><br><br><br>
#
# <p style="text-align:right;">This project is licensed under the <a href="./LICENSE">MIT License</a> and is developed under a Copernicus contract.
|
90_workshops/202106_acam_training_school/00_index.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Sparse Linear Inverse with EM Learning
#
# In the [sparse linear inverse demo](./sparse_lin_inverse.ipynb), we saw how to set up a solve a simple sparse linear inverse problem using the `vamp` method in the `vampyre` package. Specifically, we solved for a vector $x$ from linear measurements of the form $y=Ax+w$. Critical in demo was that the `vamp` method had to be supplied a description of the statistics on the components on $x$ and the noise variance $w$. In many practical cases though, these are not known. In the demo, we show how to simultaneously learn $x$ and the distribution on $x$ with EM learning.
#
# The example here is taken from the following paper which introduced the combination of VAMP with EM learning:
# > Fletcher, <NAME>., and <NAME>. [Learning and free energies for vector approximate message passing,](http://ieeexplore.ieee.org/abstract/document/7952957/) Proc. IEEE Acoustics, Speech and Signal Processing (ICASSP), 2017.
# ## Importing the Package
#
#
# First, as in the [sparse linear inverse demo](./sparse_lin_inverse.ipynb) we load `vampyre` and other packages.
# +
# Import vampyre
import os
import sys
vp_path = os.path.abspath('../../')
if not vp_path in sys.path:
sys.path.append(vp_path)
import vampyre as vp
# Import the other packages
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# ## Generating Synthetic Data
#
# Next, we will generate the synthetic sparse data. Recall, that in the sparse linear inverse problem, we want to estimate a vector $z_0$ from measurements
# $$
# y = Az_0 + w,
# $$
# for some known linear transform $A$. The vector $w$ represents noise.
#
# The sparse vector $z_0$ is described probabilistically. We will use a slightly different model than in the sparse linear inverse demo, and describe the sparse vector $z_0$ as a [Gaussian mixture model](https://en.wikipedia.org/wiki/Mixture_model): Each component of the vector $z_0$ is distributed as being randomly one of two components:
# $$
# z_{0j} \sim \begin{cases}
# N(0,\sigma^2_H) & \mbox{with prob } P_H, \\
# N(0,\sigma^2_L) & \mbox{with prob } P_L,
# \end{cases}
# $$
# where $\sigma^2_H$ represents a *high* variance and $\sigma^2_L$ a *low* variance. Thus, with some probability $P_L$, the component is small (close to zero) and with probability $P_H$ it is large.
# +
# Dimensions of the problem: z0 is the unknown signal, z1 = A.z0 the
# noiseless measurement; ncol independent columns are solved at once.
nz0 = 1000
nz1 = 500
ncol = 10
zshape0 = (nz0,ncol)
zshape1 = (nz1,ncol)
# Parameters for the two components
varc_lo = 1e-4 # variance of the low variance component
varc_hi = 1 # variance of the high variance component
prob_hi = 0.1 # probability of the high variance component
prob_lo = 1-prob_hi
meanc = np.array([0,0])
probc = np.array([prob_lo, prob_hi])
varc = np.array([varc_lo, varc_hi])
nc = len(probc)  # number of mixture components
# Generate random data following the GMM model
zlen = np.prod(zshape0)
ind = np.random.choice(nc,zlen,p=probc)  # per-entry component index
u = np.random.randn(zlen)  # standard normal draws, scaled per component below
z0 = u*np.sqrt(varc[ind]) + meanc[ind]
z0 = z0.reshape(zshape0)
# -
# Next, we generate a random matrix. Before, we generated the random matrix with Gaussian iid entries. In this example, to make the problem more challenging, we will use a more ill-conditioned random matrix. The method `rand_rot_invariant` creates a random matrix with a specific condition number.
cond_num = 100 # Condition number
A = vp.trans.rand_rot_invariant_mat(nz1,nz0,cond_num=cond_num)
z1 = A.dot(z0)
# Finally, we add noise at the desired SNR
snr = 40 # SNR in dB
yvar = np.mean(np.abs(z1)**2)
wvar = yvar*np.power(10, -0.1*snr)  # noise variance implied by the target SNR
y = z1 + np.random.normal(0,np.sqrt(wvar), zshape1)
# ## Set up the solvers
#
# As in the sparse inverse demo, the VAMP estimator requires that we specify two probability distributions:
# * Prior: $p(z_0|\theta_0)$;
# * Likelihood: $p(y|z_0,\theta_1)$.
# In this case, both densities depend on *parameters*: $\theta_0$ and $\theta_1$. For the prior, the parameters $\theta_0$ represent the parameters of the components `(probc,meanc,varc)`. For the likelihood, the unknown parameter $\theta_1$ is the output variance `wvar`.
#
# EM estimation is a method that allows us to learn the values of the parameters $\theta_0$ and $\theta_1$ while also estimating the vector $z_0$.
# EM estimation is an iterative technique and requires that we specify initial estimates for the unknown parameters: `wvar,probc,meanc,varc`. We will use the initialization in the paper above.
# +
# Initial estimate for the noise
wvar_init = np.mean(np.abs(y)**2)
# Initial estimates for the component means, variances and probabilities
meanc_init = np.array([0,0])
prob_hi_init = np.minimum(nz1/nz0/2,0.95)  # capped so probabilities stay valid
prob_lo_init = 1-prob_hi_init
var_hi_init = yvar/np.mean(np.abs(A)**2)/nz0/prob_hi_init
var_lo_init = 1e-4
probc_init = np.array([prob_lo_init, prob_hi_init])
varc_init = np.array([var_lo_init, var_hi_init])
# -
# To evaluate the EM method, we will compare it against an *oracle* that knows the true density. We thus create two estimators for the prior: one for the oracle that is set to the true GMM parameters with tuning disabled (`tune_gmm=False`); and one for the EM estimator where the parameters are set to the initial estimators and tuning enabled (`tune_gmm=True`).
# +
# Estimator with EM, initialized to the above values
est_in_em = vp.estim.GMMEst(shape=zshape0,\
    zvarmin=1e-6,tune_gmm=True,probc=probc_init,meanc=meanc_init, varc=varc_init,name='GMM input')
# No auto-tuning. Set estimators with the true values
est_in_oracle = vp.estim.GMMEst(shape=zshape0, probc=probc, meanc=meanc, varc=varc, tune_gmm=False,name='GMM input')
# -
# We also create two estimators for the likelihood $p(y|z1,wvar)$. For the oracle estimator, the parameter `wvar` is set to its true value; for the EM estimator it is set to its initial estimate `wvar_init`.
Aop = vp.trans.MatrixLT(A,zshape0)
b = np.zeros(zshape1)
map_est = False  # use MMSE (not MAP) estimation
est_out_em = vp.estim.LinEst(Aop,y,wvar=wvar_init,map_est=map_est,tune_wvar=True, name='Linear+AWGN')
est_out_oracle = vp.estim.LinEst(Aop,y,wvar=wvar,map_est=map_est,tune_wvar=False, name='Linear+AWGN')
# ## Running the solvers for the oracle and EM case
# We first run the solver for the oracle case and measure the MSE per iteration.
# +
# Create the message handler
msg_hdl = vp.estim.MsgHdlSimp(map_est=map_est, shape=zshape0)
# Create the solver
nit = 40
solver = vp.solver.Vamp(est_in_oracle, est_out_oracle,msg_hdl,hist_list=['zhat'],nit=nit)
# Run the solver
solver.solve()
# Get the estimation history and compute the normalized MSE (dB) per iteration
zhat_hist = solver.hist_dict['zhat']
nit2 = len(zhat_hist)
zpow = np.mean(np.abs(z0)**2)  # signal power, for normalization
mse_oracle = np.zeros(nit2)
for it in range(nit2):
    zhati = zhat_hist[it]
    zerr = np.mean(np.abs(zhati-z0)**2)
    mse_oracle[it] = 10*np.log10(zerr/zpow)
# Print final MSE
print("Final MSE (oracle) = {0:f} dB".format(mse_oracle[-1]))
# -
# Next, we run the EM estimator. We see we obtain a similar final MSE.
# +
# Create the message handler
msg_hdl = vp.estim.MsgHdlSimp(map_est=map_est, shape=zshape0)
# Create the solver (EM-tuned estimators this time)
solver = vp.solver.Vamp(est_in_em, est_out_em, msg_hdl,hist_list=['zhat'],nit=nit)
# Run the solver
solver.solve()
# Get the estimation history
zhat_hist = solver.hist_dict['zhat']
nit2 = len(zhat_hist)
zpow = np.mean(np.abs(z0)**2)
mse_em = np.zeros(nit2)
for it in range(nit2):
    zhati = zhat_hist[it]
    zerr = np.mean(np.abs(zhati-z0)**2)
    mse_em[it] = 10*np.log10(zerr/zpow)
# Print final MSE
print("Final MSE (EM) = {0:f} dB".format(mse_em[-1]))
# -
# We plot the two MSEs as a function of the iteration number.
t = np.arange(nit2)
plt.plot(t,mse_oracle,'o-')
plt.plot(t,mse_em,'s-')
plt.grid()
plt.xlabel('Iteration')
plt.ylabel('MSE (dB)')
plt.legend(['Oracle', 'EM'])
plt.show()
# We see that the EM algorithm is eventually able to obtain the same MSE, but with a few more iterations.
|
demos/sparse/sparse_em.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#hide
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# # blurr
#
# > A library that integrates huggingface transformers with version 2 of the fastai framework
#
# Named after the **fast**est **transformer** (well, at least of the Autobots), ***blurr*** provides both a comprehensive and extensible framework for training and deploying *all* 🤗 [huggingface](https://huggingface.co/transformers/) transformer models with [fastai](http://docs.fast.ai/) v2.
#
# Utilizing features like fastai's new `@typedispatch` and `@patch` decorators, and a simple class hierarchy, **blurr** provides fastai developers with the ability to train and deploy transformers for sequence classification, question answering, token classification, summarization, and language modeling tasks. Though much of this works out-of-the-box, users will be able to customize the tokenization strategies and model inputs based on task and/or architecture as needed.
#
# **Supports**:
# - Sequence Classification (multiclassification and multi-label classification)
# - Token Classification
# - Question Answering
# - Summarization
#
# *Support for language modeling, translation tasks and more forthcoming!!!*
# ## Install
# You can now pip install blurr via `pip install ohmeow-blurr`
#
# Or, even better as this library is under *very* active development, create an editable install like this:
# ```
# git clone https://github.com/ohmeow/blurr.git
# cd blurr
# pip install -e ".[dev]"
# ```
# ## How to use
# The initial release includes everything you need for sequence classification and question answering tasks. Support for token classification and summarization are incoming. Please check the documentation for more thorough examples of how to use this package.
#
# The following two packages need to be installed for blurr to work:
# 1. fastai2 (see http://docs.fast.ai/ for installation instructions)
# 2. huggingface transformers (see https://huggingface.co/transformers/installation.html for details)
# ### Imports
# +
import torch
from transformers import *
from fastai.text.all import *
from blurr.data.all import *
from blurr.modeling.all import *
# -
# ### Get your data
# +
# Download the small IMDB sample and load the review texts/labels
path = untar_data(URLs.IMDB_SAMPLE)
model_path = Path('models')
imdb_df = pd.read_csv(path/'texts.csv')
# -
# ### Get your 🤗 objects
# +
# Resolve the architecture, config, tokenizer and model for the chosen checkpoint
task = HF_TASKS_AUTO.SequenceClassification
pretrained_model_name = "bert-base-uncased"
hf_arch, hf_config, hf_tokenizer, hf_model = BLURR_MODEL_HELPER.get_hf_objects(pretrained_model_name, task=task)
# -
# ### Build your Data 🧱 and your DataLoaders
# +
# single input
blocks = (HF_TextBlock(hf_arch=hf_arch, hf_tokenizer=hf_tokenizer), CategoryBlock)
dblock = DataBlock(blocks=blocks,
                   get_x=ColReader('text'), get_y=ColReader('label'),
                   splitter=ColSplitter(col='is_valid'))
dls = dblock.dataloaders(imdb_df, bs=4)
# -
dls.show_batch(hf_tokenizer=hf_tokenizer, max_n=2)
# ### ... and 🚂
# +
#slow
# Wrap the huggingface model and fine-tune with fastai's one-cycle schedule
model = HF_BaseModelWrapper(hf_model)
learn = Learner(dls,
                model,
                opt_func=partial(Adam, decouple_wd=True),
                loss_func=CrossEntropyLossFlat(),
                metrics=[accuracy],
                cbs=[HF_BaseModelCallback],
                splitter=hf_splitter)
learn.create_opt()
learn.freeze()
learn.fit_one_cycle(3, lr_max=1e-3)
# -
#slow
learn.show_results(hf_tokenizer=hf_tokenizer, max_n=2)
# ## ❗ Updates
#
# **08/20/2020**
# * Updated everything to work latest version of fastai (tested against 2.0.0)
# * Added batch-time padding, so that by default now, `HF_TokenizerTransform` doesn't add any padding tokens and all huggingface inputs are padded simply to the max sequence length in each batch rather than to the max length (passed in and/or acceptable to the model). This should create efficiencies across the board, from memory consumption to GPU utilization. The old tried and true method of padding during tokenization requires you to pass in `padding='max_length` to `HF_TextBlock`.
# * Removed code to remove fastai2 @patched summary methods which had previously conflicted with a couple of the huggingface transformers
#
# **08/13/2020**
# * Updated everything to work latest transformers and fastai
# * Reorganized code to bring it more inline with how huggingface separates out their "tasks".
#
# **07/06/2020**
# * Updated everything to work huggingface>=3.02
# * Changed a lot of the internals to make everything more efficient and performant along with the latest version of huggingface ... meaning, I have broken things for folks using previous versions of blurr :).
#
# **06/27/2020**
# * Simplified the `BLURR_MODEL_HELPER.get_hf_objects` method to support a wide range of options in terms of building the necessary huggingface objects (architecture, config, tokenizer, and model). Also added `cache_dir` for saving pre-trained objects in a custom directory.
# * Misc. renaming and cleanup that may break existing code (please see the docs/source if things blow up)
# * Added missing required libraries to requirements.txt (e.g., nlp)
#
# **05/23/2020**
# * Initial support for text generation (e.g., summarization, conversational agents) models now included. Only tested with BART so if you try it with other models before I do, lmk what works ... and what doesn't
#
# **05/17/2020**
# * Major code restructuring to make it easier to build out the library.
# * `HF_TokenizerTransform` replaces `HF_Tokenizer`, handling the tokenization and numericalization in one place. DataBlock code has been dramatically simplified.
# * Tokenization correctly handles huggingface tokenizers that require `add_prefix_space=True`.
# * `HF_BaseModelCallback` and `HF_BaseModelCallback` are required and work together in order to allow developers to tie into any callback friendly event exposed by fastai2 and also pass in named arguments to the huggingface models.
# * `show_batch` and `show_results` have been updated for Question/Answer and Token Classification models to represent the data and results in a more easily intepretable manner than the defaults.
#
# **05/06/2020**
# * Initial support for Token classification (e.g., NER) models now included
# * Extended fastai's `Learner` object with a `predict_tokens` method used specifically in token classification
# * `HF_BaseModelCallback` can be used (or extended) instead of the model wrapper to ensure your inputs into the huggingface model is correct (recommended). See docs for examples (and thanks to fastai's Sylvain for the suggestion!)
# * `HF_Tokenizer` can work with strings or a string representation of a list (the later helpful for token classification tasks)
# * `show_batch` and `show_results` methods have been updated to allow better control on how huggingface tokenized data is represented in those methods
# ## ⭐ Props
#
# A word of gratitude to the following individuals, repos, and articles upon which much of this work is inspired from:
#
# - The wonderful community that is the [fastai forum](https://forums.fast.ai/) and especially the tireless work of both Jeremy and Sylvain in building this amazing framework and place to learn deep learning.
# - All the great tokenizers, transformers, docs and examples over at [huggingface](https://huggingface.co/)
# - [FastHugs](https://github.com/morganmcg1/fasthugs)
# - [Fastai with 🤗Transformers (BERT, RoBERTa, XLNet, XLM, DistilBERT)](https://towardsdatascience.com/fastai-with-transformers-bert-roberta-xlnet-xlm-distilbert-4f41ee18ecb2)
# - [Fastai integration with BERT: Multi-label text classification identifying toxicity in texts](https://medium.com/@abhikjha/fastai-integration-with-bert-a0a66b1cecbe)
# - [A Tutorial to Fine-Tuning BERT with Fast AI](https://mlexplained.com/2019/05/13/a-tutorial-to-fine-tuning-bert-with-fast-ai/)
|
nbs/index.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: MT
# language: python
# name: mt
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Holdings-company-information" data-toc-modified-id="Holdings-company-information-1"><span class="toc-item-num">1 </span>Holdings company information</a></span></li><li><span><a href="#Download-holdings-data" data-toc-modified-id="Download-holdings-data-2"><span class="toc-item-num">2 </span>Download holdings data</a></span></li><li><span><a href="#Download-fund-information-data" data-toc-modified-id="Download-fund-information-data-3"><span class="toc-item-num">3 </span>Download fund information data</a></span></li><li><span><a href="#Download-fund-summary-data" data-toc-modified-id="Download-fund-summary-data-4"><span class="toc-item-num">4 </span>Download fund summary data</a></span></li><li><span><a href="#Download-fund-style-data" data-toc-modified-id="Download-fund-style-data-5"><span class="toc-item-num">5 </span>Download fund style data</a></span></li><li><span><a href="#CRSP/COMPUSTAT-Linking-data" data-toc-modified-id="CRSP/COMPUSTAT-Linking-data-6"><span class="toc-item-num">6 </span>CRSP/COMPUSTAT Linking data</a></span></li><li><span><a href="#Test-holdings-data" data-toc-modified-id="Test-holdings-data-7"><span class="toc-item-num">7 </span>Test holdings data</a></span></li></ul></div>
# -
# # Load different Tables from WRDS
# Large tables like the 50+ GB Holdings table were downloaded from WRDS using an FTP client
# ## Holdings company information
# + pycharm={"is_executing": false}
import wrds
import feather
import matplotlib.pyplot as plt
# Connect to DB
db = wrds.Connection(wrds_username='amglex')
print('Successfully connected')
# -
# ## CRSP/COMPUSTAT Linking data
# +
# %%time
######################
# Query the data
######################
print('Start downloading data ...')
# SQL Query
data_raw_df = db.raw_sql(
'''
SELECT *
FROM Ccmxpf_linktable;
'''
)
print('SQL successful')
print(data_raw_df.shape)
print(data_raw_df.dtypes)
print(data_raw_df.head())
print("Successfully saved data")
# -
# ## Test holdings data
# +
######################
# Query the data
######################
print('Start downloading data ...')
# SQL Query
data_raw_df = db.raw_sql(
'''
SELECT EXTRACT(YEAR FROM report_dt) as year, COUNT(DISTINCT crsp_portno) as count_portno
FROM holdings
GROUP BY year;
'''
)
print('SQL successful')
print(data_raw_df.shape)
print(data_raw_df.dtypes)
print(data_raw_df.head())
|
notebooks/011 - Data - Loading Test.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The Canonical Time-series Characteristics (catch22) transform
#
# catch22\[1\] is a collection of 22 time series features extracted from the 7000+ present in the _hctsa_\[2\]\[3\] toolbox.
# A hierarchical clustering was performed on the correlation matrix of features that performed better than random chance to remove redundancy.
# These clusters were sorted by balanced accuracy using a decision tree classifier and a single feature was selected from the 22 clusters formed, taking into account balanced accuracy results, computational efficiency and interpretability.
#
# In this notebook, we will demonstrate how to use the catch22 transformer on the ItalyPowerDemand univariate and BasicMotions multivariate datasets. We also show catch22 used for classication with a random forest classifier.
#
# Both make use of the features implemented in the catch22 package (<https://github.com/chlubba/catch22>), where versions of catch22 for C and MATLAB are also available.
#
# #### References:
#
# \[1\] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2019). catch22: CAnonical Time-series CHaracteristics. Data Mining and Knowledge Discovery, 33(6), 1821-1852.
#
# \[2\] <NAME>., & <NAME>. (2017). hctsa: A computational framework for automated time-series phenotyping using massive feature extraction. Cell systems, 5(5), 527-531.
#
# \[3\] <NAME>., <NAME>., & <NAME>. (2013). Highly comparative time-series analysis: the empirical structure of time series and their methods. Journal of the Royal Society Interface, 10(83), 20130048.
# ## 1. Imports
# +
from sklearn import metrics
from sktime.classification.hybrid._catch22_forest_classifier import (
Catch22ForestClassifier,
)
from sktime.datasets import load_basic_motions, load_italy_power_demand
from sktime.transformations.panel.catch22_features import Catch22
# -
# ## 2. Load data
# +
IPD_X_train, IPD_y_train = load_italy_power_demand(split="train", return_X_y=True)
IPD_X_test, IPD_y_test = load_italy_power_demand(split="test", return_X_y=True)
# keep only the first 50 test cases so the demo runs quickly
IPD_X_test = IPD_X_test[:50]
IPD_y_test = IPD_y_test[:50]
print(IPD_X_train.shape, IPD_y_train.shape, IPD_X_test.shape, IPD_y_test.shape)
BM_X_train, BM_y_train = load_basic_motions(split="train", return_X_y=True)
BM_X_test, BM_y_test = load_basic_motions(split="test", return_X_y=True)
print(BM_X_train.shape, BM_y_train.shape, BM_X_test.shape, BM_y_test.shape)
# -
# ## 3. catch22 transform
#
# ### Univariate
#
# The catch22 features are provided in the form of a transformer, `Catch22`.
# From this the transformed data can be used for a variety of time series analysis tasks.
c22_uv = Catch22()
c22_uv.fit(IPD_X_train, IPD_y_train)
# + pycharm={"name": "#%%\n"}
transformed_data_uv = c22_uv.transform(IPD_X_train)
print(transformed_data_uv.head())
# -
# The transform `Catch22` method will process all 22 features.
# For individual features, the transform_single_feature method can be used when provided with a numeric feature ID or the feature name.
# + pycharm={"name": "#%%\n"}
transformed_feature_uv = c22_uv._transform_single_feature(IPD_X_train, "CO_f1ecac")
print(transformed_feature_uv)
# -
# ### Multivariate
#
# Transformation of multivariate data is supported by `Catch22`.
# The default procedure will concatenate each column prior to transformation.
# + pycharm={"name": "#%%\n"}
c22_mv = Catch22()
c22_mv.fit(BM_X_train, BM_y_train)
# + pycharm={"name": "#%%\n"}
transformed_data_mv = c22_mv.transform(BM_X_train)
print(transformed_data_mv.head())
# -
# ## 4. catch22 Forest Classifier
#
# For classification tasks the default classifier to use with the catch22 features is a random forest classifier.
# An implementation making use of the `RandomForestClassifier` from sklearn built on catch22 features is provided in the form of the `Catch22ForestClassifier` for ease of use.
# Fit the catch22 random forest on the univariate data and report test accuracy
c22f = Catch22ForestClassifier(n_estimators=100, random_state=0)
c22f.fit(IPD_X_train, IPD_y_train)
c22f_preds = c22f.predict(IPD_X_test)
print("C22F Accuracy: " + str(metrics.accuracy_score(IPD_y_test, c22f_preds)))
|
examples/catch22.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 周内效应——定投篇
# +
import pandas as pd
from datetime import datetime
import trdb2py
import numpy as np
isStaticImg = False  # render interactive charts rather than static images
width = 960
height = 768
pd.options.display.max_columns = None
pd.options.display.max_rows = None
trdb2cfg = trdb2py.loadConfig('./trdb2.yaml')
# +
# The specific fund / index to backtest
asset = 'jqdata.000300_XSHG|1d'
# baselineasset = 'jrj.510310'
# asset = 'jrj.110011'
# baselineasset = 'jqdata.000300_XSHG|1d'
# Start time; 0 means from the very beginning of the data
tsStart = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
# End time; -1 means up to the present
tsEnd = -1
tsEnd = int(trdb2py.str2timestamp('2020-12-31', '%Y-%m-%d'))
# Initial money pool
paramsinit = trdb2py.trading2_pb2.InitParams(
    money=10000,
)
# Buy parameters: buy with all available money (i.e. compound returns)
paramsbuy = trdb2py.trading2_pb2.BuyParams(
    perHandMoney=1,
)
# # Buy parameters: buy with 1/6 of the available money
# paramsbuy2 = trdb2py.trading2_pb2.BuyParams(
#     perHandMoney=1/6,
# )
# Buy parameters: buy with half of the available money
paramsbuy2 = trdb2py.trading2_pb2.BuyParams(
    perHandMoney=0.5,
)
# Sell parameters: sell the entire position
paramssell = trdb2py.trading2_pb2.SellParams(
    perVolume=1,
)
# AIP (automatic investment plan): invest 10000 on day 1 of every month
paramsaip = trdb2py.trading2_pb2.AIPParams(
    money=10000,
    type=trdb2py.trading2_pb2.AIPTT_MONTHDAY,
    day=1,
)
# Take-profit parameters: sell the whole position at the profit threshold
paramstakeprofit = trdb2py.trading2_pb2.TakeProfitParams(
    perVolume=1,
    # isOnlyProfit=True,
    # isFinish=True,
)
# Take-profit parameters: sell all, then end the strategy (isFinish)
paramstakeprofit2 = trdb2py.trading2_pb2.TakeProfitParams(
    perVolume=1,
    # isOnlyProfit=True,
    isFinish=True,
)
# Take-profit parameters: isOnlyProfit=True — presumably sells only the
# profit portion; confirm against trdb2py docs
paramstakeprofit1 = trdb2py.trading2_pb2.TakeProfitParams(
    perVolume=1,
    isOnlyProfit=True,
    # isFinish=True,
)
# Take-profit parameters: profit-only sell, then end the strategy
paramstakeprofit3 = trdb2py.trading2_pb2.TakeProfitParams(
    perVolume=1,
    isOnlyProfit=True,
    isFinish=True,
)
# Sell parameters: sell lots only after being held for 7 days
paramssell7 = trdb2py.trading2_pb2.SellParams(
    # perVolume=1,
    keepTime=7 * 24 * 60 * 60,
)
lststart = [1, 2, 3, 4, 5]  # weekday codes, Monday..Friday
lsttitle = ['周一', '周二', '周三', '周四', '周五']
# +
def calcweekday2val2(wday, offday):
    """Convert a trading-day offset into a calendar-day offset.

    When buying `offday` trading days after weekday `wday` (1=Monday..5=Friday)
    would cross a weekend, the offset is stretched by the two weekend days;
    otherwise `offday` is returned unchanged.
    """
    # offday -> (weekday threshold, stretched calendar-day span)
    _WEEKEND_RULES = {1: (5, 3), 2: (4, 4), 3: (3, 5), 4: (2, 6)}
    rule = _WEEKEND_RULES.get(offday)
    if rule is not None:
        threshold, span = rule
        # offday == 1 only stretches when starting exactly on Friday;
        # larger offsets stretch for any weekday at or past the threshold.
        crosses = (wday == threshold) if offday == 1 else (wday >= threshold)
        if crosses:
            return span
    return offday
def getAIPLastTs(pnl):
    """Return the timestamp of the final control record if it is a SELL.

    Returns -1 when there are no control records or the last one is not
    a sell (i.e. the AIP round has not been closed out yet).
    """
    if not pnl.lstCtrl:
        return -1
    last_ctrl = pnl.lstCtrl[-1]
    if last_ctrl.type == trdb2py.trading2_pb2.CTRL_SELL:
        return last_ctrl.ts
    return -1
def getLastResult(pnl) -> dict:
    """Return the final sample of a PNL curve as {'cost', 'value'}.

    Returns None when the curve has no samples.
    """
    if not pnl.values:
        return None
    tail = pnl.values[-1]
    return {'cost': tail.cost, 'value': tail.value}
def getLastCtrl(pnl):
    """Return the last control (trade) record of a PNL object, or None if empty."""
    if not pnl.lstCtrl:
        return None
    return pnl.lstCtrl[-1]
def getPNLValueWithTimestamp(ts, pnl: trdb2py.trading2_pb2.PNLAssetData) -> int:
    """Locate (or create) the PNL sample at timestamp ts and return its index.

    Assumes pnl.values is sorted by ts and keeps it sorted: if ts is not
    present, a zero-initialized sample is spliced in at the right position.
    """
    for idx, sample in enumerate(pnl.values):
        if sample.ts == ts:
            return idx
        if sample.ts > ts:
            # passed the insertion point without a match: splice in a new sample
            pnl.values.insert(idx, trdb2py.trading2_pb2.PNLDataValue(ts=ts))
            return idx
    # ts lies beyond the last sample: append at the end
    pnl.values.append(trdb2py.trading2_pb2.PNLDataValue(ts=ts))
    return len(pnl.values) - 1
def mergePNL(lstpnl: list) -> trdb2py.trading2_pb2.PNLAssetData:
    """Merge several PNL curves into one by summing value and cost per timestamp.

    Each element of lstpnl is a dict whose 'pnl' entry holds a PNLAssetData.
    Samples are aligned on their ts via getPNLValueWithTimestamp, which
    inserts missing timestamps so the merged series stays sorted.
    """
    pnl = trdb2py.trading2_pb2.PNLAssetData()
    for vpnl in lstpnl:
        v = vpnl['pnl']
        for cai in range(0, len(v.values)):
            # index of the merged sample for this timestamp (created if absent)
            di = getPNLValueWithTimestamp(v.values[cai].ts, pnl)
            pnl.values[di].value += v.values[cai].value
            pnl.values[di].cost += v.values[cai].cost
            if pnl.values[di].cost > 0:
                # per-unit value = value / cost
                pnl.values[di].perValue = pnl.values[di].value / \
                    pnl.values[di].cost
            else:
                # nothing invested yet at this timestamp: treat the ratio as flat
                pnl.values[di].perValue = 1
            # pnl.values[di].perValue = 2
    return pnl
def mergePNLEx(pnldest:trdb2py.trading2_pb2.PNLAssetData, pnlsrc:trdb2py.trading2_pb2.PNLAssetData, inmoney):
    """Fold pnlsrc into pnldest in place, adding only the gain over inmoney.

    For each source sample, the profit above the injected principal
    (value - inmoney) is added to the destination sample at the same
    timestamp, and the destination's perValue ratio is refreshed.
    NOTE(review): cost is deliberately not accumulated (the line is
    commented out) — confirm that callers pre-fund pnldest's cost.
    """
    for cai in range(0, len(pnlsrc.values)):
        di = getPNLValueWithTimestamp(pnlsrc.values[cai].ts, pnldest)
        # add only the gain over the injected principal, not the raw value
        pnldest.values[di].value += (pnlsrc.values[cai].value - inmoney)
        # pnl.values[di].cost += pnlsrc.values[cai].cost
        if pnldest.values[di].cost > 0:
            pnldest.values[di].perValue = pnldest.values[di].value / \
                pnldest.values[di].cost
        else:
            pnldest.values[di].perValue = 1
def getNextMonthDay1(ts):
    """Return the Unix timestamp of 00:00 UTC on day 1 of the month after ts.

    Args:
        ts: Unix timestamp in seconds, interpreted in UTC (consistent with
            the utcfromtimestamp usage elsewhere in this notebook).

    Returns:
        float: timestamp of the first day of the following month, UTC.

    Bug fix: the original decoded ts in UTC (utcfromtimestamp) but then
    re-encoded the naive result with datetime.timestamp(), which interprets
    naive datetimes in the *local* timezone — shifting the month boundary
    by the machine's UTC offset. Both directions now consistently use UTC,
    which also makes the result machine-independent.
    """
    from datetime import timezone  # file-level import only brings in datetime

    dt = datetime.fromtimestamp(ts, tz=timezone.utc)
    if dt.month == 12:
        # December wraps into January of the next year
        year, month = dt.year + 1, 1
    else:
        year, month = dt.year, dt.month + 1
    return datetime(year, month, 1, tzinfo=timezone.utc).timestamp()
def rmPNLValuesWithTimestamp(ts, pnl: trdb2py.trading2_pb2.PNLAssetData):
    """Truncate pnl.values in place so the sample at ts becomes the last one.

    getPNLValueWithTimestamp locates the sample (inserting one if ts is
    absent); every sample after that index is discarded.
    """
    cutoff = getPNLValueWithTimestamp(ts, pnl)
    del pnl.values[cutoff + 1:]
def getPNLTimestampLowInMonth(pnl: trdb2py.trading2_pb2.PNLAssetData) -> list:
    """Return, per calendar month (UTC), the timestamp of the lowest perValue sample.

    Walks the PNL curve once, tracking the running minimum within the
    current month and flushing it to `arr` whenever the month changes
    (and at the final sample).
    """
    ts = 0            # timestamp of the current month's minimum so far (0 = not started)
    dt = None         # datetime of `ts`, used for year/month comparison
    lastPerValue = 0  # perValue at `ts`
    arr = []
    for i in range(0, len(pnl.values)):
        v = pnl.values[i]
        if ts == 0:
            # first sample: start tracking the first month
            ts = v.ts
            dt = datetime.utcfromtimestamp(ts)
            lastPerValue = v.perValue
        else:
            cdt = datetime.utcfromtimestamp(v.ts)
            if dt.year == cdt.year and dt.month == cdt.month:
                # same month: keep the sample with the lower perValue
                if lastPerValue > v.perValue:
                    ts = v.ts
                    dt = cdt
                    lastPerValue = v.perValue
                if i == len(pnl.values) - 1:
                    # last sample overall: flush the month being tracked
                    arr.append(ts)
            else:
                # month changed: flush the finished month and restart tracking
                # NOTE(review): if the month changes on the very last sample,
                # the new month's single sample is never appended — confirm
                # whether that is intended.
                arr.append(ts)
                ts = v.ts
                dt = cdt
                lastPerValue = v.perValue
    return arr
def getPNLTimestampHighInMonth(pnl: trdb2py.trading2_pb2.PNLAssetData) -> list:
    """Return, per calendar month (UTC), the timestamp of the highest perValue sample.

    Mirror of getPNLTimestampLowInMonth with the comparison inverted:
    tracks the running maximum within the current month and flushes it
    whenever the month changes (and at the final sample).
    """
    ts = 0            # timestamp of the current month's maximum so far (0 = not started)
    dt = None         # datetime of `ts`, used for year/month comparison
    lastPerValue = 0  # perValue at `ts`
    arr = []
    for i in range(0, len(pnl.values)):
        v = pnl.values[i]
        if ts == 0:
            # first sample: start tracking the first month
            ts = v.ts
            dt = datetime.utcfromtimestamp(ts)
            lastPerValue = v.perValue
        else:
            cdt = datetime.utcfromtimestamp(v.ts)
            if dt.year == cdt.year and dt.month == cdt.month:
                # same month: keep the sample with the higher perValue
                if lastPerValue < v.perValue:
                    ts = v.ts
                    dt = cdt
                    lastPerValue = v.perValue
                if i == len(pnl.values) - 1:
                    # last sample overall: flush the month being tracked
                    arr.append(ts)
            else:
                # month changed: flush the finished month and restart tracking
                # NOTE(review): same last-sample edge case as the "low" variant.
                arr.append(ts)
                ts = v.ts
                dt = cdt
                lastPerValue = v.perValue
    return arr
# +
# Buy-and-hold baseline on the chosen index (CSI 300)
asset = 'jrj.110011'
# asset = 'jqdata.000036_XSHG|1d'
# asset = 'jqdata.000032_XSHG|1d'
asset = 'jqdata.000300_XSHG|1d'
# baseline
s0 = trdb2py.trading2_pb2.Strategy(
    name="normal",
    asset=trdb2py.str2asset(asset),
)
buy0 = trdb2py.trading2_pb2.CtrlCondition(
    name='buyandhold',
)
paramsbuy = trdb2py.trading2_pb2.BuyParams(
    perHandMoney=1,
)
paramsinit = trdb2py.trading2_pb2.InitParams(
    money=10000,
)
s0.buy.extend([buy0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsInit.CopyFrom(paramsinit)
p0 = trdb2py.trading2_pb2.SimTradingParams(
    assets=[trdb2py.str2asset(asset)],
    startTs=tsStart,
    endTs=tsEnd,
    strategies=[s0],
    title='沪深300',
)
pnlBaseline = trdb2py.simTrading(trdb2cfg, p0)
trdb2py.showPNL(pnlBaseline, toImg=isStaticImg, width=width, height=height)
# +
# Monthly AIP strategies: buy on day i of every month (only i=1 enabled here)
lstparams = []
for i in range(1, 2):
    buy0 = trdb2py.trading2_pb2.CtrlCondition(
        name='monthdayex',
        vals=[i],
    )
    s0 = trdb2py.trading2_pb2.Strategy(
        name="normal",
        asset=trdb2py.str2asset(asset),
    )
    # paramsaip = trdb2py.trading2_pb2.AIPParams(
    #     money=10000,
    #     type=trdb2py.trading2_pb2.AIPTT_WEEKDAY,
    #     day=1,
    # )
    s0.buy.extend([buy0])
    s0.paramsBuy.CopyFrom(paramsbuy)
    s0.paramsSell.CopyFrom(paramssell)
    # s0.paramsInit.CopyFrom(paramsinit)
    s0.paramsAIP.CopyFrom(paramsaip)
    lstparams.append(trdb2py.trading2_pb2.SimTradingParams(
        assets=[trdb2py.str2asset(asset)],
        startTs=tsStart,
        endTs=tsEnd,
        strategies=[s0],
        title='每月{}号定投'.format(i),
    ))
lstaippnl = trdb2py.simTradings(trdb2cfg, lstparams)
trdb2py.showPNLs(lstaippnl + [pnlBaseline], toImg=isStaticImg, width=width, height=height)
# -
# We can see that no matter which day of the month the buy happens, the final results barely differ.
# +
# Tabulate drawdown/return/sharpe statistics for the AIP runs vs the baseline
dfpnl1b = trdb2py.buildPNLReport(lstaippnl + [pnlBaseline])
dfpnl1b[['title', 'maxDrawdown', 'maxDrawdownStart', 'maxDrawdownEnd', 'totalReturns', 'sharpe', 'annualizedReturns', 'annualizedVolatility']].sort_values(by='totalReturns', ascending=False)
# +
# Rolling AIP with a 20% take-profit: each round auto-invests monthly and
# liquidates once total return reaches 1.2 (paramstakeprofit2 ends the
# round); the next round restarts on day 1 of the following month.
lstparams = []
lstpnl = []
cost = 0
value = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
i = 1
while True:
    buy0 = trdb2py.trading2_pb2.CtrlCondition(
        name='monthdayex',
        vals=[1],
    )
    # takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
    #     # name='annualizedreturns',
    #     name='totalreturnex',
    #     vals=[1.2, 0.1],
    #     operators=['>='],
    # )
    takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
        # name='annualizedreturns',
        name='totalreturn',
        vals=[1.2],
        operators=['>='],
    )
    # NOTE(review): takeprofit1 is built but never attached to the strategy
    # in this cell (only takeprofit0 is extended below).
    takeprofit1 = trdb2py.trading2_pb2.CtrlCondition(
        name='timestamp',
        int64Vals=[tsStart + 60 * 60 * 24 * 180],
        operators=['>='],
    )
    s0 = trdb2py.trading2_pb2.Strategy(
        name="normal",
        asset=trdb2py.str2asset(asset),
    )
    s0.buy.extend([buy0])
    s0.takeprofit.extend([takeprofit0])
    s0.paramsBuy.CopyFrom(paramsbuy)
    s0.paramsSell.CopyFrom(paramssell)
    s0.paramsTakeProfit.CopyFrom(paramstakeprofit2)
    s0.paramsAIP.CopyFrom(paramsaip)
    p0 = trdb2py.trading2_pb2.SimTradingParams(
        assets=[trdb2py.str2asset(asset)],
        startTs=tsStart,
        endTs=tsEnd,
        strategies=[s0],
        title='20%止盈{}'.format(i),
    )
    pnlt = trdb2py.simTrading(trdb2cfg, p0)
    pnlrt = getLastResult(pnlt['pnl'])
    cost += pnlrt['cost']
    value += pnlrt['value']
    lstpnl.append(pnlt)
    cts = getAIPLastTs(pnlt['pnl'])
    if cts < 0:
        # the last round never hit the take-profit: stop rolling
        break
    tsStart = int(getNextMonthDay1(cts))
    i+=1
# {'value': value, 'cost': cost, 'per': value / cost}
trdb2py.showPNLs(lstpnl + [pnlBaseline] + lstaippnl, toImg=isStaticImg, width=width, height=height)
# +
# Merge all rounds into a single combined PNL curve
pnldata = mergePNL(lstpnl)
# pnldata = mergePNL([])
trdb2py.showPNLs(lstpnl + [pnlBaseline, {'title': '20%止盈合并', 'pnl': pnldata}, lstaippnl[0]], toImg=isStaticImg, width=width, height=height)
# +
# Variant: 20% take-profit, but only after at least 180 days (~6 AIP
# periods) have elapsed — both conditions are attached to the strategy.
lstparams = []
lstpnl1 = []
cost = 0
value = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
i = 1
while True:
    buy0 = trdb2py.trading2_pb2.CtrlCondition(
        name='monthdayex',
        vals=[1],
    )
    # takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
    #     # name='annualizedreturns',
    #     name='totalreturnex',
    #     vals=[1.2, 0.1],
    #     operators=['>='],
    # )
    takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
        # name='annualizedreturns',
        name='totalreturn',
        vals=[1.2],
        operators=['>='],
    )
    # only allow the take-profit after 180 days from this round's start
    takeprofit1 = trdb2py.trading2_pb2.CtrlCondition(
        name='timestamp',
        int64Vals=[tsStart + 60 * 60 * 24 * 180],
        operators=['>='],
    )
    s0 = trdb2py.trading2_pb2.Strategy(
        name="normal",
        asset=trdb2py.str2asset(asset),
    )
    s0.buy.extend([buy0])
    s0.takeprofit.extend([takeprofit0, takeprofit1])
    s0.paramsBuy.CopyFrom(paramsbuy)
    s0.paramsSell.CopyFrom(paramssell)
    s0.paramsTakeProfit.CopyFrom(paramstakeprofit2)
    s0.paramsAIP.CopyFrom(paramsaip)
    p0 = trdb2py.trading2_pb2.SimTradingParams(
        assets=[trdb2py.str2asset(asset)],
        startTs=tsStart,
        endTs=tsEnd,
        strategies=[s0],
        title='20%止盈至少6期{}'.format(i),
    )
    pnlt = trdb2py.simTrading(trdb2cfg, p0)
    pnlrt = getLastResult(pnlt['pnl'])
    cost += pnlrt['cost']
    value += pnlrt['value']
    lstpnl1.append(pnlt)
    cts = getAIPLastTs(pnlt['pnl'])
    if cts < 0:
        break
    tsStart = int(getNextMonthDay1(cts))
    i+=1
# {'value': value, 'cost': cost, 'per': value / cost}
trdb2py.showPNLs(lstpnl1 + [pnlBaseline] + lstaippnl, toImg=isStaticImg, width=width, height=height)
# +
# Merge all rounds of this variant into one curve and compare
pnldata1 = mergePNL(lstpnl1)
# pnldata = mergePNL([])
trdb2py.showPNLs([pnlBaseline, {'title': '20%止盈至少6期合并', 'pnl': pnldata1}, lstaippnl[0], {'title': '20%止盈合并', 'pnl': pnldata}], toImg=isStaticImg, width=width, height=height)
# +
# Variant: trailing take-profit — trigger at 20% total return with a 10%
# pull-back allowance (totalreturnex with vals=[1.2, 0.1]).
lstparams = []
lstpnl2 = []
cost = 0
value = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
i = 1
while True:
    buy0 = trdb2py.trading2_pb2.CtrlCondition(
        name='monthdayex',
        vals=[1],
    )
    takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
        # name='annualizedreturns',
        name='totalreturnex',
        vals=[1.2, 0.1],
        operators=['>='],
    )
    # takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
    #     name='annualizedreturns',
    #     # name='totalreturn',
    #     vals=[1.2],
    #     operators=['>='],
    # )
    # takeprofit1 = trdb2py.trading2_pb2.CtrlCondition(
    #     name='timestamp',
    #     int64Vals=[tsStart + 60 * 60 * 24 * 180],
    #     operators=['>='],
    # )
    s0 = trdb2py.trading2_pb2.Strategy(
        name="normal",
        asset=trdb2py.str2asset(asset),
    )
    s0.buy.extend([buy0])
    s0.takeprofit.extend([takeprofit0])
    s0.paramsBuy.CopyFrom(paramsbuy)
    s0.paramsSell.CopyFrom(paramssell)
    s0.paramsTakeProfit.CopyFrom(paramstakeprofit2)
    s0.paramsAIP.CopyFrom(paramsaip)
    p0 = trdb2py.trading2_pb2.SimTradingParams(
        assets=[trdb2py.str2asset(asset)],
        startTs=tsStart,
        endTs=tsEnd,
        strategies=[s0],
        title='20%回撤10%止盈{}'.format(i),
    )
    pnlt = trdb2py.simTrading(trdb2cfg, p0)
    pnlrt = getLastResult(pnlt['pnl'])
    cost += pnlrt['cost']
    value += pnlrt['value']
    lstpnl2.append(pnlt)
    cts = getAIPLastTs(pnlt['pnl'])
    if cts < 0:
        break
    tsStart = int(getNextMonthDay1(cts))
    i+=1
# {'value': value, 'cost': cost, 'per': value / cost}
trdb2py.showPNLs(lstpnl2 + [pnlBaseline] + lstaippnl, toImg=isStaticImg, width=width, height=height)
# +
# Merge all rounds of this variant into one curve and compare
pnldata2 = mergePNL(lstpnl2)
# pnldata = mergePNL([])
trdb2py.showPNLs([pnlBaseline, {'title': '20%回撤10%止盈合并', 'pnl': pnldata2}, {'title': '20%止盈至少6期合并', 'pnl': pnldata1}, lstaippnl[0], {'title': '20%止盈合并', 'pnl': pnldata}], toImg=isStaticImg, width=width, height=height)
# +
# Variant: 100% take-profit — each round only liquidates once total return
# reaches 2.0 (doubling), then restarts the AIP the following month.
lstparams = []
lstpnl3 = []
cost = 0
value = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
i = 1
while True:
    buy0 = trdb2py.trading2_pb2.CtrlCondition(
        name='monthdayex',
        vals=[1],
    )
    # takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
    #     # name='annualizedreturns',
    #     name='totalreturnex',
    #     vals=[1.2, 0.1],
    #     operators=['>='],
    # )
    takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
        # name='annualizedreturns',
        name='totalreturn',
        vals=[2.0],
        operators=['>='],
    )
    # NOTE(review): takeprofit1 is built but never attached to the strategy
    # in this cell (only takeprofit0 is extended below).
    takeprofit1 = trdb2py.trading2_pb2.CtrlCondition(
        name='timestamp',
        int64Vals=[tsStart + 60 * 60 * 24 * 180],
        operators=['>='],
    )
    s0 = trdb2py.trading2_pb2.Strategy(
        name="normal",
        asset=trdb2py.str2asset(asset),
    )
    s0.buy.extend([buy0])
    s0.takeprofit.extend([takeprofit0])
    s0.paramsBuy.CopyFrom(paramsbuy)
    s0.paramsSell.CopyFrom(paramssell)
    s0.paramsTakeProfit.CopyFrom(paramstakeprofit2)
    s0.paramsAIP.CopyFrom(paramsaip)
    p0 = trdb2py.trading2_pb2.SimTradingParams(
        assets=[trdb2py.str2asset(asset)],
        startTs=tsStart,
        endTs=tsEnd,
        strategies=[s0],
        title='100%止盈{}'.format(i),
    )
    pnlt = trdb2py.simTrading(trdb2cfg, p0)
    pnlrt = getLastResult(pnlt['pnl'])
    cost += pnlrt['cost']
    value += pnlrt['value']
    lstpnl3.append(pnlt)
    cts = getAIPLastTs(pnlt['pnl'])
    if cts < 0:
        break
    tsStart = int(getNextMonthDay1(cts))
    i+=1
# {'value': value, 'cost': cost, 'per': value / cost}
trdb2py.showPNLs(lstpnl3 + [pnlBaseline] + lstaippnl, toImg=isStaticImg, width=width, height=height)
# +
# Merge all rounds of this variant and compare every take-profit flavor
pnldata3 = mergePNL(lstpnl3)
# pnldata = mergePNL([])
trdb2py.showPNLs([pnlBaseline,
                 {'title': '100%止盈合并', 'pnl': pnldata3},
                 {'title': '20%回撤10%止盈合并', 'pnl': pnldata2},
                 {'title': '20%止盈至少6期合并', 'pnl': pnldata1},
                 lstaippnl[0],
                 {'title': '20%止盈合并', 'pnl': pnldata}], toImg=isStaticImg, width=width, height=height)
# +
# Accumulators for the "reinvest-the-profit" experiment driven by
# deeprunAIP below.
lstparams = []
lstpnl4 = []
lstpnl41 = []
cost = 0
value = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
i = 1
lastMoney = 0
def deeprunAIP(lst, ts, money, parts, index):
    """Re-invest the proceeds of a finished round in follow-up AIP rounds.

    Starting at timestamp `ts` with capital `money`, runs a compounding
    simulation (1/parts of the pool per buy) that takes profit at +100%
    total return or after the 180-day floor, then restarts the next round on
    the 1st of the following month with the round's remaining money, until
    the plan no longer trades.

    Args:
        lst: output list; each round's pnl dict (with 'inmoney' added) is appended.
        ts: start timestamp of the first round.
        money: initial money pool for the first round.
        parts: the pool is split into `parts` hands; 1/parts is bought per trade.
        index: label suffix used in the simulation titles.
    """
    i = 1
    while True:
        # Buy on the first calendar day of each month.
        buy0 = trdb2py.trading2_pb2.CtrlCondition(
            name='monthdayex',
            vals=[1],
        )
        # Take profit at +100% total return...
        takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
            name='totalreturn',
            vals=[2.0],
            operators=['>='],
        )
        # ...or once past the 180-day floor.
        # NOTE(review): this uses the module-level `tsStart`, not the `ts`
        # parameter, so the 180-day floor does not advance on later rounds —
        # looks like a copy-paste slip; confirm intended behavior.
        takeprofit1 = trdb2py.trading2_pb2.CtrlCondition(
            name='timestamp',
            int64Vals=[tsStart + 60 * 60 * 24 * 180],
            operators=['>='],
        )
        # Initial money pool.
        paramsinitt = trdb2py.trading2_pb2.InitParams(
            money=money,
        )
        # Buy parameters: reinvest the pool (compounding), 1/parts per hand.
        paramsbuyt = trdb2py.trading2_pb2.BuyParams(
            perHandMoney=1/parts,
        )
        s0 = trdb2py.trading2_pb2.Strategy(
            name="normal",
            asset=trdb2py.str2asset(asset),
        )
        s0.buy.extend([buy0])
        s0.takeprofit.extend([takeprofit0, takeprofit1])
        s0.paramsBuy.CopyFrom(paramsbuyt)
        s0.paramsSell.CopyFrom(paramssell)
        s0.paramsTakeProfit.CopyFrom(paramstakeprofit2)
        s0.paramsInit.CopyFrom(paramsinitt)
        # s0.paramsAIP.CopyFrom(paramsaip)
        p0 = trdb2py.trading2_pb2.SimTradingParams(
            assets=[trdb2py.str2asset(asset)],
            startTs=ts,
            endTs=tsEnd,
            strategies=[s0],
            title='100%盈利再定投{}-{}'.format(index, i),
        )
        pnlt = trdb2py.simTrading(trdb2cfg, p0)
        # Remember the money that entered this round for weighted merging later.
        pnlt['inmoney'] = money
        lst.append(pnlt)
        cts = getAIPLastTs(pnlt['pnl'])
        if cts < 0:
            break
        # Carry the remaining money into the next round, starting next month.
        lastctrl = getLastCtrl(pnlt['pnl'])
        money = lastctrl.lastMoney
        ts = int(getNextMonthDay1(cts))
        i+=1
# Outer loop: run the 100% take-profit AIP; after each round that takes
# profit, hand the remaining money to deeprunAIP to re-invest in parallel.
while True:
    buy0 = trdb2py.trading2_pb2.CtrlCondition(
        name='monthdayex',
        vals=[1],
    )
    takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
        # name='annualizedreturns',
        name='totalreturn',
        vals=[2.0],
        operators=['>='],
    )
    takeprofit1 = trdb2py.trading2_pb2.CtrlCondition(
        name='timestamp',
        int64Vals=[tsStart + 60 * 60 * 24 * 180],
        operators=['>='],
    )
    s0 = trdb2py.trading2_pb2.Strategy(
        name="normal",
        asset=trdb2py.str2asset(asset),
    )
    s0.buy.extend([buy0])
    s0.takeprofit.extend([takeprofit0, takeprofit1])
    s0.paramsBuy.CopyFrom(paramsbuy)
    s0.paramsSell.CopyFrom(paramssell)
    s0.paramsTakeProfit.CopyFrom(paramstakeprofit2)
    s0.paramsAIP.CopyFrom(paramsaip)
    p0 = trdb2py.trading2_pb2.SimTradingParams(
        assets=[trdb2py.str2asset(asset)],
        startTs=tsStart,
        endTs=tsEnd,
        strategies=[s0],
        title='100%止盈{}'.format(i),
    )
    pnlt = trdb2py.simTrading(trdb2cfg, p0)
    pnlrt = getLastResult(pnlt['pnl'])
    cost += pnlrt['cost']
    value += pnlrt['value']
    lstpnl4.append(pnlt)
    cts = getAIPLastTs(pnlt['pnl'])
    if cts < 0:
        break
    lastctrl = getLastCtrl(pnlt['pnl'])
    lastMoney += lastctrl.lastMoney
    tsStart = int(getNextMonthDay1(cts))
    # Re-invest this round's remaining money over 6 hands starting next month.
    deeprunAIP(lstpnl41, tsStart, lastctrl.lastMoney, 6, i)
    i+=1
# {'value': value, 'cost': cost, 'per': value / cost}
trdb2py.showPNLs(lstpnl4 + lstpnl41 + [pnlBaseline] + lstaippnl, toImg=isStaticImg, width=width, height=height)
# +
# getLastCtrl(lstpnl[0]['pnl']).ts
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[0]['pnl']).ts, lstpnl4[0]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[2]['pnl']).ts, lstpnl4[2]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[4]['pnl']).ts, lstpnl4[4]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[6]['pnl']).ts, lstpnl4[6]['pnl'])
pnldata4 = mergePNL(lstpnl4)
# Fold the re-investment rounds into the merged curve, weighted by the money
# that entered each round.
for v in lstpnl41:
    mergePNLEx(pnldata4, v['pnl'], v['inmoney'])
# # pnldata = mergePNL([])
# trdb2py.showPNLs(lstpnl + [pnlBaseline, {'title': '合并', 'pnl': pnldata}], toImg=isStaticImg, width=width, height=height)
trdb2py.showPNLs([{'title': '100%止盈再定投合并', 'pnl': pnldata4},
                  pnlBaseline,
                  {'title': '100%止盈合并', 'pnl': pnldata3},
                  {'title': '20%回撤10%止盈合并', 'pnl': pnldata2},
                  {'title': '20%止盈至少6期合并', 'pnl': pnldata1},
                  lstaippnl[0],
                  {'title': '20%止盈合并', 'pnl': pnldata}], toImg=isStaticImg, width=width, height=height)
# +
# Same experiment with a +20% take-profit target.
lstparams = []
lstpnl5 = []
lstpnl51 = []
cost = 0
value = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
i = 1
lastMoney = 0
def deeprunAIP(lst, ts, money, parts, index):
    """Re-invest round proceeds with a +20% take-profit target.

    Variant of the earlier deeprunAIP: compounding rounds starting at `ts`
    with capital `money` (1/parts per buy), taking profit at total return
    >= 1.2, restarted on the 1st of the month after each take-profit.
    Appends each round's pnl dict (with 'inmoney' added) to `lst`.
    """
    i = 1
    while True:
        buy0 = trdb2py.trading2_pb2.CtrlCondition(
            name='monthdayex',
            vals=[1],
        )
        takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
            name='totalreturn',
            vals=[1.2],
            operators=['>='],
        )
        # NOTE(review): takeprofit1 is never added to the strategy (only
        # takeprofit0 is extended below) and it reads the module-level
        # `tsStart` rather than `ts` — leftover from copy-paste; confirm.
        takeprofit1 = trdb2py.trading2_pb2.CtrlCondition(
            name='timestamp',
            int64Vals=[tsStart + 60 * 60 * 24 * 180],
            operators=['>='],
        )
        # Initial money pool.
        paramsinitt = trdb2py.trading2_pb2.InitParams(
            money=money,
        )
        # Buy parameters: reinvest the pool (compounding), 1/parts per hand.
        paramsbuyt = trdb2py.trading2_pb2.BuyParams(
            perHandMoney=1/parts,
        )
        s0 = trdb2py.trading2_pb2.Strategy(
            name="normal",
            asset=trdb2py.str2asset(asset),
        )
        s0.buy.extend([buy0])
        s0.takeprofit.extend([takeprofit0])
        s0.paramsBuy.CopyFrom(paramsbuyt)
        s0.paramsSell.CopyFrom(paramssell)
        s0.paramsTakeProfit.CopyFrom(paramstakeprofit2)
        s0.paramsInit.CopyFrom(paramsinitt)
        # s0.paramsAIP.CopyFrom(paramsaip)
        p0 = trdb2py.trading2_pb2.SimTradingParams(
            assets=[trdb2py.str2asset(asset)],
            startTs=ts,
            endTs=tsEnd,
            strategies=[s0],
            title='20%盈利再定投{}-{}'.format(index, i),
        )
        pnlt = trdb2py.simTrading(trdb2cfg, p0)
        pnlt['inmoney'] = money
        lst.append(pnlt)
        cts = getAIPLastTs(pnlt['pnl'])
        if cts < 0:
            break
        lastctrl = getLastCtrl(pnlt['pnl'])
        money = lastctrl.lastMoney
        ts = int(getNextMonthDay1(cts))
        i+=1
# Outer loop: +20% take-profit AIP with re-investment of each round's proceeds.
while True:
    buy0 = trdb2py.trading2_pb2.CtrlCondition(
        name='monthdayex',
        vals=[1],
    )
    takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
        # name='annualizedreturns',
        name='totalreturn',
        vals=[1.2],
        operators=['>='],
    )
    takeprofit1 = trdb2py.trading2_pb2.CtrlCondition(
        name='timestamp',
        int64Vals=[tsStart + 60 * 60 * 24 * 180],
        operators=['>='],
    )
    s0 = trdb2py.trading2_pb2.Strategy(
        name="normal",
        asset=trdb2py.str2asset(asset),
    )
    s0.buy.extend([buy0])
    s0.takeprofit.extend([takeprofit0, takeprofit1])
    s0.paramsBuy.CopyFrom(paramsbuy)
    s0.paramsSell.CopyFrom(paramssell)
    s0.paramsTakeProfit.CopyFrom(paramstakeprofit2)
    s0.paramsAIP.CopyFrom(paramsaip)
    p0 = trdb2py.trading2_pb2.SimTradingParams(
        assets=[trdb2py.str2asset(asset)],
        startTs=tsStart,
        endTs=tsEnd,
        strategies=[s0],
        title='20%止盈{}'.format(i),
    )
    pnlt = trdb2py.simTrading(trdb2cfg, p0)
    pnlrt = getLastResult(pnlt['pnl'])
    cost += pnlrt['cost']
    value += pnlrt['value']
    lstpnl5.append(pnlt)
    cts = getAIPLastTs(pnlt['pnl'])
    if cts < 0:
        break
    lastctrl = getLastCtrl(pnlt['pnl'])
    lastMoney += lastctrl.lastMoney
    tsStart = int(getNextMonthDay1(cts))
    deeprunAIP(lstpnl51, tsStart, lastctrl.lastMoney, 6, i)
    i+=1
# {'value': value, 'cost': cost, 'per': value / cost}
trdb2py.showPNLs(lstpnl5 + lstpnl51 + [pnlBaseline] + lstaippnl, toImg=isStaticImg, width=width, height=height)
# +
# getLastCtrl(lstpnl[0]['pnl']).ts
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[0]['pnl']).ts, lstpnl4[0]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[2]['pnl']).ts, lstpnl4[2]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[4]['pnl']).ts, lstpnl4[4]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[6]['pnl']).ts, lstpnl4[6]['pnl'])
pnldata5 = mergePNL(lstpnl5)
for v in lstpnl51:
    mergePNLEx(pnldata5, v['pnl'], v['inmoney'])
# # pnldata = mergePNL([])
# trdb2py.showPNLs(lstpnl + [pnlBaseline, {'title': '合并', 'pnl': pnldata}], toImg=isStaticImg, width=width, height=height)
trdb2py.showPNLs([{'title': '20%止盈再定投合并', 'pnl': pnldata5},
                  {'title': '100%止盈再定投合并', 'pnl': pnldata4},
                  pnlBaseline,
                  {'title': '100%止盈合并', 'pnl': pnldata3},
                  {'title': '20%回撤10%止盈合并', 'pnl': pnldata2},
                  {'title': '20%止盈至少6期合并', 'pnl': pnldata1},
                  lstaippnl[0],
                  {'title': '20%止盈合并', 'pnl': pnldata}], toImg=isStaticImg, width=width, height=height)
# +
# Variant: take profit on annualized returns instead of total return.
lstparams = []
lstpnl6 = []
cost = 0
value = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
i = 1
while True:
    buy0 = trdb2py.trading2_pb2.CtrlCondition(
        name='monthdayex',
        vals=[1],
    )
    # takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
    #     # name='annualizedreturns',
    #     name='totalreturnex',
    #     vals=[1.2, 0.1],
    #     operators=['>='],
    # )
    takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
        name='annualizedreturns',
        # name='totalreturn',
        vals=[1.2],
        operators=['>='],
    )
    # NOTE(review): takeprofit1 is constructed but not extended below — leftover?
    takeprofit1 = trdb2py.trading2_pb2.CtrlCondition(
        name='timestamp',
        int64Vals=[tsStart + 60 * 60 * 24 * 180],
        operators=['>='],
    )
    s0 = trdb2py.trading2_pb2.Strategy(
        name="normal",
        asset=trdb2py.str2asset(asset),
    )
    s0.buy.extend([buy0])
    s0.takeprofit.extend([takeprofit0])
    s0.paramsBuy.CopyFrom(paramsbuy)
    s0.paramsSell.CopyFrom(paramssell)
    s0.paramsTakeProfit.CopyFrom(paramstakeprofit2)
    s0.paramsAIP.CopyFrom(paramsaip)
    p0 = trdb2py.trading2_pb2.SimTradingParams(
        assets=[trdb2py.str2asset(asset)],
        startTs=tsStart,
        endTs=tsEnd,
        strategies=[s0],
        title='年化20%止盈{}'.format(i),
    )
    pnlt = trdb2py.simTrading(trdb2cfg, p0)
    pnlrt = getLastResult(pnlt['pnl'])
    cost += pnlrt['cost']
    value += pnlrt['value']
    lstpnl6.append(pnlt)
    cts = getAIPLastTs(pnlt['pnl'])
    if cts < 0:
        break
    # NOTE(review): unlike the '年化10%' loop below, this loop has no
    # `tsStart >= tsEnd` guard after advancing — confirm it cannot spin.
    tsStart = int(getNextMonthDay1(cts))
    i+=1
# {'value': value, 'cost': cost, 'per': value / cost}
trdb2py.showPNLs(lstpnl6 + [pnlBaseline] + lstaippnl, toImg=isStaticImg, width=width, height=height)
# +
# getLastCtrl(lstpnl[0]['pnl']).ts
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[0]['pnl']).ts, lstpnl4[0]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[2]['pnl']).ts, lstpnl4[2]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[4]['pnl']).ts, lstpnl4[4]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[6]['pnl']).ts, lstpnl4[6]['pnl'])
pnldata6 = mergePNL(lstpnl6)
# for v in lstpnl51:
#     mergePNLEx(pnldata5, v['pnl'], v['inmoney'])
# # pnldata = mergePNL([])
# trdb2py.showPNLs(lstpnl + [pnlBaseline, {'title': '合并', 'pnl': pnldata}], toImg=isStaticImg, width=width, height=height)
trdb2py.showPNLs([{'title': '年化20%止盈合并', 'pnl': pnldata6},
                  {'title': '20%止盈再定投合并', 'pnl': pnldata5},
                  {'title': '100%止盈再定投合并', 'pnl': pnldata4},
                  pnlBaseline,
                  {'title': '100%止盈合并', 'pnl': pnldata3},
                  {'title': '20%回撤10%止盈合并', 'pnl': pnldata2},
                  {'title': '20%止盈至少6期合并', 'pnl': pnldata1},
                  lstaippnl[0],
                  {'title': '20%止盈合并', 'pnl': pnldata}], toImg=isStaticImg, width=width, height=height)
# +
# Variant: annualized-returns take-profit with a lower threshold (1.1).
lstparams = []
lstpnl7 = []
cost = 0
value = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
i = 1
while True:
    buy0 = trdb2py.trading2_pb2.CtrlCondition(
        name='monthdayex',
        vals=[1],
    )
    # takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
    #     # name='annualizedreturns',
    #     name='totalreturnex',
    #     vals=[1.2, 0.1],
    #     operators=['>='],
    # )
    takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
        name='annualizedreturns',
        # name='totalreturn',
        vals=[1.1],
        operators=['>='],
    )
    takeprofit1 = trdb2py.trading2_pb2.CtrlCondition(
        name='timestamp',
        int64Vals=[tsStart + 60 * 60 * 24 * 180],
        operators=['>='],
    )
    s0 = trdb2py.trading2_pb2.Strategy(
        name="normal",
        asset=trdb2py.str2asset(asset),
    )
    s0.buy.extend([buy0])
    s0.takeprofit.extend([takeprofit0])
    s0.paramsBuy.CopyFrom(paramsbuy)
    s0.paramsSell.CopyFrom(paramssell)
    s0.paramsTakeProfit.CopyFrom(paramstakeprofit2)
    s0.paramsAIP.CopyFrom(paramsaip)
    p0 = trdb2py.trading2_pb2.SimTradingParams(
        assets=[trdb2py.str2asset(asset)],
        startTs=tsStart,
        endTs=tsEnd,
        strategies=[s0],
        title='年化10%止盈{}'.format(i),
    )
    pnlt = trdb2py.simTrading(trdb2cfg, p0)
    pnlrt = getLastResult(pnlt['pnl'])
    cost += pnlrt['cost']
    value += pnlrt['value']
    lstpnl7.append(pnlt)
    cts = getAIPLastTs(pnlt['pnl'])
    if cts < 0:
        break
    tsStart = int(getNextMonthDay1(cts))
    # Stop once the next round would start past the end of the window.
    if tsStart >= tsEnd:
        break
    i+=1
# {'value': value, 'cost': cost, 'per': value / cost}
trdb2py.showPNLs(lstpnl7 + [pnlBaseline] + lstaippnl, toImg=isStaticImg, width=width, height=height)
# +
# getLastCtrl(lstpnl[0]['pnl']).ts
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[0]['pnl']).ts, lstpnl4[0]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[2]['pnl']).ts, lstpnl4[2]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[4]['pnl']).ts, lstpnl4[4]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[6]['pnl']).ts, lstpnl4[6]['pnl'])
pnldata7 = mergePNL(lstpnl7)
# for v in lstpnl51:
#     mergePNLEx(pnldata5, v['pnl'], v['inmoney'])
# # pnldata = mergePNL([])
# trdb2py.showPNLs(lstpnl + [pnlBaseline, {'title': '合并', 'pnl': pnldata}], toImg=isStaticImg, width=width, height=height)
trdb2py.showPNLs([{'title': '年化10%止盈合并', 'pnl': pnldata7},
                  {'title': '年化20%止盈合并', 'pnl': pnldata6},
                  {'title': '20%止盈再定投合并', 'pnl': pnldata5},
                  {'title': '100%止盈再定投合并', 'pnl': pnldata4},
                  pnlBaseline,
                  {'title': '100%止盈合并', 'pnl': pnldata3},
                  {'title': '20%回撤10%止盈合并', 'pnl': pnldata2},
                  {'title': '20%止盈至少6期合并', 'pnl': pnldata1},
                  lstaippnl[0],
                  {'title': '20%止盈合并', 'pnl': pnldata}], toImg=isStaticImg, width=width, height=height)
# +
# Annualized-20% take-profit with re-investment of each round's proceeds.
lstparams = []
lstpnl8 = []
lstpnl81 = []
cost = 0
value = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
i = 1
lastMoney = 0
def deeprunAIP(lst, ts, money, parts, index):
    """Re-invest round proceeds with an annualized-returns take-profit.

    Compounding rounds starting at `ts` with capital `money` (1/parts per
    buy), taking profit when annualizedreturns >= 1.2 or past the 180-day
    floor, restarted on the 1st of the month after each take-profit.
    Appends each round's pnl dict (with 'inmoney' added) to `lst`.
    """
    i = 1
    while True:
        buy0 = trdb2py.trading2_pb2.CtrlCondition(
            name='monthdayex',
            vals=[1],
        )
        takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
            name='annualizedreturns',
            vals=[1.2],
            operators=['>='],
        )
        # NOTE(review): reads the module-level `tsStart`, not the `ts`
        # parameter, so the 180-day floor does not advance on later rounds —
        # confirm intended behavior.
        takeprofit1 = trdb2py.trading2_pb2.CtrlCondition(
            name='timestamp',
            int64Vals=[tsStart + 60 * 60 * 24 * 180],
            operators=['>='],
        )
        # Initial money pool.
        paramsinitt = trdb2py.trading2_pb2.InitParams(
            money=money,
        )
        # Buy parameters: reinvest the pool (compounding), 1/parts per hand.
        paramsbuyt = trdb2py.trading2_pb2.BuyParams(
            perHandMoney=1/parts,
        )
        s0 = trdb2py.trading2_pb2.Strategy(
            name="normal",
            asset=trdb2py.str2asset(asset),
        )
        s0.buy.extend([buy0])
        s0.takeprofit.extend([takeprofit0, takeprofit1])
        s0.paramsBuy.CopyFrom(paramsbuyt)
        s0.paramsSell.CopyFrom(paramssell)
        s0.paramsTakeProfit.CopyFrom(paramstakeprofit2)
        s0.paramsInit.CopyFrom(paramsinitt)
        # s0.paramsAIP.CopyFrom(paramsaip)
        p0 = trdb2py.trading2_pb2.SimTradingParams(
            assets=[trdb2py.str2asset(asset)],
            startTs=ts,
            endTs=tsEnd,
            strategies=[s0],
            title='年化20%盈利再定投{}-{}'.format(index, i),
        )
        pnlt = trdb2py.simTrading(trdb2cfg, p0)
        pnlt['inmoney'] = money
        lst.append(pnlt)
        cts = getAIPLastTs(pnlt['pnl'])
        if cts < 0:
            break
        lastctrl = getLastCtrl(pnlt['pnl'])
        money = lastctrl.lastMoney
        ts = int(getNextMonthDay1(cts))
        i+=1
# Outer loop: annualized-20% take-profit AIP with re-investment of proceeds.
while True:
    buy0 = trdb2py.trading2_pb2.CtrlCondition(
        name='monthdayex',
        vals=[1],
    )
    takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
        name='annualizedreturns',
        # name='totalreturn',
        vals=[1.2],
        operators=['>='],
    )
    # NOTE(review): takeprofit1 is constructed but not extended below — leftover?
    takeprofit1 = trdb2py.trading2_pb2.CtrlCondition(
        name='timestamp',
        int64Vals=[tsStart + 60 * 60 * 24 * 180],
        operators=['>='],
    )
    s0 = trdb2py.trading2_pb2.Strategy(
        name="normal",
        asset=trdb2py.str2asset(asset),
    )
    s0.buy.extend([buy0])
    s0.takeprofit.extend([takeprofit0])
    s0.paramsBuy.CopyFrom(paramsbuy)
    s0.paramsSell.CopyFrom(paramssell)
    s0.paramsTakeProfit.CopyFrom(paramstakeprofit2)
    s0.paramsAIP.CopyFrom(paramsaip)
    p0 = trdb2py.trading2_pb2.SimTradingParams(
        assets=[trdb2py.str2asset(asset)],
        startTs=tsStart,
        endTs=tsEnd,
        strategies=[s0],
        title='年化20%止盈{}'.format(i),
    )
    pnlt = trdb2py.simTrading(trdb2cfg, p0)
    pnlrt = getLastResult(pnlt['pnl'])
    cost += pnlrt['cost']
    value += pnlrt['value']
    lstpnl8.append(pnlt)
    cts = getAIPLastTs(pnlt['pnl'])
    if cts < 0:
        break
    lastctrl = getLastCtrl(pnlt['pnl'])
    lastMoney += lastctrl.lastMoney
    tsStart = int(getNextMonthDay1(cts))
    deeprunAIP(lstpnl81, tsStart, lastctrl.lastMoney, 6, i)
    i+=1
# {'value': value, 'cost': cost, 'per': value / cost}
trdb2py.showPNLs(lstpnl8 + lstpnl81 + [pnlBaseline] + lstaippnl, toImg=isStaticImg, width=width, height=height)
# +
# getLastCtrl(lstpnl[0]['pnl']).ts
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[0]['pnl']).ts, lstpnl4[0]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[2]['pnl']).ts, lstpnl4[2]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[4]['pnl']).ts, lstpnl4[4]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[6]['pnl']).ts, lstpnl4[6]['pnl'])
pnldata8 = mergePNL(lstpnl8)
for v in lstpnl81:
    mergePNLEx(pnldata8, v['pnl'], v['inmoney'])
# # pnldata = mergePNL([])
# trdb2py.showPNLs(lstpnl + [pnlBaseline, {'title': '合并', 'pnl': pnldata}], toImg=isStaticImg, width=width, height=height)
trdb2py.showPNLs([{'title': '年化20%止盈再定投合并', 'pnl': pnldata8},
                  {'title': '年化10%止盈合并', 'pnl': pnldata7},
                  {'title': '年化20%止盈合并', 'pnl': pnldata6},
                  {'title': '20%止盈再定投合并', 'pnl': pnldata5},
                  {'title': '100%止盈再定投合并', 'pnl': pnldata4},
                  pnlBaseline,
                  {'title': '100%止盈合并', 'pnl': pnldata3},
                  {'title': '20%回撤10%止盈合并', 'pnl': pnldata2},
                  {'title': '20%止盈至少6期合并', 'pnl': pnldata1},
                  lstaippnl[0],
                  {'title': '20%止盈合并', 'pnl': pnldata}], toImg=isStaticImg, width=width, height=height)
# +
# Variant: +20% partial take-profit (paramstakeprofit3 instead of 2).
lstparams = []
lstpnl9 = []
cost = 0
value = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
i = 1
while True:
    buy0 = trdb2py.trading2_pb2.CtrlCondition(
        name='monthdayex',
        vals=[1],
    )
    # takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
    #     # name='annualizedreturns',
    #     name='totalreturnex',
    #     vals=[1.2, 0.1],
    #     operators=['>='],
    # )
    takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
        # name='annualizedreturns',
        name='totalreturn',
        vals=[1.2],
        operators=['>='],
    )
    takeprofit1 = trdb2py.trading2_pb2.CtrlCondition(
        name='timestamp',
        int64Vals=[tsStart + 60 * 60 * 24 * 180],
        operators=['>='],
    )
    s0 = trdb2py.trading2_pb2.Strategy(
        name="normal",
        asset=trdb2py.str2asset(asset),
    )
    s0.buy.extend([buy0])
    s0.takeprofit.extend([takeprofit0])
    s0.paramsBuy.CopyFrom(paramsbuy)
    s0.paramsSell.CopyFrom(paramssell)
    s0.paramsTakeProfit.CopyFrom(paramstakeprofit3)
    s0.paramsAIP.CopyFrom(paramsaip)
    p0 = trdb2py.trading2_pb2.SimTradingParams(
        assets=[trdb2py.str2asset(asset)],
        startTs=tsStart,
        endTs=tsEnd,
        strategies=[s0],
        title='20%部分止盈{}'.format(i),
    )
    pnlt = trdb2py.simTrading(trdb2cfg, p0)
    pnlrt = getLastResult(pnlt['pnl'])
    cost += pnlrt['cost']
    value += pnlrt['value']
    lstpnl9.append(pnlt)
    cts = getAIPLastTs(pnlt['pnl'])
    if cts < 0:
        break
    tsStart = int(getNextMonthDay1(cts))
    i+=1
# {'value': value, 'cost': cost, 'per': value / cost}
trdb2py.showPNLs(lstpnl9 + [pnlBaseline] + lstaippnl, toImg=isStaticImg, width=width, height=height)
# +
# getLastCtrl(lstpnl[0]['pnl']).ts
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[0]['pnl']).ts, lstpnl4[0]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[2]['pnl']).ts, lstpnl4[2]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[4]['pnl']).ts, lstpnl4[4]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[6]['pnl']).ts, lstpnl4[6]['pnl'])
pnldata9 = mergePNL(lstpnl9)
# for v in lstpnl81:
#     mergePNLEx(pnldata8, v['pnl'], v['inmoney'])
# # pnldata = mergePNL([])
# trdb2py.showPNLs(lstpnl + [pnlBaseline, {'title': '合并', 'pnl': pnldata}], toImg=isStaticImg, width=width, height=height)
trdb2py.showPNLs([{'title': '20%部分止盈合并', 'pnl': pnldata9},
                  # {'title': '年化20%止盈再定投合并', 'pnl': pnldata8},
                  # {'title': '年化10%止盈合并', 'pnl': pnldata7},
                  # {'title': '年化20%止盈合并', 'pnl': pnldata6},
                  # {'title': '20%止盈再定投合并', 'pnl': pnldata5},
                  # {'title': '100%止盈再定投合并', 'pnl': pnldata4},
                  pnlBaseline,
                  # {'title': '100%止盈合并', 'pnl': pnldata3},
                  {'title': '20%回撤10%止盈合并', 'pnl': pnldata2},
                  {'title': '20%止盈至少6期合并', 'pnl': pnldata1},
                  lstaippnl[0],
                  {'title': '20%止盈合并', 'pnl': pnldata}], toImg=isStaticImg, width=width, height=height)
# +
# Partial take-profit with re-investment of each round's proceeds.
lstparams = []
lstpnl10 = []
lstpnl101 = []
cost = 0
value = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
i = 1
lastMoney = 0
def deeprunAIP(lst, ts, money, parts, index):
    """Re-invest round proceeds with a +20% partial take-profit.

    Compounding rounds starting at `ts` with capital `money` (1/parts per
    buy), taking profit at total return >= 1.2 using the partial-take-profit
    parameters (paramstakeprofit3), restarted on the 1st of the month after
    each take-profit. Appends each round's pnl dict (with 'inmoney' added)
    to `lst`.
    """
    i = 1
    while True:
        buy0 = trdb2py.trading2_pb2.CtrlCondition(
            name='monthdayex',
            vals=[1],
        )
        takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
            name='totalreturn',
            vals=[1.2],
            operators=['>='],
        )
        # NOTE(review): takeprofit1 is never added to the strategy (only
        # takeprofit0 is extended below) and it reads the module-level
        # `tsStart` rather than `ts` — leftover from copy-paste; confirm.
        takeprofit1 = trdb2py.trading2_pb2.CtrlCondition(
            name='timestamp',
            int64Vals=[tsStart + 60 * 60 * 24 * 180],
            operators=['>='],
        )
        # Initial money pool.
        paramsinitt = trdb2py.trading2_pb2.InitParams(
            money=money,
        )
        # Buy parameters: reinvest the pool (compounding), 1/parts per hand.
        paramsbuyt = trdb2py.trading2_pb2.BuyParams(
            perHandMoney=1/parts,
        )
        s0 = trdb2py.trading2_pb2.Strategy(
            name="normal",
            asset=trdb2py.str2asset(asset),
        )
        s0.buy.extend([buy0])
        s0.takeprofit.extend([takeprofit0])
        s0.paramsBuy.CopyFrom(paramsbuyt)
        s0.paramsSell.CopyFrom(paramssell)
        s0.paramsTakeProfit.CopyFrom(paramstakeprofit3)
        s0.paramsInit.CopyFrom(paramsinitt)
        # s0.paramsAIP.CopyFrom(paramsaip)
        p0 = trdb2py.trading2_pb2.SimTradingParams(
            assets=[trdb2py.str2asset(asset)],
            startTs=ts,
            endTs=tsEnd,
            strategies=[s0],
            title='20%部分止盈再定投{}-{}'.format(index, i),
        )
        pnlt = trdb2py.simTrading(trdb2cfg, p0)
        pnlt['inmoney'] = money
        lst.append(pnlt)
        cts = getAIPLastTs(pnlt['pnl'])
        if cts < 0:
            break
        lastctrl = getLastCtrl(pnlt['pnl'])
        money = lastctrl.lastMoney
        ts = int(getNextMonthDay1(cts))
        i+=1
# Outer loop: +20% partial take-profit AIP with re-investment of proceeds.
while True:
    buy0 = trdb2py.trading2_pb2.CtrlCondition(
        name='monthdayex',
        vals=[1],
    )
    takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
        # name='annualizedreturns',
        name='totalreturn',
        vals=[1.2],
        operators=['>='],
    )
    takeprofit1 = trdb2py.trading2_pb2.CtrlCondition(
        name='timestamp',
        int64Vals=[tsStart + 60 * 60 * 24 * 180],
        operators=['>='],
    )
    s0 = trdb2py.trading2_pb2.Strategy(
        name="normal",
        asset=trdb2py.str2asset(asset),
    )
    s0.buy.extend([buy0])
    s0.takeprofit.extend([takeprofit0, takeprofit1])
    s0.paramsBuy.CopyFrom(paramsbuy)
    s0.paramsSell.CopyFrom(paramssell)
    s0.paramsTakeProfit.CopyFrom(paramstakeprofit3)
    s0.paramsAIP.CopyFrom(paramsaip)
    p0 = trdb2py.trading2_pb2.SimTradingParams(
        assets=[trdb2py.str2asset(asset)],
        startTs=tsStart,
        endTs=tsEnd,
        strategies=[s0],
        title='20%部分止盈{}'.format(i),
    )
    pnlt = trdb2py.simTrading(trdb2cfg, p0)
    pnlrt = getLastResult(pnlt['pnl'])
    cost += pnlrt['cost']
    value += pnlrt['value']
    lstpnl10.append(pnlt)
    cts = getAIPLastTs(pnlt['pnl'])
    if cts < 0:
        break
    lastctrl = getLastCtrl(pnlt['pnl'])
    lastMoney += lastctrl.lastMoney
    tsStart = int(getNextMonthDay1(cts))
    deeprunAIP(lstpnl101, tsStart, lastctrl.lastMoney, 6, i)
    i+=1
# {'value': value, 'cost': cost, 'per': value / cost}
trdb2py.showPNLs(lstpnl10 + lstpnl101 + [pnlBaseline] + lstaippnl, toImg=isStaticImg, width=width, height=height)
# +
# getLastCtrl(lstpnl[0]['pnl']).ts
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[0]['pnl']).ts, lstpnl4[0]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[2]['pnl']).ts, lstpnl4[2]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[4]['pnl']).ts, lstpnl4[4]['pnl'])
# rmPNLValuesWithTimestamp(getLastCtrl(lstpnl4[6]['pnl']).ts, lstpnl4[6]['pnl'])
pnldata10 = mergePNL(lstpnl10)
for v in lstpnl101:
    mergePNLEx(pnldata10, v['pnl'], v['inmoney'])
# # pnldata = mergePNL([])
# trdb2py.showPNLs(lstpnl + [pnlBaseline, {'title': '合并', 'pnl': pnldata}], toImg=isStaticImg, width=width, height=height)
trdb2py.showPNLs([{'title': '20%部分止盈再定投合并', 'pnl': pnldata10},
                  {'title': '20%部分止盈合并', 'pnl': pnldata9},
                  # {'title': '年化20%止盈再定投合并', 'pnl': pnldata8},
                  # {'title': '年化10%止盈合并', 'pnl': pnldata7},
                  # {'title': '年化20%止盈合并', 'pnl': pnldata6},
                  {'title': '20%止盈再定投合并', 'pnl': pnldata5},
                  {'title': '100%止盈再定投合并', 'pnl': pnldata4},
                  pnlBaseline,
                  {'title': '100%止盈合并', 'pnl': pnldata3},
                  {'title': '20%回撤10%止盈合并', 'pnl': pnldata2},
                  {'title': '20%止盈至少6期合并', 'pnl': pnldata1},
                  lstaippnl[0],
                  {'title': '20%止盈合并', 'pnl': pnldata}], toImg=isStaticImg, width=width, height=height)
# +
# Collect every merged variant and tabulate risk/return statistics, sorted by
# total return.
lstallpnl = [{'title': '20%部分止盈合并', 'pnl': pnldata9},
             {'title': '20%部分止盈再定投合并', 'pnl': pnldata10},
             {'title': '年化20%止盈再定投合并', 'pnl': pnldata8},
             {'title': '年化10%止盈合并', 'pnl': pnldata7},
             {'title': '年化20%止盈合并', 'pnl': pnldata6},
             {'title': '20%止盈再定投合并', 'pnl': pnldata5},
             {'title': '100%止盈再定投合并', 'pnl': pnldata4},
             pnlBaseline,
             {'title': '100%止盈合并', 'pnl': pnldata3},
             {'title': '20%回撤10%止盈合并', 'pnl': pnldata2},
             {'title': '20%止盈至少6期合并', 'pnl': pnldata1},
             lstaippnl[0],
             {'title': '20%止盈合并', 'pnl': pnldata}]
for v in lstallpnl:
    trdb2py.rebuildPNL(v['pnl'])
dflstallpnl = trdb2py.buildPNLReport(lstallpnl)
dflstallpnl[['title', 'maxDrawdown', 'maxDrawdownStart', 'maxDrawdownEnd', 'totalReturns', 'sharpe', 'annualizedReturns', 'annualizedVolatility']].sort_values(by='totalReturns', ascending=False)
# -
# NOTE(review): leftover scratch cell — each line below is a bare tuple/dict
# expression with no effect (compare the lstallpnl list above). Kept as-is;
# confirm this cell can be deleted.
{'title': '20%部分止盈合并', 'pnl': pnldata9},
{'title': '20%部分止盈再定投合并', 'pnl': pnldata10},
{'title': '年化20%止盈再定投合并', 'pnl': pnldata8},
{'title': '年化10%止盈合并', 'pnl': pnldata7},
{'title': '年化20%止盈合并', 'pnl': pnldata6},
{'title': '20%止盈再定投合并', 'pnl': pnldata5},
{'title': '100%止盈再定投合并', 'pnl': pnldata4},
pnlBaseline,
{'title': '100%止盈合并', 'pnl': pnldata3},
{'title': '20%回撤10%止盈合并', 'pnl': pnldata2},
{'title': '20%止盈至少6期合并', 'pnl': pnldata1},
lstaippnl[0],
{'title': '20%止盈合并', 'pnl': pnldata}
# +
# Single AIP simulation with the +20% take-profit, using paramstakeprofit.
lstparams = []
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
# tsStart = int(trdb2py.str2timestamp('2014-11-01', '%Y-%m-%d'))
# tsStart = int(trdb2py.str2timestamp('2015-01-01', '%Y-%m-%d'))
# tsStart = int(trdb2py.str2timestamp('2015-04-01', '%Y-%m-%d'))
# tsStart = int(trdb2py.str2timestamp('2017-12-01', '%Y-%m-%d'))
buy0 = trdb2py.trading2_pb2.CtrlCondition(
    name='monthdayex',
    vals=[1],
)
takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
    name='totalreturn',
    vals=[1.2],
    operators=['>='],
)
s0 = trdb2py.trading2_pb2.Strategy(
    name="normal",
    asset=trdb2py.str2asset(asset),
)
# paramsaip = trdb2py.trading2_pb2.AIPParams(
#     money=10000,
#     type=trdb2py.trading2_pb2.AIPTT_WEEKDAY,
#     day=1,
# )
s0.buy.extend([buy0])
s0.takeprofit.extend([takeprofit0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsSell.CopyFrom(paramssell)
s0.paramsTakeProfit.CopyFrom(paramstakeprofit)
s0.paramsAIP.CopyFrom(paramsaip)
lstparams.append(trdb2py.trading2_pb2.SimTradingParams(
    assets=[trdb2py.str2asset(asset)],
    startTs=tsStart,
    endTs=tsEnd,
    strategies=[s0],
    title='{}定投止盈'.format(1),
))
lstaippnl1 = trdb2py.simTradings(trdb2cfg, lstparams)
# lstaippnl1[0]['pnl'].lstCtrl
# getPNLLastTs(lstaippnl1[0]['pnl'])
# -
trdb2py.showPNLs(lstaippnl1 + [pnlBaseline], toImg=isStaticImg, width=width, height=height)
# +
# Same simulation with the alternate take-profit params (paramstakeprofit1);
# note tsStart is carried over from the cell above (all assignments here are
# commented out).
lstparams = []
# tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
# tsStart = int(trdb2py.str2timestamp('2014-11-01', '%Y-%m-%d'))
# tsStart = int(trdb2py.str2timestamp('2015-01-01', '%Y-%m-%d'))
# tsStart = int(trdb2py.str2timestamp('2015-04-01', '%Y-%m-%d'))
# tsStart = int(trdb2py.str2timestamp('2017-12-01', '%Y-%m-%d'))
buy0 = trdb2py.trading2_pb2.CtrlCondition(
    name='monthdayex',
    vals=[1],
)
takeprofit0 = trdb2py.trading2_pb2.CtrlCondition(
    name='totalreturn',
    vals=[1.2],
    operators=['>='],
)
s0 = trdb2py.trading2_pb2.Strategy(
    name="normal",
    asset=trdb2py.str2asset(asset),
)
# paramsaip = trdb2py.trading2_pb2.AIPParams(
#     money=10000,
#     type=trdb2py.trading2_pb2.AIPTT_WEEKDAY,
#     day=1,
# )
s0.buy.extend([buy0])
s0.takeprofit.extend([takeprofit0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsSell.CopyFrom(paramssell)
s0.paramsTakeProfit.CopyFrom(paramstakeprofit1)
s0.paramsAIP.CopyFrom(paramsaip)
lstparams.append(trdb2py.trading2_pb2.SimTradingParams(
    assets=[trdb2py.str2asset(asset)],
    startTs=tsStart,
    endTs=tsEnd,
    strategies=[s0],
    title='{}定投止盈2'.format(1),
))
lstaippnl2 = trdb2py.simTradings(trdb2cfg, lstparams)
trdb2py.showPNLs(lstaippnl1 + lstaippnl2 + [pnlBaseline], toImg=isStaticImg, width=width, height=height)
# -
|
home/trdb2/aip002.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Import the required libraries: Pandas, Numpy, Matplotlib, Seaborn and Feature Tools:
# NOTE(review): `ft` is bound three times — first to featuretools.primitives.Feature
# (once as `ft`, once as `Feature`), then rebound by `import featuretools as ft`
# below. Only the final binding is used; the primitives imports look like
# leftovers — confirm and clean up.
from featuretools.primitives import Feature as ft
from featuretools.primitives import Feature
import pandas as pd
import numpy as np
import seaborn as sns # For statistical data visualization
import matplotlib.pyplot as plt # For plotting graphs
from datetime import datetime # To access datetime
from pandas import Series # To work on series
# %matplotlib inline
import warnings # To ignore the warnings
warnings.filterwarnings("ignore")
# -
import featuretools as ft
import sklearn
# +
# Let us load and read the data from the csv file
df=pd.read_csv("train.csv")
df.columns
# +
# load the test data set
cf=pd.read_csv("test.csv")
cf.info()
# -
df.head(3)
df.dtypes
# +
# Per-column count of missing values in the training set (only columns with
# at least one missing value, sorted ascending).
missing_df = df.isnull().sum(axis=0).reset_index()
missing_df.columns = ['Column_Name', 'Missing_Count']
missing_df = missing_df[missing_df['Missing_Count']>0]
missing_df = missing_df.sort_values(by='Missing_Count')
missing_df
# -
# Same summary for the test set.
missing_df = cf.isnull().sum(axis=0).reset_index()
missing_df.columns = ['Column_Name', 'Missing_Count']
missing_df = missing_df[missing_df['Missing_Count']>0]
missing_df = missing_df.sort_values(by='Missing_Count')
missing_df
# +
# Filling factor (% of non-missing rows) per column, training set.
missing_df =df.isnull().sum(axis=0).reset_index()
missing_df.columns = ['Column Name', 'Missing Values Count']
missing_df['Filling Factor (%)']=(df.shape[0]-missing_df['Missing Values Count'])/df.shape[0]*100
missing_df.sort_values('Filling Factor (%)').reset_index(drop = True)
# +
# Filling factor (% of non-missing rows) per column, test set.
# Fix: the percentage was previously computed with df.shape[0] (the training
# row count) even though the missing counts come from cf; use cf.shape[0].
missing_df = cf.isnull().sum(axis=0).reset_index()
missing_df.columns = ['Column Name', 'Missing Values Count']
missing_df['Filling Factor (%)'] = (cf.shape[0] - missing_df['Missing Values Count']) / cf.shape[0] * 100
missing_df.sort_values('Filling Factor (%)').reset_index(drop = True)
# -
# Drop All rows that contain and have a Missing Value
a=df.dropna()
a.sample(2)
# Test set with missing rows dropped.
b=cf.dropna()
b.sample(2)
# +
# Confirm the cleaned training set has a 100% filling factor everywhere.
missing_df =a.isnull().sum(axis=0).reset_index()
missing_df.columns = ['Column Name', 'Missing Values Count']
missing_df['Filling Factor (%)']=(a.shape[0]-missing_df['Missing Values Count'])/a.shape[0]*100
missing_df.sort_values('Filling Factor (%)').reset_index(drop = True)
# -
# Same confirmation for the cleaned test set.
missing_df =b.isnull().sum(axis=0).reset_index()
missing_df.columns = ['Column Name', 'Missing Values Count']
missing_df['Filling Factor (%)']=(b.shape[0]-missing_df['Missing Values Count'])/b.shape[0]*100
missing_df.sort_values('Filling Factor (%)').reset_index(drop = True)
# ### Feature Engineering Using Feature Tools:
# Prepare the data for Feature Engineering
#
# In this Loan Dataset, there are 2 tables. Each table is called an entity in Featuretools.
# We will join together the training and testing datasets to make sure we build the same features for each set. Later, after the feature matrix is built, we can separate out the two sets.
# +
# Create an Entity Set 'es'
# NOTE(review): entity_from_dataframe / Relationship-by-column is the legacy
# (pre-1.0) featuretools API — confirm the pinned featuretools version.
es = ft.EntitySet(id = 'Loans')
# +
# Entities with a unique index
es = es.entity_from_dataframe(entity_id = 'LoansTrain', dataframe =df, index = 'Loan_ID')
# -
# Entities with a unique index
es = es.entity_from_dataframe(entity_id = 'LoansTest', dataframe =cf, index = 'Loan_ID')
# +
# Display the Entity Set thus Far
es
# -
es["LoansTrain"]
es["LoansTest"]
# ### Relationships
# After defining the entities (tables) in an EntitySet, we now need to tell feature tools how they are related with a relationship.
# +
# Define the Relationship between the Training and Test Entity Sets
# NOTE(review): featuretools relationships are parent->child (one-to-many);
# train and test are disjoint row sets sharing only the Loan_ID column name,
# so this relationship looks conceptually dubious — verify it produces the
# intended aggregations.
new_relationship = ft.Relationship(es["LoansTrain"]["Loan_ID"],es["LoansTest"]["Loan_ID"])
# +
# Add the relationship to the entity set
es = es.add_relationship(new_relationship)
# -
# The relationship has now been stored in the entity set.
es
# We now have our entities in an entityset along with the relationships between them. We can now start to making new features from all of the tables using stacks of feature primitives to form deep features. First, let's cover feature primitives
primitives = ft.list_primitives()
pd.options.display.max_colwidth = 100
primitives[primitives['type'] == 'aggregation'].head(10)
primitives[primitives['type'] == 'transform'].head(10)
## Create new features using specified primitives
features, feature_names = ft.dfs(entityset = es, target_entity = 'LoansTest',
agg_primitives = ['mean', 'max', "sum", "mode"],
trans_primitives = ['years', 'month', 'divide'])
# Check the Newly Created Features
features.columns
features.head()
|
Exploratory Data Analysis/Feature Engineering.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import numpy as np
import pandas as pd
from pathlib import Path
import os, sys, datetime, time, random, fnmatch, math
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import skimage.metrics
import pywt
import torch
from torchvision import transforms as tvtransforms
from torch.utils.data import Dataset, DataLoader, random_split
import torchvision.utils as vutils
import torch.utils.tensorboard as tensorboard
import torch.nn as nn
import dataloaders, transforms, OhModel, pytorch_msssim
# Paths
# Checkpoint to evaluate and the test-split source/target image folders.
PATH_SAVE_NETWORK_INTERMEDIATE = "./runs/OhModel_v10/nets_int_135.tar"
directory_source = "D:/data/JSRT/augmented/test/source/"
directory_boneless = "D:/data/JSRT/augmented/test/target/"
# Data
_batch_size = 10
image_spatial_size = (512,512)
# Sample-dict keys holding the image tensors the transforms operate on.
discriminator_keys_images = ["source","boneless"]
# Evaluation pipeline: rescale, invert, normalise to [0,1], Haar-decompose,
# convert to tensors. NOTE(review): presumably mirrors the training
# preprocessing — confirm against the training script.
ds = dataloaders.JSRT_CXR(directory_source, directory_boneless,
                         transform=tvtransforms.Compose([
                             transforms.Rescale(image_spatial_size, discriminator_keys_images, "PixelSize"),
                             transforms.ImageComplement(discriminator_keys_images),
                             transforms.RescalingNormalisation(discriminator_keys_images,(0,1)),
                             transforms.HaarTransform(discriminator_keys_images),
                             transforms.ToTensor(discriminator_keys_images),
                             ]))
dl = DataLoader(ds, _batch_size, num_workers=0)
# Grab a single batch for the qualitative/quantitative evaluation below.
sample = next(iter(dl))
print(sample["source"].shape)
def InverseHaarTransform(cA, cH, cV, cD):
    """Reassemble a spatial image from its four Haar DWT sub-bands.

    Each coefficient tensor is squeezed to 2-D before the inverse
    transform; returns the array produced by ``pywt.idwt2``.
    """
    approx, horiz, vert, diag = (torch.squeeze(band) for band in (cA, cH, cV, cD))
    return pywt.idwt2((approx, (horiz, vert, diag)), 'haar')
# +
# Network
# Generator input: the per-image channel count from the loaded batch; the
# discriminator sees source and output stacked, hence 2x the channels.
_input_array_shape_generator = (_batch_size, sample["source"].size(1), sample["source"].size(2), sample["source"].size(3))
_input_array_shape_classifier = (_batch_size, 2*sample["source"].size(1), sample["source"].size(2), sample["source"].size(3))
netG = OhModel.Generator(_input_array_shape_generator, reluType="normal", use_bias=True)
netD = OhModel.Discriminator(_input_array_shape_classifier, num_kernels=32, kernel_dims=32, use_bias = True, reluType="normal")
# Restore the trained weights; evaluation is aborted without a checkpoint.
if os.path.isfile(PATH_SAVE_NETWORK_INTERMEDIATE):
    print("=> loading checkpoint '{}'".format(PATH_SAVE_NETWORK_INTERMEDIATE))
    checkpoint = torch.load(PATH_SAVE_NETWORK_INTERMEDIATE, map_location='cpu')
    start_epoch = checkpoint['epoch_next']
    reals_shown_now = checkpoint['reals_shown']
    netG.load_state_dict(checkpoint['modelG_state_dict'])
    netD.load_state_dict(checkpoint['modelD_state_dict'])
    print("=> loaded checkpoint '{}' (epoch {}, reals shown {})".format(PATH_SAVE_NETWORK_INTERMEDIATE,
                                                                        start_epoch, reals_shown_now))
else:
    print("=> NO CHECKPOINT FOUND AT '{}'" .format(PATH_SAVE_NETWORK_INTERMEDIATE))
    raise RuntimeError("No checkpoint found at specified path.")
# Inference mode; detach the output so no autograd graph is retained.
netG.eval()
netD.eval()
out = netG(sample["source"])
out = out.detach()
# +
# Image Display
plt.figure(1)
# Which element of the batch to visualise.
batch_idx = 3
fig, ax = plt.subplots(1,3, figsize=(15,15))
# Reconstructed Images to be displayed
# Channels 0..3 hold the cA, cH, cV, cD Haar sub-bands (the order of the
# InverseHaarTransform parameters); invert to get a viewable image.
source = InverseHaarTransform(sample["source"][batch_idx,0,:], sample["source"][batch_idx,1,:], sample["source"][batch_idx,2,:], sample["source"][batch_idx,3,:])
ax[0].imshow(source, cmap='gray')
ax[0].set_title("Source")
ax[0].axis("off")
model_out = InverseHaarTransform(out[batch_idx,0,:],out[batch_idx,1,:], out[batch_idx,2,:], out[batch_idx,3,:])
ax[1].imshow(model_out , cmap='gray')
ax[1].set_title("Suppressed")
ax[1].axis("off")
true_boneless = InverseHaarTransform(sample["boneless"][batch_idx,0,:], sample["boneless"][batch_idx,1,:], sample["boneless"][batch_idx,2,:], sample["boneless"][batch_idx,3,:])
ax[2].imshow(true_boneless , cmap='gray')
ax[2].set_title("Target")
ax[2].axis("off")
# +
# Analysis
def PSNR(image, reference, max_reference=1.):
    """Peak Signal-to-Noise Ratio, in dB, per image and channel.

    Both `image` and `reference` are torch Tensors shaped [N, C, H, W];
    `max_reference` is the peak possible pixel value. Returns a squeezed
    NumPy array of PSNR values.
    """
    n_pixels = image.size(-2) * image.size(-1)
    # Mean squared error over the spatial dimensions only -> shape [N, C].
    mse = torch.sum(torch.abs(image - reference) ** 2, (-1, -2)) / n_pixels
    # PSNR in dB: 20 * log10(peak / RMSE).
    psnr = 20 * torch.log10(max_reference / torch.sqrt(mse))
    return psnr.numpy().squeeze()
def NPS():
    """Noise Power Spectrum"""
    # NOTE(review): unimplemented stub — the body is only the docstring,
    # so calling this returns None.
def SSIM(image, reference):
    """Structural Similarity Index for a batch of images.

    `image` and `reference` are torch Tensors shaped [N, C, H, W]; the
    channel axis is moved last so skimage treats the images as
    multichannel. Returns a NumPy array with one SSIM value per batch
    element.
    """
    # np.empty makes the "uninitialized, filled below" intent explicit.
    out_list = np.empty(image.size(0))
    # enumerate replaces the original hand-maintained `iters` counter.
    for idx, im in enumerate(image):
        im = np.moveaxis(im.numpy(), 0, -1)
        ref = np.moveaxis(reference[idx, :].numpy(), 0, -1)
        # NOTE(review): `multichannel=True` is removed in newer skimage;
        # `channel_axis=-1` is the replacement — confirm installed version.
        out_list[idx] = skimage.metrics.structural_similarity(
            im, ref, multichannel=True)
    return out_list
# Report per-image PSNR and SSIM of the suppressed outputs against the
# ground-truth boneless targets.
print(PSNR(out, sample["boneless"]))
print(SSIM(out, sample["boneless"]))
|
analysis_script.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import folium
from folium import plugins
import os
import json
# !pip install folium
import folium
from folium import plugins
# Map centred on the University of Latvia, zoomed all the way in.
latvijas_universitate = folium.Map(location=[56.95080,24.11650],
                                   zoom_start = 20)
latvijas_universitate
import pandas as pd
# Load the world-cities dataset and index it by city name for label lookups.
df = pd.read_json('cities.json', encoding="UTF-8")
df[20:30]
df.index = df['name']
df.head()
# Fixed: `'Riga' in df.loc[]` was a SyntaxError (empty subscript);
# label membership is tested against the index.
'Riga' in df.index
df.loc['Riga']
Riga = df[df.name == 'Riga']
Riga
df.head(40)
def getCity(cityName):
    """Look up a city in cities.json and return its [lat, lng] pair.

    Raises ValueError if the city name is not present in the file.
    """
    cityFrame = pd.read_json('cities.json', encoding="UTF-8")
    city = cityFrame[cityFrame.name == cityName]
    if city.empty:
        raise ValueError(f"city not found: {cityName}")
    # `.iloc[0]` instead of float(Series): converting a Series with
    # float() is deprecated/removed in modern pandas.
    return [float(city.lat.iloc[0]), float(city.lng.iloc[0])]
getCity('Riga')
getCity('Ogre')
# Map centred on Ogre using the coordinates looked up from cities.json.
ogre = folium.Map(location=getCity('Ogre'))
ogre
latvijas_universitate = folium.Map(location=[56.95080,24.11650],
                    tiles = "Stamen Terrain",
                              zoom_start = 15)
latvijas_universitate
# Default world map with no explicit centre/zoom.
clean_map = folium.Map()
clean_map
latvijas_universitate = folium.Map(location=[56.95080,24.11650],
                    tiles = "Stamen Toner",
                              zoom_start = 20)
latvijas_universitate
# +
latvijas_universitate = folium.Map(location=[56.95080,24.11650],
                                  width=500, height=500, #dimensions can be set explicitly
                                   zoom_start = 20)
latvijas_universitate
folium.Marker([56.95080,24.11650], popup='Latvijas universitāte').add_to(ogre)
ogre
# -
df.head()
# All Latvian cities in the dataset.
lvcities = df[df.country == 'LV']
lvcities.head(), len(lvcities)
zilupe = lvcities.iloc[1]
zilupe
# NOTE: `.name` on a row Series is its index label — the city name here,
# because the frame was re-indexed by the 'name' column above.
zilupe.name, float(zilupe.lat), float(zilupe.lng)
def createCitiesMap(countrycode, filename='cities.json'):
    """Build a folium map of every city with the given country code.

    The map is centred on the midpoint of the cities' bounding box and
    a marker is dropped on each city.
    """
    frame = pd.read_json(filename, encoding="UTF-8")
    country_cities = frame[frame.country == countrycode]
    centre_lat = (country_cities.lat.max() + country_cities.lat.min()) / 2
    centre_lng = (country_cities.lng.max() + country_cities.lng.min()) / 2
    newmap = folium.Map(location=[centre_lat, centre_lng], zoom_start = 8)
    for _, row in country_cities.iterrows():
        folium.Marker([float(row.lat), float(row.lng)]).add_to(newmap)
    return newmap
lietuva = createCitiesMap("LT")
lietuva
# NOTE(review): the variable is named `usa` but the country code is Japan.
usa = createCitiesMap("JP")
usa
lietuva = df[df.country == "LT"]
# Fixed ordering: `.lat` only exists once `lietuva` is the DataFrame of
# Lithuanian cities; previously the min/max was called on the folium.Map
# created above and raised AttributeError.
lietuva.lat.max(), lietuva.lat.min()
lietuva.head()
df.head()
estonia = createCitiesMap('EE')
estonia
# Adding cities to map
for index, city in lvcities.iterrows():
    folium.Marker([float(city.lat), float(city.lng)]).add_to(clean_map)
clean_map
# Re-centre the existing map on Ogre and zoom out.
clean_map.location = getCity('Ogre')
clean_map.zoom_start = 7
clean_map
clean_map.zoom_start = 7
clean_map
# +
latvijas_universitate = folium.Map(location=[56.9504,24.11650],
                                   #tiles = "Stamen Watercolor",
                                   zoom_start = 20)
# Plain marker, green
folium.Marker([56.95080,24.11650],
              popup='Latvijas universitāte',
              icon=folium.Icon(color='green')
             ).add_to(latvijas_universitate)
# Circle marker
folium.CircleMarker([56.95,24.1174],
                    radius=100,
                    popup='Mūzikas akadēmija',
                    color='red',
                   ).add_to(latvijas_universitate)
# Interactive marker: clicking the map drops a popup marker.
latvijas_universitate.add_child(folium.ClickForMarker(popup="Galapunkts"))
latvijas_universitate
# -
# -
# +
# Three coloured / Font-Awesome markers around central Riga.
centrs = folium.Map(location=[56.9514934,24.1135],
                    tiles = "Stamen Toner", #Mapbox Bright
           zoom_start = 17)
folium.Marker([56.9514934,24.1135],
              popup='Brīvības piemineklis',
              icon=folium.Icon(color='green')
             ).add_to(centrs)
folium.Marker([56.95080,24.11650],
              popup='Latvijas Universitāte',
              icon=folium.Icon(color='blue',icon='university', prefix='fa')
             ).add_to(centrs)
folium.Marker([56.9514934,24.1111156],
              popup='Bastejkalns',
              icon=folium.Icon(color='red',icon='bicycle', prefix='fa')
             ).add_to(centrs)
centrs
# +
from folium.plugins import MiniMap
# Latvia overview with an inset mini-map in the corner.
latvia = folium.Map(location=(57,25), zoom_start=6,width=500, height=500)
minimap = MiniMap()
latvia.add_child(minimap)
latvia
# +
# Waypoints from the University of Latvia towards the Freedom Monument.
coordinates = [
    [56.95080,24.11650],
    [56.9506724,24.11533],
    [56.9515315,24.1143683],
    [56.9518371,24.1138815],
    [56.9515036,24.1135815]]
centrs = folium.Map(location=[56.9514934,24.1135],
           zoom_start = 17)
# NOTE(review): `[coordinates]` nests the waypoint list one level deep
# (a multi-polyline with a single part) — plain `coordinates` may be the
# intent; confirm the rendered output.
path = folium.PolyLine(
    [coordinates]
).add_to(centrs)
# Create the map and add the line
folium.Marker([56.95080,24.11650], popup='Latvijas universitāte', icon=folium.Icon(color='red')).add_to(centrs)
folium.Marker([56.9514934,24.1135],
              popup='Brīvības piemineklis',
              icon=folium.Icon(color='red')
             ).add_to(centrs)
# Text label drawn along the polyline itself.
plugins.PolyLineTextPath(
    path,
    'Uz brīvības pieminekli',
    offset=-5
).add_to(centrs)
centrs
# -
#https://www.openstreetmap.org/relation/1554509
#http://polygons.openstreetmap.fr/
# Riga city boundary polygon exported from OpenStreetMap.
riga = os.path.join('riga.json')
# `with` closes the file handle (json.load(open(...)) leaked it before).
with open(riga) as f:
    geo_json_data = json.load(f)
# +
# Draw the Riga boundary as a styled GeoJSON overlay.
riga = folium.Map(location=[56.9514934,24.1135],
           zoom_start = 10)
folium.GeoJson(
    geo_json_data,
    style_function=lambda feature: {
        'fillColor': 'blue',
        'color': 'black',
        'weight': 2,
        'dashArray': '5, 5'
    }
).add_to(riga)
riga.save(os.path.join('Colormaps_0.html')) #saves the result to an html file
riga
# +
m = folium.Map([30, 0], zoom_start=3)
wind_locations = [
[59.35560, -31.992190],
[55.178870, -42.89062],
[47.754100, -43.94531],
[38.272690, -37.96875],
[27.059130, -41.13281],
[16.299050, -36.56250],
[8.4071700, -30.23437],
[1.0546300, -22.50000],
[-8.754790, -18.28125],
[-21.61658, -20.03906],
[-31.35364, -24.25781],
[-39.90974, -30.93750],
[-43.83453, -41.13281],
[-47.75410, -49.92187],
[-50.95843, -54.14062],
[-55.97380, -56.60156]
]
wind_line = folium.PolyLine(
wind_locations,
weight=15,
color='#8EE9FF'
).add_to(m)
attr = {'fill': '#007DEF', 'font-weight': 'bold', 'font-size': '24'}
plugins.PolyLineTextPath(
wind_line,
') ',
repeat=True,
offset=7,
attributes=attr
).add_to(m)
danger_line = folium.PolyLine(
[[-40.311, -31.952],
[-12.086, -18.727]],
weight=10,
color='orange',
opacity=0.8
).add_to(m)
attr = {'fill': 'red'}
plugins.PolyLineTextPath(
danger_line,
'\u25BA',
repeat=True,
offset=6,
attributes=attr
).add_to(m)
plane_line = folium.PolyLine(
[[-49.38237, -37.26562],
[-1.75754, -14.41406],
[51.61802, -23.20312]],
weight=1,
color='black'
).add_to(m)
attr = {'font-weight': 'bold', 'font-size': '24'}
plugins.PolyLineTextPath(
plane_line,
'\u2708 ',
repeat=True,
offset=8,
attributes=attr
).add_to(m)
line_to_new_delhi = folium.PolyLine(
[[46.67959447, 3.33984375],
[46.5588603, 29.53125],
[42.29356419, 51.328125],
[35.74651226, 68.5546875],
[28.65203063, 76.81640625]]
).add_to(m)
line_to_hanoi = folium.PolyLine(
[[28.76765911, 77.60742188],
[27.83907609, 88.72558594],
[25.68113734, 97.3828125],
[21.24842224, 105.77636719]]
).add_to(m)
plugins.PolyLineTextPath(
line_to_new_delhi,
'To New Delhi',
offset=-5
).add_to(m)
plugins.PolyLineTextPath(
line_to_hanoi,
'To Hanoi',
offset=-5
).add_to(m)
m.save(os.path.join('Polyline_text_path.html'))
m
# +
us_states = os.path.join('us-states.json')
# `with` closes the file handle (json.load(open(...)) leaked it before).
with open(us_states) as f:
    geo_json_data = json.load(f)
# State -> unemployment-rate Series used by the choropleth styling below.
unemployment = pd.read_csv('US_Unemployment_Oct2012.csv')
unemployment_dict = unemployment.set_index('State')['Unemployment']
# -
def my_color_function(feature):
    """Colour a state red when unemployment exceeds 6.5%, green otherwise."""
    # `feature['id']` is the state code keying `unemployment_dict`.
    return '#ff0000' if unemployment_dict[feature['id']] > 6.5 else '#008000'
# +
# Two-colour choropleth of US unemployment driven by my_color_function.
m = folium.Map([43, -100], tiles='cartodbpositron', zoom_start=4)
folium.GeoJson(
    geo_json_data,
    style_function=lambda feature: {
        'fillColor': my_color_function(feature),
        'color': 'black',
        'weight': 2,
        'dashArray': '5, 5'
    }
).add_to(m)
m.save(os.path.join('Colormaps_0.html'))
m
# +
import branca.colormap as cm
# Continuous green -> yellow -> red scale over unemployment rates 3..10%.
linear = cm.LinearColormap(
    ['green', 'yellow', 'red'],
    vmin=3, vmax=10
)
linear
# +
# Same choropleth with the continuous colormap instead of two bins.
m = folium.Map([43, -100], tiles='cartodbpositron', zoom_start=4)
folium.GeoJson(
    geo_json_data,
    style_function=lambda feature: {
        'fillColor': linear(unemployment_dict[feature['id']]),
        'color': 'black',
        'weight': 2,
        'dashArray': '5, 5'
    }
).add_to(m)
m.save(os.path.join('Colormaps_2.html'))
m
# -
|
Folium/FOLIUM_in_class.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="2Pmxv2ioyCRw"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" id="b-2ShX25yNWf"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="pa49bUnKyRgF"
# # Time series forecasting
# + [markdown] id="11Ilg92myRcw"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/tutorials/structured_data/time_series"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/structured_data/time_series.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/structured_data/time_series.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/structured_data/time_series.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] id="GU8C5qm_4vZb"
# This tutorial is an introduction to time series forecasting using TensorFlow. It builds a few different styles of models including Convolutional and Recurrent Neural Networks (CNNs and RNNs).
#
# This is covered in two main parts, with subsections:
#
# * Forecast for a single timestep:
# * A single feature.
# * All features.
# * Forecast multiple steps:
# * Single-shot: Make the predictions all at once.
# * Autoregressive: Make one prediction at a time and feed the output back to the model.
# + [markdown] id="XVhK72Pu1cJL"
# ## Setup
# + id="7rZnJaGTWQw0"
import os
import datetime
import IPython
import IPython.display
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
# Default figure size and no grid for every plot in this notebook.
mpl.rcParams['figure.figsize'] = (8, 6)
mpl.rcParams['axes.grid'] = False
# + [markdown] id="TokBlnUhWFw9"
# ## The weather dataset
# This tutorial uses a <a href="https://www.bgc-jena.mpg.de/wetter/" class="external">weather time series dataset</a> recorded by the <a href="https://www.bgc-jena.mpg.de" class="external">Max Planck Institute for Biogeochemistry</a>.
#
# This dataset contains 14 different features such as air temperature, atmospheric pressure, and humidity. These were collected every 10 minutes, beginning in 2003. For efficiency, you will use only the data collected between 2009 and 2016. This section of the dataset was prepared by <NAME> for his book [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python).
# + id="xyv_i85IWInT"
# Download (and cache) the Jena climate dataset; extract=True unzips it.
zip_path = tf.keras.utils.get_file(
    origin='https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip',
    fname='jena_climate_2009_2016.csv.zip',
    extract=True)
# Path of the extracted CSV (the zip path minus its .zip suffix).
csv_path, _ = os.path.splitext(zip_path)
# + [markdown] id="R81Wx8WP4c3G"
# This tutorial will just deal with **hourly predictions**, so start by sub-sampling the data from 10 minute intervals to 1h:
# + id="TX6uGeeeWIkG"
df = pd.read_csv(csv_path)
# slice [start:stop:step], starting from index 5 take every 6th record.
df = df[5::6]
# Parse the timestamp column out of the dataframe for later feature use.
date_time = pd.to_datetime(df.pop('Date Time'), format='%d.%m.%Y %H:%M:%S')
# + [markdown] id="VdbOWXiTWM2T"
# Let's take a glance at the data. Here are the first few rows:
# + id="ojHE-iCCWIhz"
# First rows of the hourly-resampled weather dataframe.
df.head()
# + [markdown] id="WRzj1inMfgcO"
# Here is the evolution of a few features over time.
# + id="Vg5XIc5tfNlG"
# Plot a few representative columns: temperature, pressure, air density.
plot_cols = ['T (degC)', 'p (mbar)', 'rho (g/m**3)']
plot_features = df[plot_cols]
plot_features.index = date_time
_ = plot_features.plot(subplots=True)
# Zoom in on the first 480 hourly samples (~20 days).
plot_features = df[plot_cols][:480]
plot_features.index = date_time[:480]
_ = plot_features.plot(subplots=True)
# + [markdown] id="wXWLG0_WBhZS"
# ### Inspect and cleanup
# + [markdown] id="yhmZXJew6GlS"
# Next look at the statistics of the dataset:
# + id="h510pgKVrrai"
# Per-column summary statistics; exposes the -9999 wind sentinels.
df.describe().transpose()
# + [markdown] id="TzOTnWOoWMGK"
# #### Wind velocity
# + [markdown] id="i47LiW5DCVsP"
# One thing that should stand out is the `min` value of the wind velocity, `wv (m/s)` and `max. wv (m/s)` columns. This `-9999` is likely erroneous. There's a separate wind direction column, so the velocity should be `>=0`. Replace it with zeros:
#
# + id="qFOq0_80vF4d"
wv = df['wv (m/s)']
# -9999 is an erroneous sentinel; wind speed must be >= 0, so zero it out.
bad_wv = wv == -9999.0
wv[bad_wv] = 0.0
max_wv = df['max. wv (m/s)']
bad_max_wv = max_wv == -9999.0
max_wv[bad_max_wv] = 0.0
# The above inplace edits are reflected in the DataFrame
# NOTE(review): this relies on df[col] returning a view; under pandas
# copy-on-write the edits would NOT propagate — confirm pandas version.
df['wv (m/s)'].min()
# + [markdown] id="vtmu2IBPgPG8"
# ### Feature engineering
#
# Before diving in to build a model it's important to understand your data, and be sure that you're passing the model appropriately formatted data.
# + [markdown] id="FYyEaqiD6j4s"
# #### Wind
# The last column of the data, `wd (deg)`, gives the wind direction in units of degrees. Angles do not make good model inputs, 360° and 0° should be close to each other, and wrap around smoothly. Direction shouldn't matter if the wind is not blowing.
#
# Right now the distribution of wind data looks like this:
# + id="YO7JGTcWQG2z"
# 2-D histogram of wind direction vs speed; vmax clips the colour scale.
plt.hist2d(df['wd (deg)'], df['wv (m/s)'], bins=(50, 50), vmax=400)
plt.colorbar()
plt.xlabel('Wind Direction [deg]')
plt.ylabel('Wind Velocity [m/s]')
# + [markdown] id="yWnf5dwMU1_g"
# But this will be easier for the model to interpret if you convert the wind direction and velocity columns to a wind **vector**:
# + id="6GmSTHXw6lI1"
# Replace (direction, speed) columns with Cartesian wind vectors so the
# 0/360 degree wrap-around disappears from the model inputs.
wv = df.pop('wv (m/s)')
max_wv = df.pop('max. wv (m/s)')
# Convert to radians.
wd_rad = df.pop('wd (deg)')*np.pi / 180
# Calculate the wind x and y components.
df['Wx'] = wv*np.cos(wd_rad)
df['Wy'] = wv*np.sin(wd_rad)
# Calculate the max wind x and y components.
df['max Wx'] = max_wv*np.cos(wd_rad)
df['max Wy'] = max_wv*np.sin(wd_rad)
# + [markdown] id="7iI0zDoxWDyB"
# The distribution of wind vectors is much simpler for the model to correctly interpret.
# + id="bMgCG5o2SYKD"
# Distribution of the new wind vectors.
plt.hist2d(df['Wx'], df['Wy'], bins=(50, 50), vmax=400)
plt.colorbar()
plt.xlabel('Wind X [m/s]')
plt.ylabel('Wind Y [m/s]')
ax = plt.gca()
ax.axis('tight')
# + [markdown] id="_8im1ttOWlRB"
# #### Time
# + [markdown] id="7YE21HKK40zQ"
# Similarly the `Date Time` column is very useful, but not in this string form. Start by converting it to seconds:
# + id="LIFf-VjMfnh3"
# Seconds-since-epoch for each row's timestamp.
timestamp_s = date_time.map(datetime.datetime.timestamp)
# + [markdown] id="EC_pnM1D5Sgc"
# Similar to the wind direction the time in seconds is not a useful model input. Being weather data it has clear daily and yearly periodicity. There are many ways you could deal with periodicity.
#
# A simple approach to convert it to a usable signal is to use `sin` and `cos` to convert the time to clear "Time of day" and "Time of year" signals:
# + id="MBfX6CDwax73"
day = 24*60*60
# Mean tropical year, in seconds.
year = (365.2425)*day
# Encode time-of-day and time-of-year as sin/cos pairs so moments just
# before/after midnight (or New Year) map to nearby input values.
df['Day sin'] = np.sin(timestamp_s * (2 * np.pi / day))
df['Day cos'] = np.cos(timestamp_s * (2 * np.pi / day))
df['Year sin'] = np.sin(timestamp_s * (2 * np.pi / year))
df['Year cos'] = np.cos(timestamp_s * (2 * np.pi / year))
# + id="mXBbTJZfuuTC"
# First 25 hours of the daily signals (one full period plus one sample).
plt.plot(np.array(df['Day sin'])[:25])
plt.plot(np.array(df['Day cos'])[:25])
plt.xlabel('Time [h]')
plt.title('Time of day signal')
# + [markdown] id="HiurzTGQgf_D"
# This gives the model access to the most important frequency features. In this case you knew ahead of time which frequencies were important.
#
# If you didn't know, you can determine which frequencies are important using an `fft`. To check our assumptions, here is the `tf.signal.rfft` of the temperature over time. Note the obvious peaks at frequencies near `1/year` and `1/day`:
# + id="EN4U1fcMiTYs"
# Real FFT of temperature; convert bin index to cycles-per-year so the
# daily and yearly peaks line up with readable tick labels.
fft = tf.signal.rfft(df['T (degC)'])
f_per_dataset = np.arange(0, len(fft))
n_samples_h = len(df['T (degC)'])
hours_per_year = 24*365.2524
years_per_dataset = n_samples_h/(hours_per_year)
f_per_year = f_per_dataset/years_per_dataset
plt.step(f_per_year, np.abs(fft))
plt.xscale('log')
plt.ylim(0, 400000)
plt.xlim([0.1, max(plt.xlim())])
plt.xticks([1, 365.2524], labels=['1/Year', '1/day'])
_ = plt.xlabel('Frequency (log scale)')
# + [markdown] id="2rbL8bSGDHy3"
# ### Split the data
# + [markdown] id="qoFJZmXBaxCc"
# We'll use a `(70%, 20%, 10%)` split for the training, validation, and test sets. Note the data is **not** being randomly shuffled before splitting. This is for two reasons.
#
# 1. It ensures that chopping the data into windows of consecutive samples is still possible.
# 2. It ensures that the validation/test results are more realistic, being evaluated on data collected after the model was trained.
# + id="ia-MPAHxbInX"
column_indices = {name: i for i, name in enumerate(df.columns)}
# Chronological 70/20/10 split — no shuffling, so windows of consecutive
# samples stay possible and val/test follow training in time.
n = len(df)
train_df = df[0:int(n*0.7)]
val_df = df[int(n*0.7):int(n*0.9)]
test_df = df[int(n*0.9):]
num_features = df.shape[1]
# + [markdown] id="-eFckdUUHWmT"
# ### Normalize the data
#
# It is important to scale features before training a neural network. Normalization is a common way of doing this scaling. Subtract the mean and divide by the standard deviation of each feature.
# + [markdown] id="mxbIic5TMlxx"
# The mean and standard deviation should only be computed using the training data so that the models have no access to the values in the validation and test sets.
#
# It's also arguable that the model shouldn't have access to future values in the training set when training, and that this normalization should be done using moving averages. That's not the focus of this tutorial, and the validation and test sets ensure that you get (somewhat) honest metrics. So in the interest of simplicity this tutorial uses a simple average.
# + id="Eji6njXvHusN"
# Standardise with *training* statistics only, so no information from the
# validation/test sets leaks into the model.
train_mean = train_df.mean()
train_std = train_df.std()
train_df = (train_df - train_mean) / train_std
val_df = (val_df - train_mean) / train_std
test_df = (test_df - train_mean) / train_std
# + [markdown] id="G6ufs8kk9JQw"
# Now peek at the distribution of the features. Some features do have long tails, but there are no obvious errors like the `-9999` wind velocity value.
# + id="T0UYEnkwm8Fe"
# Violin plot of every feature's normalised distribution.
df_std = (df - train_mean) / train_std
df_std = df_std.melt(var_name='Column', value_name='Normalized')
plt.figure(figsize=(12, 6))
ax = sns.violinplot(x='Column', y='Normalized', data=df_std)
_ = ax.set_xticklabels(df.keys(), rotation=90)
# + [markdown] id="ZBBmdxZ2HgfJ"
# ## Data windowing
#
# The models in this tutorial will make a set of predictions based on a window of consecutive samples from the data.
#
# The main features of the input windows are:
#
# * The width (number of time steps) of the input and label windows
# * The time offset between them.
# * Which features are used as inputs, labels, or both.
#
# This tutorial builds a variety of models (including Linear, DNN, CNN and RNN models), and uses them for both:
#
# * *Single-output*, and *multi-output* predictions.
# * *Single-time-step* and *multi-time-step* predictions.
#
# This section focuses on implementing the data windowing so that it can be reused for all of those models.
#
# + [markdown] id="YAhGUVx1jtOy"
# Depending on the task and type of model you may want to generate a variety of data windows. Here are some examples:
#
# 1. For example, to make a single prediction 24h into the future, given 24h of history you might define a window like this:
#
# 
#
# 2. A model that makes a prediction 1h into the future, given 6h of history would need a window like this:
#
# 
# + [markdown] id="sa2BbfNZt8wy"
# The rest of this section defines a `WindowGenerator` class. This class can:
#
# 1. Handle the indexes and offsets as shown in the diagrams above.
# 1. Split windows of features into a `(features, labels)` pairs.
# 2. Plot the content of the resulting windows.
# 3. Efficiently generate batches of these windows from the training, evaluation, and test data, using `tf.data.Dataset`s.
# + [markdown] id="rfx3jGjyziUF"
# ### 1. Indexes and offsets
#
# Start by creating the `WindowGenerator` class. The `__init__` method includes all the necessary logic for the input and label indices.
#
# It also takes the train, eval, and test dataframes as input. These will be converted to `tf.data.Dataset`s of windows later.
# + id="Kem30j8QHxyW"
class WindowGenerator():
  """Bookkeeping for sliding windows over the time-series dataframes.

  A window spans `input_width` input steps followed, after `shift` steps,
  by `label_width` label steps; `label_columns` restricts which columns
  serve as labels (None means all columns).
  """
  def __init__(self, input_width, label_width, shift,
               train_df=train_df, val_df=val_df, test_df=test_df,
               label_columns=None):
    # Store the raw data.
    self.train_df = train_df
    self.val_df = val_df
    self.test_df = test_df
    # Work out the label column indices.
    self.label_columns = label_columns
    if label_columns is not None:
      self.label_columns_indices = {name: i for i, name in
                                    enumerate(label_columns)}
    self.column_indices = {name: i for i, name in
                           enumerate(train_df.columns)}
    # Work out the window parameters.
    self.input_width = input_width
    self.label_width = label_width
    self.shift = shift
    # Total steps covered by one full window.
    self.total_window_size = input_width + shift
    # Inputs occupy the first `input_width` steps of the window.
    self.input_slice = slice(0, input_width)
    self.input_indices = np.arange(self.total_window_size)[self.input_slice]
    # Labels occupy the last `label_width` steps of the window.
    self.label_start = self.total_window_size - self.label_width
    self.labels_slice = slice(self.label_start, None)
    self.label_indices = np.arange(self.total_window_size)[self.labels_slice]
  def __repr__(self):
    """Human-readable summary of the window geometry."""
    return '\n'.join([
        f'Total window size: {self.total_window_size}',
        f'Input indices: {self.input_indices}',
        f'Label indices: {self.label_indices}',
        f'Label column name(s): {self.label_columns}'])
# + [markdown] id="yVJgblsYzL1g"
# Here is code to create the 2 windows shown in the diagrams at the start of this section:
# + id="IsM5kRkz0UwK"
# 24h of inputs predicting temperature one step, 24h ahead.
w1 = WindowGenerator(input_width=24, label_width=1, shift=24,
                     label_columns=['T (degC)'])
w1
# + id="viwKsYeAKFUn"
# 6h of inputs predicting the very next hour's temperature.
w2 = WindowGenerator(input_width=6, label_width=1, shift=1,
                     label_columns=['T (degC)'])
w2
# + [markdown] id="kJaUyTWQJd-L"
# ### 2. Split
# Given a list consecutive inputs, the `split_window` method will convert them to a window of inputs and a window of labels.
#
# The example `w2`, above, will be split like this:
#
# 
#
# This diagram doesn't show the `features` axis of the data, but this `split_window` function also handles the `label_columns` so it can be used for both the single output and multi-output examples.
# + id="W4KbxfzqkXPW"
def split_window(self, features):
  """Slice a batch of full windows into (inputs, labels) tensors."""
  window_inputs = features[:, self.input_slice, :]
  window_labels = features[:, self.labels_slice, :]
  if self.label_columns is not None:
    # Keep only the requested label columns, stacked along the feature axis.
    selected = [window_labels[:, :, self.column_indices[name]]
                for name in self.label_columns]
    window_labels = tf.stack(selected, axis=-1)
  # Slicing loses static shape information; restore it so the resulting
  # tf.data.Datasets are easier to inspect.
  window_inputs.set_shape([None, self.input_width, None])
  window_labels.set_shape([None, self.label_width, None])
  return window_inputs, window_labels
WindowGenerator.split_window = split_window
# + [markdown] id="G6U6VtVuM15s"
# Try it out:
# + id="YeCWbq6KLmL7"
# Stack three slices, the length of the total window:
example_window = tf.stack([np.array(train_df[:w2.total_window_size]),
                           np.array(train_df[100:100+w2.total_window_size]),
                           np.array(train_df[200:200+w2.total_window_size])])
# Split the stacked windows into input and label tensors.
example_inputs, example_labels = w2.split_window(example_window)
print('All shapes are: (batch, time, features)')
print(f'Window shape: {example_window.shape}')
print(f'Inputs shape: {example_inputs.shape}')
print(f'labels shape: {example_labels.shape}')
# + [markdown] id="xtMk1ffk2Mmd"
# Typically data in TensorFlow is packed into arrays where the outermost index is across examples (the "batch" dimension). The middle indices are the "time" or "space" (width, height) dimension(s). The innermost indices are the features.
#
# The code above took a batch of 3, 7-timestep windows, with 19 features at each time step. It split them into a batch of 6-timestep, 19 feature inputs, and a 1-timestep 1-feature label. The label only has one feature because the `WindowGenerator` was initialized with `label_columns=['T (degC)']`. Initially this tutorial will build models that predict single output labels.
# + [markdown] id="tFZukGXrJoGo"
# ### 3. Plot
#
# Here is a plot method that allows a simple visualization of the split window:
# + id="fmgd1qkYUWT7"
# Cache this batch on the window object as its plotting example.
w2.example = example_inputs, example_labels
# + id="jIrYccI-Hm3B"
def plot(self, model=None, plot_col='T (degC)', max_subplots=3):
  """Plot inputs, labels and (optionally) model predictions.

  Uses the cached `self.example` batch; `plot_col` selects the feature
  drawn and at most `max_subplots` batch elements are shown.
  """
  inputs, labels = self.example
  plt.figure(figsize=(12, 8))
  plot_col_index = self.column_indices[plot_col]
  max_n = min(max_subplots, len(inputs))
  for n in range(max_n):
    plt.subplot(3, 1, n+1)
    plt.ylabel(f'{plot_col} [normed]')
    plt.plot(self.input_indices, inputs[n, :, plot_col_index],
             label='Inputs', marker='.', zorder=-10)
    # Labels may be a subset of columns; map the plotted column into the
    # label tensor's index space (None -> column is not a label).
    if self.label_columns:
      label_col_index = self.label_columns_indices.get(plot_col, None)
    else:
      label_col_index = plot_col_index
    if label_col_index is None:
      continue
    plt.scatter(self.label_indices, labels[n, :, label_col_index],
                edgecolors='k', label='Labels', c='#2ca02c', s=64)
    if model is not None:
      predictions = model(inputs)
      plt.scatter(self.label_indices, predictions[n, :, label_col_index],
                  marker='X', edgecolors='k', label='Predictions',
                  c='#ff7f0e', s=64)
    if n == 0:
      plt.legend()
  plt.xlabel('Time [h]')
# Attach as a method so every WindowGenerator can plot itself.
WindowGenerator.plot = plot
# + [markdown] id="HXvctEuK68vX"
# This plot aligns inputs, labels, and (later) predictions based on the time that the item refers to:
# + id="XjTqUnglOOni"
# Default plot: temperature inputs and labels for the example batch.
w2.plot()
# + [markdown] id="UqiqcPOldPG6"
# You can plot the other columns, but the example window `w2` configuration only has labels for the `T (degC)` column.
# + id="EBRe4wnlfCH8"
# Pressure inputs only: labels are restricted to T (degC), so none are drawn.
w2.plot(plot_col='p (mbar)')
# + [markdown] id="xCvD-UaUzYMw"
# ### 4. Create `tf.data.Dataset`s
# + [markdown] id="kLO3SFR9Osdf"
# Finally this `make_dataset` method will take a time series `DataFrame` and convert it to a `tf.data.Dataset` of `(input_window, label_window)` pairs using the `preprocessing.timeseries_dataset_from_array` function.
# + id="35qoSQeRVfJg"
def make_dataset(self, data):
  """Convert a dataframe into a shuffled tf.data.Dataset of windows."""
  array = np.array(data, dtype=np.float32)
  windows = tf.keras.preprocessing.timeseries_dataset_from_array(
      data=array,
      targets=None,
      sequence_length=self.total_window_size,
      sequence_stride=1,
      shuffle=True,
      batch_size=32,)
  # Each full window is split into an (inputs, labels) pair.
  return windows.map(self.split_window)
WindowGenerator.make_dataset = make_dataset
# + [markdown] id="LvsxQwJaCift"
# The `WindowGenerator` object holds training, validation and test data. Add properties for accessing them as `tf.data.Datasets` using the above `make_dataset` method. Also add a standard example batch for easy access and plotting:
# + id="2jZ2KkqGCfzu"
@property
def train(self):
  """Training split as a windowed `tf.data.Dataset`."""
  return self.make_dataset(self.train_df)
@property
def val(self):
  """Validation split as a windowed `tf.data.Dataset`."""
  return self.make_dataset(self.val_df)
@property
def test(self):
  """Test split as a windowed `tf.data.Dataset`."""
  return self.make_dataset(self.test_df)
@property
def example(self):
  """Get and cache an example batch of `inputs, labels` for plotting."""
  result = getattr(self, '_example', None)
  if result is None:
    # No example batch was found, so get one from the `.train` dataset
    result = next(iter(self.train))
    # And cache it for next time
    self._example = result
  return result
# Attach the properties to the WindowGenerator class defined earlier.
WindowGenerator.train = train
WindowGenerator.val = val
WindowGenerator.test = test
WindowGenerator.example = example
# + [markdown] id="fF_Vj6Iw3Y2w"
# Now the `WindowGenerator` object gives you access to the `tf.data.Dataset` objects, so you can easily iterate over the data.
#
# The `Dataset.element_spec` property tells you the structure, `dtypes` and shapes of the dataset elements.
# + id="daJ0-U383YVs"
# Each element is an (inputs, label) pair
w2.train.element_spec
# + [markdown] id="XKTx3_Z7ua-n"
# Iterating over a `Dataset` yields concrete batches:
# + id="6gtKXEgf4Iml"
for example_inputs, example_labels in w2.train.take(1):
print(f'Inputs shape (batch, time, features): {example_inputs.shape}')
print(f'Labels shape (batch, time, features): {example_labels.shape}')
# + [markdown] id="LyuGuJUgjUK3"
# ## Single step models
#
# The simplest model you can build on this sort of data is one that predicts a single feature's value, 1 timestep (1h) in the future based only on the current conditions.
#
# So start by building models to predict the `T (degC)` value 1h into the future.
#
# 
#
# Configure a `WindowGenerator` object to produce these single-step `(input, label)` pairs:
# + id="G5QX1G1JTPCr"
single_step_window = WindowGenerator(
input_width=1, label_width=1, shift=1,
label_columns=['T (degC)'])
single_step_window
# + [markdown] id="RKTm8ajVGw4N"
# The `window` object creates `tf.data.Datasets` from the training, validation, and test sets, allowing you to easily iterate over batches of data.
#
# + id="Do4ILUaBF8oc"
for example_inputs, example_labels in single_step_window.train.take(1):
print(f'Inputs shape (batch, time, features): {example_inputs.shape}')
print(f'Labels shape (batch, time, features): {example_labels.shape}')
# + [markdown] id="D1bbPiR3VAm_"
# ### Baseline
#
# Before building a trainable model it would be good to have a performance baseline as a point for comparison with the later more complicated models.
#
# This first task is to predict temperature 1h in the future given the current value of all features. The current values include the current temperature.
#
# So start with a model that just returns the current temperature as the prediction, predicting "No change". This is a reasonable baseline since temperature changes slowly. Of course, this baseline will work less well if you make a prediction further in the future.
#
# 
# + id="9TybQaIsi3yg"
class Baseline(tf.keras.Model):
  """Predict "no change": return the current input step as the forecast.

  If `label_index` is given, only that feature column is returned (with a
  trailing features axis of size 1); otherwise every feature is passed
  through unchanged.
  """
  def __init__(self, label_index=None):
    super().__init__()
    self.label_index = label_index
  def call(self, inputs):
    if self.label_index is None:
      # No target column selected: echo all features.
      return inputs
    # Select the single target column, then restore the features axis.
    return inputs[:, :, self.label_index][:, :, tf.newaxis]
# + [markdown] id="0vb3f948i8p8"
# Instantiate and evaluate this model:
# + id="IS3-QKc4sX0D"
baseline = Baseline(label_index=column_indices['T (degC)'])
baseline.compile(loss=tf.losses.MeanSquaredError(),
metrics=[tf.metrics.MeanAbsoluteError()])
val_performance = {}
performance = {}
val_performance['Baseline'] = baseline.evaluate(single_step_window.val)
performance['Baseline'] = baseline.evaluate(single_step_window.test, verbose=0)
# + [markdown] id="nhBxQcCSs7Ec"
# That printed some performance metrics, but those don't give you a feeling for how well the model is doing.
#
# The `WindowGenerator` has a plot method, but the plots won't be very interesting with only a single sample. So, create a wider `WindowGenerator` that generates windows 24h of consecutive inputs and labels at a time.
#
# The `wide_window` doesn't change the way the model operates. The model still makes predictions 1h into the future based on a single input time step. Here the `time` axis acts like the `batch` axis: Each prediction is made independently with no interaction between time steps.
# + id="C8jNR5uuJ5Zp"
wide_window = WindowGenerator(
input_width=24, label_width=24, shift=1,
label_columns=['T (degC)'])
wide_window
# + [markdown] id="ZAnj7CFZkuYv"
# This expanded window can be passed directly to the same `baseline` model without any code changes. This is possible because the inputs and labels have the same number of timesteps, and the baseline just forwards the input to the output:
#
# 
# + id="sGKdvdg087qs"
print('Input shape:', wide_window.example[0].shape)
print('Output shape:', baseline(wide_window.example[0]).shape)
# + [markdown] id="SKqQHX1K0JW-"
# Plotting the baseline model's predictions you can see that it is simply the labels, shifted right by 1h.
# + id="jQyAPVLgWTOZ"
wide_window.plot(baseline)
# + [markdown] id="e93TLUhfAVg2"
# In the above plots of three examples the single step model is run over the course of 24h. This deserves some explanation:
#
# * The blue "Inputs" line shows the input temperature at each time step. The model receives all features; this plot only shows the temperature.
# * The green "Labels" dots show the target prediction value. These dots are shown at the prediction time, not the input time. That is why the range of labels is shifted 1 step relative to the inputs.
# * The orange "Predictions" crosses are the model's predictions for each output time step. If the model were predicting perfectly the predictions would land directly on the "Labels".
# + [markdown] id="E4aOJScj52Yu"
# ### Linear model
#
# The simplest **trainable** model you can apply to this task is to insert linear transformation between the input and output. In this case the output from a time step only depends on that step:
#
# 
#
# A `layers.Dense` with no `activation` set is a linear model. The layer only transforms the last axis of the data from `(batch, time, inputs)` to `(batch, time, units)`, it is applied independently to every item across the `batch` and `time` axes.
# + id="6341OXuQ5xA9"
# A single `Dense` layer with no activation is a linear model: it projects
# the features at each timestep to one output, independently per step.
linear = tf.keras.Sequential([
    tf.keras.layers.Dense(units=1)
])
# + id="KwaOM8RucUSn"
print('Input shape:', single_step_window.example[0].shape)
print('Output shape:', linear(single_step_window.example[0]).shape)
# + [markdown] id="OMZTYIj3bYLg"
# This tutorial trains many models, so package the training procedure into a function:
# + id="CbCL6VIrk-Gt"
MAX_EPOCHS = 20
def compile_and_fit(model, window, patience=2):
  """Compile `model` and train it on `window`'s data.

  Uses MSE loss, the Adam optimizer, and early stopping on `val_loss`
  with the given `patience`. Training runs for at most `MAX_EPOCHS`
  epochs. Returns the Keras `History` from `model.fit`.
  """
  model.compile(loss=tf.losses.MeanSquaredError(),
                optimizer=tf.optimizers.Adam(),
                metrics=[tf.metrics.MeanAbsoluteError()])

  stop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                mode='min',
                                                patience=patience)
  return model.fit(window.train,
                   epochs=MAX_EPOCHS,
                   validation_data=window.val,
                   callbacks=[stop_early])
# + [markdown] id="OobVjM-schwj"
# Train the model and evaluate its performance:
# + id="9agbz2qB9bLS"
history = compile_and_fit(linear, single_step_window)
val_performance['Linear'] = linear.evaluate(single_step_window.val)
performance['Linear'] = linear.evaluate(single_step_window.test, verbose=0)
# + [markdown] id="7U9XukYh8beN"
# Like the `baseline` model, the linear model can be called on batches of wide windows. Used this way the model makes a set of independent predictions on consecutive time steps. The `time` axis acts like another `batch` axis. There are no interactions between the predictions at each time step.
#
# 
# + id="K9UVM5Sw9KQN"
print('Input shape:', wide_window.example[0].shape)
# This cell demonstrates the *linear* model on wide windows (see the markdown
# above); calling `baseline` here was a copy-paste from the earlier baseline cell.
print('Output shape:', linear(wide_window.example[0]).shape)
# + [markdown] id="X-CGj85oKaOG"
# Here is the plot of its example predictions on the `wide_window`, note how in many cases the prediction is clearly better than just returning the input temperature, but in a few cases it's worse:
# + id="bCC8VVo-OvwV"
wide_window.plot(linear)
# + [markdown] id="Is51vU8EMl6c"
# One advantage to linear models is that they're relatively simple to interpret.
# You can pull out the layer's weights, and see the weight assigned to each input:
# + id="d4uCTbsmK8VI"
plt.bar(x = range(len(train_df.columns)),
height=linear.layers[0].kernel[:,0].numpy())
axis = plt.gca()
axis.set_xticks(range(len(train_df.columns)))
_ = axis.set_xticklabels(train_df.columns, rotation=90)
# + [markdown] id="Ylng7215boIY"
# Sometimes the model doesn't even place the most weight on the input `T (degC)`. This is one of the risks of random initialization.
# + [markdown] id="W18e6da1cNbw"
# ### Dense
#
# Before applying models that actually operate on multiple time-steps, it's worth checking the performance of deeper, more powerful, single input step models.
#
# Here's a model similar to the `linear` model, except it stacks a few `Dense` layers between the input and the output:
# + id="Z86WkYp7cNAD"
dense = tf.keras.Sequential([
tf.keras.layers.Dense(units=64, activation='relu'),
tf.keras.layers.Dense(units=64, activation='relu'),
tf.keras.layers.Dense(units=1)
])
history = compile_and_fit(dense, single_step_window)
val_performance['Dense'] = dense.evaluate(single_step_window.val)
performance['Dense'] = dense.evaluate(single_step_window.test, verbose=0)
# + [markdown] id="j5dv_whJdswH"
# ### Multi-step dense
#
# A single-time-step model has no context for the current values of its inputs. It can't see how the input features are changing over time. To address this issue the model needs access to multiple time steps when making predictions:
#
# 
#
# + [markdown] id="Zac-ti8agbJ7"
# The `baseline`, `linear` and `dense` models handled each time step independently. Here the model will take multiple time steps as input to produce a single output.
#
# Create a `WindowGenerator` that will produce batches of 3h of inputs and 1h of labels:
# + [markdown] id="gtN4BwZ37niR"
# Note that the `Window`'s `shift` parameter is relative to the end of the two windows.
#
# + id="lBh0j5djUKY2"
CONV_WIDTH = 3
conv_window = WindowGenerator(
input_width=CONV_WIDTH,
label_width=1,
shift=1,
label_columns=['T (degC)'])
conv_window
# + id="dCQ5gvs68Xkd"
conv_window.plot()
plt.title("Given 3h as input, predict 1h into the future.")
# + [markdown] id="We0HdMxKeqB_"
# You could train a `dense` model on a multiple-input-step window by adding a `layers.Flatten` as the first layer of the model:
# + id="oNQnUOkOnC1G"
multi_step_dense = tf.keras.Sequential([
# Shape: (time, features) => (time*features)
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=32, activation='relu'),
tf.keras.layers.Dense(units=32, activation='relu'),
tf.keras.layers.Dense(units=1),
# Add back the time dimension.
# Shape: (outputs) => (1, outputs)
tf.keras.layers.Reshape([1, -1]),
])
# + id="cayD74luo4Vq"
print('Input shape:', conv_window.example[0].shape)
print('Output shape:', multi_step_dense(conv_window.example[0]).shape)
# + id="fu91yEbRo9-J"
history = compile_and_fit(multi_step_dense, conv_window)
IPython.display.clear_output()
val_performance['Multi step dense'] = multi_step_dense.evaluate(conv_window.val)
performance['Multi step dense'] = multi_step_dense.evaluate(conv_window.test, verbose=0)
# + id="tnqdXYT6pkEh"
conv_window.plot(multi_step_dense)
# + [markdown] id="gWfrsP8mq8lV"
# The main down-side of this approach is that the resulting model can only be executed on input windows of exactly this shape.
# + id="j-q6tz5Yq8Jk"
print('Input shape:', wide_window.example[0].shape)
try:
print('Output shape:', multi_step_dense(wide_window.example[0]).shape)
except Exception as e:
print(f'\n{type(e).__name__}:{e}')
# + [markdown] id="bvvajm3ip_8V"
# The convolutional models in the next section fix this problem.
# + [markdown] id="CrpU6gwSJome"
# ### Convolutional neural network
#
# A convolution layer (`layers.Conv1D`) also takes multiple time steps as input to each prediction.
# + [markdown] id="cdLBwoaHmsWb"
# Below is the **same** model as `multi_step_dense`, re-written with a convolution.
#
# Note the changes:
# * The `layers.Flatten` and the first `layers.Dense` are replaced by a `layers.Conv1D`.
# * The `layers.Reshape` is no longer necessary since the convolution keeps the time axis in its output.
# + id="5azaMBj4ac9t"
conv_model = tf.keras.Sequential([
tf.keras.layers.Conv1D(filters=32,
kernel_size=(CONV_WIDTH,),
activation='relu'),
tf.keras.layers.Dense(units=32, activation='relu'),
tf.keras.layers.Dense(units=1),
])
# + [markdown] id="ftaH6B5ECRiK"
# Run it on an example batch to see that the model produces outputs with the expected shape:
# + id="5YNgt1-e98lH"
print("Conv model on `conv_window`")
print('Input shape:', conv_window.example[0].shape)
print('Output shape:', conv_model(conv_window.example[0]).shape)
# + [markdown] id="5m4kC-jGCY3x"
# Train and evaluate it on the `conv_window` and it should give performance similar to the `multi_step_dense` model.
# + id="QDVWdm4paUW7"
history = compile_and_fit(conv_model, conv_window)
IPython.display.clear_output()
val_performance['Conv'] = conv_model.evaluate(conv_window.val)
performance['Conv'] = conv_model.evaluate(conv_window.test, verbose=0)
# + [markdown] id="sYRipDeXs0Kr"
# The difference between this `conv_model` and the `multi_step_dense` model is that the `conv_model` can be run on inputs of any length. The convolutional layer is applied to a sliding window of inputs:
#
# 
#
# If you run it on wider input, it produces wider output:
# + id="hoqccxx9r5jF"
print("Wide window")
print('Input shape:', wide_window.example[0].shape)
print('Labels shape:', wide_window.example[1].shape)
print('Output shape:', conv_model(wide_window.example[0]).shape)
# + [markdown] id="h_WGxtLIHhRF"
# Note that the output is shorter than the input. To make training or plotting work, you need the labels, and prediction to have the same length. So build a `WindowGenerator` to produce wide windows with a few extra input time steps so the label and prediction lengths match:
# + id="_VPvJ_VwTc0f"
LABEL_WIDTH = 24
INPUT_WIDTH = LABEL_WIDTH + (CONV_WIDTH - 1)
wide_conv_window = WindowGenerator(
input_width=INPUT_WIDTH,
label_width=LABEL_WIDTH,
shift=1,
label_columns=['T (degC)'])
wide_conv_window
# + id="gtqlWYXeKXej"
print("Wide conv window")
print('Input shape:', wide_conv_window.example[0].shape)
print('Labels shape:', wide_conv_window.example[1].shape)
print('Output shape:', conv_model(wide_conv_window.example[0]).shape)
# + [markdown] id="yzxbbS56cSBV"
# Now you can plot the model's predictions on a wider window. Note the 3 input time steps before the first prediction. Every prediction here is based on the 3 preceding timesteps:
# + id="gR7VyL45UuEe"
wide_conv_window.plot(conv_model)
# + [markdown] id="H4crpOcoMlSe"
# ### Recurrent neural network
#
# A Recurrent Neural Network (RNN) is a type of neural network well-suited to time series data. RNNs process a time series step-by-step, maintaining an internal state from time-step to time-step.
#
# For more details, read the [text generation tutorial](https://www.tensorflow.org/tutorials/text/text_generation) or the [RNN guide](https://www.tensorflow.org/guide/keras/rnn).
#
# In this tutorial, you will use an RNN layer called Long Short Term Memory ([LSTM](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/LSTM)).
# + [markdown] id="vfQbHSMb1ATa"
# An important constructor argument for all keras RNN layers is the `return_sequences` argument. This setting can configure the layer in one of two ways.
#
# 1. If `False`, the default, the layer only returns the output of the final timestep, giving the model time to warm up its internal state before making a single prediction:
#
# 
#
# 2. If `True` the layer returns an output for each input. This is useful for:
# * Stacking RNN layers.
# * Training a model on multiple timesteps simultaneously.
#
# 
# + id="DXKLCJy8nWNU"
lstm_model = tf.keras.models.Sequential([
# Shape [batch, time, features] => [batch, time, lstm_units]
tf.keras.layers.LSTM(32, return_sequences=True),
# Shape => [batch, time, features]
tf.keras.layers.Dense(units=1)
])
# + [markdown] id="F124B00KZcLC"
# With `return_sequences=True` the model can be trained on 24h of data at a time.
#
# Note: This will give a pessimistic view of the model's performance. On the first timestep the model has no access to previous steps, and so can't do any better than the simple `linear` and `dense` models shown earlier.
# + id="eZEROCQVYV6q"
print('Input shape:', wide_window.example[0].shape)
print('Output shape:', lstm_model(wide_window.example[0]).shape)
# + id="uvdWRl1e9WJl"
history = compile_and_fit(lstm_model, wide_window)
IPython.display.clear_output()
val_performance['LSTM'] = lstm_model.evaluate(wide_window.val)
performance['LSTM'] = lstm_model.evaluate(wide_window.test, verbose=0)
# + id="NwAOWCVgB26e"
wide_window.plot(lstm_model)
# + [markdown] id="pYglOCKehi8F"
# ### Performance
# + [markdown] id="2pCk0_rwhi8H"
# With this dataset typically each of the models does slightly better than the one before it.
# + id="JjEkt488hi8I"
x = np.arange(len(performance))
width = 0.3
metric_name = 'mean_absolute_error'
metric_index = lstm_model.metrics_names.index('mean_absolute_error')
val_mae = [v[metric_index] for v in val_performance.values()]
test_mae = [v[metric_index] for v in performance.values()]
plt.ylabel('mean_absolute_error [T (degC), normalized]')
plt.bar(x - 0.17, val_mae, width, label='Validation')
plt.bar(x + 0.17, test_mae, width, label='Test')
plt.xticks(ticks=x, labels=performance.keys(),
rotation=45)
_ = plt.legend()
# + id="cBMCpsdphi8L"
for name, value in performance.items():
print(f'{name:12s}: {value[1]:0.4f}')
# + [markdown] id="b5rUJ_2YMWzG"
# ### Multi-output models
#
# The models so far all predicted a single output feature, `T (degC)`, for a single time step.
#
# All of these models can be converted to predict multiple features just by changing the number of units in the output layer and adjusting the training windows to include all features in the `labels`.
#
# + id="9Gk0Z91xjOwv"
single_step_window = WindowGenerator(
# `WindowGenerator` returns all features as labels if you
# don't set the `label_columns` argument.
input_width=1, label_width=1, shift=1)
wide_window = WindowGenerator(
input_width=24, label_width=24, shift=1)
for example_inputs, example_labels in wide_window.train.take(1):
print(f'Inputs shape (batch, time, features): {example_inputs.shape}')
print(f'Labels shape (batch, time, features): {example_labels.shape}')
# + [markdown] id="XmcjHfDskX1N"
# Note above that the `features` axis of the labels now has the same depth as the inputs, instead of 1.
# + [markdown] id="9k7S5IHNhSNF"
# #### Baseline
#
# The same baseline model can be used here, but this time repeating all features instead of selecting a specific `label_index`.
# + id="sqqB9W-pjr5i"
baseline = Baseline()
baseline.compile(loss=tf.losses.MeanSquaredError(),
metrics=[tf.metrics.MeanAbsoluteError()])
# + id="ltQdgaqQjQWu"
val_performance = {}
performance = {}
val_performance['Baseline'] = baseline.evaluate(wide_window.val)
performance['Baseline'] = baseline.evaluate(wide_window.test, verbose=0)
# + [markdown] id="dfbCrf5q3P6n"
# #### Dense
# + id="NdpzH1dYjdIN"
dense = tf.keras.Sequential([
tf.keras.layers.Dense(units=64, activation='relu'),
tf.keras.layers.Dense(units=64, activation='relu'),
tf.keras.layers.Dense(units=num_features)
])
# + id="6uHuU9Cd3PTo"
history = compile_and_fit(dense, single_step_window)
IPython.display.clear_output()
val_performance['Dense'] = dense.evaluate(single_step_window.val)
performance['Dense'] = dense.evaluate(single_step_window.test, verbose=0)
# + [markdown] id="dsc9pur_mHsx"
# #### RNN
#
# + id="4QbGLMyomXaz"
# %%time
wide_window = WindowGenerator(
input_width=24, label_width=24, shift=1)
lstm_model = tf.keras.models.Sequential([
# Shape [batch, time, features] => [batch, time, lstm_units]
tf.keras.layers.LSTM(32, return_sequences=True),
# Shape => [batch, time, features]
tf.keras.layers.Dense(units=num_features)
])
history = compile_and_fit(lstm_model, wide_window)
IPython.display.clear_output()
val_performance['LSTM'] = lstm_model.evaluate( wide_window.val)
performance['LSTM'] = lstm_model.evaluate( wide_window.test, verbose=0)
print()
# + [markdown] id="UwhY2f_Nn0_K"
# <a id="residual"></a>
#
# #### Advanced: Residual connections
#
# The `Baseline` model from earlier took advantage of the fact that the sequence doesn't change drastically from time step to time step. Every model trained in this tutorial so far was randomly initialized, and then had to learn that the output is a small change from the previous time step.
#
# While you can get around this issue with careful initialization, it's simpler to build this into the model structure.
#
# It's common in time series analysis to build models that instead of predicting the next value, predict how the value will change in the next timestep.
# Similarly, "Residual networks" or "ResNets" in deep learning refer to architectures where each layer adds to the model's accumulating result.
#
# That is how you take advantage of the knowledge that the change should be small.
#
# 
#
# Essentially this initializes the model to match the `Baseline`. For this task it helps models converge faster, with slightly better performance.
# + [markdown] id="yP58A_ORx0kM"
# This approach can be used in conjunction with any model discussed in this tutorial.
#
# Here it is being applied to the LSTM model, note the use of the `tf.initializers.zeros` to ensure that the initial predicted changes are small, and don't overpower the residual connection. There are no symmetry-breaking concerns for the gradients here, since the `zeros` are only used on the last layer.
# + id="7YlfnDQC22TQ"
class ResidualWrapper(tf.keras.Model):
  """Wrap a model so it predicts a *change* that is added to its input.

  The wrapped model's output is interpreted as a per-timestep delta, so
  the wrapper's prediction is `inputs + delta`. Initializing the wrapped
  model's output layer near zero makes the whole thing start out as the
  identity (the `Baseline`).
  """
  def __init__(self, model):
    super().__init__()
    self.model = model
  def call(self, inputs, *args, **kwargs):
    delta = self.model(inputs, *args, **kwargs)
    # Each timestep's prediction is the previous input plus the
    # model-predicted change.
    return tf.add(inputs, delta)
# + id="NNeH02pspc9B"
# %%time
residual_lstm = ResidualWrapper(
tf.keras.Sequential([
tf.keras.layers.LSTM(32, return_sequences=True),
tf.keras.layers.Dense(
num_features,
# The predicted deltas should start small
# So initialize the output layer with zeros
kernel_initializer=tf.initializers.zeros)
]))
history = compile_and_fit(residual_lstm, wide_window)
IPython.display.clear_output()
val_performance['Residual LSTM'] = residual_lstm.evaluate(wide_window.val)
performance['Residual LSTM'] = residual_lstm.evaluate(wide_window.test, verbose=0)
print()
# + [markdown] id="I42Er9Du6co1"
# #### Performance
# + [markdown] id="LZxR38P_6pUi"
# Here is the overall performance for these multi-output models.
# + id="6XgTK9tnr7rc"
x = np.arange(len(performance))
width = 0.3
metric_name = 'mean_absolute_error'
metric_index = lstm_model.metrics_names.index('mean_absolute_error')
val_mae = [v[metric_index] for v in val_performance.values()]
test_mae = [v[metric_index] for v in performance.values()]
plt.bar(x - 0.17, val_mae, width, label='Validation')
plt.bar(x + 0.17, test_mae, width, label='Test')
plt.xticks(ticks=x, labels=performance.keys(),
rotation=45)
plt.ylabel('MAE (average over all outputs)')
_ = plt.legend()
# + id="URz3ajCc6kBj"
for name, value in performance.items():
print(f'{name:15s}: {value[1]:0.4f}')
# + [markdown] id="_Vt2MJhNxwPU"
# The above performances are averaged across all model outputs.
# + [markdown] id="eYokb7Om2YbK"
# ## Multi-step models
#
# Both the single-output and multiple-output models in the previous sections made **single time step predictions**, 1h into the future.
#
# This section looks at how to expand these models to make **multiple time step predictions**.
#
# In a multi-step prediction, the model needs to learn to predict a range of future values. Thus, unlike a single step model, where only a single future point is predicted, a multi-step model predicts a sequence of the future values.
#
# There are two rough approaches to this:
#
# 1. Single shot predictions where the entire time series is predicted at once.
# 2. Autoregressive predictions where the model only makes single step predictions and its output is fed back as its input.
#
# In this section all the models will predict **all the features across all output time steps**.
#
# + [markdown] id="WFsDAwVt4_rq"
# For the multi-step model, the training data again consists of hourly samples. However, here, the models will learn to predict 24h of the future, given 24h of the past.
#
# Here is a `Window` object that generates these slices from the dataset:
# + id="1cFYtsz6XiGw"
OUT_STEPS = 24
multi_window = WindowGenerator(input_width=24,
label_width=OUT_STEPS,
shift=OUT_STEPS)
multi_window.plot()
multi_window
# + [markdown] id="5lg8SInh9Jzd"
# ### Baselines
# + [markdown] id="axwpoWYOApJL"
# A simple baseline for this task is to repeat the last input time step for the required number of output timesteps:
#
# 
# + id="_5iaHSaJ9Rxv"
class MultiStepLastBaseline(tf.keras.Model):
  """Baseline that repeats the last input timestep for all OUT_STEPS outputs."""
  def call(self, inputs):
    last_step = inputs[:, -1:, :]
    return tf.tile(last_step, multiples=[1, OUT_STEPS, 1])
last_baseline = MultiStepLastBaseline()
last_baseline.compile(loss=tf.losses.MeanSquaredError(),
metrics=[tf.metrics.MeanAbsoluteError()])
multi_val_performance = {}
multi_performance = {}
multi_val_performance['Last'] = last_baseline.evaluate(multi_window.val)
multi_performance['Last'] = last_baseline.evaluate(multi_window.test, verbose=0)
multi_window.plot(last_baseline)
# + [markdown] id="AvHZ93ObAfMA"
# Since this task is to predict 24h given 24h another simple approach is to repeat the previous day, assuming tomorrow will be similar:
#
# 
# + id="L8Y1uMhGwIRs"
class RepeatBaseline(tf.keras.Model):
  """Baseline that predicts "tomorrow will be like today".

  The input and label windows both span 24h here, so echoing the input
  window unchanged is exactly the repeat-the-previous-day forecast.
  """
  def call(self, inputs):
    return inputs
repeat_baseline = RepeatBaseline()
repeat_baseline.compile(loss=tf.losses.MeanSquaredError(),
metrics=[tf.metrics.MeanAbsoluteError()])
multi_val_performance['Repeat'] = repeat_baseline.evaluate(multi_window.val)
multi_performance['Repeat'] = repeat_baseline.evaluate(multi_window.test, verbose=0)
multi_window.plot(repeat_baseline)
# + [markdown] id="tbndS-ct9C2Q"
# ### Single-shot models
#
# One high-level approach to this problem is to use a "single-shot" model, where the model makes the entire sequence prediction in a single step.
#
# This can be implemented efficiently as a `layers.Dense` with `OUT_STEPS*features` output units. The model just needs to reshape that output to the required `(OUTPUT_STEPS, features)`.
# + [markdown] id="NCKS4m1VKrDQ"
# #### Linear
#
# A simple linear model based on the last input time step does better than either baseline, but is underpowered. The model needs to predict `OUTPUT_STEPS` time steps, from a single input time step with a linear projection. It can only capture a low-dimensional slice of the behavior, likely based mainly on the time of day and time of year.
#
# 
# + id="kfRz_WVhIQcd"
multi_linear_model = tf.keras.Sequential([
# Take the last time-step.
# Shape [batch, time, features] => [batch, 1, features]
tf.keras.layers.Lambda(lambda x: x[:, -1:, :]),
# Shape => [batch, 1, out_steps*features]
tf.keras.layers.Dense(OUT_STEPS*num_features,
kernel_initializer=tf.initializers.zeros),
# Shape => [batch, out_steps, features]
tf.keras.layers.Reshape([OUT_STEPS, num_features])
])
history = compile_and_fit(multi_linear_model, multi_window)
IPython.display.clear_output()
multi_val_performance['Linear'] = multi_linear_model.evaluate(multi_window.val)
multi_performance['Linear'] = multi_linear_model.evaluate(multi_window.test, verbose=0)
multi_window.plot(multi_linear_model)
# + [markdown] id="zi2TMHk2IRrh"
# #### Dense
#
# Adding a `layers.Dense` between the input and output gives the linear model more power, but is still only based on a single input timestep.
# + id="jezm-BKaGj91"
multi_dense_model = tf.keras.Sequential([
# Take the last time step.
# Shape [batch, time, features] => [batch, 1, features]
tf.keras.layers.Lambda(lambda x: x[:, -1:, :]),
# Shape => [batch, 1, dense_units]
tf.keras.layers.Dense(512, activation='relu'),
# Shape => [batch, out_steps*features]
tf.keras.layers.Dense(OUT_STEPS*num_features,
kernel_initializer=tf.initializers.zeros),
# Shape => [batch, out_steps, features]
tf.keras.layers.Reshape([OUT_STEPS, num_features])
])
history = compile_and_fit(multi_dense_model, multi_window)
IPython.display.clear_output()
multi_val_performance['Dense'] = multi_dense_model.evaluate(multi_window.val)
multi_performance['Dense'] = multi_dense_model.evaluate(multi_window.test, verbose=0)
multi_window.plot(multi_dense_model)
# + [markdown] id="icsBAjCzMaMl"
# #### CNN
# + [markdown] id="34lCZrWYNBwd"
# A convolutional model makes predictions based on a fixed-width history, which may lead to better performance than the dense model since it can see how things are changing over time:
#
# 
# + id="0xJoIP6PMWMI"
CONV_WIDTH = 3
multi_conv_model = tf.keras.Sequential([
# Shape [batch, time, features] => [batch, CONV_WIDTH, features]
tf.keras.layers.Lambda(lambda x: x[:, -CONV_WIDTH:, :]),
# Shape => [batch, 1, conv_units]
tf.keras.layers.Conv1D(256, activation='relu', kernel_size=(CONV_WIDTH)),
# Shape => [batch, 1, out_steps*features]
tf.keras.layers.Dense(OUT_STEPS*num_features,
kernel_initializer=tf.initializers.zeros),
# Shape => [batch, out_steps, features]
tf.keras.layers.Reshape([OUT_STEPS, num_features])
])
history = compile_and_fit(multi_conv_model, multi_window)
IPython.display.clear_output()
multi_val_performance['Conv'] = multi_conv_model.evaluate(multi_window.val)
multi_performance['Conv'] = multi_conv_model.evaluate(multi_window.test, verbose=0)
multi_window.plot(multi_conv_model)
# + [markdown] id="weBjeZAFJOP4"
# #### RNN
# + [markdown] id="8022xOKxOO92"
# A recurrent model can learn to use a long history of inputs, if it's relevant to the predictions the model is making. Here the model will accumulate internal state for 24h, before making a single prediction for the next 24h.
#
# In this single-shot format, the LSTM only needs to produce an output at the last time step, so set `return_sequences=False`.
#
# 
#
# + id="Bf1ks6RTzF64"
multi_lstm_model = tf.keras.Sequential([
# Shape [batch, time, features] => [batch, lstm_units]
# Adding more `lstm_units` just overfits more quickly.
tf.keras.layers.LSTM(32, return_sequences=False),
# Shape => [batch, out_steps*features]
tf.keras.layers.Dense(OUT_STEPS*num_features,
kernel_initializer=tf.initializers.zeros),
# Shape => [batch, out_steps, features]
tf.keras.layers.Reshape([OUT_STEPS, num_features])
])
history = compile_and_fit(multi_lstm_model, multi_window)
IPython.display.clear_output()
multi_val_performance['LSTM'] = multi_lstm_model.evaluate(multi_window.val)
multi_performance['LSTM'] = multi_lstm_model.evaluate(multi_window.test, verbose=0)
multi_window.plot(multi_lstm_model)
# + [markdown] id="d5n-1cDW12Vo"
# ### Advanced: Autoregressive model
#
# The above models all predict the entire output sequence in a single step.
#
# In some cases it may be helpful for the model to decompose this prediction into individual time steps. Then each model's output can be fed back into itself at each step and predictions can be made conditioned on the previous one, like in the classic [Generating Sequences With Recurrent Neural Networks](https://arxiv.org/abs/1308.0850).
#
# One clear advantage to this style of model is that it can be set up to produce output with a varying length.
#
# You could take any of the single-step multi-output models trained in the first half of this tutorial and run in an autoregressive feedback loop, but here you'll focus on building a model that's been explicitly trained to do that.
#
# 
#
# + [markdown] id="PKRreBbULRXY"
# #### RNN
#
# This tutorial only builds an autoregressive RNN model, but this pattern could be applied to any model that was designed to output a single timestep.
#
# The model will have the same basic form as the single-step `LSTM` models: An `LSTM` followed by a `layers.Dense` that converts the `LSTM` outputs to model predictions.
#
# A `layers.LSTM` is a `layers.LSTMCell` wrapped in the higher level `layers.RNN` that manages the state and sequence results for you (See [Keras RNNs](https://www.tensorflow.org/guide/keras/rnn) for details).
#
# In this case the model has to manually manage the inputs for each step so it uses `layers.LSTMCell` directly for the lower level, single time step interface.
# + id="s5tz3Nu0R5JG"
class FeedBack(tf.keras.Model):
    """Autoregressive multi-step forecaster.

    Holds a single ``LSTMCell`` so each predicted step can be fed back in
    as the input for the next step. The ``warmup`` and ``call`` methods are
    attached to this class in later notebook cells.
    """

    def __init__(self, units, out_steps):
        super().__init__()
        self.out_steps = out_steps  # number of future time steps to predict
        self.units = units          # LSTM hidden size
        self.lstm_cell = tf.keras.layers.LSTMCell(units)
        # Also wrap the LSTMCell in an RNN to simplify the `warmup` method.
        self.lstm_rnn = tf.keras.layers.RNN(self.lstm_cell, return_state=True)
        # Projects the LSTM output to one prediction per feature.
        # NOTE(review): `num_features` is a module-level global defined
        # earlier in the notebook — confirm it is in scope when this runs.
        self.dense = tf.keras.layers.Dense(num_features)
# + id="2OXVM9G1U7xR"
feedback_model = FeedBack(units=32, out_steps=OUT_STEPS)
# + [markdown] id="ph5uFSfTUNho"
# The first method this model needs is a `warmup` method to initialize its internal state based on the inputs. Once trained this state will capture the relevant parts of the input history. This is equivalent to the single-step `LSTM` model from earlier:
# + id="vM2K_LLdRjDZ"
def warmup(self, inputs):
    """Consume the input window and produce the first prediction.

    ``inputs`` is (batch, time, features); returns a tuple of the first
    (batch, features) prediction and the list of LSTM state tensors.
    """
    rnn_outputs = self.lstm_rnn(inputs)
    hidden = rnn_outputs[0]        # (batch, lstm_units) final output
    state = list(rnn_outputs[1:])  # LSTM internal state tensors
    return self.dense(hidden), state

FeedBack.warmup = warmup
# + [markdown] id="6JkaSYaZ9eB7"
# This method returns a single time-step prediction, and the internal state of the LSTM:
# + id="w9Fz6NTKXXwU"
prediction, state = feedback_model.warmup(multi_window.example[0])
prediction.shape
# + [markdown] id="S_ZdvPjdX3y3"
# With the `RNN`'s state, and an initial prediction you can now continue iterating the model feeding the predictions at each step back as the input.
#
# The simplest approach to collecting the output predictions is to use a python list, and `tf.stack` after the loop.
# + [markdown] id="yotTad3nZXQU"
# Note: Stacking a python list like this only works with eager-execution, using `Model.compile(..., run_eagerly=True)` for training, or with a fixed length output. For a dynamic output length you would need to use a `tf.TensorArray` instead of a python list, and `tf.range` instead of the python `range`.
# + id="g1GRDu3mZtr9"
def call(self, inputs, training=None):
    """Autoregressively predict ``self.out_steps`` future time steps.

    inputs: (batch, time, features) warmup window.
    Returns: (batch, out_steps, features) tensor of predictions.
    """
    # Collect per-step outputs in a Python list and tf.stack them after the
    # loop. (NOTE: the original comment claimed a TensorArray was used — a
    # list only works here because out_steps is fixed; a dynamic output
    # length would require tf.TensorArray and tf.range.)
    predictions = []
    # Initialize the lstm state from the warmup window.
    prediction, state = self.warmup(inputs)
    # Insert the first prediction.
    predictions.append(prediction)
    # Run the rest of the prediction steps.
    for n in range(1, self.out_steps):
        # Use the last prediction as input.
        x = prediction
        # Execute one lstm step.
        x, state = self.lstm_cell(x, states=state,
                                  training=training)
        # Convert the lstm output to a prediction.
        prediction = self.dense(x)
        # Add the prediction to the output.
        predictions.append(prediction)
    # predictions.shape => (time, batch, features)
    predictions = tf.stack(predictions)
    # predictions.shape => (batch, time, features)
    predictions = tf.transpose(predictions, [1, 0, 2])
    return predictions

FeedBack.call = call
# + [markdown] id="Ubop-YWp15XW"
# Test run this model on the example inputs:
# + id="Xja83zEYaM2D"
# Sanity-check the model's output shape on one example batch.
print('Output shape (batch, time, features): ', feedback_model(multi_window.example[0]).shape)
# + [markdown] id="qMs0rYB8be9M"
# Now train the model:
# + id="VBRVG2hnNyrO"
history = compile_and_fit(feedback_model, multi_window)
IPython.display.clear_output()
# Record validation/test performance alongside the other multi-step models.
multi_val_performance['AR LSTM'] = feedback_model.evaluate(multi_window.val)
multi_performance['AR LSTM'] = feedback_model.evaluate(multi_window.test, verbose=0)
multi_window.plot(feedback_model)
# + [markdown] id="hGjcJsAQJUkI"
# ### Performance
# + [markdown] id="sODAwr2ndtDB"
# There are clearly diminishing returns as a function of model complexity on this problem.
# + id="WZwWBA8S6B3L"
# Bar chart comparing validation/test MAE across all multi-step models.
x = np.arange(len(multi_performance))
width = 0.3

metric_name = 'mean_absolute_error'
# FIX: look up the metric index via metric_name instead of repeating the
# string literal, so changing the metric only requires editing one line.
metric_index = lstm_model.metrics_names.index(metric_name)
val_mae = [v[metric_index] for v in multi_val_performance.values()]
test_mae = [v[metric_index] for v in multi_performance.values()]

plt.bar(x - 0.17, val_mae, width, label='Validation')
plt.bar(x + 0.17, test_mae, width, label='Test')
plt.xticks(ticks=x, labels=multi_performance.keys(),
           rotation=45)
# FIX: this was an f-string with no placeholders — plain string suffices.
plt.ylabel('MAE (average over all times and outputs)')
_ = plt.legend()
# + [markdown] id="Zq3hUsedCEmJ"
# The metrics for the multi-output models in the first half of this tutorial show the performance averaged across all output features. These performances are similar, but here they are also averaged across output timesteps.
# + id="jKq3eAIvH4Db"
# Print per-model test performance. value[1] is the second entry returned by
# model.evaluate — presumably the MAE metric given the compile setup; confirm
# against compile_and_fit.
for name, value in multi_performance.items():
    print(f'{name:8s}: {value[1]:0.4f}')
# + [markdown] id="MpBFwfnaHP23"
# The gains achieved going from a dense model to convolutional and recurrent models are only a few percent (if any), and the autoregressive model performed clearly worse. So these more complex approaches may not be worth while on **this** problem, but there was no way to know without trying, and these models could be helpful for **your** problem.
# + [markdown] id="pOzaIRYBhqwg"
# ## Next steps
#
# This tutorial was a quick introduction to time series forecasting using TensorFlow.
#
# * For further understanding, see:
# * Chapter 15 of [Hands-on Machine Learning with Scikit-Learn, Keras, and TensorFlow](https://www.oreilly.com/library/view/hands-on-machine-learning/9781492032632/), 2nd Edition
# * Chapter 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python).
# * Lesson 8 of [Udacity's intro to TensorFlow for deep learning](https://www.udacity.com/course/intro-to-tensorflow-for-deep-learning--ud187), and the [exercise notebooks](https://github.com/tensorflow/examples/tree/master/courses/udacity_intro_to_tensorflow_for_deep_learning)
# * Also remember that you can implement any [classical time series model](https://otexts.com/fpp2/index.html) in TensorFlow, this tutorial just focuses on TensorFlow's built-in functionality.
|
site/en/tutorials/structured_data/time_series.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Sparkmagic (PySpark)
# language: ''
# name: pysparkkernel
# ---
# # CSV -> JSON
# #### 이번 Lab에서는 Glue Job의 기본 Template을 살펴보고 CSV 파일을 JSON으로 변환하는 Glue Job을 만들어 실행하고 디버깅 하는 과정을 살펴봅니다.
# #### S3에 업로드한 데이터를 읽어오기 위해 각자 S3 bucket에 지정한 account-id를 account_id 변수에 할당합니다.
# Account id used in your S3 bucket name (s3://aws-glue-hol-<account id>).
ACCOUNT_ID = ''
# #### File Read & Write with Spark API
# #### After running the cell below, verify that JSON files were created under the s3://aws-glue-hol-[account id]/output directory.
# +
import sys
from awsglue.context import GlueContext
from awsglue.dynamicframe import DynamicFrame
from pyspark.sql.functions import regexp_extract, col

# Create the GlueContext (sc is the SparkContext provided by the kernel).
glueContext = GlueContext(sc)

s3_bucket = 's3://aws-glue-hol-' + ACCOUNT_ID

# Read CSV file using Spark API.
titanic_csv_df = spark.read.csv(s3_bucket + '/train', header=True)

# Create 'initial' column using Spark API (extracts the title, e.g. "Mr",
# from the Name column).
titanic_csv_df = titanic_csv_df.withColumn('initial', regexp_extract(col('Name'), "(\w+)\.", 1))

# Drop the Name column using the Glue API.
titanic_csv_dyf = DynamicFrame.fromDF(titanic_csv_df, glueContext, 'titanic_csv_dyf').drop_fields('Name')

# Write JSON files using the Spark API.
titanic_csv_dyf.toDF().write \
    .format('json') \
    .mode('overwrite') \
    .save(s3_bucket + '/output')
# -
# #### File Read & Write with Glue API
# +
import sys
from awsglue.context import GlueContext
from awsglue.dynamicframe import DynamicFrame
from pyspark.sql.functions import regexp_extract, col

# Create the GlueContext.
glueContext = GlueContext(sc)

s3_bucket = 's3://aws-glue-hol-' + ACCOUNT_ID

# Read the CSV data via the Glue API, from a table registered in the
# Glue Data Catalog.
titanic_dyf = glueContext.create_dynamic_frame.from_catalog(database='analytics_hol',
                                                            table_name='titanic_train',
                                                            transformation_ctx='titanic_dyf')

# Create the 'initial' column using the Spark API.
titanic_csv_df = titanic_dyf.toDF()
titanic_csv_df = titanic_csv_df.withColumn('initial', regexp_extract(col('Name'), "(\w+)\.", 1))

# Drop the Name column using the Glue API.
titanic_csv_dyf = DynamicFrame.fromDF(titanic_csv_df, glueContext, 'titanic_csv_dyf').drop_fields('Name')

# Write JSON files using the Glue API.
glueContext.write_dynamic_frame.from_options(
    frame=titanic_csv_dyf,
    connection_type = "s3",
    connection_options = {"path": s3_bucket + '/output'},
    format = "json",
    transformation_ctx = "titanic_json_dyf")
# -
# #### Standalone script for running this as an actual Glue Job.
# #### Follow the Lab Guide to create a Job in the Glue console and paste this code in.
# +
import sys
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from awsglue.dynamicframe import DynamicFrame
from pyspark.sql.functions import regexp_extract, col

## @params: [JOB_NAME]
args = getResolvedOptions(sys.argv, ['JOB_NAME'])

# Create the SparkContext.
sc = SparkContext()
# Create the GlueContext.
glueContext = GlueContext(sc)
# Create the SparkSession.
spark = glueContext.spark_session
# Create and initialize the Job.
job = Job(glueContext)
job.init(args['JOB_NAME'], args)

# NOTE(review): ACCOUNT_ID must be defined when this runs as a standalone
# job script — here it is only set in an earlier notebook cell.
s3_bucket = 's3://aws-glue-hol-' + ACCOUNT_ID

# Read the CSV data from S3 into a DynamicFrame.
titanic_dyf = glueContext.create_dynamic_frame_from_options(
    connection_type = 's3',
    connection_options = {'paths': [s3_bucket + '/train']},
    format='csv',
    format_options={
        "withHeader": True,
        "delimiter": ','
    })

# Spark API: convert to a DataFrame and add the 'initial' column.
titanic_csv_df = titanic_dyf.toDF()
titanic_csv_df = titanic_csv_df.withColumn('initial', regexp_extract(col('Name'), "(\w+)\.", 1))

# Glue API: convert back to a DynamicFrame and drop the Name column.
titanic_csv_dyf = DynamicFrame.fromDF(titanic_csv_df, glueContext, 'titanic_csv_dyf').drop_fields('Name')

# Save as JSON into the output directory.
titanic_csv_dyf.toDF().write \
    .format('json') \
    .mode('overwrite') \
    .save(s3_bucket + '/output')

# Commit the job.
job.commit()
|
glue-deep-dive-hol/02.csv_to_json.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from WaveNetEncoder import WaveNetEncoder
import numpy as np
import torch
import math
import matplotlib.pyplot as plt
# Generate fake data: a sum of three sinusoids, zero-padded at the front.
numTSteps = 10000
zeropad = 9
length = 10
dt = length / numTSteps
times = np.array([ dt for i in range(numTSteps)])
times = np.cumsum(times)
signal = np.zeros(numTSteps + zeropad)
signal[zeropad:] = np.cos(2*math.pi*times + math.pi/10) + np.cos(6*math.pi*times + math.pi/10)+ np.sin(25*math.pi*times + math.pi/10)
generated_signal = np.zeros(numTSteps + zeropad)
# Normalize to roughly [-1, 1]; the +0.001 guards against division by zero.
signal = signal / (np.max(signal) + 0.001)
# Shape the signal as (batch=1, channels=1, time) for the encoder.
signalt = torch.tensor(signal, dtype=torch.float).unsqueeze(0).unsqueeze(0)
plt.plot(signal)
encoder = WaveNetEncoder(n_channels=1, n_layers=10,max_dilation=128,n_residual_channels=3,n_dilated_channels=6, encoding_factor=500, encoding_stride=500)
# Test individual layers, tracing tensor sizes through one residual block.
signal = encoder.NCInput(signalt)
signal.size()
skip = signal
signal = torch.nn.functional.relu(signal, True)
signal = encoder.dilate_layers[0](signal)
signal.size()
signal = torch.nn.functional.relu(signal, True)
signal = encoder.res_layers[0](signal)
length = signal.size(2)
signal.size()
skip.size()
# Trim the skip connection to match the (shorter) dilated-conv output.
skip = skip[:,:,-length:]
skip.size()
signal = signal + skip
signal.size()
signal = encoder.final_layer(signal)
signal.size()
signal = encoder.pooling_layer(signal)
signal.size()
# Full forward pass through the encoder.
encoding = encoder(signalt)
encoding.size()
# Repeat on real stereo audio (2 channels).
import torchaudio
music, sr = torchaudio.load("./002012.mp3")
music = music.unsqueeze(0)
music.size()
encoder = WaveNetEncoder(n_channels=2, n_layers=10,max_dilation=128,n_residual_channels=3,n_dilated_channels=6, encoding_factor=500, encoding_stride=500)
encoding = encoder(music)
encoding.size()
# Sanity check: encoding of an all-zero input.
zerovect = torch.zeros(1,2,10008)
out = encoder(zerovect)
out
# Build a classifier on top of the encoder and run it on the audio.
from WaveNetClassifier import WaveNetClassifier
Encoder_Dict = {'n_channels':2, 'n_layers':10,'max_dilation':128, 'n_residual_channels':3, 'n_dilated_channels':6, 'encoding_factor':500, 'encoding_stride':500}
classifier = WaveNetClassifier(Encoder_Dict,1322496)
classifier(music)
|
WaveNet2/WaveNetEncoder/.ipynb_checkpoints/WaveNetEncoderTests-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.4 64-bit ('pydatk')
# metadata:
# interpreter:
# hash: 191f923cb66d773162629997b180780ff6671a6d24890c003180089c1fb45d90
# name: python3
# ---
# Sklearn Pipeline test
# +
import sklearn
import numpy as np
import pandas as pd
import os.path
import sys
from sklearn.pipeline import Pipeline
# -
# Make the parent directory importable so the local `datk` package resolves.
sys.path.append(os.path.abspath(os.path.join(os.path.abspath(''),os.path.pardir)))
#from datk.preprocessing import make_column_transformer, get_preprocessed_df
from datk.custom_transformers import *
# +
# Load the Titanic training set; df1 drops id/high-cardinality text columns.
df = pd.read_csv("./train.csv")
df1 = df.drop(['PassengerId','Name','Ticket','Cabin'],axis=1)
df11 = df.drop(['Name','Ticket','Cabin'],axis=1)  # variant keeping PassengerId
ID = ['PassengerId']
df1.head()
# -
# Inspect missing-value ratios and schema.
df1.isnull().mean()
df.info()
df1.head()
# +
CAT_FEATS = ['Sex','Embarked']
NUM_FEATS = ['Age']

# Preprocessing with a Pipeline: one-hot the categoricals, zero-fill and
# log1p-transform the numerics, then union the two branches column-wise.
pipeline = Pipeline([
    ('features', DFFeatureUnion([
        ('categoricals', Pipeline([
            ('extract', ColumnExtractor(CAT_FEATS)),
            ('dummy', DummyTransformer())
        ])),
        ('numerics', Pipeline([
            ('extract', ColumnExtractor(NUM_FEATS)),
            ('zero_fill', ZeroFillTransformer()),
            ('log', Log1pTransformer())
        ]))
    ])),
    #('scale', DFStandardScaler())
])
df2 = pipeline.fit_transform(df1)
# +
df2.columns
# -
df2.head()
# Larger example pipeline with date, categorical, multi-label and numeric
# branches, followed by standard scaling.
# NOTE(review): DATE_FEATS and MULTI_FEATS are never defined in this
# notebook, and pipeline3 is never fitted — this cell is illustrative only
# and would raise NameError if the pipeline were used.
pipeline3 = Pipeline([
    ('features', DFFeatureUnion([
        ('dates', Pipeline([
            ('extract', ColumnExtractor(DATE_FEATS)),
            ('to_date', DateFormatter()),
            ('diffs', DateDiffer()),
            ('mid_fill', DFImputer(strategy='median'))
        ])),
        ('categoricals', Pipeline([
            ('extract', ColumnExtractor(CAT_FEATS)),
            ('dummy', DummyTransformer())
        ])),
        ('multi_labels', Pipeline([
            ('extract', ColumnExtractor(MULTI_FEATS)),
            ('multi_dummy', MultiEncoder(sep=';'))
        ])),
        ('numerics', Pipeline([
            ('extract', ColumnExtractor(NUM_FEATS)),
            ('zero_fill', ZeroFillTransformer()),
            ('log', Log1pTransformer())
        ]))
    ])),
    ('scale', DFStandardScaler())
])
# Normalize the column names produced by the earlier pipeline.
from datk.preprocessing import fix_column_names
df2 = fix_column_names(df2)
df2.head()
# +
from sklearn.pipeline import make_pipeline, Pipeline, FeatureUnion
from sklearn.impute import SimpleImputer, MissingIndicator
from sklearn.preprocessing import StandardScaler, OneHotEncoder, FunctionTransformer
from sklearn.compose import ColumnTransformer, make_column_selector

# Numeric branch: median-impute missing values.
num_imputer = Pipeline([
    ("imputer", SimpleImputer(strategy="median", add_indicator=False)),
])

# Categorical branch: fill missing with 'NA', then one-hot encode.
# FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# int is the documented replacement and is what OneHotEncoder expects.
cat_ohe = Pipeline([
    ("cat_imputer", SimpleImputer(strategy='constant', fill_value='NA')),
    ('ohe', OneHotEncoder(dtype=int, handle_unknown='ignore'))
])
# -
# Route numeric columns to the imputer and object/category columns to the
# one-hot pipeline; pass everything else through untouched.
col_transformer = ColumnTransformer(
    [('imp', num_imputer, make_column_selector(dtype_include=np.number)),
     ('ohe', cat_ohe, make_column_selector(dtype_include=['object','category']))
    ], remainder='passthrough'
)
col_transformer.fit(df1)
# Rebuild output column names: numeric input names + expanded one-hot names.
col_label = col_transformer.transformers_[0][2] + list(col_transformer.transformers_[1][1].steps[1][1].get_feature_names(
    col_transformer.transformers_[1][2]))
col_label
# NOTE(review): make_column_transformer / get_preprocessed_df come from the
# datk.preprocessing import that is commented out above — these lines raise
# NameError unless that import is restored.
a = make_column_transformer().fit(df1)
df2 = get_preprocessed_df(df1,a,fit_flag=True)
df2 = pd.DataFrame(col_transformer.transform(df1),
                   columns=col_label, # get column name
                   index = df1.index
                   )
df2.isnull().sum()
# +
df3 = get_preprocessed_df(df1,col_transformer,fit_flag=False)
df3.head()
# -
col_trans = make_column_transformer()
df4 = get_preprocessed_df(df1,col_trans,fit_flag=True)
df4.head()
# Fit/transform behavior on a single sampled row.
df_s = df1.sample(1)
df_s.head()
df5 = get_preprocessed_df(df_s,col_trans,fit_flag=False)
df5.head()
df.head()
# +
df7 = get_preprocessed_df(df,col_trans,fit_flag=False)
df7.head()
# -
col_trans_smpl = make_column_transformer().fit(df_s)
df6 = get_preprocessed_df(df_s,col_trans_smpl,fit_flag=False)
df6.head()
col_trans_smpl
# +
# Round-trip the fitted transformer through joblib serialization.
import joblib
joblib.dump(col_transformer,'./col_transformer.bin')
# -
ct = joblib.load('./col_transformer.bin')
df3 = pd.DataFrame(ct.transform(df1),
                   columns=ct.transformers_[0][2], # get column name
                   index = df1.index
                   )
df3.head()
# !rm col_transformer.bin
|
examples/sklearn_pipeline.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Event marker synchronization
#
# Stimuli delivery > Event synchronization
#
# It is possible to take advantage of the OpenBCI design: this board includes a set of analog and digital inputs that can be used to synchronize markers. Other systems will need a piece of additional laboratory equipment like [LabStreamer](https://www.neurobs.com/menu_presentation/menu_hardware/labstreamer) that can register timing with microsecond precision. In order to use OpenBCI, we will only need an _LDR module_ connected to the pin **D11** (or **A5**) and start the automatic latency correction system. This method is similar to the one implemented by <cite data-footcite="davis2020stimulus">davis2020stimulus</cite>.
# <img src='images/marker_sync.gif'></img>
# This simple latency correction consists of a stimuli delivery with only a marker synchronization area; the _LDR module_ is constantly sensing (the [boardmode](02-interface.ipynb#Connection) must be in `analog`), so changes in the square-wave signal are compared with the streamed markers and the latency is then corrected. The latency correction **only affects the current session**; if the framework is restarted, this calibration will be lost.
#
# For strict event synchronization, it is preferable to use [marker synchronization continuously](80-stimuli_delivery.ipynb#Hardware-based-event-synchonization) during the whole run.
# ---
# .. footbibliography::
|
docs/source/notebooks/08-event_marker_synchronization.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: condagpu
# language: python
# name: condagpu
# ---
# +
import spacy
# Load English tokenizer, tagger, parser and NER
nlp = spacy.load("en_core_web_sm")
def extractor(messageText, dialogNumber):
    """Return every PERSON entity spaCy finds in messageText.

    Each hit is reported as a dict carrying the matched text, its kind,
    the dialog turn it was found in, and its character span.
    """
    parsed = nlp(messageText)
    return [
        {
            "value": entity.text,
            "exactValue": entity.text,
            "kind": 'PERSON',
            "dialogNumber": dialogNumber,
            "foundAt": [entity.start_char, entity.end_char]
        }
        for entity in parsed.ents
        if entity.label_ == 'PERSON'
    ]
def substituter(messageText):
    """Mask every PERSON entity in messageText with the token '--!PERSON'."""
    for entity in nlp(messageText).ents:
        if entity.label_ == 'PERSON':
            messageText = messageText.replace(entity.text, "--!" + entity.label_)
    return messageText
from simbots.Bot import Bot
from simbots.utils.builtInIntents import IntentSamples
from simbots.utils.builtInEntities import EntitySamples
import json
# Training utterances for each intent the bot should recognize. Most intents
# reuse the library's built-in samples; 'PersonName' and 'Irrelevant' are
# hand-written.
intentExamples = {
    "PersonName": ["My name is Vaibhav","I am <NAME> .","You can call me Rahul ." ,
                   "I am Simon . ","I am Riko .","You can call me Shiva . "],
    'Greetings': IntentSamples.greetingSamples(),
    'BotName': IntentSamples.botNameSamples(),
    'Relatives': IntentSamples.relativeSamples(),
    'Age': IntentSamples.ageSamples(),
    'BirthPlace': IntentSamples.birthPlaceSamples(),
    'Abilities': IntentSamples.abilitiesSamples(),
    'Really': IntentSamples.reallySamples(),
    'Laughter': IntentSamples.laughterSamples(),
    'Cool': IntentSamples.coolSamples(),
    'Praise': IntentSamples.praiseSamples(),
    'TrueT': IntentSamples.trueSamples(),
    'FalseT': IntentSamples.falseSamples(),
    'Bye': IntentSamples.byeSamples(),
    'Confirm': IntentSamples.confirmSamples(),
    'Thanks': IntentSamples.thanksSamples(),
    'Irrelevant': ['the weather is fine today','the sun rises in the east','the quick brown fox jumps over the red carpet',
                   'the sun rises in the east',
                   'What is love , baby dont hurt me ',
                   'this is a new dawn a new day'],
}

# Entity definitions: regex-based helpers, plus the spaCy-backed
# 'NameHelper' that plugs in the extractor()/substituter() functions.
entityExamples = {
    'GreetingsHelper': EntitySamples.greetingsHelper(),
    'LaughterHelper': {'haha': [{ 'tag': 'case-insensitive',
                                  'pattern': "\s(h+(a|e)+)+(h+)?\s",
                                  'type': 'regex'}],
                       'happysmily': [{'tag': 'case-insensitive',
                                       'pattern': "\s\:\)\s", 'type': 'regex'}]},
    'CoolHelper': EntitySamples.coolHelper(),
    'ByeHelper': EntitySamples.byeHelper(),
    'NameHelper':{
        'personName':[{'type' : 'function',
                       'extractor': extractor,
                       'substituter':substituter,
                       'tag':'case-insensitive'
                       }]
    }
}

# Reply templates, addressed by '<Intent>.<variant>' tags; '{0}' is filled
# with the 'data' value supplied by the bot's reason() method.
botMessages = {
    'basic': {
        'Greetings': {
            'basic': ['Hello ! What can i do for you ?',
                      'Hi there ! what can I do for you ?', 'Hello'
                      ],
            "nameKnown":[
                "Hi {0} ! Its a pleasure to meet you !",
                "Hey {0} its a pleasure talking to you !"
            ]
        },
        "PersonName": {
            'basic': ["Its a pleasure to meet you {0} ."],
            "nameNotFound": ["Im sorry i couldn't get your name could you please write it in title case like -> Riko "]
        },
        'Age': {
            'basic': ['I am two years old ', 'I am two']
        },
        'BotName': {
            'basic': ['I am riko', 'You can call me riko']
        },
        'Abilities': {
            'basic': ['I am still learning ! So cant do much !']
        },
        'BirthPlace': {
            'basic': ['I am from Punjab , india', 'I am punjabi', 'I am punjabi and i love food']
        },
        'Really': {
            'basic': ['To the best of my knowledge', 'Im positive !']
        },
        'Laughter': {
            'basic': ['Im glad i was able to make you smile !',
                      'See I can be funny !',
                      'And they say I dont have a sense of humor :)'
                      ]
        },
        'Cool': {
            'basic': ['cool', 'thanks']
        },
        'Bye': {
            'basic': ['Bubye !', 'Bye ! nice chatting with you !']
        },
        'Confirm': {
            'basic': ['cool ']
        },
        'Discard': {
            'basic': ['No it is then', 'agreed , no it is .']
        },
        'Praise': {
            'basic': ['Thanks ! now i think ill blush ',
                      'So nice of you to say that !'
                      ]
        },
        'Relatives': {
            'basic': ['Umm no i dont really have any relatives :)']
        },
        'Thanks': {
            'basic': ['Dont mention it ',
                      ' Im glad , please dont mention it'
                      ]
        },
        'Irrelevant': {
            'basic': ['Im sorry Im not getting you :( ',
                      'Im sorry could you please rephrase ?'
                      ]
        },
    }
}
class NewBot(Bot):
    """Bot subclass with a custom reasoning step that remembers the user's name."""

    def reason(self):
        """Pick reply tags for the current dialog turn.

        Returns a list of {'tag': ..., 'data': ...} dicts; 'data' fills the
        '{0}' placeholder in the matching message template.
        """
        # find current dialogNumber
        currentDialogNumber = self.contextManager.context['dialogs'][-1]
        currentTopIntent = self.contextManager.findCurrentTopIntent()
        currentEntities = self.contextManager.findCurrentEntities()
        output = []
        # Treat low-confidence or 'Irrelevant' classifications as "no intent".
        if currentTopIntent['confidence'] < self.confidenceLimit or currentTopIntent['name'] == 'Irrelevant':
            currentTopIntent = {}
        if currentTopIntent:
            name = currentTopIntent['name']
            if name == 'Greetings' and ("sessionVariables" in self.contextManager.context.keys()):
                # Greet the user by the name stored earlier in this session.
                personName = self.contextManager.context["sessionVariables"]["PERSON"]
                reply = {'tag': '{0}.nameKnown'.format(name), 'data': personName}
            elif name =='PersonName' and len(currentEntities) >0:
                # Remember the first PERSON entity, ignoring the bot's own name.
                # NOTE(review): if every entity is filtered out here, the [0]
                # below raises IndexError — confirm whether that can happen.
                currentEntities =[ent for ent in currentEntities if ent["kind"] =='PERSON' and ent["exactValue"].lower() != 'riko']
                self.contextManager.context["sessionVariables"] = {
                    "PERSON" :currentEntities[0]["exactValue"]
                }
                self.contextManager.updateContextTree()
                reply = {'tag': '{0}.basic'.format(name), 'data': currentEntities[0]["exactValue"]}
            elif name =='PersonName':
                reply = {'tag': '{0}.nameNotFound'.format(name), 'data': None}
            else:
                reply = {'tag': '{0}.basic'.format(name), 'data': None}
            output.append(reply)
        else:
            # Fall back to the 'Irrelevant' reply.
            irrelevant = {'tag': 'Irrelevant.basic', 'data': None}
            output.append(irrelevant)
        return output
# Build the bot from the configuration above and start the chat loop.
newB = NewBot(intentExamples, entityExamples, botMessages,
              confidenceLimit=0.5)
outputTheme = 'basic'
newB.run(theme = outputTheme)
# -
|
sampleBots/ReplyUsingNameBot.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tf] *
# language: python
# name: conda-env-tf-py
# ---
# +
# Notebook to generate new graph object for training testing your customized data
# using iCafe project folder as an example, if your data is not in the iCafe framework, you need to provide equavalent files:
# For testing only: SWC format of centerlines
# For training, needs additional SWC format of labeled centerlines (giving edge labels) and landmark list (giving node labels)
# <NAME>
# 3/7/2021
# +
import os
import numpy as np
import copy
import networkx as nx
import matplotlib.pyplot as plt
from matchvestype import matchvestype
from gnn_utils import VESTYPENUM, BOITYPENUM
from iCafePythonBasic import SnakeList, Snake, SWCNode, Point3D
# +
#supporting functions
def loadSWCFile(swcfilename):
    """Load an SWC centerline file into a SnakeList.

    Each node with pid == -1 starts a new snake (centerline); consecutive
    nodes accumulate into the current snake. Returns an empty SnakeList when
    the file does not exist.
    """
    snakelist = SnakeList()
    if not os.path.exists(swcfilename):
        print('not exist', swcfilename)
        # FIX: previously returned (snakelist, swclist) here while the
        # normal path returned only snakelist — callers doing
        # `snakelist = loadSWCFile(...)` silently got a tuple on a missing
        # file. Return the single SnakeList consistently.
        return snakelist
    # node info in SWCNode; parse every line of the file
    swclist = []
    with open(swcfilename, 'r') as fp:
        for line in fp:
            swclist.append(SWCNode.fromline(line))
    temp_Snake = []
    for i in range(len(swclist)):
        if i > 0 and swclist[i].pid == -1:
            # a new root node: close off the previous snake
            # (assumes Snake copies its node list — the original code also
            # cleared temp_Snake right after addSnake)
            snakelist.addSnake(Snake(temp_Snake))
            temp_Snake.clear()
        temp_Snake.append(swclist[i])
    # FIX: flush the final snake after the loop. The original only flushed
    # inside the non-root branch, so a trailing snake whose last node was a
    # root (pid == -1) was silently dropped.
    if temp_Snake:
        snakelist.addSnake(Snake(temp_Snake))
        temp_Snake.clear()
    return snakelist
def loadVes(path):
    """Load a labeled SWC vessel file into a SnakeList of typed snakes.

    In a labeled ("ves") SWC file each centerline begins and ends with a
    node whose type is a landmark id (all interior nodes have type 0). The
    (start, end) landmark pair is mapped to a vessel type via matchvestype.

    Raises FileNotFoundError when the file is missing.
    """
    if not os.path.exists(path):
        raise FileNotFoundError('No vessel file available', path)
    vessnakelist = SnakeList()
    cveslist = []
    starttype = -1
    endtype = -1
    with open(path, 'r') as fp:
        for line in fp:
            cswcnode = SWCNode.fromline(line)
            cveslist.append(cswcnode)
            if cswcnode.type != 0:
                if starttype == -1:
                    # first labeled node of the segment
                    starttype = cswcnode.type
                else:
                    # second labeled node: the segment is complete
                    endtype = cswcnode.type
                    cvestype = matchvestype(starttype, endtype)
                    if cvestype != -1:
                        vessnakelist.addSnake(Snake(cveslist, cvestype))
                    else:
                        # FIX: report the actual landmark pair. The original
                        # reset starttype/endtype to -1 *before* this print,
                        # so the message always showed '-1 -1'.
                        print('Unknown ves match type', starttype, endtype)
                    starttype = -1
                    endtype = -1
                    cveslist.clear()
    return vessnakelist
# construct graph with only key nodes (deg!=2)
def generateSimG(snakelist, ves_snakelist, landmark, ASSIGNNODE=0, ASSIGNEDGE=0, ASSIGNDIR=0):
swclist = snakelist.toSWCList()
if ASSIGNNODE:
if len(landmark) == 0:
raise ValueError('landmark is empty')
landmarkposmap = {}
for lmtype, lmpos in landmark:
# ignore M2/3
if lmtype in [13, 14]:
continue
landmarkposmap[lmpos.hashPos()] = lmtype
if ASSIGNEDGE:
if len(swclist) == 0:
raise ValueError('Vessnake empty')
#veslist [[] for i in range(VESTYPE))]
veslist = ves_snakelist.toVesList()
vesposmap = {}
for ctype in range(1, VESTYPENUM):
if len(veslist[ctype]) == 0:
continue
for snakei in veslist[ctype]:
# skip start and end point for ves type mapping (might appear in multiple snake)
for nodeid in range(1, len(snakei) - 1):
vesposmap[snakei[nodeid].pos.hashPos()] = ctype
if ASSIGNDIR:
availdirs = [Point3D(0, 0, -1), Point3D(0, 0, 1)]
for ag1 in range(-45, 46, 45):
for ag2 in range(0, 360, 45):
xi = np.cos(ag1 / 180 * np.pi) * np.sin(ag2 / 180 * np.pi)
yi = np.cos(ag1 / 180 * np.pi) * np.cos(ag2 / 180 * np.pi)
zi = np.sin(ag1 / 180 * np.pi)
cdir = Point3D(xi, yi, zi) # .norm()
availdirs.append(cdir)
startswcid = None # record the starting id of the seg, index is node id, value: first id is the swclist id, second is the node id in graph
G = nx.Graph()
simghash = [] # hash for all pos in simg, same id as graph id
# first node
cti = swclist[0]
startswcid = 0 # swclist id
G.add_node(len(G.nodes), swcid=cti.id, pos=cti.pos, rad=cti.rad, deg=cti.type)
simghash.append(cti.pos.hashPos())
rads = []
dists = []
for i in range(1, len(swclist)):
cnode = swclist[i]
prevnode = swclist[i - 1]
rads.append(cnode.rad)
if cnode.pid == prevnode.id:
dists.append(prevnode.pos.dist(cnode.pos))
else:
dists.append(0)
# type in raw_ves is degree
if cnode.type != 2:
# add node if not exist
if cnode.pos.hashPos() not in simghash:
cnodeGid = len(G.nodes)
G.add_node(cnodeGid, swcid=cnode.id, pos=cnode.pos, rad=cnode.rad, deg=cnode.type)
simghash.append(cnode.pos.hashPos())
if ASSIGNNODE:
cnodehash = cnode.pos.hashPos()
if cnodehash in landmarkposmap:
lmtype = landmarkposmap[cnodehash]
G.add_node(cnodeGid, boitype=lmtype)
else:
cnodeGid = simghash.index(cnode.pos.hashPos())
# rint('Line',i,'Node',cnode,'gid',cnodeGid)
# add edge with feature if has at least two rads
if cnode.pid != -1:
assert swclist[startswcid].pos.hashPos() in simghash
startGid = simghash.index(swclist[startswcid].pos.hashPos())
mdswcid = (startswcid + i) // 2
mdnode = swclist[mdswcid]
mdnodeposhash = mdnode.pos.hashPos()
if ASSIGNEDGE:
if mdnodeposhash in vesposmap:
edgetype = vesposmap[mdnodeposhash]
else:
# print('no ves for pt',mdnode)
edgetype = 0
edgetype_onehot = [0]*VESTYPENUM
edgetype_onehot[edgetype] = 1
G.add_edge(startGid, cnodeGid, dist=np.sum(dists), rad=np.mean(rads), vestype=edgetype_onehot)
else:
G.add_edge(startGid, cnodeGid, dist=np.sum(dists), rad=np.mean(rads))
#print(i,startswcid,'connect',startGid,cnodeGid,'len',len(rads))
if ASSIGNDIR:
dirgap = 3
startswcidend = min(startswcid + dirgap, startswcid + len(rads) - 1)
# add direction to startnode
dirs = G.nodes[startGid].get('dir')
if dirs is None:
dirs = np.zeros(len(availdirs))
startdir = swclist[startswcidend].pos - swclist[startswcid].pos
startdirnorm = startdir / startdir.vecLenth()
startmatchdir = startdirnorm.posMatch(availdirs)
dirs[startmatchdir] += 1
G.add_node(startGid, dir=dirs)
# add direction to endnode
endswcidend = max(i - dirgap, i - len(rads) + 1)
dirs = G.nodes[cnodeGid].get('dir')
if dirs is None:
dirs = np.zeros(len(availdirs))
enddir = swclist[endswcidend].pos - swclist[i].pos
enddirnorm = enddir / enddir.vecLenth()
endmatchdir = enddirnorm.posMatch(availdirs)
dirs[endmatchdir] += 1
G.add_node(cnodeGid, dir=dirs)
# add direction to edge
edgedir = G.nodes[startGid]['pos'] - G.nodes[cnodeGid]['pos']
# print(startGid,cnodeGid,G.nodes[startGid]['pos'],G.nodes[cnodeGid]['pos'],edgedir)
edgedirnorm = edgedir / edgedir.vecLenth()
# dirs = np.zeros(len(availdirs))
# edgematchdir = edgedirnorm.posmatch(availdirs)
# dirs[edgematchdir] += 1
# edgedirnorm = -edgedirnorm
# edgematchdir = edgedirnorm.posmatch(availdirs)
# dirs[edgematchdir] += 1
# G.add_edge(startGid,cnodeGid,dir=dirs)
if edgedirnorm.z < 0:
edgedirnorm = -edgedirnorm
G.add_edge(startGid, cnodeGid, dir=edgedirnorm.pos)
# new node from this node
startswcid = i
rads = []
dists = []
rads.append(cnode.rad)
if len(landmark):
for nodei in G.nodes():
fd = -1
for li, posi in landmark:
if posi.dist(G.nodes[nodei]['pos']) == 0:
G.add_node(nodei, boitype=li)
fd = 1
break
if fd == -1:
G.add_node(nodei, boitype=0)
return G
def refreshid(G):
    """Return a copy of G with nodes renumbered 0..N-1, attributes preserved."""
    relabeled = nx.Graph()
    mapping = {}
    # Renumber nodes in iteration order, carrying all node attributes over.
    for fresh_id, (old_id, attrs) in enumerate(G.nodes(data=True)):
        mapping[old_id] = fresh_id
        relabeled.add_node(fresh_id, **attrs)
    # Re-add every edge under the new node ids, keeping edge attributes.
    for u, v, attrs in G.edges(data=True):
        relabeled.add_edge(mapping[u], mapping[v], **attrs)
    return relabeled
# -
# # Example case
# +
# path direct to iCafe project folder; pi is the case name
pi = '76_NVIVOL1_M'
path = '//DESKTOP2/iCafe/result//NVIFull/'+pi
raw_ves_path = path+'/tracing_raw_ves_TH_'+pi+'.swc'  # unlabeled centerlines
ves_path = path+'/tracing_ves_TH_'+pi+'.swc'          # labeled centerlines
snakelist = loadSWCFile(raw_ves_path)
# -
# Peek at the loaded structure: list, first snake, first point, radius.
snakelist,snakelist[0],snakelist[0][0].pos,snakelist[0][0].rad
# SWC format should look like this
#ID, Type, X, Y, Z, Radius, ParentID
'''
1 3 229.74 274.524 103.014 7.19887 -1
2 2 229.558 273.755 103.659 8.28298 1
3 2 229.78 270.803 101.763 3.16595 2
4 2 231.004 268.459 102.427 4.12542 3
5 2 232.807 266.055 103.32 4.38879 4
6 2 234.713 263.561 103.508 4.37335 5
'''
# for original SWC (raw_ves), the type is the node degree
# for labeled SWC (ves), for each centerline its type starts with an landmark id and ends with another,
# and all the rest of points have type of 0
# Option 1: build graph WITH ground truth (training) — labeled vessels plus
# the landmark list ([landmark id, Point3D position] pairs).
mode = 'train'
ves_snakelist = loadVes(ves_path)
landmarks = [[1, Point3D(377.613,295.210,0.000)],
             [9, Point3D(357.446,255.332,74.635)],
             [11, Point3D(416.189,182.412,69.026)],
             [3, Point3D(373.302,304.061,101.510)],
             [7, Point3D(422.213,248.340,85.278)],
             [5, Point3D(321.977,267.065,109.496)],
             [6, Point3D(313.128,262.316,97.416)],
             [2, Point3D(216.033,323.601,0.000)],
             [4, Point3D(244.912,286.769,98.217)],
             [8, Point3D(229.740,274.524,103.014)],
             [18, Point3D(328.559,323.742,90.339)],
             [19, Point3D(367.778,315.978,110.521)],
             [20, Point3D(276.607,308.538,98.931)],
             [17, Point3D(312.886,298.249,0.000)],
             [12, Point3D(249.353,175.554,77.312)],
             [10, Point3D(294.019,252.755,87.805)],
             [13, Point3D(447.451,423.177,181.648)],
             [13, Point3D(445.828,429.378,167.761)],
             [13, Point3D(451.451,364.075,183.667)],
             [13, Point3D(499.877,349.704,168.476)],
             [13, Point3D(468.529,329.279,193.375)],
             [13, Point3D(440.575,217.125,193.608)],
             [13, Point3D(436.046,213.269,152.295)],
             [13, Point3D(529.754,280.611,161.089)],
             [13, Point3D(518.602,312.228,148.840)],
             [13, Point3D(505.016,271.499,138.997)],
             [14, Point3D(186.933,381.757,182.815)],
             [14, Point3D(163.279,370.695,178.394)],
             [14, Point3D(180.226,338.852,197.329)],
             [14, Point3D(153.261,336.300,171.684)],
             [14, Point3D(209.365,243.204,194.615)],
             [14, Point3D(203.413,198.937,177.906)],
             [14, Point3D(182.980,266.730,176.422)],
             [14, Point3D(153.294,218.107,158.305)]]
# Option 2: build graph WITHOUT ground truth (testing).
# NOTE(review): running this cell top-to-bottom overwrites the 'train'
# settings above — comment out whichever option you don't want.
mode = 'test'
ves_snakelist = None
landmarks = []
# ## Build Graph G
# +
#whether to trim some obsolete branches
trim=1
# Build the similarity graph; in 'train' mode node/edge ground-truth labels
# are assigned from the labeled snake list and landmarks, in 'test' mode only
# direction info is assigned.
if mode == 'train':
    Gs = generateSimG(snakelist, ves_snakelist, landmarks, ASSIGNNODE=1, ASSIGNEDGE=1, ASSIGNDIR=1)
elif mode == 'test':
    Gs = generateSimG(snakelist, ves_snakelist, landmarks, ASSIGNNODE=0, ASSIGNEDGE=0, ASSIGNDIR=1)
if trim:
    # Keep only connected components that are both long enough (summed edge
    # 'dist' > 100) and large enough (> 5 nodes); drop small fragments.
    S = []
    for c in nx.connected_components(Gs):
        Gsi = Gs.subgraph(c).copy()
        # total centerline length of this component
        gsidist = np.sum([Gs.edges[nodei]['dist'] for nodei in Gsi.edges()])
        # print(len(c),gsidist)
        if gsidist > 100 and len(Gsi.nodes()) > 5:
            S.append(Gsi)
    # sort based on length
    SSort = []
    for i in np.argsort([len(c) for c in S])[::-1]:
        SSort.append(S[i])
    if len(SSort)==0:
        # nothing survived trimming; fall back to the untrimmed graph
        G = Gs
    else:
        # merge the surviving components and renumber node ids
        G = refreshid(nx.compose_all(SSort))
else:
    G = Gs
# Color tables: index 0 = unlabeled (blue edge / red node), the rest flipped.
VESCOLORS = ['b'] + ['r'] * VESTYPENUM
NODECOLORS = ['r'] + ['b'] * BOITYPENUM
# 2-D layout for drawing: negate x/y of each node's 3-D position.
posz = {k: [-v['pos'].pos[0],-v['pos'].pos[1]] for k, v in G.nodes.items()}
if mode == 'train':
    # color by predicted/assigned vessel type and node (BOI) type
    edgecolors = [VESCOLORS[np.argmax(G.edges[v]['vestype'])] for v in G.edges()]
    nodecolors = [NODECOLORS[G.nodes[n]['boitype']] for n in G.nodes()]
else:
    # no labels available: draw everything with the "unlabeled" colors
    edgecolors = [VESCOLORS[0] for v in G.edges()]
    nodecolors = [NODECOLORS[0] for n in G.nodes()]
plt.figure(figsize=(5,5))
nx.draw_networkx(G, pos=posz, node_size=30, node_color=nodecolors, edge_color=edgecolors)
plt.show()
# Flatten node attributes for serialization: store the raw coordinate tuple
# and the node degree; zero out label types that should not be kept.
for nodei in G.nodes():
    G.nodes[nodei]['pos'] = G.nodes[nodei]['pos'].pos
    G.nodes[nodei]['deg'] = G.degree[nodei]
    if mode == 'train':
        #remove extend types for distal branches, only applicable to iCafe
        if G.nodes[nodei]['boitype'] > 22:
            G.nodes[nodei]['boitype'] = 0
    elif mode == 'test':
        G.nodes[nodei]['boitype'] = 0
if mode == 'train':
    # Merge vessel-type channels 12/13 into 5/6 (M2/M3 segments merged —
    # see the 'merge m23' note below), then clear the merged channels.
    for edgei in G.edges():
        if G.edges[edgei]['vestype'][12] > 0:
            #print('merge m23')
            G.edges[edgei]['vestype'][5] += G.edges[edgei]['vestype'][12]
            G.edges[edgei]['vestype'][12] = 0
        if G.edges[edgei]['vestype'][13] > 0:
            G.edges[edgei]['vestype'][6] += G.edges[edgei]['vestype'][13]
            G.edges[edgei]['vestype'][13] = 0
elif mode == 'test':
    # no ground truth: mark all edges untyped
    for edgei in G.edges():
        G.edges[edgei]['vestype'] = 0
# -
# ## Save graph
graphtype='graphsim'
pickle_graph_name = path + '/' + graphtype + '_TH_' + pi + '.pickle'
nx.write_gpickle(G, pickle_graph_name)
print('Graph saved', pickle_graph_name, 'Node', len(G.nodes), 'Edges', len(G.edges))
|
Generate graph.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pylab as plt
from scipy.linalg import lu
# Render figure text with LaTeX using a large sans-serif font.
plt.rcParams.update({
    "text.usetex": True,
    "font.family": "sans-serif",
    "font.sans-serif": ["Helvetica Neue"],
    "font.size": 28,
})
# Solve least squares problem for $Ax \approx b$ where
# $$A = \begin{bmatrix}
# 2 & 0\\
# -1 & 1\\
# 0 & 2\\
# \end{bmatrix},\quad b = \begin{bmatrix}1\\0\\-1
# \end{bmatrix}.
# $$
# and $Cx = d$, where
# $$
# C = \begin{bmatrix}0.6 & 1\end{bmatrix},\quad d = [-0.7]
# $$
A = np.array([
    [2, 0],
    [-1, 1],
    [0, 2]])
b = np.array([1, 0, -1])
C = np.array([[0.6, 1.]])
d = np.array([-0.7])
# x_star = np.array([1/3, -1/3])
# Objective function
def f(x1, x2):
    """Least-squares objective ||A[x1, x2] - b||^2 at a single point."""
    x = np.array([x1, x2])
    return np.sum(np.square(A.dot(x) - b))
# Vectorized version so f can be evaluated elementwise on meshgrid arrays.
f_vec = np.vectorize(f)
# Make contour data
x1 = np.linspace(-1, 1.5, 50)
x2 = np.linspace(-1.7, 1, 40)
X1, X2 = np.meshgrid(x1, x2)
Y = f_vec(X1, X2)
# Make line data
# Constraint line: solve Cx = d for x2 as a function of x1.
x2_c = (1/C[0, 1]) * (- C[0, 0] * x1 + d)
# +
def forward_substitution(L, b):
    """Solve L x = b for a lower-triangular matrix L by forward substitution."""
    size = L.shape[0]
    sol = np.zeros(size)
    for row in range(size):
        # subtract contributions of the already-solved entries
        partial = L[row, :row] @ sol[:row]
        sol[row] = (b[row] - partial) / L[row, row]
    return sol
def backward_substitution(U, b):
    """Solve U x = b for an upper-triangular matrix U by back substitution."""
    size = U.shape[0]
    sol = np.zeros(size)
    for row in range(size - 1, -1, -1):
        # subtract contributions of the entries solved below this row
        partial = U[row, row + 1:] @ sol[row + 1:]
        sol[row] = (b[row] - partial) / U[row, row]
    return sol
def con_lstsq(A, b, C, d):
    """Solve min ||Ax - b||^2 subject to Cx = d via the KKT linear system.

    Returns only the primal solution x (the trailing Lagrange multipliers
    are discarded).
    """
    p, n = C.shape
    # KKT matrix [[2 A^T A, C^T], [C, 0]] and its right-hand side.
    kkt = np.block([[2 * A.T @ A, C.T],
                    [C, np.zeros((p, p))]])
    rhs = np.hstack([2 * A.T @ b, d])
    # scipy.linalg.lu returns P, L, U with kkt = P @ L @ U, so apply P^T
    # to the right-hand side before triangular solves.
    perm, low, up = lu(kkt)
    sol = perm.T @ rhs
    sol = forward_substitution(low, sol)
    sol = backward_substitution(up, sol)
    return sol[:n]
# -
# Solution
x_star = con_lstsq(A, b, C, d)
x_star
# Make contour plot
# Draw the objective's level sets, the constraint line, and mark x*.
fig = plt.figure(figsize=(9, 9))
cs = plt.contour(X1, X2, Y, colors='black', linestyles="dashed", levels=10)
plt.clabel(cs, inline=1, fmt='%1.1f')
plt.scatter(x_star[0], x_star[1])
plt.plot(x1, x2_c, color="k")
plt.text(x_star[0]+0.05, x_star[1] + 0.05, r'$x^\star$')
plt.savefig('small_constrained_least_squares.pdf')
|
lectures/22_lecture/small_constrained_least_squares.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="hbU_2vvyR5ZK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="d9a9ed71-9dc4-4318-b9d4-9a3e6ace6c83"
# !git clone https://github.com/mgrankin/over9000.git
# + id="l8rkSx-iR9lh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="fd686c75-072a-4ca7-f1cf-18b4d4e44004"
# !git clone https://github.com/lessw2020/mish.git
# + id="uB_z2nIOR_Dg" colab_type="code" colab={}
from fastai.vision import *
# + id="fqkabKYDSKAM" colab_type="code" colab={}
path = untar_data(URLs.IMAGEWOOF)
# + id="o3H7VlcESjSg" colab_type="code" colab={}
class Mish(nn.Module):
    """Mish activation: f(x) = x * tanh(softplus(x))."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x * torch.tanh(F.softplus(x))
# + id="almLtXf2SOD4" colab_type="code" colab={}
act_fn = Mish() #nn.ReLU(inplace=True)
class Flatten(Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
def init_cnn(m):
    """Recursively initialize a module tree: zero every bias, apply
    Kaiming-normal init to Conv2d/Linear weights."""
    bias = getattr(m, 'bias', None)
    if bias is not None:
        nn.init.constant_(bias, 0)
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        nn.init.kaiming_normal_(m.weight)
    for child in m.children():
        init_cnn(child)
def conv(ni, nf, ks=3, stride=1, bias=False):
    """Conv2d with 'same'-style padding for odd kernel sizes (pad = ks // 2)."""
    padding = ks // 2
    return nn.Conv2d(ni, nf, kernel_size=ks, stride=stride,
                     padding=padding, bias=bias)
def noop(x): return x
def conv_layer(ni, nf, ks=3, stride=1, zero_bn=False, act=True):
    """Build conv -> BatchNorm (-> act_fn) as an nn.Sequential.

    zero_bn initializes the BN scale to 0 (used on a residual block's
    last BN so the block starts as an identity).
    """
    norm = nn.BatchNorm2d(nf)
    init_scale = 0. if zero_bn else 1.
    nn.init.constant_(norm.weight, init_scale)
    parts = [conv(ni, nf, ks, stride=stride), norm]
    if act:
        parts.append(act_fn)
    return nn.Sequential(*parts)
class ResBlock(Module):
    """Residual block: basic (two 3x3 convs) when expansion == 1, bottleneck
    (1x1 -> 3x3 -> 1x1) otherwise, plus an avg-pool / 1x1-conv identity path
    when stride or channel count changes."""

    def __init__(self, expansion, ni, nh, stride=1):
        nf, ni = nh * expansion, ni * expansion
        if expansion == 1:
            branch = [conv_layer(ni, nh, 3, stride=stride),
                      conv_layer(nh, nf, 3, zero_bn=True, act=False)]
        else:
            branch = [conv_layer(ni, nh, 1),
                      conv_layer(nh, nh, 3, stride=stride),
                      conv_layer(nh, nf, 1, zero_bn=True, act=False)]
        self.convs = nn.Sequential(*branch)
        # TODO: check whether act=True works better
        self.idconv = noop if ni == nf else conv_layer(ni, nf, 1, act=False)
        self.pool = noop if stride == 1 else nn.AvgPool2d(2, ceil_mode=True)

    def forward(self, x):
        shortcut = self.idconv(self.pool(x))
        return act_fn(self.convs(x) + shortcut)
def filt_sz(recep): return min(64, 2**math.floor(math.log2(recep*0.75)))
class MXResNet(nn.Sequential):
    """MXResNet (Grankin's ResNet variant): a 3-conv stem, max-pool, four
    residual stages, global average pool, and a linear classifier."""

    def __init__(self, expansion, layers, c_in=3, c_out=1000):
        # Stem: three conv layers; only the first downsamples.
        stem_szs = [c_in, 32, 64, 64]  # modified per Grankin
        stem = [conv_layer(stem_szs[i], stem_szs[i + 1],
                           stride=2 if i == 0 else 1)
                for i in range(3)]
        block_szs = [64 // expansion, 64, 128, 256, 512]
        stages = []
        for i, depth in enumerate(layers):
            stage_stride = 1 if i == 0 else 2
            stages.append(self._make_layer(expansion, block_szs[i],
                                           block_szs[i + 1], depth,
                                           stage_stride))
        super().__init__(
            *stem,
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            *stages,
            nn.AdaptiveAvgPool2d(1), Flatten(),
            nn.Linear(block_szs[-1] * expansion, c_out),
        )
        init_cnn(self)

    def _make_layer(self, expansion, ni, nf, blocks, stride):
        # First block in a stage handles channel change and downsampling.
        units = [ResBlock(expansion, ni if i == 0 else nf, nf,
                          stride if i == 0 else 1)
                 for i in range(blocks)]
        return nn.Sequential(*units)
def mxresnet(expansion, n_layers, name, pretrained=False, **kwargs):
    """Construct an MXResNet. No pretrained weights exist yet, so
    `pretrained=True` only prints a notice and returns a random-init model."""
    model = MXResNet(expansion, n_layers, **kwargs)
    if pretrained:
        #model.load_state_dict(model_zoo.load_url(model_urls[name]))
        print("No pretrained yet for MXResNet")
    return model
# Register mxresnet18/34/50/101/152 as module-level factory functions by
# binding partials of `mxresnet` onto this module object.
me = sys.modules[__name__]
for n,e,l in [
    [ 18 , 1, [2,2,2 ,2] ],
    [ 34 , 1, [3,4,6 ,3] ],
    [ 50 , 4, [3,4,6 ,3] ],
    [ 101, 4, [3,4,23,3] ],
    [ 152, 4, [3,8,36,3] ],
]:
    # n = depth name, e = block expansion, l = blocks per stage
    name = f'mxresnet{n}'
    setattr(me, name, partial(mxresnet, expansion=e, n_layers=l, name=name))
# + id="SalWyCjuSyte" colab_type="code" colab={}
data = (ImageList.from_folder(path).split_by_folder(valid='val')
.label_from_folder().transform(([flip_lr(p=0.5)], []), size=128)
.databunch(bs=64, num_workers=2)
.presize(128, scale=(0.35,1))
.normalize(imagenet_stats))
# + id="4hS9szJpTTGU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="631a79b8-600c-4738-9844-902d572cec96"
# %cd over9000/
# + id="msOltE2MTYJH" colab_type="code" colab={}
from over9000 import *
# + id="wnHK5UtLTaRz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="280fc1e4-ac8c-43c1-c25b-a1839065d214"
Over9000
# + id="L7AE5fm-TI6A" colab_type="code" colab={}
bs_rat = 64/256
opt_func=partial(Over9000, betas = (0.9,0.99), eps=1e-6)
# + id="AgQnm9pYXFPD" colab_type="code" colab={}
n = len(learn.data.train_dl)
# + id="x6H6ehjWXF55" colab_type="code" colab={}
anneal_start = int(n*5*0.7)
# + id="bGLvGNzOXK5r" colab_type="code" colab={}
from fastai.script import *
from fastai.vision import *
from fastai.callbacks import *
from fastai.distributed import *
# + id="4sjrfTvIXQF2" colab_type="code" colab={}
TrainingPhase
# + id="Xs0H8gODXekJ" colab_type="code" colab={}
lr = 1e-2
# + id="b4tiMLlhXrwL" colab_type="code" colab={}
learn = Learner(data, mxresnet50(c_out=10), wd=1e-2, opt_func=opt_func,
metrics=[accuracy, top_k_accuracy],
bn_wd=False, true_wd=True,
loss_func=LabelSmoothingCrossEntropy())
# + id="mG-NkvL2Sdyv" colab_type="code" colab={}
res = []
num_epoch=5
for x in range(5):
learn = Learner(data, mxresnet50(c_out=10), wd=1e-2, opt_func=opt_func,
metrics=[accuracy, top_k_accuracy],
bn_wd=False, true_wd=True,
loss_func=LabelSmoothingCrossEntropy())
n = len(learn.data.train_dl)
anneal_start = int(n*5*0.7)
phase0 = TrainingPhase(anneal_start).schedule_hp('lr', lr)
phase1 = TrainingPhase(n*5 - anneal_start).schedule_hp('lr', lr, anneal=annealing_cos)
phases = [phase0, phase1]
sched = GeneralScheduler(learn, phases)
learn.callbacks.append(sched)
learn.fit(num_epoch)
loss, acc, topk = learn.validate()
res.append(acc.numpy())
# + id="GZnABPBXUiwM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0da3f2d0-c57b-4c22-f393-571919f6f700"
np.mean(res)
# + id="zl_vjMWEiL7y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8dd9ee34-1135-4e6b-fd51-7c3c64f67833"
np.std(res)
# + id="hArOffsgiNP-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="4fc603d7-e67f-4956-ea10-edea52f18292"
res
# + id="6OU10gtbiumN" colab_type="code" colab={}
|
ImageWoofTests/initial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# !pip install mxnet
# +
### This notebook has two cells. First run the cell below this cell which has all the function definitions. ###
### Then run this cell to get the desired output. ###
# author: <NAME>, <EMAIL>, <NAME>, <NAME>
# This notebook is for the algorithm (MBEM) proposed in the paper "Learning From Noisy Singly-labeled Data"
# that is under review at ICLR 2018. The paper can be obtained at "https://openreview.net/pdf?id=S15IBlWAZ".
# Model Bootstrapped Expectation Maximization (MBEM) is a new algorithm for
# training a deep learning model using noisy data collected from crowdsourcing
# platforms such as Amazon Mechanical Turk. MBEM outperforms classical crowdsourcing algorithm "majority vote".
# In this notebook, we run MBEM on CIFAR-10 dataset. We synthetically generate noisy labels given the true labels
# and using hammer-spammer worker distribution for worker qualities that is explained in the paper.
# Under the setting when the total annotation budget is fixed, that is we choose whether to collect "1" noisy label
# for each of the "n" training samples or collect "r" noisy labels for each of the "n/r" training examples,
# we show empirically that it is better to choose the former case, that is collect "1" noisy label per example
# for as many training examples as possible when the total annotation budget is fixed.
# It takes a few hours to run this notebook and obtain the desired numerical results when using gpus.
# We use ResNet deep learning model for training a classifier for CIFAR-10.
# We use ResNet MXNET implementation given in https://github.com/tornadomeet/ResNet/.
import mxnet as mx
import numpy as np
import logging,os
import copy
import urllib
import logging,os,sys
from scipy import stats
from random import shuffle
from __future__ import division
def generate_workers(m,k,gamma,class_wise):
    """Sample m worker confusion matrices (k classes) from a hammer-spammer
    model.

    With probability gamma a whole worker (class_wise == 0) or a single class
    row (class_wise == 1) is a "hammer" (perfect labeler); otherwise it is a
    near-uniform "spammer", perturbed slightly to avoid numerical issues.
    Row j of each matrix is the label distribution emitted for true class j.
    """
    # Start every worker as a pure spammer: all entries 1/k.
    conf = (1/float(k))*np.ones((m,k,k))
    for i in range(m):
        if(class_wise==0):
            # Whole-matrix hammer with probability gamma ...
            if(np.random.uniform(0,1) < gamma):
                conf[i] = np.identity(k)
            else:
                # ... otherwise nudge the diagonal and renormalize each row.
                conf[i] = conf[i] + 0.01*np.identity(k)
                row_sums = np.sum(conf[i],axis =1)
                conf[i] = np.divide(conf[i],np.outer(row_sums,np.ones(k)))
        else:
            # Decide hammer vs. spammer independently for each true class.
            for j in range(k):
                if(np.random.uniform(0,1) < gamma):
                    # hammer row: deterministic correct label
                    conf[i,j,:] = 0
                    conf[i,j,j] = 1
                else:
                    # spammer row: uniform, with the diagonal bumped by a
                    # random amount in [0.1, 0.11], then normalized
                    conf[i,j,:] = 1
                    conf[i,j,j] = 1 + np.random.uniform(0.1,0.11)
                    conf[i,j,:] = conf[i,j,:]/np.sum(conf[i,j,:])
    return conf
# Downloading data for CIFAR10
# The following function downloads .rec iterator and .lst files (MXNET iterators) for CIFAR10
# that are used for training the deep learning model with noisy annotations
def download_cifar10():
    """Download the CIFAR-10 RecordIO train/validation files (.rec iterators
    plus .lst index files) into the working directory; return the local
    filenames in the order [train.rec, train.lst, val.rec, val.lst]."""
    base = 'http://data.mxnet.io/data/cifar10/'
    fname = ['train.rec', 'train.lst', 'val.rec', 'val.lst']
    remote = ['cifar10_train.rec', 'cifar10_train.lst',
              'cifar10_val.rec', 'cifar10_val.lst']
    # Python-2 urllib opener, matching the notebook's python2 kernel.
    opener = urllib.URLopener()
    for src, dst in zip(remote, fname):
        opener.retrieve(base + src, dst)
    return fname
# +
#### main function ####
def main(fname,n,n1,k,conf,samples,repeat,epochs,depth,gpus):
    """Run the full MBEM experiment and print generalization accuracy for
    three label-aggregation strategies: majority vote, weighted majority
    vote, and MBEM (posterior re-estimation via Dawid-Skene style EM).

    fname: .rec/.lst file names; n/n1: train/val sizes; k: #classes;
    conf: worker confusion matrices; samples: #training examples to use;
    repeat: redundant labels per example; epochs/depth/gpus: ResNet config.
    """
    # defining the range of samples that are to be used for training the model
    valid = np.arange(0,samples)
    # declaring the other samples to be invalid
    invalid = np.arange(samples,n)
    # calling function generate_labels_weight which generates noisy labels given the true labels
    # the true lables of the examples are ascertained from the .lst files
    # it takes as input the following:
    # name of the .lst files for the training set and the validation set
    # conf: the confusion matrices of the workers
    # repeat: number of redundant labels that need to be generated for each sample
    # for each i-th sample repeat number of workers are chosen randomly that labels the given sample
    # it returns a multi dimensional array resp_org:
    # such that resp_org[i,j,k] is 0 vector if the a-th worker was not chosen to label the i-th example
    # else it is one-hot representation of the noisy label given by the j-th worker on the i-th example
    # workers_train_label_org: it is a dictionary. it contains "repeat" number of numpy arrays, each of size (n,k)
    # the arrays have the noisy labels given by the workers
    # workers_val_label: it is a dictionary. it contains one numpy array of size (n,k)
    # that has true label of the examples in the validation set
    # workers_this_example: it is a numpy array of size (n,repeat).
    # it conatins identity of the worker that are used to generate "repeat" number of noisy labels for example
    resp_org, workers_train_label_org, workers_val_label, workers_this_example = generate_labels_weight(fname,n,n1,repeat,conf)
    #setting invalid ones 0, so that they are not used by deep learning module
    for r in range(repeat):
        workers_train_label_org['softmax'+ str(r) +'_label'][invalid] = 0
    print "Algorithm: majority vote:\t\t",
    # running the baseline algorithm where the noisy labels are aggregated using the majority voting
    # calling majority voting function to aggregate the noisy labels
    pred_mv = majority_voting(resp_org[valid])
    # call_train function takes as input the noisy labels "pred_mv", trains ResNet model for the given "depth"
    # for "epochs" run using the available "gpus".
    # it prints the generalization error of the trained model.
    _, val_acc = call_train(n,samples,k,pred_mv,workers_val_label,fname,epochs,depth,gpus)
    print "generalization_acc: " + str(val_acc)
    print "Algorithm: weighted majority vote:\t",
    # running the another baseline algorithm where the aggregation is performed using the weighted majority vote
    # creating a numpy array to store weighted majority vote labels
    naive_agg = np.zeros((n,k))
    # generating the weighted majority vote label using the original noisy labels stored in the
    # dictionary "workers_train_label_org"
    for r in range(repeat):
        naive_agg = naive_agg + (1/repeat)*copy.deepcopy(workers_train_label_org['softmax'+ str(r) +'_label'])
    # calling the "call_train" function which besides printing the generalization error
    # returns model prediction on the training examples, which is being stored in the variable "naive_pred".
    naive_pred, val_acc = call_train(n,samples,k,naive_agg[valid],workers_val_label,fname,epochs,depth,gpus)
    print "generalization_acc: " + str(val_acc)
    print "Algorithm: MBEM:\t\t\t",
    # running the proposed algorithm "MBEM: model bootstrapped expectation maximization"
    # computing posterior probabilities of the true labels given the noisy labels and the worker identities.
    # post_prob_DS function takes the noisy labels given by the workers "resp_org", model prediction obtained
    # by running "weighted majority vote" algorithm, and the worker identities.
    probs_est_labels = post_prob_DS(resp_org[valid],naive_pred[valid],workers_this_example[valid])
    algo_agg = np.zeros((n,k))
    algo_agg[valid] = probs_est_labels
    # calling the "call_train" function with aggregated labels being the posterior probability distribution of the
    # examples given the model prediction obtained using the "weighted majority vote" algorithm.
    _, val_acc = call_train(n,samples,k,algo_agg[valid],workers_val_label,fname,epochs,depth,gpus)
    print "generalization_acc: " + str(val_acc)
def call_train(n,samples,k,workers_train_label_use,workers_val_label,fname,epochs,depth,gpus):
    """Train the ResNet twice: once on the given aggregated labels, then a
    second time on the first model's own training-set predictions (a
    self-distillation pass that works better in practice than one run).

    Returns the second model's one-hot training-set predictions and its
    validation accuracy.
    """
    # Round 1: scatter the aggregated labels of the first `samples` rows into
    # an (n, k) array (the invalid tail stays all-zero), then train.
    labels = np.zeros((n,k))
    labels[np.arange(samples)] = workers_train_label_use
    pred_first_iter, val_acc = call_train_core(n,samples,k,labels,workers_val_label,fname,epochs,depth,gpus)
    # Round 2: retrain from scratch using round-1 predictions as labels.
    labels = np.zeros((n,k))
    labels[np.arange(samples)] = pred_first_iter[np.arange(samples)]
    pred_second_iter, val_acc = call_train_core(n,samples,k,labels,workers_val_label,fname,epochs,depth,gpus)
    return pred_second_iter, val_acc
def call_train_core(n,samples,k,workers_train_label_use_core,workers_val_label,fname,epochs,depth,gpus):
    """Single training run: wrap the label array in the dict layout the MXNet
    `train` function expects, train, then one-hot encode the argmax of the
    model's softmax output over the first `samples` examples."""
    workers_train_label = {'softmax0_label': workers_train_label_use_core}
    prediction, val_acc = train(gpus,fname,workers_train_label,workers_val_label,numepoch=epochs,batch_size=500,depth = depth,lr=0.5)
    model_pred = np.zeros((n,k))
    rows = np.arange(samples)
    model_pred[rows, np.argmax(prediction[0:samples],1)] = 1
    return model_pred, val_acc
def generate_labels_weight(fname,n,n1,repeat,conf):
    """Synthesize noisy crowd labels for the training set.

    Reads true labels from the train .lst file (fname[1]) and val .lst file
    (fname[3]); for each training example draws `repeat` distinct workers and
    samples a noisy label from each worker's confusion matrix row.

    Returns (resp, workers_train_label, workers_val_label,
    workers_this_example) as described in the inline comments below.
    """
    # extracting the number of workers and the number of classes from the confusion matrices
    m, k = conf.shape[0], conf.shape[1]
    # a numpy array to store true class of the training examples
    class_train = np.zeros((n), dtype = np.int)
    # reading the train.lst file and storing true class of each training example
    with open(fname[1],"r") as f1:
        content = f1.readlines()
    for i in range(n):
        # .lst format is tab-separated: index \t label \t path
        content_lst = content[i].split("\t")
        class_train[i] = int(float(content_lst[1]))
    # a dictionary to store noisy labels generated using the worker confusion matrices for each training example
    workers_train_label = {}
    # the dictionary contains "repeat" number of numpy arrays with keys named "softmax_0_label", where 0 varies
    # each array has the noisy labels for the training examples given by the workers
    for i in range(repeat):
        workers_train_label['softmax' + str(i) + '_label'] = np.zeros((n,k))
    # Generating noisy labels according the worker confusion matrices and the true labels of the examples
    # a variable to store one-hot noisy label, note that each label belongs to one of the k classes
    resp = np.zeros((n,m,k))
    # a variable to store identity of the workers that are assigned to the i-th example
    # note that "repeat" number of workers are randomly chosen from the set of [m] workers and assigned to each example
    workers_this_example = np.zeros((n,repeat),dtype=np.int)
    # iterating over each training example
    for i in range(n):
        # randomly selecting "repeat" number of workers for the i-th example
        workers_this_example[i] = np.sort(np.random.choice(m,repeat,replace=False))
        count = 0
        # for each randomly chosen worker generating noisy label according to her confusion matrix and the true label
        for j in workers_this_example[i]:
            # using the row of the confusion matrix corresponding to the true label generating the noisy label
            temp_rand = np.random.multinomial(1,conf[j,class_train[i],:])
            # storing the noisy label in the resp variable
            resp[i,j,:] = temp_rand
            # storing the noisy label in the dictionary
            workers_train_label['softmax' + str(count) + '_label'][i] = temp_rand
            count = count +1
    # note that in the dictionary each numpy array is of size only (n,k).
    # The dictionary is passed to the deep learning module
    # however, the resp variable is a numpy array of size (n,m,k).
    # it is used for performing expectation maximization on the noisy labels
    # initializing a dictionary to store one-hot representation of the true labels for the validation set
    workers_val_label = {}
    # the dictionary contains "repeat" number of numpy arrays with keys named "softmax_0_label", where 0 varies
    # each array has the true labels of the examples in the validation set
    workers_val_label['softmax' + str(0) + '_label'] = np.zeros((n1,k))
    # reading the .lst file for the validation set
    content_val_lst = np.genfromtxt(fname[3], delimiter='\t')
    # storing the true labels of the examples in the validation set in the dictionary
    for i in range(n1):
        workers_val_label['softmax' + str(0) + '_label'][i][int(content_val_lst[i,1])] = 1
    # returning the noisy responses of the workers stored in the resp numpy array,
    # the noisy labels stored in the dictionary that is used by the deep learning module
    # the true lables of the examples in the validation set stored in the dictionary
    # identity of the workers that are assigned to th each example in the training set
    return resp, workers_train_label, workers_val_label, workers_this_example
def majority_voting(resp):
    """Aggregate one-hot worker responses by majority vote.

    resp: (n, m, k) array where resp[i, j] is worker j's one-hot label for
    example i (the all-zero vector if that worker did not label it).
    Ties are broken uniformly at random.
    Returns an (n, k) array of one-hot aggregated labels.
    """
    n = resp.shape[0]
    k = resp.shape[2]
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented replacement and is exactly what np.int aliased.
    pred_mv = np.zeros((n), dtype = int)
    for i in range(n):
        # all labels that received the maximum number of votes
        poss_pred = np.where(np.sum(resp[i],0) == np.max(np.sum(resp[i],0)))[0]
        shuffle(poss_pred)
        # choosing a label randomly among all the labels that have got the highest number of votes
        pred_mv[i] = poss_pred[0]
    pred_mv_vec = np.zeros((n,k))
    # returning one-hot representation of the majority vote label
    pred_mv_vec[np.arange(n), pred_mv] = 1
    return pred_mv_vec
def post_prob_DS(resp_org,e_class,workers_this_example):
    """One Dawid-Skene EM step: estimate worker confusion matrices assuming
    the model prediction `e_class` is ground truth, then recompute the
    posterior distribution of each example's true label from the workers'
    noisy one-hot responses `resp_org` (shape (n, m, k)).

    Note: `e_class` is overwritten in place with the posteriors and returned.
    """
    # computes posterior probability distribution of the true label given the noisy labels annotated by the workers
    # and model prediction
    n = resp_org.shape[0]
    m = resp_org.shape[1]
    k = resp_org.shape[2]
    repeat = workers_this_example.shape[1]
    temp_class = np.zeros((n,k))
    e_conf = np.zeros((m,k,k))
    temp_conf = np.zeros((m,k,k))
    #Estimating confusion matrices of each worker by assuming model prediction "e_class" is the ground truth label
    for i in range(n):
        for j in workers_this_example[i]: #range(m)
            temp_conf[j,:,:] = temp_conf[j,:,:] + np.outer(e_class[i],resp_org[i,j])
    #regularizing confusion matrices to avoid numerical issues
    for j in range(m):
        for r in range(k):
            if (np.sum(temp_conf[j,r,:]) ==0):
                # assuming worker is spammer for the particular class if there is no estimation for that class for that worker
                temp_conf[j,r,:] = 1/k
            else:
                # assuming there is a non-zero probability of each worker assigning labels for all the classes
                temp_conf[j,r,:][temp_conf[j,r,:]==0] = 1e-10
        # row-normalize so each row is a proper label distribution
        e_conf[j,:,:] = np.divide(temp_conf[j,:,:],np.outer(np.sum(temp_conf[j,:,:],axis =1),np.ones(k)))
    # Estimating posterior distribution of the true labels using confusion matrices of the workers and the original
    # noisy labels annotated by the workers
    for i in range(n):
        for j in workers_this_example[i]:
            if (np.sum(resp_org[i,j]) ==1):
                # accumulate log-likelihood of each candidate true class
                temp_class[i] = temp_class[i] + np.log(np.dot(e_conf[j,:,:],np.transpose(resp_org[i,j])))
        # exponentiate and normalize to a probability distribution
        temp_class[i] = np.exp(temp_class[i])
        temp_class[i] = np.divide(temp_class[i],np.outer(np.sum(temp_class[i]),np.ones(k)))
        e_class[i] = temp_class[i]
    return e_class
# The following code implements ResNet using MXNET. It is copied from https://github.com/tornadomeet/ResNet/.
# The following code implements ResNet using MXNET. It is copied from https://github.com/tornadomeet/ResNet/.
def train(gpus,fname,workers_train_label,workers_val_label,numepoch,batch_size,depth = 20,lr=0.5):
    """Train a CIFAR-10 ResNet on the given (possibly soft) labels.

    gpus: comma-separated GPU ids or None for CPU; fname: .rec/.lst files;
    workers_train_label / workers_val_label: dicts holding 'softmax0_label'
    arrays of shape (n, k) / (n1, k).

    Training progress is logged to tr_err.txt; the checkpoint with the best
    validation accuracy is reloaded and its softmax outputs over the training
    set are returned together with that validation accuracy.
    """
    output_filename = "tr_err.txt"
    model_num = 1
    # Route the training log to a fresh file; max_val_epoch parses it later.
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if os.path.isfile(output_filename):
        os.remove(output_filename)
    hdlr = logging.FileHandler(output_filename)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    kv = mx.kvstore.create('device')
    ### training iterator
    train1 = mx.io.ImageRecordIter(
        path_imgrec = fname[0],
        label_width = 1,
        data_name = 'data',
        label_name = 'softmax0_label',
        data_shape = (3, 32, 32),
        batch_size = batch_size,
        pad = 4,
        fill_value = 127,
        rand_crop = True,
        max_random_scale = 1.0,
        min_random_scale = 1.0,
        rand_mirror = True,
        shuffle = False,
        num_parts = kv.num_workers,
        part_index = kv.rank)
    ### Validation iterator
    val1 = mx.io.ImageRecordIter(
        path_imgrec = fname[2],
        label_width = 1,
        data_name = 'data',
        label_name = 'softmax0_label',
        batch_size = batch_size,
        data_shape = (3, 32, 32),
        rand_crop = False,
        rand_mirror = False,
        pad = 0,
        num_parts = kv.num_workers,
        part_index = kv.rank)
    n = workers_train_label['softmax0_label'].shape[0]
    k = workers_train_label['softmax0_label'].shape[1]
    n1 = workers_val_label['softmax0_label'].shape[0]
    # Pair each image iterator with an NDArrayIter carrying the (n, k) soft
    # labels; MultiIter zips images from the first with labels from the second.
    train2 = mx.io.NDArrayIter(np.zeros(n), workers_train_label, batch_size, shuffle = False,)
    train_iter = MultiIter([train1,train2])
    val2 = mx.io.NDArrayIter(np.zeros(n1), workers_val_label, batch_size = batch_size,shuffle = False,)
    val_iter = MultiIter([val1,val2])
    # Only non-bottleneck depths of the form 6u + 2 (< 164) are supported.
    if((depth-2)%6 == 0 and depth < 164):
        per_unit = [int((depth-2)/6)]
        filter_list = [16, 16, 32, 64]
        bottle_neck = False
    else:
        raise ValueError("no experiments done on detph {}, you can do it youself".format(depth))
    units = per_unit*3
    # NOTE(review): `resnet` is defined later in this notebook (after
    # residual_unit) — confirm it is in scope before calling train.
    symbol = resnet(units=units, num_stage=3, filter_list=filter_list, num_class=k,data_type="cifar10",
                    bottle_neck = False, bn_mom=0.9, workspace=512,
                    memonger=False)
    devs = mx.cpu() if gpus is None else [mx.gpu(int(i)) for i in gpus.split(',')]
    epoch_size = max(int(n / batch_size / kv.num_workers), 1)
    if not os.path.exists("./model" + str(model_num)):
        os.mkdir("./model" + str(model_num))
    model_prefix = "model"+ str(model_num) + "/resnet-{}-{}-{}".format("cifar10", depth, kv.rank)
    checkpoint = mx.callback.do_checkpoint(model_prefix)
    def custom_metric(label,softmax):
        # accuracy against soft labels: compare argmax of prediction and label
        return len(np.where(np.argmax(softmax,1)==np.argmax(label,1))[0])/float(label.shape[0])
    #there is only one softmax layer with respect to which error of all the labels are computed
    output_names = []
    output_names = output_names + ['softmax' + str(0) + '_output']
    eval_metrics = mx.metric.CustomMetric(custom_metric,name = 'accuracy', output_names=output_names, label_names=workers_train_label.keys())
    model = mx.mod.Module(
        context = devs,
        symbol = mx.sym.Group(symbol),
        data_names = ['data'],
        label_names = workers_train_label.keys(),#['softmax0_label']
        )
    lr_scheduler = multi_factor_scheduler(0, epoch_size, step=[40, 50], factor=0.1)
    optimizer_params = {
        'learning_rate': lr,
        'momentum' : 0.9,
        'wd' : 0.0001,
        'lr_scheduler': lr_scheduler}
    model.fit(
        train_iter,
        eval_data = val_iter,
        eval_metric = eval_metrics,
        kvstore = kv,
        batch_end_callback = mx.callback.Speedometer(batch_size, 50),
        epoch_end_callback = checkpoint,
        optimizer = 'nag',
        optimizer_params = optimizer_params,
        num_epoch = numepoch,
        initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2),
        )
    # Pick the epoch with the best validation accuracy from the parsed log.
    epoch_max_val_acc, train_acc, val_acc = max_val_epoch(output_filename)
    #print "val-acc: " + str(val_acc)
    # Prediction on Training data
    sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix,epoch_max_val_acc)
    model = mx.mod.Module(
        context = devs,
        symbol = sym,
        data_names = ['data'],
        label_names = workers_train_label.keys(),#['softmax0_label']
        )
    model.bind(for_training=False, data_shapes=train_iter.provide_data,
               label_shapes=train_iter.provide_label,)
    model.set_params(arg_params, aux_params, allow_missing=True)
    outputs = model.predict(train_iter)
    if type(outputs) is list:
        return outputs[0].asnumpy(), val_acc
    else:
        return outputs.asnumpy(), val_acc
def max_val_epoch(filename):
    """Parse an MXNet training log and return a 3-tuple:
    (1-based epoch with the best validation accuracy,
     train accuracy at that epoch, best validation accuracy)."""
    import re
    tr_re = re.compile('.*?]\sTrain-accuracy=([\d\.]+)')
    va_re = re.compile('.*?]\sValidation-accuracy=([\d\.]+)')
    # matches "Epoch[<i>] V...", i.e. only validation-accuracy lines
    epoch_re = re.compile('Epoch\[(\d+)\] V+?')
    log = open(filename, 'r').read()
    val_acc = [float(x) for x in va_re.findall(log)]
    train_acc = [float(x) for x in tr_re.findall(log)]
    best = np.argmax(val_acc)
    epoch_best = [int(x) for x in epoch_re.findall(log)][best]
    # checkpoints are numbered from 1, hence the +1
    return epoch_best + 1, train_acc[best], val_acc[best]
class MultiIter(mx.io.DataIter):
    """Zip two MXNet DataIters into one: batches take their data from the
    first iterator and their labels from the second; both advance and reset
    in lockstep."""

    def __init__(self, iter_list):
        self.iters = iter_list

    def next(self):
        batches = [it.next() for it in self.iters]
        data = [t for t in batches[0].data]
        label = [t for t in batches[1].label]
        return mx.io.DataBatch(data=data, label=label, pad=0)

    def reset(self):
        for it in self.iters:
            it.reset()

    @property
    def provide_data(self):
        return [t for t in self.iters[0].provide_data]

    @property
    def provide_label(self):
        return [t for t in self.iters[1].provide_label]
def multi_factor_scheduler(begin_epoch, epoch_size, step=(40, 50), factor=0.1):
    """Build an MXNet MultiFactorScheduler for resumed training.

    Parameters
    ----------
    begin_epoch : int
        Epoch the run (re)starts from; milestones already passed are dropped.
    epoch_size : int
        Number of batches per epoch (converts epoch milestones to batch counts).
    step : sequence of int
        Epochs at which the learning rate is multiplied by ``factor``.
        (Tuple default avoids the shared-mutable-default pitfall; lists
        still work for callers.)
    factor : float
        Multiplicative learning-rate decay applied at each milestone.

    Returns
    -------
    mx.lr_scheduler.MultiFactorScheduler or None
        None when every milestone lies at or before ``begin_epoch``.
    """
    # Milestones (in batches) that are still ahead of the resume point.
    step_ = [epoch_size * (x - begin_epoch) for x in step if x - begin_epoch > 0]
    return mx.lr_scheduler.MultiFactorScheduler(step=step_, factor=factor) if step_ else None
'''
Reproducing paper:
He, Zhang, Ren, Sun. "Identity Mappings in Deep Residual Networks"
'''
def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck=True, bn_mom=0.9, workspace=512, memonger=False):
    """Return a ResNet unit symbol (pre-activation ordering: BN -> ReLU -> Conv).

    Parameters
    ----------
    data : Symbol
        Input symbol
    num_filter : int
        Number of output channels
    stride : tuple
        Stride used in convolution
    dim_match : Boolean
        True means channel number between input and output is the same, otherwise means differ
    name : str
        Base name of the operators
    bottle_neck : Boolean
        Use the 3-conv bottleneck variant (1x1 -> 3x3 -> 1x1) instead of two 3x3 convs
    bn_mom : float
        Momentum for the BatchNorm running statistics
    workspace : int
        Workspace used in convolution operator
    memonger : Boolean
        Mark the shortcut for memory-mirroring optimization
    """
    if bottle_neck:
        # the same as https://github.com/facebook/fb.resnet.torch#notes, a bit different from the original paper
        bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
        act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
        # 1x1 conv squeezes channels to a quarter of num_filter
        conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter*0.25), kernel=(1,1), stride=(1,1), pad=(0,0),
                                   no_bias=True, workspace=workspace, name=name + '_conv1')
        bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
        act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
        # 3x3 conv carries the (possibly strided) spatial work
        conv2 = mx.sym.Convolution(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=stride, pad=(1,1),
                                   no_bias=True, workspace=workspace, name=name + '_conv2')
        bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
        act3 = mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3')
        # 1x1 conv restores the full channel count
        conv3 = mx.sym.Convolution(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
                                   workspace=workspace, name=name + '_conv3')
        if dim_match:
            shortcut = data
        else:
            # Projection shortcut (1x1 conv) when channels or stride change
            shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
                                          workspace=workspace, name=name+'_sc')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        return conv3 + shortcut
    else:
        # Basic (non-bottleneck) unit: two 3x3 convs at full width.
        bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1')
        act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
        conv1 = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1),
                                   no_bias=True, workspace=workspace, name=name + '_conv1')
        bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
        act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
        conv2 = mx.sym.Convolution(data=act2, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
                                   no_bias=True, workspace=workspace, name=name + '_conv2')
        if dim_match:
            shortcut = data
        else:
            shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
                                          workspace=workspace, name=name+'_sc')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        return conv2 + shortcut
def resnet(units, num_stage, filter_list, num_class, data_type, bottle_neck=True, bn_mom=0.9, workspace=512, memonger=False):
    """Return a ResNet symbol for cifar10 or imagenet.

    Parameters
    ----------
    units : list
        Number of units in each stage
    num_stage : int
        Number of stages
    filter_list : list
        Channel size of each stage
    num_class : int
        Output size of symbol
    data_type : str
        Dataset type; only 'cifar10' and 'imagenet' are supported
    workspace : int
        Workspace used in convolution operator

    Returns
    -------
    list
        ``[blocked log-softmax output, cross-entropy MakeLoss]`` — the first
        entry exposes predictions, the second drives training.
    """
    num_unit = len(units)
    assert(num_unit == num_stage)
    data = mx.sym.Variable(name='data')
    # Normalize raw input with a fixed-gamma BatchNorm.
    data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data')
    if data_type == 'cifar10':
        # Small 32x32 inputs: 3x3 stem, no downsampling.
        body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(3, 3), stride=(1,1), pad=(1, 1),
                                  no_bias=True, name="conv0", workspace=workspace)
    elif data_type == 'imagenet':
        # Large inputs: 7x7/2 stem followed by 3x3/2 max-pool.
        body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(7, 7), stride=(2,2), pad=(3, 3),
                                  no_bias=True, name="conv0", workspace=workspace)
        body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
        body = mx.sym.Activation(data=body, act_type='relu', name='relu0')
        body = mx.symbol.Pooling(data=body, kernel=(3, 3), stride=(2,2), pad=(1,1), pool_type='max')
    else:
        raise ValueError("do not support {} yet".format(data_type))
    for i in range(num_stage):
        # First unit of every stage after the first downsamples with stride 2.
        body = residual_unit(body, filter_list[i+1], (1 if i==0 else 2, 1 if i==0 else 2), False,
                             name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, workspace=workspace,
                             memonger=memonger)
        for j in range(units[i]-1):
            body = residual_unit(body, filter_list[i+1], (1,1), True, name='stage%d_unit%d' % (i + 1, j + 2),
                                 bottle_neck=bottle_neck, workspace=workspace, memonger=memonger)
    bn1 = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn1')
    relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
    # Although kernel is not used here when global_pool=True, we should put one
    pool1 = mx.symbol.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
    flat = mx.symbol.Flatten(data=pool1)
    fc1 = mx.symbol.FullyConnected(data=flat, num_hidden=num_class, name='fc1')
    softmax0 = mx.sym.log_softmax(fc1)
    # BlockGrad exposes the log-probabilities as an output without backprop through them.
    softmax0_output = mx.sym.BlockGrad(data = softmax0,name = 'softmax0')
    loss = [softmax0_output]
    label = mx.sym.Variable(name='softmax0_label')
    # Cross-entropy against (possibly soft) labels: -sum(label * log_softmax).
    ce = -mx.sym.sum(mx.sym.sum(mx.sym.broadcast_mul(softmax0,label),1))
    loss[:] = loss + [mx.symbol.MakeLoss(ce, normalization='batch')]
    return loss
# +
# download data
fname = download_cifar10()
# setting up values according to CIFAR10 dataset
# n is total number of training samples for CIFAR10
# n1 is the total number of test samples for CIFAR10
# k is the number of classes
n, n1, k = 50000, 10000, 10
# setting the number of gpus that are available
# gpus = '0,1,2,3' # if there are no gpus available set it to None.
gpus = None
# m is the number of workers, gamma is the worker quality,
# class_wise is the binary variable: takes value 1 if workers are class_wise hammer spammer
# and 0 if workers are hammer-spammer
# k is the number of classification classes,
# epochs is the number of epochs for ResNet model
m, gamma, class_wise, epochs, depth = 100, 0.2, 0, 60, 20
# calling function to generate confusion matrices of workers
conf = generate_workers(m, k, gamma, class_wise)
# calling the main function that takes as input the following:
# name of .rec iterators and .lst files to operate on,
# worker confusion matrices,
# number of epochs for running ResNet model, depth of the model,
# number of gpus available on the machine,
# samples: number of samples to be used for training the model,
# repeat: the number of redundant noisy labels to be used for each training example,
# that are generated using the worker confusion matrices
# it prints the generalization error of the model on set aside test data
# note that samples*repeat is approximately the same for each pair,
# which implies that the total annotation budget is fixed.
for repeat, samples in [(13, 4000), (7, 7000), (5, 10000), (3, 17000), (1, 50000)]:
    # Parenthesized print works under both Python 2 and Python 3
    # (the original used a Python-2-only print statement).
    print("\nnumber of training examples: " + str(samples) + "\t redundancy: " + str(repeat))
    # calling the main function
    main(fname, n, n1, k, conf, samples, repeat, epochs, depth, gpus)
# -
|
mltrain-nips-2017/forough_arabshahi/MBEM.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Distance Matrix
# imports
from sklearn import datasets
import numpy as np
# Load the iris dataset (a sklearn Bunch: dict-like container of data + metadata).
dataset = datasets.load_iris()
# Inspect the available keys.
dataset.keys()
dataset["feature_names"]
data = dataset["data"]
# data # data is a numpy array data structure. Think of it as a matrix of data (or as an excel spreadsheet)
data.shape
# +
# euclidean distance of 2 observations
# -
# initialize distance matrix. What will be its final shape? (n x n — one row/column per observation)
dist = []
# +
# Build the distance matrix. Use 2 for loops, the append list method and the euclidean distance formula
# -
# NOTE(review): exercise scaffolding — dist stays empty until the student
# fills in the build loop above, so the heatmap below renders blank.
dist
# another import (usually all imports are done at the top of the script/ notebook)
import seaborn as sns
sns.heatmap(dist)
# # Plotting data:
# Don't worry about the code as that's not the objective of the exercise and we will learn how to plot data in future classes
# ### How can we represent an observation in a N-dimensional Space
# another import (usually all imports are done at the top of the script/ notebook)
import matplotlib.pyplot as plt
# 2D scatter plot of the first two features
plt.scatter(data[:, 0], data[:, 1])
plt.xlabel(dataset["feature_names"][0])
plt.ylabel(dataset["feature_names"][1])
plt.show()
# 1D scatter plot: collapse every observation onto y = 0
plt.scatter(data[:, 0], [0 for i in range(data.shape[0])])
plt.xlabel(dataset["feature_names"][0])
plt.show()
# 3D scatter plot of the first three features
fig = plt.figure(figsize=(14, 7)) # defining a figure so we can add a 3d subplot
ax = fig.add_subplot(111, projection="3d")
ax.scatter(data[:, 0], data[:, 1], data[:, 2])
ax.set_xlabel(dataset["feature_names"][0])
ax.set_ylabel(dataset["feature_names"][1])
ax.set_zlabel(dataset["feature_names"][2])
plt.show()
# ## Finding nearest neighbors
# get variables to save closest neighbors later (9e99 acts as a +infinity sentinel)
min_args, min_dist = (None, 9e99)
for id_r, row in enumerate(dist):
    # CODE HERE
    # NOTE(review): exercise scaffolding — the loop body is intentionally left
    # for the student; as written this cell is a syntax error in plain Python.
min_args
print(data[min_args[0]])
print(data[min_args[1]])
print('minimum distance:\t', min_dist)
# ## Define functions
# Why do we want to define functions in this case?
# +
def distance_matrix(data):
    """Compute the pairwise Euclidean distance matrix for a set of observations.

    Parameters
    ----------
    data : sequence of sequences (or 2-D numpy array)
        Each row is one observation; each column one feature.

    Returns
    -------
    list of list of float
        Symmetric n x n matrix whose entry [i][j] is the Euclidean
        distance between observations i and j (0.0 on the diagonal).
    """
    dist = []
    for obs_a in data:
        row = []
        for obs_b in data:
            # Euclidean distance: square root of summed squared coordinate deltas.
            row.append(sum((a - b) ** 2 for a, b in zip(obs_a, obs_b)) ** 0.5)
        dist.append(row)
    return dist
def closest_points(dist_matrix):
    """Find the pair of distinct observations with the smallest distance.

    Parameters
    ----------
    dist_matrix : square matrix (list of lists)
        Pairwise distances, as produced by ``distance_matrix``.

    Returns
    -------
    tuple
        ``(min_args, min_dist)`` where ``min_args`` is the ``(row, col)``
        index pair of the closest two distinct points and ``min_dist`` is
        their distance; ``min_args`` is None for matrices with fewer than
        two points.
    """
    min_args, min_dist = (None, 9e99)
    for row_idx, row in enumerate(dist_matrix):
        for col_idx, d in enumerate(row):
            # Skip the diagonal: every point is trivially closest to itself.
            if row_idx != col_idx and d < min_dist:
                min_args, min_dist = ((row_idx, col_idx), d)
    return min_args, min_dist
# -
# ## Finding the `n` shortest distances
# +
dist_matrix = distance_matrix(data)
n_distances = 10
# CODE HERE
# NOTE(review): exercise scaffolding — `distances` is expected to hold the
# n_distances smallest pairwise distances once the student fills this in;
# as written the name is undefined and this cell raises NameError.
distances
|
notebooks/lab02_close_neighbors.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ralsouza/python_fundamentos/blob/master/src/06_Modulos_Analise_de_Dados/13_Matplotlib_Charts.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="oSSJ-Pj_-OIe" colab_type="text"
# ## Bar Charts
# + id="7ZT_et_l-t5N" colab_type="code" colab={}
# matplotlib.pyplot is a collection of functions and styles that let matplotlib be used Matlab-style
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline
# + id="nL1SUqOiLSWt" colab_type="code" colab={}
# Sample data for the first bar chart.
x = [2,4,6,8,10]
y = [6,7,8,2,4]
# + id="aVvY_2LG-ahT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="f23c07ab-21c9-4278-ed2d-a3ebb417ccf0"
# Create a simple bar chart
plt.bar(x,y,label='My Bars',color='b')
plt.legend()
plt.show()
# + id="Ojv6V7WvAAvx" colab_type="code" colab={}
# Second series, offset from the first so the bars interleave.
x2 = [1,3,5,7,9]
y2 = [7,8,2,4,2]
# + id="XdzAquSr_MCj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="34e9a096-4867-4a62-c1ad-3a0cc7b6e544"
# Create a clustered bar chart (two series drawn on the same axes)
plt.bar(x,y,label='My Bars blue', color='b')
plt.bar(x2,y2,label='My Bars red', color='r')
plt.legend()
plt.show()
# + id="9RuP8ZS6Ao33" colab_type="code" colab={}
# Sample ages reused by the bar chart and histograms below.
ages = [22,65,45,55,21,22,34,42,41,4,99,101,121,122,130,111,115,80,75,64,54,44,64,13,18,48]
# + id="EAlo42ELFff-" colab_type="code" colab={}
# One x-position per age value.
ids = [x for x in range(len(ages))]
# + id="1PSmdJYcFlGH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="7568f8fc-b99d-497e-fbc1-05f59492f1e8"
# One bar per individual (ages not yet aggregated into bins).
plt.bar(ids, ages)
plt.show()
# + [markdown] id="o5TL88ErFuFa" colab_type="text"
# ## Create a Histogram
# + id="F8G1WQ03FpeT" colab_type="code" colab={}
# Bin edges grouping ages into decades.
bins = [0,10,20,30,40,50,60,70,80,90,100,110,120,130]
# + id="D4FYnnuTF8gw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="1a2a0370-0a5f-4b32-c61a-08a7f0b19c5b"
# Classic bar-style histogram; rwidth leaves a gap between bars.
plt.hist(ages,bins,histtype='bar',rwidth=0.8)
plt.show()
# + id="jMwic1evGGOC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="61e0073e-46b8-4481-c88b-e38ec0e959fd"
# Another histogram: 'stepfilled' draws one continuous filled outline.
plt.hist(ages,bins,histtype='stepfilled',rwidth=0.8)
plt.show()
# + [markdown] id="18j-1wd6GklO" colab_type="text"
# ## Scatterplot
# + id="Rrbl7gi8GhZU" colab_type="code" colab={}
# Sample points for the scatter plot (reuses the names x and y from above).
x = [1,2,3,4,5,6,7,8]
y = [5,2,4,5,6,8,4,8]
# + id="ywwdF8dAG6o7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="9f78a41d-377e-4412-a482-e44f811df7a3"
# s=100 sets the marker size (in points squared).
plt.scatter(x,y,label='Points',color='r',marker='o',s=100)
plt.legend()
plt.show()
# + [markdown] id="ilpMjC_3HME_" colab_type="text"
# ## Stack Points
# + id="FJg8qQEtHJI3" colab_type="code" colab={}
# Hours per activity for each of five days.
days = [1,2,3,4,5]
# NOTE(review): sleep[3] == 77 looks like a typo for 7 — confirm intent.
sleep = [7,8,6,77,7]
eat = [2,3,4,5,3]
work = [7,8,7,2,2]
walk = [8,5,7,8,13]
# + id="QBE4b1AyIRTH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="44cd4c30-e5d3-4e7d-cca3-47b22821f625"
# Stacked area chart of the daily activities.
plt.stackplot(days,sleep,eat,work,walk,colors=['m','c','r','k','b'])
plt.show()
# + [markdown] id="R16kkT0mInEq" colab_type="text"
# ## Pie Chart
# + id="OwSLOielIfPI" colab_type="code" colab={}
# Slice sizes, labels and colors for the pie chart.
section = [7,2,2,13]
activities = ['sleep','eat','work','walk']
colors = ['c','m','r','k']
# + id="I_27liqxI-DT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 248} outputId="1997f82d-2026-457c-ab40-e3c25bca54df"
# explode=(0,0.1,0,0) pulls the second slice ('eat') slightly out of the pie.
plt.pie(section,labels=activities,colors=colors,startangle=90,shadow=True,explode=(0,0.1,0,0))
plt.show()
|
src/06_Modulos_Analise_de_Dados/13_Matplotlib_Charts.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (dev)
# language: python
# name: dev
# ---
# + [markdown] id="N7RVbSmDYYct"
# # Machine Learning Trading Bot
#
# In this Challenge, you’ll assume the role of a financial advisor at one of the top five financial advisory firms in the world. Your firm constantly competes with the other major firms to manage and automatically trade assets in a highly dynamic environment. In recent years, your firm has heavily profited by using computer algorithms that can buy and sell faster than human traders.
#
# The speed of these transactions gave your firm a competitive advantage early on. But, people still need to specifically program these systems, which limits their ability to adapt to new data. You’re thus planning to improve the existing algorithmic trading systems and maintain the firm’s competitive advantage in the market. To do so, you’ll enhance the existing trading signals with machine learning algorithms that can adapt to new data.
#
# ## Instructions:
#
# Use the starter code file to complete the steps that the instructions outline. The steps for this Challenge are divided into the following sections:
#
# * Establish a Baseline Performance
#
# * Tune the Baseline Trading Algorithm
#
# * Evaluate a New Machine Learning Classifier
#
# * Create an Evaluation Report
#
# #### Establish a Baseline Performance
#
# In this section, you’ll run the provided starter code to establish a baseline performance for the trading algorithm. To do so, complete the following steps.
#
# Open the Jupyter notebook. Restart the kernel, run the provided cells that correspond with the first three steps, and then proceed to step four.
#
# 1. Import the OHLCV dataset into a Pandas DataFrame.
#
# 2. Generate trading signals using short- and long-window SMA values.
#
# 3. Split the data into training and testing datasets.
#
# 4. Use the `SVC` classifier model from SKLearn's support vector machine (SVM) learning method to fit the training data and make predictions based on the testing data. Review the predictions.
#
# 5. Review the classification report associated with the `SVC` model predictions.
#
# 6. Create a predictions DataFrame that contains columns for “Predicted” values, “Actual Returns”, and “Strategy Returns”.
#
# 7. Create a cumulative return plot that shows the actual returns vs. the strategy returns. Save a PNG image of this plot. This will serve as a baseline against which to compare the effects of tuning the trading algorithm.
#
# 8. Write your conclusions about the performance of the baseline trading algorithm in the `README.md` file that’s associated with your GitHub repository. Support your findings by using the PNG image that you saved in the previous step.
#
# #### Tune the Baseline Trading Algorithm
#
# In this section, you’ll tune, or adjust, the model’s input features to find the parameters that result in the best trading outcomes. (You’ll choose the best by comparing the cumulative products of the strategy returns.) To do so, complete the following steps:
#
# 1. Tune the training algorithm by adjusting the size of the training dataset. To do so, slice your data into different periods. Rerun the notebook with the updated parameters, and record the results in your `README.md` file. Answer the following question: What impact resulted from increasing or decreasing the training window?
#
# > **Hint** To adjust the size of the training dataset, you can use a different `DateOffset` value—for example, six months. Be aware that changing the size of the training dataset also affects the size of the testing dataset.
#
# 2. Tune the trading algorithm by adjusting the SMA input features. Adjust one or both of the windows for the algorithm. Rerun the notebook with the updated parameters, and record the results in your `README.md` file. Answer the following question: What impact resulted from increasing or decreasing either or both of the SMA windows?
#
# 3. Choose the set of parameters that best improved the trading algorithm returns. Save a PNG image of the cumulative product of the actual returns vs. the strategy returns, and document your conclusion in your `README.md` file.
#
# #### Evaluate a New Machine Learning Classifier
#
# In this section, you’ll use the original parameters that the starter code provided. But, you’ll apply them to the performance of a second machine learning model. To do so, complete the following steps:
#
# 1. Import a new classifier, such as `AdaBoost`, `DecisionTreeClassifier`, or `LogisticRegression`. (For the full list of classifiers, refer to the [Supervised learning page](https://scikit-learn.org/stable/supervised_learning.html) in the scikit-learn documentation.)
#
# 2. Using the original training data as the baseline model, fit another model with the new classifier.
#
# 3. Backtest the new model to evaluate its performance. Save a PNG image of the cumulative product of the actual returns vs. the strategy returns for this updated trading algorithm, and write your conclusions in your `README.md` file. Answer the following questions: Did this new model perform better or worse than the provided baseline model? Did this new model perform better or worse than your tuned trading algorithm?
#
# #### Create an Evaluation Report
#
# In the previous sections, you updated your `README.md` file with your conclusions. To accomplish this section, you need to add a summary evaluation report at the end of the `README.md` file. For this report, express your final conclusions and analysis. Support your findings by using the PNG images that you created.
#
# + colab={"base_uri": "https://localhost:8080/"} id="mYMA_S8YainO" executionInfo={"status": "ok", "timestamp": 1647802373768, "user_tz": 420, "elapsed": 14025, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="194d37cb-66a5-4086-ebb4-6fa1e1233a45"
# Install the required libraries
from IPython.display import clear_output
try:
# !pip install pystan
# !pip install fbprophet
# !pip install hvplot
# !pip install holoviews
except:
print("Error installing libraries")
finally:
clear_output()
print('Libraries successfully installed')
# + id="ZK-xFC6TYYcw" executionInfo={"status": "ok", "timestamp": 1647802695643, "user_tz": 420, "elapsed": 322, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}}
# Imports
import pandas as pd
import numpy as np
from pathlib import Path
import hvplot.pandas
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.preprocessing import StandardScaler
from pandas.tseries.offsets import DateOffset
from sklearn.metrics import classification_report
# + [markdown] id="pSzitNnwYYcx"
# ---
#
# ## Establish a Baseline Performance
#
# In this section, you’ll run the provided starter code to establish a baseline performance for the trading algorithm. To do so, complete the following steps.
#
# Open the Jupyter notebook. Restart the kernel, run the provided cells that correspond with the first three steps, and then proceed to step four.
#
# + [markdown] id="-NEay74fYYcx"
# ### Step 1: Import the OHLCV dataset into a Pandas DataFrame.
# + colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "Ly8gQ29weXJpZ2h0IDIwMTcgR29vZ2xlIExMQwovLwovLyBMaWNlbnNlZCB1bmRlciB0aGUgQXBhY2hlIExpY2Vuc2UsIFZlcnNpb24gMi4wICh0aGUgIkxpY2Vuc2UiKTsKLy8geW91IG1heSBub3QgdXNlIHRoaXMgZmlsZSBleGNlcHQgaW4gY29tcGxpYW5jZSB3aXRoIHRoZSBMaWNlbnNlLgovLyBZb3UgbWF5IG9idGFpbiBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKLy8KLy8gICAgICBodHRwOi8vd3d3LmFwYWNoZS5vcmcvbGljZW5zZXMvTElDRU5TRS0yLjAKLy8KLy8gVW5sZXNzIHJlcXVpcmVkIGJ5IGFwcGxpY2FibGUgbGF3IG9yIGFncmVlZCB0byBpbiB3cml0aW5nLCBzb2Z0d2FyZQovLyBkaXN0cmlidXRlZCB1bmRlciB0aGUgTGljZW5zZSBpcyBkaXN0cmlidXRlZCBvbiBhbiAiQVMgSVMiIEJBU0lTLAovLyBXSVRIT1VUIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4KLy8gU2VlIHRoZSBMaWNlbnNlIGZvciB0aGUgc3BlY2lmaWMgbGFuZ3VhZ2UgZ292ZXJuaW5nIHBlcm1pc3Npb25zIGFuZAovLyBsaW1pdGF0aW9ucyB1bmRlciB0aGUgTGljZW5zZS4KCi8qKgogKiBAZmlsZW92ZXJ2aWV3IEhlbHBlcnMgZm9yIGdvb2dsZS5jb2xhYiBQeXRob24gbW9kdWxlLgogKi8KKGZ1bmN0aW9uKHNjb3BlKSB7CmZ1bmN0aW9uIHNwYW4odGV4dCwgc3R5bGVBdHRyaWJ1dGVzID0ge30pIHsKICBjb25zdCBlbGVtZW50ID0gZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgnc3BhbicpOwogIGVsZW1lbnQudGV4dENvbnRlbnQgPSB0ZXh0OwogIGZvciAoY29uc3Qga2V5IG9mIE9iamVjdC5rZXlzKHN0eWxlQXR0cmlidXRlcykpIHsKICAgIGVsZW1lbnQuc3R5bGVba2V5XSA9IHN0eWxlQXR0cmlidXRlc1trZXldOwogIH0KICByZXR1cm4gZWxlbWVudDsKfQoKLy8gTWF4IG51bWJlciBvZiBieXRlcyB3aGljaCB3aWxsIGJlIHVwbG9hZGVkIGF0IGEgdGltZS4KY29uc3QgTUFYX1BBWUxPQURfU0laRSA9IDEwMCAqIDEwMjQ7CgpmdW5jdGlvbiBfdXBsb2FkRmlsZXMoaW5wdXRJZCwgb3V0cHV0SWQpIHsKICBjb25zdCBzdGVwcyA9IHVwbG9hZEZpbGVzU3RlcChpbnB1dElkLCBvdXRwdXRJZCk7CiAgY29uc3Qgb3V0cHV0RWxlbWVudCA9IGRvY3VtZW50LmdldEVsZW1lbnRCeUlkKG91dHB1dElkKTsKICAvLyBDYWNoZSBzdGVwcyBvbiB0aGUgb3V0cHV0RWxlbWVudCB0byBtYWtlIGl0IGF2YWlsYWJsZSBmb3IgdGhlIG5leHQgY2FsbAogIC8vIHRvIHVwbG9hZEZpbGVzQ29udGludWUgZnJvbSBQeXRob24uCiAgb3V0cHV0RWxlbWVudC5zdGVwcyA9IHN0ZXBzOwoKICByZXR1cm4gX3VwbG9hZEZpbGVzQ29udGludWUob3V0cHV0SWQpOwp9CgovLyBUaGlzIGlzIHJvdWdobHkgYW4gYXN5bmMgZ2VuZXJhdG9yIChub3Qgc3VwcG9ydGVkIGluIHRoZSBicm93c
2VyIHlldCksCi8vIHdoZXJlIHRoZXJlIGFyZSBtdWx0aXBsZSBhc3luY2hyb25vdXMgc3RlcHMgYW5kIHRoZSBQeXRob24gc2lkZSBpcyBnb2luZwovLyB0byBwb2xsIGZvciBjb21wbGV0aW9uIG9mIGVhY2ggc3RlcC4KLy8gVGhpcyB1c2VzIGEgUHJvbWlzZSB0byBibG9jayB0aGUgcHl0aG9uIHNpZGUgb24gY29tcGxldGlvbiBvZiBlYWNoIHN0ZXAsCi8vIHRoZW4gcGFzc2VzIHRoZSByZXN1bHQgb2YgdGhlIHByZXZpb3VzIHN0ZXAgYXMgdGhlIGlucHV0IHRvIHRoZSBuZXh0IHN0ZXAuCmZ1bmN0aW9uIF91cGxvYWRGaWxlc0NvbnRpbnVlKG91dHB1dElkKSB7CiAgY29uc3Qgb3V0cHV0RWxlbWVudCA9IGRvY3VtZW50LmdldEVsZW1lbnRCeUlkKG91dHB1dElkKTsKICBjb25zdCBzdGVwcyA9IG91dHB1dEVsZW1lbnQuc3RlcHM7CgogIGNvbnN0IG5leHQgPSBzdGVwcy5uZXh0KG91dHB1dEVsZW1lbnQubGFzdFByb21pc2VWYWx1ZSk7CiAgcmV0dXJuIFByb21pc2UucmVzb2x2ZShuZXh0LnZhbHVlLnByb21pc2UpLnRoZW4oKHZhbHVlKSA9PiB7CiAgICAvLyBDYWNoZSB0aGUgbGFzdCBwcm9taXNlIHZhbHVlIHRvIG1ha2UgaXQgYXZhaWxhYmxlIHRvIHRoZSBuZXh0CiAgICAvLyBzdGVwIG9mIHRoZSBnZW5lcmF0b3IuCiAgICBvdXRwdXRFbGVtZW50Lmxhc3RQcm9taXNlVmFsdWUgPSB2YWx1ZTsKICAgIHJldHVybiBuZXh0LnZhbHVlLnJlc3BvbnNlOwogIH0pOwp9CgovKioKICogR2VuZXJhdG9yIGZ1bmN0aW9uIHdoaWNoIGlzIGNhbGxlZCBiZXR3ZWVuIGVhY2ggYXN5bmMgc3RlcCBvZiB0aGUgdXBsb2FkCiAqIHByb2Nlc3MuCiAqIEBwYXJhbSB7c3RyaW5nfSBpbnB1dElkIEVsZW1lbnQgSUQgb2YgdGhlIGlucHV0IGZpbGUgcGlja2VyIGVsZW1lbnQuCiAqIEBwYXJhbSB7c3RyaW5nfSBvdXRwdXRJZCBFbGVtZW50IElEIG9mIHRoZSBvdXRwdXQgZGlzcGxheS4KICogQHJldHVybiB7IUl0ZXJhYmxlPCFPYmplY3Q+fSBJdGVyYWJsZSBvZiBuZXh0IHN0ZXBzLgogKi8KZnVuY3Rpb24qIHVwbG9hZEZpbGVzU3RlcChpbnB1dElkLCBvdXRwdXRJZCkgewogIGNvbnN0IGlucHV0RWxlbWVudCA9IGRvY3VtZW50LmdldEVsZW1lbnRCeUlkKGlucHV0SWQpOwogIGlucHV0RWxlbWVudC5kaXNhYmxlZCA9IGZhbHNlOwoKICBjb25zdCBvdXRwdXRFbGVtZW50ID0gZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQob3V0cHV0SWQpOwogIG91dHB1dEVsZW1lbnQuaW5uZXJIVE1MID0gJyc7CgogIGNvbnN0IHBpY2tlZFByb21pc2UgPSBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gewogICAgaW5wdXRFbGVtZW50LmFkZEV2ZW50TGlzdGVuZXIoJ2NoYW5nZScsIChlKSA9PiB7CiAgICAgIHJlc29sdmUoZS50YXJnZXQuZmlsZXMpOwogICAgfSk7CiAgfSk7CgogIGNvbnN0IGNhbmNlbCA9IGRvY3VtZW50LmNyZWF0ZUVsZW1lbnQoJ2J1dHRvbicpOwogIGlucHV0RWxlbWVudC5wYXJlbnRFbGVtZW50LmFwcGVuZENoaWxkKGNhbmNlbCk7CiAgY2FuY2VsL
nRleHRDb250ZW50ID0gJ0NhbmNlbCB1cGxvYWQnOwogIGNvbnN0IGNhbmNlbFByb21pc2UgPSBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gewogICAgY2FuY2VsLm9uY2xpY2sgPSAoKSA9PiB7CiAgICAgIHJlc29sdmUobnVsbCk7CiAgICB9OwogIH0pOwoKICAvLyBXYWl0IGZvciB0aGUgdXNlciB0byBwaWNrIHRoZSBmaWxlcy4KICBjb25zdCBmaWxlcyA9IHlpZWxkIHsKICAgIHByb21pc2U6IFByb21pc2UucmFjZShbcGlja2VkUHJvbWlzZSwgY2FuY2VsUHJvbWlzZV0pLAogICAgcmVzcG9uc2U6IHsKICAgICAgYWN0aW9uOiAnc3RhcnRpbmcnLAogICAgfQogIH07CgogIGNhbmNlbC5yZW1vdmUoKTsKCiAgLy8gRGlzYWJsZSB0aGUgaW5wdXQgZWxlbWVudCBzaW5jZSBmdXJ0aGVyIHBpY2tzIGFyZSBub3QgYWxsb3dlZC4KICBpbnB1dEVsZW1lbnQuZGlzYWJsZWQgPSB0cnVlOwoKICBpZiAoIWZpbGVzKSB7CiAgICByZXR1cm4gewogICAgICByZXNwb25zZTogewogICAgICAgIGFjdGlvbjogJ2NvbXBsZXRlJywKICAgICAgfQogICAgfTsKICB9CgogIGZvciAoY29uc3QgZmlsZSBvZiBmaWxlcykgewogICAgY29uc3QgbGkgPSBkb2N1bWVudC5jcmVhdGVFbGVtZW50KCdsaScpOwogICAgbGkuYXBwZW5kKHNwYW4oZmlsZS5uYW1lLCB7Zm9udFdlaWdodDogJ2JvbGQnfSkpOwogICAgbGkuYXBwZW5kKHNwYW4oCiAgICAgICAgYCgke2ZpbGUudHlwZSB8fCAnbi9hJ30pIC0gJHtmaWxlLnNpemV9IGJ5dGVzLCBgICsKICAgICAgICBgbGFzdCBtb2RpZmllZDogJHsKICAgICAgICAgICAgZmlsZS5sYXN0TW9kaWZpZWREYXRlID8gZmlsZS5sYXN0TW9kaWZpZWREYXRlLnRvTG9jYWxlRGF0ZVN0cmluZygpIDoKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgJ24vYSd9IC0gYCkpOwogICAgY29uc3QgcGVyY2VudCA9IHNwYW4oJzAlIGRvbmUnKTsKICAgIGxpLmFwcGVuZENoaWxkKHBlcmNlbnQpOwoKICAgIG91dHB1dEVsZW1lbnQuYXBwZW5kQ2hpbGQobGkpOwoKICAgIGNvbnN0IGZpbGVEYXRhUHJvbWlzZSA9IG5ldyBQcm9taXNlKChyZXNvbHZlKSA9PiB7CiAgICAgIGNvbnN0IHJlYWRlciA9IG5ldyBGaWxlUmVhZGVyKCk7CiAgICAgIHJlYWRlci5vbmxvYWQgPSAoZSkgPT4gewogICAgICAgIHJlc29sdmUoZS50YXJnZXQucmVzdWx0KTsKICAgICAgfTsKICAgICAgcmVhZGVyLnJlYWRBc0FycmF5QnVmZmVyKGZpbGUpOwogICAgfSk7CiAgICAvLyBXYWl0IGZvciB0aGUgZGF0YSB0byBiZSByZWFkeS4KICAgIGxldCBmaWxlRGF0YSA9IHlpZWxkIHsKICAgICAgcHJvbWlzZTogZmlsZURhdGFQcm9taXNlLAogICAgICByZXNwb25zZTogewogICAgICAgIGFjdGlvbjogJ2NvbnRpbnVlJywKICAgICAgfQogICAgfTsKCiAgICAvLyBVc2UgYSBjaHVua2VkIHNlbmRpbmcgdG8gYXZvaWQgbWVzc2FnZSBzaXplIGxpbWl0cy4gU2VlIGIvNjIxMTU2NjAuCiAgICBsZXQgcG9zaXRpb24gPSAwOwogICAgZG8gewogICAgICBjb25zd
CBsZW5ndGggPSBNYXRoLm1pbihmaWxlRGF0YS5ieXRlTGVuZ3RoIC0gcG9zaXRpb24sIE1BWF9QQVlMT0FEX1NJWkUpOwogICAgICBjb25zdCBjaHVuayA9IG5ldyBVaW50OEFycmF5KGZpbGVEYXRhLCBwb3NpdGlvbiwgbGVuZ3RoKTsKICAgICAgcG9zaXRpb24gKz0gbGVuZ3RoOwoKICAgICAgY29uc3QgYmFzZTY0ID0gYnRvYShTdHJpbmcuZnJvbUNoYXJDb2RlLmFwcGx5KG51bGwsIGNodW5rKSk7CiAgICAgIHlpZWxkIHsKICAgICAgICByZXNwb25zZTogewogICAgICAgICAgYWN0aW9uOiAnYXBwZW5kJywKICAgICAgICAgIGZpbGU6IGZpbGUubmFtZSwKICAgICAgICAgIGRhdGE6IGJhc2U2NCwKICAgICAgICB9LAogICAgICB9OwoKICAgICAgbGV0IHBlcmNlbnREb25lID0gZmlsZURhdGEuYnl0ZUxlbmd0aCA9PT0gMCA/CiAgICAgICAgICAxMDAgOgogICAgICAgICAgTWF0aC5yb3VuZCgocG9zaXRpb24gLyBmaWxlRGF0YS5ieXRlTGVuZ3RoKSAqIDEwMCk7CiAgICAgIHBlcmNlbnQudGV4dENvbnRlbnQgPSBgJHtwZXJjZW50RG9uZX0lIGRvbmVgOwoKICAgIH0gd2hpbGUgKHBvc2l0aW9uIDwgZmlsZURhdGEuYnl0ZUxlbmd0aCk7CiAgfQoKICAvLyBBbGwgZG9uZS4KICB5aWVsZCB7CiAgICByZXNwb25zZTogewogICAgICBhY3Rpb246ICdjb21wbGV0ZScsCiAgICB9CiAgfTsKfQoKc2NvcGUuZ29vZ2xlID0gc2NvcGUuZ29vZ2xlIHx8IHt9OwpzY29wZS5nb29nbGUuY29sYWIgPSBzY29wZS5nb29nbGUuY29sYWIgfHwge307CnNjb3BlLmdvb2dsZS5jb2xhYi5fZmlsZXMgPSB7CiAgX3VwbG9hZEZpbGVzLAogIF91cGxvYWRGaWxlc0NvbnRpbnVlLAp9Owp9KShzZWxmKTsK", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 294} id="DnNwo4CuYYcx" executionInfo={"status": "ok", "timestamp": 1647802715370, "user_tz": 420, "elapsed": 9936, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="5fc541f9-e4e8-4cf9-9260-cac93cc82f54"
from pathlib import Path
from google.colab import files
# Prompt the Colab user to upload the CSV before reading it.
uploaded = files.upload()
# Import the OHLCV dataset into a Pandas Dataframe, indexed by parsed dates.
ohlcv_df = pd.read_csv(
    Path('./emerging_markets_ohlcv.csv'),
    index_col='date',
    infer_datetime_format=True,  # NOTE(review): deprecated in newer pandas — harmless here
    parse_dates=True
)
# Review the DataFrame
ohlcv_df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="Y1JFd3HvojnA" executionInfo={"status": "ok", "timestamp": 1647802718802, "user_tz": 420, "elapsed": 668, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="8c60fbc8-9f4f-4683-83de-93e038b8716a"
# Filter the date index and close columns
signals_df = ohlcv_df.loc[:, ["close"]]
# NOTE(review): hvplot may render blank in Colab without its bokeh extension
# loaded — presumably why the author left the question below.
signals_df.hvplot() #why is hvplot not working?
# + colab={"base_uri": "https://localhost:8080/", "height": 458} id="wwd_oJUoYYcx" executionInfo={"status": "ok", "timestamp": 1647802723186, "user_tz": 420, "elapsed": 318, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="3c19c11a-f7bb-4ab9-d686-dbfd61ff9bb1"
# Filter the date index and close columns
signals_df = ohlcv_df.loc[:, ["close"]]
# Use the pct_change function to generate returns from close prices
signals_df["Actual Returns"] = signals_df["close"].pct_change()
# Drop all NaN values from the DataFrame (the first row has no prior close)
signals_df = signals_df.dropna()
# Review the DataFrame
display(signals_df.head())
display(signals_df.tail())
# + [markdown] id="4ElLHh9DYYcy"
# ## Step 2: Generate trading signals using short- and long-window SMA values.
# + colab={"base_uri": "https://localhost:8080/", "height": 458} id="P7HggIYSYYcy" executionInfo={"status": "ok", "timestamp": 1647803885707, "user_tz": 420, "elapsed": 315, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="d50e6001-2b28-4750-d126-7f2855043172"
# Set the short window and long window
short_window = 10
long_window = 200
# Generate the fast and slow simple moving averages (10 and 200 days, respectively)
signals_df['SMA_Fast'] = signals_df['close'].rolling(window=short_window).mean()
signals_df['SMA_Slow'] = signals_df['close'].rolling(window=long_window).mean()
# Rolling means are NaN until each window fills; drop those rows.
signals_df = signals_df.dropna()
# Review the DataFrame
display(signals_df.head())
display(signals_df.tail())
# + colab={"base_uri": "https://localhost:8080/", "height": 458} id="rjlcuGkoYYcy" executionInfo={"status": "ok", "timestamp": 1647803894203, "user_tz": 420, "elapsed": 414, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="6f91c605-2e1e-4c7e-a0a9-ae3fd2bef003"
# Initialize the new Signal column
signals_df['Signal'] = 0.0
# When Actual Returns are greater than or equal to 0, generate signal to buy stock long
signals_df.loc[(signals_df['Actual Returns'] >= 0), 'Signal'] = 1
# When Actual Returns are less than 0, generate signal to sell stock short
# (the signal uses same-day returns; the later shift() lags it to avoid look-ahead)
signals_df.loc[(signals_df['Actual Returns'] < 0), 'Signal'] = -1
# Review the DataFrame
display(signals_df.head())
display(signals_df.tail())
# + colab={"base_uri": "https://localhost:8080/"} id="S7W8XhJMYYcz" executionInfo={"status": "ok", "timestamp": 1647803897888, "user_tz": 420, "elapsed": 312, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="27364841-a444-4fbc-d745-ba92ef0d7145"
signals_df['Signal'].value_counts()
# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="6aOSDpj6phji" executionInfo={"status": "ok", "timestamp": 1647802745279, "user_tz": 420, "elapsed": 1127, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/<KEY>", "userId": "03063694329766245039"}} outputId="1ae19b01-411e-495e-b420-813380ffe4ec"
# Visually show the SMA fast and slow averages to identify cross-over points
moving_avgs = signals_df[['SMA_Fast', 'SMA_Slow']].hvplot(
    ylabel='Price in $',
    width=1000,
    height=400)
# Show the plot
# NOTE(review): hvplot only renders if the hvplot/bokeh extension is loaded
# (e.g. `import hvplot.pandas`) — likely missing in this session; confirm.
moving_avgs
# + colab={"base_uri": "https://localhost:8080/", "height": 458} id="vKmxKLP8YYcz" executionInfo={"status": "ok", "timestamp": 1647803913761, "user_tz": 420, "elapsed": 413, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="a6617f75-adc1-4741-c466-a122e4a03b55"
# Calculate the strategy returns and add them to the signals_df DataFrame.
# Signal.shift() applies the PRIOR period's signal to the current period's
# return, so the strategy does not trade on information it hasn't seen yet.
signals_df['Strategy Returns'] = signals_df['Actual Returns'] * signals_df['Signal'].shift()
# Review the DataFrame
display(signals_df.head())
display(signals_df.tail())
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="S9DgI7AzYYcz" executionInfo={"status": "ok", "timestamp": 1647803917555, "user_tz": 420, "elapsed": 419, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="fe49f1f6-ffdf-4f16-9099-182f8a2e980e"
# Plot cumulative Strategy Returns ((1 + r).cumprod() compounds the per-period returns)
(1 + signals_df['Strategy Returns']).cumprod().plot()
# + [markdown] id="Nz2JRCNlYYcz"
# ### Step 3: Split the data into training and testing datasets.
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="cbm49bJbYYcz" executionInfo={"status": "ok", "timestamp": 1647803921647, "user_tz": 420, "elapsed": 416, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="07619525-f0a0-4bcf-db11-bf4efb655893"
# Assign a copy of the sma_fast and sma_slow columns to a features DataFrame called X.
# shift() lags the features one period so each prediction uses only past SMAs;
# dropna() removes the first row made NaN by the shift.
X = signals_df[['SMA_Fast', 'SMA_Slow']].shift().dropna()
# Review the DataFrame
X.head()
# + colab={"base_uri": "https://localhost:8080/"} id="L-cigyMlYYc0" executionInfo={"status": "ok", "timestamp": 1647803925331, "user_tz": 420, "elapsed": 413, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="72e06083-6748-4239-af58-24afa0b06108"
# Create the target set by selecting the Signal column and assigning it to y
y = signals_df['Signal']
# Review the value counts (class balance of long vs. short signals)
y.value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="5FBDAQdKYYc0" executionInfo={"status": "ok", "timestamp": 1647804221267, "user_tz": 420, "elapsed": 415, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="46e93888-0d80-49d6-d66a-45d27456f3b5"
# Select the start of the training period (earliest timestamp in the features)
training_begin = X.index.min()
# Display the training begin date
print(training_begin)
# + colab={"base_uri": "https://localhost:8080/"} id="kON3SkMKYYc0" executionInfo={"status": "ok", "timestamp": 1647804223454, "user_tz": 420, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="785f29d7-7972-44f5-a0c1-35c5c368e856"
# Select the ending period for the training data with an offset of 6 months
# (the stale comment previously said 3 months; the code uses months=6)
training_end = X.index.min() + DateOffset(months=6)
# Display the training end date
print(training_end)
# + colab={"base_uri": "https://localhost:8080/", "height": 458} id="V5prKUepYYc0" executionInfo={"status": "ok", "timestamp": 1647804228845, "user_tz": 420, "elapsed": 568, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="f2c0b4e2-10f4-4499-9934-ac9206c053ea"
# Generate the X_train and y_train DataFrames by label-slicing the 6-month window
X_train = X.loc[training_begin:training_end]
y_train = y.loc[training_begin:training_end]
# Review the X_train DataFrame
display(X_train.head())
display(X_train.tail())
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="7HMADyT7YYc0" executionInfo={"status": "ok", "timestamp": 1647804232738, "user_tz": 420, "elapsed": 510, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="40a61f5e-3b4a-4bb3-bdb6-41ef0c2b71af"
# Generate the X_test and y_test DataFrames.
# The +1 hour offset excludes the training_end timestamp itself, so the
# train and test slices do not overlap.
X_test = X.loc[training_end+DateOffset(hours=1):]
y_test = y.loc[training_end+DateOffset(hours=1):]
# Review the X_test DataFrame
X_test.head()
# + id="BcN8fUAbYYc0" executionInfo={"status": "ok", "timestamp": 1647804236009, "user_tz": 420, "elapsed": 400, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}}
# Scale the features DataFrames
# Create a StandardScaler instance
scaler = StandardScaler()
# Fit the scaler on the TRAINING data only, so test statistics do not leak
# into the preprocessing
X_scaler = scaler.fit(X_train)
# Transform the X_train and X_test DataFrames using the X_scaler
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# + [markdown] id="pcmZEPTdYYc0"
# ### Step 4: Use the `SVC` classifier model from SKLearn's support vector machine (SVM) learning method to fit the training data and make predictions based on the testing data. Review the predictions.
# + colab={"base_uri": "https://localhost:8080/"} id="FSJulPyXYYc0" executionInfo={"status": "ok", "timestamp": 1647804239498, "user_tz": 420, "elapsed": 513, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="069b180c-d83b-4aa6-ccae-93f4258ddafc"
# From SVM, instantiate an SVC classifier model instance (default hyperparameters)
from sklearn.svm import SVC
svm_model = SVC()
# Fit the model to the data using the scaled training features
svm_model = svm_model.fit(X_train_scaled,y_train)
# Use the testing data to make the model predictions
svm_pred = svm_model.predict(X_test_scaled)
# Review the model's predicted values
svm_pred
# + [markdown] id="7vc93Ne8YYc0"
# ### Step 5: Review the classification report associated with the `SVC` model predictions.
# + colab={"base_uri": "https://localhost:8080/"} id="-ojEKPrpYYc1" executionInfo={"status": "ok", "timestamp": 1647804241767, "user_tz": 420, "elapsed": 293, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="4ed6ef77-0470-4c38-c4bc-91c68413a73c"
# Use a classification report (precision/recall/F1 per class) to evaluate the
# SVC model using the predictions and testing data
from sklearn.metrics import classification_report
svm_testing_report = classification_report(y_test, svm_pred)
# Print the classification report
print(svm_testing_report)
# + [markdown] id="-UQ8YlFnYYc1"
# ### Step 6: Create a predictions DataFrame that contains columns for “Predicted” values, “Actual Returns”, and “Strategy Returns”.
# + colab={"base_uri": "https://localhost:8080/", "height": 511} id="K2ryRpgDYYc1" executionInfo={"status": "ok", "timestamp": 1647803713774, "user_tz": 420, "elapsed": 346, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="b5f61fbc-6b3d-454d-dd21-60a65a174e50"
# Create a new empty predictions DataFrame indexed like the test features
predictions_df = pd.DataFrame(index=X_test.index)
# Add the SVM model predictions to the DataFrame
predictions_df['Predicted'] = svm_pred
display(predictions_df['Predicted'].value_counts())
# Add the actual returns to the DataFrame.
# NOTE(review): recomputing pct_change here looks redundant ('Actual Returns'
# was already referenced earlier) and re-inserts a NaN at signals_df's first
# row; that row is in the training window, not the test index — confirm.
signals_df["Actual Returns"] = signals_df["close"].pct_change()
predictions_df['Actual Returns'] = signals_df['Actual Returns']
# Add the strategy returns (return realized if the predicted signal were traded)
predictions_df['Strategy Returns'] = predictions_df['Actual Returns'] * predictions_df['Predicted']
# Review the DataFrame
display(predictions_df.head())
display(predictions_df.tail())
# + [markdown] id="I6KvKxfSYYc1"
# ### Step 7: Create a cumulative return plot that shows the actual returns vs. the strategy returns. Save a PNG image of this plot. This will serve as a baseline against which to compare the effects of tuning the trading algorithm.
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="RteC-H-aYYc1" executionInfo={"status": "ok", "timestamp": 1647803718217, "user_tz": 420, "elapsed": 452, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="781cb00e-439e-470e-fe87-3f0b26238192"
# Plot the cumulative actual returns versus the SVM strategy returns (baseline)
(1 + predictions_df[['Actual Returns', 'Strategy Returns']]).cumprod().plot()
# + [markdown] id="qHEWipzlYYc1"
# ---
#
# ## Tune the Baseline Trading Algorithm
# + [markdown] id="XeO_o8OxYYc1"
# ## Step 6: Use an Alternative ML Model and Evaluate Strategy Returns
# + [markdown] id="B4w9aJzIYYc1"
# In this section, you’ll tune, or adjust, the model’s input features to find the parameters that result in the best trading outcomes. You’ll choose the best by comparing the cumulative products of the strategy returns.
# + [markdown] id="_dNmUWPuYYc1"
# ### Step 1: Tune the training algorithm by adjusting the size of the training dataset.
#
# To do so, slice your data into different periods. Rerun the notebook with the updated parameters, and record the results in your `README.md` file.
#
# Answer the following question: What impact resulted from increasing or decreasing the training window?
# + [markdown] id="n9QQpFLEYYc1"
# ### Step 2: Tune the trading algorithm by adjusting the SMA input features.
#
# Adjust one or both of the windows for the algorithm. Rerun the notebook with the updated parameters, and record the results in your `README.md` file.
#
# Answer the following question: What impact resulted from increasing or decreasing either or both of the SMA windows?
# + [markdown] id="b7DxDploYYc1"
# ### Step 3: Choose the set of parameters that best improved the trading algorithm returns.
#
# Save a PNG image of the cumulative product of the actual returns vs. the strategy returns, and document your conclusion in your `README.md` file.
# + [markdown] id="-lURU96fYYc1"
# ---
#
# ## Evaluate a New Machine Learning Classifier
#
# In this section, you’ll use the original parameters that the starter code provided. But, you’ll apply them to the performance of a second machine learning model.
# + [markdown] id="IBTWlVFvYYc1"
# ### Step 1: Import a new classifier, such as `AdaBoost`, `DecisionTreeClassifier`, or `LogisticRegression`. (For the full list of classifiers, refer to the [Supervised learning page](https://scikit-learn.org/stable/supervised_learning.html) in the scikit-learn documentation.)
# + colab={"base_uri": "https://localhost:8080/"} id="YW6O3A06YYc1" executionInfo={"status": "ok", "timestamp": 1647804619501, "user_tz": 420, "elapsed": 394, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="e633f1be-fae4-43f0-9274-e57e6d782e4b"
# Import a new classifier from SKLearn
from sklearn.linear_model import LogisticRegression
# Initiate the model instance with default hyperparameters
lr = LogisticRegression()
# Echo the estimator so the notebook displays its configuration
lr
# (the scaffold's "initiate the model instance" step is satisfied above)
# + [markdown] id="3FGPtC3wYYc1"
# ### Step 2: Using the original training data as the baseline model, fit another model with the new classifier.
# + colab={"base_uri": "https://localhost:8080/"} id="qGRz3mNYYYc2" executionInfo={"status": "ok", "timestamp": 1647804794602, "user_tz": 420, "elapsed": 313, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="ff97c05d-a23f-454e-857d-1dff45ced54e"
# Fit the logistic-regression model using the scaled training data
model = lr.fit(X_train_scaled, y_train)
# Use the testing dataset to generate the predictions for the new model
pred = lr.predict(X_test_scaled)
# Review the model's predicted values
# Import evaluation helpers
from sklearn.metrics import confusion_matrix,classification_report
# Print confusion matrix (rows = actual class, columns = predicted class)
print(confusion_matrix(y_test, pred))
# Print classification report
print(classification_report(y_test, pred))
# + [markdown] id="REavPf7yYYc2"
# ### Step 3: Backtest the new model to evaluate its performance.
#
# Save a PNG image of the cumulative product of the actual returns vs. the strategy returns for this updated trading algorithm, and write your conclusions in your `README.md` file.
#
# Answer the following questions:
# Did this new model perform better or worse than the provided baseline model?
# Did this new model perform better or worse than your tuned trading algorithm?
# + id="6mKqQzlmYYc2"
# Use a classification report to evaluate the model using the predictions
# and testing data.
# NOTE(review): this cell repeats the confusion matrix / report already
# printed in the previous cell — consider removing one copy.
from sklearn.metrics import confusion_matrix,classification_report
# Print confusion matrix
print(confusion_matrix(y_test, pred))
# Print the classification report
print(classification_report(y_test, pred))
# + colab={"base_uri": "https://localhost:8080/", "height": 511} id="uCzcQdxGYYc2" executionInfo={"status": "ok", "timestamp": 1647805236069, "user_tz": 420, "elapsed": 418, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="1e16c293-b711-45e3-fdc5-456fefea52fa"
# Create a new empty predictions DataFrame for the logistic-regression model,
# indexed like the test features
predictions_new_df = pd.DataFrame(index=X_test.index)
# Add the LR model predictions to the DataFrame
predictions_new_df['Predicted'] = pred
display(predictions_new_df['Predicted'].value_counts())
# Add the actual returns to the DataFrame.
# NOTE(review): recomputing pct_change here appears redundant — the column
# already exists; verify before removing.
signals_df["Actual Returns"] = signals_df["close"].pct_change()
predictions_new_df['Actual Returns'] = signals_df['Actual Returns']
# Add the strategy returns (return realized if the predicted signal were traded)
predictions_new_df['Strategy Returns'] = predictions_new_df['Actual Returns'] * predictions_new_df['Predicted']
# Review the DataFrame
display(predictions_new_df.head())
display(predictions_new_df.tail())
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="DVLNzmpNYYc2" executionInfo={"status": "ok", "timestamp": 1647805267205, "user_tz": 420, "elapsed": 934, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiBXQfpWTDW6g2fOVEtkz6LdG-Y60LXjuVO5FtAeg=s64", "userId": "03063694329766245039"}} outputId="289d5ab4-cb09-414c-a819-41db6ba19da9"
# Plot the cumulative actual returns versus the strategy returns of the NEW
# (logistic-regression) model.
# Bug fix: this cell previously plotted predictions_df — the SVM baseline
# backtest — so the new model's performance was never actually shown.
(1 + predictions_new_df[['Actual Returns', 'Strategy Returns']]).cumprod().plot()
# + id="6g8U0ug1YYc2"
|
machine_learning_trading_bot.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
from IPython.display import HTML
from matplotlib.patches import Ellipse
# +
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure()
sf = 0.01  # scale factor for the size of the moving ellipse marker
patch = Ellipse((0, 0), width = sf * 4, height = sf * 1, color ='blue')
# initialization function: plot the background of each frame
def init():
    """Draw the static on-axis field curve and place the ellipse at the origin."""
    ax = plt.axes(xlim=(0, 4), ylim=(0, 0.5))
    # NOTE(review): `line` is created and cleared but never animated —
    # appears to be template leftover; confirm before removing.
    line, = ax.plot([], [], lw=2)
    line.set_data([], [])
    # Background curve: field of a unit ring sampled at 100 points on [0, 4]
    z_list = np.linspace(0, 4, 100)
    E_list = np.array([E_of_ring(x, R=1) for x in z_list])
    plt.plot(z_list, E_list)
    patch.center = (0, 0)
    ax.add_patch(patch)
    return patch,
def E_of_ring(x, R=1):
    """Unnormalized on-axis electric field of a charged ring of radius R.

    Evaluates x / (x**2 + R**2)**(3/2) at axial distance x.
    """
    denominator = (x**2 + R**2)**(3/2)
    return x / denominator
def animate(i):
    """Move the ellipse to frame i's position along the field curve.

    Returns the modified artist tuple so blitting redraws only the patch.
    """
    # Removed dead code: the original `x, y = patch.center` read was
    # immediately overwritten by both assignments below.
    x = (2/100)*i          # frame index -> axial position (0.02 per frame)
    y = E_of_ring(x, R=1)  # height tracks the plotted field curve
    patch.center = (x, y)
    return patch,
# 200 frames at 20 ms/frame; blit=True redraws only the returned artists
anim = animation.FuncAnimation(fig, animate,
                               init_func=init,
                               frames=200,
                               interval=20,
                               blit=True)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
#anim.save('test.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
# Render the animation inline as an HTML5 video
HTML(anim.to_html5_video())
# -
# +
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure()
sf = 0.01  # scale factor for the size of the moving ellipse marker
patch = Ellipse((0, 0), width = sf * 4, height = sf * 1, color ='blue')
# initialization function: plot the background of each frame
def init():
    """Draw the field curve on [-3, 3] and place the ellipse at the origin."""
    ax = plt.axes(xlim=(-3, 3), ylim=(-0.5, 0.5))
    # NOTE(review): `line` is created and cleared but never animated —
    # appears to be template leftover; confirm before removing.
    line, = ax.plot([], [], lw=2)
    line.set_data([], [])
    # Background curve: field of a unit ring sampled at 100 points on [-3, 3]
    z_list = np.linspace(-3, 3, 100)
    E_list = np.array([E_of_ring(x, R=1) for x in z_list])
    plt.plot(z_list, E_list)
    patch.center = (0, 0)
    ax.add_patch(patch)
    return patch,
def E_of_ring(x, R=1):
    """Unnormalized on-axis electric field of a charged ring of radius R.

    Evaluates x / (x**2 + R**2)**(3/2) at axial distance x.
    """
    denominator = (x**2 + R**2)**(3/2)
    return x / denominator
def animate(i):
    """Oscillate the ellipse horizontally along the axis for frame i.

    Returns the modified artist tuple so blitting redraws only the patch.
    """
    # Removed dead code: the original `x, y = patch.center` read was never
    # used (x was reassigned and y discarded).
    t = 0.6*np.sin(np.pi*i/50)   # oscillating "source position", period 100 frames
    x = 3*E_of_ring(t, R=1)      # map through the field profile, scaled by 3
    patch.center = (x, 0)
    return patch,
# 100 frames at 20 ms/frame; blit=True redraws only the returned artists
anim = animation.FuncAnimation(fig, animate,
                               init_func=init,
                               frames=100,
                               interval=20,
                               blit=True)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
#anim.save('test.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
# Render the animation inline as an HTML5 video
HTML(anim.to_html5_video())
# -
|
jupyter/charged particle in an electric field.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Let's explore the metadata
import pandas as pd
# ## Load the metadata
# Load the metadata index (path is relative to the notebooks/ directory)
df = pd.read_csv("../data/index.csv")
df.head()
# Inspect the distinct values of each label column
df['label1'].unique()
df['label2'].unique()
df['label3'].unique()
# Check class balance of label3 (flagged as imbalanced in the heading)
df['label3'].value_counts()
|
notebooks/explore.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="eJGtmni-DezY"
# <img align="left" src="https://lever-client-logos.s3.amazonaws.com/864372b1-534c-480e-acd5-9711f850815c-1524247202159.png" width=200>
# <br></br>
# <br></br>
#
# ## *Data Science Unit 1 Sprint 3 Lesson 1*
#
# # Statistics, Probability and Inference
#
# Ever thought about how long it takes to make a pancake? Have you ever compared the cooking time of a pancake on each eye of your stove? Is the cooking time different between the different eyes? Now, we can run an experiment and collect a sample of 1,000 pancakes on one eye and another 800 pancakes on the other eye. Assume we used the same pan, batter, and technique on both eyes. Our average cooking times were 180 (5 std) and 178.5 (4.25 std) seconds respectively. Now, we can tell those numbers are not identical, but how confident are we that those numbers are practically the same? How do we know the slight difference isn't caused by some external randomness?
#
# Yes, today's lesson will help you figure out how long to cook your pancakes (*theoretically*). Experimentation is up to you; otherwise, you have to accept my data as true. How are we going to accomplish this? With probability, statistics, inference and maple syrup (optional).
#
# <img src="https://images.unsplash.com/photo-1541288097308-7b8e3f58c4c6?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=3300&q=80" width=400>
#
# ## Learning Objectives
# * [Part 1](#p1): Normal Distribution Revisted
# * [Part 2](#p2): Student's T Test
# * [Part 3](#p3): Hypothesis Test & Doing it Live
# + [markdown] id="omxd_b1Ov65I" colab_type="text"
# ## Normal Distribution Revisited
#
# What is the Normal distribution: A probability distribution of a continuous real valued random-variable. The Normal distribution properties make it useful for the *Central Limit Theorem*, because if we assume a variable follows the normal distribution, we can make certain conclusions based on probabilities.
# + id="1HkS0RHhv65J" colab_type="code" colab={}
import numpy as np
mu = 180 # mean (pancake cooking time in seconds, per the intro)
sigma = 5 # standard deviation
# Draw 1000 samples from N(mu, sigma); no seed is set, so results vary per run
sample = np.random.normal(mu, sigma, 1000)
# + id="UQEoopgZwc9X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="58ebcc02-ba3a-457e-f009-c0aac23869c8"
# Sample mean — should be close to mu = 180
np.mean(sample)
# + id="tRr-rwUxv65N" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c0c966e8-c028-464d-fcbe-fcdcb25811b2"
# Verify the mean of our sample is within 1 of the population mean
abs(mu - np.mean(sample)) < 1
# + id="4r9Yl2DVv65U" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="83ef78e9-2a14-41fb-b318-55008c248dca"
# Verify the sample standard deviation (ddof=1 = Bessel-corrected) is within 1 of sigma
abs(sigma - np.std(sample, ddof=1)) < 1
# + id="rflfocEbv65Y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="da24027c-4d43-48c8-fd41-c36428c560e7"
import seaborn as sns
from matplotlib import style
style.use('fivethirtyeight')
# Plot the sample distribution with vertical lines at the empirical
# 2.5th and 97.5th percentiles (a 95% interval)
ax = sns.distplot(sample, color='r')
ax.axvline(np.percentile(sample,97.5),0)
ax.axvline(np.percentile(sample,2.5),0);
# + id="jX_xaFdJyd7s" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="63832c2f-fa48-45ef-9b2e-cabdc383bd46"
# Upper bound of the 95% empirical interval
np.percentile(sample, 97.5)
# + [markdown] id="GKoYNoeUx51I" colab_type="text"
# Lean six sigma
# 99.7% of your product isn't faulty
# + [markdown] colab_type="text" id="FMhDKOFND0qY"
# ## Student's T Test
#
# >Assuming data come from a Normal distribution, the t test provides a way to test whether the sample mean (that is the mean calculated from the data) is a good estimate of the population mean.
#
# The derivation of the t-distribution was first published in 1908 by <NAME> while working for the Guinness Brewery in Dublin. Due to proprietary issues, he had to publish under a pseudonym, and so he used the name Student.
#
# The t-distribution is essentially a distribution of means of normally distributed data. When we use a t-statistic, we are checking that a mean falls within a certain $\alpha$ probability of the mean of means.
# + colab_type="code" id="fQ9rkLJmEbsk" colab={}
# Draw from Student's t with increasing degrees of freedom; sample size
# matches df so the larger samples also better resemble a normal
t_df10 = np.random.standard_t(df=10, size=10)
t_df100 = np.random.standard_t(df=100, size=100)
t_df1000 = np.random.standard_t(df=1000, size=1000)
# + colab_type="code" id="RyNKPt_tJk86" outputId="cd9fc608-caef-47fc-dbd1-65d34a888d5a" colab={"base_uri": "https://localhost:8080/", "height": 282}
# Overlay kernel-density estimates of the three t samples
sns.kdeplot(t_df10, color='r');
sns.kdeplot(t_df100, color='y');
sns.kdeplot(t_df1000, color='b');
# + colab_type="code" id="seQv5unnJvpM" outputId="60bbe6a8-5031-40a8-a7f3-4ef623198c7f" colab={"base_uri": "https://localhost:8080/", "height": 272}
# Print summary statistics for each t sample; i tracks the degrees of freedom
# (10, 100, 1000). NOTE(review): this loop rebinds the name `sample`,
# clobbering the normal sample drawn earlier in the notebook.
i = 10
for sample in [t_df10, t_df100, t_df1000]:
    print(f"t - distribution with {i} degrees of freedom")
    print("---" * 10)
    print(f"Mean: {sample.mean()}")
    print(f"Standard Deviation: {sample.std()}")
    print(f"Variance: {sample.var()}")
    i = i*10
# + [markdown] colab_type="text" id="FOvEGMysLaE2"
# Why is it different from normal? To better reflect the tendencies of small data and situations with unknown population standard deviation. In other words, the normal distribution is still the nice pure ideal (thanks to the central limit theorem), but the t-distribution is much more useful in many real-world situations.
# + id="APBihWol2-IM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="80981903-a4af-45ff-d366-88e36440286d"
import pandas as pd
# Missing LAR (no team roster page on NFL.com)
# NOTE(review): scrapes live roster tables from nfl.com — requires network
# access, and the URL scheme may no longer exist; verify before relying on it.
teams = ['ARI','ATL','BAL','BUF','CAR','CHI','CIN','CLE','DAL','DEN','DET','GB','HOU',
         'IND','JAX','KC','LAC','MIA','MIN','NE','NO','NYG','NYJ','OAK','PHI',
         'PIT','SEA','SF','TB','TEN','WAS']
df_list = []
for team in teams:
    # read_html returns all tables on the page; index 1 is the roster table
    df = pd.read_html(f'http://www.nfl.com/teams/roster?team={team}')[1]
    df['Team'] = team
    df.columns = ['No','Name','Pos','Status','Height','Weight','Birthdate','Exp','College','Team']
    df_list.append(df)
# Stack all team rosters into one DataFrame with a fresh index
final_df = pd.concat(df_list, ignore_index=True)
print(final_df.shape)
final_df.head()
# + [markdown] colab_type="text" id="1yx_QilAEC6o"
# ## Live Lecture - let's perform and interpret a t-test
#
# We'll generate our own data, so we can know and alter the "ground truth" that the t-test should find. We will learn about p-values and how to interpret "statistical significance" based on the output of a hypothesis test. We will also dig a bit deeper into how the test statistic is calculated based on the sample error, and visually what it looks like to have 1 or 2 "tailed" t-tests.
# + colab_type="code" id="BuysRPs-Ed0v" colab={}
from scipy.stats import ttest_ind, ttest_ind_from_stats, ttest_rel
# + id="jnFCZYoV8nnx" colab_type="code" colab={}
# Simulated cooking times for the two stove burners from the intro:
# A ~ N(180, 5) with n=1000, B ~ N(178.5, 4.25) with n=800
burnerA = np.random.normal(180, 5, 1000)
burnerB = np.random.normal(178.5, 4.25, 800)
# + id="xW_zThU38zUh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="09e4935b-0c18-4509-d19e-ed58ca909cfe"
# Peek at the first ten simulated times for burner A
burnerA[:10]
# + id="RiJ79A5M81p2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="5441a87c-af04-4ec5-a5ca-c96a71bf9ba1"
# Peek at the first ten simulated times for burner B
burnerB[:10]
# + id="ekAwVF4v9kBI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="a03d441a-3178-4863-8f60-d8540238e742"
# Print mean and standard deviation of each burner's sample
for sample in [burnerA, burnerB]:
    print(f'Mean: {sample.mean()}')
    print(f'StDev: {sample.std()}')
    print('----'*7)
# + id="ktkSgVLI9kDs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="52b6bd9d-c34e-445f-abf8-3b1ec66b90ca"
# Independent two-sample t-test: are the burner means significantly different?
tstat, pvalue = ttest_ind(burnerA, burnerB)
print(tstat)
print(pvalue)
# + id="kJsg_YvU_eNR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="14fa8dab-7d78-472d-a20b-c47f93c87218"
import seaborn as sns
# Overlay the two burner distributions for a visual comparison
sns.distplot(burnerA, color='r')
sns.distplot(burnerB, color='b')
# + [markdown] colab_type="text" id="wiq83guLcuAE"
# # Resources
#
# - https://homepage.divms.uiowa.edu/~mbognar/applets/t.html
# - https://rpsychologist.com/d3/tdist/
# - https://gallery.shinyapps.io/tdist/
# - https://en.wikipedia.org/wiki/Standard_deviation#Sample_standard_deviation_of_metabolic_rate_of_northern_fulmars
# - https://www.khanacademy.org/math/ap-statistics/two-sample-inference/two-sample-t-test-means/v/two-sample-t-test-for-difference-of-means
# + id="wK096UztAUpp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 666} outputId="f8195c9c-8582-4f4c-f42a-a0dc6ff13f74"
from sklearn.datasets import load_wine
# Load the sklearn wine dataset (a Bunch with data, target, feature_names, ...)
X = load_wine()
X
# + id="KOoO-V7JAn2P" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 241} outputId="83eafc5c-1991-4137-dbd6-1024afecdd60"
import pandas as pd
# Build a DataFrame from the feature matrix and append the class label
# as an 'origin' column
wine = pd.DataFrame(X['data'], columns = X['feature_names'])
wine['origin'] = X['target']
print(wine.shape)
wine.head()
# + id="IB1DSJGlA-T_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="bac7a3b7-efac-41d3-fd82-e22d271c7a00"
# Class balance of the three wine origins
wine.origin.value_counts()
# + id="uFvSWMinBPPO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 304} outputId="9eede715-6077-4232-ebc4-37be0e5ba7bf"
# Compare the 'ash' feature distribution between origin 0 and origin 2
sns.distplot(wine[wine['origin'] == 0]['ash'], color = 'b')
sns.distplot(wine[wine['origin'] == 2]['ash'], color = 'r');
# + id="yWmnR_C9Bu-u" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="8aa475d0-c769-461b-bb4b-cf5641379de4"
# Two-sample t-test on 'ash' between origin 0 and origin 2
tstat, pvalue = ttest_ind(wine[wine['origin'] == 0]['ash'], wine[wine['origin'] == 2]['ash'])
print(tstat)
print(pvalue)
# + id="SGE3Z7XECZhd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="7054a7e2-9b0a-4f65-9c51-fd77c58c9b80"
import matplotlib.pyplot as plt
# For every column, compare origin 1 vs. origin 2 visually and with a t-test.
# NOTE(review): wine.columns includes the 'origin' label column itself, so the
# final iteration t-tests the labels (constants 1 vs 2) — likely unintended.
for feat in wine.columns:
    # Split groups by origin
    group1 = wine[wine['origin']==1][feat]
    group2 = wine[wine['origin']==2][feat]
    # Plot distribution
    sns.distplot(group1, color = 'b')
    sns.distplot(group2, color = 'r');
    # Run t-test
    _, pvalue = ttest_ind(group1, group2)
    # Title carries the p-value; plt.figure() starts a fresh figure per feature
    plt.title(f'Feature: {feat}, P-value: {pvalue:.5f}')
    plt.figure()
# + [markdown] id="BpgYM1NyBfqq" colab_type="text"
# # Notes
# + [markdown] id="SIkz-kkbBhNF" colab_type="text"
# ### What is a t test?
#
# this is a statistical method of evaluating whether or not there is a significant difference between the average of two samples
#
# Alternatively, a null hypothesis could be that the two groups come from the same population. This is the example that we chose above. We were asking if the NBA and NFL players were pulled from the same population.
#
# ### What is a p value?
#
# The p-value helps you determine the significance of your results.
#
# A small p-value indicates strong evidence against the null hypothesis. So if we did a t test and got a p-value of 0.01, you would reject the null hypothesis and conclude that the two groups did not come from the same population.
#
# A large p-value (> 0.05) indicates weak evidence against the null hypothesis, so you fail to reject the null hypothesis. I.e. in the example above you could not conclude that NBA players come from a different distribution of athletes (based on height).
# + id="eZbt9B-XHEN4" colab_type="code" colab={}
|
module1-statistics-probability-and-inference/LS_DS_131_Statistics_Probability_and_Inference.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + executionInfo={"elapsed": 902, "status": "ok", "timestamp": 1601197846320, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="v2hDK_q508XH"
# Test dataset for the distance function.
# Each row is [x, y, type]; the first five rows are class 0, the last five class 1.
dataset = [[2.7810836,2.550537003,0],
    [1.465489372,2.362125076,0],
    [3.396561688,4.400293529,0],
    [1.38807019,1.850220317,0],
    [3.06407232,3.005305973,0],
    [7.627531214,2.759262235,1],
    [5.332441248,2.088626775,1],
    [6.922596716,1.77106367,1],
    [8.675418651,-0.242068655,1],
    [7.673756466,3.508563011,1]]
# + executionInfo={"elapsed": 723, "status": "ok", "timestamp": 1601197875932, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="JNLVIRkI1JRr"
from math import sqrt
# Distance metric between two dataset rows.
# Each row is [x, y, ..., label]; the trailing label is excluded.
def euclidean_distance(row1, row2):
    """Return the Euclidean distance between row1 and row2, ignoring the last element."""
    squared_gaps = ((row1[i] - row2[i])**2 for i in range(len(row1)-1))
    return sqrt(sum(squared_gaps))
# + colab={"base_uri": "https://localhost:8080/", "height": 193} executionInfo={"elapsed": 744, "status": "ok", "timestamp": 1601197928429, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="hwYJuQdR1WNa" outputId="3c74489c-6e5c-43f0-bd92-8db8e6e4c1bf"
# Print the distance from a reference point to every dataset row.
# NOTE(review): row0 has only two elements, so euclidean_distance drops the
# last one as the "label" and compares only the x coordinate here — probably
# intended to be [3, 3, 0] like the later cell; confirm.
row0 = [3,3]
for row in dataset:
    distance = euclidean_distance(row0, row)
    print(distance)
# + executionInfo={"elapsed": 692, "status": "ok", "timestamp": 1601198043950, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="iiOrxQw81wfH"
# Locate the most similar neighbors
def get_neighbors(train, test_row, num_neighbors):
    """Return the num_neighbors rows of train closest to test_row.

    Distance is euclidean_distance (the class label in the last column is
    ignored by it). Ties keep their original order because Python's sort
    is stable.
    """
    by_distance = sorted(train, key=lambda candidate: euclidean_distance(test_row, candidate))
    return [by_distance[i] for i in range(num_neighbors)]
# + colab={"base_uri": "https://localhost:8080/", "height": 70} executionInfo={"elapsed": 650, "status": "ok", "timestamp": 1601198111702, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="cqf7_Ykg2C4c" outputId="3d552205-3016-4860-def6-2c6057b164ae"
neighbors = get_neighbors(dataset, row0, 3)
for neighbor in neighbors:
print(neighbor)
# + executionInfo={"elapsed": 662, "status": "ok", "timestamp": 1601198138812, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="Cy3wwgm52Jmr"
# Make a classification prediction with neighbors
def predict_classification(train, test_row, num_neighbors):
    """Majority-vote class prediction among test_row's nearest neighbors.

    The neighbors are printed as a side effect (debug output kept from
    the original notebook).
    """
    nearest = get_neighbors(train, test_row, num_neighbors)
    for candidate in nearest:
        print(candidate)
    labels = [candidate[-1] for candidate in nearest]
    # Most frequent label wins; set() avoids re-counting duplicate labels.
    return max(set(labels), key=labels.count)
# + colab={"base_uri": "https://localhost:8080/", "height": 87} executionInfo={"elapsed": 724, "status": "ok", "timestamp": 1601198168043, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="wiii7E3O2Quk" outputId="dcfdbeff-69fc-43c4-867a-d5704512cd7c"
# Sanity-check: predict the label of a point sitting among the class-0
# rows; expected and predicted labels should both be 0.
row0 = [3,3,0]
prediction = predict_classification(dataset, row0, 3)
print('Expected %d, Got %d.' % (row0[-1], prediction))
# -
# ## Breast cancer example using KNN
# + executionInfo={"elapsed": 1185, "status": "ok", "timestamp": 1601201398986, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="B8GkTBok2VaV"
from sklearn.datasets import load_breast_cancer
breast_cancer_data = load_breast_cancer()
# -
print(breast_cancer_data.keys())
# + colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"elapsed": 677, "status": "ok", "timestamp": 1601201457910, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="83McEiFsCxp2" outputId="bca7d20d-c5e2-4378-bd1f-e80038f6a06e"
print(breast_cancer_data.target_names)
# + executionInfo={"elapsed": 449, "status": "ok", "timestamp": 1601201415175, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="bZVcOHbzCpZk"
import pandas as pd
df_data = pd.DataFrame(breast_cancer_data.data)
df_labels = pd.DataFrame(breast_cancer_data.target)
# + colab={"base_uri": "https://localhost:8080/", "height": 215} executionInfo={"elapsed": 663, "status": "ok", "timestamp": 1601201488334, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="C59zTWLOC1mH" outputId="4ed9955f-e042-4c14-ecaa-91e772694d74"
df_data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 195} executionInfo={"elapsed": 680, "status": "ok", "timestamp": 1601201436329, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="Ba-ClyS5Cula" outputId="b26dbb1c-b9fa-4c9b-fa39-7c6c9b19e62b"
df_labels.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 304} executionInfo={"elapsed": 1005, "status": "ok", "timestamp": 1601201548936, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="dJ1GKkn-DGbG" outputId="4b1169bb-a47c-4da9-9f84-8af59e6cf251"
df_data.describe()
# + executionInfo={"elapsed": 765, "status": "ok", "timestamp": 1601201573969, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="ue0jr8QKDME1"
def min_max_normalize(lst):
    """Min-max scale *lst* into the range [0, 1].

    Args:
        lst: An iterable of numeric values (a pandas Series works too,
            since only iteration, ``min`` and ``max`` are used).

    Returns:
        A list of floats where the smallest input maps to 0.0 and the
        largest maps to 1.0. If all values are equal, a list of zeros is
        returned instead of raising ZeroDivisionError.
    """
    # Hoist min/max out of the loop: the original recomputed both on every
    # iteration, making the normalization O(n^2) per column.
    lo = min(lst)
    hi = max(lst)
    span = hi - lo
    # Guard against a constant column, which would otherwise divide by zero.
    if span == 0:
        return [0.0 for _ in lst]
    return [(value - lo) / span for value in lst]
# + colab={"base_uri": "https://localhost:8080/", "height": 304} executionInfo={"elapsed": 3282, "status": "ok", "timestamp": 1601201594293, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="E5aAxR3wDUgP" outputId="0bee64e1-ad82-44f5-a62f-28fbd0814b59"
for x in range(len(df_data.columns)):
df_data[x] = min_max_normalize(df_data[x])
df_data.describe()
# -
df_data
# + executionInfo={"elapsed": 673, "status": "ok", "timestamp": 1601201733845, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="DiYggEnVD3G4"
#training 80%, testing 20%
from sklearn.model_selection import train_test_split
training_data, validation_data , training_labels, validation_labels = train_test_split(df_data, df_labels, test_size = 0.2, random_state = 100)
# + colab={"base_uri": "https://localhost:8080/", "height": 87} executionInfo={"elapsed": 715, "status": "ok", "timestamp": 1601201746584, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="bSD12HrDD6Yq" outputId="41b7b37a-7172-4d38-a263-99b4781896ee"
print(len(training_data))
print(len(validation_data))
print(len(training_labels))
print(len(validation_labels))
# + executionInfo={"elapsed": 659, "status": "ok", "timestamp": 1601201802612, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="q7ZDM53gD_Y4"
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors = 3)
# + colab={"base_uri": "https://localhost:8080/", "height": 125} executionInfo={"elapsed": 658, "status": "ok", "timestamp": 1601201804748, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="FKDr2JtkEDEE" outputId="cb55e97f-f725-43e5-da53-44c29640ffb2"
classifier.fit(training_data, training_labels)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"elapsed": 652, "status": "ok", "timestamp": 1601201828955, "user": {"displayName": "\u00ad\uc815\ud61c\uc601 | ERICA \uacfc\ud559\uae30\uc220\uc735\ud569\ub300\ud559 \uc751\uc6a9\uc218\ud559\uacfc | \uc870\uad50\uc218", "photoUrl": "", "userId": "03003724031739843305"}, "user_tz": -540} id="yXe18hZFEOeG" outputId="8e66b873-9b95-4fa3-92eb-56f03689da25"
print(classifier.score(validation_data, validation_labels))
# + id="KQjKOYTzEPq_"
import matplotlib.pyplot as plt
k_list = range(1,101)
accuracies = []
for k in k_list:
classifier = KNeighborsClassifier(n_neighbors = k)
classifier.fit(training_data, training_labels)
accuracies.append(classifier.score(validation_data, validation_labels))
plt.plot(k_list, accuracies)
plt.xlabel("k")
plt.ylabel("Validation Accuracy")
plt.title("Breast Cancer Classifier Accuracy")
plt.show()
|
Assignment_04_Classification/Reference/KNN example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Imports-&-Inits" data-toc-modified-id="Imports-&-Inits-1"><span class="toc-item-num">1 </span>Imports & Inits</a></span></li><li><span><a href="#Data-Loading" data-toc-modified-id="Data-Loading-2"><span class="toc-item-num">2 </span>Data Loading</a></span></li><li><span><a href="#Model" data-toc-modified-id="Model-3"><span class="toc-item-num">3 </span>Model</a></span></li><li><span><a href="#Training" data-toc-modified-id="Training-4"><span class="toc-item-num">4 </span>Training</a></span></li><li><span><a href="#Testing" data-toc-modified-id="Testing-5"><span class="toc-item-num">5 </span>Testing</a></span><ul class="toc-item"><li><span><a href="#Plots" data-toc-modified-id="Plots-5.1"><span class="toc-item-num">5.1 </span>Plots</a></span></li><li><span><a href="#Ignite-Testing" data-toc-modified-id="Ignite-Testing-5.2"><span class="toc-item-num">5.2 </span>Ignite Testing</a></span></li><li><span><a href="#NLPBook-Testing" data-toc-modified-id="NLPBook-Testing-5.3"><span class="toc-item-num">5.3 </span>NLPBook Testing</a></span></li></ul></li><li><span><a href="#Inference" data-toc-modified-id="Inference-6"><span class="toc-item-num">6 </span>Inference</a></span></li><li><span><a href="#Playground" data-toc-modified-id="Playground-7"><span class="toc-item-num">7 </span>Playground</a></span></li></ul></div>
# -
# # Surname Classifier Using ElmanRNN
# ## Imports & Inits
# %load_ext autoreload
# %autoreload 2
# +
import matplotlib.pyplot as plt
# %matplotlib inline
import pdb
import pandas as pd
import numpy as np
import torch
import re
from torch import nn
from torch.nn import functional as F
from torch import optim
from torch.utils.data import DataLoader
from pathlib import Path
from ignite.engine import Events
from ignite.metrics import Accuracy, Loss
from ignite.contrib.handlers import ProgressBar
# -
from surname.dataset import UnconditionedGenerationDataset as SurnameDataset
from surname.containers import DataContainer, ModelContainer
from consts import unconditioned_generation_consts as consts
vars(consts)
# ## Data Loading
df = pd.read_csv(consts.proc_dataset_csv)
print(df.shape)
df.head()
dc = DataContainer(df, SurnameDataset, consts.vectorizer_json, consts.bs, is_load=False)
try:
class_weights = torch.load(consts.class_weights_pth)
except FileNotFoundError:
nationality_vocab = dc.nationality_vocab
class_counts = df['nationality'].value_counts().to_dict()
sorted_counts = sorted(class_counts.items(), key=lambda x: nationality_vocab.lookup_token(x[0]))
freq = [count for _, count in sorted_counts]
class_weights = 1.0/torch.tensor(freq, dtype=torch.float32)
torch.save(class_weights, consts.class_weights_pth)
# ## Model
class SurnameGenerator(nn.Module):
    """Character-level surname generator: embedding -> GRU -> linear head.

    Args:
        emb_sz: Size of each character embedding vector.
        vocab_sz: Number of characters in the vocabulary (also the size
            of the output layer).
        rnn_hidden_sz: Hidden-state size of the GRU.
        batch_first: Passed through to ``nn.GRU``.
        padding_idx: Embedding index reserved for padding tokens.
        dropout_p: Dropout probability (the layer is created here; where it
            is applied is not visible in this file).
    """
    def __init__(self, emb_sz: int, vocab_sz: int, rnn_hidden_sz: int, batch_first: bool=True,\
                 padding_idx: int=0, dropout_p: float=0.5) -> None:
        super(SurnameGenerator, self).__init__()
        self.emb = nn.Embedding(vocab_sz, emb_sz, padding_idx)
        self.rnn = nn.GRU(emb_sz, rnn_hidden_sz, batch_first=batch_first)
        self.fc = nn.Linear(rnn_hidden_sz, vocab_sz)
        self.dropout = nn.Dropout(dropout_p)
    def forward(self, x_in, apply_softmax=False):
        x_emb = self.emb(x_in)
        # NOTE(review): forward() appears truncated in this file -- it embeds
        # the input and then stops; the GRU/fc/dropout steps and the
        # apply_softmax branch are missing. Restore before use.
# NOTE(review): this cell looks pasted from the surname *classifier*
# notebook -- `SurnameClassifier` and `classify_consts` are never imported
# or defined in this file (only SurnameGenerator / consts are), so it
# cannot run as-is.
classifier = SurnameClassifier(classify_consts.char_embedding_sz, dc.vocab_size, dc.n_classes, classify_consts.rnn_hidden_sz, \
                               padding_idx=dc.surname_vocab.mask_idx)
class_weights = class_weights.to(classify_consts.device)
loss_fn = nn.CrossEntropyLoss(class_weights)
optimizer = optim.Adam(classifier.parameters(), lr=classify_consts.lr)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', 0.5, patience=1)
mc = ModelContainer(classifier, optimizer, loss_fn, scheduler)
mc.model
# Smoke-test one batch through the model.
itr = iter(dc.train_dl)
inp,y = next(itr)
# NOTE(review): `x` and `l` are undefined here; presumably this was meant
# to unpack `inp` (e.g. mc.model(*inp)).
y_pred = mc.model(x,l)
loss_fn(y_pred, y)
# ## Training
pbar = ProgressBar(persist=True)
metrics = {'accuracy': Accuracy(), 'loss': Loss(loss_fn)}
classify_consts.n_epochs=2
ig = IgniteTrainer(mc, dc, classify_consts, pbar, metrics)
ig.run()
# ## Testing
# ### Plots
training_metrics = pd.read_csv(classify_consts.metrics_file)
training_metrics = training_metrics[:-1]
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15,5))
training_metrics.plot(x='epoch', y=['training_loss', 'validation_loss'], kind='line',
title='Training and validation loss', ax=axes[0])
training_metrics.plot(x='epoch', y=['training_acc', 'validation_acc'], kind='line',
title='Training and validation accuracy', ax=axes[1])
# ### Ignite Testing
classifier = SurnameClassifier(classify_consts.char_embedding_sz, dc.vocab_size, dc.n_classes, classify_consts.rnn_hidden_sz, \
padding_idx=dc.surname_vocab.mask_idx)
state_dict = torch.load(classify_consts.workdir/'elman_classifier.pth')
classifier.load_state_dict(state_dict)
class_weights = class_weights.to('cpu')
loss_fn = nn.CrossEntropyLoss(class_weights)
metrics = {'accuracy': Accuracy(), 'loss': Loss(loss_fn)}
# +
evaluator = custom_evaluator(classifier, metrics=metrics)
@evaluator.on(Events.COMPLETED)
def log_testing_results(engine):
metrics = engine.state.metrics
print(f"Test loss: {metrics['loss']:0.3f}")
print(f"Test accuracy: {metrics['accuracy']:0.3f}")
evaluator.run(dc.test_dl)
# -
# ### NLPBook Testing
# +
def compute_accuracy(y_pred, y_target):
    """Return the percentage of rows whose argmax class matches y_target.

    y_pred is a (batch, n_classes) score tensor; y_target holds the
    integer class index per row.
    """
    predicted = y_pred.max(dim=1).indices
    correct = (predicted == y_target).sum().item()
    return correct / len(predicted) * 100
running_loss = 0.
running_acc = 0.
classifier.eval()
for i, batch in enumerate(dc.test_dl):
model_inp,y = batch
y_pred = classifier(*model_inp)
loss = loss_fn(y_pred, y)
loss_t = loss.item()
running_loss += (loss_t-running_loss)/(i+1)
acc_t = compute_accuracy(y_pred, y)
running_acc += (acc_t-running_acc)/(i+1)
print(f"Test loss: {running_loss:0.3f}")
print(f"Test acc: {running_acc:0.3f}%")
# -
# ## Inference
import math
def predict_nationality(surname, classifier, vectorizer):
    """Classify a single surname and return the most likely nationality.

    Returns a dict with the predicted nationality, its probability, and
    the input surname.
    """
    vec, length = vectorizer.vectorize(surname, len(surname)+2)
    batch = torch.tensor(vec).unsqueeze(dim=0)  # batch of one
    lengths = torch.tensor([length], dtype=torch.int64)
    probs = classifier(batch, lengths, apply_softmax=True)
    # Softmax output must sum to one (up to float tolerance).
    assert(math.isclose(probs.sum(dim=1).item(), 1.0, abs_tol=1e-6))
    top_probs, top_idxs = probs.max(dim=1)
    nationality = vectorizer.nationality_vocab.lookup_idx(top_idxs.item())
    return {'nationality': nationality, 'probability': top_probs.item(), 'surname': surname}
for surname in ['McMahan', 'Nakamoto', 'Wan', 'Cho']:
print(predict_nationality(surname, classifier, dc.vectorizer))
# ## Playground
bs=3
hidden_sz=7
seq_sz =5
x_lens = torch.randint(1, seq_sz+1, (bs,))
x_lens = x_lens.long().detach().cpu().numpy()-1
y_out = torch.randn(bs, seq_sz, hidden_sz)
print(x_lens.shape)
x_lens
print(y_out.shape)
y_out
# +
out = []
for batch_idx, column_idx in enumerate(x_lens):
out.append(y_out[batch_idx, column_idx])
# print(batch_idx, column_idx)
# -
y = torch.stack(out)
print(y.shape)
y
bs=3
hidden_sz=7
seq_sz =5
from surname.elman import ElmanRNN
e = ElmanRNN(classify_consts.char_embedding_sz, classify_consts.rnn_hidden_sz, batch_first=True)
inp = torch.randn(2,10,100)
e(inp)
inp = inp.to('cuda:3')
e = e.to('cuda:3')
inp = inp.cpu()
e = e.cpu()
e(inp)
x = torch.zeros(5, device='cuda:3')
x
|
surname_rnn/surname_generation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/johnpharmd/DS-Sprint-02-Storytelling-With-Data/blob/master/DS_Unit_1_Sprint_Challenge_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="i-n_5en3ER1o" colab_type="text"
# # Data Science Unit 1 Sprint Challenge 2
#
# # Storytelling with Data
#
# In this sprint challenge you'll work with a dataset from **FiveThirtyEight's article, [Every Guest <NAME> Ever Had On ‘The Daily Show’](https://fivethirtyeight.com/features/every-guest-jon-stewart-ever-had-on-the-daily-show/)**!
# + [markdown] id="Thm2n5FF2Fnp" colab_type="text"
# # Part 0 — Run this starter code
#
# You don't need to add or change anything here. Just run this cell and it loads the data for you, into a dataframe named `df`.
#
# (You can explore the data if you want, but it's not required to pass the Sprint Challenge.)
# + id="0rTHgzJIuRS7" colab_type="code" colab={}
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('https://raw.githubusercontent.com/fivethirtyeight/data/master/daily-show-guests/daily_show_guests.csv')
df.rename(columns={'YEAR': 'Year', 'Raw_Guest_List': 'Guest'}, inplace=True)
def get_occupation(group):
    """Collapse the raw Group value into one of four occupation buckets."""
    buckets = {
        'Acting': 'Acting, Comedy, & Music',
        'Comedy': 'Acting, Comedy, & Music',
        'Musician': 'Acting, Comedy, & Music',
        'Media': 'Media',
        'media': 'Media',
        'Government': 'Government and Politics',
        'Politician': 'Government and Politics',
        'Political Aide': 'Government and Politics',
    }
    return buckets.get(group, 'Other')
df['Occupation'] = df['Group'].apply(get_occupation)
# + [markdown] id="OS0nW1vz1itX" colab_type="text"
# # Part 1 — What's the breakdown of guests’ occupations per year?
#
# For example, in 1999, what percentage of guests were actors, comedians, or musicians? What percentage were in the media? What percentage were in politics? What percentage were from another occupation?
#
# Then, what about in 2000? In 2001? And so on, up through 2015.
#
# So, **for each year of _The Daily Show_, calculate the percentage of guests from each occupation:**
# - Acting, Comedy & Music
# - Government and Politics
# - Media
# - Other
#
# #### Hints:
# 1. Use pandas to make a **crosstab** of **`Year`** & **`Occupation`**. ([This documentation](http://pandas.pydata.org/pandas-docs/stable/reshaping.html#cross-tabulations) has examples and explanation.)
# 2. To get percentages instead of counts, use crosstab's **`normalize`** parameter to normalize over each _row._ ([This documentation](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.crosstab.html) describes the parameter and its options.)
# 3. You'll know you've calculated the crosstab correctly when the percentage of "Acting, Comedy & Music" guests is 90.36% in 1999, and 45% in 2015.
# + id="sRMc0H_5z6ff" colab_type="code" colab={}
df.shape, df.head()
# + id="3xKbbdkRRhdp" colab_type="code" colab={}
year_occupation = pd.crosstab(df['Year'], df['Occupation'], normalize='index')
year_occupation.shape, year_occupation.head()
# + id="U0IRhMZbS18B" colab_type="code" colab={}
# Percentage of 'Acting, Comedy, & Music' guests in 1999
acm_guests_1999 = df[(df.Occupation == 'Acting, Comedy, & Music') & (df.Year == 1999)].Guest.count()
total_guests_1999 = df[df.Year == 1999].Guest.count()
print(acm_guests_1999 / total_guests_1999 * 100, '%')
# Percentage of 'Acting, Comedy, & Music' guests in 2015
acm_guests_2015 = df[(df.Occupation == 'Acting, Comedy, & Music') & (df.Year == 2015)].Guest.count()
total_guests_2015 = df[df.Year == 2015].Guest.count()
print(acm_guests_2015 / total_guests_2015 * 100, '%')
# + [markdown] id="z4G_egR81ZS2" colab_type="text"
# # Percentage of guests from each occupation, for each year of 'The Daily Show'
# + id="b59IeHdQyL4H" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1289} outputId="8b1705fb-49fe-4709-ef37-2aea499f28ca"
for year in year_occupation.index:
for occupation in year_occupation:
print(year, occupation,
df[(df.Occupation == occupation) & (df.Year == year)].Guest.count() / df[df.Year == year].Guest.count()*100, '%')
# + [markdown] id="Nqf9oJJDDu-d" colab_type="text"
# # Part 2 — Recreate this explanatory visualization:
# + id="scozkHQc0_eD" colab_type="code" outputId="64a105e6-8fa5-45e5-c78e-d29fcd19b3f2" colab={"base_uri": "https://localhost:8080/", "height": 406}
from IPython.display import display, Image
url = 'https://fivethirtyeight.com/wp-content/uploads/2015/08/hickey-datalab-dailyshow.png'
example = Image(url, width=500)
display(example)
# + [markdown] id="W7lw3JzAE6BJ" colab_type="text"
# **Hint:** use the crosstab you calculated in part 1!
#
# **Expectations:** Your plot should include:
# - 3 lines visualizing "occupation of guests, by year." The shapes of the lines should look roughly identical to 538's example. Each line should be a different color. (But you don't need to use the _same_ colors as 538.)
# - Legend or labels for the lines. (But you don't need each label positioned next to its line or colored like 538.)
# - Title in the upper left: _"Who Got To Be On 'The Daily Show'?"_ with more visual emphasis than the subtitle. (Bolder and/or larger font.)
# - Subtitle underneath the title: _"Occupation of guests, by year"_
#
# Any visual element not specifically mentioned in the expectations is an optional bonus, but it's _not_ required to pass the Sprint Challenge.
#
#
#
#
#
# + id="E8XBAr8rz_Na" colab_type="code" outputId="8cee7c96-bc82-4e0f-9d04-80480e58f9d1" colab={"base_uri": "https://localhost:8080/", "height": 319}
# Data
year_occupation_3 = year_occupation.drop('Other', axis=1)
# Plot
plt.style.use('fivethirtyeight')
# fig, ax = plt.subplots(figsize=(8, 5))
# Initial plot
ax = year_occupation_3.plot.line();
# Title
ax.set_title("Who Got To Be On 'The Daily Show'?",
loc='left', fontsize=18, fontweight='bold')
plt.suptitle('Occupation of guests, by year', x=0.26, y=0.89, fontsize=12);
# Line for ACM
# ax = year_occupation_3['Acting, Comedy, & Music'].plot.line()
# Line for GP
# ax_1 = year_occupation_3['Government and Politics'].plot.line()
# Line for Media
# ax_2 = year_occupation_3['Media'].plot.line()
# + [markdown] id="LuacMjSf2ses" colab_type="text"
# # Part 3 — Who were the top 10 guests on _The Daily Show_?
#
# **Make a plot** that shows their names and number of appearances.
#
# **Hint:** you can use the pandas `value_counts` method.
#
# **Expectations:** This can be a simple, quick plot: exploratory, not explanatory.
#
# If you want, you can add titles and change aesthetics, but it's _not_ required to pass the Sprint Challenge.
# + id="tbwfBN3HsFlh" colab_type="code" outputId="4e621fc4-b8de-40e2-f25e-4d996fe3cb91" colab={"base_uri": "https://localhost:8080/", "height": 223}
top_10_guests = df.Guest.value_counts()[:10]
top_10_guests = top_10_guests.sort_values()
top_10_guests
# + id="lz0TP7XMl0ny" colab_type="code" colab={}
top_guest_list = []
for guest, visit_count in top_10_guests.items():
top_guest_list.append(guest)
top_guests = np.array(top_guest_list)
visit_counts = top_10_guests.values
top_guests, visit_counts
# + id="tHLjaPv0nbiR" colab_type="code" outputId="8b7406b9-84fa-4c24-89ca-59b0c6397f2f" colab={"base_uri": "https://localhost:8080/", "height": 340}
# plt.rcParams['figure.figsize'] = (15, 5)
fig, ax = plt.subplots()
plt.scatter(top_guests, visit_counts, s=200);
ax.set_ylabel('Number of Appearances');
# n, bins, patches = plt.hist(top_10_guests)
|
DS_Unit_1_Sprint_Challenge_2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#Mandatory imports
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
from os.path import join as opj
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pylab
plt.rcParams['figure.figsize'] = 10, 10
# %matplotlib inline
# Load the iceberg train/test sets and coerce the incidence angle to numeric.
train = pd.read_json("input/train.json")
target_train=train['is_iceberg']
test = pd.read_json("input/test.json")
# NOTE(review): duplicate assignment -- target_train was already set above.
target_train=train['is_iceberg']
test['inc_angle']=pd.to_numeric(test['inc_angle'], errors='coerce')
train['inc_angle']=pd.to_numeric(train['inc_angle'], errors='coerce')#We have only 133 NAs.
# Forward-fill the missing train angles.
train['inc_angle']=train['inc_angle'].fillna(method='pad')
X_angle=train['inc_angle']
# NOTE(review): test['inc_angle'] is converted a second time here (a no-op).
test['inc_angle']=pd.to_numeric(test['inc_angle'], errors='coerce')
X_test_angle=test['inc_angle']
#Generate the training data
X_band_1=np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in train["band_1"]])
X_band_2=np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in train["band_2"]])
X_band_3=(X_band_1+X_band_2)/2
#X_band_3=np.array([np.full((75, 75), angel).astype(np.float32) for angel in train["inc_angle"]])
X_train = np.concatenate([X_band_1[:, :, :, np.newaxis]
, X_band_2[:, :, :, np.newaxis]
, X_band_3[:, :, :, np.newaxis]], axis=-1)
X_band_test_1=np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in test["band_1"]])
X_band_test_2=np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in test["band_2"]])
X_band_test_3=(X_band_test_1+X_band_test_2)/2
#X_band_test_3=np.array([np.full((75, 75), angel).astype(np.float32) for angel in test["inc_angle"]])
X_test = np.concatenate([X_band_test_1[:, :, :, np.newaxis]
, X_band_test_2[:, :, :, np.newaxis]
, X_band_test_3[:, :, :, np.newaxis]], axis=-1)
# +
#Import Keras.
from matplotlib import pyplot
from keras.optimizers import RMSprop
# from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Input, Flatten, Activation
from keras.layers import GlobalMaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import Concatenate
from keras.models import Model
from keras import initializers
from keras.optimizers import Adam
from keras.optimizers import rmsprop
from keras.layers.advanced_activations import LeakyReLU, PReLU
from keras.optimizers import SGD
from keras.optimizers import Adadelta
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping
from keras.datasets import cifar10
from keras.applications.inception_v3 import InceptionV3
from keras.applications.vgg16 import VGG16
from keras.applications.xception import Xception
from keras.applications.mobilenet import MobileNet
from keras.applications.vgg19 import VGG19
from keras.layers import Concatenate, Dense, LSTM, Input, concatenate
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
# -
#Data Aug for multi-input
from keras.preprocessing.image import ImageDataGenerator
batch_size=64
# Define the image transformations here
gen = ImageDataGenerator(horizontal_flip = True,
vertical_flip = True,
width_shift_range = 0.,
height_shift_range = 0.,
channel_shift_range=0,
zoom_range = 0.2,
rotation_range = 10)
# Here is the function that merges our two generators
# We use the exact same generator with the same random seed for both the y and angle arrays
def gen_flow_for_two_inputs(X1, X2, y):
    # Infinite generator yielding ([image_batch, angle_batch], label_batch)
    # for the two-input model. genX2 deliberately flows X1 with X2 (the
    # angles) in the label slot: sharing seed=55 keeps both streams
    # shuffled/augmented identically, so X2i[1] is the angle batch aligned
    # with the image batch X1i[0].
    genX1 = gen.flow(X1,y, batch_size=batch_size,seed=55)
    genX2 = gen.flow(X1,X2, batch_size=batch_size,seed=55)
    while True:
        X1i = genX1.next()
        X2i = genX2.next()
        #Assert arrays are equal - this was for peace of mind, but slows down training
        #np.testing.assert_array_equal(X1i[0],X2i[0])
        yield [X1i[0], X2i[1]], X1i[1]
# Finally create generator
def get_callbacks(filepath, patience=2):
    # Build the Keras callbacks: early stopping on validation loss plus a
    # best-only model checkpoint written to `filepath`.
    # NOTE(review): the `patience` argument is ignored -- EarlyStopping is
    # hard-coded to 25 epochs (matching the "25p" tag in the submission
    # filename). Confirm whether callers passing patience=5 expect that.
    es = EarlyStopping('val_loss', patience=25, mode="min")
    msave = ModelCheckpoint(filepath, save_best_only=True)
    return [es, msave]
# +
def getVggAngleModel():
    """Two-input Keras model: VGG16 image features merged with the angle.

    The image branch is ImageNet-pretrained VGG16 (no top); its
    'block5_pool' features are global-max-pooled, concatenated with a
    1-unit projection of the incidence angle, passed through two
    dropout-regularized 512-unit dense layers, and squashed to a sigmoid
    probability for binary iceberg/ship classification. Relies on the
    module-level X_train for the input shape; returns a compiled Model.
    """
    input_2 = Input(shape=[1], name="angle")
    angle_layer = Dense(1, )(input_2)
    base_model = VGG16(weights='imagenet', include_top=False,
                 input_shape=X_train.shape[1:], classes=1)
    # Take features from the last VGG pooling block and global-max-pool them.
    x = base_model.get_layer('block5_pool').output
    x = GlobalMaxPooling2D()(x)
    merge_one = concatenate([x, angle_layer])
    merge_one = Dense(512, activation='relu', name='fc2')(merge_one)
    merge_one = Dropout(0.3)(merge_one)
    merge_one = Dense(512, activation='relu', name='fc3')(merge_one)
    merge_one = Dropout(0.3)(merge_one)
    predictions = Dense(1, activation='sigmoid')(merge_one)
    model = Model(inputs=[base_model.input, input_2], outputs=predictions)
    adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, decay=1e-6, amsgrad=True)
    model.compile(loss='binary_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    # sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(loss='binary_crossentropy',
    #         optimizer=sgd,
    #         metrics=['accuracy'])
    return model
# +
#def myAngleCV(X_train, X_angle, X_test):
K=10
folds = list(StratifiedKFold(n_splits=K, shuffle=True, random_state=16).split(X_train, target_train))
y_test_pred_log = 0
y_train_pred_log=0
y_valid_pred_log = 0.0*target_train
for j, (train_idx, test_idx) in enumerate(folds):
print('\n===================FOLD=',j)
X_train_cv = X_train[train_idx]
y_train_cv = target_train[train_idx]
X_holdout = X_train[test_idx]
Y_holdout= target_train[test_idx]
#Angle
X_angle_cv=X_angle[train_idx]
X_angle_hold=X_angle[test_idx]
#define file path and get callbacks
file_path = "%s_aug_model_weights.hdf5"%j
callbacks = get_callbacks(filepath=file_path, patience=5)
gen_flow = gen_flow_for_two_inputs(X_train_cv, X_angle_cv, y_train_cv)
galaxyModel= getVggAngleModel()
histr = galaxyModel.fit_generator(
gen_flow,
steps_per_epoch=24,
epochs=100,
shuffle=True,
verbose=1,
validation_data=([X_holdout,X_angle_hold], Y_holdout),
callbacks=callbacks)
#Getting the Best Model
galaxyModel.load_weights(filepath=file_path)
#Getting Training Score
score = galaxyModel.evaluate([X_train_cv,X_angle_cv], y_train_cv, verbose=0)
print('Train loss:', score[0])
print('Train accuracy:', score[1])
#Getting Test Score
score = galaxyModel.evaluate([X_holdout,X_angle_hold], Y_holdout, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
#Getting validation Score.
pred_valid=galaxyModel.predict([X_holdout,X_angle_hold])
y_valid_pred_log[test_idx] = pred_valid.reshape(pred_valid.shape[0])
#Getting Test Scores
temp_test=galaxyModel.predict([X_test, X_test_angle])
y_test_pred_log+=temp_test.reshape(temp_test.shape[0])
#Getting Train Scores
temp_train=galaxyModel.predict([X_train, X_angle])
y_train_pred_log+=temp_train.reshape(temp_train.shape[0])
y_test_pred_log=y_test_pred_log/K
y_train_pred_log=y_train_pred_log/K
print('\n Train Log Loss Validation= ',log_loss(target_train, y_train_pred_log))
print(' Test Log Loss Validation= ',log_loss(target_train, y_valid_pred_log))
#return y_test_pred_log
# -
#preds=myAngleCV(X_train, X_angle, X_test)
preds = y_test_pred_log
# Submission to kaggle
submission = pd.DataFrame()
submission['id']=test['id']
submission['is_iceberg']=preds
submission.to_csv('sub_Adam_10folds_amsgrad_0001_25p.csv', index=False)
# list all data in history
print(histr.history.keys())
# summarize history for accuracy
plt.plot(histr.history['acc'])
plt.plot(histr.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(histr.history['loss'])
plt.plot(histr.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# ---
# +
import matplotlib.image as mpimg
# Min-max normalize the holdout images to [0, 1] for display.
min1 = np.min(X_holdout)
max1 = np.max(X_holdout)
diff1 = max1 - min1
my_sum = lambda x: (x - min1)/diff1  # NOTE: a min-max scaler, not a sum
X_holdout2 = my_sum(X_holdout)
# -
Y_holdout
# Show selected holdout samples with an actual label and the model's
# predicted iceberg probability.
# NOTE(review): the Y_holdout indices (13, 14, 38, ...) do not match the
# pred_valid / X_holdout2 indices (0, 1, 2, ...) -- confirm the intended
# label/prediction pairing before trusting these titles.
plt.imshow(X_holdout2[0], cmap='gray', interpolation='nearest');
tl = "Actual label : " +str(Y_holdout[13])+ ","+" iceberg_probability : "+str(pred_valid[0])
plt.title(tl)
plt.imshow(X_holdout2[1], cmap='gray', interpolation='nearest');
tl = "Actual label : " +str(Y_holdout[14])+ ","+" iceberg_probability : "+str(pred_valid[1])
plt.title(tl)
plt.imshow(X_holdout2[2], cmap='gray', interpolation='nearest');
tl = "Actual label : " +str(Y_holdout[38])+ ","+" iceberg_probability : "+str(pred_valid[2])
plt.title(tl)
plt.imshow(X_holdout2[3], cmap='gray', interpolation='nearest');
tl = "Actual label : " +str(Y_holdout[43])+ ","+" iceberg_probability : "+str(pred_valid[3])
plt.title(tl)
plt.imshow(X_holdout2[6], cmap='gray', interpolation='nearest');
tl = "Actual label : " +str(Y_holdout[104])+ ","+" iceberg_probability : "+str(pred_valid[6])
plt.title(tl)
plt.imshow(X_holdout2[9], cmap='gray', interpolation='nearest');
tl = "Actual label : " +str(Y_holdout[138])+ ","+" iceberg_probability : "+str(pred_valid[9])
plt.title(tl)
plt.imshow(X_holdout2[10], cmap='gray', interpolation='nearest');
tl = "Actual label : " +str(Y_holdout[141])+ ","+" iceberg_probability : "+str(pred_valid[10])
plt.title(tl)
plt.imshow(X_holdout2[11], cmap='gray', interpolation='nearest');
tl = "Actual label : " +str(Y_holdout[167])+ ","+" iceberg_probability : "+str(pred_valid[11])
plt.title(tl)
plt.imshow(X_holdout2[12], cmap='gray', interpolation='nearest');
tl = "Actual label : " +str(Y_holdout[195])+ ","+" iceberg_probability : "+str(pred_valid[12])
plt.title(tl)
plt.imshow(X_holdout2[13], cmap='gray', interpolation='nearest');
tl = "Actual label : " +str(Y_holdout[199])+ ","+" iceberg_probability : "+str(pred_valid[13])
plt.title(tl)
|
Model_03-Adam-k=10-amsgrad-0001-25p.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="f4TSNCvpENrW"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" id="vamNSA0vEP-m"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="e1oSi4lHFt3z"
# # Use XLA with tf.function
# + [markdown] id="b7noD9NjFRL-"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/xla/tutorials/compile"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/g3doc/tutorials/jit_compile.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/g3doc/tutorials/jit_compile.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + [markdown] id="sDy5lSBd4BDE"
# This tutorial trains a TensorFlow model to classify the MNIST dataset, where the training function is compiled using XLA.
#
# First, load TensorFlow and enable eager execution.
# + id="ukTmcHYkwx8f"
# In TF 2.4 jit_compile is called experimental_compile
# !pip install tf-nightly
# + id="45kUPj5ZFrRa"
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
# + [markdown] id="GZVNiRmTDV-5"
# Then define some necessary constants and prepare the MNIST dataset.
# + id="f37TSEGvGX4_"
# Size of each input image, 28 x 28 pixels, flattened to a single vector
IMAGE_SIZE = 28 * 28
# Number of distinct number labels, [0..9]
NUM_CLASSES = 10
# Number of examples in each training batch (step)
TRAIN_BATCH_SIZE = 100
# Number of training steps to run
TRAIN_STEPS = 1000
# Loads MNIST dataset: (images, labels) tuples for the train/test splits.
train, test = tf.keras.datasets.mnist.load_data()
# Batch and repeat indefinitely; the training loop stops itself after
# TRAIN_STEPS optimizer iterations.
train_ds = tf.data.Dataset.from_tensor_slices(train).batch(TRAIN_BATCH_SIZE).repeat()
# Casting from raw data to the required datatypes.
def cast(images, labels):
    """Flatten images to IMAGE_SIZE float32 vectors and cast labels to int64.

    Returns the (images, labels) pair in the dtypes expected by the
    sparse softmax cross-entropy loss used during training.
    """
    flat_images = tf.reshape(images, [-1, IMAGE_SIZE])
    return (tf.cast(flat_images, tf.float32), tf.cast(labels, tf.int64))
# + [markdown] id="lv7I-u_82v1S"
# Finally, define the model and the optimizer. The model uses a single dense layer.
# + id="7O2NcEfG206Q"
layer = tf.keras.layers.Dense(NUM_CLASSES)
optimizer = tf.keras.optimizers.Adam()
# + [markdown] id="x_ZehpZP-SfS"
# # Define the training function
#
# In the training function, you get the predicted labels using the layer defined above, and then minimize the gradient of the loss using the optimizer. In order to compile the computation using XLA, place it inside `tf.function` with `jit_compile=True`.
# + id="ZbhJl_WvGa3g"
@tf.function(jit_compile=True)
def train_mnist(images, labels):
    """Run one XLA-compiled training step on a single batch.

    Computes the mean sparse-softmax cross-entropy of the dense layer's
    logits and applies the resulting gradients with the module-level
    optimizer.
    """
    images, labels = cast(images, labels)
    with tf.GradientTape() as tape:
        logits = layer(images)
        per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels)
        loss = tf.reduce_mean(per_example)
    train_vars = layer.trainable_variables
    gradients = tape.gradient(loss, train_vars)
    optimizer.apply_gradients(zip(gradients, train_vars))
# + [markdown] id="EZD1m_n1DxAF"
# # Train and test the model
# + [markdown] id="gukC2Hol3sFZ"
# Once you have defined the training function, define the model.
# + id="qe28bAHNHUG2"
# Drive training: iterate the (infinite) dataset and stop once the
# optimizer has taken TRAIN_STEPS steps.
for images, labels in train_ds:
    if optimizer.iterations > TRAIN_STEPS:
        break
    train_mnist(images, labels)
# + [markdown] id="qgsKmz3n2UiW"
# And, finally, check the accuracy:
# + id="_GxF6jTRHVuA"
# Evaluate on the test split: fraction of argmax predictions matching labels.
images, labels = cast(test[0], test[1])
predicted_labels = layer(images)
correct_prediction = tf.equal(tf.argmax(predicted_labels, 1), labels)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("Prediction accuracy after training: %s" % accuracy)
# + [markdown] id="PXoOjJnuZRaV"
# Behind the scenes, the XLA compiler has compiled the entire TF function to HLO, which has enabled fusion optimizations. Using the introspection facilities, we can see the HLO code (other interesting possible values for "stage" are `optimized_hlo` for HLO after optimizations and `optimized_hlo_dot` for a Graphviz graph):
# + id="_a8GsNLVaLSQ"
print(train_mnist.experimental_get_compiler_ir(images, labels)(stage='hlo'))
|
site/en-snapshot/xla/tutorials/jit_compile.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tonks Ensemble Model Training Pipeline
# As the fourth (and final) step of this tutorial, we will train an ensemble model using the image and text models we've already trained.
#
# This notebook was run on an AWS p3.2xlarge
# +
# %load_ext autoreload
# %autoreload 2
# -
import sys
sys.path.append('../../')
# +
import joblib
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from transformers import AdamW, BertTokenizer, get_cosine_schedule_with_warmup
from tonks.learner import MultiTaskLearner, MultiInputMultiTaskLearner
from tonks.dataloader import MultiDatasetLoader
from tonks.ensemble import TonksEnsembleDataset, BertResnetEnsembleForMultiTaskClassification
# -
# ## Load in train and validation datasets
# First we load in the csv's we created in Step 1.
# Remember to change the path if you stored your data somewhere other than the default.
TRAIN_COLOR_DF = pd.read_csv('data/color_swatches/color_train.csv')
VALID_COLOR_DF = pd.read_csv('data/color_swatches/color_valid.csv')
TRAIN_PATTERN_DF = pd.read_csv('data/pattern_swatches/pattern_train.csv')
VALID_PATTERN_DF = pd.read_csv('data/pattern_swatches/pattern_valid.csv')
# You will most likely have to alter this to however big your batches can be on your machine
batch_size = 16
# +
bert_tok = BertTokenizer.from_pretrained(
'bert-base-uncased',
do_lower_case=True
)
max_seq_length = 128
# +
# Build the four (text + image) ensemble datasets: train/valid for each task.
color_train_dataset = TonksEnsembleDataset(
    text_inputs=TRAIN_COLOR_DF['complex_color'],
    img_inputs=TRAIN_COLOR_DF['image_locs'],
    y=TRAIN_COLOR_DF['simple_color_cat'],
    tokenizer=bert_tok,
    max_seq_length=max_seq_length,
    transform='train',
    crop_transform='train'
)
color_valid_dataset = TonksEnsembleDataset(
    text_inputs=VALID_COLOR_DF['complex_color'],
    img_inputs=VALID_COLOR_DF['image_locs'],
    y=VALID_COLOR_DF['simple_color_cat'],
    tokenizer=bert_tok,
    max_seq_length=max_seq_length,
    transform='val',
    crop_transform='val'
)
# NOTE(review): the pattern *train* dataset is built from VALID_PATTERN_DF,
# not TRAIN_PATTERN_DF -- confirm whether this is intentional for the
# synthetic-data tutorial or a copy/paste slip.
pattern_train_dataset = TonksEnsembleDataset(
    text_inputs=VALID_PATTERN_DF['fake_text'],
    img_inputs=VALID_PATTERN_DF['image_locs'],
    y=VALID_PATTERN_DF['pattern_type_cat'],
    tokenizer=bert_tok,
    max_seq_length=max_seq_length,
    transform='train',
    crop_transform='train'
)
pattern_valid_dataset = TonksEnsembleDataset(
    text_inputs=VALID_PATTERN_DF['fake_text'],
    img_inputs=VALID_PATTERN_DF['image_locs'],
    y=VALID_PATTERN_DF['pattern_type_cat'],
    tokenizer=bert_tok,
    max_seq_length=max_seq_length,
    transform='val',
    crop_transform='val'
)
# -
# We then put the datasets into a dictionary of dataloaders.
#
# Each task is a key.
train_dataloaders_dict = {
'color': DataLoader(color_train_dataset, batch_size=batch_size, shuffle=True, num_workers=2),
'pattern': DataLoader(pattern_train_dataset, batch_size=batch_size, shuffle=True, num_workers=2),
}
valid_dataloaders_dict = {
'color': DataLoader(color_valid_dataset, batch_size=batch_size, shuffle=False, num_workers=2),
'pattern': DataLoader(pattern_valid_dataset, batch_size=batch_size, shuffle=False, num_workers=2),
}
TrainLoader = MultiDatasetLoader(loader_dict=train_dataloaders_dict)
len(TrainLoader)
ValidLoader = MultiDatasetLoader(
loader_dict=valid_dataloaders_dict,
shuffle=False
)
len(ValidLoader)
# Create Model and Learner
# ===
# Since the image model could potentially have multiple Resnets for different subsets of tasks, we need to create an `image_task_dict` that splits up the tasks grouped by the Resnet they use.
image_task_dict = {
'color_pattern': {
'color': TRAIN_COLOR_DF['simple_color_cat'].nunique(),
'pattern': TRAIN_PATTERN_DF['pattern_type_cat'].nunique()
}
}
# We still need to create the `new_task_dict` for the learner.
new_task_dict = {
'color': TRAIN_COLOR_DF['simple_color_cat'].nunique(),
'pattern': TRAIN_PATTERN_DF['pattern_type_cat'].nunique()
}
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
# We first initialize the model by setting up the right shape with the image_task_dict.
model = BertResnetEnsembleForMultiTaskClassification(
image_task_dict=image_task_dict
)
# We then load in the existing models by specifying the folder where the models live and their id's.
resnet_model_id_dict = {
'color_pattern': 'IMAGE_MODEL1'
}
model.load_core_models(
folder='models/',
bert_model_id='TEXT_MODEL1',
resnet_model_id_dict=resnet_model_id_dict
)
# We've set some helper methods that will freeze the core bert and resnets for you if you only want to train the new layers. As with all other aspects of training, this is likely to require some experimentation to determine what works for your problem.
# You will likely need to explore different values in this section to find some that work
# for your particular model.
# +
# Freeze the pretrained cores so only the new layers learn meaningfully.
model.freeze_bert()
model.freeze_resnets()
loss_function = nn.CrossEntropyLoss()
# Two learning rates: a small one for the pretrained cores and a larger
# one for the freshly initialized ensemble/classifier layers.
lr_last = 1e-3
lr_main = 1e-5
lr_list = [
    {'params': model.bert.parameters(), 'lr': lr_main},
    {'params': model.dropout.parameters(), 'lr': lr_main},
    {'params': model.image_resnets.parameters(), 'lr': lr_main},
    {'params': model.image_dense_layers.parameters(), 'lr': lr_main},
    {'params': model.ensemble_layers.parameters(), 'lr': lr_last},
    {'params': model.classifiers.parameters(), 'lr': lr_last},
]
optimizer = optim.Adam(lr_list)
# Decay every parameter group's learning rate by 10x every 4 epochs.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size= 4, gamma= 0.1)
# -
learn = MultiInputMultiTaskLearner(model, TrainLoader, ValidLoader, new_task_dict)
# Train Model
# ===
# As your model trains, you can see some output of how the model is performing overall and how it is doing on each individual task.
learn.fit(
num_epochs=10,
loss_function=loss_function,
scheduler=exp_lr_scheduler,
step_scheduler_on_batch=False,
optimizer=optimizer,
device=device,
best_model=True
)
# Ideally the ensemble would perform better than either the image or text model alone, but our performance is probably suffering due to this being synthetic data.
# Checking validation data
# ===
# We provide a method on the learner called `get_val_preds`, which makes predictions on the validation data. You can then use this to analyze your model's performance in more detail.
pred_dict = learn.get_val_preds(device)
pred_dict
# Save/Export Model
# ===
# The ensemble model can also be saved or exported.
model.save(folder='models/', model_id='ENSEMBLE_MODEL1')
model.export(folder='models/', model_id='ENSEMBLE_MODEL1')
|
notebooks/synthetic_data/Step4_Train_ensemble_model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: hoezithet
# language: python
# name: hoezithet
# ---
import bokeh
from bokeh.plotting import figure, show
from bokeh.io import output_notebook, save
from bokeh.embed import json_item
import json
from pathlib import Path
output_notebook()
# +
from hoezithet import graphs
import math
import numpy as np
from bokeh.models import Arrow, VeeHead, Label
p = graphs.get_plot()
p.title.text = '<NAME> f(x) vinden'
p.title.text_font = 'Quicksand'
p.title.text_font_size = '18pt'
p.title.align = 'center'
p.tools[0].names = ['f(x)', 'dom'] # Only show hover for f(x) and dom
# Example function: f(x) = 3*(sqrt(x + 5) - 2), defined for x >= -5.
def f(x): return 3*(math.sqrt(x + 5) - 2)
# Sample the domain [-5, 12) at 0.1 steps for plotting.
xs = [x for x in np.arange(-5, 12, 0.1)]
ys = [f(x) for x in xs]
p.add_layout(Label(x=6, y=5, text='f(x)',
text_font='Quicksand',
text_font_size='24pt',
text_color=graphs.BLUE))
for x, y in [(x, f(x)) for x in range(-5, 12)]:
p.line([x, x], [0, y],
color=graphs.GRAY,
alpha=1,
line_width=4,
line_dash='dotted')
if y != 0:
p.add_layout(Arrow(end=VeeHead(size=10, line_color=graphs.GRAY, fill_color=graphs.GRAY),
x_start=x, x_end=x,
y_start=y, y_end=y/2,
line_dash='dotted',
line_color=graphs.GRAY,
line_alpha=0))
p.line(xs, ys, line_width=10, line_cap='round', color=graphs.BLUE, name='f(x)')
p.line([-5, 12], [0, 0], line_width=10, color=graphs.GREEN, name='dom')
p.circle(-5, 0, radius=.2, line_width=10, color=graphs.GREEN)
item = json.dumps(json_item(p))
Path('plt/dom.json').write_text(item)
# +
p = graphs.get_plot()
p.title.text = 'Grafiek van f(x)'
p.title.text_font = 'Quicksand'
p.title.text_font_size = '18pt'
p.title.align = 'center'
p.tools[0].names = ['f(x)', 'dom'] # Only show hover for f(x) and dom
p.add_layout(Label(x=6, y=5, text='f(x)',
text_font='Quicksand',
text_font_size='24pt',
text_color=graphs.BLUE))
p.line(xs, ys, line_width=10, line_cap='round', color=graphs.BLUE, name='f(x)')
item = json.dumps(json_item(p))
Path('plt/fx.json').write_text(item)
# -
from bokeh.models import Label
# +
p = graphs.get_plot()
p.title.text = 'Beeld van f(x) vinden'
p.title.text_font = 'Quicksand'
p.title.text_font_size = '18pt'
p.title.align = 'center'
p.tools[0].names = ['f(x)', 'bld'] # Only show hover for f(x) and bld
# Inverse of f (solves y = 3*(sqrt(x + 5) - 2) for x); maps y-values back to x.
def fi(y): return (y/3 + 2)**2 - 5
p.add_layout(Label(x=6, y=5, text='f(x)',
text_font='Quicksand',
text_font_size='24pt',
text_color=graphs.BLUE))
for x, y in [(fi(y), y) for y in range(-6, 12)]:
p.line([0, x], [y, y],
color=graphs.GREY,
alpha=1,
line_width=4,
line_dash='dotted')
if x != 0:
p.add_layout(Arrow(end=VeeHead(size=10, line_color=graphs.GREY, fill_color=graphs.GREY),
x_start=x, x_end=x/2,
y_start=y, y_end=y,
line_dash='dotted',
line_color=graphs.GREY,
line_alpha=0))
p.line(xs, ys, line_width=10, line_cap='round', color=graphs.BLUE, name='f(x)')
p.line([0, 0], [-6, 12], line_width=10, color=graphs.GREEN, name='bld')
p.circle(0, -6, radius=.2, line_width=10, color=graphs.GREEN)
item = json.dumps(json_item(p))
Path('plt/bld.json').write_text(item)
# -
|
content/lessen/wiskunde/functies/domein_beeld/dom_bld.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <small><small><i>
# Introduction to Python for Bioinformatics - available at https://github.com/kipkurui/Python4Bioinformatics.
# </i></small></small>
from IPython.display import HTML
# # Control Flow Statements
# The key thing to note about Python's control flow statements and program structure is that it uses _indentation_ to mark blocks. Hence the amount of white space (space or tab characters) at the start of a line is very important. This generally helps to make code more readable but can catch out new users of python.
# ## Conditionals
#
# Conditionals in Python allows us to test conditions and change the program behaviour depending on the outcome of the tests. The Booleans, 'True' or 'False' are used in conditionals.
# ### If
# ```python
# if some_condition:
# code block```
#
# Take note of the **:** at the end of the condition. The indented statements that follow are called a
# block. The first unindented statement marks the end of the block. Code is executed in blocks.
x = 12
if x > 10:
print('Hello')
# ### If-else
# ```python
# if some_condition:
# algorithm1
# else:
# algorithm2```
#
# If the condition is True then algorithm1 is executed. If not, algorithm2 under the else clause is executed.
x = 12
if 10 < x > 11:
print("hello")
else:
print("world")
# ### Else if
#
# Sometimes there are more than two possibilities and we need more than two branches. One way to express a computation like that is a **chained conditional**. You can have as many `elif` statements as you'd like, but it must have just one `else` statement at the end.
# ```python
# if some_condition:
# algorithm
# elif some_condition:
# algorithm
# else:
# algorithm```
x = 27
y = 12
if x > y:
print("x>y")
elif x < y:
print("x<y")
else:
print("x=y")
# if statement inside a if statement or if-elif or if-else are called as nested if statements.
# +
x = 10
y = 12
if x > y:
print( "x>y")
elif x < y:
print( "x<y")
if x==10:
print("x==10")
else:
print('invalid')
else:
print ("x=y")
# -
x=17
y=12
if x>y:
print('x>y')
elif x<y:
print('x<y')
if x==10:
print('x==10')
else:
print('invalid')
else:
print('x=y')
# ## Loops
#
#
# ### For
#
# Loops allow us to repeat some code a given number of times. For example, we can print an invite to a party for each of our friends using a for loop. In this case, it repeats printing an invite until we have invited all our friends. That is the terminating condition of the loop.
names = ["Joe","Zoe","Brad","Angelina","Zuki","Thandi","Paris"]
for name in names:
invite = "Hi %s. Please come to my party on Saturday!" % name
print(invite)
# In short:
#
# ```python
# for variable in something:
# algorithm```
#
# When looping over integers the **range()** function is useful which generates a range of integers:
# * range(n) = 0, 1, ..., n-1
# * range(m,n)= m, m+1, ..., n-1
# * range(m,n,s)= m, m+s, m+2s, ..., m + ((n-m-1)//s) * s
#
# Once again, let's use [Python Visualizer](https://goo.gl/vHxi2f) to understand loops.
# + language="html"
# <iframe width="900" height="400" frameborder="0" src="http://pythontutor.com/iframe-embed.html#code=for%20f%20in%20%5B%22Joe%22,%22Zoe%22,%22Brad%22,%22Angelina%22,%22Zuki%22,%22Thandi%22,%22Paris%22%5D%3A%0A%20%20%20%20invite%20%3D%20%22Hi%20%22%20%2B%20f%20%2B%20%22.%20%20Please%20come%20to%20my%20party%20on%20Saturday!%22%0A%20%20%20%20print%28invite%29&codeDivHeight=400&codeDivWidth=350&cumulative=false&curInstr=15&heapPrimitives=nevernest&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false"> </iframe>
# -
for ch in 'abc':
print(ch)
total = 0
for i in range(5):
total += i
for i,j in [(1,2),(3,1)]:
total += i**j
print("total =",total)
# In the above example, i iterates over the 0,1,2,3,4. Every time it takes each value and executes the algorithm inside the loop. It is also possible to iterate over a nested list illustrated below.
list_of_lists = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
for list1 in list_of_lists:
print(list1)
# A use case of a nested for loop in this case would be,
list_of_lists = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
total=0
for list1 in list_of_lists:
for x in list1:
total = total+x
print(total)
# There are many helper functions that make **for** loops even more powerful and easy to use. For example **enumerate()**, **zip()**, **sorted()**, **reversed()**
print("reversed: ",end="")
for ch in reversed("abc"):
print(ch,end=";")
print("\nenuemerated: ")
for i,ch in enumerate("abc"):
print(i,"=",ch,end="; ")
print("\nzip'ed: ")
for a,x in zip("abc","xyz"):
print(a,":",x)
# ### While
# ```python
# while some_condition:
# algorithm```
#
# A while loop checks a condition and continues executing the block until the condition is False. The loop terminates when the condition is not met.
#
# #### Example
#
# * Write a program to manage bank withdrawals at the ATM
#
# In the example below, sometimes the code does not behave as expected in Jupyter Notebook. See the Script bank.py.
acountbal = 50000
choice = input("Please enter 'b' to check balance or 'w' to withdraw: ")
while choice != 'q':
if choice.lower() in ('w','b'):
if choice.lower() == 'b':
print("Your balance is: %d" % acountbal)
print("Anything else?")
choice = input("Enter b for balance, w to withdraw or q to quit: ")
print(choice.lower())
else:
withdraw = float(input("Enter amount to withdraw: "))
if withdraw <= acountbal:
print("here is your: %.2f" % withdraw)
acountbal = acountbal - withdraw
print("Anything else?")
choice = input("Enter b for balance, w to withdraw or q to quit: ")
#choice = 'q'
else:
print("You have insufficient funds: %.2f" % acountbal)
else:
print("Wrong choice!")
choice = input("Please enter 'b' to check balance or 'w' to withdraw: ")
acountbal = 50000
choice = input("Please enter 'b' to check balance, 'w' to withdraw or 'd' to deposit: ")
while choice != 'q':
if choice.lower() in ('w','b', 'd'):
if choice.lower() == 'b':
print("Your balance is: %d" % acountbal)
print("Anything else?")
choice = input("Enter b for balance, w to withdraw, d to deposit or q to quit: ")
print(choice.lower())
elif choice.lower()== 'w':
withdraw = float(input("Enter amount to withdraw: "))
if withdraw <= acountbal:
print("here is your: %.2f" % withdraw)
acountbal = acountbal - withdraw
print("Anything else?")
choice = input("Enter b for balance, w to withdraw, d to deposit or q to quit: ")
#choice = 'q'
else:
print("You have insufficient funds: %.2f" % acountbal)
choice = input("Please enter 'b' to check balance, 'w' to withdraw 'd' to deposit or 'q' to quit: ")
elif choice.lower() == 'd':
deposit = float(input("Enter amount to deposit"))
print("your deposit was successful")
acountbal = acountbal + deposit
print("Your new balance is: %d" % acountbal)
print("Anything else?")
choice = input("Enter b for balance, w to withdraw, d to deposit or q to quit: ")
else:
print("Wrong choice!")
choice = input("Please enter 'b' to check balance, 'w' to withdraw or 'd' to deposit: ")
# ### Your turn
#
# Expand the script in the previous cell to also manage ATM deposits
i = 1
while i < 3:
print(i ** 2)
i = i+1
print('Bye')
# Count occurrences of `base` in `dna` with an explicit while loop.
dna = 'ATGCGGACCTAT'
base = 'C'
i = 0 # counter: number of matches found
j = 0 # string index
while j < len(dna):
    if dna[j] == base:
        i += 1
    j += 1
# NOTE(review): this prints the final index j (== len(dna)), not the match
# count i -- confirm whether printing i was intended.
print(j)
# If the conditional does not change to false at some point, we end up with an infinite loop. For example, if you follow the directions for using shampoo 'lather, rinse, repeat' literally you may never finish washing your hair. That is an infinite loop.
#
# Use a **for loop** if you know, before you start looping, the maximum number of times that you’ll need to execute the body.
# ### Break
# Loops execute until a given number of times is reached or the condition changes to False. You can `break` out of a loop when a condition becomes true when executing the loop.
for i in range(100):
print(i)
if i>=7:
break
# ### Continue
# This continues the rest of the loop. Sometimes when a condition is satisfied there are chances of the loop getting terminated. This can be avoided using continue statement.
for i in range(10):
if i>4:
print("Ignored",i)
continue
# this statement is not reach if i > 4
print("Processed",i)
# ## Catching exceptions
# To break out of deeply nested execution sometimes it is useful to raise an exception.
# A try block allows you to catch exceptions that happen anywhere during the execution of the try block:
# ```python
# try:
# code
# except <Exception Type> as <variable name>:
# # deal with error of this type
# except:
# # deal with any error```
try:
count=0
while True:
print('First here')
while True:
print('Then here')
while True:
print('Finally here')
print("Looping")
count = count + 1
if count > 3:
float('ywed')
#raise Exception("abort") # exit every loop or function
except Exception as e: # this is where we go when an exception is raised
print("Caught exception:",e)
# This can also be useful to handle unexpected system errors more gracefully:
try:
for i in [2,1.5,0.0,3]:
inverse = 1.0/i
print("The inverse of %f is %f" % (i,inverse))
except ValueError: # no matter what exception
print("Cannot calculate inverse of %f" % i)
except ZeroDivisionError:
print("Cannot divide by zero")
except:
print("No idea whhat went wrong")
# ### Exercise
#
# 1. Create a while loop that starts with x = 0 and increments x until x is equal to 5. Each iteration should print to the console.
# 2. Repeat the previous problem, but the loop will skip printing x = 5 to the console but will print values of x from 6 to 10.
# 3. Create a for loop that prints values from 4 to 10 to the console.
# Exercise 1: print x from 0 to 5 inclusive.
x=0
while x < 6:
    print (x)
    x=x+1
# Exercise 2: print 0..10 but skip printing 5 (x jumps from 4 to 6).
x=0
while x<11:
    if x==5:
        x=x+1
    print (x)
    x=x+1
# Exercise 3: print values 4 through 10.
for i in range (4,11):
    print (i)
|
Notebooks/05.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# %matplotlib notebook
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from src.plot import policy_plot, dummy_obs, q_plot
from src.mdp.action import a_str, NUM_A
from src.agent.train import load
from src.mdp.state import state_to_obs, process_obs
from gym_ca.envs import ValCAEnv
from math import pi
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
model_name = 'test2.pkl'
policy_plot(pi, model_name)
# +
# q_plot(pi, model_name)
# -
# ### Plot encounter
# +
# model = load(model_name)
# +
# from src.encounter import sticky_act_encounter
# from src.plot import encounter_plot
# +
# encounters = [sticky_act_encounter(35)]
# env = ValCAEnv(encounters, seed=4)
# enc_info = []
# done, obs = False, env.reset()
# while not done:
# obs, rw, done, info = env.step(model.predict(obs)[0])
# st = info['state']
# enc_info.append((st.ac0, info['a0'], st.ac1, info['a1']))
# +
# encounter_plot(enc_info)
# -
# %load_ext tensorboard
# %tensorboard --logdir /home/osmany/deep-rl-ca/src/src/models/model-test-run/logs/tb/data
def test():
    """No-op placeholder used only to demonstrate ``__name__`` introspection."""
    return None
test.__name__
|
src/src/plot.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# name: python3
# ---
# +
# default_exp core
# -
# # Data Processing and EDA
#
# > regarding Frost data
#hide
from nbdev.showdoc import *
from fastcore.test import *
#export
def say_hello(to):
    """Return a greeting string addressed to *to*."""
    return "Hello {}!".format(to)
say_hello('Clarita')
test_eq(say_hello('Clarita'), 'Hello Clarita!')
|
00_core.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from tabulate import tabulate
arr = np.array(pd.read_excel("letters.xlsx", header=None, dtype=str))
midware = np.tile("", (len(arr), len(arr[0])))
result = np.tile("", (len(arr), len(arr[0])))
class result:
    """Direction-availability flags for a grid cell.

    Each attribute is True when a step from the cell in that direction
    stays inside the grid (flags are set by ``checkmoves``); all start
    as False.

    NOTE: this class name shadows the module-level ``result`` array
    created just above it.
    """

    def __init__(self):
        # Plain booleans. The original code had trailing commas here
        # (``self.left = False,``), which made each attribute a 1-tuple
        # ``(False,)`` -- a truthy value that breaks truth-value tests.
        self.left = False
        self.right = False
        self.up = False
        self.down = False
        self.left_up = False
        self.left_down = False
        self.right_up = False
        self.right_down = False
# -
def getwordsfromuser():
    """Read words from stdin until the user enters "/s", then search.

    Collects the entered words, runs ``searchwords`` over the global
    grid ``arr``, and prints the marked result grid.
    """
    words = []
    while True:
        entry = input("input word /s to stop ")
        if entry == "/s":
            break
        words.append(entry)
    print(searchwords(arr, words))
def searchwords(array, wordList):
    """Mark letters of the given words found in ``array`` into the global
    ``midware`` grid and return it.

    For every cell whose letter appears in a word, the letter is copied
    into ``midware``; when a neighbouring cell continues the word, that
    cell is marked as well (only the ``left`` and ``left_down`` neighbours
    are implemented so far, matching the original work in progress).

    NOTE(review): the loops mix axes -- ``x`` ranges over ``len(array[0])``
    (columns) but is used as the row index in ``array[x][y]``. This only
    works for square grids; confirm the intended orientation.
    """
    for x in range(0, len(array[0]) - 1):
        for y in range(0, len(array)):
            for a in range(0, len(wordList)):
                word = wordList[a]
                for b in range(0, len(word)):
                    if array[x][y] != word[b]:
                        continue
                    midware[x][y] = word[b]
                    # Only look for a continuation when the word has a next
                    # letter: the original indexed word[b + 1] unconditionally,
                    # raising IndexError when b was the last letter.
                    if b + 1 < len(word):
                        moves = checkmoves(x, y)
                        if moves.left == True and arr[x - 1][y] == word[b + 1]:
                            midware[x - 1][y] = word[b + 1]
                        # Completed from the original's unfinished left_down
                        # branch (it ended mid-expression): step to
                        # arr[x - 1][y + 1], mirroring the `left` case.
                        if moves.left_down == True and arr[x - 1][y + 1] == word[b + 1]:
                            midware[x - 1][y + 1] = word[b + 1]
                    break
    return midware
getwordsfromuser()
def writeresult(result, file):
    """Write the result grid to ``file`` as an Excel sheet without
    header or index.

    Uses ``pd.ExcelWriter`` as a context manager, which saves the
    workbook on exit; the original explicit ``writer.save()`` call is
    deprecated in modern pandas and removed in 2.0.
    """
    with pd.ExcelWriter(file) as writer:
        pd.DataFrame(result).to_excel(writer, header=False, index=False)
def checkmoves(x,y):
    """Return a ``result`` whose flags say which neighbours of cell
    (x, y) lie inside the global grid ``arr``.

    ``x`` is treated as the row index and ``y`` as the column index,
    matching how the grid is read elsewhere (``arr[x][y]``). The original
    bounds mixed row and column counts inconsistently (e.g. ``left_down``
    checked ``y + 1`` against ``len(arr[0]) - 1`` while ``down`` used
    ``len(arr) - 1``), which only behaved correctly on square grids;
    the checks below are normalized to row/column semantics.
    """
    res = result()
    rows = len(arr)       # number of rows: valid x is 0..rows-1
    cols = len(arr[0])    # number of columns: valid y is 0..cols-1
    if x - 1 >= 0:
        res.left = True
    if x + 1 <= rows - 1:
        res.right = True
    if y - 1 >= 0:
        res.up = True
    if y + 1 <= cols - 1:
        res.down = True
    if x - 1 >= 0 and y - 1 >= 0:
        res.left_up = True
    if x - 1 >= 0 and y + 1 <= cols - 1:
        res.left_down = True
    if x + 1 <= rows - 1 and y - 1 >= 0:
        res.right_up = True
    if x + 1 <= rows - 1 and y + 1 <= cols - 1:
        res.right_down = True
    return res
checkmoves(1,1)
|
workbook.ipynb
|