content
stringlengths 6
1.05M
|
|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: dev
# language: python
# name: dev
# ---
import numpy as np
from sklearn.datasets import load_boston
from sklearn.model_selection import KFold as skKFold
class KFold():
    """Minimal k-fold cross-validator mirroring sklearn's KFold.

    Produces ``n_splits`` consecutive folds; each fold is used once as
    the test set while the remaining samples form the training set.
    """

    def __init__(self, n_splits=5, shuffle=False, random_state=0):
        # Number of folds, optional shuffling of the sample order, and
        # the RNG seed used only when shuffling is requested.
        self.n_splits = n_splits
        self.shuffle = shuffle
        self.random_state = random_state

    def _iter_test_indices(self, X, y):
        """Yield the test-sample index array of each fold in turn."""
        n_samples = X.shape[0]
        order = np.arange(n_samples)
        if self.shuffle:
            np.random.RandomState(self.random_state).shuffle(order)
        # The first (n_samples % n_splits) folds receive one extra sample
        # so that every sample lands in exactly one test fold.
        base, extra = divmod(n_samples, self.n_splits)
        start = 0
        for fold in range(self.n_splits):
            stop = start + base + (1 if fold < extra else 0)
            yield order[start:stop]
            start = stop

    def _iter_test_masks(self, X, y):
        """Yield a boolean test-set mask over all samples per fold."""
        for test_idx in self._iter_test_indices(X, y):
            mask = np.zeros(X.shape[0], dtype=bool)
            mask[test_idx] = True
            yield mask

    def split(self, X, y):
        """Yield ``(train_indices, test_indices)`` pairs, one per fold."""
        all_idx = np.arange(X.shape[0])
        for mask in self._iter_test_masks(X, y):
            yield all_idx[~mask], all_idx[mask]
# Sanity check: the hand-rolled KFold must yield exactly the same
# splits as scikit-learn's KFold on a real dataset.
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and
# removed in 1.2 — this cell only runs on older scikit-learn versions.
X, y = load_boston(return_X_y=True)
cv1 = KFold(n_splits=5)
cv2 = skKFold(n_splits=5)
for (train1, test1), (train2, test2) in zip(cv1.split(X, y), cv2.split(X, y)):
    assert np.array_equal(train1, train2)
    assert np.array_equal(test1, test2)
# Repeat with shuffling enabled: identical seeds must produce
# identical shuffled folds in both implementations.
X, y = load_boston(return_X_y=True)
cv1 = KFold(n_splits=5, shuffle=True, random_state=0)
cv2 = skKFold(n_splits=5, shuffle=True, random_state=0)
for (train1, test1), (train2, test2) in zip(cv1.split(X, y), cv2.split(X, y)):
    assert np.array_equal(train1, train2)
    assert np.array_equal(test1, test2)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from maskr.ipstartup import *
from maskr.samples.nuke.learner import *
from maskr.utils import visualize
from maskr.samples.nuke.config import Config
from maskr.test.baseline import rngreset

# Override the sample config: deterministic data order, single GPU.
# NOTE(review): this intentionally shadows the imported Config name.
class Config(Config):
    SHUFFLE=False
    GPU_COUNT=1
config = Config()
# Reset RNG seeds so the run is reproducible.
rngreset()
# -
# Load the nuke dataset and a pretrained Mask R-CNN checkpoint.
data = get_data(config)
model = MaskRCNN(config=config)
model.load_state_dict(torch.load("models/nukemodel.pth"))
# Run inference on a single training image (index 255).
images = [data.train_ds.load_image(255)]
results = model.predict(images)
# Visualize results
for image, res in zip(images, results):
    boxes, class_ids, scores, masks = res
    visualize.display_instances(image, boxes, masks, class_ids, config.CLASS_NAMES, scores)
    plt.show()
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (myenv)
# language: python
# name: myenv
# ---
# # Logistic Regression Using Gradient Descent
# ### Dataset
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
# Two Gaussian blobs centered at (2,4) and (4,2): a linearly
# separable binary-classification toy dataset.
X, y = make_blobs(n_samples=100, centers=[[2,4],[4,2]], random_state=20)
# Visualize dataset: circles for class 0, triangles for class 1.
plt.plot(X[:,0][y==0],X[:,1][y==0],'o',color='red')
plt.plot(X[:,0][y==1],X[:,1][y==1],'^',color='blue')
print(X.shape)
print(y.shape)
def sigmoid(z):
    """Logistic function mapping any real-valued z into (0, 1)."""
    return np.reciprocal(1.0 + np.exp(-z))
def predict(W,x):
    """Predicted probability of class 1: sigmoid of the linear score W·x."""
    score = np.dot(W, x)
    return sigmoid(score)
# +
def Cost(W,y,x):
    """Binary cross-entropy loss of the prediction for one sample.

    Equals -[y*log(p) + (1-y)*log(1-p)] with p = predict(W, x).
    """
    p = predict(W, x)
    return -(y * np.log(p) + (1 - y) * np.log(1 - p))
# +
def compute_gradient(x,y,W):
    """Gradient of the logistic loss for a single training sample.

    Parameters
    ----------
    x : data vector appended with 1 for the bias term
    y : label in {0, 1}
    W : current weight vector

    Returns
    -------
    Gradient vector x * (predict(W, x) - y).
    """
    # The four bare string literals that preceded this function were
    # no-op statements; they are folded into this docstring.
    return x*(predict(W,x) - y)
# +
def Gradient_Descent(iterations , X_train , y_train, learning_rate ):
    """Batch gradient descent for logistic regression.

    Parameters
    ----------
    iterations : number of full passes over the training set
    X_train : (n_samples, n_features) array; last column holds the bias 1s
    y_train : (n_samples,) array of 0/1 labels
    learning_rate : step size

    Returns
    -------
    Weights : list of the weight vector after each iteration
    cost : list of the mean cost per iteration
    w : final weight vector
    Iterations : list [1, 2, ..., iterations]
    """
    Weights = []
    Iterations = []
    cost = []
    # Generalized: size the weight vector from the data instead of the
    # hard-coded 3, so any feature dimensionality works.
    w = np.random.rand(X_train.shape[1])
    n = len(X_train)
    for j in range(iterations):
        C = 0
        G = np.zeros(w.shape)
        for i in range(n):
            C = C + Cost(w, y_train[i], X_train[i, :])
            # Bug fix: the gradient previously read the GLOBAL `y`
            # instead of the `y_train` argument passed in.
            G = G + compute_gradient(X_train[i, :], y_train[i], w)
        # Average cost and gradient over the batch, then step downhill.
        C = C / n
        G = G / n
        w = w - learning_rate * G
        Weights.append(w)
        cost.append(C)
        Iterations.append(j + 1)
        print("The cost after Iteration {0} is {1}".format(j + 1, C))
    return Weights, cost, w, Iterations
# -
# Build the design matrix: the 2 original features plus a constant
# column of 1s for the bias term.
X_train = np.zeros((X.shape[0],X.shape[1] + 1))
X_train[:,0:2] = X
X_train[:,2] = 1
#print(X_train)
#print(X_train.shape)
y_train = np.copy(y)
# Fit for 1000 iterations with learning rate 0.01.
W,C,w,I = Gradient_Descent(1000,X_train,y_train,0.01)
# +
# Decision boundary: w0*x1 + w1*x2 + w2 = 0  =>  x2 = m*x1 + c.
c = -w[2]/w[1]
m = -w[0]/w[1]
xmin, xmax = X_train[:, 0].min()-1, X_train[:, 0].max()+1
ymin, ymax = X_train[:, 1].min()-1, X_train[:, 1].max()+1
xd = np.array([xmin, xmax])
yd = m*xd + c
# Dashed boundary line with a shaded half-plane for each class.
plt.plot(xd, yd, 'k', lw=1, ls='--')
plt.fill_between(xd, yd, ymin, color='tab:blue', alpha = 0.2)
plt.fill_between(xd, yd, ymax, color='tab:orange', alpha = 0.2)
plt.scatter(*X[y_train == 0 ].T, s=11, alpha= 1)
plt.scatter(*X[y_train == 1 ].T, s=11, alpha= 1)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.ylabel(r'$x_2$')
plt.xlabel(r'$x_1$')
plt.show()
# -
# Learning curve: mean cost per iteration.
plt.plot(I,C)
plt.xlabel(" No of Iterations ")
plt.ylabel("Error / Cost Function ")
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: nanomesh
# language: python
# name: nanomesh
# ---
# %load_ext autoreload
# %autoreload 2
# %matplotlib notebook
import pygalmesh
from nanomesh.structures import Pore3D, FullCube, XDIM, YDIM, ZDIM
from nanomesh import Volume
# ### Instantiate the pore and visualize a section of the data
# +
# Build the pore structure and wrap its data in a Volume for viewing.
pore = Pore3D()
pore_vol = Volume(pore.data)
pore_vol.show_slice(along='z')
# -
# ### Generate full cube
full_cube = FullCube()
# ### Mesh the data over a multiple domains
#
# The resulting mesh will be deformed as our domain is not cubic
#
# Generates `dummies_post_treatment.xyz`, `dummies_pre_treatment.xyz`
#
# Afterwards, scale the mesh to obtain the correct dimension
# +
# Periodic mesh over two domains (pore and full cube); the sign
# strings select the sub-domains to mesh, and the 6-tuple is the
# bounding box of the periodic cell.
mesh = pygalmesh.generate_periodic_mesh_multiple_domains(
    [pore, full_cube],
    ["--", "+-"],
    [0,0,0, XDIM, YDIM, ZDIM],
    max_cell_circumradius=0.025,
    min_facet_angle=30,
    max_radius_surface_delaunay_ball=0.025,
    max_facet_distance=0.025,
    max_circumradius_edge_ratio=2.0,
    number_of_copies_in_output=1,
    # All mesh-optimization passes are disabled here.
    exude=False,
    perturb=False,
    odt=False,
    lloyd=False,
    verbose=True
)
# scale from cubic to tetragonal
mesh.points[:, 1] *= 0.68/0.48
# -
# ### View the mesh using pyvista
# +
import pyvista as pv
pv.plot_itk(mesh)
# -
# ## Save the mesh
#
# Insert information about periodicity for HPGEM
from nanomesh.periodic_utils import insert_periodic_info
# Periodicity box [x0, y0, z0, x1, y1, z1] consumed by HPGEM.
mesh = insert_periodic_info(mesh, [0,0,0,1.,0.68,1.])
mesh.write('nanopt_full_period_2domain_all_connections.gmsh', file_format="gmsh22", binary=False)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Recomendación de libros a usuarios de Cross Booking
# ### Importamos las librerias requeridas
#
# Utilizaremos en el programa librerías que nos permitiran cargar y limpiar los datos con el fin de tener la mejor calidad en la información. Igualmente trabajaremos con la librería [**SurPRISE**](http://surpriselib.com/) para realizar el proceso de recomendación que consta de los siguientes pasos:
# 1. Carga de datos para entrenamiento
# 2. Cálculo estadísticos de error
# 3. Ejecución de las predicciones
# 4. Generación de las recomendaciones
#
# El estilo de recomendación que vamos a extraer con el proceso anterior se basa en la técnica de Filtrado Colaborativo (Segaran, 2007). Dado esto trabajaremos con los siguientes algorítmos de predicción (Ekstrand, Riedl, & Konstan, 2010) los cuales arrojaran recomendaciones de **5 Libros** para una muestra de **5 Usuarios**, teniendo en cuenta que para Neighborhood-Based se trabajará con dos algorítmos de similitud:
# * Baseline Predictors
# * Neighborhood Item-Base
# - *Cosine Similarity*
# - *Pearson Correlation*
# * Singular Value Decomposition (SVD)
#
# Como valor agregado del trabajo se utilizará la librería [**Scikit-Learn**](http://scikit-learn.org/stable/) con la finalidad de implementar el algorítmo de clustering K-Means proporcionado por este paquete. La idea con esta implementación es realizar una recomendación de **5 Usuarios** a seguir (recomendación para solicitud de amistad o seguir en el sitio) para una muestra de **5 Usuarios** (los mismos indicados anteriormente).
#
# **Referencias de las librerías:**
# * [Numpy](http://www.numpy.org/)
# * [Pandas](https://pandas.pydata.org)
# * [Surprise](http://surpriselib.com/)
# * [SciPy](https://www.scipy.org/)
# * [Scikit-Learn](http://scikit-learn.org/stable/)
# * [MatPlotLib](http://matplotlib.org/index.html)
# * [Seaborn](http://seaborn.pydata.org/index.html)
# +
import numpy as np
import pandas as pd
from surprise import Reader, Dataset
from surprise import BaselineOnly, KNNBasic, SVD
from surprise import evaluate
from collections import defaultdict
from random import randint
from scipy import stats, integrate
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.cluster import KMeans
# -
# ### Cargamos el archivo con las calificaciones
#
# El archivo fuente representa un conjunto de usuarios de Amazon que realizaron calificaciones de libros en la página. Estas calificaciones están en un rango de 1 a 10. Dado que los datos fueron preparados por colaborados de [GroupLens](https://grouplens.org/datasets/book-crossing/) se entiende que están pre-procesados y por tanto, partimos del supuesto que son válidos.
#
# Antes de comenzar a utilizar los datos, realizaremos un **proceso de depuración** con el fin de tener data limpia durante el proceso. Realizaremos un corte de los datos con base en los siguientes críterios:
# * Eliminación de usuarios con edad mayor a 90 años
# * Eliminación de ratings con calificación igual a 0
# * Eliminación de ratings que tengan menos de 200 calificaciones por libro
# * Eliminación de ratings que tengan menos de 3 calificaciones por usuario
# +
# Load the users table (Book-Crossing dataset, latin1 encoded).
df_users = pd.read_csv("Files/BX-Users.csv",
                       delimiter=";",
                       na_values="NA",
                       encoding="latin1")
# Drop users older than 90: one 201-year-old user was producing a
# singleton cluster during the later K-Means step.
df_users = df_users[df_users['Age'] < 90]
print("Cantidad de usuarios cargados: ", df_users.index.size)
# +
# Carga base de datos de los libros.
df_books = pd.read_csv("Files/BX-Books.csv",
delimiter=";",
na_values="NA",
encoding="latin1")
print("Cantidad de libros cargados: ", df_books.index.size)
# +
# Carga base de datos de los ratings.
df_ratings = pd.read_csv("Files/BX-Book-Ratings.csv",
delimiter=";",
na_values="NA",
encoding="latin1")
# Se leen los registros con Rating > 0 que son los que tienen calificacion.
df_ratings= df_ratings[df_ratings['Book-Rating'] > 0]
# Realizamos una copia del conjunto de datos ratings y conservamos el original para otros calculos.
#datafile = df_ratings
# Realizamos un agrupamiento por ISBN, para identificar los libros con mayor frecuencia en calificación
# y de esta manera depurar el conjunto de ratings, tomando datos muy significativos en calificación
df_books_group = pd.DataFrame(df_ratings.groupby(['ISBN']).size(), columns=['Count'])
# Seleccionamos libros con una frecuencia en calificación mayor que 200, para garantizar que haya
# varios usuarios calificando el mismo libro.
df_books_top = df_books_group[df_books_group['Count'] > 200]
# Realizamos el filtro de ratings aplicando regla de los libros top (> 200 calificaciones)
df_user_top_book = df_ratings.loc[df_ratings['ISBN'].isin(df_books_top.index)]
df_user_top_book.head()
# En el conjunto de datos anterior, que tiene el listado de los libros con mas de 200 calificaciones cada uno,
# identifico los lectores que asignaron mas de 3 calificaciones. El nuevo conjunto de datos tendrá los usuarios
# que calificaron más de 3 libros.
df_user_group = pd.DataFrame(df_user_top_book.groupby(['User-ID']).size(), columns=['Count'])
df_user_top = df_user_group[df_user_group['Count'] > 3]
# Realizamos el filtro de ratings aplicando regla de los usuarios top (> 3 calificaciones)
df_filtro_final = df_user_top_book.loc[df_user_top_book['User-ID'].isin(df_user_top.index)]
# Imprimimos totales del conjunto de datos ratings ya depurados
print("Cantidad de ratings cargados: ", df_filtro_final.index.size)
df_filtro_final.head(10)
# -
#Validación de registros duplicados
print("Cantidad de duplicados =", df_filtro_final.duplicated().sum())
#Validación cantidad de usuarios únicos
print("Cantidad de usuarios =", len(df_filtro_final['User-ID'].drop_duplicates()))
#Validación cantidad de libros únicos
print("Cantidad de libros =", len(df_filtro_final['ISBN'].drop_duplicates()))
# +
#Validación cantidad de calificaciones
distratings = df_filtro_final.groupby(['Book-Rating']).count()["User-ID"].to_frame().reset_index(level=0, inplace=False)
distratings = distratings.rename(columns = {"User-ID":'Count'})
# Realizamos el gráfico de frecuencias por calificación
#plt.figure(figsize=(12, 8))
sns.set(rc={"figure.figsize": (12, 8)})
hst = sns.countplot(x="Book-Rating", data=df_filtro_final, palette="Set2")
plt.xlabel('Calificaciones')
plt.ylabel('Frecuencias')
plt.title('Distribución de Calificaciones')
plt.show()
#Imprimimos la tabla de datos con las cantidades de votos totales por tipo
distratings
# -
#Estadisticos básicos del conjunto de datos
print("Estadísticos básicos de las calificaciones:")
df_filtro_final["Book-Rating"].describe()
# ### Procedimiento de recomendación
#
# A continuación se realizaran cada uno de los pasos para realizar la predicción de las recomendaciones usando la librería **SurPRISE**. Es preciso recordar que serán utilizados los algorítmos de **filtrado colaborativo** mencionados anteriormente.
#
# #### 1. Carga de datos para entrenamiento
#
# En primera instancia se realiza la carga de los datos en el objeto **Reader** de Surprise utilizado para el mapeo de información de entrenamiento. Igualmente se realiza la creación del conjuto de datos en el objeto **Dataset** requerido por la librería para el manejo de la información, a partir de nuestro DataFrame inicial.
# +
# The Reader only needs the rating scale, because the data is loaded
# straight from a DataFrame rather than from a file.
reader = Reader(rating_scale=(1,10))
# Build the Surprise Dataset from the (user, item, rating) columns,
# in the column order Surprise expects.
data = Dataset.load_from_df(df_filtro_final[['User-ID', 'ISBN', 'Book-Rating']], reader)
# Split into 2 folds for cross-validation: each evaluation round
# trains on one fold and validates on the other.
# NOTE(review): Dataset.split() and surprise.evaluate were removed in
# Surprise 1.1 — this notebook requires Surprise < 1.1 (newer versions
# use model_selection.cross_validate / KFold instead).
data.split(2)
# Hyper-parameter grid search for SVD (left disabled).
#param_grid = {'n_epochs': [5, 10], 'lr_all': [0.002, 0.005], 'reg_all': [0.4, 0.6]}
#grid_search = GridSearch(SVD, param_grid, measures=['RMSE', 'MAE'])
#grid_search.evaluate(data)
#results_df = pd.DataFrame.from_dict(grid_search.cv_results)
#print(results_df)
# -
# ***NOTA 1:*** Los pasos del procedimiento de recomendación subsiguientes se deben repetir para cada uno de los algorítmos de predicción que serán trabajados: *BaselineOnly, KNN Item-Base, SVD (nombres de algorítmos según [docs](http://surprise.readthedocs.io/en/stable/prediction_algorithms_package.html) de SurPRISE)*.
#
# ***NOTA 2:*** Para las predicción con el algorítmo de Neighborhood (KNN) solo se tomará en cuenta el filtrado **Item-Base** (basado en elementos), para nuestro caso sería "Basado en Libros". Esto dado que el filtrado Item-Base es significativamente **más rápido** que el User-Base al momento de recuperar una lista de recomendaciones en un conjunto de datos grande. Además, el filtrado Item-Base por lo general supera al filtrado User-Base cuando el conjunto de datos es **"disperso"**, sin embargo, se comportan de manera similar en conjuntos de datos **"densos"**. (Segaran, 2007)
# #### 2. Cálculo estadísticos de error
#
# El algoritmo intentará predecir la calificación de una combinación **Usuario/Libro** y comparará esa predicción con la predicción real. La diferencia entre la calificación real y la predicha se mide utilizando medidas de error clásicas como Root mean squared error ([RMSE](https://medium.com/human-in-a-machine-world/mae-and-rmse-which-metric-is-better-e60ac3bde13d)) y Mean absolute error ([MAE](https://medium.com/human-in-a-machine-world/mae-and-rmse-which-metric-is-better-e60ac3bde13d)). Dichas mediciones será utilizadas en el proceso de evaluación de los algorítmos de predicción. A continuación se realizan los procesos de **evaluación** para todos los algorítmos definidos.
# +
# Cross-validate each candidate algorithm with RMSE and MAE on the
# 2-fold split created above.
# Baseline Predictors (BaselineOnly)
algoBO = BaselineOnly()
evaluateBO = evaluate(algoBO, data, measures=['RMSE', 'MAE'])
# Item-based neighborhood with cosine similarity (KNNBasic)
sim_options = {'name': 'cosine', 'user_based': False}
algoKNNc = KNNBasic(sim_options=sim_options)
evaluateKNNc = evaluate(algoKNNc, data, measures=['RMSE', 'MAE'])
# Item-based neighborhood with Pearson-baseline correlation (KNNBasic)
sim_options = {'name': 'pearson_baseline', 'user_based': False}
algoKNNp = KNNBasic(sim_options=sim_options)
evaluateKNNp = evaluate(algoKNNp, data, measures=['RMSE', 'MAE'])
# Singular Value Decomposition (SVD)
algoSVD = SVD()
evaluateSVD = evaluate(algoSVD, data, measures=['RMSE', 'MAE'])
# -
# Formateo de las métricas de error para impresión en dataframe.
ma_eval_result = np.matrix([
['RMSE', np.mean(evaluateBO['RMSE']), np.mean(evaluateKNNc['RMSE']), np.mean(evaluateKNNp['RMSE']), np.mean(evaluateSVD['RMSE'])],
['MAE', np.mean(evaluateBO['MAE']), np.mean(evaluateKNNc['MAE']), np.mean(evaluateKNNp['MAE']), np.mean(evaluateSVD['MAE'])]
])
df_eval_result = pd.DataFrame(ma_eval_result, columns=['Metric','BaselineOnly','KNNCosine','KNNPearson','SVD'])
df_eval_result
# Antes de continuar con el proceso de predicción realizaremos la **selección del mejor algorítmo** con base en las métricas de error calculadas en el paso anterior. El procedimiento de selección consiste en sumar el valor de **RSME** y **MAE** de cada algorítmo, luego se eleva al cuadrado y finalmente se identifica el menor valor.
# +
df_eval_result['BaselineOnly'] = pd.to_numeric(df_eval_result['BaselineOnly'], errors='coerce').fillna(0)
df_eval_result['KNNCosine'] = pd.to_numeric(df_eval_result['KNNCosine'], errors='coerce').fillna(0)
df_eval_result['KNNPearson'] = pd.to_numeric(df_eval_result['KNNPearson'], errors='coerce').fillna(0)
df_eval_result['SVD'] = pd.to_numeric(df_eval_result['SVD'], errors='coerce').fillna(0)
# Calculamos la media entre las medidas RMSE y MAE de cada algorítmo y elevamos al cuadrado.
bo = df_eval_result['BaselineOnly'].mean()**2
knnc = df_eval_result['KNNCosine'].mean()**2
knnp = df_eval_result['KNNPearson'].mean()**2
svd = df_eval_result['SVD'].mean()**2
alg_means = [bo, knnc, knnp, svd]
min_value = min(alg_means)
select_algorit = -1
# Seleccionamos el indice del algorítmo con mejores resultados en la estimación.
# Los indices de los algorítmos son:
# 0 : BaselineOnly
# 1 : KNNCosine
# 2 : KNNPearson
# 3 : SVD
for index in range(len(alg_means)):
if(min_value == alg_means[index]):
select_algorit = index
break
print("Algorítmo seleccionado para recomendar: ", select_algorit)
# +
#### DEPRECATED INITIAL ####
# Finalmente se realiza el cálculo de las predicciones para un
# conjunto de prueba seleccionado por la librería
#df_predict_group = df_predict_book.groupby(['User-ID','ISBN'])
#df_predict_means = df_predict_group.agg({
# 'Book-Rating': 'mean',
# 'BaselineOnly': 'mean',
# 'KNNCosine': 'mean',
# 'KNNPearson': 'mean',
# 'SVD': 'mean'
#})
#Realizamos las diferencias del real por el estimado de cada algorítmo
#br_means = df_predict_means['Book-Rating'].values
#di_means = [
# (np.abs(br_means - df_predict_means['BaselineOnly'].values)**2).sum(),
# (np.abs(br_means - df_predict_means['KNNCosine'].values)**2).sum(),
# (np.abs(br_means - df_predict_means['KNNPearson'].values)**2).sum(),
# (np.abs(br_means - df_predict_means['SVD'].values)**2).sum()
#]
#min_value = min(di_means)
# Seleccionamos el indice del algorítmo con mejores resultados en la estimación
# Los indices de los algorítmos son:
# 0 : BaselineOnly
# 1 : KNNCosine
# 2 : KNNPearson
# 3 : SVD
#select_algorit = -1
#for index in range(len(di_means)):
# if(min_value == di_means[index]):
# select_algorit = index
# break
#### DEPRECATED FINAL ####
# -
# #### 3. Ejecución de las predicciones
#
# A continuación se realizará el pronóstico para una determinada muestra de usuarios quienes serán objeto de las predicciones del tipo **Usuario: Libros** (serán en total *5 usuarios* y se recomendarán *5 libros*), con esto podremos saber si al usuario le gustará un determinado libro o no. Para realizar las predicciones primero se ejecutan las funciones de "entrenamiento", ya que los algorítmos ejecutarán en total 2 corridas de la predicción (dado el split del conjunto de datos, que lo parte en tres grupos de datos).
# +
# Retrain every algorithm on the FULL dataset before producing the
# final predictions.
# NOTE(review): algo.train() was renamed algo.fit() in Surprise 1.1;
# this cell requires Surprise < 1.1.
# Baseline Predictors (BaselineOnly)
trainset = data.build_full_trainset()
algoBO.train(trainset)
# Item-based neighborhood with cosine similarity (KNNBasic)
trainset = data.build_full_trainset()
algoKNNc.train(trainset)
# Item-based neighborhood with Pearson correlation (KNNBasic)
trainset = data.build_full_trainset()
algoKNNp.train(trainset)
# Singular Value Decomposition (SVD)
trainset = data.build_full_trainset()
algoSVD.train(trainset)
# -
# #### Análisis y comparación de predictores
#
# Antes de continuar adelante con el proceso de pronóstico y recomendación, se realizará la predicción de 5 usuarios con calificiaciones estimadas en un solo libro, seleccionado manualmente, y que dichos usuarios tengan calificado, con el fin de comparar predicción vs calificación real.
# +
# Seleccionamos un libro y una muestra de 5 usuarios para comparar pronosticos
#x = df_filtro_final.groupby('ISBN').count()['User-ID'].to_frame().reset_index(level=0, inplace=False)
#x = x.rename(columns = {"User-ID":'Count'})
#itemid = x.sort_values('Count', ascending=False).iloc[1]['ISBN']
#y = datafile[datafile['ISBN'] == itemid]
#n = [randint(i, y.index.size-1) for i in range(5)]
#usersid = list(y.iloc[n]['User-ID'])
#ratings = list(y.iloc[n]['Book-Rating'])
itemid = '0312195516'
usersid = [218724,243065,98551,236757,154499]
ratings = [9,9,9,9,9]
ma_predict_book = []
for i in range(len(usersid)):
bo = algoBO.predict(usersid[i], itemid, ratings[i])
knnc = algoKNNc.predict(usersid[i], itemid, ratings[i])
knnp = algoKNNp.predict(usersid[i], itemid, ratings[i])
svd = algoSVD.predict(usersid[i], itemid, ratings[i])
ma_predict_book.append([bo[0], bo[1], bo[2], bo[3], knnc[3], knnp[3], svd[3]])
df_predict_book = pd.DataFrame(np.matrix(ma_predict_book),
columns=['User-ID','ISBN','Book-Rating','BaselineOnly','KNNCosine','KNNPearson','SVD'])
#Convierte las columnas de metrica en valores numericos
df_predict_book['Book-Rating'] = pd.to_numeric(df_predict_book['Book-Rating'], errors='coerce').fillna(0)
df_predict_book['BaselineOnly'] = pd.to_numeric(df_predict_book['BaselineOnly'], errors='coerce').fillna(0)
df_predict_book['KNNCosine'] = pd.to_numeric(df_predict_book['KNNCosine'], errors='coerce').fillna(0)
df_predict_book['KNNPearson'] = pd.to_numeric(df_predict_book['KNNPearson'], errors='coerce').fillna(0)
df_predict_book['SVD'] = pd.to_numeric(df_predict_book['SVD'], errors='coerce').fillna(0)
# Graficamos para cada usuario las calificaciones por cada algoritmo para comparar
df_predict_book2 = pd.melt(df_predict_book,
id_vars=["User-ID", "ISBN"],
var_name="Algorithm",
value_name="Prediction")
df_predict_book2 = df_predict_book2.sort_values(['Prediction'], ascending=False)
sns.set(rc={"figure.figsize": (12, 8)})
bar = sns.barplot(hue="Algorithm", y="Prediction", x="User-ID", data=df_predict_book2, palette="Set2")
plt.title('Predicciones por Usuario y Algorítmos')
plt.yticks(np.arange(15))
plt.show()
# Imprimimos la tabla de datos resultante de la predicción para análisis
df_predict_book
# -
# A continuación se realizará la ejecución de las **predicciones** para la población general. Para las recomendaciones de libros no tendremos en cuenta predicciones de todos los algorítmos, por tanto nos basaremos en el algorítmo seleccionado luego del cálculo de las métricas de error.
# +
# Build the anti-testset: every (user, item) pair WITHOUT a known
# rating — these are the candidate recommendations.
testset = trainset.build_anti_testset()
predictions = None
# Dispatch to whichever algorithm won the error-metric comparison
# (index computed earlier: 0=BaselineOnly, 1=KNNCosine,
# 2=KNNPearson, 3=SVD).
if(select_algorit == 0): predictions = algoBO.test(testset)
elif(select_algorit == 1): predictions = algoKNNc.test(testset)
elif(select_algorit == 2): predictions = algoKNNp.test(testset)
elif(select_algorit == 3): predictions = algoSVD.test(testset)
# -
# #### 4. Generación de las recomendaciones
#
# Finalizado el proceso de predicción, pasamos a la etapa de las **recomendaciones de libros a los usuarios**. En este caso tenemos una muestra al azar de 5 usuarios para los cuales se realizará la recomendación. Estos usuarios son diferentes a los procesados anteriormente, con el fin de que el sistema lance recomendaciones diferentes de libros. El programa generará una tabla con la siguiente información: *ID de usuario, ISBN Libro 1, ISBN Libro 2, ISBN Libro 3, ISBN Libro 4, ISBN Libro 5.*
# +
# Helper to extract the top-N book recommendations per user.
# Source: https://github.com/NicolasHug/Surprise/issues/40
def get_top_n(predictions, users=(), nitems=5):
    """Return the `nitems` highest-rated predictions for each user in `users`.

    Parameters
    ----------
    predictions : iterable of (uid, iid, true_r, est, details) tuples,
        as produced by a Surprise algorithm's `test` method.
    users : collection of user ids to keep. Fixed: the default was a
        mutable list (`users=[]`), a shared-mutable-default pitfall;
        an empty tuple behaves identically for membership tests.
    nitems : number of items to keep per user.

    Returns
    -------
    defaultdict mapping uid -> list of (iid, est), sorted by estimated
    rating in descending order and truncated to `nitems`.
    """
    # First map the predictions to each requested user.
    top_n = defaultdict(list)
    for uid, iid, true_r, est, _ in predictions:
        if uid in users:
            top_n[uid].append((iid, est))
    # Then sort each user's candidates and keep the k highest ones.
    # (No membership re-check needed: only requested users were added.)
    for uid, user_ratings in top_n.items():
        user_ratings.sort(key=lambda x: x[1], reverse=True)
        top_n[uid] = user_ratings[:nitems]
    return top_n
# Listado de 5 usuarios al azar para los cuales se realizará la recomendación
users = [249628,91103,229313,81492,36907]
nitems = 5
#for i in range(nitems):
# num = randint(0, len(testset)-1)
# usr = testset[num][0]
# users.append(usr)
# Extracción de las recomendaciones de los usuarios indicados
top_n = get_top_n(predictions, users=users, nitems=nitems)
# Obtenemos las recomendaciones seleccionadas
ma_predict_final = []
lb_predict_final = []
for uid, user_ratings in top_n.items():
items = [iid for (iid, _) in user_ratings]
for i in range(nitems - len(items)):
items = items + [np.nan]
ma_predict_final.append([uid] + items)
lb_predict_final.append('User-ID')
lb_predict_final = lb_predict_final + [('ISBN-'+str(i+1)) for i in range(len(ma_predict_final[0])-1)]
df_predict_final = pd.DataFrame(np.matrix(ma_predict_final), columns=lb_predict_final)
df_predict_final
# -
# ### Recomendaciones utilizando Machine Learning
#
# En el siguiente apartado realizaremos el proceso de predicción y posterior **recomendación de usuarios**. La idea central es identificar un usuario que haya sido recomendado en las predicciones de libros e identificar para este un conjunto de usuarios a quienes puede seguir o suscribirse dada la afinidad o cercanía de su comportamiento en el sitio.
#
# La técnica de Machine Learning que se implementará será una **clasificación de usuarios** por preferencias de libros y edad, utilizando el algorítmo de K-Means que se encuentra disponible en la librería *Scikit-Learn*.
# #### 1. Preparación de los datos para clustering
#
# Inicialmente se preparan los datos de calificaciones para realizar la ejecución del algorítmo de **K-Means**. Es preciso que los datos tengan como indice los códigos de usuarios y en cada columna los ISBN de los libros. En los valores de las celdas deben quedar las calificaciones de cada usuario a cada libro, considerando que algunas celdas quedaran vacías.
# +
# Pivoteamos la tabla original para dejar el listado de Libros como variables con las que vamos a hacer el cluster.
df_pivot_rating = pd.pivot_table(df_filtro_final, index='User-ID', columns="ISBN", values="Book-Rating", aggfunc='mean')
# Reemplazar los valores nulos (NaN) por 0, dado que el algoritmo de cluster no admite celdas nulas.
# Representan que el usuario no ha leido el libro
df_pivot_rating = df_pivot_rating.fillna(0)
# Extraemos las edades de los usuarios para agregarlas al conjunto de datos para clustering.
df_users_1 = df_users.set_index("User-ID")
df_users_2 = df_users_1[['Age']]
# Normalizamos el valor de la edad a la escala de valores de las calificaciones.
df_users_2['Age'] = round(df_users_2[['Age']].sum(axis=1).apply(np.sqrt), 0)
# Realizar un Merge del conjunto con edades y el conjuto pivoteado, para agregar la edad al conjunto.
df_merge_2 = df_users_2.merge(df_pivot_rating, left_index=True, right_index=True)
print("Cantidad de usuarios para agrupamiento: ", df_merge_2.index.size)
df_merge_2.head(10)
# -
# #### 2. Ejecución del algorítmo de clustering
#
# A continuación se configura y ejecuta el algorítmo de clustering que provee la librería **Scikit-Learn**, con el fin de agrupar los usuarios dadas las preferencias de libros y su edad. Esta clasificación tendrá como objetivo seleccionar la lista de usuarios que se recomendarán al usuario, ya sea para seguirlos o suscribirse a sus canales o simplemente para tenerlos como referentes en el sitio.
# +
# Utilizamos la librería scikit-learn para crear un cluster de usuarios de acuerdo a su comportamiento
# calificando libros y la edad de los mismos
# Para el ejercicio utilizaremos 10 grupos:
kmeans = KMeans(n_clusters = 10)
kmeans.fit(df_merge_2)
y_kmeans = kmeans.predict(df_merge_2)
# Imprimimos el conjunto de grupos por usuario, clusters:
print("Listado de clusters por usuario:\n\n", y_kmeans)
print()
## Imprimimos una de los centroides para fines informativos, centro de cada cluster:
print("Listado de valores primer centroide:\n\n", kmeans.cluster_centers_[0])
# +
# Asignar el valor de cluster a cada usuario.
df_merge_2['Cluster'] = kmeans.labels_
# Imprimimos los usuarios con su respectivo cluster el cuál está en la última columna
#print("Listado de 10 usuarios con su cluster:")
#df_merge_2[['Age','Cluster']].head(10)
#Validación cantidad de clusters
distcluster = df_merge_2.groupby(['Cluster']).count()["Age"].to_frame().reset_index(level=0, inplace=False)
distcluster = distcluster.rename(columns = {"Age":'Count'})
# Realizamos el gráfico de frecuencias por cluster
sns.set(rc={"figure.figsize": (12, 8)})
hst = sns.countplot(x="Cluster", data=df_merge_2, palette="Set2")
plt.xlabel('Clusters')
plt.ylabel('Frecuencias')
plt.title('Distribución de Clusters')
plt.show()
# -
# ### Recomendaciones de libros y usuarios
#
# Finalmente, tenemos un conjunto de recomendaciones que podemos presentar a un usuario particular como parte del valor agregado que genera el sitio. A continuación realizaremos la selección de un usuario a partir de las predicciones de libros realizadas en la primera sección y se mostrará el top 5 de libros recomendados, así como la lista de usuarios que puede seguir o suscribirse.
# +
# Seleccionamos el primer usuario de la lista de los 5 recomendados en la sección anterior.
record = df_predict_final.iloc[0]
user_id = int(record['User-ID'])
items_recom = [record[('ISBN-'+str(i))] for i in range(1,6)]
# Seleccionamos el número de cluster y ubicamos los usuarios recomendados.
num_cluster = df_merge_2[df_merge_2.index == user_id]['Cluster'].values[0]
users_recom = list(df_merge_2[df_merge_2['Cluster'] == num_cluster].index)
# Imprimo la información del usuario a quien voy a recomendar libros y otros usuarios.
df_users[df_users['User-ID'] == user_id]
# +
# Busco una muestra de los libros mejor calificados por el usuario
# para validar si en genero o tipo se parecen con las recomendaciones (validación visual)
user_ratings = df_ratings[df_ratings['User-ID'] == user_id].sort_values('Book-Rating', ascending=False)
user_books = list(user_ratings.iloc[:5]['ISBN'].values)
df_user_books = df_books[df_books['ISBN'].isin(user_books)][['ISBN','Book-Title','Book-Author','Year-Of-Publication','Publisher']]
df_user_books
# -
# #### Lecturas que quizás te gusten (libros recomendados por el algorítmo)
# +
df_books_recom = df_books[df_books['ISBN'].isin(items_recom)][['ISBN','Book-Title','Book-Author','Year-Of-Publication','Publisher','Image-URL-M']]
# Recomendación de 5 libros.
df_books_recom[['ISBN','Book-Title','Book-Author','Year-Of-Publication','Publisher']]
# +
import urllib.request
# Download the book cover images so they can be used in the presentation.
# NOTE(review): assumes an "Images/" directory already exists and the URLs
# are reachable — urlretrieve raises otherwise; confirm before running.
for index, row in df_books_recom.iterrows():
    urllib.request.urlretrieve(row['Image-URL-M'], "Images/"+row['ISBN']+".jpg")
# -
# #### Usuarios a quienes puedes seguir (usuarios recomendados por machine learning)
# Recommend 5 users to follow (members of the same cluster).
df_users[df_users['User-ID'].isin(users_recom)].head(5)
# +
# Take the first recommended user and look up the books they rated that
# also appear among the books recommended to the user under study.
user_ref = df_users[df_users['User-ID'].isin(users_recom)].iloc[0]['User-ID']
user_ref_ratings = df_ratings[df_ratings['User-ID'] == user_ref]
user_ref_books = list(user_ref_ratings[user_ref_ratings['ISBN'].isin(items_recom)].iloc[:5]['ISBN'].values)
df_books[df_books['ISBN'].isin(user_ref_books)][['ISBN','Book-Title','Book-Author','Year-Of-Publication','Publisher']]
# -
# ### Bibliografía
#
# * Segaran, T. (2007). Programming Collective Intelligence. O'Reilly.
# * Ekstrand, M. D., Riedl, J., & Konstan, J. (2010). Collaborative Filtering Recommender Systems. Foundations and Trends.
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="KQgqvN5kZZ6L" colab_type="code" colab={}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.dummy import DummyRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score, KFold
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
import eli5
from eli5.sklearn import PermutationImportance
# + [markdown] id="AqZa66KOasMM" colab_type="text"
# # Load data
# + id="gMkHdL0eauS2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="9a3489b4-876a-4689-b729-22c30733c811" executionInfo={"status": "ok", "timestamp": 1583393032685, "user_tz": -60, "elapsed": 2603, "user": {"displayName": "Micha\u0142 Kaminski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNEiKYXKoLPayrW4pRAXgkUo1NvJkFJjHgwi4UdA=s64", "userId": "08020763565284961230"}}
# Load the car-offers dataset from HDF5 and show its dimensions.
df = pd.read_hdf('data/car.h5')
df.shape
# + [markdown] id="nKGlyeuxa6HQ" colab_type="text"
# # Code from day 3
# + id="_fhxue2_a9dz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="0d469741-43cb-4fdb-8b92-3a65ad17c372" executionInfo={"status": "ok", "timestamp": 1583393042916, "user_tz": -60, "elapsed": 9130, "user": {"displayName": "Micha\u0142 Kaminski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNEiKYXKoLPayrW4pRAXgkUo1NvJkFJjHgwi4UdA=s64", "userId": "08020763565284961230"}}
# Drop offers priced in EUR so all rows share a single currency.
df = df[ df['price_currency'] != 'EUR' ]
# Integer-encode every column into a '__cat' companion column.
SUFFIX_CAT = '__cat'
for feat in df.columns:
    # List-valued columns cannot be factorized — skip them.
    if isinstance(df[ feat ][0], list): continue
    factorized_values = df[ feat ].factorize()[0]
    # avoid columns named like 'feat___cat___cat'
    if SUFFIX_CAT in feat:
        df[ feat ] = factorized_values
    else:
        df[feat + SUFFIX_CAT] = factorized_values
# Keep encoded features, excluding anything price-related (the target).
cat_feats = [x for x in df.columns if SUFFIX_CAT in x]
cat_feats = [x for x in cat_feats if 'price' not in x]
# Baseline: shallow decision tree scored by 5-fold negative MAE.
X = df[ cat_feats ].values
y = df[ 'price_value' ].values
model = DecisionTreeRegressor(max_depth=5)
scores = cross_val_score(model, X, y, cv = 5, scoring='neg_mean_absolute_error')
np.mean(scores), np.std(scores)
# + id="LxlCKKwbanjP" colab_type="code" colab={}
def run_model(model, feats):
    """Cross-validate `model` on the global `df` using `feats` as inputs.

    Returns a (mean, std) pair of the 5-fold negative-MAE scores.
    """
    features = df[feats].values
    target = df['price_value'].values
    cv_scores = cross_val_score(
        model, features, target, cv=5, scoring='neg_mean_absolute_error'
    )
    return np.mean(cv_scores), np.std(cv_scores)
# + id="LJT8NWGCcRPi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="dfebe27b-5942-4f31-abfa-a050b95066c7" executionInfo={"status": "ok", "timestamp": 1583390758603, "user_tz": -60, "elapsed": 7289, "user": {"displayName": "Micha\u0142 Kaminski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNEiKYXKoLPayrW4pRAXgkUo1NvJkFJjHgwi4UdA=s64", "userId": "08020763565284961230"}}
run_model(model, cat_feats)
# + [markdown] id="uP4r6yD8cfrd" colab_type="text"
# # Random Forest
# + id="ibqh-NJmch1m" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="3e44f5a1-2ba6-4ee2-ce51-bc5e9019c21e" executionInfo={"status": "ok", "timestamp": 1583391173393, "user_tz": -60, "elapsed": 188849, "user": {"displayName": "Micha\u0142 Kaminski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNEiKYXKoLPayrW4pRAXgkUo1NvJkFJjHgwi4UdA=s64", "userId": "08020763565284961230"}}
# Random-forest benchmark: 50 shallow trees, fixed seed for reproducibility.
rf_model = RandomForestRegressor(max_depth=5, n_estimators=50, random_state=0)
run_model(rf_model, cat_feats)
# + [markdown] id="Qax7Irx-dDKR" colab_type="text"
# # XGBoost
# + id="5m3NL1-cdF8H" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 124} outputId="e9bf30a7-1418-4c2a-d308-b65b476d2a64" executionInfo={"status": "ok", "timestamp": 1583391366411, "user_tz": -60, "elapsed": 115891, "user": {"displayName": "Micha\u0142 Kaminski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNEiKYXKoLPayrW4pRAXgkUo1NvJkFJjHgwi4UdA=s64", "userId": "08020763565284961230"}}
# XGBoost benchmark with hyperparameters matching the tree/forest runs above.
xgb_params = {
    'max_depth' : 5,
    'n_estimators' : 50,
    'learning_rate': 0.1,
    'seed':0
}
xgb_model = xgb.XGBRegressor(**xgb_params)
run_model(xgb_model, cat_feats)
# + id="-F0Ux7_He2gl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 406} outputId="201826ab-8741-43ec-adf0-4092e0ac2923" executionInfo={"status": "ok", "timestamp": 1583391823006, "user_tz": -60, "elapsed": 347176, "user": {"displayName": "Micha\u0142 Kaminski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNEiKYXKoLPayrW4pRAXgkUo1NvJkFJjHgwi4UdA=s64", "userId": "08020763565284961230"}}
# Fit on the full matrix, then rank features by permutation importance.
xgb_model.fit(X, y)
imp = PermutationImportance(xgb_model, random_state=0).fit(X, y)
eli5.show_weights(imp, feature_names=cat_feats)
# + id="RTGiSpNgghso" colab_type="code" colab={}
# Top-20 features hand-picked from the permutation-importance ranking above.
feats = ['param_napęd__cat','param_rok-produkcji__cat','param_stan__cat','param_faktura-vat__cat','param_moc__cat',\
         'param_skrzynia-biegów__cat','feature_kamera-cofania__cat','param_marka-pojazdu__cat','param_typ__cat',\
         'param_pojemność-skokowa__cat','feature_wspomaganie-kierownicy__cat','seller_name__cat','param_wersja__cat',\
         'param_model-pojazdu__cat','feature_światła-led__cat','feature_asystent-pasa-ruchu__cat','param_kod-silnika__cat',\
         'feature_system-start-stop__cat','feature_regulowane-zawieszenie__cat','feature_łopatki-zmiany-biegów__cat']
# + id="pQc8UF9Bhqe5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 124} outputId="0eee724b-990d-4e5c-ab3d-66d72d938468" executionInfo={"status": "ok", "timestamp": 1583392183080, "user_tz": -60, "elapsed": 25267, "user": {"displayName": "Micha\u0142 Kaminski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNEiKYXKoLPayrW4pRAXgkUo1NvJkFJjHgwi4UdA=s64", "userId": "08020763565284961230"}}
# Run the model on the reduced feature set; the score should stay close to
# the full-feature run if the top-20 features capture most of the signal.
run_model(xgb_model, feats)
# + [markdown] id="33xRyHX5hz0p" colab_type="text"
# # Features
# + id="0zbTqbihh3nc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 124} outputId="5a219013-c191-4cc5-a202-96dbf95ae453" executionInfo={"status": "ok", "timestamp": 1583393093064, "user_tz": -60, "elapsed": 25897, "user": {"displayName": "Micha\u0142 Kaminski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNEiKYXKoLPayrW4pRAXgkUo1NvJkFJjHgwi4UdA=s64", "userId": "08020763565284961230"}}
# Production year: convert strings to integers, mapping 'None' to -1.
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x))
# Replace the factorized 'param_rok-produkcji__cat' with the numeric year.
feats = ['param_napęd__cat','param_rok-produkcji','param_stan__cat','param_faktura-vat__cat','param_moc__cat',\
         'param_skrzynia-biegów__cat','feature_kamera-cofania__cat','param_marka-pojazdu__cat','param_typ__cat',\
         'param_pojemność-skokowa__cat','feature_wspomaganie-kierownicy__cat','seller_name__cat','param_wersja__cat',\
         'param_model-pojazdu__cat','feature_światła-led__cat','feature_asystent-pasa-ruchu__cat','param_kod-silnika__cat',\
         'feature_system-start-stop__cat','feature_regulowane-zawieszenie__cat','feature_łopatki-zmiany-biegów__cat']
# Re-score the model with the numeric year feature.
run_model(xgb_model, feats)
# + id="yCcid7mgjCpY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 124} outputId="db82dc4f-9b21-45c9-fa3c-8454563992dd" executionInfo={"status": "ok", "timestamp": 1583393122754, "user_tz": -60, "elapsed": 25271, "user": {"displayName": "Micha\u0142 Kaminski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNEiKYXKoLPayrW4pRAXgkUo1NvJkFJjHgwi4UdA=s64", "userId": "08020763565284961230"}}
# Engine power: strip the unit suffix (value is before the first space),
# mapping 'None' to -1.
df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(x.split(' ')[0]))
# Replace the factorized 'param_moc__cat' with the numeric power.
feats = ['param_napęd__cat','param_rok-produkcji','param_stan__cat','param_faktura-vat__cat','param_moc',\
         'param_skrzynia-biegów__cat','feature_kamera-cofania__cat','param_marka-pojazdu__cat','param_typ__cat',\
         'param_pojemność-skokowa__cat','feature_wspomaganie-kierownicy__cat','seller_name__cat','param_wersja__cat',\
         'param_model-pojazdu__cat','feature_światła-led__cat','feature_asystent-pasa-ruchu__cat','param_kod-silnika__cat',\
         'feature_system-start-stop__cat','feature_regulowane-zawieszenie__cat','feature_łopatki-zmiany-biegów__cat']
# Re-score the model with the numeric power feature.
run_model(xgb_model, feats)
# + id="6-p_ypLZkAqg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 124} outputId="ac2b7f8e-1e25-4cbe-e09f-07080d4e7e5a" executionInfo={"status": "ok", "timestamp": 1583393239419, "user_tz": -60, "elapsed": 25293, "user": {"displayName": "Micha\u0142 Kaminski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNEiKYXKoLPayrW4pRAXgkUo1NvJkFJjHgwi4UdA=s64", "userId": "08020763565284961230"}}
# Engine displacement: take the number before 'cm3' and drop spaces,
# mapping 'None' to -1.
df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split('cm3')[0].replace(' ', '')))
# Replace the factorized 'param_pojemność-skokowa__cat' with the numeric value.
feats = ['param_napęd__cat','param_rok-produkcji','param_stan__cat','param_faktura-vat__cat','param_moc',\
         'param_skrzynia-biegów__cat','feature_kamera-cofania__cat','param_marka-pojazdu__cat','param_typ__cat',\
         'param_pojemność-skokowa','feature_wspomaganie-kierownicy__cat','seller_name__cat','param_wersja__cat',\
         'param_model-pojazdu__cat','feature_światła-led__cat','feature_asystent-pasa-ruchu__cat','param_kod-silnika__cat',\
         'feature_system-start-stop__cat','feature_regulowane-zawieszenie__cat','feature_łopatki-zmiany-biegów__cat']
# Re-score the model with the numeric displacement feature.
run_model(xgb_model, feats)
# + id="v6MbF6e6m55-" colab_type="code" colab={}
# !git status
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.3
# language: julia
# name: julia-1.5
# ---
# ## 線形代数
#
# 線形代数に関する関数は、LinearAlgebraモジュールに含まれている。
using LinearAlgebra
# ### ベクトルの演算
# +
# Dot (inner) product.
println(dot([1,2,3], [4,5,6]))
# Cross (vector) product.
println(cross([0,1,0], [0,0,1]))
v = [-1, 2, 3];
# L1 norm.
println(norm(v, 1))
# L2 norm.
println(norm(v, 2))
# L-infinity norm.
println(norm(v, Inf))
# Normalize by the L1 norm.
println(normalize(v, 1))
# Normalize by the L2 norm.
normalize(v, 2)
# -
# ### 行列の演算
# +
A = [1 2 3; 4 1 6; 7 8 1]
# Trace.
println(tr(A))
# Determinant.
println(det(A))
# Inverse.
inv(A)
# +
# Log-determinant (this A has det > 0, so the real log is defined).
println(logdet(A))
# Moore-Penrose pseudo-inverse.
pinv(A)
# -
# ### 行列の種類
#
# 対称行列やエルミート行列などの特殊な形に行列は、それぞれSymmetric型やHermitian型が用意されている。
#
# 対称行列は、:LであればAの下三角行列に基づく対称行列に変換される.
#
# また、Juliaでサポートされている特殊な種類の行列は以下のようになる。
#
# **Symmetric:対称行列、Tridiagonal:三重対角行列、Hermitian:エルミート行列、SymTridiagonal:対称三重対角行列**
#
# **UpperTriangular:上三角行列、Bidiagonal:二重対角行列、LowerTriangular:下三角行列、Diagonal:対角行列**
A = rand(3,3)
# Wrap A as a symmetric matrix (upper triangle by default).
Symmetric(A)
# :L builds the symmetric matrix from the lower triangle instead.
Symmetric(A, :L)
issymmetric(Symmetric(A))
# For example, an upper-triangular matrix can be created as follows.
# +
A = [1 2 3; 4 5 6; 7 8 9;]
UpperTriangular(A)
# -
# ### 行列分解
# Cholesky分解やLU分解など、いくつかの分解手法が知られている。それぞれの関数は、例えばcholeskyに対してcholesky!のように入力の行列を書き換えることで空間計算量を節約する関数も合わせて提供されている。
#
# **cholesky,hessenberg,lu,eigen,qr,svdなどの分解手法が提供されている**。
#
# If `F::SVD` is the factorization object, `U`, `S`, `V` and `Vt` can be obtained
# via `F.U`, `F.S`, `F.V` and `F.Vt`, such that `A = U * Diagonal(S) * Vt`.
# The singular values in `S` are sorted in descending order.
# Iterating the decomposition produces the components `U`, `S`, and `V`.
# +
A = rand(Float32, 4, 3)
# Singular value decomposition; F is an SVD factorization object.
F = svd(A)
println(typeof(F))
# The factorization exposes the fields S, U, V and Vt.
F
# -
# ### BLAS(basic linear algebra subprograms)
# 線形代数の演算に関する標準的なAPI規格。Juliaでは、LinearAlgebra.BLASモジュールでBLASのラッパーを提供している。
# BLASは余計な処理が入って遅いことが多いので、使うことは頻繁にないと思う
#
# ここでは、gemv関数の使い方を紹介する。gemv関数は、行列とベクトルの積を計算するBLAS関数であり、行列をA,二つのベクトルをx,yとすると、$y = \alpha * A * x + \beta * y$を計算する。係数$\alpha,\beta$はそれぞれスカラーで、行列Aとベクトルyに対する重み係数である。オプションでAの転置行列を指定することも可能である。
# +
using LinearAlgebra.BLAS
A = [1.0 4.0; 2.0 5.0; 3.0 6.0]
x = [1.0, 2.0, 3.0]
y = [0.0, 0.0]
# In-place y = 1.0 * A' * x + 1.0 * y ('T' selects the transpose of A).
BLAS.gemv!('T', 1.0, A, x, 1.0, y)
# -
# ## ファイル入出力
#
# ファイルの入出力とシリアライズ・デシリアライズ、XMLやJSONファイルの扱いに関して紹介する
#
# ### ファイルとストリーム
# 構文
# $open(filename::String, [mode::String]) -> IOStream$
#
# mode一覧
# r:read, w:write, a:append, r+ or w+:read+write, a+:read+append
f = open("input.txt")
# Read all remaining lines at once into a vector of strings.
println(readlines(f))
close(f)
# +
f = open("input.txt")
# Alternatively, iterate line by line (allows per-line processing).
for line in eachline(f)
    println(line)
end
close(f)
# -
# #### close関数の省略
# また、以下のように処理することで、close関数を省略することができる
# Passing the reader function directly lets `open` close the file automatically.
open(readlines, "input.txt")
# The do-block form also closes the file when the block exits.
open("input.txt") do f
    for line in eachline(f)
        println(line)
    end
end
# #### Writing
# +
# "w" mode truncates/creates the file; each println writes one line.
open("output.txt","w") do f
    println(f, "Line 1")
    println(f, "Line 2")
end
open(readlines, "output.txt")
# -
# ## シリアライズとデシリアライズ
# シリアライズとは、オブジェクトをバイトストリームに変換する処理をさす。その逆に、バイトストリームをオブジェクトに変換する処理をデシリアライズと呼ぶ。
#
# これによって、Juliaのオブジェクトをファイルとして保存したり、ファイルからオブジェクトを復元することが可能になる。Pythonでは、pickleというモジュールが標準で用意されており、Pythonのオブジェクトをバイトストリームに変換することができる。
#
# Juliaでは、標準でSerializationモジュールが提供されているのでそれを利用する。
#
# 構文:
#
# $serialize(stream::IO, value)$
#
# $serialize(filename::String, value)$
#
# 以下に、辞書オブジェクトをシリアライズしてファイルへ保存し、デシリアライズで復元する例を示す。
# +
using Serialization
# Serialize a Dict to disk, then restore it.
dict = Dict("a"=>1, "b"=>2)
serialize("dict.dat", dict)
deserialize("dict.dat")
# -
# ただし、関数の中身や型の定義はシリアライズによって保存されないので、デシリアライズするときに、それらの関数や型があらかじめ読み込まれた状態にする必要性がある、また、シリアライズとデシリアライズでJuliaのバージョンが異なっている場合、元のデータが復元されることが保証されていない。
#
# そこで、**長期間保存する場合はJLD2.jlを使用することが推奨されている**。
# ## JLD2
#
# 入出力に関するJuliaの主要なパッケージは、JuliaIOというGithubページにまとめられている。
#
# JLD2.jlはJuliaのオブジェクトを保存するためのパッケージである。JLD2はHDF5というフォーマットのサブセットである。HDF5は、大規模な階層データを保存するためのフォーマットとして、科学技術で広く採用されている。ちなみに、HDF5を扱うためのパッケージとしてHDF5.jlがある(こっちも有名)。
#
# JLD2を使うときは、FileIO.jlパッケージも合わせてインストールしておくと便利である。
#
#
# +
using JLD2, FileIO
data = rand(3, 2)
# JLD2 stores named datasets; "data" is the key used to retrieve it later.
save("out.jld2", "data", data)
load("out.jld2")
# -
# #### 注意
# JLD2ではHDF5と同様に、保存するデータに名前を付ける必要性があり、指定した名前のデータのみを復元することが可能である。
#
# 複数のデータを保存するには$save("out.jld2", Dict("data1"=>data1, "data2"=>data2)$のように名前とデータの辞書オブジェクトを保存すれば良い
# ### JSONファイルの入出力
# JSON.jlというパッケージがある
# +
using JSON
# Parse a JSON file into Julia data structures (Dicts/Vectors).
JSON.parsefile("test.json")
# -
# To convert Julia data to JSON text, use the JSON.json function.
dict = JSON.parsefile("test.json")
JSON.json(dict)
# ## XMLファイルの入出力
#
# 今回はコードのみ紹介して省略する(EzXML.jlを使った事例)
# + active=""
# using EzXML
# xml = readxml("test.xml")
# xmlroot = root(xml)
#
# # 子ノードの取得
# children = elements(xmlroot)
#
# # ノードの名前の一覧を取得
# nodename.(children)
#
# # 属性の値を取得
# children[1]["name"]
#
# # XPathを使ったノードの検索
# nodes = findall("//species/text()", xmlroot)
# nodecontent.(nodes)
#
# # 他にも、最初や最後に見つかったノードを返すfindfirst,findlast関数が存在する
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# Geometrical transformations
# ==============================
#
# This examples demos some simple geometrical transformations on a Racoon face.
#
#
# +
import numpy as np
import scipy.misc
from scipy import ndimage
import matplotlib.pyplot as plt
# Grayscale racoon-face sample image.
# NOTE(review): scipy.misc.face was deprecated and removed in newer SciPy
# releases (scipy.datasets.face is the replacement) — confirm the installed
# SciPy version before running.
face = scipy.misc.face(gray=True)
lx, ly = face.shape
# Cropping: keep the central half of the image in each dimension.
crop_face = face[lx//4:-lx//4, ly//4:-ly//4]
# up <-> down flip
flip_ud_face = np.flipud(face)
# Rotation by 45 degrees; reshape=False keeps the original array shape.
rotate_face = ndimage.rotate(face, 45)
rotate_face_noreshape = ndimage.rotate(face, 45, reshape=False)
# Show the original and the four transformed versions side by side.
plt.figure(figsize=(12.5, 2.5))
plt.subplot(151)
plt.imshow(face, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(152)
plt.imshow(crop_face, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(153)
plt.imshow(flip_ud_face, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(154)
plt.imshow(rotate_face, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(155)
plt.imshow(rotate_face_noreshape, cmap=plt.cm.gray)
plt.axis('off')
plt.subplots_adjust(wspace=0.02, hspace=0.3, top=1, bottom=0.1, left=0,
                    right=1)
plt.show()
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib as plt
import math
import vpython as vp
from vpython import *
#h=specific angular momentum, independent of mass
#n=node vector
#e=eccentricity or Lenz vector
#r=position vector
#v=velocity vector
#k=unit vector parallel to ECI Z-axis
#double mod_h,mod_e; //magnitude of associated vectors
#double mod_v,mod_r,mod_n; //magnitude of associated vectors
#double b1,b2; //terms used in calculation of Lenz vector
#double E; //specific mechanical energy
#double p; //semi-latus rectum, removed from this version
# Earth's standard gravitational parameter mu = G*M.
GM = 398600.4415 #km^3 s^-2
# Position and velocity vectors (ECI frame), filled in from user input below.
r = vector(0,0,0)
v = vector(0,0,0)
# +
# Read the ECI position (km) and velocity (km/s) components from the user.
r.x= float(input('x:')) #in km
r.y= float(input('y:')) #in km
r.z= float(input('z:')) #in km
# NOTE(review): '\nu:' is a newline followed by "u:" (not a Greek nu).
v.x= float(input('\nu:')) #in km/s
v.y= float(input('v:')) #in km/s
v.z= float(input('w:')) #in km/s
print ('\nspecified vector r:',r)
print ('specified vector v:',v)
# -
# Specific angular momentum h = r x v (independent of mass).
h = cross(r,v)
print("Value of the vector h:")
print (h)
print("Magnitude of the vector h:")
print(mag(h))
# +
# Populate the unit vector parallel to the ECI Z-axis.
# The 0 on each axis is defined as the centre of mass of the earth for an ECI reference frame
# Fix: `k` must be defined BEFORE it is used by cross(k, h) below; in the
# original cell order this definition came after the node-vector cell, so
# running the script top to bottom raised a NameError.
k = vector(0.0,0.0,1.0)
print ('specified vector k:',k)
# +
# Compute the node vector: n = k x h points towards the ascending node.
n = cross(k,h)
print("Value of the vector n:")
print(n)
print("Magnitude of the vector n:")
print (mag(n))
# +
# Specific mechanical energy (vis-viva): E = v^2/2 - GM/r.
E = (mag(v)**2)/2 - (GM/mag(r))
print ('The specific mechanical energy is:',E) # MJ/kg, i.e. km^2 s^-2
# -
# Semi-major axis from the energy: a = -GM / (2E).
a = (-GM)/(2*E)
print ('The semi-major axis is:', a)
# Semi-latus rectum: p = h^2 / GM.
p = (mag(h)**2/GM)
print ('The semi-latus rectum is:', p)
# +
# Eccentricity (Laplace-Runge-Lenz) vector, built component-wise:
# e = ((v^2 - GM/r) * r - (r . v) * v) / GM
e_vec = vector(0,0,0)
b1 = mag(v)**2 - (GM/mag(r));
b2 = dot(r,v);
e_vec.x = (b1*r.x-b2*v.x)/GM;
e_vec.y = (b1*r.y-b2*v.y)/GM;
e_vec.z = (b1*r.z-b2*v.z)/GM;
print (e_vec)
print (mag(e_vec))
# +
# Inclination from the z-component of the angular-momentum vector.
i = np.arccos(h.z/mag(h))
print ('The inclination in radians is:',i)
# +
# Right ascension of the ascending node (RAAN); the sign of n.y picks
# the correct quadrant for the arccos result.
W = np.arccos(n.x/mag(n))
if n.y<0:
    W = 2*np.pi - W
else:
    W = W  # NOTE(review): redundant branch, kept as-is
print ('the right ascension of the ascending node in radians is:', W)
# +
# Argument of perigee; the sign of e_vec.z picks the correct quadrant.
w = np.arccos(dot(n,e_vec)/(mag(n)*mag(e_vec)))
if e_vec.z < 0.0:
    w = 2*np.pi - w
else:
    w = w  # NOTE(review): redundant branch, kept as-is
print ('the argument of perigee is:', w)
# +
# True anomaly. V is first a placeholder vector, then overwritten with
# the scalar angle — the vector value is only printed for inspection.
V = vector (0.0,0.0,0.0)
print(V)
V = np.arccos(dot(e_vec,r)/(mag(e_vec)*mag(r)))
print (V)
# Negative radial velocity (r . v < 0) puts the anomaly in (pi, 2*pi).
if dot(r,v) < 0:
    V = 2*np.pi - V
else: V = V
print ('the true anomaly in radians is:', V)
# +
# Summary table of the computed orbital elements.
# Fix: the original listed an undefined name `e` here; the scalar
# eccentricity is the magnitude of the eccentricity vector, mag(e_vec).
Keplerian_Elements = pd.DataFrame ({"Orbital Parameter":["semi-major axis","eccentricity","inclination","argument of perigee","RAAN","TRAN"],
                                    "Cartesian":[r.x,r.y,r.z,v.x,v.y,v.z],
                                    "Keplerian": [a,mag(e_vec),i,w,W,V]})
print ('Table of results:')
Keplerian_Elements
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ibrahima1289/Python-Codes/blob/main/kura%20labs/Bank_Account.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="V38oW7tn9S2T"
# **Bank account:**
# This python program is a bank account with the attributes and operatins below.
#
# The program is not exaustive.
#
# Attributes:
#
# * account_number
# * balance
# * name
#
# Operations:
#
# * authentication
# * deposit
# * withdraw
# * get_balance
# * transfer
#
# Ibrahima Diallo
# + id="ldJH0B-O9RbZ"
class Bankaccount:
    """Toy interactive bank account driven entirely by console input().

    NOTE(review): credentials are hard-coded (login 12345 / password 1234)
    — demo code only, not for real use.
    """
    def __init__(self):
        # name: set during authentication; balance starts at 0.
        self.name = ""
        self.balance = 0
        # Hard-coded demo credentials.
        self.passwd = 1234
        self.login = 12345
        # Once locked out, every operation below becomes a no-op.
        self.islocked_out = False
    def authentication(self):
        """Prompt for name/login/password, allowing one retry before lockout.

        NOTE(review): the retry reads into self.account_number while the
        first attempt is stored in self.submitted_login; the success check
        accepts either. `or` short-circuits, so account_number is only read
        when a retry actually happened. int(input(...)) raises ValueError
        on non-numeric input — TODO confirm intended.
        """
        self.name = str (input("Enter your name: "))
        self.submitted_login = int(input("Enter your account number: "))
        self.submitted_passwd = int(input("Password: "))
        counter = 1
        # counter <= 1 allows exactly one retry after a failed first attempt.
        while ((self.login != self.submitted_login) or (self.submitted_passwd != self.passwd)) and (counter <=1):
            print ("Attempt ", counter)
            self.account_number = int(input("Login: "))
            self.submitted_passwd = int(input("Password: "))
            counter += 1
        if (self.submitted_login == self.login or self.account_number == self.login) and (self.submitted_passwd == self.passwd):
            print ("\nWelcome to your account!\n")
        else:
            print ("Incorrect login credentials.\nToo many attempts. You are locked out of this account.")
            self.islocked_out = True
    def deposit(self):
        """Ask whether to deposit; on "yes", add the entered amount to balance."""
        if not self.islocked_out:
            print("Do you want to deposit money? (yes/no)")
            answer = input()
            if answer == "yes":
                amount = float(input("\n\nEnter the amount you want to deposit: "))
                self.balance += amount
            else:
                print("Thank you for visiting us.")
    def withdraw(self):
        """Ask whether to withdraw; on "yes", subtract if funds suffice."""
        if not self.islocked_out:
            print("\n\nDo you want to withdraw money? (yes/no)")
            answer = str (input())
            if answer == "yes":
                wthdrw = float (input("How much do you want to withdraw? "))
                if wthdrw <= self.balance:
                    self.balance = self.balance - wthdrw
                    # NOTE(review): message reads "You enough money." —
                    # likely meant "You have enough money."
                    print("You enough money.")
                    print("Balance after withdraw = ",self.balance)
                else:
                    print("Sorry, you do not have enough money.")
    def transfer(self, other_account):
        """Ask whether to transfer; on "yes", move funds to other_account if funds suffice."""
        if not self.islocked_out:
            print("\n\nDo you want to transfer money? (yes/no)")
            answer = str (input())
            if answer == "yes":
                trnsfr = float (input("How much do you want to transfer? "))
                if trnsfr <= self.balance:
                    other_account.balance += trnsfr
                    self.balance = self.balance - trnsfr
                    # NOTE(review): same "You enough money." wording issue.
                    print("You enough money.")
                    print("\nBalance after transfer = ",self.balance )
                    print("The new account has:",other_account.balance)
                else:
                    print("Sorry, you do not have enough money to transfer.")
    def display(self):
        """Print a one-line summary of the account (unless locked out)."""
        if not self.islocked_out:
            print(f"Account Infos| Name: {self.name}, New balance for the old account is: $ {self.balance}, Account #: {self.login}")
print("\nWe assume the currency used is in dollars!!!\n")
# Demo: create two accounts, log into the first, and exercise each operation.
object_Bank = Bankaccount()
# balance 0
object_Bank2 = Bankaccount()
# balance 0
object_Bank.authentication()
object_Bank.deposit()
object_Bank.withdraw()
# Transfer from the first account into the second.
object_Bank.transfer(object_Bank2)
object_Bank.display()
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 递归(Recursion)
# >递归是一种解决问题的方法,它把一个问题分解为越来越小的子问题,直到问题的规模小到
# 可以被很简单直接解决。通常为了达到分解问题的效果,递归过程中要引入一个调用自身的函数。
# 乍一看,递归算法并没有什么特别的地方,但是,利用递归我们能够写出极为简明的解决问题的方
# 法,而且如果不用递归,这些问题将具有很大的编程难度。
#
#
# ### 递归三大定律
# - 递归算法必须有个基本结束条件;
# - 递归算法必须改变自己的状态并向基本结束条件演进;
# - 递归算法必须递归地调用自身。
#
# + pycharm={"name": "#%% \u4ee3\u7801\u5b9e\u73b0\n", "is_executing": false}
from typing import List
# 1.递归求和
def sum_recursion(arr:List[int]):
    """Recursively sum a list of numbers.

    :param arr: non-empty list of numbers to sum
    :return: the sum of all elements
    """
    # Base case: a single element sums to itself.
    if len(arr) == 1:
        return arr[0]
    # Recursive case: first element plus the sum of the rest.
    return arr[0] + sum_recursion(arr[1:])
# 2.递归求阶乘
def fact(n:int):
    """Recursively compute n! (factorial).

    :param n: number whose factorial is wanted
    :return: n! for n >= 0, or None for negative input
    """
    if n < 0:
        return None  # factorial is undefined for negative numbers
    if n <= 1:
        return 1     # base cases: 0! == 1! == 1
    return n * fact(n - 1)
# 3.将整数转换到指定进制字符串
def convert_to_str(n:int,base:int):
    """Recursively convert a non-negative integer to a string in the given base.

    :param n: non-negative integer to convert
    :param base: target base, 2 through 16
    :return: digit string of n in the given base
    """
    # Bug fix: the digit table was "0123456789ABCD" (14 characters), so any
    # digit of value 14 or 15 (needed for bases 15 and 16) raised IndexError.
    # It must contain all sixteen hex digits.
    convert_string="0123456789ABCDEF"
    if n < base:
        return convert_string[n]
    else:
        # Most-significant digits first; the current digit is appended last.
        return convert_to_str(n//base,base) + convert_string[n%base]
# 4.翻转字符串实现
def reverse_string(s:str):
    """Recursively reverse a string.

    :param s: string to reverse
    :return: the reversed string
    """
    # Strings of length 0 or 1 are their own reverse.
    if len(s) <= 1:
        return s
    # Last character first, then the reverse of everything before it.
    return s[-1] + reverse_string(s[:-1])
# 5.递归实现斐波那契数列
from functools import lru_cache
@lru_cache(maxsize=None)  # memoize results to avoid exponential recomputation
def fibonacci(n:int):
    """Return the n-th Fibonacci number (0, 1, 1, 2, 3, 5, 8, 13, ...).

    :param n: index into the Fibonacci sequence, n >= 0
    :return: the n-th Fibonacci number
    """
    # fib(0) == 0 and fib(1) == 1; larger terms sum the two before them.
    return n if n < 2 else fibonacci(n - 1) + fibonacci(n - 2)
# + pycharm={"name": "#%% \u6d4b\u8bd5\u6570\u636e\n", "is_executing": false}
# Demo driver: exercise each recursive helper with sample data.
if __name__ == '__main__':
    import random
    # Fixed seed so the random sample (and the printed sum) is reproducible.
    random.seed(54)
    arr = [random.randint(0,100) for _ in range(10)]
    print("递归求和结果:", sum_recursion(arr))
    print("递归求阶乘结果为:4! =", fact(4))
    print("整数转换到指定进制结果为:", convert_to_str(1024,16))
    print("字符串翻转结果为:", reverse_string("Hello world"))
    print("斐波那契数输出结果为:", fibonacci(8))
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="b0ce7404-a95a-4d1e-ac52-7b483d9fe427"
# NB. Run this in Google colab NOT Jupyter lab. https://colab.research.google.com/notebooks/intro.ipynb?utm_source=scs-index#recent=true
# Python ≥3.5 is required
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Is this notebook running on Colab or Kaggle?
IS_COLAB = "google.colab" in sys.modules
IS_KAGGLE = "kaggle_secrets" in sys.modules
# Scikit-Learn ≥0.20 is required
# NOTE(review): comparing version strings lexicographically can misorder
# versions like "0.100" vs "0.20" — packaging.version is the safer check.
import sklearn
assert sklearn.__version__ >= "0.20"
# TensorFlow ≥2.0 is required
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.0"
# Warn when no GPU is visible; RNN training is slow on CPU.
if not tf.config.list_physical_devices('GPU'):
    print("No GPU was detected. LSTMs and CNNs can be very slow without a GPU.")
    if IS_COLAB:
        print("Go to Runtime > Change runtime and select a GPU hardware accelerator.")
    if IS_KAGGLE:
        print("Go to Settings > Accelerator and select GPU.")
# Common imports
import numpy as np
import os
from pathlib import Path
# to make this notebook's output stable across runs
np.random.seed(42)
tf.random.set_seed(42)
# To plot pretty figures
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rnn"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure under IMAGES_PATH as <fig_id>.<ext>."""
    target = os.path.join(IMAGES_PATH, "{}.{}".format(fig_id, fig_extension))
    print("Saving figure", fig_id)
    # Optionally tighten subplot spacing before writing the file.
    if tight_layout:
        plt.tight_layout()
    plt.savefig(target, format=fig_extension, dpi=resolution)
# + id="9785355a-8f83-4bfb-a58d-3e8f6d92cb48"
# Import libraries
import pandas as pd
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
# + id="17302c80-77df-4551-9030-1847c7fa8778"
sf_df = pd.read_csv('zillow_singlefamily_CA.csv',index_col=0)
# + colab={"base_uri": "https://localhost:8080/", "height": 474} id="mFHYOLHzJM3e" outputId="05095639-b27a-49b3-e610-9efadaadde32"
# Make sure to set the zip codes down the rows
sf_df
# + [markdown] id="a2MS2nO8VmTW"
# # New Section
# + [markdown] id="38bcb674-e1eb-4feb-b474-40a30184df23"
#
# + id="5c11038c-ad28-4d9b-a929-8bb3a44008c0"
# set the random seed
np.random.seed(42)
# Extract the raw NumPy array (drops column headers and the index).
data = sf_df.values
# + [markdown] id="tbHpioNvQMEL"
# Scale the data
#
# + id="yUT0LG9sPmlQ"
# Scale the data so that the model doesn't take too long to run (and to remove all units to ensure all features are on the same footing)
# NOTE(review): the scaler is fit on the ENTIRE dataset before splitting,
# which leaks validation/test statistics into training — TODO confirm intended.
from sklearn.preprocessing import MinMaxScaler
data_scaled = MinMaxScaler().fit(data).transform(data)
# + id="se_q3158QIdf" colab={"base_uri": "https://localhost:8080/"} outputId="bd31129a-fc0e-46ba-ca79-e29a2f3ecc15"
data_scaled.shape
# + [markdown] id="f09e84e6-53aa-426d-94b5-cad1d4f1505e"
# Define training/testing/validation cells
# + id="9acefd5d-d243-455e-9735-c37a123d2186" colab={"base_uri": "https://localhost:8080/"} outputId="0c7adb4b-d935-4f66-bbf3-a5ef1906d925"
# Row indices splitting the zip-code rows into train / validation / test.
train_stop = 1414
valid_stop = 1614
# Number of time steps = number of date columns in the original frame.
n_steps = len(sf_df.columns)
n_steps
# + id="9eb1dc1b-efc8-425a-bfd3-aead25d25976"
# split the data into test, training and validation sets
# (inputs = all but the last time step; target = the last time step)
X_train, y_train = data_scaled[:train_stop,:-1], data_scaled[:train_stop,-1]
X_valid, y_valid = data_scaled[train_stop:valid_stop,:-1], data_scaled[train_stop:valid_stop,-1]
X_test, y_test = data_scaled[valid_stop:,:-1], data_scaled[valid_stop:,-1]
# + id="Qzk_upvDK-ZC"
# Add a trailing feature axis so shapes match Keras RNN expectations
# ((batch, steps, 1) for X; (batch, 1) for y).
y_train = np.expand_dims(y_train,-1)
y_valid = np.expand_dims(y_valid,-1)
y_test = np.expand_dims(y_test,-1)
X_train = np.expand_dims(X_train,-1)
X_valid = np.expand_dims(X_valid,-1)
X_test = np.expand_dims(X_test,-1)
# + id="11ab4ee2-cfc9-48bb-9839-07bfd9fa605a"
# Define a function to plot our sample
def plot_series(series, y=None, y_pred=None, x_label="$t$", y_label="$x(t)$"):
    """Plot one scaled series, optionally marking the target and a prediction.

    Relies on the module-level `n_steps` for the x-position of the markers.
    """
    plt.plot(series, ".-")
    if y is not None:
        # True next value, drawn as a blue "x" at t = n_steps.
        plt.plot(n_steps, y, "bx", markersize=10)
    if y_pred is not None:
        # Model prediction, drawn as a red dot at the same position.
        plt.plot(n_steps, y_pred, "ro")
    plt.grid(True)
    if x_label:
        plt.xlabel(x_label, fontsize=16)
    if y_label:
        plt.ylabel(y_label, fontsize=16, rotation=0)
    plt.hlines(0, 0, 100, linewidth=1)
    # Data is MinMax-scaled, so the y-axis is clamped to [0, 1].
    plt.axis([0, n_steps + 1, 0, 1])
# + colab={"base_uri": "https://localhost:8080/"} id="g6MZV0meN-j4" outputId="c0c78d63-5605-4b42-dec6-6b0fd2a8f7a4"
X_valid[0,:,0].shape
# + colab={"base_uri": "https://localhost:8080/", "height": 315} id="e8d002d7-f764-4245-81af-28ac0a8659c3" outputId="93cbf7c4-315e-4c46-b869-41477d54b937"
# Plot the first five validation series side by side.
fig, axes = plt.subplots(nrows=1, ncols=5, sharey=True, figsize=(18, 4))
for col in range(5):
    plt.sca(axes[col])
    plot_series(X_valid[col, :, 0], y_valid[col, 0],
                y_label=("$x(t)$" if col==0 else None))
save_fig("time_series_plot")
plt.show()
# 'x' in the graphs is what we are trying to predict
# + [markdown] id="NPQFgdPiaQ-G"
# Establish a baseline
# + colab={"base_uri": "https://localhost:8080/"} id="MJaqfFu7aT1r" outputId="862f0d5c-a389-4d6c-d458-af6e1a4e0950"
# Naive persistence baseline: predict the last observed datapoint of each
# series, and measure its MSE against the true next value.
y_pred = X_valid[:, -1]
np.mean(keras.losses.mean_squared_error(y_valid, y_pred))
# + colab={"base_uri": "https://localhost:8080/", "height": 293} id="eWUjVeGAaVoy" outputId="974e5c49-1087-4e68-f1a2-44cb5aa360c2"
plot_series(X_valid[0, :, 0], y_valid[0, 0], y_pred[0, 0])
plt.show()
# + id="10uc4CZqauJP"
np.random.seed(42)
tf.random.set_seed(42)
# + id="eGuEH_pwau2H"
# Deep RNN: two 20-unit SimpleRNN hidden layers followed by a single-unit
# SimpleRNN output layer. return_sequences=True makes each hidden layer
# pass the full sequence (not just its final state) to the next layer.
model = keras.models.Sequential([
    keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),
    keras.layers.SimpleRNN(20, return_sequences=True),
    keras.layers.SimpleRNN(1)
])
# + colab={"base_uri": "https://localhost:8080/"} id="XhpLLmDHazkS" outputId="09b71423-0314-4a30-cb17-02550b2f6d86"
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="TIneirXda7z3" outputId="36f7492a-6cd7-4134-d39d-3a3453608d99"
# We tried up to 20 epochs but the model started to exhibit signs of overfitting (training loss < validation loss) so we decided to stop at 6.
model.compile(loss="mse", optimizer="adam")
history = model.fit(X_train, y_train, epochs=6,
validation_data=(X_valid, y_valid))
# + colab={"base_uri": "https://localhost:8080/"} id="g3pYPeoFbDHQ" outputId="0a98bef1-16cb-4541-f2d7-bd049cc80681"
model.evaluate(X_valid, y_valid)
# we want our loss function to be as close to 0 as possible
# + id="9MG8l1wuoB8z"
model.save("singlefamily_trained.hdf5")
# + colab={"base_uri": "https://localhost:8080/"} id="rzMNYNoov-M8" outputId="cd437f93-fc2a-49fb-d773-ff672cfbb591"
# Shape with the oldest timestep dropped — used for the rolling window below.
X_train[:,1:,:].shape
# + colab={"base_uri": "https://localhost:8080/"} id="57pqbf9OTL-k" outputId="1e111617-34f9-4260-d5b6-3bc335772ca5"
y_train[:,].shape
# + colab={"base_uri": "https://localhost:8080/"} id="Gf5wRxqgQ16L" outputId="3900ca05-6411-4d78-e6c9-78e779751eab"
# Model evaluations (MSE) on all three splits.
model.evaluate(X_train,y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="-yZSEpeqQ2HG" outputId="ff38497f-3706-4b00-f404-85a6d205e54f"
model.evaluate(X_valid,y_valid)
# + colab={"base_uri": "https://localhost:8080/"} id="DSlG7PjEQ2Q1" outputId="8453eb0c-5193-458b-fa55-0926ab927586"
model.evaluate(X_test,y_test)
# + id="Cxd3AHRhTV7F"
# Build the input for a one-step-ahead forecast: drop the oldest timestep
# and append the known target (expand_dims restores the feature axis).
x_train_pred = np.concatenate((X_train[:,1:,:],np.expand_dims(y_train,-1)),axis=1)
# + colab={"base_uri": "https://localhost:8080/"} id="Bb_u__i3Kh3D" outputId="865b0930-2ce5-41bd-c115-75818c1566a4"
# build y_train prediction
y_train_pred = model.predict(x_train_pred)
y_train_pred.shape
# + id="DOFaf0I1-BaS"
# Same rolling-window construction for the validation split.
x_valid_pred = np.concatenate((X_valid[:,1:,:],np.expand_dims(y_valid,-1)),axis=1)
# + colab={"base_uri": "https://localhost:8080/"} id="CRfLKTEJLVT3" outputId="d5525b33-6ca7-434a-c3da-efa6331fd6e8"
# build y_valid prediction
y_valid_pred = model.predict(x_valid_pred)
y_valid_pred.shape
# + id="sXrt_LgTLmYM"
# Same rolling-window construction for the test split.
x_test_pred = np.concatenate((X_test[:,1:,:],np.expand_dims(y_test,-1)),axis=1)
# + colab={"base_uri": "https://localhost:8080/"} id="e9TARL4WLnaY" outputId="4d711385-49e7-4984-bd90-5a09b9bab21c"
# build y_test prediction
y_test_pred = model.predict(x_test_pred)
y_test_pred.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 422} id="9TF6-BIkN9Ms" outputId="549fdcae-6c66-4c60-8f90-ce5678afa35b"
# concatenate all three y_pred series together and append them to the original data
y_pred_all = np.concatenate((y_train_pred,y_valid_pred,y_test_pred),axis=0)
y_pred_all_df = pd.DataFrame(y_pred_all).astype('float')
y_pred_all_df
# + id="IXW3k-B_XWr6"
# sf_df is defined earlier in this file — presumably one row per region with
# date columns; TODO confirm row order matches y_pred_all.
sf_df['5/31/2021'] = y_pred_all_df[0].values
# + id="tOHLIEXjSk_g"
# Per-row min/max over the historical columns, used to undo min-max scaling.
# NOTE(review): the slices 0:-2 and 0:-3 exclude the columns appended above,
# but the differing offsets look fragile — verify they select the same
# historical columns in both lines.
sf_df["max"]=sf_df.iloc[:,0:-2].max(axis=1)
# + id="KkBU-VaAfsEW"
sf_df["min"]=sf_df.iloc[:,0:-3].min(axis=1)
# + id="O_4rJkKKSjIv"
# Inverse min-max transform: map the scaled prediction back to price units.
sf_df['5/31/2021_scaled'] = sf_df['5/31/2021']*(sf_df['max']-sf_df['min'])+sf_df['min']
# + id="Emb5wV1ESjOX"
# Drop the helper columns, keeping only the rescaled prediction.
sf_final_df = sf_df.drop(['max','min','5/31/2021'],axis=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 474} id="cJ7shgvHkFps" outputId="c1c91604-9c5c-428f-d4c7-a27f19980920"
sf_final_df.rename(columns={'5/31/2021_scaled':'5/31/2021'},inplace=True)
sf_final_df
# + id="mvmayudwVQrZ"
# Export the final table (index kept as the row identifier).
sf_final_df.to_csv('single_family_final.csv',index=True)
# + id="mJwzQKBULlZ9"
# + id="0FUpTls2VYse"
# + id="yYbGbokxVeIg"
# + id="SSQIbhz5LeHy"
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial for using the package `fast-ml`
#
# This package is as good as having a junior Data Scientist working for you. Most of the commonly used EDA steps, Missing Data Imputation techniques, Feature Engineering steps are covered in a ready to use format
# ## Part 5. Feature Engineering for Categorical Variables / Categorical Encodings
#
#
#
# #### 1. Import feature engineering module from the package
# `from fast_ml.feature_engineering import FeatureEngineering_Categorical, FeatureEngineering_Numerical`
#
# #### 2. Define the imputer object.
# * For Categorical variables use `FeatureEngineering_Categorical`
# * For Numerical variables use `FeatureEngineering_Numerical`
#
# `cat_encoder = FeatureEngineering_Categorical(method = 'label')`
# <br>or<br>
# `num_encoder = FeatureEngineering_Numerical(method = 'decile')`
#
# #### 3. Fit the object on your dataframe and provide a list of variables
# `cat_encoder.fit(train, variables = ['BsmtQual'])`
#
# #### 4. Apply the transform method on train / test dataset
# `train = cat_encoder.transform(train)`
# <br>&<br>
# `test = cat_encoder.transform(test)`
#
# #### 5. A parameter dictionary gets created which stores the values used for encoding. It can be viewed as
# `cat_encoder.param_dict_`
#
# ### Available Methods for Categorical Encoding
#
#
# 1. One-hot Encoding
# 1. Label Encoding / Integer Encoding
# 1. Count Encoding
# 1. Frequency Encoding
# 1. Ordered Label Encoding
#
# <b>Target Based Encoding</b>
# 6. Target Ordered Encoding
# 7. Target Mean Value Encoding
# 8. Target Probability Ratio Encoding (only Classification model)
# 9. Weight of Evidence (WOE) Encoding (only Classification model)
# ## Start Feature Engineering for Categorical Variables
# +
# Import Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from fast_ml.feature_engineering import FeatureEngineering_Categorical
# %matplotlib inline
import warnings
warnings.filterwarnings('ignore')
# -
# Load the tutorial dataset.
# NOTE(review): the encoders below reference 'BsmtQual'/'FireplaceQu', which
# are Ames house-prices columns, not Titanic columns — confirm this is the
# intended CSV.
df = pd.read_csv('../data/titanic.csv')
df.shape
df.head(5)
# Dtype groups used to distinguish numeric from categorical columns.
numeric_type = ['float64', 'int64']
category_type = ['object']
# ## Categorical Variables
# ## Step 1 : Rare Encoding the variables
# Fill missing values with an explicit 'Missing' label, then group the
# infrequent categories (< 5% of rows) into a single 'Rare' bucket.
rare_encode = ['BsmtQual', 'FireplaceQu']
for var in rare_encode:
    df[var].fillna('Missing', inplace = True)
    print(df[var].value_counts())
rare_encoder = FeatureEngineering_Categorical(method='rare', rare_tol=0.05)
rare_encoder.fit(df, variables = rare_encode)
# param_dict_ records which categories were kept vs collapsed into 'Rare'.
rare_encoder.param_dict_
df = rare_encoder.transform(df)
# Show the value counts after rare-encoding for comparison.
for var in rare_encode:
    print(df[var].value_counts())
# ## Step 2 : Encoding the Variables in Numeric values
# ### 1. BsmtQual
#Before Imputation
df['BsmtQual'].value_counts()
# Integer/label encoding: each category is mapped to an arbitrary integer.
cat_encoder1 = FeatureEngineering_Categorical(method = 'label')
cat_encoder1.fit(df, variables = ['BsmtQual'])
# param_dict_ holds the category -> integer mapping used by transform().
cat_encoder1.param_dict_
df = cat_encoder1.transform(df)
#After Imputation
df['BsmtQual'].value_counts()
# ### 2. FireplaceQu
# +
# Reload df from the house-prices dataset (replaces the earlier dataframe)
# and label missing FireplaceQu values explicitly.
df = pd.read_csv('../data/house_prices.csv')
df['FireplaceQu'].fillna('Missing', inplace = True)
#Before Imputation
df['FireplaceQu'].value_counts()
# +
# Target-mean encoding: each category is replaced by the mean SalePrice
# observed for that category.
cat_encoder2 = FeatureEngineering_Categorical(method = 'target_mean')
cat_encoder2.fit(df, variables = ['FireplaceQu'],target = 'SalePrice')
print (cat_encoder2.param_dict_)
df = cat_encoder2.transform(df)
# -
#After Imputation
df['FireplaceQu'].value_counts()
# ----
# ## Classification Model - Target based Encoding
# +
# Reload the Titanic data and encode 'Embarked' with the target probability
# ratio P(Survived=1)/P(Survived=0) per category (model='clf' marks this as
# a classification-only encoding).
df = pd.read_csv('../data/titanic.csv')
df['Embarked'].fillna('Missing', inplace = True)
#Before Imputation
print('Before Imputation')
print(df['Embarked'].value_counts())
cat_encoder3 = FeatureEngineering_Categorical(model='clf', method = 'target_prob_ratio')
cat_encoder3.fit(df, variables = ['Embarked'],target = 'Survived')
print('Parameter Dictionary')
print(cat_encoder3.param_dict_)
df = cat_encoder3.transform(df)
#After Imputation
print('After Imputation')
print(df['Embarked'].value_counts())
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Stock Market Prediction using Numerical and Textual Analysis
# +
# importing libraries
import warnings
warnings.filterwarnings('ignore')
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing, metrics
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Dense, Activation
# Download the VADER lexicon needed by SentimentIntensityAnalyzer below.
import nltk
nltk.download('vader_lexicon')
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import subjectivity
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.util import *
# -
# reading the datasets into pandas (local Windows paths — adjust as needed)
stock_price = pd.read_csv('D:/abcd/TSLA.csv')
stock_headlines = pd.read_csv('D:/abcd/india-news-headlines.csv')
stock_price.head()
# Quick size and missing-value checks on both datasets.
len(stock_price), len(stock_headlines)
stock_price.isna().any(), stock_headlines.isna().any()
# Stock Prices
# dropping any null entries in our data
stock_price.dropna(axis=0, inplace=True)
# +
# dropping duplicates in data
stock_price = stock_price.drop_duplicates()
# converting the datatype of column 'Date' from type object to type 'datetime'
# (normalize() strips the time-of-day component)
stock_price['Date'] = pd.to_datetime(stock_price['Date']).dt.normalize()
# filtering the important columns
stock_price = stock_price.filter(['Date', 'Close', 'Open', 'High', 'Low', 'Volume'])
# setting column 'Date' as the index column
stock_price.set_index('Date', inplace= True)
# sorting the data according to Date
stock_price = stock_price.sort_index(ascending=True, axis=0)
stock_price
# -
# Stock Headlines
# +
# dropping duplicates
stock_headlines = stock_headlines.drop_duplicates()
# converting the datatype of column 'Date' from type string to type 'datetime':
# publish_date arrives as YYYYMMDD digits, so insert dashes before parsing
stock_headlines['publish_date'] = stock_headlines['publish_date'].astype(str)
stock_headlines['publish_date'] = stock_headlines['publish_date'].apply(lambda x: x[0:4]+'-'+x[4:6]+'-'+x[6:8])
stock_headlines['publish_date'] = pd.to_datetime(stock_headlines['publish_date']).dt.normalize()
# filtering the important columns
stock_headlines = stock_headlines.filter(['publish_date', 'headline_text'])
# grouping the news headlines according to Date: all headlines for one day
# are joined into a single comma-separated string
stock_headlines = stock_headlines.groupby(['publish_date'])['headline_text'].apply(lambda x: ','.join(x)).reset_index()
# setting Date as the index column
stock_headlines.set_index('publish_date', inplace= True)
# sorting the data according to the Date
stock_headlines = stock_headlines.sort_index(ascending=True, axis=0)
stock_headlines
# -
# Combining both Prices and Headlines
# +
# Align prices and headlines on their shared date index; rows present in
# only one of the two frames get NaNs and are dropped.
stock_data = pd.concat([stock_price, stock_headlines], axis=1)
stock_data.dropna(axis=0, inplace=True)
# displaying the combined stock_data
stock_data
# -
# Sentiment Analysis: add one column per VADER score component.
stock_data['compound'] = ''
stock_data['negative'] = ''
stock_data['neutral'] = ''
stock_data['positive'] = ''
stock_data.head()
# +
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import unicodedata
# VADER sentiment analyzer: scores each day's concatenated headlines.
sid = SentimentIntensityAnalyzer()
print('Start calculating sentiment scores:')
# compound is the normalised overall score in [-1, 1]; neg/neu/pos are the
# proportions of negative/neutral/positive content.
stock_data['compound'] = stock_data['headline_text'].apply(lambda x: sid.polarity_scores(x)['compound'])
print('Compound Done')
stock_data['negative'] = stock_data['headline_text'].apply(lambda x: sid.polarity_scores(x)['neg'])
print('Negative Done')
stock_data['neutral'] = stock_data['headline_text'].apply(lambda x: sid.polarity_scores(x)['neu'])
print('Neutral Done')
stock_data['positive'] = stock_data['headline_text'].apply(lambda x: sid.polarity_scores(x)['pos'])
print('Positive Done')
print('Stop')
stock_data.head()
# +
# dropping the raw 'headline_text' (no longer needed) and rearranging columns
stock_data.drop(['headline_text'], inplace=True, axis=1)
stock_data = stock_data[['Close', 'compound', 'negative', 'neutral', 'positive', 'Open', 'High', 'Low', 'Volume']]
# final stock_data
stock_data.head()
# -
# Feature engineering of the Combined Data: sanity checks.
stock_data.isna().any()
stock_data.describe(include='all')
# displaying stock_data information
stock_data.info()
# Stock Data Analysis
# +
# Close-price time series plot.
plt.figure(figsize=(15,10))
stock_data['Close'].plot()
plt.title("Close Price")
plt.xlabel('Date')
plt.ylabel('Close Price (INR)')
# -
# calculating 7 day rolling mean (first 6 rows are NaN by construction)
stock_data.rolling(7).mean().head(20)
# +
# setting figure size
plt.figure(figsize=(15,10))
# plotting the close price and a 30-day rolling mean of close price
stock_data['Close'].plot()
stock_data.rolling(window=30).mean()['Close'].plot()
# -
# displaying stock_data
stock_data
# DATA FOR MODELLING
# +
# Use all available rows (minus one, to leave room for the shifted target).
percentage_of_data = 1.0
data_to_use = int(percentage_of_data*(len(stock_data)-1))
# using 80% of data for training
train_end = int(data_to_use*0.8)
total_data = len(stock_data)
start = total_data - data_to_use
# printing number of records in the training and test datasets
print("Number of records in Training Data:", train_end)
print("Number of records in Test Data:", total_data - train_end)
# +
# predicting one step ahead
steps_to_predict = 1
# Slice each feature column by position (column order fixed above).
close_price = stock_data.iloc[start:total_data,0] #close
compound = stock_data.iloc[start:total_data,1] #compound
negative = stock_data.iloc[start:total_data,2] #neg
neutral = stock_data.iloc[start:total_data,3] #neu
positive = stock_data.iloc[start:total_data,4] #pos
open_price = stock_data.iloc[start:total_data,5] #open
high = stock_data.iloc[start:total_data,6] #high
low = stock_data.iloc[start:total_data,7] #low
volume = stock_data.iloc[start:total_data,8] #volume
# printing close price
print("Close Price:")
close_price
# +
# shifting next day close: shift(-1) moves tomorrow's value onto today's row
close_price_shifted = close_price.shift(-1)
# shifting next day compound sentiment the same way
compound_shifted = compound.shift(-1)
# concatenating the captured training data into a dataframe
data = pd.concat([close_price, close_price_shifted, compound, compound_shifted, volume, open_price, high, low], axis=1)
# setting column names of the revised stock data
data.columns = ['close_price', 'close_price_shifted', 'compound', 'compound_shifted','volume', 'open_price', 'high', 'low']
# dropping nulls (the last row, whose shifted values are NaN)
data = data.dropna()
data.head(10)
# -
# Next-day close price is the prediction target.
y = data['close_price_shifted']
y
# setting the features dataset for prediction
cols = ['close_price', 'compound', 'compound_shifted', 'volume', 'open_price', 'high', 'low']
x = data[cols]
x
# +
# scaling the feature dataset to [-1, 1]
scaler_x = preprocessing.MinMaxScaler (feature_range=(-1, 1))
x = np.array(x).reshape((len(x) ,len(cols)))
x = scaler_x.fit_transform(x)
# scaling the target variable to [-1, 1] (kept separate so predictions can
# be inverse-transformed later)
scaler_y = preprocessing.MinMaxScaler (feature_range=(-1, 1))
y = np.array (y).reshape ((len( y), 1))
y = scaler_y.fit_transform (y)
# displaying the scaled feature dataset and the target variable
x, y
# -
# Data divided for training and test datasets
# +
# Chronological split: first 80% for training, the remainder for test
# (no shuffling, which matters for time series).
X_train = x[0 : train_end,]
X_test = x[train_end+1 : len(x),]
y_train = y[0 : train_end]
y_test = y[train_end+1 : len(y)]
# printing the shape of the training and the test datasets
print('Number of rows and columns in Training set X:', X_train.shape, 'and y:', y_train.shape)
print('Number of rows and columns in Test set X:', X_test.shape, 'and y:', y_test.shape)
# -
# Reshape the 2-D feature matrices into 3-D (samples, timesteps, features)
# for the LSTM.
# NOTE(review): X_train is (n, 7) here, so a reshape to
# (n, 7, X_train.shape[1]) targets n*49 elements from n*7 — this looks
# size-inconsistent and would raise; verify the intended timestep layout.
X_train = np.reshape(X_train, (X_train.shape[0], 7, X_train.shape[1]))
X_test = np.reshape(X_test, (X_test.shape[0], 7, X_test.shape[1]))
# Data Modelling
# +
# seed set to achieve consistent and less random predictions
np.random.seed(2020)
# Stacked LSTM: three 100-unit layers with 10% dropout after each,
# followed by a single-unit linear output (next-day close, scaled).
model=Sequential()
# NOTE(review): input_shape=(len(cols),1) == (7,1) does not match the
# (·,7,7) layout the reshape above appears to target — confirm.
model.add(LSTM(100,return_sequences=True,activation='tanh',input_shape=(len(cols),1)))
model.add(Dropout(0.1))
model.add(LSTM(100,return_sequences=True,activation='tanh'))
model.add(Dropout(0.1))
model.add(LSTM(100,activation='tanh'))
model.add(Dropout(0.1))
model.add(Dense(1))
# printing the model summary
model.summary()
# +
model.compile(loss='mse' , optimizer='adam')
# fitting the model using the training dataset
model.fit(X_train, y_train, batch_size=8, epochs=10, verbose=1)
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# # Answers to tutorial questions and exercises
#
# ## Part 1: Creating a BLAST database
#
# ### General questions
#
# **What is the name of the file containing our FASTA sequences?**
# _bacteria.fa_
#
# **What type of sequences do we have in our bacteria file?**
# _Nucleotide_
#
# **What is our new BLAST database (DB) called?**
# _bacteria.fa_
#
# **How many sequences were added to our new database?**
# _75_
#
# **Was the number of sequences added to our database the same as the number of sequences in our FASTA file?**
# _Yes_
#
#
# ### Exercise 1
#
# You will have noticed that there is also a file in the /bacteria folder called **bacteria_tr.fa** which also contains FASTA sequences which need to be converted into a BLAST database. Create a BLAST database from this file which has the output prefix **bacteria_prot** and can be referenced using the title **bacteria_prot**.
#
# It is up to you whether you create a logfile but it is worth using _head_ to check the type of sequences.
# (_hint: they might not be nucleotide_).
# Build a protein BLAST database (-dbtype prot) from the translated FASTA,
# writing database files with prefix 'bacteria_prot' plus a build logfile.
makeblastdb -in db/bacteria/bacteria_tr.fa -dbtype prot -title bacteria_prot -out db/bacteria/bacteria_prot -logfile db/bacteria/bacteria_prot.log
# Inspect the logfile to confirm the build and sequence count.
head db/bacteria/bacteria_prot.log
# List the current directory to see the generated database files.
ls -l .
# **What do you notice about the file extensions for the bacteria_prot database?**
# _They begin with a 'p' not an 'n' (e.g. '.pin' not '.nin')_
#
# **Why do you think they are different from the previous files?**
# _Because nucleotide BLAST database files have an 'n' prefix (e.g. '.nin'), but protein BLAST database files have a 'p' prefix (e.g. '.pin')_
# ## Part 2: Running a local BLAST search
#
# ### General questions
#
# **What percentage of our query aligns with our top hit?**
# _100%_
#
# **Is our query sequence the same length as our top hit?**
# _Yes, they are both 924 bp_
#
# **Based on the output of our blastn search, which species do you think our unknown sequence comes from? What gene might it be?**
# _Based on the description of the top hit, our sequence is TcpC from_ Escherichia coli
#
# ### Exercise 2
#
# **Using mammalian.fa create a new database which has the output prefix mammalian and can be referenced as mammalian.**
# (_hint: you don't need to be in the same folder as your FASTA file to write your database files there, just prefix the output prefix with the relative location - e.g. db/mammalian/mammalian)_
# Peek at the FASTA to confirm the sequence type (amino acids -> -dbtype prot).
head db/mammalian/mammalian.fa
# Build the protein database with prefix and title 'mammalian'.
makeblastdb -in db/mammalian/mammalian.fa -dbtype prot -title mammalian -out db/mammalian/mammalian -logfile db/mammalian/mammalian.log
# Confirm the database files were written.
ls -l db/mammalian
# **If our query sequence is nucleotide and we want to search a protein database, what BLAST application do we need to use?**
# _blastx_
#
# **With example/unknown.fa, run a BLAST search using the application in your answer above and search the database you have just created. We want a standard tabulated output file with the following additional columns:**
# * Full subject title
# * Query length
# * Subject length
# * Percentage query coverage
# blastx: translated nucleotide query against a protein DB. outfmt "6 std"
# is the standard 12 tabular columns, extended with subject title (stitle),
# query length (qlen), subject length (slen) and query coverage (qcovs).
blastx -query example/unknown.fa -db db/mammalian/mammalian -out example/blastx_mammalian.out -outfmt "6 std stitle qlen slen qcovs"
# Show the top of the results table.
head example/blastx_mammalian.out
# **What is our top hit?**
# _toll-like receptor 1 precursor [Homo sapiens]_
#
# **How much of our query sequence is covered by this alignment?**
# _45%_
#
# **What is the length of our top hit and where does the alignment start and finish?**
# _Our top hit is 786 amino acids in length with the alignment covering residues 634-764_
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Trial with nglview and ase
#
# https://github.com/arose/nglview
#
# https://wiki.fysik.dtu.dk/ase/about.html
#
# ### Borrowing from this example :
#
# https://github.com/arose/nglview/blob/master/examples/users/ase.md
#
# ### Installation
#
# pip install nglview==1.1.5
#
# pip install ase
#
# pip install ipywidgets==7.0.0
#
# pip install widgetsnbextension==3.0.0
#
# maybe also ipykernel=4.6?
#
# ### useful pages
#
# https://wiki.fysik.dtu.dk/ase/ase/build/build.html?highlight=molecule#ase.build.molecule
#
from ase.build import molecule
import nglview
import numpy as np
import ipywidgets
def makeview(models):
    """Return a list of NGLView widgets, one per ASE Atoms object in *models*.

    Fixes the original body, which referenced an undefined global ``model``
    (NameError when called), ignored the ``models`` argument, and returned
    nothing.
    """
    return [nglview.show_ase(model) for model in models]
# Build ASE molecules from their chemical names and display the last one
# (ethylamine, CH3CH2NH2) in an NGLView widget.
names = ['OCHCHO', 'C3H9C', 'CH3COF', 'CH3CH2NH2']
mols = [molecule(name) for name in names]
view = nglview.show_ase(mols[3])
# Export the widget to a static HTML page.
nglview.write_html('index.html', [view])
view
# !open index.html
# Alternative export path via ipywidgets (overwrites index.html above).
from ipywidgets.embed import embed_minimal_html
embed_minimal_html('index.html', views=[view], title='test export')
# NOTE(review): `mdt` is never imported in this file (it looks like the
# conventional alias for the `moldesign` package) — this cell raises
# NameError as written; confirm the missing import.
mol = mdt.from_name('ethylene')
mol.draw()
# # Trying with rdkit
#
# conda install -c rdkit rdkit
#
# also see here: http://patrickfuller.github.io/imolecule/examples/ipython.html
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.Draw import IPythonConsole
# Render RDKit molecules as interactive 3D views in the notebook.
IPythonConsole.ipython_3d = True
# SMILES string for taxol (paclitaxel).
taxol = ("CC(=O)OC1C2=C(C)C(CC(O)(C(OC(=O)c3ccccc3)C4C5(COC5CC(O)C4(C)C1=O)"
         "OC(=O)C)C2(C)C)OC(=O)C(O)C(NC(=O)c6ccccc6)c7ccccc7")
# Parse the SMILES, add explicit hydrogens, generate 3D coordinates and
# relax them with the MMFF force field; the bare `mol` renders the viewer.
mol = Chem.AddHs(Chem.MolFromSmiles(taxol))
AllChem.EmbedMolecule(mol)
AllChem.MMFFOptimizeMolecule(mol)
mol
# # Trying with py3Dmol
#
# pip install py3Dmol
#
# http://nbviewer.jupyter.org/github/3dmol/3Dmol.js/blob/9050b97144e81f065df7eecc87ba9a16723ab14b/py3Dmol/examples.ipynb
import py3Dmol
# Fetch PDB entry 1HVR from the PDB and render it as a spectrum-coloured
# cartoon; the bare `view` displays the widget.
view = py3Dmol.view(query='pdb:1hvr')
view.setStyle({'cartoon':{'color':'spectrum'}})
view
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import clustertools as ctools
import numpy as np
# # Setup
# As discussed in the documentation, a wrapper has been written around the LIMEPY code (Gieles, M. & Zocchi, A. 2015, MNRAS, 454, 576 & Claydon, I., Gieles, M., Varri, A.L., Heggie, D.C., Zocchi, A. 2019, MNRAS, 487, 147) to automatically setup clusters from a pre-defined distribution function. If one is familiar with LIMEPY, simply give the ``setup_cluster`` the same commands you would give ``limepy.sample``. For example, to setup a King (1966) cluster with W0=5.0, a mass of 1000 Msun and an effective radius of 3 pc using 1000 stars:
# Sample a King (1966) model (LIMEPY g=1) with W0=5, M=1000 Msun,
# half-mass radius 3 pc, using 1000 stars.
cluster=ctools.setup_cluster('limepy',g=1,phi0=5.0,M=1000.,rm=3,N=1000)
print(cluster.ntot,cluster.rm,cluster.mtot)
# Alternatively in ``clustertools`` one can simply using ``'king'`` and ``'W0'``:
cluster=ctools.setup_cluster('king',W0=5.0,M=1000.,rm=3,N=1000)
print(cluster.ntot,cluster.rm,cluster.mtot)
# It is also possible, for King (1966) clusters, to specify ``c`` instead of ``W0``, as I have included for convenience conversion functions ``c_to_w0`` and ``w0_to_c`` as both values are quoted throughout the literature.
# # Galactic Globular Clusters
# It is possible to set up a StarCluster that represents a Galactic Globular Cluster, where the structural information is taken from either de Boer, T. J. L., Gieles, M., Balbinot, E., Hénault-Brunet, V., Sollima, A., Watkins, L. L., Claydon, I. 2019, MNRAS, 485, 4906 (default) or Harris, W.E. 1996 (2010 Edition), AJ, 112, 1487. Orbital information is taken from Vasiliev E., 2019, MNRAS, 484,2832. To setup Pal 5, for example:
#
cluster=ctools.setup_cluster('Pal5')
# Structural properties and Galactocentric position/velocity of Pal 5.
print(cluster.ntot,cluster.rm,cluster.mtot)
print(cluster.xgc,cluster.ygc,cluster.zgc,cluster.vxgc,cluster.vygc,cluster.vzgc)
# Unless otherwise specified, the cluster is setup to be in ``pckms`` units in clustercentric coordinates. The number of stars is set using ``mbar`` variable which has a default of 0.3 solar masses.
# Clusters can easily be viewed as they would be in the sky using the ``skyplot`` command:
ctools.skyplot(cluster)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Week 1
# Applications in supervised learning:
# * Prediction of price or click on ad (yes/no) --> Standard NN
# * From image to object (photo tagging) --> Convolutional NN
# * From audio to text transcript (speech recognition) --> Recurrent NN
# * Translation from language to language --> More advanced RNN
# * From image/radars to position of other cars (autonomous driving) --> Custom/Hybrid NN
# Types of data:
# * Structured data: databases
# * Unstructured data: audio, images, text
# # Week 2
# ## Logistic regression as a NN
# ### Binary classification
# Training sample $X \in \mathbb{R}^{n_X \times m}$, where $n_X$ is the number of features and $m$ is the number of observations
# ### Logistic regression
# We want to estimate $\mathbb{E}[y|x] = P(y=1|x) = \hat{y} \in [0,1]$.
# We set $\hat{y} = \sigma(w^Tx+b)$, the *sigmoid* function $\sigma(z) = \frac{1}{1+e^{-z}}$.
# + hide_input=true
# Plot the sigmoid function 1 / (1 + e^{-x}) over [-8, 8).
import matplotlib.pylab as plt
# %matplotlib inline
import numpy as np
x = np.arange(-8, 8, 0.1)
f = 1 / (1 + np.exp(-x))
plt.plot(x, f)
# Dashed vertical line at x=0; solid lines mark the asymptotes y=0 and y=1.
plt.axvline(x=0, ls = "--", c = 'black')
plt.axhline(y=1, c = 'black')
plt.axhline(y=0, c = 'black')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('Sigmoid function')
plt.show()
# -
# ### Logistic regression cost function
# Loss function $L(\hat{y},y):\mathbb{R}^m \rightarrow \mathbb{R}$ to measure how good our estimate is compared to the real $y$.
#
# A quadratic function such as $\frac{1}{2}(\hat{y}-y)^2$ is not good for gradient descent.
#
# We use $L(\hat{y},y) = -(y \ln(\hat{y}) +(1-y)\ln(1-\hat{y}))$:
# * if $y=1 \implies L(\hat{y},y) = -\ln(\hat{y})$, therefore we minimize $L$ by choosing the $w$ and $b$ such that $\hat{y} \in (0,1)$ is as large as possible
# * if $y=0 \implies L(\hat{y},y) = -\ln(1-\hat{y})$, therefore we want $\hat{y} \in (0,1)$ to be as small as possible
#
# The cost function $J(w,b) = \frac{1}{m}\sum_{i=1}^{m} L(\hat{y}^i,y^i) = -\frac{1}{m}\sum_{i=1}^{m} \bigl[y^i \ln(\hat{y}^i) +(1-y^i)\ln(1-\hat{y}^i)\bigr]$ is convex.
#
# Note: minimizing the cost function is equivalent to maximizing the log-likelihood function:
#
# $\ln P(y_1,...y_m|w,b) = \ln \prod_{i=1}^m P(y_i|x) = \sum_{i=1}^{m} \ln P(y_i|x) = \sum_{i=1}^{m} \ln (\hat{y}_i^{y_i}(1-\hat{y}_i)^{1-y_i})$
# ### Gradient descent
# Consider $w^*,b^* = argmin_{w,b} J(w,b)$.
#
# We find $w^*$ and $b^*$ by initializing some $(w^0,b^0)$ and by iterating
#
# * $w^{t+1} = w^t - \alpha \frac{\partial J(w^t,b^t)}{\partial w^t}$, where $\alpha$ is the *learning rate*.
# * $b^{t+1} = b^t - \alpha \frac{\partial J(w^t,b^t)}{\partial b^t}$
#
# Because this function is convex, no matter where you initialize, you should get to the same point or roughly the same point. And what gradient descent does is it starts at that initial point and then takes a step in the steepest downhill direction.
#
# We will denote $\partial x^t = \frac{\partial J(x^t)}{\partial x^t}$.
#
# By the chain rule, we have that
#
# $\partial w^t = \frac{\partial J(w^t,b^t)}{\partial w^t} = \frac{1}{m}\sum_{i=1}^{m} \frac{\partial L(\hat{y}_i,y_i)}{\partial \hat{y}_i} \frac{\partial \hat{y}_i}{\partial z_i} \frac{\partial z_i}{\partial w^t}$, where $\hat{y}_i = \sigma(z_i)$ and $z_i = w^Tx_i+b$.
#
# By using the sigmoid function, we have that $\frac{\partial \hat{y}}{\partial z} = \frac{\partial (1+e^z)^{-1}}{\partial z} = \frac{e^{-z}}{(1+e^z)^2}$
#
# Therefore $\partial z_i = \frac{1}{m}\sum_{i=1}^{m} \frac{\partial L(\hat{y}_i,y_i)}{\partial \hat{y}_i}\frac{e^{-z_i}}{(1+e^{z_i})^2} = \frac{1}{m}\sum_{i=1}^{m} -\big(\frac{y_i}{\hat{y}_i} - \frac{1-y_i}{1-\hat{y}_i} \big) \frac{e^{-z_i}}{(1+e^{z_i})^2} = \frac{1}{m}\sum_{i=1}^{m} \hat{y}_i - y_i$.
#
# Hence
#
# $\partial w^t = -\frac{1}{m}\sum_{i=1}^{m} (y^i-\hat{y}^i) x^i$
#
# and
#
# $\partial b^t = -\frac{1}{m}\sum_{i=1}^{m} (y^i-\hat{y}^i)$.
# # Week 3
# ## Shallow Neural Network
# <img src="NN.PNG" width="400" />
# For a single observation $i$, let $a^{[0]} = [x_1, x_2, \dots x_{n_x}]$ be the input layer.
#
# Each node $j$ of the hidden layer is computed as $a^{[1]}_j = \sigma(z^{[1]}_j) = \sigma(w^{[1]T}_jx+b^{[1]}_j)$, therefore the hidden layer is based on a matrix of weights $W^{[1]}$ of dimension $n_x \times n_j$, where $n_j$ is the number of nodes of the hidden layer, and a vector $b^{[1]}$. The output layer is computed as $\hat{y} = a^{[2]} = \sigma(z^{[2]}) = \sigma(w^{[2]T} a^{[1]} + b^{[2]})$.
#
# The function that maps from $z^{[l]}$ to $a^{[l]}$ is called *activation function*.
#
# The number of units in each layer $l$ are denoted by $n^{[l]}$. We have $n^{[0]} = n_x$ and $n^{[2]} = 1$
# Consider now the case where $m>1$, now the input layer $X = a^{[0]}$ has dimension $n_x \times m$.
#
# In the hidden layer there are $n^{[1]}$ units (for each observation). We compute $a^{[1]} = \sigma(z^{[1]})$ with $z^{[1]} = W^{[1]} X + b^{[1]}$ of dimension $n^{[1]} \times m$, where $W^{[1]}$ has dimension $n^{[1]} \times n_x$ and where for each observation of the same unit we sum a constant from $b^{[1]}$.
#
# For the case with 2 features, 2 observations and 3 nodes we have:
#
# $$
# W^{[1]} X + b^{[1]} = \left(\begin{array}{cc}
# w_{11} & w_{12}\\
# w_{21} & w_{22}\\
# w_{31} & w_{32}
# \end{array}\right)
# \left(\begin{array}{cc}
# x^{(1)}_1 & x^{(2)}_1\\
# x^{(1)}_2 & x^{(2)}_2
# \end{array}\right)
# +
# \left(\begin{array}{cc}
# b_1 & b_1\\
# b_2 & b_2\\
# b_3 & b_3
# \end{array}\right)
# =
# \left(\begin{array}{cc}
# w_{11}x^{(1)}_1+w_{12}x^{(1)}_2+b_1 & w_{11}x^{(2)}_1+w_{12}x^{(2)}_1+b_1\\
# w_{21}x^{(1)}_1+w_{22}x^{(1)}_2+b_2 & w_{21}x^{(2)}_1+w_{22}x^{(2)}_2+b_2\\
# w_{31}x^{(1)}_1+w_{32}x^{(1)}_2+b_3 & w_{31}x^{(2)}_1+w_{32}x^{(2)}_2+b_3
# \end{array}\right)
# $$
#
# Note that in python, thanks to *broadcasting*, it is sufficient to use a vector `b1` of dimension $n^{[1]} \times 1$, since in
#
# `a1 = np.dot(W1,X)+b1`
#
# the vector `b1` will be added to each column of matrix `np.dot(W1,X)`.
# ### Activation functions
# As an alternative to the sigmoid function, one can use the hyperbolic tangent function $tanh(z) = \frac{e^z-e^{-z}}{e^z+e^{-z}}$ which has zero mean and makes the learning for the next layer easier. In binary classification, the last layer can still use the sigmoid function because $\hat{y} \in (0,1)$.
# + hide_input=true
# Plot the hyperbolic tangent over [-8, 8).
import matplotlib.pylab as plt
# %matplotlib inline
import numpy as np
x = np.arange(-8, 8, 0.1)
f = np.tanh(x)
plt.plot(x, f)
# Dashed guides through the origin; solid lines mark the asymptotes y=±1.
plt.axvline(x=0, ls = "--", c = 'black')
plt.axhline(y=0, ls = "--",c = 'black')
plt.axhline(y=1, c = 'black')
plt.axhline(y=-1, c = 'black')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('Hyperbolic tangent function')
plt.show()
# -
# The problem with the above activation functions is that the derivative for $z$ very large or very small are close to zero in both cases, and this slows down gradient discent. A very common activation funcion is the *Rectified Linear Unit (ReLU)* function $a = \max\{0,z\}$, which has derivative equal to 1 when $z>0$. To avoid the negative derivative, an alternative is the *Leaky ReLU* function $a = \max\{0.01z,z\}$
# + hide_input=true
# Plot ReLU max(0, x) and Leaky ReLU max(0.1x, x) side by side.
# (The text defines Leaky ReLU with slope 0.01; 0.1 is used here so the
# negative branch is visible.)
import matplotlib.pylab as plt
# %matplotlib inline
import numpy as np
x = np.arange(-8, 8, 0.1)
plt.figure(1)
plt.subplot(2,2,1)
f = np.maximum(0,x)
plt.plot(x, f)
plt.axvline(x=0, ls = "--", c = 'black')
plt.axhline(y=0, ls = "--",c = 'black')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('ReLU function')
plt.subplot(2,2,2)
f = np.maximum(0.1*x,x)
plt.plot(x, f)
plt.axvline(x=0, ls = "--", c = 'black')
plt.axhline(y=0, ls = "--",c = 'black')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('Leaky ReLU function')
plt.show()
# -
# If you use a linear activation function then the neural network is just outputting a linear function of the input. If you use a linear activation function then no matter how many layers your neural network has, all it's doing is just computing a linear activation function. So you might as well not have any hidden layers.
# ### Backpropagation
# To compute the derivatives with respect to the parameters, consider the following schema:
# * $x_i$, with dimension $n_x \times 1$
# * $w^{[1]}$, with dimension $n^{[1]} \times n_x$
# * $b^{[1]}$, with dimension $n^{[1]} \times 1$
# * $z^{[1]} = w^{[1]} x_i + b^{[1]}$, with dimension $n^{[1]} \times 1$
# * $a^{[1]} = g(z^{[1]})$, with dimension $n^{[1]} \times 1$
# * $w^{[2]}$, with dimension $1 \times n^{[1]}$
# * $b^{[2]}$, with dimension $1 \times 1$
# * $z^{[2]} = w^{[2]}a^{[1]}+b^{[2]}$, with dimension $1 \times 1$
# * $a^{[2]} = \sigma(z^{[2]})$, with dimension $1 \times 1$
# * $L(a^{[2]},y_i)$, with dimension $1 \times 1$
# Therefore$^*$
#
# $\frac{\partial J(.)}{\partial w^{[1]}} = \frac{\partial L(.)}{\partial a^{[2]}} \frac{\partial a^{[2]}}{\partial z^{[2]}} \frac{\partial z^{[2]}}{\partial a^{[1]}} \frac{\partial a^{[1]}}{\partial z^{[1]}} \frac{\partial z^{[1]}}{\partial w^{[1]}}$
#
# Such that
# * $\partial z^{[2]} = a^{[2]} - y$
# * $\partial w^{[2]} = \partial z^{[2]} a^{[1]T}$ (dimension $1 \times n^{[1]}$)
# * $\partial b^{[2]} = \partial z^{[2]}$
# * $\partial z^{[1]} =\partial z^{[2]} \frac{\partial z^{[2]}}{\partial a^{[1]}} \frac{\partial a^{[1]}}{\partial z^{[1]}} = \partial z^{[2]} w^{[2]T} g'(z^{[1]})$ (dimension $n^{[1]} \times 1$)
# * $\partial w^{[1]} = \partial z^{[1]} x_i^T$ (dimension $n^{[1]} \times n_x$)
# * $\partial b^{[1]} = \partial z^{[1]}$
#
# $^*$ To be more formal, with $m$ observations remember that the cost function $J(.)$ is the mean of the loss function $L(.)$, hence $\partial w^{[l]} = \frac{1}{m} \partial z^{[l]} a^{[l-1]T}$ and $\partial b^{[l]} = \frac{1}{m} \sum \partial z^{[l]}$.
# ### Random Initialization
# If you initialize the gradient descent algorithm by choosing $w^{[1]}$ as a zero matrix then the hidden units/nodes will be the same.
#
# The solution to this is to initialize your parameters randomly. You can generate a gaussian random variable, usually multiplied by very small number, such as 0.01. But when you're training a very very deep neural network, then you might want to pick a different constant than 0.01 [next class' material]
# # Week 4
# ## Deep Neural Network
# ### Why deep representations?
# If you're building a system for face recognition or face detection, then the first layer of the neural network you can think of as maybe being a feature detector or an edge detector. A hidden unit may try to figure out where the edges of a particular orientation are in the image. And maybe another hidden unit might be trying to figure out where are the horizontal edges in the image. You can think of the first layer of the neural network as looking at the picture and trying to figure out where are the edges in the picture. Now, let's think about where the edges in this picture are by grouping together pixels to form edges. It can then detect the edges and group edges together to form parts of faces. So for example, you might have a low neuron trying to see if it's finding an eye, or a different neuron trying to find that part of the nose. And so by putting together lots of edges, it can start to detect different parts of faces. And then, finally, by putting together different parts of faces, like an eye or a nose or an ear or a chin, it can then try to recognize or detect different types of faces. So intuitively, you can think of the earlier layers of the neural network as detecting simple functions, like edges. And then composing them together in the later layers of a neural network so that it can learn more and more complex functions. These visualizations will make more sense when we talk about *convolutional nets*. And one technical detail of this visualization, the edge detectors are looking in relatively small areas of an image, maybe very small regions like that. And then the facial detectors you can look at maybe much larger areas of image. But the main intuition you take away from this is just finding simple things like edges and then building them up. Composing them together to detect more complex things like an eye or a nose then composing those together to find even more complex things. 
# And this type of simple to complex hierarchical representation, or compositional representation, applies in other types of data than images and face recognition as well.
# For example, if you're trying to build a speech recognition system, it's hard to revisualize speech but if you input an audio clip then maybe the first level of a neural network might learn to detect low level audio wave form features, such as is this tone going up? Is it going down? Is it white noise or sniffling sound. And what is the pitch? When it comes to that, detect low level wave form features like that. And then by composing low level wave forms, maybe you'll learn to detect basic units of sound. In linguistics they call phonemes. But, for example, in the word cat, the C is a phoneme, the A is a phoneme, the T is another phoneme. But learns to find maybe the basic units of sound and then composing that together maybe learn to recognize words in the audio. And then maybe compose those together, in order to recognize entire phrases or sentences. So deep neural network with multiple hidden layers might be able to have the earlier layers learn these lower level simple features and then have the later deeper layers then put together the simpler things it's detected in order to detect more complex things like recognize specific words or even phrases or sentences. The uttering in order to carry out speech recognition. And what we see is that whereas the other layers are computing, what seems like relatively simple functions of the input such as where the edge is, by the time you get deep in the network you can actually do surprisingly complex things. Such as detect faces or detect words or phrases or sentences.
# ### Forward and Backward Propagation
# * Forward propagation:
# * input $a^{[l-1]}$
# * output $a^{[l]}$ and cache $z^{[l]}, a^{[l-1]}$ (and $W^{[l]}, b^{[l]}$)
# * Backward propagation:
# * input $da^{[l]}$
# * output $da^{[l-1]}, dW^{[l-1]}, db^{[l-1]}$
#
# where
# - $dz^{[l]} = da^{[l]} * g^{[l]'}(z^{[l]})$, element-wise product
# - $dW^{[l]} = dz^{[l]} a^{[l-1]T}$ (or $\frac{1}{m}dZ^{[l]} A^{[l-1]T}$ with $m$ observations)
# - $db^{[l]} = dz^{[l]}$ (or $\frac{1}{m} \sum dZ^{[l]}$ with $m$ observations)
# - $da^{[l-1]} = W^{[l]T} dz^{[l]}$
# ### Parameters vs Hyperparameters
# Hyperparameters:
# * The learning rate $\alpha$
# * The number of iterations of gradient descent
# * The number of layers
# * The number of units in each layer
# * The activation function
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 5. Varying N in Top-N Simulations
# This notebook loads an existing Beer1pos data and runs it through the simulator with varying N (the number of precursor peaks selected for fragmentations) for Top-N DDA fragmentation. The results here correspond to Section 3.3 in the paper for the Beer1pos data.
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import numpy as np
import pandas as pd
import pylab as plt
import pymzml
import math
import seaborn as sns
import sys
sys.path.append('../..')
from vimms.Roi import RoiToChemicalCreator, make_roi
from vimms.DataGenerator import DataSource, PeakSampler, get_spectral_feature_database
from vimms.MassSpec import IndependentMassSpectrometer
from vimms.Controller import TopNController
from vimms.TopNExperiment import get_params, run_serial_experiment, run_parallel_experiment
from vimms.PlotsForPaper import get_df, load_controller, compute_performance_scenario_2
from vimms.Common import *
set_log_level_debug()
# ## 1. Parameters
# +
# Input/output locations for the Beer1pos experiment.
base_dir = 'example_data'
mzml_path = os.path.join(base_dir, 'beers', 'fragmentation', 'mzML')
file_name = 'Beer_multibeers_1_T10_POS.mzML'
experiment_name = 'beer1pos'
experiment_out_dir = os.path.abspath(os.path.join(base_dir, 'results', experiment_name, 'mzML'))
# -
experiment_out_dir
# Retention-time window (seconds) used for training and simulation.
min_rt = 3*60 # start time when compounds begin to elute in the mzML file
max_rt = 21*60
kde_min_ms1_intensity = 0 # min intensity to be selected for kdes
kde_min_ms2_intensity = 0
# ### a. ROI extraction parameters
roi_mz_tol = 10 # ppm (make_roi below is called with mz_units='ppm')
roi_min_length = 1
roi_min_intensity = 0
roi_start_rt = min_rt
roi_stop_rt = max_rt
# ### b. Top-N parameters
isolation_window = 1 # the isolation window in Dalton around a selected precursor ion
ionisation_mode = POSITIVE
N = 10
rt_tol = 15
mz_tol = 10
min_ms1_intensity = 1.75E5 # minimum ms1 intensity to fragment
mzml_out = os.path.join(experiment_out_dir, 'simulated.mzML')
# ## 2. Train densities
# Load the mzML data and train the spectral feature database (KDEs) that the
# simulator samples peaks from.
ds = DataSource()
ds.load_data(mzml_path, file_name=file_name)
# KDE bandwidths: joint m/z-intensity-RT density and number-of-peaks density.
bandwidth_mz_intensity_rt=1.0
bandwidth_n_peaks=1.0
ps = get_spectral_feature_database(ds, file_name, kde_min_ms1_intensity, kde_min_ms2_intensity, min_rt, max_rt,
                                   bandwidth_mz_intensity_rt, bandwidth_n_peaks)
# ## 3. Extract all ROIs
mzml_file = os.path.join(mzml_path, file_name)
# make_roi returns both good ROIs and "junk" ones; keep everything for now.
good_roi, junk = make_roi(mzml_file, mz_tol=roi_mz_tol, mz_units='ppm', min_length=roi_min_length,
                          min_intensity=roi_min_intensity, start_rt=roi_start_rt, stop_rt=roi_stop_rt)
all_roi = good_roi + junk
len(all_roi)
# How many singleton and non-singleton ROIs?
len([roi for roi in all_roi if roi.n == 1])
len([roi for roi in all_roi if roi.n > 1])
# Keep only the ROIs that can be fragmented above **min_ms1_intensity threshold**.
min_ms1_intensity
# +
# Retain only the ROIs whose intensity trace rises above the MS1
# fragmentation threshold at least once.
keep = [roi for roi in all_roi
        if np.count_nonzero(np.array(roi.intensity_list) > min_ms1_intensity) > 0]
all_roi = keep
len(keep)
# -
# Turn ROIs into chromatograms/chemicals
# Convert every ROI into a simulated chemical object and persist the dataset.
set_log_level_debug()
rtcc = RoiToChemicalCreator(ps, all_roi)
data = rtcc.chemicals
save_obj(data, os.path.join(experiment_out_dir, 'dataset.p'))
# ## 4. Run Top-N Controller
set_log_level_warning()
pbar = False # turn off progress bar
# Values of N (precursors fragmented per scan) swept over in the paper.
Ns = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100]
rt_tols = [15]
params = get_params(experiment_name, Ns, rt_tols, mz_tol, isolation_window, ionisation_mode, data, ps,
                    min_ms1_intensity, min_rt, max_rt, experiment_out_dir, pbar)
experiment_out_dir
# Run the experiments.
# +
# # %time run_serial_experiment(params)
# -
# Alternatively since each simulated run is completely independent of the others, we can save time by running the different values of N in parallel. Here we use the [iparallel](https://ipyparallel.readthedocs.io/en/latest/) package. To do this, start a local parallel cluster with the following command:
#
#     $ ipcluster start -n 5
#
# where 5 is the number of cores to use (for example).
# %time run_parallel_experiment(params)
# ## 5. Analyse Results
# Now we need to load the ground truth peaks found by xcms from each mzML file.
# - P = peaks picked by XCMS from the full-scan file
# - Q = peaks picked by XCMS from the fragmentation file
#
# Peak picking was done using the script `extract_peaks.R` in the `example_data/results/ground_truth` folder.
# **Manual step: to generate the lists of ground truth peaks for evaluation, please run the R script on both the full-scan and simulated fragmentation files.**
#
# Requirements:
# - Ensure that XCMS3 has been installed: https://bioconductor.org/packages/release/bioc/html/xcms.html.
#
# Steps for peak picking on simulated fragmentation files:
# 1. Ensure that fragmentation .mzML file are located in `examples/example_data/results/beer1pos/mzML`.
# 2. Open a new R session and run the R script `examples/example_data/results/beer1pos/extract_peaks.R`. The script will process any files found in an `mzML` folder relative to its current location.
# 3. The file `extracted_peaks_ms1.csv` will be created in the folder of step 2.
#
# We have provided the peak-picking result for the full-scan file, but to do it manually using your own full-scal file, follow the same steps as above.
# 1. Place your full-scan .mzML file in `examples/example_data/results/ground_truth/mzML`.
# 2. Open a new R window and run the R script `examples/example_data/results/ground_truth/extract_peaks.R`. The script will process any files found in an `mzML` folder relative to its current location.
# 3. The file `extracted_peaks_ms1.csv` will be created in the folder of step 2.
# Evaluation settings: the intensity threshold is deliberately reset to 0 so
# every picked peak is considered, restricted to the simulated RT window.
min_ms1_intensity = 0
rt_range = [(min_rt, max_rt)]
mz_range = [(0, math.inf)]
# P = peaks picked by XCMS from the full-scan file
results_dir = os.path.join(base_dir, 'results', 'ground_truth', 'mzML')
csv_file = os.path.join(results_dir, 'extracted_peaks_ms1.csv')
P_peaks_df = get_df(csv_file, min_ms1_intensity, rt_range, mz_range)
# Q = peaks picked by XCMS from the simulated fragmentation files
csv_file = os.path.join(experiment_out_dir, 'extracted_peaks_ms1.csv')
Q_peaks_df = get_df(csv_file, min_ms1_intensity, rt_range, mz_range)
fullscan_filename = 'Beer_multibeers_1_fullscan1.mzML'
matching_mz_tol = 10 # ppm
matching_rt_tol = 30 # seconds
# Score every (N, rt_tol) simulated run against the ground-truth peaks.
results = []
for N in Ns:
    for rt_tol in rt_tols:
        # load chemicals and check for matching
        chemicals = load_obj(os.path.join(experiment_out_dir, 'dataset.p'))
        fragfile_filename = 'experiment_%s_N_%d_rttol_%d.mzML' % (experiment_name, N, rt_tol)
        # load controller and compute performance
        controller = load_controller(experiment_out_dir, experiment_name, N, rt_tol)
        if controller is not None:
            tp, fp, fn, prec, rec, f1 = compute_performance_scenario_2(controller, chemicals, min_ms1_intensity,
                                                                       fullscan_filename, fragfile_filename,
                                                                       P_peaks_df, Q_peaks_df, matching_mz_tol, matching_rt_tol)
            print('%s N=%d rt_tol=%d tp=%d fp=%d fn=%d prec=%.3f rec=%.3f f1=%.3f' % (experiment_name,
                                                                                      N, rt_tol, tp, fp, fn, prec, rec, f1))
            res = (experiment_name, N, rt_tol, tp, fp, fn, prec, rec, f1)
            results.append(res)
result_df = pd.DataFrame(results, columns=['experiment', 'N', 'rt_tol', 'TP', 'FP', 'FN', 'Prec', 'Rec', 'F1'])
# ### Plot precision, recall, f1
# +
# Precision vs N
plt.figure(figsize=(12, 6))
ax = sns.lineplot(x='N', y='Prec', hue='experiment', legend='brief', data=result_df)
plt.title('Top-N Precision')
# thicken plotted lines for readability in the paper figures
for l in ax.lines:
    plt.setp(l, linewidth=5)
plt.ylabel('Precision')
plt.xlabel(r'Top-$N$')
plt.legend(prop={'size': 20})
plt.tight_layout()
fig_out = os.path.join(experiment_out_dir, 'topN_precision.png')
plt.savefig(fig_out, dpi=300)
# +
# Recall vs N
plt.figure(figsize=(12, 6))
ax = sns.lineplot(x='N', y='Rec', hue='experiment', legend='brief', data=result_df)
plt.title('Top-N Recall')
for l in ax.lines:
    plt.setp(l, linewidth=5)
plt.ylabel('Recall')
plt.xlabel(r'Top-$N$')
plt.legend(prop={'size': 20})
plt.tight_layout()
fig_out = os.path.join(experiment_out_dir, 'topN_recall.png')
plt.savefig(fig_out, dpi=300)
# +
# F1 vs N
plt.figure(figsize=(12, 6))
ax = sns.lineplot(x='N', y='F1', hue='experiment', legend='brief', data=result_df)
plt.title('Top-N F1')
for l in ax.lines:
    plt.setp(l, linewidth=5)
plt.ylabel(r'$F_{1}\;score$')
plt.xlabel(r'Top-$N$')
plt.legend(prop={'size': 20})
plt.tight_layout()
fig_out = os.path.join(experiment_out_dir, 'topN_f1.png')
plt.savefig(fig_out, dpi=300)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# The diagonal-sudoku puzzle to solve, as an 81-character row-major string
# ('.' marks an empty box).
values = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
values
# +
from collections import defaultdict
# Board coordinates: rows are letters A-I, columns are digits 1-9; each box
# name is a row letter plus a column digit (e.g. 'A1').
rows = 'ABCDEFGHI'
cols = '123456789'
boxes = [r + c for r in rows for c in cols]
history = {}
# +
# NOTE(review): the lines below are interactive scratch exploration of the
# unit/peer structures (bare expressions display their value in a notebook).
from utils import *
row_units = [cross(r, cols) for r in rows]
column_units = [cross(rows, c) for c in cols]
square_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]
unitlist = row_units + column_units + square_units
# -
row_units
column_units
square_units
unitlist
extract_units(unitlist, boxes)
# +
cols
# -
rows
# main diagonal (A1..I9) and anti-diagonal (A9..I1) box names
[rows[i]+cols[i] for i in range(9)]
[rows[i]+cols[len(cols)-i-1] for i in range(9)]
unitlist.append([rows[i]+cols[i] for i in range(9)])
unitlist.pop()
unitlist
len(unitlist)
unitlist + [[rows[i]+cols[i] for i in range(9)]] + [[rows[i]+cols[len(cols)-i-1] for i in range(9)]]
diagonal_a1_i9 = [rows[i]+cols[i] for i in range(9)]
diagonal_a9_i1 = [rows[i]+cols[len(cols)-i-1] for i in range(9)]
unitlist.append(diagonal_a1_i9)
unitlist.append(diagonal_a9_i1)
unitlist
# units/peers must be rebuilt after the diagonal units are appended
units = extract_units(unitlist, boxes)
peers = extract_peers(units, boxes)
units
boxes
def extract_units(unitlist, boxes):
    """Map each box name to the list of units that contain it.

    Parameters
    ----------
    unitlist(list)
        a list containing "units" (rows, columns, diagonals, etc.) of boxes
    boxes(list)
        a list of strings identifying each box on a sudoku board (e.g., "A1", "C7", etc.)

    Returns
    -------
    dict
        a dictionary with a key for each box (string) whose value is a list
        containing the units that the box belongs to (i.e., the "member units")
    """
    # defaultdict(list) lets us append without pre-creating each key.
    units = defaultdict(list)
    for box in boxes:
        member_units = (unit for unit in unitlist if box in unit)
        for unit in member_units:
            units[box].append(unit)
    return units
extract_units(unitlist, boxes)
def naked_twins(values):
    """Eliminate values using the naked twins strategy.
    Args:
        values(dict): a dictionary of the form {'box_name': '123456789', ...}
    Returns:
        the values dictionary with the naked twins eliminated from peers.
    """
    # Boxes holding exactly two candidate digits can form a twin pair.
    two_candidate_boxes = [box for box in values.keys() if len(values[box]) == 2]
    # A naked twin pair: two peer boxes with the same two candidates.
    twin_pairs = []
    for first in two_candidate_boxes:
        for second in peers[first]:
            if set(values[first]) == set(values[second]):
                twin_pairs.append([first, second])
    # Remove the twins' digits from every box that is a peer of both twins.
    for first, second in twin_pairs:
        shared_peers = set(peers[first]) & set(peers[second])
        for peer_box in shared_peers:
            if len(values[peer_box]) > 2:
                for digit in values[first]:
                    values = assign_value(values, peer_box, values[peer_box].replace(digit, ''))
    return values
naked_twins(values)
grid = values
grid
def grid_values(grid):
    """Convert grid string into {<box>: <value>} dict with '.' value for empties.
    Args:
        grid: Sudoku grid in string form, 81 characters long
    Returns:
        Sudoku grid in dictionary form:
        - keys: Box labels, e.g. 'A1'
        - values: Value in corresponding box, e.g. '8', or '.' if it is empty.
    """
    assert len(grid) == 81, "Input grid must be a string of length 81 (9x9)"
    # Pair each board position with the corresponding puzzle character.
    return {box: char for box, char in zip(boxes, grid)}
grid_values(grid)
def grid_values(grid):
    """Convert grid string into {<box>: <value>} dict with '123456789' value for empties.
    Args:
        grid: Sudoku grid in string form, 81 characters long
    Returns:
        Sudoku grid in dictionary form:
        - keys: Box labels, e.g. 'A1'
        - values: Value in corresponding box, e.g. '8', or '123456789' if it is empty.
    """
    mapping = dict()
    for index, char in enumerate(grid):
        # '.' means "no clue yet", so every digit is still a candidate.
        mapping[boxes[index]] = cols if char == '.' else char
    return mapping
grid_values(values)
def only_choice(values):
    """Finalize all values that are the only choice for a unit.
    Go through all the units, and whenever there is a unit with a value
    that only fits in one box, assign the value to this box.
    Input: Sudoku in dictionary form.
    Output: Resulting Sudoku in dictionary form after filling in only choices.
    """
    for unit in unitlist:
        for digit in '123456789':
            candidate_boxes = [box for box in unit if digit in values[box]]
            # If the digit can only live in one box of this unit, pin it there.
            if len(candidate_boxes) == 1:
                sole_box = candidate_boxes[0]
                values[sole_box] = digit
    return values
only_choice(grid)
grid
values = grid_values(grid)
values
# +
# Classic (non-diagonal) solver utilities from the course material.
rows = 'ABCDEFGHI'
cols = '123456789'
def cross(a, b):
    """Concatenate each element of *a* with each element of *b*."""
    combos = []
    for left in a:
        for right in b:
            combos.append(left + right)
    return combos
boxes = cross(rows, cols)
# Units: rows, columns and the nine 3x3 squares (no diagonals in this version).
row_units = [cross(r, cols) for r in rows]
column_units = [cross(rows, c) for c in cols]
square_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]
unitlist = row_units + column_units + square_units
# units: box -> units containing it; peers: box -> all other boxes sharing a unit.
units = dict((s, [u for u in unitlist if s in u]) for s in boxes)
peers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes)
def display(values):
    """
    Display the values as a 2-D grid.
    Input: The sudoku in dictionary form
    Output: None
    """
    # Column width: widest candidate string on the board, plus one space.
    width = 1 + max(len(values[s]) for s in boxes)
    # Horizontal separator printed between the 3x3 bands (after rows C and F).
    separator = '+'.join(['-' * (width * 3)] * 3)
    for r in rows:
        cells = []
        for c in cols:
            cells.append(values[r + c].center(width))
            if c in '36':
                cells.append('|')
        print(''.join(cells))
        if r in 'CF':
            print(separator)
    return
def grid_values(grid):
    """
    Convert grid into a dict of {square: char} with '123456789' for empties.
    Input: A grid in string form.
    Output: A grid in dictionary form
            Keys: The boxes, e.g., 'A1'
            Values: The value in each box, e.g., '8'. If the box has no value, then the value will be '123456789'.
    """
    digits = '123456789'
    # Keep clue digits as-is, expand '.' to all candidates, drop anything else.
    chars = [digits if c == '.' else c for c in grid if c in digits or c == '.']
    assert len(chars) == 81
    return dict(zip(boxes, chars))
def eliminate(values):
    """
    Go through all the boxes, and whenever there is a box with a value, eliminate this value from the values of all its peers.
    Input: A sudoku in dictionary form.
    Output: The resulting sudoku in dictionary form.
    """
    # Snapshot solved boxes first so eliminations do not change the worklist.
    solved_boxes = [box for box, candidates in values.items() if len(candidates) == 1]
    for solved_box in solved_boxes:
        digit = values[solved_box]
        for peer in peers[solved_box]:
            values[peer] = values[peer].replace(digit, '')
    return values
def only_choice(values):
    """
    Go through all the units, and whenever there is a unit with a value that only fits in one box, assign the value to this box.
    Input: A sudoku in dictionary form.
    Output: The resulting sudoku in dictionary form.
    """
    for unit in unitlist:
        for digit in '123456789':
            candidate_boxes = [box for box in unit if digit in values[box]]
            # Exactly one possible home for this digit in the unit -> assign it.
            if len(candidate_boxes) == 1:
                values[candidate_boxes[0]] = digit
    return values
def reduce_puzzle(values):
    """
    Iterate eliminate() and only_choice(). If at some point, there is a box with no available values, return False.
    If the sudoku is solved, return the sudoku.
    If after an iteration of both functions, the sudoku remains the same, return the sudoku.
    Input: A sudoku in dictionary form.
    Output: The resulting sudoku in dictionary form.
    """
    def _num_solved(vals):
        # A box is solved when exactly one candidate digit remains.
        return len([box for box in vals.keys() if len(vals[box]) == 1])

    stalled = False
    while not stalled:
        solved_before = _num_solved(values)
        values = eliminate(values)
        values = only_choice(values)
        solved_after = _num_solved(values)
        # No new solved boxes this round means constraint propagation is done.
        stalled = solved_before == solved_after
        # An empty candidate string signals a contradiction.
        if len([box for box in values.keys() if len(values[box]) == 0]):
            return False
    return values
# +
from utils import *
def search(values):
    "Using depth-first search and propagation, try all possible values."
    # First, reduce the puzzle with constraint propagation.
    values = reduce_puzzle(values)
    if values is False:
        return False  # contradiction found earlier
    if all(len(values[box]) == 1 for box in boxes):
        return values  # solved
    # Branch on the unfilled box with the fewest remaining candidates.
    _, branch_box = min((len(values[box]), box) for box in boxes if len(values[box]) > 1)
    # Try each candidate in a copied grid; return the first branch that solves.
    for candidate in values[branch_box]:
        trial_grid = values.copy()
        trial_grid[branch_box] = candidate
        outcome = search(trial_grid)
        if outcome:
            return outcome
    # Implicitly returns None (falsy) when every branch fails, as before.
# -
values = search(values)
values
def naked_twins(values):
    """Eliminate values using the naked twins strategy.
    Args:
        values(dict): a dictionary of the form {'box_name': '123456789', ...}
    Returns:
        the values dictionary with the naked twins eliminated from peers.
    """
    # Boxes with exactly two candidates can form a twin pair.
    pair_candidates = [box for box in values.keys() if len(values[box]) == 2]
    twin_pairs = []
    for box1 in pair_candidates:
        for box2 in peers[box1]:
            if set(values[box1]) == set(values[box2]):
                twin_pairs.append([box1, box2])
    for box1, box2 in twin_pairs:
        peers1 = set(peers[box1])
        print(peers1)
        peers2 = set(peers[box2])
        print(peers2)
        shared = peers1 & peers2
        # Strip the twins' digits from every common peer still wide open.
        for peer_box in shared:
            if len(values[peer_box]) > 2:
                for digit in values[box1]:
                    values = assign_value(values, peer_box, values[peer_box].replace(digit, ''))
    return values
naked_twins(values)
# NOTE(review): the lines below are scratch experimentation with set
# operations on peer sets; `len(x and y)` equals len(y) because `and`
# returns its second operand when both are truthy -- kept as-is.
x=set(peers['A1'])
y = set(peers['A2'])
x & y
x
y
len(x)
len(y)
x & y
len(x&y)
len(x and y)
help(set)
a = set.intersection(x, y)
a
len(a)
x = list(x)
y = list(y)
x
set.intersection(set(x), set(y))
x = set(x)
y = set(y)
x & y
# +
from utils import *
row_units = [cross(r, cols) for r in rows]
column_units = [cross(rows, c) for c in cols]
square_units = [cross(rs, cs) for rs in ('ABC', 'DEF', 'GHI') for cs in ('123', '456', '789')]
unitlist = row_units + column_units + square_units
# Each diagonal must be a *flat* list of 9 box names, like every other unit.
# The previous double-wrapped form ([[...]]) made `box in unit` always False
# and put an unhashable list inside a unit, so iterating that unit crashed
# dictionary lookups (e.g. values[box] in only_choice).
diagonal_a1_i9 = [rows[i]+cols[i] for i in range(9)]
diagonal_a9_i1 = [rows[i]+cols[len(cols)-i-1] for i in range(9)]
# TODO: Update the unit list to add the new diagonal units : done
unitlist.append(diagonal_a1_i9)
unitlist.append(diagonal_a9_i1)
# Must be called after all units (including diagonals) are added to the unitlist
units = extract_units(unitlist, boxes)
peers = extract_peers(units, boxes)
def naked_twins(values):
    """Eliminate values using the naked twins strategy.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict
        The values dictionary with the naked twins eliminated from peers

    Notes
    -----
    All pairs of naked twins present in the input are processed once; the
    reduce_puzzle loop applies this strategy repeatedly.
    """
    # Boxes with exactly two candidate digits are potential twin members.
    possible_twins = [box for box in values.keys() if len(values[box]) == 2]
    # A naked twin is a pair of *peer* boxes holding the same two candidates.
    # BUG FIX: the previous code iterated `for j in peers:` -- i.e. over every
    # box on the board (including i itself) -- pairing non-peer boxes and
    # wrongly eliminating valid candidates. Only peers[i] may form twins with i.
    _naked_twins = []
    for i in possible_twins:
        for j in peers[i]:
            if set(values.get(i)) == set(values.get(j)):
                _naked_twins.append([i, j])
    # For each twin pair, remove the twins' two digits from every box that is
    # a peer of *both* twins and still has more than two candidates.
    for i in range(len(_naked_twins)):
        twin_1 = _naked_twins[i][0]
        twin_2 = _naked_twins[i][1]
        peer_twin_1 = set(peers[twin_1])
        peer_twin_2 = set(peers[twin_2])
        common_peers = set.intersection(peer_twin_1, peer_twin_2)
        for j in common_peers:
            if len(values.get(j)) > 2:
                for k in values[twin_1]:
                    values = assign_value(values, j, values.get(j).replace(k, ''))
    return values
def eliminate(values):
    """Apply the eliminate strategy to a Sudoku puzzle.

    The eliminate strategy says that if a box has a value assigned, then none
    of the peers of that box can have the same value.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict
        The values dictionary with the assigned values eliminated from peers
    """
    # Snapshot the solved boxes first so mutations below don't affect the scan.
    solved_boxes = [box for box, candidates in values.items() if len(candidates) == 1]
    for solved_box in solved_boxes:
        digit = values[solved_box]
        for peer in peers[solved_box]:
            values[peer] = values[peer].replace(digit, '')
    return values
def only_choice(values):
    """Apply the only choice strategy to a Sudoku puzzle.

    The only choice strategy says that if only one box in a unit allows a
    certain digit, then that box must be assigned that digit.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict
        The values dictionary with all single-valued boxes assigned
    """
    for unit in unitlist:
        for digit in '123456789':
            candidate_boxes = [box for box in unit if digit in values[box]]
            # Only one home for the digit in this unit -> it must go there.
            if len(candidate_boxes) == 1:
                sole_box = candidate_boxes[0]
                values[sole_box] = digit
    return values
def reduce_puzzle(values):
    """Reduce a Sudoku puzzle by repeatedly applying all constraint strategies.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict or False
        The values dictionary after continued application of the constraint
        strategies no longer produces any changes, or False if the puzzle is
        unsolvable
    """
    def _num_solved(vals):
        # A box is solved when exactly one candidate digit remains.
        return len([box for box in vals.keys() if len(vals[box]) == 1])

    stalled = False
    while not stalled:
        solved_before = _num_solved(values)
        values = eliminate(values)
        values = only_choice(values)
        values = naked_twins(values)
        solved_after = _num_solved(values)
        # Stop once a full pass of all three strategies makes no progress.
        stalled = solved_before == solved_after
        # A box with zero candidates means a contradiction was reached.
        if len([box for box in values.keys() if len(values[box]) == 0]):
            return False
    return values
def search(values):
    """Apply depth first search to solve Sudoku puzzles in order to solve
    puzzles that cannot be solved by repeated reduction alone.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict or False
        The values dictionary with all boxes assigned or False
    """
    values = reduce_puzzle(values)
    if values is False:
        return False
    if all(len(values[box]) == 1 for box in boxes):
        return values
    # Branch on the unfilled box with the fewest remaining candidates.
    _, branch_box = min((len(values[box]), box) for box in boxes if len(values[box]) > 1)
    for candidate in values[branch_box]:
        trial = values.copy()
        trial[branch_box] = candidate
        outcome = search(trial)
        if outcome:
            return outcome
    # Implicitly returns None (falsy) when every branch fails, as before.
def solve(grid):
    """Find the solution to a Sudoku puzzle using search and constraint propagation

    Parameters
    ----------
    grid(string)
        a string representing a sudoku grid.
        Ex. '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'

    Returns
    -------
    dict or False
        The dictionary representation of the final sudoku grid or False if no solution exists.
    """
    # grid2values presumably comes from the `utils` star-import -- verify.
    values = grid2values(grid)
    values = search(values)
    return values
if __name__ == "__main__":
    # Solve and display the example diagonal sudoku.
    diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
    display(grid2values(diag_sudoku_grid))
    result = solve(diag_sudoku_grid)
    display(result)

    # Visualization is best-effort: a bare `except:` previously also swallowed
    # KeyboardInterrupt, so catch Exception instead.
    try:
        import PySudoku
        PySudoku.play(grid2values(diag_sudoku_grid), result, history)
    except SystemExit:
        pass
    except Exception:
        print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
result
# +
from utils import *
row_units = [cross(r, cols) for r in rows]
column_units = [cross(rows, c) for c in cols]
square_units = [cross(rs, cs) for rs in ('ABC', 'DEF', 'GHI') for cs in ('123', '456', '789')]
unitlist = row_units + column_units + square_units
# Diagonal units as flat lists of box names, so membership tests
# (`box in unit`) work like the row/column/square units.
diagonal_a1_i9 = [rows[i]+cols[i] for i in range(9)]
diagonal_a9_i1 = [rows[i]+cols[len(cols)-i-1] for i in range(9)]
# TODO: Update the unit list to add the new diagonal units : done
unitlist.append(diagonal_a1_i9)
unitlist.append(diagonal_a9_i1)
# Must be called after all units (including diagonals) are added to the unitlist
units = extract_units(unitlist, boxes)
peers = extract_peers(units, boxes)
# NOTE(review): these module-level names appear unused by the functions that
# follow (they shadow them with locals) -- candidates for removal.
possible_twins = []
_naked_twins = []
common_peers = {}
def naked_twins(values):
    """Eliminate values using the naked twins strategy.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict
        The values dictionary with the naked twins eliminated from peers

    Notes
    -----
    Processes all pairs of naked twins found in the input in one pass;
    reduce_puzzle calls this strategy repeatedly until it stalls.

    The previous implementation only searched rows and columns (missing
    square and diagonal units), left a debug ``print`` in place, and reused
    one ``index`` variable for both the row and column unit (the column
    index always overwrote the row index), so the row branch eliminated
    from the wrong unit. This version applies the strategy uniformly over
    every unit in ``unitlist``.
    """
    for unit in unitlist:
        # boxes in this unit that still have exactly two candidates
        pairs = [box for box in unit if len(values[box]) == 2]
        for i, first in enumerate(pairs):
            for second in pairs[i + 1:]:
                if values[first] == values[second]:
                    # a naked twin: those two digits must occupy these two
                    # boxes, so strip them from every other box in the unit
                    for box in unit:
                        if box != first and box != second:
                            for digit in values[first]:
                                values[box] = values[box].replace(digit, '')
    return values
def eliminate(values):
    """Apply the eliminate strategy to a Sudoku puzzle.

    The eliminate strategy says that if a box has a value assigned, then none
    of the peers of that box can have the same value.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict
        The values dictionary with the assigned values eliminated from peers
    """
    solved_boxes = [box for box, candidates in values.items() if len(candidates) == 1]
    for solved_box in solved_boxes:
        digit = values[solved_box]
        # strip the fixed digit from every peer's candidate string
        for peer in peers[solved_box]:
            values[peer] = values[peer].replace(digit, '')
    return values
def only_choice(values):
    """Apply the only choice strategy to a Sudoku puzzle.

    The only choice strategy says that if only one box in a unit allows a certain
    digit, then that box must be assigned that digit.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict
        The values dictionary with all single-valued boxes assigned
    """
    for unit in unitlist:
        for digit in '123456789':
            # boxes in this unit that still allow this digit; the previous
            # manual append loop and dead `tuple(dplaces)` cast are gone —
            # a list comprehension is correct here (nothing is hashed)
            dplaces = [box for box in unit if digit in values[box]]
            if len(dplaces) == 1:
                # only one box in the unit can take the digit: assign it
                values = assign_value(values, dplaces[0], digit)
    return values
def reduce_puzzle(values):
    """Reduce a Sudoku puzzle by repeatedly applying all constraint strategies.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict or False
        The values dictionary after continued application of the constraint strategies
        no longer produces any changes, or False if the puzzle is unsolvable
    """
    def count_solved(vals):
        # number of boxes reduced to a single candidate
        return sum(1 for box in vals if len(vals[box]) == 1)

    # (an unused pre-loop `solved_values` list was removed)
    stalled = False
    while not stalled:
        solved_values_before = count_solved(values)
        values = eliminate(values)
        values = only_choice(values)
        values = naked_twins(values)
        solved_values_after = count_solved(values)
        # stop once a full pass makes no progress
        stalled = solved_values_before == solved_values_after
        # a box left with zero candidates means the puzzle is unsolvable
        if any(len(values[box]) == 0 for box in values):
            return False
    return values
def search(values):
    """Apply depth first search to solve Sudoku puzzles in order to solve puzzles
    that cannot be solved by repeated reduction alone.

    Parameters
    ----------
    values(dict)
        a dictionary of the form {'box_name': '123456789', ...}

    Returns
    -------
    dict or False
        The values dictionary with all boxes assigned or False
    """
    values = reduce_puzzle(values)
    if values is False:
        return False
    if all(len(values[s]) == 1 for s in boxes):
        return values
    # choose an unfilled box with the fewest remaining candidates (MRV heuristic)
    n, s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)
    for value in values[s]:
        new_sudoku = values.copy()
        new_sudoku[s] = value
        attempt = search(new_sudoku)
        if attempt:
            return attempt
    # Every candidate led to a contradiction. Return False explicitly: the
    # original fell off the end and returned None, while the documented
    # contract (and `values is False` checks elsewhere) expect False.
    return False
def solve(grid):
    """Find the solution to a Sudoku puzzle using search and constraint propagation.

    Parameters
    ----------
    grid(string)
        a string representing a sudoku grid.
        Ex. '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'

    Returns
    -------
    dict or False
        The dictionary representation of the final sudoku grid or False if no solution exists.
    """
    # parse, then hand off to depth-first search with constraint propagation
    return search(grid2values(grid))
if __name__ == "__main__":
    diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
    display(grid2values(diag_sudoku_grid))
    result = solve(diag_sudoku_grid)
    display(result)

    try:
        import PySudoku
        PySudoku.play(grid2values(diag_sudoku_grid), result, history)
    except SystemExit:
        pass
    except Exception:
        # visualization is optional; catch Exception rather than a bare
        # `except:`, which would also swallow KeyboardInterrupt
        print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Copyright 2019 Carsten Blank
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + jupyter={"outputs_hidden": true} pycharm={"is_executing": false}
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
sys.path.append("{}/../lib_paper".format(os.getcwd()))
from lib_experimental_utils import compare_plot
# + jupyter={"outputs_hidden": false} pycharm={"is_executing": false, "name": "#%%\n"}
# Sweep the angle theta over one full period in small steps.
theta_start = 0.0
theta_end = 2 * np.pi
theta_step = 0.01
# Class weights of the two-class mixture; they sum to one by construction.
w_1 = 0.5
w_2 = 1 - w_1
# Number of state copies used for the comparison curve.
copies = 500
theta = np.arange(theta_start, theta_end, theta_step)
def classification(copies=1):
    """Decision-function value over the module-level `theta` grid for the
    given number of state copies, weighted by `w_1`/`w_2`."""
    angle = theta / 2 + np.pi / 4
    return w_1 * np.sin(angle) ** (2 * copies) - w_2 * np.cos(angle) ** (2 * copies)
# Plot the single-copy decision curve against the 500-copy curve and save it.
single_copy_curve = classification(1)
many_copy_curve = classification(copies)
compare_plot(theta,
             classification=single_copy_curve,
             classification_label='1 copy',
             compare_classification=many_copy_curve,
             compare_classification_label='500 copies')
plt.savefig("../../images/product-state_1_vs_500_copies.pdf")
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Basic example for highstock module in python-highcharts
# ===============================
#
# As in highcharts, datasets need to be input using the "add_data_set" or "add_data_from_jsonp" methods.
# Options can be set either with the "set_options" method, as shown here, or by
# constructing an options dictionary object and passing it to the "set_dict_options" method (recommended).
#
# In highstock, the (new) feature "navigator" is automatically added into the bottom of the chart
# based on the first dataset added into chart. But the dataset used in navigator can be changed using
# add_navi_series and add_navi_series_from_jsonp methods:
#
# 1. add_navi_series(data, series_type="line", **kwargs)
# 1. data is the dataset added into the navigator
# 2. series_type is the plot type for navigator
# 3. kwargs are for parameters in series
# (for detail please ref to highcharts API: http://api.highcharts.com/highcharts#)
#
# 2. add_navi_series_from_jsonp(data_src=None, data_name='json_data', series_type="line", **kwargs)
# add dataset from the data_src using jsonp. It is converted to jquery function "$.getJSON" in javascript environment
# 1. data_src is the url (https) for the dataset
# 2. data_name is the variable name of dataset. This name is used for javascript environment (not in python)
# 3. series_type( default: "line") is the type of plot this dataset will be presented
# 4. kwargs are for parameters in series or plotOptions
# (for detail please ref to highcharts API: http://api.highcharts.com/highcharts#)
#
# In most examples, add_data_from_jsonp method is used to show a similar practice in Highstock Demos
#
# The following example is from Highstock Demos
# Single line series: http://www.highcharts.com/stock/demo/basic-line
# +
from highcharts import Highstock
from highcharts.highstock.highstock_helper import jsonp_loader

H = Highstock()

# Load the AAPL closing-price series via JSONP and plot it as a line series.
data_url = 'http://www.highcharts.com/samples/data/jsonp.php?filename=aapl-c.json&callback=?'
H.add_data_from_jsonp(data_url, 'json_data', 'line', 'AAPL',
                      tooltip={'valueDecimals': 2})

# Chart-level options applied in one shot from a dictionary.
options = {
    'rangeSelector': {
        'selected': 1
    },
    'title': {
        'text': 'AAPL Stock Price'
    },
}
H.set_dict_options(options)
H
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Learning the weights of a mixture model using EM algorithm
# +
# load the data from pickle
import pickle

# use a context manager so the file handle is closed deterministically
# (the original passed an open() result straight to pickle.load and leaked it)
with open('./data.dat', 'rb') as data_file:
    data = pickle.load(data_file)

labels = []
values = []
for label, value in data:
    labels.append(label)
    values.append(value)
# +
import numpy as np
from scipy.stats import norm
# True generating parameters: two Gaussian components (mean, std) plus the
# multinomial mixing weights given in log space.
normal_parameters = [
    [10, 5],
    [25, 3],
]
multinomial_parameters = [15.5, 16]
original_params = (normal_parameters, multinomial_parameters)
# compute the likelihood of fully observed model
def compute_ll(data, params):
    """Log-likelihood of the fully observed mixture model.

    `params` is a pair (normal_parameters, log_weights); the multinomial
    weights are given in log space and normalized here via softmax.
    """
    normal_params, log_weights = params
    weights = np.exp(log_weights)
    weights = [w / sum(weights) for w in weights]
    ll = 0
    for label, value in data:
        ll += np.log(weights[label])
        mu, sigma = normal_params[label]
        ll += norm.logpdf(value, loc=mu, scale=sigma)
    return ll
# log-likelihood of the data under the true generating parameters (Python 2 print)
print compute_ll(data, original_params)
# -
# apply EM to the data
from sklearn import mixture
g = mixture.GMM(n_components=2)
v = np.array(values).reshape(1000,1)
g.fit(v)
print g.weights_
print g.means_
print g.covars_
# +
p = [ 0.39687174, 0.60312826]
p_n = [[ 10.33611463, np.sqrt(31.21640241)],
[ 25.16361922, np.sqrt(9.07752611)]]
ll = 0
for label, value in data:
ll += np.log(p[label])
mu, sigma = p_n[label]
ll += norm.logpdf(value, loc=mu, scale=sigma)
print ll
# +
p_n = [[16.55104836045259, 6.829974864553629], [15.709475390466789, 6.905680062219306]]
p = [15.798033405566205, 15.701966594433795]
p = np.exp(p)
p = [i/sum(p) for i in p]
ll = 0
for label, value in data:
ll += np.log(p[label])
mu, sigma = p_n[label]
ll += norm.logpdf(value, loc=mu, scale=sigma)
print ll
# +
p_n = [
[15.5, 5],
[15, 5]
]
p = [15.7, 15.8]
p = np.exp(p)
p = [i/sum(p) for i in p]
ll = 0
for label, value in data:
ll += np.log(p[label])
mu, sigma = p_n[label]
ll += norm.logpdf(value, loc=mu, scale=sigma)
print ll
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import keras
from sklearn.model_selection import train_test_split
import numpy as np
import os
import string
import re
import io
import pandas as pd
from nltk.tokenize import sent_tokenize, word_tokenize
from keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from zipfile import ZipFile
import requests
import gzip
import tensorflow as tf
from keras import backend as K
# Fetch and unpack the corpora on the first run.
# NOTE: the `# !wget` lines are Jupyter shell escapes preserved by jupytext;
# they only execute inside a notebook session (outside one, the second `if`
# has no executable body).
if not os.path.isdir("bbc"):
    # !wget "http://mlg.ucd.ie/files/datasets/bbc-fulltext.zip"
    ZipFile("bbc-fulltext.zip").extractall()
if not os.path.isfile("glove.6B.50d.txt.gz"):
    # !wget "https://github.com/kmr0877/IMDB-Sentiment-Classification-CBOW-Model/blob/master/glove.6B.50d.txt.gz?raw=true" -O "glove.6B.50d.txt.gz"

# List the raw article filenames for each of the five categories.
business_text_files = os.listdir("bbc/business")
entertainment_text_files = os.listdir("bbc/entertainment")
politics_text_files = os.listdir("bbc/politics")
tech_text_files = os.listdir("bbc/tech")
sports_text_files = os.listdir("bbc/sport")
# +
def read_text(file, directory):
    """Return the contents of *file* inside *directory*.

    Falls back to ISO-8859-14 because at least one dataset file uses that
    encoding and would otherwise raise UnicodeDecodeError.
    """
    file_path = directory + "/" + file
    try:
        with open(file_path, 'r') as handle:
            return handle.read()
    except UnicodeDecodeError:
        with open(file_path, 'r', encoding="ISO-8859-14") as handle:
            return handle.read()
# Load every article of each category into memory.
business_texts = [read_text(name, directory="bbc/business") for name in business_text_files]
entertainment_texts = [read_text(name, directory="bbc/entertainment") for name in entertainment_text_files]
politics_texts = [read_text(name, directory="bbc/politics") for name in politics_text_files]
tech_texts = [read_text(name, directory="bbc/tech") for name in tech_text_files]
sport_texts = [read_text(name, directory="bbc/sport") for name in sports_text_files]
# -
# Bundle the per-category corpora with integer labels and a label lookup table.
all_texts = [business_texts, entertainment_texts, politics_texts, tech_texts, sport_texts]
text_idx = list(range(5))
article_types = ["business", "entertainment", "politics", "tech", "sports"]
class_dict = {idx: name for idx, name in zip(text_idx, article_types)}
# English stopword list used when cleaning text before prediction.
stopwords = [ "a", "about", "above", "after", "again", "against", "all", "am", "an", "and", "any", "are", "as", "at",
              "be", "because", "been", "before", "being", "below", "between", "both", "but", "by", "could", "did", "do",
              "does", "doing", "down", "during", "each", "few", "for", "from", "further", "had", "has", "have",
              "having", "he", "he'd", "he'll", "he's", "her", "here", "here's", "hers", "herself", "him", "himself",
              "his", "how", "how's", "i", "i'd", "i'll", "i'm", "i've", "if", "in", "into", "is", "it", "it's", "its",
              "itself", "let's", "me", "more", "most", "my", "myself", "nor", "of", "on", "once", "only", "or", "other",
              "ought", "our", "ours", "ourselves", "out", "over", "own", "same", "she", "she'd", "she'll", "she's",
              "should", "so", "some", "such", "than", "that", "that's", "the", "their", "theirs", "them", "themselves",
              "then", "there", "there's", "these", "they", "they'd", "they'll", "they're", "they've", "this", "those",
              "through", "to", "too", "under", "until", "up", "very", "was", "we", "we'd", "we'll", "we're", "we've",
              "were", "what", "what's", "when", "when's", "where", "where's", "which", "while", "who", "who's", "whom",
              "why", "why's", "with", "would", "you", "you'd", "you'll", "you're", "you've", "your", "yours",
              "yourself", "yourselves" ]
# One row per article: raw text plus its integer category label.
rows = [[text, label] for texts, label in zip(all_texts, text_idx) for text in texts]
df = pd.DataFrame(rows, columns=["text", "label"])
df_train, df_test = train_test_split(df, train_size=.8, random_state=111)
# copy to detach the splits from the parent frame (avoids chained-assignment warnings)
df_train = df_train.copy()
df_test = df_test.copy()
# +
def process_text(text):
    """Remove bracket/quote characters, normalize dots, and drop stopwords.

    BUG FIX: the original called ``text.replace("\\n|\\w+", " ")``, but
    ``str.replace`` is literal (not regex), so that pattern never matched and
    newlines were never normalized — words split across lines stayed glued
    together. Newlines are now actually replaced with spaces.
    """
    normalized = text.replace("\n", " ")
    no_brackets = re.sub(r"[\(\)\[\]\"\']", "", normalized)
    dotted = re.sub(r"\.+", ". ", no_brackets)
    kept = [word for word in dotted.split(" ") if word.lower().strip() not in stopwords]
    return " ".join(kept)
def remove_punctuation(text):
    """Strip every ASCII punctuation character from *text*."""
    translation = str.maketrans('', '', string.punctuation)
    return text.translate(translation)
# +
# Fit the tokenizer on every non-empty sentence of the training articles.
all_sentences = [sentence.strip()
                 for text in df_train.text.values
                 for sentence in text.split(".")
                 if sentence.strip() != ""]

tokenizer = Tokenizer(oov_token="<OOV>")
tokenizer.fit_on_texts(all_sentences)
word_index = tokenizer.word_index
# inverse vocabulary: token id -> word
reverse_idx = {idx: word for (word, idx) in word_index.items()}
maxlen = 500
# +
def _tokenize_and_pad(frame):
    # integer-encode each article, then pad/truncate to `maxlen` tokens
    sequences = frame.text.apply(lambda text: tokenizer.texts_to_sequences([text])[0])
    return [seq for seq in pad_sequences(sequences.values, maxlen=maxlen, truncating="post", padding="post")]

df_train["tokenized"] = _tokenize_and_pad(df_train)
df_test["tokenized"] = _tokenize_and_pad(df_test)

train_X = np.vstack(df_train["tokenized"].values)
test_X = np.vstack(df_test["tokenized"].values)
train_y = to_categorical(df_train.label.values)
test_y = to_categorical(df_test.label.values)
# -
# Read the gzipped GloVe 50-d embedding file into one big text string.
with gzip.open("glove.6B.50d.txt.gz", 'r') as f:
    embedding_list = f.read().decode("utf-8")
# +
embedding_vectors = {}
# parse the GloVe text: each line is "<word> <50 floats>"
for embedding_line in embedding_list.split("\n"):
    embedding_split = embedding_line.split(" ")
    embedding_vectors[embedding_split[0]] = embedding_split[1:]

# Build the embedding matrix; row 0 stays zero for the padding index.
# (The original built this matrix twice in two byte-identical cells; the
# redundant duplicate has been removed.)
vocab_size = len(word_index.keys()) + 1
embedding_matrix = np.zeros((vocab_size, 50))
for word, i in word_index.items():
    vector = embedding_vectors.get(word)
    if vector is not None:
        embedding_matrix[i, :] = vector
# +
## Model 1 ##
# Frozen pre-trained GloVe embeddings feeding two Conv1D stages and a small
# dense head with dropout; 5-way softmax over article categories.
_model1_layers = [
    keras.layers.Embedding(vocab_size, 50, input_length=maxlen,
                           weights=[embedding_matrix], trainable=False,
                           mask_zero=True),
    keras.layers.Conv1D(64, 10, activation='relu'),
    keras.layers.MaxPooling1D(4),
    keras.layers.Conv1D(96, 5, activation='relu'),
    keras.layers.GlobalAveragePooling1D(),
    keras.layers.Dense(15, activation="relu"),
    keras.layers.Dropout(.2),
    keras.layers.Dense(5, activation="softmax"),
]
model1 = keras.models.Sequential(_model1_layers)
model1.compile(loss="categorical_crossentropy", optimizer="adam", metrics=['accuracy'])
model1.summary()
# +
# Train model 1 (frozen GloVe embeddings) and persist it to disk.
model1.fit(train_X, train_y,validation_data=(test_X,test_y),
           epochs=23, batch_size=32, steps_per_epoch= 55,validation_steps=32,validation_batch_size=13,
           workers=5)
model1.save("cnn-model-glove")
# +
## Model 2 ##
# Same task, but the embedding layer is learned from scratch and the head is
# a pure dense network (no convolutions).
_model2_layers = [
    keras.layers.Embedding(vocab_size, 50, input_length=maxlen,
                           mask_zero=True),
    keras.layers.GlobalAveragePooling1D(),
    keras.layers.Dense(22, activation="relu"),
    keras.layers.Dropout(.2),
    keras.layers.Dense(11, activation="relu"),
    keras.layers.Dropout(.2),
    keras.layers.Dense(5, activation="softmax"),
]
model2 = keras.models.Sequential(_model2_layers)
model2.compile(loss="categorical_crossentropy", optimizer="adam", metrics=['accuracy'])
model2.summary()
# +
# Train model 2 (embeddings learned from scratch) and persist it to disk.
model2.fit(train_X, train_y,validation_data=(test_X,test_y),
           epochs=20, batch_size=32, steps_per_epoch= 55,
           validation_steps=32,validation_batch_size=13,
           workers=5)
model2.save("cnn-model-gloveless")
# +
# Export the learned embeddings for the TensorFlow Embedding Projector
# (vecs.tsv holds the vectors, meta.tsv the words).
weights = model2.layers[0].get_weights()[0]
# The original fetched `weights` and then never used it, slicing the live
# tensor per index instead; index the NumPy array directly (index 0 is the
# padding slot and is skipped).
word_vectors = {reverse_idx[i]: weights[i] for i in range(1, vocab_size)}
out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
for word, vector in word_vectors.items():
    # skip words whose embedding stayed at the zero initialization
    if not np.all(vector == 0):
        out_m.write(word + "\n")
        out_v.write('\t'.join([str(x) for x in vector]) + "\n")
out_v.close()
out_m.close()
# -
def predict(text, model, verbose=True):
    """Classify *text* with *model*.

    Returns a (class name, probability) pair; when *verbose*, also prints a
    human-readable report.
    """
    if verbose:
        print("#####################################\nAnalyzing Statement:\n"+text)
    cleaned = process_text(remove_punctuation(text))
    token_ids = tokenizer.texts_to_sequences([cleaned])[0]
    padded_sequence = pad_sequences([token_ids], maxlen=maxlen, truncating="post", padding="post")
    likelihoods = model.predict(padded_sequence)[0]
    best = np.argmax(likelihoods)
    highest_probability = likelihoods[best]
    class_prediction = class_dict[best]
    if verbose:
        print("\nClass:",class_prediction,"\nLikelihood:",str(highest_probability*100)+"%")
        print("#####################################\n\n")
    return class_prediction, highest_probability
# Quick qualitative spot-checks of model2 on short phrases from each topic.
predict("Liverpool wins the match!",model=model2)
predict("TV",model=model2)
predict("Democracy",model=model2)
predict("nvidia graphics card",model=model2)
predict("video driver",model=model2)
predict("luxury",model=model2)
predict("stocks",model=model2)
print("Done")
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from bokeh.plotting import figure, output_notebook, show
# ## algorithm
# +
def split(u, v, points):
    """Return the points lying to the left of the directed line u -> v."""
    direction = v - u
    return [p for p in points if np.cross(p - u, direction) < 0]

def extend(u, v, points):
    """Quickhull step: recursively collect hull points between u and v."""
    if not points:
        return []
    # the point with the most negative cross product is furthest from UV
    w = min(points, key=lambda p: np.cross(p - u, v - u))
    left_of_wv = split(w, v, points)
    left_of_uw = split(u, w, points)
    return extend(w, v, left_of_wv) + [w] + extend(u, w, left_of_uw)

def convex_hull(points):
    """Quickhull: return the hull as a closed polygon (first point repeated)."""
    u = min(points, key=lambda p: p[0])  # leftmost point
    v = max(points, key=lambda p: p[0])  # rightmost point
    # search each side of the UV chord independently
    left, right = split(u, v, points), split(v, u, points)
    return [v] + extend(u, v, left) + [u] + extend(v, u, right) + [v]
# -
# ## run
# sample 100 random 2-D points in the unit square and compute their hull
points = np.random.rand(100, 2)
hull = np.array(convex_hull(points))
# the hull vertices; the first point is repeated at the end to close the polygon
hull
# +
output_notebook()
plot = figure()
# raw sample points, with the hull outline drawn on top in red
plot.scatter(x=points[:, 0], y=points[:, 1])
plot.line(x=hull[:, 0], y=hull[:, 1], color='red')
show(plot)
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "39e9137a-8f34-4a0c-acc7-f9bae9c47408", "showTitle": false, "title": ""}
# d
# ## Example 2-1 M&M Count
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "75660698-938f-4a4d-8208-7a021ed4b8ac", "showTitle": false, "title": ""}
from pyspark.sql.functions import *
# Path to the M&M color-count dataset bundled with Databricks.
mnm_file = "/databricks-datasets/learning-spark-v2/mnm_dataset.csv"
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "6f82cf33-9f22-4ce2-9b1f-b51b314d0a26", "showTitle": false, "title": ""}
# ### Read from the CSV and infer the schema
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "74d7bc0b-7287-457c-bb93-2e3b29edd27b", "showTitle": false, "title": ""}
# Read the CSV with a header row, letting Spark infer the column types.
mnm_df = (spark.read
          .format("csv")
          .option("header", "true")
          .option("inferSchema", "true")
          .load(mnm_file))
display(mnm_df)
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "21d5a1a2-b29f-4d51-80aa-da580b625164", "showTitle": false, "title": ""}
# ### Aggregate count of all colors and groupBy state and color, orderBy descending order
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "baa30c51-7685-489e-83de-0fe19af5817a", "showTitle": false, "title": ""}
# Count rows per (State, Color), highest totals first.
count_mnm_df = (mnm_df.select("State", "Color", "Count")
                      .groupBy("State", "Color")
                      .agg(count("Count").alias("Total"))
                      .orderBy("Total", ascending=False))
count_mnm_df.show(n=60, truncate=False)
print(f"Total Rows = {count_mnm_df.count()}")
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "5a4d0665-477c-43b6-9ad8-590f02b0c8b4", "showTitle": false, "title": ""}
# ### Find the aggregate count for California by filtering on State
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "e06ea97d-1282-4dc5-a45e-e14505ef510b", "showTitle": false, "title": ""}
# Same aggregation, restricted to California rows.
ca_count_mnm_df = (mnm_df.select("State", "Color", "Count")
                         .where(mnm_df.State == "CA")
                         .groupBy("State", "Color")
                         .agg(count("Count").alias("Total"))
                         .orderBy("Total", ascending=False))
ca_count_mnm_df.show(n=10, truncate=False)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from pandas import DataFrame
from pandas import Series
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# %matplotlib inline
# Load the FER2013 facial-expression dataset (one pixel string per row).
fer_images = pd.read_csv('/Users/lab/Documents/Datasets/fer2013/fer2013.csv')
# +
def makeMat(str_arr, size=(48, 48)):
    """Convert a space-separated pixel string into a float matrix.

    The `size` parameter was previously ignored (the reshape was hard-coded
    to 48x48); it is now honored, with the old default preserved so existing
    callers are unaffected.
    """
    vec = np.array(str_arr.split()).astype(float)
    return vec.reshape(size)
# parse every image's pixel string into a 48x48 matrix column
fer_images['mat'] = fer_images['pixels'].map(lambda x: makeMat(x))
# -
# show the first few face matrices on a 2x3 grid
fig = plt.figure()
x = 1
for mat in fer_images['mat'].head().values:
    fig.add_subplot(2, 3, x)
    imgplt = plt.imshow(mat)
    x += 1
# +
import os
import cv2

imgs = []
index = []
base_dir = '/Users/lab/Documents/Datasets/ulitmateEmotion/emotion'
# Walk the emotion dataset; each subdirectory name is the emotion label.
# BUG FIX: the original keyed on a literal backslash (root.find('\\') and
# root.split('\\')), which never matches POSIX paths such as the one above,
# so no images were ever loaded on macOS/Linux. Use the directory base name.
for root, dirs, files in os.walk(base_dir, topdown=False):
    if root == base_dir:
        continue  # skip the top-level directory itself
    emotion = os.path.basename(root)
    for file in files:
        if file.endswith(('.jpg', '.jpeg')):
            img = cv2.imread(os.path.join(root, file))
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            imgs.append(gray)
            index.append(emotion)

# a Series of grayscale images indexed by their emotion label
# (the original double-wrapped Series(Series(...)); one is enough)
img_ser = Series(imgs, index=index)
print(img_ser.head())
# +
# Detect faces in the first 90 images with a Haar cascade, draw a marker and
# bounding box on each detection, and show the images in a subplot grid.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
fig = plt.figure()
i = 1
new_img = img_ser
for img in new_img[:90]:
    faces = face_cascade.detectMultiScale(img, 1.3, 5)
    # Bug fix: the original guarded the drawing loop with
    # `if len(faces) == 0:`, i.e. it only iterated `faces` when the list was
    # empty, so nothing was ever drawn.  Draw whenever at least one face was
    # detected.
    if len(faces) != 0:
        for (x, y, w, h) in faces:
            cv2.circle(img, (x, y), 10, (255, 0, 0))
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    fig.add_subplot(4, 5, i)
    plt.imshow(img)
    i += 1
# NOTE(review): prints the top-left corner of the last face drawn; this
# raises NameError if no face was ever detected, and the 4x5 grid above only
# holds 20 subplots while up to 90 images are added — confirm intent.
print(x, y)
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.3.1
# language: julia
# name: julia-1.3
# ---
# # Chapter 1 Foundation of Probability Theory Part II
#
# #### *Zhuo Jianchao*
#
# Feb 4, 2020 *Rev 1*
# ## Fundamental Probability Laws
# We have discovered what is an experiment, a sample space, an outcome, an event. But so far they are still **sets** *(sample space)* or **subsets** *(event)* or **element of set** *(outcome)*.
#
# We haven't touched the concept of **probability** which plays major role in the Econometrics. From now on, we are heading towards danger zone where probability enjoys its dance.
# ### Example 4.1
# Let's put forward a naive example of probability. We roll a coin three times and record the *number of times the coin is landing on a head*. This process is called a **random experiment**.
#
# ❗️Be aware that each roll of the coin yields an outcome which is the **realization** of *one component* of the possible outcome. Because in the experiment we roll the coin *three* times, only after we obtain three realizations — one per roll — do we have a **realization** of the experiment's possible outcome.
#
# Since the total possible outcomes of three successive rollings of a coin is
#
# $$\{HHH, HHT, HTH, HTT, THH, THT, TTH, TTT\},$$
#
# and we only care the number of heads showing up, then the **sample space** of the experiment is
#
# $$S=\{0, 1, 2, 3\},$$
# since the ordering does not make a difference.
#
# Because rolling a coin three times, the **possible outcome**, or the number of heads showing up in one experiment consisting of three rollings must be one and only one element in the set $S$.
#
# There are 8 possible successive outcomes: in 1 of them the head shows 0 times, in 3 of them 1 time, in 3 of them 2 times, and in 1 of them 3 times, as collected in the table below.
#
# |# of heads|0|1|2|3|
# |:-:|---|---|---|---|
# |# of occurence|1|3|3|1|
#
# we can say that the probability of *rolling three heads*$(HHH)$ is 1/8.
#
# The Statement "rolling three heads" is actually an **event**. Recall that event set is a subset of sample space. Let's denote the event *rolling three heads* to be $A$. The event set of the event HHH is $\{3\}$ since 3 heads showing up in this case, denoted as
# $$A=\{3\}$$
#
# And we've already know that the probability of *rolling three heads* is 1/8, which is equivalent to say <u>we **assign** probability of 1/8 to the event A</u>, which is denoted as
# $$P(A)=\frac{1}{8}$$
#
# As for other events,
# $$P(B)=\frac{3}{8}$$
#
# $$P(C)=\frac{3}{8}$$
#
# $$P(D)=\frac{1}{8},$$
# where *B*, *C* and *D* represent the event of rolling heads two times, one time and naught respectively.
#
# Now you gain the basic intuition of probability, we can go further.
# ### Definition 4.1 Sigma Algebra 🎉
# I know the name is frightening, but please stay with me and do not be scared by the title. Let's start it with the example we have explored previously in the Example 4.1.
#
# Still, we roll a coin three times as an experiment and observe the number of heads showing up.
#
# For convenient reference, we paste the table below, which is the number of occurrences of the corresponding number of heads.
#
# |# of heads|0|1|2|3|
# |:-:|---|---|---|---|
# |# of occurence|1|3|3|1|
#
# There are four events and its corresponding probability we have discovered already,
#
# * *A* is the event that three heads showing up
# * *B* is the event that two heads showing up
# * *C* is the event that one head showing up
# * *D* is the event that no heads showing up
#
# But now, let's put forward some more events and investigate its probability.
#
# How about the probability of the event that *the number of rolling heads is **not** three times*? We use $^\mathrm{c}$ to denote `complement` in set theory, that is, the event is denoted as $A^\mathrm{c}$, then the probability of such event is
# $$P(A^\mathrm{c})=P(B)+P(C)+P(D)=\frac{3}{8}+\frac{3}{8}+\frac{1}{8}=\frac{7}{8},$$
#
# or simply and intuitively,
#
# $$P(A^\mathrm{c})=1-P(A)=1-\frac{1}{8}=\frac{7}{8}$$
#
# Also, how about the probability of the event that *rolling heads more than one time*? This question is equivalent to rolling heads two times ***or*** three times. We use union symbol $\cup$ to denote `or` in set theory, then the event that *rolling heads more than one time* is denoted as $A\cup B$, the probability of such event is
# $$P(A\cup B)=P(A)+P(B)=\frac{1}{8}+\frac{3}{8}=\frac{4}{8}$$
# Not only these events, by the same way performed as before, we can also **figure out the exact probability** of following events.
#
# * $B^\mathrm{c}$, $C^\mathrm{c}$, $D^\mathrm{c}$
# * $A\cup C$, $B\cup C$
# * $A\cup B\cup C$, $A\cup B\cup D$, $A\cup C\cup D$, $B\cup C\cup D$
# * $A\cup B\cup C\cup D$ = $S$ = $\emptyset^\mathrm{c}$
# * $\emptyset$ = $S^\mathrm{c},$
# where $\emptyset$ indicates none of the events happening.
# The probabilities of the events listed above can be calculated because we have known the probability of events A, B, C and D, and by performing complement and union operation, we can easily calculate the probability of any event listed above.
#
# **The set containing those events whose probability can be calculated by some events whose probability is already known is called a $\sigma$-algebra of the sample space $S$, to which the events with known probability belong**.
#
# In this case,
#
# $$\sigma = \{\emptyset, \{A\},\{B\},\{C\},\{D\},\{A,B\},\{A,C\},\{A,D\},\{B,C\},\{B,D\},\{C,D\},\{A,B,C\},\{A,B,D\},\{A,C,D\},\{B,C,D\},\{A,B,C,D\}\}$$
#
# Any element in the $\sigma$-algebra can be constructed by the elements with known probability which is A, B, C, and D, and the operation **complement** and **union**, and that's the informal definition of a $\sigma$-algebra.
# ### Properties of Sigma Algebra
# A $\sigma$-algebra, denoted as $\mathbb{B}$ is a collection of subsets of a set $S$ such that the following conditions hold:
# 1. $\emptyset \in \mathbb{B}$;
# 2. If $A \in \mathbb{B}$, then $A^\mathrm{c} \in \mathbb{B}$ as well;
# 3. If $A_1, A_2,\cdots \in \mathbb{B}$, then $\cup_{i=1}^{n}{A_i} \in \mathbb{B}$
#
# From the properties above, we can have some deductions:
# 1. the empty set and sample space $S$ is included in $\mathbb{B}$;
# 2. the $\mathbb{B}$ is closed under countable complement;
# 3. the $\mathbb{B}$ is closed under countable unions.
# Note that any element in the $\sigma$-algebra is a **symbolic statement** of an event, for example,
#
# * $S$ denotes the event that *the number of heads rolled is 0 or 1 or 2 or 3*.
# * $\emptyset$, the complement of $S$, represents the event that *the number of heads rolled is neither 0 nor 1 nor 2 nor 3* — an impossible event.
# * $\{B,C\}$ stands for the event that *the number of heads rolled is 1 or 2*, etc.
#
# The $\sigma$-algebra contains all the events whose associated probability can be calculated. In other words, we can assign **probability** to the events in the $\sigma$-algebra. We don't manually assign probability to an event, instead, it's generated from **probability function**, which is a function assigning probability to each event in the $\sigma$-algebra.
#
# Since every event in the $\sigma$-algebra has a specific probability, that is to say, the probability of the event in the $\sigma$-algebra is **measurable**.
#
# So the pair, ($S$, $\mathbb{B}$) is called a **measurable space**. The probability function assign probability to events in the measurable space, that is to say, the measurable space is the **domain** of probability function.
# $\sigma$-algebra provides us a set of reasonable events, any event out of the $\sigma$-algebra has no probability associated with it. To be clear, *have no probability* is different from *with probability of zero*. For instance, the event *rolling heads 4 times* in this example is impossible, hence has probability of 0 and is in the $\sigma$-algebra. Whereas the event *first roll showing up a head* is utterly irrelevant considering the sample space hence is not in the $\sigma$-algebra of $S$.
# ### Definition 4.2 Generated Sigma Algebra
# Ask a pragmatic question: **how to find the $\sigma$-algebra?** Does every time to find a $\sigma$-algebra we need to play such a word game that what are possible events associated with the sample space?
#
# Not necessary, just put another simple example.
#
# Let's say $\Omega = \{1,2,3\}$, then what are **subsets** of $\Omega$?
#
# If we take one element each time from it, we get $\{1\}$, $\{2\}$ and $\{3\}$; if we take two, we get $\{1,2\}$, $\{1,3\}$ and $\{2,3\}$; if we take three, that will be the $\Omega$ as a whole $\{1,2,3\}$.
#
# But remember that empty set is a set included in every set. Then $\{\emptyset\}$ is also a subset of $\Omega$.
#
# Denote the non-empty collection of subsets of $\Omega$ as $\mathbb{B}$, then
#
# $$\mathbb{B}=\{\emptyset,\{1\},\{2\},\{3\},\{1,2\},\{2,3\},\{1,3\},\{1,2,3\}\}$$
#
# Note that elements in the $\mathbb{B}$ satisfies properties of a $\sigma$-algebra, that is, the $\mathbb{B}$ is a $\sigma$-algebra.
#
# $\mathbb{B}$ can be **subset** of other $\sigma$-algebra. If $\mathbb{C}$ is the collection of subsets of $S$, where $S=\{1,2,3,4\}$, then $\mathbb{C}$ is a $\sigma$-algebra and the $\mathbb{B}$ is **included** in $\mathbb{C}$, try it!
#
# We can deduce it further that there will be infinitely many $\sigma$-algebra that contains the $\sigma$-algebra constructed using elements of $\Omega$, that is, $\mathbb{B}$. Take intersection of those infinite $\sigma$-algebra that include $\mathbb{B}$ we have a $\sigma$-algebra identical to $\mathbb{B}$, which is called the **smallest sigma algebra**, or **sigma algebra generated by $\Omega$**, denoted as $\sigma(\Omega)$.
#
# Since the intersection of those $\sigma$-algebra which contains $\sigma$-algebra generated by some set $X$, that is $\sigma(X)$, is identical to the $\sigma$-algebra generated by $X$, why bother should we put forward another concept? In other words, what is the difference between a $\sigma$-algebra and a smallest $\sigma$-algebra?
#
# Any non-empty collection of sets whose elements satisfy the properties of sigma algebra can be called a sigma algebra. It doesn't matter *what the elements are* but ***how do they behave***. As long as they behave like elements of sigma algebra, they form a sigma algebra.
#
# As for the smallest sigma algebra, **what the elements are** come into play. Different collection of subsets forms a different sigma algebra generated by some set, or different smallest sigma algebra of some set.
# Why do smallest sigma algebra so important in probability theory? Because the smallest sigma algebra generated by the sample space $\Omega$, that is, $\sigma(\Omega)$, is the **collection of all possible events** in a random experiment, and all the possible events are composed with basic outcomes in sample space.
# ### Definition 5 Probability Function
# We've mentioned probability function many times but haven't given a precise definition yet. Now it's time since we have covered all the necessary concepts. We say the probability function is a function which is defined on the measurable space, and assigns real-numbered values to the element or event in the sigma algebra.
#
# Suppose a random experiment has a sample space $S$ and a $\sigma$-algebra $\mathbb{B}$ associated with it. Then $(S,\mathbb{B})$ is a measurable space. A probability function $P:\mathbb{B} \rightarrow [0,1]$ is a mapping defined on the sigma algebra $\mathbb{B}$ to a real-numbered value between 0 and 1 that has the following axioms:
#
# 1. $0\leq P(A) \leq 1$, for any event $A$ in $\mathbb{B}$;
# 2. $P(S)=1$;
# 3. If $A_1, A_2, \dots \in \mathbb{B}$ are mutually exclusive, then $P(\cup_{i=1}^\infty A_i)=\sum_{i=1}^\infty P(A_i)$.
#
# Some discussions about the axioms above are:
# 1. Every event in the sigma algebra are *qualified* enough to get a real-numbered value indicating their likelihood of occurence, which is between 0 and 1 by our convention, that an impossible event has a probability of 0 and a certain event has probability of 1.
# 2. The requirement that $P(S)=1$ means every time we do the random experiment, there'll always be one event that happens no matter what it is. The experiment always returns an outcome which we can observe.
# 3. the property(3) tells us how to calculate probability of event which is composed with multiple disjoint events.
#
# Any function which satisfies the axioms above is a probability function defined on the measurable space. A single random experiment has one associated measurable space but infinitely many probability functions. Finding a probability function that suitably describes the actual situation is a vital part of an econometrician's work.
#
# Probablity function has three basic properties.
# ### Properties of Probability Function
# Let $(S,\mathbb{B})$ be a measurable space where probability function $P$ is defined.
# 1. For any two sets $A$ and $B$ $\in$ $\mathbb{B}$, if $A \subset B$, then $P(A) \leq P(B)$;
# 2. For any sequence $A_{1}, A_{2}, \dots$ $\in$ $\mathbb{B}$, $P\left(\cup_{i=1}^{\infty} A_{i}\right) \leq \sum_{i=1}^{\infty} P\left(A_{i}\right)$;
# 3. 1. For any sequence $A_{1}, A_{2}, \dots$ such that $A_{1} \subset A_{2} \subset A_{3} \subset \ldots$, then
# $P\left(\lim _{\mathrm{n} \rightarrow \infty} \mathrm{A}_{\mathrm{n}}\right)=\lim _{\mathrm{n} \rightarrow \infty} P\left(\mathrm{A}_{\mathrm{n}}\right)$, where $\lim _{\mathrm{n} \rightarrow \infty} \mathrm{A}_{\mathrm{n}}=\cup_{\mathrm{i}=1} \mathrm{A}_{\mathrm{i}}$
# 2. For any sequence $A_{1}, A_{2}, \dots$ such that $A_{1} \supset A_{2} \supset A_{3} \supset \ldots$ and $P(A_1)<\infty$, then $P\left(\lim _{\mathrm{n} \rightarrow \infty} \mathrm{A}_{\mathrm{n}}\right)=\lim _{\mathrm{n} \rightarrow \infty} P\left(\mathrm{A}_{\mathrm{n}}\right)$, where $\lim _{\mathrm{n} \rightarrow \infty} \mathrm{A}_{\mathrm{n}}=\cap_{\mathrm{i}=1} \mathrm{A}_{\mathrm{i}}$
#
# Some discussions of the properties:
# 1. Recall that all events are comprised of basic outcomes in sample space. If an event contains another one, it means that the event has more elements in it, since every element has non-negative probability, a larger event has higher probability.
# ### Definition 6 Probability Space
# Once we have the measurable space $(S,\mathbb{B})$, define a probability function as a measure, we determine a **probability space** $(S,\mathbb{B}, P)$.
#
# A probability space completely describes a random experiment. We use a simple example to illustrate it; no matter how abstract the random experiment is, basics are the same.
# ### Example 6.1
# If we are to roll a coin once, and record which face it lands on, with $H$ denoting head, and $T$ denoting tail.
#
# We know the sample space is $\{H,T\}$, and the sigma algebra associated with it is $\{\emptyset,\{H\},\{T\},\{H,T\}\}$, and the probability function $P$ is that:
#
# $$\begin{align}
# P(\emptyset)=0\\
# P(H)=\frac{1}{2}\\
# P(T)=\frac{1}{2}\\
# P(H,T)=1
# \end{align}
# $$
#
# Probability function takes one event from the sigma algebra and maps it to a real-numbered value between 0 and 1.
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Juan's olive grove - temperature
#
# The south of Spain (andalusia) is famous for its olive groves and olive oil production. However olive trees are sensitive to climate change. Changes in temperature and precipitation can cause changing flowering times (Gabaldón-Leal et al., 2017), reduce production and may even result in abandoned olive groves when farmers are forced to move to different areas. Olive production is a culturally and economically crucial industry to the region therefore adaptation is necessary. Adaptation measures include but are not limited to: different and more precise irrigation practices (Lorite et al., 2018), change of location (Ropero, Rumí, & Aguilera, 2018), change of crop.
#
#
#
# In this case study we will take a look at how climate data can help two olive grove owners, Juan and Carlos, in their olive farming.
#
# Farmer Juan has an olive grove between Granada and Malaga (lat:37 lon:-4). The specific breed of olive tree in his grove cannot flower if the temperature rises above 27 degrees. With the Toolbox you can plot a graph of the temperature fluctuations from 2008 to 2017.
#
#
# <img src="https://www.freeworldmaps.net/europe/spain/andalusia/andalusia-map.jpg" />
#
# # Question
#
# ### How many years did the mean daily temperature exceed 27°C in the period of 2008-2017 at Juan's olive grove?
# # Retrieve ERA-5 reanalysis
#
# - 2 meter temperature (K)
# - hourly data
# - select a small area to reduce the amount of data downloaded
# +
import cdsapi

c = cdsapi.Client()

# Build the request coordinate lists programmatically: every hour of every
# day of every month for the years 2008-2017.
years = [str(y) for y in range(2008, 2018)]
months = [f'{m:02d}' for m in range(1, 13)]
days = [f'{d:02d}' for d in range(1, 32)]
hours = [f'{h:02d}:00' for h in range(24)]

c.retrieve(
    'reanalysis-era5-single-levels',
    {
        'product_type': 'reanalysis',
        'variable': '2m_temperature',
        'year': years,
        'month': months,
        'day': days,
        'area': [37.5, -4.5, 36.5, -3.5], # North, West, South, East. Default: global
        'time': hours,
        'format': 'netcdf',
    },
    'era5_t2m_2008-2017_hourly.nc')
# -
# # Compute daily mean from hourly data
#
# - One single location (longitude: -4, latitude: 37) e.g. take the nearest grid point
# - Plot time series from 2008 to 2017
import matplotlib.pyplot as plt
import xarray as xr
import cftime
# %matplotlib inline
filename = 'era5_t2m_2008-2017_hourly.nc'
# decode_times/use_cftime: parse the time axis into cftime objects so that
# resampling and .dt accessors work across the 10-year span.
dset = xr.open_dataset(filename, decode_times=True, use_cftime=True)
dset
# ## Daily mean
# Nearest grid point to Juan's grove (lat 37, lon -4), then average the
# hourly samples of each day.
dset_daily_mean = dset.sel(latitude=37.0, longitude=-4, method="nearest").resample(time='1D').mean()
dset_daily_mean.time
# ## Plot timeserie
# Convert from Kelvin to Celsius for plotting; the horizontal line marks the
# 27 C flowering threshold.
(dset_daily_mean['t2m']-273.15).plot.line('ro-')
plt.ylabel("2 metre temperature (C)")
plt.title("Daily mean temperature")
plt.axhline(y=27)
# ## Compute number of days where mean 2m temperature > 27C
# where() keeps only days above the threshold (27 C expressed in Kelvin);
# groupby('time.year').count() then yields the number of such days per year.
dset_daily_mean_greater_27 = dset_daily_mean.where(dset_daily_mean['t2m'] > 27 + 273.15).groupby('time.year').count()
# ## Plot
dset_daily_mean_greater_27['t2m'].plot.line('ro-')
plt.ylabel("Number of days where T2M mean > 27C")
plt.axhline(y=0)
dset_daily_mean_greater_27['t2m']
# ## Count number of years where daily mean > 27C
# A year counts if it had at least one day whose mean exceeded 27 C.
nyears = dset_daily_mean_greater_27.where(dset_daily_mean_greater_27['t2m']>0).count()
print("In the period of 2008-2017, the mean daily temperature exceeded 27C ", nyears['t2m'].values, " years at Juan's olive grove")
# ## Same using cdo
#
# - you can use [cdo](https://code.mpimet.mpg.de/projects/cdo) to compute daily mean values
# !cdo -daymean era5_t2m_2008-2017_hourly.nc era5_t2m_2008-2017_daily_mean.nc
filename = 'era5_t2m_2008-2017_daily_mean.nc'
dset = xr.open_dataset(filename, decode_times=True, use_cftime=True)
# Same analysis as above, but starting from the cdo-computed daily means:
# pick the nearest grid point, count days per year above 27 C, then count
# the years with at least one such day.  (Note: this rebinds `dset` to the
# daily-mean file.)
dset_daily_mean = dset.sel(latitude=37.0, longitude=-4, method="nearest")
dset_daily_mean_greater_27 = dset_daily_mean.where(dset_daily_mean['t2m'] > 27 + 273.15).groupby('time.year').count()
dset_daily_mean_greater_27
nyears = dset_daily_mean_greater_27.where(dset_daily_mean_greater_27['t2m']>0).count()
print("In the period of 2008-2017, the mean daily temperature exceeded 27C ", nyears['t2m'].values, " years at Juan's olive grove")
# # Maximum daily temperature
#
# Farmer Juan's olive trees cannot flower when the temperature is above 27 degrees. In the previous exercise we analyzed the mean daily temperature but actually we need to look at the maximum daily temperature to find out which years were problematic for Farmer Juan.
#
# The daily temperature in Andalusia reaches its maximum at 14:00h. To get this maximum daily temperature we need to make some adjustments to the python script.
# Bug fix: at this point `dset` holds the *daily mean* file opened in the
# cdo cell above (one timestamp per day), so selecting the 14:00h samples
# from it cannot work.  Reopen the hourly dataset, which is what this
# analysis needs.
dset = xr.open_dataset('era5_t2m_2008-2017_hourly.nc', decode_times=True, use_cftime=True)
# Select the 14:00h sample of each day — stated above to be when the daily
# temperature peaks in Andalusia — at the grid point nearest to Juan's
# grove.  (The original passed [14, 14] to isin; the duplicate was redundant.)
dset_max_daily = dset.sel(latitude=37.0, longitude=-4, time = dset.time.dt.hour.isin([14]), method="nearest")
dset_daily_max_greater_27 = dset_max_daily.where(dset_max_daily['t2m'] > 27 + 273.15).groupby('time.year').count()
nyears = dset_daily_max_greater_27.where(dset_daily_max_greater_27['t2m']>0).count()
# Bug fix: the message said "mean" although this cell analyses the daily
# maximum (14:00h) temperature.
print("In the period of 2008-2017, the maximum daily temperature exceeded 27C ", nyears['t2m'].values, " years at Juan's olive grove")
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:scihub]
# language: python
# name: conda-env-scihub-py
# ---
# # Sci-Hub's coverage on the "State of OA" 2017 DOI Catalogs
#
# Calculate Sci-Hub's coverage on the DOI catalogs from
#
# > Piwowar, Priem, Larivière, Alperin, Matthias, Norlander, Farley, West, Haustein. (2017-08-02) [**The State of OA: A large-scale analysis of the prevalence and impact of Open Access articles**](https://doi.org/10.7287/peerj.preprints.3119v1). _PeerJ Preprints_
#
# Supplemenental data for this study is [available on Zenodo](https://doi.org/10.5281/zenodo.837902). From the [`README.txt`](https://zenodo.org/record/837902/files/README.txt):
#
# > Columns for raw data files:
# + doi: the DOI, from crossref
# + evidence: the response from oaDOI
# + oa_color_long: the OA "color" of the open copy we found. See paper for details.
# + best_open_url: the url of the open copy we found
# + year: the year of the article, from crossref
# + found_green: true if we found a green copy, even if we also found a hybrid, gold, or bronze copy. See paper for details.
# + journal: the journal of the article, from crossref
# + publisher: the publisher of the article, from crossref
# + license: the license of the paper, when we found one
# + random: a random number
#
# See [this GitHub issue](https://github.com/greenelab/scihub-manuscript/issues/18) for more discussion of this analysis.
# +
import collections
import itertools
import json
import pathlib
import pandas
from statsmodels.stats.proportion import proportion_confint
# -
# ## Read State of OA datasets
def read_zenodo_csv(name):
    """
    Read CSV from https://doi.org/10.5281/zenodo.837902

    Downloads the named file from the Zenodo record, renames the
    'oa_color_long' column to 'oadoi_color', and maps the 'gray' color to
    'closed'.
    """
    url = f'https://zenodo.org/record/837902/files/{name}'
    df = pandas.read_csv(url, encoding='latin_1')
    df = df.rename(columns={'oa_color_long': 'oadoi_color'})
    # gray is closed, see https://github.com/Impactstory/oadoi-paper1/issues/1
    if 'oadoi_color' in df.columns:
        df.oadoi_color.replace({'gray': 'closed'}, inplace=True)
    return df
# Three ~100k-DOI samples from the State of OA study, one per collection;
# tag each with its collection name for later concatenation.
unpaywall_df = read_zenodo_csv('unpaywall_100k.csv.gz')
unpaywall_df['collection'] = 'Unpaywall'
unpaywall_df.head(2)
wos_df = read_zenodo_csv('wos_100k.csv.gz')
wos_df['collection'] = 'Web of Science'
wos_df.head(2)
crossref_df = read_zenodo_csv('crossref_100k.csv.gz')
crossref_df['collection'] = 'Crossref'
crossref_df.head(2)
# +
# Stack the three collections into one long table of
# (collection, doi, oadoi_color).
oadoi_df = (
    pandas.concat([unpaywall_df, wos_df, crossref_df])
    [['collection', 'doi', 'oadoi_color']]
    .sort_values(['collection', 'oadoi_color'])
)
oadoi_df.head(2)
# -
# DOIs with inconsistent colors between collections
inconsistencies = [
    doi
    for doi, df in oadoi_df.groupby('doi')
    if len(set(df.oadoi_color)) > 1
]
oadoi_df.query("doi in @inconsistencies")
# ## Read Penn library access
# The base URL for the library-access data is read from a local config file.
with pathlib.Path('00.configuration.json').open() as read_file:
    config = json.load(read_file)
url = config['library_access_url'] + 'data/library_coverage_xml_and_fulltext_indicators.tsv.xz'
penn_df = (
    pandas.read_table(url)
    .rename(columns={'full_text_indicator': 'penntext'})
)
penn_df.head(2)
# ## Read literature catalog
# Catalog of article DOIs; includes the 'in_scihub_dois' indicator used below.
path = pathlib.Path('data/doi.tsv.xz')
article_df = pandas.read_table(path, compression='xz')
article_df.head(2)
# ## Integrate datasets
# +
# Join the State-of-OA rows with the Sci-Hub catalog and PennText indicators,
# yielding one 0/1 access column per repository.
access_df = (
    oadoi_df
    .merge(article_df[['doi', 'in_scihub_dois']])
    .merge(penn_df)
    .rename(columns={'in_scihub_dois': 'scihub'})
)
# oaDOI provides access to all colors besides closed
access_df['oadoi'] = (access_df.oadoi_color != 'closed').astype(int)
access_df.head(2)
# -
# Create a dataset that combines the three State of OA collections
# Deduplicate DOIs across collections (rows are sorted by doi then
# collection, so the first collection alphabetically wins), label the result
# 'Combined', and append it to the per-collection rows.
combined_df = (
    access_df
    .sort_values(['doi', 'collection'])
    .drop_duplicates('doi')
    .assign(collection='Combined')
)
combined_df = pandas.concat([access_df, combined_df])
combined_df.head()
combined_df.collection.value_counts()
# ## Compute coverage
# Map internal column names to display names, in presentation order.
renamer = collections.OrderedDict([
    ('oadoi', 'oaDOI'),
    ('penntext', 'PennText'),
    ('scihub', 'Sci-Hub'),
])
renamer
# Find all possible repository combinations
repositories = list(renamer)
combinations = [
    list(combo)
    for size in range(1, len(repositories) + 1)
    for combo in itertools.combinations(repositories, size)
]
combinations
def summarize(df):
    """
    Compute coverage for all repo combinations

    For each repository combination, an article counts as available when any
    repository in the combination provides it (row-wise max of the 0/1
    columns).  Returns one row per combination with counts, the coverage
    fraction, and a 95% Jeffreys confidence interval.
    """
    rows = []
    for repos in combinations:
        access = df[repos].max(axis='columns')
        n_available = sum(access)
        n_articles = len(access)
        lower, upper = proportion_confint(
            count=n_available, nobs=n_articles, alpha=0.05, method='jeffreys')
        row = collections.OrderedDict([
            # Binary id matching matplotlib-venn region ids, e.g. '101'.
            ('venn', ''.join('1' if repo in repos else '0' for repo in repositories)),
            ('repos', ', '.join(renamer[repo] for repo in repos)),
            ('n_repos', len(repos)),
            ('available', n_available),
            ('articles', n_articles),
            ('coverage', access.mean()),
            ('coverage_lower', lower),
            ('coverage_upper', upper),
        ])
        row['coverage_formatted'] = '{coverage:.1%} [{coverage_lower:.1%}–{coverage_upper:.1%}]'.format(**row)
        rows.append(row)
    return pandas.DataFrame(rows)
# Compute coverage per (collection, color), plus two pooled pseudo-colors:
# 'closed + green' and 'all'.
coverage_df = (
    pandas.concat([
        combined_df, # Coverage by color
        combined_df.query("oadoi_color in ['green', 'closed']").assign(oadoi_color='closed + green'), # Closed and green articles pooled under one label
        combined_df.assign(oadoi_color='all'), # Total coverage (grouping all colors)
    ])
    .groupby(['collection', 'oadoi_color'])
    .apply(summarize)
    .reset_index(level=['collection', 'oadoi_color'])
)
coverage_df.head(2)
path = pathlib.Path('data/state-of-oa-coverage.tsv')
coverage_df.to_csv(str(path), sep='\t', index=False, float_format='%.5g')
# ## Venn diagrams
#
# An alternative [may be](https://github.com/hms-dbmi/UpSetR/issues/102#issuecomment-351193870) UpSetR-style axis labels.
# +
import matplotlib
import matplotlib_venn
# %matplotlib inline
# Global plot styling for all figures below.
matplotlib.pyplot.style.use('ggplot')
# Fonts: http://jonathansoma.com/lede/data-studio/matplotlib/list-all-fonts-available-in-matplotlib-plus-samples/
matplotlib.rcParams['font.sans-serif'] = "Garuda"
matplotlib.rcParams['font.family'] = "sans-serif"
# matplotlib.rcParams['figure.figsize'] = 2.5, 2.5
# +
# Color Venn regions by their coverage fraction on the viridis colormap.
cmap = matplotlib.cm.get_cmap('viridis')
# cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
# name='coverage_cmap', colors=['#ffffff', '#000000'])
def venn_plotter(df, ax=None):
    """
    Use matplotlib-venn
    https://github.com/konstantint/matplotlib-venn

    Draw an unweighted 3-set Venn diagram whose region labels and fill
    colors encode the coverage values in *df* (one row per region, keyed by
    the binary `venn` id produced by summarize()).  Draws into *ax* when
    given (returns None); otherwise creates and returns a new Figure.
    """
    venn = matplotlib_venn.venn3_unweighted(
        subsets=7 * [1],
        set_labels=renamer.values(),
        alpha=1,
        ax=ax,
    )
    circles = matplotlib_venn.venn3_circles(
        subsets=7 * [1],
        linestyle='dotted',
        linewidth=1.5,
        color='#ffffff',
        ax=ax,
    )
    for row in df.itertuples():
        label = venn.get_label_by_id(row.venn)
        # Show coverage as a percentage without the % sign.
        label.set_text(f'{100 * row.coverage:.1f}')
        # Smaller font for the crowded two-repo intersection regions.
        label.set_fontsize(9 if row.n_repos == 2 else 11)
        # Dark text on bright (high-coverage) patches, white text otherwise.
        label.set_color('#000000' if row.coverage > 0.75 else '#ffffff')
        patch = venn.get_patch_by_id(row.venn)
        patch.set_color(cmap(row.coverage))
    if not ax:
        figure = matplotlib.pyplot.figure()
        return figure
# -
def set_title(axes, label, rightside=False):
    """
    Set title of subplot on top (rightside=False) or on right (rightside=True).
    Uses workaround from https://stackoverflow.com/a/47777701/4651668
    """
    if rightside:
        # Right-hand "title": rotated text placed just outside the axes.
        title = axes.text(s=label, x=0.8, y=-0.05, verticalalignment='center', rotation=270, size=14)
    else:
        title = axes.set_title(label, loc='center', y=1.05)
    bbox_style = {'facecolor': '#FEF2E2', 'boxstyle': 'round', 'edgecolor': '#ccb494'}
    title.set_bbox(bbox_style)
    return title
# +
# Grid of Venn diagrams: one row per OA color category, one column per
# collection; headers drawn on the top row and the last column.
# NOTE(review): the name `collections` shadows the stdlib `collections`
# module imported above — harmless here (summarize() already ran), but
# confusing.
oadoi_colors = ['all', 'closed', 'closed + green', 'bronze', 'green', 'hybrid', 'gold']
collections = ['Combined', 'Crossref', 'Unpaywall', 'Web of Science']
figure, axes = matplotlib.pyplot.subplots(
    nrows=len(oadoi_colors),
    ncols=len(collections),
    figsize=(2.6 * len(collections), 2.6 * len(oadoi_colors))
)
for (oadoi_color, collection), df in coverage_df.groupby(['oadoi_color', 'collection']):
    i = oadoi_colors.index(oadoi_color)
    j = collections.index(collection)
    ax = axes[i, j]
    venn_plotter(df, ax=ax)
    if i == 0:
        set_title(ax, collection, rightside=False)  # column header on top row
    if j == len(collections) - 1:
        set_title(ax, oadoi_color.title(), rightside=True)  # row header on last column
matplotlib.pyplot.subplots_adjust(wspace=0.3, hspace=0.05)
matplotlib.pyplot.savefig('figure/state-of-oa-venns.svg', bbox_inches='tight')
# +
# Two-panel summary figure: Combined-collection coverage for all articles,
# and for the pooled closed + green subset.
figure, axes = matplotlib.pyplot.subplots(nrows=1, ncols=2, figsize=(2 * 2.7, 2.7))
# Coverage of all articles, regardless of publisher OA status
venn_df = coverage_df.query("oadoi_color == 'all' and collection == 'Combined'")
venn_plotter(venn_df, ax=axes[0])
text = set_title(axes[0], 'All Articles', rightside=False)
# Coverage of articles that are paywalled by their publisher
venn_df = coverage_df.query("oadoi_color == 'closed + green' and collection == 'Combined'")
venn_plotter(venn_df, ax=axes[1])
text = set_title(axes[1], 'Closed + Green', rightside=False)
matplotlib.pyplot.subplots_adjust(wspace=0.3, hspace=0.05)
matplotlib.pyplot.savefig('figure/state-of-oa-venns-small.svg', bbox_inches='tight')
venn_df
# -
# ## State of OA accuracy analysis
#
# Currently, reads data, but does no analysis.
# Manually classified accuracy analysis
acc_df = pandas.read_excel('https://zenodo.org/record/837902/files/accuracy_analysis.xlsx')
acc_df['manual oa_color'].value_counts()
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# language: python
# name: python3
# ---
# +
import sympy as sy
from sympy import pi, cos, sin, tan
from IPython.display import display
# Symbolic 6-DOF pose: position (x, y, z) and orientation (alpha, beta, gamma).
x, y, z, alpha, beta, gamma = sy.symbols("x, y, z, alpha, beta, gamma")
Y = sy.Matrix([[x, y, z, alpha, beta, gamma]]).T # state vector
x_d, y_d, z_d, alpha_d, beta_d, gamma_d = sy.symbols("x_d, y_d, z_d, alpha_d, beta_d, gamma_d")
Y_d = sy.Matrix([[x_d, y_d, z_d, alpha_d, beta_d, gamma_d]]).T # goal position vector
x_e, y_e, z_e, alpha_e, beta_e, gamma_e = sy.symbols("x_e, y_e, z_e, alpha_e, beta_e, gamma_e")
Y_e = sy.Matrix([[x_e, y_e, z_e, alpha_e, beta_e, gamma_e]]).T # second target pose (original comment also read "goal position vector" — TODO confirm its role)
# eta: smoothing parameter used in pot(); a: sigmoid steepness used in sigmoid().
eta, a = sy.symbols("eta, a")
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + exp(-a*x)); steepness set by the module-level symbol `a`."""
    return 1 / (1 + sy.exp(-a*x))
def pot(x):
    """Smooth potential of the vector `x`.

    Returns (1/eta) * log(exp(eta*||x||) + exp(-eta*||x||)), a log-sum-exp
    style smooth approximation of the norm ||x||; the module-level symbol
    `eta` controls the smoothing.
    """
    x_norm = x.norm()
    return 1/eta * sy.log(sy.exp(eta*x_norm) + sy.exp(-eta*x_norm))
def pot_sice(y, y_d, y_e):
    """Sigmoid-blended potential between the target y_d and reference y_e.

    The blend weight grows with the distance to the target, so the
    potential shifts from the y_d term toward the y_e term far away.
    (Local renamed from `alpha` to avoid shadowing the module-level symbol.)
    """
    err_d = y - y_d
    err_e = y - y_e
    w = sigmoid(err_d.norm())
    return (1 - w) * pot(err_d) + w * pot(err_e)
# Build the blended potential for the symbolic vectors and differentiate
# it w.r.t. the state Y; display the gradient.
p = pot_sice(Y, Y_d, Y_e)
grad_p = sy.tensor.array.derive_by_array(p, Y)
grad_p
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mortgage Workflow
#
# ## The Dataset
# The dataset used with this workflow is derived from [Fannie Mae’s Single-Family Loan Performance Data](http://www.fanniemae.com/portal/funding-the-market/data/loan-performance-data.html) with all rights reserved by Fannie Mae. This processed dataset is redistributed with permission and consent from Fannie Mae.
#
# To acquire this dataset, please visit [RAPIDS Datasets Homepage](https://rapidsai.github.io/demos/datasets/mortgage-data)
#
# ## Introduction
# The Mortgage workflow is composed of three core phases:
#
# 1. ETL - Extract, Transform, Load
# 2. Data Conversion
# 3. ML - Training
#
# ### ETL
# Data is
# 1. Read in from storage
# 2. Transformed to emphasize key features
# 3. Loaded into volatile memory for conversion
#
# ### Data Conversion
# Features are
# 1. Broken into (labels, data) pairs
# 2. Distributed across many workers
# 3. Converted into compressed sparse row (CSR) matrix format for XGBoost
#
# ### Machine Learning
# The CSR data is fed into a distributed training session with Dask-XGBoost
# #### Imports statements
import numpy as np
import dask_xgboost as dxgb_gpu
import dask
import dask_cudf
from dask.delayed import delayed
from dask.distributed import Client, wait
import xgboost as xgb
import cudf
from cudf.dataframe import DataFrame
from collections import OrderedDict
import gc
from glob import glob
import os
# #### Define the # of GPUs used
# Change the GPU 8 to be GPU X where X = number of GPUs in your system (e.g. 4 for DGX Station, 8 for DGX-1, 16 for DGX-2).
#
# +
# Discover this host's IP, tear down any previous dask cluster, then start a
# fresh dask scheduler plus GPU workers via the helper shell script.
import subprocess
cmd = "hostname --all-ip-addresses"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
IPADDR = str(output.decode()).split()[0]  # first IP address reported by hostname
cmd = "../utils/dask-setup.sh 0"  # 0 workers -> shuts down any running cluster
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
# Start 8 GPU workers; scheduler port 8786, dashboard 8787
cmd = "../utils/dask-setup.sh rapids GPU 8 8786 8787 8790 " + str(IPADDR) + " MASTER INFO"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print(output.decode())
# +
import dask
from dask.delayed import delayed
from dask.distributed import Client, wait
# Connect a client to the freshly started scheduler
_client = IPADDR + str(":8786")
client = dask.distributed.Client(_client)
client
# -
# #### Define the paths to data and set the size of the dataset
# to download data for this notebook, visit https://rapidsai.github.io/demos/datasets/mortgage-data and update the following paths accordingly
acq_data_path = "/path/to/mortgage/acq"
perf_data_path = "/path/to/mortgage/perf"
col_names_path = "/path/to/mortgage/names.csv"
start_year = 2000
end_year = 2016 # end_year is inclusive
part_count = 16 # the number of data files to train against
# +
def initialize_rmm_pool():
    """Enable the RMM pool allocator on a dask worker (run via client.run)."""
    from librmm_cffi import librmm_config as rmm_cfg
    rmm_cfg.use_pool_allocator = True
    #rmm_cfg.initial_pool_size = 2<<30 # set to 2GiB. Default is 1/2 total GPU memory
    import cudf
    return cudf._gdf.rmm_initialize()
def initialize_rmm_no_pool():
    """Re-initialise RMM on a worker with pooling disabled (used before training)."""
    from librmm_cffi import librmm_config as rmm_cfg
    rmm_cfg.use_pool_allocator = False
    import cudf
    return cudf._gdf.rmm_initialize()
# -
# Switch every dask worker to pooled RMM allocation for the ETL phase
client.run(initialize_rmm_pool)
# #### Define functions to encapsulate the workflow into a single call
# +
def run_dask_task(func, **kwargs):
    """Invoke *func* with the supplied keyword arguments and return its result.

    Thin indirection so delayed(run_gpu_workflow) invocations read uniformly
    at the call site.
    """
    result = func(**kwargs)
    return result
def process_quarter_gpu(year=2000, quarter=1, perf_file=""):
    """Submit the full ETL workflow for one performance file to the cluster.

    Builds a delayed run_gpu_workflow task and schedules it immediately via
    the dask client; returns a Future for the resulting Arrow table.
    """
    ml_arrays = run_dask_task(delayed(run_gpu_workflow),
                              quarter=quarter,
                              year=year,
                              perf_file=perf_file)
    # fifo_timeout="0ms" submits without batching; graph optimisation is
    # skipped so the task runs as written
    return client.compute(ml_arrays,
                          optimize_graph=False,
                          fifo_timeout="0ms")
def null_workaround(df, **kwargs):
    """Fill nulls with a -1 sentinel so downstream steps see no missing values.

    Categorical columns are first cast to int32 codes; numeric columns are
    filled directly. Mutates and returns the same frame.
    """
    numeric_kinds = ('int8', 'int16', 'int32', 'int64', 'float32', 'float64')
    for name, dtype in df.dtypes.items():
        kind = str(dtype)
        if kind == "category":
            df[name] = df[name].astype('int32').fillna(-1)
        if kind in numeric_kinds:
            df[name] = df[name].fillna(-1)
    return df
def run_gpu_workflow(quarter=1, year=2000, perf_file="", **kwargs):
    """End-to-end GPU ETL for one quarter: load, feature-engineer, clean.

    Loads the acquisition file for (year, quarter) plus the given
    performance file, builds delinquency features, joins everything and
    returns the final frame via last_mile_cleaning (an Arrow table).
    """
    names = gpu_load_names()
    acq_gdf = gpu_load_acquisition_csv(acquisition_path= acq_data_path + "/Acquisition_"
                                      + str(year) + "Q" + str(quarter) + ".txt")
    # Replace raw seller names with the normalised names from names.csv
    acq_gdf = acq_gdf.merge(names, how='left', on=['seller_name'])
    acq_gdf.drop_column('seller_name')
    acq_gdf['seller_name'] = acq_gdf['new']
    acq_gdf.drop_column('new')
    perf_df_tmp = gpu_load_performance_csv(perf_file)
    gdf = perf_df_tmp
    # Per-loan "ever delinquent" flags and first-delinquency dates
    everdf = create_ever_features(gdf)
    delinq_merge = create_delinq_features(gdf)
    everdf = join_ever_delinq_features(everdf, delinq_merge)
    del(delinq_merge)
    # 12-month-forward label construction; intermediates deleted eagerly
    # to release GPU memory as soon as possible
    joined_df = create_joined_df(gdf, everdf)
    testdf = create_12_mon_features(joined_df)
    joined_df = combine_joined_12_mon(joined_df, testdf)
    del(testdf)
    perf_df = final_performance_delinquency(gdf, joined_df)
    del(gdf, joined_df)
    final_gdf = join_perf_acq_gdfs(perf_df, acq_gdf)
    del(perf_df)
    del(acq_gdf)
    final_gdf = last_mile_cleaning(final_gdf)
    return final_gdf
def gpu_load_performance_csv(performance_path, **kwargs):
    """ Loads performance data

    Reads a pipe-delimited Fannie Mae performance file with a fixed column
    schema (names/dtypes below). skiprows=1 skips the first line —
    presumably a header; confirm against the data files.

    Returns
    -------
    GPU DataFrame
    """
    cols = [
        "loan_id", "monthly_reporting_period", "servicer", "interest_rate", "current_actual_upb",
        "loan_age", "remaining_months_to_legal_maturity", "adj_remaining_months_to_maturity",
        "maturity_date", "msa", "current_loan_delinquency_status", "mod_flag", "zero_balance_code",
        "zero_balance_effective_date", "last_paid_installment_date", "foreclosed_after",
        "disposition_date", "foreclosure_costs", "prop_preservation_and_repair_costs",
        "asset_recovery_costs", "misc_holding_expenses", "holding_taxes", "net_sale_proceeds",
        "credit_enhancement_proceeds", "repurchase_make_whole_proceeds", "other_foreclosure_proceeds",
        "non_interest_bearing_upb", "principal_forgiveness_upb", "repurchase_make_whole_proceeds_flag",
        "foreclosure_principal_write_off_amount", "servicing_activity_indicator"
    ]
    # Ordered so list(dtypes.values()) lines up positionally with cols
    dtypes = OrderedDict([
        ("loan_id", "int64"),
        ("monthly_reporting_period", "date"),
        ("servicer", "category"),
        ("interest_rate", "float64"),
        ("current_actual_upb", "float64"),
        ("loan_age", "float64"),
        ("remaining_months_to_legal_maturity", "float64"),
        ("adj_remaining_months_to_maturity", "float64"),
        ("maturity_date", "date"),
        ("msa", "float64"),
        ("current_loan_delinquency_status", "int32"),
        ("mod_flag", "category"),
        ("zero_balance_code", "category"),
        ("zero_balance_effective_date", "date"),
        ("last_paid_installment_date", "date"),
        ("foreclosed_after", "date"),
        ("disposition_date", "date"),
        ("foreclosure_costs", "float64"),
        ("prop_preservation_and_repair_costs", "float64"),
        ("asset_recovery_costs", "float64"),
        ("misc_holding_expenses", "float64"),
        ("holding_taxes", "float64"),
        ("net_sale_proceeds", "float64"),
        ("credit_enhancement_proceeds", "float64"),
        ("repurchase_make_whole_proceeds", "float64"),
        ("other_foreclosure_proceeds", "float64"),
        ("non_interest_bearing_upb", "float64"),
        ("principal_forgiveness_upb", "float64"),
        ("repurchase_make_whole_proceeds_flag", "category"),
        ("foreclosure_principal_write_off_amount", "float64"),
        ("servicing_activity_indicator", "category")
    ])
    print(performance_path)
    return cudf.read_csv(performance_path, names=cols, delimiter='|', dtype=list(dtypes.values()), skiprows=1)
def gpu_load_acquisition_csv(acquisition_path, **kwargs):
    """ Loads acquisition data

    Reads a pipe-delimited Fannie Mae acquisition file with a fixed column
    schema (names/dtypes below). skiprows=1 skips the first line —
    presumably a header; confirm against the data files.

    Returns
    -------
    GPU DataFrame
    """
    cols = [
        'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',
        'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',
        'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',
        'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',
        'relocation_mortgage_indicator'
    ]
    # Ordered so list(dtypes.values()) lines up positionally with cols
    dtypes = OrderedDict([
        ("loan_id", "int64"),
        ("orig_channel", "category"),
        ("seller_name", "category"),
        ("orig_interest_rate", "float64"),
        ("orig_upb", "int64"),
        ("orig_loan_term", "int64"),
        ("orig_date", "date"),
        ("first_pay_date", "date"),
        ("orig_ltv", "float64"),
        ("orig_cltv", "float64"),
        ("num_borrowers", "float64"),
        ("dti", "float64"),
        ("borrower_credit_score", "float64"),
        ("first_home_buyer", "category"),
        ("loan_purpose", "category"),
        ("property_type", "category"),
        ("num_units", "int64"),
        ("occupancy_status", "category"),
        ("property_state", "category"),
        ("zip", "int64"),
        ("mortgage_insurance_percent", "float64"),
        ("product_type", "category"),
        ("coborrow_credit_score", "float64"),
        ("mortgage_insurance_type", "float64"),
        ("relocation_mortgage_indicator", "category")
    ])
    print(acquisition_path)
    return cudf.read_csv(acquisition_path, names=cols, delimiter='|', dtype=list(dtypes.values()), skiprows=1)
def gpu_load_names(**kwargs):
    """ Loads names used for renaming the banks

    Reads the pipe-delimited mapping file at the module-level
    col_names_path: raw seller_name -> normalised 'new' name.

    Returns
    -------
    GPU DataFrame
    """
    cols = [
        'seller_name', 'new'
    ]
    dtypes = OrderedDict([
        ("seller_name", "category"),
        ("new", "category"),
    ])
    return cudf.read_csv(col_names_path, names=cols, delimiter='|', dtype=list(dtypes.values()), skiprows=1)
# -
def create_ever_features(gdf, **kwargs):
    """Per-loan 'ever delinquent' indicator flags.

    Takes the max delinquency status ever observed for each loan_id and
    derives ever_30 / ever_90 / ever_180 int8 flags (status thresholds
    1, 3 and 6 respectively).
    """
    everdf = gdf[['loan_id', 'current_loan_delinquency_status']]
    everdf = everdf.groupby('loan_id', method='hash').max()
    del(gdf)
    everdf['ever_30'] = (everdf['max_current_loan_delinquency_status'] >= 1).astype('int8')
    everdf['ever_90'] = (everdf['max_current_loan_delinquency_status'] >= 3).astype('int8')
    everdf['ever_180'] = (everdf['max_current_loan_delinquency_status'] >= 6).astype('int8')
    everdf.drop_column('max_current_loan_delinquency_status')
    return everdf
def create_delinq_features(gdf, **kwargs):
    """First date each loan reached 30/90/180-day delinquency.

    For each threshold (status >= 1/3/6) take the earliest
    monthly_reporting_period per loan, then left-join the three results
    on loan_id; dates missing after the joins are filled with the
    1970-01-01 epoch sentinel.
    """
    delinq_gdf = gdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status']]
    del(gdf)
    delinq_30 = delinq_gdf.query('current_loan_delinquency_status >= 1')[['loan_id', 'monthly_reporting_period']].groupby('loan_id', method='hash').min()
    delinq_30['delinquency_30'] = delinq_30['min_monthly_reporting_period']
    delinq_30.drop_column('min_monthly_reporting_period')
    delinq_90 = delinq_gdf.query('current_loan_delinquency_status >= 3')[['loan_id', 'monthly_reporting_period']].groupby('loan_id', method='hash').min()
    delinq_90['delinquency_90'] = delinq_90['min_monthly_reporting_period']
    delinq_90.drop_column('min_monthly_reporting_period')
    delinq_180 = delinq_gdf.query('current_loan_delinquency_status >= 6')[['loan_id', 'monthly_reporting_period']].groupby('loan_id', method='hash').min()
    delinq_180['delinquency_180'] = delinq_180['min_monthly_reporting_period']
    delinq_180.drop_column('min_monthly_reporting_period')
    del(delinq_gdf)
    delinq_merge = delinq_30.merge(delinq_90, how='left', on=['loan_id'], type='hash')
    delinq_merge['delinquency_90'] = delinq_merge['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
    delinq_merge = delinq_merge.merge(delinq_180, how='left', on=['loan_id'], type='hash')
    delinq_merge['delinquency_180'] = delinq_merge['delinquency_180'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
    del(delinq_30)
    del(delinq_90)
    del(delinq_180)
    return delinq_merge
def join_ever_delinq_features(everdf_tmp, delinq_merge, **kwargs):
    """Attach first-delinquency dates to the per-loan ever_* flags.

    Loans that never reached a threshold get the 1970-01-01 epoch sentinel
    for the corresponding delinquency date.
    """
    everdf = everdf_tmp.merge(delinq_merge, on=['loan_id'], how='left', type='hash')
    del(everdf_tmp)
    del(delinq_merge)
    everdf['delinquency_30'] = everdf['delinquency_30'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
    everdf['delinquency_90'] = everdf['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
    everdf['delinquency_180'] = everdf['delinquency_180'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
    return everdf
def create_joined_df(gdf, everdf, **kwargs):
    """Join monthly performance rows with the per-loan ever/delinquency features.

    Renames monthly_reporting_period to timestamp, splits out month/year
    columns, and fills nulls with sentinels (999999999 for upb_12, -1
    elsewhere) before the left join on loan_id.
    """
    test = gdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status', 'current_actual_upb']]
    del(gdf)
    test['timestamp'] = test['monthly_reporting_period']
    test.drop_column('monthly_reporting_period')
    test['timestamp_month'] = test['timestamp'].dt.month
    test['timestamp_year'] = test['timestamp'].dt.year
    test['delinquency_12'] = test['current_loan_delinquency_status']
    test.drop_column('current_loan_delinquency_status')
    test['upb_12'] = test['current_actual_upb']
    test.drop_column('current_actual_upb')
    test['upb_12'] = test['upb_12'].fillna(999999999)
    test['delinquency_12'] = test['delinquency_12'].fillna(-1)
    joined_df = test.merge(everdf, how='left', on=['loan_id'], type='hash')
    del(everdf)
    del(test)
    joined_df['ever_30'] = joined_df['ever_30'].fillna(-1)
    joined_df['ever_90'] = joined_df['ever_90'].fillna(-1)
    joined_df['ever_180'] = joined_df['ever_180'].fillna(-1)
    joined_df['delinquency_30'] = joined_df['delinquency_30'].fillna(-1)
    joined_df['delinquency_90'] = joined_df['delinquency_90'].fillna(-1)
    joined_df['delinquency_180'] = joined_df['delinquency_180'].fillna(-1)
    joined_df['timestamp_year'] = joined_df['timestamp_year'].astype('int32')
    joined_df['timestamp_month'] = joined_df['timestamp_month'].astype('int32')
    return joined_df
def create_12_mon_features(joined_df, **kwargs):
    """Build the 12-month-forward delinquency label for each loan-month.

    For each offset y in 1..12, months are bucketed into shifted 12-month
    windows (josh_mody_n). Within a window, delinquency_12 becomes 1 when
    the max status exceeds 3 or the min UPB hits 0. The constant 24000 is
    2000*12 — it rebases the linear month index (year*12 + month) around
    year 2000.
    """
    testdfs = []
    n_months = 12
    for y in range(1, n_months + 1):
        tmpdf = joined_df[['loan_id', 'timestamp_year', 'timestamp_month', 'delinquency_12', 'upb_12']]
        # Linear month index, then shifted 12-month window id
        tmpdf['josh_months'] = tmpdf['timestamp_year'] * 12 + tmpdf['timestamp_month']
        tmpdf['josh_mody_n'] = ((tmpdf['josh_months'].astype('float64') - 24000 - y) / 12).floor()
        tmpdf = tmpdf.groupby(['loan_id', 'josh_mody_n'], method='hash').agg({'delinquency_12': 'max','upb_12': 'min'})
        tmpdf['delinquency_12'] = (tmpdf['max_delinquency_12']>3).astype('int32')
        tmpdf['delinquency_12'] +=(tmpdf['min_upb_12']==0).astype('int32')
        tmpdf.drop_column('max_delinquency_12')
        tmpdf['upb_12'] = tmpdf['min_upb_12']
        tmpdf.drop_column('min_upb_12')
        # Recover the (year, month) the window's label applies to
        tmpdf['timestamp_year'] = (((tmpdf['josh_mody_n'] * n_months) + 24000 + (y - 1)) / 12).floor().astype('int16')
        tmpdf['timestamp_month'] = np.int8(y)
        tmpdf.drop_column('josh_mody_n')
        testdfs.append(tmpdf)
        del(tmpdf)
    del(joined_df)
    return cudf.concat(testdfs)
def combine_joined_12_mon(joined_df, testdf, **kwargs):
    """Swap the raw delinquency_12/upb_12 columns for the windowed features.

    Drops the originals and left-joins create_12_mon_features output on
    (loan_id, timestamp_year, timestamp_month); dtypes are narrowed to
    match testdf's int16/int8 keys.
    """
    joined_df.drop_column('delinquency_12')
    joined_df.drop_column('upb_12')
    joined_df['timestamp_year'] = joined_df['timestamp_year'].astype('int16')
    joined_df['timestamp_month'] = joined_df['timestamp_month'].astype('int8')
    return joined_df.merge(testdf, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month'], type='hash')
def final_performance_delinquency(gdf, joined_df, **kwargs):
    """Merge the windowed delinquency label back onto the full performance frame.

    Both frames are null-cleaned first; the join keys (year, month) are
    derived from monthly_reporting_period and dropped again after the merge.
    """
    merged = null_workaround(gdf)
    joined_df = null_workaround(joined_df)
    merged['timestamp_month'] = merged['monthly_reporting_period'].dt.month
    merged['timestamp_month'] = merged['timestamp_month'].astype('int8')
    merged['timestamp_year'] = merged['monthly_reporting_period'].dt.year
    merged['timestamp_year'] = merged['timestamp_year'].astype('int16')
    merged = merged.merge(joined_df, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month'], type='hash')
    merged.drop_column('timestamp_year')
    merged.drop_column('timestamp_month')
    return merged
def join_perf_acq_gdfs(perf, acq, **kwargs):
    """Left-join acquisition features onto performance rows by loan_id.

    Both frames are passed through null_workaround first so the hash join
    sees no nulls.
    """
    cleaned_perf = null_workaround(perf)
    cleaned_acq = null_workaround(acq)
    return cleaned_perf.merge(cleaned_acq, how='left', on=['loan_id'], type='hash')
def last_mile_cleaning(df, **kwargs):
    """Final pass before training.

    Drops id/date/leakage columns, converts categoricals to float32 codes,
    binarises the delinquency_12 label, fills remaining nulls with -1, and
    returns the frame as an Arrow table (presumably so results can be
    staged off-GPU between ETL and training; confirm).
    """
    drop_list = [
        'loan_id', 'orig_date', 'first_pay_date', 'seller_name',
        'monthly_reporting_period', 'last_paid_installment_date', 'maturity_date', 'ever_30', 'ever_90', 'ever_180',
        'delinquency_30', 'delinquency_90', 'delinquency_180', 'upb_12',
        'zero_balance_effective_date','foreclosed_after', 'disposition_date','timestamp'
    ]
    for column in drop_list:
        df.drop_column(column)
    for col, dtype in df.dtypes.iteritems():
        if str(dtype)=='category':
            df[col] = df[col].cat.codes
        df[col] = df[col].astype('float32')
    # Binary label: any positive windowed delinquency counts as delinquent
    df['delinquency_12'] = df['delinquency_12'] > 0
    df['delinquency_12'] = df['delinquency_12'].fillna(False).astype('int32')
    for column in df.columns:
        df[column] = df[column].fillna(-1)
    return df.to_arrow(index=False)
# ## ETL
# #### Perform all of ETL with a single call to
# ```python
# process_quarter_gpu(year=year, quarter=quarter, perf_file=file)
# ```
# +
# %%time
# NOTE: The ETL calculates additional features which are then dropped before creating the XGBoost DMatrix.
# This can be optimized to avoid calculating the dropped features.
# Walk every quarter from start_year..end_year and submit one ETL task per
# matching performance file; futures accumulate in gpu_dfs.
gpu_dfs = []
gpu_time = 0
quarter = 1
year = start_year
count = 0
while year <= end_year:
    for file in glob(os.path.join(perf_data_path + "/Performance_" + str(year) + "Q" + str(quarter) + "*")):
        gpu_dfs.append(process_quarter_gpu(year=year, quarter=quarter, perf_file=file))
        count += 1
    quarter += 1
    if quarter == 5:
        year += 1
        quarter = 1
# Block until every submitted ETL future has finished
wait(gpu_dfs)
# -
# Tear down the RMM pool and re-initialise without pooling before training
client.run(cudf._gdf.rmm_finalize)
client.run(initialize_rmm_no_pool)
# ## Machine Learning
# #### Set the training parameters
# XGBoost training parameters: GPU histogram tree method, one GPU per
# worker, distributed through dask. 'nround' is read back out below as
# num_boost_round.
dxgb_gpu_params = {
    'nround': 100,
    'max_depth': 8,
    'max_leaves': 2**8,
    'alpha': 0.9,
    'eta': 0.1,
    'gamma': 0.1,
    'learning_rate': 0.1,
    'subsample': 1,
    'reg_lambda': 1,
    'scale_pos_weight': 2,
    'min_child_weight': 30,
    'tree_method': 'gpu_hist',
    'n_gpus': 1,
    'distributed_dask': True,
    'loss': 'ls',
    'objective': 'gpu:reg:linear',
    'max_features': 'auto',
    'criterion': 'friedman_mse',
    'grow_policy': 'lossguide',
    'verbose': True
}
# #### Load the data from host memory, and convert to CSR
# +
# %%time
# Pull the Arrow tables back into GPU DataFrames, bucket them by the worker
# that holds them, concatenate per worker, then split into (labels, data)
# and build one XGBoost DMatrix per worker.
gpu_dfs = [delayed(DataFrame.from_arrow)(gpu_df) for gpu_df in gpu_dfs[:part_count]]
gpu_dfs = [gpu_df for gpu_df in gpu_dfs]
wait(gpu_dfs)
# Map each delayed frame to the worker that currently holds its data
tmp_map = [(gpu_df, list(client.who_has(gpu_df).values())[0]) for gpu_df in gpu_dfs]
new_map = {}
for key, value in tmp_map:
    if value not in new_map:
        new_map[value] = [key]
    else:
        new_map[value].append(key)
del(tmp_map)
# One concatenated frame per worker
gpu_dfs = []
for list_delayed in new_map.values():
    gpu_dfs.append(delayed(cudf.concat)(list_delayed))
del(new_map)
# (label, features) pairs: delinquency_12 is the training target
gpu_dfs = [(gpu_df[['delinquency_12']], gpu_df[delayed(list)(gpu_df.columns.difference(['delinquency_12']))]) for gpu_df in gpu_dfs]
gpu_dfs = [(gpu_df[0].persist(), gpu_df[1].persist()) for gpu_df in gpu_dfs]
gpu_dfs = [dask.delayed(xgb.DMatrix)(gpu_df[1], gpu_df[0]) for gpu_df in gpu_dfs]
gpu_dfs = [gpu_df.persist() for gpu_df in gpu_dfs]
gc.collect()
wait(gpu_dfs)
# -
# #### Train the Gradient Boosted Decision Tree with a single call to
# ```python
# dask_xgboost.train(client, params, data, labels, num_boost_round=dxgb_gpu_params['nround'])
# ```
# %%time
# Labels were already baked into each DMatrix above (xgb.DMatrix(data,
# labels)), so no separate labels object is passed here
labels = None
bst = dxgb_gpu.train(client, dxgb_gpu_params, gpu_dfs, labels, num_boost_round=dxgb_gpu_params['nround'])
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
sns.set_style('darkgrid')
# # **AR2 C Data**
# Load the AR2 C (D205 dye) J-V sweep, flip the applied-potential sign and
# convert measured current (A) to current density (mA/cm2) using the
# 0.125 cm^2 masked active area.
AR2c = pd.read_csv('./DATA/AR2 C', sep=';')
AR2c.set_index('Index', inplace=True)
AR2c.rename(columns={'Potential applied (V)': 'Applied Potential (V)', 'WE(1).Current (A)': 'Measured J (mA/cm2)', 'WE(1).Potential (V)': 'Measured Potential (V)'}, inplace=True)
AR2c["Applied Potential (V)"] *= -1
# AR2c["Applied Potential (V)"].map(lambda x: x*-1) #
#active surface area mask, A = 0.125 cm^2
AR2c["Measured J (mA/cm2)"] *= 1000/(0.125)
AR2c.head()
# ## Plot the current density and get the short circuit current and open circuit voltage
sns.lineplot(data=AR2c, x="Applied Potential (V)", y="Measured J (mA/cm2)")
plt.title('AR2 C')
plt.ylabel('Real Current Density $(\\frac{mA}{cm^2})$')
plt.xlim([0,1])
plt.ylim([0,15])
# NOTE(review): filename says 'A2C' while the other figures use 'AR2...' — confirm intended
plt.savefig('A2C_real_current_density_cropped', dpi=300)
# ## Jsc and Voc
# Voc = open-circuit voltage: the applied potential where J crosses zero
min_J_idx = AR2c['Measured J (mA/cm2)'].abs().idxmin()
Voc = AR2c.loc[min_J_idx, "Applied Potential (V)"]
Voc = round(Voc, 3)
# Fix: Voc is a voltage — print V, not mA/cm2 (units were swapped with Jsc)
print(f"Voc = {Voc} V")
# AR2c.iloc[195:198,:]
# Jsc = short-circuit current density: J where V crosses zero
min_V_idx = AR2c['Applied Potential (V)'].abs().idxmin()
Jsc = AR2c.loc[min_V_idx, 'Measured J (mA/cm2)']
Jsc = round(Jsc, 3)
print(f"Jsc = {Jsc} mA/cm2")
# ## Calculate Power Density
# P = J * V at each sweep point
AR2c["Power Density (mW/cm2)"] = AR2c["Measured J (mA/cm2)"]*AR2c["Applied Potential (V)"]
AR2c.head()
# ## Plot the Power Density and get the Max Power
sns.lineplot(data=AR2c, y="Power Density (mW/cm2)", x="Applied Potential (V)")
plt.title('AR2 C')
plt.ylabel('Power Density $\\frac{mW}{cm^2}$')
# plt.xlim([0,1])
# plt.ylim([0,8])
plt.savefig('AR2c_power_density', dpi=300)
# ## Calculate Jmp and Vmp
# +
# Maximum-power point: J and V at the row with the largest power density
max_power = AR2c["Power Density (mW/cm2)"].max()
max_power_idx = AR2c["Power Density (mW/cm2)"].idxmax()
max_power = round(max_power, 3)
print(f"Max Power = J {max_power} (mW/cm2)")
Jmp = AR2c.loc[max_power_idx, 'Measured J (mA/cm2)']
Jmp = round(Jmp, 3)
print(f"Jmp = {Jmp} (mA/cm2)")
Vmp = AR2c.loc[max_power_idx, 'Applied Potential (V)']
Vmp = round(Vmp, 3)
print(f"Vmp = {Vmp} (V)")
# -
# ## Calculate the Fill Factor and Percent Efficiency
# +
ff = (Jmp*Vmp)/(Jsc*Voc) # Fill Factor
ff = round(ff,3)
print(f"Fill Factor = {ff}")
# NOTE(review): PCE = Jsc*Voc*ff / 100 assumes 100 mW/cm2 incident power
# (1 sun); confirm whether an additional *100 for percent was intended.
pce = Jsc*Voc*ff/100
pce = round(pce,3)
print(f"PCE = {pce}")
# -
# ## Plot the IPCE and Absorbance Spectra
# IPCE = number of extracted electrons in SC conditions / number of absorbed photon
# Active surface area (no mask): A=0.25 cm2
# NOTE(review): sep is two adjacent string literals, '[\t, ' ']' == '[\t, ]'
# (a regex class matching tab/comma/space/']'); confirm this is intended.
AR2c_ipce = pd.read_csv('DATA/AR C 2 IPCE.txt', sep = '[\t, ' ']', names = ['a', 'wavelength (nm)', 'c'])
AR2c_ipce.drop_duplicates(['wavelength (nm)', 'c'], inplace=True) #the second half is just the same as the first. repeated but with c1=.1 instead
# AR2c_ipce['IPCE'] = AR2c_ipce['c']*1/.25
# Normalise by the unmasked active area (0.25 cm2)
AR2c_ipce['IPCE'] = AR2c_ipce.c.apply(lambda x: x/.25)
AR2c_ipce.head()
# ## Plot IPCE
sns.scatterplot(data=AR2c_ipce, x='wavelength (nm)', y='IPCE', label='D205')
plt.title('D205 IPCE vs. Wavelength')
plt.savefig('AR2C_IPCE_vs_Wavelength', dpi=300)
# ## Plot Absorbance
# +
AR2c_acn = pd.read_csv('DATA/D205_acn toluen 1_1.txt', header=19, skipfooter=47, sep='\t', names = ['Wavelength (nm)', 'Absorbance'])
# NOTE(review): the two apply() results below are discarded — the columns
# are not actually converted in place; confirm whether assignment was intended.
AR2c_acn['Wavelength (nm)'].apply(lambda x: float(x))
AR2c_acn['Absorbance'].apply(lambda x: float(x))
AR2c_acn = AR2c_acn[AR2c_acn["Wavelength (nm)"] >= 400.00]
sns.lineplot(data=AR2c_acn, x="Wavelength (nm)", y='Absorbance')
plt.title('D205 Absorbance')
plt.xlim(left=400, right=800)
plt.savefig('AR2c_Absorbance', dpi=300)
# -
# # **AR2 C Dark**
# Dark (no illumination) sweep with the same preprocessing as AR2 C; the
# measured current is shifted up by Jsc to approximate the ideal diode curve.
AR2cdark = pd.read_csv('./DATA/AR2 C dark', sep=';')
AR2cdark.set_index('Index', inplace=True)
AR2cdark.rename(columns={'Potential applied (V)': 'Applied Potential (V)', 'WE(1).Current (A)': 'Measured J (mA/cm2)', 'WE(1).Potential (V)': 'Measured Potential (V)'}, inplace=True)
AR2cdark["Applied Potential (V)"] *= -1
# AR2c["Applied Potential (V)"].map(lambda x: x*-1) #
#active surface area mask, A = 0.125 cm^2
AR2cdark["Measured J (mA/cm2)"] *= 1000/(0.125)
AR2cdark["Measured J (mA/cm2)"] += Jsc # need to shift by the short circuit current to see the ideal situation
AR2cdark.head()
# ## Plot current density and Compare it to the ambient light version
ax = AR2c.plot(x="Applied Potential (V)", y="Measured J (mA/cm2)", label='D205 Real')
AR2cdark.plot(ax=ax, x="Applied Potential (V)", y="Measured J (mA/cm2)", label='D205 Ideal')
plt.title('D205 Real vs. Ideal Comparison')
plt.ylabel('Current Density $(\\frac{mA}{cm^2})$')
plt.savefig('AR2c_vs_dark_J(V)', dpi=300)
plt.xlim([0,1])
plt.ylim([-1,15])
plt.savefig('AR2C_bright_and_dark_Current_Density', dpi=300)
# ## Calculate Jsc and Voc
# +
# Voc = applied potential where J crosses zero
min_J_idx = AR2cdark['Measured J (mA/cm2)'].abs().idxmin()
Vocdark = AR2cdark.loc[min_J_idx, "Applied Potential (V)"]
Vocdark = round(Vocdark, 3)
# Fix: print voltage in V and current density in mA/cm2 (units were swapped)
print(f"Voc = {Vocdark} V")
# AR2c.iloc[195:198,:]
# Jsc = J where V crosses zero
min_V_idx = AR2cdark['Applied Potential (V)'].abs().idxmin()
Jscdark = AR2cdark.loc[min_V_idx, 'Measured J (mA/cm2)']
Jscdark = round(Jscdark, 3)
print(f"Jsc = {Jscdark} mA/cm2")
# Inspect rows around the V = 0 crossing
AR2cdark.iloc[min_V_idx-10:min_V_idx+10, :]
# -
# ## Calculate Power Density
AR2cdark["Power Density (mW/cm2)"] = AR2cdark["Measured J (mA/cm2)"]*AR2cdark["Applied Potential (V)"]
AR2cdark.head()
# sns.lineplot(data=AR2cdark, y="Power Density (mW/cm2)", x="Applied Potential (V)")
# plt.title('AR2 C Dark')
# plt.ylabel('Power Density $\\frac{mW}{cm^2}$')
# Overlay light vs. dark power curves on shared axes
ax = AR2c.plot(y="Power Density (mW/cm2)", x="Applied Potential (V)", label='AR2 C')
AR2cdark.plot(ax=ax, y="Power Density (mW/cm2)", x="Applied Potential (V)", label='AR2 C Dark')
plt.title('AR2 C and AR2 C Dark Power Comparison')
plt.ylabel('Power Density $\\frac{mW}{cm^2}$')
plt.savefig('AR2c_vs_dark_P(V)', dpi=300)
# plt.xlim(left=0)
# plt.ylim([0,15])
# ## Calculate Jmp and Vmp
# +
# Maximum-power point of the shifted dark curve
max_powerdark = AR2cdark["Power Density (mW/cm2)"].max()
max_power_idx = AR2cdark["Power Density (mW/cm2)"].idxmax()
max_powerdark = round(max_powerdark, 3)
print(f"Max Power = J {max_powerdark} (mW/cm2)")
Jmpdark = AR2cdark.loc[max_power_idx, 'Measured J (mA/cm2)']
Jmpdark = round(Jmpdark,3)
print(f"Jmp = {Jmpdark} (mA/cm2)")
Vmpdark = AR2cdark.loc[max_power_idx, 'Applied Potential (V)']
Vmpdark = round(Vmpdark, 3)
print(f"Vmp = {Vmpdark} (V)")
# -
# ## Calculate FF and PCE
# +
ffdark = (Jmpdark*Vmpdark)/(Jscdark*Vocdark) # Fill Factor
ffdark = round(ffdark,3)
print(f"Fill Factor = {ffdark}")
# NOTE(review): same /100 convention as the AR2 C PCE above — confirm units
pcedark = Jscdark*Vocdark*ffdark/100
pcedark = round(pcedark,3)
print(f"PCE = {pcedark}")
# -
# # **AR2 C 1Week**
# Same cell re-measured one week later (per filename); identical preprocessing
AR2c1week = pd.read_csv('./DATA/AR2 C 1week', sep=';')
AR2c1week.set_index('Index', inplace=True)
AR2c1week.rename(columns={'Potential applied (V)': 'Applied Potential (V)', 'WE(1).Current (A)': 'Measured J (mA/cm2)', 'WE(1).Potential (V)': 'Measured Potential (V)'}, inplace=True)
AR2c1week["Applied Potential (V)"] *= -1
AR2c1week["Measured J (mA/cm2)"] *= 1000/(0.125)
AR2c1week.head()
# ## Plot Current Density, 1 week comparison
# +
ax = AR2c.plot(x="Applied Potential (V)", y="Measured J (mA/cm2)", label='AR2c')
AR2c1week.plot(ax=ax, x="Applied Potential (V)", y="Measured J (mA/cm2)", label='AR2c 1 Week')
# sns.lineplot(data=AR2c1week, x="Applied Potential (V)", y="Measured J (mA/cm2)", label='AR2c 1 Week')
plt.title('AR2 C and AR2 C 1 Week Comparison')
plt.ylabel('Real Current Density $(\\frac{mA}{cm^2})$')
plt.savefig('AR2C_real_vs_1week_curent_density', dpi=300)
# -
# ## Calculate Jsc and Voc
# +
# Voc = applied potential where J crosses zero
min_J_idx = AR2c1week['Measured J (mA/cm2)'].abs().idxmin()
Voc1week = AR2c1week.loc[min_J_idx, "Applied Potential (V)"]
Voc1week = round(Voc1week, 3)
# Fix: correct the swapped units in the printouts (Voc in V, Jsc in mA/cm2)
print(f"Voc = {Voc1week} V")
# AR2c1week.iloc[(min_J_idx-3):(min_J_idx+1),:]
# Jsc = J where V crosses zero
min_V_idx = AR2c1week['Applied Potential (V)'].abs().idxmin()
Jsc1week = AR2c1week.loc[min_V_idx, 'Measured J (mA/cm2)']
Jsc1week = round(Jsc1week, 3)
print(f"Jsc = {Jsc1week} mA/cm2")
# -
# ## Calculate Power Density
AR2c1week["Power Density (mW/cm2)"] = AR2c1week["Measured J (mA/cm2)"]*AR2c1week["Applied Potential (V)"]
AR2c1week.head()
# sns.lineplot(data=AR2c1week, y="Power Density (mW/cm2)", x="Applied Potential (V)")
# plt.title('AR2 C 1 week')
# plt.ylabel('Power Density $\\frac{mW}{cm^2}$')
# Overlay fresh vs. 1-week power curves
ax = AR2c.plot(y="Power Density (mW/cm2)", x="Applied Potential (V)", label='AR2 C')
AR2c1week.plot(ax=ax, y="Power Density (mW/cm2)", x="Applied Potential (V)", label='AR2 C 1 week')
plt.title('AR2 C and AR2 C 1 Week Power Comparison')
plt.ylabel('Power Density $\\frac{mW}{cm^2}$')
# ## Calculate Jmp and Vmp
# +
max_power1week = AR2c1week["Power Density (mW/cm2)"].max()
max_power_idx = AR2c1week["Power Density (mW/cm2)"].idxmax()
max_power1week = round(max_power1week,3)
print(f"Max Power = {max_power1week} (mW/cm2)")
Jmp1week = AR2c1week.loc[max_power_idx, 'Measured J (mA/cm2)']
Jmp1week = round(Jmp1week,3)
print(f"Jmp = {Jmp1week} (mA/cm2)")
Vmp1week = AR2c1week.loc[max_power_idx, 'Applied Potential (V)']
Vmp1week = round(Vmp1week, 3)
print(f"Vmp = {Vmp1week} (V)")
# -
# ## Calculate FF and PCE
# +
ff1week = (Jmp1week*Vmp1week)/(Jsc1week*Voc1week) # Fill Factor
ff1week = round(ff1week,3)
print(f"Fill Factor = {ff1week}")
# NOTE(review): same /100 convention as the AR2 C PCE above — confirm units
pce1week = Jsc1week*Voc1week*ff1week/100
pce1week = round(pce1week,3)
print(f"PCE = {pce1week}")
# -
# # AR2C 1 Week Dark
# 1-week dark sweep; shifted by Jsc1week to approximate the ideal curve
AR2c1weekdark = pd.read_csv('./DATA/AR2 C 1week dark', sep=';')
AR2c1weekdark.set_index('Index', inplace=True)
AR2c1weekdark.rename(columns={'Potential applied (V)': 'Applied Potential (V)', 'WE(1).Current (A)': 'Measured J (mA/cm2)', 'WE(1).Potential (V)': 'Measured Potential (V)'}, inplace=True)
AR2c1weekdark["Applied Potential (V)"] *= -1
AR2c1weekdark["Measured J (mA/cm2)"] *= 1000/(0.125)
AR2c1weekdark["Measured J (mA/cm2)"] += Jsc1week
AR2c1weekdark.head()
# ## plot the current density comparison
ax = AR2c1week.plot(x="Applied Potential (V)", y="Measured J (mA/cm2)", label='D205 1 Week Real')
AR2c1weekdark.plot(ax=ax, x="Applied Potential (V)", y="Measured J (mA/cm2)", label='D205 1 Week Ideal')
plt.title('D205 1 Week Real vs. Ideal Comparison')
plt.ylabel('Current Density $(\\frac{mA}{cm^2})$')
plt.xlim([0,1])
plt.ylim([-1,15])
plt.savefig('AR2c_vs_dark1week_J(V)', dpi=300)
# ## Calculate Jsc and Voc for 1week dark
# +
# Voc = applied potential where J crosses zero
min_J_idx = AR2c1weekdark['Measured J (mA/cm2)'].abs().idxmin()
Voc1weekdark = AR2c1weekdark.loc[min_J_idx, "Applied Potential (V)"]
Voc1weekdark = round(Voc1weekdark, 3)
# Fix: correct the swapped units in the printouts (Voc in V, Jsc in mA/cm2)
print(f"Voc = {Voc1weekdark} V")
# AR2c1week.iloc[(min_J_idx-3):(min_J_idx+1),:]
# Jsc = J where V crosses zero
min_V_idx = AR2c1weekdark['Applied Potential (V)'].abs().idxmin()
Jsc1weekdark = AR2c1weekdark.loc[min_V_idx, 'Measured J (mA/cm2)']
Jsc1weekdark = round(Jsc1weekdark, 3)
print(f"Jsc = {Jsc1weekdark} mA/cm2")
# -
# ## Calculate Jmp and Vmp
# +
AR2c1weekdark["Power Density (mW/cm2)"] = AR2c1weekdark["Measured J (mA/cm2)"]*AR2c1weekdark["Applied Potential (V)"]
max_power1weekdark = AR2c1weekdark["Power Density (mW/cm2)"].max()
max_power_idx = AR2c1weekdark["Power Density (mW/cm2)"].idxmax()
max_power1weekdark = round(max_power1weekdark,3)
print(f"Max Power = {max_power1weekdark} (mW/cm2)")
Jmp1weekdark = AR2c1weekdark.loc[max_power_idx, 'Measured J (mA/cm2)']
Jmp1weekdark = round(Jmp1weekdark,3)
print(f"Jmp = {Jmp1weekdark} (mA/cm2)")
# Fix: read Vmp from the dark dataframe (was AR2c1week). max_power_idx
# indexes AR2c1weekdark's power maximum, so the wrong frame gave the
# voltage of an unrelated row.
Vmp1weekdark = AR2c1weekdark.loc[max_power_idx, 'Applied Potential (V)']
Vmp1weekdark = round(Vmp1weekdark, 3)
print(f"Vmp = {Vmp1weekdark} (V)")
# -
# ## Calculate FF and PCE 1week dark
# +
ff1weekdark = (Jmp1weekdark*Vmp1weekdark)/(Jsc1weekdark*Voc1weekdark) # Fill Factor
ff1weekdark = round(ff1weekdark,3)
print(f"Fill Factor = {ff1weekdark}")
# NOTE(review): same /100 convention as the AR2 C PCE above — confirm units
pce1weekdark = Jsc1weekdark*Voc1weekdark*ff1weekdark/100
pce1weekdark = round(pce1weekdark,3)
print(f"PCE = {pce1weekdark}")
# -
# # **AR2C 4h**
# 4-hour measurement of the same cell; identical preprocessing
AR2c4h = pd.read_csv('./DATA/AR2 C 4h', sep=';')
AR2c4h.set_index('Index', inplace=True)
AR2c4h.rename(columns={'Potential applied (V)': 'Applied Potential (V)', 'WE(1).Current (A)': 'Measured J (mA/cm2)', 'WE(1).Potential (V)': 'Measured Potential (V)'}, inplace=True)
AR2c4h["Applied Potential (V)"] *= -1
AR2c4h["Measured J (mA/cm2)"] *= 1000/(0.125)
AR2c4h.head()
# ## Plot Current Density, 1 week comparison
# Overlay fresh / 4 h / 1 week current-density curves
ax = AR2c.plot(x="Applied Potential (V)", y="Measured J (mA/cm2)", label='D205')
ay = AR2c4h.plot(ax=ax, x="Applied Potential (V)", y="Measured J (mA/cm2)", label='D205 4 Hours')
AR2c1week.plot(ax=ay, x="Applied Potential (V)", y="Measured J (mA/cm2)", label='D205 1 Week')
# sns.lineplot(data=AR2c4h, x="Applied Potential (V)", y="Measured J (mA/cm2)", label='AR2c 4 Hours')
# sns.lineplot(data=AR2c1week, x="Applied Potential (V)", y="Measured J (mA/cm2)", label='AR2c 1 Week')
plt.title('Current Density: D205 (Real) Comparison')
plt.ylabel('Current Density $(\\frac{mA}{cm^2})$')
plt.xlim([0,1])
plt.ylim([0,15])
plt.savefig('AR2c_all3_current_density_comparison', dpi=300)
# ## Calculate Jsc and Voc
# +
# Voc = applied potential where J crosses zero
min_J_idx = AR2c4h['Measured J (mA/cm2)'].abs().idxmin()
Voc4h = AR2c4h.loc[min_J_idx, "Applied Potential (V)"]
Voc4h = round(Voc4h, 3)
# Fix: correct the swapped units in the printouts (Voc in V, Jsc in mA/cm2)
print(f"Voc = {Voc4h} V")
# AR2c1week.iloc[(min_J_idx-3):(min_J_idx+1),:]
# Jsc = J where V crosses zero
min_V_idx = AR2c4h['Applied Potential (V)'].abs().idxmin()
Jsc4h = AR2c4h.loc[min_V_idx, 'Measured J (mA/cm2)']
Jsc4h = round(Jsc4h, 3)
print(f"Jsc = {Jsc4h} mA/cm2")
# -
# ## Calculate Power Density
# Electrical power density P = J * V at each measured point.
AR2c4h["Power Density (mW/cm2)"] = AR2c4h["Measured J (mA/cm2)"]*AR2c4h["Applied Potential (V)"]
AR2c4h.head()
# sns.lineplot(data=AR2c1week, y="Power Density (mW/cm2)", x="Applied Potential (V)")
# plt.title('AR2 C 1 week')
# plt.ylabel('Power Density $\\frac{mW}{cm^2}$')
# Overlay the fresh, 4-hour and 1-week power curves on one axis.
ax = AR2c.plot(y="Power Density (mW/cm2)", x="Applied Potential (V)", label='D205')
ay = AR2c4h.plot(ax=ax, y="Power Density (mW/cm2)", x="Applied Potential (V)", label='D205 4 Hours')
AR2c1week.plot(ax=ay, y="Power Density (mW/cm2)", x="Applied Potential (V)", label='D205 1 week')
plt.title('D205: Power Density Comparison')  # fixed duplicated word in the title
plt.ylabel('Power Density $\\frac{mW}{cm^2}$')
plt.savefig('AR2_all3comparison_Power(V)', dpi=300)
# ## Calculate Jmp and Vmp
# +
# Maximum power point: the sample with the largest P = J*V.
max_power4h = AR2c4h["Power Density (mW/cm2)"].max()
max_power_idx = AR2c4h["Power Density (mW/cm2)"].idxmax()
max_power4h = round(max_power4h,3)
print(f"Max Power = {max_power4h} (mW/cm2)")
# Current density and voltage at the maximum power point.
Jmp4h = AR2c4h.loc[max_power_idx, 'Measured J (mA/cm2)']
Jmp4h = round(Jmp4h,3)
print(f"Jmp = {Jmp4h} (mA/cm2)")
Vmp4h = AR2c4h.loc[max_power_idx, 'Applied Potential (V)']
Vmp4h = round(Vmp4h, 3)
print(f"Vmp = {Vmp4h} (V)")
# -
# ## Calculate FF and PCE
# +
ff4h = (Jmp4h*Vmp4h)/(Jsc4h*Voc4h) # Fill Factor
ff4h = round(ff4h,3)
print(f"Fill Factor = {ff4h}")
# PCE in % — the /100 presumably assumes 100 mW/cm2 (AM1.5) illumination; verify.
pce4h = Jsc4h*Voc4h*ff4h/100
pce4h = round(pce4h,3)
print(f"PCE = {pce4h}")
# -
# # AR2 4h Dark
# Load the 4-hour dark (no illumination) measurement and apply the same
# unit/sign conversions as for the illuminated data.
AR2c4hdark = pd.read_csv('./DATA/AR2 C 4h dark', sep=';')
AR2c4hdark.set_index('Index', inplace=True)
AR2c4hdark.rename(columns={'Potential applied (V)': 'Applied Potential (V)', 'WE(1).Current (A)': 'Measured J (mA/cm2)', 'WE(1).Potential (V)': 'Measured Potential (V)'}, inplace=True)
AR2c4hdark["Applied Potential (V)"] *= -1
AR2c4hdark["Measured J (mA/cm2)"] *= 1000/(0.125)
# Shift the dark diode curve up by Jsc to construct the "ideal" light J-V curve
# (superposition approximation).
AR2c4hdark["Measured J (mA/cm2)"] += Jsc4h
# ## Plot current density comparison
ax = AR2c4h.plot(x="Applied Potential (V)", y="Measured J (mA/cm2)", label='D205 4h Real')
AR2c4hdark.plot(ax=ax, x="Applied Potential (V)", y="Measured J (mA/cm2)", label='D205 4h Ideal')
plt.title('D205 4h Real vs. Ideal Comparison')
plt.ylabel('Current Density $(\\frac{mA}{cm^2})$')
plt.xlim([0,1])
plt.ylim([-1,15])
plt.savefig('AR2c_vs_dark4h_J(V)', dpi=300)
# ## Calculate Jsc and Voc
# +
# Open-circuit voltage of the ideal (dark-shifted) curve: V where J crosses zero.
min_J_idx = AR2c4hdark['Measured J (mA/cm2)'].abs().idxmin()
Voc4hdark = AR2c4hdark.loc[min_J_idx, "Applied Potential (V)" ]
Voc4hdark = round(Voc4hdark, 3)
print(f"Voc = {Voc4hdark} V")  # fixed unit label: Voc is a voltage, not mA/cm2
# AR2c1week.iloc[(min_J_idx-3):(min_J_idx+1),:]
# Short-circuit current density: J at the sample where V is closest to zero.
min_V_idx = AR2c4hdark['Applied Potential (V)'].abs().idxmin()
Jsc4hdark = AR2c4hdark.loc[min_V_idx,'Measured J (mA/cm2)']
Jsc4hdark = round(Jsc4hdark, 3)
print(f"Jsc = {Jsc4hdark} mA/cm2")  # fixed unit label: Jsc is a current density, not V
# -
# ## calculate Jmp and Vmp
# +
# Power density and maximum power point of the ideal (dark-shifted) curve.
AR2c4hdark["Power Density (mW/cm2)"] = AR2c4hdark["Measured J (mA/cm2)"]*AR2c4hdark["Applied Potential (V)"]
max_power4hdark = AR2c4hdark["Power Density (mW/cm2)"].max()
max_power_idx = AR2c4hdark["Power Density (mW/cm2)"].idxmax()
max_power4hdark = round(max_power4hdark,3)
print(f"Max Power = {max_power4hdark} (mW/cm2)")
Jmp4hdark = AR2c4hdark.loc[max_power_idx, 'Measured J (mA/cm2)']
Jmp4hdark = round(Jmp4hdark,3)
print(f"Jmp = {Jmp4hdark} (mA/cm2)")
Vmp4hdark = AR2c4hdark.loc[max_power_idx, 'Applied Potential (V)']
Vmp4hdark = round(Vmp4hdark, 3)
print(f"Vmp = {Vmp4hdark} (V)")
# -
# ## Calculate FF and PCE
# +
ff4hdark = (Jmp4hdark*Vmp4hdark)/(Jsc4hdark*Voc4hdark) # Fill Factor
ff4hdark = round(ff4hdark,3)
print(f"Fill Factor = {ff4hdark}")
# PCE in % — the /100 presumably assumes 100 mW/cm2 input power; verify.
pce4hdark = Jsc4hdark*Voc4hdark*ff4hdark/100
pce4hdark = round(pce4hdark,3)
print(f"PCE = {pce4hdark}")
# -
# ## Plot the ideal curves
# Overlay the three ideal (dark-shifted) J-V curves on one axis.
ax = AR2cdark.plot(x="Applied Potential (V)", y="Measured J (mA/cm2)", label='D205 Ideal')
ay = AR2c4hdark.plot(ax=ax, x="Applied Potential (V)", y="Measured J (mA/cm2)", label='D205 4 Hours Ideal')
AR2c1weekdark.plot(ax=ay, x="Applied Potential (V)", y="Measured J (mA/cm2)", label='D205 1 Week Ideal')
# sns.lineplot(data=AR2c4h, x="Applied Potential (V)", y="Measured J (mA/cm2)", label='AR2c 4 Hours')
# sns.lineplot(data=AR2c1week, x="Applied Potential (V)", y="Measured J (mA/cm2)", label='AR2c 1 Week')
plt.title('Current Density: D205 (Ideal) Comparison')
plt.ylabel('Current Density $(\\frac{mA}{cm^2})$')
plt.xlim([0,1])
plt.ylim([0,15])
plt.savefig('AR2c_all3_current_density_dark_comparison', dpi=300)
# # Combine Everything into one Table
# Summary table of photovoltaic figures of merit for every measured state.
# NOTE(review): the fresh-cell values (Jsc, Voc, ... and the *dark variants)
# come from earlier cells not shown here.
# indices = ['AR2c','AR2c Dark','AR2c 1 Week']
indices = ['AR2 Dye Screen Printed Fresh (Real)', 'AR2 Dye Screen Printed Fresh (Ideal)', 'AR2 Dye Screen Printed After 4 Hours', 'AR2 Dye Screen Printed After 1 Week']
big_table = pd.DataFrame({'Jsc (mA/cm2)': [Jsc, Jscdark, Jsc4h, Jsc1week],
                          'Voc (V)': [Voc, Vocdark, Voc4h, Voc1week],
                          'Jmp (mA/cm2)': [Jmp, Jmpdark, Jmp4h, Jmp1week],
                          'Vmp (V)': [Vmp, Vmpdark, Vmp4h, Vmp1week],
                          'PCE (%)': [pce, pcedark, pce4h, pce1week],
                          'FF': [ff, ffdark, ff4h, ff1week]}, index=indices)
big_table.index.name = 'Cell'
big_table
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/xSakix/AI_colan_notebooks/blob/master/pytorch_transformer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="VV_t32wUin9k" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 646} outputId="f5f4bc8a-5f43-4fe7-86ee-b414e80b8b5b"
# !pip install transformers
# + id="XeVup08MivQa" colab_type="code" colab={}
# + id="rnAGLm8mfL-f" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="e6376ec7-53d6-4e54-f1b3-844d21b760ed"
from google.colab import drive
drive.mount('/content/drive')
# + id="S75c7nlmf2Vz" colab_type="code" colab={}
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
# Temporarily leave PositionalEncoding module here. Will be moved somewhere else.
class PositionalEncoding(nn.Module):
    r"""Add fixed sine/cosine positional encodings to a token embedding.

    PosEncoder(pos, 2i)   = sin(pos / 10000^(2i/d_model))
    PosEncoder(pos, 2i+1) = cos(pos / 10000^(2i/d_model))

    Args:
        d_model: embedding dimension (required).
        dropout: dropout probability applied after the addition (default 0.1).
        max_len: maximum supported sequence length (default 5000).
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Precompute the encoding table once; shape [max_len, 1, d_model] so it
        # broadcasts across the batch dimension in forward().
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        # register_buffer: saved with the module and moved by .to()/.cuda(),
        # but not a trainable parameter.
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        r"""Add positional encodings to x.

        Shape:
            x: [sequence length, batch size, embed dim]
            returns: [sequence length, batch size, embed dim]
        """
        return self.dropout(x + self.pe[:x.size(0), :])
class TransformerModel(nn.Module):
    """Token embedding -> Transformer encoder stack -> linear decoder.

    Returns per-position log-probabilities over the vocabulary, using a causal
    (square subsequent) attention mask for autoregressive language modelling.

    Args:
        ntoken: vocabulary size.
        ninp: embedding / model dimension.
        nhead: number of attention heads.
        nhid: feed-forward hidden size inside each encoder layer.
        nlayers: number of stacked encoder layers.
        dropout: dropout probability (default 0.5).
    """

    def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
        super(TransformerModel, self).__init__()
        try:
            from torch.nn import TransformerEncoder, TransformerEncoderLayer
        except ImportError:  # fixed: was a bare `except:`, which hid unrelated errors
            raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or lower.')
        self.model_type = 'Transformer'
        self.src_mask = None  # causal mask, built lazily in forward()
        self.pos_encoder = PositionalEncoding(ninp, dropout)
        encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
        self.encoder = nn.Embedding(ntoken, ninp)
        self.ninp = ninp
        self.decoder = nn.Linear(ninp, ntoken)
        self.init_weights()

    def _generate_square_subsequent_mask(self, sz):
        """Causal mask: 0.0 where attention is allowed, -inf for future positions."""
        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask

    def init_weights(self):
        """Uniform(-0.1, 0.1) init for embedding/decoder weights; zero decoder bias."""
        initrange = 0.1
        self.encoder.weight.data.uniform_(-initrange, initrange)
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-initrange, initrange)

    def forward(self, src, has_mask=True):
        """Forward pass.

        Args:
            src: LongTensor of token ids, shape [seq_len, batch].
            has_mask: apply (and cache) the causal mask when True.

        Returns:
            log-probabilities, shape [seq_len, batch, ntoken].
        """
        if has_mask:
            device = src.device
            # Rebuild the cached mask only when the sequence length changes.
            if self.src_mask is None or self.src_mask.size(0) != len(src):
                mask = self._generate_square_subsequent_mask(len(src)).to(device)
                self.src_mask = mask
        else:
            self.src_mask = None
        src = self.encoder(src) * math.sqrt(self.ninp)  # standard embedding scaling
        src = self.pos_encoder(src)
        output = self.transformer_encoder(src, self.src_mask)
        output = self.decoder(output)
        return F.log_softmax(output, dim=-1)
# + id="0aKCoVZhfrS3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="28589120-e9a7-4e48-db9f-c14255357cd9"
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
import os
from transformers import BertTokenizer, AdamW, get_linear_schedule_with_warmup
# Hyper-parameters for the byte-level language-model training run below.
NUM_BATCHES = 40               # number of outer optimiser steps
BATCH_SIZE = 8
GRADIENT_ACCUMULATE_EVERY = 4  # backward passes accumulated per optimiser step
LEARNING_RATE = 3e-4
VALIDATE_EVERY  = 4            # checkpoint + validation period (outer steps)
GENERATE_EVERY  = 4            # text-sampling period (outer steps)
GENERATE_LENGTH = 512          # number of tokens to sample when generating
SEQ_LEN = 4096                 # training context length
# helpers
def cycle(loader):
    """Yield items from `loader` forever, restarting each time it is exhausted."""
    while True:
        yield from loader
def get_top_p(logits, top_p=0.9):
    """Nucleus (top-p) filtering: keep the smallest set of tokens whose
    cumulative probability exceeds `top_p`; set the rest to -inf.

    Mutates `logits` in place and returns it.
    """
    sorted_vals, sorted_idx = torch.sort(logits, descending=True)
    cum_probs = torch.cumsum(F.softmax(sorted_vals, dim=-1), dim=-1)
    drop = cum_probs > top_p
    # Shift right so the first token past the threshold is still kept.
    drop[..., 1:] = drop[..., :-1].clone()
    drop[..., 0] = False
    logits[sorted_idx[drop]] = float('-inf')
    return logits
def sample_next_token(logits, top_p=0.9, temperature = 1.0):
    """Sample one token id from the last position of `logits`
    (shape [1, seq, vocab]) using temperature scaling + nucleus filtering."""
    scaled = logits[0, -1, :] / temperature
    dist = F.softmax(get_top_p(scaled, top_p=top_p), dim=-1)
    return torch.multinomial(dist, 1)
def decode_token(token):
    """Map one integer byte/char code back to its single-character string."""
    return chr(token)
def decode_tokens(tokens):
    """Decode an iterable of integer codes into a string."""
    return ''.join(chr(t) for t in tokens)
# model = ReformerLM(
# dim = 512,
# depth = 6,
# max_seq_len = SEQ_LEN,
# num_tokens = 256,
# heads = 8,
# bucket_size = 64,
# n_hashes = 8,
# ff_chunks = 10,
# lsh_dropout = 0.1,
# weight_tie = True,
# causal = True,
# use_full_attn = False # set this to true for comparison with full attention
# )
# instantiate model
#ntokens, args.emsize, args.nhead, args.nhid, args.nlayers, args.dropout
# ntoken, ninp, nhead, nhid, nlayers, dropout=0.5
# Byte-level LM: vocab 256, d_model 512, 8 heads, FF size 64, 6 layers, dropout 0.1.
model = TransformerModel(256,512,8,64,6,0.1)
# if last_model_file is not None:
# model.load_state_dict(torch.load(last_model_file ))
if torch.cuda.is_available():
    model.cuda()
# prepare enwik8 data
# Read the gzipped corpus as raw bytes; each byte becomes one token id.
with gzip.open('/content/drive/My Drive/model_data/merged.gz') as file:
    X = np.array([int(c) for c in file.read()])
# 80/20 train/validation split.
si = int(len(X)-len(X)*0.2)
trX, vaX = np.split(X, [si])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
    """Serve random fixed-length (input, target) token windows from a 1-D tensor.

    The target is the input shifted one position ahead (next-token prediction).
    """

    def __init__(self, data, seq_len):
        super().__init__()
        self.data = data
        self.seq_len = seq_len

    def __getitem__(self, index):
        # `index` is ignored: every access draws a fresh random window.
        start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
        window = self.data[start: start + self.seq_len + 1].long()
        x, y = window[0:-1], window[1:]
        if torch.cuda.is_available():
            return x.cuda(), y.cuda()
        return x, y

    def __len__(self):
        return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
# cycle() turns the finite DataLoaders into infinite batch streams.
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
print(len(train_dataset))
print(len(val_dataset))
# optimizer
# optimizer.load_state_dict(torch.load('optimizer.pt'))
# scheduler.load_state_dict(torch.load('scheduler.pt'))
# NOTE(review): this variable shadows the `torch.optim as optim` module import above.
optim = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE,amsgrad=True)
# if os.path.exists('/content/drive/My Drive/model_saves/optim.pt'):
# optim.load_state_dict(torch.load('/content/drive/My Drive/model_saves/optim.pt'))
#scheduler
# scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=VALIDATE_EVERY, gamma=0.1)
# Linear LR decay with no warmup over the planned number of optimiser steps.
scheduler = get_linear_schedule_with_warmup(
    optim,
    num_warmup_steps=0,
    num_training_steps=len(train_dataset) // GRADIENT_ACCUMULATE_EVERY * NUM_BATCHES
)
# if os.path.exists('/content/drive/My Drive/model_saves/scheduler.pt'):
# scheduler.load_state_dict(torch.load('/content/drive/My Drive/model_saves/scheduler.pt'))
def get_batch_loss(model, data):
    """Mean cross-entropy of next-token predictions for one (input, target) batch."""
    inputs, targets = data
    logits = model(inputs)
    # cross_entropy expects the class dimension second: [batch, vocab, seq].
    return F.cross_entropy(logits.transpose(1, 2), targets, reduction='mean')
for i in tqdm.tqdm(range(0, NUM_BATCHES), mininterval=10., desc='training'):
    model.train()
    # Accumulate gradients over several mini-batches before one optimiser step.
    # NOTE(review): loss is not divided by GRADIENT_ACCUMULATE_EVERY, so the
    # effective gradient magnitude is scaled by that factor — confirm intended.
    for __ in range(GRADIENT_ACCUMULATE_EVERY):
        loss = get_batch_loss(model, next(train_loader))
        loss.backward()
    print(f'training loss: {loss.item()}')  # loss of the last accumulated batch only
    torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
    optim.step()
    optim.zero_grad()
    scheduler.step()
    # Periodically checkpoint model/optimiser/scheduler and report validation loss.
    if i % VALIDATE_EVERY == 0:
        torch.save(model.state_dict(), os.path.join('/content/drive/My Drive/model_saves', 'epoch-{}.pt'.format(i)))
        torch.save(optim.state_dict(),'/content/drive/My Drive/model_saves/optim.pt')
        torch.save(scheduler.state_dict(),'/content/drive/My Drive/model_saves/scheduler.pt')
        model.eval()
        with torch.no_grad():
            loss = get_batch_loss(model, next(val_loader))
            print(f'validation loss: {loss.item()}')
    # Periodically sample text, primed with a random validation window.
    if i % GENERATE_EVERY == 0:
        model.eval()
        with torch.no_grad():
            inp, _ = random.choice(val_dataset)
            output_str = ''
            prime = decode_tokens(inp)
            # print(f'%s \n\n %s', (prime, '*' * 100))
            print(prime)
            print('*'*100)
            # Autoregressive sampling: slide the context window one token at a time.
            for _ in tqdm.tqdm(range(GENERATE_LENGTH), desc='generating'):
                logits = model(inp[None, :])
                next_token = sample_next_token(logits)
                output_str += decode_token(next_token)
                inp = torch.cat((inp[1:], next_token), dim=0)
            print(output_str)
# + id="tWHy-TfDiyhu" colab_type="code" colab={}
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# %matplotlib inline
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import time
import tensorflow as tf
from IPython import display
import scipy.signal
# +
sys.path.append('../pythonWrapper/')
import EAProjectorWrapper
sys.path.append('../FileOps/')
import PatchSample
import FileIO
sys.path.append('../Autoencoders/')
import AEDenoising
import SSAE
# +
# Load projector geometry and extract one sinogram slice from the raw volume.
dataPath = '/home/data1/dufan/lowdoseCTsets/L291/'
prj = EAProjectorWrapper.Projector()
prj.FromFile(os.path.join(dataPath, 'param.txt'))
layer = 78 #L291
with open(os.path.join(dataPath, 'quarter.raw'), 'rb') as f:
    # Seek past `layer` full sinograms: nu * rotview float32 values (4 bytes each).
    f.seek(prj.nu*prj.rotview*layer*4, os.SEEK_SET)
    sino = np.fromfile(f, dtype=np.float32, count=prj.nu*prj.rotview)
    f.close()  # redundant inside `with`, but harmless
sino = sino.reshape([prj.rotview, prj.nu])
# +
# FBP reconstruction used as the initial image; convert HU to linear attenuation
# (0.01937 is presumably mu_water — TODO confirm units).
img0 = np.fromfile('/home/data0/dufan/Reconstruction/recon_new/recon/L291-78/fbp-quarter-3mean.raw', dtype=np.float32)
img0 = (img0 + 1000) / 1000 * 0.01937
img0 = np.reshape(img0, [640,640])
plt.figure(figsize=[8,8])
# Display back in HU with a soft-tissue window.
plt.imshow(img0 / 0.01937 * 1000 - 1000, 'Greys_r', vmin=-160, vmax=240)
# -
def SAEReconSQS(sino, img0, prj, sae, sess, strides = None, nIter = 1,
                hyper=0, subStepSize=0.05, nSubSteps=5, gamma = 0.5, random_patch = True, showSAELoss = False):
    """Iterative CT reconstruction (SQS updates with Nesterov momentum)
    regularized by a stacked sparse autoencoder (SAE) prior.

    Minimizes 0.5*||P(x) - sino||_w^2 + hyper * SAE penalty.

    sino        -- measured sinogram [rotview, nu]
    img0        -- initial image estimate (attenuation units)
    prj         -- projector exposing ProjectionEA / BackProjectionEA
    sae, sess   -- trained SAE model and the TensorFlow session holding it
    strides     -- patch strides (default: half the SAE patch size)
    nIter       -- number of outer iterations
    hyper       -- regularization weight of the SAE penalty
    subStepSize, nSubSteps -- inner SAE denoising step size / count
    gamma       -- Nesterov momentum factor
    random_patch -- randomize patch offsets each iteration
    showSAELoss -- also evaluate the SAE loss for the progress plots

    Returns (x, total_cfs, sae_cfs): final image and per-iteration cost curves.
    """
    if strides is None:
        strides = [sae.imgshape[0] / 2, sae.imgshape[1] / 2]
    # pre calculation
    # w = sqrt(exp(-sino)) / prj_ones gives more stable results than exp(-sino) weighting
    prj_ones = prj.ProjectionEA(np.ones(img0.shape, dtype=np.float32)) + 1e-6
    w = np.sqrt(np.exp(-sino)) / prj_ones
    # SQS normalization image: back-projection of the weighted projection of ones.
    normImg = prj.BackProjectionEA(w * prj.ProjectionEA(np.ones(img0.shape, dtype=np.float32)))
    total_cfs = list()
    sae_cfs = list()
    x = np.copy(img0)
    z = np.copy(x)  # momentum ("lookahead") image
    patches_latent = None  # NOTE(review): unused
    for iIter in range(nIter):
        # Scale image into the SAE's input range before denoising.
        x_input = x / 0.01937 * 2 - 2
        y_input = np.copy(x_input)
        sum_diff, sum_ones, cf_sae, _ = \
            AEDenoising.SAEDenoisingSQS(x_input, sae, sess, y_input, None,
                                        strides, step=subStepSize, nSteps = nSubSteps,
                                        random=random_patch, calcLoss=showSAELoss)
        # Undo the input scaling on the prior gradient and cost.
        sum_diff = sum_diff / 2 * 0.01937
        cf_sae = cf_sae / 2 / 2 * 0.01937 * 0.01937
        dprj = (prj.ProjectionEA(x) - sino)
        dprj[prj_ones <= 1e-6] = 0  # ignore rays with (near-)zero coverage
        dimg_prj = prj.BackProjectionEA(w * dprj)
        # Nesterov Momentum
        x_new = z - (dimg_prj + 2 * hyper * sum_diff) / (normImg + 2 * hyper * sum_ones)
        z = x_new + gamma * (x_new - x)
        x = np.copy(x_new)
        cf_prj = 0.5 * np.sum(w * dprj**2)
        cf = cf_prj + hyper * cf_sae
        total_cfs.append(cf)
        sae_cfs.append(cf_sae)
        display.clear_output()
        print 'CF=(%f, %f, %f)'%(cf, cf_prj, cf_sae)
        plt.figure(figsize=[16,8])
        plt.subplot(121); plt.imshow(x / 0.01937 * 1000 - 1000, 'Greys_r', vmin=-160, vmax=240); plt.title('Image at %d'%iIter)
        plt.subplot(222); plt.plot(sae_cfs); plt.xlim((0, nIter)); plt.title('SAE loss')
        plt.subplot(224); plt.semilogy(total_cfs); plt.xlim((0, nIter)); plt.title('Total loss')
        plt.show()
    return x, total_cfs, sae_cfs
# +
# Build the k-sparse stacked autoencoder and restore pretrained weights.
sparsity = 100
sparsity_src = 100  # sparsity level of the checkpoint to restore
tf.reset_default_graph()
ae = SSAE.StackedSparseAutoEncoder(imgshape=[16,16,1], nFeatures=[1024,1024,1024],
                                   sparsity=[sparsity,sparsity,sparsity], mode=0)
ae.BuildStackedAutoEncoder(scope='SSAE')
ae.BuildGradientsWRTInput(scope='SSAE')
# Limit this process to 30% of GPU 0's memory.
sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(visible_device_list='0',
                                        per_process_gpu_memory_fraction=0.3)))
loader = tf.train.Saver(var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'SSAE'))
loader.restore(sess, '/home/data0/dufan/Reconstruction/recon_new/train/KSAE/16x16-xy/k-%d-wd-0.1-f-1024-1024-1024/49'%sparsity_src)
# -
# Run the SAE-regularized reconstruction.
res = SAEReconSQS(sino, img0, prj, ae, sess, hyper=50, nIter=200, strides=[8,8],
                  subStepSize=0.05, nSubSteps=5, random_patch=True, showSAELoss=True)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Wikipedia Talk Data - Getting Started
# This notebook gives an introduction to working with the various data sets in [Wikipedia
# Talk](https://figshare.com/projects/Wikipedia_Talk/16731) project on Figshare. The release includes:
#
# 1. a large historical corpus of discussion comments on Wikipedia talk pages
# 2. a sample of over 100k comments with human labels for whether the comment contains a personal attack
# 3. a sample of over 100k comments with human labels for whether the comment has aggressive tone
#
# Please refer to our [wiki](https://meta.wikimedia.org/wiki/Research:Detox/Data_Release) for documentation of the schema of each data set and our [research paper](https://arxiv.org/abs/1610.08914) for documentation on the data collection and modeling methodology.
#
# In this notebook we show how to build a simple classifier for detecting personal attacks and apply the classifier to a random sample of the comment corpus to see whether discussions on user pages have more personal attacks than discussion on article pages.
# ## Building a classifier for personal attacks
# In this section we will train a simple bag-of-words classifier for personal attacks using the [Wikipedia Talk Labels: Personal Attacks]() data set.
import pandas as pd
import urllib
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
# download annotated comments and annotations
# The block below is disabled (kept as a string literal); data is read from disk instead.
'''
ANNOTATED_COMMENTS_URL = 'https://ndownloader.figshare.com/files/7554634'
ANNOTATIONS_URL = 'https://ndownloader.figshare.com/files/7554637'
def download_file(url, fname):
    urllib.request.urlretrieve(url, fname)
download_file(ANNOTATED_COMMENTS_URL, 'attack_annotated_comments.tsv')
download_file(ANNOTATIONS_URL, 'attack_annotations.tsv')
'''
comments = pd.read_csv('../../datasets/attack_annotated_comments.tsv', sep = '\t', index_col = 0)
annotations = pd.read_csv('../../datasets/attack_annotations.tsv', sep = '\t')
len(annotations['rev_id'].unique())
# labels a comment as an attack if the majority of annotators did so
labels = annotations.groupby('rev_id')['attack'].mean() > 0.5
# join labels and comments
comments['attack'] = labels
# remove newline and tab tokens
comments['comment'] = comments['comment'].apply(lambda x: x.replace("NEWLINE_TOKEN", " "))
comments['comment'] = comments['comment'].apply(lambda x: x.replace("TAB_TOKEN", " "))
comments.query('attack')['comment'].head()
comments.columns
import numpy as np
# NOTE(review): `test_comments` is only defined in a later cell — running the
# notebook top-to-bottom raises NameError here; move this cell below the split.
np.unique(test_comments['attack'])
# +
# fit a simple text classifier
# NOTE(review): the random 70/30 split below replaces the dataset's own
# train/test split (commented out); filtering the test set by comment text
# drops every comment whose text also appears in the training sample.
train_comments = comments.sample(round(len(comments)*0.7))#comments.query("split=='train'")
test_comments = comments[~comments['comment'].isin(train_comments.comment.values)]#comments.query("split=='test'")
# Bag-of-words counts -> logistic regression.
clf = Pipeline([
    ('vect', CountVectorizer(max_features = 50000)),#, ngram_range = (1,2))),
    #('tfidf', TfidfTransformer(norm = 'l2')),
    ('clf', LogisticRegression()),
])
clf = clf.fit(train_comments['comment'], train_comments['attack'])
# AUC from hard 0/1 predictions (underestimates the probabilistic AUC below).
auc = roc_auc_score(test_comments['attack'], clf.predict(test_comments['comment'])) #clf.predict_proba(test_comments['comment'])[:, 1])
print('Test ROC AUC: %.3f' %auc)
# -
# AUC from predicted probabilities (the usual metric).
auc = roc_auc_score(test_comments['attack'], clf.predict_proba(test_comments['comment'])[:, 1])
print('Test ROC AUC: %.3f' %auc)
roc_auc_score([True,False,True],[0.008,0.002,0.7])
roc_auc_score([True,False,True],[1,0,1])
# correctly classify nice comment
clf.predict(['Thanks for you contribution, you did a great job!'])
# correctly classify nasty comment
clf.predict(['People as stupid as you should not edit Wikipedia!'])
# ## Prevalence of personal attacks by namespace
# In this section we use our classifier in conjunction with the [Wikipedia Talk Corpus](https://figshare.com/articles/Wikipedia_Talk_Corpus/4264973) to see if personal attacks are more common on user talk or article talk page discussions. In our paper we show that the model is not biased by namespace.
# +
import os
import re
from scipy.stats import bernoulli
import seaborn as sns
import matplotlib.pyplot as plt
# -
def download_file(url, fname):
    """Download `url` and save it to the local path `fname`."""
    # The file only does `import urllib`; in Python 3 the `request` submodule
    # must be imported explicitly, otherwise this can fail with AttributeError.
    import urllib.request
    urllib.request.urlretrieve(url, fname)
# +
# download and untar data
# Fetch the 2004 user-talk and article-talk comment corpora and extract them
# into ./comments_user_2004 and ./comments_article_2004.
USER_TALK_CORPUS_2004_URL = 'https://ndownloader.figshare.com/files/6982061'
ARTICLE_TALK_CORPUS_2004_URL = 'https://ndownloader.figshare.com/files/7038050'
download_file(USER_TALK_CORPUS_2004_URL, 'comments_user_2004.tar.gz')
download_file(ARTICLE_TALK_CORPUS_2004_URL, 'comments_article_2004.tar.gz')
os.system('tar -xzf comments_user_2004.tar.gz')
os.system('tar -xzf comments_article_2004.tar.gz')
# -
# helper for collecting a sample of comments for a given ns and year from
def load_no_bot_no_admin(ns, year, prob = 0.1):
    """Load a random sample of non-bot, non-admin comments for one namespace/year.

    Reads every chunk_*.tsv file under ./comments_<ns>_<year>, keeps each row
    with probability `prob`, drops bot/admin comments, and tags the sample
    with `ns` and `year` columns.

    Raises ValueError (from pd.concat) if no chunk files are found.
    """
    dfs = []
    data_dir = "comments_%s_%d" % (ns, year)
    for _, _, filenames in os.walk(data_dir):
        for filename in filenames:
            # fixed: raw string (\d was an invalid escape) and escaped '.'
            if re.match(r"chunk_\d*\.tsv", filename):
                df = pd.read_csv(os.path.join(data_dir, filename), sep = "\t")
                # Bernoulli coin flip per row for the sampling rate.
                df['include'] = bernoulli.rvs(prob, size=df.shape[0])
                df = df.query("bot == 0 and admin == 0 and include == 1")
                dfs.append(df)
    sample = pd.concat(dfs)
    sample['ns'] = ns
    sample['year'] = year
    return sample
# collect a random sample of comments from 2004 for each namespace
corpus_user = load_no_bot_no_admin('user', 2004)
corpus_article = load_no_bot_no_admin('article', 2004)
corpus = pd.concat([corpus_user, corpus_article])
# Apply model
# Same token cleanup as for the training data, then threshold the attack
# probability at 0.425 (threshold taken from the paper).
corpus['comment'] = corpus['comment'].apply(lambda x: x.replace("NEWLINE_TOKEN", " "))
corpus['comment'] = corpus['comment'].apply(lambda x: x.replace("TAB_TOKEN", " "))
corpus['attack'] = clf.predict_proba(corpus['comment'])[:,1] > 0.425 # see paper
# +
# plot prevalence per ns
# Point plot of the fraction of comments flagged as attacks in each namespace.
sns.pointplot(data = corpus, x = 'ns', y = 'attack')
plt.ylabel("Attack fraction")
plt.xlabel("Discussion namespace")  # fixed typo: "Dicussion"
# -
# Attacks are far more prevalent in the user talk namespace.
# Scratch cell: map one-hot pairs to booleans with a list comprehension.
arr = [[0,1],[1,0],[1,0]]
r = [False if i[0]==1 else True for i in arr]
r
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Poupar ou Gastar os Pontos do NuBank Rewards?
#
# ## O que é o programa NuBank Rewards
#
# O Rewards é um programa de recompensas por fidelidade do uso do cartão NuBank, um cartão de crédito sem anuidade de um dos primeiros bancos digitais do Brasil.
#
# No NuBank Rewards, cada real que você gasta no cartão NuBank vale um ponto. Estes pontos nunca expiram e podem ser trocados por descontos na sua fatura do cartão de crédito NuBank no valor de compras já efetuadas com o cartão.
# Ou seja, você usa seus pontos para apagar uma cobrança de algo de comprou da sua fatura, mesmo depois da compra já ter sido paga.
#
# O programa NuBank rewards não é gratuito - existe uma cobrança mensal de R\$ 19.90 para todos os participantes do programa.
#
# É importante ressaltar que a quantidade de pontos que você precisa para apagar uma compra não é a mesma que a quantidade de centavos na compra - cada ponto vale MENOS de 1 centavo na maioria das compras.
#
# Auditando minhas faturas, descobri que cada ponto do NuBank Rewards vale 0.769 centavos de real para apagar compras feitas no UBER, CABIFY ou IFOOD.
#
# | Uber - 0.007689 | Cabify - 0.007690 | Ifood - 0.007692 |
# | --------------- | ----------------- | ---------------- |
# | \$4.74 - 617 | \$6.90 - 897 | \$55.90 - 7267 |
# | \$12.66 - 1646 | \$8.52 - 1108 | \$81.37 - 10579 |
# | \$15.18 - 1974 | \$10.72 - 1394 | \$99.90 - 12987 |
# | \$15.28 - 1987 | \$22.58 - 2936 | \$91.37 - 11879 |
# | \$6.23 - 810 | \$7.42 - 965 | \$56.00 - 7280 |
#
# Contudo, para compras de VOOS ou HOTÉIS, cada ponto vale um centavo, e para compras no SPOTIFY, 1.345 centavos
#
# | Voos & Hotéis - 0.010000 | Spotify - 0.013450 |
# | -- | -- |
# | \$702.12 - 70212 | \$26.90 - 2000 |
# | \$216.75 - 21675 | \$26.90 - 2000 |
# | \$526.62 - 52662 | \$26.90 - 2000 |
# | \$412.17 - 41217 | \$26.90 - 2000 |
# | \$359.01 - 35901 | \$26.90 - 2000 |
#
# **Ou seja, os pontos do NuBank Rewards são 30% mais valiosos para uso em VÔOS e HOTÉIS do que para uso em outros convênios, com exceção do Spotify**
#
# ## O NuBank Rewards vale a pena?
#
# Para saber se o programa vale a pena, podemos pensar o valor descontado da fatura como o rendimento de um investimento. Para que esse investimento valha a pena, o rendimento tem que superar outros investimentos de investimento mínimo, liquidez e risco similares.
#
# - O Investimento Mínimo (mínima quantidade de reais necessária para fazer o investimento) do Nubank Rewards é de R\$ 19.90, o valor da mensalidade.
# - A Liquidez do Nubank Rewards não é diretamente comparável a liquidez da Caderneta de Poupança uma vez que os "rendimentos" somente podem ser usados para abater valores da fatura, mas tal abatimento afeta instantaneamente seu limite de crédito. Para esta análise, considero que os rendimentos da poupança, de fundos de investimento e do NuBank Rewards só são mensuraveis no momento do pagamento da fatura, ou seja, com liquidez em uma certa data do mês.
# - O Risco de Mercado (risco de variação dos preços por forças de mercado) do NuBank Rewards é nulo, já que não há um fator de mercado que afete a relação de conversão entre os pontos do programa e as compras efetuadas. Para esta análise, consideraremos nulo o risco de mercado da Caderneta de Poupança.
# - O Risco de Default do NuBank Rewards é o risco do banco NuBank quebrar. Para esta análise, consideraremos este e outros riscos técnicos negligíveis.
# - Os elementos apagados da fatura do NuBank Rewards não são tributáveis pelo Imposto de Renda
#
# Então vamos assumir que o investimento mínimo do NuBank Rewards (R\$ 20,00, para simplificar) seja investido em um fundo, que tem imposto de renda, ou numa Caderneta de Poupança, que não tem imposto de renda.
#
# - Rendimento da Caderneta de Poupança: 6.5% ao ano, 0% de Imposto de Renda
# - Rendimento do Fundo Fictício FicSAFE: 9.15% ao ano, 12.5% de Imposto de Renda
# - Rendimento do Fundo Fictício FicRISK: 19.35% ao ano, 20% de Imposto de Renda
#
# Obs: assumo que os fundos fictícios FicSAFE e FicRISK também tem investimento mínimo de R\$20
# +
def rendimento_aplicacao_regular(meses, valor_aplicacao_regular, rendimento_anual, aliquota_ir):
    """Accumulated value of a fixed monthly deposit after `meses` months.

    Each month's deposit compounds at the annual rate `rendimento_anual` for
    the months remaining until the end of the horizon; the income-tax rate
    `aliquota_ir` is applied to the gains only.
    """
    def _fator_liquido(meses_aplicados):
        # Gross compounding factor minus tax on the gain portion.
        bruto = (1 + rendimento_anual) ** (meses_aplicados / 12)
        return bruto - (bruto - 1) * aliquota_ir

    total = 0
    for m in range(meses, 0, -1):
        total += valor_aplicacao_regular * _fator_liquido(m)
    return total
import pandas as pd
# Compare the cumulative value of depositing R$20/month (the Rewards fee) into
# each alternative investment; (M) = accumulated amount in R$, (R%) = ratio of
# the accumulated amount to the total deposited.
df_rendimentos_comparaveis = pd.DataFrame({
    'Mês': [1, 2, 3, 4, 6, 9, 12, 15, 18, 24, 30, 36, 42, 48]
})
df_rendimentos_comparaveis.loc[:, 'Investimento'] = df_rendimentos_comparaveis['Mês'] * 20
df_rendimentos_comparaveis.loc[:, 'Poupança (M)'] = df_rendimentos_comparaveis['Mês'].apply(
    lambda m: rendimento_aplicacao_regular(m, 20, 0.065, 0))
df_rendimentos_comparaveis.loc[:, 'Poupança (R%)'] = \
    df_rendimentos_comparaveis['Poupança (M)'] / df_rendimentos_comparaveis['Investimento']
df_rendimentos_comparaveis.loc[:, 'FicSAFE (M)'] = df_rendimentos_comparaveis['Mês'].apply(
    lambda m: rendimento_aplicacao_regular(m, 20, 0.0915, 0.125))
df_rendimentos_comparaveis.loc[:, 'FicSAFE (R%)'] = \
    df_rendimentos_comparaveis['FicSAFE (M)'] / df_rendimentos_comparaveis['Investimento']
df_rendimentos_comparaveis.loc[:, 'FicRISK (M)'] = df_rendimentos_comparaveis['Mês'].apply(
    lambda m: rendimento_aplicacao_regular(m, 20, 0.1935, 0.2))
df_rendimentos_comparaveis.loc[:, 'FicRISK (R%)'] = \
    df_rendimentos_comparaveis['FicRISK (M)'] / df_rendimentos_comparaveis['Investimento']
df_rendimentos_comparaveis.round(2)
# -
# Para que o NuBank Rewards valha a pena, o valor descontado da fatura deveria ser maior do que a rentabilidade de uma das alternativas acima no mesmo periodo.
#
# Ou seja, em 3 anos (36 meses), se o valor total descontado da minha fatura for:
# - MAIOR QUE R\$ 74.59 - valeu mais a pena que a poupança.
# - MAIOR QUE R\$ 93.11 - Valeu mais a pena que a poupança e o fundo FicSAFE
# - MAIOR QUE R\$ 189.49 - Valeu mais a pena que a poupança e os dois fundos
#
# Utilizando o pior caso do valor dos pontos do NuBank Rewards (0.769 centavos/ponto), seriam necessários 24708 pontos para descontar compras que totalizem R\$ 190,00. Seriam necessários mais 93629 pontos para descontar os R\$ 720,00 pagos como mensalidade para programa.
#
# **Assim, seria necessário que o titular do cartão NuBank gaste no mínimo R\$ 118.337,00 reais ao longo de três anos, ou R\$ 3.288,00 por mês. O valor de desconto na fatura mensal seria de R\$ 25.29, totalizando uma fatura de R\$ 3.262,71**
#
# **Numa estimativa mais conservadora, seriam necessários 105853 pontos, R\$ 2.940,37 gastos por mês para uma fatura de R\$ 2.917,74 após o desconto de R\$ 22.62**
# ## Qual a melhor forma de usar os pontos do NuBank Rewards?
#
# O ideal é priorizar os pontos para uso em compras de vôos e hotéis, onde os pontos chegam a valer 30% a mais. Contudo, vôos e hotéis tendem a ser compras mais esporádicas e de maior volume unitário, o que significa que é necessário uma maior quantidade de pontos acumulados para poder descontá-las da fatura.
#
# É melhor conservar os pontos, acumulando para uso nas compras de voos e hotéis, ou gastá-los imediatamente?
#
# A vantagem de utilizar os pontos imediatamente é de liberar recursos que poderiam ser melhor investidos. A vantagem de acumular os pontos é que os mesmos seriam mais valorizados quando utilizados para descontar uma compra de voos ou hotéis.
#
# Portanto, o custo de acumular os pontos é equivalente ao rendimento de uma aplicação na qual o valor dos pontos poderia ter sido investido. Vale a pena guardar os pontos quando o custo de acumulá-los é menor que o benefício dos mesmos.
#
# Dada a natureza esporádica e variada das compras de vôos e hotéis, prefiro calcular os custos de acumulação de pontos necessários para cada compra.
#
# Assim sendo, assumindo um gasto mensal de R\$ 3.300,00, e uma compra de voo de R\$ 1.000,00:
# +
pontos_mensais = 3300  # points earned per month (1 point per R$ spent)
valor_compra = 1000    # target purchase (flight) value in R$
# For each horizon: points accumulated vs. the value of spending the points
# immediately at 0.769 cent/point and investing that cash ("U.I." columns).
df_rendimentos_acumulo = pd.DataFrame({
    'Mês': list(range(41))[1:]
})
df_rendimentos_acumulo.loc[:, 'Pontos Gerados'] = df_rendimentos_acumulo['Mês'] * pontos_mensais
df_rendimentos_acumulo.loc[:, 'Valor U.I.'] = df_rendimentos_acumulo['Mês'].apply(
    lambda m: rendimento_aplicacao_regular(m, (pontos_mensais*0.769)/100.0, 0.0, 0))
df_rendimentos_acumulo.loc[:, 'U.I. (Poupanca)'] = df_rendimentos_acumulo['Mês'].apply(
    lambda m: rendimento_aplicacao_regular(m, (pontos_mensais*0.769)/100.0, 0.065, 0))
df_rendimentos_acumulo.loc[:, 'U.I. (FicSAFE)'] = df_rendimentos_acumulo['Mês'].apply(
    lambda m: rendimento_aplicacao_regular(m, (pontos_mensais*0.769)/100.0, 0.0915, 0.125))
df_rendimentos_acumulo.loc[:, 'U.I. (FicRISK)'] = df_rendimentos_acumulo['Mês'].apply(
    lambda m: rendimento_aplicacao_regular(m, (pontos_mensais*0.769)/100.0, 0.1935, 0.2))
def vermelho_se_maior_igual(v, ref):
    """Styler callback: empty style for values below ``ref``, green
    background otherwise.

    NOTE: despite the name ("vermelho" = red), the highlight applied
    is green.
    """
    return '' if v < ref else 'background-color: green'
# Render the table, painting a cell green once its value reaches the
# purchase target (the points column uses valor_compra*100 — presumably
# 100 points per R$ 1; confirm the conversion rate).
# NOTE(review): Styler.applymap is deprecated in pandas >= 2.1 in
# favour of Styler.map — confirm the pandas version in use.
df_rendimentos_acumulo.round(2).style.applymap(
    lambda i: vermelho_se_maior_igual(i, valor_compra),
    subset=['Valor U.I.', 'U.I. (Poupanca)', 'U.I. (FicSAFE)', 'U.I. (FicRISK)']
).applymap(
    lambda i: vermelho_se_maior_igual(i, valor_compra*100),
    subset=['Pontos Gerados']
)
# -
# No gráfico/tabela acima, o objetivo é atingido quando as linhas de uma coluna se tornam verdes. O objetivo aqui é acumular pontos o bastante para pagar a compra ou acumular rendimento suficiente das aplicações dos pontos já utilizados para superar o valor da compra.
#
# Assim, atingimos o objetivo de R\$ 1.000,00 por meio do acúmulo de pontos em 31 meses, 33 caso tenhamos investido o valor dos pontos em um fundo de rentabilidade similar ao do FicRISK e 36 caso investido na poupança ou no FicSAFE. Caso os pontos não tenham sido investidos, o objetivo é atingido em 40 meses.
#
# Logo, **acumular os pontos do NuBank Rewards oferece uma vantagem para o membro do programa.**
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import random
from collections import Counter
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
def distance(p1, p2):
    """Euclidean distance between points ``p1`` and ``p2`` (arrays)."""
    diff = p2 - p1
    return np.sqrt((diff ** 2).sum())
# +
def majority_vote(votes):
    """Return the most common element in votes.

    Ties are broken uniformly at random among the elements sharing the
    maximum count (uses the module-level ``random`` state).
    """
    # Counter replaces the hand-rolled counting loop.
    vote_counts = Counter(votes)
    max_count = max(vote_counts.values())
    winners = [vote for vote, count in vote_counts.items() if count == max_count]
    return random.choice(winners)
# Alternative to above function
def majority_vote_short(votes):
    """Return the most common element in votes.

    Uses scipy's masked-stats mode; on ties the smallest element wins
    (unlike ``majority_vote``, which breaks ties at random).
    """
    mode, count = ss.mstats.mode(votes)
    # ss.mstats.mode returns arrays; extract the scalar element so the
    # function returns an element as documented, not a length-1 array.
    return mode[0]
# +
def find_nearest_neighbours(p, points, k=5):
    """
    Find the nearest k neighbours of point p and return their indices.

    p: 1-D coordinate array; points: (n, d) array of candidate points.
    Returns the indices of the k rows of ``points`` closest to ``p``
    (same argsort ordering as before on equal distances).
    """
    # Vectorized Euclidean distances replace the per-row Python loop.
    distances = np.sqrt(np.sum((points - p) ** 2, axis=1))
    ind = np.argsort(distances)
    return ind[:k]
def knn_predict(p, points, outcomes, k=5):
    """Predict the class of point ``p`` by majority vote among the k
    nearest rows of ``points`` (labelled by ``outcomes``).

    Ties are broken at random by ``majority_vote``.
    """
    ind = find_nearest_neighbours(p, points, k)
    return majority_vote(outcomes[ind])
# -
def generate_synth_data(n=50):
    """Draw two clouds of n bivariate-normal points (means 0 and 1,
    unit variance) labelled 0 and 1 respectively.

    Returns ``(points, outcomes)`` where points has shape (2n, 2).
    """
    cloud_zero = ss.norm(0,1).rvs((n,2))
    cloud_one = ss.norm(1,1).rvs((n,2))
    points = np.concatenate((cloud_zero, cloud_one), axis=0)
    outcomes = np.concatenate((np.repeat(0, n), np.repeat(1, n)))
    return (points, outcomes)
def make_prediction_grid(predictors, outcomes, limits, h, k):
    """Classify every point of a regular grid with kNN.

    limits is (x_min, x_max, y_min, y_max); h is the grid step.
    Returns (xx, yy, prediction_grid) with prediction_grid[j, i]
    holding the predicted class at (xs[i], ys[j]).
    """
    x_min, x_max, y_min, y_max = limits
    xs = np.arange(x_min, x_max, h)
    ys = np.arange(y_min, y_max, h)
    xx, yy = np.meshgrid(xs, ys)
    prediction_grid = np.zeros(xx.shape, dtype=int)
    # Column-major visiting order kept identical to the original so the
    # random tie-breaking inside knn_predict consumes the RNG the same way.
    for i, x in enumerate(xs):
        for j, y in enumerate(ys):
            prediction_grid[j, i] = knn_predict(np.array([x, y]), predictors, outcomes, k)
    return (xx, yy, prediction_grid)
def plot_prediction_grid (xx, yy, prediction_grid, filename):
    """
    Plot KNN predictions for every point on the grid.

    Saves the figure to ``filename``.  NOTE(review): the scatter call
    reads the module-level globals ``predictors`` and ``outcomes``
    rather than taking them as parameters — confirm they are defined
    before calling this.
    """
    background_colormap = ListedColormap (["hotpink","lightskyblue", "yellowgreen"])
    observation_colormap = ListedColormap (["red","blue","green"])
    plt.figure(figsize =(10,10))
    plt.pcolormesh(xx, yy, prediction_grid, cmap = background_colormap, alpha = 0.5, shading='auto')
    # Globals, not parameters — see docstring note.
    plt.scatter(predictors[:,0], predictors [:,1], c = outcomes, cmap = observation_colormap, s = 50)
    plt.xlabel('Variable 1'); plt.ylabel('Variable 2')
    plt.xticks(()); plt.yticks(())
    plt.xlim (np.min(xx), np.max(xx))
    plt.ylim (np.min(yy), np.max(yy))
    plt.savefig(filename)
# +
# Load iris, keep the first two features, plot the three classes, then
# draw and save the kNN decision surface over the feature range.
iris = datasets.load_iris()
iris["data"]
predictors = iris.data[:, 0:2]
outcomes = iris.target
plt.plot(predictors[outcomes==0][:,0], predictors[outcomes==0][:,1], "ro")
plt.plot(predictors[outcomes==1][:,0], predictors[outcomes==1][:,1], "go")
plt.plot(predictors[outcomes==2][:,0], predictors[outcomes==2][:,1], "bo")
plt.savefig("iris.pdf")
k=5; filename="iris_grid.pdf"; limits = (4,8,1.5,4.5); h = 0.1
(xx, yy, prediction_grid) = make_prediction_grid(predictors, outcomes, limits, h, k)
plot_prediction_grid(xx, yy, prediction_grid, filename)
# +
# Compare the hand-written kNN with sklearn's on the training data:
# first line prints classifier agreement, last two print each
# classifier's training-set accuracy.
knn = KNeighborsClassifier(n_neighbors = 5)
knn.fit(predictors, outcomes)
sk_predictions = knn.predict(predictors)
my_predictions = np.array([knn_predict(p, predictors, outcomes, 5) for p in predictors])
print(100 * np.mean(sk_predictions == my_predictions))
print(100 * np.mean(sk_predictions == outcomes))
print(100 * np.mean(my_predictions == outcomes))
# +
#Testing Data
# p1 = np.array([1, 1])
# p2 = np.array([4, 4])
# distance(p1, p2)
# votes = [1,2,3,1,2,3,1,2,3,3,3,3,2,2,2]
# winner = majority_vote(votes)
# winner1 = majority_vote_short(votes)
# print(winner, winner1)
# points = np.array([[1,1], [1,2], [1,3], [2,1], [2,2], [2,3], [3,1], [3,2], [3,3]])
# p = np.array([2.5, 2])
# plt.plot(points[:,0], points[:,1], "ro")
# plt.plot(p[0], p[1], "bo")
# plt.axis([0.5, 3.5, 0.5, 3.5])
# n = 20
# plt.figure()
# plt.plot(points[:n,0], points[:n,1], "ro")
# plt.plot(points[n:,0], points[n:,1], "bo")
# plt.show()
# (predictors, outcomes) = generate_synth_data()
# k=5; filename="knn_synth_5.pdf"; limits = (-3,4,-3,4); h = 0.1
# (xx, yy, prediction_grid) = make_prediction_grid(predictors, outcomes, limits, h, k)
# plot_prediction_grid(xx, yy, prediction_grid, filename)
# k=50; filename="knn_synth_50.pdf"; limits = (-3,4,-3,4); h = 0.1
# (xx, yy, prediction_grid) = make_prediction_grid(predictors, outcomes, limits, h, k)
# plot_prediction_grid(xx, yy, prediction_grid, filename)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Route root-logger output to stdout; reloading the logging module
# works around the notebook environment having configured it already.
import logging
import importlib
importlib.reload(logging)  # see https://stackoverflow.com/a/21475297/1469195
log = logging.getLogger()
log.setLevel('INFO')
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
                    level=logging.INFO, stream=sys.stdout)
# +
# %%capture
import os
import site
os.sys.path.insert(0, '/home/schirrmr/code/reversible/')
os.sys.path.insert(0, '/home/schirrmr/braindecode/code/braindecode/')
os.sys.path.insert(0, '/home/schirrmr/code/explaining/reversible//')
# %load_ext autoreload
# %autoreload 2
import numpy as np
import logging
log = logging.getLogger()
log.setLevel('INFO')
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
level=logging.INFO, stream=sys.stdout)
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import cm
# %matplotlib inline
# %config InlineBackend.figure_format = 'png'
matplotlib.rcParams['figure.figsize'] = (12.0, 1.0)
matplotlib.rcParams['font.size'] = 14
import seaborn
seaborn.set_style('darkgrid')
from reversible2.sliced import sliced_from_samples
from numpy.random import RandomState
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import copy
import math
import itertools
import torch as th
from braindecode.torch_ext.util import np_to_var, var_to_np
from reversible2.splitter import SubsampleSplitter
from reversible2.view_as import ViewAs
from reversible2.affine import AdditiveBlock
from reversible2.plot import display_text, display_close
from reversible2.bhno import load_file, create_inputs
th.backends.cudnn.benchmark = True
# +
from reversible2.util import set_random_seeds
cuda=True
set_random_seeds(2019011641, cuda)
# 440 x 128 x 1024 x 1 tensor of standard-normal noise on the GPU;
# print its size in MiB (float32 = 4 bytes per element).
rand_inputs = th.randn(440,128,1024,1, device='cuda')
print((rand_inputs.numel()* 4) / (1024 ** 2))
# +
def conv_add_block_3x3(n_c, n_i_c):
    """Additive coupling block whose two subnetworks are small
    Conv(3x1) -> ReLU -> Conv(3x1) nets.

    n_c: total channels entering the block (the coupling splits them in
    half); n_i_c: hidden channels of each subnetwork.
    """
    def subnet():
        # Fresh Sequential per call — two independent initialisations,
        # created in the same order as the original code.
        return nn.Sequential(
            nn.Conv2d(n_c // 2, n_i_c, (3, 1), stride=1, padding=(1, 0),
                      bias=True),
            nn.ReLU(),
            nn.Conv2d(n_i_c, n_c // 2, (3, 1), stride=1, padding=(1, 0),
                      bias=True))
    return AdditiveBlock(subnet(), subnet(), switched_order=False)
def dense_add_block(n_c, n_i_c):
    """Additive coupling block whose two subnetworks are two-layer MLPs
    (Linear -> ReLU -> Linear) over half the channels."""
    def subnet():
        return nn.Sequential(
            nn.Linear(n_c // 2, n_i_c, ),
            nn.ReLU(),
            nn.Linear(n_i_c, n_c // 2, ))
    return AdditiveBlock(subnet(), subnet(), switched_order=False)
# Reference (non-constant-memory) reversible network: a shared base,
# two branch levels, and a dense+RFFT head, assembled into a Node graph.
# The spatial-size comments (e.g. "2 x 256") track channels x samples
# after each SubsampleSplitter.
cuda = True
from reversible2.graph import Node
from reversible2.branching import CatChans, ChunkChans, Select
from copy import deepcopy
from reversible2.graph import Node
from reversible2.distribution import TwoClassDist
from reversible2.rfft import RFFT, Interleave
from reversible2.util import set_random_seeds
from torch.nn import ConstantPad2d
import torch as th
from reversible2.splitter import SubsampleSplitter
# Same seed as the constant-memory variants below so their randomly
# initialised weights match for the equivalence checks.
set_random_seeds(2019011641, cuda)
n_chans = 128
base_model = nn.Sequential(
    SubsampleSplitter(stride=[2,1],chunk_chans_first=False),# 2 x 256
    conv_add_block_3x3(2*n_chans,32),
    conv_add_block_3x3(2*n_chans,32),
    SubsampleSplitter(stride=[2,1],chunk_chans_first=True), # 4 x 128
    conv_add_block_3x3(4*n_chans,32),
    conv_add_block_3x3(4*n_chans,32),
    SubsampleSplitter(stride=[2,1],chunk_chans_first=True), # 8 x 64
    conv_add_block_3x3(8*n_chans,32),
    conv_add_block_3x3(8*n_chans,32))
base_model.cuda();
branch_1_a = nn.Sequential(
    SubsampleSplitter(stride=[2,1],chunk_chans_first=False), # 8 x 32
    conv_add_block_3x3(8*n_chans,32),
    conv_add_block_3x3(8*n_chans,32),
    SubsampleSplitter(stride=[2,1],chunk_chans_first=True), # 16 x 16
    conv_add_block_3x3(16*n_chans,32),
    conv_add_block_3x3(16*n_chans,32),
    SubsampleSplitter(stride=[2,1],chunk_chans_first=True), # 32 x 8
    conv_add_block_3x3(32*n_chans,32),
    conv_add_block_3x3(32*n_chans,32),
)
# branch_1_b copies branch_1_a's layers (deepcopy: same weights) and
# appends a flatten + dense coupling tail.
branch_1_b = nn.Sequential(
    *(list(deepcopy(branch_1_a).children()) + [
    ViewAs((-1, 32*n_chans,16,1), (-1,512*n_chans)),
    dense_add_block(512*n_chans,32),
    dense_add_block(512*n_chans,32),
    dense_add_block(512*n_chans,32),
    dense_add_block(512*n_chans,32),
    ]))
branch_1_a.cuda();
branch_1_b.cuda();
branch_2_a = nn.Sequential(
    SubsampleSplitter(stride=[2,1], chunk_chans_first=False), # 32 x 4
    conv_add_block_3x3(32*n_chans,32),
    conv_add_block_3x3(32*n_chans,32),
    SubsampleSplitter(stride=[2,1],chunk_chans_first=True), # 64 x 2
    conv_add_block_3x3(64*n_chans,32),
    conv_add_block_3x3(64*n_chans,32),
    SubsampleSplitter(stride=[2,1],chunk_chans_first=True), # 128 x 1
    ViewAs((-1, 128*n_chans,2,1), (-1,256*n_chans)),
    dense_add_block(256*n_chans,64),
    dense_add_block(256*n_chans,64),
    dense_add_block(256*n_chans,64),
    dense_add_block(256*n_chans,64),
)
branch_2_b = deepcopy(branch_2_a).cuda()
branch_2_a.cuda();
branch_2_b.cuda();
final_model = nn.Sequential(
    dense_add_block(1024*n_chans,256),
    dense_add_block(1024*n_chans,256),
    dense_add_block(1024*n_chans,256),
    dense_add_block(1024*n_chans,256),
    RFFT()
)
final_model.cuda();
# Node graph: base -> chunk into two branches; branch 1a chunks again
# into branches 2a/2b; 1b, 2a, 2b are concatenated into the final head.
o = Node(None, base_model)
o = Node(o, ChunkChans(2))
o1a = Node(o, Select(0))
o1b = Node(o, Select(1))
o1a = Node(o1a, branch_1_a)
o1b = Node(o1b, branch_1_b)
o2 = Node(o1a, ChunkChans(2))
o2a = Node(o2, Select(0))
o2b = Node(o2, Select(1))
o2a = Node(o2a, branch_2_a)
o2b = Node(o2b, branch_2_b)
o = Node([o1b,o2a,o2b], CatChans())
o = Node(o, final_model)
feature_model = o
if cuda:
    feature_model.cuda()
feature_model.eval();
# -
# Reference run: forward 20 trials, invert the outputs, and backprop a
# dummy loss. outs / inverted / loss / grads serve as ground truth for
# the equivalence assertions on the constant-memory models below.
outs = feature_model(rand_inputs[:20])
inverted = feature_model.invert(outs)
loss = th.sum(inverted ** 2)
feature_model.zero_grad()
loss.backward()
# +
from reversible2.wrap_invertible import WrapInvertible
from reversible2.constantmemory import AdditiveBlockConstantMemory
def sequential_to_invertible(seq_model):
    """Convert a plain nn.Sequential of supported modules into its
    constant-memory invertible counterpart.

    Only the first child keeps its input and only the last keeps its
    output; everything in between is recomputed via the inverse.
    """
    children = list(seq_model.children())
    last = len(children) - 1
    new_children = [
        module_to_invertible(c, i_c == 0, i_c == last)
        for i_c, c in enumerate(children)
    ]
    new_seq_model = nn.Sequential(*new_children)
    assert new_seq_model[0].keep_input == True
    assert new_seq_model[-1].keep_output == True
    return new_seq_model
def module_to_invertible(c, keep_input, keep_output):
    """Wrap a deep copy of module ``c`` in its constant-memory
    invertible equivalent; raises ValueError for unsupported classes.
    """
    c = deepcopy(c)
    classname = c.__class__.__name__
    if classname == 'AdditiveBlock':
        # Only the non-switched variant maps onto the constant-memory block.
        assert c.switched_order == False
        return AdditiveBlockConstantMemory(c.FA, c.GA, keep_input=keep_input, keep_output=keep_output)
    if classname in ('SubsampleSplitter', 'ViewAs'):
        # Pure reorderings: the gradient is the inverse transform.
        return WrapInvertible(c, keep_input=keep_input, keep_output=keep_output, grad_is_inverse=True)
    if classname in ('RFFT',):
        return WrapInvertible(c, keep_input=keep_input, keep_output=keep_output, grad_is_inverse=False)
    raise ValueError("Unknown class to convert invertible {:s}".format(
        classname))
# +
from reversible2.constantmemory import AdditiveBlockConstantMemory
def conv_add_3x3_const(n_c, n_i_c, keep_input=False, keep_output=False):
    """Constant-memory additive coupling block with Conv(3x1) -> ReLU ->
    Conv(3x1) subnetworks (see conv_add_block_3x3 for the plain version)."""
    def subnet():
        return nn.Sequential(
            nn.Conv2d(n_c // 2, n_i_c, (3, 1), stride=1, padding=(1, 0),
                      bias=True),
            nn.ReLU(),
            nn.Conv2d(n_i_c, n_c // 2, (3, 1), stride=1, padding=(1, 0),
                      bias=True))
    return AdditiveBlockConstantMemory(
        subnet(), subnet(),
        keep_input=keep_input, keep_output=keep_output)
def dense_add_const(n_c, n_i_c, keep_input=False, keep_output=False):
    """Constant-memory additive coupling block with two-layer MLP
    subnetworks (see dense_add_block for the plain version)."""
    def subnet():
        return nn.Sequential(
            nn.Linear(n_c // 2, n_i_c, ),
            nn.ReLU(),
            nn.Linear(n_i_c, n_c // 2, ))
    return AdditiveBlockConstantMemory(
        subnet(), subnet(),
        keep_input=keep_input, keep_output=keep_output)
# Constant-memory variant of the network above, built by hand with
# WrapInvertible / *_const blocks. Same seed => same initial weights as
# feature_model, which the assertions below rely on.
cuda = True
from reversible2.graph import Node
from reversible2.branching import CatChans, ChunkChans, Select
from reversible2.graph import Node
from reversible2.wrap_invertible import WrapInvertible
from copy import deepcopy
from reversible2.graph import Node
from reversible2.rfft import RFFT, Interleave
from reversible2.util import set_random_seeds
from torch.nn import ConstantPad2d
import torch as th
from reversible2.splitter import SubsampleSplitter
set_random_seeds(2019011641, cuda)
n_chans = 128
base_model_2 = nn.Sequential(
    WrapInvertible(SubsampleSplitter(stride=[2,1],chunk_chans_first=False),
                   grad_is_inverse=True, keep_input=True),# 2 x 256
    conv_add_3x3_const(2*n_chans,32),
    conv_add_3x3_const(2*n_chans,32),
    WrapInvertible(SubsampleSplitter(stride=[2,1],chunk_chans_first=True), grad_is_inverse=True), # 4 x 128
    conv_add_3x3_const(4*n_chans,32),
    conv_add_3x3_const(4*n_chans,32),
    WrapInvertible(SubsampleSplitter(stride=[2,1],chunk_chans_first=True), grad_is_inverse=True), # 8 x 64
    conv_add_3x3_const(8*n_chans,32),
    conv_add_3x3_const(8*n_chans,32, keep_output=True))
base_model_2.cuda();
branch_1_a_2 = nn.Sequential(
    WrapInvertible(SubsampleSplitter(stride=[2,1],chunk_chans_first=False),
                   grad_is_inverse=True, keep_input=True), # 8 x 32
    conv_add_3x3_const(8*n_chans,32),
    conv_add_3x3_const(8*n_chans,32),
    WrapInvertible(SubsampleSplitter(stride=[2,1],chunk_chans_first=True),
                   grad_is_inverse=True),# 16 x 16
    conv_add_3x3_const(16*n_chans,32),
    conv_add_3x3_const(16*n_chans,32),
    WrapInvertible(SubsampleSplitter(stride=[2,1],chunk_chans_first=True),
                   grad_is_inverse=True), # 32 x 8
    conv_add_3x3_const(32*n_chans,32),
    conv_add_3x3_const(32*n_chans,32, keep_output=True),
)
branch_1_b_2 = nn.Sequential(
    *(list(deepcopy(branch_1_a_2).children()) + [
    WrapInvertible(ViewAs((-1, 32*n_chans,16,1), (-1,512*n_chans)),
                   grad_is_inverse=True, keep_input=True),
    dense_add_const(512*n_chans,32),
    dense_add_const(512*n_chans,32),
    dense_add_const(512*n_chans,32),
    dense_add_const(512*n_chans,32, keep_output=True),
    ]))
branch_1_a_2.cuda();
branch_1_b_2.cuda();
branch_2_a_2 = nn.Sequential(
    WrapInvertible(SubsampleSplitter(stride=[2,1], chunk_chans_first=False),
                   keep_input=True, grad_is_inverse=True),# 32 x 4
    conv_add_3x3_const(32*n_chans,32),
    conv_add_3x3_const(32*n_chans,32),
    WrapInvertible(SubsampleSplitter(stride=[2,1],chunk_chans_first=True),
                   grad_is_inverse=True),# 64 x 2
    conv_add_3x3_const(64*n_chans,32),
    conv_add_3x3_const(64*n_chans,32),
    WrapInvertible(SubsampleSplitter(stride=[2,1],chunk_chans_first=True),
                   grad_is_inverse=True), # 128 x 1
    WrapInvertible(ViewAs((-1, 128*n_chans,2,1), (-1,256*n_chans)),
                   grad_is_inverse=True),
    dense_add_const(256*n_chans,64),
    dense_add_const(256*n_chans,64),
    dense_add_const(256*n_chans,64),
    dense_add_const(256*n_chans,64, keep_output=True),
)
branch_2_b_2 = deepcopy(branch_2_a_2).cuda()
branch_2_a_2.cuda();
branch_2_b_2.cuda();
final_model_2 = nn.Sequential(
    dense_add_const(1024*n_chans,256,keep_input=True),
    dense_add_const(1024*n_chans,256),
    dense_add_const(1024*n_chans,256),
    dense_add_const(1024*n_chans,256),
    WrapInvertible(RFFT(), keep_output=True),
)
final_model_2.cuda();
# Same Node graph topology as the reference model.
o_2 = Node(None, base_model_2)
o_2 = Node(o_2, ChunkChans(2))
o1a_2 = Node(o_2, Select(0))
o1b_2 = Node(o_2, Select(1))
o1a_2 = Node(o1a_2, branch_1_a_2)
o1b_2 = Node(o1b_2, branch_1_b_2)
o2_2 = Node(o1a_2, ChunkChans(2))
o2a_2 = Node(o2_2, Select(0))
o2b_2 = Node(o2_2, Select(1))
o2a_2 = Node(o2a_2, branch_2_a_2)
o2b_2 = Node(o2b_2, branch_2_b_2)
o_2 = Node([o1b_2,o2a_2,o2b_2], CatChans())
o_2 = Node(o_2, final_model_2)
feature_model2 = o_2
if cuda:
    feature_model2.cuda()
feature_model2.eval();
# +
# Check the constant-memory model matches the reference model:
# identical outputs, inversions, loss, parameters, and (to tolerance)
# gradients.
outs2 = feature_model2(rand_inputs[:20])
inverted2 = feature_model2.invert(outs2)
loss2 = th.sum(inverted2 ** 2)
feature_model2.zero_grad()
loss2.backward()
assert th.allclose(outs, outs2)
assert th.allclose(inverted, inverted2)
assert th.allclose(loss, loss2)
for p1,p2 in zip(feature_model.parameters(), feature_model2.parameters()):
    assert th.allclose(p1,p2)
    if not th.allclose(p1.grad, p2.grad):
        print("Mean: {:.0E}".format(th.mean(th.abs(p1.grad - p2.grad)).item()))
        print("Max: {:.0E}".format(th.max(th.abs(p1.grad - p2.grad)).item()))
    assert th.allclose(p1.grad, p2.grad, rtol=1e-3, atol=1e-2)
# +
# NOTE(review): conv_add_3x3_const and dense_add_const below are exact
# duplicates of the definitions earlier in the file — consider removing
# one copy.
from reversible2.constantmemory import AdditiveBlockConstantMemory
def conv_add_3x3_const(n_c, n_i_c, keep_input=False, keep_output=False):
    """Constant-memory additive coupling block with 3x1-conv subnets."""
    return AdditiveBlockConstantMemory(
        nn.Sequential(
            nn.Conv2d(n_c // 2, n_i_c, (3, 1), stride=1, padding=(1, 0),
                      bias=True),
            nn.ReLU(),
            nn.Conv2d(n_i_c, n_c // 2, (3, 1), stride=1, padding=(1, 0),
                      bias=True)),
        nn.Sequential(
            nn.Conv2d(n_c // 2, n_i_c, (3, 1), stride=1, padding=(1, 0),
                      bias=True),
            nn.ReLU(),
            nn.Conv2d(n_i_c, n_c // 2, (3, 1), stride=1, padding=(1, 0),
                      bias=True)),
        keep_input=keep_input, keep_output=keep_output)
def dense_add_const(n_c, n_i_c, keep_input=False, keep_output=False):
    """Constant-memory additive coupling block with MLP subnets."""
    return AdditiveBlockConstantMemory(
        nn.Sequential(
            nn.Linear(n_c // 2, n_i_c, ),
            nn.ReLU(),
            nn.Linear(n_i_c, n_c // 2, )),
        nn.Sequential(
            nn.Linear(n_c // 2, n_i_c, ),
            nn.ReLU(),
            nn.Linear(n_i_c, n_c // 2, )),
        keep_input=keep_input, keep_output=keep_output)
# Third variant: rebuild the constant-memory model, this time partly by
# converting the reference submodules with sequential_to_invertible
# (sharing the reference weights rather than re-initialising).
cuda = True
from reversible2.graph import Node
from reversible2.branching import CatChans, ChunkChans, Select
from reversible2.graph import Node
from reversible2.wrap_invertible import WrapInvertible
from copy import deepcopy
from reversible2.graph import Node
from reversible2.rfft import RFFT, Interleave
from reversible2.util import set_random_seeds
from torch.nn import ConstantPad2d
import torch as th
from reversible2.splitter import SubsampleSplitter
set_random_seeds(2019011641, cuda)
n_chans = 128
base_model_2 = nn.Sequential(
    WrapInvertible(SubsampleSplitter(stride=[2,1],chunk_chans_first=False),
                   grad_is_inverse=True, keep_input=True),# 2 x 256
    conv_add_3x3_const(2*n_chans,32),
    conv_add_3x3_const(2*n_chans,32),
    WrapInvertible(SubsampleSplitter(stride=[2,1],chunk_chans_first=True), grad_is_inverse=True), # 4 x 128
    conv_add_3x3_const(4*n_chans,32),
    conv_add_3x3_const(4*n_chans,32),
    WrapInvertible(SubsampleSplitter(stride=[2,1],chunk_chans_first=True), grad_is_inverse=True), # 8 x 64
    conv_add_3x3_const(8*n_chans,32),
    conv_add_3x3_const(8*n_chans,32, keep_output=True))
base_model_2.cuda();
branch_1_a_2 = nn.Sequential(
    WrapInvertible(SubsampleSplitter(stride=[2,1],chunk_chans_first=False),
                   grad_is_inverse=True, keep_input=True), # 8 x 32
    conv_add_3x3_const(8*n_chans,32),
    conv_add_3x3_const(8*n_chans,32),
    WrapInvertible(SubsampleSplitter(stride=[2,1],chunk_chans_first=True),
                   grad_is_inverse=True),# 16 x 16
    conv_add_3x3_const(16*n_chans,32),
    conv_add_3x3_const(16*n_chans,32),
    WrapInvertible(SubsampleSplitter(stride=[2,1],chunk_chans_first=True),
                   grad_is_inverse=True), # 32 x 8
    conv_add_3x3_const(32*n_chans,32),
    conv_add_3x3_const(32*n_chans,32, keep_output=True),
)
branch_1_b_2 = nn.Sequential(
    *(list(sequential_to_invertible(branch_1_a).children()) + list(sequential_to_invertible(
    nn.Sequential(
    ViewAs((-1, 32*n_chans,16,1), (-1,512*n_chans)),
    dense_add_block(512*n_chans,32),
    dense_add_block(512*n_chans,32),
    dense_add_block(512*n_chans,32),
    dense_add_block(512*n_chans,32),)).children())
    ))
# NOTE(review): the line below immediately overwrites the branch_1_b_2
# built just above — the previous construction is dead code. The
# "causes problems" comment suggests this is a known issue; confirm
# which construction is intended.
branch_1_b_2 = sequential_to_invertible(branch_1_b) # causes problems
branch_1_a_2.cuda();
branch_1_b_2.cuda();
branch_2_a_2 = sequential_to_invertible(branch_2_a)
branch_2_b_2 = deepcopy(branch_2_a_2).cuda()
branch_2_a_2.cuda();
branch_2_b_2.cuda();
final_model_2 = sequential_to_invertible(final_model)
final_model_2.cuda();
# Same Node graph topology as before.
o_2 = Node(None, base_model_2)
o_2 = Node(o_2, ChunkChans(2))
o1a_2 = Node(o_2, Select(0))
o1b_2 = Node(o_2, Select(1))
o1a_2 = Node(o1a_2, branch_1_a_2)
o1b_2 = Node(o1b_2, branch_1_b_2)
o2_2 = Node(o1a_2, ChunkChans(2))
o2a_2 = Node(o2_2, Select(0))
o2b_2 = Node(o2_2, Select(1))
o2a_2 = Node(o2a_2, branch_2_a_2)
o2b_2 = Node(o2b_2, branch_2_b_2)
o_2 = Node([o1b_2,o2a_2,o2b_2], CatChans())
o_2 = Node(o_2, final_model_2)
feature_model2 = o_2
if cuda:
    feature_model2.cuda()
feature_model2.eval();
# -
from reversible2.constantmemory import clear_ctx_dicts
# Same equivalence check as above for the converted model, then clear
# the cached contexts held by the constant-memory blocks.
outs2 = feature_model2(rand_inputs[:20])
inverted2 = feature_model2.invert(outs2)
loss2 = th.sum(inverted2 ** 2)
feature_model2.zero_grad()
loss2.backward()
assert th.allclose(outs, outs2)
assert th.allclose(inverted, inverted2)
assert th.allclose(loss, loss2)
for p1,p2 in zip(feature_model.parameters(), feature_model2.parameters()):
    assert th.allclose(p1,p2)
    if not th.allclose(p1.grad, p2.grad):
        print("Mean: {:.0E}".format(th.mean(th.abs(p1.grad - p2.grad)).item()))
        print("Max: {:.0E}".format(th.max(th.abs(p1.grad - p2.grad)).item()))
    assert th.allclose(p1.grad, p2.grad, rtol=1e-3, atol=1e-2)
clear_ctx_dicts(feature_model2)
#
# # Old
# +
# ("Old" section) Convert every reference submodule wholesale with
# sequential_to_invertible and rebuild the Node graph.
from reversible2.constantmemory import AdditiveBlockConstantMemory
cuda = True
from reversible2.graph import Node
from reversible2.branching import CatChans, ChunkChans, Select
from reversible2.graph import Node
from reversible2.wrap_invertible import WrapInvertible
from copy import deepcopy
from reversible2.graph import Node
from reversible2.rfft import RFFT, Interleave
from reversible2.util import set_random_seeds
from torch.nn import ConstantPad2d
import torch as th
from reversible2.splitter import SubsampleSplitter
set_random_seeds(2019011641, cuda)
n_chans = 128
base_model_2 = sequential_to_invertible(base_model)
base_model_2.cuda();
branch_1_a_2 = sequential_to_invertible(branch_1_a)
branch_1_b_2 = sequential_to_invertible(branch_1_b)
branch_1_a_2.cuda();
branch_1_b_2.cuda();
branch_2_a_2 = sequential_to_invertible(branch_2_a)
branch_2_b_2 = deepcopy(branch_2_a_2).cuda()
branch_2_a_2.cuda();
branch_2_b_2.cuda();
final_model_2 = sequential_to_invertible(final_model)
final_model_2.cuda();
o_2 = Node(None, base_model_2)
o_2 = Node(o_2, ChunkChans(2))
o1a_2 = Node(o_2, Select(0))
o1b_2 = Node(o_2, Select(1))
o1a_2 = Node(o1a_2, branch_1_a_2)
o1b_2 = Node(o1b_2, branch_1_b_2)
o2_2 = Node(o1a_2, ChunkChans(2))
o2a_2 = Node(o2_2, Select(0))
o2b_2 = Node(o2_2, Select(1))
o2a_2 = Node(o2a_2, branch_2_a_2)
o2b_2 = Node(o2b_2, branch_2_b_2)
o_2 = Node([o1b_2,o2a_2,o2b_2], CatChans())
o_2 = Node(o_2, final_model_2)
feature_model2 = o_2
if cuda:
    feature_model2.cuda()
feature_model2.eval();
# -
th.stack([p.norm() for p in feature_model2.parameters()])
# NOTE(review): this zeroes feature_model's grads but the next line
# reads feature_model2's grads — confirm the intended model, and that
# feature_model2's parameters have grads at this point.
feature_model.zero_grad()
th.stack([p.grad.norm() for p in feature_model2.parameters()])
# +
# NOTE(review): this cell forwards rand_inputs[:50] while the reference
# `outs` was computed from rand_inputs[:20] — the allclose below would
# compare mismatched batch sizes; confirm the intended slice.
outs2 = feature_model2(rand_inputs[:50])
inverted2 = feature_model2.invert(outs2)
loss2 = th.sum(inverted2 ** 2)
feature_model2.zero_grad()
loss2.backward()
assert th.allclose(outs, outs2)
assert th.allclose(inverted, inverted2)
assert th.allclose(loss, loss2)
for p1,p2 in zip(feature_model.parameters(), feature_model2.parameters()):
    assert th.allclose(p1,p2)
    if not th.allclose(p1.grad, p2.grad):
        print("Mean: {:.0E}".format(th.mean(th.abs(p1.grad - p2.grad)).item()))
        print("Max: {:.0E}".format(th.max(th.abs(p1.grad - p2.grad)).item()))
    assert th.allclose(p1.grad, p2.grad, rtol=1e-3, atol=1e-2)
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.0
# language: julia
# name: julia-1.5
# ---
# # Compare MendelImpute against Minimac4 and Beagle5 on simulated data
using Revise
using VCFTools
using MendelImpute
using GeneticVariation
using Random
using SparseArrays
using JLD2, FileIO, JLSO
using ProgressMeter
using GroupSlices
using ThreadPools
# using Plots
# using ProfileView
# +
# keep best pair only (8 thread)
# Impute the masked targets with MendelImpute and measure the error rate.
# NOTE(review): X_complete is commented out below, so this cell relies
# on it surviving from an earlier session — confirm before rerunning.
Random.seed!(2020)
d = 1000
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
@time ph = phase(tgtfile, reffile, outfile = outfile, max_d = d,
    phase = true);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile, trans=true)
# X_complete = convert_gt(Float32, "./compare1/target.vcf.gz", trans=true)
n, p = size(X_mendel)
# rm(outfile, force=true)
error_rate = sum(X_mendel .!= X_complete) / n / p
# +
# keep best pair only (8 thread)
# Same run but writing compressed (.jlso) output and reconstructing the
# genotypes from the two phased haplotype matrices.
# NOTE(review): H is commented out — also relies on earlier session state.
Random.seed!(2020)
d = 1000
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.jlso"
@time ph2 = phase(tgtfile, reffile, outfile = outfile, max_d = d,
    phase = true);
# import imputed result and compare with true
# H = convert_ht(Float32, "./compare1/haplo_ref.vcf.gz", trans=true)
X1, X2 = convert_compressed(Float32, ph2, H)
X_mendel2 = X1 + X2
# X_complete = convert_gt(Float32, "./compare1/target.vcf.gz", trans=true)
n, p = size(X_mendel)
error_rate = sum(X_mendel2 .!= X_complete) / n / p
# -
# # Simulate data
#
# ### Step 0. Install `msprime`
#
# [msprime download Link](https://msprime.readthedocs.io/en/stable/installation.html).
#
# Some people might need to activate conda environment via `conda config --set auto_activate_base True`. You can turn it off once simulation is done by executing `conda config --set auto_activate_base False`.
#
#
# ### Step 1. Simulate data in terminal
#
# ```
# python3 msprime_script.py 4000 10000 5000000 2e-8 2e-8 2019 > full.vcf
# ```
#
# Arguments:
# + Number of haplotypes = 40000
# + Effective population size = 10000 ([source](https://www.the-scientist.com/the-nutshell/ancient-humans-more-diverse-43556))
# + Sequence length = 10 million (same as Beagle 5's choice)
# + Recombination rate = 2e-8 (default)
# + mutation rate = 2e-8 (default)
# + seed = 2019
# ### Step 2: Convert simulated haplotypes to reference haplotypes and target genotype files
#
# + `haplo_ref.vcf.gz`: haplotype reference files
# + `target.vcf.gz`: complete genotype information
# + `target_masked.vcf.gz`: the same as `target.vcf.gz` except some entries are masked
# +
# Split the simulated samples: the last 1000 become imputation targets,
# the rest form the haplotype reference panel; then mask 10% of target
# genotypes at random.
records, samples = nrecords("./compare1/full.vcf"), nsamples("./compare1/full.vcf")
@show records
@show samples;
# compute target and reference index
tgt_index = falses(samples)
tgt_index[samples-999:end] .= true
ref_index = .!tgt_index
record_index = trues(records) # save all records (SNPs)
# create target.vcf.gz and haplo_ref.vcf.gz
@time VCFTools.filter("./compare1/full.vcf", record_index, tgt_index, des = "./compare1/target.vcf.gz")
@time VCFTools.filter("./compare1/full.vcf", record_index, ref_index, des = "./compare1/haplo_ref.vcf.gz")
# import full target matrix. Also transpose so that columns are samples.
# NOTE(review): paths below drop the ./compare1/ prefix used above —
# confirm the working directory.
@time X = convert_gt(Float32, "target.vcf.gz"; as_minorallele=false)
X = copy(X')
# mask 10% entries
p, n = size(X)
Random.seed!(123)
missingprop = 0.1
X .= ifelse.(rand(Float32, p, n) .< missingprop, missing, X)
masks = ismissing.(X)
# save X to new VCF file
mask_gt("target.vcf.gz", masks, des="target_masked.vcf.gz")
# -
# # Try compressing haplotype ref panels
# compress as jlso (max unique haplotypes per window d = 1000)
d = 1000
reffile = "./compare1/haplo_ref.vcf.gz"
tgtfile = "./compare1/target_masked.vcf.gz"
outfile = "./compare1/haplo_ref.jlso"
@time compress_haplotypes(reffile, tgtfile, outfile, d);
# # Haplotype thinning
# Each cell below reruns phase(...) with different options and reports
# the imputation error rate against the complete genotypes.
# +
# doesn't account for allele freq
Random.seed!(2020)
width = 512
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
@time ph = phase(tgtfile, reffile, outfile = outfile, width = width,
    dynamic_programming=false, thinning_factor=100,
    thinning_scale_allelefreq=false, max_haplotypes = 100);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
rm(outfile, force=true)
error_rate = sum(X_mendel .!= X_complete) / n / p
# +
# haplopair_thin (scale freq by 1-p), keep = 100 (1 thread)
Random.seed!(2020)
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
width = 512
@time hs, ph = phase(tgtfile, reffile, outfile = outfile, width = width,
    dynamic_programming=false, thinning_factor=100,
    thinning_scale_allelefreq=false, max_haplotypes = 100);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
# X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
error_rate = sum(X_mendel .!= X_complete) / n / p
# -
# # MendelImpute error
# +
# keep best pair only (1 thread)
Random.seed!(2020)
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
width = 512
@time ph = phase(tgtfile, reffile, outfile = outfile, width = width);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
error_rate = sum(X_mendel .!= X_complete) / n / p
# +
# keep best pair only (8 thread)
Random.seed!(2020)
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
width = 512
@time ph = phase(tgtfile, reffile, outfile = outfile, width = width);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
error_rate = sum(X_mendel .!= X_complete) / n / p
# -
# # Rescreen
# Variants that keep multiple candidate haplotype pairs per window.
# NOTE(review): the last two cells drop the ./compare1/ prefix and use
# width = 500 instead of 512 — confirm both are intentional.
# +
# keep top matching happairs (1 thread)
Random.seed!(2020)
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
width = 512
@time ph = phase(tgtfile, reffile, outfile = outfile, width = width,
    dynamic_programming=false, rescreen=true);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
error_rate = sum(X_mendel .!= X_complete) / n / p
# +
# Keep a list of top haplotype pairs (1 thread)
Random.seed!(2020)
tgtfile = "target_masked.vcf.gz"
reffile = "haplo_ref.jlso"
outfile = "imputed_target.vcf.gz"
width = 500
@time ph = phase(tgtfile, reffile, outfile = outfile, width = width);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
X_complete = convert_gt(Float32, "target.vcf.gz")
n, p = size(X_mendel)
error_rate = sum(X_mendel .!= X_complete) / n / p
# +
# Keep a list of top haplotype pairs (8 thread)
Random.seed!(2020)
tgtfile = "target_masked.vcf.gz"
reffile = "haplo_ref.jlso"
outfile = "imputed_target.vcf.gz"
width = 500
@time ph = phase(tgtfile, reffile, outfile = outfile, width = width);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
X_complete = convert_gt(Float32, "target.vcf.gz")
n, p = size(X_mendel)
error_rate = sum(X_mendel .!= X_complete) / n / p
# -
# # MendelImpute with intersecting haplotype sets
# +
# keep best pair only (8 thread)
Random.seed!(2020)
width = 512
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
@time ph = phase(tgtfile, reffile, outfile = outfile, width = width,
dynamic_programming = false);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
rm(outfile, force=true)
error_rate = sum(X_mendel .!= X_complete) / n / p
# +
# scale by allele freq (8 thread)
Random.seed!(2020)
width = 512
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
@time ph = phase(tgtfile, reffile, outfile = outfile, width = width,
dynamic_programming = false, scale_allelefreq=true);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
rm(outfile, force=true)
error_rate = sum(X_mendel .!= X_complete) / n / p
# -
# # MendelPhase
# +
# scale by allele freq (8 thread)
Random.seed!(2020)
width = 512
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
@time ph = phase(tgtfile, reffile, outfile = outfile, width = width,
dynamic_programming = false, phase=true);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
rm(outfile, force=true)
error_rate = sum(X_mendel .!= X_complete) / n / p
# -
# # Screening flanking windows
# +
# keep best pair only (8 thread)
Random.seed!(2020)
width = 512
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
@time ph = phase(tgtfile, reffile, outfile = outfile, width = width,
dynamic_programming = false);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
rm(outfile, force=true)
error_rate = sum(X_mendel .!= X_complete) / n / p
# -
# # Try Lasso
# +
# keep best pair only (8 thread)
Random.seed!(2020)
width = 512
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
@time ph = phase(tgtfile, reffile, outfile = outfile, width = width,
dynamic_programming=false, lasso = 1, max_haplotypes = 100);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
rm(outfile, force=true)
error_rate = sum(X_mendel .!= X_complete) / n / p
# +
# keep best pair only (8 thread)
Random.seed!(2020)
width = 512
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
@time ph = phase(tgtfile, reffile, outfile = outfile, width = width,
dynamic_programming=false, lasso = 20, max_haplotypes = 100);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
rm(outfile, force=true)
error_rate = sum(X_mendel .!= X_complete) / n / p
# +
# scale allele freq
Random.seed!(2020)
width = 512
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
@time ph = phase(tgtfile, reffile, outfile = outfile, width = width,
dynamic_programming=false, lasso = 1, max_haplotypes = 100, scale_allelefreq=true);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
# X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
rm(outfile, force=true)
error_rate = sum(X_mendel .!= X_complete) / n / p
# +
# scale allele freq
Random.seed!(2020)
width = 512
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
@time ph = phase(tgtfile, reffile, outfile = outfile, width = width,
dynamic_programming=false, lasso = 20, max_haplotypes = 100, scale_allelefreq=true);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
# X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
rm(outfile, force=true)
error_rate = sum(X_mendel .!= X_complete) / n / p
# -
# # Chunking
# +
# sets num_windows_per_chunks = 10
Random.seed!(2020)
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
width = 512
@time ph = phase(tgtfile, reffile, outfile = outfile, width = width,
dynamic_programming = false);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
error_rate = sum(X_mendel .!= X_complete) / n / p
# +
# default num_windows_per_chunks (i.e. only 1 chunk)
Random.seed!(2020)
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
width = 512
@time ph = phase(tgtfile, reffile, outfile = outfile, width = width,
dynamic_programming = false);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
error_rate = sum(X_mendel .!= X_complete) / n / p
# +
# dp and only 1 chunk
Random.seed!(2020)
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
width = 512
@time ph = phase(tgtfile, reffile, outfile = outfile, width = width,
dynamic_programming = true);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
error_rate = sum(X_mendel .!= X_complete) / n / p
# -
# # Beagle 5.1 Error
# Convert the reference panel to bref3 (run in terminal, NOT from Julia).
# BUG FIX: the command below was a bare (uncommented) shell line, which is a
# Julia parse error; it is kept here as a comment so the script still parses:
# java -jar bref3.18May20.d20.jar haplo_ref.vcf.gz > haplo_ref.bref3
# run beagle 5 (1 thread)
run(`java -jar beagle.18May20.d20.jar gt=target_masked.vcf.gz ref=haplo_ref.bref3 out=beagle.result nthreads=1`)
# beagle 5.1 error rate: fraction of genotype entries differing from the truth
X_complete = convert_gt(Float32, "target.vcf.gz")
n, p = size(X_complete)
X_beagle = convert_gt(Float32, "beagle.result.vcf.gz")
error_rate = sum(X_beagle .!= X_complete) / n / p
# +
# run beagle 5.1 (8 thread)
run(`java -jar beagle.18May20.d20.jar gt=./compare1/target_masked.vcf.gz ref=./compare1/haplo_ref.bref3 out=./compare1/beagle.result nthreads=8`)
# beagle 5 error rate
X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_complete)
X_beagle = convert_gt(Float32, "./compare1/beagle.result.vcf.gz")
error_rate = sum(X_beagle .!= X_complete) / n / p
# -
# # Minimac4 error
#
# Need to first convert reference vcf file to m3vcf using minimac3 (on Hoffman)
#
# ```Julia
# minimac3 = "/u/home/b/biona001/haplotype_comparisons/minimac3/Minimac3/bin/Minimac3"
# @time run(`$minimac3 --refHaps haplo_ref.vcf.gz --processReference --prefix haplo_ref`)
# ```
# +
# run minimac 4
minimac4 = "/Users/biona001/Benjamin_Folder/UCLA/research/softwares/Minimac4/build/minimac4"
run(`$minimac4 --refHaps haplo_ref.m3vcf.gz --haps target_masked.vcf.gz --prefix minimac4.result`)
X_minimac = convert_gt(Float32, "minimac4.result.dose.vcf.gz", as_minorallele=false)
error_rate = sum(X_minimac .!= X_complete) / n / p
# -
# # BLAS 2
# +
# haplopair_thin (doesn't accounts for allele freq), keep = 100 (1 thread)
Random.seed!(2020)
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
width = 512
@time hs, ph = phase(tgtfile, reffile, outfile = outfile, width = width,
thinning_factor=100, thinning_scale_allelefreq=false);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
error_rate = sum(X_mendel .!= X_complete) / n / p
# -
# # BLAS 3
# +
# haplopair_thin (doesn't accounts for allele freq), keep = 100 (1 thread)
Random.seed!(2020)
tgtfile = "./compare1/target_masked.vcf.gz"
reffile = "./compare1/haplo_ref.jlso"
outfile = "./compare1/imputed_target.vcf.gz"
width = 512
@time hs, ph = phase(tgtfile, reffile, outfile = outfile, width = width,
thinning_factor=100, thinning_scale_allelefreq=false);
# import imputed result and compare with true
X_mendel = convert_gt(Float32, outfile)
X_complete = convert_gt(Float32, "./compare1/target.vcf.gz")
n, p = size(X_mendel)
error_rate = sum(X_mendel .!= X_complete) / n / p
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="B42YV4QyuRKZ"
# # Instructions:
# ## Please save a copy of this notebook to your google drive and answer the questions below.
# ## Once completed please submit your notebook to the following [GitHub Repo](https://github.com/7-gate-academy-ml-program/Synopsis)
# + [markdown] colab_type="text" id="d8mmOkNLyoWt"
# ### Name:
# + [markdown] colab_type="text" id="SdkchPgStYin"
# # 1.) What is the difference between Classification and Regression?
# + [markdown] colab_type="text" id="0aKWEz_atmEp"
# Classification outputs discrete/categorical variables, while regression outputs numerical/continuous variables.
# + [markdown] colab_type="text" id="Ma7t_7yBtmud"
# # 2.) What is the Curse of Dimensionality?
# + [markdown] colab_type="text" id="9C3_8rvyyDYA"
# As the number of features or dimensions grows, the amount of data we need to generalize accurately grows exponentially.
# + [markdown] colab_type="text" id="w_rFydWVyDl3"
# # 3.) What is Cross Validation?
# + [markdown] colab_type="text" id="X-iA3jnCtxqP"
# When we hold out some of the training data and 'pretend' it is test data. We do this to create a model complex enough to model the data while making sure it isn't going to diverge when applied to the test set. For example, we might take the training data and split it into 4 folds. We would train on the first 3 folds and test on the 4th, then train on folds 1, 2, 4 and test on the 3rd, and so forth.
# + [markdown] colab_type="text" id="ieaSLJE2tyVm"
# # 4.) On a high level how do Decision Trees work?
# + [markdown] colab_type="text" id="sD6nBEWCt8ud"
# A tree has decision nodes and result leaves. At each node a 'question' is asked in regards to a specific feature/attribute. Depending on the answer, a path is taken down the tree until a result is reached at the end.
# + [markdown] colab_type="text" id="6akxVwD_t82G"
# # 5.) In regards to SVMs what is the Kernel Trick?
# + [markdown] colab_type="text" id="eDGR-lhHuKNq"
# SVMs only work if the data is linearly separable. If we cannot separate the data in the current dimensional space, we might be able to separate it in a higher-dimensional space. The kernel trick does this. It is also where we can inject domain knowledge.
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ____
#
# <center> <h1 style="background-color:#975be5; color:white"><br>01-Categorical_Vars_Exercise - Solutions<br></h1></center>
#
# ____
# <div align="right">
# <b><a href="https://keytodatascience.com/">KeytoDataScience.com </a></b>
# </div>
import pandas as pd
# +
# Define the headers since the data does not have any
headers = ["symboling", "normalized_losses", "make", "fuel_type", "aspiration",
           "num_doors", "body_style", "drive_wheels", "engine_location",
           "wheel_base", "length", "width", "height", "curb_weight",
           "engine_type", "num_cylinders", "engine_size", "fuel_system",
           "bore", "stroke", "compression_ratio", "horsepower", "peak_rpm",
           "city_mpg", "highway_mpg", "price"]
# Read in the CSV file and convert "?" to NaN
df = pd.read_csv("Automobile price data _Raw_.txt", header=None, names=headers, na_values="?" )
# NOTE(review): with header=None every row is data, so this discards the first
# record — presumably a stray header/junk row in the raw file. TODO confirm.
df = df.loc[1:,]
# Keep only the columns used in this exercise
df = df[["make", "fuel_type", "num_doors", "body_style", "city_mpg", "price"]]
df.head()
# +
# check nulls for the column num_doors
df.num_doors.isna().sum()
# +
# check distribution of var num_doors
df.num_doors.value_counts()
# -
# Before going any further, there are a couple of null values in the data that we need to clean up.
# +
# Impute missing num_doors values with the column's mode (most frequent value).
# BUG FIX: the previous `df.fillna(mode, inplace=True)` filled NaNs in *every*
# column (e.g. `price`) with the num_doors mode string; the fill must be
# restricted to the num_doors column only.
df['num_doors'] = df['num_doors'].fillna(df['num_doors'].value_counts().index[0])
# +
# Label encode the feature 'num_doors': map each category to an integer code.
from sklearn.preprocessing import LabelEncoder

lb_make = LabelEncoder()
# fit_transform learns the category -> integer mapping and applies it in one
# step; the encoded values go into a new column so the original is preserved.
df['num_doors_le'] = lb_make.fit_transform(df['num_doors'])
df.head()
# +
# Perform One hot encode on 'num_doors' and merge it with rest of dataset
df_onehot = df.copy()
# get_dummies expands num_doors into one indicator column per category
# (prefixed "num_doors_") and drops the original num_doors column.
df_onehot = pd.get_dummies(df_onehot, columns=['num_doors'], prefix = ['num_doors'])
df_onehot.head()
# -
# ____
#
# <center> <h1 style="background-color:#975be5; color:white"><br>Great Job!<br></h1><br></center>
#
# ____
# <div align="right">
# <b><a href="https://keytodatascience.com/">KeytoDataScience.com</a></b>
# </div>
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Day 4, Part B: Creating Custom Environments
# ## Learning goals
# - How to build your own custom environment
# - How to connect environments to other simulations/platforms
# ## Definitions
# - **Simulation environment**: Notice that this is not the same as the python/conda environment. The simulation environment is the simulated world where the reinforcement learning takes place. It provides opportunities for an agent to learn and explore, and ideally provides challenges that aid in efficient learning.
# - **Agent (aka actor or policy)**: An entity in the simulation environment that performs actions. The agent could be a person, a robot, a car, a thermostat, etc.
# - **State variable**: An observed variable in the simulation environment. They can be coordinates of objects or entities, an amount of fuel in a tank, air temperature, wind speed, etc.
# - **Action variable**: An action that the agent can perform. Examples: step forward, increase velocity to 552.5 knots, push object left with force of 212.3 N, etc.
# - **Reward**: A value given to the agent for doing something considered to be 'good'. Reward is commonly assigned at each time step and cumulated during a learning episode.
# - **Episode**: A learning event consisting of multiple steps in which the agent can explore. It starts with the unmodified environment and continues until the goal is achieved or something prevents further progress, such as a robot getting stuck in a hole. Multiple episodes are typically run in loops until the model is fully trained.
# - **Model (aka policy or agent)**: An RL model is composed of the modeling architecture (e.g., neural network) and parameters or weights that define the unique behavior of the model.
# - **Policy (aka model or agent)**: The parameters of a model that encode the best choices to make in an environment. The choices are not necessarily good ones until the model undergoes training. The policy (or model) is the "brain" of the agent.
# - **Replay Buffer**: A place in memory to store state, action, reward and other variables describing environmental state transitions. It is effectively the agent's memory of past experiences.
# - **On-policy**: The value of the next action is determined using the current actor policy.
# - **Off-policy**: The value of the next action is determined by a function, such as a value function, instead of the current actor policy.
# - **Value function**: Function (typically a neural network) used to estimate the value, or expected reward, of an action.
# ## More practical than the Ant environment
#
# 
#
# Well. Now you can take your ant off-the-shelf and make it do all sorts of fun tricks with modifications to the environment, the reward, and the training routine. It's time to go make an army of virtual ants to do everything we need. Oh - you had other, *actual*, applications? Are you sure it can't be solved with a dancing ant?
#
# And that brings us to the pressing point: a virtual ant is well and fine, but we don't *actually* just want a fancy, simulated robo-ant. What we want is to be able to hook up our new intelligent machinery to something we actually care about and that needs a boost from an RL model.
#
# Maybe the agent is a person, a vehicle, a robot, or whatever. It could also have actions that we haven't used before, like pick up box and put down box. You can choose whatever state and action variables you need to accomplish a goal in the environment.
#
# The environment does not need to be a simulated world either. RL models can control lots of things. For example, Google uses RL to control its [data warehouse cooling system](https://blog.google/outreach-initiatives/environment/data-centers-get-fit-on-efficiency/).
#
# 
#
# In this lesson, however, we will stick with an environment that is not too different from the one you've been working with, for the sake of simplicity.
# ## Customize `SimpleEnv`
#
# Thankfully, the template to make your own environment is actually simpler than you'd think. The details will only get as messy as your environment, so start with the general architecture and add features as you go.
# By building a new class off of `gym.Env` (subclassing) we get all the general machinery we need - we just have to define four things:
# - `__init__(self)`
# - `step(self, action)`
# - `render(self)`
# - `reset(self)`
# - Bonus optional: `seed(self, seed=None)`
#
# The great majority of your work will happen in `step()` - given the past `action` (usually an array) this is what takes your environment from state `t` to state `t+1`. The `step()` is also in charge of determining the reward for that step from `t` to `t+1` and returning it. In a simple case, your `step()` may simply take the action components and modify the environment directly, or in more complicated cases, this is where you would call other components in your simulation engine to step forward (e.g. robotics, physics, etc. simulations). If you were, for example, running a physics environment similar to Pybullet, then `step()` would pass the action and step-simulation commands over to the engine and have methods for receiving the new state, as well as calculating the reward.
#
# Take a look at a bare-bones setup below:
# +
import gym, gym.spaces, gym.utils, gym.utils.seeding
import numpy as np
class SimpleEnv(gym.Env):
    """Minimal gym environment skeleton: 2 continuous actions, 4 observed values.

    Each step copies the two action components into a 4-component state and
    hands back a constant reward of 1; episodes never terminate on their own.
    """

    def __init__(self):
        # Actions live in [0, 1]^2, observations in [0, 10]^4.
        self.action_space = gym.spaces.box.Box(low=0, high=1, shape=(2,), dtype=np.float32)
        self.observation_space = gym.spaces.box.Box(low=0, high=10, shape=(4,), dtype=np.float32)
        self.reset()

    def step(self, action):
        # Duplicate each action component so the state has the expected 4 entries.
        first, second = action[0], action[1]
        self.state = np.array([first, first, second, second], dtype=np.float32)
        return self.state, 1, False, {}

    def render(self):
        pass

    def reset(self):
        # Back to the all-zero starting state.
        self.state = np.zeros(4, dtype=np.float32)
        return self.state

    def seed(self, seed=None):
        self.np_random, seed = gym.utils.seeding.np_random(seed)
        return [seed]
# -
# If you were to take our usual TD3 implementation (similar to the code below), dump the code above into a file named `MySimpleEnv.py`, and change the registration to point to `file:class` it will run without error and dump 1000 as the episode reward over and over (reward = 1 for 1000 steps). It's not terribly exciting, but it ran!
#
# Let's look more closely at `action_space` and `observation_space`. These are defined by gym utility functions in `spaces`. They define the dimension and type (discrete or continuous) of each variable.
# - It's up to the step function to make sense of those variables, and it is important to make sure values stay within the low/high ranges dictated by the definitions
# - For algorithms like TD3 that apply action noise, it's very important to understand the scale of each of the action values. If you have one variable on the range [0,100] and another on [0,1], but the algorithm is basing the scale of noise off of the [0,100] variable, you may be completely washing out your second action variable with noise, unless you make modifications to the routine to make the action noise scaled per action.
#
# Try to get a feel for what's happening in the action and observation spaces by examining some characteristics of them (**add some new cells and examine the observation space too**):
action_space = gym.spaces.box.Box(low=0,high=1, shape=(2,), dtype=np.float32)
action_space.sample()
action_space.high
action_space.high[0]
# In TD3 `main()`, they scale noise by `max_action = float(env.action_space.high[0])` so all action noise is based on the scale of the first action-space variable.
#
# Looking again at the SimpleEnv() class:
#
# - `step()` takes the action array and applies changes to the environment state with those action variables.
# - `step()` then returns that state in the range and dimension that the observation_space can handle.
# - recall that not all information about the state of your environment needs to be passed back. You can have placeholders and simulation states held by things external to the observation/state return.
# - Finally, `reset()` needs to take the environment state back to the starting conditions of your episode - just reset everything to 0.
# ## MyCustomEnv
#
# Let's now look at an environment with a tiny bit more meat on its bones - we've included `MyCustomEnv.py` in the `Course_Material` folder. If you look at the code, you'll see it's not doing *that* much more than the SimpleEnv above. The general idea in the environment is that there's an agent that needs to get to a point in (x,y)-space.
#
# In `MyCustomEnv`, the agent has two state variables
# - Position
# - Heading
#
# And it has two actions:
# - Turn heading +/- 40 degrees
# - Throttle
#
# It's rewarded by getting closer to the target coordinates, similar to our ant, but without all the robotics and physics in the way - point your agent's heading, hit the throttle and it moves; no momentum, etc.
#
# If you run the code below (again, just the `main()` from TD3 with our new environment registered) it will train the agent to achieve the goal we defined in the reward. Note these two points:
# 1. We can create environments that will actually solve, given the right inputs.
# 2. This could have been solved by an extremely simple, hand-coded function. Not everything needs to be RL... but it can be.
# +
import numpy as np
import torch
import gym
import pybullet_envs
import os
import sys
from pathlib import Path
sys.path.append(str(Path().resolve().parent))
import utils
import TD3
# -
def eval_policy(policy, env_name, seed, eval_episodes=10):
    """Return the average undiscounted return of `policy` over `eval_episodes` episodes.

    A fresh environment is created and seeded at `seed + 100` so evaluation
    rollouts never share random state with the training environment.
    """
    eval_env = gym.make(env_name)
    eval_env.seed(seed + 100)

    avg_reward = 0.
    for _ in range(eval_episodes):
        state, done = eval_env.reset(), False
        while not done:
            # Deterministic (noise-free) action from the current policy.
            state, reward, done, _ = eval_env.step(policy.select_action(np.array(state)))
            avg_reward += reward
    avg_reward /= eval_episodes

    print("---------------------------------------")
    print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
    print("---------------------------------------")
    return avg_reward
# +
from gym.envs.registration import registry, make, spec
def register(id, *args, **kvargs):
    """Register a gym environment id, silently skipping ids already registered."""
    if id in registry.env_specs:
        return None
    return gym.envs.registration.register(id, *args, **kvargs)
# -
register(
id='MyCustomEnv-v0',
entry_point='MyCustomEnv:MyCustomEnvClass',
max_episode_steps=1000,
reward_threshold=2500.0
)
def main():
    """Train a TD3 agent on MyCustomEnv-v0 and periodically evaluate it.

    Mirrors the reference TD3 training script: after `start_timesteps` of
    purely random exploration the policy acts with Gaussian exploration noise,
    every transition is stored in a replay buffer, and the policy takes one
    gradient step per environment step. Evaluation scores are saved under
    ./results, checkpoints under ./models, and per-episode rewards are
    mirrored to ./rewards/Day4_cust.txt.
    """
    args = {
        "policy" : "TD3",                 # Policy name (TD3, DDPG or OurDDPG)
        "env" : "MyCustomEnv-v0",         # OpenAI gym environment name
        "seed" : 0,                       # Sets Gym, PyTorch and Numpy seeds
        "start_timesteps" : 25e3,         # Time steps initial random policy is used
        "eval_freq" : 5e3,                # How often (time steps) we evaluate
        "max_timesteps" : 0.25e6,         # Max time steps to run environment
        "expl_noise" : 0.1,               # Std of Gaussian exploration noise
        "batch_size" : 256,               # Batch size for both actor and critic
        "discount" : 0.99,                # Discount factor
        "tau" : 0.005,                    # Target network update rate
        "policy_noise" : 0.2,             # Noise added to target policy during critic update
        "noise_clip" : 0.5,               # Range to clip target policy noise
        "policy_freq" : 2,                # Frequency of delayed policy updates
        "save_model" : "store_true",      # Save model and optimizer parameters
        "load_model" : "",                # Model load file name, "" doesn't load, "default" uses file_name
    }

    file_name = f"{args['policy']}_{args['env']}_{args['seed']}_custom"
    print("---------------------------------------")
    print(f"Policy: {args['policy']}, Env: {args['env']}, Seed: {args['seed']}")
    print("---------------------------------------")

    # Output directories for episode rewards, evaluation curves, and checkpoints.
    if not os.path.exists("./rewards"):
        os.makedirs("./rewards")
    if not os.path.exists("./results"):
        os.makedirs("./results")
    if args['save_model'] and not os.path.exists("./models"):
        os.makedirs("./models")

    env = gym.make(args['env'])

    # Set seeds
    env.seed(args['seed'])
    env.action_space.seed(args['seed'])
    torch.manual_seed(args['seed'])
    np.random.seed(args['seed'])

    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    # NOTE(review): noise below is scaled by the FIRST action dimension's high
    # value only — fine when all action dims share a scale, otherwise revisit.
    max_action = float(env.action_space.high[0])

    kwargs = {
        "state_dim": state_dim,
        "action_dim": action_dim,
        "max_action": max_action,
        "discount": args['discount'],
        "tau": args['tau'],
    }

    # Initialize policy
    if args['policy'] == "TD3":
        # Target policy smoothing is scaled wrt the action scale
        kwargs["policy_noise"] = args['policy_noise'] * max_action
        kwargs["noise_clip"] = args['noise_clip'] * max_action
        kwargs["policy_freq"] = args['policy_freq']
        policy = TD3.TD3(**kwargs)

    if args['load_model'] != "":
        policy_file = file_name if args['load_model'] == "default" else args['load_model']
        policy.load(f"./models/{policy_file}")

    replay_buffer = utils.ReplayBuffer(state_dim, action_dim)

    # Evaluate untrained policy
    evaluations = [eval_policy(policy, args['env'], args['seed'])]

    state, done = env.reset(), False
    episode_reward = 0
    episode_timesteps = 0
    episode_num = 0

    with open('./rewards/Day4_cust.txt', 'w') as f:
        for t in range(int(args['max_timesteps'])):
            episode_timesteps += 1

            # Select action randomly or according to policy
            if t < args['start_timesteps']:
                action = env.action_space.sample()
            else:
                # Policy action plus Gaussian exploration noise, clipped to the
                # valid action range.
                action = (
                    policy.select_action(np.array(state))
                    + np.random.normal(0, max_action * args['expl_noise'], size=action_dim)
                ).clip(-max_action, max_action)

            # Perform action
            next_state, reward, done, _ = env.step(action)
            # Treat timeout terminations as non-terminal (done_bool = 0) so the
            # critic still bootstraps past the artificial episode cutoff.
            done_bool = float(done) if episode_timesteps < env._max_episode_steps else 0

            # Store data in replay buffer
            replay_buffer.add(state, action, next_state, reward, done_bool)

            state = next_state
            episode_reward += reward

            # Train agent after collecting sufficient data
            if t >= args['start_timesteps']:
                policy.train(replay_buffer, args['batch_size'])

            if done:
                # +1 to account for 0 indexing. +0 on ep_timesteps since it will increment +1 even if done=True
                print(f"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}")
                print(f"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}", file=f)
                # Reset environment
                state, done = env.reset(), False
                episode_reward = 0
                episode_timesteps = 0
                episode_num += 1

            # Evaluate episode
            if (t + 1) % args['eval_freq'] == 0:
                evaluations.append(eval_policy(policy, args['env'], args['seed']))
                np.save(f"./results/{file_name}", evaluations)
                if args['save_model']: policy.save(f"./models/{file_name}")
# + tags=[]
main()
# -
# Our included `SingleAnalysis.ipynb` will plot your rewards over time. We also modified `main()` above to dump the rewards info to file along the way, so if you've just run things, the file should be in the rewards folder. We also have the plotting code duplicated below, so you can run those cells and view the plot here.
# +
import time
import pandas
import hvplot.pandas
# Accumulators for the reward curve; build_plot appends into these.
t_steps = []
reward_vals = []

def build_plot(file_name):
    """Parse ./rewards/<file_name>.txt, appending (timestep, reward) pairs.

    Lines look like:
        Total T: 123 Episode Num: 4 Episode T: 56 Reward: 7.890
    Matching lines append the total timestep to the module-level `t_steps`
    list and the episode reward to `reward_vals`; other lines are ignored.
    """
    # `with` guarantees the handle is closed even if a line fails to parse
    # (the original open()/close() pair leaked the handle on error).
    with open(f"./rewards/{file_name}.txt") as f:
        for line in f:
            if line.startswith("T"):
                fields = line.split(":")
                # fields[1] is " <total timestep> Episode Num"; the first
                # whitespace-separated token is the number we want.
                t_steps.append(int(fields[1].split()[0]))
                reward_vals.append(float(fields[-1].strip()))
# +
build_plot("Day4_cust")
df = pandas.DataFrame({'Time_Steps':t_steps, 'Reward':reward_vals})
df.hvplot(x='Time_Steps', y='Reward').opts(alpha=0.5, color='#8848ab')
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Setup
# Because we switch to `tf2`, instead of using `tf.slim`, we will just use `tf.keras` for the inception model weights.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib as mpl
from PIL import Image
from tqdm.notebook import tqdm
# +
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
DARK_READER = True
if DARK_READER:
plt.rcParams.update({
"lines.color": "white",
"patch.edgecolor": "white",
"text.color": "black",
"axes.facecolor": "black",
"axes.edgecolor": "lightgray",
"axes.labelcolor": "white",
"axes.titlecolor": "white",
"xtick.color": "white",
"ytick.color": "white",
"grid.color": "lightgray",
"figure.facecolor": "black",
"figure.edgecolor": "black",
"savefig.facecolor": "black",
"savefig.edgecolor": "black",
})
# -
import tensorflow as tf
import tensorflow.keras as keras
tf.__version__, keras.__version__
inception_model = keras.applications.inception_v3.InceptionV3()
inception_model, type(inception_model)
isinstance(inception_model, keras.Model)
def non_dunder(obj, strict=False):
    """Return obj's attribute names, hiding dunders.

    With strict=True, every underscore-prefixed name is hidden instead of
    only the double-underscore ones.
    """
    hidden_prefix = "_" if strict else "__"
    return [name for name in dir(obj) if not name.startswith(hidden_prefix)]
inception_model.trainable = False
inception_model.trainable
inception_model.trainable_weights, inception_model.trainable_variables
inception_model.layers[0].input.shape
import numpy as np
from tensorflow.keras.applications.inception_v3 import preprocess_input
# + active=""
# preprocess_input(
# np.random.randint(low=0, high=255, size=(2,2,3)))
# + active=""
# random_tensor = tf.constant(
# np.random.randint(low=0, high=255, size=(2,2,3)),
# dtype=tf.float32,
# )
# #random_tensor.dtype
# preprocess_input(random_tensor)
# -
def inception(image):
    """Run the frozen InceptionV3 model on one image and return its output vector.

    Accepts either an already-preprocessed tf.Variable / float tensor, or a raw
    uint8 numpy image (rescaled to InceptionV3's expected [-1, 1] range via
    preprocess_input). A lone HWC image is expanded to a batch of one; a 4-D
    input is assumed to already be batched.
    """
    if isinstance(image, tf.Variable):
        # Variables are assumed to already hold preprocessed pixel values.
        pass
    elif isinstance(image, np.ndarray) and image.dtype == np.uint8:
        image = preprocess_input(image)
    # BUG FIX: previously `image_batch` was only assigned for 3-D inputs, so a
    # batched (4-D) input raised NameError at the model call below.
    if len(image.shape) == 3:
        image_batch = tf.expand_dims(image, axis=0)
    else:
        image_batch = image
    return inception_model(image_batch)[0]
import json
from pathlib import Path
from urllib.request import urlretrieve
# + active=""
# help(urlretrieve)
# +
imagenet_json = Path("imagenet.json")
if not imagenet_json.exists():
imagenet_json, _ = urlretrieve(
'https://www.anishathalye.com/media/2017/07/25/imagenet.json',
imagenet_json,
)
with open(imagenet_json) as f:
imagenet_labels = json.load(f)
imagenet_labels
# -
len(imagenet_labels)
img_path = Path("cat.jpg")
if not img_path.exists():
img_path, _ = urlretrieve(
'https://www.anishathalye.com/media/2017/07/25/cat.jpg',
img_path
)
img_class = 281
imagenet_labels[img_class]
img = Image.open(img_path)
#big_dim = max(img.width, img.height)
wide = img.width > img.height
new_w = 299 if not wide else int(img.width * 299 / img.height)
new_h = 299 if wide else int(img.height * 299 / img.width)
img = img.resize((new_w, new_h)).crop((0, 0, 299, 299))
#img = (np.asarray(img) / 255.0).astype(np.float32)
#img = np.asarray(img, dtype=np.float32)
img = np.asarray(img)
img.shape, img.dtype, img.max(), img.min()
def classify(img, correct_class=None, target_class=None, k=10):
    """Show *img* next to a bar chart of the model's top-k class probabilities.

    The bar for *target_class* is colored red and the one for *correct_class*
    green (green wins when both map to the same bar, since it is set last).
    """
    fig, (img_ax, bar_ax) = plt.subplots(1, 2, figsize=(10, 8))
    fig.sca(img_ax)  # sca() = Select Current Axis
    img_ax.imshow(img)
    p = inception(img)
    p = p.numpy()
    # Indices of the k most probable classes, most probable first.
    top_indices = list((tf.argsort(p)[-k:][::-1]).numpy())
    bars = bar_ax.bar(range(k), p[top_indices])
    if target_class in top_indices:
        bars[top_indices.index(target_class)].set_color('r')
    if correct_class in top_indices:
        bars[top_indices.index(correct_class)].set_color('g')
    plt.sca(bar_ax)
    plt.ylim([0, 1.1])
    # Truncate each label to 15 characters so long class names stay readable.
    plt.xticks(range(k),
               [imagenet_labels[i][:15] for i in top_indices],
               rotation='vertical')
    fig.subplots_adjust(bottom=0.2)
    plt.show()
classify(img, correct_class=img_class)
isinstance(tf.Variable(10), tf.Tensor)
isinstance(tf.Variable(10), tf.Variable)
# + active=""
# def inverse_preprocess(array, uint8=False):
# if isinstance(array, tf.Tensor) or isinstance(array, tf.Variable):
# array = array.numpy()
# array01 = (array + 1) / 2
# if uint8:
# return (array01 * 255).astype(np.uint8)
# else:
# return array01
# -
def inverse_preprocess(array, uint8=False):
    """Invert Inception v3 preprocessing: map values in [-1, 1] back to [0, 1].

    Parameters
    ----------
    array : array-like
        Preprocessed pixel data with values in [-1, 1].
    uint8 : bool, default False
        If True, additionally rescale to [0, 255] and cast to ``np.uint8`` so
        the result can be displayed or saved directly as an image.
        (Fix: the original accepted this flag but ignored it.)

    Returns
    -------
    The rescaled data; an ``np.uint8`` ndarray when ``uint8=True``.
    """
    array01 = (array + 1) / 2
    if uint8:
        # np.asarray also accepts eager TF tensors via the array protocol;
        # clip first so float rounding can never overflow the uint8 cast.
        return (np.clip(np.asarray(array01), 0.0, 1.0) * 255).astype(np.uint8)
    return array01
# # Adversarial examples
#
# Given an image $\mathbf{x}$, our neural network outputs a probability distribution over labels, $P(y \mid \mathbf{x})$. When we craft an adversarial input, we want to find an $\hat{\mathbf{x}}$ where $\log P(\hat{y} \mid \hat{\mathbf{x}})$ is maximized for a target label $\hat{y}$: that way, our input will be misclassified as the target class. We can ensure that $\hat{\mathbf{x}}$ doesn't look too different from the original $\mathbf{x}$ by constraining ourselves to some $\ell_\infty$ <del>box</del> ball with radius $\epsilon$, requiring that $\left\lVert \mathbf{x} - \hat{\mathbf{x}} \right\rVert_\infty \le \epsilon$.
#
# In this framework, an adversarial example is the solution to a constrained optimization problem that we can solve using [backpropagation](https://colah.github.io/posts/2015-08-Backprop/) and projected gradient descent, basically the same techniques that are used to train networks themselves. The algorithm is simple:
#
# We begin by initializing our adversarial example as $\hat{\mathbf{x}} \leftarrow \mathbf{x}$. Then, we repeat the following until convergence:
#
# 1. $\hat{\mathbf{x}} \leftarrow \hat{\mathbf{x}} + \alpha \cdot \nabla \log P(\hat{y} \mid \hat{\mathbf{x}})$
# 2. $\hat{\mathbf{x}} \leftarrow \mathrm{clip}(\hat{\mathbf{x}}, \mathbf{x} - \epsilon, \mathbf{x} + \epsilon)$
# +
#image = tf.Variable(img, dtype=tf.float32)
image = tf.Variable(tf.zeros((299,299,3), dtype=tf.float32))
x_hat = image
x_hat.dtype, x_hat.shape
# -
demo_epsilon = 2.0/255.0 # a really small perturbation
# Test other target classes at your own will
#demo_target = imagenet_labels.index("pizza, pizza pie")
demo_target = imagenet_labels.index("desktop computer")
demo_epsilon, demo_target
# + active=""
# help(np.clip)
# -
img_processed = keras.applications.inception_v3.preprocess_input(img)
below = img_processed - demo_epsilon
below = np.clip(below, -1, 1)
above = img_processed + demo_epsilon
above = np.clip(above, -1, 1)
print(f"img[:2, :2, 0] =\n{img[:2, :2, 0]}\n")
print(f"img_processed[:2, :2, 0] =\n{img_processed[:2, :2, 0]}\n")
print(f"below[:2, :2, 0] =\n{below[:2, :2, 0]}\n")
print(f"above[:2, :2, 0] =\n{above[:2, :2, 0]}")
# + active=""
# demo_lr = 1e-1
# demo_steps = 100
#
# # projected gradient descent
# for i in tqdm(range(demo_steps)):
# ## gradient descent step
# #_, loss_value = sess.run(
# # [optim_step, loss],
# # feed_dict={learning_rate: demo_lr, y_hat: demo_target})
# ## project step
# #sess.run(project_step, feed_dict={x: img, epsilon: demo_epsilon})
# with tf.GradientTape() as tape:
# #tape.watch(x_hat)
# proba = inception(x_hat)[demo_target]
# #loss = keras.losses.sparse_categorical_crossentropy()
# #log_likelihood = inception(image)[tf.constant(1)]
#
# grad = tape.gradient(proba, [x_hat])[0]
# #print(f"grad.shape = {grad.shape}")
# #print(f"demo_lr*grad = {type(demo_lr*grad)}")
# #tf.Variable.assign_add(x_hat, demo_lr*grad)
# #x_hat.assign_add(demo_lr*grad)
# #x_hat.assign_add(demo_lr*grad.numpy())
# #x_hat = x_hat + demo_lr*grad
# #projected = tf.clip_by_value(tf.clip_by_value(x_hat, below, above), 0, 1)
# #projected = tf.clip_by_value(
# # tf.clip_by_value(x_hat + demo_lr * grad, below, above),
# # -1, 1)
# #tf.Variable.assign(x_hat, projected)
# projected = tf.clip_by_value(x_hat + demo_lr * grad, below, above)
# x_hat.assign(projected)
# if (i+1) % 10 == 0:
# print(f'step{i+1: 4d}, proba={proba:.9f}')
# + active=""
# classify(
# inverse_preprocess(x_hat),
# correct_class=img_class,
# target_class=demo_target
# )
# + active=""
# imagenet_labels[tf.argmax(inception(x_hat)).numpy()]
# + active=""
# demo_target, tf.argsort(inception(x_hat), direction="DESCENDING").numpy()[:10]
# -
# +
demo_lr = 1e-1
demo_steps = 100
# projected gradient descent
for i in tqdm(range(demo_steps)):
## gradient descent step
#_, loss_value = sess.run(
# [optim_step, loss],
# feed_dict={learning_rate: demo_lr, y_hat: demo_target})
## project step
#sess.run(project_step, feed_dict={x: img, epsilon: demo_epsilon})
with tf.GradientTape() as tape:
#tape.watch(x_hat)
#proba = inception(x_hat)[demo_target]
probas = inception(x_hat)
loss = keras.losses.sparse_categorical_crossentropy(demo_target, probas)
#loss = keras.losses.sparse_categorical_crossentropy([demo_target], [proba])
#log_likelihood = inception(image)[tf.constant(1)]
grad = tape.gradient(loss, [x_hat])[0]
#print(f"grad.shape = {grad.shape}")
#print(f"demo_lr*grad = {type(demo_lr*grad)}")
#tf.Variable.assign_add(x_hat, demo_lr*grad)
#x_hat.assign_add(demo_lr*grad)
#x_hat.assign_sub(demo_lr*grad)
#x_hat.assign_add(demo_lr*grad.numpy())
#x_hat = x_hat + demo_lr*grad
projected = tf.clip_by_value(
tf.clip_by_value(x_hat - demo_lr*grad, below, above),
-1, 1)
#projected = tf.clip_by_value(x_hat + demo_lr * grad, below, above)
#tf.Variable.assign(x_hat, projected)
x_hat.assign(projected)
if (i+1) % 10 == 0:
print(f'step{i+1: 4d}, loss={loss:.6f}, proba={probas[demo_target].numpy():.6f}')
# -
classify(
(inverse_preprocess(x_hat.numpy()) * 255).astype(np.uint8),
#inverse_preprocess(x_hat.numpy()),
correct_class=img_class,
target_class=demo_target
)
probas[demo_target]
imagenet_labels.index("Egyptian cat")
probas[imagenet_labels.index("Egyptian cat")]
demo_target, img_class
tf.sort(probas, direction="DESCENDING")[:10]
def top(array, k=10):
    """Return the names of the *k* highest-probability ImageNet classes.

    Parameters
    ----------
    array : 1-D probability (or score) vector over the ImageNet classes.
    k : number of class names to return, best first.

    Fix: the original ignored *array* and always ranked the global ``probas``,
    so ``top(anything)`` silently returned the same answer.
    """
    order = tf.argsort(array, direction="DESCENDING")[:k]
    return [imagenet_labels[index] for index in order]
top(probas)
# + active=""
# tf.argsort(probas, direction="DESCENDING")
# -
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
tf.logging.set_verbosity(tf.logging.ERROR)
sess = tf.InteractiveSession()
# First, we set up the input image. We use a `tf.Variable` instead of a `tf.placeholder` because we will need it to be trainable. We can still feed it when we want to.
image = tf.Variable(tf.zeros((299, 299, 3)))
# Next, we load the Inception v3 model.
# +
def inception(image, reuse):
    """Build the Inception v3 graph for a single [0, 1] image.

    Returns a ``(logits, probs)`` pair of graph tensors; the background class
    (index 0 of the 1001 outputs) is dropped from both.
    """
    # Rescale pixel values from [0, 1] to the [-1, 1] range Inception expects,
    # adding the leading batch dimension the network requires.
    batched = tf.expand_dims(image, 0)
    preprocessed = tf.multiply(tf.subtract(batched, 0.5), 2.0)
    scope = nets.inception.inception_v3_arg_scope(weight_decay=0.0)
    with slim.arg_scope(scope):
        logits, _ = nets.inception.inception_v3(
            preprocessed, 1001, is_training=False, reuse=reuse)
    logits = logits[:, 1:]  # ignore background class
    probs = tf.nn.softmax(logits)  # probabilities
    return logits, probs
logits, probs = inception(image, reuse=False)
# -
# The `preprocessed = tf.multiply(tf.subtract(tf.expand_dims(image, 0), 0.5), 2.0)` line, I guess, was just to make the output real numbers lying in $[-1, 1]$ instead of $[0, 1]$.
#
# Next, we load pre-trained weights. This Inception v3 has a top-5 accuracy of 93.9%.
import tempfile
from urllib.request import urlretrieve
#from urllib import urlretrieve
import tarfile
import os
# In the original notebook, the URL in the following cell used `https`, but I don't know why that no longer works; instead, one has to replace `https` with `http`.
data_dir = tempfile.mkdtemp()
inception_tarball, _ = urlretrieve(
'http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz')
tarfile.open(inception_tarball, 'r:gz').extractall(data_dir)
# **(?1)**
# - `urlretrieve()`'s return value
# - `tarfile.open`'s return value
data_dir
# ls $data_dir
# + active=""
# ls ${data_dir}
# + active=""
# ls: cannot access '$/tmp/tmpvzku6vpk': No such file or directory
# + active=""
# ls $(data_dir)
# + active=""
# /bin/bash: line 1: data_dir: command not found
# 02_remarked.ipynb adversarial.ipynb README.md trash.py
# -
restore_vars = [
var for var in tf.global_variables()
if var.name.startswith('InceptionV3/')
]
saver = tf.train.Saver(restore_vars)
saver.restore(sess, os.path.join(data_dir, 'inception_v3.ckpt'))
# Next, we write some code to show an image, classify it, and show the classification result.
# + active=""
# !conda install -n homl1e matplotlib
# -
import json
import matplotlib.pyplot as plt
imagenet_json, _ = urlretrieve(
'https://www.anishathalye.com/media/2017/07/25/imagenet.json')
with open(imagenet_json) as f:
imagenet_labels = json.load(f)
imagenet_json
imagenet_labels
type(imagenet_labels), len(imagenet_labels)
def classify(img, correct_class=None, target_class=None):
    """Show *img* next to a bar chart of the session model's top-10 class probabilities.

    Runs the TF1 graph tensor ``probs`` (defined at module level) through the
    interactive session, feeding *img* into the ``image`` variable. The bar for
    *target_class* is colored red and the one for *correct_class* green (green
    wins when both refer to the same bar, since it is applied last).
    """
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 8))
    fig.sca(ax1) # sca() Select Current Axis
    # Evaluate the graph on this image; [0] unpacks the batch dimension.
    p = sess.run(probs, feed_dict={image: img})[0]
    ax1.imshow(img)
    fig.sca(ax1) # NOTE(review): this second sca(ax1) looks redundant — confirm
    # display the top 10 predicted classes, most probable first
    topk = list(p.argsort()[-10:][::-1])
    topprobs = p[topk]
    barlist = ax2.bar(range(10), topprobs)
    if target_class in topk:
        barlist[topk.index(target_class)].set_color('r')
    if correct_class in topk:
        barlist[topk.index(correct_class)].set_color('g')
    plt.sca(ax2)
    plt.ylim([0, 1.1])
    plt.xticks(range(10),
               [imagenet_labels[i][:15] for i in topk],
               rotation='vertical')
    # :15 means "taking the first 15 characters" for fear of long-string class.
    fig.subplots_adjust(bottom=0.2)
    plt.show()
L = ["a", "b", "c"]
L.index("c"), L.index("b"), #L.index("z")
# ## Example image
#
# We load our example image and make sure it's classified correctly.
import PIL
import numpy as np
img_path, _ = urlretrieve('https://www.anishathalye.com/media/2017/07/25/cat.jpg')
img_class = 281
img = PIL.Image.open(img_path)
big_dim = max(img.width, img.height)
wide = img.width > img.height
new_w = 299 if not wide else int(img.width * 299 / img.height)
new_h = 299 if wide else int(img.height * 299 / img.width)
img = img.resize((new_w, new_h)).crop((0, 0, 299, 299))
img = (np.asarray(img) / 255.0).astype(np.float32)
# **(?)** What is the `new_w, new_h, resize()` all about?<br>
# **(R)** The story was that Anish wanted to `crop((0,0,299,299))`, i.e. crop the `299x299` subimage for the upper left corner for each image. But the reality is that _**not every image has both width and height larger than**_ `299`. So the `resize((new_w, new_h))` was there to guarantee this. Indeed,
# \begin{align}
# h_{\text{new}} = 299,\; w_{\text{new}} = h_{\text{new}} \frac{w}{h} \quad\text{when}\quad h < w \\
# w_{\text{new}} = 299,\; h_{\text{new}} = w_{\text{new}} \frac{h}{w} \quad\text{when}\quad h \ge w
# \end{align}
#
# which converted into words says **_always convert the shorter side to_** `299` and **_the longer side to its rightful length according to the original ratio_**.
imagenet_labels[img_class]
classify(img, correct_class=img_class)
# # Adversarial examples
#
# Given an image $\mathbf{x}$, our neural network outputs a probability distribution over labels, $P(y \mid \mathbf{x})$. When we craft an adversarial input, we want to find an $\hat{\mathbf{x}}$ where $\log P(\hat{y} \mid \hat{\mathbf{x}})$ is maximized for a target label $\hat{y}$: that way, our input will be misclassified as the target class. We can ensure that $\hat{\mathbf{x}}$ doesn't look too different from the original $\mathbf{x}$ by constraining ourselves to some $\ell_\infty$ <del>box</del> ball with radius $\epsilon$, requiring that $\left\lVert \mathbf{x} - \hat{\mathbf{x}} \right\rVert_\infty \le \epsilon$.
#
# In this framework, an adversarial example is the solution to a constrained optimization problem that we can solve using [backpropagation](https://colah.github.io/posts/2015-08-Backprop/) and projected gradient descent, basically the same techniques that are used to train networks themselves. The algorithm is simple:
#
# We begin by initializing our adversarial example as $\hat{\mathbf{x}} \leftarrow \mathbf{x}$. Then, we repeat the following until convergence:
#
# 1. $\hat{\mathbf{x}} \leftarrow \hat{\mathbf{x}} + \alpha \cdot \nabla \log P(\hat{y} \mid \hat{\mathbf{x}})$
# 2. $\hat{\mathbf{x}} \leftarrow \mathrm{clip}(\hat{\mathbf{x}}, \mathbf{x} - \epsilon, \mathbf{x} + \epsilon)$
# ## Initialization
#
# We start with the easiest part: writing a TensorFlow op for initialization.
# + jupyter={"outputs_hidden": true}
x = tf.placeholder(tf.float32, (299, 299, 3))
x_hat = image # our trainable adversarial input
assign_op = tf.assign(x_hat, x)
# -
# ## Gradient descent step
#
# Next, we write the gradient descent step to maximize the log probability of the target class (or equivalently, minimize the [cross entropy](https://en.wikipedia.org/wiki/Cross_entropy)).
type(()) # empty tuple as the shape of a float
# + jupyter={"outputs_hidden": true}
learning_rate = tf.placeholder(tf.float32, ())
y_hat = tf.placeholder(tf.int32, ())
labels = tf.one_hot(y_hat, 1000)
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=[labels])
optim_step = tf.train.GradientDescentOptimizer(
learning_rate).minimize(loss, var_list=[x_hat])
# -
# **N.B.** `logits` was defined in an earlier cell, via `image`, which should probably be viewed as identical to `x_hat`.
# **(?)** The `var_list` param of `minimize()`
# ## Projection step
#
# Finally, we write the projection step to keep our adversarial example visually close to the original image. Additionally, we clip to $[0, 1]$ to keep it a valid image.
# + jupyter={"outputs_hidden": true}
epsilon = tf.placeholder(tf.float32, ())
below = x - epsilon
above = x + epsilon
projected = tf.clip_by_value(tf.clip_by_value(x_hat, below, above), 0, 1)
with tf.control_dependencies([projected]):
# cf. p.323 ageron's homl1e
project_step = tf.assign(x_hat, projected)
# -
# **(?)** What would happen if we do not use `tf.control_dependencies()` here?
# ## Execution
#
# Finally, we're ready to synthesize an adversarial example. We arbitrarily choose "guacamole" (imagenet class 924) as our target class.
imagenet_labels.index("pizza, pizza pie")
# +
demo_epsilon = 2.0/255.0 # a really small perturbation
demo_lr = 1e-1
demo_steps = 100
demo_target = 924 # "guacamole"
# Test other target classes at your own will
demo_target = imagenet_labels.index("pizza, pizza pie")
# initialization step
sess.run(assign_op, feed_dict={x: img})
# projected gradient descent
for i in range(demo_steps):
# gradient descent step
_, loss_value = sess.run(
[optim_step, loss],
feed_dict={learning_rate: demo_lr, y_hat: demo_target})
# project step
sess.run(project_step, feed_dict={x: img, epsilon: demo_epsilon})
if (i+1) % 10 == 0:
print('step %d, loss=%g' % (i+1, loss_value))
adv = x_hat.eval() # retrieve the adversarial example
# -
# **Rmk.**
# - Took several minutes to finish running `100` steps on Thinkpad X200.
# - [%g](https://stackoverflow.com/questions/30658919/the-precision-of-printf-with-specifier-g)
# - Note that `image` and `x_hat` really are regarded as identical in the TensorFlow graph above, because
# - `project_step` assigns the projection to `x_hat`
# - Optimization updates `image`
# - They have to be the same thing in order for this to work.
# This adversarial image is visually indistinguishable from the original, with no visual artifacts. However, it's classified as "guacamole" with high probability!
classify(adv, correct_class=img_class, target_class=demo_target)
# **(?)** How small was the perturbation `2.0/255.0`? Try to do an experiment with random noise of that magnitude of perturbation.
# **Stopped here (2021/02/19 (金) 16h04)**
#
# This adv attack is very cool, but we haven't verified that the weights we downloaded really classify the imagenet images well. Let's do this.
# + active=""
# import tensorflow_datasets as tfds
# ds = tfds.load("imagenet2012_subset/1pct")
# ds
# + active=""
# ---------------------------------------------------------------------------
# ModuleNotFoundError Traceback (most recent call last)
# <ipython-input-32-188e57836169> in <module>
# ----> 1 import tensorflow_datasets as tfds
# 2 ds = tfds.load("imagenet2012_subset/1pct")
# 3 ds
#
# ModuleNotFoundError: No module named 'tensorflow_datasets'
# + active=""
# !conda install -n homl1e tensorflow-datasets -y
# + active=""
# !which pip
# + active=""
# !pip install tensorflow_datasets
# + active=""
# !pip uninstall -y tensorflow_datasets
# + active=""
# import tensorflow_datasets as tfds
# ds, ds_info = tfds.load("imagenet2012_subset/1pct", with_info=True)
# ds_info
# -
tf.__version__
# It seems that one cannot use `tfds` in `tensorflow==1.13.1`.<br>
# `pip` seems to download ok, but cannot run.
# # Robust adversarial examples
#
# Now, we go through a more advanced example. We follow our approach for [synthesizing robust adversarial examples](https://arxiv.org/abs/1707.07397) to find a single perturbation of our cat image that's simultaneously adversarial under some chosen distribution of transformations. We could choose any distribution of differentiable transformations; in this post, we'll synthesize a single adversarial input that's robust to rotation by $\theta \in [-\pi/4, \pi/4]$.
#
# Before we proceed, let's check if our previous example is still adversarial if we rotate it, say by an angle of $\theta = \pi/8$.
# +
ex_angle = np.pi/8
angle = tf.placeholder(tf.float32, ())
rotated_image = tf.contrib.image.rotate(image, angle)
rotated_example = rotated_image.eval(feed_dict={image: adv, angle: ex_angle})
classify(rotated_example, correct_class=img_class, target_class=demo_target)
# -
# Looks like our original adversarial example is not rotation-invariant! Let's try one more.
rotated_example = rotated_image.eval(feed_dict={image: adv, angle: -np.pi/4})
classify(rotated_example, correct_class=img_class, target_class=demo_target)
# So, how do we make an adversarial example robust to a distribution of transformations? Given some distribution of transformations $T$, we can maximize $\mathbb{E}_{t \sim T} \log P\left(\hat{y} \mid t(\hat{\mathbf{x}})\right)$, subject to $\left\lVert \mathbf{x} - \hat{\mathbf{x}} \right\rVert_\infty \le \epsilon$. We can solve this optimization problem via projected gradient descent, noting that $\nabla \mathbb{E}_{t \sim T} \log P\left(\hat{y} \mid t(\hat{\mathbf{x}})\right)$ is $\mathbb{E}_{t \sim T} \nabla \log P\left(\hat{y} \mid t(\hat{\mathbf{x}})\right)$ and approximating with samples at each gradient descent step.
#
# Rather than manually implementing the gradient sampling, we can use a trick to get TensorFlow to do it for us: we can model our sampling-based gradient descent as doing gradient descent over an ensemble of stochastic classifiers that randomly sample from the distribution and transform their input before classifying it.
# **(?)** Why does Anish keep speak of **distribution of transformations**?
# + jupyter={"outputs_hidden": true}
num_samples = 10
average_loss = 0
for i in range(num_samples):
rotated = tf.contrib.image.rotate(
image, tf.random_uniform((), minval=-np.pi/4, maxval=np.pi/4))
rotated_logits, _ = inception(rotated, reuse=True)
average_loss += tf.nn.softmax_cross_entropy_with_logits(
logits=rotated_logits, labels=labels) / num_samples
# -
# **(?)** What is `reuse=True` here? It means that we reuse the weights we've found before?
# **(?)** Why `average_loss` has to be divided by `num_samples` here?
# **(?)** Why in the last `tf.nn.softmax_cross_entropy_with_logits()` we had `labels=[labels]` while in this one we have `labels=labels`?
# We can reuse our `assign_op` and `project_step`, though we'll have to write a new `optim_step` for this new objective.
optim_step = tf.train.GradientDescentOptimizer(
learning_rate).minimize(average_loss, var_list=[x_hat])
# Finally, we're ready to run PGD to generate our adversarial input. As in the previous example, we'll choose "guacamole" as our target class.
# +
demo_epsilon = 8.0/255.0 # still a pretty small perturbation
demo_lr = 2e-1
demo_steps = 300
demo_target = 924 # "guacamole"
# initialization step
sess.run(assign_op, feed_dict={x: img})
# projected gradient descent
for i in range(demo_steps):
# gradient descent step
_, loss_value = sess.run(
[optim_step, average_loss],
feed_dict={learning_rate: demo_lr, y_hat: demo_target})
# project step
sess.run(project_step, feed_dict={x: img, epsilon: demo_epsilon})
if (i+1) % 50 == 0:
print('step %d, loss=%g' % (i+1, loss_value))
adv_robust = x_hat.eval() # retrieve the adversarial example
# -
# This adversarial image is classified as "guacamole" with high confidence, even when it's rotated!
rotated_example = rotated_image.eval(feed_dict={image: adv_robust, angle: ex_angle})
#rotated_example = rotated_image.eval(feed_dict={image: adv_robust, angle: np.random.uniform(-np.pi/4, np.pi/4)})
classify(rotated_example, correct_class=img_class, target_class=demo_target)
# ## Evaluation
#
# Let's examine the rotation-invariance of the robust adversarial example we produced over the entire range of angles, looking at $P(\hat{y} \mid \hat{\mathbf{x}})$ over $\theta \in [-\pi/4, \pi/4]$.
# +
thetas = np.linspace(-np.pi/4, np.pi/4, 301)
p_naive = []
p_robust = []
for theta in thetas:
rotated = rotated_image.eval(feed_dict={image: adv_robust, angle: theta})
p_robust.append(probs.eval(feed_dict={image: rotated})[0][demo_target])
rotated = rotated_image.eval(feed_dict={image: adv, angle: theta})
p_naive.append(probs.eval(feed_dict={image: rotated})[0][demo_target])
robust_line, = plt.plot(thetas, p_robust, color='b', linewidth=2, label='robust')
naive_line, = plt.plot(thetas, p_naive, color='r', linewidth=2, label='naive')
plt.ylim([0, 1.05])
plt.xlabel('rotation angle')
plt.ylabel('target class probability')
plt.legend(handles=[robust_line, naive_line], loc='lower right')
plt.show()
# -
# It's super effective!
# ## Challenge
# It would be a good challenge to write this same notebook using
# - `tf2`
# - `torch`, etc.
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Transfer Learning - 1
# Making Predictions using Pre-trained Networks
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
# +
#create the model
model = ResNet50(weights='imagenet')
# -
# !pip install pillow
# Read the Image
import cv2
img_path = 'image.jpg'
img = cv2.imread(img_path)
img = cv2.resize(img,(224,224))
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
import matplotlib.pyplot as plt
plt.title("Actual Image")
plt.imshow(img)
plt.show()
plt.title("Normalised Image")
plt.imshow(x[0])
plt.show()
preds = model.predict(x)
print('Predicted:', decode_predictions(preds, top=3)[0])
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="74l7lcFQk4kT"
# ## Setup
#
# + [markdown] colab_type="text" id="ixh2Tyl1FHaj"
# In this first cell we'll load the necessary libraries and setup some logging and display options.
# + colab={} colab_type="code" id="JaCENoitkiXK"
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xarray as xr
# %matplotlib inline
# + [markdown] colab_type="text" id="y0gBz25Glf-3"
# Next we'll load our flow variables and time tendency forcings datasets into Xarray Dataset objects.
# + colab={} colab_type="code" id="_cC_-nNSlWIO"
ds_h0 = xr.open_dataset('C:/home/cam_learn/fv091x180L26_dry_HS.cam.h0.2000-12-27-00000_lowres.nc', decode_times=False)
ds_h1 = xr.open_dataset('C:/home/cam_learn/fv091x180L26_dry_HS.cam.h1.2000-12-27-00000_lowres.nc', decode_times=False)
# -
ds_h0.info
ds_h1.info
# Look at the time variable in order to work out the initial date, number of steps, units, etc.
ds_h0.variables['time']
# Make sure we have the same time values for the targets data.
if (ds_h0.variables['time'].values != ds_h1.variables['time'].values).any():
print('ERROR: Non-matching time values')
# Create array of datetime values from the times.
from datetime import datetime, timedelta
times = ds_h0.variables['time'].values.flatten()
initial = datetime(2000, 12, 27)
datetimes = np.empty(shape=times.shape, dtype='datetime64[m]')
for i in range(datetimes.size):
datetimes[i] = initial + timedelta(days=times[i])
timestamps = pd.Series(datetimes)
timestamps.head()
# ## Feature and target selection
# As features we'll use the following flow variables:
#
# * U (west-east (zonal) wind, m/s)
# * V (south-north (meridional) wind, m/s)
# * T (temperature, K)
# * PS (surface pressure, Pa)
#
# Time tendency forcings are the targets (labels) that our model should learn to predict.
#
# * PTTEND (time tendency of the temperature)
# * PUTEND (time tendency of the zonal wind)
# * PVTEND (time tendency of the meridional wind)
#
# Eventually we'll train/fit our model for an entire global 3-D grid, but for this example we'll select a single lat/lon/lev location, getting all time steps for the feature and target variables at the lat/lon/lev location.
ps = pd.Series(ds_h0.variables['PS'].values[:, 0, 0])
t = pd.Series(ds_h0.variables['T'].values[:, 0, 0, 0])
u = pd.Series(ds_h0.variables['U'].values[:, 0, 0, 0])
v = pd.Series(ds_h0.variables['V'].values[:, 0, 0, 0])
pttend = pd.Series(ds_h1.variables['PTTEND'].values[:, 0, 0, 0])
putend = pd.Series(ds_h1.variables['PUTEND'].values[:, 0, 0, 0])
pvtend = pd.Series(ds_h1.variables['PVTEND'].values[:, 0, 0, 0])
# Convert to a Pandas DataFrame containing inputs (features) and output (label/target) for use when predicting time tendency forcings.
df_vals = pd.DataFrame({'timestamp': timestamps,
'PS': ps,
'T': t,
'U': u,
'V': v,
'PTTEND': pttend,
'PUTEND': putend,
'PVTEND': pvtend})
df_vals.set_index('timestamp', inplace=True)
df_vals.head()
df_vals.info()
# Normalize the feature variables using scikit-learn's [MinMaxScaler](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html).
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(df_vals.values)
scaled
# Normalize the feature variables using scikit-learn's MinMaxScaler.
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
# Fix: this duplicated cell referenced the undefined name `df_temp`
# (NameError); it should scale the same DataFrame as the cell above.
scaled = scaler.fit_transform(df_vals.values)
scaled
# Replace the original columns with the scaled values.
for i in range(len(df_vals.columns)):
s = pd.Series(scaled[:, i])
df_vals[df_vals.columns[i]] = s.values
df_vals.head()
# ## Split the data into training and testing datasets
# For simplicity we'll start with an even split of 50% for training and 50% for testing.
# +
# the values array is 720 rows x 10 columns
train = df_vals.values[:360, :] # rows 0 - 359
test = df_vals.values[360:, :] # rows 360 - 719
# # split into input and outputs
# train_X, train_y = train[:, :-3], train[:, -3:]
# test_X, test_y = test[:, :-3], test[:, -3:]
# use final value of final column as target, until we work out how to predict multiple targets
train_X, train_y = train[:, :-3], train[-1:, -1:]
test_X, test_y = test[:, :-3], test[-1:, -1:]
# # convert shape of outputs from (360, 1) to (1, 360)
# # train_y = np.swapaxes(train_y, 0, 1)
# # test_y = np.swapaxes(test_y, 0, 1)
train_y = train_y.flatten()
test_y = test_y.flatten()
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((1, train_X.shape[0], train_X.shape[1]))
test_X = test_X.reshape((1, test_X.shape[0], test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
# -
# ## Create the LSTM recurrent neural network
# Next, we'll instantiate and configure a Long Short-Term Memory (LSTM) recurrent neural network using Keras's [Sequential](https://keras.io/models/sequential/) class. We'll train this model using an Adam version of stochastic gradient descent, and use the Mean Absolute Error (MAE) loss function.
#
# We will define the LSTM with 50 neurons in the first hidden layer and 1 neuron in the output layer for predicting the PVTEND target. The input shape will be 360 time steps with 4 features (matching the `(1, 360, 4)` reshape above).
# +
import keras

# Build the network: one LSTM layer (50 units) feeding a single-unit dense
# output head, compiled with the Adam optimizer on mean-absolute-error loss.
model = keras.models.Sequential([
    keras.layers.LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])),
    keras.layers.Dense(1),
])
model.compile(loss='mae', optimizer='adam')
# -
# ## Train and evaluate the model
# The model will be fit for 50 training epochs with a batch size of 72.
# + colab={} colab_type="code" id="Q6waMx-cMg71"
# fit network; shuffle=False preserves the temporal order of the series.
# NOTE(review): with a single reshaped sample (see above), batch_size=72 is
# effectively a batch of 1 — confirm intended.
history = model.fit(train_X, train_y, epochs=50, batch_size=72, validation_data=(test_X, test_y), verbose=2, shuffle=False)
# plot training vs. validation loss per epoch
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()
# + colab={} colab_type="code" id="wVzN6_fWZDJn"
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # An Example in Constraint Optimization
# ## Introduction
# The task is to manage energy consumption for a local site, for example a neighborhood of households or an industrial area. The *predicted* gas consumption for the next 72 hours (3 days) is given in the following vector, say in cubic meters or similar.
# Hourly time index and predicted gas consumption [m^3] for the next 72 hours.
# NOTE: `T` shadows R's built-in shorthand for TRUE within this script.
T <- 1:72
P <- c( 96.6, 96, 98.4, 99.1, 98, 100.6, 100.7, 99.4, 99.5, 98.9, 98, 98.3, 96.3,
        98.4, 95.5, 95.3, 95.8, 94.6, 94.4, 93.8, 95.2, 94, 96.6, 98.2, 96.7, 99.2,
        100.6, 98.6, 99.4, 102, 101.3, 102.5, 102.5, 102.1, 102.2, 100.9, 100.8,
        97.6, 98.1, 97.4, 95.4, 96.9, 95.3, 97.7, 95.5, 97, 96.9, 96, 95.3, 98,
        97.6, 99.7, 99.9, 101.2, 100, 98.6, 99.2, 97.4, 97.7, 97.1, 96.5, 95.5,
        96.4, 93, 95, 95.4, 95.3, 95.4, 95.3, 96.4, 96.9, 96.7)
# The following plot displays this prediction as a step curve. The local supplier of energy buys gas from a global supplier. The contract says he will get up to 100 [m^3] per hour to a fixed price, if more gas is consumed the price will go up significantly. The red line marks this upper limit.
#
# Step plot of the predicted consumption; the red line marks the 100 [m^3]
# contractual limit from the global supplier.
plot(c(0, 72), c(90, 105), type = "n",
     main = "Predicted gas consumption", xlab = "Time", ylab = "Gas")
grid(); abline(h = 100, col = "red")
lines(T, P, col = "green", type = "s", lwd = 2)
# To avoid these extra costs the local provider will employ some kind of gas storage, e.g. a gas tank (or the so-called "net aspiration"). When gas consumption in the area is low, the tank can be filled, and when consumption is greater than 100 the gas storage will be used to contribute the missing amount.
#
# Of course, the gas storage facility has some technical constraints that need to be taken into account. The maximum amount of gas to be stored in the tank shall be 10 [m^3], so the actual amount of gas in the tank will vary between 0 and 10. Also, the amount of gas that will be pumped in or taken out has be less or equal to 5 [m^3] per hour.
# Imagine the *planned* amount of gas bought from the global supplier is
# Planned hourly purchases from the global supplier (capped at 100 [m^3]).
P0 <- c(
  100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,  99.2,  98.0,  98.3,
   96.3,  98.4,  95.5,  95.3,  95.8,  94.6,  94.4,  93.8,  95.2,  94.0,  96.6,  98.2,
   96.7,  99.2, 100.0,  99.2,  99.4, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,
  100.0, 100.0, 100.0, 100.0, 100.0,  99.7,  95.3,  97.7,  95.5,  97.0,  96.9,  96.0,
   95.3,  98.0,  97.6,  99.7,  99.9, 100.0, 100.0,  99.8,  99.2,  97.4,  97.7,  97.1,
   96.5,  95.5,  96.4,  93.0,  95.0,  95.4,  95.3,  95.4,  95.3,  96.4,  96.9,  96.7)
# then the combined display of predicted and planned gas consumption is shown in the following plot:
plot(c(0, 72), c(90, 105), type = "n",
     main = "Predicted gas consumption", xlab = "Time", ylab = "Gas")
grid(); abline(h = 100, col = "red")
lines(T, P, col = "green", type = "s", lwd = 2)
lines(T, P0, col = "navy", type = "s", lwd = 2, lty = 2)
# Can this plan be satisfied by utilizing the gas storage available to the local supplier? Assuming the gas tank initially holds 3 [m^3] (the starting level used throughout the code, named d0 below), what will be the level in the tank during the planned 72 hours? Obviously, the filling level is calculated as `3 + cumsum(P0 - P)`. So with this planning the tank level varies as follows:
T0 <- 3 + cumsum(P0 - P)  # tank level per hour; 3 = initial storage level (d0)
# From now on we will plot the (predicted or planned) gas consumption and the level in the gas tank -- i.e. the volume of stored gas -- side by side in one graph. For this *greedy* approach the figure looks as follows:
# +
# Plot a purchase plan against the prediction (top panel) and the implied
# tank level (bottom panel).
#   T  - time index, P  - predicted consumption,
#   P0 - planned purchases, T0 - tank level over time.
plot_gas_solution <- function(T, P, P0, T0) {
    par(mfrow = c(2, 1), ann = FALSE, mai = c(0.5, 0.5, 0.25, 0.5))
    plot(T, P, type = "s", col = "green", ylim = c(90, 105)); grid()
    lines(T, P0, type = "s", col = "blue")
    plot(T, T0, type = "s", col = "red", ylim = c(0, 20)); grid()
}
plot_gas_solution(T, P, P0, T0)
# -
# We see that the gas tank is completely filled most of the time and that it can handle the problem of a missing gas volume during the 30--40 hours ahead.
# ## Problem Formulation
# The task now is to formulate this as an optimization problem such that, when given a prediction of gas consumption for 72 hours, a purchase plan for buying gas from the global provider will be generated. The plan shall make it possible to never buy more than a certain amount of gas, and the gas tank shall be used in a moderate way.
# #### Decision Variables
# Let $n$ be the number of time periods, $p_i, i=1, ...,72$ be the predicted consumption values, and $q_i$ the unknown planning values to be optimized during the optimization procedure.
# How can a feasible starting point `x0` be found? The same way `P0` above was found. That is, immediately fill up the tank and keep it filled as much as possible. If this leads to a non-feasible solution -- because the storage tank is not big enough -- than we know that the whole optimization problem is not solvable. It is a very nice feature of this optimization problem that feasibility can so easily be determined, and if applicable a feasible starting point is constructed.
# #### Constraints
# The following constants need to be defined:
#
# - $n$, the number of predicted resp. planned time periods.
#
# - $d_0$: initial level of the gas tank.
#
# - $d_1$: maximum level change per time unit.
#
# - $d_2$: maximum level of gas tank (minimum is 0).
#
# - $d_3$: maximum allowed amount of gas from the global supplier.
n <- 72    # number of time periods
d0 <- 3    # initial tank level; NOTE(review): the earlier narrative assumed a
           # half-filled 10 m^3 tank (level 5) — confirm which value is intended
d1 <- 5    # maximum level change per time unit
d2 <- 15   # tank capacity; NOTE(review): narrative above says 10 — confirm
d3 <- 100  # maximum hourly amount from the global supplier
# With these values the constraints are defined as
#
# 1. maximum supply to be ordered from the global supplier:
# $$ q_i \leq d_3 \qquad \mathrm{for\ all} \; i = 1 \ldots n $$
#
# 1. restricted amount to be taken from or filled into gas tank:
# $$ |q_i - p_i| \le d_1 \qquad \mathrm{for\ all} \; i = 1 \ldots n$$
#
# 1. level in storage tank restricted by capacity:
# $$ 0 \le d_0 + \sum_{k=1}^{j} (q_k - p_k) \leq d_2 \qquad \mathrm{for\ all} \; j = 1 \ldots n$$
# #### Objective
# The objective function will describe the overall goal that is to be reached.
# Four different goals will be considered and compared:
#
# - (**A**) Minimize the sum of differences between predicted and planned purchase of gas from the global supplier:
# $$ \min ! \sum_{j=1}^n |p_j - q_j| $$
#
# - (**B**) Minimize the sum of squares of these differences:
# $$ \min ! \sum_{j=1}^n (p_j - q_j)^2 $$
# (Larger differences will have a larger influence on the objective.)
#
# - (**C**) Minimize the difference between subsequent supplies:
# $$ \min ! \sum_{j=1}^{n-1} |q_{j+1} - q_{j}| $$
#
# - (**D**) Minimize usage of the gas tank as much as possible:
# $$ \min ! \sum_{k=1}^{n} | \sum_{j=1}^{k} (p_j - q_j)|$$
#
# We will see that very different solutions will be returned with each of these objectives. Of course, it is up to the customer of such an optimization package to decide which solution and thus which objective function he prefers.
#
# The objective functions will be the same for all solvers, so we will define them here.
# +
# Objective functions over a candidate purchase plan q (P is the prediction):
funA <- function(q) sum(abs(P - q))          # (A) total |deviation| from prediction
funB <- function(q) sum((P - q)^2)           # (B) squared deviations
funC <- function(q) sum(abs(diff(q)))        # (C) variation between successive supplies
funD <- function(q) sum(abs(cumsum(P - q)))  # (D) total tank usage
# -
# ## Nonlinear Optimization Solvers in R
# The function `constrOptim()` in base R is a solver for nonlinear optimization problems with *linear* constraints. It implements an adaptive logarithmic barrier algorithm, using `optim()` as the underlying solver. Its usage is as follows:
#
# ```r
# constrOptim(theta, f, grad, ui, ci, mu = 1e-04, control = list(),
# method = if (is.null(grad)) "Nelder-Mead" else "BFGS",
# outer.iterations = 100, outer.eps = 1e-05, ..., hessian = FALSE)
# ```
#
# But our constraints are nonlinear, so we need a more powerful tool. The appropriate technique here is the *augmented Lagrangian*. Solvers available in R packages that apply this approach are
#
# - nloptr
# - alabama
# - Rsolnp
# - Rdonlp2
#
# Unfortunately, different optimization solvers require the constraints and objective function to be supplied in different ways. The following solvers for non-linear constraint optimization will be tried on the problem: 'alabama', 'nloptr'.
#
# Because all these solvers are *local* solvers, i.e. will stop in local minima, we will also try a global (or stochastic) solver and compare it with solutions found with local solvers.
# ### The *alabama* package
# Solver `auglag()` in package 'alabama' optimizes smooth nonlinear objective functions with nonlinear constraints. Equality and inequality constraints are allowed. As the problem formulated above does not involve equality constraints, the full power of an augmented Lagrangian approach is not required.
# The general call to `auglag()` looks like this:
#
# ```r
# auglag(par, fn, gr, hin, hin.jac, heq, heq.jac,
# control.outer=list(), control.optim = list(), ...)
# ```
#
# with the following parameters
#
# Argument | Meaning
# --------- | ----------------------------------------------
# `par` | initial vector of variable values
# `fn` | nonlinear objective function
# `gr` | gradient of the objective function
# `hin` | function specifying the inequality constraints
# ... | ...
#
# And `control.optim` a list of control parameters, the same as those used in `optim()`. The default method is "BFGS". At the moment we rely on numerical gradients and jacobians. For more information see `?auglag`.
require(alabama)
# ?auglag
# The constraint inequalities need to be defined through `hin[j] >= 0` for all `j`. Therefore, we define function `hin()` as
# Inequality constraints for auglag(); feasibility requires every returned
# component to satisfy hin[j] >= 0. Uses the globals P, d0, d1, d2, d3.
hin <- function(q) {
    level <- d0 + cumsum(q - P)      # tank level after each period
    c(d3 - q,                        # supply cap:   q[i] <= d3
      d1 - abs(P - q),               # flow limit:   |P[i] - q[i]| <= d1
      level,                         # tank floor:   level >= 0
      d2 - level)                    # tank ceiling: level <= d2
}
# #### Objective `funA`
# Minimize total absolute deviation from the prediction, subject to hin().
solA1 <- auglag(par = P0, fn = funA, hin = hin, control.outer = list(trace = FALSE))
xA1 <- solA1$par
solA1$value
# Display the solution and the level in the storage tank (3 = d0, initial level).
P1 <- solA1$par; T1 <- 3 + cumsum(P1 - P)
plot_gas_solution(T, P, P1, T1)
# In this solution, the level in the gas tank is kept at a minimum and only raised when it is absolutely necessary to provide for peaks in the predicted consumption. Compare this with a solution to problem (B).
# #### Objective `funB`
# Minimize the sum of squared deviations from the prediction, subject to hin().
require(alabama)
solB1 <- auglag(par = P0, fn = funB, hin = hin, control.outer = list(trace = FALSE))
xB1 <- solB1$par
solB1$value
# Tank level implied by the optimized plan.
# BUG FIX: the original computed T2 with an initial level of 5, although the
# constraints in hin() and every other solution plot (T1/T3/T4/T5) start from
# d0 = 3; the plotted tank level was therefore offset by +2.
P2 <- solB1$par; T2 <- d0 + cumsum(P2 - P)
plot_gas_solution(T, P, P2, T2)
# In this solution, the level in the gas tank is only raised slowly and only when it is absolutely necessary to provide for peaks in the predicted consumption.
# #### Objective `funC`
# For problem type (C) it is the goal to minimize the differences between gas intakes from the global supplier.
require(alabama)
solC1 <- auglag(par = P0, fn = funC, hin = hin, control.outer = list(trace = FALSE))
xC1 <- solC1$par
solC1$value
# Tank level implied by the plan (3 = d0, initial level).
P3 <- solC1$par; T3 <- 3 + cumsum(P3 - P)
plot_gas_solution(T, P, P3, T3)
# In this case the level in the gas tank varies quite much.
# #### Objective `funD`
# For problem type (D) it is the goal to minimize the usage of the gas tank as much as possible.
require(alabama)
solD1 <- auglag(par = P0, fn = funD, hin = hin, control.outer = list(trace = FALSE))
xD1 <- solD1$par
solD1$value
# Tank level implied by the plan (3 = d0, initial level).
P4 <- solD1$par; T4 <- 3 + cumsum(P4 - P)
plot_gas_solution(T, P, P4, T4)
# Interestingly, in this case the level in the gas tank is kept to the level it had at the start, is raised if there is a need ahead, and then kept constant at the initial value.
# ### The *nloptr* package
require(nloptr)
# ?nloptr::auglag
# Solve objective (D) again with nloptr's augmented-Lagrangian wrapper, using
# L-BFGS as the inner local optimizer and the same hin() constraints.
( sol <- nloptr::auglag(x0 = P0, fn = funD, hin = hin, localsolver = "LBFGS") )
P5 <- sol$par; T5 <- 3 + cumsum(P5 - P)  # 3 = d0, initial tank level
plot_gas_solution(T, P, P5, T5)
# ## Some Remarks
# ### Global Solvers
# There are several global (or: stochastic) solvers in R packages that try to avoid falling into or getting stuck in local minima. There is absolutely no guarantee they will find the global minimum. The problem here, with 72 dimensions, may be too demanding anyway.
# For realizing constraints for application in global solvers the objective function has to be changed to include the constraints.
# Penalized objective (D) for box-constrained global solvers: returns funD's
# value when q is (approximately) feasible and Inf otherwise. A tolerance of
# 0.001 keeps near-feasible points admissible. The supply cap q <= d3 is left
# to the solver's box bounds rather than checked here.
funDconstr <- function(q) {
    tol <- 0.001
    level <- d0 + cumsum(q - P)
    feasible <- all(abs(P - q) <= d1 + tol) &&
                all(level >= -tol) &&
                all(level <= d2 + tol)
    if (feasible) sum(abs(cumsum(P - q))) else Inf
}
require(DEoptim)
# ?DEoptim
# Differential evolution over box bounds [0, 100]^72; constraint handling is
# delegated entirely to funDconstr's Inf penalty.
sol <- DEoptim(funDconstr, lower = rep(0, 72), upper = rep(100, 72),
               control=DEoptim.control(trace = FALSE))
sol$optim
# We can see that global solvers will have severe problems with constraints if the feasible points are rare in the solution space, or if the best solutions lie on or near the boundary.
# ### MATLAB Solution
# In MATLAB, the standard solver for constrained nonlinear optimization (i.e., 'nonlinear programming') is `fmincon` from the *Optimization Toolbox*. It incorporates **active-set**, **sqp**, **trust-region**, and **interior-point** algorithms.
# The solver `fmincon` is called with the following syntax and parameters:
#
# fmincon(fun,x0,A,b,Aeq,beq,lb,ub,nonlcon)
#
# where `fun` is the objective function, `x0` the starting point, `A*x <= b` the linear inequality and `Aeq*x = beq` the linear equality constraints, `lb` and `ub` the lower and upper bounds with `lb <= x <= ub`.
# `nonlcon` defines the nonlinear equality and inequality constraints `c(x) <= 0` and `ceq(x) = 0` by returning two vectors `c` and `ceq`: `[c,ceq] = nonlcon(x)`.
# The user interface for a demo system to discuss with engineers and customers was developed utilizing MATLAB's GUI editor and is shown in the following figure.
# 
# One can see in this figure that `fmincon` finds the same solutions as `auglag` in R above.
#
# A point to clarify will be what happens when we start with other feasible points.
# ### Modeling Language
# R and MATLAB do not have *optimization modeling languages* that would enable the user to formulate optimization problems independently of what the API of different solvers will require. Commercial programs such as AMPL, GAMS, or Gurobi each have their own (algebraic) modeling languages. Goals for applying a modeling language are, e.g., user friendliness, mathematical notation, solver independence, and getting access to advanced optimization techniques.
#
# The following is an AMPL model for our minimization problem with target function `funB`, but extended with a request that the gas tank should be filled up to a level of 5.
# **AMPL Model**
#
# param n > 0 integer; # n = 72 time units
# param p {i in 1..n}; # predicted consumption
#
# param d0; # initial gas tank storage
# param d1; # max level change per time unit
# param d2; # max level of gas tank
# param d3; # max amount of gas from supplier
#
# var x {i in 1..n} >= 0; # solution
#
# minimize target:
# sum {j in 1..n} (p[j] - x[j]) * (p[j] - x[j]);
#
# subject to rule1 {i in 1..n}: x[i] <= d3;
# subject to rule2 {i in 1..n}: abs(x[i] - p[i]) <= d1;
# subject to rule3 {j in 1..n}:
# sum {k in 1..j} (x[k] - p[k]) <= d2 - d0;
# subject to rule4 {j in 1..n}:
# sum {k in 1..j} (x[k] - p[k]) >= -d0;
#
# **AMPL Data**
#
# param n := 72;
#
# param d0 := 3;
# param d1 := 5;
# param d2 := 15;
# param d3 := 100;
#
# param: p :=
# 1 96.6 2 96.0 ... 72 96.7;
#
# **AMPL Commands**
#
# ampl: option solver minos;
# ampl: option minos_options 'iterations_limit=5000 superbasics_limit=100';
#
# ampl: model gas_ex.mod;
# ampl: data gas_ex.dat;
#
# ampl: solve;
# ampl: display x;
# Sending this to AMPL will call the MINOS solver on the model and data and will result in the following output:
#
# MINOS 5.51: iterations_limit=5000
# superbasics_limit=100
# MINOS 5.51: optimal solution found.
# 237 iterations, objective 38.3316
# Nonlin evals: obj = 352, grad = 351, constrs = 352, Jac = 351.
#
# x [*] :=
# 1 97.108 13 96.808 25 97.208 37 100 49 95.375 61 96.5
# 2 96.508 14 98.908 26 99.708 38 97.675 50 98.075 62 95.5
# 3 98.908 15 96.008 27 100 39 98.175 51 97.675 63 96.4
# 4 99.608 16 95.808 28 99.108 40 97.475 52 99.775 64 93
# 5 98.508 17 96.308 29 99.908 41 95.475 53 99.975 65 95
# 6 100 18 95.108 30 100 42 96.975 54 100 66 95.4
# 7 100 19 94.908 31 100 43 95.375 55 100 67 95.3
# 8 99.908 20 94.308 32 100 44 97.775 56 98.6 68 95.4
# 9 100 21 95.708 33 100 45 95.575 57 99.2 69 95.3
# 10 99.408 22 94.508 34 100 46 97.075 58 97.4 70 96.4
# 11 98.508 23 97.108 35 100 47 96.975 59 97.7 71 96.9
# 12 98.808 24 98.708 36 100 48 96.075 60 97.1 72 96.7
# ;
#
# This corresponds to the solution `P2` that we found above.
# ### *rneos*: XML-RPC Interface to NEOS
# > "Within this package the XML-RPC API to NEOS is implemented. This enables the
# > user to pass optimization problems to NEOS and retrieve results within R."
#
# The NEOS Server is an internet-based service for solving numerical optimization problems. It provides free access to more than 60 state-of-the-art (commercial and non-commercial) solvers. Optimization problems need to be formulated in AMPL or GAMS syntax. Results will be returned as Web pages.
# With the above model and data as files *gas_ex.mod* and *gas_ex.dat*, an interaction with the CONOPT solver on NEOS would look like:
#
# ```r
# require(rneos)
# # getting a template for category and solver
# temple <-NgetSolverTemplate(category = "nco", solvername = "CONOPT",
# inputMethod = "AMPL")
# # setting model and data file
# modf <- "gas_ex.mod"; datf <- "gas_ex.dat"
# modc <- paste(paste(readLines(modf), collapse = "\n"), "\n")
# datc <- paste(paste(readLines(datf), collapse = "\n"), "\n")
#
# # create list object
# argslist <- list(model = modc, data = datc,
# commands = "", comments = "Gas example")
# ## create XML string
# xmlstring <- CreateXmlString(neosxml = temple, cdatalist = argslist)
#
# ## submit job to the NEOS solver
# neosjob <- NsubmitJob(xmlstring, user = "hwb", interface = "gas_ex",
# id = 8237, nc = CreateNeosComm())
# neosjob
# # The job number is: 3838832
# # The pass word is : wBgHomLT
#
# # getting info about job
# NgetJobInfo(neosjob) # "nco" "MINOS" "AMPL" "Done"
# NgetFinalResults(neosjob)
# ```
# The available NEOS solvers and the required modeling language(s) can be found on the following NEOS solvers page [**http://www.neos-server.org/neos/solvers/**](NEOS Solvers.html) at the University of Wisconsin *Institutes for Discovery*.
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pathlib import Path
from arcgis.features import GeoAccessor
from arcgis.gis import GIS
from arcgis.geoenrichment import Country
import arcpy
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# Project directory layout: data/interim/interim.gdb under the project root.
dir_prj = Path.cwd().parent
dir_data = dir_prj/'data'
dir_int = dir_data/'interim'
gdb_int = dir_int/'interim.gdb'
arcpy.env.overwriteOutput = True
# +
# Analysis configuration for the Seattle CBSA area of interest.
aoi_name = 'cbsa_seattle'
aoi_fc = gdb_int/aoi_name
id_col = 'IDField'
# NOTE(review): kmeans_fc does not appear to be used later in this notebook.
kmeans_fc = gdb_int/f'{aoi_fc.stem}_kmeans'
enrich_fc = gdb_int/'block_groups_enrich'
overwrite_enrich = True
# +
ev = Country('usa', gis=GIS('pro')).enrich_variables
# Keep current-year ("...CY") variables plus ACS variables, excluding ACS
# margin-of-error rows (alias contains " MOE ").
# FIX: the regex pattern is now a raw string — '\s' in a plain string literal
# is an invalid escape sequence (DeprecationWarning, a SyntaxError in future
# Python); the runtime value is unchanged.
sv = ev[
    (ev.name.str.endswith('CY'))
    | (
        (ev.alias.str.contains('ACS'))
        & (~ev.alias.str.contains(r'\sMOE\s'))
    )
].drop_duplicates('name').reset_index(drop=True)
sv.info()
# Semicolon-delimited variable string in the format EnrichLayer expects.
e_str = ';'.join(sv.enrich_name)
print(f'{e_str[:120]}...')
# +
# Pull the US block groups overlapping the AOI (only if the enriched feature
# class is missing or a rebuild is forced).
# NOTE(review): bg_fc is assigned only inside this branch yet used
# unconditionally in the cells below — if enrich_fc exists and
# overwrite_enrich is False, the next cell raises NameError. Confirm flow.
if not arcpy.Exists(enrich_fc) or overwrite_enrich:
    bg_fc = arcpy.ba.GenerateGeographiesFromOverlay(
        geography_level="US.BlockGroups",
        in_features=str(aoi_fc),
        id_field=id_col,
        out_feature_class='memory/bg',
        overlap_type="INTERSECT",
        ratios="AREA_ONLY"
    )[0]
    bg_fc
# -
# Select features with overlap ratio 'Area' > 0.5 and delete them.
bg_lyr = arcpy.management.MakeFeatureLayer(bg_fc)[0]
arcpy.management.SelectLayerByAttribute(bg_lyr, where_clause='Area > 0.5')
arcpy.management.DeleteFeatures(bg_lyr)
# NOTE(review): the three lines below repeat the block above verbatim —
# presumably an accidental duplicate paste; confirm and remove if so.
bg_lyr = arcpy.management.MakeFeatureLayer(bg_fc)[0]
arcpy.management.SelectLayerByAttribute(bg_lyr, where_clause='Area > 0.5')
arcpy.management.DeleteFeatures(bg_lyr)
# Resolve the remaining block-group IDs to standard-geography features.
bg_std_fc = arcpy.ba.StandardGeographyTA(
    geography_level="US.BlockGroups",
    out_feature_class=r"memory/bg_std",
    input_type="TABLE",
    in_ids_table=bg_fc,
    geography_key_field="ID"
)[0]
# Enrich the block groups with the selected demographic variables.
# BUG FIX: the original passed the undefined name `enrich_variables`
# (NameError); the semicolon-delimited enrichment string built earlier in
# this notebook is `e_str`.
arcpy.ba.EnrichLayer(str(bg_fc), out_feature_class=str(enrich_fc), variables=e_str)
# +
# Restrict the variable list to the field names actually present on enrich_fc.
fld_lst = [f.name.lower() for f in arcpy.ListFields(str(enrich_fc))]
analysis_vars = list(ev[ev.enrich_field_name.str.lower().isin(fld_lst)].drop_duplicates('name').enrich_field_name)
print(f'{len(analysis_vars):,}')
# +
# Read the enriched attribute table into a DataFrame for analysis.
e_df = pd.DataFrame([r for r in arcpy.da.SearchCursor(str(enrich_fc), analysis_vars)], columns=analysis_vars)
e_df.info()
e_df.head()
# +
# Identify columns containing nulls and look up their variable metadata.
na_cols = e_df.isna().any()
na_cols = na_cols[na_cols].index.values
na_df = ev[ev.enrich_field_name.isin(na_cols)].reset_index(drop=True)
print(len(na_df.index))
# +
# Drop the null-containing columns entirely.
e_df.dropna(axis=1, inplace=True)
e_df.info()
# +
# Rescale every column to [0, 1] so PCA is not dominated by large-magnitude fields.
sclr = MinMaxScaler()
scl_arr = sclr.fit_transform(e_df)
# +
# Fit a full PCA, then count how many leading components are needed to retain
# at least 95% of the total variance.
pca = PCA()
pca.fit(scl_arr)
pca_min_variance = 0.95
cv = pd.Series(pca.explained_variance_ratio_.cumsum())
n_cmpnts = cv[cv > pca_min_variance].index.min() + 1
n_cmpnts
# +
# Re-fit with just that many components and project the data onto them.
pca_sel = PCA(n_components=n_cmpnts)
cmpnts = pca_sel.fit_transform(scl_arr)
# +
# Elbow plot: k-means inertia (within-cluster sum of squares) for k = 1..14,
# to pick a reasonable cluster count visually.
distortions = []
K = range(1, 15)
for k in K:
    km = KMeans(n_clusters=k)
    km.fit(cmpnts)
    distortions.append(km.inertia_)
plt.figure(figsize=(16,8))
plt.plot(K, distortions, 'bx-')
plt.xlabel('k')
plt.ylabel('Distortion')
plt.title('The Elbow Method showing the optimal k')
plt.show()
# +
def get_kmeans(cluster_count):
    """Cluster the PCA components into `cluster_count` k-means groups and
    write the labeled block groups to a new feature class; return its path."""
    km = KMeans(n_clusters=cluster_count, random_state=42)
    # fit_transform's distance matrix is discarded; only km.labels_ is used.
    km.fit_transform(cmpnts)
    bg_df = GeoAccessor.from_featureclass(str(enrich_fc), fields=['ID']).rename(columns={'ID': 'id'}).set_index('id').spatial.set_geometry('SHAPE', inplace=False)
    bg_df.insert(0, 'kmeans_cluster', km.labels_)
    km_fc = bg_df.spatial.to_featureclass(gdb_int/f'{aoi_name}_block_group_kmeans_{cluster_count:02d}')
    print(f'Created {km_fc}')
    return km_fc
# Write cluster solutions for k = 2, 3, 4.
for k in range(2, 5):
    get_kmeans(k)
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **<font size="6" color='darkgreen'>Kaggle Credit Card Fraud Dataset</font>**<br>
# <br>
# <font size=5>We use an open-source [dataset](https://www.kaggle.com/mlg-ulb/creditcardfraud) from Kaggle.<font>
# # Split Datasets
# +
import pandas as pd
import numpy as np
# import some models
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from tensorflow import keras
# import evaluation metrics
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import confusion_matrix
# plot some metrics
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# Load the Kaggle credit-card fraud dataset.
df = pd.read_csv("creditcard.csv")
# len(df) is the row count directly; the original measured the length of an
# arbitrary column (df.iloc[:, 1]) to get the same number.
print("The number of example of this dataset is:", len(df))
df.tail()
# -
# **<font color='green' size=3>The *Time* and *Amount* columns were not transformed by PCA, so I scale them manually to make the data comparable and also to make gradient descent faster in the neural network later<font>**
# +
from sklearn.preprocessing import StandardScaler, RobustScaler
# RobustScaler centers on the median and scales by the IQR, so it is less
# sensitive to the extreme transaction amounts than StandardScaler.
# (The original also instantiated a StandardScaler that was never used;
# that dead local has been removed.)
rob_scaler = RobustScaler()
df['scaled_amount'] = rob_scaler.fit_transform(df['Amount'].values.reshape(-1,1))
df['scaled_time'] = rob_scaler.fit_transform(df['Time'].values.reshape(-1,1))
df.drop(['Time','Amount'], axis=1, inplace=True)
# the new scaled_amount and scaled_time columns are appended at the end;
# move them to the front for convenient processing
cols = df.columns.tolist()
cols = cols[-2:] + cols[:-2]
df = df[cols]
df.head()
# -
# create training and test sets
X = df.iloc[:,:-1]  # all feature columns
y = df.iloc[:,-1]   # last column: the fraud label (1 = fraud)
# ratio of positive (fraud) examples
sum(y)/len(y)
# **<font color='green' size=3>Now we see that the dataset is extremely imbalanced with only 1~2 positive (fraud) examples in 1000.<br>
# It means that accuracy is not a good metric to evaluate model performance, for a dummy classifier that always predicts negative would have an accuracy of 99.8%<font>**
# <br>
# **<font size=3>split training and test set<font>**
# Hold out 15% of the data for final testing.
# NOTE(review): no random_state (split is irreproducible) and no stratify=y —
# with so few positives, stratifying would keep the class ratio stable.
X_train,X_test,y_train, y_test = train_test_split(X, y.values, test_size = 0.15)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
X_train.head()
# # Quick implementation with logisitc regression
# +
# quickly implement a simple model to get intuition
# Sweep the inverse-regularization strength C (smaller C = stronger L2 penalty).
for c in [0.001,0.01,0.1,1,10]:
    log_reg = LogisticRegression(C=c, solver='lbfgs',penalty="l2",max_iter=1500).fit(X_train,y_train)
    print("\nAUC score of regularization with L2 of C=" + str(c) +" is:", roc_auc_score(y_test,log_reg.predict(X_test)))
    print("F1 score of regularization with L2 of C=" + str(c) +" is:", f1_score(y_test,log_reg.predict(X_test)))
# -
# Precision/recall pairs across all probability thresholds (thresholds unused).
precision, recall, thresholds = precision_recall_curve(y_test,log_reg.predict_proba(X_test)[:,1])
# NOTE(review): the usual convention is plot(recall, precision); here
# precision is on the x-axis — confirm the intended orientation.
pr_curve = plt.plot(precision, recall, label ='Precision-Recall Curve')
#
# <font color='green' size=3>From above precision recall cuve we see that with 75% AUC, a well-perform model would get around **0.7 recall and 0.8 precision**<font>
#
# # Random Forest
# Tree models are better for imbalance datasets.
# Now we try several tree models to have a look.
# +
# separate a validation set from training set for grid_search below
X_train_t,X_val,y_train_t, y_val = train_test_split(X_train, y_train, test_size = 0.15)
from sklearn.ensemble import RandomForestClassifier
# +
# Manual grid search over tree depth (d) and leaf count (l), scored by F1 on
# the validation split; prints each time a new best is found.
best_score = 0
for d in [10,15,17,19,22]:
    for l in [15,20,25,28,30,32]:
        forest = RandomForestClassifier(n_estimators=30, random_state=0,max_depth=d,max_leaf_nodes=l)
        forest.fit(X_train_t, y_train_t)
        score = f1_score(y_val,forest.predict(X_val))
        if score > best_score:
            best_score = score
            # NOTE(review): best_parameters is recorded but never read later;
            # the winning values are hard-coded in the next cell instead.
            best_parameters = {"d":d, "l":l}
            print("Best depth are:",d)
            print("\nBest leaf nodes are:",l)
# print("\nAccuracy on training set: {:.3f}".format(forest.score(X_train_t, y_train_t)))
# print("\nAccuracy on validation set: {:.3f}".format(forest.score(X_val, y_val)))
# print("\nAUC score is", roc_auc_score(y_val,forest.predict(X_val)))
# print("\nF1 score is", f1_score(y_val,forest.predict(X_val)))
# -
# best parameter:<br>
# Current depth are: 22<br>
# Current leaf nodes are: 32
# +
# train more rounds with best parameter to check if there's better output
# (500 estimators, best depth/leaf values from the grid search above)
forest = RandomForestClassifier(n_estimators=500, random_state=0,max_depth=22,max_leaf_nodes=32)
forest.fit(X_train_t, y_train_t)
print("Accuracy on training set: {:.3f}".format(forest.score(X_train_t, y_train_t)))
print("Accuracy on validation set: {:.3f}".format(forest.score(X_val, y_val)))
print("\nAUC score is", roc_auc_score(y_val,forest.predict(X_val)))
print("F1 score is", f1_score(y_val,forest.predict(X_val)))
# -
# Per-feature importance scores of the fitted forest.
forest.feature_importances_
def plot_feature_importances(model):
    """Draw a horizontal bar chart of a fitted model's feature importances,
    labeled with the column names of the global feature frame X."""
    feature_names = X.columns.tolist()
    count = len(feature_names)
    plt.barh(range(count), model.feature_importances_, align='center')
    plt.yticks(np.arange(count), feature_names)
    plt.xlabel("Feature importance")
    plt.ylabel("Feature")
    plt.axis('tight')
plot_feature_importances(forest)
# +
# to export a beautiful tree plot
from sklearn.tree import export_graphviz
import graphviz
# BUG FIX: class_names must be listed in ascending class order (0, 1), i.e.
# ["normal", "fraud"] for this dataset — the original had them swapped, so
# the exported plot labeled normal leaves as "fraud" and vice versa. (The
# decision-tree export cell later in this notebook uses the correct order.)
export_graphviz(forest.estimators_[0], out_file="forest.dot", class_names=["normal", "fraud"],
                feature_names=X.columns.tolist(), impurity=False, filled=True)
with open("forest.dot") as f:
    dot_graph = f.read()
graphviz.Source(dot_graph)
# +
import itertools

def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Print the confusion matrix and render it as an annotated heatmap.

    Set `normalize=True` to show per-class rates instead of raw counts.
    """
    if normalize:
        row_totals = cm.sum(axis=1)[:, np.newaxis]
        cm = cm.astype('float') / row_totals
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    # Heatmap with the class labels on both axes.
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title, fontsize=14)
    plt.colorbar()
    positions = np.arange(len(classes))
    plt.xticks(positions, classes, rotation=45)
    plt.yticks(positions, classes)

    # Annotate every cell; flip text color at half the maximum for contrast.
    cell_fmt = '.2f' if normalize else 'd'
    midpoint = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            shade = "white" if cm[row, col] > midpoint else "black"
            plt.text(col, row, format(cm[row, col], cell_fmt),
                     horizontalalignment="center", color=shade)

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# +
# Confusion matrix of the random forest on the held-out test set.
forest_cm = confusion_matrix(y_test, forest.predict(X_test))
labels = ['No Fraud', 'Fraud']
plt.figure()
plot_confusion_matrix(forest_cm, labels, title="Random Forest \n Confusion Matrix", cmap=plt.cm.Reds)
# -
# We can see that the recall score is not so satisfactory.
# # XGboost
# <br>
# Let's try another tree model.
# +
import xgboost as xgb
# XGBoost's optimized DMatrix containers for train/test data.
D_train = xgb.DMatrix(X_train, label=y_train)
D_test = xgb.DMatrix(X_test, label=y_test)
# set ordinary params to see performance quickly
# NOTE(review): 'multi:softprob' with num_class=2 works, but 'binary:logistic'
# is the usual choice for a two-class problem — confirm this is intentional.
param = {
    'eta': 0.18,
    'max_depth': 7,
    'objective': 'multi:softprob',
    'gamma':4,
    'num_class': 2}
steps = 60
xgb_model = xgb.train(param, D_train, steps)
# softprob yields one probability per class; argmax picks the predicted label.
preds = xgb_model.predict(D_test)
best_preds = np.asarray([np.argmax(line) for line in preds])
#print("Accuracy on training set: {:.3f}".format(xgb_model.score(X_train, y_train)))
#print("Accuracy on test set: {:.3f}".format(xgb_model.score(X_test, y_test)))
print("\nAUC score is", roc_auc_score(y_test,best_preds))
print("F1 score is", f1_score(y_test,best_preds))
# +
# Confusion matrix of the xgboost model on the held-out test set.
xgboost_cm = confusion_matrix(y_test, best_preds)
labels = ['No Fraud', 'Fraud']
plt.figure()
plot_confusion_matrix(xgboost_cm, labels, title="Xgboost \n Confusion Matrix", cmap=plt.cm.Reds)
# -
# Now we have a better recall than random forest.
from xgboost import plot_importance
# Built-in feature-importance plot for the trained booster.
plot_importance(xgb_model)
# The feature importance is different from that of random forest.
# # Decision Tree
# <br>
# The fact is, we just quickly jump in complicated tree models like rf and xgb. Maybe this dataset requires no complicated models. Let's see decision trees to check the baseline performance of tree models.
from sklearn.tree import DecisionTreeClassifier
# A single shallow decision tree as a baseline for the ensemble models.
# BUG FIX: the original fit on the full (X_train, y_train) — which *contains*
# X_val — and then reported "validation" scores on X_val, i.e. on data the
# tree had already seen. Fit on (X_train_t, y_train_t), the same split the
# random-forest search used, so X_val is a genuine hold-out set.
tree = DecisionTreeClassifier(random_state=0,max_depth=6,max_leaf_nodes=15,min_samples_leaf=10)
tree.fit(X_train_t,y_train_t)
print("Accuracy on training set: {:.3f}".format(tree.score(X_train_t, y_train_t)))
print("Accuracy on validation set: {:.3f}".format(tree.score(X_val, y_val)))
print("\nAUC score is", roc_auc_score(y_val,tree.predict(X_val)))
print("F1 score is", f1_score(y_val,tree.predict(X_val)))
# <font color='green'>**Now we see that actually a simple decision tree could have a good performance.**</font>
plot_feature_importances(tree)
# +
from sklearn.tree import export_graphviz
import graphviz
# class_names are given in ascending class order: normal (0) first, then fraud (1)
aa = export_graphviz(tree, out_file=None, class_names=["normal", "fraud"],
                     feature_names=X.columns.tolist(), impurity=False, filled=True)
graph = graphviz.Source(aa)
graph
# +
# Confusion matrix of the decision tree on the held-out test set.
tree_cm = confusion_matrix(y_test, tree.predict(X_test))
labels = ['No Fraud', 'Fraud']
plt.figure()
# BUG FIX: the original title said "Xgboost", but this matrix is for the
# decision tree.
plot_confusion_matrix(tree_cm, labels, title="Decision Tree \n Confusion Matrix", cmap=plt.cm.Reds)
# -
# # Resample
# <font color='green'>Because the dataset is extremely imbalanced, the model is pushed to predict as many 0s as possible. To avoid this problem, we may resample the dataset. There are two ways of resampling: upsampling and undersampling.<br>
# By upsampling we mean creating more positive examples when training the model, and vice versa for undersampling.
# **Here,We try to use SMOTE technique to upsample the training set with synthetic positive examples.<font>**
from imblearn.over_sampling import SMOTE
from collections import Counter
sm = SMOTE(sampling_strategy='minority')
X_smote, y_smote = sm.fit_sample(X_train, y_train)
Counter(y_smote)
# ## Decision Tree with SM
from sklearn.tree import DecisionTreeClassifier
tree1 = DecisionTreeClassifier(random_state=0,max_depth=6,max_leaf_nodes=15,min_samples_leaf=10)
tree1.fit(X_smote,y_smote)
print("Accuracy on training set: {:.3f}".format(tree1.score(X_smote, y_smote)))
print("Accuracy on validation set: {:.3f}".format(tree1.score(X_val, y_val)))
print("\nAUC score is", roc_auc_score(y_val,tree1.predict(X_val)))
print("F1 score is", f1_score(y_val,tree1.predict(X_val)))
print("\nAUC score is", roc_auc_score(y_test,tree1.predict(X_test)))
print("F1 score is", f1_score(y_test,tree1.predict(X_test)))
# We see that decision tree generates *poor* performance after upsampling.
plot_feature_importances(tree1)
# +
tree_cm1 = confusion_matrix(y_test, tree1.predict(X_test))
labels = ['No Fraud', 'Fraud']
plt.figure()
plot_confusion_matrix(tree_cm1, labels, title="Decision Tree \n Confusion Matrix", cmap=plt.cm.Reds)
# -
# ## Xgboost with SM
#X_test = X_test[X_train.columns]
import scipy
# Sparse matrices keep xgboost's DMatrix construction memory-friendly.
Xsmote = scipy.sparse.csc_matrix(X_smote)
Xtest = scipy.sparse.csc_matrix(X_test)
# +
import xgboost as xgb
# NOTE(review): `test` is assigned but never used below -- presumably a leftover.
test = X_test[X_train.columns]
D_train = xgb.DMatrix(Xsmote, label=y_smote)
D_test = xgb.DMatrix(Xtest, label=y_test)
param = {
    'eta': 0.18,
    'max_depth': 7,
    'objective': 'multi:softprob',
    'gamma':4,
    'num_class': 2}
steps = 50
xgb_model1 = xgb.train(param, D_train, steps)
# multi:softprob yields one probability per class; argmax gives the label.
preds = xgb_model1.predict(D_test)
best_preds = np.asarray([np.argmax(line) for line in preds])
#print("Accuracy on training set: {:.3f}".score(xgb_model1.score(X_smote, y_smote)))
#print("Accuracy on test set: {:.3f}".format(xgb_model.score(X_test, y_test)))
print("\nAUC score is", roc_auc_score(y_test,best_preds))
print("F1 score is", f1_score(y_test,best_preds))
# -
# +
xgb_model1_cm1 = confusion_matrix(y_test, best_preds)
labels = ['No Fraud', 'Fraud']
plt.figure()
plot_confusion_matrix(xgb_model1_cm1, labels, title="Xgboost SM \n Confusion Matrix", cmap=plt.cm.Reds)
# -
# We get better recall but worse precision now.
# ## Logistic with SM
# **Now we have positive and negative sample with same amount.<br> So let's train a logistic regression model another time to check if there's improvements.**
# Small grid over the inverse regularization strength C.
for c in [0.001,0.01,0.1,1]:
    log_reg_s = LogisticRegression(C=c, solver='lbfgs',penalty="l2",max_iter=1500).fit(X_smote,y_smote)
    print("\nAUC score of regularization with L2 of C=" + str(c) +" is:", roc_auc_score(y_test,log_reg_s.predict(X_test)))
    print("F1 score of regularization with L2 of C=" + str(c) +" is:", f1_score(y_test,log_reg_s.predict(X_test)))
plt.figure()
# NOTE: uses the last model fitted in the loop above (C=1).
precision, recall, thresholds = precision_recall_curve(y_test,log_reg_s.predict_proba(X_test)[:,1])
pr_curve = plt.plot(precision, recall, label ='Precision-Recall Curve')
# <font color='green'>We have seen that **AUC score has improved significantly to around 90%** although the above PRcurve looks similar as before.<br>
# This implies that we can reset the prediction threshold to achieve a better f1 score.
# To explore the threshold we can do below raw test:<font>
thresholds = [0.99,0.999,0.9999,0.99999,0.999999]
for i in thresholds:
    print('\nconfusion matrix:\n',confusion_matrix(y_test,log_reg_s.predict_proba(X_test)[:,1]>i))
    print('f1 is:',f1_score(y_test,log_reg_s.predict_proba(X_test)[:,1]>i))
    print('recall is:',recall_score(y_test,log_reg_s.predict_proba(X_test)[:,1]>i))
    print('AUC is:',roc_auc_score(y_test,log_reg_s.predict_proba(X_test)[:,1]>i))
# <font color='green'>**From above search we see that increasing the threshold improves model performance in terms of F1 score.<br>
# Such improvement basically comes from increasing the precision while hurting just a little recall. In a business context, a higher precision in this case means that every time the model predicts fraud, it is more likely that it is really a fraud.<br>
# However a higher precision means that the recall is lower. In a business context, it means that among all the fraud cases, it is less likely for the model to detect.**<br><font>
plt.figure()
precision, recall, thresholds = precision_recall_curve(y_test,log_reg_s.predict_proba(X_test)[:,1]>0.99999)
pr_curve = plt.plot(precision, recall, label ='Precision-Recall Curve')
# **By increasing the threshold we significantly *expand* our PRcurve**
# # Stratified datasets
# <br>
# <font color='green'>The datasets might not distributed evenly, which means that examples with similar features might cluster together and makes out model to overfit particular kinds of examples.<br>
# To avoid that we may stratify and shuffle our datasets.<font>
# +
from sklearn.model_selection import KFold, StratifiedKFold
# 5-fold stratified split; after the loop the variables hold the LAST fold.
sss = StratifiedKFold(n_splits=5, random_state=None, shuffle=True)
for train_index, test_index in sss.split(X, y):
    print("Train:", train_index, "Test:", test_index)
    original_Xtrain, original_Xtest = X.iloc[train_index], X.iloc[test_index]
    original_ytrain, original_ytest = y.iloc[train_index], y.iloc[test_index]
# Turn into an array
original_Xtrain = original_Xtrain.values
original_Xtest = original_Xtest.values
original_ytrain = original_ytrain.values
original_ytest = original_ytest.values
# Check that both splits preserve the (very skewed) class proportions.
train_unique_label, train_counts_label = np.unique(original_ytrain, return_counts=True)
test_unique_label, test_counts_label = np.unique(original_ytest, return_counts=True)
print('\nLabel Distributions: ')
print(train_counts_label/ len(original_ytrain))
print(test_counts_label/ len(original_ytest))
print("\nshape of original_Xtrain:", original_Xtrain.shape)
print("shape of original_Xtest:", original_Xtest.shape)
print("shape of original_ytrain:", original_ytrain.shape)
print("shape of original_ytest:", original_ytest.shape)
# -
sm = SMOTE(sampling_strategy='minority')
# Oversample only the training fold; the test fold stays untouched.
X_smote_s, y_smote_s = sm.fit_sample(original_Xtrain, original_ytrain)
Counter(y_smote_s)
for c in [0.001,0.01,0.1,1]:
    log_reg_sn = LogisticRegression(C=c, solver='lbfgs',penalty="l2",max_iter=1500).fit(X_smote_s,y_smote_s)
    print("\nAUC score of regularization with L2 of C=" + str(c) +" is:", roc_auc_score(original_ytest,log_reg_sn.predict(original_Xtest)))
    print("F1 score of regularization with L2 of C=" + str(c) +" is:", f1_score(original_ytest,log_reg_sn.predict(original_Xtest)))
# **We can see that the performance is even a little worse than before.**
plt.figure()
precision, recall, thresholds = precision_recall_curve(original_ytest,log_reg_sn.predict_proba(original_Xtest)[:,1])
pr_curve = plt.plot(precision, recall, label ='Precision-Recall Curve')
# # Shallow neural network with Keras
# <br>
# <font color='green'>At last we may try a more complicated models such as neural network. To begin we may use Keras to quickly build a simple network to have a try.<font>
# +
n_inputs = X_smote_s.shape[1]
# Two hidden ReLU layers with L2 weight decay; softmax over the 2 classes.
model_regularize = keras.Sequential([
    keras.layers.Dense(units=n_inputs, input_shape=(n_inputs,),activation='relu',kernel_regularizer=keras.regularizers.l2(0.001)),
    keras.layers.Dense(32, activation='relu',kernel_regularizer=keras.regularizers.l2(0.001)),
    keras.layers.Dense(2, activation='softmax')
])
# sparse_categorical_crossentropy accepts integer labels (0/1) directly.
model_regularize.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model_regularize.fit(X_smote_s, y_smote_s,validation_split=0.2, batch_size=64, epochs=10, shuffle=True, verbose=2)
# -
# NOTE(review): predict_classes/predict_proba are removed in recent tf.keras --
# presumably this ran on an older version; argmax of predict() is the modern equivalent.
nn_prediction = model_regularize.predict(original_Xtest, batch_size=200, verbose=0)
nnclass_prediction = model_regularize.predict_classes(original_Xtest, batch_size=200, verbose=0)
# +
undersample_cm = confusion_matrix(original_ytest, nnclass_prediction)
actual_cm = confusion_matrix(original_ytest, original_ytest)
labels = ['No Fraud', 'Fraud']
plt.figure()
plot_confusion_matrix(undersample_cm, labels, title="Random UnderSample \n Confusion Matrix", cmap=plt.cm.Reds)
# -
# <font color='green'>**From above confusion matrix we can see that this shallow neural network does not outperform logistic regression.<br>
# This implies that our dataset does not include difficult non-linear features for the model to learn.**<font>
f1_score(original_ytest,model_regularize.predict_proba(original_Xtest)[:,1]>0.995)
plt.figure()
precision, recall, thresholds = precision_recall_curve(original_ytest,model_regularize.predict_proba(original_Xtest)[:,1]>0.995)
pr_curve = plt.plot(precision, recall, label ='Precision-Recall Curve')
# The PRcurve is much smoother than that of logistic regression.<br>
# From below model output we see that neural network is more sure of its predictions.
np.round(model_regularize.predict_proba(original_Xtest),3)
# # Anomaly Detection with MultivariateGaussian
# <br>
# <font color='green'>When the positive training example is rare or there’s no particular patterns to detect positive examples, supervised learning algorithms are difficult to train. Then comes anomaly detection.
# <br><br>
# Anomaly detection generally uses Gaussian distribution to find the mean and variance of normal examples. Then we use a validation set with that mean and variance to calculate the probability. Then we try the set a probability threshold so that each calculated probability below that threshold would be predicted as anomaly examples.<font>
df_p = df.loc[df['Class'] == 1]  # fraud cases
df_n = df.loc[df['Class'] == 0]  # normal cases
print(df_p.shape)
print(df_n.shape)
# We only use negative(non-fraud) examples to calculate the mean and variance.<br>
# Thus the training set will only contains negative examples.
# +
X_train_anomaly = df_n.iloc[:,:-1]
y_train_anomaly = df_n.iloc[:,-1]
# Evaluation mix: first 1000 normal examples plus all fraud examples.
Xn = df_n.iloc[0:1000,:-1]
yn = df_n.iloc[0:1000,-1]
Xp = df_p.iloc[:,:-1]
yp = df_p.iloc[:,-1]
Xtest = pd.concat([Xn,Xp])
ytest = pd.concat([yn,yp])
print(X_train_anomaly.shape)
# Fixed: report the shape of the evaluation set just built above
# (previously printed the unrelated X_test from the earlier train/test split).
print(Xtest.shape)
# +
def estimateGaussian(X):
    """
    Estimate per-feature Gaussian parameters from the data in X.

    Parameters
    ----------
    X : ndarray of shape (m, n)
        Training examples, one row per example.

    Returns
    -------
    mu : ndarray of shape (n,)
        Per-feature mean.
    var : ndarray of shape (n,)
        Per-feature biased (1/m) variance, matching the original
        hand-rolled computation.
    """
    # np.mean / np.var (default ddof=0) compute exactly the 1/m estimates
    # the previous manual sums produced, in optimized C loops.
    mu = np.mean(X, axis=0)
    var = np.var(X, axis=0)
    return mu, var
# Fit the per-feature mean/variance on normal examples only.
mu, sigma2 = estimateGaussian(X_train_anomaly.values)
# -
print(mu.shape)
print(sigma2.shape)
# +
def multivariateGaussian(X, mu, sigma2):
    """
    Evaluate the multivariate Gaussian density N(mu, diag(sigma2)) at each
    row of X and return the vector of probabilities.
    """
    n_features = len(mu)
    cov = np.diag(sigma2)
    centered = X - mu.T
    # Normalization constant (2*pi)^(k/2) * |Sigma|^(1/2).
    norm_const = (2 * np.pi) ** (n_features / 2) * np.linalg.det(cov) ** 0.5
    # Row-wise quadratic form (x - mu)^T Sigma^-1 (x - mu).
    quad_form = np.sum(centered @ np.linalg.pinv(cov) * centered, axis=1)
    return np.exp(-0.5 * quad_form) / norm_const
# Density of each training example under the fitted Gaussian.
p = multivariateGaussian(X_train_anomaly.values, mu, sigma2)
# -
p.shape
# +
def selectThreshold(yval, pval):
    """
    Find the best probability threshold (epsilon) for flagging outliers.

    Scans 1000 evenly spaced thresholds between min(pval) and max(pval);
    an example is predicted anomalous when its density pval < epsilon.

    Parameters
    ----------
    yval : ndarray of shape (m,)
        Ground-truth labels (1 = anomaly/fraud, 0 = normal).
    pval : ndarray of shape (m,)
        Densities from multivariateGaussian for the same examples.

    Returns
    -------
    (best_epi, best_F1, best_prec, best_rec) : the threshold maximizing F1,
        together with the F1, precision and recall achieved at it.
        Fixed: previously the returned precision/recall belonged to the
        *last* threshold tried, not the best one, and degenerate thresholds
        triggered 0/0 divisions.
    """
    best_epi = 0
    best_F1 = 0
    best_prec = 0
    best_rec = 0
    stepsize = (max(pval) - min(pval)) / 1000
    epi_range = np.arange(pval.min(), pval.max(), stepsize)
    for epi in epi_range:
        predictions = (pval < epi)[:, np.newaxis]
        tp = np.sum(predictions[yval == 1] == 1)
        fp = np.sum(predictions[yval == 0] == 1)
        fn = np.sum(predictions[yval == 1] == 0)
        # Skip degenerate thresholds instead of producing 0/0 warnings.
        if tp + fp == 0 or tp + fn == 0:
            continue
        prec = tp / (tp + fp)
        rec = tp / (tp + fn)
        if prec + rec == 0:
            continue
        F1 = (2 * prec * rec) / (prec + rec)
        if F1 > best_F1:
            best_F1 = F1
            best_epi = epi
            best_prec = prec
            best_rec = rec
    return best_epi, best_F1, best_prec, best_rec
pval = multivariateGaussian(Xtest.values, mu, sigma2)
# Pick the epsilon that maximizes F1 on the labeled validation mix.
epsilon, F1, prec, rec = selectThreshold(ytest.values, pval)
print("Best epsilon found using cross-validation:",epsilon)
print("Best F1 on Cross Validation Set:",F1)
print("Recall score:",rec)
print("Precision score:",prec)
print("Outliers found:",sum(pval<epsilon))
# -
# <font color='green'>**It turns out that anomaly detection was able to get a pretty well recall so that all fraud cases would be detected and the F1 of 71% is fairly well**<font>
# # Conclusion
# <br>
# In this credit card fraud dataset, where only **0.17%** of examples are positive, we have used typical supervised learning algorithms like logistic regression and deep learning algorithms such as neural networks to detect credit card frauds.
# <br>
# It turns out that simple tree models could have quite a good performance with F1 score of 86%. <br>
# <br>
# We also try to upsample the positives to make the dataset more balanced. However, model performance after upsampling is not better than that before.Then we try shallow neural network and the recall improves while the precision deteriorate.<br>
# <br>
# At last with anomaly detection, we easily achieve a recall score of 100% while the F1 is 71%. Anomaly detection is well suited in situations where positive training examples are not enough and there's no particular patterns of postive examples.
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# FP-Growth Algorithm
#
# FP-Growth is an efficient and scalable method to complete set of frequent patterns.
# It allows frequent item set discovery without candidate item set generation.
# It is a two-step approach:
#
# 1.Build a compact data structure called the FP-Tree.
#
# Scan the transaction DB for the first time, find the frequent items.
# Order them into a list L in frequency descending order.
# For each transaction, order its frequent items according to the order of L.
# Scan DB the second time, construct FP-tree by putting each frequency ordered transaction onto it.
#
# 2.Extracts frequent item sets directly from the FP-Tree
#
#
# +
import pandas as pd
df = pd.read_csv("tesco_dataset.csv")
df
# +
# # %load fpgrowth.py
"""
A Python implementation of the FP-growth algorithm.
Basic usage of the module is very simple:
> from fp_growth import find_frequent_itemsets
> find_frequent_itemsets(transactions, minimum_support)
"""
from collections import defaultdict, namedtuple
from itertools import imap
__author__ = 'Eric Naeseth <eric@naeseth.com>'
__copyright__ = 'Copyright © 2009 Eric Naeseth'
__license__ = 'MIT License'
def find_frequent_itemsets(transactions, minimum_support, include_support=False):
    """
    Find frequent itemsets in the given transactions using FP-growth. This
    function returns a generator instead of an eagerly-populated list of items.
    The `transactions` parameter can be any iterable of iterables of items.
    `minimum_support` should be an integer specifying the minimum number of
    occurrences of an itemset for it to be accepted.
    Each item must be hashable (i.e., it must be valid as a member of a
    dictionary or a set).
    If `include_support` is true, yield (itemset, support) pairs instead of
    just the itemsets.

    NOTE: Python 2 code (uses itertools.imap and dict.iteritems).
    """
    items = defaultdict(lambda: 0) # mapping from items to their supports
    # Load the passed-in transactions and count the support that individual
    # items have.
    for transaction in transactions:
        for item in transaction:
            items[item] += 1
    # Remove infrequent items from the item support dictionary.
    items = dict((item, support) for item, support in items.iteritems()
        if support >= minimum_support)
    # Build our FP-tree. Before any transactions can be added to the tree, they
    # must be stripped of infrequent items and their surviving items must be
    # sorted in decreasing order of frequency.
    def clean_transaction(transaction):
        # Keep only frequent items, most-frequent first (Python 2: filter
        # returns a list here, so in-place sort is valid).
        transaction = filter(lambda v: v in items, transaction)
        transaction.sort(key=lambda v: items[v], reverse=True)
        return transaction
    master = FPTree()
    for transaction in imap(clean_transaction, transactions):
        master.add(transaction)
    def find_with_suffix(tree, suffix):
        # Recursively mine the (conditional) tree, growing each frequent
        # item into a larger itemset that ends with `suffix`.
        for item, nodes in tree.items():
            support = sum(n.count for n in nodes)
            if support >= minimum_support and item not in suffix:
                # New winner!
                found_set = [item] + suffix
                yield (found_set, support) if include_support else found_set
                # Build a conditional tree and recursively search for frequent
                # itemsets within it.
                cond_tree = conditional_tree_from_paths(tree.prefix_paths(item))
                for s in find_with_suffix(cond_tree, found_set):
                    yield s # pass along the good news to our caller
    # Search for frequent itemsets, and yield the results we find.
    for itemset in find_with_suffix(master, []):
        yield itemset
class FPTree(object):
    """
    An FP tree.
    This object may only store transaction items that are hashable
    (i.e., all items must be valid as dictionary keys or set members).

    NOTE: Python 2 code (dict.iterkeys / print statements).
    """
    # A route records the first (head) and last (tail) node holding an item,
    # linked together through each node's `neighbor` pointer.
    Route = namedtuple('Route', 'head tail')
    def __init__(self):
        # The root node of the tree.
        self._root = FPNode(self, None, None)
        # A dictionary mapping items to the head and tail of a path of
        # "neighbors" that will hit every node containing that item.
        self._routes = {}
    @property
    def root(self):
        """The root node of the tree."""
        return self._root
    def add(self, transaction):
        """Add a transaction to the tree."""
        point = self._root
        for item in transaction:
            next_point = point.search(item)
            if next_point:
                # There is already a node in this tree for the current
                # transaction item; reuse it.
                next_point.increment()
            else:
                # Create a new point and add it as a child of the point we're
                # currently looking at.
                next_point = FPNode(self, item)
                point.add(next_point)
                # Update the route of nodes that contain this item to include
                # our new node.
                self._update_route(next_point)
            point = next_point
    def _update_route(self, point):
        """Add the given node to the route through all nodes for its item."""
        assert self is point.tree
        try:
            route = self._routes[point.item]
            route[1].neighbor = point # route[1] is the tail
            self._routes[point.item] = self.Route(route[0], point)
        except KeyError:
            # First node for this item; start a new route.
            self._routes[point.item] = self.Route(point, point)
    def items(self):
        """
        Generate one 2-tuples for each item represented in the tree. The first
        element of the tuple is the item itself, and the second element is a
        generator that will yield the nodes in the tree that belong to the item.
        """
        for item in self._routes.iterkeys():
            yield (item, self.nodes(item))
    def nodes(self, item):
        """
        Generate the sequence of nodes that contain the given item.
        """
        try:
            node = self._routes[item][0]
        except KeyError:
            return
        # Follow the neighbor links from the route head onwards.
        while node:
            yield node
            node = node.neighbor
    def prefix_paths(self, item):
        """Generate the prefix paths that end with the given item."""
        def collect_path(node):
            # Walk up to (but not including) the root, then reverse so the
            # path reads root-to-leaf.
            path = []
            while node and not node.root:
                path.append(node)
                node = node.parent
            path.reverse()
            return path
        return (collect_path(node) for node in self.nodes(item))
    def inspect(self):
        # Debugging aid: dump the tree structure and the item routes.
        print 'Tree:'
        self.root.inspect(1)
        print
        print 'Routes:'
        for item, nodes in self.items():
            print '  %r' % item
            for node in nodes:
                print '    %r' % node
def conditional_tree_from_paths(paths):
    """Build a conditional FP-tree from the given prefix paths."""
    tree = FPTree()
    condition_item = None
    items = set()
    # Import the nodes in the paths into the new tree. Only the counts of the
    # leaf notes matter; the remaining counts will be reconstructed from the
    # leaf counts.
    for path in paths:
        if condition_item is None:
            # Every prefix path ends with the same item -- the one being
            # conditioned on.
            condition_item = path[-1].item
        point = tree.root
        for node in path:
            next_point = point.search(node.item)
            if not next_point:
                # Add a new node to the tree.
                items.add(node.item)
                # Non-leaf counts start at 0 and are rebuilt below.
                count = node.count if node.item == condition_item else 0
                next_point = FPNode(tree, node.item, count)
                point.add(next_point)
                tree._update_route(next_point)
            point = next_point
    assert condition_item is not None
    # Calculate the counts of the non-leaf nodes.
    for path in tree.prefix_paths(condition_item):
        count = path[-1].count
        for node in reversed(path[:-1]):
            node._count += count
    return tree
class FPNode(object):
    """A node in an FP tree.

    NOTE: Python 2 code (dict.itervalues / print statements).
    """
    def __init__(self, tree, item, count=1):
        # Owning tree, the item stored at this node, and its occurrence count
        # (both item and count are None for the root).
        self._tree = tree
        self._item = item
        self._count = count
        self._parent = None
        self._children = {}
        # Next node holding the same item (the tree's route linked list).
        self._neighbor = None
    def add(self, child):
        """Add the given FPNode `child` as a child of this node."""
        if not isinstance(child, FPNode):
            raise TypeError("Can only add other FPNodes as children")
        # Silently ignored when a child for the same item already exists.
        if not child.item in self._children:
            self._children[child.item] = child
            child.parent = self
    def search(self, item):
        """
        Check whether this node contains a child node for the given item.
        If so, that node is returned; otherwise, `None` is returned.
        """
        try:
            return self._children[item]
        except KeyError:
            return None
    def __contains__(self, item):
        return item in self._children
    @property
    def tree(self):
        """The tree in which this node appears."""
        return self._tree
    @property
    def item(self):
        """The item contained in this node."""
        return self._item
    @property
    def count(self):
        """The count associated with this node's item."""
        return self._count
    def increment(self):
        """Increment the count associated with this node's item."""
        if self._count is None:
            raise ValueError("Root nodes have no associated count.")
        self._count += 1
    @property
    def root(self):
        """True if this node is the root of a tree; false if otherwise."""
        return self._item is None and self._count is None
    @property
    def leaf(self):
        """True if this node is a leaf in the tree; false if otherwise."""
        return len(self._children) == 0
    @property
    def parent(self):
        """The node's parent"""
        return self._parent
    @parent.setter
    def parent(self, value):
        if value is not None and not isinstance(value, FPNode):
            raise TypeError("A node must have an FPNode as a parent.")
        if value and value.tree is not self.tree:
            raise ValueError("Cannot have a parent from another tree.")
        self._parent = value
    @property
    def neighbor(self):
        """
        The node's neighbor; the one with the same value that is "to the right"
        of it in the tree.
        """
        return self._neighbor
    @neighbor.setter
    def neighbor(self, value):
        if value is not None and not isinstance(value, FPNode):
            raise TypeError("A node must have an FPNode as a neighbor.")
        if value and value.tree is not self.tree:
            raise ValueError("Cannot have a neighbor from another tree.")
        self._neighbor = value
    @property
    def children(self):
        """The nodes that are children of this node."""
        return tuple(self._children.itervalues())
    def inspect(self, depth=0):
        # Debugging aid: print this subtree, indented per level.
        print ('  ' * depth) + repr(self)
        for child in self.children:
            child.inspect(depth + 1)
    def __repr__(self):
        if self.root:
            return "<%s (root)>" % type(self).__name__
        return "<%s %r (%r)>" % (type(self).__name__, self.item, self.count)
if __name__ == '__main__':
    from optparse import OptionParser
    import csv
    # Command-line driver: mine a CSV of transactions and print every
    # frequent itemset together with its support.
    p = OptionParser(usage='%prog data_file')
    p.add_option('-s', '--minimum-support', dest='minsup', type='int',
        help='Minimum itemset support (default: 2)')
    p.add_option('-n', '--numeric', dest='numeric', action='store_true',
        help='Convert the values in datasets to numerals (default: false)')
    p.set_defaults(minsup=2)
    p.set_defaults(numeric=False)
    options, args = p.parse_args()
    if len(args) < 1:
        p.error('must provide the path to a CSV file to read')
    transactions = []
    with open(args[0]) as database:
        for row in csv.reader(database):
            if options.numeric:
                transaction = []
                for item in row:
                    # `long` is Python 2 only.
                    transaction.append(long(item))
                transactions.append(transaction)
            else:
                transactions.append(row)
    result = []
    for itemset, support in find_frequent_itemsets(transactions, options.minsup, True):
        result.append((itemset,support))
    # Sort lexicographically by itemset for stable, readable output.
    result = sorted(result, key=lambda i: i[0])
    for itemset, support in result:
        print str(itemset) + ' ' + str(support)
# -
# %run -m fpgrowth -s 4 tesco_dataset.csv
# Reference:
#
# https://en.wikibooks.org/wiki/Data_Mining_Algorithms_In_R/Frequent_Pattern_Mining/The_FP-Growth_Algorithm
#
# https://wimleers.com/sites/wimleers.com/files/FP-Growth%20presentation%20handouts%20%E2%80%94%C2%A0Florian%20Verhein.pdf
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: kl
# language: python
# name: kl
# ---
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from keras import models
from keras.layers import Dense
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import sklearn
# Gender classification from voice features with a linear SVM.
df=pd.read_csv("voice.csv")
df.head(10)
label_value_count=df.label.value_counts()
print(label_value_count)
print(df.info())
# Replacing labels: male -> 1, female -> 0.
# (Renamed from `dict`, which shadowed the builtin of the same name.)
label_map = {'label':{'male':1,'female':0}}
df.replace(label_map,inplace = True)
df.head(10) #after conversion of labels as 0 or 1
df.shape
x=df.loc[:,df.columns!='label']
y=df.loc[:,'label']
#Scaling all features into [-1, 1]
scaler=sklearn.preprocessing.MinMaxScaler((-1,1))
x=scaler.fit_transform(x)
print(x)
#Dividing into Test and Train (80/20 split, fixed seed for reproducibility)
x_train,x_test,y_train,y_test=train_test_split(x, y, test_size=0.2, random_state=7)
from sklearn.svm import SVC # "Support Vector Classifier"
clf = SVC(kernel='linear')
clf.fit(x_train, y_train)
y_pred=clf.predict(x_test)
print("Accuracy score is",accuracy_score(y_pred,y_test))
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
"""Count words."""
import re
def count_words(text):
    """Count how many times each unique word occurs in text.

    Words are lowercased and delimited by non-word characters, matching the
    original `re.split(r'[^\w]', ...)` behavior (so digits and underscores
    are part of words, and punctuation separates them).

    Parameters
    ----------
    text : str

    Returns
    -------
    dict mapping each word to its number of occurrences.
    """
    from collections import Counter  # local import keeps the cell self-contained
    # re.findall(r'\w+') yields exactly the non-empty tokens the old
    # split-and-filter loop kept; Counter aggregates them in one C pass.
    return dict(Counter(re.findall(r'\w+', text.lower())))
def test_run():
    """Read input.txt and report its 10 most and 10 least common words."""
    with open("input.txt", "r") as infile:
        word_counts = count_words(infile.read())
        # Most frequent first; ties keep dict insertion order (sort is stable).
        by_frequency = sorted(word_counts.items(),
                              key=lambda pair: pair[1], reverse=True)
        sections = (("10 most common words:\nWord\tCount", by_frequency[:10]),
                    ("\n10 least common words:\nWord\tCount", by_frequency[-10:]))
        for heading, rows in sections:
            print(heading)
            for word, count in rows:
                print("{}\t{}".format(word, count))
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Multi-Vehicle Rendezvous Problem
#
# The dynamics model of an omnidirectional vehicle with friction coefficient
# $\alpha$ is defined by the following equation:
#
# $$
# m \dot{\textbf{v}} = \textbf{u} - \alpha \textbf{v}
# $$
#
# iLQR is applied to a two vehicle system in order to control them to gently
# collide with each other with a terminal velocity of $0 \frac{m}{s}$.
#
# The state vector $\textbf{x}$ is defined as follows:
# $$\begin{equation*}
# \textbf{x} = \begin{bmatrix}
# x_0 & y_0 & x_1 & y_1 & \dot{x}_0 & \dot{y}_0 & \dot{x}_1 & \dot{y}_1
# \end{bmatrix}^T
# \end{equation*}$$
#
# The action vector $\textbf{u}$ is defined as follows:
# $$\begin{equation*}
# \textbf{u} = \begin{bmatrix}
# F_{x_0} & F_{y_0} & F_{x_1} & F_{y_1}
# \end{bmatrix}^T
# \end{equation*}$$
#
# **Note**: That since this dynamics model is linear, this problem can be solved
# more efficiently with a simple Linear Quadratic Regulator (LQR) instead. This
# example is just used to demonstrate how to setup an auto-differentiated
# dynamics model.
# %matplotlib inline
from __future__ import print_function
import numpy as np
import theano.tensor as T
import matplotlib.pyplot as plt
from ilqr import iLQR
from ilqr.cost import QRCost
from ilqr.dynamics import AutoDiffDynamics
def on_iteration(iteration_count, xs, us, J_opt, accepted, converged):
    """iLQR fit() callback: record the cost and print iteration status.

    Appends J_opt to the module-level J_hist list defined before fitting.
    """
    J_hist.append(J_opt)
    if converged:
        status = "converged"
    elif accepted:
        status = "accepted"
    else:
        status = "failed"
    print("iteration", iteration_count, status, J_opt)
# +
# Symbolic state variables: positions then velocities of both vehicles.
x_inputs = [
    T.dscalar("x_0"),
    T.dscalar("y_0"),
    T.dscalar("x_1"),
    T.dscalar("y_1"),
    T.dscalar("x_0_dot"),
    T.dscalar("y_0_dot"),
    T.dscalar("x_1_dot"),
    T.dscalar("y_1_dot"),
]
# Symbolic control variables: planar force applied to each vehicle.
u_inputs = [
    T.dscalar("F_x_0"),
    T.dscalar("F_y_0"),
    T.dscalar("F_x_1"),
    T.dscalar("F_y_1"),
]
dt = 0.1  # Discrete time step.
m = 1.0  # Mass.
alpha = 0.1  # Friction coefficient.
# Acceleration.
def acceleration(x_dot, u):
    # Discretization of m*v_dot = u - alpha*v using the module-level
    # dt, m and alpha defined just above.
    x_dot_dot = x_dot * (1 - alpha * dt / m) + u * dt / m
    return x_dot_dot
# Discrete dynamics model definition.
f = T.stack([
    x_inputs[0] + x_inputs[4] * dt,
    x_inputs[1] + x_inputs[5] * dt,
    x_inputs[2] + x_inputs[6] * dt,
    x_inputs[3] + x_inputs[7] * dt,
    x_inputs[4] + acceleration(x_inputs[4], u_inputs[0]) * dt,
    x_inputs[5] + acceleration(x_inputs[5], u_inputs[1]) * dt,
    x_inputs[6] + acceleration(x_inputs[6], u_inputs[2]) * dt,
    x_inputs[7] + acceleration(x_inputs[7], u_inputs[3]) * dt,
])
# Auto-differentiated dynamics: iLQR derives the Jacobians from this graph.
dynamics = AutoDiffDynamics(f, x_inputs, u_inputs)
# -
# -
# An instantaneous cost function $l(\textbf{x}_t, \textbf{u}_t)$ is defined as follows:
#
# $$
# l(\textbf{x}_t, \textbf{u}_t) = \textbf{x}_t^T Q \textbf{x}_t + \textbf{u}_t^T R \textbf{u}_t
# $$
#
# where $Q$ is the state error and $R$ is the control error.
#
# In order to approach the two vehicles to each other, $Q$ is set up to penalize differences in positions as $||\textbf{x}_0 - \textbf{x}_1||^2$ while penalizing non-zero velocities.
# +
Q = np.eye(dynamics.state_size)
Q[0, 2] = Q[2, 0] = -1
Q[1, 3] = Q[3, 1] = -1
R = 0.1 * np.eye(dynamics.action_size)
cost = QRCost(Q, R)
# -
# The vehicles are initialized at $(0, 0)$ and $(10, 10)$ with velocities $(0, -5)$ and $(5, 0)$ respectively.
# +
N = 200 # Number of time steps in trajectory.
x0 = np.array([0, 0, 10, 10, 0, -5, 5, 0]) # Initial state.
# Random initial action path.
us_init = np.random.uniform(-1, 1, (N, dynamics.action_size))
# -
J_hist = []
ilqr = iLQR(dynamics, cost, N)
xs, us = ilqr.fit(x0, us_init, on_iteration=on_iteration)
x_0 = xs[:, 0]
y_0 = xs[:, 1]
x_1 = xs[:, 2]
y_1 = xs[:, 3]
x_0_dot = xs[:, 4]
y_0_dot = xs[:, 5]
x_1_dot = xs[:, 6]
y_1_dot = xs[:, 7]
_ = plt.title("Trajectory of the two omnidirectional vehicles")
_ = plt.plot(x_0, y_0, "r")
_ = plt.plot(x_1, y_1, "b")
_ = plt.legend(["Vehicle 1", "Vehicle 2"])
t = np.arange(N + 1) * dt
_ = plt.plot(t, x_0, "r")
_ = plt.plot(t, x_1, "b")
_ = plt.xlabel("Time (s)")
_ = plt.ylabel("x (m)")
_ = plt.title("X positional paths")
_ = plt.legend(["Vehicle 1", "Vehicle 2"])
_ = plt.plot(t, y_0, "r")
_ = plt.plot(t, y_1, "b")
_ = plt.xlabel("Time (s)")
_ = plt.ylabel("y (m)")
_ = plt.title("Y positional paths")
_ = plt.legend(["Vehicle 1", "Vehicle 2"])
_ = plt.plot(t, x_0_dot, "r")
_ = plt.plot(t, x_1_dot, "b")
_ = plt.xlabel("Time (s)")
_ = plt.ylabel("x_dot (m)")
_ = plt.title("X velocity paths")
_ = plt.legend(["Vehicle 1", "Vehicle 2"])
_ = plt.plot(t, y_0_dot, "r")
_ = plt.plot(t, y_1_dot, "b")
_ = plt.xlabel("Time (s)")
_ = plt.ylabel("y_dot (m)")
_ = plt.title("Y velocity paths")
_ = plt.legend(["Vehicle 1", "Vehicle 2"])
_ = plt.plot(J_hist)
_ = plt.xlabel("Iteration")
_ = plt.ylabel("Total cost")
_ = plt.title("Total cost-to-go")
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
import matplotlib.pyplot as plt
sys.path.append('..')
from mdgraph.data.protein_graph import pdb_to_nx, plot_protein_graph
# Coarse-grained (CA-only) contact graph of the unfolded structure, cutoff 10.
G = pdb_to_nx('./data/1FME-unfolded.pdb',
              select_string='name CA', cutoff=10)
# +
fig, ax = plt.subplots(figsize=(10, 10))
# Color nodes by residue number along the chain.
node_color = [n[1] for n in G.nodes.data('resnum')]
edges = G.edges()
weights = [G[u][v]['weight'] for u,v in edges]
options = {
    'with_labels': True,
    "linewidths": 0,
    'node_color': node_color,
    "cmap": 'Reds'
}
# Fixed: `weights*10` on a Python list repeats the list 10 times instead of
# scaling each edge width by 10; scale element-wise instead.
nx.draw(G, edges=edges, width=[w * 10 for w in weights], **options)
# +
fig, ax = plt.subplots(figsize=(10, 10))
node_color = [n[1] for n in G.nodes.data('resnum')]
plot_protein_graph(G, ax)
ax = plt.gca()
ax.margins(0.08)
plt.axis("off")
plt.tight_layout()
# +
# Compare unfolded vs folded contact graphs at a tighter cutoff of 8.
G1 = pdb_to_nx('./data/1FME-unfolded.pdb',
               select_string='name CA', cutoff=8)
G2 = pdb_to_nx('./data/1FME.pdb',
               select_string='name CA', cutoff=8)
Gs = [G1, G2]
# NOTE(review): `cutoff = 6` below is never used -- presumably a leftover.
cutoff = 6
titles = ['unfolded', 'folded']
fig, axes = plt.subplots(ncols=len(Gs), figsize=(20, 10))
for i in range(len(Gs)):
    # plt.sca(ax)
    G = Gs[i]
    ax = axes[i]
    title = titles[i]
    plot_protein_graph(G, ax)
    ax.set_title(title, fontsize=20)
    ax.margins(0.08)
    ax.axis("off")
plt.tight_layout()
plt.show()
fig.savefig('test.png', bbox_inches='tight')
# +
# Same comparison at cutoff 10, drawn with a circular layout.
G1 = pdb_to_nx('./data/1FME-unfolded.pdb',
               select_string='name CA', cutoff=10)
G2 = pdb_to_nx('./data/1FME.pdb',
               select_string='name CA', cutoff=10)
Gs = [G1, G2]
titles = ['unfolded', 'folded']
fig, axes = plt.subplots(ncols=len(Gs), figsize=(20, 10))
for i in range(len(Gs)):
    # plt.sca(ax)
    G = Gs[i]
    ax = axes[i]
    title = titles[i]
    node_color = [n[1] for n in G.nodes.data('resnum')]
    plot_protein_graph(G, ax, layout_type='circular')
    ax.set_title(title, fontsize=20)
    ax.margins(0.08)
    ax.axis("off")
plt.tight_layout()
plt.show()
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import os
import imageio
from skimage import io
import numpy as np
import h5py
# +
filename = 'VJ_OFC_7_240_D6'
folder = 'C:/2pData/Vijay data/VJ_OFC_7_D9_trained/'
datafile = os.path.join(folder, '%s.h5'%filename)
datafile
# -
hf = h5py.File(datafile, 'r')
list(hf)  # display the top-level dataset names in the HDF5 file
# FIX: `Dataset.value` was deprecated and removed in h5py 3.0; index the
# dataset with [()] to read the full array into memory.
im = hf["imaging"][()]
im_int8 = im.astype(np.uint8)  # NOTE(review): name says int8 but dtype is uint8
dims = im_int8.shape
dims
# One output frame per stride-3 window over [3, dims[0]-3); frames are 512x512.
im_final = np.empty([len(range(3, dims[0]-3, 3)), 512, 512], dtype='uint8')
# +
frame_count = 0
for iframe in range(3,dims[0]-3,3):
im_final[frame_count,:,:] = np.mean( im_int8[ iframe-2:iframe+2 ,:,:], 0 )
frame_count += 1
# -
im_final.shape
imageio.mimwrite(folder + 'framAvg.mp4', im_final , fps = 15.0)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="ckRUV-vZzmEQ"
from tensorflow import keras
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Conv2D,MaxPooling2D,Dense,Flatten
import numpy as np
import pickle
import matplotlib.pyplot as plt
# + id="aD0U44dt0AXv"
from keras.datasets.mnist import load_data
# + id="zQQfNCX-1_x2"
(x_train, y_train), (x_test, y_test)=digits=load_data(path="mnist.npz")
# + colab={"base_uri": "https://localhost:8080/"} id="WtCSCQ802Bu4" outputId="b974d406-8d17-4417-abf7-aecaffd13b75"
x_train.shape
# + id="OfnLb0cV3nS8"
x_train=keras.utils.normalize(x_train,axis=1)
x_test=keras.utils.normalize(x_test,axis=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="lXWpFk7W2RDr" outputId="789f2135-99cd-4f5c-c4e0-76bc5affc3f0"
plt.imshow(x_train[0],cmap='gray')
# + id="0kbfDZQ_FGil"
x_train=x_train.reshape(len(x_train),28,28,1)
x_test=x_test.reshape(len(x_test),28,28,1)
# + id="KxMJB1fu3CfY"
model1=Sequential()
model1.add(Conv2D(filters=64,kernel_size=(3,3),activation='relu'))
model1.add(Conv2D(filters=64,kernel_size=(3,3),activation='relu'))
model1.add(Flatten())
model1.add(Dense(units=32,activation='relu'))
model1.add(Dense(10,activation='softmax'))
model1.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="TJ1Uo5-sEw71" outputId="8cc5a9c2-f10e-49e6-ad33-7dd3f8d9d4b8"
model1.fit(x_train,y_train,epochs=3)
# + id="IORfrZLrE8GV"
# FIX: variable was misspelled "pridictions"; renamed at both uses.
predictions = model1.predict(x_test)  # class-probability vectors for the test set
# + colab={"base_uri": "https://localhost:8080/"} id="JkNhW0xtH_IE" outputId="cd218d57-3c17-4888-f3df-c177420fc3ff"
# argmax over the 10 class probabilities gives the predicted digit.
print(np.argmax(predictions[1]))
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="LgqiiJK_IxXV" outputId="38b099f8-bb7e-4231-cbfb-c325056e404a"
plt.imshow(x_test[1])
# + id="5ug5AOu3I5G7"
from tensorflow.keras.models import load_model
model1.save('mnist_model_cnn.h5')
# + id="Ra0zS-esJhs0" outputId="3b3524d2-89d2-4d87-f4d5-0d639826f83c" colab={"base_uri": "https://localhost:8080/"}
val_loss,val_acc=model1.evaluate(x_test,y_test)
print(val_loss,val_acc)
# + id="PWS3khKoJ2s9"
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/abdulhasibuddin/My-Deep-Neural-Network/blob/master/Time_series_1_4_Unistep_Univariate.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="l5OqVKtWFiXu" colab_type="code" outputId="b704e1ae-db61-4fc2-c247-b8dba4c12762" colab={"base_uri": "https://localhost:8080/", "height": 34}
from __future__ import absolute_import, division, print_function, unicode_literals
try:
# # %tensorflow_version only exists in Colab.
# %tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
from tensorflow.compat.v2.keras.models import Sequential
from tensorflow.compat.v2.keras.layers import TimeDistributed, Bidirectional, LSTM, Dense, GRU, Conv2D, Conv3D, Conv1D, ConvLSTM2D
import statistics as stat
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import math
from IPython.display import clear_output
mpl.rcParams['figure.figsize'] = (8, 6)
mpl.rcParams['axes.grid'] = False
# + id="CH9CL3ZOqdBY" colab_type="code" colab={}
# HYPER-PARAMETERS::
DATASET_TYPE = 'sin' #'sin'
DATASET_START_INDEX = 0
DATASET_END_INDEX = 400 #1000000
DATA_INTERVAL = 1
VALIDATION_DATASET_SIZE = 20
TESTING_DATASET_START_INDEX = 421
TESTING_DATASET_END_INDEX = 430
#TRAIN_SPLIT_RATIO = 0.8
UNIVARIATE_PAST_HISTORY = 5
UNIVARIATE_FUTURE_TARGET = 0
#BUFFER_SIZE = 2
BATCH_SIZE_TRAIN = 1
#BATCH_SIZE_VAL = 5
#BATCH_SIZE = 5
EPOCHS = 5
NUM_LAYERS = 5
NEURONS_PER_LAYER = 6
# + id="wrBtdCBVIs8K" colab_type="code" colab={}
def create_dataset(dataset_type, start_index, end_index, interval):
    """Build a one-column DataFrame of sampled trig (or raw numeric) values.

    Args:
        dataset_type: one of 'sin', 'cos', 'tan', 'cot', 'sec', 'cosec', 'num'.
        start_index, end_index, interval: degree range passed to range().

    Returns:
        pd.DataFrame with a single 'values' column.

    Raises:
        ValueError: for an unknown dataset_type (the original silently
        returned an empty frame, which hid typos in the caller).
    """
    # Dispatch table replaces the original if/elif chain.
    generators = {
        'sin': lambda d: np.sin(np.deg2rad(d)),
        'cos': lambda d: np.cos(np.deg2rad(d)),
        'tan': lambda d: np.tan(np.deg2rad(d)),
        'cot': lambda d: 1 / np.tan(np.deg2rad(d)),
        'sec': lambda d: 1 / np.cos(np.deg2rad(d)),
        'cosec': lambda d: 1 / np.sin(np.deg2rad(d)),
        'num': float,
    }
    try:
        fn = generators[dataset_type]
    except KeyError:
        raise ValueError(f"unknown dataset_type: {dataset_type!r}")
    values = [fn(degree) for degree in range(start_index, end_index, interval)]
    return pd.DataFrame(values, columns=['values'])
# + id="qe3awW7CboEn" colab_type="code" outputId="1b5bcd84-a411-4d11-8ce6-f53a46b7287b" colab={"base_uri": "https://localhost:8080/", "height": 34}
df_training_dataset = create_dataset(DATASET_TYPE, DATASET_START_INDEX, DATASET_END_INDEX, DATA_INTERVAL)
df_training_dataset.shape
# + id="LrzU3J8Dflng" colab_type="code" outputId="3ca48cea-688c-4229-981a-969923d6f61e" colab={"base_uri": "https://localhost:8080/", "height": 34}
df_validation_dataset = create_dataset(DATASET_TYPE, DATASET_END_INDEX, DATASET_END_INDEX+VALIDATION_DATASET_SIZE, DATA_INTERVAL)
df_validation_dataset.shape
# + id="0x9_L137k8ke" colab_type="code" colab={}
def univariate_data(dataset, start_index, end_index, history_size, target_size):
    """Slice `dataset` into (window, label) pairs for univariate forecasting.

    Each sample is the previous `history_size` values reshaped to
    (history_size, 1); its label is the value `target_size` steps ahead.
    """
    start_index = start_index + history_size
    if end_index is None:
        # Default: run to the last index that still has a label available.
        end_index = len(dataset) - target_size
    print('dataset.shape =',dataset.shape)
    print('start_index =',start_index)
    print('end_index =',end_index)
    print('history_size =',history_size)
    print('target_size =',target_size)
    sample_range = range(start_index, end_index)
    data = [np.reshape(dataset[range(i - history_size, i)], (history_size, 1))
            for i in sample_range]
    labels = [dataset[i + target_size] for i in sample_range]
    print('no. of data =',len(data))
    print('no. of labels =',len(labels))
    return np.array(data), np.array(labels)
# + id="tXaM7coVr_ry" colab_type="code" colab={}
# Setting seed to ensure reproducibility
tf.random.set_seed(13)
# + id="Sim6UtkbMEmu" colab_type="code" outputId="3e945fce-292c-4a7a-e312-c75051d8c6ac" colab={"base_uri": "https://localhost:8080/", "height": 384}
df_training_dataset.plot(subplots=True)
# + id="iuylQQsY1e0m" colab_type="code" outputId="9f442233-8241-4a9a-c150-c58904324066" colab={"base_uri": "https://localhost:8080/", "height": 85}
training_dataset = df_training_dataset['values']
training_dataset.index = df_training_dataset.index
validation_dataset = df_validation_dataset['values']
validation_dataset.index = df_validation_dataset.index
print('type(training_dataset) =',type(training_dataset))
print('type(validation_dataset) =',type(validation_dataset))
print('training_dataset.shape =',training_dataset.shape)
print('validation_dataset.shape =',validation_dataset.shape)
#training_dataset.head()
# + id="DA5eyozwHMT5" colab_type="code" outputId="96b55dad-59af-429a-c7a5-71dbaf55d1e5" colab={"base_uri": "https://localhost:8080/", "height": 119}
training_dataset_values = training_dataset.values
num_training_data = training_dataset_values.shape[0]
validation_dataset_values = validation_dataset.values
num_validation_data = validation_dataset_values.shape[0]
print('no. of training data =',num_training_data)
print('no. of validation data =',num_validation_data)
print('type(training_dataset_values) =',type(training_dataset_values))
print('type(validation_dataset_values) =',type(validation_dataset_values))
print('training_dataset_values.shape =',training_dataset_values.shape)
print('validation_dataset_values.shape =',validation_dataset_values.shape)
# + id="THPtWv3L2eP3" colab_type="code" outputId="45383b0f-09ef-467f-9d12-a1c95dc2de66" colab={"base_uri": "https://localhost:8080/", "height": 51}
mean_training_dataset = training_dataset_values.mean()
std_training_dataset = training_dataset_values.std()
print('mean_training_dataset =',mean_training_dataset)
print('std_training_dataset =',std_training_dataset)
# + id="bc-kILgYlCSH" colab_type="code" colab={}
# Normalization:
def normalize(dataset):
    """Z-score `dataset` using the module-level training-set mean/std."""
    return (dataset - mean_training_dataset) / std_training_dataset
# + id="tE3H6LQU2_Ue" colab_type="code" colab={}
# Denormalization:
def denormalize(normalized_data):
    """Invert `normalize`: map z-scores back to the original data scale."""
    return normalized_data * std_training_dataset + mean_training_dataset
# + id="19p8K4BrmGaW" colab_type="code" colab={}
# + id="lA5OZmSa3QHb" colab_type="code" outputId="414ba918-b862-4420-ccef-6c78e21d4e5c" colab={"base_uri": "https://localhost:8080/", "height": 85}
# Normalization:
training_dataset_normalized = normalize(training_dataset_values)
validation_dataset_normalized = normalize(validation_dataset_values) # normalization have to be same
print('training_dataset_normalized.shape =',training_dataset_normalized.shape)
print('validation_dataset_normalized.shape =',validation_dataset_normalized.shape)
print('type(training_dataset_normalized) =',type(training_dataset_normalized))
print('type(validation_dataset_normalized) =',type(validation_dataset_normalized))
# + id="u8errW5p3chn" colab_type="code" outputId="ea4b6137-0a66-472c-f67a-7515c9bf002c" colab={"base_uri": "https://localhost:8080/", "height": 306}
univariate_past_history = UNIVARIATE_PAST_HISTORY
univariate_future_target = UNIVARIATE_FUTURE_TARGET
print('Training:')
x_train_uni, y_train_uni = univariate_data(training_dataset_normalized, 0, None,
univariate_past_history,
univariate_future_target)
print('\nValidation:')
x_val_uni, y_val_uni = univariate_data(validation_dataset_normalized, 0, None,
univariate_past_history,
univariate_future_target)
# + id="erCrFzSd4_OM" colab_type="code" outputId="82727a66-37e5-4503-c6d7-c5b2678ba5f6" colab={"base_uri": "https://localhost:8080/", "height": 221}
print('x_train_uni.shape =',x_train_uni.shape)
print('y_train_uni.shape =',y_train_uni.shape)
print('x_val_uni.shape =',x_val_uni.shape)
print('y_val_uni.shape =',y_val_uni.shape)
print('num_val_data =',y_val_uni.shape[0])
print('type(x_train_uni) =',type(x_train_uni))
print('type(y_train_uni) =',type(y_train_uni))
print('type(x_val_uni) =',type(x_val_uni))
print('type(y_val_uni) =',type(y_val_uni))
print('\nLast value of the window of past history =',x_train_uni[0][-1])
print('Target value to predict =',y_train_uni[0])
# + id="DcWksTggDrx7" colab_type="code" colab={}
def create_time_steps(length): # only needed for plots
    """Return [-length, ..., -1]: a negative time axis for history plots."""
    # Idiom: a single range() replaces the original manual append loop.
    return list(range(-length, 0))
# + id="hI_UTq8GHlGR" colab_type="code" colab={}
def show_plot(plot_data, delta, title):
    """Plot a history window plus true/predicted future points.

    plot_data: [history, true_future, model_prediction] — the last two are
    optional. Returns the `plt` module so callers can chain further calls.
    """
    labels = ['History', 'True Future', 'Model Prediction']
    marker = ['.-', 'rx', 'go']
    time_steps = create_time_steps(plot_data[0].shape[0])
    future = delta if delta else 0
    plt.title(title)
    for i, series in enumerate(plot_data):
        if i:
            # Future points: single markers at x == future.
            plt.plot(future, series, marker[i], markersize=10, label=labels[i])
        else:
            # History: a line over the negative time axis.
            plt.plot(time_steps, series.flatten(), marker[i], label=labels[i])
    plt.legend()
    plt.xlim([time_steps[0], (future + 5) * 2])
    plt.xlabel('Time-Step')
    return plt
# + id="AT-hQHOWJqq9" colab_type="code" outputId="2ddd60ee-4116-40fc-e0c8-1657cda6ad01" colab={"base_uri": "https://localhost:8080/", "height": 421}
show_plot([x_train_uni[0], y_train_uni[0]], 0, 'Sample Example')
# + id="1Zk9kzocJMAf" colab_type="code" outputId="f8c54791-fbc1-44ad-d83f-ac0cae59d1c6" colab={"base_uri": "https://localhost:8080/", "height": 170}
print('(# of training data, features, time steps) =',x_train_uni.shape)
print('x_train_uni.shape[-2:] =',x_train_uni.shape[-2:])
print('y_train_uni.shape =',y_train_uni.shape)
print('y_train_uni.shape[0] =',y_train_uni.shape[0])
print('\n(# of validation data, features, time steps) =',x_val_uni.shape)
print('x_val_uni.shape[-2:] =',x_val_uni.shape[-2:])
print('y_val_uni.shape =',y_val_uni.shape)
print('y_val_uni.shape[0] =',y_val_uni.shape[0])
# + id="rzh3Z-UUNqrO" colab_type="code" outputId="707f8b12-cc8c-4234-808e-3d71457f1015" colab={"base_uri": "https://localhost:8080/", "height": 34}
x_train_uni[0][0][0]
# + [markdown] id="etC-26CmoIuH" colab_type="text"
# MODEL
# + id="4fRYLMTGZPQK" colab_type="code" outputId="6e59455f-d4e4-4e47-ebd0-952305feb20f" colab={"base_uri": "https://localhost:8080/", "height": 357}
myModel = Sequential()
myModel.add(GRU(NEURONS_PER_LAYER, input_shape=x_train_uni.shape[-2:], return_sequences=True))
for _ in range(NUM_LAYERS-2):
myModel.add(Bidirectional(GRU(NEURONS_PER_LAYER, return_sequences=True)))
myModel.add(Bidirectional(GRU(NEURONS_PER_LAYER))) # at last RNN layer, 'return_sequences=True' will create problem in prediction shape.
myModel.add(Dense(1))
myModel.compile(optimizer='adam', loss='mae')
#myModel.build(x_train_uni.shape[-2:])
myModel.summary()
# + [markdown] id="5ICGpPi_oP9-" colab_type="text"
# TRAINING
# + id="vbp6_II2iwHW" colab_type="code" outputId="43a8083d-199d-4560-d1be-641ad3de300f" colab={"base_uri": "https://localhost:8080/", "height": 204}
history = myModel.fit(x_train_uni, y_train_uni,
batch_size=BATCH_SIZE_TRAIN,
epochs=EPOCHS,
# We pass some validation for
# monitoring validation loss and metrics
# at the end of each epoch
validation_data=(x_val_uni, y_val_uni)
)
# + id="DWNIv0OQd_Lw" colab_type="code" outputId="05ce2a57-64bf-4dcd-c646-8ad907aab409" colab={"base_uri": "https://localhost:8080/", "height": 51}
print('loss =',history.history['loss'])
print('val_loss =',history.history['val_loss'])
# + [markdown] id="exUsJtX5oWM3" colab_type="text"
# PREDICTION
# + id="1xtsbWowjCyu" colab_type="code" outputId="fb3c3d8f-4e14-44f7-886b-83bfcd0d26a3" colab={"base_uri": "https://localhost:8080/", "height": 408}
df_test_dataset = create_dataset(DATASET_TYPE, TESTING_DATASET_START_INDEX, TESTING_DATASET_END_INDEX, DATA_INTERVAL)
print(type(df_test_dataset))
print('df_test_dataset.shape =',df_test_dataset.shape)
test_dataset = df_test_dataset['values'].values
print('type(test_dataset) =',type(test_dataset))
print('\ntest_dataset.shape =',test_dataset.shape)
test_dataset_normalized = normalize(test_dataset)
print('\nTest dataset without normalization:')
x_test_uni, y_test_uni = univariate_data(test_dataset, 0, None,
UNIVARIATE_PAST_HISTORY,
UNIVARIATE_FUTURE_TARGET)
print('\nTest dataset with normalization:')
x_test_uni_normalized, y_test_uni_normalized = univariate_data(test_dataset_normalized, 0, None,
UNIVARIATE_PAST_HISTORY,
UNIVARIATE_FUTURE_TARGET)
# + id="TPK5OUYnusfF" colab_type="code" outputId="0f40c06b-a678-471a-9466-0b7d64565b3d" colab={"base_uri": "https://localhost:8080/", "height": 34}
y_test_uni_normalized.shape
# + id="HWDA6izXgOsJ" colab_type="code" colab={}
#prediction = simple_lstm_model.predict(x_test_uni)
prediction_normalized = myModel.predict(x_test_uni_normalized)
# + id="Truo_o3oAPuu" colab_type="code" outputId="920f25b6-4fc7-4d64-fc73-4a623a7711e1" colab={"base_uri": "https://localhost:8080/", "height": 34}
prediction_denormalized = denormalize(prediction_normalized)
index = -1
print('actual =',y_test_uni[index],'; predicted =',prediction_denormalized[index])
# + id="zBJEdcXF5QHY" colab_type="code" outputId="e7297392-08e5-4947-bfb8-3a6770f1558f" colab={"base_uri": "https://localhost:8080/", "height": 34}
type(y_test_uni[0])
# + id="RW6fNC_0tLbv" colab_type="code" outputId="2a11749c-2b12-4675-dd04-88b425766943" colab={"base_uri": "https://localhost:8080/", "height": 51}
'''
data has to be a 3-dimentional array with shape (UNIVARIATE_PAST_HISTORY,1),
i.e., (None, UNIVARIATE_PAST_HISTORY, 1)
'''
val = []
for i in range(1,UNIVARIATE_PAST_HISTORY+1):
val.append([float(i)])
#val_numpy = np.asarray(val)
print(type(val))
print('shape of val = (',len(val),',',len(val[0]),')')
#print(type(val_numpy))
#print('val_numpy.shape =',val_numpy.shape)
# + id="7kDfXAzbrhQF" colab_type="code" outputId="f5f85128-6e54-4c68-fe39-7e91fcab6df3" colab={"base_uri": "https://localhost:8080/", "height": 34}
myModel.predict([val])
# + id="hclXH4QYkTTd" colab_type="code" outputId="9bf2cd5d-6056-4af4-8844-9a696068cb48" colab={"base_uri": "https://localhost:8080/", "height": 51}
print(type(prediction_denormalized))
print(prediction_denormalized.shape)
# + id="cqIgW7gqqMPc" colab_type="code" outputId="ddc7cf67-8c24-4ec1-82ef-a1df4b28e6ea" colab={"base_uri": "https://localhost:8080/", "height": 374}
x_axis_values = [i for i in range(prediction_normalized.shape[0])]
plt.figure()
plt.plot(x_axis_values, y_test_uni, label='actual')
plt.plot(x_axis_values, prediction_denormalized, label='predicted')
plt.legend()
plt.show()
# + id="I0ieXNzWqMZk" colab_type="code" colab={}
# + id="dfo0fEXDoA2P" colab_type="code" outputId="65e74343-eae0-4bab-dc0d-12031bdb658b" colab={"base_uri": "https://localhost:8080/", "height": 105}
# For last predicted group:
list_absolute_error = []
list_signed_error = []
list_error_percentage = []
for actual,predicted in zip(y_test_uni,prediction_denormalized):
actual = actual
predicted = predicted[0]
error = actual-predicted
error_percentage = abs(error/actual)
list_signed_error.append(error)
list_absolute_error.append(abs(error))
list_error_percentage.append(error_percentage)
#print('actual =',actual,'; predicted =',predicted,'; error =',error,'; abs_error =',abs(error))
print('actual =',actual,'; predicted =',predicted,'; error =',error,'; abs_error =',abs(error),'; error_percentage =',abs(error_percentage))
print('len(list_absolute_error) =',len(list_absolute_error))
print('len(list_signed_error) =',len(list_signed_error))
print('len(list_error_percentage) =',len(list_error_percentage))
# + id="MpvV9kiQYT00" colab_type="code" outputId="96f39bcc-8261-46c6-c087-e112a3945b2f" colab={"base_uri": "https://localhost:8080/", "height": 51}
print(y_test_uni.shape)
print(prediction_denormalized.shape)
# + id="W54l43ehXyjX" colab_type="code" outputId="bb070805-880f-4d69-e178-76c076fc8b5a" colab={"base_uri": "https://localhost:8080/", "height": 102}
index = 0
print('actual =',y_test_uni[index],'\npredicted =',prediction_denormalized[index],
'\nerror =',list_signed_error[index],
'\nabs_error =',list_absolute_error[index],
'\nerror_percentage =',list_error_percentage[index])
# + [markdown] id="yOF-bz_K69_C" colab_type="text"
# ERROR:
# + id="DnbOVeFr0toi" colab_type="code" outputId="4ad6e14d-a2bb-41f2-a79c-3192d0b23904" colab={"base_uri": "https://localhost:8080/", "height": 34}
# ERROR: (previous error = 0.005019746610581216)
mean_mae_error = np.mean(np.asarray(list_absolute_error))
print('mean_mae_error =',mean_mae_error)
# + id="bIAIPexJR6YK" colab_type="code" outputId="f8eee933-8eee-4d6b-9133-3371fef68c71" colab={"base_uri": "https://localhost:8080/", "height": 34}
mean_error_percentage = np.mean(np.asarray(list_error_percentage))
print('mean_error_percentage =',mean_error_percentage)
#np.mean(np.asarray([1,2,3,4,5]))
# + id="2__w78iBiIa6" colab_type="code" outputId="91effa91-dee4-4048-8ccf-3e9383ef3eca" colab={"base_uri": "https://localhost:8080/", "height": 374}
# Absolute error plotting:
x_axis_values = [i for i in range(prediction_normalized.shape[0])]
plt.figure()
plt.plot(x_axis_values, list_error_percentage, label='error (pecentage)')
plt.legend()
plt.show()
# + id="91MooIAuxeYw" colab_type="code" outputId="f35b1eb5-f549-4c48-bf69-77fcad30511a" colab={"base_uri": "https://localhost:8080/", "height": 374}
# Absolute error plotting:
x_axis_values = [i for i in range(prediction_normalized.shape[0])]
plt.figure()
plt.plot(x_axis_values, list_absolute_error, label='absolute error')
plt.legend()
plt.show()
# + id="6hhvDkVMxWor" colab_type="code" outputId="b84c7913-a9e4-4eb3-d84b-32c11b5866cb" colab={"base_uri": "https://localhost:8080/", "height": 374}
# Signed error plotting:
x_axis_values = [i for i in range(prediction_normalized.shape[0])]
plt.figure()
plt.plot(x_axis_values, list_signed_error, label='signed error')
plt.legend()
plt.show()
# + id="NaQbs2RQ0DK8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 374} outputId="bd7d0fbd-b505-41eb-f829-adc38c3054a8"
#'''
# Both errors plotting:
x_axis_values = [i for i in range(prediction_normalized.shape[0])]
plt.figure()
plt.plot(x_axis_values, list_absolute_error, label='absolute error')
plt.plot(x_axis_values, list_signed_error, label='signed error')
plt.legend()
plt.show()
#'''
# + id="qt94Fj7R0btH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 438} outputId="3d14bbc8-d133-4f34-aa20-01ccce9330b8"
# REAL-TIME PREDICTION:
index = 0
print('History last =',x_test_uni[index][-1],'; True value =',y_test_uni[index],'; Predicted value =',prediction_denormalized[index])
show_plot([x_test_uni[index], y_test_uni[index], prediction_denormalized[index]], 0, 'Prediction')
# + id="kIKcW_xG3Jeg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 438} outputId="41c745ce-de76-49db-bfbe-e0e27e585431"
index = -1
print('History last =',x_test_uni[index][-1],'; True value =',y_test_uni[index],'; Predicted value =',prediction_denormalized[index])
show_plot([x_test_uni[index], y_test_uni[index], prediction_denormalized[index]], 0, 'Prediction')
# + id="AXt-0gz_D7xM" colab_type="code" colab={}
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Check Source:
#
# - https://github.com/rlcode/reinforcement-learning/tree/master/1-grid-world
# - https://github.com/dennybritz/reinforcement-learning
# - https://github.com/Neo-47/Reinforcement-Learning-Algorithms/tree/master/Dynamic%20Programming
# ### Import Packages
import os,copy
import numpy as np
from gridworld import GridworldEnv
from plot_utils import plot_values
# ### Set Environment
# create env
env = GridworldEnv(shape=(10,10))
# +
# show created gridworld
env._render()
# x is your position and T are the two terminal states.
# -
# ### Preview
# print the state space and action space, total number of states and actions
print(f'state space: {env.observation_space}')
print(f'Action_space: {env.action_space}')
print(f'total number of states: {env.nS}')
print(f'total number of actions: {env.nA}')
# looking at one-step dynamics of the Markov decision process (MDP)
prob, next_state, reward, done = env.P[1][0][0]
prob, next_state, reward, done
# ### Iterative Policy Evaluation
def policy_evaluation(env, policy, gamma=1, theta=1e-8):
    """Evaluate `policy` with iterative Bellman-expectation sweeps.

    Repeatedly replaces V[s] with the expected one-step return under
    `policy` until the largest per-state change drops below `theta`.
    Returns the state-value array V (length env.nS).
    """
    V = np.zeros(env.nS)
    while True:
        delta = 0
        for s in range(env.nS):
            # Expectation over all actions and their stochastic transitions.
            backed_up = sum(
                action_prob * prob * (reward + gamma * V[next_state])
                for a, action_prob in enumerate(policy[s])
                for prob, next_state, reward, done in env.P[s][a]
            )
            delta = max(delta, abs(V[s] - backed_up))
            V[s] = backed_up
        if delta < theta:
            # Converged: no state moved by more than theta this sweep.
            return V
# +
# set random policy
random_policy = np.ones([env.nS, env.nA]) / env.nA # equal prob on all
# evaluate the policy
V = policy_evaluation(env, random_policy)
worldshape = (10,10)
plot_values(V,worldshape,figsize=(20,20))
# -
# ### Iterative Policy Improvement
# +
def q_from_v(env, V, s, gamma=1):
    """Return the action-value vector q(s, ·) implied by state values V."""
    q = np.zeros(env.nA)
    for a in range(env.nA):
        # Expected return of taking `a` in `s`, over stochastic transitions.
        q[a] = sum(prob * (reward + gamma * V[nxt])
                   for prob, nxt, reward, done in env.P[s][a])
    return q
def policy_improvement(env, V, gamma=1):
    """Derive a greedy stochastic policy from state-value function V.

    Probability is spread uniformly over all actions whose action-value
    ties for the maximum in each state.
    """
    # np.zeros is already all-zero; the original's "/ env.nA" was a no-op.
    policy = np.zeros([env.nS, env.nA])
    for s in range(env.nS):
        q = q_from_v(env, V, s, gamma)
        # All maximizing actions share probability equally (a deterministic
        # variant would instead set policy[s][np.argmax(q)] = 1).
        best_a = np.argwhere(q == np.max(q)).flatten()
        policy[s] = np.sum([np.eye(env.nA)[i] for i in best_a], axis=0) / len(best_a)
    return policy
def policy_iteration(env, gamma=1, theta=1e-8):
    """Alternate policy evaluation and greedy improvement until stable.

    Returns (optimal_policy, optimal_V).
    """
    # Start from the uniform random policy.
    policy = np.ones([env.nS, env.nA]) / env.nA
    while True:
        # FIX: the original ignored the caller's gamma/theta here
        # (policy_evaluation ran with defaults, improvement with gamma=1).
        V = policy_evaluation(env, policy, gamma, theta)
        improved = policy_improvement(env, V, gamma)
        if np.array_equal(policy, improved):
            return improved, V
        policy = improved
# +
# obtain the optimal policy and optimal state-value function
policy_pi, V_pi = policy_iteration(env)
# print the optimal policy
print("\nOptimal Policy (LEFT = 0, DOWN = 1, RIGHT = 2, UP = 3):")
print(policy_pi,"\n")
plot_values(V_pi,worldshape,figsize=(20,20))
# -
# ### Iterative Policy Evaluation with Truncated Process
def truncated_policy_evaluation(env, policy, V, max_it=1, gamma=1):
    """Run exactly `max_it` in-place evaluation sweeps and return V.

    FIX: the original while/counter loop compared the counter *after* each
    sweep starting from 0, performing max_it + 1 sweeps (off-by-one).
    """
    for _ in range(max_it):
        for s in range(env.nS):  # one full sweep over all states
            v = 0
            q = q_from_v(env, V, s, gamma)
            for a, action_prob in enumerate(policy[s]):
                v += action_prob * q[a]
            V[s] = v  # in-place update; V is mutated for the caller too
    return V
def truncated_policy_iteration(env, max_it=1, gamma=1, theta=1e-8):
    """Policy iteration with truncated (max_it-sweep) evaluation.

    Stops when successive value estimates differ by less than `theta`.
    Returns (policy, V).
    """
    # np.zeros is already all-zero; the original's "/ env.nA" was a no-op.
    policy = np.zeros([env.nS, env.nA])
    V = np.zeros(env.nS)
    while True:
        # FIX: the original hard-coded gamma=1 here instead of passing gamma.
        policy = policy_improvement(env, V, gamma)
        V_old = V.copy()  # numpy copy; no need for the copy module
        V = truncated_policy_evaluation(env, policy, V, max_it, gamma)
        if max(abs(V - V_old)) < theta:
            return policy, V
# +
policy_tpi, V_tpi = truncated_policy_iteration(env, max_it=2)
# print the optimal policy
print("\nOptimal Policy (LEFT = 0, DOWN = 1, RIGHT = 2, UP = 3):")
print(policy_tpi,"\n")
# plot the optimal state-value function
plot_values(V_tpi,worldshape,figsize=(20,20))
# -
# ### Iterative Value Optimization
def value_iteration(env, gamma=1, theta=1e-8):
    """Find an optimal policy via Bellman-optimality backups.

    Sweeps all states applying V[s] <- max_a q(s, a) until the largest
    change is below `theta`, then extracts the greedy policy.
    Returns (policy, V).
    """
    V = np.zeros(env.nS)
    while True:
        delta = 0
        for s in range(env.nS):
            previous = V[s]
            V[s] = max(q_from_v(env, V, s, gamma))  # optimality backup
            delta = max(delta, abs(V[s] - previous))
        if delta < theta:
            break
    return policy_improvement(env, V, gamma), V
# +
policy_vi, V_vi = value_iteration(env)
# print the optimal policy
print("\nOptimal Policy (LEFT = 0, DOWN = 1, RIGHT = 2, UP = 3):")
print(policy_vi,"\n")
# plot the optimal state-value function
plot_values(V_vi,worldshape,figsize=(20,20))
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### This notebook collects all of the tweets for a given team, imports them into a dataframe, adds a team classification tag, and dumps to a csv.
# +
#bring in the prerequisites
import pandas as pd
import glob
import json
import numpy as np
import pandas as pd
# -
# identify the folder where the files are -- this will be done once per team, aligning the team folder name to the column we will create
files = glob.glob('raw_tweets/pride_phillies/*')
#files = glob.glob('user/*')
len(files)
# +
# Read every per-tweet JSON file into a list of dicts.
dictlist = []
for file in files:
    # FIX: the original leaked file handles (open().read() with no close);
    # a context manager closes each file deterministically, and json.load
    # reads straight from the handle.
    with open(file, 'r') as fh:
        dictlist.append(json.load(fh))
# -
#create a dataframe from the dictionary
df = pd.DataFrame(dictlist)
df.head()
#create the new column and give all rows the same value-- this is how we will keep track of the team names
df['team'] = "phillies"
df.head()
# +
#dump it to a csv so we can use it later
df.to_csv('basic_csv/phillies_tweets.csv', index=True)
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.utils import plot_model
from tensorflow.keras.layers.experimental import preprocessing
from os import path, getcwd, listdir, chdir
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import OrdinalEncoder
from sklearn.model_selection import train_test_split
# -
import statsmodels.api as sm
from statsmodels.formula.api import ols
SEED = 121
data = pd.read_csv('./data/train_set_v3.txt', sep='\t')
data.columns = ['hybrid', 'trial', 'year', 'location', 'yields',
'inbred1', 'inbred2',
'genetic_inb1',
'genetic_inb2', 'trial_cl1', 'trial_cl2']
# data = data.drop(columns=['hybrid'])
data.fillna('UNK', inplace=True)
data = data.sample(len(data), replace=False, random_state=SEED)
train, test = train_test_split(data, test_size=0.1, random_state=SEED)
val, test = train_test_split(test, test_size=0.3, random_state=SEED)
print(len(train), 'train examples')
print(len(val), 'validation examples')
print(len(test), 'test examples')
drop = ['hybrid', 'trial', 'year', 'location', 'yield',
'genetic_inb1',
'genetic_inb2', 'trial_cl1', 'trial_cl2']
# data['yield'] = data['yield'].astype(float)
# d = val.iloc[:20, [4, 5, 6]]
# d.columns = ['y', 'x']
model = ols('yields ~ inbred1 + inbred2', data=train).fit()
model
import tensorflow as tf
tf.test.gpu_device_name()
tf.__version__
import sys
sys.argv
# !pip install pydot
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
gpus
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Content:
# 1. [Derivation of formulae using Taylor polynomial](#taylor)
# ## 1. <a name="taylor">Derivation of formulae using Taylor polynomial</a>
# 
# 
# 
# 
# 
# 
|
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cpp
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: C++17
// language: C++17
// name: xcpp17
// ---
// ## Kattis (https://open.kattis.com)
//
// - throughout these notebooks, you'll find Kattis problems listed under each chapter where appropriate
// - Kattis is a free problem bank and online judge that is widely used in [International Collegiate Programming Contest (ICPC)](https://icpc.global/) around the world
// - Kattis is a great tool to learn various programming languages while solving myriad of intuitive algorithmic problems and developing problem solving skills that are sought after by many potential employers
// - research (https://rambasnet.github.io/pdfs/kattis.pdf) has shown that introducing and assigning Kattis problems motivates students to continuously use Kattis and solve more problems, thereby becoming effective problem solvers -- a hallmark skill of a computer scientist
// - here's a repository of some sample solutions provided in various languages with automated test cases: https://github.com/rambasnet/KattisDemos
// - eventually (by Chapter 7), you'll be able to understand and use the all the programming concepts used in those demo solutions
// - use the Kattis-provided help to get you started: https://open.kattis.com/help
// - you must create a free account here: https://open.kattis.com/login to be able to submit your solution so that Kattis can judge it and give you a verdict
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.9 64-bit (''venv'': venv)'
# name: python3
# ---
# # Owners & collaborators
from prep_n_load import *
plt.rcParams["figure.figsize"] = (18,5)
# ## Number of owners, min/max ownerships
from collections import Counter
# Count how many parcels each owner holds (one entry per owner).
parcels_per_owner = Counter(parcels.owner)
counts = list(parcels_per_owner.values())
print("{} owners total".format(len(parcels_per_owner)))
# BUGFIX: typo "Ownerhips" -> "Ownerships"; also use min()/max() instead of
# sorting the whole list twice (same values, O(n) instead of O(n log n) x2).
print("Ownerships: {} min, {} max".format(min(counts), max(counts)))
# ## Owner leaderboard
show_leaderboard(parcels_per_owner, "parcels")
# * Top spots are The Vault by $WHALE, bullauge, and bnolan.
# ## Histogram of ownerships
# Histogram over per-owner parcel counts (one observation per owner).
ax = sns.histplot(data=counts)
_ = ax.set(xlabel="Number of owned parcels")
# * Obviously most owners own 1 parcel. Many 2 or 3. Curve flattens quickly afterwards.
# ### Zooming in: histogram & cumulative distribution
#
# The following histogram starts with 2 parcels. The red line shows the cumulative distribution.
fig, ax = plt.subplots()
# One bin per integer ownership count, so bars align with whole numbers.
sns.histplot(data=counts, ax=ax, bins=max(counts) + 1)
ax.set(xlabel="Number of owned parcels")
ax.set(xlim=(2, 50))
ax.grid(False, axis="y")
# Second y-axis for the cumulative-distribution overlay.
ax2 = ax.twinx()
sns.ecdfplot(data=counts, ax=ax2, color="red")
ax2.set(xlim=(2, 50))
ax2.set(ylabel="Cum. Probability")
_ = plt.locator_params(axis="x", integer=True, tight=True)
# * Owning > ~5 parcels brings you to the top decile of owners.
# ## Distribution of ownership
#
# How well distributed is parcel ownership? I.e., is a large number of parcels in the hands of only a few?
# What ratio of parcels does each owner own (owners sorted by holdings, descending):
parcel_ratios = [c/parcels.shape[0] for o, c in sorted(parcels_per_owner.items(), key=lambda x: x[1], reverse=True)]
# Cumulated: itertools.accumulate runs in O(n); the previous
# sum(parcel_ratios[:i+1]) per element was O(n^2) with identical results
# (same left-to-right float summation order).
from itertools import accumulate
cumulated_parcel_ratios = list(accumulate(parcel_ratios))
# Cumulated owner ratios (fraction of owners considered so far):
cumulated_owner_ratios = [x/len(parcel_ratios) for x in range(len(parcel_ratios))]
# Plot:
ax = sns.lineplot(x=cumulated_owner_ratios, y=cumulated_parcel_ratios)
ax.set(xlabel="Ratio of owners")
ax.set(ylabel="Ratio of parcels")
_ = ax.set(xlim=(0, 0.2))
# * 10% of the owners own 60% of all parcels.
# * ~6% of the owners own half of all the parcels.
# ## Collaborators
# Count collaborations per collaborator name. A parcel's `contributors` entry
# may be None, and each listed entry may hold several space-separated names.
def _collaborator_names():
    for contribs in parcels.contributors:
        for entry in (contribs or []):
            for name in entry.strip().split(" "):
                if name != "":
                    yield name
collaborators = Counter(_collaborator_names())
collaborator_counts = list(collaborators.values())
print("{} collaborators total".format(len(collaborators)))
print("Collaborations: {} min, {} max".format(sorted(collaborator_counts)[0], sorted(collaborator_counts)[-1]))
# ## Collaborators leaderboard
show_leaderboard(collaborators, "collaborations")
# ## Owner-collaborators
#
# How many owners are also collaborators?
len(set(parcels_per_owner.keys()) & set(collaborators.keys()))
# ## Parcel whales per island
#
# I.e., who owns most of which island?
# Per island, find the single owner ("whale") holding the most parcels there,
# then express that holding as a percentage of the island's total parcel count.
parcelwhales = (
    parcels.groupby(["island", "owner"])["id"]
    .agg("count")  # parcels per (island, owner) pair
    .groupby("island", group_keys=False)
    .nlargest(1)  # top owner within each island
    .to_frame()
    .reset_index("owner")
    .rename(columns={"id": "whaleparcels", "owner": "whale"})
    .merge(island_sizes["parcels"], left_on="island", right_on="island")
)
parcelwhales["whalecoverage"] = parcelwhales.whaleparcels / parcelwhales.parcels * 100
# NOTE(review): drop() by label "Little Ceres" assumes the merged frame is
# indexed by island — confirm the merge preserves that index, else this KeyErrors.
parcelwhales.drop(["Little Ceres"]).sort_values(
    "whalecoverage", ascending=False
)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Applying Shlomi et al's biomarker prediction method to PKU on RECON2
#
# <p>**Authors**: Thierry D.G.A Mondeel, Stefania Astrologo, Ewelina Weglarz-Tomczak & Hans V. Westerhoff <br/>
# University of Amsterdam <br/>2016 - 2018
# </p>
#
# The original publication looked at RECON2's predecessor RECON1. We will reproduce their analysis on RECON2 instead.
# +
import cobra
from utils import findBiomarkers
import pandas as pd
from IPython.core.interactiveshell import InteractiveShell
# Show every expression result in a cell, not only the last one.
InteractiveShell.ast_node_interactivity = "all"
# Load RECON 2.2 with a simplified medium definition.
M = cobra.io.load_json_model('models/recon_2_2_simple_medium.json')
model = M.copy() # this way we can edit model but leave M unaltered
# -
# <span style="color:red">**Assignment (10 min):**</span> Take the previous tutorial's last command cell as a template (we already copy-pasted it for you) to do the same analysis here on the full RECON2 model and the PKU disease state.
#
# **Tips**
# - Don't reinvent the wheel, the idea is the same as the last tutorial.
# - Instead of giving 'R1' as the disease reaction you should now give the PKU gene HGNC:8582 or the two reactions it catalyzes as input.
# - Also think about the fact that there are two equivalent reactions that you have to account for not just one.
# - The number of exchange reactions here is much bigger than in the example. The FVA computation will take quite a bit longer as a result. It may take 2 minutes or so.
# +
# Exchange reactions: no products and an 'EX_' id (metabolite leaves the system).
exchanges = [ rxn for rxn in model.reactions if rxn.products == [] and 'EX_' in rxn.id ]
# Shlomi et al suggested using the following medium: everything in (-1) with a few exceptions
# everything out (unlimited)
# This was actually first proposed in (Sahoo et al, 2012)
for rxn in exchanges:
    rxn.lower_bound = -1
    rxn.upper_bound = 999999
# specifics
# BUGFIX: these medium-specific bounds were previously set on `M` (the
# pristine copy kept unaltered on purpose), which had no effect on the
# analysed `model`; apply them to `model` instead.
model.reactions.EX_o2_e.lower_bound = -40
for rxn in ['EX_h2o_e','EX_h_e','EX_co2_e','EX_nh4_e','EX_pi_e','EX_hco3_e','EX_so4_e']:
    model.reactions.get_by_id(rxn).lower_bound = - 100
# to reduce computation time we check all amino acids + a couple neurotransmitters
biomarkers_to_check = ['EX_his_L_e','EX_ile_L_e','EX_leu_L_e','EX_lys_L_e','EX_met_L_e',
                       'EX_phe_L_e','EX_thr_L_e','EX_trp_L_e','EX_val_L_e','EX_cys_L_e',
                       'EX_glu_L_e','EX_tyr_L_e','EX_ala_L_e','EX_asp_L_e','EX_gly_e',
                       'EX_arg_L_e','EX_gln_L_e','EX_pro_L_e','EX_ser_L_e','EX_asn_L_e',
                       'EX_dopa_e','EX_adrnl_e','EX_srtn_e']
# FVA-based biomarker prediction for the PKU gene (PAH, HGNC:8582).
findBiomarkers(model,fvaRxns=biomarkers_to_check,mods=['HGNC:8582'],synchronous=True)
# + [markdown] outputHidden=false inputHidden=false
# Now we will repeat this calculation for all metabolites that the genome-wide model can produce.
# -
# Genome-wide scan: FVA over every exchange reaction (slower than the short list above).
findBiomarkers(model,fvaRxns=exchanges,mods=['HGNC:8582'],synchronous=True)
# <span style="color:red">**Assignment (3 min):**</span> If you see the prediction of the biofluids/tissue as the brain tissue: does the model correctly predict issues with neurotransmitters in the brain?
#
# <span style="color:red">**Assignment (10 min):**</span>
# What biomarkers are predicted when you focus on blocking the cofactor biopterin recycling reactions that also produce PKU? To get you started we included some code below.
# +
# Inspect the biopterin-recycling reactions: gene association and stoichiometry.
model.reactions.DHPR.gene_reaction_rule, model.reactions.DHPR.reaction
model.reactions.DHPR2.gene_reaction_rule,model.reactions.DHPR2.reaction
model.reactions.r0398.gene_reaction_rule,model.reactions.r0398.reaction
# +
# UNCOMMENT & FIX THE LINE BELOW
# findBiomarkers(model,fvaRxns=biomarkers_to_check,mods=[ADD THE RELEVANT GENES HERE],synchronous=True)
# -
# <span style="color:red">**Assignment (3 min):**</span> Is this what you expected? If not, is the gene annotation perhaps an issue? Check what would happen if you gave 'findBiomarkers' the full list of reactions as input.
# +
# Copy what you did above but give reactions as input
# -
# <span style="color:red">**Assignment (3 min):**</span> Are there any differences between the predictions for the two different ways to get PKU?
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Load Packages
import numpy as np
from matplotlib import pyplot as plt
# %matplotlib inline
# # Load Data Points (Do not modify the following block)
# +
# Load the training set; arrays must be read while the file is open.
with open('training_data.npz', 'rb') as f:
    data = np.load(f)
    x_list = data['x_list']  # dense grid of x values for plotting the true function
    y_list = data['y_list']  # f(x_list) for the true polynomial
    x_data = data['x_data']  # noisy training inputs
    y_data = data['y_data']  # noisy training targets
    n_data = len(x_data)
    w = data['w']  # true polynomial coefficients
    original_degree = data['order']
# Print information of original function.
print("=================================")
print("We have", n_data, "number of data")
print("=================================")
weight_info_string = ''
for d in range(original_degree):
    weight_info_string += 'w'+str(d)+':'+str(round(w[d],ndigits=3))+' '
print("Coefficients of the original polynomial")
print(weight_info_string)
print("=================================")
plt.plot(x_list, y_list, 'b:', linewidth=2, label="Original Function")
plt.scatter(x_data, y_data, s=50, c='r', label="Data Points")
plt.xlim([np.min(x_list),np.max(x_list)])
plt.ylim([np.min(y_data),np.max(y_data)])
plt.legend(prop={'size': 12})
plt.title("Data Plot")
plt.show()
# -
# # Polynomial Regression (Programming Assignment)
# ### Variable Explanation (Do not change variable names)
# - 'w' is true coefficients of the original polynomial function
# - 'original_degree' is the order of the original polynomial function
# - 'x_list' is a list of the points at $x$-axis
# - 'y_list' is a list of function value $f(x)$ corresponding to 'x_list'. In other words, y_list = $f($x_list$)$
# - 'x_data' is an input data
# - 'y_data' is an output data
# - 'n_data' is the number of data points
#
# ### Our goal is to estimate 'w' from data points, 'x_data' and 'y_data'. Answer the following problems.
# ### 1. Compute a Vandermonde matrix when the degree of polynomial is $4$ (30pt)
# - The variable 'degree' is the order of polynomial. In this problem, we set degree=$4$
# - Use the variable 'A' for the Vandermonde matrix. Now, 'A' is initialized as a zero matrix whose elements are all zero. Fill in the element of the Vandermonde matrix by using power operator (\*\*), for loop, and np.concatenation.
# +
degree = 4
# Build the Vandermonde matrix column by column: column d holds x_data**d,
# starting with the all-ones column for the constant term.
columns = [np.ones((n_data, 1))]
for power in range(1, degree + 1):
    col = np.array([x ** power for x in x_data]).reshape(n_data, 1)
    columns.append(col)
A = np.concatenate(columns, axis=1)
# -
# ### Print results (do not modify the following block)
print(A)
# ### 2. Compute the coefficients of polynomial regression using a $4$ degree polynomial (40pt)
# - Use the variable 'degree' and the Vandermonde matrix 'A' in Problem 1.
# - The variable 'w_est' is the coefficients of polynomial regression. Now, 'w_est' is initialized as a zero vector. Compute the 'w_est' from 'A' and 'y'
# - The variable 'y_est' is an estimated function value corresponding to the input points 'x_list'. Now, it is a zero list and fill the list by computing the estimated function values. In other words, y_est = $\hat{f}($x_list$)$
# +
# Least-squares coefficients via the Moore-Penrose pseudo-inverse: w = A⁺ y.
w_est = np.linalg.pinv(A) @ y_data
# Evaluate the fitted polynomial on x_list, accumulating lowest power first
# (same left-to-right summation order as writing the terms out explicitly).
y_est = np.zeros_like(x_list)
for d, coef in enumerate(w_est):
    y_est = y_est + coef * x_list ** d
# -
# ### Print results (do not modify the following block)
plt.plot(x_list, y_list, 'b:', linewidth=2, label="Original Function")
plt.plot(x_list, y_est, 'm-', linewidth=2, label="Polynomial Regression (d={})".format(degree))
plt.scatter(x_data, y_data, s=50, c='r', label="Data Points")
plt.xlim([np.min(x_list),np.max(x_list)])
plt.ylim([np.min(y_data),np.max(y_data)])
plt.legend(prop={'size': 12})
plt.title("Data Plot")
plt.show()
# ### 3. Compute the polynomial regression with $1$ degree polynomials (15pt)
# - Repeat Problem 1 and Problem 2 with degree $1$.
# - Use the following variables.
# > degree1, A1, w_est1, y_est1
# +
degree1 = 1
# Vandermonde matrix for a linear fit: columns [1, x].
cols1 = [np.ones((n_data, 1))]
for power in range(1, degree1 + 1):
    cols1.append(np.array([x ** power for x in x_data]).reshape(n_data, 1))
A1 = np.concatenate(cols1, axis=1)
# Least-squares coefficients via the pseudo-inverse.
w_est1 = np.linalg.pinv(A1) @ y_data
# Evaluate the fitted line on x_list.
y_est1 = np.zeros_like(x_list)
for d, coef in enumerate(w_est1):
    y_est1 = y_est1 + coef * x_list ** d
# -
# ### Print results (do not modify the following block)
plt.plot(x_list, y_list, 'b:', linewidth=2, label="Original Function")
plt.plot(x_list, y_est1, 'g-', linewidth=2, label="Polynomial Regression (d={})".format(degree1))
plt.scatter(x_data, y_data, s=50, c='r', label="Data Points")
plt.xlim([np.min(x_list),np.max(x_list)])
plt.ylim([np.min(y_data),np.max(y_data)])
plt.legend(prop={'size': 12})
plt.title("Data Plot")
plt.show()
# ### 4. Compute the polynomial regression with $10$ degree polynomials (15pt)
# - Repeat Problem 1 and Problem 2 with degree $10$.
# - Use the following variables.
# > degree2, A2, w_est2, y_est2
# +
degree2 = 10
# Vandermonde matrix with columns x**0 .. x**10.
cols2 = [np.ones((n_data, 1))]
for power in range(1, degree2 + 1):
    cols2.append(np.array([x ** power for x in x_data]).reshape(n_data, 1))
A2 = np.concatenate(cols2, axis=1)
# Least-squares coefficients via the pseudo-inverse.
w_est2 = np.linalg.pinv(A2) @ y_data
# Evaluate term by term (same left-to-right float summation order as the
# single long expression it replaces).
y_est2 = np.zeros_like(x_list)
for d, coef in enumerate(w_est2):
    y_est2 = y_est2 + coef * x_list ** d
# -
# ### Print results (do not modify the following block)
plt.plot(x_list, y_list, 'b:', linewidth=2, label="Original Function")
plt.plot(x_list, y_est2, 'c-', linewidth=2, label="Polynomial Regression (d={})".format(degree2))
plt.scatter(x_data, y_data, s=50, c='r', label="Data Points")
plt.xlim([np.min(x_list),np.max(x_list)])
plt.ylim([np.min(y_data),np.max(y_data)])
plt.legend(prop={'size': 12})
plt.title("Data Plot")
plt.show()
# ### 5. [Challenging Problem] Explain the effect of degree (20pt)
# - By solving the above problems, we can observe the behaviors of polynomial regression with different degrees (1, 4, 10)
# - Explain pros and cons of high degree polynomial
# - Explain pros and cons of low degree polynomial
# - What is this phenomenon called in machine learning?
# +
# A high-degree polynomial passes through more of the data points, but can deviate noticeably from the original function.
# A low-degree polynomial fits neither the data points nor the original function well.
# A model whose degree is too high "overfits" the data; one whose degree is too low "underfits" it.
# -
# ### The following figure shows all regression results with different degrees.
plt.plot(x_list, y_list, 'b:', linewidth=2, label="Original Function")
# BUGFIX: the legend previously hard-coded d=1 for y_est (the degree-4 fit)
# and d=4 for y_est1 (the degree-1 fit); use the actual degree variables so
# each curve is labelled with its true polynomial degree.
plt.plot(x_list, y_est, 'm-', linewidth=2, label="Polynomial Regression (d={})".format(degree))
plt.plot(x_list, y_est1, 'g-', linewidth=2, label="Polynomial Regression (d={})".format(degree1))
plt.plot(x_list, y_est2, 'c-', linewidth=2, label="Polynomial Regression (d={})".format(degree2))
plt.scatter(x_data, y_data, s=50, c='r', label="Data Points")
plt.xlim([np.min(x_list),np.max(x_list)])
plt.ylim([np.min(y_data),np.max(y_data)])
plt.legend(prop={'size': 12})
plt.title("Data Plot")
plt.show()
# Write your answer!!!
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="klQTKIZd1yiV" executionInfo={"status": "ok", "timestamp": 1624483886498, "user_tz": -120, "elapsed": 18360, "user": {"displayName": "adrien.moreau@outlook.com", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhIPRsrERBf3lLtyJxD2dP4-Z0CBqBfHRDL6_xK=s64", "userId": "03481180972326007030"}} outputId="2c78367f-0866-4ab7-f519-f36658492b11"
from google.colab import drive
# Mount Google Drive so the dataset and checkpoints are reachable under /content/gdrive.
drive.mount("/content/gdrive", force_remount=True)
# + id="scT7c9jU2CMF" executionInfo={"status": "ok", "timestamp": 1624483889134, "user_tz": -120, "elapsed": 2640, "user": {"displayName": "adrien.moreau@outlook.com", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhIPRsrERBf3lLtyJxD2dP4-Z0CBqBfHRDL6_xK=s64", "userId": "03481180972326007030"}}
import pandas as pd
import numpy as np
from tqdm import tqdm
import itertools # Pour créer des iterateurs
import matplotlib.pyplot as plt # Pour l'affichage d'images
from matplotlib import cm # Pour importer de nouvelles cartes de couleur
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix
from sklearn import metrics
from PIL import Image
import tensorflow as tf
import keras
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import EfficientNetB1
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Dropout, Conv2D, MaxPooling2D, BatchNormalization, LeakyReLU, Flatten
from tensorflow.keras.models import Model, Sequential, load_model
from tensorflow.keras import callbacks
# %matplotlib inline
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="3EnRhY5g2WIa" executionInfo={"status": "ok", "timestamp": 1624484018281, "user_tz": -120, "elapsed": 817, "user": {"displayName": "adrien.moreau@outlook.com", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhIPRsrERBf3lLtyJxD2dP4-Z0CBqBfHRDL6_xK=s64", "userId": "03481180972326007030"}} outputId="02e2aa27-6e8c-4220-9f0d-69a48f6ae1f7"
csv_5fam = "/content/gdrive/MyDrive/Colab Notebooks/reduced_dataset_5_families.csv"
df = pd.read_csv(csv_5fam)
folderpath = "/content/gdrive/MyDrive/images_mushroom/"
# Vectorised string concatenation instead of a Python loop over row positions;
# produces "<folder><image_id>.jpg" per row exactly like the loop did
# (assumes image_id is an integer/string column — TODO confirm).
df['filepath'] = folderpath + df['image_id'].astype(str) + ".jpg"
df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="HT_UqlWFQZ5I" executionInfo={"status": "ok", "timestamp": 1624484734393, "user_tz": -120, "elapsed": 210571, "user": {"displayName": "adrien.moreau@outlook.com", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhIPRsrERBf3lLtyJxD2dP4-Z0CBqBfHRDL6_xK=s64", "userId": "03481180972326007030"}} outputId="52b5e6a9-5583-4824-fb8c-7de9f43b4159"
# Just to run a simple test: load and display one image from the dataset.
im = tf.io.read_file(df['filepath'].iloc[24])
# Decode the raw bytes into an RGB tensor (3 channels).
im = tf.image.decode_jpeg(im, channels=3)
plt.imshow(im);
# + [markdown] id="tubJlQOOv65W"
# **Load previously saved model**
# + colab={"base_uri": "https://localhost:8080/"} id="VzDsVA64v308" executionInfo={"status": "ok", "timestamp": 1624484773818, "user_tz": -120, "elapsed": 32351, "user": {"displayName": "adrien.moreau@outlook.com", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhIPRsrERBf3lLtyJxD2dP4-Z0CBqBfHRDL6_xK=s64", "userId": "03481180972326007030"}} outputId="85af6340-5059-4cc1-e939-bccc44a124b1"
# 80/20 train/test split with a fixed seed for reproducibility.
data_train, data_test = train_test_split(df, test_size=0.2, random_state=42)
data_train.head()
# Light augmentation for training only; the test generator leaves images untouched.
train_data_generator = ImageDataGenerator(rotation_range=5,
                                          width_shift_range = 0.1,
                                          height_shift_range = 0.05,
                                          zoom_range = 1.1)
test_data_generator = ImageDataGenerator()
batch_size = 32
# flow_from_dataframe expects string labels in y_col.
data_train["label"] = data_train["label"].astype(str)
data_test["label"] = data_test["label"].astype(str)
train_generator = train_data_generator.flow_from_dataframe(dataframe = data_train, directory="", x_col = "filepath", y_col="label", target_size=(256,256), batch_size=batch_size, class_mode='sparse')
test_generator = test_data_generator.flow_from_dataframe(dataframe = data_test, directory="", x_col = "filepath", y_col="label", target_size=(256,256), batch_size=batch_size, class_mode='sparse')
# Resume from the previously saved EfficientNet checkpoint on Drive.
new_model = load_model('/content/gdrive/MyDrive/Colab Notebooks/checkpoint_effnet')
# + colab={"base_uri": "https://localhost:8080/"} id="1k_GleIqv4F8" executionInfo={"status": "ok", "timestamp": 1624494957893, "user_tz": -120, "elapsed": 5792627, "user": {"displayName": "adrien.moreau@outlook.com", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhIPRsrERBf3lLtyJxD2dP4-Z0CBqBfHRDL6_xK=s64", "userId": "03481180972326007030"}} outputId="e791142f-2b42-4df1-dd25-db2b365cc924"
# Divide the learning rate by 10 after 3 epochs without val_loss improvement.
lr_plateau = callbacks.ReduceLROnPlateau(monitor = 'val_loss',
                                         patience = 3,
                                         factor = 0.1,
                                         verbose = 2,
                                         mode = 'min')
# Stop after 7 stagnant epochs and restore the best weights seen.
early_stopping = callbacks.EarlyStopping(monitor = "val_loss",
                                         patience = 7,
                                         mode = 'min',
                                         verbose = 2,
                                         restore_best_weights= True)
# Persist the best model (by val_loss) to Drive after each epoch.
checkpoint = callbacks.ModelCheckpoint(filepath="/content/gdrive/MyDrive/Colab Notebooks/test_save/model_effnetB1_2_20210623.h5",
                                       monitor = 'val_loss',
                                       save_best_only = True,
                                       save_weights_only = False,
                                       mode = 'min',
                                       save_freq = 'epoch')
# Continue training the loaded model with augmentation and the callbacks above.
history = new_model.fit(train_generator,
                        epochs = 30,
                        steps_per_epoch = len(data_train)//batch_size,
                        validation_data = test_generator,
                        validation_steps = len(data_test)//batch_size,
                        callbacks=[lr_plateau, early_stopping, checkpoint])
# + id="FyQ2xdnmJu3t" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1624494959744, "user_tz": -120, "elapsed": 1852, "user": {"displayName": "adrien.moreau@outlook.com", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhIPRsrERBf3lLtyJxD2dP4-Z0CBqBfHRDL6_xK=s64", "userId": "03481180972326007030"}} outputId="4913dc77-61c1-43ab-9049-9ca2db006915"
### Set the path and name you want !
##
#
save_name = "/content/gdrive/MyDrive/Colab Notebooks/test_save/model_effnetB1_final_20210624.h5"
#
##
###
# Persist the fine-tuned model (architecture + weights) to Drive.
new_model.save(save_name)
## found on stackoverflow
#loaded_model = create_model()
#loaded_model.load_weights(filepath)
#y_pred = loaded.model.<predict_method>(test_set,verbose=<val>)
# + colab={"base_uri": "https://localhost:8080/"} id="FiJOsl_xv4OG" executionInfo={"status": "ok", "timestamp": 1624434856491, "user_tz": -120, "elapsed": 38447, "user": {"displayName": "adrien.moreau@outlook.com", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhIPRsrERBf3lLtyJxD2dP4-Z0CBqBfHRDL6_xK=s64", "userId": "03481180972326007030"}} outputId="149b43d5-89da-4beb-fc00-9ac83c1d5f7c"
X_test_path = data_test['filepath']
X_test = []
# Load every test image into memory at 256x256 (matches the generator's target_size).
for filepath in tqdm(X_test_path):
    # Read the raw file bytes
    im = tf.io.read_file(filepath)
    # Decode to an RGB tensor
    im = tf.image.decode_jpeg(im, channels=3)
    # Resize to the model's input size
    im = tf.image.resize(im, size=(256, 256))
    X_test.append([im])
# Stack the singleton batches into one (n, 256, 256, 3) tensor.
X_test = tf.concat(X_test, axis=0)
# + colab={"base_uri": "https://localhost:8080/", "height": 313} id="N_Edk-KfQiwJ" executionInfo={"status": "ok", "timestamp": 1624440382227, "user_tz": -120, "elapsed": 1005, "user": {"displayName": "adrien.moreau@outlook.com", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhIPRsrERBf3lLtyJxD2dP4-Z0CBqBfHRDL6_xK=s64", "userId": "03481180972326007030"}} outputId="388f33eb-137e-4d05-d535-c8959c91991e"
# Predict class probabilities, then take the argmax as the predicted label.
y_prob = new_model.predict(X_test, batch_size=16)
y_pred = tf.argmax(y_prob, axis = 1).numpy()
y_test = data_test['label'].astype(int)
print("l'accuracy est de :", accuracy_score(y_test, y_pred))
# BUGFIX: f1_score defaults to average='binary', which raises a ValueError on
# this 5-class problem; use a class-frequency-weighted average instead.
print("le f1 score est de :", f1_score(y_test, y_pred, average='weighted'))
cnf_matrix = confusion_matrix(y_test, y_pred)
### Just for the fun !
##
#
num_class = df["label"].nunique()
classes = range(0,num_class)
plt.figure()
plt.imshow(cnf_matrix, interpolation='nearest',cmap='Blues')
plt.title("Matrice de confusion")
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes)
plt.yticks(tick_marks, classes)
# Annotate each cell count; white text on dark cells for contrast.
for i, j in itertools.product(range(cnf_matrix.shape[0]), range(cnf_matrix.shape[1])):
    plt.text(j, i, cnf_matrix[i, j],
             horizontalalignment = "center",
             color = "white" if cnf_matrix[i, j] > ( cnf_matrix.max() / 2) else "black")
plt.ylabel('Vrais labels')
plt.xlabel('Labels prédits')
plt.show()
#
##
###
# + id="5GWsrU3PQizd" colab={"base_uri": "https://localhost:8080/", "height": 312} executionInfo={"status": "ok", "timestamp": 1624436444107, "user_tz": -120, "elapsed": 1012, "user": {"displayName": "adrien.moreau@outlook.com", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhIPRsrERBf3lLtyJxD2dP4-Z0CBqBfHRDL6_xK=s64", "userId": "03481180972326007030"}} outputId="51d849dc-39fc-43fb-af67-67585176a7ea"
# Show 3 random test images with predicted vs. true family names.
indices_random = tf.random.uniform([3], 0, len(X_test), dtype=tf.int32)
plt.figure(figsize=(15,7))
for i, idx in enumerate(indices_random):
    plt.subplot(1,3,i+1)
    plt.imshow(tf.cast(X_test[idx], tf.int32))
    plt.xticks([])
    plt.yticks([])
    # NOTE(review): indexing df.family.unique() by numeric label assumes the
    # label encoding follows first-appearance order of families — confirm.
    plt.title('Pred class : {} \n Real class : {}'.format(df.family.unique()[y_pred[idx]], df.family.unique()[y_test.values[idx]]))
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import math
import itertools
import matplotlib.pyplot as plt
import torch
# +
class Square_Crossroads():
    """Toy RL environment: cars cross a 10x10 square intersection.

    Each car is tracked in ``self.states`` as
    ``car_id -> [position [x, y], target [x, y], velocity, heading_degrees]``.
    Rewards penalise crashes, leaving the grid, standing still and elapsed
    time, and reward approaching/reaching the target exit.  Relies on the
    module-level helpers ``distance`` and ``velocity``.
    """
    def __init__(self, seed, dist_cars):
        # dist_cars: distance threshold below which two cars count as crashed.
        self.dist_cars = dist_cars
        # initial 4 cars
        self.cars = np.array([0, 1, 2, 3])
        # create 4 entrances
        self.state_a = [0, 5]
        self.state_b = [5, 0]
        self.state_c = [5, 10]
        self.state_d = [10, 5]
        self.exits = [self.state_a, self.state_b, self.state_c, self.state_d]
        self.set_seed(seed)
        self.reset()
    def step(self, action_space, sign):
        """Advance one timestep.

        action_space: per-car [acceleration, heading_fraction] pairs; the
        heading fraction is scaled by 360 to degrees.  ``sign`` is currently
        unused.  Returns (states, reward, done).
        """
        done = False
        reward = 0
        reward_time = -1  # constant per-step time penalty
        reward_boundary = 0
        reward_static = 0
        reward_crash = 0
        for car in self.cars:
            # New velocity from current velocity and the commanded acceleration.
            v = velocity(self.states[car][2], action_space[car][0])
            # print("This is the calculated velocity: ", v)
            # print(v)
            if v < 0:
                reward -= 1  # reject reverse driving
            elif v > 2:
                reward -= 1  # reject speeding
            else:
                self.states[car][2] = v
            # NOTE(review): heading is updated and the car is moved even when
            # the velocity command was rejected above — confirm intended.
            self.states[car][3] = action_space[car][1] * 360
            # print("This is the calculated rotation: ", self.states[car][3])
            new_x, new_y = self.move_car(self.states[car][0], self.states[car][2], self.states[car][3])
            if new_x > 10 or new_x < 0 or new_y > 10 or new_y < 0:
                reward_boundary -= 5
                # print("A car tried to go outside the boundaries")
            else:
                # print("Moving the car: ", car)
                self.states[car][0] = [new_x, new_y]
        # print(self.states)
        # calculate crash reward
        reward_crash = self.check_crash(self.dist_cars)
        reward_static = self.check_static()
        reward_success, done = self.check_success_area()
        reward += reward_crash + reward_static + reward_success + reward_time + reward_boundary
        if done == True:
            print("Success!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        return self.states, reward, done
    def set_seed(self, seed):
        # Seed the global NumPy RNG used for spawn/target sampling.
        np.random.seed(seed)
    def new_car(self):
        """Spawn an additional car at a random entrance with a random, different target exit."""
        # add a new car
        last_car = self.cars[-1]
        new_car = last_car + 1
        # add new car index to list of all cars
        self.cars = np.append(self.cars, new_car)
        # take a random spawn exit
        spawn = np.random.choice(len(self.exits))
        # remove the spawn exit from the target exits
        target_indexes = list(np.arange(len(self.exits)))
        target_indexes.pop(spawn)
        # choose a random target exit (spawn exit is already excluded)
        rand_target = np.random.choice(target_indexes)
        # added the new car with next_index : [spawn exit, target exit]
        # NOTE(review): velocity/heading start at 0 here but at 1 in reset() — confirm.
        self.states[new_car] = [self.exits[spawn], self.exits[rand_target], 0, 0]
    def reset_successful_car(self):
        # if a car has successfully passed the crossroads, spawns it again with random entrance and target
        spawn = np.random.choice(len(self.exits))
        target_indexes = list(np.arange(len(self.exits)))
        target_indexes.pop(spawn)
        # choose a random target exit (spawn exit is already excluded)
        rand_target = np.random.choice(target_indexes)
        return [self.exits[spawn], self.exits[rand_target], 1, 1]
    def reset(self):
        """Re-initialise all cars: car i spawns at exit i with a random other exit as target."""
        # reset the whole environment
        print("Environment reset with param")
        self.states = dict.fromkeys(self.cars)
        for car in self.states:
            # remove the spawn exit from the target exits
            target_indexes = list(np.arange(len(self.exits)))
            target_indexes.pop(car)
            # choose a random target exit (spawn exit is already excluded)
            rand_target = np.random.choice(target_indexes)
            # create a dictionary of car : [spawn exit, target exit, velocity, curvature]
            self.states[car] = [self.exits[car], self.exits[rand_target], 1, 1]
        return self.states
    def check_crash(self, dist_cars):
        # check if a crash has occured and decrease the reward by 20 for every crash
        # NOTE(review): the penalty actually applied below is 10 per pair, not 20.
        reward_crash = 0
        all_points = [a[0] for a in self.states.values()]
        for p0, p1 in itertools.combinations(all_points, 2):
            tmp_dist = distance(p0, p1)
            if tmp_dist <= dist_cars: reward_crash -= 10
        # if reward_crash != 0: print("There was a car crash")
        return reward_crash
    def check_success_point(self):
        """Graded reward for proximity to the exact target point (unused variant of check_success_area)."""
        # check if a car has successfully exited and increase reward by 100 for each success
        reward_success = 0
        # list of cars that should successfully be removed
        exit = []
        # done = False
        for car, car_prop in self.states.items():
            distance_success = distance(self.states[car][0], self.states[car][1])
            if distance_success < 2:
                print("We are at a distance: ", distance_success)
                reward_success +=100
                if distance_success < 0.5: reward_success +=500
                if distance_success < 0.1: reward_success +=1000
                # NOTE(review): exact float equality is almost never true; likely meant `<= 0.01`.
                if distance_success == 0.01:
                    reward_success +=10000
                    print("There was a successful exit")
                    # done = True
                    exit.append(car)
        # reward_success += 100
        # if self.states[car][0] == self.states[car][1]:
        # exit.append(car)
        # reward_success += 100
        for c in exit:
            self.states[c] = self.reset_successful_car()
        # if reward_success != 0: print("There was a successful exit")
        return reward_success
    def check_success_area(self):
        # check if a car has successfully exited and increase reward by 100 for each success
        reward_success = 0
        # list of cars that should successfully be removed
        exit = []
        done = False
        for car, car_prop in self.states.items():
            # print(car)
            # print(car_prop)
            # square_target =
            distance_success = distance(self.states[car][0], self.states[car][1])
            # print("Distance to success is: ", distance_success)
            if distance_success < 2:
                # print("We are at a distance: ", distance_success)
                reward_success +=100
                if distance_success < 0.5:
                    reward_success +=10000
                    print("There was a successful exit")
                    done = True
                    exit.append(car)
        # if distance_success < 0.1:
        # reward_success +=1000
        # if distance_success == 0.01:
        # reward_success +=10000
        # print("There was a successful exit")
        # done = True
        # exit.append(car)
        # reward_success += 100
        # if self.states[car][0] == self.states[car][1]:
        # exit.append(car)
        # reward_success += 100
        for c in exit:
            self.states[c] = self.reset_successful_car()
        # if reward_success != 0: print("There was a successful exit")
        return reward_success, done
    def check_static(self):
        # check if velocity is 0 and decrease the reward by 10 for each car with velocity 0
        reward_static = 0
        for car_prop in self.states.values():
            if car_prop[2] == 0:
                reward_static -= 10
        # NOTE(review): the string below contains a Cyrillic 'а' — harmless but worth fixing.
        if reward_static != 0: print("There was а static car")
        return reward_static
    def move_car(self, states, distance, angle_degrees):
        # move the car from point A to point B
        # `distance` here shadows the module-level distance() helper (not used in this method).
        new_x = states[0] + distance * math.cos(angle_degrees * math.pi / 180)
        new_y = states[1] + distance * math.sin(angle_degrees * math.pi / 180)
        return new_x, new_y
def distance(p0, p1):
    """Euclidean distance between two 2-D points given as [x, y] sequences."""
    dx = p0[0] - p1[0]
    dy = p0[1] - p1[1]
    return math.sqrt(dx * dx + dy * dy)
def velocity(v, a):
    """Return the next velocity: current velocity ``v`` plus one third of acceleration ``a``."""
    return v + a / 3
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Everything you need to know about: Data Visualization in Python
#
# _The base article can be found on medium, titled [Complete Guide to Data Visualization with Python](https://towardsdatascience.com/complete-guide-to-data-visualization-with-python-2dd74df12b5e)_
#
# Let's see the main libraries for data visualization with Python and all the types of charts that can be done with them. We will also see which library is recommended to use on each occasion and the unique capabilities of each library.
#
# We will start with the most basic visualization that is looking at the data directly, then we will move on to plotting charts and finally, we will make interactive charts.
# ### Installation
#
# The code block below should import all of the packages required and test to make sure they're all installed correctly. If any errors occur, check install_override.md to see the commands to run directly in your terminal for similar results.
# +
import sys
# !{sys.executable} -m pip install pandas
# !{sys.executable} -m pip install pandas_profiling
# !{sys.executable} -m pip install bokeh
print("\n\nSuccessfully installed python packages!")
# -
# ### Datasets
#
# We will work with two datasets that will adapt to the visualizations we show in the workshop. The datasets are in the `datasets` directory in this repository, and the datasets can also be downloaded [here](https://github.com/albertsl/datasets/tree/master/popularidad).
#
# The datasets are on the popularity of searches on the Internet for three terms related to artificial intelligence (data science, machine learning and deep learning). They have been extracted from a famous search engine.
#
# There are two files `temporal.csv` and `mapa.csv`. The first one we will use in the vast majority of the tutorial includes popularity data of the three terms over time (from 2004 to the present, 2020). In addition, I have added a categorical variable (ones and zeros) to demonstrate the functionality of charts with categorical variables.
#
# The file `mapa.csv` includes popularity data separated by country. We will use it in the last section of the article when working with maps.
# ## Pandas
#
# Before we move on to more complex methods, let's start with the most basic way of visualizing data. We will simply use pandas to take a look at the data and get an idea of how it is distributed.
#
# The first thing we must do is visualize a few examples to see what columns there are, what information they contain, how the values are coded, etc.
# Load the search-popularity dataset into a DataFrame for exploration.
import pandas as pd
df = pd.read_csv('../datasets/temporal.csv')
df.head(10) #View first 10 data rows
# With the command `describe` we will see how the data is distributed, the maximums, the minimums, the mean, etc.
# Summary statistics (count, mean, std, min/max, quartiles) per numeric column.
df.describe()
# With the `info` command we will see what type of data each column includes. We could find the case of a column that when viewed with the head command seems numeric but if we look at subsequent data there are values in string format, then the variable will be coded as a string.
# Column dtypes, non-null counts and memory usage.
df.info()
# By default, pandas limits the number of rows and columns it displays. This, among other things in Pandas, is modifiable. Here we set the max higher to allow us to see everything at once.
#
# With these commands, we increase the limits and we can visualize the whole data. Be careful with this option for big datasets, we can have problems showing them.
# NOTE: these options are global (session-wide), not per-DataFrame.
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# See the data now:
df
# Using Pandas styles, we can get much more information when viewing the table. First, we define a format dictionary so that the numbers are shown in a legible way (with a certain number of decimals, date and hour in a relevant format, with a percentage, with a currency, etc.) Don’t panic, this is only a display and does not change the data, you will not have any problem to process it later.
#
# To give an example of each type, here there are currency and percentage symbols even though they do not make any sense for this data.
# Display-only formatting: Styler changes how the table renders, not the data.
format_dict = {'data science':'${0:,.2f}', 'Mes':'{:%m-%Y}', 'machine learning':'{:.2%}'}
#We make sure that the Month column has datetime format
df['Mes'] = pd.to_datetime(df['Mes'])
#We apply the style to the visualization
df.head().style.format(format_dict)
# We can highlight maximum and minimum values with colors too:
format_dict = {'Mes':'{:%m-%Y}'} #Simplified format dictionary with values that do make sense for our data
df.head().style.format(format_dict).highlight_max(color='green').highlight_min(color='#ff0000')
# We use a color gradient to display the data values.
df.head(10).style.format(format_dict).background_gradient(subset=['data science', 'machine learning'], cmap='BuGn')
# We can also display the data values with bars:
df.head().style.format(format_dict).bar(color='red', subset=['data science', 'deep learning'])
# Moreover, we also can combine the above functions and generate a more complex visualization.
# Styler methods chain: gradient first, then the max highlight on top.
df.head(10).style.format(format_dict).background_gradient(subset=['data science', 'machine learning'], cmap='BuGn').highlight_max(color='yellow')
# There is plenty more to learn about styling visualizations with Pandas here: https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html
#
# All of these are for your convenience when working, as some datasets will lend themselves better to certain settings and features here.
# ### Pandas Profiling
# Pandas profiling is a library that generates interactive reports with our data, we can see the distribution of the data, the types of data, possible problems it might have. It is very easy to use, with only 3 lines we can generate a report that we can send to anyone and that can be used even if you do not know programming.
# Generate a standalone interactive EDA report; side effect: writes
# 'report.html' into the current working directory.
from pandas_profiling import ProfileReport
prof = ProfileReport(df)
prof.to_file(output_file='report.html')
# You can see the interactive report as a web page generated from the data used in the article, [here](https://todoia.es/recursos/informe.html).
#
# You can find more information about Pandas Profiling in [this article](https://towardsdatascience.com/exploratory-data-analysis-with-pandas-profiling-de3aae2ddff3). This package is best used if you're not an engineer or work mostly with non-engineers, so we will move on here.
# ## Matplotlib
#
# Matplotlib is the most basic library for visualizing data graphically. It includes many of the graphs that we can think of. Just because it is basic does not mean that it is not powerful, many of the other data visualization libraries we are going to talk about are based on it.
#
# Matplotlib charts are made up of two main components, the axes (the lines that delimit the area of the chart) and the figure (where we draw the axes, titles and things that come out of the area of the axes). Now let’s create the simplest graph possible:
#
# _note: The parameter label is to indicate the legend. This doesn't mean that it will be shown, we'll have to use another command that I'll explain later_
import matplotlib.pyplot as plt
# `label` only registers the legend entry; plt.legend() must be called to show it.
plt.plot(df['Mes'], df['data science'], label='data science')
# We can make graphs with multiple variables in the same plot to compare them.
plt.plot(df['Mes'], df['data science'], label='data science')
plt.plot(df['Mes'], df['machine learning'], label='machine learning')
plt.plot(df['Mes'], df['deep learning'], label='deep learning')
# It is not very clear which variable each color represents. We’re going to improve the chart by adding a legend and titles.
plt.plot(df['Mes'], df['data science'], label='data science')
plt.plot(df['Mes'], df['machine learning'], label='machine learning')
plt.plot(df['Mes'], df['deep learning'], label='deep learning')
# Axis labels, title, gridlines and legend make the chart self-explanatory.
plt.xlabel('Date')
plt.ylabel('Popularity')
plt.title('Popularity of AI terms by date')
plt.grid(True)
plt.legend()
# If you are looking to run the chart in a separate window, comment in the code below and run it instead. This is not required but may be preferred if you want to save the image of the plot to your computer.
# +
# # %matplotlib
# plt.plot(df['Mes'], df['data science'], label='data science')
# plt.plot(df['Mes'], df['machine learning'], label='machine learning')
# plt.plot(df['Mes'], df['deep learning'], label='deep learning')
# plt.xlabel('Date')
# plt.ylabel('Popularity')
# plt.title('Popularity of AI terms by date')
# plt.grid(True)
# plt.legend()
# -
# We can make multiple graphics in one figure. This goes very well for comparing charts or for sharing data from several types of charts easily with a single image.
# 2x2 grid of axes; index as axes[row, col].
fig, axes = plt.subplots(2,2)
axes[0, 0].hist(df['data science'])
axes[0, 1].scatter(df['Mes'], df['data science'])
axes[1, 0].plot(df['Mes'], df['machine learning'])
axes[1, 1].plot(df['Mes'], df['deep learning'])
# We can draw the graph with different styles for the points of each variable:
# Format strings: 'r-' red line, 'bs' blue squares, 'g^' green triangles.
# The *2 / *3 factors just scale the series apart for visual separation.
plt.plot(df['Mes'], df['data science'], 'r-')
plt.plot(df['Mes'], df['machine learning']*2, 'bs')
plt.plot(df['Mes'], df['deep learning']*3, 'g^')
# Now let’s see a few examples of the different graphics we can do with Matplotlib. We start with a scatterplot:
# Scatterplot: one term's popularity against another's, point per month.
plt.scatter(df['data science'], df['machine learning'])
# Bar charts too:
# width=20 is in x-axis (date) units, widening the bars to be visible.
plt.bar(df['Mes'], df['machine learning'], width=20)
# And let's look at some Histograms:
plt.hist(df['deep learning'], bins=15)
# Gallery of examples:
#
# In this link: https://matplotlib.org/gallery/index.html we can see examples of all types of graphics that can be made with Matplotlib.
# ## Seaborn
# Seaborn is a library based on Matplotlib. It specializes in giving us nicer graphics and functions to make complex types of graphics with just one line of code.
#
# We import the library and initialize the style of the graphics with `sns.set()`, without this command the graphics would still have the same style as Matplotlib. We show one of the simplest graphics, a scatterplot
import seaborn as sns
# sns.set() switches the global Matplotlib style to Seaborn's defaults.
sns.set()
sns.scatterplot(x=df['Mes'], y=df['data science'])
# We can add information of more than two variables in the same graph. For this we use colors and sizes. We also make a different graph according to the value of the category column:
sns.relplot(x='Mes', y='deep learning', hue='data science', size='machine learning', col='categorical', data=df)
# One of the most popular graphics provided by Seaborn is the heatmap. It is very common to use it to show all the correlations between variables in a dataset:
sns.heatmap(df.corr(), annot=True, fmt='.2f')
# Another of the most popular is the pairplot that shows us the relationships between all the variables. Be careful with this function if you have a large dataset, as it has to show all the data points as many times as there are columns, it means that by increasing the dimensionality of the data, the processing time increases exponentially.
sns.pairplot(df)
# Now let’s do the pairplot showing the charts segmented according to the values of the _categorical_ variable. This allows you to see the comparisons from before with the categorical value overlaid.
sns.pairplot(df, hue='categorical')
# A very informative graph is the jointplot that allows us to see a scatterplot together with a histogram of the two variables and see how they are distributed:
sns.jointplot(x='data science', y='machine learning', data=df)
# Another interesting graphic is the ViolinPlot to show distributions next to values at the same time:
sns.catplot(x='categorical', y='data science', kind='violin', data=df)
# We can create multiple graphics in one image just like we did with Matplotlib:
# sharey=True makes both panels use the same y scale for fair comparison.
fig, axes = plt.subplots(1, 2, sharey=True, figsize=(8, 4))
sns.scatterplot(x="Mes", y="deep learning", hue="categorical", data=df, ax=axes[0])
axes[0].set_title('Deep Learning')
sns.scatterplot(x="Mes", y="machine learning", hue="categorical", data=df, ax=axes[1])
axes[1].set_title('Machine Learning')
# Gallery of examples:
# In [this link](https://seaborn.pydata.org/examples/index.html), we can see examples of everything that can be done with Seaborn.
# ## Bokeh
# Bokeh is a library that allows you to generate interactive graphics. We can export them to an HTML document that we can share with anyone who has a web browser.
#
# It is a very useful library when we are interested in looking for things in the graphics and we want to be able to zoom in and move around the graphic. Or when we want to share them and give the possibility to explore the data to another person.
#
# We start by importing the library and defining the file in which we will save the graph:
from bokeh.plotting import figure, output_file, save, gridplot
# output_file sets the destination for the next save() call.
output_file('data_science_popularity.html')
# We draw what we want and save it on the file:
p = figure(title='data science', x_axis_label='Mes', y_axis_label='data science')
p.line(df['Mes'], df['data science'], legend_label='popularity', line_width=2)
# save() writes the interactive HTML document (side effect on disk).
save(p)
# You can see how the file data_science_popularity.html looks by clicking [here](https://todoia.es/recursos/data_science_popularity.html). It’s interactive, you can move around the graphic and zoom in as you like
#
# Adding multiple graphics to a single file:
# All three figures are written to a single HTML file as a one-row grid.
output_file('multiple_graphs.html')
# Consistency/compat fix: use `height` like the other two figures below
# (`plot_height` is deprecated since Bokeh 2.4 and removed in 3.x).
s1 = figure(width=250, height=250, title='data science')
s1.circle(df['Mes'], df['data science'], size=10, color='navy', alpha=0.5)
s2 = figure(width=250, height=250, x_range=s1.x_range, y_range=s1.y_range, title='machine learning') #share both axis range
s2.triangle(df['Mes'], df['machine learning'], size=10, color='red', alpha=0.5)
s3 = figure(width=250, height=250, x_range=s1.x_range, title='deep learning') #share only one axis range
s3.square(df['Mes'], df['deep learning'], size=5, color='green', alpha=0.5)
# Arrange the three linked plots side by side and write the HTML file.
p = gridplot([[s1, s2, s3]])
save(p)
# You can see how the file multiple_graphs.html looks by clicking [here](https://todoia.es/recursos/multiple_graphs.html).
#
# Gallery of examples:
# In this link https://docs.bokeh.org/en/latest/docs/gallery.html you can see examples of everything that can be done with Bokeh.
#
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# © Copyright 2020 Rigetti & Co, Inc.
#
# # Improving the performance of Max-Cut QAOA with Quil-T
#
# This notebook shows how to run the **Quantum Approximate Optimization Algorithm** (QAOA) on Aspen-8, using features of **Quil-T** to disable global fencing on CZ gates and, in doing so, improve the fidelity of the algorithm. This extends the ``MaxCutQAOA.ipynb`` notebook provided to our Quantum Cloud Services (QCS) users.
#
# A fence is a barrier in time used to sequence operations at the pulse control level. To minimize crosstalk on 2-qubit (2Q) gates, global fencing is normally enabled, meaning that each 2Q pulse sequence is applied with no other operations occurring on the QPU. While this maximizes the fidelity of any single 2Q gate, it also limits the total 2Q gate depth that can be achieved within the coherence time of the system. For shallow circuits less than 10x 2Q gates, this is certainly the right choice. However, for deep circuits greater than about 20x 2Q gates, it can be that disabling fencing provides a net improvement in fidelity for specific applications. We will demonstrate this effect in the context of a Max-Cut problem run using QAOA.
# +
import itertools
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from tqdm import tqdm
from typing import Any, Dict, List, Optional, Set, Tuple
from pyquil import get_qc, Program
from pyquil.api import QuantumComputer, QPUCompiler, WavefunctionSimulator
from pyquil.gates import H, MEASURE, RESET
from pyquil.paulis import exponential_map, PauliSum, sX, sZ
from pyquil.quilbase import DefCalibration, Fence, FenceAll
# -
# ## Consolidated routines from the basic Max-Cut QAOA demonstration
#
# Here we consolidate the routines for generating Max-Cut QAOA problems and programs, running and plotting the algorithm result. For a detailed explanation of these routines, refer to the ``MaxCutQAOA.ipynb`` notebook.
# +
def generate_ising_graph(edges: List[Tuple[int, int]], seed: int) -> nx.Graph:
    """Build a graph from ``edges`` with a random weight 'w' in [-1, 1) per edge."""
    np.random.seed(seed)  # fix the RNG so weights are reproducible per seed
    graph = nx.from_edgelist(edges)
    weights: np.ndarray = np.random.uniform(low=-1.0, high=+1.0, size=graph.number_of_edges())
    # Attach one weight to each edge, in the graph's edge-iteration order.
    edge_attrs = {}
    for edge, weight in zip(graph.edges, weights):
        edge_attrs[edge] = {'w': weight}
    nx.set_edge_attributes(graph, edge_attrs)
    return graph
def bitstring_cut_weight(b: List[int], graph: nx.Graph) -> float:
    """Signed cut weight of bitstring ``b`` for the weighted ``graph``.

    Each edge (q0, q1) with weight w contributes +w when ``b`` assigns both
    endpoints the same value and -w when they differ. Annotations corrected:
    ``b`` is a flat list of bits (one per node) and the result is a number,
    not a dict.
    """
    cost = 0
    # Map qubit labels (graph nodes) to their positions in the bitstring.
    inverse_map = {qubit: idx for idx, qubit in enumerate(list(graph.nodes))}
    for q0, q1 in graph.edges():
        cost += graph.get_edge_data(q0, q1)['w'] * (-1) ** int(b[inverse_map[q0]] != b[inverse_map[q1]])
    return cost
def maxcut_qaoa_program(graph: nx.Graph) -> Program:
    """Build a depth-1 QAOA ansatz program for Max-Cut on ``graph``.

    ``beta`` and ``gamma`` are declared as run-time parameters so the same
    executable can be re-run across the angle grid; every node's qubit is
    measured into the ``ro`` register.
    """
    # Cost Hamiltonian: one weighted ZZ term per edge; driver: X on each node.
    cost_ham = PauliSum([sZ(i) * sZ(j) * graph.get_edge_data(i, j)['w'] for i, j in graph.edges])
    driver_ham = PauliSum([sX(i) for i in graph.nodes])
    # RESET enables active qubit reset between shots.
    p = Program(RESET())
    beta = p.declare('beta', 'REAL')
    gamma = p.declare('gamma', 'REAL')
    ro = p.declare('ro', 'BIT', len(graph.nodes))
    # Prepare |+>^n, evolve under the cost terms by gamma, then the driver by beta.
    p.inst(H(qubit) for qubit in list(graph.nodes))
    p.inst(exponential_map(term)(gamma) for term in cost_ham)
    p.inst(exponential_map(term)(beta) for term in driver_ham)
    p.inst(MEASURE(qubit, ro[idx]) for idx, qubit in enumerate(list(graph.nodes)))
    return p
def plot_landscape(landscape: np.ndarray):
    """Render a square (width x width) QAOA cost landscape as a heatmap.

    The maximum-cost cell is marked with a red dot; both axes span
    [0, pi] radians (beta on x, gamma on y).
    """
    width = landscape.shape[0]
    # Convert the flat argmax index back to (column, row) grid coordinates.
    max_x, max_y = (np.argmax(landscape) % width, np.argmax(landscape) // width)
    plt.imshow(landscape, extent=[0, np.pi, np.pi, 0])
    # The +0.5 offset centers the marker inside its grid cell.
    plt.plot((max_x + 0.5) * np.pi / width, (max_y + 0.5) * np.pi / width, 'ro')
    plt.colorbar()
    plt.xlabel('beta (radians)')
    plt.ylabel('gamma (radians)')
    plt.title('Max-Cut QAOA Landscape')
    plt.show()
# -
# ## Patching the ``pyquil`` instruction set architecture to only use CZ
#
# To disable global fencing on CZ gates, we must first take steps to ensure that the QAOA ansatz circuit is compiled to only use the CZ gate. This routine patches the ``pyquil`` instruction set architecture used by the compiler to only support CZ in the set of 2Q instructions. This approach is equally valid to force compilation to CPHASE or XY gates; an exercise that we leave to the reader.
def patch_pyquil_isa_to_cz(qc: QuantumComputer):
    """Restrict ``qc``'s compiler ISA so CZ is the only available 2Q gate.

    Mutates ``qc.compiler.target_device.isa`` in place: 1Q operations are
    kept as-is, each edge's gate list is filtered down to CZ, and edges
    with no CZ support are dropped entirely.
    """
    pyquil_device_isa_dict = qc.device.get_isa().to_dict()
    def filter_operations_dict_cz(operations_dict: Dict[str, Any]) -> Dict[str, Any]:
        # Keep only the CZ entries from one edge's gate list.
        return {
            "gates": [
                gate_dict
                for gate_dict in operations_dict["gates"]
                if gate_dict["operator"] == "CZ"
            ]
        }
    pyquil_device_isa_dict_1q = pyquil_device_isa_dict["1Q"]
    pyquil_device_isa_dict_2q = {}
    for edge_id_str, operations_dict in pyquil_device_isa_dict["2Q"].items():
        filtered_operations_dict = filter_operations_dict_cz(operations_dict)
        # Drop edges that do not support CZ at all.
        if len(filtered_operations_dict["gates"]) != 0:
            pyquil_device_isa_dict_2q[edge_id_str] = filtered_operations_dict
    pyquil_device_isa_dict = {"1Q": pyquil_device_isa_dict_1q, "2Q": pyquil_device_isa_dict_2q}
    qc.compiler.target_device.isa = pyquil_device_isa_dict
# ## Generate new calibrations that disable global fencing using Quil-T
#
# Quil-T extends Quil programs with the concept of *calibrations*. A calibration defines the pulse-level control sequence for a specific native instruction at a specific site. For example, the Quil operation "CZ 31 32" will have a calibration for that gate (CZ) at that site (the edge 31-32), written in Quil-T as "DEFCAL CZ 31 32".
#
# To disable global fencing on CZ gates, we must generate new calibrations using Quil-T that modify the pulse-level control sequence to replace global fencing directives ("FENCE") with fencing local to just the 2 qubits being operated upon ("FENCE \<q0\> \<q1\>"). We achieve this by first retrieving the current calibrations as updated at the last retune, calling `qc.compiler.get_calibration_program()`, and modify them in the stated way.
def disable_global_fencing_on_cz(qc: QuantumComputer) -> Program:
    """Return a Program of CZ calibrations whose global fences are made local.

    Fetches the current Quil-T calibrations from the compiler and, for every
    CZ ``DEFCAL``, replaces each global fence (``FenceAll``) with a fence on
    just the two qubits the gate acts on. Only the modified CZ calibrations
    are returned; appending them to a native program overrides the defaults.
    """
    quilt_calibration_program = qc.compiler.get_calibration_program()
    quilt_calibrations_nofence = []
    for calibration in quilt_calibration_program.calibrations:
        if isinstance(calibration, DefCalibration):
            if calibration.name == "CZ":
                updated_instrs = []
                for instr in calibration.instrs:
                    if isinstance(instr, FenceAll): # replace FenceAll
                        # Local fence covering only this gate's qubit pair.
                        updated_instrs.append(Fence(calibration.qubits))
                    else:
                        updated_instrs.append(instr)
                quilt_calibrations_nofence.append(
                    DefCalibration(calibration.name, calibration.parameters, calibration.qubits, updated_instrs)
                )
    return Program(quilt_calibrations_nofence)
# ## Run the landscape, with or without disabling global fencing using Quil-T
#
# We provide the complete sequence for obtaining a ``pyquil`` quantum computer, modifying the instruction set architecture to only use CZ gates, compiling the QAOA ansatz circuit to native gates, and optionally add the updated Quil-T calibrations needed to disable global fencing on CZ. By setting the ``disable_global_fencing`` flag, we can see the comparative effect on noise and QAOA performance.
#
# **Note:** To provide a reasonable demonstration, we isolate all operational edges within the right-hand two octagons of the Aspen-8 chip.
def run_maxcut_qaoa_landscape(
    qc_name: str,
    disable_global_fencing: bool = False,
    edge_count_limit: Optional[int] = None,
    width: int = 20,
    shots: int = 1000,
    seed: int = 0
) -> Tuple[np.ndarray, Program, Program]:
    """Sweep a width x width (beta, gamma) grid and return the cost landscape.

    Args:
        qc_name: pyquil quantum computer name (QPU or QVM).
        disable_global_fencing: if True, append Quil-T CZ calibrations with
            qubit-local fences instead of global ones.
        edge_count_limit: optionally truncate the problem graph (useful on QVM).
        width: grid resolution per angle.
        shots: measurements per grid point.
        seed: RNG seed for the random edge weights.

    Returns:
        (width x width array of mean cut weights, source program, native program).
    """
    qc = get_qc(qc_name)
    # Force compilation to CZ-only 2Q gates so the fencing patch applies.
    patch_pyquil_isa_to_cz(qc)
    qc_device_isa_restricted_edges = qc.compiler.target_device.isa["2Q"]
    edges = []
    for edge in qc.device.get_isa().edges:
        if not edge.dead:
            q0, q1 = edge.targets
            # Keep only edges that survived the CZ-only ISA restriction.
            if f"{q0}-{q1}" in qc_device_isa_restricted_edges:
                edges.append((q0, q1))
    if edge_count_limit is not None:
        edges = edges[:edge_count_limit]
    graph = generate_ising_graph(edges, seed)
    program = maxcut_qaoa_program(graph)
    program.wrap_in_numshots_loop(shots)
    native_program = qc.compiler.quil_to_native_quil(program)
    if disable_global_fencing:
        # Appended DEFCALs override the default globally-fenced CZ pulses.
        native_program += disable_global_fencing_on_cz(qc)
    executable = qc.compiler.native_quil_to_executable(native_program)
    costs = []
    angle_range = np.linspace(0, np.pi, width)
    landscape = list(itertools.product(angle_range, angle_range))
    for beta, gamma in tqdm(landscape):
        memory_map = {'beta': [beta], 'gamma': [gamma]}
        bitstrings = qc.run(executable, memory_map=memory_map)
        # Mean cut weight over all shots at this (beta, gamma) point.
        costs.append(np.mean([bitstring_cut_weight(list(b), graph) for b in bitstrings]))
    return np.array(costs).reshape(width, width), program, native_program
# ## Choose a device
#
# We choose the test device, initially as a QVM which can be run at any time but will not show the effects of disabling fencing on performance. The QVM supports the Quil-T interface to enable functional testing with the QVM prior to committing to a QPU reservation. However, the performance of the QVM degrades exponentially with size, so we limit the number of edges in the problem graph when using a QVM to a number that is tractable to simulate.
#
# To run on a real QPU, obtain a reservation, update `qc_name` to `"Aspen-8"`, and `edge_count_limit` to `None`.
# +
# Simulated backend by default; change to "Aspen-8" (with a reservation)
# and set edge_count_limit = None to run on real hardware.
qc_name = "Aspen-8-qvm"
# Keep the problem graph small so QVM simulation stays tractable.
edge_count_limit = 6
# -
# ## Run landscape without global fencing disabled
#
# First, we run and display the landscape produced without global fencing disabled. This is the default behavior of a Quil program.
# Baseline run: default Quil behavior, global fences around every CZ.
landscape_with_global_fencing, _, _ = run_maxcut_qaoa_landscape(
    qc_name=qc_name, disable_global_fencing=False, edge_count_limit=edge_count_limit
)
plot_landscape(landscape_with_global_fencing)
# Banding seen in QPU-based results is symptomatic of system decoherence, and a result of sequential 2Q gate execution pushing the total time on QPU past the system's T1 and T2 times. This issue can be addressed by disabling global fencing.
# ## Run landscape **with** global fencing disabled
#
# Second, we run and display the landscape produced with global fencing disabled. Results on QPU show visually the improvement in fidelity that is gained by this parallel execution of 2Q gates.
# Comparison run: CZ gates fenced only on their own qubit pair.
landscape_without_global_fencing, _, native_program = run_maxcut_qaoa_landscape(
    qc_name=qc_name, disable_global_fencing=True, edge_count_limit=edge_count_limit
)
plot_landscape(landscape_without_global_fencing)
# To see what the native program looks like when fencing is disabled, you can print it out. When this is run on QPU, you will see "DEFCAL" instructions that include "FENCE" directives that only isolate the qubit pair being operated upon.
print(native_program)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: lang101
# language: python
# name: lang101
# ---
# ## Outline
# - Anatomy of a script
# - Virtual environment, requirements.txt
# ## A simple concordance function (KWIC)
import re #regex
import string #regex
# Example sentence; str.split() with no arguments splits on any whitespace.
text = "It was the best of times, it was the worst of times"
text.split()
# A quick regex tokenizer for splitting strings
def tokenize(input_string):
    """Split ``input_string`` into tokens on runs of non-alphanumeric characters.

    Note: leading/trailing punctuation yields empty-string tokens at the
    ends, matching the original compiled-pattern behavior.
    """
    # re.split(pattern, s) is equivalent to re.compile(pattern).split(s);
    # the re module caches the compiled pattern internally.
    return re.split(r"\W+", input_string)
# Tokenize the example sentence and display the result.
tokenized = tokenize(text)
tokenized
# A longer sentence with punctuation; note the regex also splits on
# apostrophes and produces an empty token after the final period.
text2 = "The world’s biggest oil companies are no stranger to UK waters, but by the end of the decade they will be running more offshore wind turbines than oil rigs."
tokenize(text2)
# ## Code along task: Creating a KWIC function
def kwic(text, keyword, window_size=50):
    """Print a Key Word In Context line for every regex match of ``keyword``.

    Shows up to ``window_size`` characters of context on each side of the
    match. Note: the middle of each printed line is ``keyword`` itself (the
    pattern), not the matched text.
    """
    for match in re.finditer(keyword, text):
        start, end = match.start(), match.end()
        # Clamp the left window at the start of the text.
        left = text[max(0, start - window_size):start]
        right = text[end:end + window_size]
        print(f"{left} {keyword} {right}")
# ## Collocation
# <img src="../data/imgs/expected-vs-observed.jpg">
# __For reference, see http://collocations.de/AM/index.html__
# u = our keyword <br>
# v = a collocate <br>
#
# O11 = our keyword && our collocate
# <img src="../data/imgs/mi.jpg">
# - u = how often keyword occurs
# - v = how often collocate appears along with u
# - O11 = v & u
# - O21 = v & !u
# - etc
# - N = total number of words
# NOTE(review): fill-in-the-blank lecture scaffold -- the empty assignments
# below are intentionally incomplete and this cell does not run as-is
# (`corpus`, `log`, `O11` and the counts must be supplied by the student).
for text in corpus:
    # Calculate each of these
    u =
    v =
    R1 =
    C1 =
    # length of text
    N = len()
    # Expected
    E11 = (R1*C1/N)
    # return MI
    # NOTE(review): 'E1l' below looks like a typo for 'E11' -- confirm.
    MI = log(O11/E1l)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Critères d'évaluation en apprentissage supervisé
#
# ## Théorie
#
# ### Introduction
#
# Tout au long de ce cours nous avons étudié différentes méthodes d'apprentissage supervisé, et à chaque fois nous avons cherché à optimiser l'_accuracy_, c'est-à-dire à minimiser l'erreur de généralisation. Mais cette approche pour évaluer la pertinence de notre modèle et sa robustesse est-elle suffisante ? En particulier dans ce notebook nous allons nous intéresser au cas de la classification binaire (deux classes), nous allons voir que dans le cas de la classification, plusieurs métriques interviennent, elles peuvent être contradictoires (dans le sens où elles varient de manière opposée) et il sera donc nécessaire de faire un compromis sur certaines grandeurs en fonction de l'application que l'on considère.
#
# Considérons un exemple de classification binaire que nous déroulerons tout au long de cette partie théorique pour illustrer nos propos. Nous allons considérer une tâche de classification simple consistant à identifier des chiens dans des images, notre système prend donc en entrée des images et prédit en sortie la présence ou l'absence de chien dans cette image.
#
# ### Matrice de confusion
#
# Une métrique très souvent utilisée en classification est la matrice de confusion $\mathbf{M_c}$ qui résume de façon compacte les résultats de la classification:
#
# $$ \mathbf{M_c} = \left[ \begin{matrix}
# TP & FP \\
# FN & TN
# \end{matrix} \right]$$
#
# $TP$ : _True positive_, le nombre d'échantillons qui ont été labélisés comme contenant des chiens et qui contiennent effectivement des chiens.
#
# $FP$ : _False positive_, ou erreur de type I, le nombre d'échantillons qui ont été labelisés comme contenant des chiens alors qu'ils n'en contiennent pas.
#
# $FN$ : _False negative_, ou erreur de type II, le nombre d'échantillons qui ont été labelisés comme ne contenant pas de chiens alors qu'ils en contiennent.
#
# $TN$ : _True negative_, le nombre d'échantillons qui ont été labélisés comme ne contenant pas de chiens et qui n'en contiennent effectivement pas.
#
# Nous pouvons ensuite définir le nombre $P$ d'échantillons positifs et le nombre $N$ d'échantillons négatifs (en réalité, pas après prédiction). La métrique que nous avons utilisé tout au long du cours et qui est souvent utilisé est l'**accuracy**, celle-ci est définie par:
#
# $$ \mathbf{accuracy} = \frac{TP + TN}{P + N} = \frac{TP + TN}{TP + TN + FP + FN} = \frac{tr(\mathbf{M_c})}{TOTAL}$$
#
# Concrètement, elle représente le ratio entre le nombre d'échantillons bien classés (positifs ou négatifs) et le nombre total d'échantillons. Elle ne donne cependant aucune information sur les échantillons qui sont mal classés. Elle est souvent mal utilisée car elle n'est pertinente que dans le cas où nous avons autant d'échantillons positifs que négatifs à classer. Il faut aussi que les prédictions et les erreurs de prédiction soient de même importance, ce qui est rarement le cas.
#
# On utilise également deux autres grandeurs, la _**precision**_ et le _**recall**_, celles-ci sont définies par:
#
# $$ \mathbf{precision} = \frac{TP}{TP + FP}$$
#
# $$ \mathbf{recall} = \frac{TP}{TP + FN} = \frac{TP}{P} = TPR $$
#
# $TPR$ est le _True Positive Rate_, on peut également définir le _False Positive Rate_, $FPR = 1 - TPR = \frac{FP}{N}$.
#
# L'image suivante résume assez bien ce que nous venons de voir, les éléments bien classés sont sur la partie gauche de l'image.
#
# <img src="img/Precisionrecall.svg" title="Source: https://en.wikipedia.org/wiki/Precision_and_recall">
#
#
# Enfin, pour essayer de concilier _recall_ et _precision_, il est possible d'utiliser le _F-score_, il s'agit de la moyenne harmonique des deux :
#
# $$ \textrm{F-score} = \mathbf{F_1} = 2\cdot \frac{\mathbf{precision} \cdot \mathbf{recall}}{\mathbf{precision} + \mathbf{recall}}$$
#
# On définit plus généralement la mesure $\mathbf{F_\beta}$ qui permet de donner plus de poids à l'un ou à l'autre suivant la valeur de $\beta$ (pour $\beta \in \mathbb{R}^+$) :
# $$\mathbf{F_\beta} = (1+\beta^2)\cdot \frac{\mathbf{precision} \cdot \mathbf{recall}}{\beta^2 \cdot \mathbf{precision} + \mathbf{recall}} $$
# Un des points les plus importants de ce notebook et qu'il faut retenir absolument est qu'il faut choisir un critère d'évaluation qui soit cohérent avec la tâche que l'on souhaite accomplir. Aucune de ces mesures n'est meilleure que les autres, cela dépend du contexte.
#
# Par exemple, changeons de cas d'étude et prenons le cas d'un algorithme de détection de tumeurs dans des images médicales (scanner, IRM ou autre), ce que nous souhaitons dans ce cas c'est minimiser le nombre de faux négatifs, $FN$, c'est-à-dire que nous voulons minimiser le nombre de cas où l'algorithme prédit l'absence de tumeur dans l'image alors qu'il y en a une, les conséquences de telles erreurs sont évidentes. Dans ce cas, on ne se focalisera donc pas sur le $FP$, le cas où l'algorithme prédit la présence d'une tumeur alors qu'il n'y en a pas, cela donnera plus de travail aux médecins mais minimisera le risque de non-détection.
#
# #### Un exemple
#
# Commençons par nous convaincre que l'_accuracy_ n'est pas une bonne mesure [1], nous considérons un jeu de données sur le cancer du sein, il contient des données sur 286 femmes atteintes d'un cancer, parmi elles, 201 n'ont pas eu de récidive et 85 ont eu une récidive. Nous voulons construire un classifieur binaire utilisant 9 _features_ pour prédire la présence ou l'absence de récidive. Imaginons que nous avons construit trois modèles M1, M2 et M3, le modèle M1 prédit l'absence de récidive dans tous les cas, le modèle M2 prédit la présence de récidive dans tous les cas, et le modèle M3 est un peu moins radical et prédit 23 récidives (10 sont correctes) et 263 non récidives (188 sont correctes). Regardons leur _accuracy_ :
#
# $$\mathbf{accuracy(M_1)} = 201/286 \approx 70 \%$$
# $$\mathbf{accuracy(M_2)} = 85/286 \approx 30 \%$$
# $$\mathbf{accuracy(M_3)} = \frac{10+188}{286} \approx 69.23 \%$$
#
# En utilisant seulement l'_accuracy_, nous aurions tendance à dire que les modèle M1 et M3 sont assez performants. Pourtant, un coup d'oeil rapide aux matrices de confusion suffit à nous montrer qu'ils sont très différents:
#
# $$ \mathbf{M_{C1}} = \left[ \begin{matrix}
# 0 & 0 \\
# 85 & 201
# \end{matrix} \right]
# $$
#
# $$ \mathbf{M_{C2}} = \left[ \begin{matrix}
# 85 & 201 \\
# 0 & 0
# \end{matrix} \right]
# $$
#
# $$ \mathbf{M_{C3}} = \left[ \begin{matrix}
# 10 & 13 \\
# 75 & 188
# \end{matrix} \right]
# $$
#
# M3 est le seul à prédire à la fois de vrais positifs et de vrais négatifs. Regardons maintenant les autres grandeurs:
#
# * Precision :
# $$\mathbf{precision(M1)} = \frac{0}{0} = NaN $$
# $$\mathbf{precision(M2)} = \frac{85}{286} \approx 0.30 $$
# $$\mathbf{precision(M3)} = \frac{10}{23} \approx 0.43 $$
#
# * Recall
#
# $$\mathbf{recall(M1)} = \frac{0}{0+85} = 0 $$
# $$\mathbf{recall(M2)} = \frac{85}{0+85} = 1$$
# $$\mathbf{recall(M3)} = \frac{10}{10+75} \approx 0.12 $$
#
# * F-score
#
# $$ \mathbf{F1(M1)} = 0 $$
# $$ \mathbf{F1(M2)} \approx 0.46 $$
# $$ \mathbf{F1(M3)} \approx 0.19 $$
#
# Dans notre exemple, nous voulions maximiser le _recall_, ce qui revient à minimiser $FN$.
#
#
#
# ## AUC - Area Under the Curve
#
#
# Pour mesurer la performance d'un classifieur binaire, on peut tracer la courbe ROC (Receiver Operating Characteristic), celle-ci représente la variation de 'performance' du classifieur lorsque le seuil de décision varie. Concrètement, c'est la courbe qui relie les points dans le plan _FPR_ et _TPR_ (ou _recall_) lorsqu'on fait varier le seuil.
#
# <img src="img/Roccurves.png" title="Source: https://en.wikipedia.org/wiki/Receiver_operating_characteristic">
#
# La diagonale représente un classifieur qui tirerait au hasard sa prédiction avec une probabilité 0.5. Si la courbe est au dessus de la diagonale, le classifieur fait mieux qu'un tirage aléatoire, si elle est en dessous il fait moins bien (dans ce cas il suffit d'inverser les prédictions pour en faire un meilleur). Mais pour comparer plusieurs classifieurs entre-eux, comparer les courbes entre-elles n'est pas la méthode la plus précise comme on peut le voir sur la figure ci-dessus [3]. Il faut utiliser une grandeur quantitative, l'aire sous la courbe (AUC), dont la valeur varie entre 0.5 et 1.0 pour un classifieur performant.
#
# Il est possible d'utiliser des tests statistiques pour vérifier que les performances d'un classifieurs sont meilleures, en terme d'AUC.
# ## La pratique
import numpy as np
from keras.models import Sequential
from keras.layers import Input, Dense, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping
# %matplotlib inline
import matplotlib.pyplot as plt
from math import inf
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
# Considérons un exemple simple de classification binaire, nous avons deux nuages de points générés suivant des loi normales : $\mathcal{N}(\mu_1,cov_1)$ et $\mathcal{N}(\mu_2,cov_2)$
# +
N = 1000
test = 400
mu1 = [1,1]
cov1 = [[4,3],[3,3]]
X1 = np.random.multivariate_normal(mu1, cov1, N)
#mu2 = [4,7]
mu2 = [4,5]
cov2 = [[1,0.5],[0.5,4]]
X2 = np.random.multivariate_normal(mu2, cov2, N)
# -
plt.plot(X1[:,0],X1[:,1],marker='.',linestyle='')
plt.plot(X2[:,0],X2[:,1],marker='.',linestyle='',color='r')
def zscore(X):
return((X - np.mean(X, axis=0))/np.std(X, axis=0))
# +
# Creation labels
y1 = np.zeros(N)
y2 = np.ones(N)
# Concatenation
X = np.concatenate((X1,X2))
X = zscore(X)
y = np.concatenate((y1,y2))
s = np.arange(2*N)
np.random.shuffle(s)
X = X[s]
y = y[s]
X_test = X[-test:,:]
X_train = X[:-test,:]
y_test = y[-test:]
y_train = y[:-test]
# -
plt.plot(X_train[:,0],X_train[:,1],marker='.',linestyle='')
# +
nn = Sequential()
nn.add(Dense(5, input_shape=(2,), kernel_initializer='uniform'))
nn.add(Activation('relu'))
#nn.add(Dense(15, kernel_initializer='uniform'))
#nn.add(Activation('relu'))
nn.add(Dense(1, kernel_initializer='uniform'))
nn.add(Activation('sigmoid'))
#print(nn.summary())
nn.compile(optimizer=RMSprop(lr=0.01), loss='binary_crossentropy', metrics=['binary_accuracy'])
history = nn.fit(X_train,y_train, epochs=15, batch_size=100)
# -
plt.plot(history.history['binary_accuracy'])
# récupère les prédictions du classifieur sur la base de test
y_pred = nn.predict(X_test)
res = nn.evaluate(X_test,y_test,batch_size=test)
# return [loss, bin_accuracy] sur la base de test
print("Test binary accuracy: {}%".format(round(res[1]*100,4)))
# ### Calcul de la matrice de confusion
#
# <div class="alert alert-block alert-warning">
# Question : Implémentez les fonctions ci-dessous qui calculent les 4 coefficients de la matrice de confusion étant donnés $y_{pred}$ le vecteur des classes prédites par le classifieur et $y_{test}$ les vrais labels.
# +
def true_positive(y_pred, y_test, threshold):
return None
def false_positive(y_pred, y_test, threshold):
return None
def false_negative(y_pred, y_test, threshold):
return None
def true_negative(y_pred, y_test, threshold):
return None
# -
def ConfusionMatrix(y_pred, y_test,threshold):
mat_conf = np.empty(4)
mat_conf[0] = true_positive(y_pred, y_test, threshold)
mat_conf[1] = false_positive(y_pred, y_test, threshold)
mat_conf[2] = false_negative(y_pred, y_test, threshold)
mat_conf[3] = true_negative(y_pred, y_test, threshold)
print(mat_conf[0],mat_conf[1])
print(mat_conf[2],mat_conf[3])
return mat_conf
threshold = 0.5
mat_conf = ConfusionMatrix(y_pred, y_test, threshold)
# ### Precision, Recall and F-Factor
#
# <div class="alert alert-block alert-warning">
# Question : Implémentez les fonctions $precision$, $recall$ et $f\_measure$ pour qu'elles retournent respectivement la métrique correspondante.
# +
def precision(TP,FP):
return None
def recall(TP,P):
return None
def f_measure(precision,recall):
return None
def measure(y_pred, y_test,threshold):
TP = true_positive(y_pred, y_test, threshold)
FP = false_positive(y_pred, y_test, threshold)
FN = false_negative(y_pred, y_test, threshold)
TN = true_negative(y_pred, y_test, threshold)
P = sum(y_test == 1)
N = sum(y_test == 0)
return TP,FP,FN,TN,P,N
# -
[TP,FP,FN,TN,P,N] = measure(y_pred, y_test, threshold)
prec = precision(TP,FP)
rec = recall (TP,P)
F_factor = f_measure(prec,rec)
print(TP,FP,FN,TN,P,N)
print("precision = ",prec)
print("recall = ",rec)
print("F_factor = ",F_factor)
# ### Tracé de la courbe ROC
#
# Le pseudo-code de l'algorithme pour extraire les coordonnées des points de la courbe ROC est présenté ci-dessous.
# <img src="img/algo_roc" style="width: 450px;">
#
# <div class="alert alert-block alert-warning">
# Question : écrire le code de cet algorithme dans la fonction ci-dessous pour retourner une matrice ($N_{test},2$), avec $N_{test}$ la taille de la liste retournée par le pseudo-code.
# </div>
def generateROCpoints(y_pred, y_test):
## Mettre le code ici ###
R = [[None]] ### A enlever (juste pour éviter quelques erreurs tant que le code n'est pas rempli)
########################
# A ce point de l'algo vous devriez avoir R sous la forme R = [[x0,y0],[x1,y1],...,[xN,yN]]
# L'algo du pseudo-code retourne une liste de liste, R, mais pour tracer la courbe il est plus
# facile d'utiliser une matrice R_mat.
R_mat = np.empty((len(R),len(R[0])))
for i in range(len(R)):
R_mat[i,:] = R[i]
return R_mat
R_mat = generateROCpoints(y_pred, y_test)
## Decommenter la ligne suivante une fois la fonction generateROCpoints implémentée correctement
#plt.plot(R_mat[:,0], R_mat[:,1]) #### A DECOMMENTER
plt.plot([0,1],[0,1])
plt.title('ROC curve')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.show()
# ### Aire sous la courbe ROC
#
# <img src="img/algo_auc" style="width: 450px">
#
# <div class="alert alert-block alert-warning">
# Question : écrire le code de cet algorithme dans la fonction AreaUnderCurve($y_{pred}, y_{test}$) ci-dessous.
# </div>
# +
def trap_area(X1,X2,Y1,Y2):
base = abs(X1-X2)
height_avg = (Y1+Y2)/2
area = base*height_avg
return area
def AreaUnderCurve(y_pred, y_test):
return None
# -
Area = AreaUnderCurve(y_pred, y_test)
Area
# ## À vous de jouer !
#
# ### Un exemple dans le domaine médical
# <div class="alert alert-block alert-warning">
# Pour le jeu de données suivant sur le cancer du sein (label 0 = tumeur bégnine, 1 = tumeur maligne) implémenter différents algorithmes de classification binaire (SVM, arbre, réseau de neurones) et comparez leur performance pour les grandeurs introduites précédemment.
# +
## Chargement des données
names = ['id', 'clumpThick', 'unifCellSize', 'unifCellShape', 'margAdh', 'SECS', 'bareNuclei', 'blandChrom', 'normalNucl','mistoses','class']
dataframe = pd.read_csv('data/breast-cancer-wisconsin.data', names=names, na_filter='?')
data = dataframe.values
X = data[:,1:-1]
y = data[:,-1]
# Les labels dans le dataset sont 2 et 4 au lieu des traditionnels 0 et 1, on les remplace.
y[y == 2] = 0
y[y == 4] = 1
size_test = 200 # doit être plus petit que la taille du dataset
X_train = X[:-size_test,:]
y_train = y[:-size_test]
X_test = X[-size_test:,:]
y_test = y[-size_test:]
# Pour plotter un plt.bar afin de comparer les trois classifieurs
prec_list = np.zeros(3)
rec_list = np.zeros(3)
fScrore_list = np.zeros(3)
# -
# <div class="alert alert-block alert-warning">
# 1er indice : commencez par regarder la répartition des classes. Que peut-on en dire ?
# <div class="alert alert-block alert-success">
# Réponse :
# ## Avec une SVM
# ## Avec une forêt aléatoire
# ## Avec un réseau de neurones
# +
fig, ax = plt.subplots(2,2)
ax[0,0].bar([1,2,3],prec_list,color=['r','b','g'])
ax[0,0].get_xaxis().set_visible(False)
ax[0,0].set_title('Precision')
ax[0,1].bar([1,2,3],rec_list,color=['r','b','g'])
ax[0,1].get_xaxis().set_visible(False)
ax[0,1].set_title('Recall')
ax[1,0].bar([1,2,3],fScrore_list,color=['r','b','g'])
ax[1,0].set_title('F-Score')
ax[1,0].get_xaxis().set_visible(False)
ax[1,1].get_xaxis().set_visible(False)
ax[1,1].get_yaxis().set_visible(False)
# En rouge la SVM, en bleu la forêt aléatoire et en vert le réseau de neurones
# -
# <div class="alert alert-block alert-warning">
# Quelle conclusion tirez-vous de vos différentes expériences sur ce jeu de données?
# <div class="alert alert-block alert-success">
# Réponse :
# # Régression
#
# Le cas de la régression est moins problématique, pour mesurer la performance d'un modèle de régression, il suffit de mesurer l'écart entre la prédiction et la vraie valeur (Mean Squared Error, Mean Absolute Error, qui donnent un ordre de grandeur de la magnitude de l'écart mais pas de signe). On peut également calculer le coefficient de détermination $R^2$.
#
#
#
# ## References
#
# [1] https://machinelearningmastery.com/classification-accuracy-is-not-enough-more-performance-measures-you-can-use/
#
# [2] https://machinelearningmastery.com/assessing-comparing-classifier-performance-roc-curves-2/
#
# [3] https://en.wikipedia.org/wiki/Receiver_operating_characteristic
#
# [4] Les fichiers pdf dans l'archive
#
# [5] O. L. Mangasarian and W. H. Wolberg: "Cancer diagnosis via linear
# programming", SIAM News, Volume 23, Number 5, September 1990, pp 1 & 18.
#
# <img src="img/img.jpg" style="width: 450px;" title="Machine learning memes for convolutional teens">
#
# <img src="img/headache.png" style="width: 450px;" title="Machine learning memes for convolutional teens">
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# Azure IMDB Review Sentiment Analysis
# Let's start by creating a dictionary mapping the words in all 50,000 reviews to integers indicating the words' relative frequency of occurrence. How often a word occurs is valuable information.
#
# Each word is assigned a unique integer as it is ranked. The most common word is assigned the number 1, the second most common word is assigned the number 2, and so on. load_data also returns a pair of tuples containing the movie reviews (in this example, x_train and x_test) and the 1s and 0s classifying those reviews as positive and negative (y_train and y_test).
#
#
from keras.datasets import imdb
top_words = 10000
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=top_words)
x_train[0]
imdb.get_word_index()
word_dict = imdb.get_word_index()
word_dict = { key:(value + 3) for key, value in word_dict.items() }
word_dict[''] = 0 # Padding
word_dict['>'] = 1 # Start
word_dict['?'] = 2 # Unknown word
reverse_word_dict = { value:key for key, value in word_dict.items() }
print(' '.join(reverse_word_dict[id] for id in x_train[0]))
from keras.preprocessing import sequence
max_review_length = 500
x_train = sequence.pad_sequences(x_train, maxlen=max_review_length)
x_test = sequence.pad_sequences(x_test, maxlen=max_review_length)
# +
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.embeddings import Embedding
from keras.layers import Flatten
embedding_vector_length = 32
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=max_review_length))
model.add(Flatten())
model.add(Dense(16, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam', metrics=['accuracy'])
print(model.summary())
# +
#from keras.callbacks import History
#history = History()
hist = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=5, batch_size=32)
# +
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
sns.set()
acc = hist.history['accuracy']
val = hist.history['val_accuracy']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, '-', label='Training accuracy')
plt.plot(epochs, val, ':', label='Validation accuracy')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(loc='upper left')
plt.plot()
# +
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
sns.set()
loss = hist.history['loss']
valloss = hist.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, loss, '-', label='Training loss')
plt.plot(epochs, valloss, ':', label='Validation loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(loc='upper left')
plt.plot()
# -
scores = model.evaluate(x_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))
# +
import string
import numpy as np
def analyze(text):
# Prepare the input by removing punctuation characters, converting
# characters to lower case, and removing words containing numbers
translator = str.maketrans('', '', string.punctuation)
text = text.translate(translator)
text = text.lower().split(' ')
text = [word for word in text if word.isalpha()]
# Generate an input tensor
input = [1]
for word in text:
if word in word_dict and word_dict[word] < top_words:
input.append(word_dict[word])
else:
input.append(2)
padded_input = sequence.pad_sequences([input], maxlen=max_review_length)
# Invoke the model and return the result
result = model.predict(np.array([padded_input][0]))[0][0]
return result
# -
analyze('Easily the most stellar experience I have ever had.')
analyze('The long lines and poor customer service really turned me off.')
# Finito
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Sentiment Analysis on test data
# - Training is based on the pos.txt and neg.txt files.
# - First, our data is in language/word format, not numerical form, which we need be converted to a vector of features.
# - Second realization: our texts may not be the same length of words or characters.
# - One option we have is to compile a list of all unique words in the training set. Let's say that's 3,500 unique words. These words are our lexicon. We check to see if a given input word of a text is in our unique word vector. If so, the index value of that word in the unique word index is set to 1 in the training vector. This is a very simple bag-of-words model.
#import our dependencies
import nltk
from nltk.tokenize import word_tokenize
import numpy as np
import pickle
import random
from nltk.stem import WordNetLemmatizer
from collections import Counter
lemmatizer = WordNetLemmatizer()
hm_lines = 100000
#create a function to create lexicon for our feature vector
def create_lexicon(pos,neg):
lexicon = []
for fi in [pos,neg]:
with open(fi,'r') as f:
contents = f.readlines()
for l in contents[:hm_lines]:
all_words = word_tokenize(l.lower())
lexicon+=list(all_words)
lexicon = [lemmatizer.lemmatize(i) for i in lexicon]
w_count = Counter(lexicon)
l2 = []
for w in w_count:
if 1000>w_count[w]>10:
l2.append(w)
print(len(l2))
return l2
def sample_handling(sample,lexicon,classification):
featureset = []
with open(sample,'r') as f:
contents = f.readlines()
for l in contents[:hm_lines]:
current_words = word_tokenize(l.lower())
current_words = [lemmatizer.lemmatize(i) for i in current_words]
features = np.zeros(len(lexicon))
for word in current_words:
if word.lower() in lexicon:
index_value = lexicon.index(word.lower())
features[index_values] +=1
features = list(features)
featureset.append([features, classification])
return featureset
# classification = [pos_value,neg_value] ,i.e, for a positive sample, classification = [1,0]
def create_feature_sets_and_labels(pos,neg,test_size = 0.1):
lexicon = create_lexicon(pos,neg)
features = []
features +=sample_handling('pos.txt',lexicon,[1,0])
features += sample_handling('neg.txt',lexicon,[0,1])
random.shuffle(features)
#for statistical reasons and for better training, we can't have all positive and negative training examples together
features = np.array([features])
testing_size = int(test_size*len(features))
train_x = list(features[:,0][:-testing_size])
train_y = list(features[:,1][:-testing_size])
test_x = list(features[:,0][-testing_size:])
test_y = list(features[:,1][-testing_size:])
return train_x,train_y,test_x,test_y
if __name__ == '__main__':
train_X,train_y,test_x,test_y = create_feature_sets_and_labels('pos.txt','neg.txt')
with open('sentiment_set.pickle','wb') as f:
pickle.dump([train_x,train_y,test_x,test_y],f)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Logistic regression: interative lecture
# The first thing to say is that logistic regression is __not a prediction__, but a **classification** learning
# algorithm. The name *logistic regression* comes from statistics and is due to the fact that the mathematical
# formulation of logistic regression is similar to that of linear regression.
# In logistic regression, we still want to learn the equation of the hyperplane which best describes (fits) the data. Only the **target variable is** not numeric but **categorical**. In the simplest case, each data point belongs to one of two complementary classes: positive (1) and negative (0).
#
# The classification problem that we are trying to solve: given a vector of (numeric) observations $\mathbf{x}$, predict whether this observation belongs to a given class (1) or not (0).
# # 1. Using simple regression for classification
# The first idea that comes to mind - to use the usual regression: after all, we can treat class label 0 and 1 as a numeric attribute. Let's see why this idea does not really work.
# We start with a dataset that contains only one predictive attribute $\mathbf{x^{(1)}}$ (1D vector), and the categorical target attribute $y$.
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Create a tiny sample dataset
Data_X = np.array([1.0, 3.5, 4, 5, 6.5, 7.0, 9, 9.5, 13])
Data_Y = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
X = Data_X.reshape(len(Data_X),1)
Y = Data_Y.reshape(len(Data_Y),1)
# -
# Treat $y$ as numeric and try a **simple linear regression**:
# +
from sklearn import linear_model
regr = linear_model.LinearRegression(normalize=True)
regr.fit(X, Y)
print('intercept:', regr.intercept_)
print('slope:', regr.coef_)
# -
# Let's see if we can use this line to predict the class: 0 or 1.
# +
# Plot data points
plt.scatter(X, Y, c=Y)
plt.title('Train Data Fit')
plt.xlabel('X')
plt.ylabel('Y')
# Plot regression line
plt.plot(X, regr.predict(X), color = "lightgrey")
plt.scatter(X, regr.predict(X), color = "red", marker = "d", zorder=4, label="Prediction")
plt.legend()
plt.show()
# -
# As we see from the above experiment, the best fitting line does not really help us to predict the value of $y$: the linear combination of features such as $\mathbf{wx} + b$ gives a continuous result
# that spans from minus infinity to plus infinity, while $y$ has only two possible values.
#
# We also see that the $SSR$ would not be a good objective function for fitting this line - no matter how you change the line parameters, the SSR will always remain very high.
# # 2. Logistic regression in 1D
# The idea behind the logistic regression is to map an output of a continuous function to a binary result.
# If we set one class
# label to 0 and the second class label to 1, we just need to find a simple continuous
# function whose codomain is (0, 1). In such a case, if the value returned by the model for
# input $\mathbf{x}$ is closer to 0, then we output a class label 0, otherwise, the example is labeled
# as 1.
#
# One function that has such a property is the *standard logistic function* (also
# known as the <b>sigmoid</b> function)
# <img src="images/sigmoid.png" width="400px">
# Below we implement the mapping from $x$ to $y$ using line equation $y = ax + b$, and the sigmoid function which will convert the continuous value of $y$ into a float in (0,1) interval.
# +
def line(a, b, x):
return a*x + b
def sigmoid(y):
s = 1 / (1 + np.exp(-y))
return s
def y_from_sigmoid(s):
y = - np.log(1.0/s - 1)
return y
def x_from_y(a, b, y):
return (y - b)/a
# +
# Example
# here is the range with step 0.2
x_range = np.arange(1., 10., 0.2)
# here are the parameters of the line
a=2
b=-10
s = sigmoid(line(a,b,5))
print(s)
y = y_from_sigmoid(s)
print(y)
x = x_from_y(a, b, y)
print(x)
# +
# here are the parameters of the line
a=2
b=-10
# plotting the mapping from x to sigmoid(f(x))
S = sigmoid(line(a,b,x_range))
plt.scatter(x_range, S)
plt.plot(x_range,[0.5]*len(x_range), color="red", linestyle='dashed')
# find the value of x which gives sigmoid 0.5
y = y_from_sigmoid(0.5)
x = x_from_y(a, b, y)
plt.scatter([x], [0.5], marker="d", color="red")
label = "{:.1f},0.5".format(x)
plt.annotate(label, (x,0.5), textcoords="offset points", # how to position the text
xytext=(0,10), # distance from text to points (x,y)
ha='center')
plt.show()
# -
# If we take an output of the linear model and feed it into a sigmoid function, then we get a number between 0 and 1. Sigmoid function gives us a probability that the point belongs to class 1.
#
# If we determined that $a$ and $b$ parameters of the best line are as above, then all the values of $x$ starting with approximately 6.5 would be classified as class 1 with the probability close to 1.0. All the values of $x$ smaller than 3.5 do not belong to class 1 - their sigmoid is close to 0.
#
# For other values of $x$ the probability of belonging or not belonging to class 1 can be found from the above sigmoid function.
#
# The value of $x$ which corresponds to the probability 0.5 is called a **decision boundary** - it divides all the points in the dataset into 2 classes - 1 and 0.
#
# Play with the parameters of the line and see how the decision boundary changes.
# ## 2.1. Linear Regression with Sigmoid
# Let's now see how do we use the sigmoid transformation for classification on the same sample toy 1D dataset as in Section 1.
#
# Maybe we just need to find the best fitting line and transform it into a sigmoid line?
# +
#repeating the dataset here
Data_X = np.array([1.0, 3.5, 4, 5, 6.5, 7.0, 9, 9.5, 13])
Data_Y = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
X = Data_X.reshape(len(Data_X),1)
Y = Data_Y.reshape(len(Data_Y),1)
# +
regr = linear_model.LinearRegression(normalize=True)
regr.fit(X, Y)
a = regr.coef_[0][0]
b = regr.intercept_[0]
print("slope:",a,", intercept:",b)
# draw sigmoid predictor
#create continuous interval of x values
x_range = np.arange(-5, 15, 0.2)
S = sigmoid(line(a,b,x_range))
plt.plot(x_range, S)
plt.scatter(X,Y,c=Y)
plt.plot(x_range,[0.5]*len(x_range), color="red", linestyle='dashed')
# find the value of x which gives sigmoid 0.5 - decision boundary
y = y_from_sigmoid(0.5)
x = x_from_y(a, b, y)
plt.scatter([x], [0.5], marker="d", color="red")
label = "{:.1f},0.5".format(x)
plt.annotate(label, (x,0.5), textcoords="offset points", # how to position the text
xytext=(0,10), # distance from text to points (x,y)
ha='center')
plt.show()
# -
# What is the decision boundary? How many points are classified as positive using this decision boundary?
#
# Answer: the decision boundary is at 3.6. 7 points are classified as positive.
# ## 2.2. Proper Logistic Regression
# The goal of the Logistic Regression learning algorithm is to find the function which best predicts the classes of the data points in the training set.
# Given (currently unknown) parameters of the line $a$ and $b$, the predicted value $p_i$ for each datapoint looks like this:
#
# $p_i = sigmoid(ax_i +b)$
#
# Recall that $p_i$ reflects the probability of the point belonging to class 1. Thus (1 - $p_i$) is the probability of the point not belonging to class 1.
#
# The objective function should reflect the difference between $p_i$ and the actual class label $y_i$.
#
# The probability for each point $(x_i,y_i)$ to be classified correctly:
#
# $E(x_i) = p_i^{y_i} * (1-p_i)^{(1-y_i)}$
#
# The **objective function of logistic regression** is called *likelihood*. It reflects the probability of observing these data points given the parameters $a$ and $b$. The likelihood of the model given data is just the product of all the probabilities $E(x_i)$:
#
# $$L(a,b|data) = \Pi_{i=1}^n{ E(x_i)}$$
#
# The algorithm which finds the best values of $a$ and $b$ is called the **Maximum Likelihood Estimation (MLE)**. It is an iterative algorithm based on gradient descent! The sigmoid function has a nice derivative (can you calculate it with respect to a and with respect to b?).
#
# This time we are looking to **maximize** the likelihood. So if the derivative of function $L(b)$ is positive for some value of $b$ - we make next step in the same direction. As always we are using the learning rate $\eta$ to make small moves, and we also set the maximum number of iterations (learning epochs).
#
# In practice - because the product of probabilities for multiple points can become really small and cause underflow - we use **log likelihood** and try to maximize it.
# +
# Using logistic regression learning algorithm
# Fit (train) the Logistic Regression classifier
logregr = linear_model.LogisticRegression()
# note that while in regular regression Y can be multi-dimensional,
# in logistic regression it has to be a 1D vector
fitted_model = logregr.fit(X, Data_Y)
a = logregr.coef_[0][0]
b = logregr.intercept_[0]
print("slope:",a,", intercept:",b)
# draw sigmoid predictor
#create continuous interval of x values
x_range = np.arange(-5, 15, 0.2)
S = sigmoid(line(a,b,x_range))
plt.plot(x_range, S)
plt.scatter(X,Y,c=Y)
# find the value of x which gives sigmoid 0.5 - decision boundary
y = y_from_sigmoid(0.5)
x = x_from_y(a, b, y)
plt.scatter([x], [0.5], marker="d", color="red")
label = "{:.1f},0.5".format(x)
plt.annotate(label, (x,0.5), textcoords="offset points", # how to position the text
xytext=(0,10), # distance from text to points (x,y)
ha='center')
plt.show()
# -
# # 3. Logistic regression in 2D
#
# For the 2-dimensional vector **x** we are looking for a hyperplane:
#
# $y = w_1*x^{(1)} + w_2*x^{(2)}+ w_0$
#
# And the predicted value becomes:
#
# $p_i = \frac{1}{1 + e^{-y(x_i^{(1)},x_i^{(2)})}}$
#
# Note that as in the case of regression, we do not have to use the linear combination of original features, but may also use polynomials.
# The sample input dataset is taken from Conway & Myles Machine Learning for Hackers book, Chapter 2.
#
# Each sample contains three columns.
# * Height in inches
# * Weight in pounds
# * Gender: 1 corresponds to a male person, and 0 corresponds to a female person.
#
# We want to learn the model which given height and weight would predict a class label: male (1) or female (0).
data_file = "height_weight_gender.csv"
# +
import pandas as pd
data = pd.read_csv(data_file)
data.columns
# +
# X = feature values, all the columns except the last column
X = data.iloc[:, :-1]
# y = target value, class, last column of the data frame
Y = data.iloc[:, -1]
# all males
males = data.loc[Y == 1]
# all females
females = data.loc[Y == 0]
import matplotlib.pyplot as plt
# %matplotlib inline
# plot
plt.scatter(males.iloc[:, 0], males.iloc[:, 1], s=2, label='Males')
plt.scatter(females.iloc[:, 0], females.iloc[:, 1], s=2, label='Females')
plt.legend()
plt.show()
# +
from sklearn import linear_model
# Fit (train) the Logistic Regression classifier
logregr = linear_model.LogisticRegression(C=1e40, solver='newton-cg')
fitted_model = logregr.fit(X, Y)
# Predict
prediction_result = logregr.predict([(70,180)]) #174 cm, 82 kg
print(prediction_result)
# -
print( "intercept:", logregr.intercept_ )
print( "slopes:", logregr.coef_ )
# +
x1 = 70
x2 = 180
y_predict = logregr.coef_ [0][0]*x1 + logregr.coef_ [0][1]*x2 + logregr.intercept_
print(y_predict)
# -
print(sigmoid(y_predict))
# As there are two features in our dataset, the linear equation can be represented by
# $y = w_1*x_1 + w_2*x_2 + w_0$
#
# or using the parameters learned by logistic regression:
# $y = -0.49261999*x_1 + 0.19834042*x_2 + 0.69254177$
#
# The sigmoid function here is 3-dimensional. The decision boundary for it is a projection of the sigmoid into a 2D space ($x_1$, $x_2$).
# To find the line that separates males from females we set $y$ to zero and compute the line in coordinates ($x_1$, $x_2$):
#
# $0 = -0.49261999*x_1 + 0.19834042*x_2 + 0.69254177$
#
# $x_2 = \frac{0.49261999*x_1 - 0.69254177}{0.19834042}$
#
# This is the line equation for decision boundary.
# +
min_x1 = X.min()[0]
print(min_x1)
max_x1 = X.max()[0]
print(max_x1)
# -
x_values = np.linspace(min_x1 - 5, max_x1 + 5,100)
y_values = (0.49261999*x_values - 0.69254177) / 0.19834042
plt.plot(x_values, y_values, "--r")
plt.scatter(males.iloc[:, 0], males.iloc[:, 1], s=2, label='Males')
plt.scatter(females.iloc[:, 0], females.iloc[:, 1], s=2, label='Females')
plt.xlabel('Height')
plt.ylabel('Weight')
plt.legend()
plt.show()
# Copyright © 2022 Marina Barsky. All rights reserved.
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# %load_ext autoreload
# %autoreload 2
# + [markdown] pycharm={"name": "#%% md\n"}
# # Movies
# + tags=[]
import matplotlib.pyplot as plt
from movie_ratings import config
from taskchain import Config
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# config_name = 'movies/imdb.all.yaml'
config_name = 'movies/imdb.filtered.yaml'
chain = Config(config.TASKS_DIR, config.CONFIGS_DIR / config_name, global_vars=config).chain()
chain
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
print(f'Movie count: {len(chain.movies.value)}')
chain.movies.value.sample(5)
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
chain.movies.value.columns
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Top movies
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
min_votes = 10 ** 3
chain.movies.value\
.query(f'reviews_from_users > {min_votes}')\
.sort_values('avg_vote', ascending=False)\
.head()
# -
chain.duration_histogram.value
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
chain.year_histogram.value
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Directors
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
len(chain.directors.value)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Actors
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
len(chain.actors.value)
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
## Genres
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
genres = chain.genres.value
for genres, movies in genres.items():
print(genres, len(movies))
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Countries
#
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
countries = chain.countries.value
for country, movies in countries.items():
print(country, len(movies))
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
# %matplotlib inline
# Reproducible weight init and noise draws.
torch.manual_seed(1)
N_SAMPLES = 20
N_HIDDENS = 300
# +
# Synthetic 1-D regression data: y = x + Gaussian noise, independent
# train (2*N_SAMPLES points) and test (N_SAMPLES points) draws.
x = torch.unsqueeze(torch.linspace(-1, 1, 2*N_SAMPLES), dim = 1)
y = x + 0.3 * torch.normal(torch.zeros(2*N_SAMPLES, 1), torch.ones(2*N_SAMPLES, 1))
test_x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), dim = 1)
test_y = test_x + 0.3 * torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))
plt.scatter(x, y, c='magenta', s=50, alpha=0.5, label='train')
plt.scatter(test_x, test_y, c='cyan', s=50, alpha=0.5, label='test')
plt.legend(loc='upper left')
plt.ylim((-2.5, 2.5))
plt.show()
# +
# Deliberately over-parameterized MLP with no regularization...
net_overfitting = nn.Sequential(
    nn.Linear(1, N_HIDDENS),
    nn.ReLU(),
    nn.Linear(N_HIDDENS, N_HIDDENS),
    nn.ReLU(),
    nn.Linear(N_HIDDENS, 1)
)
# ...and the same architecture with 50% dropout before each hidden activation.
net_dropout = nn.Sequential(
    nn.Linear(1, N_HIDDENS),
    nn.Dropout(0.5),
    nn.ReLU(),
    nn.Linear(N_HIDDENS, N_HIDDENS),
    nn.Dropout(0.5),
    nn.ReLU(),
    nn.Linear(N_HIDDENS, 1)
)
print(net_overfitting)
print(net_dropout)
# +
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
# device = 'cpu'
net_overfitting.to(device)
net_dropout.to(device)
criterion = nn.MSELoss()
optimizer_overfitting = optim.Adam(net_overfitting.parameters(), lr=0.01)
optimizer_dropout = optim.Adam(net_dropout.parameters(), lr=0.01)
for t in range(1001):
    # One full-batch gradient step for each network per iteration.
    net_overfitting.zero_grad()
    outputs_overfitting = net_overfitting(x.to(device))
    loss_overfitting = criterion(outputs_overfitting, y.to(device))
    loss_overfitting.backward()
    optimizer_overfitting.step()
    net_dropout.zero_grad()
    outputs_dropout = net_dropout(x.to(device))
    loss_dropout = criterion(outputs_dropout, y.to(device))
    loss_dropout.backward()
    optimizer_dropout.step()
    if t % 100 == 0:
        # Switch to eval mode so dropout is disabled for the test-set plots.
        net_overfitting.eval()
        net_dropout.eval()
        plt.cla()
        test_overfitting = net_overfitting(test_x.to(device)).cpu().detach()
        test_dropout = net_dropout(test_x.to(device)).cpu().detach()
        plt.scatter(x, y, c='magenta', s=50, alpha=0.3, label='train')
        plt.scatter(test_x, test_y, c='cyan', s=50, alpha=0.3, label='test')
        plt.plot(test_x, test_overfitting, 'r-', lw=3, label='overfitting')
        plt.plot(test_x, test_dropout, 'b--', lw=3, label='dropout(50%)')
        plt.text(0, -1.2, 'overfitting train loss=%.4f' % loss_overfitting.cpu().item(), fontdict={'size': 20, 'color': 'red'})
        plt.text(0, -1.5, 'dropout train loss=%.4f' % loss_dropout.cpu().item(), fontdict={'size': 20, 'color': 'blue'})
        plt.text(0, -1.8, 'overfitting test loss=%.4f' % criterion(test_overfitting, test_y).item(), fontdict={'size': 20, 'color': 'red'})
        plt.text(0, -2.1, 'dropout test loss=%.4f' % criterion(test_dropout, test_y).item(), fontdict={'size': 20, 'color': 'blue'})
        plt.legend(loc='upper left'); plt.ylim((-2.5, 2.5));plt.pause(0.1)
        # Back to training mode (re-enable dropout) before the next step.
        net_overfitting.train()
        net_dropout.train()
plt.show()
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project 4: A/B Test a New Menu Launch
#
# ## The Business Problem
#
# Round Roasters is an upscale coffee chain with locations in the western United States of America. The past few years have resulted in stagnant growth at the coffee chain, and a new management team was put in place to reignite growth at their stores.
#
# The first major growth initiative is to introduce gourmet sandwiches to the menu, along with limited wine offerings. The new management team believes that a television advertising campaign is crucial to drive people into the stores with these new offerings.
#
# However, the television campaign will require a significant boost in the company’s marketing budget, with an unknown return on investment (ROI). Additionally, there is concern that current customers will not buy into the new menu offerings.
#
# To minimize risk, the management team decides to test the changes in two cities with new television advertising. Denver and Chicago cities were chosen to participate in this test because the stores in these two cities (or markets) perform similarly to all stores across the entire chain of stores; performance in these two markets would be a good proxy to predict how well the updated menu performs.
#
# The test ran for a period of 12 weeks (2016-April-29 to 2016-July-21) where five stores in each of the test markets offered the updated menu along with television advertising.
#
# The comparative period is the test period, but for last year (2015-April-29 to 2015-July-21).
#
# You’ve been asked to analyze the results of the experiment to determine whether the menu changes should be applied to all stores. The predicted impact to profitability should be enough to justify the increased marketing budget: at least 18% increase in profit growth compared to the comparative period while compared to the control stores; otherwise known as incremental lift. In the data, profit is represented in the gross_margin variable.
#
# You have been able to gather three data files to use for your analysis:
#
# - Transaction data for all stores from 2015-January-21 to 2016-August-18
# - A listing of all Round Roasters stores
# - A listing of the 10 stores (5 in each market) that were used as test markets
#
# ## Plan Your Analysis
#
# #### 1. What is the performance metric you’ll use to evaluate the results of your test?
#
# The performance metric is the **lift of profit growth** (gross_margin), which must be at least 18 % to justify the increased marketing budget.
# #### 2. What is the test period?
#
# The test period is **12 weeks**, from 2016/04/29 to 2016/07/21. In this period, five stores in the test markets offered the updated menu and TV ads were running.
# #### 3. At what level (day, week, month, etc.) should the data be aggregated?
#
# The data should be aggregated on a **weekly** basis.
# ## Clean Up Your Data
#
# <img src="img/1.png" width="500">
# <div align="center">
# Figure 1 - Alteryx Workflow for Data Cleaning and Aggregation
# </div>
# ## Match Treatment and Control Units
#
# *Apart from trend and seasonality...*
# #### 1. What control variables should be considered? Note: Only consider variables in the *RoundRoastersStore* file.
#
# **AvgMonthlySales** and **Sq_Ft** could be considered.
#
# #### 2. What is the correlation between each potential control variable and your performance metric?
#
# The following table shows the correlation analysis. **AvgMonthlySales** has a high correlation and will be chosen to be a control variable. **Sq_Ft.** on the other hand has a very low correlation and will not be used to match treatment and control stores.
#
# #### 3. What control variables will you use to match treatment and control stores?
#
# Additionally, to the **trend** and **seasonality** variables, the **AvgMonthlySales** will be used to match the treatment store to 2 control stores (high correlation).
#
# #### 4. Please fill out the table below with your treatment and control stores pairs:
#
# | **Treatment Store** | **Control Store 1** | **Control Store 2** |
# |---|---|---|
# | 1664 | 7162 | 8112|
# | 1675 | 1580 | 1807 |
# | 1696 | 1964 | 1863 |
# | 1700 | 2014 | 1630 |
# | 1712 | 8162 | 7434 |
# | 2288 | 9081 | 2564 |
# | 2293 | 12219 | 9524 |
# | 2301 | 3102 | 9238 |
# | 2322 | 2409 | 3235 |
# | 2341 | 12536 | 2382 |
#
#
# <img src="img/2.png" width="500">
# <div align="center">
# Figure 2 - Alteryx Workflow for Matching Treatment and Control Units
# </div>
# ## Analysis and Writeup
#
# #### 1. What is your recommendation - Should the company roll out the updated menu to all stores?
#
# It is **recommended** to roll out the updated menu, as the needed 18 % lift is exceeded by the A/B test with an *overall lift of 40.7 %*.
# Additionally, the *significance* of the A/B model is very high, which leads to the assumption, that the lift is generated through the new menu implementation.
#
# #### 2. What is the lift from the new menu for West and Central regions (include statistical significance)?
#
# **West:**
# Lift is 37.9 % with a 9.5 % significance.
# <img src="img/3.png" width="500">
# <div align="center">
# Figure 3 - Report for the West Region
# </div>
#
# **Central:**
# Lift is 43.5 % with a 99.6 % significance. Here is the report:
#
# <img src="img/4.png" width="500">
# <div align="center">
# Figure 4 - Report for the Central Region
# </div>
#
# #### 3. What is the lift from the new menu overall?
#
# The overall lift is 40.7 % with a 100 % significance.
# <img src="img/5.png" width="500">
# <div align="center">
# Figure 5 - Overall Report
# </div>
# <img src="img/6.png" width="500">
# <div align="center">
# Figure 6 - Alteryx Workflow
# </div>
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 ('MSCS-basic')
# language: python
# name: python3
# ---
def fast_exp(base, exponent, modulus):
    """Modular exponentiation by repeated squaring.

    Computes (base ** exponent) % modulus in O(log exponent) multiplications.

    Args:
        base: integer base.
        exponent: non-negative integer exponent.
        modulus: positive integer modulus.

    Returns:
        (base ** exponent) % modulus.
    """
    # Start from 1 % modulus (not plain 1) so that exponent == 0 with
    # modulus == 1 correctly yields 0 instead of 1.
    result = 1 % modulus
    while exponent != 0:
        if exponent % 2 == 1:  # low bit set -> multiply current square in
            result = (result * base) % modulus
        base = base**2 % modulus  # square for the next bit
        exponent = exponent // 2
    return result
def extended_euclidean(m, n):
    """Extended Euclidean algorithm.

    Returns (x, y, g) where g = gcd(m, n) and m*x + n*y == g.
    """
    # Base case: gcd(m, 0) = m, witnessed by 1*m + 0*n == m.
    if n == 0:
        return 1, 0, m
    # Recurse on (n, m mod n), then back-substitute the Bezout coefficients.
    a, b, g = extended_euclidean(n, m % n)
    return b, a - (m // n) * b, g
# Demo calls: 5^177 mod 11, 2^10 mod 10000, and Bezout coefficients for (175, 119).
fast_exp(5, 177, 11)
fast_exp(2, 10, 10000)
extended_euclidean(175, 119)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json
from datetime import datetime
import pandas as pd
from pandas.io.json import json_normalize
import matplotlib.pyplot as plt
# Data describing all bike rides
BIKE_RIDES_DATASET = './data/201905_Usage_Bicimad.json'
# The file is JSON-lines (one ride per line), Latin-1 encoded.
with open(BIKE_RIDES_DATASET, encoding='ISO-8859-1') as dataset:
    data = [json.loads(line) for line in dataset]
# NOTE(review): pandas.io.json.json_normalize is deprecated — modern pandas
# exposes it as pandas.json_normalize; update the import when upgrading.
bicimad = json_normalize(data)
bicimad.head(3)
# Ride duration in minutes and a parsed datetime from the unplug timestamp.
bicimad['travel_time_mins'] = bicimad['travel_time']/60
bicimad['date_timestr'] = bicimad['unplug_hourTime.$date']
# Timestamps carry a fixed +02:00 offset (Madrid summer time) in the raw data.
bicimad['date_time'] = bicimad.date_timestr.map(lambda x : datetime.strptime(x, '%Y-%m-%dT%H:%M:%S.000+0200'))
bicimad[['date_time','idunplug_station','travel_time_mins','idplug_station','user_type','ageRange']]
# Ride count (and duration stats) per hourly timestamp.
count_byhour = bicimad.groupby(['date_time'])['travel_time_mins'].describe()
count_byhour[['count']]
# +
# weekday assign
count_byhour['dayofweek'] = count_byhour.index.dayofweek # 0 is Monday
# count_byhour = count_byhour[count_byhour.dayofweek.isin([0,1,2,3,4])]
# count_byhour[['count','dayofweek']]
# -
# One plot per (work)week of May 2019.
count_byhour.loc['2019-05-01':'2019-05-05']['count'].plot()
plt.show()
count_byhour.loc['2019-05-06':'2019-05-10']['count'].plot()
plt.show()
count_byhour.loc['2019-05-13':'2019-05-17']['count'].plot()
plt.show()
count_byhour.loc['2019-05-20':'2019-05-24']['count'].plot()
plt.show()
count_byhour.loc['2019-05-27':'2019-05-30']['count'].plot()
plt.show()
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Find signature areas with and without missing signatures with Keras, Tesseract, and ImageMagick
# We want to find contract pages that have signature areas with missing signatures. Our strategy is to use Tesseract to search for typical words such as "som kjøper" and "som selger" that usually appear near a signature field, extract a "signature area" image around these words, and classify them as "containing a signature" or "not containing a signature".
import os, glob
import numpy as np
import random
from PIL import Image, ImageDraw, ImageFont, ImageEnhance
from bs4 import BeautifulSoup as soup
import re
import matplotlib.pyplot as plt
import csv
import faker
# We'll develop our approach using this example contract from EiendomsMegler 1: 
# We start by writing helper functions that will generate a text file with bounding boxes from the image we want to detect signatures in:
# +
def hocr_img(img_file_name, temp_file_name = "temp_pdf_png"):
    """Run Tesseract (Norwegian) on a single image and produce an hOCR file.

    Also writes a 300-dpi PNG copy of the image as <temp_file_name>-0.png so
    later cropping steps can read it. Previous temp files are removed first.
    The unused `file`/`suffix` locals from the original were dropped.

    Returns a one-element list with the generated hOCR file name.
    """
    # Clear leftovers from a previous run.
    os.system("rm {0}*".format(temp_file_name))
    # Normalized 300-dpi copy used by the cropping steps further down.
    # NOTE(review): Tesseract below runs on the *original* image, so the hOCR
    # coordinates match the original, not this converted copy — confirm the
    # two have the same pixel dimensions for the crops to line up.
    os.system("convert -density 300 -depth 8 -quality 85 {0} {1}-0.png".format(img_file_name, temp_file_name))
    os.system("tesseract -l nor {0} {1}-0 hocr".format(img_file_name, temp_file_name))
    #increase_contrast(img_file_name)
    hocr_files = ["{0}-0.hocr".format(temp_file_name)]
    return(hocr_files)
def hocr_pdf(pdf_file_name,temp_file_name = "temp_pdf_png"):
    """Convert a PDF to 300-dpi PNG page(s) and run Tesseract on each.

    Produces <temp_file_name>-<page>.png and matching .hocr files using the
    Norwegian language model. Returns the sorted list of hOCR file names.
    """
    # Clear leftovers from a previous run.
    os.system("rm {0}*".format(temp_file_name))
    # ImageMagick writes one PNG per page (suffixes -0, -1, ...) for
    # multi-page input, or a single un-suffixed PNG for single-page input.
    os.system("convert -density 300 -depth 8 -quality 85 {0} {1}.png".format(pdf_file_name, temp_file_name))
    png_files = glob.glob("{0}*.png".format(temp_file_name))
    for png_file in png_files:
        file = png_file.replace(".png","")
        os.system("tesseract -l nor {0} {1} hocr".format(png_file, file))
        #increase_contrast(png_file)
    # Single-page case: rename to the -0 suffix scheme so downstream code can
    # always address pages as <temp_file_name>-<n>.
    if os.path.isfile("{0}.png".format(temp_file_name)):
        os.system("mv {0}.png {0}-0.png".format(temp_file_name))
        os.system("mv {0}.hocr {0}-0.hocr".format(temp_file_name))
    hocr_files = sorted(glob.glob("{0}*.hocr".format(temp_file_name)))
    return(hocr_files)
# -
# We use beautifulsoup to find the elements containing the keywords we want to trigger signature detection in. By default we use "Som kjøper", "Som selger", "For kjøper", "For selger", "Kjøpers underskrift", "Selgers underskrift":
# +
def search_hocr_for_sentence(sentence, trigger_dict, hocr, page_num):
    """Find occurrences of `sentence` in an hOCR soup and record trigger boxes.

    Collects every word span on the page, then slides a window of
    len(sentence.split()) words looking for the sentence. For each match the
    bounding box of the *last* word is stored in trigger_dict under the key
    "<sentence> <occurrence-number>" together with the page number.

    Returns (trigger_dict, word_list, word_id_list, bbox_list).
    """
    word_span_list = hocr.find_all("span", {"id" : lambda L: L and L.startswith('word_')})
    word_list = []
    word_id_list = []
    bbox_list = []
    for word_span in word_span_list:
        #word = re.sub(r'[^\w\s]','',word_span.find(text=True))
        # Normalize OCR ';' to ':' before matching.
        word = re.sub(";",":", word_span.find(text=True))
        # NOTE(review): assumes hOCR word ids look like "word_1_<n>" — verify
        # this holds on multi-page documents where the prefix may differ.
        word_id = int(word_span["id"].replace("word_1_",""))
        # title attr looks like "bbox x0 y0 x1 y1; x_wconf ..." — keep coords.
        bbox = word_span["title"].replace(";","").split()[1:5]
        word_list.append(word)
        word_id_list.append(word_id)
        bbox_list.append(bbox)
    sentence_words = sentence.split()
    sentence_length=len(sentence_words)
    n=0
    for i in range(len(word_list)):
        # Cheap first-word filter before assembling the candidate window.
        if sentence_words[0] in word_list[i]:
            candidate_sentence = " ".join(word_list[i:(i+sentence_length)])
            if sentence in candidate_sentence:
                n+=1
                # Bounding box of the last word of the matched sentence.
                bbox = bbox_list[i+sentence_length-1]
                trigger_dict.update({sentence+" "+str(n) : {"page" : page_num,
                                                            "x0":int(bbox[0]),
                                                            "y0":int(bbox[1]),
                                                            "x1":int(bbox[2]),
                                                            "y1":int(bbox[3])}})
    return(trigger_dict, word_list, word_id_list, bbox_list)
def get_trigger_areas(hocr_list,
                      trigger_sentences = "trigger_sentences.csv"):
    """Search every hOCR page for the configured trigger sentences.

    trigger_sentences: path to a one-row CSV listing the sentences (e.g.
    "Som kjøper", "Som selger", ...) that mark signature areas.

    Returns (trigger_dict, word_list, word_id_list, bbox_list); note the last
    three reflect only the final page/sentence processed.
    """
    with open(trigger_sentences) as csvfile:
        reader = csv.reader(csvfile)
        trigger_sentences = list(reader)[0]
    trigger_dict = {}
    page_num=0
    for hocr_file in hocr_list:
        page_num+=1
        with open(hocr_file,"r") as file:
            hocr = soup(file, "html5lib")
        for trigger_sentence in trigger_sentences:
            trigger_dict, word_list, word_id_list, bbox_list = search_hocr_for_sentence(trigger_sentence,
                                                                                        trigger_dict,
                                                                                        hocr,
                                                                                        page_num)
    return(trigger_dict, word_list, word_id_list, bbox_list)
# -
# Detect trigger sentences in the sample contract and inspect the hits.
trigger_dict,_,_,_ = get_trigger_areas(hocr_pdf("samples/eiendomsmegler1_kontrakt.png"))
trigger_dict
# Let's extract images of each detected signature field. We'll use the font size (from the height of the bounding box) to scale the extracted area: The larger the font, the larger the extracted image:
def extract_signatures(trigger_dict,
                       rel_left=3,
                       rel_right=15,
                       rel_up=0,
                       rel_down=6,
                       temp_file_name="temp_pdf_png"):
    """Crop a candidate signature image around each trigger box.

    The crop is sized relative to the detected font height (trigger bbox
    height), so larger print yields a larger extracted area. rel_* are
    multiples of the font height: left/up shift the origin, right/down set
    the crop's width/height.

    Returns a list of PIL images, one per trigger.
    """
    sig_img_list = []
    for trigger in trigger_dict:
        # Page PNGs are named <temp>-0.png, <temp>-1.png, ... (0-based).
        page_file = temp_file_name + "-" + str(trigger_dict[trigger]["page"]-1) + ".png"
        page_img = Image.open(page_file)
        page_width = page_img.size[0]
        page_height= page_img.size[1]
        x0 = trigger_dict[trigger]["x0"]
        x1 = trigger_dict[trigger]["x1"]
        y0 = trigger_dict[trigger]["y0"]
        y1 = trigger_dict[trigger]["y1"]
        font_height = y1-y0
        # x1/y1 are computed from the already-shifted x0/y0, so rel_right and
        # rel_down are effectively the crop's width/height in font heights —
        # presumably intentional; confirm if changing rel_* defaults.
        x0 = np.max([x0 - font_height*rel_left, 0])
        x1 = np.min([x0 + font_height*rel_right, page_width])
        y0 = np.max([y0 - font_height*rel_up, 0])
        y1 = np.min([y0 + font_height*rel_down, page_height])
        crop_img = page_img.crop((x0,y0,x1,y1))
        sig_img_list.append(crop_img)
    return(sig_img_list)
# Crop the detected signature areas and eyeball the first two.
img_list = extract_signatures(trigger_dict)
img_list[0]
img_list[1]
# This works pretty well! Our next step is to build a training data set for the neural net that will decide if each image contains a signature. We'll generate a bunch of signature areas of various sizes, with and without signatures:
# +
def salt_image(img, salt_prob):
    """Randomly whiten pixels ("salt" noise) of an RGBA image in place.

    Each pixel is turned white with probability salt_prob while preserving
    its own alpha channel. Returns the (mutated) image.

    Fix: the original read the RGBA of one fixed, randomly chosen pixel and
    applied that pixel's alpha to every salted pixel; we now preserve each
    salted pixel's own alpha.
    """
    max_x, max_y = img.size
    pixels = img.load()
    for i in range(max_x):
        for j in range(max_y):
            if random.random() < salt_prob:
                r, g, b, a = pixels[i, j]
                pixels[i, j] = (255, 255, 255, a)
    return(img)
def generate_signature(new_width, new_height, alpha, filename):
    """Load a signature PNG, scale its alpha channel, and resize it.

    alpha in (0, 1] multiplies each pixel's transparency, fading the
    signature; the image is then resized to (new_width, new_height).
    """
    img = Image.open(filename)
    width, height = img.size
    pixdata = img.load()
    for y in range(height):
        for x in range(width):
            r, g, b, a = pixdata[x, y]
            # int(...) replaces np.int(...): np.int was removed in NumPy 1.24.
            pixdata[x, y] = (r, g, b, int(a * alpha))
    # Image.LANCZOS replaces Image.ANTIALIAS (alias removed in Pillow 10;
    # both name the same Lanczos filter).
    img = img.resize((new_width, new_height), Image.LANCZOS)
    return(img)
def signature_box_content(width,
                          height,
                          text="",
                          include_text=False,
                          signed=True,
                          horizontal_line = True,
                          name_under_line=False,
                          address_under_line=False,
                          max_salt=0.7, sig_directory="signatures", font_directory="fonts"):
    """Render one synthetic signature-box image for training data.

    Optionally draws a horizontal signing line, a trigger text, a fake name
    and/or address under the line, and (if signed) pastes a randomly scaled,
    faded and salted signature PNG from sig_directory.

    Returns (image, bottom_margin).
    """
    # int(...) replaces np.int(...) throughout: np.int was removed in NumPy 1.24.
    bottom_margin = int(np.max([np.round(height*0.15),1]))
    sig_img = Image.new("RGB", (width,height), color="white")
    fnt_size = int(bottom_margin*0.7)
    font = random.choice(os.listdir(font_directory))
    txt_fnt = ImageFont.truetype(font, fnt_size)
    # Random jitter for text / line placement.
    text_shift = random.uniform(0,1)
    line_shift = random.uniform(1,3)
    draw = ImageDraw.Draw(sig_img)
    fake = faker.Faker()
    if horizontal_line:
        draw.line((0, sig_img.size[1]-int(bottom_margin*line_shift), sig_img.size[0], sig_img.size[1]-int(bottom_margin*line_shift)), fill="black")
    if name_under_line:
        draw.text((0, sig_img.size[1]-int(bottom_margin*line_shift)), fake.name(), font=txt_fnt, fill="black")
    if address_under_line:
        draw.text((0, sig_img.size[1]-int(bottom_margin*(line_shift-1))), fake.address(), font=txt_fnt, fill="black")
    if include_text:
        # NOTE(review): text_shift*text_shift squares the jitter — looks like
        # a plain offset was intended; confirm before changing.
        draw.text((0,int(text_shift*text_shift)), text, font=txt_fnt, fill="black")
    if signed:
        sig_alpha = random.uniform(0.2, 1)
        sig_width = np.random.randint(low=int(width/3),high=int(width*1.5))
        sig_height = np.random.randint(low=int(height/3),high=int(height*1.5))
        sig_left_margin = np.random.randint(width-int(sig_width/2))
        sig_top_margin = np.random.randint(height-bottom_margin-int(sig_height/2))
        sig_filename = random.choice(os.listdir(sig_directory))
        salt_factor = random.uniform(0,max_salt)
        #print(sig_filename) Uncomment to debug signature.png-s with bad transparency masks
        signature = salt_image(generate_signature(sig_width, height-bottom_margin, sig_alpha, sig_directory+"/"+sig_filename),salt_factor)
        sig_img.paste(signature, (sig_left_margin,sig_top_margin), signature)
    return(sig_img, bottom_margin)
# -
def generate_signatures(N,
                        trigger_sentences = "trigger_sentences.csv",
                        pct_validate=0.1,
                        path="signature_output",
                        signed_probability=0.5,
                        horizontal_line_probability=0.5,
                        text_probability=0.5,
                        name_probability=0.5,
                        address_probability=0.5,
                        signature_width_min = 200,
                        signature_width_max = 600,
                        signature_height_min= 100,
                        signature_height_max= 400):
    """Write N synthetic signature-box images into a Keras directory layout.

    Output tree: <path>/{train,validation}/{signed,not_signed}/<n>.png, with
    the first (1 - pct_validate) fraction going to train/. Each box randomly
    gets a signature, line, trigger text, fake name and address according to
    the *_probability arguments. Returns a status string.
    """
    # int(...) replaces np.int(...): np.int was removed in NumPy 1.24.
    N_length = len(str(N))
    with open(trigger_sentences) as csvfile:
        reader = csv.reader(csvfile)
        trigger_sentences = list(reader)[0]
    # Recreate the output directory tree from scratch.
    os.system("rm -R {0}".format(path))
    os.system("mkdir {0}".format(path))
    os.system("mkdir {0}/train".format(path))
    os.system("mkdir {0}/train/signed".format(path))
    os.system("mkdir {0}/train/not_signed".format(path))
    os.system("mkdir {0}/validation".format(path))
    os.system("mkdir {0}/validation/signed".format(path))
    os.system("mkdir {0}/validation/not_signed".format(path))
    for n in range(0,(N)):
        # Random box geometry; width is capped at 1.6x the height.
        height = np.random.randint(low=signature_height_min, high=signature_height_max)
        width = int(np.min([height*1.6,np.random.randint(low=signature_width_min, high=signature_width_max)]))
        box_is_signed = np.random.rand()<signed_probability
        box_has_line = np.random.rand()<horizontal_line_probability
        box_has_text = np.random.rand()<text_probability
        box_has_name = np.random.rand()<name_probability
        box_has_address = np.random.rand()<address_probability
        sign_box_text = random.choice(trigger_sentences)
        sig_img, _ = signature_box_content(width,
                                           height,
                                           sign_box_text,
                                           box_has_text,
                                           box_is_signed,
                                           box_has_line,
                                           box_has_name,
                                           box_has_address)
        #n_string = str(n+1).zfill(N_length)
        n_string = str(n+1)
        N_train = int(N*(1-pct_validate))
        if n<N_train:
            if box_is_signed:
                sig_img.save(path+"/train/signed/"+n_string+".png")
            else:
                sig_img.save(path+"/train/not_signed/"+n_string+".png")
        else:
            if box_is_signed:
                sig_img.save(path+"/validation/signed/"+n_string+".png")
            else:
                sig_img.save(path+"/validation/not_signed/"+n_string+".png")
        # Progress heartbeat every 100 images.
        if (n+1) % 100 == 0:
            print(str(n+1) + " of " +str(N))
    return("Done! Synthetic signatures written to "+path)
# Build a 2000-image synthetic train/validation set under signature_output/.
generate_signatures(2000)
# Now let's see if we can teach a small CNN to tell the difference between signed signature areas and signature areas with missing signatures (see https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html for details):
# +
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
# dimensions of our images.
img_width, img_height = 150, 150
train_data_dir = 'signature_output/train'
validation_data_dir = 'signature_output/validation'
nb_train_samples = 1800
nb_validation_samples = 200
epochs = 20
batch_size = 15
# Handle Theano-style vs TensorFlow-style channel ordering.
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
# Augment training images (shear/zoom/flip) and rescale pixels to [0, 1].
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
# Validation images are only rescaled, never augmented.
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
# Small 3-conv-block CNN with a sigmoid head for the binary
# signed / not-signed decision.
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
#model.add(Conv2D(128, (3, 3)))
#model.add(Activation('relu'))
#model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
# NOTE(review): fit_generator is deprecated in modern Keras (use model.fit) —
# kept as-is for the Keras version this notebook targets.
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)
#model.save_weights('first_try.h5')
model.save('model.h5')
# -
# The model validates well on the synthetic signatures, but what about real cases not based on synthetic data? Let's give it a go on the contract example from Eiendomsmegler 1:
# +
from keras.models import load_model
from keras.preprocessing import image
from keras import backend as K
def predict_image(model_name, img, threshold=0.5, show=False):
    """Use a pretrained CNN to judge if an image contains a signature.

    Args:
        model_name: path to a saved Keras model (.h5).
        img: PIL image of a candidate signature area.
        threshold: decision boundary on the sigmoid output.
        show: if True, display the preprocessed network input.

    Returns:
        (signed, confidence): signed is a bool; confidence is rescaled
        relative to the threshold into [0, 1].
    """
    img = img.convert('RGB')
    # Image.LANCZOS replaces Image.ANTIALIAS (alias removed in Pillow 10;
    # both name the same Lanczos filter).
    img = img.resize((150, 150), Image.LANCZOS)
    img_tensor = image.img_to_array(img)  # (height, width, channels)
    img_tensor = np.expand_dims(img_tensor, axis=0)  # add batch dim -> (1, h, w, c)
    img_tensor /= 255.  # model was trained on [0, 1] inputs
    if show:
        plt.imshow(img_tensor[0])
        plt.axis('off')
        plt.show()
    # The model is reloaded on every call; K.clear_session() avoids graph
    # buildup at the cost of reload time — hoist the load if throughput matters.
    K.clear_session()
    model = load_model(model_name)
    pred = model.predict(img_tensor)
    if pred[0] < threshold:
        conclusion = False  # "Not signed!"
        confidence = (threshold-pred)/threshold
    else:
        conclusion = True  # "Signed!"
        confidence = (pred-threshold)/(1-threshold)
    return conclusion, np.round(confidence, 4)
# -
predict_image("model.h5",img_list[0],show=True)
predict_image("model.h5",img_list[1],show=True)
# "False" means "Not signed" in this contect, and 1. means the model is pretty damn confident about its' conclusion. Looking good! Now what if we sign this contract by hand, with a signature not earlier seen by the model during training?
#
# 
# Re-run detection on the hand-signed version of the same contract.
signed_trigger_dict,_,_,_ = get_trigger_areas(hocr_img("samples/eiendomsmegler1_kontrakt_signed.png"))
signed_img_list = extract_signatures(signed_trigger_dict)
signed_img_list[0]
signed_img_list[1]
predict_image("model.h5",signed_img_list[0],show=True)
predict_image("model.h5",signed_img_list[1],show=True)
# Well done, AI-signature-detector!
# Let's create some functions to gather all of the above together:
# +
def score_signatures(trigger_dict,
                     rel_left=3,
                     rel_right=10,
                     rel_up=0,
                     rel_down=6,
                     model="model.h5",
                     temp_file_name="temp_pdf_png"):
    """Use a pretrained CNN to search for signatures in each trigger area in trigger_dict. The results are added to trigger_dict and the latter returned."""
    for trigger in trigger_dict:
        # Page PNGs are named <temp>-0.png, <temp>-1.png, ... (0-based).
        page_file = temp_file_name + "-" + str(trigger_dict[trigger]["page"]-1) + ".png"
        page_img = Image.open(page_file)
        page_width = page_img.size[0]
        page_height= page_img.size[1]
        x0 = trigger_dict[trigger]["x0"]
        x1 = trigger_dict[trigger]["x1"]
        y0 = trigger_dict[trigger]["y0"]
        y1 = trigger_dict[trigger]["y1"]
        # Crop window sized in multiples of the detected font height and
        # clamped to the page (same scheme as extract_signatures).
        font_height = y1-y0
        x0 = np.max([x0 - font_height*rel_left, 0])
        x1 = np.min([x0 + font_height*rel_right, page_width])
        y0 = np.max([y0 - font_height*rel_up, 0])
        y1 = np.min([y0 + font_height*rel_down, page_height])
        crop_img = page_img.crop((x0,y0,x1,y1))
        signed, confidence = predict_image(model,crop_img,show=False)
        # float(...) replaces np.float(...): np.float was removed in NumPy 1.24.
        trigger_dict[trigger].update({"x0":int(x0),
                                      "x1":int(x1),
                                      "y0":int(y0),
                                      "y1":int(y1),
                                      "signed":signed,
                                      "confidence":float(confidence)})
    return(trigger_dict)
def score(hocr_files):
    """Convenience wrapper: find trigger areas, then score each one for a signature."""
    triggers, _, _, _ = get_trigger_areas(hocr_files)
    return score_signatures(triggers)
# -
# Let's try our scoring function on Forbrukerrådet's recommended contract. We expect the (unsigned) signatures to be on page 5:
score(hocr_pdf("samples/forbrukerrådet_kontrakt.pdf"))
# And what about a contract for buying a used car?
score(hocr_pdf("samples/bil_kontrakt.pdf"))
# This is really all we need! But... wouldn't it be cool to also return the PDF with the signature detections marked? Yeah, let's give it a try:
# +
import img2pdf
def visualize_detections(trigger_dict, filename, fnt_size=30, temp_file_name = "temp_pdf_png"):
    """Draw detection boxes and labels onto the temp page PNGs; return PDF bytes.

    Green box = signed, red box = not signed; each label carries the trigger
    name and the rounded confidence. `filename` is currently unused (kept for
    interface compatibility). Mutates the temp page PNGs in place.
    """
    png_file_dict = {}
    for trigger in trigger_dict:
        page_file = temp_file_name + "-" + str(trigger_dict[trigger]["page"]-1) + ".png"
        img = Image.open(page_file).convert("RGB")
        drawing = ImageDraw.Draw(img)
        top_left = (trigger_dict[trigger]["x0"],trigger_dict[trigger]["y0"])
        bottom_right= (trigger_dict[trigger]["x1"],trigger_dict[trigger]["y1"])
        text_start_x = trigger_dict[trigger]["x0"]
        text_start_y = trigger_dict[trigger]["y1"]-fnt_size
        text_start = (text_start_x, text_start_y)
        txt_fnt = ImageFont.truetype("DejaVuSerif.ttf", fnt_size)
        if trigger_dict[trigger]["signed"]==True:
            color="green"
            # Round to 3 decimals like the not-signed branch (the original
            # rounded to 0 decimals here, collapsing confidence to 0 or 1).
            text = str(trigger)+" Signed! Conf.: " + str(np.round(trigger_dict[trigger]["confidence"],3))
        else:
            color="red"
            text = str(trigger)+" Not signed! Conf.: " + str(np.round(trigger_dict[trigger]["confidence"],3))
        drawing.rectangle([top_left, bottom_right], outline=color)
        drawing.text(text_start, text, font=txt_fnt, fill=color)
        img.save(page_file)
    img_list = sorted(glob.glob("{0}*.png".format(temp_file_name)))
    #with open(filename, "wb") as f:
    #    f.write(img2pdf.convert(img_list))
    pdf_resp = img2pdf.convert(img_list)
    return (pdf_resp)
# +
# Score the car contract and write an annotated PDF with the detections.
bil_dict = score(hocr_pdf("samples/bil_kontrakt.pdf"))
with open("bil_viz.pdf","wb") as f:
    f.write(visualize_detections(bil_dict, "samples/bil_kontrakt.pdf"))
# -
# Go ahead and [have a look](bil_viz.pdf) :)
|
# ---
# layout: post
# title: "[ISS 세미나] 김태영님, 블록과 함께하는 파이썬 딥러닝 케라스"
# author: 김태영
# date: 2018-03-19 14:00:00
# categories: seminar
# comments: true
# image: http://tykimos.github.io/warehouse/2018-3-19-ISS_Python_Deep_Learning_Keras_with_Blocks_title.png
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 이번 ISS에서는 "블록과 함께하는 파이썬 딥러닝 케라스"란 주제로 발표합니다. 딥러닝 기본 개념을 익히고 딥러닝 기반 모델을 쉽게 만들어볼 수 있는 ‘케라스’라는 딥러닝 라이브러리에 대해서 알아봅니다. 케라스 코드를 ‘블록’ 개념과 매칭하여 직관적으로 모델을 이해할 수 있는 방법에 대해 연습한 후 다양한 기초 문제를 살펴봅니다. 생성모델과 딥강화학습에 대해서도 어떤 것인지 대충 알아보겠습니다. 그리고 분야별 라이트닝 톡을 40분간 진행할 예정입니다. 많은 관심 부탁드리겠습니다.
#
# ISS란 Intelligence Space Seminar의 약자로 인공지능 기술과 관련된 인스페이스의 사내 세미나를 말합니다. 어렵게 모신 전문가분들의 주옥같은 내용을 공유하고자 오픈 세미나로 진행하고 있습니다.
#
# 
# ---
# ### 발표자 및 발표자료
#
# |구분|소개|
# |-|-|
# ||김태영, (주)인스페이스 (기초 강좌)<br><br>[블록과 함께하는 파이썬 딥러닝 케라스]<br><br>비전공자분들이 쉽게 딥러닝 모델을 개발할 수 있도록 케라스 라이브러리 소개와 블록 비유를 통해 다양한 모델을 살펴보겠습니다.<br><br>[발표자료보기](https://docs.google.com/presentation/d/1dCyZmxGQgICmUp4t_ora4K3q2J52oxPw5CWFIrC0J-k/edit?usp=sharing)|
# ||유용균, 한국원자력연구원 (라이트닝 톡)<br><br>[딥러닝을 이용한 최적설계 및 시뮬레이션 소개]<br><br>딥러닝 기술은 물리 화학 기계 등 광범위한 과학 분야에 적용되고 있습니다. 전산 시물레이션 및 최적설계 분야에 대한 적용 사례와 진행 중인 연구를 간단하게 소개드리겠습니다.<br><br>[발표자료보기](http://tykimos.github.io/warehouse/2018-3-19-ISS_Python_Deep_Learning_Keras_with_Blocks_yoyogo96.pdf)|
# ||송규예, OrbisAI (라이트닝 톡)<br><br>[딥러닝을 활용한 IHCI 측면에서의 감성분석]<br><br>딥러닝도 결국 인간을 위한 기술이므로, 딥러닝이 적용된 기계와 인간과의 상호작용 측면에 있어 화두가 되는 것이 '감성' 부분입니다. 딥러닝을 활용한 인간의 감성분석 연구사례와 이와 관련한 일을 하고 있는 저희 회사를 소개합니다.<br><br>[발표자료보기](http://tykimos.github.io/warehouse/2018-3-19-ISS_Python_Deep_Learning_Keras_with_Blocks_ivyheart2.pdf)|
# ||박상민, 대덕소프트웨어마이스터고등학교 (라이트닝 톡)<br><br>[근전도 생체신호 데이터로 손 모양 생성하기]<br><br>인공지능 개발자를 꿈꾸는 한국의 어느 평범한(?) 고등학교 3학년입니다. 임베디드 SW와 딥러닝을 주로 공부하고 있습니다. 근전도 생체신호, 자이로 등의 데이터와 딥러닝의 생성모델(GAN)로 손의 모양을 생성해내는 프로젝트에 대해서 소개드리겠습니다.<br><br>[발표자료보기](http://tykimos.github.io/warehouse/2018-3-19-ISS_Python_Deep_Learning_Keras_with_Blocks_ivyheart2.pdf)|
# ||전미정, iOS 개발자 (라이트닝 톡)<br><br>[케라스와 함께하는 모바일 딥러닝]<br><br>딥러닝을 모바일에서 어떻게 활용하는지에 대해 소개하고 케라스를 이용해 실제 모바일에서 구동하는 애플리케이션 제작을 공유합니다.<br><br>[발표자료보기](http://tykimos.github.io/warehouse/2018-3-19-ISS_Python_Deep_Learning_Keras_with_Blocks_jmj.pdf)|
# ||박은수, 경희대학교 우주탐사학과 (라이트닝 톡)<br><br>[우주기상 소개 및 딥러닝 적용 사례]<br><br>우주기상은 천문학 분야에서 우리 일상 생활과 가장 밀접한 분야 중 하나입니다. 우주기상이란 무엇인지, 이 분야에서 수십년간 축적되어온 방대한 양의 데이터, 그리고 최근 딥러닝을 적용하여 수행하고 있는 연구에 대하여 소개합니다.<br><br>[발표자료보기](http://tykimos.github.io/warehouse/2018-3-19-ISS_Python_Deep_Learning_Keras_with_Blocks_pes.pdf)|
# ---
# ### 프로그램
#
# * 일시: 2018년 3월 19일 오후 2시 ~ 오후 6시
# * 장소: 대전 유성구 대학로291 KAIST 나노종합기술원 9F 대전창조경제혁신센터 E19
# * 인사나누기 (13:40~14:00)
# * 1부 (14:00~14:50)
# * [딥러닝 이야기] '딥러닝'에서 왜 '딥'이고, 무엇을 '러닝(학습)'하는 지에 대한 이야기를 합니다.
# * [케라스 이야기] '케라스'에 대한 의미와 케라스의 특징 및 장단점에 대해서 알아봅니다.
# * [케라스 개념잡기] 가장 기초적인 케라스 샘플 코드를 살펴보고 학습 방법에 대해서 살펴봅니다.
# * [태양에서 세포까지 극한알바] 여러 분야에서 케라스 기반 딥러닝 모델 사례를 살펴봅니다.
# * [케라스 코리아] 모두의 손에 딥러닝 툴을 쥐어지게 하자라는 케라스 비전을 공유하기 위한 그룹에 대해서 알아봅니다. 여러 분야에서 적용하고자 하는 노력에 대해서 알아봅니다.
# * 분야별 라이트닝 톡 (15:00~15:30)
# * [딥러닝을 이용한 최적설계 및 시뮬레이션 소개] 유용균님, 한국원자력연구원
# * [딥러닝을 활용한 IHCI 측면에서의 감성분석] 송규예님, OrbisAI
# * 2부 (15:30~16:20)
# * [레이어 개념잡기] 가장 기초적인 뉴런부터 다층퍼셉트론 신경망, 컨볼루션 신경망, 순환 신경망을 구성하고 있는 레이어에 대한 개념을 알아봅니다.
# * [딥브릭 이야기] 딥러닝 레이어 개념을 손에 잡히는 실체로 단순화 시킨 '블록’에 비유한 딥브릭에 대해서 알아봅니다.
# * [딥브릭 레시피 살펴보기] 문제에 따른 딥브릭 레시피를 살펴보면서 문제와 모델을 매칭시키는 연습을 해봅니다.
# * 분야별 라이트닝 톡 (16:30~17:15)
# * [근전도 생체신호 데이터로 손 모양 생성하기] 박상민님, 대덕소프트웨어마이스터고등학교
# * [케라스와 함께하는 모바일 딥러닝] 전미정님, iOS 개발자
# * [우주기상 소개 및 딥러닝 적용 사례] 박은수님, 경희대학교 우주탐사학과
# * 3부 (17:15~17:50)
# * [생성모델(GAN) 살펴보기] 딥러닝 모델의 네트워크, 학습목표, 최적화기에 대한 기본 개념을 익힙니다.
# * [딥강화학습 살펴보기] 딥강화학습이 무엇인지만 간략히 살펴봅니다.
# ---
#
# ### 분야별 라이트닝 톡 신청
#
# 아래와 같은 주제로 간단하게 발표하실 분을 세미나에 모시고자 합니다. 라이트닝 톡을 하실 분은 저에게 이메일(tykim@inspace.re.kr) 보내주세요.
#
# * 해당 분야에서 딥러닝 모델을 적용하신 사례 소개
# * 아직 딥러닝 모델을 적용하지는 못했으나 보유하고 있는 데이터셋 소개
# * 기관 내에 전문 인공지능 팀이 있을 경우 팀 소개
# ---
#
# ### 참가신청
#
# 스팀잇 가입자분들은 [https://steemit.com/deeplearning/@tykimos/iss](https://steemit.com/deeplearning/@tykimos/iss)에서 신청하시고 그렇지 않으신 분은 아래 댓글로 달아주세요. 이메일만 있으면 비회원으로 쉽게 댓글을 달 수 있습니다. 대관 장소 수용인원이 최대 200명이라 선착순 200분까지만 받겠습니다. 여러 경로로 참가신청을 받기 때문에 등록하시면 제가 순번을 알려드리도록 하겠습니다. 댓글 양식은 아래와 같으며 '한 줄'로 작성 부탁드리겠습니다.
#
# * 이름, 기관, 이메일, 분야, 참석계기
# * 예) 김태영, 인스페이스, tykim@inspace.re.kr, 우주, 위성 운영 효율화를 위해 강화학습을 적용해보고자 합니다.
#
# 댓글을 달아도 스팸처리되어서 바로 표시 안될 수도 있습니다. 제가 다 스팸아님으로 처리하고 있으니, 크게 신경 안 쓰셔도 됩니다. 그리고 혹시 참석신청하셨으나 부득이한 이유로 참석이 힘드신 분은 미리 알려주세요~ 다른 분들에게 참석 기회를 드리고자 합니다.
# ---
#
# ### 후기
#
# 시간내기 힘든 월요일 오후에 비까지 오는데도 불구하고 백여명이 넘는 많은 분들이 함께해주셨습니다. 참석해주시면 분들 너무 감사드립니다. 그리고 라이트닝 톡을 준비해주시고 발표해주신 분들도 너무 감사드립니다. 분야별 톡이 점점 활성화가 되었으면 좋겠습니다.
#
# 
# ---
#
# ### 같이 보기
#
# * [다른 세미나 보기](https://tykimos.github.io/seminar/)
# * [케라스 기초 강좌](https://tykimos.github.io/lecture/)
# * [케라스 코리아](https://www.facebook.com/groups/KerasKorea/)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Content:
# 1. Simple K-Means Cluster on 2D data
# 2. General K-Means Cluster works with data of any dimension
# 3. Simple Image Compression (Don't use large image file)
# 4. Optimized Image Compression
# 5. Using SkLearn (Always use library)
# +
import random
import math
from tqdm import tnrange, tqdm_notebook
from matplotlib import pyplot
from pprint import pprint
from functools import lru_cache
def vector_addiction(vector1, vector2):
    """Element-wise sum of two vectors (truncates to the shorter input)."""
    paired = zip(vector1, vector2)
    return [a + b for a, b in paired]
def scalar_multiplication(vector, scalar):
    """Scale every component of *vector* by *scalar*."""
    return list(map(lambda component: scalar * component, vector))
def index_where_equals(vector, value):
    """Indices of all positions in *vector* whose element equals *value*."""
    return [idx for idx, item in enumerate(vector) if item == value]
def random_list(no,minimum=0,maximum=100):
    """Return *no* random integers drawn uniformly from [minimum, maximum]."""
    values = []
    for _ in range(no):
        values.append(random.randint(minimum, maximum))
    return values
def euclidian_distance(vector1, vector2):
    """Euclidean (L2) distance between two points given as coordinate sequences."""
    squared_diffs = ((a - b) ** 2 for a, b in zip(vector1, vector2))
    return math.sqrt(sum(squared_diffs))
def get_column(matrix, column):
    """Return column *column* of a row-major matrix as a list."""
    return [row[column] for row in matrix]
# -
# # K-Means Clustering
class KMeans:
    """From-scratch K-means clustering.

    Data layout: X is a list of *dimension rows* -- X[d][p] is the d-th
    coordinate of point p -- so a 2-D data set is passed as [xs, ys].
    """
    def __init__(self,no_of_clusters = 2):
        self.no_of_clusters = no_of_clusters
    def initial_cluster_center(self):
        """Draw one random integer starting centre per cluster, inside the
        per-dimension [min, max] range of the fitted data."""
        cluster_center = []
        for b in range(self.no_of_clusters):
            temp = []
            for i, j in zip(self.min_values,self.max_values):
                temp.append(random.randint(i, j))
            cluster_center.append(temp)
        return cluster_center
    def fit(self,X,iterations=5):
        """Run `iterations` rounds of assign/relocate on X (dimension-row layout)."""
        self.X = X
        # Points as coordinate lists: transpose[p] is the p-th point.
        self.transpose = [self.get_column(X,i) for i in range(len(X[0]))]
        self.min_values, self.max_values = [min(i) for i in X], [max(i) for i in X]
        self.no_of_dimension = len(X)
        can_visualise = self.no_of_dimension == 2
        self.cluster_center = self.initial_cluster_center()
        for i in tqdm_notebook(range(iterations)):
            self.assign_cluster()
            self.relocate_clusters()
        # Final assignment against the last relocated centres.
        self.assign_cluster()
        if not can_visualise:
            print('Can only visualise 2d data')
    def nearest_cluster(self,point):
        """Index of the centre closest (Euclidean) to `point`."""
        min_cluster = None
        min_distance = None
        for i in range(len(self.cluster_center)):
            distance = euclidian_distance(self.cluster_center[i], point)
            # BUG FIX: was `if not min_distance or ...`, which treated a
            # legitimate best distance of exactly 0 as "unset" and let a
            # worse cluster overwrite it.
            if min_distance is None or distance < min_distance:
                min_distance = distance
                min_cluster = i
        return min_cluster
    def get_column(self,matrix, column):
        """Column `column` of a row-major matrix, as a list."""
        return list(map(lambda x: x[column], matrix))
    def assign_cluster(self):
        """Assign every point to its nearest centre; returns the assignment list."""
        self.cluster_assignment = []
        for i in range(len(self.X[0])):
            point = self.get_column(self.X,i)
            self.cluster_assignment.append(self.nearest_cluster(point))
        return self.cluster_assignment
    def relocate_clusters(self):
        """Move each centre to the mean of its currently assigned points.

        Clusters that captured no points keep their previous centre.
        """
        temp = {}
        no_of_points_in_cluster = {}
        for cluster_no,vector2 in zip(self.cluster_assignment,self.transpose):
            temp.setdefault(cluster_no,[0 for i in range(self.no_of_dimension)])
            temp[cluster_no] = vector_addiction(temp[cluster_no],vector2)
            no_of_points_in_cluster.setdefault(cluster_no, 0)
            no_of_points_in_cluster[cluster_no] += 1
        for key, value in temp.items():
            self.cluster_center[key] = scalar_multiplication(value, 1/no_of_points_in_cluster[key])
        return self.cluster_center
    def visualize(self, save_name = None):
        """Scatter-plot points coloured by cluster (2-D data only).

        Supports at most 5 clusters (length of the colour/marker palettes).
        """
        if self.no_of_dimension !=2:
            print('Can only visualise 2')
            return
        graph = pyplot
        graph.clf()
        cluster_x = [i[0] for i in self.cluster_center]
        cluster_y = [i[1] for i in self.cluster_center]
        colors = ['r', 'b', 'g', 'k', 'm']
        markers = ['P','v','*','s','X']
        for i in range(len(self.cluster_center)):
            point_no = index_where_equals(self.cluster_assignment,i)
            x = [self.X[0][i] for i in point_no]
            y = [self.X[1][i] for i in point_no]
            graph.scatter(x, y, c= colors[i])
            graph.scatter([cluster_x[i]], [cluster_y[i]],c=colors[i], marker = markers[i])
        if save_name:
            # BUG FIX: was `graph.title = save_name`, which *replaced* the
            # pyplot.title function module-wide instead of setting the title.
            graph.title(save_name)
            # NOTE(review): hard-coded local path -- only works on the author's machine.
            graph.savefig('/Users/ankushchoubey/Desktop/'+str(save_name) +'.png')
no_of_points = 100
x = random_list(no_of_points,0,40)   # random 2-D point cloud for the demo
y = random_list(no_of_points,0,40)
# +
# check it step by step
engine_general = KMeans(5)           # 5 clusters on the (x, y) cloud
engine_general.fit([x,y],10)         # 10 assign/relocate iterations
engine_general.cluster_assignment    # notebook cell output: cluster index per point
pprint(engine_general.cluster_center)
#print(engine_general.nearest_cluster([1,2]))
# -
engine_general.visualize()
# # 3. Simple Image Compression (Don't use large image file)
no_of_colors = 16   # target palette size (unused below; fit() is later called with 32)
from PIL import Image
# NOTE(review): hard-coded local path -- only runs on the author's machine.
image_large = Image.open('/Users/ankushchoubey/Downloads/1.jpg')
#image.show()
#image_small = Image.open('/Users/ankushchoubey/Downloads/small.jpg')
# # 4. Optimized Image Compression
def min_max_scaling(x, x_min, x_max,new_x_min,new_x_max):
    """Linearly map x from the range [x_min, x_max] onto [new_x_min, new_x_max]."""
    fraction = (x - x_min) / (x_max - x_min)
    return new_x_min + fraction * (new_x_max - new_x_min)
@lru_cache(maxsize=256)
def scale8_bit(no):
    """Quantize an 8-bit channel value into a coarse six-level scale.

    Cached: there are only 256 possible inputs.
    """
    level = no * 6 / 256    # fractional bucket in [0, 6)
    return int(level * 36)  # spread buckets over 0-215 in steps of ~36
class ImageCompress:
    """Reduce an image to a small colour palette via K-means on its pixels.

    Caching note: the original decorated the instance methods below with
    functools.lru_cache, which keys on `self` and keeps every instance alive
    for the cache's lifetime (ruff B019). Per-instance dict caches give the
    same results without that leak.
    """
    def __init__(self):
        self._scaling_cache = {}   # pixel-count -> scaled duplicate count
        self._color_cache = {}     # (r, g, b) -> nearest palette colour
    def min_max_scaling(self,x):
        """Rescale a colour's pixel count into [range_start, range_end] (set by fit)."""
        if x not in self._scaling_cache:
            self._scaling_cache[x] = (x - self.mini)*self.product_factor + self.range_start
        return self._scaling_cache[x]
    def nearest_color(self, r,g,b):
        """Return the palette colour (cluster centre) closest to (r, g, b)."""
        key = (r, g, b)
        if key not in self._color_cache:
            cluster_at_index = self.kmeans.nearest_cluster([r,g,b])
            cluster_point = self.kmeans.cluster_center[cluster_at_index]
            self._color_cache[key] = tuple([int(i) for i in cluster_point])
        return self._color_cache[key]
    def fit(self, image,no_of_colors,iterations, quality=0.1):
        """Compress `image` down to `no_of_colors` colours and return a new image.

        quality scales down the per-colour duplicate counts used to weight the
        K-means sample (lower = fewer samples = faster).
        Assumes an RGB image whose pixels are 3-tuples -- TODO confirm for other modes.
        """
        width, height = image.size
        colors = image.getcolors(width*height)
        no = get_column(colors,0)
        scaled_version = []
        self.mini = min(no)
        self.maxi = max(no)
        self.range_start = 0
        self.range_end = self.maxi*quality
        self.product_factor = (self.range_end-self.range_start)/(self.maxi-self.mini)
        for i in tqdm_notebook(no, desc='Scaling Values'):
            scaled_version.append(int(self.min_max_scaling(i)))
        self._scaling_cache.clear()
        x_l,y_l,z_l=[],[],[]
        for i in tqdm_notebook(range(len(scaled_version)), desc='getting image data'):
            if scaled_version[i]==0:
                continue
            # Repeat each colour proportionally to its scaled pixel count so
            # frequent colours weigh more in the clustering.
            for j in range(scaled_version[i]):
                x,y,z = colors[i][1]
                x_l.append(x)
                y_l.append(y)
                z_l.append(z)
        self.kmeans = KMeans(no_of_colors)
        self.kmeans.fit([x_l,y_l,z_l],iterations=iterations)
        del x_l, y_l, z_l
        new_pixel_values=[]
        for i in tqdm_notebook(list(image.getdata())):
            change_color = self.nearest_color(i[0],i[1],i[2])
            new_pixel_values.append(change_color)
        self._color_cache.clear()
        im2 = Image.new(image.mode, image.size)
        im2.putdata(new_pixel_values)
        return im2
compressor = ImageCompress()
# quality=0.025 keeps only ~2.5% of the duplicate-count weight per colour,
# shrinking the K-means sample dramatically.
compressed_image = compressor.fit(image_large,no_of_colors=32,iterations=5, quality=0.025)
colors = image_large.getcolors(image_large.width*image_large.height)
colors
# NOTE(review): hard-coded local path -- only runs on the author's machine.
compressed_image.save('/Users/ankushchoubey/Downloads/fast_compressed_small.jpg')
# # 5. Sklearn
from sklearn.cluster import KMeans
from PIL import Image
import numpy as np
# Pixel matrix: one (r, g, b) row per pixel of the large image.
X = np.array(image_large.getdata())
X
engine = KMeans(n_clusters=32)
cluster_allocation = engine.fit_predict(X)
cluster_centers = engine.cluster_centers_.tolist()
# +
# Snap every pixel to the integer colour of its assigned cluster centre.
list_of_points = []
for i in cluster_allocation.tolist():
    list_of_points.append(tuple([int(j) for j in cluster_centers[i]]))
# -
# BUG FIX: this cell used `image_small`, which is only defined in a
# commented-out line above (NameError). The pixel data in X came from
# `image_large`, whose mode/size therefore match `list_of_points`.
im2 = Image.new(image_large.mode, image_large.size)
im2.putdata(list_of_points)
im2.show()
import numpy as np
import pandas as pd
class KMeans:
    """K-means over a pandas DataFrame (one row per point, one column per axis)."""
    def __init__(self, noOfClusters):
        self.k = noOfClusters
    def initial_clusters_centers(self, data_range):
        """Draw k random centres uniformly inside the per-column [min, max] range.

        data_range: 2-row DataFrame -- row 0 holds column minima, row 1 maxima.
        """
        cluster_centers = {}
        # .items() -- DataFrame.iteritems() was removed in pandas 2.0.
        for name, values in data_range.items():
            random_values = np.random.uniform(low=values[0], high=values[1], size=(self.k))
            cluster_centers[name] = random_values
        return pd.DataFrame(cluster_centers)
    def fit(self, data, iterations):
        """Cluster `data`, appending a 'cluster' column with the final assignment."""
        self.data = data
        data_range = pd.DataFrame([self.data.min(), self.data.max()])
        self.cluster_centers = self.initial_clusters_centers(data_range)
        # Feature columns, captured before the 'cluster' column is added.
        self.axis = self.data.columns.values
        self.data['cluster'] = self.allocate(data[self.axis])
        for i in tqdm_notebook(range(iterations)):
            self.data['cluster'] = self.allocate(data[self.axis])
            temp = self.cluster_centers
            self.cluster_centers = self.relocate()
    def nearest_cluster(self, point):
        """Return the Euclidean distance from `point` to every current centre."""
        temp = self.cluster_centers-point
        return np.sqrt(np.sum( np.square( temp ), axis=1 ))
    def allocate(self, data):
        """Assign each row of `data` to the index of its nearest centre."""
        allocation = []
        for i in data.index.values:
            distances = self.nearest_cluster(data.iloc[i])
            assigned_cluster = distances.sort_values().index.values[0]
            allocation.append(assigned_cluster)
        return allocation
    def relocate(self):
        """Move every centre to the mean of the points currently assigned to it."""
        relocation = {}
        for i in self.cluster_centers.index.values:
            # BUG FIX: the original filtered the module-level global `data`
            # (masked because the demo script's global happened to be the
            # same object); the fitted frame lives on self.data.
            temp = self.data[self.data['cluster']==i]
            relocation[i] = temp[self.axis].mean()
        return pd.DataFrame(relocation).transpose()
data = np.random.rand(1000,2)*100        # 1000 random 2-D points in [0, 100)
data = pd.DataFrame(data,columns=['x_axis','y_axis'])
data.plot.scatter('x_axis','y_axis')
engine = KMeans(5)
engine.fit(data,10)                      # 5 clusters, 10 assign/relocate iterations
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/elsiecolme/EscapeEarth/blob/main/Interns/Elise/How_to_save_a_LightCurve_in_FITS_format%3F.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="xWJQ1hay_w69"
# !pip install lightkurve
import lightkurve as lp
# + id="x8TQGdnJAEHL"
#Obtaining a random Kepler lightcurve from MAST
# NOTE(review): downloads from MAST -- needs network access.
# `search_lightcurvefile` / `PDCSAP_FLUX` are the lightkurve 1.x API;
# presumably deprecated in lightkurve >= 2.0 -- verify installed version.
from lightkurve import search_lightcurvefile
lcf = search_lightcurvefile('KIC 757076', quarter=3).download()
# + id="nTu-WcU8AR2a" outputId="f02e934e-f669-4f30-8711-7bfe20bc46b7" colab={"base_uri": "https://localhost:8080/", "height": 387}
#Making edits to lightcurve. We use the PDCSAP flux from MAST, remove NaN values and clip out any outliers.
lc = lcf.PDCSAP_FLUX.remove_nans().remove_outliers()
lc.scatter();
# + id="TnLU0eL1AXGE"
#Now we can use the to_fits method to save the lightcurve to a file called output.fits
lc.to_fits(path='output.fits', overwrite=True)
# + id="HXME9qwzAeYA" outputId="1665f3fd-ba53-4c2b-afb5-c879376c392e" colab={"base_uri": "https://localhost:8080/", "height": 34}
from astropy.io import fits
hdu = fits.open('output.fits')
type(hdu)
# + id="HLbd32KQAgHh" outputId="6b2083eb-31de-4c40-b744-769aae48e1b6" colab={"base_uri": "https://localhost:8080/", "height": 85}
hdu.info()
#hdu is a set of astropy.io.fits objects, which is what we would expect.
# + id="RMkki35WBK-f" outputId="da1bc27d-9a50-4d8a-a044-41cbf722b9e7" colab={"base_uri": "https://localhost:8080/", "height": 442}
hdu[0].header
# + id="dk61sGj3BPh7" outputId="92fd13e6-6067-4951-8a4d-83e3340d8db3" colab={"base_uri": "https://localhost:8080/", "height": 493}
hdu[1].header
# + id="F7cFB397Bbmf"
# Extra keyword arguments are written as header cards (inspected below).
lc.to_fits(path='output.fits',
           overwrite=True,
           HLSPLEAD='Kepler/K2 GO office',
           HLSPNAME='TUTORIAL',
           CITATION='HEDGES2018')
# + id="BOYIhk78BhlL"
hdu = fits.open('output.fits')
# + id="yAWNzQbDBoHe" outputId="780af75a-c0d5-4d56-8559-9005f5d7652c" colab={"base_uri": "https://localhost:8080/", "height": 493}
hdu[0].header
# + id="EU255Uh_Bqs0" outputId="e8271552-79ae-4fc0-f34c-b305846f38fb" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Phase of the light curve folded at an arbitrary demo period.
demo_vector = lc.fold(1.23456789).phase
demo_vector
# + id="3l1w-A1IB-BA"
# Array-valued keyword arguments become extra data columns (see hdu[1] below).
lc.to_fits(path='output.fits',
           overwrite=True,
           HLSPLEAD='Kepler/K2 GO office',
           HLSPNAME='TUTORIAL',
           CITATION='HEDGES2018',
           DEMO_COLUMN=demo_vector)
# + id="w5XcLyhQB_ov"
hdu = fits.open('output.fits')
# + id="uRPkcvwmCOH4" outputId="40679f63-05cf-4680-f1c4-225908c7936e" colab={"base_uri": "https://localhost:8080/", "height": 187}
hdu[1].data.columns
# + id="PBxcrFLACPD4"
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Interpretability
# ### Preambula
# To get started you need to install glasses, this can be done through `pip`
#
# ```bash
# pip install git+https://github.com/FrancescoSaverioZuppichini/glasses
# ```
# ## Interpretability
#
# There are different methods to understand the model's output. In glasses, each model is a `VisionModule` instance and they all implement the `Interpretable` protocol allowing you to access the `.interpret` method.
#
# All the available interpretability techniques are contained in `glasses.interpretability`. Let's see them in action!
#
# First of all, we need an image!
# +
import requests
from PIL import Image
from io import BytesIO
import matplotlib.pyplot as plt
# NOTE(review): fetches the demo image over the network.
r = requests.get('https://i.insider.com/5df126b679d7570ad2044f3e?width=700&format=jpeg&auto=webp')
im = Image.open(BytesIO(r.content))
fig = plt.figure()
plt.title('A cute dog 😍')
plt.imshow(im)
# -
# Then we can create a model using `AutoModel`, a simple `resnet18` will do just fine.
# +
from glasses.models import AutoModel, AutoTransform
model = AutoModel.from_pretrained('resnet18')
cfg = AutoTransform.from_name('resnet18')
# -
# Then, let's import the interpretability methods we would like to apply. So far I have [Grad-Cam](https://arxiv.org/abs/1610.02391) and [Saliency Map](https://arxiv.org/abs/1312.6034) implemented
from glasses.interpretability import GradCam, SaliencyMap
from torchvision.transforms import Normalize
# use config to correctly preprocess the picture and add the batch dim
x = cfg.transform(im).unsqueeze(0)
# you just need to instance an interpretability class and pass it to .interpret
# (`_ =` suppresses the notebook's echo of the returned object)
_ = model.interpret(x, using=SaliencyMap()).show()
# in the Grad-Cam case we also want to post process the image by inverting the ImageNet preprocessing
postprocessing = Normalize(-cfg.mean / cfg.std, (1.0 / cfg.std))
_ = model.interpret(x, using=GradCam(), postprocessing=postprocessing).show()
# Tada!
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Case Study Context
#
# * An online travel agency wants to improve the hotel-booking experience by making it mobile-friendly and as
# easy as chatting with a friend.
#
# * Some of the ways that they advertise the hotel rooms is through common hotel booking sites that aggregate rates from many OTA’s.
# * They are able to determine how much they would like to spend on each click.
# * They can set a different spend per click for each itinerary (itinerary is the unique inputs of the search: hotel / length of stay / checkin date / etc...).
# * If they spend more, they will be more likely to be shown and will appear higher in the ranking.
# * If they spend less, they will be shown lower in the ranking but if our bid is too low, they may not be shown
# at all.
#
# # Key Deliverables of the Case Study
#
# * Identify key trends from booking and spending data
# * How can the OTA agency improve?
# * What are some deep insights based on the data
#Importing Libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import os
import seaborn as sns
os.getcwd()
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
os.chdir('/Users/snehamehrin/Desktop/Project/data-analysis-project/data/raw')
import warnings
warnings.filterwarnings('ignore')
class color:
    """ANSI terminal escape sequences used to emphasise printed headings."""
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'  # resets all styling -- must terminate every styled string
# +
#Reading the data
# IDs read as strings so leading zeros are preserved; DATE parsed up front.
spend_df=pd.read_csv('spend.csv',dtype={'HOTEL_ID':'object'},parse_dates=['DATE'])
booking_df=pd.read_csv('bookings.csv',parse_dates=['DATE'],dtype={'EAN_ID':'object'})
#Exploring the data
print(color.BOLD + color.YELLOW +"First 5 rows of spending data " + color.END)
spend_df.head()
print(color.BOLD + color.YELLOW +"First 5 rows of booking data " + color.END)
booking_df.head()
print(color.BOLD + color.YELLOW +"Data Info Spending Data\n\n" + color.END)
spend_df.info()
print('\n\n')
print(color.BOLD + color.YELLOW +"Data Info Booking Data\n\n" + color.END)
booking_df.info()
#Rename columns
# 'Unnamed: 10'/'Unnamed: 11' -- presumably empty trailing export columns; verify in spend.csv.
spend_df=spend_df.rename(columns={'DTA':'Days_Arrival','LOS':'Length Of Stay','BOOKING_DOW':'Day_Week','COMPARISON_TO_LOWEST_PRICE (%)':'Lowest_Price_%'}).drop(columns=['Unnamed: 10','Unnamed: 11'])
booking_df=booking_df.rename(columns={'DTA':'Days_Arrival','LOS':'Length Of Stay','BOOKING_DOW':'Day_Week'})
# -
# ## Key Observation
#
# * There are null values in the columns ELIGIBLE_IMPS,IMPRESSIONS,CLICKS,SPEND and Lowest Price
# * Assuming null values means zero in this case, imputing the null values with zero seems like a good way to clean this data.
#
# ## 2. Data Cleaning
cols=['ELIGIBLE_IMPS','IMPRESSIONS','CLICKS','SPEND','Lowest_Price_%']
def data_cleaning(data):
    """Replace missing values in the metric columns (`cols`) with 0, in place.

    Here a null metric means "no activity", so 0 is the correct imputation.
    The original looped over `cols` but re-ran the identical whole-frame
    fillna on every iteration; one vectorised fillna is equivalent.
    """
    data[cols] = data[cols].fillna(0)
    return data
spend_df=data_cleaning(spend_df)
# ## 3. Exploratory Data Analysis
pd.options.display.float_format = '{:.2f}'.format
spend_df.describe()
# ## Key Observations
#
# ### Days_Arrival
# * Avg Length from booking to day of arrival is 17.However, since standard deviation is large in this case, mean doesn't seem like a valid statistic metric to consider.
# * Less than 50% of the data has an average length of 8.
# * Maximum length between booking to day of arrival is 238
#
# ### Length of Stay
# * Less than 50% of the customers stay only for a day
# * There seems to be 1 customer who has stayed over 19 days - Interesting data point to consider to see if it's an outlier
#
# ### Eligible Impression
#
# * On avg snap commerce seems to have less than 2 impression rates to provide to the users for their search query.
# * There seems to be a booking for which 8559 outliers are present - Further exploration needed
#
# ### Impressions
#
# * On avg customers have viewed the impressions only zero times. 50% of the data , the avg impression rate is 0. This makes sense, since the eligible impression rate is also zero.
# * However, there is an add which has been seen 2836 times by the user- Further exploration needed
#
# ### Clicks
#
# * Less than 75% of the ads has been clicked zero times. This is not a good sign
# ## 4. Feature Engineering
#
# * Calculate the day column of the date fields in spending and booking data
# *
# ### 4.1 Formatting Dates
# * In the first feature engineering process, we calculate the day number from date.
# * We also order the Day of the week field , so that we can visualize properly.
def feature_engineering(df):
    """Derive calendar features from the DATE column, in place.

    Adds:
      Day      -- day of the month (e.g. 2020-03-12 -> 12)
      Day_Week -- abbreviated weekday name as an ordered categorical
                  (Sun..Sat) so plots sort chronologically.
    """
    dates = pd.to_datetime(df['DATE'])
    df['DATE'] = dates
    df['Day'] = dates.dt.day
    weekday_order = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
    df['Day_Week'] = pd.Categorical(dates.dt.strftime('%a'),
                                    categories=weekday_order,
                                    ordered=True)
    return df
booking_df=feature_engineering(booking_df)
spend_df=feature_engineering(spend_df)
# ### 4.2 Impressions & Clicks
#
# * We calculate the **click rate( No of Clicks/Impressions)**- If the **Impressions are 0**, we default it to **zero**,since it doesn't make sense for the user to click without viewing it.
# * We also calculate the **Impression rate(No Of Impressions/ELIGIBLE_IMPS)**-However, this might not be an accurate metric since **ELIGIBLE_IMPS** means if the snapcommerce had a **rate for that advertisement** and this might or might **not be shown to user**, so we do not know if the **Impression count** is the **no of times users viewed the advertisement** for which there is a **rate**.
# +
#Let's look if people have viewed an advertisement even if snapcommerce does not have a Eligible Rate
no_imps=spend_df.loc[(spend_df['IMPRESSIONS']!=0) & (spend_df['ELIGIBLE_IMPS']==0)]
print('No Of Searches for which People have viewed the advertisement , but there is no Eligible Rate is {}'.format(no_imps.shape[0]))
no_clicks=spend_df.loc[(spend_df['IMPRESSIONS']==0) & (spend_df['CLICKS']!=0)]
print('No Of Searches for which People have clicked the advertisement, but there is no views is {}'.format(no_clicks.shape[0]))
# -
# **30** seems a very small number compared to the overall dataset, so for now , I will calculate this rate and leave it open.
#
# **359** also seems like a very small number where clicks are not zero, but Impressions are zero, so I will calculate the click rate for now.
#Calculation Of Impression_Rate
# BUG FIX: the original used chained assignments of the form
#   spend_df['X'] = spend_df.loc[mask, 'X'] = value
# which first overwrote the WHOLE column and then re-assigned the subset, so
# the ELIGIBLE_IMPS == 0 / IMPRESSIONS == 0 guards never took effect (and the
# Click_Rate lines even wrote their loc-assignments into 'Impression_Rate').
# The code below implements the rule stated in the markdown above:
# rate = 0 when the denominator is 0, otherwise round(num / den * 100).
_elig = spend_df['ELIGIBLE_IMPS']
spend_df['Impression_Rate'] = (spend_df['IMPRESSIONS'] / _elig * 100).round().where(_elig != 0, 0)
#Calculation Of Click Rate
_imps = spend_df['IMPRESSIONS']
spend_df['Click_Rate'] = (spend_df['CLICKS'] / _imps * 100).round().where(_imps != 0, 0)
# ## 3.1 Date
#
print("Range of Date for searches is from {0} to {1}".format(min(spend_df['DATE']),max(spend_df['DATE'])))
print("Unique Date Count for search is {}\n".format(spend_df['DATE'].nunique()))
print("Range of Date for bookings is from {0} to {1}".format(min(booking_df['DATE']),max(booking_df['DATE'])))
print("Unique Date Count for booking is {}\n".format(booking_df['DATE'].nunique()))
# +
# Module-level figure/axes: both date_eda calls below overlay their series
# on these same axes.
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
def date_eda(data_df,label,col,color):
    """
    This function plots the no of searches or bookings in a day.

    Counts rows per value of `col` (e.g. per day of month), plots the counts
    on the shared module-level `ax`, and returns the counts DataFrame.
    Note: the `color` parameter shadows the module-level `color` class here.
    """
    df=pd.DataFrame(data_df[col].value_counts().rename_axis(col).reset_index(name='Count'))
    ax.plot(df[col],df['Count'],label=label,marker='.',linestyle='none',color=color)
    plt.title('Search\Booking Trend Per {}'.format(col))
    plt.ylabel('No Of Searches')
    # Mute the chart chrome for the light theme used in the write-up.
    ax.spines['bottom'].set_color('#DCDCDC')
    ax.spines['top'].set_color('none')
    ax.spines['left'].set_color('#DCDCDC')
    ax.spines['right'].set_color('none')
    ax.xaxis.label.set_color('#DCDCDC')
    ax.tick_params(axis='x', colors='#808080')
    ax.tick_params(axis='y', colors='#808080')
    # NOTE(review): 'rend.png' looks like a truncated 'trend.png' -- confirm.
    plt.savefig('rend.png', bbox_inches='tight',transparent=True,pad_inches=0)
    plt.legend()
    return df
df_spend=date_eda(spend_df,label='Search',col='Day',color='#8C6B92')
df_booking=date_eda(booking_df,label='Booking',col='Day',color='#CE7B98')
# +
# NOTE(review): `Impression_Trend` is not defined anywhere in the file as
# shown -- presumably a per-Day_Week aggregate of spend_df built in a missing
# cell; confirm before running (this cell raises NameError otherwise).
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
ax.plot(Impression_Trend['Day_Week'],Impression_Trend['ELIGIBLE_IMPS'],label='Eligible Impression',marker='.',color='#545973')
ax.plot(Impression_Trend['Day_Week'],Impression_Trend['IMPRESSIONS'],label='Impression',marker='.',color='#8C6B92')
ax.plot(Impression_Trend['Day_Week'],Impression_Trend['CLICKS'],label='Clicks',marker='.',color='#CE7B98')
# Inline labels instead of a legend box.
ax.annotate('Eligible Impressions', xy=('Fri', 3))
ax.annotate('Impressions', xy=('Sat', 1.75))
ax.annotate('Clicks', xy=('Sat', 0.25))
ax.spines['bottom'].set_color('#DCDCDC')
ax.spines['top'].set_color('none')
ax.spines['left'].set_color('#DCDCDC')
ax.spines['right'].set_color('none')
ax.xaxis.label.set_color('#DCDCDC')
ax.tick_params(axis='x', colors='#808080')
ax.tick_params(axis='y', colors='#808080')
# NOTE(review): 'rend.png' looks like a truncated 'trend.png' -- confirm.
plt.savefig('rend.png', bbox_inches='tight',transparent=True,pad_inches=0)
# -
df_spend.describe()
df_booking.describe()
# ### Observations
#
# * There are more searches than bookings - Not a good sign.
# * 50% of the searches seems to be done within first 15 days of the month.
# * 50% of the bookings seems to be done within first 15 days of the month.
#
# ### Our Goal
# * Target Advertising during first half of the month.
# * Convert searches into more booking- Why are our users not clicking on our ads?
# ### Observations
#
# * Highest number of searches and bookings are during sundays
# * Steep drop in searches during Fridays and no searches at all during Thursdays
# * Searches seem to pick up from Friday again
# * Booking seems to follow the same pattern too
# ## Highlight Observations
#
# * Target Advertising during weekends especially first half of the month
# * Investigate more on why the number of searches are not converting into bookings
# ## 3.2 Length Of Stay
#
# * In Length Of Stay, we aim to find out the common searches for the stay duration.
# * We are trying to see, if people are searching more for vacation or short term stays.
# * My hypothesis is that short term stay searches are more likely to be business meetings or staycation to nearby places.
# * Long term stays might be for Vacations.
spend_df['Length Of Stay'].describe()
# +
df=pd.DataFrame(spend_df['Length Of Stay'].value_counts().rename_axis('length_of_stay').reset_index(name='Count'))
sns.barplot(x=df['length_of_stay'],y=df['Count'],palette="Blues_d")
plt.title('No Of Searches per Length Of Stay')
plt.xlabel('Length of Stay')
plt.ylabel('No Of Searches')
plt.show()
# +
def length_of_stay_grouping(data,col):
    """Bucket the length-of-stay column `col` into labelled ranges, in place.

    Adds a 'length_of_stay_grouping' column with one of:
    '1-2 days stay', '3-5 days stay', '6-8 days stay', 'More than 8 days stay'.
    """
    stay = data[col]
    # Explicit comparisons instead of Series.between(..., inclusive=True):
    # the boolean `inclusive` flag was removed in pandas 2.0. Also BUG FIX:
    # the original final condition was `> 9`, which silently dropped 9-day
    # stays into np.select's default bucket ('0'); `> 8` matches the label.
    data['length_of_stay_grouping']=np.select(
        [
            (stay >= 1) & (stay <= 2),
            (stay >= 3) & (stay <= 5),
            (stay >= 6) & (stay <= 8),
            stay > 8,
        ],
        [
            '1-2 days stay',
            '3-5 days stay',
            '6-8 days stay',
            'More than 8 days stay',
        ])
    return data
spend_df=length_of_stay_grouping(spend_df,'Length Of Stay')
booking_df=length_of_stay_grouping(booking_df,'Length Of Stay')
# -
ax=sns.countplot(spend_df['length_of_stay_grouping'])
ax.set(xlabel='Length Of Stay',ylabel='No Of Searches',title='No Of Searches Per Length Of Stay')
ax=sns.countplot(booking_df['length_of_stay_grouping'])
ax.set(xlabel='Length Of Stay',ylabel='No Of Bookings',title='No Of Bookings Per Length Of Stay')
# ### Observations
#
# * Short-Stay(1-3) ,Medium Stay(4,5,6) searches and bookings seems to be more
# ## 3.3 Hotel
#
# * For this feature, we figure out how many times we search for a particular hotel.
# * So , I group by hotel_id and count the number of searches.
# * Then, I take the histogram of the count to figure out how many times the searches occur ,so that I can group them into buckets.
# * Then,I bucket them into "Extremely High-Demand,High-Demand","Medium-Demand","Low-Demand" Hotels.
# * This will give us an idea of how much SnapCommerce is spending on these high demand hotels.
hotel_hist=pd.DataFrame(spend_df['HOTEL_ID'].value_counts().rename_axis('hotel_id').reset_index(name='Count'))
hotel_hist['Count'].describe()
sns.distplot(hotel_hist['Count'],bins=20)
#Merging the search count to the original data_set
spend_df=spend_df.merge(hotel_hist,left_on='HOTEL_ID',right_on='hotel_id')
spend_df=spend_df.drop(columns=['hotel_id']).rename(columns={'Count':'hotel_search_count'})
# * We see that **75%** of the hotels are searched less than **15 times** in a month.
# * **Few Hotels** are searched more than **200 times** - This is our **Golden Segment**, since these few hotesls have a potential to drive more revenue.
#
# +
def hotel_rating(data,col):
    """Bucket hotels by how often they were searched in the month, in place.

    Adds a 'hotel_rating' column with one of the four demand labels below.
    Counts from value_counts are always >= 1, so every row matches a condition.
    """
    searches = data[col]
    # Explicit comparisons instead of Series.between(..., inclusive=True):
    # the boolean `inclusive` argument was removed in pandas 2.0.
    data['hotel_rating']=np.select(
        [
            (searches >= 1) & (searches <= 4),
            (searches >= 5) & (searches <= 15),
            (searches >= 16) & (searches <= 100),
            searches > 100,
        ],
        [
            'Low Demand(1-4 Searches)',
            'Medium Demand(5-15 Searches)',
            'High Demand(16-100 Searches)',
            'Extremely High Demand(>100 Searches)',
        ])
    return data
spend_df=hotel_rating(spend_df,'hotel_search_count')
# -
#Distribution of Hotel Rating
sns.countplot(spend_df['hotel_rating'])
# ## Observations
#
# * Even though, we saw that there are few hotels searched multiple times by the users , but many users are searching for these hotels we can see that High Demand hotels has a potential to drive up our revenue
# ### 3.4 Days_Arrival
spend_df['Days_Arrival'].describe()
sns.boxplot(spend_df['Days_Arrival'])
def arrival_date(data,col):
    """Bucket days-until-arrival (`col`) into booking-urgency labels, in place.

    Adds a 'booking_priority' column; all non-negative day counts are covered.
    (Original docstring was a copy-paste of the length-of-stay helper.)
    """
    days = data[col]
    # Explicit comparisons instead of Series.between(..., inclusive=True):
    # the boolean `inclusive` argument was removed in pandas 2.0.
    data['booking_priority']=np.select(
        [
            (days >= 0) & (days <= 2),
            (days >= 3) & (days <= 10),
            (days >= 11) & (days <= 31),
            days > 31,
        ],
        [
            'Priority Check-in(0-2 Days)',
            'Within 10 Days check-in',
            'Within a Month check-in',
            'After a Month check-in',
        ])
    return data
spend_df=arrival_date(spend_df,'Days_Arrival')
booking_df=arrival_date(booking_df,'Days_Arrival')
sns.countplot(spend_df['booking_priority'])
# ### Observations
# * People seem to be searching for Urgent Booking and Low Priority Booking Hotels
# ### 3.5 Eligible IMPS
#
# * Eligible IMPS suggests if snapcommerce had a rate for the search, this search might or not be shown to the user.
# * It will be interesting to see, if snapcommerce had a rate for our High Search, Urgent Booking, Shorter Stay searches.
# +
#Let's look at the overall distribution of the Eligible IMPS field
spend_df['ELIGIBLE_IMPS'].describe()
sns.boxplot(spend_df['ELIGIBLE_IMPS'])
rate=round(spend_df['ELIGIBLE_IMPS'].loc[spend_df['ELIGIBLE_IMPS']==0].shape[0]/spend_df.shape[0]*100)
print("{}% Of the times snapcommerce had no rate to be shown to the user".format(rate))
# -
# ### Observations
# * Box Plot suggests that there are way more outliers for this dataset.
# * 75% of the searches have an Eligible Impression of 1. 50% of the dataset has no Eligible Impression.
#
# **Does ELIGIBLE Impression rate has any correlation with any of the fields?
# We will explore further**
# ### 3.6 Impressions
#
# Impressions corresponds to number of times users viewed the advertisements.
# If there are more views to the advertisement, then it could result in more clicks and essentially translate to booking.
# +
#Let's look at the overall distribution of the Impressions field
spend_df['IMPRESSIONS'].describe()
sns.boxplot(spend_df['IMPRESSIONS'])
rate=round(spend_df['IMPRESSIONS'].loc[spend_df['IMPRESSIONS']==0].shape[0]/spend_df.shape[0]*100)
print("{}% Of the times there were no views to the advertisement resulting from the search".format(rate))
# -
# ### Observations
# * **75%** of the **searches** resulted only in **1 View**. **50%** of the dataset have **no views**.
# * Could it be that there are no rates for these advertisements or the bid is too low for this?
# ### 3.7 Clicks
#
#
# +
#Let's look at the overall distribution of the Impressions field
spend_df['CLICKS'].describe()
sns.boxplot(spend_df['CLICKS'])
rate=round(spend_df['CLICKS'].loc[spend_df['CLICKS']==0].shape[0]/spend_df.shape[0]*100)
print("{}% Of the times there were no clicks to the advertisement resulting from the search".format(rate))
# -
# ### Observations
#
# * Conversion rate from views to Clicks is very low.
# * **97%** of the time there were **no clicks** to the advertisement resulting from the search
# ### 3.8 Spend
#
# * Spend means the total amount snapcommerce spend for the clicks
# * Since the clicks were very low , we can assume that spend might also have been very low.
#
#Let's look at the overall distribution of the Impressions field
spend_df['SPEND'].describe()
sns.boxplot(spend_df['SPEND'])
plt.show()
sns.distplot(spend_df['SPEND'])
rate=round(spend_df['SPEND'].loc[spend_df['SPEND']==0].shape[0]/spend_df.shape[0]*100)
print("{}% Of the times there were no clicks to the advertisement resulting from the search".format(rate))
# ### 3.0 Lowest_Price
#
# * **Lowest Price** means the % change between Snap Commerce's price and its lowest competitor's price.
# * My assumption is that if the **competitors price** is **lower** than **snap commerce** then this value will be **negative**.
# * If it is **positive** then snapcommerce had a lower price than it's competitiors.
spend_df['Lowest_Price_%'].describe()
sns.distplot(spend_df['Lowest_Price_%'])
print ("{}% of the time snap commerce's rate was lower than it's competitor's".format(round(spend_df.loc[spend_df['Lowest_Price_%']>=0].shape[0]/spend_df.shape[0]*100)))
# ### Observations
#
# * If 87% of the time snapcommerce had a lower price than it's competitors, then why are the clicks and views so low?
#
print("{}% of the times snap commerce had lowest price than the competitors but there was no eligible rate".format(round(spend_df.loc[(spend_df['Lowest_Price_%']>=0) & (spend_df['ELIGIBLE_IMPS']==0)].shape[0]/spend_df.shape[0]*100)))
corr=spend_df.corr()
ax = sns.heatmap(
corr,
vmin=-1, vmax=1, center=0,
cmap=sns.diverging_palette(20, 220, n=200),
square=True
)
ax.set_xticklabels(
ax.get_xticklabels(),
rotation=45,
horizontalalignment='right'
);
sns.scatterplot(data=spend_df, x="SPEND", y="CLICKS")
# ## 4 Bookings EDA
booking_df.describe()
# ### 4.1 Days Arrival
def booking_eda(data):
    """Quick univariate EDA helper: show a boxplot, then a distribution plot."""
    for plot_fn in (sns.boxplot, sns.distplot):
        plot_fn(data)
        plt.show()
booking_df.describe()
# Univariate looks at each booking column via the booking_eda helper.
booking_eda(booking_df['Days_Arrival'])
sns.countplot(booking_df['booking_priority'])
booking_df.columns
# ### 4.2 Length Of Stay
booking_eda(booking_df['Length Of Stay'])
sns.countplot(booking_df['length_of_stay_grouping'])
# ### 4.3 Bookings
booking_eda(booking_df['BOOKINGS'])
sns.countplot(booking_df['BOOKINGS'])
# ### 4.4 GMV
booking_eda(booking_df['GMV'])
booking_df.corr()
sns.scatterplot(data=booking_df, x="Day", y="GMV")
# ### 4.5 Hotel ID
# Per-hotel booking frequency table (columns: 'Hotel', 'Count').
hotel_hist=pd.DataFrame(booking_df['EAN_ID'].value_counts().rename_axis('Hotel').reset_index(name='Count'))
hotel_hist['Count'].describe()
print("{}% Of Hotels has been booked more than once".format(round(hotel_hist.loc[hotel_hist['Count']>1].shape[0]/hotel_hist.shape[0]*100,0)))
print("{}% Of Hotels has been booked more than 4 times".format(round(hotel_hist.loc[hotel_hist['Count']>4].shape[0]/hotel_hist.shape[0]*100,0)))
# Attach each hotel's booking frequency back onto the booking rows.
booking_df=booking_df.merge(hotel_hist,left_on='EAN_ID',right_on='Hotel')
booking_df.rename(columns={'Count':'hotel_booking_count'},inplace=True)
# +
def hotel_booking_rating(data, col):
    """Label each row with a demand bucket derived from its booking count.

    Adds a 'hotel_booking_rating' column to ``data`` (modified in place)
    and returns the same DataFrame.

    Fixes vs. the original:
    * ``Series.between(..., inclusive=True)`` was removed in pandas 2.0;
      plain comparisons below work on every pandas version.
    * The original bins overlapped at 10 (2-10 and 10-100); np.select
      keeps the first match, so 10 fell in the medium bucket — that
      behaviour is preserved here explicitly (high bucket is 11-100).
    """
    data['hotel_booking_rating'] = np.select(
        [
            data[col] == 1,
            (data[col] >= 2) & (data[col] <= 10),
            (data[col] >= 11) & (data[col] <= 100),
            data[col] >= 101,
        ],
        [
            'Low Demand Hotels(1 Booking)',
            'Medium Demand Hotels(2-10 Bookings)',
            'High Demand Hotels(10-100 Bookings)',
            'Extremely High Demand Hotels(>100 Bookings)'
        ])
    return data
# Apply the demand bucketing to the per-hotel booking counts.
booking_df=hotel_booking_rating(booking_df,'hotel_booking_count')
# +
booking_df['hotel_booking_rating'].value_counts()
# -
# ### Observations
# * There is a handful of hotels that has been booked more than 4 times.
# * We need to focus on the hotels that are booked only few times and see if they are searched by the users as well.
# * If they are we need to focus our ad strategy on these hotels
# ## Key Questions
# ### 1. How many searches and bookings
print('No of Searches is {}'.format(spend_df.shape[0]))
print('No of Bookings is {}'.format(booking_df.shape[0]))
# ### 2. What are the popular searches
def group_by_fun(data, groupby, agg):
    """Percentage share of non-null `agg` values per level of `groupby`.

    Returns a Series named "count" whose values sum to 100.
    """
    counts = data.groupby(groupby)[agg].count()
    counts = counts.rename("count")
    return (counts / counts.sum()) * 100
# +
# Share of searches by stay length, check-in urgency and hotel demand bucket.
hotel_rating=group_by_fun(spend_df,'length_of_stay_grouping','HOTEL_ID')
print('% Of Users who Searched for Length Of Stay\n')
hotel_rating
check_in=group_by_fun(spend_df,'booking_priority','HOTEL_ID')
print('% Of Users who Searched for Check-in dates\n')
check_in
# NOTE(review): hotel_rating is reused here for a different grouping.
hotel_rating=group_by_fun(spend_df,'hotel_rating','HOTEL_ID')
print('% Of Users who Searched for hotel subsset\n')
hotel_rating
# -
spend_df.loc[spend_df['hotel_rating']=='Low Demand(1-4 Searches)'].describe()
# ## 3 . When do users search
# +
# Daily search/booking counts over the month, with the first half highlighted.
search_trend=pd.DataFrame(spend_df.groupby("Day", as_index=False)['HOTEL_ID'].count())
booking_trend=pd.DataFrame(booking_df.groupby("Day", as_index=False)['EAN_ID'].count())
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
ax.plot(search_trend['Day'],search_trend['HOTEL_ID'],label='Search',marker='.',color='#DCDCDC')
ax.plot(search_trend['Day'].loc[search_trend['Day']<=15],search_trend['HOTEL_ID'].loc[search_trend['Day']<=15],label='Search',marker='.',color='#dba961')
ax.spines['bottom'].set_color('#DCDCDC')
ax.spines['top'].set_color('none')
ax.spines['left'].set_color('#DCDCDC')
ax.spines['right'].set_color('none')
ax.xaxis.label.set_color('#DCDCDC')
ax.tick_params(axis='x', colors='#808080')
ax.tick_params(axis='y', colors='#808080')
plt.savefig('week_trend.png', bbox_inches='tight',transparent=True,pad_inches=0)
# -
# NOTE(review): these two lines are repeated verbatim inside the next cell.
search_trend=pd.DataFrame(spend_df.groupby("Day_Week", as_index=False)['HOTEL_ID'].count())
booking_trend=pd.DataFrame(booking_df.groupby("Day_Week", as_index=False)['EAN_ID'].count())
# +
# Day-of-week trend, highlighting the weekend-adjacent days.
search_trend=pd.DataFrame(spend_df.groupby("Day_Week", as_index=False)['HOTEL_ID'].count())
booking_trend=pd.DataFrame(booking_df.groupby("Day_Week", as_index=False)['EAN_ID'].count())
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
ax.plot(search_trend['Day_Week'],search_trend['HOTEL_ID'],label='Search',marker='.',color='#DCDCDC')
ax.plot(search_trend['Day_Week'].loc[(search_trend['Day_Week']=='Sun') | (search_trend['Day_Week']=='Mon')] ,search_trend['HOTEL_ID'].loc[(search_trend['Day_Week']=='Sun') | (search_trend['Day_Week']=='Mon')],label='Search',marker='.',color='#dba961')
ax.plot(search_trend['Day_Week'].loc[(search_trend['Day_Week']=='Fri') | (search_trend['Day_Week']=='Sat')] ,search_trend['HOTEL_ID'].loc[(search_trend['Day_Week']=='Fri') | (search_trend['Day_Week']=='Sat')],label='Search',marker='.',color='#dba961')
ax.spines['bottom'].set_color('#DCDCDC')
ax.spines['top'].set_color('none')
ax.spines['left'].set_color('#DCDCDC')
ax.spines['right'].set_color('none')
ax.xaxis.label.set_color('#DCDCDC')
ax.tick_params(axis='x', colors='#808080')
ax.tick_params(axis='y', colors='#808080')
plt.savefig('day_trend.png', bbox_inches='tight',transparent=True,pad_inches=0)
# -
# ### 4. % Of Searches for which no rate and lower price
spend_df['ELIGIBLE_IMPS'].describe()
spend_df['Lowest_Price_%']=spend_df['Lowest_Price_%'].round(2)
spend_df['Lowest_Price_%'].describe()
# Searches where an eligible impression existed AND the price was competitive.
snapcommerce_adv=spend_df.loc[(spend_df.ELIGIBLE_IMPS>0) & (spend_df['Lowest_Price_%']>=0)]
spend_df['Eligible Rate'] ='Eligible Rate N/A'
spend_df.loc[(spend_df.ELIGIBLE_IMPS>0) & (spend_df['Lowest_Price_%']>=0),'Eligible Rate'] ='Eligible Rate Available'
snapcommerce_adv.shape[0]
snapcommerce_adv.shape[0]/spend_df.shape[0]
# ### 5.Calculating High Demand Searches
# +
# Popular = medium/high-demand hotel AND a short, urgent stay.
highest_searches=spend_df.loc[((spend_df['hotel_rating']=="High Demand(16-100 Searches)") | (spend_df['hotel_rating']=="Medium Demand(5-15 Searches)")) & ((spend_df['length_of_stay_grouping']== '1-2 days stay') & (spend_df.booking_priority=='Priority Check-in(0-2 Days)'))]
spend_df['Search Rating']='Unpopular Search'
spend_df.loc[((spend_df['hotel_rating']=="High Demand(16-100 Searches)") | (spend_df['hotel_rating']=="Medium Demand(5-15 Searches)")) & ((spend_df['length_of_stay_grouping']== '1-2 days stay') & (spend_df.booking_priority=='Priority Check-in(0-2 Days)')),'Search Rating']="Popular Search"
highest_searches.shape[0]
highest_searches.shape[0]/spend_df.shape[0]
# +
# Cross the popularity flag with the eligible-rate flag into one 4-way label.
spend_df['Rating'] ="Unpopular Search Eligible Rate N/A"
spend_df.loc[((spend_df['Search Rating']=="Popular Search") & (spend_df['Eligible Rate']=="Eligible Rate Available")) ,'Rating']="Popular Search Rate Available"
spend_df.loc[((spend_df['Search Rating']=="Popular Search") & (spend_df['Eligible Rate']=="Eligible Rate N/A")) ,'Rating']="Popular Search Eligible Rate N/A"
spend_df.loc[((spend_df['Search Rating']=="Unpopular Search") & (spend_df['Eligible Rate']=="Eligible Rate Available")) ,'Rating']="Unpopular Search Eligible Rate Available"
rating=group_by_fun(spend_df,'Rating','HOTEL_ID')
rating
# +
# Spend vs. clicks, colored by the combined popularity/eligibility label.
color_dict = dict({'Unpopular Search Eligible Rate N/A':'#576687',
                   'Popular Search Eligible Rate N/A':'#273755',
                   'Unpopular Search Eligible Rate Available': '#9bb7d8',
                   'Popular Search Rate Available' :'#dba961'})
fig = plt.figure()
ax=sns.scatterplot(data=spend_df, x="SPEND", y="CLICKS", hue="Rating", palette=color_dict,legend=False,alpha=0.66)
# NOTE(review): `ax.set` below is a bare attribute access (no call) and does
# nothing — probably leftover; confirm intent.
ax.set
ax.spines['bottom'].set_color('#DCDCDC')
ax.spines['top'].set_color('none')
ax.spines['left'].set_color('#DCDCDC')
ax.spines['right'].set_color('None')
ax.xaxis.label.set_color('#808080')
ax.yaxis.label.set_color('#808080')
ax.tick_params(axis='x', colors='#DCDCDC')
ax.tick_params(axis='y', colors='#DCDCDC')
plt.savefig('scatterplot', bbox_inches='tight',transparent=True,pad_inches=0)
# -
# ## Booking Trends within Itineraries
spend_df_length_of_stay=spend_df.groupby(['Day_Week','length_of_stay_grouping'])['HOTEL_ID'].count()
# Average GMV and total bookings per check-in urgency bucket.
booking_df_scatter=booking_df.groupby("booking_priority").agg(Avg_GMV=pd.NamedAgg(column='GMV', aggfunc=np.mean),
                                                              Booking_sum=pd.NamedAgg(column='BOOKINGS',aggfunc=np.sum)).reset_index().sort_values(by='Booking_sum')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.bar(booking_df_scatter['booking_priority'], booking_df_scatter['Booking_sum'])
booking_df_scatter.value_counts()
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: mogp-dev
# language: python
# name: mogp-dev
# ---
# + active=""
# """
# Copyright 2021 Siemens AG
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Authors:
# Yinchong Yang <yinchong.yang@siemens.com>
# Florian Buettner <buettner.florian@siemens.com>
#
# """
# -
# ### 0. Load packages
# +
import os
import sys
import gpflow
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
import pickle
import scipy as sp
from scipy.sparse import coo_matrix
import seaborn as sns
from sklearn.metrics import mean_squared_error
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import *
sys.path.append('../../mogp_decomposition/')
from mwgp import GPD
from data import load_movielens_data_1m
# -
# ### 1. Setting random seeds
# Fix seeds for reproducibility across numpy and TensorFlow.
np.random.seed(123456)
# NOTE(review): tf.set_random_seed is the TF1-style API — confirm the pinned
# TensorFlow version supports it.
tf.set_random_seed(123456)
# ### 2. Load data
# +
ml_triple_store = load_movielens_data_1m('../../data/ML-1M/')
N = ml_triple_store.shape[0] # number of triples
# Generate new split:
# write_out = open('./ml-1m_splits.pkl', 'wb')
# pickle.dump(splits, write_out)
# write_out.close()
# Load existent split:
read_in = open('../../data/ML-1M/splits.pkl', 'rb')
splits = pickle.load(read_in)
read_in.close()
# -
# ### 3. Training with all splits
# 5-fold cross-validated training of the GP decomposition model.
for cv_id in range(5):
    # Use the current trunk as test set and the rest as training
    te_ids = splits[cv_id]
    tr_ids = []
    for i in range(len(splits)):
        if i != cv_id:
            tr_ids.append(splits[i])
    tr_ids = np.concatenate(tr_ids)
    # As usual in GP, rescaling the target to be zero mean
    target_scaler = StandardScaler()
    # I, J are numbers of users and movies, respectively.
    # Our model can also handle 3 input sources, i.e., kernels, corresponding to
    # tensor decomposition. Since ML-1M is a matrix decomposition task, we set K to None.
    I = ml_triple_store[:, 0].max()+1
    J = ml_triple_store[:, 1].max()+1
    K = None
    # Prepare the training and test data:
    # The user indices and movie indices are found in the first two columns in the
    # triple store. The training target is the rescaled third column, i.e. ratings.
    X_tr = ml_triple_store[tr_ids][:, 0:2]
    Y_tr = target_scaler.fit_transform(ml_triple_store[tr_ids, 3][:, None]).reshape(-1)
    X_te = ml_triple_store[te_ids][:, 0:2]
    Y_te = target_scaler.transform(ml_triple_store[te_ids, 3][:, None]).reshape(-1)
    # We initialize the latent representations with principal components.
    # For that we first generate a full matrix from the triple store.
    X_tr_coo = coo_matrix((Y_tr.reshape(-1), (X_tr[:, 0], X_tr[:, 1])), shape=(I, J))
    X_tr_dense = X_tr_coo.todense()
    # We take the leading principal components.
    pca_user = PCA(8)
    pca_item = PCA(8)
    user_pcs = pca_user.fit_transform(X_tr_dense)
    item_pcs = pca_item.fit_transform(X_tr_dense.T)
    # The dictionary of hyper parameters:
    hyper_params = {'I':I, 'J':J, 'K':K,
                    'emb_sizes': [8, 8], # the size of the latent representations.
                    'M': 128, # the number of inducing point pairs.
                    'emb_reg': 1e-4, # l2 regularization on representation vectors.
                    'batch_size': 2**16, # mini batch size of training
                    'obs_mean': Y_tr.mean(), # mean of target, which is actually 0.
                    'lr': 1e-2} # learning rate.
    # Initialize the model with hyper parameters:
    gp_md = GPD(**hyper_params)
    # Specify the path to save trained model
    gp_md.save_path = './ml-1m_M=128_cv'+str(cv_id)+'/'
    # Build the model.
    gp_md.build()
    # Option 1: using PCA to initialize the latent representations.
    # Note: so far we do not yet make use of the principal components as initialization
    # of the latent representations.
    # In order to achieve that using our current implementation, which only supports random
    # initialization, we have to apply a small trick:
    # First we call the save() method of the class, which saves two objects:
    # The first object consists of the GP hyper parameters.
    # The second object consists of the latent representations.
    gp_md.save()
    # Second, we replace the current random representation with the principal components
    # in the gp_md model object.
    param0 = gp_md.get_weights_params()
    param0[0] = user_pcs
    param0[1] = item_pcs
    # Note this does not change the parameters in the model since get_weights_params() method
    # makes a copy of the weights. Therefore:
    # Third, we only overwrite the previously saved weights "model_params.pkl" with the
    # PCA initialization, while leaving the other object, the GP parameters unchanged.
    with open('./ml-1m_M=128_cv'+str(cv_id)+'/model_params.pkl', 'wb') as f:
        pickle.dump(param0, f)
    # Finally, we re-load the entire model, with old GP parameters but updated PC as
    # initializations of the latent representations.
    gp_md.load_params()
    # Option 2: Alternatively, one could also simply use the random initialization by simply
    # ignoring everything after gp_md.build()
    # Now we can start training:
    # The third and fourth parameters are validation X and Y, which we omit for now.
    gp_md.train(X_tr, Y_tr, None, None, n_iter=500)
    # Save the model after training.
    gp_md.save()
    # Also save the scaler for this specific split configuration, which we need for evaluation.
    with open('./ml-1m_scaler_cv'+str(cv_id)+'.pkl', 'wb') as f:
        pickle.dump(target_scaler, f)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Cross Convolution
# Following Menke and Levin (2003)
#
# The main idea is to compare data to model by minimising a cross convolution function.
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import sys
sys.path.append("..")
import splitwavepy as sw
import numpy as np
import matplotlib.pyplot as plt
# -
# "real" data with a 40-degree / 1.2 s split plus noise; "pred" is noise-free.
real = sw.Pair(delta=0.05,split=(40,1.2),noise=0.01)
real.plot()
pred = sw.Pair(delta=0.05,split=(40,1.2),noise=0)
pred.plot()
# +
def crossconv(real, pred):
    """Cross-convolve two Pair objects (after Menke & Levin, 2003).

    Returns (real.x * pred.y, pred.x * real.y) where * is convolution;
    the two traces agree exactly when the predicted splitting matches
    the observed one.
    """
    observed, predicted = real.chop(), pred.chop()
    return (np.convolve(observed.x, predicted.y),
            np.convolve(predicted.x, observed.y))
def misfit(x, y):
    """Normalised cross-convolution misfit: 0 iff x == y, 1 when orthogonal."""
    diff_energy = np.trapz((x - y) ** 2)
    total_energy = np.trapz(x ** 2) + np.trapz(y ** 2)
    return diff_energy / total_energy
# -
# Compare the two cross-convolved traces; a small misfit means the predicted
# splitting parameters explain the observed data well.
x, y = crossconv(real, pred)
print(misfit(x, y))
plt.plot(x)
plt.plot(y)
plt.plot(x - y)
plt.show()
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Basic for-loop over a list.
blog_posts = ["The 10 coolest math functions in python", "How to make HTTP requests in python", "A tutorial about data types in python"]
for post in blog_posts :
    print(post)
# +
# `continue` skips empty entries.
blog_posts = ["", "The 10 coolest math functions in python", "", "How to make HTTP requests in python", "A tutorial about data types in python"]
for post in blog_posts :
    if post == "" :
        continue
    print(post)
# +
# Strings are iterable character by character.
mystring = "This is a string"
for char in mystring :
    print(char)
# -
for x in range(0, 10) :
    print(x)
# +
# Iterating a dict yields its keys.
person = {'Name' : 'Bhagya', 'Age' : '21', 'Gender' : 'Female'}
for key in person :
    print(key, ":", person[key])
# +
# Nested loops over a dict of lists.
blog_posts ={"python" : ["The 10 coolest math functions in python", "How to make HTTP requests in python", "A tutorial about data types in python"], "Javascript" : ["Namespaces in Javascript.", "New function available in ES6"]}
for category in blog_posts :
    print("Posts about", category)
    for post in blog_posts[category] :
        print(post)
# -
# ## Excersice: Loops
#
# **1. Create a program that asks the user for 8 names of people and store them in a list. When all the names have been given, pick a random one and print it.**
# +
import random
# Collect exactly 8 names interactively, then print one at random.
people = []
while len(people) < 8 :
    name = str(input("Enter a name: "))
    people.append(name)
# NOTE(review): random.choice(people) would be the idiomatic equivalent.
index = random.randint(0, 7)
random_person = people[index]
print(random_person)
# -
# **2. Create a guess game with the names of the colors. At each round pick a random color from a list and let the user try to guess it. When he does it, ask if he wants to play agian. Keep playing until the user types "no".**
# +
import random
colors = ['red', 'green', 'yellow', 'blue', 'violet', 'white', 'black', 'orange', 'pink', 'brown']
# Outer loop: one round per game; inner loop: keep guessing until correct.
while True :
    color = colors[random.randint(0, len(colors) - 1)]
    guess = input("I'm thinking about a color, can you guess it: ")
    while True :
        if color == guess.lower() :
            break
        else :
            guess = input("Nope. Try again:")
    print("You guessed it! I was thinking about", color)
    play_again = input("Let's play again? Type 'no' to quit.")
    if play_again.lower() == 'no' :
        break
print("It was fun, thanks for playing.")
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import time
import pandas as pd
import urllib.request
import os
import random
# -
storeName = input("請輸入店家名稱:")
# +
options = Options()
options.add_argument("--headless") # run without opening a visible browser window
options.add_argument("--incognito") # use incognito (private) mode
options.add_argument('window-size=1920x1080')
# Path to the chromedriver binary; must be specified if not on PATH.
driver = webdriver.Chrome('../tools/chromedriver', options=options)
driver.get("https://www.google.com/maps/search/" + storeName)
# +
def tryclick(driver, selector, count=0):
    """Click the element located by XPath `selector`, retrying up to twice.

    Guards against elements that have not been rendered yet: on failure,
    wait 2 seconds and recurse; give up with a message after 2 attempts.
    """
    try:
        elem = driver.find_element_by_xpath(selector)
        elem.click()  # click the located element
    except Exception:  # was a bare except; narrowed so Ctrl-C still works
        time.sleep(2)
        count += 1
        if count < 2:
            tryclick(driver, selector, count)
        else:
            print("cannot locate element" + selector)

# BUG FIX: the original cell defined tryclick twice and ran the whole
# click/parse/close sequence twice; the second pass operated on an
# already-closed driver and raised. The duplicate copy is removed.
tryclick(driver, '//span[text()="查看所有評論"]')  # click the "view all reviews" button
time.sleep(3)  # crude wait for JavaScript rendering (explicit waits would be better)
soup = BeautifulSoup(driver.page_source, 'html.parser')  # parse the rendered HTML
print(soup)
driver.close()  # close the browser the driver opened
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pip
import pkg_resources
# Inspect what pkg_resources exposes and where packages are found.
dir(pkg_resources)
pkg_resources.working_set.entries
import pkgutil
# All importable top-level modules in the current environment.
pks = [mod for mod in pkgutil.iter_modules()]
pks[:15]
len(pks)
names = [e.name for e in pks]
# Raises ValueError if the package is not installed.
names.index('numpy')
names.index('scrapy')
import requests
from bs4 import BeautifulSoup as bs
# +
# Fetch the article HTML.
url="https://www.analyticsvidhya.com/blog/2018/05/24-ultimate-data-science-projects-to-boost-your-knowledge-and-skills/"
r = requests.get(url)
data = r.text
len(data)
# -
soup = bs(data) # the warning is because we did not specify parser
# The article body lives in div.text-content.
txts = soup.find_all('div', attrs={"class":"text-content"})
len(txts),len(txts[0])
print(txts[0])
txt = txts[0]
type(txt)
# Paragraphs and section headings, in document order.
ps = txt.findAll(['p','h2','h3'])
len(ps)
ps[:3]
t=ps[0]
type(t)
t.name
url
t
ps[15]
# +
with open('paragraphs.txt', mode='w') as f:
f.write(f'OG URL:{url}\n\n')
for p in ps:
pad=""
if p.name == 'h3':
pad='### '
elif p.name == 'h2':
pad='## '
if len(p.findAll('a')):
f.write(f"{pad}{p.text}\n\n") ##TODO need to figure out how to only get parent text
for e in p.findAll('a'):
f.write(f"[{e.text}]({e['href']})\n\n")
else:
f.write(f"{pad}{p.text}\n\n")
# -
kids = list(txt.findChildren(recursive=True))
len(kids)
with open('projects.txt', mode='w') as f:
for e in kids:
f.write(e.text)
with open('content.txt', mode='w') as f:
for e in kids:
f.write(str(e.contents))
len(kids)
type(kids)
kds = [el for el in kids]
for e in kds:
print(e)
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="Rz9hX1PztAdt"
# Work with a workflow via KNIME.exe
# author: Gressling, T
# license: MIT License # code: github.com/gressling/examples
# activity: single example # index: 17-2
# + id="OWrSm-FutGzg"
import knime
import pandas as pd  # BUG FIX: pd was used below but never imported

knime.executable_path = "KNIME_PATH/knime.exe"
workspace = "KNIME_PATH/KNIME workspaces/book"
workflow = "test_workflow"
knime.Workflow(workflow_path=workflow, workspace_path=workspace)
# shows the workflow in the notebook!
# + id="xjGHUXhItJjq"
# BUG FIX: the original named this variable `in`, which is a reserved
# Python keyword and a SyntaxError; renamed to input_table.
input_table = pd.DataFrame({'smiles': ['COCc1cc(C=O)ccc1O']})
# + id="VtZ_U7LCtOVV"
with knime.Workflow(workflow_path=workflow,
                    workspace_path=workspace) as KWorkflow:
    KWorkflow.data_table_inputs[0] = input_table
    KWorkflow.execute()
# + id="VPVRM-RutQVZ"
result = KWorkflow.data_table_outputs[0]
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
"""
AR(1) model using default LLY method
"""
import numpy as np
import pandas as pd
import linkalman
import scipy
from linkalman.models import BaseConstantModel as BCM
from scipy.optimize import minimize
import matplotlib.pyplot as plt
from copy import deepcopy
# %matplotlib inline
# -
# # Initialization
# This example gives an introduction of core functionalities of `linkalman`. The first of the two primary inputs for `linkalman` models is a user-defined function that define system dynamics. Note that here we have `my_f`: `theta` -> `M`, because we are assuming constant model, and T provided by BaseConstantModel (BCM) by other means. In addition, BCM calculates unconditional means and covariances directly, and automatically generated Dt if not provided by users.
#
# If users want to use BaseOpt, they must provide a mapping such as `my_ft : (theta, T, **kwargs) -> Mt`, where:
#
# ```
# Mt = {'Ft': Ft, 'Qt': Qt, 'Ht': Ht, 'Rt': Rt, 'Bt': Bt, 'Dt': Dt, 'xi_1_0': xi_1_0, 'P_1_0': P_1_0}
# ```
#
# The output must be a dictionary and contains all the keys. The values for `Ft`, `Qt`, `Bt`, `Ht`, `Dt`, and `Rt` are a list of matrices of length T. In other words, Mt must provide the system matrics from 0 up to time T. `xi_1_0` and `P_1_0` are initial state means and covariances matrices, respectively. `Mt` define a BSTS model.
def my_f(theta):
    """Map unconstrained parameters `theta` to AR(1) state-space matrices.

    AR(1): y_t = c + phi_1 * y_{t-1} + epsilon_t.  In general MLE is
    biased, so the focus should be more on prediction fit, less on
    parameter estimation.

    theta[0] -> phi_1 via a logistic squash (keeps phi_1 in (0, 1)),
    theta[1] -> sigma via exp (keeps the variance positive),
    theta[2] -> the constant term B.
    """
    phi_1 = 1 / (np.exp(theta[0]) + 1)
    sigma = np.exp(theta[1])
    return {
        'F': np.array([[phi_1]]),
        'Q': np.array([[sigma]]),
        'H': np.array([[1]]),
        'R': np.array([[0]]),  # 0 keeps the model an exact AR(1) process
        'B': np.array([[theta[2]]]),
    }
# The second of the two primary inputs is a solver object. Here I use `scipy` optimizers to build:
#
# ```
# my_soler: (param, obj_func, **kwargs) -> (theta_opt, fval_opt)
# ```
#
# It effectively takes initial `param`, objective function `obj_func`, and optional `**kwargs` as inputs and produces optimal parameter `theta_opt` and function evaluation `fval_opt` as outputs. Any solver function should have such setup for `linkalman` to use. Here the `obj_func` is provided by `linkalman`. Note that since `linkalman` provides loglikelihood functions, we should modify the objective function if needed.
def my_solver(param, obj_func, **kwargs):
    """Maximise `obj_func` starting from `param` via scipy.optimize.minimize.

    Extra keyword arguments are forwarded to `minimize`.  Returns the
    optimal parameter vector and the final (negated-objective) value.
    """
    def negated(x):
        return -obj_func(x)
    result = minimize(negated, param, **kwargs)
    return np.array(result.x), result.fun
# Now let's initialize a `BaseConstantModel` (`BCM`) model. Use `model.set_f` and `model.set_solver` to plug in `my_f` and `my_solver`. Note that since we use `BCM`, which under the hood invokes `linkalman.core.utils.ft`, the keyword arguments in `model.set_f` are for `ft`.
# Initialize the model
x = 1 # used to calculate stationary mean
model = BCM()
model.set_f(my_f, x_0=x * np.ones([1, 1]))
model.set_solver(my_solver, method='nelder-mead',
options={'xatol': 1e-8, 'disp': True})
# # Generate Synthetic Data
# Now let's generate some synthetic data using `model.simulated_data`. I also split the data set into training set and test set. In addition, for the last bit of dataset, I mark them all as `np.nan` (meaning missing) to test the model's performance in forecasting instead of typical nowcasting in test data. Users of `linkalman` may use either the full data, or only the test data, if the initial state values are properly configured. I will illustrate this later.
# +
# Some initial parameters
theta = np.array([-1, -0.1, 1])
T = 365
train_split_ratio = 0.7
forecast_cutoff_ratio = 0.8
# Split train data
train_split_t = np.floor(T * train_split_ratio).astype(int)
# Generate missing data for forecasting
forecast_t = np.floor(T * forecast_cutoff_ratio).astype(int)
# If we want AR(1) with non-zero stationary mean, we should provide a constant
x_col = ['const']
Xt = pd.DataFrame({x_col[0]: x * np.ones(T)}) # use x to ensure constant model
# Build simulated data
df, y_col, xi_col = model.simulated_data(input_theta=theta, Xt=Xt)
# Store fully visible y for comparison later
df['y_0_vis'] = df.y_0.copy()
# Split rows into three groups: train / test / forecast
is_train = df.index < train_split_t
is_test = (~is_train) & (df.index < forecast_t)
is_forecast = ~(is_train | is_test)
# Create a training and test data
df_train = df.loc[is_train].copy()
# Build two kinds of test data (full data vs. test data only)
df_test = df.copy()
df_test_only = df.loc[is_test | is_forecast].copy()
# Mask the forecast-period measurements as missing (NaN)
df_test.loc[is_forecast, ['y_0']] = np.nan
df_test_only.loc[df_test_only.index >= forecast_t, ['y_0']] = np.nan
# -
# # Fit and Predict
# Let's fit the AR(1) model using the `LLY` method. Here, `model.fit` will take the input DataFrame, some initial guess, and column name of measurements `y_col` and of regressors `x_col`. After the model is optimized, I use `model.predict` to compute $\hat{y}_{t|t-1} \equiv E(y_t|y_1,y_2,...y_{t-1}, x_1,x_2,...,x_t;\theta)$. Note that we can also provide a specific `theta` into `model.predict` to generate customized predictions.
#
# I also create a confidence interval at 95% for $\hat{y}_{t|t-1}$ (since we have full measurements across all times, smoothed estimate $\hat{y}_{t|T}$ are just $y_t$ and are omitted here. We will discuss $\hat{y}_{t|T}$ in more details with AR(1) examples with missing measurements.
# +
# Fit data using LLY:
theta_init = np.random.rand(len(theta))
model.fit(df_train, theta_init, y_col=y_col, x_col=x_col,
          method='LLY')
# Make predictions from LLY, with a 95% confidence band on the filtered y:
df_LLY = model.predict(df_test)
df_LLY['kf_ub'] = df_LLY.y_0_filtered + 1.96 * np.sqrt(df_LLY.y_0_fvar)
df_LLY['kf_lb'] = df_LLY.y_0_filtered - 1.96 * np.sqrt(df_LLY.y_0_fvar)
# Make predictions using true theta (benchmark):
df_true = model.predict(df_test, theta=theta)
df_true['kf_ub'] = df_true.y_0_filtered + 1.96 * np.sqrt(df_true.y_0_fvar)
df_true['kf_lb'] = df_true.y_0_filtered - 1.96 * np.sqrt(df_true.y_0_fvar)
# -
# # Check Model Performance
# We can now check the performance of our model against both the actuals as well as the model prediction when using true $\theta$. Let's first plot $\hat{y}_{t|t-1}$ with confident intervals. Here I will just make some very simple plots, but with the output from the fitted models (for example `model.ks_fitted` contains the state estimates from both the filtering and smoothing process) one may build sophisticated analysis pipeline.
def simple_plot(df, col_est, col_actual, col_ub, col_lb, label_est,
                label_actual, title, figsize=(20, 10)):
    """Plot an estimate line vs. actual scatter with a confidence band.

    BUG FIX: the original scatter used the global ``df_LLY`` for its x
    values instead of the ``df`` argument, so plotting any other frame
    silently mixed indexes; it now uses ``df.index`` consistently.
    """
    ax = plt.figure(figsize=figsize)
    plt.plot(df.index, df[col_est], 'r', label=label_est)
    plt.scatter(df.index, df[col_actual], s=20, c='b',
                marker='o', label=label_actual)
    # Shade the confidence band between the upper and lower bound columns.
    plt.fill_between(df.index, df[col_ub], df[col_lb], color='g', alpha=0.2)
    ax.legend(loc='right', fontsize=12)
    plt.title(title, fontsize=22)
    plt.show()
simple_plot(df_LLY, 'y_0_filtered', 'y_0_vis', 'kf_ub', 'kf_lb', 'Filtered y0', 'Actual y0',
'Filtered y with estimated theta')
simple_plot(df_true, 'y_0_filtered', 'y_0_vis', 'kf_ub', 'kf_lb', 'Filtered y0', 'Actual y0',
'Filtered y with true theta')
# The plot shows the prediction using fitted theta and true theta are very similar. We can verify this by looking at some metrics. Overall, the fitted theta has very similar performance as the true theta in terms of RMSE. The mean_error is a noisier metric.
# Build a simple function
def summary_stats(df, col_est, col_true):
    """Return RMSE and mean error of `col_est` against `col_true` as a dict."""
    residual = df[col_est] - df[col_true]
    return {'RMSE': np.sqrt(residual.var()), 'mean_error': residual.mean()}
# Compare model performance in training data
# (typo fix: the three benchmark labels below said "Ture"; now "True".)
print('LLY theta for training data: {}'.format(
    summary_stats(df_LLY[is_train], 'y_0_filtered', 'y_0_vis')))
print('True theta for training data: {}'.format(
    summary_stats(df_true[is_train], 'y_0_filtered', 'y_0_vis')))
# Compare model performance in test data
print('LLY theta for test data: {}'.format(
    summary_stats(df_LLY[is_test], 'y_0_filtered', 'y_0_vis')))
print('True theta for test data: {}'.format(
    summary_stats(df_true[is_test], 'y_0_filtered', 'y_0_vis')))
# Compare model performance in forecast data
print('LLY theta for forecast data: {}'.format(
    summary_stats(df_LLY[is_forecast], 'y_0_filtered', 'y_0_vis')))
print('True theta for forecast data: {}'.format(
    summary_stats(df_true[is_forecast], 'y_0_filtered', 'y_0_vis')))
# # Prediction without Training Data
# Finally, `linkalman.models.BaseOpt.predict_t` allows starting prediction not from time index 0 (as determined by training data), but rather pick any time from 0 to T (note that training data ends with time index T-1), as long as the state at time t is not a diffuse state. This improvement allows lightweight model predictions. Instead of loading the entire dataset, users may just pick a starting time index t, and feed in data with first observation labeled with timestamp t. `t_index=-1` here means using T as the staring time index. You can see both generate exactly the same results.
# +
# NOTE(review): this bare expression only displays the frame in a notebook.
df_test_only
# Start prediction from the end of the training window (t_index=-1 means T).
df_LLY_test_only = model.predict_t(df_test_only, t_index=-1)
is_test_only = df_LLY_test_only.index < forecast_t
is_forecast_only = ~is_test_only
# Full-data and test-only predictions should match exactly.
print('LLY theta for test data: {}'.format(
    summary_stats(df_LLY[is_test], 'y_0_filtered', 'y_0_vis')))
print('LLY theta for test data only: {}'.format(
    summary_stats(df_LLY_test_only[is_test_only], 'y_0_filtered', 'y_0_vis')))
print('LLY theta for forecast data: {}'.format(
    summary_stats(df_LLY[is_forecast], 'y_0_filtered', 'y_0_vis')))
print('LLY theta for forecast data only: {}'.format(
    summary_stats(df_LLY_test_only[is_forecast_only], 'y_0_filtered', 'y_0_vis')))
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
class config:
    """Option container mimicking waifu2x's command-line arguments.

    All parameters now default to ``None`` so the object can be built with
    ``config()`` and then populated via :meth:`set_defaults`; the original
    19-positional-``None`` call style still works unchanged.
    """

    def __init__(self, gpu=None, uinput=None, output=None, quality=None,
                 model_dir=None, scale_ratio=None, tta=None, batch_size=None,
                 block_size=None, extension=None, arch=None, method=None,
                 noise_level=None, color=None, tta_level=None, width=None,
                 height=None, shorter_side=None, longer_side=None):
        self.gpu = gpu
        self.uinput = uinput          # input image path ("input" would shadow less, but name is public API)
        self.output = output
        self.quality = quality
        self.model_dir = model_dir
        self.scale_ratio = scale_ratio
        self.tta = tta
        self.batch_size = batch_size
        self.block_size = block_size
        self.extension = extension
        self.arch = arch
        self.method = method
        self.noise_level = noise_level
        self.color = color
        self.tta_level = tta_level
        self.width = width
        self.height = height
        self.shorter_side = shorter_side
        self.longer_side = longer_side

    def set_defaults(self):
        """Reset every option to the hard-coded defaults used by this notebook."""
        self.gpu = -1                 # -1 selects CPU
        self.uinput = 'images/small.png'
        self.output = './scaled/'
        self.quality = None
        self.model_dir = None
        self.scale_ratio = 2.0
        self.tta = None
        self.batch_size = 16
        self.block_size = 128
        self.extension = "png"
        self.arch = "VGG7"
        self.method = "scale"
        self.noise_level = 0
        self.color = 'rgb'
        self.tta_level = 8
        self.width = 0
        self.height = 0
        self.shorter_side = 0
        self.longer_side = 0

    def setInput(self, sinput):
        """Set the input image path (kept for backward compatibility)."""
        self.uinput = sinput

    def setOutput(self, soutput):
        """Set the output folder path (kept for backward compatibility)."""
        self.output = soutput
# Build an empty config (19 positional placeholders) and fill it with defaults.
cfg = config(None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None)
cfg.set_defaults()
# +
import os
import waifu2x
entries = []
# Batch mode (disabled): collect all .tif files from a fixed folder.
# for entry in os.scandir("C:\\work\\all\\"):
#     if entry.path.endswith(".tif") and entry.is_file():
#         entries.append(entry.path)
#Set Output Folder
# NOTE(review): output/input are set to None here, which overrides the
# defaults from set_defaults() — presumably placeholders to be filled in
# before running; confirm intended paths.
cfg.setOutput(None)
# for i in range(1):
#     cfg.setInput(entries[i])
#     waifu2x.main(cfg)
#Set Input File
cfg.setInput(None)
waifu2x.main(cfg)
# -
# import_ipynb lets plain `import` statements load .ipynb notebooks.
import import_ipynb
import test
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # WARNING
# **Please make sure to "COPY AND EDIT NOTEBOOK" to use compatible library dependencies! DO NOT CREATE A NEW NOTEBOOK AND COPY+PASTE THE CODE - this will use latest Kaggle dependencies at the time you do that, and the code will need to be modified to make it work. Also make sure internet connectivity is enabled on your notebook**
# # Preliminaries
# Write requirements to file, anytime you run it, in case you have to go back and recover dependencies. **MOST OF THESE REQUIREMENTS WOULD NOT BE NECESSARY FOR LOCAL INSTALLATION**
#
# Requirements are hosted for each notebook in the companion github repo, and can be pulled down and installed here if needed. Companion github repo is located at https://github.com/azunre/transfer-learning-for-nlp
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# !pip freeze > kaggle_image_requirements.txt
# -
# # Open Ended Text Generation with GPT-2
# + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0"
# Pipeline uses `gpt2` by default, but we specify it explicitly to be fully transparent
# Pipeline uses `gpt2` by default, but we specify it explicitly to be fully transparent
from transformers import pipeline
# Downloads the GPT-2 weights on first use (requires internet access).
gpt = pipeline('text-generation',model='gpt2')
# -
# Now, let's generate some text with GPT-2
# Returns a list of dicts with a 'generated_text' key; max_length caps the
# total token count (prompt included).
gpt("Transfer learning is a field of study", max_length=100)
# A nonexhaustive list of other model choices suitable for text generation within the transformers library include "ctrl" (CTRL - huge! too big for Kaggle), "xlnet-base-cased" (XLNet), "transfo-xl-wt103" (Transformer XL)... These often need to be padded very carefully to work well, GPT-2 is the safest choice for open-ended text generation. See https://huggingface.co/transformers/usage.html#text-generation for more.
# # Conversational Text Generation with DialoGPT
#
# DialoGPT is an extension of GPT to conversational response generation
# +
from transformers import AutoModelWithLMHead, AutoTokenizer # you can use these utility classes that automatically load the right classes
from transformers import GPT2LMHeadModel, GPT2Tokenizer # or these more specific classes directly
import torch
# DialoGPT reuses the GPT-2 architecture, so the GPT-2 classes load it.
tokenizer = GPT2Tokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = GPT2LMHeadModel.from_pretrained("microsoft/DialoGPT-medium")
# -
# Chat for 5 Lines
conversation_length = 5
for step in range(conversation_length):
    # encode new user input, add end-of-sentence token, return tensor
    new_user_inputs_ids = tokenizer.encode(input("User: ") + tokenizer.eos_token, return_tensors='pt')
    # add new input to chat history; on the first turn there is no history yet,
    # so chat_history_ids is only read when step > 0
    bot_input_ids = torch.cat([chat_history_ids, new_user_inputs_ids], dim=1) if step > 0 else new_user_inputs_ids
    # generate a response of up to max_length tokens (greedy decoding by default)
    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    # display only the newly generated tokens (everything after the prompt)
    print("DialoGPT: {}".format(tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)))
# **Because the notebook is not saving input text, here is the entire conversation so you know how we prompted the model**
#
# User: hi robot
#
# DialoGPT: Hello, human.
#
# User: huh?
#
# DialoGPT: I'm a bot.
#
# User: ok, what is your name?
#
# DialoGPT: Robot. I'm a robot.
#
# User: ok! Can you say something else?
#
# DialoGPT: Robot. I'm a robot.
#
# User: Do you have children?
#
# DialoGPT: Robot. I'm a robot.
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.4 64-bit
# name: python3
# ---
# # Composition
# +
class Engine:
    """A named engine whose status tracks whether it is running."""

    def __init__(self, name):
        self.name = name
        self.status = None  # neither started nor stopped yet

    def _transition(self, new_status):
        # Single point where the status attribute is mutated.
        self.status = new_status

    def start(self):
        """Mark the engine as running."""
        self._transition("started")

    def stop(self):
        """Mark the engine as halted."""
        self._transition("stopped")
# -
class Car:
    """A car built by composition: it *has an* Engine and forwards commands to it."""

    def __init__(self):
        # The car creates and owns its engine rather than inheriting from it.
        self.engine = Engine(name='mHawk')

    def start(self):
        # Delegate to the composed engine.
        self.engine.start()

    def stop(self):
        # Delegate to the composed engine.
        self.engine.stop()
# Demonstrate composition: driving the car drives its engine.
mahindra_car = Car()
mahindra_car.start()
print(mahindra_car.engine.status)  # -> started
mahindra_car.stop()
print(mahindra_car.engine.status)  # -> stopped
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Organizando uma eleição
# (créditos ao professor Rafael)
# O último ano foi extremamente desafiador para você. Além de estarmos vivendo uma pandemia que aumentou nossa preocupação com a nossa própria vida e com nossos entes queridos e estarmos obrigados a passar mais tempo em casa para garantir nossa segurança, você se matriculou em um curso de programação e ciência de dados para mudar de rumo em sua carreira!
#
# O _timing_ pareceu ideal: mais tempo em casa, mais tempo para estudar. Porém, você esbarrou em algumas dificuldades. O condomínio está em reforma, e você passa o dia ao som de marretadas no piso. Durante a noite, no horário da sua aula online, seus vizinhos - um casal passando por dificuldades na relação - costumam discutir em voz alta. A discussão deles frequentemente acorda o bebê do outro vizinho e todos os cachorros do andar no prédio. Deixar para estudar no final de semana não ajuda, pois o filho adolescente da família do apartamento logo acima do seu costuma trazer os colegas de banda para ensaiar.
#
# A situação se tornou insustentável, e o síndico - que é o mesmo senhorzinho desde que você se mudou para esse condomínio, há mais de 10 anos - não toma atitudes e não gosta de ser perturbado. Chegou a hora de organizar uma eleição para um novo síndico, e você tomou a iniciativa de montar um sistema eletrônico para auxiliar na votação.
# ## Entidades envolvidas
#
# Podemos imaginar as seguintes entidades envolvidas na eleição:
#
# * **Morador**: representa cada uma das pessoas que moram no prédio. Possui um nome, um Apartamento e é capaz de depositar um voto em um Candidato em uma Urna.
# * **Candidato**: é um tipo especial de Morador. Possui tudo o que o morador possui, mas também possui um número que será usado para representá-lo na Urna e uma contagem individual de votos.
# * **Apartamento**: representa cada uma das unidades do prédio. Possui uma lista de moradores e um status indicando se ele já votou ou não. Cada apartamento tem direito a exatamente 1 voto. Se a sua filha já votou, você não pode mais votar!
# * **Urna**: coleciona votos. Ela possui uma coleção de candidatos e uma coleção de moradores (lembrando que o candidato também conta como morador!). Ela armazena a quantidade de votos de cada candidato e é capaz de incrementar essa quantidade ao receber um voto novo. Ela também possui uma coleção de apartamentos e é capaz de determinar quais apartamentos já votaram (mas não qual apartamento votou em cada candidato - o voto é secreto). Quando o último apartamento votar, ela publica os resultados e declara o vencedor.
#
# Vamos pensar juntos em meios de implementar cada uma das classes.
# ### Morador
#
# A classe mais básica de nossa eleição, representando cada uma das pessoas que habitam no condomínio. O morador obrigatoriamente possui um nome e um Apartamento.
#
# Seu construtor pode ou não receber uma string contendo o nome, e pode ou não receber um objeto Apartamento. Caso ele não receba um nome, deverá solicitar a digitação de um através do input. Caso ele não receba um objeto Apartamento, deverá pedir por input o número do apartamento e criar o objeto.
#
# O nome e apartamento digitados ou recebidos deverão se tornar atributos do objeto.
#
# Nosso objeto também deverá possuir um método para votar. Ele deverá receber um objeto Urna e pode receber um número inteiro correspondente ao número de um candidato ou, caso não receba, irá ler o número pelo teclado. Ela deverá primeiro verificar se o seu apartamento já votou, e caso não tenha, ela irá chamar o método de acrescentar voto na Urna, informando o número de seu candidato e passando seu objeto apartamento. Caso contrário, ela exibirá uma mensagem alertando que esse apartamento já votou.
# +
# Crie sua classe Morador aqui
# -
# ### Candidato
#
# Objetos da classe Candidato são, necessariamente, moradores. Eles possuem as mesmas informações e as mesmas ações que um morador. O que isso significa em programação orientada a objeto?
#
# Ele possui outros dois atributos: o seu número que será usado para identificá-lo na Urna e sua contagem de votos. Ambos devem ser inicializados com valor 0.
#
# Ele deve oferecer métodos para que seu número seja configurado (quando cadastrarmos o candidato na Urna, ela irá fornecer um número para ele), bem como para que sua contagem de votos seja atualizada (ao final da votação, a Urna irá atualizar a contagem de todos os candidatos).
# +
# Crie sua classe Candidato aqui
# -
# ### Apartamento
#
# Objetos da classe Apartamento irão agregar objetos Morador. Isso significa que um de seus atributos será uma lista de moradores. Quando um Apartamento é criado, sua lista começará vazia. O Apartamento deve oferecer métodos para adicionar moradores novos (recebe um objeto Morador e adiciona à lista) e para visualizar seus moradores.
#
# Cada apartamento tem direito a exatamente um voto na eleição para síndico. Portanto, cada objeto também deve ter um atributo indicando se aquele apartamento já votou ou não. A classe deve fornecer meios para que a esse atributo possa ser atualizado, pois quando um morador votar, a urna irá alterar esse atributo registrando que o apartamento já votou.
# +
# Crie sua classe Apartamento aqui
# -
# ### Urna
# A classe mais complexa do projeto. Ela é responsável por boa parte da lógica da nossa eleição.
#
# Seu construtor deverá criar como atributos uma coleção de apartamentos e outra de candidatos - escolha a estrutura de dados que julgar mais adequada.
#
# A classe deve fornecer métodos que permitam o cadastro de apartamentos (recebendo como parâmetro objetos Apartamento já existentes) e candidatos (idem). Ao receber um candidato para cadastro, ela deverá gerar um número **único** para representar esse candidato na eleição e irá ajustar esse atributo diretamente no objeto candidato.
#
# Ela também deve ter um método para receber um voto, que deve receber um objeto apartamento e o número de um candidato. Ela deve atualizar a contagem de voto do candidato cujo número foi recebido, e marcar aquele apartamento como já votado. Se o apartamento que votou era o último que ainda podia votar, a urna deve imediatamente imprimir os resultados.
#
# Bole uma maneira que a urna possa sinalizar para o programa principal se ela já encerrou a votação ou não.
# +
# Crie sua classe Urna aqui
# -
# ## O programa principal
#
# Seu programa principal terá 3 etapas - fique à vontade para modularizar essas etapas com o auxílio de funções, ou mesmo utilizar outras classes se julgar necessário.
#
# * Cadastro: o programa deverá ler informações sobre cada pessoa e ir criando os objetos Morador/Candidato e Apartamento correspondentes. Lembre-se de sempre perguntar se a próxima pessoa a ser lida é ou não candidata para decidir a melhor forma de instanciá-la no sistema.
#
# * Configuração: o programa deverá utilizar as informações obtidas na etapa anterior para criar uma urna.
#
# * Votação: o programa ficará em loop permitindo que diferentes moradores votem (ou tentem votar). Nessa etapa, o programa deve ser capaz de identificar qual o morador votando e utilizar os métodos criados previamente para realizar o voto. Quando a classe Urna informar que a votação acabou, o loop deve ser encerrado.
# +
## crie o programa principal aqui
# -
# ## Dicas
#
# * Métodos mágicos: no mínimo uma _representação imprimível_ dos objetos de cada classe seria desejável para facilitar a sua vida. No caso de um morador, podemos imprimir seu nome e número do apartamento. No caso de um candidato, poderíamos incluir o número da urna. No caso do apartamento, seria legal imprimir todos os moradores. E no caso da urna, podemos ter dois casos: antes da votação acabar, apenas a lista de apartamentos que já votou deveria ser impressa. Após o término da votação, ela deveria imprimir quantos votos cada candidato recebeu e qual deles venceu.
#
# * Arquivos e testes: note que vocês sempre foram orientados a passar dados por parâmetros **ou** digitá-los. Se você bolar um esquema inteligente com arquivos em formato padronizado (como CSV) e organizar direitinho seu programa principal, você consegue gerar um único arquivo com todos os moradores e suas respectivas informações (número do apartamento, se é ou não candidato e em quem gostaria de votar) e automatizar completamente as etapas do seu programa.
#
# * Segurança adicional: atualmente, cada apartamento armazena a informação se já votou ou não. Mas e se o apartamento pudesse alterar essa informação e enganar a urna? Você pode colocar uma camada redundante de segurança fazendo com que a urna TAMBÉM controle quais apartamentos já votaram. Mesmo que o apartamento diga que não votou, se a urna está registrando que ele votou, então um hipotético segundo voto deve ser recusado. Veja se consegue pensar em outras possíveis falhas de segurança em potencial no programa. Lembre-se sempre de pensar do ponto de vista de uma classe e "desconfiar" das outras.
#
# * Criatividade: os enunciados trazem o **mínimo** que você precisa fazer para que o seu programa funcione como foi planejado. Isso não impede que você crie outros métodos, acrescente informações que possam ser úteis e até mesmo crie outras classes para modelar melhor outros fatores que poderiam ser considerados na organização da sua eleição.
#
# ## Bônus: representação visual dos votos
#
# A função abaixo gera um gráfico de barras mostrando quantos votos cada candidato teve! Cole-a em sua classe Urna e faça os devidos ajustes para que ela use os atributos da Urna ao invés dos parâmetros utilizados.
#
# O programinha na célula seguinte apenas ilustra o funcionamento da função.
#
# +
import matplotlib.pyplot as plt
def graficoVotos(candidatos, votos):
    """Draw a bar chart of votes per candidate on the current axes.

    candidatos -- sequence of candidate labels (x axis)
    votos      -- sequence of vote counts, parallel to candidatos
    """
    eixo = plt.gca()  # same target the pyplot state machine would use
    eixo.bar(candidatos, votos)
    eixo.set_title('Resultado das eleições')
    eixo.set_xlabel('Candidatos')
    eixo.set_ylabel('Votos')
# +
# Illustrative data only: three fake candidates and their vote counts.
candidatos = ['Python', 'JavaScript', 'HTML']
votos = [8, 3, 4]
graficoVotos(candidatos, votos)
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src='https://raw.githubusercontent.com/afo/dataXprague/master/imgs/dx_logo.png' width=600px></img>
#
# **Inspiration and sources:** Databricks intro tutorial and [Using Apache Spark 2.0 to Analyze the City of San Francisco's Open Data](https://www.youtube.com/watch?v=K14plpZgy_c) by Sameer Farooqui
#
# **Authors**: Alexander Fred-Ojala & Ikhlaq Sidhu
#
# # Introduction to Spark and Big Data
#
# Databricks is a platform for running Spark without complex cluster management or tedious maintenance tasks. Spark is a distributed computation framework for executing code in parallel across many different machines. Databricks is the Spark team's enterprise solution makes big data simple by providing Spark as a hosted solution.
#
# ## Databricks Terminology
#
# - ****Workspaces**** : Where you store the ****notebooks**** and ****libraries****.
# - ****Notebooks**** : Like Jupyter Notebooks that can run `Scala`, `Python`, `R`, `SQL`, or `Markdown`. Define language by `%[language name]` at the top of the cell. Connect to a cluster to run.
# - ****Dashboards**** can be created from ****notebooks**** as a way of displaying the output of cells without the code.
# - ****Libraries**** : Packages / Modules. You can install them via pypi.
# - ****Tables**** : Structured data, that can be stored in data lake / cloud storage. Stored on Cluster or cached in memory.
# - ****Clusters**** : Groups of computers that you treat as a single computer to perform operations on big sets of data.
# - ****Jobs**** : Schedule execution on ****notebooks**** or Python scripts. They can be created either manually or via the REST API.
# - ****Apps**** : 3rd party integrations with the Databricks platform like Tableau.
# ### Spark's history
#
# Spark was developed by founders of Databricks in AMPLab at UC Berkeley. Started 2009, donated to Apache open source in 2013.
#
# ### The Contexts/Environments
#
# Before Spark 2.X many used the `sparkContext` made available as `sc` and the `SQLContext` made available as `sqlContext`. The `sqlContext` makes a lot of DataFrame functionality available while the `sparkContext` focuses more on the Apache Spark engine itself.
#
# In Spark 2.X, there is just one context - the `SparkSession`.
#
# ### The Data Interfaces
#
# Key interfaces.
#
# - ****The DataFrame**** : Collection of distributed `Row` types (note no indicies for look up). Similar to pandas or R dataframe.
# - ****The RDD (Resilient Distributed Dataset)**** : Interface to a sequence of data objects that consist of one or more types that are located across a variety of machines in a cluster. Focus on DataFrames as those will be supersets of the current RDD functionality.
#
# See speed difference:
#
# <img src='https://databricks.com/wp-content/uploads/2015/02/Screen-Shot-2015-02-16-at-9.46.39-AM.png' width=600px></img>
# Spark is a unified processing engine that can analyze big data using SQL, machine learning, graph processing or real time stream analysis. Streaming (infinte Dataframe), Machine Learning, Graph / Pagerank.
#
# 
# You can read from many different data sources and Spark runs on every major environment. We will use Amazon EC2. We will read CSV data. Stick with Dataframe and SQL.
#
# 
# # Let's Start
#
# Before you start running code, you need to make sure that the notebook is attached to a cluster.
#
# ### To create a Cluster
#
# Click the Clusters button that you'll notice on the left side of the page. On the Clusters page, click on  in the upper left corner.
#
# Then, on the Create Cluster dialog, enter the configuration for the new cluster.
#
# Finally,
#
# - Select a unique name for the cluster.
# - Select the most recent stable Runtime Version.
# - Enter the number of workers to bring up - at least 1 is required to run Spark commands.
#
#
# **Go back to the notebook and in the top right corner press Detached and connect to your cluster.**
#
# *Note, Databricks community clusters only run for an hour*
# first let's explore the previously mentioned `SparkSession` where info is stored. We can access it via the `spark` variable.
# Bare expression: the notebook renders the SparkSession object.
spark
# We can use the spark context to parallelize a small Python range that will provide a return type of `DataFrame`.
# +
firstDataFrame = spark.range(10000)
print(firstDataFrame) # if you just run a transformation no Spark Job is done.
# -
# or use RDD through sc (spark context)
spark.sparkContext.parallelize(range(1000))
# Now one might think that this would actually print the values parallelized. That's not how Spark works.
#
# Spark allows two distinct kinds of operations, **transformations** and **actions**.
#
# 
#
# ### Transformations
#
# Transformations will only get executed once you have called a **action**. An example of a transformation might be to convert an integer into a float or to filter a set of values. I.e. Lazy Evaluation.
#
# ### Actions
#
# Actions are computed during execution. Run all of the previous transformations in order to get back an actual result. An action is composed of one or more jobs which consists of tasks that will be executed by the workers in parallel where possible.
#
# A short sample of actions and transformations:
#
# 
firstDataFrame.show(3) # example of an action, dataframe is now evaluated
# An example of a transformation
# select the ID column values and multiply them by 2, SQL interface
secondDataFrame = firstDataFrame.selectExpr("(id * 2) as value")
secondDataFrame.show(5)
# +
from pyspark.sql.functions import col # to select columns
firstDataFrame.withColumn('id2', col('id')*2).show(3)
# -
# Or common before Spark 2.X: drop to the RDD API and map in Python
firstDataFrame.rdd.map(lambda x: x[0]*2).take(3)
# or collect the first rows to the driver
firstDataFrame.take(5)
# or use the Databricks rich table rendering
display(firstDataFrame)
# Transformations are lazily evaluated because it is easy to optimize the entire pipeline of computations this way. Computations can be parallellized and executed on many different nodes at once (like a map and a filter).
#
# 
#
# Spark also keeps results in memory, as opposed to other frameworks (e.g. Hadoop Map Reduce) that write to disk.
#
# ## Spark Architecture
#
# Spark allows you to treat many machines as one via a master-worker architecture.
#
# There is `driver` or master node in the cluster, accompanied by `worker` nodes. The master sends work to the workers and either instructs them to pull to data from memory or from disk (or from another data source).
#
# Spark Cluster has a Driver node that communicates with executor nodes. Executor nodes are logically like execution cores.
#
# 
#
# The Driver sends Tasks to the empty slots on the Executors when work has to be done:
#
# 
#
# Note: In the case of the Community Edition there is no worker, the master executes the entire code. However, the same code works on any cluster (beware of CPU / GPU frameworks).
#
# 
#
# Access details in the web UI by clicking at the top left of this notebook.
# # Working example with data
#
# To illustrate **transformations** and **actions** - let's go through an example using `DataFrames` and a csv file of a public dataset that Databricks makes available. Available using the Databricks filesystem. Let's load the popular diamonds dataset in as a spark `DataFrame`. Now let's go through the dataset that we'll be working with.
# Use `%fs` to interact with the spark filesystem
# %fs ls /databricks-datasets/Rdatasets/data-001/datasets.csv
# +
dataPath = "/databricks-datasets/Rdatasets/data-001/csv/ggplot2/diamonds.csv"
diamonds = spark.read.format("csv")\
    .option("header","true")\
    .option("inferSchema", "true")\
    .load(dataPath)
# inferSchema means we will automatically figure out column types
# at a cost of reading the data more than once
# -
# Show the dataframe with Databricks `display` function or the show function.
display(diamonds)
display(diamonds.limit(5)) # for a subset
diamonds.printSchema() # see that the column types are OK and schema inferred correctly.
diamonds.rdd.getNumPartitions() # only one partition. This dataframe does not exist in memory. For big data several partitions.
# Partitions can be optimized according to your cluster size. Have it divisible by cluster size.
# For community edition, any number * 3 is OK
# you can use REPARTITION method
diamonds.count() # reads through the whole data set
display(diamonds.summary())
diamonds.select('cut').distinct().show() # show unique entries in the cut column
# What makes `display` exceptional is the fact that we can very easily create some more sophisticated graphs by clicking the graphing icon that you can see below. Here's a plot that allows us to compare price, color, and cut.
# Displayed again so the notebook's plot UI can chart price/color/cut.
display(diamonds)
# most common cut, ordered. First interesting insight.
display(diamonds.select('cut').groupBy('cut').count().orderBy('count',ascending=False))
display(diamonds.select('price','cut').groupBy('cut').avg('price')) # show graph, prepares 5 jobs
# Now that we've explored the data, let's return to understanding **transformations** and **actions**. First transformations, then actions.
#
# First we group by two variables, cut and color and then compute the average price. Then we're going to inner join that to the original dataset on the column `color`. Then we'll select the average price as well as the carat.
# +
df1 = diamonds.groupBy("cut", "color").avg("price") # a simple grouping
# Note the backticks: `avg(price)` is the literal column name produced by avg().
df2 = df1\
    .join(diamonds, on='color', how='inner')\
    .select("`avg(price)`", "carat")
# a simple join and selecting some columns
# -
# These transformations are now complete in a sense but nothing has happened.
#
# The reason for that is these computations are *lazy* in order to build up the entire flow of data from start to finish required by the user. This is an intelligent optimization for two key reasons. Any calculation can be recomputed from the very source data allowing Apache Spark to handle any failures that occur along the way, successfully handle stragglers. Secondly, Spark can optimize computation so that data and computation can be `pipelined`.
#
# To get a sense for what this plan consists of, we can use the `explain` method.
# explain() prints the physical query plan without executing anything.
df2.explain()
# Now explaining the above results is outside of this introductory tutorial. This is Spark's plan for how it hopes to execute the given query.
df2.count()  # an action: triggers the lazy pipeline built above
# This will execute the plan that Apache Spark built up previously. Click the little arrow next to where it says `(X) Spark Jobs` after that cell finishes executing and then click the `View` link. This brings up the Apache Spark Web UI right inside of your notebook.
#
# 
#
# These are significant visualizations called Directed Acyclic Graphs (DAG)s of all the computations that have to be performed in order to get to that result.
#
# Transformations are *lazy* - while generating this series of steps Spark will optimize lots of things, one of core reasons that users should be focusing on using DataFrames and Datasets instead of the legacy RDD API. With DataFrames and Datasets, Apache Spark will work under the hood to optimize the entire query plan and pipeline entire steps together.
# # SQL view
diamonds.repartition(3).createOrReplaceTempView("diamondsView") # also repartition, create a table view for SQL
diamonds.count()
# %sql SELECT carat, cut, color from diamondsView ORDER BY carat DESC;
# in jupyter: run the same SQL through the SparkSession instead of %sql magic
spark.sql('SELECT * FROM diamondsView').show()
# # To pandas DataFrame
# +
import pandas as pd
# Collects the full distributed DataFrame to the driver as a pandas frame —
# only safe when the data fits in driver memory.
pd_df = diamonds.toPandas()
# -
pd_df.head(5)
type(pd_df)
# ### Caching
#
# Spark can store things in memory during computation. Can speed up access to commonly queried tables or pieces of data. This is also great for iterative algorithms that work over and over again on the same data.
#
# To cache a DataFrame or RDD, simply use the cache method.
# Caching is lazy: nothing is stored until an action materializes df2.
df2.cache() # look in the UI / Storage
# Caching, like a transformation, is performed lazily, won't store the data in memory until you call an action on that dataset.
#
# Here's a simple example. We've created our df2 DataFrame which is essentially a logical plan that tells us how to compute that exact DataFrame. We've told Apache Spark to cache that data after we compute it for the first time. So let's call a full scan of the data with a count twice. The first time, this will create the DataFrame, cache it in memory, then return the result. The second time, rather than recomputing that whole DataFrame, it will just hit the version that it has in memory.
#
# Let's take a look at how we can discover this.
df2.count() # read all data and then materialize, cache it in memory
#
# Tungsten method to cache DataFrame into memory, makes it smaller.
# Optimize by repartitioning according to your cluster also
# Optimal partition sizes are 50-100Mb
# The second count is served from the in-memory cache, so it should run
# much faster than the first.
df2.count()
# In the above example, we can see that this cuts down on the time needed to generate this data immensely - often by at least an order of magnitude. With much larger and more complex data analysis, the gains that we get from caching can be even greater!
# %fs ls /tmp/
# to save work and dataframe save as a Parquet file
diamonds.write.format('parquet').save('/tmp/diamonds/')
# %fs ls /tmp/diamonds/
# Easily continue work if the cluster is shutdown, link to folder:
diamonds2 = spark.read.parquet('/tmp/diamonds/')
# +
diamonds2.show() # will include all partitioning, cache into memory etc.
# Parquet files are really efficient to read from. Always take CSV or JSON, do the ETL and then write to Parquet file.
# -
# ## Conclusion
#
# In this notebook we've covered a ton of material! But you're now well on your way to understanding Spark and Databricks!
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# +
# Single-site spin updates on a (g+2)x(g+2) grid for four coupling values
# beta, one panel per value (Ising-style heat-bath sampling).
par(mfrow=c(2,2))
betalist <- c(0, 0.441, 0.75, -1.5)
for (z in 1:4) {
  g <- 100
  beta <- betalist[z]
  trials <- 100000
  # Random +/-1 spins; the outer border is fixed to 0 and acts as padding.
  grid <- matrix(sample(c(-1, 1), (g+2)^2, rep=T), nrow=g+2)
  grid[c(1, g+2), ] <- 0
  grid[, c(1, g+2)] <- 0
  for (m in 1:trials) {
    # Pick a random interior site.
    i <- sample(2:(g+1), 1)
    j <- sample(2:(g+1), 1)
    # Sum of the four neighbouring spins.
    deg <- grid[i, j+1] + grid[i, j-1] + grid[i-1, j] + grid[i+1, j]
    # Probability of flipping this spin to +1 given its neighbourhood.
    p <- 1/(1 + exp(-beta * 2 * deg))
    if (runif(1) < p) grid[i,j] <- 1 else grid[i,j] <- -1
  }
  # Drop the border and render the interior spins in black and white.
  final <- grid[2:(g+1), 2:(g+1)]
  image(final, yaxt='n', xaxt='n', col=c(0,1))
}
# -
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# ==================
# Embedding in wx #4
# ==================
#
# An example of how to use wxagg in a wx application with a custom toolbar.
#
# +
from matplotlib.backends.backend_wxagg import (
FigureCanvasWxAgg as FigureCanvas,
NavigationToolbar2WxAgg as NavigationToolbar,
)
from matplotlib.figure import Figure
import numpy as np
import wx
class MyNavigationToolbar(NavigationToolbar):
    """Extend the default wx toolbar with your own event handlers."""

    def __init__(self, canvas):
        NavigationToolbar.__init__(self, canvas)
        # We use a stock wx bitmap, but you could also use your own image file.
        bmp = wx.ArtProvider.GetBitmap(wx.ART_CROSS_MARK, wx.ART_TOOLBAR)
        # Tooltip typo fixed: "contol" -> "control".
        tool = self.AddTool(wx.ID_ANY, 'Click me', bmp,
                            'Activate custom control')
        self.Bind(wx.EVT_TOOL, self._on_custom, id=tool.GetId())

    def _on_custom(self, evt):
        # add some text to the axes in a random location in axes coords with a
        # random color
        ax = self.canvas.figure.axes[0]
        x, y = np.random.rand(2)  # generate a random location
        rgb = np.random.rand(3)   # generate a random color
        ax.text(x, y, 'You clicked me', transform=ax.transAxes, color=rgb)
        self.canvas.draw()
        evt.Skip()  # let wx continue normal event processing
class CanvasFrame(wx.Frame):
    """Top-level frame embedding a matplotlib canvas plus the custom toolbar."""

    def __init__(self):
        wx.Frame.__init__(self, None, -1,
                          'CanvasFrame', size=(550, 350))
        # Build the figure and plot one sine period before creating the canvas.
        self.figure = Figure(figsize=(5, 4), dpi=100)
        self.axes = self.figure.add_subplot(111)
        t = np.arange(0.0, 3.0, 0.01)
        s = np.sin(2 * np.pi * t)
        self.axes.plot(t, s)
        self.canvas = FigureCanvas(self, -1, self.figure)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND)
        # Realize() must be called before the toolbar is added to a sizer.
        self.toolbar = MyNavigationToolbar(self.canvas)
        self.toolbar.Realize()
        # By adding toolbar in sizer, we are able to put it at the bottom
        # of the frame - so appearance is closer to GTK version.
        self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
        # update the axes menu on the toolbar
        self.toolbar.update()
        self.SetSizer(self.sizer)
        self.Fit()  # size the frame to fit the sizer's contents
class App(wx.App):
    """Minimal wx application that shows a single CanvasFrame."""

    def OnInit(self):
        'Create the main window and insert the custom frame'
        frame = CanvasFrame()
        frame.Show(True)
        # True tells wx that application initialization succeeded.
        return True
# Instantiate the application and enter the wx event loop
# (blocks until the frame is closed).
app = App(0)
app.MainLoop()
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="DxgYaej6zNEa" outputId="fcb46920-721d-462d-fab8-74dc223c4365"
#initial package installations
# #!pip install --upgrade pip
# #!pip uninstall -y scipy
# !pip install scipy==1.2.3
# !pip install --upgrade grpcio==1.24.3 tensorflow==2.0.0
# !pip install -U h5py sklearn tqdm bleach pillow
# !pip install keras-vis
# !pip install matplotlib==3.0.3 numpy==1.18.1 seaborn==0.9.0 pandas==0.25.3
# !pip install numpy==1.18.1
# !pip install scikit-image==0.14.2
# + colab={"base_uri": "https://localhost:8080/", "height": 47} colab_type="code" id="OZwjt-AhzEa-" outputId="b9c5b815-ecf3-4bf9-a7b7-14aed52e6e7a"
from tensorflow.keras.models import load_model
import os
import matplotlib.pyplot as plt
import scipy
print(scipy.__version__)
from vis.visualization import visualize_saliency
from vis.utils import utils
from keras import activations
import matplotlib.image as mpimg
import scipy.ndimage as ndimage
import tensorflow as tf
# + [markdown] colab_type="text" id="kfKJ2wKnzEbN"
# **Vizualizing Saliency Maps**
# https://medium.com/@ODSC/visualizing-your-convolutional-neural-network-predictions-with-saliency-maps-9604eb03d766
# + colab={"base_uri": "https://localhost:8080/", "height": 255} colab_type="code" id="kEjGsqiNzEbc" outputId="d6bbe603-55ab-49a6-f161-06cdeaa35132"
# Load the trained ResNet101V2 classifier and inspect its architecture.
saved_model = tf.keras.models.load_model('./best_resnetV2101.hdf5')
saved_model.summary()
# + colab={} colab_type="code" id="N8JnJmV93o4l"
# Sample COVID chest X-ray used as the seed image for the saliency map.
img = mpimg.imread('covid_1.jpeg')
plt.imshow(img)
# + colab={} colab_type="code" id="2NuQNjrRzEbk"
# keras-vis needs a linear output activation: swap the final activation,
# then round-trip through disk so the change is baked into the graph.
layer_idx = utils.find_layer_idx(saved_model, 'dense_30')
saved_model.layers[-1].activation = activations.linear
saved_model.save('saliency.h5')
model = load_model('saliency.h5')
# + colab={} colab_type="code" id="xuneQnYTzEbu"
model = load_model('saliency.h5')
layer_idx = utils.find_layer_idx(model, 'dense_30')
# + colab={} colab_type="code" id="4e4DArYHzEb1"
# Saliency of the output w.r.t. the seed image.
grads = visualize_saliency(model,
                           layer_idx,
                           filter_indices=None,
                           seed_input=img,
                           backprop_modifier=None,
                          # grad_modifier='absolute',
                           )
# + colab={"base_uri": "https://localhost:8080/", "height": 286} colab_type="code" id="GoCttSdSzEb8" outputId="80ea7a6e-19d7-478f-9ada-c0cfcb6db3f1"
# Smooth channel 2 of the saliency map and overlay it on the X-ray.
gaus = ndimage.gaussian_filter(grads[:,:,2], sigma=5)
plt.imshow(img)
plt.imshow(gaus, alpha=.6)
# + colab={"base_uri": "https://localhost:8080/", "height": 427} colab_type="code" id="oiNmt3CLOL67" outputId="6eff200e-e5ba-4d2c-fc23-d953712d1524"
# 2x4 grid: top row NORMAL cases, bottom row COVID cases, each with its
# smoothed saliency overlay.
fig, ax = plt.subplots(2, 4, figsize=(15, 7), sharey=True, sharex=True)
[axi.set_axis_off() for axi in ax.ravel()]
for i in range(4):
  normal_img = mpimg.imread('Normal_{}.jpeg'.format(i))
  grads = visualize_saliency(model, -1, filter_indices=None, seed_input=normal_img, grad_modifier='absolute', backprop_modifier=None)
  gaus = ndimage.gaussian_filter(grads[:,:,2], sigma=5)
  ax[0, i].imshow(normal_img)
  ax[0, i].imshow(gaus, alpha=.5, cmap="bwr")
  ax[0, i].set_title('NORMAL {}'.format(i+1))
  covid_img = mpimg.imread('covid_{}.jpeg'.format(i))
  grads = visualize_saliency(model, -1, filter_indices=None, seed_input=covid_img, grad_modifier='absolute', backprop_modifier=None)
  gaus = ndimage.gaussian_filter(grads[:,:,2], sigma=5)
  ax[1, i].imshow(covid_img)
  ax[1, i].imshow(gaus, alpha=.5, cmap="bwr")
  ax[1, i].set_title('COVID {}'.format(i+1))
# + colab={} colab_type="code" id="j6oYiAFdfdw0"
# Index the model's layers by name; the ResNet backbone is itself a nested
# model, so index its sub-layers too.
layer_dict = dict([(layer.name, layer) for layer in saved_model.layers])
layer_dict2 = dict([(layer.name, layer) for layer in layer_dict['resnet101v2'].layers])
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="JgXWGQ8zpgZb" outputId="e448351d-8cf1-486b-e1bb-b30aebc9025b"
# BUG FIX: summary() returns None, so the old `model = ...summary()` silently
# clobbered the loaded model. Just print the summary instead.
layer_dict['resnet101v2'].summary()
# -
# **Vizualizing Convolutional Layers** Using example from Keras Documentation: https://keras.io/examples/conv_filter_visualization/
# + colab={} colab_type="code" id="pLQxKb3AmpdX"
from __future__ import print_function
import time
import numpy as np
from PIL import Image as pil_image
from keras.preprocessing.image import save_img
from keras import layers
from keras import backend as K
def normalize(x):
    """Rescale a tensor by its RMS magnitude.

    # Arguments
        x: An input tensor.
    # Returns
        `x` divided by its root-mean-square value (epsilon-stabilized
        to avoid division by zero).
    """
    rms = K.sqrt(K.mean(K.square(x)))
    return x / (rms + K.epsilon())
def deprocess_image(x):
    """Convert a float array into a valid uint8 image.

    # Arguments
        x: A numpy-array representing the generated image.
    # Returns
        A processed numpy-array, which could be used in e.g. imshow.

    NOTE: the first four operations are augmented assignments, so *x* is
    standardized in place — this mutation is visible to the caller (and
    `process_image` relies on the mutated array's mean/std).
    """
    # Standardize in place: zero mean, std 0.25, then shift toward [0, 1].
    x -= x.mean()
    x /= (x.std() + K.epsilon())
    x *= 0.25
    x += 0.5
    # Clip to [0, 1] (new array; the caller's array is no longer touched)
    # and expand to the 0-255 byte range.
    pixels = np.clip(x, 0, 1) * 255
    if K.image_data_format() == 'channels_first':
        pixels = pixels.transpose((1, 2, 0))
    return np.clip(pixels, 0, 255).astype('uint8')
def process_image(x, former):
    """Convert a valid uint8 image back into a float array.

    Reverses `deprocess_image`.

    # Arguments
        x: A numpy-array, which could be used in e.g. imshow.
        former: The former numpy-array.
                Need to determine the former mean and variance.
    # Returns
        A processed numpy-array representing the generated image.
    """
    mean, std = former.mean(), former.std()
    if K.image_data_format() == 'channels_first':
        x = x.transpose((2, 0, 1))
    # Undo the 255-scaling/0.5-shift/0.25-std normalization applied by
    # deprocess_image (1/0.25 == 4), restoring the former statistics.
    return (x / 255 - 0.5) * 4 * std + mean
def visualize_layer(model,
                    layer_name,
                    step=1.,
                    epochs=15,
                    upscaling_steps=9,
                    upscaling_factor=1.2,
                    output_dim=(412, 412),
                    filter_range=(0, None)):
    """Visualizes the most relevant filters of one conv-layer in a certain model.

    Each filter image is produced by gradient ascent on a random input,
    maximizing that filter's mean activation, with progressive upscaling to
    avoid dominating high frequencies. The best results are stitched into an
    n x n grid and written to disk.

    # Arguments
        model: The model containing layer_name.
        layer_name: The name of the layer to be visualized.
                    Has to be a part of model.
        step: step size for gradient ascent.
        epochs: Number of gradient-ascent iterations per upscaling step.
        upscaling_steps: Number of upscaling steps.
                         Starting image is in this case (80, 80).
        upscaling_factor: Factor to which to slowly upgrade
                          the image towards output_dim.
        output_dim: [img_width, img_height] The output image dimensions.
        filter_range: Tuple[lower, upper]
                      Determines the to be computed filter numbers.
                      If the second value is `None`,
                      the last filter will be inferred as the upper boundary.
    """

    def _generate_filter_image(input_img,
                               layer_output,
                               filter_index):
        """Generates image for one particular filter.

        # Arguments
            input_img: The input-image Tensor.
            layer_output: The output-image Tensor.
            filter_index: The to be processed filter number.
                          Assumed to be valid.
        # Returns
            Either None if no image could be generated,
            or a tuple of the image (array) itself and the last loss.
        """
        s_time = time.time()

        # we build a loss function that maximizes the activation
        # of the nth filter of the layer considered
        if K.image_data_format() == 'channels_first':
            loss = K.mean(layer_output[:, filter_index, :, :])
        else:
            loss = K.mean(layer_output[:, :, :, filter_index])

        # we compute the gradient of the input picture wrt this loss
        grads = K.gradients(loss, input_img)[0]

        # normalization trick: we normalize the gradient
        grads = normalize(grads)

        # this function returns the loss and grads given the input picture
        iterate = K.function([input_img], [loss, grads])

        # we start from a gray image with some random noise, small enough
        # that `upscaling_steps` enlargements land on output_dim
        intermediate_dim = tuple(
            int(x / (upscaling_factor ** upscaling_steps)) for x in output_dim)
        if K.image_data_format() == 'channels_first':
            input_img_data = np.random.random(
                (1, 3, intermediate_dim[0], intermediate_dim[1]))
        else:
            input_img_data = np.random.random(
                (1, intermediate_dim[0], intermediate_dim[1], 3))
        input_img_data = (input_img_data - 0.5) * 20 + 128

        # Slowly upscaling towards the original size prevents
        # a dominating high-frequency of the to visualized structure
        # as it would occur if we directly compute the 412d-image.
        # Behaves as a better starting point for each following dimension
        # and therefore avoids poor local minima
        for up in reversed(range(upscaling_steps)):
            # we run gradient ascent for e.g. 20 steps
            for _ in range(epochs):
                loss_value, grads_value = iterate([input_img_data])
                input_img_data += grads_value * step

                # some filters get stuck to 0, we can skip them
                if loss_value <= K.epsilon():
                    return None

            # Calculate upscaled dimension
            intermediate_dim = tuple(
                int(x / (upscaling_factor ** up)) for x in output_dim)
            # Upscale
            img = deprocess_image(input_img_data[0])
            img = np.array(pil_image.fromarray(img).resize(intermediate_dim,
                                                           pil_image.BICUBIC))
            input_img_data = np.expand_dims(
                process_image(img, input_img_data[0]), 0)

        # decode the resulting input image
        img = deprocess_image(input_img_data[0])
        e_time = time.time()
        print('Costs of filter {:3}: {:5.0f} ( {:4.2f}s )'.format(filter_index,
                                                                  loss_value,
                                                                  e_time - s_time))
        return img, loss_value

    def _draw_filters(filters, n=None):
        """Draw the best filters in a nxn grid.

        # Arguments
            filters: A List of generated images and their corresponding losses
                     for each processed filter.
            n: dimension of the grid.
               If none, the largest possible square will be used
        """
        if n is None:
            n = int(np.floor(np.sqrt(len(filters))))

        # the filters that have the highest loss are assumed to be better-looking.
        # we will only keep the top n*n filters.
        filters.sort(key=lambda x: x[1], reverse=True)
        filters = filters[:n * n]

        # build a black picture with enough space for
        # e.g. our 8 x 8 filters of size 412 x 412, with a 5px margin in between
        MARGIN = 5
        width = n * output_dim[0] + (n - 1) * MARGIN
        height = n * output_dim[1] + (n - 1) * MARGIN
        stitched_filters = np.zeros((width, height, 3), dtype='uint8')

        # fill the picture with our saved filters
        for i in range(n):
            for j in range(n):
                img, _ = filters[i * n + j]
                width_margin = (output_dim[0] + MARGIN) * i
                height_margin = (output_dim[1] + MARGIN) * j
                stitched_filters[
                    width_margin: width_margin + output_dim[0],
                    height_margin: height_margin + output_dim[1], :] = img

        # save the result to disk
        save_img('vgg_{0:}_{1:}x{1:}.png'.format(layer_name, n), stitched_filters)

    # this is the placeholder for the input images
    assert len(model.inputs) == 1
    input_img = model.inputs[0]

    # get the symbolic outputs of each "key" layer (we gave them unique names).
    layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
    output_layer = layer_dict[layer_name]
    #assert isinstance(output_layer, layers.Conv2D)

    # Compute to be processed filter range
    filter_lower = filter_range[0]
    filter_upper = (filter_range[1]
                    if filter_range[1] is not None
                    else len(output_layer.get_weights()[1]))
    assert(filter_lower >= 0
           and filter_upper <= len(output_layer.get_weights()[1])
           and filter_upper > filter_lower)
    print('Compute filters {:} to {:}'.format(filter_lower, filter_upper))

    # iterate through each filter and generate its corresponding image.
    # BUG FIX: the upper bound was hard-coded to 128, ignoring the
    # filter_upper value computed and asserted above.
    processed_filters = []
    for f in range(filter_lower, filter_upper):
        img_loss = _generate_filter_image(input_img, output_layer.output, f)
        if img_loss is not None:
            processed_filters.append(img_loss)
    print('{} filter processed.'.format(len(processed_filters)))
    # Finally draw and store the best filters to disk
    _draw_filters(processed_filters)
# + colab={"base_uri": "https://localhost:8080/", "height": 288} colab_type="code" id="PtNeAU_kn970" outputId="06d6e19d-0fbd-4f2e-9eaf-aaceccb6346d"
# Render filter visualizations for one mid-network conv layer of the
# ResNet101V2 backbone extracted above.
layer_name = 'conv3_block3_3_conv'
visualize_layer(layer_dict['resnet101v2'], layer_name)
# + colab={} colab_type="code" id="Sf9p77EEoICG"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.