code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import pickle
import scipy
from scipy import signal
from scipy import stats
import numpy as np
from sklearn.model_selection import ShuffleSplit
import math
from collections import OrderedDict
import matplotlib.pyplot as plt
sys.path.append('D:\Diamond\code')
from csp_james_2 import *
sys.path.append('D:\Diamond\code')
from thesis_funcs_19_03 import *
import torch
import torch.nn as nn
import torch.nn.functional as nnF
import torch.optim as optim
from torch.autograd import Variable
from torch.optim import lr_scheduler
import csv
# -
class Model_current_pre(nn.Module):
    """Shallow CNN classifier for FBCSP feature maps (4 motor-imagery classes).

    Architecture: input batch-norm -> one 2-D convolution whose kernel spans
    all feature channels -> flatten -> two fully-connected hidden layers ->
    linear 4-class output.  Dropout(p=0.5) follows every hidden layer.

    Parameters
    ----------
    chn_inp : int
        Number of CSP feature channels in the input map.
    len_inp : int
        Number of time samples per channel (44, or 25 for a 2 s window).
    nf : int
        Number of convolution filters (kernels).
    ks : int
        Kernel width along the time axis; kernel height is ``chn_inp``.
    stride : int
        Convolution stride.
    act_f : str
        Activation name: 'leaky_relu', 'relu' or 'sigmoid'.
    nfc : int
        Width of both fully-connected hidden layers.
    """

    _VALID_ACTS = ('leaky_relu', 'relu', 'sigmoid')

    def __init__(self, chn_inp, len_inp, nf, ks, stride, act_f, nfc):
        super(Model_current_pre, self).__init__()
        # Fail fast on an unknown activation name.  Previously an unknown
        # name silently skipped the whole hidden stack in forward() and the
        # un-flattened tensor crashed inside fc3 at run time.
        if act_f not in self._VALID_ACTS:
            raise ValueError(
                "act_f must be one of %r, got %r" % (self._VALID_ACTS, act_f))
        # activation function name, str: 'leaky_relu', 'relu' or 'sigmoid'
        self.act_f = act_f
        # input dimension: (feature channels, samples per channel)
        self.input_size = (chn_inp, len_inp)
        # number of convolution filters (kernels)
        self.num_filters1 = nf
        # each kernel covers every input channel and `ks` time samples
        self.kernel_size1 = (chn_inp, ks)
        self.kernel_stride1 = stride
        self.batch_norm_inp = nn.BatchNorm2d(1)
        # define network
        self.conv1 = nn.Conv2d(1, self.num_filters1, self.kernel_size1, stride=stride)
        # output size after convolution (calc_out_size comes from
        # csp_james_2; presumably (in - kernel) // stride + 1 -- TODO confirm)
        self.h_out = int(calc_out_size(self.input_size[0], self.kernel_size1[0], stride=stride))
        self.w_out = int(calc_out_size(self.input_size[1], self.kernel_size1[1], stride=stride))
        self.batch_norm_conv1 = nn.BatchNorm2d(self.num_filters1)
        self.fc1 = nn.Linear(int(self.num_filters1 * self.h_out * self.w_out), nfc)
        self.batch_norm_fc1 = nn.BatchNorm1d(self.fc1.out_features)
        self.fc2 = nn.Linear(self.fc1.out_features, nfc)
        self.batch_norm_fc2 = nn.BatchNorm1d(self.fc2.out_features)
        self.fc3 = nn.Linear(self.fc2.out_features, 4)
        self.drop = nn.Dropout(p=0.5)

    def _activation(self):
        """Return the activation callable selected by ``self.act_f``."""
        if self.act_f == 'leaky_relu':
            return nnF.leaky_relu
        if self.act_f == 'relu':
            return nnF.relu
        # nnF.sigmoid is deprecated; torch.sigmoid is the supported form.
        return torch.sigmoid

    def forward(self, x):
        """Run the network; returns raw (pre-softmax) 4-class logits."""
        act = self._activation()
        x = self.batch_norm_inp(x)
        # conv -> batch-norm -> activation, then dropout
        x = act(self.batch_norm_conv1(self.conv1(x)))
        x = self.drop(x)
        # flatten the conv output for the fully-connected stack
        x = x.view(-1, int(self.num_filters1 * self.h_out * self.w_out))
        x = self.drop(x)
        x = act(self.batch_norm_fc1(self.fc1(x)))
        x = self.drop(x)
        x = act(self.batch_norm_fc2(self.fc2(x)))
        x = self.drop(x)
        # no softmax here: callers use the raw logits
        x = self.fc3(x)
        return x
# +
# Load the validated hyper-parameter configuration, locate the best-performing
# (init, fold) pair for this subject, and load the matching training features.
portion_train = 1
subject = 1
meth = 'gold_stand' #gold_stand,tl_comp_csp_kld , tl_comp_csp_mi
raw_data_root = 'E:\\Diamond\\bci_iv\\DATA\\2a\\extract_raw\\'
config_root= 'E:\\Diamond\\bci_iv\\MODELS\\fbcsp_mibif_cnn\\2a\\configs\\'
feature_root = 'E:\\Diamond\\bci_iv\\MODELS\\fbcsp_mibif_cnn\\2a\\CURRENT\\' + meth + '\\'
model_root = feature_root
save_root = model_root + 'eval456\\'
#load in cv config grid
hp_names =[] #all the hyper-parameter names to be validated
with open(config_root +'cv_config.csv', mode = 'r') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter = ',')
    for row in csv_reader:
        hp_names.append((row[0]).strip())
# the lambda hyper-parameter lives in its own config file
with open(config_root +'_lambda_config.csv', mode = 'r') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter = ',')
    for row in csv_reader:
        hp_names.append((row[0]).strip())
# NOTE(review): redundant -- the `with` block has already closed the file
csv_file.close()
filename = 'A0'+str(subject)+'T'
filename_save = filename
file_root_feature = feature_root + filename_save[:-1] + '\\4s\\' + 'pt_' + str(int(portion_train*100))
file_root_model = model_root + filename_save[:-1] + '\\4s\\' + 'pt_' + str(int(portion_train*100))
###################################################################################################################
#load best config
###################################################################################################################
#load in best config line
config_file = open(file_root_model + '\\ANN\\best_config_val.txt', 'r')
config_log= config_file.readlines()
config_file.close()
# scan the log from the last line backwards; stop at the most recent line
# that contains an activation-function entry
for i in range (0,len(config_log)):
    line = config_log[(i + 1) * -1]
    if '_act_fun_' in line: #and line.split(' ')[0].split('_lambda_')[1] == '0':
        break
#extract best config values and make into dictionary
# the config line encodes values as ..._<name>_<value>_<next-name>_...;
# each value is recovered by splitting between consecutive names
config = OrderedDict()
for hp_ind in range(0, len(hp_names)-1):
    config[hp_names[hp_ind]] = (line.split(hp_names[hp_ind] + '_')[1].split('_'+hp_names[hp_ind+1]+'_')[0])
config[hp_names[-1]] = line.split(hp_names[-1]+'_')[1].split(' ')[0]
###### read which model init and fold has the best accuracy
best_model_info_file = open (model_root + 'eval\\' + '4s_' + str(portion_train*100) + '_best_config_eval_acc_all_subjects.txt', 'r')
best_model_acc_info = best_model_info_file.readlines()
best_model_info_file.close()
# each line is "<subject>, ..., <init>_<fold>"; pick this subject's row
for i in range (0, len(best_model_acc_info)):
    line1 = best_model_acc_info[i]
    if line1.split(',')[0].strip() == str(subject):
        best_init = (line1.split(',')[-1].strip().split('_')[0].strip())
        best_fold = (line1.split(',')[-1].strip().split('_')[1].strip())
###################################################################################################################
#load training features
###################################################################################################################
LABELS0_go = pickle.load(open(file_root_feature + '\\LABELS0_go.pickle', 'rb'))
TRAIN_IDX = pickle.load(open(file_root_feature + '\\TRAIN_IDX.pickle', 'rb'))
train_idx0 = TRAIN_IDX[int(best_fold)]
y_train = LABELS0_go[train_idx0]
X_train0 = pickle.load(open(file_root_feature + '\\Z_all_classes_train_fold_' + best_fold +
                            '_lambda_' + str(float(config['_lambda'].strip())) + ".pickle", 'rb'))
#remove mean from input trial
for i in range (0, np.shape(X_train0)[0]):
    X_train0[i] = X_train0[i] - np.average(X_train0[i])
# add a singleton channel axis: (trials, 1, feature channels, samples)
X_train = np.reshape(X_train0, [np.shape(X_train0)[0], 1, np.shape(X_train0)[1], np.shape(X_train0)[2]]).astype('float64')
# mean feature map over all trials of all classes
X_train_mean_alt = np.average(X_train, axis = 0)
# -
# -
### use the average of all trials of all classes as input to synthesise the feature map
inp0 = np.expand_dims(X_train_mean_alt, 0)
inp0 = torch.from_numpy(inp0).float()
# +
# Rebuild the network with the validated hyper-parameters and load the
# trained weights for the best (init, fold) pair found above.
model = Model_current_pre(chn_inp = inp0.size()[-2], len_inp = inp0.size()[-1], nf = int(config['nf']), ks = int(config['ks']) ,
                          stride = int(config['stride']), act_f = config['act_fun'], nfc = int(config['nfc']))
save_path = file_root_model + '\\ANN\\model_config_'+ line.split(' ')[0] + '_'+ 'n_inits_' + best_init +'_fold_' + best_fold + '.pt'
model.load_state_dict(torch.load(save_path))
model.eval()
# -
# display the resolved checkpoint path (notebook cell output)
save_path
# # Synthesize feature map
# +
# Activation maximization: for each class, gradient-ascend an input feature
# map (starting from the all-trial average) so as to maximise that class's
# logit.  Stops after `count` reaches 1000 near-stationary steps or after
# `num_epoch` iterations.
num_classes = 4
BEST = []
for i in range (0, num_classes):
    BEST.append([])
    print ('class', i)
    DIFF= []
    BEST[i] = {'best_im':None, 'best_out':None, 'best_cost':None}
    prev_cost = 10000
    count = 0
    epoch = 0
    best_cost = 10000000
    # optimisation variable: a copy of the average input map
    inp = Variable(inp0.clone() , requires_grad=True)
    im= inp.detach().numpy()
    im = im.squeeze()
    plt.imshow(im, cmap = 'jet',aspect='auto')
    plt.colorbar()
    target_class= i
    num_epoch = 100000
    model.eval()
    #initial_learning_rate = 0.5
    while count < 1000 and epoch < num_epoch:
        #for epoch in range(1, num_epoch):
        epoch += 1
        # Process image and return variable: every 4th step, lightly smooth
        # the current image along time to regularise the synthesis
        if epoch % 4 == 0:
            inp = inp.detach().numpy().squeeze()
            inp = scipy.ndimage.filters.gaussian_filter1d(inp, 0.3)
            inp = np.expand_dims(inp, 0)
            inp = np.expand_dims(inp, 0)
            inp = torch.from_numpy(inp).float()
            inp = Variable(inp, requires_grad=True)
        # progress report; `diff` is undefined until the loss first drops
        # below -2, hence the try/except
        if epoch % 1000 == 0:
            print('Iteration:', str(epoch), 'Loss', "{0:.2f}".format(class_loss.data.numpy()))
            try:
                print (diff.abs(), count)
            except:
                print (count)
        # Define optimizer for the image
        # NOTE(review): the SGD optimizer is immediately overwritten by Adam,
        # and a fresh optimizer is built every iteration, so Adam's moment
        # state is reset each step -- confirm this is intentional
        optimizer = optim.SGD([inp], lr = 0.5, weight_decay = 0.0005)
        optimizer = optim.Adam([inp], lr = 0.001, weight_decay = 0.0001)
        # Forward
        output = model(inp)
        # Target specific class: minimising -logit maximises the class score
        class_loss = -output[0, target_class]
        # convergence counter: once the loss is good (< -2), count steps
        # whose change from the previous loss is below 0.05
        if class_loss < -2:
            diff = class_loss - prev_cost
            DIFF.append(diff)
            if diff.abs() < 0.05:
                count += 1
            else:
                count = 0
        prev_cost = class_loss
        # Zero grads
        model.zero_grad()
        # Backward
        class_loss.backward()
        # Update image
        optimizer.step()
        # keep the best image seen so far
        if class_loss < best_cost:
            best_im = inp.clone().detach().numpy()
            best_cost = class_loss
            best_out = output
    BEST[i]['best_im'] = best_im
    BEST[i]['best_out'] = best_out
    BEST[i]['best_cost'] = best_cost
# Recreate image
#if i % 10 == 0:
# Save image
#im_path = '../generated/c_specific_iteration_'+str(i)+'.jpg'
#save_image(self.created_image, im_path)
# -
# # Visualize synthesized feature map
# Plot each class's synthesized feature map (global mean added back in),
# optionally saving each figure to disk.
to_save = 1
for c in range (0, 4):
    plt.imshow(BEST[c]['best_im'].squeeze() + np.average(X_train0), cmap = 'jet',aspect='auto', vmin = -5, vmax = 9)
    plt.colorbar()
    plt.xticks(fontsize = 14)
    plt.yticks(fontsize = 14)
    if to_save == 1:
        plt.savefig('D:\\Diamond\\pics\\current_results\\cnn_vis\\synth_c\\' + str(c)+'.png', dpi = 800)
    plt.show()
    # logits the model produced for this synthesized image
    print (BEST[c]['best_out'])
# # Average feature map per class
#
# Show one raw trial, then plot the per-class average feature map.
plt.imshow(X_train[2].squeeze(),'jet', aspect = 'auto')
plt.colorbar()
IM = []
for c in range (0, 4):
    class_indices = np.where(y_train == c )[0]
    X_train1 = X_train[class_indices]
    #X_train1 = X_train0
    im = np.average(X_train1, axis = 0).squeeze()
    IM.append(im)
    plt.imshow(im, 'jet', aspect = 'auto', vmin = -5, vmax = 9)
    plt.colorbar()
    plt.xticks(fontsize = 14)
    plt.yticks(fontsize = 14)
    """
    if c in [0,2]:
        plt.ylabel('Feature channels', fontsize = 16)
    if c in [2,3]:
        plt.xlabel('downsampled time', fontsize = 16)
    """
    if to_save == 1:
        plt.savefig('D:\\Diamond\\pics\\current_results\\cnn_vis\\ave_c_' + str(c)+'.png', dpi = 800)
    plt.show()
# # Correlation
# Pearson correlation, per feature channel, between each class's average
# feature map and its synthesized map.  INDR collects, per class, the
# channels whose correlation exceeds 0.5.
INDR= []
for c in range (0,4):
    R = []
    P = []
    best_im = BEST[c]['best_im'].squeeze()
    class_indices = np.where(y_train == c )[0]
    ave_ = np.average(X_train[class_indices], axis = 0).squeeze()
    # one correlation per feature channel (32 channels assumed here)
    for ch in range (0,32):
        r,p = scipy.stats.pearsonr(ave_[ch], best_im[ch])
        R.append(r)
        P.append(p)
    R = np.array(R)
    P= np.array(P)
    # channels strongly correlated with the synthesized map
    indr = np.where(R>0.5)[0]
    INDR.append(indr)
    P_ = P[indr]
    # worst (largest) p-value among the selected channels
    print(max(P_))
    plt.plot(R, 'bo')
    #plt.plot(P, 'ro')
    # dotted separators between the four filter-band groups of 8 channels
    plt.vlines([7.5,15.5,23.5], 0.5,1,linestyles='dotted')
    #plt.hlines(0.5, 0,32,color='g',linestyles='dotted')
    plt.hlines(0.05, 0,32,linestyles='dotted')
    plt.ylim([0.5,1])
    plt.ylabel('R value', fontsize = 16)
    plt.xlabel('feature channel', fontsize = 16)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    if to_save == 1:
        plt.savefig('D:\\Diamond\\pics\\current_results\\cnn_vis\\subject1\\corro_c_' + str(c)+'.png', dpi = 800)
    plt.show()
# NOTE(review): after the loop, R holds only the LAST class's values, so
# these plots mix INDR[0] indices with class-3 correlations
INDR
plt.plot(INDR[0],R[INDR], 'rx' )
plt.plot(R, 'b+')
# +
plt.plot(P, 'ro')
plt.vlines([7.5,15.5,23.5], 0,0.05,linestyles='dotted')
plt.ylim([0,0.05])
# +
plt.plot(P, 'ro')
plt.vlines([7.5,15.5,23.5], 0,0.05,linestyles='dotted')
plt.ylim([0,0.05])
# -
SALI = []
for target_class in range (0, 4):
#target_calss = 3
#inp_fm = IM[target_class]
#inp_fm = BEST[target_class]['best_im'].squeeze()
#inp_fm = AVE[target_class].squeeze()
inp_fm = BEST[target_class]['best_im'].squeeze()
#inp_fm = np.average(X_train[np.where(y_train == target_class)[0]], axis = 0).squeeze()
inp_fm = np.expand_dims(inp_fm, 0)
inp_fm = np.expand_dims(inp_fm, 0)
inp_fm = torch.from_numpy(inp_fm).float()
inp_fm = Variable(inp_fm , requires_grad=True)
model.eval()
model_output = model(inp_fm)
# Zero gradients
model.zero_grad()
# Target for backprop
one_hot_output = torch.FloatTensor(1, model_output.size()[-1]).zero_()
one_hot_output[0][target_class] = 1
# Backward pass
model_output.backward(gradient=one_hot_output)
sali = (inp_fm.grad.data.numpy().squeeze())
pos_saliency = (np.maximum(0, sali) / sali.max())
neg_saliency = (np.maximum(0, -sali) / -sali.min())
SALI.append(sali)
plt.imshow((sali),'jet', aspect='auto')
plt.colorbar()
plt.xticks(fontsize = 14)
plt.yticks(fontsize = 14)
if to_save == 1:
plt.savefig('D:\\Diamond\\pics\\current_results\\cnn_vis\\subject1\\sali_map_c_' + str(target_class)+'.png', dpi = 800)
plt.show()
"""
plt.imshow(pos_saliency,cmap = 'gray',aspect='auto')
plt.colorbar()
plt.show()
plt.imshow(neg_saliency,cmap = 'gray',aspect='auto')
plt.colorbar()
plt.show()
"""
def umm_scale_to_range(a, range2):
    """
    Linearly rescale an array onto an arbitrary range.
    INPUTS:
    a: np array to be scaled
    range2: list or array, [range2[0], range2[1]] is the new scale (min, max), e.g. [0,1] or [1,10]
    OUTPUT:
    a2: scaled array with min(a2) = range2[0], max(a2) = range2[1]
    RAISES:
    ValueError if `a` is constant -- the original 0/0 division silently
    returned an all-NaN array in that case.
    """
    lo = np.min(a)
    hi = np.max(a)
    if hi == lo:
        raise ValueError("cannot rescale a constant array to a range")
    # normalise to [0, 1], then stretch/shift onto [range2[0], range2[1]]
    a1 = (a - lo) / (hi - lo)
    a2 = a1 * (range2[1] - range2[0]) + range2[0]
    return a2
umm = umm_scale_to_range(sali, [-1,1])
# +
# Per-channel mean absolute saliency (averaged over time); highlight the
# channels previously selected by the correlation threshold (INDR).
ave_sali_ = (np.average(np.abs(SALI),axis = 2))
for c in range (0,4):
    plt.plot(INDR[c], ave_sali_[c][INDR[c]], 'ro')
    plt.plot(ave_sali_[c], 'b+')
    plt.vlines([7.5,15.5,23.5], min(ave_sali_[c]),max(ave_sali_[c]),linestyles='dotted')
    plt.hlines(np.average(ave_sali_[c]), 0,32, colors='g' ,linestyles='dotted')
    plt.xticks(fontsize = 14)
    plt.yticks(fontsize = 14)
    if c in [0,2]:
        plt.ylabel('saliency gradient', fontsize =16)
    if c in [2,3]:
        plt.xlabel('feature channel', fontsize = 16)
    if to_save == 1:
        plt.savefig('D:\\Diamond\\pics\\current_results\\cnn_vis\\subject1\\sali_gradient_c_' + str(c)+'.png', dpi = 800)
    plt.show()
# -
# +
# 4x3 grid: column 0 = class-average map, column 1 = synthesized map,
# column 2 = positive saliency (grayscale).
fig, axes = plt.subplots(nrows=4, ncols=3, sharex= True, sharey= True)
count = 0
AVE = []
for ax in axes.flat:
    #print (count%2, count/2)
    if count in np.array([0, 3, 6, 9]):
        class_indices = np.where(y_train == int(np.where(np.array([0, 3, 6, 9])==count)[0][0] ))[0]
        X_train1 = X_train0[class_indices]
        #X_train1 = X_train0
        # NOTE(review): this clobbers the global X_train built earlier with
        # a single-class subset -- later cells that read X_train see only
        # the last class processed here
        X_train = np.reshape(X_train1, [np.shape(X_train1)[0], 1, np.shape(X_train1)[1], np.shape(X_train1)[2]]).astype('float64')
        ave = np.average(X_train, axis = 0).squeeze()
        AVE.append(ave)
        ave_im = ax.imshow(ave, cmap = 'jet',aspect='auto')
    elif count in [1,4,7,10]:
        synth = ax.imshow(BEST[int(np.where(np.array([1,4,7,10])==count)[0][0] )]['best_im'].squeeze(), cmap = 'jet',aspect='auto')
    elif count in [2, 5, 8, 11]:
        sali = SALI[int(np.where(np.array([2, 5, 8, 11])==count)[0][0])]
        pos_saliency = (np.maximum(0, sali) / sali.max())
        pos_sali = ax.imshow(pos_saliency, cmap = 'gray', aspect= 'auto')
    count += 1
fig.colorbar(synth, ax=axes.ravel().tolist())
#plt.savefig(file_root + 'gauss_0dot3_ave_synth_possali')
plt.show()
# -
# +
#SALI = pickle.load(open(file_root + '_saliency_unprocessed.pickle', 'rb'))
# overlay class-3 positive saliency on its synthesized map
plt.imshow(BEST[3]['best_im'].squeeze(), 'jet', aspect = 'auto')
plt.colorbar()
sali = SALI[3]
pos_saliency = (np.maximum(0, sali) / sali.max())
plt.imshow(pos_saliency, 'gray', aspect = 'auto', alpha = 0.5)
plt.show()
# -
# scratch / inspection cells below
np.shape(SALI[0])
np.where(np.array([0,1,2,3]) == 3)
# +
# feed a class-average map through the model to inspect its logits
inp_ave = AVE[0]
inp_ave = np.expand_dims(inp_ave,0)
inp_ave = np.expand_dims(inp_ave,0)
inp_ave = torch.from_numpy(inp_ave).float()
model(inp_ave)
# -
np.shape(AVE[target_class].squeeze())
# +
#pickle.dump(SALI, open(file_root+'_saliency_unprocessed.pickle', 'wb'))
# -
np.shape(SALI)
SALI[0] - SALI[2]
model_output
plt.imshow(best_im.squeeze(), vmin=0, cmap = 'jet',aspect='auto')
plt.imshow(best_im.squeeze(), vmin=0, cmap = 'jet',aspect='auto')
plt.imshow(best_im.squeeze(), vmin=0, cmap = 'jet',aspect='auto')
inp_im = inp0.numpy().squeeze()
plt.imshow(inp_im, vmin=0, vmax=np.max(inp_im), cmap = 'jet',aspect='auto')
# +
im= inp.detach().numpy()
im = im.squeeze()
vmin = 0
vmax = np.max(im)
plt.imshow(im, vmin=0, vmax=vmax, cmap = 'jet',aspect='auto')
# -
plt.imshow(im, 'jet')
# NOTE(review): bare `ndimage` is not imported in this file; presumably it
# arrives via one of the star imports -- verify, otherwise this is a NameError
im_gw = ndimage.gaussian_filter1d(im, 0.9)
plt.imshow(im_gw, cmap = 'jet',aspect='auto')
inp0 - inp
| results/cnn_vis_maps_corro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## En este ejercicio vamos a clusterizar con kmeans ##
#
# **Aprendizaje no supervisado** es aquel en el que no sabemos nada sobre los datos.
#
# **Clusterizar** es hallar grupos de clases de iguales, dentro de un dataset.
#
# <div class="alert alert-block alert-info">
# Para clusterizar, se suele usar el algoritmo del kmeans, aunque este necesita que se le diga el nº de clusters.<br>
# Para calcular el nº de cluster se suele emplear la técnica de la silueta, que calcula la máxima separación entre cluster.
# </div>
# ## Cargamos el dataset ##
# +
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import html5lib
# BUG FIX: sklearn.cross_validation was deprecated in scikit-learn 0.18 and
# removed in 0.20; train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split #to split the dataset for training and testing
from sklearn import svm , metrics
from sklearn.metrics import mean_squared_error
#df = pd.read_html("http://www.mundodeportivo.com/resultados/futbol/laliga/clasificacion.html")[0]
# load the league-table dataset (one row per team)
df = pd.read_csv("datasets/equipos.csv")
df
# +
# Drop unwanted columns: use the team name as the index, then delete the
# "Equipo" and "PJ" columns.
df.index = df.Equipo
try:
    del df["Equipo"]
    del df["PJ"]
# NOTE(review): bare except hides every error; presumably it only guards
# against re-running this cell after the columns are already gone
except: pass
df
# +
# Convert the numeric columns to a numpy array
X = df[[ "PG","PE","PP","GF","GC","DIF","PTS"]].values
# -
# ## Usamos el método de la silueta, en un bucle for, y observamos los resultados ##
# +
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
X = df[[ "PG","PE","PP","GF","GC","DIF","PTS"]].values
# Try cluster counts 2..9 and record the silhouette score of each fit.
scores = []
values = np.arange(2, 10)
for num_clusters in values:
    # Train the KMeans clustering model
    kmeans = KMeans(init='k-means++', n_clusters=num_clusters, n_init=10)
    kmeans.fit(X)
    # silhouette score over the whole sample (sample_size = len(X))
    score = metrics.silhouette_score(X, kmeans.labels_,
                                     metric='euclidean', sample_size=len(X))
    print("\nNumber of clusters =", num_clusters)
    print("Silhouette score =", score)
    scores.append(score)
# +
# Plot silhouette scores
plt.figure()
plt.bar(values, scores, width=0.7, color='black', align='center')
plt.title('Silhouette score vs number of clusters')
#Extract the best score and the corresponding value for the number of clusters:
# Extract best score and optimal number of clusters
num_clusters = np.argmax(scores) + values[0]
print('\nOptimal number of clusters =', num_clusters)
# -
# -
# ## El método de la silueta sugiere 2, 3 ó 4 clusters.##
#
# Nosotros vamos a usar 4, para que el ejercicio sea parecido a los anteriores.
#
# Usamos el modelo kmeans, y este predice los valores.
# Los **centroides** son el punto medio de cada cluster.
#
# <div class="alert alert-block alert-info">
# Vendrían a ser como el elemento promedio, para ese cluster.
# </div>
# +
num_clusters=4
kmeans = KMeans(init='k-means++', n_clusters=num_clusters, n_init=10)
# Train the K-Means model with the input data.
# BUG FIX: the original called kmeans.fit(X) and then kmeans.fit(X).labels_,
# re-running the whole clustering a second time just to read the labels --
# with no random_state, the second fit could even produce a labelling that
# does not match the centroids printed below.  Fit once, then read labels.
kmeans.fit(X)
labels = kmeans.labels_
print(labels)
# +
# One centroid per cluster; name the columns and sort by points (PTS)
centroids = pd.DataFrame(kmeans.cluster_centers_)
centroids.columns = [ "PG","PE","PP","GF","GC","DIF","PTS"]
print("Los valores de los centroides son")
centroids.sort_values("PTS")
# -
# Vemos por la tabla anterior, que el kmeans reconoce un centroide con un equipo fuerte, y luego va paulatinamente bajando la fuerza de los equipos.
#
# Pintamos el gráfico, con los resultados del clasificador.
# Para eso:
#
# - Disminuimos el nº de dimensiones usando PCA a 2, para poder pintar un gráfico en dos dimensiones.
# - Usamos el kmeans como clasificador, para colorear las regiones del gráfico.
# - Pintamos los centroides, como estrellas.
#
# +
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import PCA
# First define the X and y variables as numpy arrays
# NOTE(review): df_sinpuntos = df is an alias, not a copy -- the column
# added on the next line also appears in df_sinpuntos
df_sinpuntos = df
df["auto_labels"] = labels
X = df.values[:,0:7]
Y = df.values[:,7]
le = LabelEncoder()
y2 = le.fit_transform(Y)
y = y2
print(X,y)
###### Project the data down to two principal components for 2-D plotting.
pca_2c = PCA(n_components=2)
X_pca_2c = pca_2c.fit_transform(X)
print(X_pca_2c.shape)
# +
# Fit a new K-Means classifier on the two-dimensional PCA projection
from sklearn.cluster import KMeans
km = KMeans(n_clusters=4,
            init='random',
            n_init=10,
            max_iter=300,
            tol=1e-04,
            random_state=0)
y_km = km.fit_predict(X_pca_2c)
# +
from mlxtend.plotting import plot_decision_regions
fig = plt.figure(figsize=(10,8))
fig = plot_decision_regions(X=X_pca_2c, y=y_km, clf=km)
# Plot the centroids as stars
fig = plt.scatter(km.cluster_centers_[:, 0],
                  km.cluster_centers_[:, 1],
                  s=250, marker='*',
                  c='red', edgecolor='black',
                  label='centroids')
plt.legend(scatterpoints=1)
plt.grid()
plt.tight_layout()
#plt.savefig('images/11_02.png', dpi=300)
plt.show()
# -
# -
# ## Conclusión ##
#
# El algoritmo kmeans es una forma de clasificar un conjunto de datos, cuando no se sabe nada sobre los mismos.
| ejercicios/Ejercicio 11 - Aprendizaje no supervisado - Clusterizacion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/lmcanavals/algorithmic_complexity/blob/main/01_02_analysis_asymptotic.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Ofug8fZyLMUU"
# # Asymptotic Analysis
# + [markdown] id="abxw1G_wLWDU"
# ## Quick recap
# + id="htuOsjDKIwJB"
def selectionSort(a): # T(n) =
    """Sort list `a` in place using selection sort.

    The trailing comments assign a unit cost to each line; they feed the
    step-count expression T(n) derived in the markdown cell below.
    """
    n = len(a) # 2
    for i in range(n - 1): # 1 + (n - 1) *
        minIdx = i # 1
        for j in range(i + 1, n): # 1 + (n / 2) *
            if a[j] < a[minIdx]: # 3 +
                minIdx = j # 1
        if i != minIdx: # 1 +
            a[i], a[minIdx] = a[minIdx], a[i] # 6
# + [markdown] id="tG6mRW5ZKC_u"
# Expresión detallada:
#
# $ T(n)=3+(n-1)\times(2+\frac{n}{2}\times(3+1)+1+6) $
#
# $ T(n)=3+(n-1)\times(2n+9) $
#
# $ T(n)=2n^2 + 7n -6 $
#
# $ T(n) \implies O(n^2) $
# + [markdown] id="3o25al71LY7N"
# ## Recursive functions
# + id="2MapaI3GRWUc"
def merge(a, ini, fin): # T(n) =
    """Merge the two sorted halves a[ini..mid] and a[mid+1..fin] in place.

    The trailing comments count each line's cost for the linear-time
    analysis worked out in the markdown cell below.
    """
    mid = (ini + fin) // 2 # 3
    i = ini # 1
    j = mid + 1 # 2
    n = fin - ini + 1 # 3
    temp = [0]*n # 1 + n
    for k in range(n): # n *
        if j > fin or (i <= mid and a[i] < a[j]): # 7 + max(then, else)
            temp[k] = a[i] # 3
            i += 1 # 2
        else:
            temp[k] = a[j] # 3
            j += 1 # 2
    for k in range(n): # n
        a[ini + k] = temp[k] # 4
# + [markdown] id="pFYKAEROQ-pX"
# $ T(n) = 10 + n + 12n + 4n $
#
# $ T(n) = 17n + 10 $
#
# $ T(n) \implies O(n) $
# + id="b4O3N3gSR4xV"
def mergeSort(a, ini, fin): # T(n) =
    """Sort a[ini..fin] in place by top-down merge sort.

    The cost comments support the recurrence T(n) = 2T(n/2) + O(n)
    stated in the markdown cell below.
    """
    if ini < fin: # 1 +
        mid = (ini + fin) // 2 # 3
        mergeSort(a, ini, mid) # T(n/2)
        mergeSort(a, mid + 1, fin) # T(n/2)
        merge(a, ini, fin) # O(n)
# + [markdown] id="eGafn8ABSO_t"
# $ T(n) = 2T(\frac{n}{2}) + O(n) + 4 $
# + [markdown] id="Y8_ieA13N-4k"
# ## Deductive analysis
| 01_02_analysis_asymptotic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="Et4EmBdetQYs" colab_type="code" outputId="7083290f-4aa2-4b85-8dbd-ae0c0ce91e87" executionInfo={"status": "ok", "timestamp": 1578815772696, "user_tz": -540, "elapsed": 999, "user": {"displayName": "\u5cf6\u7530\u6ec9\u5df1", "photoUrl": "", "userId": "06467417530496067215"}} colab={"base_uri": "https://localhost:8080/", "height": 55}
from google.colab import drive
drive.mount('/content/drive')
# + id="LT7drR5ZrM2U" colab_type="code" colab={}
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# + [markdown] id="LnIo5PcgrM2b" colab_type="text"
# ## Define
# + id="1_UfvB5urM2c" colab_type="code" colab={}
#TRAIN_DATA_FILES = ['cross','dead_end', 'left', 'right', 'straight', 'threeway_left', 'threeway_center', 'threeway_right']
# class labels to load; one CSV of laser scans per label
TRAIN_DATA_FILES = ['left']
# NUM_CLASSES = 6
NUM_CLASSES = len(TRAIN_DATA_FILES)
# angular span of the scanner in degrees (used via np.deg2rad below)
MAX_RANGE = 240
# number of rows kept per class file
NUM_DATA = 400
# value substituted for missing readings
REPLACE_NAN = 0.0
# safety factor applied around the occupancy grid
MARGIN = 1.5
RESOLUTION = 50.0 #(cm)
# occupancy-grid size in cells (square)
WIDTH = HEIGHT = int(400 * 2 * MARGIN / RESOLUTION)
# + [markdown] id="TQL65DQKrM2g" colab_type="text"
# ## 学習データ数の読み込み用関数
# データ読み込み時、それぞれのラベルの学習データの内、最も数の少ないものに合わせる
# + id="xgqfnLiurM2h" colab_type="code" colab={}
def adjust_data_num(num_class):
    """Load the scan CSV for one class label, truncated to NUM_DATA rows,
    so every label contributes the same number of training samples."""
    class_name = TRAIN_DATA_FILES[num_class]
    # header=None: the scan files carry no column-name row
    file_name = "/content/drive/My Drive/Colab Notebooks/data/" + class_name + ".csv"
    frame = pd.read_csv(file_name, header=None)
    return frame.head(NUM_DATA)
# + [markdown] id="hI35WoeorM2k" colab_type="text"
# ## 学習データ読み込み用関数
# + id="qP7O8VnZrM2k" colab_type="code" colab={}
def split_data():
    """Load every class's CSV, concatenate them, and return a numpy array.

    Returns the stacked rows of all class files (up to NUM_DATA rows per
    class), with missing values replaced by REPLACE_NAN.
    """
    # files = os.listdir('/content/drive/My Drive/Colab Notebooks/data')
    all_data_set = []
    for i in range(NUM_CLASSES):
        try:
            all_data_set.append(adjust_data_num(i))
        # pd.errors is the public home of EmptyDataError (the old
        # pd.io.common location was a private legacy alias)
        except pd.errors.EmptyDataError:
            # BUG FIX: the original printed `file_name`, a variable local to
            # adjust_data_num, so reaching this branch raised a NameError
            # instead of reporting which file was empty.
            print("ERROR: {} is empty".format(TRAIN_DATA_FILES[i]))
    X = pd.concat(all_data_set)
    # replace Nan with 'REPLACE_NAN'
    X = X.fillna(REPLACE_NAN)
    # _, DIM_input_data = data_set.shape
    return X.values
# + [markdown] id="D0AymYf0EXsO" colab_type="text"
# ## Plot
# + id="BaUlsytxEWSi" colab_type="code" colab={}
# Convert range scans (meters/cells) into 2-D point clouds centred on the
# occupancy grid, then scatter-plot individual scans.
X = split_data()
X *= 100 / RESOLUTION
_, DIM_input_data = X.shape
rotate = 0
# unit direction vectors for each beam, spread over MAX_RANGE degrees
matrix_x = [ [-np.sin(rotate + np.deg2rad((-MAX_RANGE/2) + (i / DIM_input_data * MAX_RANGE))) for i in range(DIM_input_data)] ]
matrix_y = [ [np.cos(rotate + np.deg2rad((-MAX_RANGE/2) + (j / DIM_input_data * MAX_RANGE))) for j in range(DIM_input_data)] ]
point_cloud_x = np.round(X * matrix_x) + (WIDTH / 2)
point_cloud_y = np.round(X * matrix_y) + (HEIGHT / 2)
# + id="toiFlIhCpCvD" colab_type="code" outputId="f7646a38-4130-4fa3-9ebb-aebac2601cb6" executionInfo={"status": "ok", "timestamp": 1578815840244, "user_tz": -540, "elapsed": 1007, "user": {"displayName": "\u5cf6\u7530\u6ec9\u5df1", "photoUrl": "", "userId": "06467417530496067215"}} colab={"base_uri": "https://localhost:8080/", "height": 303}
# plot one scan as a point cloud
index = 119
fig, ax = plt.subplots(figsize=(5, 5))
plt.scatter(point_cloud_x[index], point_cloud_y[index], color="k")
#ax.set_xlim(0, 30)
#ax.set_ylim(0, 30)
plt.axis("off")
#plt.savefig("result.png", transparent = True)
plt.show()
# + id="4MC56cWTZfr-" colab_type="code" outputId="b8190d6c-ad1f-444e-8aad-70afa97caff2" executionInfo={"status": "ok", "timestamp": 1578815853153, "user_tz": -540, "elapsed": 1025, "user": {"displayName": "\u5cf6\u7530\u6ec9\u5df1", "photoUrl": "", "userId": "06467417530496067215"}} colab={"base_uri": "https://localhost:8080/", "height": 303}
index = 8
fig, ax = plt.subplots(figsize=(5, 5))
plt.scatter(point_cloud_x[index], point_cloud_y[index], color="k")
#ax.set_xlim(0, 30)
#ax.set_ylim(0, 30)
plt.axis("off")
#plt.savefig("result.png", transparent = True)
plt.show()
# + id="cThKfWcfsPPg" colab_type="code" colab={}
# + id="mKUFBh8psPTx" colab_type="code" colab={}
#TRAIN_DATA_FILES = ['cross','dead_end', 'left', 'right', 'straight', 'threeway_left', 'threeway_center], 'threeway_right
# Second configuration: the angular span is now MAX_ANGLE and MAX_RANGE is
# repurposed as the sensor's distance range (meters) -- note the redefinition.
TRAIN_DATA_FILES = ['straight']
# NUM_CLASSES = 6
NUM_CLASSES = len(TRAIN_DATA_FILES)
MAX_ANGLE = 240
MAX_RANGE = 4
MAX_SPEED = 1.0
MAP_BUILD_HZ = 1.0
NUM_DATA = 400
REPLACE_NAN = 0.0
MARGIN = 1.5
RESOLUTION = 50.0 #(cm)
WIDTH = HEIGHT = int((2/RESOLUTION * (MAX_RANGE + MAX_SPEED/MAP_BUILD_HZ)*100)*MARGIN)
# + id="I6E8-aWojW-6" colab_type="code" colab={}
# recompute the point clouds under the new configuration
X = split_data()
X *= 100 / RESOLUTION
_, DIM_input_data = X.shape
rotate = 0
matrix_x = [ [-np.sin(rotate + np.deg2rad((-MAX_ANGLE/2) + (i / DIM_input_data * MAX_ANGLE))) for i in range(DIM_input_data)] ]
matrix_y = [ [np.cos(rotate + np.deg2rad((-MAX_ANGLE/2) + (j / DIM_input_data * MAX_ANGLE))) for j in range(DIM_input_data)] ]
point_cloud_x = np.round(X * matrix_x) + (WIDTH / 2)
point_cloud_y = np.round(X * matrix_y) + (HEIGHT / 2)
# + id="RzzQVBPssRc2" colab_type="code" outputId="7379e2bf-e4a1-46c9-d76a-8b57dbfd05c6" executionInfo={"status": "ok", "timestamp": 1564738854327, "user_tz": -540, "elapsed": 1537, "user": {"displayName": "\u5cf6\u7530\u6ec9\u5df1", "photoUrl": "", "userId": "06467417530496067215"}} colab={"base_uri": "https://localhost:8080/", "height": 320}
index = 8
fig, ax = plt.subplots(figsize=(5, 5))
plt.scatter(point_cloud_x[index], point_cloud_y[index], color="k")
#ax.set_xlim(0, 30)
#ax.set_ylim(0, 30)
plt.axis("off")
#plt.savefig("result.png", transparent = True)
plt.show()
# + id="QBHQlvVLMpF4" colab_type="code" colab={}
| program/pltURG.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.4 32-bit
# name: python37432bit8454b6b220304c4cb3da9b12f850f688
# ---
import matplotlib.pyplot as plt
import numpy as np
# Basic plotting: a simple line plot of 0..9
data = np.arange(10)
data
plt.plot(data)
# +
# Figure and Subplot
# -
# Build a figure manually and add three of four subplots in a 2x2 grid;
# plt.plot draws on the most recently added axes (ax3 here).
fig = plt.figure()
ax1 = fig.add_subplot(2,2,1)
ax2 = fig.add_subplot(2,2,2)
ax3 = fig.add_subplot(2,2,3)
plt.plot(np.random.randn(50).cumsum(), 'k--')
ax1.hist(np.random.randn(100), bins=20, color='k', alpha=0.3)
ax2.scatter(np.arange(30), np.arange(30) + 3 * np.random.randn(30))
# plt.subplots builds the figure and an axes array in one call
fig, axes = plt.subplots(2, 3)
axes
# repeat of the manual-figure example above
fig = plt.figure()
ax1 = fig.add_subplot(2,2,1)
ax2 = fig.add_subplot(2,2,2)
ax3 = fig.add_subplot(2,2,3)
plt.plot(np.random.randn(50).cumsum(), 'k--')
ax1.hist(np.random.randn(100), bins=20, color='k', alpha=0.3)
ax2.scatter(np.arange(30), np.arange(30) + 3 * np.random.randn(30))
ax1
fig, axes = plt.subplots(2, 3)
axes
# Adjust the spacing around subplots
fig, axes = plt.subplots(2,2, sharex=True, sharey=True)
for i in range(2):
    for j in range(2):
        axes[i,j].hist(np.random.randn(500), bins=50, color='k', alpha=0.5)
# remove all horizontal and vertical gaps between the subplots
plt.subplots_adjust(wspace=0, hspace=0)
from numpy.random import randn
# equivalent style specs: shorthand string vs explicit keyword arguments
plt.plot(randn(30).cumsum(), 'ko--')
plt.plot(randn(30).cumsum(), color='g', linestyle='dashed', marker='o')
data = np.random.randn(30).cumsum()
plt.plot(data, 'k--', label='Default')
plt.plot(data, 'k--', drawstyle='steps-post', label='steps-post')
plt.legend(loc='best')
# +
# Set the title, axis labels and tick labels
# -
# Customise ticks, tick labels, title and x-label on a single-axes figure.
# BUG FIX: the original assigned the new figure to `flg` (a typo) and then
# called fig.add_subplot, attaching the axes to a stale figure from an
# earlier cell instead of the one just created.
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(np.random.randn(1000).cumsum())
# custom tick positions with rotated text labels
ticks = ax.set_xticks([0, 250, 500, 750, 1000])
labels = ax.set_xticklabels(['one', 'two', 'three', 'four', 'five'], rotation=30, fontsize='small')
ax.set_title('My first matplotlib plot')
ax.set_xlabel('Stages')
# the same properties can be set in one batch call via ax.set(**props)
props = {
    'title': 'My first matplotlib plot',
    'xlabel': 'Stages'
}
ax.set(**props)
# +
# Add a legend
# -
from numpy.random import randn
# Plot three labelled series, then show the legend.
# BUG FIX: the original assigned the new figure to `flg` (a typo) and then
# attached the axes to the stale `fig` from an earlier cell, so these
# series were drawn on the wrong figure.
fig = plt.figure(); ax = fig.add_subplot(1,1,1)
ax.plot(randn(1000).cumsum(), 'k', label='one')
ax.plot(randn(1000).cumsum(), 'k--', label='two')
ax.plot(randn(1000).cumsum(), 'k.', label='three')
# + tags=[]
ax.legend(loc='best')
# +
# 注解以及在Subplot上绘图
# ax.text(x,y,'Hello world!', family='monospace', fontsize=10)
# -
import pandas as pd
# +
from datetime import datetime
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
data = pd.read_csv('examples/spx.csv', index_col=0, parse_dates=True)
spx = data['SPX']
spx.plot(ax = ax, style='k--')
crisis_data = [
(datetime(2007, 10, 11), 'Peak of bull market'),
(datetime(2008, 3, 12), 'Bear Strearns Fails'),
(datetime(2008, 9, 15), 'Lehman Bankruptcy')
]
for date, label in crisis_data:
ax.annotate(label, xy=(date, spx.asof(date) + 75), xytext=(date, spx.asof(date) + 225), arrowprops=dict(facecolor='black', headwidth=4, width=2, headlength=4), horizontalalignment='left', verticalalignment='top')
# Zoom in on 2007-2010
ax.set_xlim(['1/1/2007', '1/1/2011'])
ax.set_ylim([600, 1800])
ax.set_title('Important dates in the 2008-2009 financial crisis')
# +
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
rect = plt.Rectangle((0.2, 0.75), 0.4, 0.15, color='k', alpha=0.3)
circ = plt.Circle((0.7, 0.2), 0.15, color='b', alpha=0.3)
pgon = plt.Polygon([[0.15,0.15], [0.35, 0.4], [0.2, 0.6]], color='g', alpha=0.5)
ax.add_patch(rect)
ax.add_patch(circ)
ax.add_patch(pgon)
# -
plt.savefig('figpath.svg')
plt.savefig('figpath.png', dpi=400, bbox_inches='tight')
# Bar charts
fig, axes = plt.subplots(2,1)
data = pd.Series(np.random.rand(16), index=list('abcdefghijklmnop'))
data.plot.bar(ax=axes[0], color='k', alpha=0.7)   # vertical bars, one per Series entry
data.plot.barh(ax=axes[1], color='k', alpha=0.7)  # horizontal bars
# DataFrame bar plot: one bar group per row, one color per column ('Genus' names the legend).
df = pd.DataFrame(np.random.rand(6, 4), index=['one', 'two', 'three', 'four', 'five', 'six'], columns=pd.Index(['A', 'B', 'C', 'D'], name='Genus'))
df
df.plot.bar()
df.plot.barh(stacked=True, alpha=0.5)  # stacked=True piles the columns within each row
| 9.matplotlib.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 演習4 - トランズモン型量子ビット
#
# ## 歴史的背景
#
# 量子コンピューターでは、量子情報の基本単位として「量子ビット」が用いられます。量子ビットは、本質的には2準位系の量子力学的なシステムであり、電子のスピン(スピン量子ビット)、イオンの原子準位(イオントラップ量子ビット)などの自然界の系や、超伝導回路による人工的な量子系(超伝導量子ビット)など、多くの物理システムに実装することができます。
#
# 最初につくられた超伝導量子ビットは、ジョセフソン接合によって鉛に弱く結合された超伝導体に、厳密に定義されたクーパー対(超伝導体内の束縛電子)を配置したものでした。このクーパーペアボックス型量子ビットでは、$|0\rangle$の状態と$|1\rangle$の状態の間のエネルギー差は、システム内または周辺の自由電子や静電気の影響を敏感に受けます。そして、この電荷ノイズに敏感に反応して、量子ビットの位相緩和が起こります。クーパーペアボックス型量子ビットの位相緩和時間($T_2$)は、通常、~1$\mu s$程度です。
#
# 2007年初めて提案されたトランズモン型量子ビットにおける重要な発見は、このエネルギーの電圧依存性(分散と呼ばれる)が周期的であることでした。分路コンデンサーを導入すると、ジョセフソン・エネルギーと充電エネルギーの比$E_J/E_c$が50程度と非常に大きくなり、平坦な電荷分散が得られるようになります。クーパー対は電荷ノイズ(図1aのx軸上の不確かさ)に非常に敏感ですが、トランズモン型量子ビットではほとんどが抑制されています(図1d)。また、トランズモン量子ビットの位相緩和時間($T_2$)は大幅に改善されているのがわかります。IBMの量子システムにおいては、$T_2$は通常~100 $\mu s$です。[`ibmq_santiago`](https://quantum-computing.ibm.com/services?skip=0&systems=all&system=ibmq_santiago)の例を参照してください。
#
# <img src="resources/transmon-charge-dispersion.png" alt="Transmon charge dispersion" style="width:50%">
#
# この演習では、実際のIBM量子システムを用いて分光を行い、トランズモン型量子ビットのエネルギー特性を調べます。ジョセフソン・エネルギーと充電エネルギーの比$E_J/E_c$は、その前身であるクーパー対型量子ビットの充電ノイズ問題を解決する鍵となります。
#
# ### 参考文献
#
# 1. Nakamura, Yasunobu, <NAME>, and <NAME>. "Coherent control of macroscopic quantum states in a single-Cooper-pair box." Nature 398.6730 (1999): 786-788.
# 2. Koch, Jens, et al. "Charge-insensitive qubit design derived from the Cooper pair box." Physical Review A 76.4 (2007): 042319.
# ## トランズモン型量子ビットのエネルギーレベルとエネルギー特性
#
# 量子調和振動子(QHO)のハミルトニアンは、線形LC回路のハミルトニアンを量子化することで得られます。そのエネルギー準位は等間隔です(図2c)。しかし、ジョセフソン接合という非線形回路素子を導入すると、トランズモン型量子ビットのエネルギー準位が変化して(図2d)、そのエネルギー準位はもはや等間隔でなくなります。高エネルギーのエネルギー準位の間隔は、低エネルギーのものよりも小さくなっています。非調和性$\delta$は、$|1\rangle \rightarrow |2\rangle$ と$|0\rangle \rightarrow |1\rangle$の間のエネルギー差 $\delta = \hbar \omega_{12} - \hbar \omega_{01}$または$\omega_{12} - \omega_{01}$として定義されます。(ここでは、 $\hbar \equiv 1$とします)。マイクロ波の周波数を$\omega_{01}$に調整することで、多準位のトランズモンを2準位系、つまり量子ビットとして効果的に扱うことができるのです。
#
# <figure>
# <img src="resources/transmon-energy-levels.png" alt="Transmon energy levels" style="width:50%">
# <center><figcaption><b>Fig. 2</b> Energy potential for a quantum harmonic oscillator and a transmon qubit. (from Ref [3])</figcaption></center>
# </figure>
#
# 超電導体では、電子は$2e$の有効電荷を持つクーパー対を形成します。充電エネルギー$E_c$は、量子ビットに1つのクーパー対を加えるためのエネルギーコストであり、超電導回路の静電容量と関係があります。ジョセフソン・エネルギー$E_J$は、ジョセフソン接合をトンネルするクーパー対のエネルギーであり、トランズモン型量子ビットでは、誘導エネルギーはすべてジョセフソン接合によって提供されますが、これは他の種類の超伝導量子ビットには当てはまりません。(超伝導量子ビットの設計と解析に関する詳細はは、最近リリースされました[Qiskit Metal](https://qiskit.org/metal/)を参照ください)
#
# Duffing(ダフィング)振動子のハミルトニアンをクーパーペアボックスのハミルトニアンから導き出すことで、これらのエネルギーは関連するトランズモンの周波数に次のように関連づけることができます。
#
# $$
# f_{01} = \sqrt{8 E_c E_J} - E_c \qquad E_c = -\delta = f_{01} - f_{12},
# $$
#
# このノートでは、周波数を表す記号として、マイクロ波に使われることが多い$f$を$\omega$に置き代えて使用します。非調和性は、すべて充電エネルギー$E_c$によってもたらされます。物理的には、結合部分に大きな*分路*(並列)コンデンサーとして組み込まれています。これにより、電荷ノイズの影響を抑えつつ、トランズモンのエネルギー準位を十分に分離することができます。
# ## Qiskit Pulse の概要
#
# これまでは、抽象的な量子回路レベルで量子ビットを扱ってきました。回路内の量子ゲートは、物理的にはマイクロ波パルスとして実装されています。Qiskit Pulseは、IBM量子システムに送信されるマイクロ波パルスを操作するパルスレベルのアクセスを提供します。
#
# 簡単な概要説明として、Qiskit Pulseのスケジュール(実験)は、Channel(例:ドライブチャネル)に作用するInstruction(例:Play)で構成されています。ここでは、利用可能な命令とチャンネルの概要を示します。
#
# 
#
# 詳しくは、実際の量子ハードウェアとチャネルの相互作用をまとめた表をご覧ください。
#
# 
#
# Qiskit Pulseについては、[Qiskit documentation](https://qiskit.org/documentation/apidoc/pulse.html)で詳しく説明されていますが、私たちは解説を交えたPulseプログラミングから始める方がより理解の習得が進むと考えています。そこで、以下では、パルス、スケジュールを作成し、実際の量子システムで実験を行う方法を学びます。
# ## さあ始めよう!
#
# <div class="alert alert-block alert-success">
#
# **目標**
#
# $|1\rangle \rightarrow |2\rangle$ 遷移周波数$f_{12}$を見つける
#
# **計画**
#
# 1. **(チュートリアル)** 分光法(周波数スイープ)による$|0\rangle \rightarrow |1\rangle$ 遷移周波数$f_{01}$を見つけましょう。
# 1. **(チュートリアル)** ラビ振動(振幅スイープ)による X-180 パルスの振幅キャリブレーション
# 1. **(問題)** キャリブレートした X-180 パルスと分光法(周波数スイープ)による$|1\rangle \rightarrow |2\rangle$ 遷移周波数$f_{12}$を見つけましょう。
#
# </div>
#
# <div class="alert alert-block alert-danger">
#
# **このノートブックで編集が必要なのは <a href="#problem">問題の1つのセルだけ</a> です。** あるいは、より良いフィッティングを得るために <a href="#fit-f12">フィッティングパラメータを変更</a> する必要があるかもしれませんが、それ以外のためにセルを編集する必要はありません。しかしながら、`shift+Enter` を押してセルを実行する必要はあります。
#
# </div>
#
# Qiskit Pulse の重要な点に集中するために、後続のセルは `helper` モジュールを使います。詳細はQiskit Textbook [Investigating Quantum Hardware Using Microwave Pulses
# ](https://qiskit.org/textbook/ch-quantum-hardware/index-pulses.html) 及び [Qiskit Global Summer School](https://qiskit.org/learn/intro-qc-qh/) の Lectures 16-21, Labs 6-7を参照してください。
# +
# Import helper module from local folder
import sys
import os
sys.path.append(os.getcwd())
from resources import helper
# Numerical and plotting tools
import numpy as np
import matplotlib.pyplot as plt
# Import SI unit conversion factors
from resources.helper import GHz, MHz, kHz, us, ns
# -
# <div class="alert alert-block alert-danger">
#
# **スペシャルプロバイダー**
#
# 今回のチャレンジのために新しい量子システム `ibmq_jakarta` をリザーブしました。参加者はチャレンジ期間中(2021/05/20-26)スペシャル[プロバイダー](https://quantum-computing.ibm.com/composer/docs/iqx/manage/provider/#providers)への独占的なアクセスを手に入れます。スペシャルプロバイダーにアサインされるには**5つのチャレンジのうち最低1つを完了**する必要があります。そうすればあなたの IBM Quantumアカウントに`You have been added to a new project in IBM Quantum` というメールが送られます。
#
# すべてが正常に動作していれば、[アカウントの詳細ページ](https://quantum-computing.ibm.com/account)に特別なプロバイダーの情報が表示されるはずです。
#
# IBM Quantum アカウントは、IBMid と関連付けられている必要があります。そうでない場合は、`Login with some authorized required.`というエラーが発生します。IBM Quantumアカウントに関連付けられている同じ電子メールアドレスを使用して[新しいIBMidを登録](https://auth.quantum-computing.ibm.com/auth/idaas)し、[IBM Quantum](https://quantum-computing.ibm.com)に再度ログインしてください。
#
# **注意: エクササイズの完了からアサインされるまで12時間かかることがあります。** なにか問題があれば、Qiskit Slack ワークスペースの [#iqc2021](https://qiskit.slack.com/archives/C021UTFN9GE) にお問い合わせください。まだ参加していない方は、Qiskit Slackのワークスペース[こちら](https://ibm.co/joinqiskitslack)に参加してください。
#
# </div>
#
# スペシャルプロバイダーにアサインされていることをチェックするために、次のセルを実行してください。スペシャルプロバイダーに追加されていれば、以下のようなメッセージが出力されます。`<AccountProvider for IBMQ(hub='iqc2021-n', group='challenge-m', project='ex4')>`。 `n`は`1`から`10` までの数、`m`は`1`から`1000`までの数です。この番号は `ibmq_jakarta` への接続に必要になります。
# +
# Importing standard Qiskit libraries
from qiskit import IBMQ
from qiskit.tools.jupyter import *
# Loading your IBM Quantum account
IBMQ.load_account()  # requires an IBM Quantum API token saved on this machine
IBMQ.providers() # see a list of providers you have access to
# +
# get the special provider assigned to you using information from the output above
hub_name = 'iqc2021-1' # e.g. 'iqc2021-1'
group_name = 'challenge-93' # e.g. 'challenge-1'
project_name = 'ex4' # your project name should be 'ex4'
provider = IBMQ.get_provider(hub=hub_name, group=group_name, project=project_name)
# get `ibmq_jakarta` backend from the provider
backend_name = 'ibmq_jakarta'
backend = provider.get_backend(backend_name)  # handle to the real quantum system
backend # see details of the `ibmq_jakarta` quantum system
# -
# ### チャネルをインスタンス化し、測定スケジュールを得る
#
# 我々は以後、異なるドライブスケジュールに対して同じ測定スケジュールを使います。どのバックエンドについても、個別に較正(calibrate)する代わりにデフォルトの測定パルスを使うことができます。
# +
from qiskit import pulse
from qiskit.pulse import Play, Schedule, DriveChannel
# please use qubit 0 throughout the notebook
qubit = 0  # index of the physical qubit driven and measured in every schedule below
# -
# 次に、バックエンドコンフィグレーションとサンプリング時間 $dt$ をセーブします。プロットのために、`AcquireChannel` を除外します。
backend_config = backend.configuration()  # static backend description (channels, dt, ...)
exc_chans = helper.get_exc_chans(globals())  # channels to hide when drawing schedules
dt = backend_config.dt  # hardware sampling period, in seconds
print(f"Sampling time: {dt*1e9} ns")
# `instruction_schedule_map` はバックエンドで使われるデフォルトのキャリブレート済みパルスのパラメータを与えます。この "ネイティブゲートセット" は、あなたが
# [`QuantumCircuit`](https://qiskit.org/documentation/apidoc/circuit.html#gates-and-instructions) で使えるオペレーターのサブセットで構成されており、Qiskit が `QuantumCircuit` をトランスパイルする先のものであることがわかります。
backend_defaults = backend.defaults()  # backend-calibrated default pulse parameters
center_frequency = backend_defaults.qubit_freq_est  # estimated qubit frequencies (Hz), per qubit
inst_sched_map = backend_defaults.instruction_schedule_map  # native gate -> pulse schedule map
inst_sched_map.instructions  # list the instructions in the backend's native gate set
# インストラクションはバックエンドのネイティブゲートセットで構成されます: 単一量子ビットゲート, $X$のルート`sx`, パラメータ化された $Z$-軸回転 `rz(θ)`, そして二量子ビット 制御-NOT `cx`。その他にもいつくかゲートがリストされています。特にオイラー角に基づく ["U-gates"](https://qiskit.org/textbook/ch-states/single-qubit-gates.html#generalU3) は deprecated のため、もうすぐ使えなくなります。
#
# Basis Gate | Operation
# --- | ---
# `u1(λ)` | `rz(λ)`
# `u2(φ,λ)` | `rz(φ+π/2) sx rz(λ-π/2)`
# `u3(θ,φ,λ)` | `rz(φ+π) sx rz(θ+π) sx rz(λ)`
# `id` | identity
# `x` | $X$ for echoes in dynamical decoupling
# `measure` | measurement
#
# 一つ注意が必要なのは、それぞれの `sx` は1つの物理パルスで構成されますが、`rz` は [frame changes](https://arxiv.org/abs/1612.00858) によるソフトウェアで実装されることです。
# `instruction_schedule_map` からはスクラッチすることなく、デフォルトの計測パルスを得ることができます。これはキャリブレート済みのパルスから始めることにも使えます。
# retrieve calibrated measurement pulse from backend
# Reusing the backend's readout schedule avoids calibrating measurement ourselves.
meas = inst_sched_map.get('measure', qubits=[qubit])
meas.exclude(channels=exc_chans).draw(time_range=[0,1000])  # visualize, hiding acquire channels
# <div class="alert alert-block alert-success">
#
# ### ステップ1(チュートリアル): $|0\rangle \rightarrow |1\rangle$ 遷移検索
#
# このセクションのコードは変更する必要がありません。セルを実行してステップを理解してください。`spec01_scheds` の作り方には特に注意してください。同じようなパルススケジュールを<a href="#problem">最終問題</a>で作る必要があります。パルススケジュールの作り方についてより詳細は[ドキュメント](https://qiskit.org/documentation/tutorials/circuits_advanced/06_building_pulse_schedules.html)を参照してください。
#
# </div>
# +
from qiskit.pulse import DriveChannel, Gaussian
# The same spec pulse for both 01 and 12 spec
drive_amp = 0.25
# Reuse the duration of the backend's calibrated X pulse for our spec pulse.
drive_duration = inst_sched_map.get('x', qubits=[qubit]).duration
# The calibrated backend pulse uses an advanced DRAG pulse to reduce leakage
# to the |2> state; here we use a simple Gaussian pulse instead.
drive_sigma = drive_duration // 4 # DRAG pulses typically 4*sigma long.
spec_pulse = Gaussian(duration=drive_duration, amp=drive_amp,
                      sigma=drive_sigma, name=f"Spec drive amplitude = {drive_amp}")
# Construct an np array of the frequencies for our experiment
spec_freqs_GHz = helper.get_spec01_freqs(center_frequency, qubit)
# Create the base schedule
# Start with drive pulse acting on the drive channel
# One schedule per frequency point: set drive frequency, play spec pulse, measure.
spec01_scheds = []
for freq in spec_freqs_GHz:
    with pulse.build(name="Spec Pulse at %.3f GHz" % freq) as spec01_sched:
        with pulse.align_sequential():
            # Pay close attention to this part to solve the problem at the end
            pulse.set_frequency(freq*GHz, DriveChannel(qubit))
            pulse.play(spec_pulse, DriveChannel(qubit))
            pulse.call(meas)
    spec01_scheds.append(spec01_sched)
# Draw spec01 schedule
spec01_scheds[-1].exclude(channels=exc_chans).draw(time_range=[0,1000])
# +
from qiskit.tools.monitor import job_monitor
# Run the job on a real backend
# Submits one circuit per frequency point; job_monitor blocks until completion.
spec01_job = backend.run(spec01_scheds, job_name="Spec 01", **helper.job_params)
print(spec01_job.job_id())
job_monitor(spec01_job)
# If the queuing time is too long, you can save the job id
# And retrieve the job after it's done
# Replace 'JOB_ID' with the the your job id and uncomment to line below
#spec01_job = backend.retrieve_job('JOB_ID')
# -
# ### 分光データのフィッティング
#
#
#
#
#
# 量子ビット周波数 $f_{01}$ を見つけるため、分光信号を *ローレンツ* 関数にフィットします
#
# $$ \frac{AB}{\pi[(f-f_{01})^2 + B^2]} + C $$
#
# 以下はフィッティングパラメーターです:
#
#
# Parameter | Corresponds to
# --- | ---
# $A$ | amplitude
# $f_{01}$ | 01 frequency guess (GHz)
# $B$ | scale
# $C$ | offset
#
# `helper` モジュールから `SpecFitter` を使います。これは `qiskit.ignis.characterization.fitters` ライブラリのフィッターをベースにしています。
#
# <div class="alert alert-block alert-danger">
#
# **注意:** より良いフィッティングのためには、フィッティングパラメーターを調整する必要があります。
#
# </div>
# +
from resources.helper import SpecFitter

# Initial guesses for the Lorentzian fit AB/(pi[(f-f01)^2 + B^2]) + C.
amp_guess = 5e6   # A: amplitude
f01_guess = 5     # f01: resonance frequency guess (GHz)
B = 1             # B: linewidth / scale
C = 0             # C: vertical offset
fit_guess = [amp_guess, f01_guess, B, C]
fit = SpecFitter(spec01_job.result(), spec_freqs_GHz, qubits=[qubit], fit_p0=fit_guess)
fit.plot(0, series='z')
f01 = fit.spec_freq(0, series='z')  # fitted |0>->|1> transition frequency (GHz)
print("Spec01 frequency is %.6f GHz" % f01)
# -
# バックエンドのキャリブレート済み量子ビット周波数とあなたの結果を比較してみましょう!うまくいけば、近い値 ($\pm$1 MHz) が得られます。
# Retrieve qubit frequency from backend properties
f01_calibrated = backend.properties().frequency(qubit) / GHz
f01_error = abs(f01-f01_calibrated) * 1000 # error in MHz
# Sanity check: our fitted f01 should be within ~1 MHz of the backend calibration.
print("Qubit frequency error is %.6f MHz" % f01_error)
# 🎉 おめでとうございます!あなたは実機上の最初のパルスの実験に成功しデータ解析をしました。これは Qiskit Pulse と `ibmq_armonk` を[2年弱前に発表](https://www.ibm.com/blogs/research/2019/12/qiskit-openpulse/) する前は不可能でした。今あなたは量子物理学の実験を家から実施しています。信じられないことです!
# <div class="alert alert-block alert-success">
#
# ### ステップ2(チュートリアル): ラビ振動による X-180 パルス振幅のキャリブレーション
#
# このセクションのコードは変更する必要がありません。セルを実行してステップを理解してください。
# </div>
#
# ### 測定した周波数からラビ・スケジュールを作る
# +
max_rabi_amp = 0.75  # sweep drive amplitude from 0 up to this value
rabi_amps = helper.get_rabi_amps(max_rabi_amp)

# One schedule per amplitude point, all driven at the fitted f01.
rabi_scheds = []
for ridx, amp in enumerate(rabi_amps):
    with pulse.build(name="rabisched_%d_0" % ridx) as sched: # '0' corresponds to Rabi
        with pulse.align_sequential():
            pulse.set_frequency(f01*GHz, DriveChannel(qubit))
            rabi_pulse = Gaussian(duration=drive_duration, amp=amp, \
                                  sigma=drive_sigma, name=f"Rabi drive amplitude = {amp}")
            pulse.play(rabi_pulse, DriveChannel(qubit))
            pulse.call(meas)
    rabi_scheds.append(sched)

# Draw rabi schedule
rabi_scheds[-1].exclude(channels=exc_chans).draw(time_range=[0,1000])
# +
# Run the job on a real device
rabi_job = backend.run(rabi_scheds, job_name="Rabi", **helper.job_params)
print(rabi_job.job_id())
job_monitor(rabi_job)
# If the queuing time is too long, you can save the job id
# And retrieve the job after it's done
# Replace 'JOB_ID' with the the your job id and uncomment to line below
#rabi_job = backend.retrieve_job('JOB_ID')
# -
# ### ラビ・データのフィッティング
#
# ラビ周期 $T = 2\pi/f$ を見つけるため、ラビ信号を正弦関数にフィットします。
#
# $$ a \cos(2\pi f x + \phi) + c $$
#
# 以下はフィッティングパラメーターです:
#
# Parameter | Corresponds to
# --- | ---
# $a$ | amplitude
# $f$ | Rabi drive frequency
# $\phi$ | phase offset
# $c$ | offset
#
#
# `qiskit.ignis.characterization.calibration.fitters` ライブラリから `RabiFitter` を使います。
#
# <div class="alert alert-block alert-danger">
#
# **注意:** より良いフィッティングのためには、フィッティングパラメーターを調整する必要があります。
#
# </div>
# +
from qiskit.ignis.characterization.calibrations.fitters import RabiFitter

# Initial guesses for the cosine fit a*cos(2*pi*f*x + phi) + c.
amp_guess = 5e7   # a: oscillation amplitude
fRabi_guess = 2   # f: Rabi frequency vs drive amplitude
phi_guess = 0.5   # phi: phase offset
c_guess = 0       # c: vertical offset
fit_guess = [amp_guess, fRabi_guess, phi_guess, c_guess]
fit = RabiFitter(rabi_job.result(), rabi_amps, qubits=[qubit], fit_p0=fit_guess)
fit.plot(qind=0, series='0')
x180_amp = fit.pi_amplitude()  # drive amplitude producing a pi (X-180) rotation
print("Pi amplitude is %.3f" % x180_amp)
# -
# <div id='problem'></div>
# <div class="alert alert-block alert-success">
#
# ### ステップ3(問題): $|1\rangle \rightarrow |2\rangle$ 遷移周波数を見つける
#
# トランズモン量子ビットの $|1\rangle$ と $|2\rangle$ の間の状態遷移を観測するため、以下が必要です:
#
# 1. 量子ビットを $|0\rangle$ から $|1\rangle$ にするため $X_\pi$ パルスを適用する
# 1. $|1\rangle \rightarrow |2\rangle$ 遷移を見つけるため、周波数を変えながら第二のパルスを適用する
#
# </div>
# <div class="alert alert-block alert-danger">
# 下のセルがこのノートブックで唯一編集が必要なセルです。
# </div>
# +
# Define pi pulse: calibrated X-180 that takes the qubit |0> -> |1>.
x_pulse = Gaussian(duration=drive_duration,
                   amp=x180_amp,
                   sigma=drive_sigma,
                   name='x_pulse')

def build_spec12_pulse_schedule(freq, anharm_guess_GHz):
    """Build one |1> -> |2> spectroscopy schedule.

    First excites the qubit to |1> with the calibrated pi pulse, then plays a
    spectroscopy tone at freq + anharm_guess_GHz (GHz) to probe the 1->2
    transition, and finally measures.

    Args:
        freq: sweep frequency offset from f01, in GHz.
        anharm_guess_GHz: guessed anharmonicity (typically ~ -0.3 GHz).

    Returns:
        The built pulse schedule.
    """
    with pulse.build(name="Spec Pulse at %.3f GHz" % (freq+anharm_guess_GHz)) as spec12_schedule:
        with pulse.align_sequential():
            # Write your code between these comments - start
            # Spectroscopy tone; 1.5x the 01 spec amplitude to get a clearer
            # 1->2 signal. (Fixed: the old code also rebound the name
            # 'spec_pulse' locally for no reason, and the pulse name
            # misreported the amplitude as drive_amp instead of drive_amp*1.5.)
            excited_pulse = Gaussian(duration=drive_duration, amp=drive_amp*1.5,
                                     sigma=drive_sigma,
                                     name=f"excited drive amplitude = {drive_amp*1.5}")
            pulse.play(x_pulse, DriveChannel(qubit))  # |0> -> |1>
            pulse.set_frequency((freq+anharm_guess_GHz)*GHz, DriveChannel(qubit))
            pulse.play(excited_pulse, DriveChannel(qubit))  # probe |1> -> |2>
            pulse.call(meas)
            # Write your code between these comments - end
    return spec12_schedule
# -
# 我々のトランズモン量子ビットの非調和性は通常 $-300$ MHz 付近にあるため、その周りをスイープします。
# +
anharmonicity_guess_GHz = -0.3 # your anharmonicity guess
freqs_GHz = helper.get_spec12_freqs(f01, qubit)

# Now vary the sideband frequency for each spec pulse
# One schedule per frequency point around f01 + anharmonicity_guess.
spec12_scheds = []
#print(freqs_GHz)
for freq in freqs_GHz:
    #print(freq+anharmonicity_guess_GHz)
    spec12_scheds.append(build_spec12_pulse_schedule(freq, anharmonicity_guess_GHz))

# Draw spec12 schedule
spec12_scheds[-1].exclude(channels=exc_chans).draw(time_range=[0,1000])
# +
# Run the job on a real device
spec12_job = backend.run(spec12_scheds, job_name="Spec 12", **helper.job_params)
print(spec12_job.job_id())
job_monitor(spec12_job)
# If the queuing time is too long, you can save the job id
# And retrieve the job after it's done
# Replace 'JOB_ID' with the the your job id and uncomment to line below
#spec12_job = backend.retrieve_job('JOB_ID')
# -
# ### 分光データのフィッティング
#
# <div id='fit-f12'></div>
#
# $|1\rangle \to |2\rangle$ 遷移 $f_{12}$ を見つけるため、再び分光信号をローレンツ関数にフィットします。
#
# $$ \frac{AB}{\pi[(f-f_{12})^2 + B^2]} + C $$
#
# 以下はフィッティングパラメータです:
#
# Parameter | Corresponds to
# --- | ---
# $A$ | amplitude
# $f_{12}$ | 12 frequency guess (GHz)
# $B$ | scale
# $C$ | offset
#
#
# <div class="alert alert-block alert-danger">
#
# **注意:** より良いフィッティングのためには、フィッティングパラメーターを調整する必要があります。
#
# </div>
# +
# Initial guesses for the Lorentzian fit AB/(pi[(f-f12)^2 + B^2]) + C.
amp_guess = 2e7          # A: amplitude
f12_guess = f01 - 0.3    # f12: expect f01 + anharmonicity (~ -0.3 GHz)
B = .1                   # B: linewidth / scale
C = 0                    # C: vertical offset
fit_guess = [amp_guess, f12_guess, B, C]
# Frequencies must be shifted by the same anharmonicity guess used to build the schedules.
fit = SpecFitter(spec12_job.result(), freqs_GHz+anharmonicity_guess_GHz, qubits=[qubit], fit_p0=fit_guess)
fit.plot(0, series='z')
f12 = fit.spec_freq(0, series='z')  # fitted |1>->|2> transition frequency (GHz)
print("Spec12 frequency is %.6f GHz" % f12)
# -
# Check your answer using following code
from qc_grader import grade_ex4
grade_ex4(f12,qubit,backend_name)

# Submit your answer. You can re-submit at any time.
from qc_grader import submit_ex4
submit_ex4(f12,qubit,backend_name)
# ## $E_J/E_c$を計算する
# 導入部の式を修正し、パルス実験で得られた$f_{01}$と$f_{12}$を用いて、$E_c$と$E_J$を計算することができます。
#
# $$
# E_c = -\delta = f_{01} - f_{12} \qquad E_J = \frac{(2f_{01}-f_{12})^2}{8(f_{01}-f_{12})}
# $$
Ec = f01 - f12  # charging energy (GHz): Ec = -delta = f01 - f12
Ej = (2*f01-f12)**2/(8*(f01-f12))  # Josephson energy (GHz), from f01 = sqrt(8*Ec*Ej) - Ec
print(f"Ej/Ec: {Ej/Ec:.2f}") # this value is typically ~ 30
# ## Additional information
#
# **Created by:** <NAME>, <NAME>
#
# **Version:** 1.0.0
| solutions by participants/ex4/ex4-ja-AyumuShiraishi.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Saravanaprabhu36/ml-basics/blob/master/Berlin_Airbnb_Price_Prediction.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="OfbLSsoX1sXd"
# all essential libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# encoding the object types into integers/floats
from sklearn.preprocessing import LabelEncoder
# Model building
import sklearn
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
# + id="UySlWNpN128K"
def missing_cols(df):
    """Print every column of *df* that has missing values.

    One line per affected column: "<name> <missing count> [<missing %>]".
    Columns with no missing values are skipped.

    Args:
        df: pandas DataFrame to inspect.

    Returns:
        None; output goes to stdout.
    """
    for col in df.columns:
        # Compute the null count once and reuse it (the original recomputed
        # isnull().sum() a second time for the print).
        missing_vals = df[col].isnull().sum()
        if missing_vals != 0:
            pct = df[col].isna().mean() * 100  # share of missing values in this column
            print("{} {} [{}%]".format(col, missing_vals, round(pct, 2)))
# + colab={"base_uri": "https://localhost:8080/", "height": 341} id="Kq0RDNx03rmE" outputId="6e6268f6-c26d-440e-e7f2-45dc7b167884"
# Training split of the Berlin Airbnb listings dataset.
df = pd.read_csv("train_airbnb_berlin.csv")
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="RrrazRjY3xxC" outputId="7efc6433-7993-4895-d137-b125513a5e3f"
df.shape # returns a tuple with number of rows and columns which is not callable
# + id="1TfpFgyx37ul" colab={"base_uri": "https://localhost:8080/", "height": 806} outputId="2a60c384-5834-445c-fd99-a4320c96fb4b"
# Held-out test split.
df2 = pd.read_csv("test_airbnb_berlin.csv")
df2
# + id="X36l2G294jAg" colab={"base_uri": "https://localhost:8080/"} outputId="1769d2c4-32e7-4b2e-c6cf-cf5376929bde"
missing_cols(df)  # report which training columns have missing values
# + id="1lx3-bmA4lH3" colab={"base_uri": "https://localhost:8080/", "height": 358} outputId="2765e54e-e426-4f40-a266-078e15b6f9bb"
# 'Square Feet' is dropped because it is mostly missing (see missing_cols output).
newdf = df.drop(["Square Feet"], axis = 1)
newdf.tail()
# + colab={"base_uri": "https://localhost:8080/", "height": 655} id="XKMHc_hv5DO8" outputId="8c5aa4ca-19e9-4d99-fc3e-582fb425fe2f"
en_df = newdf.fillna(0) # fills all the missing values with zeros
en_df
# + id="hyg21yGJ6sIZ"
# en_df = new_df["Overall Rating"].fillna(np.mean(new_df["Overall Rating"]))
# en_df -> another way to fill the missing values by the average of all the values in that column
# + colab={"base_uri": "https://localhost:8080/"} id="W4ej58Bc6dYN" outputId="71631018-493e-45d7-9087-b1e02ccedf42"
en_df.info() # what does each column posses -> name, non-null, data type
# + colab={"base_uri": "https://localhost:8080/"} id="gz8-n6OD6oMY" outputId="43a7c5ca-f6fc-4c95-8a7d-3209a86785d9"
# Object-dtype (string/categorical) columns of the training frame.
objList = en_df.select_dtypes(include = "object").columns
print(objList)
# + colab={"base_uri": "https://localhost:8080/"} id="he_Gk6xK70sh" outputId="d0821332-7787-4afe-9cd1-c17f0d447784"
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder() # le is the conventional keyword for label encoder
# Encode each object column as integers so the regressors can consume it.
for feat in objList:
    en_df[feat] = le.fit_transform(en_df[feat].astype(str))
    # object -> str -> int
    # LabelEncoder() does not directly deal with objects
print(en_df.info())
# + id="M3dqbIMP8SBs"
# EDA -> Exploratory Data Analysis
# In statistics, Exploratory Data Analysis is an approach of analyzing data sets to summarize their main characteristics
import seaborn
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="mTRcCZIg8hOj" outputId="166489d1-f052-4ee3-f9b5-d5495c9bb16f"
# One scatter plot per feature against the target 'Price'.
for col in en_df.columns[:37]:
    x = en_df[col]
    y = en_df["Price"]  # NOTE(review): loop-invariant; could be hoisted above the loop
    plt.plot(x, y, ".") # . is a datapoint representing the relation between each column and the price
    plt.xlabel(col)
    plt.ylabel("Price")
    plt.show()
# + id="9yniSvHM9Pet"
# + colab={"base_uri": "https://localhost:8080/", "height": 353} id="bgc31JtVAOpi" outputId="57a0e83b-6da2-401d-acf7-d21c004c842a"
# Histogram of the overall rating (kde=False -> raw counts only).
# NOTE(review): sns.distplot is deprecated in recent seaborn; sns.histplot is the replacement.
sns.distplot(en_df["Overall Rating"], kde = False).set(xlabel = "Overall Rating", ylabel = "Count")
# + colab={"base_uri": "https://localhost:8080/", "height": 945} id="bviY9n7DA36I" outputId="6acb319c-4b58-4a94-a75a-71ccff3d0462"
numerical = en_df.select_dtypes(include = "float64").columns
en_df[numerical].hist(figsize = (15, 15)) # plotting all float64 columns in histogram format
# + colab={"base_uri": "https://localhost:8080/"} id="SUbZZCoUBDzz" outputId="45332ed1-3363-41b8-8141-72291f5a9709"
missing_cols(df2)  # report missing values in the test split
# + id="KD1_kBqsBdIF"
newdf2 = df2.drop(["Square Feet"], axis = 1) # dropping square feet column because of too many null values
# + colab={"base_uri": "https://localhost:8080/", "height": 806} id="9yb5mky-Blg7" outputId="84294be4-a36d-4735-ab5a-a5c31122593f"
# Same imputation strategy as the training frame: fill missing values with 0.
en_df2 = newdf2.fillna(0)
en_df2
# + id="JWIYJzEsBwfE"
# en_df2["Accuracy Rating"] = newdf2["Accuracy Rating"].fillna(np.mean(en_df["Accuracy Rating"]))
# en_df2["Accuracy Rating"]
# + colab={"base_uri": "https://localhost:8080/"} id="HizHxhXGB_gj" outputId="b33ffe60-113f-47fd-c39c-31e29233021c"
en_df2.info()
# + id="juQlKt-nCEqj"
# Object-dtype columns of the TEST frame.
objList2 = en_df2.select_dtypes(include = "object").columns
# + colab={"base_uri": "https://localhost:8080/"} id="X0FHiqCXCTCn" outputId="52d75204-3e68-4d31-a108-ec5641be578e"
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder() # le is the conventional keyword for label encoder
# BUG FIX: iterate the test frame's own object columns (objList2), not the
# train frame's objList computed earlier — using objList would break (or skip
# columns) whenever the two frames' object columns differ.
# NOTE(review): fit_transform refits a fresh encoding on the test data, so the
# integer codes are not guaranteed to match the training encoding; a real
# pipeline should reuse encoders fitted on the training set.
for feat in objList2:
    en_df2[feat] = le.fit_transform(en_df2[feat].astype(str))
    # object -> str -> int
    # LabelEncoder() does not directly deal with objects
en_df2.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 470} id="J2zFLe77CTmE" outputId="643c5949-82d3-4b38-8da8-b53b5e115910"
# Features: everything except the target column.
xVar = en_df.drop(["Price"], axis = 1)
xVar
# + colab={"base_uri": "https://localhost:8080/"} id="Pwu_EO8XFfea" outputId="a11f1870-afd1-40b0-eb73-ed0e31f6c960"
# Target: listing price.
yVar = en_df["Price"]
yVar
# + colab={"base_uri": "https://localhost:8080/"} id="br3Sm1amFt6A" outputId="0a7abb64-7bed-4ec5-ed21-54681544467e"
# 80/20 split; fixed random_state for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(xVar, yVar, test_size=0.20, random_state = 0)
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
# + [markdown] id="lAjXt02UGeIn"
# ### XGBoost Regression model
# + id="hZXriRqrGRjU"
# Gradient-boosted trees regressor with hand-picked hyperparameters.
xgboost = XGBRegressor(learning_rate=0.008,
                       n_estimators=6000,
                       max_depth=4,
                       min_child_weight=0,
                       gamma=0.6,
                       subsample=0.7,
                       colsample_bytree=0.7,
                       objective="reg:squarederror",
                       nthread=-1,
                       scale_pos_weight=1,
                       seed=27,
                       reg_alpha=0.00006,
                       random_state=42
                       )
# learning rate requires hypertuning for error correction
# n_estimators means how many steps, will be needed for predicting the output
xgb = xgboost.fit(X_train, y_train)  # fit returns the estimator itself
# + [markdown] id="ucYAztElJSdj"
# ### Linear Regression model from sklearn
# + id="DQ3mI8JAI7NM"
from sklearn.linear_model import LinearRegression
# + id="KaAIPcVFJZKr"
# Ordinary least squares baseline.
linear = LinearRegression()
lr = linear.fit(X_train, y_train)  # fit returns self, so lr is the same object as linear
# + [markdown] id="sbPl6R78KFvD"
# ### Adaboost Regression model
# + id="aolQ0uipKEEC"
from sklearn.ensemble import AdaBoostRegressor

# Boosted ensemble of shallow regressors (default base estimator).
ada = AdaBoostRegressor(learning_rate=0.005,
                        n_estimators=75,
                        random_state=42
                        )
# + colab={"base_uri": "https://localhost:8080/"} id="hDj3gRm-Ketm" outputId="7cf1fe71-2180-40f1-f693-e9aa5e1749f6"
ada.fit(X_train, y_train)
# + id="dwrVH7OLKhCQ"
# XGBoost and AdaBoost uses decision trees to predict the result
# linear regression works based on the line of best fit to predict the result
# + [markdown] id="WEYqgyReK4fv"
# ### Predictions
# + colab={"base_uri": "https://localhost:8080/"} id="_4ndKYlSK06J" outputId="55efdb19-984c-49d0-cfca-5b92e2097a8b"
# Predicted prices on the held-out split, one array per model.
prediction_xgb = xgb.predict(X_test)
# prediction_res_xgb = xgb.score(X_test, y_test)
prediction_xgb
# + colab={"base_uri": "https://localhost:8080/"} id="GZcYfMXhLAEp" outputId="b1d59604-cb6e-4f5f-9deb-2f9ee4ebf0c8"
prediction_linear = linear.predict(X_test)
prediction_linear
# + colab={"base_uri": "https://localhost:8080/"} id="_dhpY-_wLI_M" outputId="731fa34f-a709-493f-dab5-46eb5b91154e"
prediction_ada = ada.predict(X_test)
prediction_ada
# + [markdown] id="RYNRtJh-LeFe"
# ### RMSE Calculation -> Root Mean Square Error
# + colab={"base_uri": "https://localhost:8080/"} id="sQ6kYkwULaNO" outputId="7df19d8d-e3b2-4a58-a660-a71708a609a6"
from sklearn.metrics import mean_squared_error
# RMSE per model: sqrt of the mean squared error against the true prices.
# Lower is better.
RMSE_xgb = np.sqrt(mean_squared_error(y_test, prediction_xgb))
RMSE_linear = np.sqrt(mean_squared_error(y_test, prediction_linear))
RMSE_ada = np.sqrt(mean_squared_error(y_test, prediction_ada))
print(RMSE_xgb)
print(RMSE_linear)
print(RMSE_ada)
# + id="M5fDBg8AMDGa"
# lower the RMSE value, the better is the model
# overfitting means the model just mugged up the training data
# This situation where any given model is performing too well on the training data but the performance drops significantly over the test set is called an overfitting model.
# if the model is performing poorly over the test and the train set, then we call that an underfitting model.
# + [markdown] id="i29QWfl8M13n"
# **Overfitting** means the model performs very well in training data but not so well in testing data
# + [markdown] id="KnpG31-XM84W"
# **Underfitting** means the model does not actually perform well in both training and testing data
# + [markdown] id="Om_UUCX0MMqw"
# ### Convert the prediction dataframe of XGB regression model into CSV file
#
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="Q62k2UhYMJqe" outputId="fad2e882-3a5d-4dde-f87a-402067960109"
from pandas import DataFrame
# Wrap the XGBoost predictions in a one-column frame and persist to CSV.
# (Column-header typo fixed: "prediciton" -> "prediction".)
df_xgb = DataFrame(prediction_xgb, columns= ["prediction"])
df_xgb
# + id="yxk6bTaiNAul"
df_xgb.to_csv("prediction_xgb.csv")
# + [markdown] id="3Hdh1kAxOFh8"
# ### Convert the prediction dataframe of linear regression model into CSV file
#
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="rXvF2cpuNilo" outputId="7a5f8181-95fc-4e10-ba65-5b30e923e77b"
from pandas import DataFrame
# Wrap the linear-regression predictions in a one-column frame and persist to CSV.
# (Column-header typo fixed: "prediciton" -> "prediction".)
df_linear = DataFrame(prediction_linear, columns= ["prediction"])
df_linear
# + id="tXDwVAM6Nvar"
df_linear.to_csv("prediction_linear.csv")
# + [markdown] id="OpIidlSPOQo-"
# ### Convert the prediction dataframe of ada regression model into CSV file
#
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="cCv59XeKNyXx" outputId="02bc4ea0-a02d-4e23-d01d-3af3688ec0fa"
from pandas import DataFrame
# Wrap the AdaBoost predictions in a one-column frame and persist to CSV.
# (Column-header typo fixed: "prediciton" -> "prediction".)
df_ada = DataFrame(prediction_ada, columns= ["prediction"])
df_ada
# + id="NrTXdU9_N2-K"
df_ada.to_csv("prediction_ada.csv")
# + [markdown] id="OYhIvDrcOopH"
# ### Checking if the CSV files are stored correctly
# + id="_OjxAeiqOtlK"
# Read the three exported files back to confirm they were written correctly.
csv_xgb = pd.read_csv("prediction_xgb.csv")
csv_linear = pd.read_csv("prediction_linear.csv")
csv_ada = pd.read_csv("prediction_ada.csv")
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="mnC9vJHDPRiU" outputId="6bcf4c5a-d55b-4816-c866-f0233b3ec617"
csv_xgb
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="Mq5IKzuGPR2E" outputId="6020052e-f48d-46b1-a8f4-47c3551fc565"
csv_linear
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="1Jf0ksmqPSI9" outputId="6f3c8aee-6ff9-4584-ec38-62b6c22bd830"
csv_ada
| Berlin_Airbnb_Price_Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Importing packages
# +
# %matplotlib inline
import numpy as np
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.layers import Input, Flatten, Dense
from keras.callbacks import Callback, ModelCheckpoint
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
import warnings
warnings.filterwarnings('ignore')
# -
# ### Load the pretrained Network
# Pretrained VGG16 convolutional base; include_top=False drops the ImageNet classifier head.
model_vgg16_conv = VGG16(weights='imagenet', include_top=False)
# ### Freeze the layers
# Freeze the base so only the new classification head is trained.
for layer in model_vgg16_conv.layers:
    layer.trainable = False
# ### Training parameters
img_width, img_height = 150, 150          # input image size fed to the network
train_data_dir = 'train'                  # one subdirectory per class
val_data_dir = 'validation'
model_weights_file = 'vgg16-xfer-weights.h5'  # best-checkpoint output path
nb_train_samples = 4                      # samples per epoch (tiny demo dataset)
nb_val_samples = 4
nb_epochs = 5
# ### Build a classification model on top of Base Network
# +
# Input placeholder matching the target image size (RGB channels last).
# Renamed from 'input' — that name shadowed the Python builtin.
input_tensor = Input(shape=(img_width, img_height, 3))
output_vgg16_conv = model_vgg16_conv(input_tensor)

# Small dense classification head on top of the frozen VGG16 base.
x = Flatten()(output_vgg16_conv)
x = Dense(64, activation='relu')(x)
x = Dense(2, activation='softmax')(x)  # two classes: cat / dog

# NOTE(review): 'input='/'output=' kwargs are the legacy Keras 1 API, kept for
# consistency with fit_generator/samples_per_epoch used below; modern Keras
# requires inputs=/outputs=.
model = Model(input=input_tensor, output=x)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# -
# ### Dataset Preparation
# +
# Training generator applies light augmentation; validation only rescales.
train_datagen = ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
# Directory iterators: class labels are inferred from subdirectory names.
train_generator = train_datagen.flow_from_directory(train_data_dir, target_size=(img_width, img_height),
                                                    batch_size=4, class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(val_data_dir, target_size=(img_width, img_height),
                                                        batch_size=4,class_mode='categorical')
# -
# ### Training
# +
# Checkpoint the weights whenever validation accuracy improves.
callbacks = [ModelCheckpoint(model_weights_file, monitor='val_acc', save_best_only=True)]
# Legacy Keras 1 training API (fit_generator / samples_per_epoch / nb_epoch).
history = model.fit_generator( train_generator, callbacks = callbacks, samples_per_epoch=nb_train_samples,
                               nb_epoch=nb_epochs, validation_data=validation_generator, nb_val_samples=nb_val_samples)
print('Training Completed!')
# -
# ### Test the model
# +
img_path = 'dog.jpg'
label = ['Cat','Dog']  # index order matches flow_from_directory's alphabetical class order
# Load and preprocess a single image into a (1, 150, 150, 3) batch.
img = image.load_img(img_path, target_size=(150, 150))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
features = model.predict(x)
# BUG FIX: the original used np.where(features == 1), which assumes the softmax
# outputs exactly 1.0 for the winning class; in practice the probabilities are
# fractional, the match is empty, and indexing [1][0] raises IndexError.
# argmax picks the most probable class regardless of its exact value.
ind = int(np.argmax(features, axis=1)[0])
print('Predicted Array:',features)
print('Predicted Label:',label[ind])
# -
# ### Models for image classification with weights trained on ImageNet
# * Xception
# * VGG16
# * VGG19
# * ResNet50
# * InceptionV3
# * InceptionResNetV2
# * MobileNet
# * DenseNet
# * NASNet
| 1. CNN_Transfer_Learning_Cats_vs_Dogs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
import torchvision as vision
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader
import sys
torch.cuda.set_device(0)  # pin all CUDA work to GPU 0 (raises if no CUDA device is present)
print('Python Version: {}; CUDA Version: {}; pytorch version: {}'.format(
    sys.version,torch.version.cuda,torch.__version__))
# +
# change the PIL to tensor
# Convert PIL images to tensors, then normalize.
from torchvision.transforms import ToTensor, Normalize as Norm, Compose
transform = Compose([ToTensor(),Norm((0.5,), (0.5,))])  # ToTensor -> [0,1]; Normalize(mean=0.5, std=0.5) -> [-1,1]
train_dataset = MNIST(root='../dataset/MNIST',train=True,download=True,transform=transform)
test_dataset = MNIST(root='../dataset/MNIST',train=False,download=True,transform=transform)
# -
# Build up the dataset from numpy array
# Alternative: build the dataset from NumPy arrays.
# NOTE(review): scikit-learn removed `fetch_mldata` in 0.22; `fetch_openml` is the
# supported replacement and serves the same 70k MNIST samples.
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split as split
from sklearn.preprocessing import Normalizer
from torch.utils.data import TensorDataset as TData
import numpy as np
mnist = fetch_openml('mnist_784', version=1, as_frame=False)
X_train, X_test, y_train, y_test = split(mnist['data'], mnist['target'], train_size=60000, test_size=10000)
norm = Normalizer()  # scales each sample to unit L2 norm
X_train = norm.fit_transform(X_train).astype(np.float32)
X_test = norm.transform(X_test).astype(np.float32)
# `np.long` was removed in NumPy 1.24 — use np.int64 (torch expects int64 class labels).
train_dataset = TData(torch.from_numpy(X_train.reshape((-1, 1, 28, 28))),
                      torch.from_numpy(y_train.astype(np.int64)).view(-1))
test_dataset = TData(torch.from_numpy(X_test.reshape((-1, 1, 28, 28))),
                     torch.from_numpy(y_test.astype(np.int64)).view(-1))
print(len(train_dataset))          # length method
print(train_dataset[0][0].shape)   # data field: channel * height * width for conv2d
print(train_dataset[0][1].shape)   # label field, flattened via .view(-1)
train_feeder = DataLoader(train_dataset, batch_size=128,shuffle=True, num_workers=2)
test_feeder = DataLoader(test_dataset,batch_size=1024,shuffle=False,num_workers=2)
# +
from torch.nn import Conv2d as Conv, MaxPool2d as Pool, Linear as FC
from torch.nn.functional import relu, dropout
import torch.nn as nn
class Net(nn.Module):
    """LeNet-style CNN for 28x28 single-channel digits, 10 output classes (raw logits)."""

    def __init__(self):
        super(Net, self).__init__()
        # Two conv stages (1->6 and 6->16 feature maps, 5x5 kernels), each followed
        # by the shared 2x2 max-pool: 28 -> 24 -> 12 -> 8 -> 4.
        self.conv1 = Conv(1, 6, kernel_size=5)
        self.pool = Pool(2, 2)
        self.conv2 = Conv(6, 16, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()  # NOTE(review): defined but never used in forward()
        # Classifier head over the flattened 16 x 4 x 4 activation volume.
        self.fc1 = FC(16 * 4 * 4, 120)
        self.fc2 = FC(120, 84)
        self.fc3 = FC(84, 10)

    def forward(self, x):
        for conv_layer in (self.conv1, self.conv2):
            x = self.pool(relu(conv_layer(x)))
        x = x.view(-1, 16 * 4 * 4)  # flatten for the fully connected layers
        x = relu(self.fc1(x))
        x = dropout(relu(self.fc2(x)), training=self.training)
        return self.fc3(x)
# Place the model on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = Net().to(device)
# +
from torch.optim import SGD
from torch.nn import CrossEntropyLoss
criterion = CrossEntropyLoss()  # expects raw logits and int64 class labels
optimizer = SGD(net.parameters(), lr=0.01, momentum=0.9)
# -
net.train()  # enable dropout during training
length = len(train_feeder.dataset)
for epoch in range(100):
    running_loss = 0.0
    running_correct = 0
    for inputs, labels in train_feeder:
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()              # clear gradients from the previous step
        outputs = net(inputs)              # forward pass
        loss = criterion(outputs, labels)  # mean loss over this mini-batch
        loss.backward()                    # backprop
        optimizer.step()                   # p' = p - lr * grad
        # criterion returns the per-batch *mean*, so weight by the batch size before
        # dividing by the dataset length. The original summed batch means and divided
        # by the sample count, understating "Average loss" ~batch_size-fold.
        running_loss += loss.item() * inputs.size(0)
        pred = outputs.max(1, keepdim=True)[1]  # index of the max logit
        running_correct += pred.eq(labels.view_as(pred)).sum().item()
    running_loss /= length
    print('Epoch = {}: Train set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
        epoch, running_loss, running_correct, length,100. * running_correct / length))
# +
net.eval()  # disable dropout for deterministic evaluation
test_loss = 0.0
test_correct = 0
length = len(test_feeder.dataset)
with torch.no_grad():  # no autograd bookkeeping during inference
    for data, target in test_feeder:
        data, target = data.to(device), target.to(device)
        output = net(data)
        # .item() accumulates a Python float instead of a tensor, and weighting by
        # the batch size makes the final division a true per-sample mean (the
        # original divided a sum of per-batch means by the sample count).
        test_loss += criterion(output, target).item() * data.size(0)
        pred = output.max(1, keepdim=True)[1]  # index of the max logit
        test_correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= length
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
    test_loss, test_correct, length, 100. * test_correct / length))
# source notebook: 00_Learning_Pytorch/Pytorch_on_MNIST.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow_datasets as tfds
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from scipy.stats import pearsonr
import scipy.stats as st
from plot.scatter import scatter_plot, _set_axis_config
from plot.summary import summary_plot
from plot.colors import green_gold
# -
dataset = tfds.load(name='higgs', split='train')
# NOTE(review): `output_shapes` is the legacy TF1 dataset attribute — confirm the
# installed tensorflow_datasets / TF version still exposes it.
feature_names = list(dataset.output_shapes.keys())[1:]  # drop the leading label key
# Precomputed arrays from an earlier attribution/interaction run.
attributions = np.load('attributions.npy')
interactions = np.load('interactions.npy')
input_samples = np.load('input_samples.npy')
pred_output = np.load('pred_output.npy')
true_labels = np.load('true_labels.npy')
# The two features under study, as columns of the sample matrix.
m_wbb = input_samples[:, feature_names.index('m_wbb')]
m_wwbb = input_samples[:, feature_names.index('m_wwbb')]
feature_names.index('m_wbb')
feature_names.index('m_wwbb')
pearsonr(m_wbb, m_wwbb)
# Linear fit of m_wwbb on m_wbb; residuals capture what m_wbb cannot explain.
linear_model = LinearRegression()
linear_model.fit(m_wbb.reshape(-1, 1), m_wwbb)
m_wwbb_pred = linear_model.predict(m_wbb.reshape(-1, 1))
residuals = m_wwbb - m_wwbb_pred
squared_residuals = np.square(residuals)
mse = np.mean(squared_residuals)
r_squared = linear_model.score(m_wbb.reshape(-1, 1), m_wwbb)
print('R^2: {:.4f}, MSE: {:.4f}'.format(r_squared, mse))
# Dense x-grid for drawing the fitted regression line.
model_x = np.linspace(np.min(m_wbb), np.max(m_wbb), num=100)
model_y = linear_model.predict(model_x.reshape(-1, 1))
def customize_axis(ax, xlabel, ylabel, title, ax_below=True):
    """Apply the shared look-and-feel (title, labels, ticks, grid) to one axis.

    ax_below controls whether grid lines are drawn beneath the plot artists.
    """
    _set_axis_config(ax, [0.2, 0.2, 1.0, 1.0])
    ax.set_title(title, fontsize=18)
    ax.set_xlabel(xlabel, fontsize=14)
    ax.set_ylabel(ylabel, fontsize=14)
    ax.tick_params(length=6, labelsize=12)
    ax.grid(linewidth=0.5)
    ax.set_axisbelow(ax_below)
# Raw feature scatter, colored by the true class label.
plt.scatter(x=m_wbb,
            y=m_wwbb,
            c=true_labels,
            s=20)
# Attribution-space scatter, colored by the pairwise interaction strength.
plt.scatter(x=attributions[:, feature_names.index('m_wbb')],
            y=attributions[:, feature_names.index('m_wwbb')],
            c=interactions[:, feature_names.index('m_wbb'), feature_names.index('m_wwbb')],
            s=5)
# +
fig, axs = plt.subplots(3, 2, figsize=(16, 15))
# Panel (0,0): regression of m_wwbb on m_wbb with the fitted line.
ax = axs[0, 0]
ax.scatter(x=m_wbb,
           y=m_wwbb,
           s=6,
           alpha=0.5)
ax.plot(model_x,
        model_y,
        c='firebrick')
customize_axis(ax, 'm_wbb', 'm_wwbb', 'Regression plot between the top two features')
# Panel (0,1): residuals against m_wbb, colored by the model's predicted output.
ax = axs[0, 1]
ax.scatter(x=m_wbb,
           y=residuals,
           s=6,
           alpha=0.5,
           c=pred_output,
           cmap=green_gold())
ax.plot(model_x,
        np.zeros(model_x.shape),
        c='firebrick')
customize_axis(ax, 'm_wbb', 'residuals', 'Residuals of the regression line')
# Panel (1,0): residuals vs. pairwise interaction values, colored by m_wbb.
ax = axs[1, 0]
ax.scatter(x=residuals,
           y=interactions[:, feature_names.index('m_wbb'), feature_names.index('m_wwbb')],
           s=6,
           alpha=0.5,
           c=input_samples[:, feature_names.index('m_wbb')],
           cmap=green_gold())
customize_axis(ax, 'residuals', 'interactions', 'Residuals vs. interactions of m_wbb and m_wwbb')
# Capture these limits so the KDE panel below covers the same region.
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Panel (1,1): Gaussian-KDE log-density of the scatter above.
ax = axs[1, 1]
x = residuals
y = interactions[:, feature_names.index('m_wbb'), feature_names.index('m_wwbb')]
xx, yy = np.mgrid[xlim[0]:xlim[1]:100j, ylim[0]:ylim[1]:100j]
positions = np.vstack([xx.ravel(), yy.ravel()])
values = np.vstack([x, y])
kernel = st.gaussian_kde(values)
f = np.reshape(kernel(positions).T, xx.shape)
cfset = ax.contourf(xx, yy, np.log(f + 0.1), cmap='Blues')  # +0.1 avoids log(0)
customize_axis(ax, 'residuals', 'interactions', 'Log density plot of residuals vs. interactions', False)
# Panels (2,0) and (2,1): residuals vs. per-feature attributions.
ax = axs[2, 0]
ax.scatter(x=residuals,
           y=attributions[:, feature_names.index('m_wwbb')],
           s=6,
           alpha=0.5)
customize_axis(ax, 'residuals', 'attribution to m_wwbb', 'Attributions vs. Residuals')
ax = axs[2, 1]
ax.scatter(x=residuals,
           y=attributions[:, feature_names.index('m_wbb')],
           s=6,
           alpha=0.5)
customize_axis(ax, 'residuals', 'attribution to m_wbb', 'Attributions vs. Residuals')
plt.tight_layout()
# -
# ### Normal evaluation:
# ```
# Evaluating model with flip indices set to False
# ---------- Train Set ----------
# 100000/100000 - 1351s - loss: 0.4814 - binary_accuracy: 0.7710 - auc: 0.8622
# ---------- Vald Set ----------
# 5000/5000 - 114s - loss: 0.4827 - binary_accuracy: 0.7706 - auc: 0.8615
# ```
#
# ### Flipped Evaluation:
# ```
# Evaluating model with flip indices set to True
# ---------- Train Set ----------
# 100000/100000 - 1355s - loss: 0.5531 - binary_accuracy: 0.7221 - auc: 0.8044
# ---------- Vald Set ----------
# 5000/5000 - 113s - loss: 0.5535 - binary_accuracy: 0.7218 - auc: 0.8042
# ```
#
# Hmmm... definitely didn't observe the pattern I was hoping for. Still interesting though!
# source notebook: examples/tabular/higgs/plot_residuals.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # hellow every one is the test of JNB
# we are testing and learning how to use JNB
print("hey dude be aswesome")
print ("hellow BDA 01")
# # numbers in python
BDA = 20
BDA
type (BDA)
FCS=100
FCS-BDA
FCS/BDA
BDA*FCS
FCS+BDA
# # FLOAT POINT VALUE OPERATIONS
RAN=20.9
KAN=99.87
RAN-KAN
KAN-RAN
KAN*RAN
KAN+RAN
RAN/KAN
KAN/RAN
RAN+KAN
# # STRINGS VALUE OPERATIONS
LUNCH="BRIYANI"
LUNCH[1]
LUNCH[::-1]
LUNCH[:-1]
LUNCH[2]
sunday ="BusinessDataAnalytics"
sunday
sunday[1:20]
sunday[::-1]
sunday[::2]
sunday[::3]
sunday[::-3]
sunday[2:3:7]
# # list operations
week=['sunday','monday','tuesday','wednesday','thursday',1,2,9,["hi",'ate','meow'],True]
week
week[1]
week[1:8]
week.count ("monday")
week.insert(1,5)
week
week.pop()
# # dictionary operations
anyDicts ={"hb":"cat","fish":"ranguard","farins":"geetha"}
anyDicts
anyDicts.keys()
type(anyDicts)
anyDicts.get("hb")
anyDicts.values()
# # Tuples operation
anytyple =("anwar","fan",22,22,False,59.36)
anytyple
anytyple.count(22)
anytyple.index(22)
# # set operations
anyset ={"BDA",2,1,2,3,4,5,65,False}
anyset
anyset.add(21)
anyset
# # boolean operations
True
False
not False
not True
2<3<8
2>3<5
3+6+98+78>200
# # comparison.operators
# == is to check two operators are equal or not
10==3
10==10
x=100
y=60
x==y
# not eqal to
x != y
x>y
y>x
# gretater than or equal to
z=40
x>=z
y >=z
# less than equal too
x<=y
y<=z
# # chain operators
# # AND
# # OR
# # NOT
(40>100 ) or (100>50) # or operartor
(40>100) and (100>50) # and operator
not (40>100) #not operator
not (40<100) #not operator
# # flow control staements
# # if elif, else
if 40> 50:
print ("first statementg was true")
print ("first block printed")
elif 50>90:
print ("second elif statement was true")
print ("first block printed")
else:
print ("nothing left hence printing else block")
# source notebook: python basics.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div>
# <a href="https://www.audiolabs-erlangen.de/fau/professor/mueller"><img src="data_layout/PCP_Teaser.png" width=100% style="float: right;" alt="PCP Teaser"></a>
# </div>
# # Unit 9: Discrete Fourier Transform (DFT)
#
# <ul>
# <li><a href='#learn'>Overview and Learning Objectives</a></li>
# <li><a href='#inner'>Inner Product</a></li>
# <li><a href='#dft'>Definition of DFT</a></li>
# <li><a href='#dftmatrix'>DFT Matrix</a></li>
# <li><a href='#fft'>Fast Fourier Transform (FFT)</a></li>
# <li><a href='#exercise_freq_index'>Exercise 1: Interpretation of Frequency Indices</a></li>
# <li><a href='#exercise_missing_time'>Exercise 2: Missing Time Localization</a></li>
# <li><a href='#exercise_chirp'>Exercise 3: Chirp Signal</a></li>
# <li><a href='#exercise_inverse'>Exercise 4: Inverse DFT</a></li>
# </ul>
# <a id='learn'></a>
# <div class="alert alert-block alert-warning">
# <h2>Overview and Learning Objectives</h2>
#
#
# The <strong>Fourier transform</strong> is one of the most important tools for a wide range of engineering and computer science applications. The general idea of <strong>Fourier analysis</strong> is to decompose a given signal into a weighted superposition of sinusoidal functions. Since these functions possess an explicit physical meaning regarding their frequencies, the decomposition is typically more accessible for subsequent processing steps than the original signal. Assuming that you are familiar with the Fourier transform and its applications in signal processing, we review in this unit the discrete variant of the Fourier transform known as <strong>Discrete Fourier Transform</strong> (DFT). We define the inner product that allows for comparing two vectors (e.g., discrete-time signals of finite length). The DFT can be thought of as comparing a given signal of finite length with a specific set of exponential signals (a complex variant of sinusoidal signals), each comparison yielding a complex-valued Fourier coefficient. Then, using suitable visualizations, we show how you can interpret the amplitudes and phases of these coefficients. Recall that one can express the DFT as a complex-valued square matrix. We show how separately plotting the real and imaginary parts leads to beautiful and insightful images. Applying a DFT boils down to computing a matrix–vector product, which we implement via the standard NumPy function <code>np.dot</code>. Since the number of operations for computing a DFT via a simple matrix–vector product is quadratic in the input length, the runtime of this approach becomes problematic with increasing length. This issue is exactly where the fast Fourier transform (FFT) comes into the game. We present this famous divide-and-conquer algorithm and provide a Python implementation. Furthermore, we compare the runtime behavior between the FFT implementation and the naive DFT implementation. 
# We will further deepen your understanding of the Fourier transform by considering further examples and visualization in the exercises. In <a href='#exercise_freq_index'>Exercise 1</a>, you will learn how to interpret and plot frequency indices in a physically meaningful way. In <a href='#exercise_missing_time'>Exercise 2</a>, we discuss the issue of losing time information when applying the Fourier transform, which is the main motivation for the <a href='https://www.audiolabs-erlangen.de/resources/MIR/FMP/C2/C2_STFT-Basic.html'>short-time Fourier transform</a>. In <a href='#exercise_chirp'>Exercise 3</a>, you will apply the DFT to a <strong>chirp signal</strong>, which yields another illustrative example of the DFT's properties. Finally, in <a href='#exercise_inverse'>Exercise 4</a>, we will invite you to explore the relationship between the DFT and its inverse. Again, an overarching goal of this unit is to apply and deepen your Python programming skills within the context of a central topic for signal processing.
#
# </div>
# <a id='inner'></a>
# ## Inner Product
#
# In this notebook, we consider [discrete-time (DT) signals](PCP_Signal.html) of finite length $N\in\mathbb{N}$, which we represent as vector
#
# $$
# x=(x(0),x(1),...,x(N-1))^\top\in\mathbb{R}^N
# $$
#
# with samples $x(n)\in\mathbb{R}^N$ for $n\in[0:N-1]$. Note that $\top$ indicates the transpose of a vector, thus converting a row vector into a column vector. Furthermore, note that we start indexing with the index $0$ (thus adapting our mathematical notation to Python conventions). A general concept for comparing two vectors (or signals) is the **inner product**. Given two vectors $x, y \in \mathbb{R}^N$, the inner product between $x$ and $y$ is defined as follows:
#
# $$
# \langle x | y \rangle := \sum_{n=0}^{N-1} x(n) y(n).
# $$
#
# The absolute value of the inner product may be interpreted as a measure of similarity between $x$ and $y$. If $x$ and $y$ are similar (i.e., if they point to more or less the same direction), the inner product $|\langle x | y \rangle|$ is large. If $x$ and $y$ are dissimilar (i.e., if $x$ and $y$ are more or less orthogonal to each other), the inner product $|\langle x | y \rangle|$ is close to zero.
#
# One can extend this concept to **complex-valued** vectors $x,y\in\mathrm{C}^N$, where the inner product is defined as
#
# $$
# \langle x | y \rangle := \sum_{n=0}^{N-1} x(n) \overline{y(n)}.
# $$
#
# In the case of real-valued signals, the complex conjugate does not play any role and the definition of the complex-valued inner product reduces to the real-valued one. In the following code cell, we give some examples.
# <div class="alert alert-block alert-warning">
# <strong>Note:</strong>
# One can use the NumPy function <code>np.vdot</code> to compute the inner product. However, opposed to the mathematical convention to conjugate the second argument, this function applies complex conjugation on the first argument. Therefore, for computing $\langle x | y \rangle$ as defined above, one has to call <code>np.vdot(y, x)</code>.
# </div>
# In the following, we generate and visualize three signals $x_1$, $x_2$, $x_3$. Then, we compute and discuss different inner products using the signals.
# +
import numpy as np
from matplotlib import pyplot as plt
import libpcp.signal
# %matplotlib inline

# Sampling rate (Hz) and duration (seconds) for all example signals below.
Fs = 64
dur = 1
# x1: composite example signal; x2/x3: pure sinusoids at 2 Hz and 6 Hz.
x1, t = libpcp.signal.generate_example_signal(Fs=Fs, dur=dur)
x2, t = libpcp.signal.generate_sinusoid(dur=dur, Fs=Fs, amp=1, freq=2, phase=0.3)
x3, t = libpcp.signal.generate_sinusoid(dur=dur, Fs=Fs, amp=1, freq=6, phase=0.1)
def plot_inner_product(ax, t, x, y, color_x='k', color_y='r', label_x='x', label_y='y'):
    """Plot two signals on one axis and show their inner product in the title.

    Notebook: PCP_dft.ipynb

    Args:
        ax: Axis handle
        t: Time axis
        x: Signal x
        y: Signal y
        color_x: Color of signal x (Default value = 'k')
        color_y: Color of signal y (Default value = 'r')
        label_x: Label of signal x (Default value = 'x')
        label_y: Label of signal y (Default value = 'y')
    """
    ax.plot(t, x, color=color_x, linewidth=1.0, linestyle='-', label=label_x)
    ax.plot(t, y, color=color_y, linewidth=1.0, linestyle='-', label=label_y)
    ax.set_xlim([0, t[-1]])
    ax.set_ylim([-1.5, 1.5])
    ax.set_xlabel('Time (seconds)')
    ax.set_ylabel('Amplitude')
    # <x | y>: np.vdot conjugates its *first* argument, hence the (y, x) order.
    sim = np.vdot(y, x)
    ax.set_title(r'$\langle$ %s $|$ %s $\rangle = %.1f$' % (label_x, label_y, sim))
    ax.legend(loc='upper right')
plt.figure(figsize=(8, 5))
# Pairwise inner products: <x1|x1>, <x1|x2>, <x1|x3>, <x2|x3>.
ax = plt.subplot(2, 2, 1)
plot_inner_product(ax, t, x1, x1, color_x='k', color_y='k', label_x='$x_1$', label_y='$x_1$')
ax = plt.subplot(2, 2, 2)
plot_inner_product(ax, t, x1, x2, color_x='k', color_y='r', label_x='$x_1$', label_y='$x_2$')
ax = plt.subplot(2, 2, 3)
plot_inner_product(ax, t, x1, x3, color_x='k', color_y='b', label_x='$x_1$', label_y='$x_3$')
ax = plt.subplot(2, 2, 4)
plot_inner_product(ax, t, x2, x3, color_x='r', color_y='b', label_x='$x_2$', label_y='$x_3$')
plt.tight_layout()
# -
# In the above example, one can make the following observations:
#
# * The signal $x_1$ is similar to itself, leading to a large value of $\langle x_1 | x_1 \rangle=40.0$.
# * The overall course of the signal $x_1$ strongly correlates with the sinusoid $x_2$, which is reflected by a relatively large value of $\langle x_1 | x_2 \rangle=29.9$.
# * There are some finer oscillations of $x_1$ that are captured by $x_3$, leading to a still noticeable value of $\langle x_1 | x_3 \rangle=14.7$.
# * The two sinusoids $x_2$ and $x_3$ are more or less uncorrelated, which is revealed by the value of $\langle x_2 | x_3 \rangle\approx 0$.
#
# In other words, the above comparison reveals that the signal $x_1$ has a strong signal component of $2~\mathrm {Hz}$ (frequency of $x_2$) and $6~\mathrm {Hz}$ (frequency of $x_3$). Measuring correlations between an arbitrary signal and sinusoids of different frequencies is exactly the idea of performing a Fourier (or spectral) analysis.
# <a id='dft'></a>
# ## Definition of DFT
#
# Let $x\in \mathbb{C}^N$ be a vector of length $N\in\mathbb{N}$. The **discrete Fourier transform** (DFT) of $x$ is defined by:
#
# $$ X(k) := \sum_{n=0}^{N-1} x(n) \exp(-2 \pi i k n / N) $$
#
# for $k \in [0:N-1]$. The vector $X\in\mathbb{C}^N$ can be interpreted as a frequency representation of the time-domain signal $x$. To obtain a geometric interpretation of the DFT, we define the vector $\mathbf{e}_k \in\mathbb{C}^N$ with real part $\mathbf{c}_k=\mathrm{Re}(\mathbf{e}_k)$ and imaginary part $\mathbf{s}_k=\mathrm{Im}(\mathbf{e}_k)$ by
#
# $$\mathbf{e}_k(n) := \exp(2 \pi i k n / N) = \cos(2 \pi k n / N) + i \sin(2 \pi k n / N)
# = \mathbf{c}_k(n) + i \mathbf{s}_k(n)$$
#
# for each $k \in [0:N-1]$.
#
#
# This vector can be regarded as a [sampled version](PCP_signal.html) of the [exponential function](PCP_exp.html) of frequency $k/N$. Using inner products, the DFT can be expressed as
#
# $$ X(k) = \sum_{n=0}^{N-1} x(n) \overline{\mathbf{e}_k}(n) = \langle x | \mathbf{e}_k \rangle,$$
#
# thus measuring the similarity between the signal $x$ and the sampled exponential functions $\mathbf{e}_k$. The absolute value $|X(k)|$ indicates the degree of similarity between the signal $x$ and $\mathbf{e}_k$. In the case that $x\in \mathbb{R}^N$ is a real-valued vector (which is typically the case for audio signals), we obtain:
#
# $$
# X(k) = \langle x |\mathrm{Re}(\mathbf{e}_k) \rangle - i\langle x | \mathrm{Im}(\mathbf{e}_k) \rangle
# = \langle x |\mathbf{c}_k \rangle - i\langle x | \mathbf{s}_k \rangle
# $$
#
# The following plot shows an example signal $x$ compared with functions $\overline{\mathbf{e}_k}$ for various frequency parameters $k$. The real and imaginary part of $\overline{\mathbf{e}_k}$ are shown in <font color='red'> red</font> and <font color='blue'> blue</font>, respectively.
# +
def plot_signal_e_k(ax, x, k, show_e=True, show_opt=False):
    """Plot signal and k-th DFT sinusoid

    Notebook: PCP_dft.ipynb

    Args:
        ax: Axis handle
        x: Signal
        k: Index of DFT
        show_e: Shows cosine and sine (Default value = True)
        show_opt: Shows cosine with optimal phase (Default value = False)
    """
    N = len(x)
    time_index = np.arange(N)
    ax.plot(time_index, x, 'k', marker='.', markersize='10', linewidth=2.0, label='$x$')
    plt.xlabel('Time (samples)')
    e_k = np.exp(2 * np.pi * 1j * k * time_index / N)
    c_k = np.real(e_k)
    s_k = np.imag(e_k)
    # X(k) = <x | e_k>: np.vdot conjugates its *first* argument, hence (e_k, x).
    X_k = np.vdot(e_k, x)
    plt.title(r'k = %d: Re($X(k)$) = %0.2f, Im($X(k)$) = %0.2f, $|X(k)|$=%0.2f' %
              (k, X_k.real, X_k.imag, np.abs(X_k)))
    if show_e is True:
        # Raw strings keep the LaTeX backslashes literal without triggering
        # invalid-escape-sequence DeprecationWarnings (string values unchanged).
        ax.plot(time_index, c_k, 'r', marker='.', markersize='5',
                linewidth=1.0, linestyle=':', label=r'$\mathrm{Re}(\overline{\mathbf{u}}_k)$')
        ax.plot(time_index, s_k, 'b', marker='.', markersize='5',
                linewidth=1.0, linestyle=':', label=r'$\mathrm{Im}(\overline{\mathbf{u}}_k)$')
    if show_opt is True:
        # Optimal phase maximizing the correlation of x with cos_{k,phase}.
        phase_k = - np.angle(X_k) / (2 * np.pi)
        cos_k_opt = np.cos(2 * np.pi * (k * time_index / N - phase_k))
        # (removed unused local `d_k = np.sum(x * cos_k_opt)` — computed but never used)
        ax.plot(time_index, cos_k_opt, 'g', marker='.', markersize='5',
                linewidth=1.0, linestyle=':', label=r'$\cos_{k, opt}$')
    plt.grid()
    plt.legend(loc='lower right')
# Compare the example signal against e_k for the first few frequency indices k.
N = 64
x, t = libpcp.signal.generate_example_signal(Fs=N, dur=1)

plt.figure(figsize=(8, 15))
for k in range(1, 8):
    ax = plt.subplot(7, 1, k)
    plot_signal_e_k(ax, x, k=k)
plt.tight_layout()
# -
# <a id='phase'></a>
# ## DFT Phase
#
# At first sight, the DFT may be a bit confusing: Why is a real-valued signal $x$ compared with a complex-valued sinusoid $\mathbf{e}_k$? What does the resulting complex-valued Fourier coefficient
#
# $$
# c_k:= X(k) := \langle x |\mathrm{Re}(\mathbf{e}_k) \rangle - i\langle x | \mathrm{Im}(\mathbf{e}_k) \rangle.
# $$
#
# encode? To understand this, we represent the complex number $c_k$ in form of its [polar representation](PCP_complex.html#polar)
#
# $$
# c_k = |c_k| \cdot \mathrm{exp}(i \gamma_k),
# $$
#
# where $\gamma_k$ is the [angle](PCP_complex.html) (given in radians). Furthermore, let $\mathbf{cos}_{k,\varphi}:[0:N-1]\to\mathbb{R}$ be a sampled sinusoid with frequency parameter $k$ and phase $\varphi\in[0,1)$, defined by
#
# $$
# \mathbf{cos}_{k,\varphi}(n) = \mathrm{cos}\big( 2\pi (kn/N - \varphi) \big)
# $$
#
# for $n\in[0,N-1]$. Defining $\varphi_k := - \frac{\gamma_k}{2 \pi}$, one obtains the following remarkable property of the Fourier coefficient $c_k$:
#
# \begin{eqnarray}
# |c_k| &=& \mathrm{max}_{\varphi\in[0,1)} \langle x | \mathbf{cos}_{k,\varphi} \rangle,\\
# \varphi_k &=& \mathrm{argmax}_{\varphi\in[0,1)} \langle x | \mathbf{cos}_{k,\varphi} \rangle.
# \end{eqnarray}
#
# In other words, the phase $\varphi_k$ maximizes the correlation between $x$ and all possible sinusoids $\mathbf{cos}_{k,\varphi}$ with $\varphi\in[0,1)$. Furthermore, the magnitude $|c_k|$ yields this maximal value. Thus, computing a single correlation between $x$ and the complex-valued function $\mathbf{e}_k$ (which real part coincides with $\mathbf{cos}_{k,0}$, and its imaginary part with $\mathbf{cos}_{k,0.25}$) solves an optimization problem. In the following code cell, we demonstrate this optimality property, where the $\mathbf{cos}_{k,\varphi}$ with optimal phase $\varphi=\varphi_k$ is shown in <font color='green'>green</font>.
# +
plt.figure(figsize=(8, 15))
# Same comparison, now showing the optimally-phased cosine (green) instead of e_k.
for k in range(1, 8):
    ax = plt.subplot(7, 1, k)
    plot_signal_e_k(ax, x, k=k, show_e=False, show_opt=True)
plt.tight_layout()
# -
# <a id='dftmatrix'></a>
# ## DFT Matrix
#
# Being a linear operator $\mathbb{C}^N \to \mathbb{C}^N$, the DFT can be expressed by some $N\times N$-matrix. This leads to the famous DFT matrix $\mathrm{DFT}_N \in \mathbb{C}^{N\times N}$ matrix, which is given by
#
# $$\mathrm{DFT}_N(n, k) = \mathrm{exp}(-2 \pi i k n / N)$$
#
# for $n\in[0:N-1]$ and $k\in[0:N-1]$. Let $\rho_N:=\exp(2 \pi i / N)$ be the primitive $N^\mathrm{th}$ [root of unity](PCP_exp.html#roots). Then
#
# $$\sigma_N:= \overline{\rho_N} = \mathrm{exp}(-2 \pi i / N)$$
#
# also defines a primitive $N^\mathrm{th}$ [root of unity](PCP_exp.html#roots). From the [properties of exponential functions](PCP_exp.html), one obtains that
#
# $$ \sigma_N^{kn} = \mathrm{exp}(-2 \pi i / N)^{kn} = \mathrm{exp}(-2 \pi i k n / N)$$
#
# From this, one obtains:
#
# $$
# \mathrm{DFT}_N =
# \begin{pmatrix}
# 1 & 1 & 1 & \dots & 1 \\
# 1 & \sigma_N & \sigma_N^2 & \dots & \sigma_N^{N-1} \\
# 1 & \sigma_N^2 & \sigma_N^4 & \dots & \sigma_N^{2(N-1)} \\
# \vdots & \vdots & \vdots & \ddots & \vdots \\
# 1 & \sigma_N^{N-1} & \sigma_N^{2(N-1)} & \dots & \sigma_N^{(N-1)(N-1)} \\
# \end{pmatrix}
# $$
#
# In the following visualization, the real and imaginary part of $\mathrm{DFT}_N$ are shown, where the values are encoded by suitable colors. Note that the $k^\mathrm{th}$ row of $\mathrm{DFT}_N$ corresponds to the vector $\mathbf{e}_k$ as defined above.
# +
def generate_matrix_dft(N, K):
    """Generate a DFT (discrete Fourier transform) matrix

    Notebook: PCP_dft.ipynb

    Args:
        N: Number of samples
        K: Number of frequency bins

    Returns:
        dft: The (K x N) complex DFT matrix with entries exp(-2*pi*i*k*n/N)
    """
    freq_index = np.arange(K).reshape(-1, 1)
    time_index = np.arange(N).reshape(1, -1)
    # The broadcasted product k*n yields the full exponent grid in one
    # vectorized step, replacing the row-by-row loop.
    return np.exp(-2j * np.pi * freq_index * time_index / N)
N = 32
dft_matrix = generate_matrix_dft(N, N)

# Row k of DFT_N is the sampled exponential conj(e_k); show real/imaginary parts.
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.title('$\mathrm{Re}(\mathrm{DFT}_N)$')
plt.imshow(np.real(dft_matrix), origin='lower', cmap='seismic', aspect='equal')
plt.xlabel('Time (sample, index $n$)')
plt.ylabel('Frequency (index $k$)')
plt.colorbar()
plt.subplot(1, 2, 2)
plt.title('$\mathrm{Im}(\mathrm{DFT}_N)$')
plt.imshow(np.imag(dft_matrix), origin='lower', cmap='seismic', aspect='equal')
plt.xlabel('Time (samples, index $n$)')
plt.ylabel('Frequency (index $k$)')
plt.colorbar()
plt.tight_layout()
# -
# We now write a function that computes the discrete Fourier transform $X = \mathrm{DFT}_N \cdot x$ of a signal $x\in\mathbb{C}^N$. We apply the function from above sampled at $N=64$ time points. The peaks of the magnitude Fourier transform $|X|$ correspond to the main frequency components the signal is composed of. Note that the magnitude Fourier transform is symmetrical around the center. Why? For the interpretation of the time and frequency axis, see also <a href='#exercise_freq_index'>Exercise 1: Interpretation of Frequency Indices</a>
# +
def dft(x):
    """Compute the discrete Fourier transform (DFT)

    Notebook: PCP_dft.ipynb

    Args:
        x: Signal to be transformed

    Returns:
        X: Fourier transform of x
    """
    signal = x.astype(np.complex128)
    num_samples = len(signal)
    # X = DFT_N · x as a plain matrix-vector product.
    dft_matrix = generate_matrix_dft(num_samples, num_samples)
    return dft_matrix @ signal
# Apply the DFT to the example signal sampled at N = 64 points.
N = 64
x, t = libpcp.signal.generate_example_signal(Fs=N, dur=1)
X = dft(x)
def plot_signal_dft(t, x, X, ax_sec=False, ax_Hz=False, freq_half=False, figsize=(10, 2)):
    """Plotting function for signals and its magnitude DFT

    Notebook: PCP_dft.ipynb

    Args:
        t: Time axis (given in seconds)
        x: Signal
        X: DFT
        ax_sec: Plots time axis in seconds (Default value = False)
        ax_Hz: Plots frequency axis in Hertz (Default value = False)
        freq_half: Plots only low half of frequency coefficients (Default value = False)
        figsize: Size of figure (Default value = (10, 2))
    """
    N = len(x)
    if freq_half is True:
        # For real signals the magnitude spectrum is symmetric; keep the lower half.
        K = N // 2
        X = X[:K]
    else:
        K = N
    plt.figure(figsize=figsize)
    ax = plt.subplot(1, 2, 1)
    ax.set_title('$x$ with $N=%d$' % N)
    if ax_sec is True:
        ax.plot(t, x, 'k', marker='.', markersize='3', linewidth=0.5)
        ax.set_xlabel('Time (seconds)')
    else:
        ax.plot(x, 'k', marker='.', markersize='3', linewidth=0.5)
        ax.set_xlabel('Time (samples)')
    ax.grid()
    ax = plt.subplot(1, 2, 2)
    ax.set_title('$|X|$')
    if ax_Hz is True:
        # Sampling rate recovered from the time-axis spacing; bin k maps to k*Fs/N Hz.
        Fs = 1 / (t[1] - t[0])
        ax_freq = Fs * np.arange(K) / N
        ax.plot(ax_freq, np.abs(X), 'k', marker='.', markersize='3', linewidth=0.5)
        ax.set_xlabel('Frequency (Hz)')
    else:
        ax.plot(np.abs(X), 'k', marker='.', markersize='3', linewidth=0.5)
        ax.set_xlabel('Frequency (index)')
    ax.grid()
    plt.tight_layout()
    plt.show()
# Same signal/spectrum with index axes, then physical units, then lower half only.
plot_signal_dft(t, x, X)
plot_signal_dft(t, x, X, ax_sec=True, ax_Hz=True)
plot_signal_dft(t, x, X, ax_sec=True, ax_Hz=True, freq_half=True)
# -
# <a id='fft'></a>
# ## Fast Fourier Transform (FFT)
#
# Next, we discuss the famous fast Fourier transform (FFT), which is a fast algorithm to compute the DFT. The FFT algorithm was originally found by Gauss in about 1805 and then rediscovered by Cooley and Tukey in 1965. The FFT algorithm is based on the observation that applying a DFT of even size $N=2M$ can be expressed in terms of applying two DFTs of half the size $M$. It exploits the fact that there are algebraic relations between the entries $\sigma_N^{kn} = \mathrm{exp}(-2 \pi i / N)^{kn}$ of DFT matrices. In particular, one has
#
# $$\sigma_M = \sigma_N^2$$
#
# In the FFT algorithm, one computes the DFT of the even-indexed and the uneven-indexed entries of $x$:
#
# \begin{align}
# (A(0), \dots, A(N/2-1)) &= \mathrm{DFT}_{N/2} \cdot (x(0), x(2), x(4), \dots, x(N-2))\\
# (B(0), \dots, B(N/2-1)) &= \mathrm{DFT}_{N/2} \cdot (x(1), x(3), x(5), \dots, x(N-1))
# \end{align}
#
# With these two DFTs of size $N/2$, one can compute the full DFT of size $N$ via:
#
# \begin{eqnarray}
# C(k) &=& \sigma_N^k \cdot B(k)\\
# X(k) &=& A(k) + C(k)\\
# X(N/2 + k) &=& A(k) - C(k)\\
# \end{eqnarray}
#
# for $k \in [0: N/2 - 1]$. The numbers $\sigma_N^k$ are also called *twiddle factors*. If $N$ is a power of two, this idea can be applied recursively until one reaches the computation of $\mathrm{DFT}_{1}$ (the case $N=1$), which is simply multiplication by one (i.e. just returning the signal of length $N=1$). For further details, we refer to Section 2.4.3 of <a href="http://www.music-processing.de">[Müller, FMP, Springer 2015])</a> (see also Table 2.1).
#
# In the following code, we provide a function `fft` that implements the FFT algorithm. We test the function `fft` by comparing its output with the one when applying the `dft` on a test signal `x`. For the comparison of result matrices, we use the NumPy functions [`np.array_equal`](https://numpy.org/doc/stable/reference/generated/numpy.array_equal.html) and [`np.allclose`](https://numpy.org/doc/stable/reference/generated/numpy.allclose.html#numpy.allclose).
# +
def fft(x):
    """Compute the fast Fourier transform (FFT) of a signal.

    Radix-2 Cooley-Tukey decimation in time: the signal is split into
    its even- and odd-indexed halves, each transformed recursively, and
    the halves are recombined with the twiddle factors.

    Args:
        x: Signal to be transformed; its length must be a power of two.

    Returns:
        X: Fourier transform of x (complex-valued array of the same length).
    """
    x = x.astype(np.complex128)
    num_samples = len(x)
    exponent = np.log2(num_samples)
    assert exponent == int(exponent), 'N must be a power of two!'
    if num_samples == 1:
        # Base case: the DFT of a length-1 signal is the signal itself.
        return x
    indices = np.arange(num_samples)
    even_part = fft(x[indices % 2 == 0])
    odd_part = fft(x[indices % 2 == 1])
    half = num_samples // 2
    # Twiddle factors sigma_N^k for k in [0 : N/2 - 1].
    twiddle = np.exp(-2j * np.pi * np.arange(half) / num_samples)
    weighted_odd = twiddle * odd_part
    X = np.zeros(num_samples, dtype=np.complex128)
    X[:half] = even_part + weighted_odd
    X[half:] = even_part - weighted_odd
    return X
N = 64
x, t = libpcp.signal.generate_example_signal(Fs=N, dur=1)
X_via_dft = dft(x)
X_via_fft = fft(x)
X_via_fft_numpy = np.fft.fft(x)
is_equal = np.array_equal(X_via_dft, X_via_fft)
is_equal_tol = np.allclose(X_via_dft, X_via_fft)
is_equal_tol_np = np.allclose(X_via_dft, X_via_fft_numpy)
print('Equality test for dft(x) and fft(x) using np.array_equal: ', is_equal)
print('Equality test for dft(x) and fft(x) using np.allclose: ', is_equal_tol)
print('Equality test for dft(x) and np.fft.fft(x) using np.allclose:', is_equal_tol_np)
# -
# <div class="alert alert-block alert-warning">
# <strong>Note:</strong> The test shows that our <code>dft</code> and <code>fft</code> implementations do not yield the same result (due to numerical issues). However, the results are numerically very close, which is verified by the test using <code>np.allclose</code>.
# </div>
# The FFT reduces the overall number of operations from the order of $N^2$ (needed when computing the usual matrix–vector product $\mathrm{DFT}_N \cdot x$) to the order of $N\log_2N$. The savings are enormous. For example, using $N=2^{10}=1024$, the FFT requires roughly $N\log_2N=10240$ instead of $N^2=1048576$ operations in the naive approach. Using the module `timeit`, which provides a simple way to time small bits of Python code, the following code compares the running time when using the naive approach and the FFT. Furthermore, we compare the running time with the highly optimized NumPy implementation <code>np.fft.fft</code>.
# +
import timeit
# Number of repetitions used to average each timing measurement; the fast
# implementations are averaged over proportionally more runs.
rep = 3
for N in [256, 512, 1024, 2048, 4096]:
    time_index = np.arange(N)
    x = np.sin(2 * np.pi * time_index / N )
    # timeit.timeit returns the TOTAL seconds for `number` executions, so we
    # divide by the run count and convert to milliseconds. FIX: the FFT
    # columns previously omitted the *1000 conversion and were printed in
    # seconds under the 'Runtime (ms)' label.
    t_DFT = 1000 * timeit.timeit(lambda: dft(x), number=rep)/rep
    t_FFT = 1000 * timeit.timeit(lambda: fft(x), number=rep*5)/(rep*5)
    t_FFT_np = 1000 * timeit.timeit(lambda: np.fft.fft(x), number=rep*100)/(rep*100)
    print('Runtime (ms) for N = %4d : DFT %10.2f, FFT %.5f, FFT_np %.8f'%(N, t_DFT, t_FFT, t_FFT_np))
# -
# ## Exercises and Results
import libpcp.dft
show_result = True
# <a id='exercise_freq_index'></a>
# <div class="alert alert-block alert-info">
# <strong>Exercise 1: Interpretation of Frequency Indices</strong><br>
# Given a dimension $N\in\mathbb{N}$, the $\mathrm{DFT}_N$ transform a vector $x\in\mathbb{C}^N$ into another vector $X\in\mathbb{C}^N$. Assuming that $x$ represents a time-domain signal sampled with a sampling rate $F_\mathrm{s}$, one can associate the index $n\in[0:N-1]$ of the sample $x(n)$ with the physical time point $t = n/F_\mathrm{s}$ given in seconds. In case of the vector $X$, the index $k\in[0:N-1]$ of the coefficient $X(k)$ can be associated to a physical frequency value
#
# $$
# \omega=\frac{k \cdot F_\mathrm{s}}{N}.
# $$
#
# Furthermore, using a real-valued signal $x\in\mathbb{R}^N$, the upper part of $X\in\mathbb{C}^N$ becomes redundant, and it suffices to consider the first $K$ coefficients with $K=N/2$.
#
# <ul>
# <li>Find explanations why these properties apply.</li>
# <li>Find out how the function <code>plot_signal_dft</code> uses these properties to convert and visualize the time and frequency axes.</li>
# <li>Using the signal <code>x, t = libpcp.signal.generate_example_signal(Fs=64, dur=2)</code>, plot the signal and its magnitude Fourier transform once using axes given in indices and once using axes given in physical units (seconds, Hertz). Discuss the results.</li>
# <li>Do the same for the signal <code>x, t = libpcp.signal.generate_example_signal(Fs=32, dur=2)</code>. What is going wrong and why?</li>
# </ul>
# </div>
# +
#<solution>
# Your Solution
#</solution>
# -
libpcp.dft.exercise_freq_index(show_result=show_result)
# <a id='exercise_missing_time'></a>
# <div class="alert alert-block alert-info">
# <strong>Exercise 2: Missing Time Localization</strong><br>
# The Fourier transform yields frequency information that is averaged over the entire time axis. However, the information on when these frequencies occur is hidden in the transform. To demonstrate this phenomenon, construct the following two different signals defined on a common time axis $[0, T]$ with $T$ given in seconds (e.g., $T=6~\mathrm{sec}$).
#
# <ul>
# <li>A superposition of two sinusoids $f_1+f_2$ defined over the entire time interval $[0, T]$, where the first sinusoid $f_1$ has a frequency $\omega_1=1~\mathrm{Hz}$ and an amplitude of $1$, while the second sinusoid $f_2$ has a frequency $\omega_2=5~\mathrm{Hz}$ and an amplitude of $0.5$.</li>
# <li>A concatenation of two sinusoids, where $f_1$ (specified as before) is now defined only on the subinterval $[0, T/2]$, and $f_2$ is defined on the subinterval $[T/2, T]$.
# </ul>
#
# Sample the interval $[0,T]$ to obtain $N$ samples (use <code>np.linspace</code>), with $N\in\mathbb{N}$ being power of two (e.g., $N=256$). Define DT-signals of the superposition and the concatenation and compute the DFT for each of the signals. Plot the signals as well as the resulting magnitude Fourier transforms and discuss the result.
# </div>
# +
#<solution>
# Your Solution
#</solution>
# -
libpcp.dft.exercise_missing_time(show_result=show_result)
# <a id='exercise_chirp'></a>
# <div class="alert alert-block alert-info">
# <strong>Exercise 3: Chirp Signal</strong><br>
# The function $f(t)=\sin\left(\pi t^2\right)$ defines a <strong>chirp signal</strong> (also called <strong>sweep signal</strong>), in which the frequency increases with time. The <strong>instantaneous frequency $\omega_t$</strong> of the chirp signal at time $t$ is the derivative of the sinusoid's argument divided by $2\pi$, thus $\omega_t = t$.
# <ul>
# <li>Let $[t_0,t_1]$ be a time interval (given in seconds) with $0\leq t_0<t_1$ and $N\in\mathbb{N}$ be power of two. Implement a function <code>generate_chirp</code> that outputs a sampled chirp signal <code>x</code> over the interval $[t_0,t_1]$ with $N$ samples (use <code>np.linspace</code>).</li>
# <li>Compute the DFT of <code>x</code> for various input parameters $t_0$, $t_1$, and $N$. Plot the chirp signal as well as the resulting magnitude Fourier transform. Discuss the result.</li>
# </ul>
# </div>
# +
#<solution>
# Your Solution
#</solution>
# -
libpcp.dft.exercise_chirp(show_result=show_result)
# <a id='exercise_inverse'></a>
# <div class="alert alert-block alert-info">
# <strong>Exercise 4: Inverse DFT</strong><br>
# The discrete Fourier transform given by the matrix $\mathrm{DFT}_N \in \mathbb{C}^{N\times N}$ is an invertible operation, given by the inverse DFT matrix $\mathrm{DFT}_N^{-1}$.
# <ul>
# <li>There is an explicit relation between $\mathrm{DFT}_N$ and its inverse $\mathrm{DFT}_N^{-1}$. Which one? </li>
# <li>Write a function <code>generate_matrix_dft_inv</code> that explicitly generates $\mathrm{DFT}_N^{-1}$.
# <li>Check your function by computing $\mathrm{DFT}_N \cdot \mathrm{DFT}_N^{-1}$ and $\mathrm{DFT}_N^{-1} \cdot \mathrm{DFT}_N$ (using <code>np.matmul</code>) and comparing these products with the identity matrix (using <code>np.eye</code> and <code>np.allclose</code>).</li>
# <li>Furthermore, compute the inverse DFT by using <code>np.linalg.inv</code>. Compare the result with your function using <code>np.allclose</code>.
# <li>Similar to <code>fft</code>, implement a fast inverse Fourier transform <code>fft_inv</code></li>
# </ul>
# </div>
# +
#<solution>
# Your Solution
#</solution>
# -
libpcp.dft.exercise_inverse(show_result=show_result)
# <div>
# <a href="https://opensource.org/licenses/MIT"><img src="data_layout/PCP_License.png" width=100% style="float: right;" alt="PCP License"></a>
# </div>
| PCP_dft.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import os
from pathlib import Path
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# %matplotlib inline
from keras.preprocessing import image
# +
X_list = []
# Load every resized vehicle image (sorted for a deterministic order) and
# stack them into a single (num_images, H, W, C) float array.
for filename in sorted(os.listdir('./Dataset_Resize/')):
    img = image.load_img(Path('./Dataset_Resize/', filename))
    X_list.append(image.img_to_array(img))
X = np.array(X_list)
print(X.shape)
# One-hot encode the vehicle type labels.
# NOTE(review): rows of vehicles.csv are assumed to align with the sorted
# image filenames — confirm.
df = pd.read_csv('./vehicles.csv', index_col=0)
df_dummy = pd.get_dummies(df['Vehicle_Type'])
Y = np.array(df_dummy)
print(Y.shape)
# Hold out 20% of the data for validation (split is not seeded, so it
# differs between runs).
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)
# -
from keras.applications.inception_v3 import InceptionV3
from keras.layers import Dense, Flatten, GlobalAveragePooling2D
from keras.models import Model, Sequential
from keras.utils.vis_utils import plot_model
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adadelta, Adam
from keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping, LearningRateScheduler, ModelCheckpoint
# we will only instantiate the convolutional part of the model, everything up to the fully-connected layers. <br>
# We will then run this model on our training and validation data once, recording the output (the "bottleneck features" from Inception model: the last activation maps before the fully-connected layers) in two numpy arrays. <br>
# Then we will train a small fully-connected model on top of the stored features<br>
batch_size = 256
# Rescale pixel values to [0, 1]; no other augmentation is applied.
datagen = ImageDataGenerator(rescale=1. / 255)
# Convolutional base of InceptionV3 (include_top=False drops the classifier
# head) initialized with ImageNet weights; used only for feature extraction.
model_Inception = InceptionV3(include_top = False, weights = 'imagenet')
print("load Inception V3")
# +
generator = datagen.flow(X_train, Y_train,
batch_size=batch_size)
bottleneck_features_train = model_Inception.predict_generator(generator)
print("Calculate train features")
print(bottleneck_features_train.dtype)
# -
np.savez('inception_features_train', features=bottleneck_features_train)
# np.save(open('bottleneck_features_train.npy', 'w'), bottleneck_features_train)
print("Saved train features")
# +
generator = datagen.flow(X_test, Y_test,
batch_size=batch_size)
bottleneck_features_validation = model_Inception.predict_generator(generator)
print("Calculate test features")
print(bottleneck_features_validation.dtype)
# -
np.savez('inception_features_validation', features=bottleneck_features_validation)
# np.save(open('bottleneck_features_validation.npy', 'w'), bottleneck_features_validation)
print("Saved test features")
# Stop training once validation loss has not improved for 5 epochs.
early_Stopping = EarlyStopping(
    monitor='val_loss',
    patience=5,
    verbose=0,
    mode='auto'
)
# Shrink the learning rate by 10x when validation loss plateaus.
reduceLR = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=0, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)
# Keep only the weights with the best validation loss seen so far.
checkpointer = ModelCheckpoint(filepath="TL_Inception.hdf5", verbose=1, save_best_only = True)
# Per-epoch metrics log.
# NOTE(review): the name `csv` shadows the stdlib `csv` module if that is
# imported in the same namespace.
csv = CSVLogger('TL_Inception.csv')
# Reload the cached bottleneck features (Inception-base activations) saved
# above, then train a small fully-connected model on top of them.
features_train = np.load('inception_features_train.npz')['features']
print("done")
print(features_train.shape)
features_validation = np.load('inception_features_validation.npz')['features']
print("done")
print(features_validation.shape)
print(Y_train.shape, Y_test.shape)
def model_add_top_layer(input_shape):
    """Build and compile the small classification head trained on the
    Inception bottleneck features.

    Args:
        input_shape: Shape of one bottleneck feature map (H, W, C).

    Returns:
        A compiled Keras Sequential model: GAP -> Dense(1024, relu)
        -> Dense(6, softmax), with categorical cross-entropy loss.
    """
    top = Sequential()
    # Collapse each feature map to a single value instead of flattening.
    top.add(GlobalAveragePooling2D(input_shape=input_shape))
    top.add(Dense(1024, activation='relu'))
    # Six vehicle classes -> softmax output.
    top.add(Dense(6, activation='softmax'))
    top.summary()
    optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=1e-6)
    top.compile(loss='categorical_crossentropy',
                optimizer=optimizer,
                metrics=['accuracy'])
    return top
model_TL = model_add_top_layer(features_train.shape[1:])
history_InceptionV3TL = model_TL.fit(features_train, Y_train,
batch_size=256,
epochs=100,
verbose=1,
validation_data=(features_validation, Y_test),
shuffle=True,
callbacks=[early_Stopping,reduceLR,checkpointer,csv])
| NEU_ADS_Student_Project_Portfolio_Examples/Vehicle Type Classification Using Convolutional Neural Network/Project/Inceptionv3_TL/InceptionV3_Transfer_Learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 24. Perceptron
#
# [](https://colab.research.google.com/github/rhennig/EMA6938/blob/main/Notebooks/24.Perceptron.ipynb)
#
# (Based on https://towardsdatascience.com/perceptron-algorithm-in-python-f3ac89d2e537)
#
# In this notebook, we will implement the Perceptron algorithm. It is the simplest single-layer neural network algorithm and illustrates some of the fundamental aspects of artifical neural networks.
#
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/b/b5/Neuron.svg/640px-Neuron.svg.png" alt="Neuron" align="center" style="width:500px; float:center"/>
# ### Create a Dataset
#
# Before we apply the clustering technique, we create a dataset of materials with experimental bandgaps.
# +
import numpy as np
import matplotlib.pyplot as plt
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
from sklearn import datasets
# Two Gaussian blobs in 2-D; random_state fixes the sample for reproducibility.
X, y = datasets.make_blobs(n_samples=150, n_features=2,
centers=2, cluster_std=3,
random_state=1)
print(type(X))
print(y)
# Plotting: class 0 as red triangles, class 1 as blue squares.
fig = plt.figure(figsize=(8,6))
plt.plot(X[:, 0][y == 0], X[:, 1][y == 0], 'r^')
plt.plot(X[:, 0][y == 1], X[:, 1][y == 1], 'bs')
plt.xlabel('Feature 1', fontsize=18)
plt.ylabel('Feature 2', fontsize=18)
plt.title('Random Classification Data with 2 classes', fontsize=20)
plt.show()
# -
# The dataset includes two classes, red and blue. The goal for the Perceptron algorithm is to learn an optimal straight line that separates the two classes.
#
# The Perceptron algorithm sums the input features using weights and applies a Unit Step Function, or Heaviside function, to that sum:
# $$
# \sigma(z) = \begin{cases}
# 0 \,\, \text{if} \,\, z < 0 \\
# 1 \,\, \text{if} \,\, z \ge 0
# \end{cases}
# $$
# +
def step_func(z):
    """Heaviside unit step: 0 for z < 0 and 1 for z >= 0 (elementwise)."""
    value_at_zero = 1  # value returned exactly at z == 0
    return np.heaviside(z, value_at_zero)
# Visualize the step function on [-1, 1].
z = np.linspace(-1, 1, 100)
sigma = step_func(z)
plt.step(z, sigma)
plt.xlabel('z', fontsize=18)
plt.ylabel('Stepfunction', fontsize=18)
plt.show()
# -
# ### Perceptron
#
# The Perceptron algorithm is illustrated by the following flowchart. For every training example, we first take the dot product of the input features and the parameters, $\theta$. Then, we apply the Unit Step Function to make the prediction, $\hat y$.
#
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/f/f0/Computer.Science.AI.Neuron.svg/640px-Computer.Science.AI.Neuron.svg.png" alt="Perceptron" align="center" style="width:500px; float:center"/>
#
# If the prediction is wrong and the model has misclassified that data point, we update for the parameters, $\theta$. We don’t update when the prediction is correct.
# ### Perceptron Update Rule
#
# The Perceptron update rule is similar to the Gradient Descent update rule:
# $$
# \theta_{t+1} = \theta_t + \eta \left ( {\bf y} -\sigma(\theta \cdot {\bf x}) \right ) {\bf x}
# $$
def perceptron(X, y, eta, epochs):
    """Train a perceptron classifier with the classic update rule.

    Parameters are initialized to zero; a bias input X0 = 1 is prepended
    to every sample so the bias lives in theta[0].

    Args:
        X: Input features, shape (m, n).
        y: Binary labels (0/1), length m.
        eta: Learning rate.
        epochs: Number of passes over the training data.

    Returns:
        theta: Learned parameters, shape (n + 1, 1), bias first.
        n_miss_list: Number of misclassified examples per epoch.
    """
    n_features = X.shape[1]
    theta = np.zeros((n_features + 1, 1))
    # Track how many examples were misclassified at every epoch.
    n_miss_list = []
    for _ in range(epochs):
        misses = 0
        for idx, sample in enumerate(X):
            # Prepend X0 = 1 so the dot product absorbs the bias term.
            augmented = np.insert(sample, 0, 1).reshape(-1, 1)
            # Heaviside step (1 at exactly 0) of the weighted sum.
            prediction = np.heaviside(np.dot(augmented.T, theta), 1)
            if (np.squeeze(prediction) - y[idx]) != 0:
                # Misclassified: nudge theta toward the correct label.
                theta += eta * (y[idx] - prediction) * augmented
                misses += 1
        n_miss_list.append(misses)
    return theta, n_miss_list
def plot_decision_boundary(X, theta):
    """Plot the training points and the perceptron's decision line.

    Args:
        X: Input features, shape (m, 2).
        theta: Parameters [bias, w1, w2] as returned by perceptron().

    NOTE(review): class labels are read from the module-level variable
    ``y`` rather than passed as an argument — confirm callers define it.
    """
    # Decision boundary: theta0*X0 + theta1*X1 + theta2*X2 = 0, rewritten
    # as the line x2 = m*x1 + c; evaluate it at the two extreme x1 values.
    x1 = [min(X[:,0]), max(X[:,0])]
    m = -theta[1]/theta[2]
    c = -theta[0]/theta[2]
    x2 = m*x1 + c
    # Plotting: data points per class, then the boundary in yellow.
    fig = plt.figure(figsize=(10,8))
    plt.plot(X[:, 0][y==0], X[:, 1][y==0], 'r^')
    plt.plot(X[:, 0][y==1], X[:, 1][y==1], 'bs')
    plt.xlabel('Feature 1', fontsize=19)
    plt.ylabel('Feature 2', fontsize=18)
    plt.title('Perceptron Algorithm', fontsize=20)
    plt.plot(x1, x2, 'y-')
# Train for 20 epochs with learning rate 0.01, then plot the learned boundary.
theta, miss_l = perceptron(X, y, 0.01, 20)
print(theta)
plot_decision_boundary(X, theta)
# We observe that we are able to separate the red and blue classes quite well.
#
# How about changing the random seed and the standard deviation for generating the data to explore how the Perceptron works for other distributions.
# ## Simple Perceptron with scikit-learn
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import Perceptron
import sklearn.metrics as metric
import numpy as np
# +
from sklearn.linear_model import Perceptron
# Create a simple data set for the AND function
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 0, 0, 1])
# AND is linearly separable, so a single perceptron can fit it exactly.
clf = Perceptron()
clf.fit(X,y)
print('score:', clf.score(X, y))
print('predictions:', clf.predict(X))
print('expected:', y)
# -
# ### Question
#
# Modify the data into other logical functions, such as OR, NOT, NAND, XOR. What do you observe? Can the single neuron perceptron algorithm model these logical functions?
# ### Multilayer Perceptron
#
# Generalizing the perceptron algorithm into one that includes multiple neurons and several layers of neurons results in a neural network. We can use the multilayer perceptron algorithm in scikit-learn `MLPClassifier` to train a classifier using a neural network.
# +
# Function to fit a 2D classification model and plot the decision boundary
# Source: https://towardsdatascience.com/easily-visualize-scikit-learn-models-decision-boundaries-dd0fb3747508
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def plot_decision_boundaries(X, y, model_class, **model_params):
    """
    Plot the decision boundaries of a classification model.

    Fits ``model_class(**model_params)`` on the first two columns of X,
    prints the training predictions and training accuracy, then draws the
    predicted class regions with ``contourf`` and overlays the data points.

    Arguments:
        X: Feature data as a NumPy-type array.
        y: Label data as a NumPy-type array.
        model_class: A Scikit-learn ML estimator class
                     e.g. GaussianNB (imported from sklearn.naive_bayes) or
                     LogisticRegression (imported from sklearn.linear_model)
        **model_params: Model parameters to be passed on to the ML estimator

    Typical code example:
        plt.figure()
        plt.title("KNN decision boundary with neighbors: 5", fontsize=16)
        plot_decision_boundaries(X_train, y_train, KNeighborsClassifier, n_neighbors=5)
        plt.show()
    """
    try:
        X = np.array(X)
        y = np.array(y).flatten()
    except:
        # NOTE(review): bare except silently continues with unconverted
        # inputs; consider narrowing the exception and re-raising.
        print("Coercing input data to NumPy arrays failed")
    # Reduces to the first two columns of data
    reduced_data = X[:, :2]
    # Instantiate the model object
    model = model_class(**model_params)
    # Fits the model with the reduced data
    model.fit(reduced_data, y)
    y_pred=model.predict(reduced_data) # prediction
    print(y_pred) # show the output
    # Training accuracy: the model is evaluated on the data it was fit on.
    accuracy=metric.accuracy_score(np.array(y).flatten(), np.array(y_pred).flatten(), normalize=True)
    print('acuracy=',accuracy) # show accuracy score
    # Step size of the mesh. Decrease to increase the quality of the VQ.
    h = .005 # point in the mesh [x_min, m_max]x[y_min, y_max].
    # Plot the decision boundary. For that, we will assign a color to each
    x_min, x_max = reduced_data[:, 0].min() - 0.2, reduced_data[:, 0].max() + 0.2
    y_min, y_max = reduced_data[:, 1].min() - 0.2, reduced_data[:, 1].max() + 0.2
    # Meshgrid creation
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Obtain labels for each point in mesh using the model.
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
    # NOTE(review): the fine mesh and Z computed above are immediately
    # discarded — xx, yy and Z are recomputed below at 0.1 resolution, so
    # the block above appears to be dead work that could be removed.
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
                         np.arange(y_min, y_max, 0.1))
    # Predictions to obtain the classification results
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    # Plotting: filled class regions, then the (color-inverted) data points.
    fig = plt.figure(figsize=(6,6))
    plt.contourf(xx, yy, Z, alpha=0.4)
    plt.scatter(X[:, 0], X[:, 1], c=1-y, alpha=0.8)
    plt.xlabel("Feature 1",fontsize=15)
    plt.ylabel("Feature 2",fontsize=15)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    return plt
# +
# Create a data set for the XOR function
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])
# Train a 2-layer neural network with two neurons in the hidden layer
plot_decision_boundaries(X, y,
MLPClassifier,
solver='lbfgs', hidden_layer_sizes=(2),
activation='logistic', random_state=4)
plt.show()
# -
# ### Question
#
# - Modify the parameters for the `MLPClassifier` to see how sensitive the optimized classifier is to the activation function`activation`, the optimization method `solver`, and the initial random guess `random_state`.
| Notebooks/24.Perceptron.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # HW05: Optimization
# **Brief Honor Code**. Do the homework on your own. You may discuss ideas with your classmates, but DO NOT copy the solutions from someone else or the Internet. If stuck, discuss with TA.
# **Note**: The expected figures are provided so you can check your solutions.
# **1**. (20 points)
#
# Find the gradient and Hessian for the following equation
#
# $$
# f(x, y) = 1 + 2x + 3y + 4x^2 + 2xy + y^2
# $$
#
# - Plot the contours of this function using `matplotlib` in the box $-5 \le x \le 5$ and $-5 \le y \le 5$ using a $100 \times 100$ grid.
# - Then plot the gradient vectors using the `quiver` function on top of the contour plot using a $10 \times 10$ grid. Are the gradients orthogonal to the contours?
#
# Hint: Use `numpy.meshgrid`, `matplotlib.contour` and `matplotllib.quiver`.
#
# 
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# ## Gradient and Hessian
def f(x, y):
    """Objective f(x, y) = 1 + 2x + 3y + 4x^2 + 2xy + y^2."""
    linear_terms = 2 * x + 3 * y
    quadratic_terms = 4 * x ** 2 + 2 * x * y + y ** 2
    return 1 + linear_terms + quadratic_terms
def grad(x, y):
    """Gradient [df/dx, df/dy] of the quadratic objective f."""
    df_dx = 2 + 8 * x + 2 * y
    df_dy = 3 + 2 * x + 2 * y
    return np.array([df_dx, df_dy])
def hessian(x, y):
    """Hessian of the quadratic objective f; constant in x and y."""
    second_derivatives = [[8, 2], [2, 2]]
    return np.array(second_derivatives)
# ## Part 1
# Evaluate f on a fine 100x100 grid over [-5, 5]^2 and draw its contours.
x1 = np.linspace(-5, 5, 100)
y1 = np.linspace(-5, 5, 100)
X1, Y1 = np.meshgrid(x1, y1)
plt.figure(figsize=(4,4))
plt.contour(X1, Y1, f(X1, Y1))
pass
# ## Part 2
# Coarser 10x10 grid for the gradient arrows so the quiver plot stays legible.
x2 = np.linspace(-5, 5, 10)
y2 = np.linspace(-5, 5, 10)
X2, Y2 = np.meshgrid(x2, y2)
plt.figure(figsize=(4,4))
plt.contour(X1, Y1, f(X1, Y1))
# The gradient field is orthogonal to the contour lines of f.
plt.quiver(X2, Y2, grad(X2,Y2)[0], grad(X2,Y2)[1], color='red')
pass
# **2**. (30 points)
#
# This exercise is about using Newton's method to find the cube roots of unity - find $z$ such that $z^3 = 1$. From the fundamental theorem of algebra, we know there must be exactly 3 complex roots since this is a degree 3 polynomial.
#
# We start with Euler's equation
# $$
# e^{ix} = \cos x + i \sin x
# $$
#
# Raising $e^{ix}$ to the $n$th power where $n$ is an integer, we get from Euler's formula with $nx$ substituting for $x$
# $$
# (e^{ix})^n = e^{i(nx)} = \cos nx + i \sin nx
# $$
#
# Whenever $nx$ is an integer multiple of $2\pi$, we have
# $$
# \cos nx + i \sin nx = 1
# $$
#
# So
# $$
# e^{2\pi i \frac{k}{n}}
# $$
# is a root of 1 whenever $k/n = 0, 1, 2, \ldots$.
#
# So the cube roots of unity are $1, e^{2\pi i/3}, e^{4\pi i/3}$.
#
# 
#
# While we can do this analytically, the idea is to use Newton's method to find these roots, and in the process, discover some rather perplexing behavior of Newton's method.
#
# Newton's method for functions of complex variables - stability and basins of attraction. (30 points)
#
# 1. Write a function with the following function signature `newton(z, f, fprime, max_iter=100, tol=1e-6)` where
# - `z` is a starting value (a complex number e.g. ` 3 + 4j`)
# - `f` is a function of `z`
# - `fprime` is the derivative of `f`
# The function will run until either max_iter is reached or the absolute value of the Newton step is less than tol. In either case, the function should return the number of iterations taken and the final value of `z` as a tuple (`i`, `z`).
#
# 2. Define the function `f` and `fprime` that will result in Newton's method finding the cube roots of 1. Find 3 starting points that will give different roots, and print both the start and end points.
#
# Write the following two plotting functions to see some (pretty) aspects of Newton's algorithm in the complex plane.
#
# 3. The first function `plot_newton_iters(f, fprime, n=200, extent=[-1,1,-1,1], cmap='hsv')` calculates and stores the number of iterations taken for convergence (or max_iter) for each point in a 2D array. The 2D array limits are given by `extent` - for example, when `extent = [-1,1,-1,1]` the corners of the plot are `(-i, -i), (1, -i), (1, i), (-1, i)`. There are `n` grid points in both the real and imaginary axes. The argument `cmap` specifies the color map to use - the suggested defaults are fine. Finally plot the image using `plt.imshow` - make sure the axis ticks are correctly scaled. Make a plot for the cube roots of 1.
#
# 
#
# 4. The second function `plot_newton_basins(f, fprime, n=200, extent=[-1,1,-1,1], cmap='jet')` has the same arguments, but this time the grid stores the identity of the root that the starting point converged to. Make a plot for the cube roots of 1 - since there are 3 roots, there should be only 3 colors in the plot.
#
# 
# ## Part 1
def newton(z, f, fprime, max_iter=100, tol=1e-6):
    """Find a root of f with Newton's method.

    Iterates z <- z - f(z)/fprime(z) until the absolute value of the
    Newton step drops below ``tol`` (as the exercise specifies) or
    ``max_iter`` iterations have been used. The original code compared
    |f(z_new) - f(z)| instead of the step size.

    Args:
        z: Starting value (may be complex, e.g. 3 + 4j).
        f: Function of z whose root is sought.
        fprime: Derivative of f.
        max_iter: Maximum number of iterations.
        tol: Convergence tolerance on the Newton step.

    Returns:
        (i, z): Number of iterations taken and the final value of z.
    """
    for i in range(1, max_iter + 1):
        step = f(z) / fprime(z)
        z = z - step
        if np.abs(step) < tol:
            # Converged: the last update was smaller than tol.
            return i, z
    return max_iter, z
# ## Part 2
def f1(z):
    """Objective z^3 - 1, whose roots are the three cube roots of unity."""
    return z * z * z - 1
def fprime1(z):
    """Derivative of f1: 3 z^2."""
    return 3 * z * z
# Three starting points chosen so Newton's method converges to the three
# different cube roots of unity; each cell shows (start, converged root).
start1 = 1.1
it1, end_point1 = newton(start1, f1, fprime1)
start1, end_point1
start2 = -2+3j
it2, end_point2 = newton(-2+3j, f1, fprime1)
start2, end_point2
start3 = -2-3j
it3, end_point3 = newton(-2-3j, f1, fprime1)
start3, end_point3
# ## Part 3
def plot_newton_iters(f, fprime, n=200, extent=[-1,1,-1,1], cmap='hsv'):
    """Plot how many Newton iterations each starting point needs.

    For every point z on an n x n grid over the complex window ``extent``
    (real axis first), run Newton's method on ``f`` and record the number
    of iterations until convergence (or max_iter), then render the counts
    as an image.

    Args:
        f: Function whose roots Newton's method searches for.
        fprime: Derivative of f.
        n: Number of grid points per axis.
        extent: [re_min, re_max, im_min, im_max] window of the plot.
        cmap: Matplotlib colormap name.
    """
    P = np.zeros((n,n))
    for i, x in enumerate(np.linspace(extent[0], extent[1], n)):
        for j, y in enumerate(np.linspace(extent[2],extent[3], n)):
            z = complex(x, y)
            # BUG FIX: use the function passed in as `f`; the original
            # hard-coded the global `f1`, silently ignoring the argument.
            it, end_point = newton(z, f, fprime)
            P[i, j] = it
    # Transpose so the real axis runs horizontally in the image.
    plt.imshow(P.T, cmap=cmap, extent=extent)
plot_newton_iters(f1, fprime1)
def plot_newton_basins(f, fprime, n=200, extent=[-1,1,-1,1], cmap='jet'):
    '''Display basin of attraction for convergence of roots of objective function. '''
    # For each point z on an n x n grid over the complex window `extent`,
    # run Newton's method and record WHICH root it converged to. The
    # imaginary part of the final value is used as the root's identity;
    # for the cube roots of unity the three roots have distinct imaginary
    # parts (0 and +/- sqrt(3)/2), giving three colors.
    P = np.zeros((n,n))
    for i, x in enumerate(np.linspace(extent[0], extent[1], n)):
        for j, y in enumerate(np.linspace(extent[2],extent[3], n)):
            z = complex(x, y)
            it, end_point = newton(z, f, fprime)
            P[i, j] = end_point.imag
    # Transpose so the real axis runs horizontally in the image.
    plt.imshow(P.T, cmap=cmap, extent=extent)
plot_newton_basins(f1, fprime1)
# **3**. (20 points)
#
# Consider the following function on $\mathbb{R}^2$:
#
# $$
# f(x_1,x_2) = -x_1x_2e^{-\frac{(x_1^2+x_2^2)}{2}}
# $$
#
# - Find the minimum under the constraint
# $$g(x) = x_1^2+x_2^2 \leq 10$$
# and
# $$h(x) = 2x_1 + 3x_2 = 5$$ using `scipy.optimize.minimize`.
# - Plot the function contours using `matplotlib`, showing the constraints $g$ and $h$ and indicate the constrained minimum with an `X`.
#
# 
# ## Part 1
from scipy.optimize import minimize
from matplotlib.patches import Circle
def f(x):
    """Objective f(x) = -x1*x2*exp(-(x1^2 + x2^2)/2) for a 2-vector x."""
    x1, x2 = x[0], x[1]
    return -(x1 * x2) * np.exp(-(x1 ** 2 + x2 ** 2) / 2)
# Equality constraint h(x): 2*x1 + 3*x2 = 5; inequality g(x): x1^2 + x2^2 <= 10
# (scipy expects inequality constraints written as fun(x) >= 0).
cons = ({'type': "eq", "fun": lambda x: 2*x[0]+3*x[1]-5}, {'type':"ineq", "fun": lambda x :-x[0]**2-x[1]**2+10})
# Multi-start: try several starting points lying on the line h(x) = 0 and
# keep the lowest constrained minimum found.
ms = [minimize(f, [x0, (5-2*x0)/3], constraints=cons) for x0 in range(-3, 3)]
res = min(ms, key = lambda res: res.fun)
res.x, res.fun
# ## Part 2
def f1(x, y):
    """Same objective as f, with the two coordinates as separate arguments."""
    radial = (x ** 2 + y ** 2) / 2
    return -(x * y) * np.exp(-radial)
# +
x = np.linspace(-4, 4, 100)
y = np.linspace(-4, 4, 100)
X, Y = np.meshgrid(x, y)
Z = f1(X, Y)
plt.figure()
# Mark the constrained minimum found above with a red X.
m = plt.scatter(res.x[0], res.x[1], marker='x', c="red", s=100)
plt.contour(X, Y, Z)
# Equality constraint h(x): the line x2 = (5 - 2*x1)/3.
plt.plot(x, (5-2*x)/3, '--', c="black")
plt.axis([-5,5,-5,5])
plt.title("Contour plot of f(x) subject to constraints h(x) and g(x)" )
plt.xlabel("x1")
plt.ylabel("x2")
# Inequality constraint g(x): disk of radius sqrt(10) about the origin.
r = np.sqrt(10)
circle1 = Circle((0,0),r, color="lightblue")
fig = plt.gcf()
ax = fig.gca()
ax.add_artist(circle1)
ax.add_artist(m)
plt.show()
# -
# **4** (30 points)
#
# Find solutions to $x^3 + 4x^2 -3 = x$.
#
# - Write a function to find brackets, assuming roots are always at least 1 unit apart and that the roots lie between -10 and 10
# - For each bracket, find the enclosed root using
# - a bisection method
# - Newton-Raphson (no guarantee to stay within brackets)
# - Use the end points of the bracket as starting points for the bisection methods and the midpoint for Newton-Raphson.
# - Use the companion matrix and characteristic polynomial to find the solutions
# - Plot the function and its roots (marked with a circle) in a window just large enough to contain all roots.
#
# Use a tolerance of 1e-6.
#
# 
# ## Part 1
# +
def f1(x):
    """Polynomial x^3 + 4x^2 - x - 3, i.e. x^3 + 4x^2 - 3 = x rearranged."""
    return ((x + 4) * x - 1) * x - 3
def fprime1(x):
    """Derivative of f1: 3x^2 + 8x - 1."""
    return (3 * x + 8) * x - 1
# -
def find_bracket(f, start, end, step):
    """Return [lo, hi] integer brackets where f changes sign.

    Scans the integers lo = start, ..., end - 1 and keeps each
    [lo, lo + step] interval whose endpoints have opposite signs.
    """
    brackets = []
    for lo in range(start, end):
        hi = lo + step
        if f(lo) * f(hi) < 0:
            brackets.append([lo, hi])
    return brackets
bracket = find_bracket(f1, start=-10, end=10, step=1)
bracket
# ## Part 2
# Bisection
def bisection(f, start, end, tol=1e-6):
    """Find a root of f inside [start, end] by recursive bisection.

    Assumes f(start) and f(end) have opposite signs so the bracket
    encloses a root.

    Args:
        f: Continuous function with a sign change on [start, end].
        start, end: Bracket endpoints.
        tol: Stop once the bracket is narrower than tol.

    Returns:
        An approximation of the enclosed root (the bracket's right end).
    """
    if end - start < tol:
        return end
    mid = (start + end) / 2
    if f(start) * f(mid) < 0:
        # Sign change in the left half. BUG FIX: propagate tol through the
        # recursion — the original dropped it, resetting to the default.
        return bisection(f, start=start, end=mid, tol=tol)
    # Otherwise the root lies in the right half.
    return bisection(f, start=mid, end=end, tol=tol)
# Newton-Raphson
def newton(z, f, fprime, max_iter=1000, tol=1e-6):
    """Find a root of f using Newton's method.

    Iterates z <- z - f(z)/fprime(z) until the absolute value of the
    Newton step drops below ``tol`` or ``max_iter`` iterations have been
    used. The original compared |f(z_new) - f(z)| rather than the step
    size, which is not the stated stopping criterion.

    Args:
        z: Starting point.
        f: Function whose root is sought.
        fprime: Derivative of f.
        max_iter: Iteration cap.
        tol: Convergence tolerance on the Newton step.

    Returns:
        The final root estimate.
    """
    for _ in range(max_iter):
        step = f(z) / fprime(z)
        z = z - step
        if np.abs(step) < tol:
            break
    return z
# Find the roots using the bisection method
[bisection(f1, start=x0[0], end=x0[1]) for x0 in bracket]
# Find the roots using newton method
mid_point = [np.mean(point) for point in bracket]
[newton(z, f1, fprime1) for z in mid_point]
# ## companion matrix
# Companion matrix of p(x) = x^3 + 4x^2 - x - 3: first row holds the
# negated lower coefficients [-4, 1, 3]; its eigenvalues are the roots of p.
C = np.array([[-4,1,3],
[1,0,0],
[0,1,0]])
C
val, vec = np.linalg.eig(C)
val
# ## characteristic polynomial
# np.roots takes the coefficients [1, 4, -1, -3] of p directly.
roots = np.roots([1,4,-1,-3])
roots
# ## plot
# Plot p over a window just wide enough to contain all three real roots,
# marking each root with a red dot on the zero line.
x = np.linspace(-5, 2, 100)
plt.plot(x, f1(x))
plt.scatter(roots, np.zeros(len(roots)), color="red")
plt.hlines(0, -5.1, 2.3)
plt.axis([-5.1, 2.3,-25, 21])
pass
| HW05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# +
np.random.seed(7)
# Class 0 has x2 centered at +3, class 1 at -3; x1 follows the same
# distribution for both classes (no class signal) but with a much larger
# spread, which will dominate unscaled distance computations.
train = pd.DataFrame({
'x1': np.concatenate([np.random.normal(0, 10, size=100), np.random.normal(0, 10, size=100)]),
'x2': np.concatenate([np.random.normal(3, 3, size=100), np.random.normal(-3, 3, size=100)]),
'clase': [0 for i in range(100)] + [1 for i in range(100)]
})
# NOTE(review): test x1 uses std 3 while train x1 uses std 10 — confirm
# this mismatch is intentional.
test = pd.DataFrame({
'x1': np.concatenate([np.random.normal(0, 3, size=25), np.random.normal(0, 3, size=25)]),
'x2': np.concatenate([np.random.normal(3, 3, size=25), np.random.normal(-3, 3, size=25)]),
'clase': [0 for i in range(25)] + [1 for i in range(25)]
})
plt.plot(train[train.clase == 0].x1, train[train.clase == 0].x2, 'rx')
plt.plot(train[train.clase == 1].x1, train[train.clase == 1].x2, 'bo')
plt.show()
# -
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(5)
knn.fit(train.drop('clase', axis=1), train.clase)
from sklearn.metrics import confusion_matrix
confusion_matrix(test.clase, knn.predict(test.drop('clase', axis=1)))
# +
X_train = (train.drop('clase', axis=1) - train.drop('clase', axis=1).mean()) / (train.drop('clase', axis=1).std())
X_test = (test.drop('clase', axis=1) - test.drop('clase', axis=1).mean()) / (test.drop('clase', axis=1).std())
y_train = train.clase
y_test = test.clase
# -
plt.plot(X_train[y_train == 0].x1, X_train[y_train == 0].x2, 'rx')
plt.plot(X_train[y_train == 1].x1, X_train[y_train == 1].x2, 'bo')
knn.fit(X_train, y_train)
confusion_matrix(y_test, knn.predict(X_test))
# **Ejercicio**: ¿Cuántos vecinos es mejor usar para este problema?
| 5_Clasif_KNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/main/notebooks/ssm_spring_demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="F3wG_2yFpALC"
# # 1d state space model of a mass-spring system in continuous time
#
# From https://srush.github.io/annotated-s4/
# (code at https://github.com/srush/annotated-s4/blob/main/s4/s4.py)
# + colab={"base_uri": "https://localhost:8080/"} id="FQN-puPXp9fT" outputId="f17507d8-37b8-41f7-d796-f24cafa5c21e"
# !pip install celluloid
# + id="R7UWaZQspBXw"
from functools import partial
import jax
import jax.numpy as np
#from flax import linen as nn
#from jax.nn.initializers import lecun_normal
from jax.numpy.linalg import eig, inv, matrix_power
from jax.scipy.signal import convolve
rng = jax.random.PRNGKey(1)
# + id="hEy_ICjepHP2"
def random_SSM(rng, N):
    """Sample a random state-space model (A, B, C) with uniform [0, 1) entries."""
    key_a, key_b, key_c = jax.random.split(rng, 3)
    A = jax.random.uniform(key_a, (N, N))
    B = jax.random.uniform(key_b, (N, 1))
    C = jax.random.uniform(key_c, (1, N))
    return A, B, C
def discretize(A, B, C, step):
    """Bilinear (Tustin) discretization of a continuous-time SSM with the given step."""
    eye = np.eye(A.shape[0])
    half_step_A = (step / 2.0) * A
    left_inv = inv(eye - half_step_A)
    Ab = left_inv @ (eye + half_step_A)
    Bb = (left_inv * step) @ B
    # C is unchanged by discretization.
    return Ab, Bb, C
def scan_SSM(Ab, Bb, Cb, u, x0):
    """Run the discrete SSM recurrence over the input sequence u.

    x_k = Ab @ x_{k-1} + Bb @ u_k, y_k = Cb @ x_k, starting from x0.
    Returns the stacked outputs y_1..y_L; the final state is discarded.
    """
    def body(state, inp):
        nxt = Ab @ state + Bb @ inp
        return nxt, Cb @ nxt
    _, outputs = jax.lax.scan(body, x0, u)
    return outputs
def run_SSM(A, B, C, u):
    """Discretize (A, B, C) with step 1/len(u), then run the recurrence over u."""
    num_steps = u.shape[0]
    state_dim = A.shape[0]
    Ab, Bb, Cb = discretize(A, B, C, step=1.0 / num_steps)
    # Start from the zero state; u gains a trailing axis to match Bb's shape.
    initial_state = np.zeros((state_dim,))
    return scan_SSM(Ab, Bb, Cb, u[:, np.newaxis], initial_state)
# + id="hF17HIbKztLP"
def example_mass(k, b, m):
    """Continuous-time SSM of a damped mass-spring system.

    State is (position, velocity); the input is a force and the
    output C observes the position only.
    """
    A = np.array([[0, 1],
                  [-k / m, -b / m]])
    B = np.array([[0],
                  [1.0 / m]])
    C = np.array([[1.0, 0]])
    return A, B, C
@partial(np.vectorize, signature="()->()")
def example_force(t):
    """Half-rectified sinusoid: sin(10t) where it exceeds 0.5, otherwise 0."""
    s = np.sin(10 * t)
    return np.where(s > 0.5, s, 0.0)
# + id="StNfFe-xpdV6"
def example_ssm():
    """Simulate the driven mass-spring SSM and animate force, position, and the mass."""
    # SSM
    ssm = example_mass(k=40, b=5, m=1)
    # L samples of u(t).
    L = 100
    step = 1.0 / L
    ks = np.arange(L)
    u = example_force(ks * step)
    # Approximation of y(t).
    y = run_SSM(*ssm, u)
    # Plotting ---
    # Plotting libs are imported lazily so the simulation code above stays
    # importable without matplotlib/seaborn/celluloid installed.
    import matplotlib.pyplot as plt
    import seaborn
    from celluloid import Camera
    seaborn.set_context("paper")
    fig, (ax1, ax2, ax3) = plt.subplots(3)
    camera = Camera(fig)
    ax1.set_title("Force $u_k$")
    ax2.set_title("Position $y_k$")
    ax3.set_title("Object")
    ax1.set_xticks([], [])
    ax2.set_xticks([], [])
    # Animate plot over time
    for k in range(0, L, 2):
        ax1.plot(ks[:k], u[:k], color="red")
        ax2.plot(ks[:k], y[:k], color="blue")
        # Draw the mass as a degenerate horizontal boxplot centred on y[k].
        ax3.boxplot(
            [[y[k, 0] - 0.04, y[k, 0], y[k, 0] + 0.04]],
            showcaps=False,
            whis=False,
            vert=False,
            widths=10,
        )
        camera.snap()
    anim = camera.animate()
    #anim.save("line.gif", dpi=150, writer="imagemagick")
# + colab={"base_uri": "https://localhost:8080/", "height": 337} id="OVhWXeUdplvm" outputId="6b8f44b6-ab0b-4815-fbfe-c60c01f13704"
example_ssm()
# + id="sUAuHL5fq4Q1"
def example_ssm2():
    """Same simulation as example_ssm, but animating only force and position (no mass panel)."""
    # SSM
    ssm = example_mass(k=40, b=5, m=1)
    # L samples of u(t).
    L = 100
    step = 1.0 / L
    ks = np.arange(L)
    u = example_force(ks * step)
    # Approximation of y(t).
    y = run_SSM(*ssm, u)
    # Plotting ---
    # Plotting libs are imported lazily; see example_ssm.
    import matplotlib.pyplot as plt
    import seaborn
    from celluloid import Camera
    seaborn.set_context("paper")
    fig, (ax1, ax2) = plt.subplots(2, figsize=(20,10))
    camera = Camera(fig)
    ax1.set_title("Force $u_k$")
    ax2.set_title("Position $y_k$")
    #ax3.set_title("Object")
    ax1.set_xticks([], [])
    ax2.set_xticks([], [])
    # Animate plot over time
    for k in range(0, L, 2):
        ax1.plot(ks[:k], u[:k], color="red")
        ax2.plot(ks[:k], y[:k], color="blue")
        camera.snap()
    anim = camera.animate()
    #anim.save("line.gif", dpi=150, writer="imagemagick")
# + colab={"base_uri": "https://localhost:8080/", "height": 632} id="MiZarEF-q__0" outputId="42bd659f-4104-4210-a05a-65955f7dd250"
example_ssm2()
# + id="MCzOqXUcrAz6"
| notebooks/ssm_spring_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from glob import iglob
import os
import cPickle as pk
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from bokeh.charts import Line, show, output_file
from bokeh.io import output_notebook
from bokeh.mpl import to_bokeh as tb
output_notebook()
# %matplotlib inline
# -
# List the PDF reports present in the working directory.
list(iglob('*.pdf'))
def parse_pdf_gender(file_n):
    """Extract the gender table from a pdftotext '-layout' dump.

    Scans for the first line whose leading token is 'Gender' or 'Sex', then
    collects whitespace-split rows until a blank line.  The trailing blank
    row and footnote row are dropped, and '**' markers are stripped from
    the header row.
    """
    start_tokens = set(['Gender', 'Sex'])
    in_table = False
    rows = []
    with open(file_n) as fh:
        for line in fh.readlines():
            tokens = [tok.strip('\n').strip() for tok in line.split(' ') if tok != '' and tok != '\n']
            if tokens and tokens[0] in start_tokens:
                in_table = True
            if in_table:
                rows.append(tokens)
            if not tokens:
                # Blank line terminates the table.
                in_table = False
    rows = rows[:-2]  # get rid of blank line and **
    rows[0] = [cell.strip('**') for cell in rows[0]]
    return rows
# Load the {year: [report file paths]} mapping produced earlier in the pipeline.
report_dict = pk.load(open('reports_dict.pk'))
years = sorted(report_dict.keys())
# 2009 is listed separately below and excluded from the per-year loop.
years.remove('2009')
years
[r.split('/')[-1] for r in report_dict['2009'][::-1]]
# Smoke test on the first report of the earliest remaining year.
r1 = [r.split('/')[-1] for r in report_dict[years[0]][::-1]][0]
r1
# Convert the PDF to layout-preserving text so the tables can be parsed.
os.system("pdftotext -layout " + r1 + " " + r1.split('.')[0] + ".txt")
t1d = parse_pdf_gender(r1.split('.')[0] + ".txt")
t1d
# Parse the gender table out of every report, recording each document's year.
t1ds = []
docYears = []
for y in years:
    for rep in [r.split('/')[-1] for r in report_dict[y][::-1]]:
        docYears.append(int(y))
        print rep
        os.system("pdftotext -layout " + rep + " " + rep.split('.')[0] + ".txt")
        t1d = parse_pdf_gender(rep.split('.')[0] + ".txt")
        t1ds.append(t1d)
        print t1d
# Accumulate per-gender time series across all parsed tables.
gender_data = {}
gender_data['male'] = {}
gender_data['male']['percent on registry'] = []
gender_data['male']['average age'] = []
gender_data['female'] = {}
gender_data['female']['percent on registry'] = []
gender_data['female']['average age'] = []
for t in t1ds:
    # Column order varies between report years; detect it from the header row.
    # NOTE(review): tokens are split on single spaces, so the header cell would
    # be 'Average', never 'Average Age' -- confirm this branch can trigger.
    agecol = 2
    pctcol = 1
    if t[0][1] == 'Average Age':
        agecol = 1
        pctcol = 2
    male = t[1]
    female = t[2]
    gender_data['male']['percent on registry'].append(float(male[pctcol].strip('%')))
    gender_data['female']['percent on registry'].append(float(female[pctcol].strip('%')))
    gender_data['male']['average age'].append(float(male[agecol]))
    gender_data['female']['average age'].append(float(female[agecol]))
# Long-format DataFrame: one row per (year, gender).
maleDF = pd.DataFrame(gender_data['male'])
maleDF['gender'] = 'male'
maleDF['year'] = docYears
femaleDF = pd.DataFrame(gender_data['female'])
femaleDF['gender'] = 'female'
femaleDF['year'] = docYears
genderDF = maleDF.append(femaleDF)
genderDF.head()
sns.set(font_scale=2)
g = sns.factorplot(x="year", y="average age", hue="gender",
                   data=genderDF,
                   size=8, ci=None)
g = sns.factorplot(x="year", y="percent on registry", hue="gender",
                   data=genderDF,
                   size=8, ci=None)
# +
# NOTE(review): this cell appears copied from the bokeh Line example --
# genderDF has no 'python'/'pypy'/'jython' columns; verify intent.
line = Line(genderDF, y=['python', 'pypy', 'jython'],
            dash=['python', 'pypy', 'jython'],
            color=['python', 'pypy', 'jython'],
            legend_sort_field = 'color',
            legend_sort_direction = 'ascending',
            title="Interpreter Sample Data", ylabel='Duration', legend=True)
output_file("line_single.html", title="line_single.py example")
show(line)
# -
# -
def parse_pdf_county(file_n):
    """Extract the by-county table from a pdftotext '-layout' dump.

    Supports two layouts: a single-column table (header token 'County') and
    a two-column table (header 'County (A-L)'), whose interleaved rows are
    re-sorted alphabetically when the table ends.
    """
    countyWords = set(['County', 'County (A-L)'])
    with open(file_n) as f:
        table = False
        two_col = False
        table_dat = []
        for l in f.readlines():
            words = [i.strip('\n').strip() for i in l.split(' ') if i != '' and i != '\n']
            if len(words) > 0 and words[0] in countyWords:
                table = True
                # NOTE(review): words are split on single spaces, so words[0]
                # would be 'County', never 'County (A-L)' -- confirm the
                # two-column branch can actually trigger.
                if words[0] == 'County (A-L)':
                    two_col = True
            if table:
                if two_col:
                    if len(words) < 1:
                        # Blank line ends the two-column table; sort the
                        # collected rows (minus the header) alphabetically.
                        table = False
                        table_dat[1:] = sorted(table_dat[1:], key=lambda x: x[0])
                        continue
                    # First three tokens belong to the left column.
                    col1 = words[:3]
                    if words[0] == 'County (A-L)':
                        table_dat.append(['County', '# of Patients', '% of Patients'])
                    else:
                        table_dat.append(col1)
                    # Remaining tokens, if any, are the right column's row.
                    if len(words) > 3:
                        col2 = words[3:]
                        table_dat.append(col2)
                else:
                    # print words
                    # print words[0] == '* Indicates fewer than three patients in the category'
                    # print table
                    # Short lines end the single-column table when they carry
                    # the footnote; otherwise they are skipped.
                    if len(words) < 2:
                        if len(words) != 0 and words[0] == '* Indicates fewer than three patients in the category':
                            table = False
                        continue
                    table_dat.append(words)
    return table_dat
r1
# Parse the county table from every report, mirroring the gender loop above.
county = parse_pdf_county(r1.split('.')[0] + ".txt")
len(county)
county_data = []
docYears = []
for y in years:
    for rep in [r.split('/')[-1] for r in report_dict[y][::-1]]:
        docYears.append(int(y))
        print rep
        county = parse_pdf_county(rep.split('.')[0] + ".txt")
        county_data.append(county)
        print len(county)
county_data[-1]
| COdata/Process MJ reports.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The ALeRCE Light Curve Classifier: training the deployed model
#
# ```Author: <NAME>, Last updated: 20201109```
#
# ### Introduction:
#
# The ALeRCE light curve classifier ([Sánchez-Sáez et al. 2020](https://arxiv.org/abs/2008.03311)) uses variability features computed from the ZTF alert stream, and colors obtained from AllWISE and ZTF photometry. It uses a Balanced Random Forest algorithm with a two-level scheme, where the top level classifies each source as periodic, stochastic, and transient, and the bottom level further resolve each hierarchical class, yielding a total of 15 classes. This classifier corresponds to the first attempt to classify multiple classes of stochastic variables (including nucleus- and host-dominated active galaxies, blazars, young stellar objects, and cataclysmic variables) in addition to different classes of periodic and transient sources, using real data.
#
# The first level (top level hereafter) consists of a single classifier which classifies every source as periodic, stochastic, or transient. The second level (bottom level hereafter) consists of three distinct classifiers: Transient, Stochastic, and Periodic. The classes considered by each of these three classifiers are the ones shown in Table 1 and Figure 2 of [Sánchez-Sáez et al. 2020](https://arxiv.org/abs/2008.03311). Each classifier in the bottom level is trained using a training subset having only those classes included in the primary top class (for instance, the Transient classifier only includes sources classified as SNIa, SNIbc, SNII, and SLSN). It is important to note that these four classifiers are independent and process the same input features set described in Section 3 of [Sánchez-Sáez et al. 2020](https://arxiv.org/abs/2008.03311). The final classification is constructed by multiplying the probabilities obtained for each class of the top level [$P_{top}(transient)$, $P_{top}(stochastic)$, and $P_{top}(periodic)$] with the individual probabilities obtained by their correspondent classifier in the bottom level. Namely, the probabilities of the Transient classifier ($P_{T}$) are multiplied by $P_{top}(transient)$, the probabilities of the Stochastic classifier ($P_{S}$) are multiplied by $P_{top}(stochastic)$, and the probabilities of the Periodic classifier ($P_{S}$) are multiplied by $P_{top}(periodic)$. We denote the product of these probabilities as $P$. For instance, the probability of a given source being an RRL corresponds to the product of its probability of being periodic (according to the top level) and its probability of being an RRL (according to the Periodic classifier):
# \begin{equation}
# P(RRL) = P_{top}(periodic) \times P_P (RRL),
# \end{equation}
# while the probability of being a Blazar is computed as:
# \begin{equation}
# P(Blazar) = P_{top}(stochastic) \times P_S(Blazar).
# \end{equation}
# Following this, the sum of the probabilities of the 15 classes for a given source adds up to one. Finally, the class of a given object is determined by selecting the class with the maximum $P$.
#
#
# For more information about the ALeRCE broker, please visit http://alerce.science/, or read our publications:
# * The Automatic Learning for the Rapid Classification of Events (ALeRCE) Alert Broker, [Förster et al. 2020, submitted to AJ](https://arxiv.org/abs/2008.03303)
# * Alert Classification for the ALeRCE Broker System: The Real-time Stamp Classifier, [Carrasco-Davis et al. 2020, submitted to AJ](https://arxiv.org/abs/2008.03309)
# * Alert Classification for the ALeRCE Broker System: The Light Curve Classifier, [Sánchez-Sáez et al. 2020, submitted to AJ](https://arxiv.org/abs/2008.03311)
#
# ### This notebook:
#
# This notebook contains the code used to train the deployed model described in Section 5.3 of [Sánchez-Sáez et al. 2020](https://arxiv.org/abs/2008.03311), and can be used to reproduce the results presented in [Sánchez-Sáez et al. 2020](https://arxiv.org/abs/2008.03311). This notebook receives a file with the features already computed and a file with the labeled set.
#
# If you use this notebook, please cite our work: https://ui.adsabs.harvard.edu/abs/2020arXiv200803311S/exportcitation.
#
#
import numpy as np
import pandas as pd
from sklearn import preprocessing, model_selection, metrics, ensemble
import pickle
import itertools
import matplotlib.pyplot as plt
from collections import Counter
from imblearn.ensemble import BalancedRandomForestClassifier as RandomForestClassifier
from scipy.stats import randint as sp_randint
from sklearn.utils import class_weight
# ### Definition of names for plots and files
# +
# Date tag appended to the output file names.
date = '20200609'

#names of files with features and labels for the training set (v7)
#labeled set
labels_file = './labeled_set_lc_classifier_SanchezSaez_2020.csv'
#features
features_path = './'
features_file = features_path+'features_for_lc_classifier_20200609.csv'

#where the RF models are saved
model_first_layer = 'final_BHRF_model/hierarchical_level_RF_model.pkl'
model_periodic_layer = 'final_BHRF_model/periodic_level_RF_model.pkl'
# BUGFIX: the transient/stochastic paths were swapped (and 'stochastic' was
# misspelled 'stockastic'), so each model was pickled under the other's file
# name.  Each path now matches the model dumped into it later in the notebook.
model_transient_layer = 'final_BHRF_model/transient_level_RF_model.pkl'
model_stochastic_layer = 'final_BHRF_model/stochastic_level_RF_model.pkl'
# ordered feature list saved alongside the models
features_pickle = 'final_BHRF_model/features_RF_model.pkl'

#confusion matrixes
conf_matrix_name_first_layer = 'stat_prob_hierRF_model_2/confusion_matrix_rf_model_2_hierarchical_layer_'+date
conf_matrix_name_second_layer = 'stat_prob_hierRF_model_2/confusion_matrix_rf_model_2_multiclass_'+date

#feature importances
feature_importance_name_first_layer = '../../paper_late_classifier/feature_importance_rf_model_2_hierarchical_layer_'+date+'.pdf'
feature_importance_name_periodic_layer = '../../paper_late_classifier/feature_importance_rf_model_2_periodic_layer_'+date+'.pdf'
feature_importance_name_transient_layer = '../../paper_late_classifier/feature_importance_rf_model_2_transient_layer_'+date+'.pdf'
feature_importance_name_stochastic_layer = '../../paper_late_classifier/feature_importance_rf_model_2_stochastic_layer_'+date+'.pdf'
# ### reading the training set files
# +
# Features and labels are joined on the ZTF object id ('oid').
df_feat = pd.read_csv(features_file,index_col='oid')
df_labels = pd.read_csv(labels_file,index_col='oid')

#discarding infinite values
df_feat = df_feat.replace([np.inf, -np.inf], np.nan)

print(df_labels['classALeRCE'].values.size)
print(df_feat.head())
# +
#defining taxonomy tree according to the taxonomy presented in Section 2.2 of the paper.
df_labels['class_original'] = df_labels['classALeRCE']

#defining the classes included in the RF model
label_order = ['SNIa', 'SNIbc', 'SNII', 'SLSN','QSO','AGN', 'Blazar', 'YSO','CV/Nova',
               'LPV', 'E', 'DSCT', 'RRL', 'CEP','Periodic-Other']
labels = df_labels.loc[df_labels.class_original.isin(label_order)][["class_original"]]

#defining hierarchical classes:
# NOTE: 'class_hierachical' (sic) is misspelled but used consistently
# throughout the notebook, so the name is kept as-is.
labels['class_hierachical'] = labels['class_original']
labels.loc[ (labels['class_hierachical'] == 'LPV') | (labels['class_hierachical'] == 'Periodic-Other') | (labels['class_hierachical'] == 'E') | (labels['class_hierachical'] == 'DSCT') | (labels['class_hierachical'] == 'RRL') | (labels['class_hierachical'] == 'CEP') , 'class_hierachical'] = 'Periodic'
labels.loc[(labels['class_hierachical'] == 'SNIa') | (labels['class_hierachical'] == 'SNIbc') | (labels['class_hierachical'] == 'SNII') | (labels['class_hierachical'] == 'SLSN'), 'class_hierachical'] = 'Transient'
labels.loc[(labels['class_hierachical'] == 'CV/Nova') |(labels['class_hierachical'] == 'YSO') | (labels['class_hierachical'] == 'AGN') | (labels['class_hierachical'] == 'QSO') | (labels['class_hierachical'] == 'Blazar') , 'class_hierachical'] = 'Stochastic'
cm_classes_hierachical = ['Transient','Stochastic','Periodic']
cm_classes_original = label_order
print(labels['class_hierachical'].values.shape)
labels.head()
# +
#defining columns excluded from the df_nd table
rm_nd_cols = [
    'n_det_1',
    'n_det_2',
    'n_pos_1',
    'n_pos_2',
    'n_neg_1',
    'n_neg_2',
    'first_mag_1',
    'first_mag_2',
    'MHPS_non_zero_1',
    'MHPS_non_zero_2',
    'MHPS_PN_flag_1',
    'MHPS_PN_flag_2',
    'mean_mag_1',
    'mean_mag_2',
    'min_mag_1',
    'min_mag_2',
    'W1','W2','W3','W4',
    'iqr_1',
    'iqr_2',
    'delta_mjd_fid_1',
    'delta_mjd_fid_2',
    'last_mjd_before_fid_1',
    'last_mjd_before_fid_2',
    'g-r_ml',
    'MHAOV_Period_1', 'MHAOV_Period_2',
]
# Keep only labeled sources that have features; drop the excluded columns.
df = labels.join(df_feat.drop(rm_nd_cols, axis=1),how='inner')
df = df.replace([np.inf, -np.inf], np.nan)
df_train = df.copy()
df_train = df_train.fillna(-999)
labels = df[['class_original','class_hierachical']]
df.drop(['Mean_1','Mean_2','class_original','class_hierachical'], axis=1, inplace=True)
# Missing feature values are encoded as -999 for the random forest.
df = df.fillna(-999)
print(len(labels['class_original'].values))
df.head()
# -
#which are the features included in the model?
print(np.array(df.columns))
print(len(np.array(df.columns)))
#how many sources belong to each class?
for idx, cl in enumerate(label_order):
    print(cl, labels['class_original'][labels['class_original']==cl].shape[0])
# +
#plotting the number of sources per class for the labeled set
class_counts = Counter(labels['class_original'])
class_counts.most_common()
print(class_counts)
df_hist = pd.DataFrame.from_dict(class_counts, orient='index',columns=['number'])
df_hist = df_hist.sort_values(by=['number'],ascending=False)
print(df_hist)
df_hist.plot(kind='bar',legend=False)
plt.yscale('log')
plt.ylabel(r'$\#$ of sources')
plt.savefig('number_sources_labeled_set.pdf',bbox_inches='tight')
# -
# ### Defining functions to plot the confusion matrix and the feature importance
# +
def plot_confusion_matrix(cm, classes, plot_name,
                          normalize=True,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.

    Parameters: cm is the raw confusion-matrix array, classes the ordered
    tick labels, plot_name the output file path; when normalize is True the
    matrix is converted to row-wise percentages (rounded to integers).
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        # Row-normalize to percentages so each true class sums to ~100.
        cm = np.round((cm.astype('float') / cm.sum(axis=1)[:, np.newaxis])*100)
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    fig, ax = plt.subplots(figsize=(12, 10))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    #plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45, fontsize = 17)
    plt.yticks(tick_marks, classes, fontsize = 17)
    #fmt = '.2f' if normalize else 'd'
    fmt = 'd'
    # Annotate each cell; white text on dark cells for readability.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, "%d"% (cm[i, j]),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black",fontsize = 16)
    plt.tight_layout()
    plt.ylabel('True label',fontsize = 18)
    plt.xlabel('Predicted label',fontsize = 18)
    plt.savefig(plot_name, bbox_inches='tight')
    #plt.close()
def plot_feature_importances(model, feature_names,feature_importances_name):
    """Print the top-30 feature importances and save a bar plot of the top 60.

    model must expose feature_importances_ (a fitted random forest);
    feature_names is indexed in the same order as the training columns.
    """
    # Indices of features sorted by decreasing importance; keep the top 60.
    I = np.argsort(model.feature_importances_)[::-1]
    I = I[0:60]
    # Print the 30 most important features as LaTeX-style table rows.
    for i in I[0:30]:
        print(feature_names[i], "& %.3f" % (model.feature_importances_[i]))
    fig, ax = plt.subplots(figsize=(16, 5), tight_layout=True)
    x_plot = np.arange(len(I))
    plt.xticks(x_plot, [feature_names[i] for i in I], rotation='vertical')
    ax.bar(x_plot, height=model.feature_importances_[I]);
    plt.savefig(feature_importances_name, bbox_inches='tight')
    #plt.close()
# -
# ### Pre-processing training data
# +
Y_hierarchical = labels['class_hierachical']
Y_original = labels['class_original']
print(len(labels['class_hierachical'].values))
print(len(labels['class_original'].values))
X_hierarchical = df

#splitting training set
# 80/20 split, stratified on the 15 original classes so both label sets
# stay aligned row-for-row.
X_train_hierarchical, X_test_hierarchical, y_train_hierarchical, y_test_hierarchical, y_train_original, y_test_original = model_selection.train_test_split(X_hierarchical,
    Y_hierarchical, Y_original, test_size=0.2, stratify=Y_original)

# separating training sets for sub-classes
X_train_periodic = X_train_hierarchical.loc[y_train_hierarchical=='Periodic', :]
y_train_periodic = y_train_original.loc[y_train_hierarchical=='Periodic']
X_train_stochastic = X_train_hierarchical.loc[y_train_hierarchical=='Stochastic', :]
y_train_stochastic = y_train_original.loc[y_train_hierarchical=='Stochastic']
X_train_transient = X_train_hierarchical.loc[y_train_hierarchical=='Transient', :]
y_train_transient = y_train_original.loc[y_train_hierarchical=='Transient']

# Every bottom-level classifier is evaluated on the full test set.
X_test_periodic = X_test_hierarchical
X_test_stochastic = X_test_hierarchical
X_test_transient = X_test_hierarchical
print(len(y_train_periodic), len(y_train_stochastic), len(y_train_transient))
# +
print(X_train_hierarchical.index)
print(X_test_hierarchical.index)
# Persist the oids of each split for reproducibility.
np.save('final_BHRF_model/labels_training_sample', X_train_hierarchical.index.values)
# NOTE(review): 'labels_testint_sample' looks like a typo for
# 'labels_testing_sample'; kept as-is since downstream code may load it.
np.save('final_BHRF_model/labels_testint_sample', X_test_hierarchical.index.values)
# -
# ## Balanced random forest
#
# ### Top level: separating Periodic, Stochastic and Transients:
# +
#Training first level of the RF model
rf_model_hierarchical = RandomForestClassifier(
    n_estimators=500,
    max_features='auto',
    max_depth=None,
    n_jobs=-1,
    bootstrap=True,
    class_weight='balanced_subsample',
    criterion='entropy',
    min_samples_split=2,
    min_samples_leaf=1)
rf_model_hierarchical.fit(X_train_hierarchical, y_train_hierarchical)

#testing first level performance
y_true, y_pred = y_test_hierarchical, rf_model_hierarchical.predict(X_test_hierarchical)
# Per-class probabilities are kept to combine with the bottom level later.
y_pred_proba_hier = rf_model_hierarchical.predict_proba(X_test_hierarchical)
classes_order_proba_hierarchical = rf_model_hierarchical.classes_
print(classes_order_proba_hierarchical)
print("Accuracy:", metrics.accuracy_score(y_true, y_pred))
print("Balanced accuracy:", metrics.balanced_accuracy_score(y_true, y_pred))

#Dumping trained model
features_hierarchical = list(X_train_hierarchical)
with open(model_first_layer, 'wb') as f:
    pickle.dump(
        rf_model_hierarchical,
        f,
        pickle.HIGHEST_PROTOCOL)
# -
#plotting confusion matrix
cnf_matrix = metrics.confusion_matrix(y_true, y_pred, labels=cm_classes_hierachical)
print(cnf_matrix)
plot_confusion_matrix(cnf_matrix,cm_classes_hierachical,'training_conf_matrix_hierarchical_level.pdf')
#plotting feature importance
plot_feature_importances(rf_model_hierarchical, features_hierarchical, 'final_BHRF_model/feature_ranking_hierarchical_level.pdf')
# ### Periodic classifier
# +
#Training Periodic classifier
# Same hyperparameters as the top level; trained only on Periodic sources.
rf_model_periodic = RandomForestClassifier(
    n_estimators=500,
    max_features='auto',
    max_depth=None,
    n_jobs=-1,
    class_weight='balanced_subsample',
    bootstrap=True,
    criterion='entropy',
    min_samples_split=2,
    min_samples_leaf=1)
rf_model_periodic.fit(X_train_periodic, y_train_periodic)

# Applying periodic model to the test data
y_true_periodic, y_pred_periodic = y_test_original, rf_model_periodic.predict(X_test_periodic)
y_pred_proba_periodic = rf_model_periodic.predict_proba(X_test_periodic)
classes_order_proba_periodic = rf_model_periodic.classes_
print(classes_order_proba_periodic)

#Dumping trained model
features_periodic = list(X_train_periodic)
with open(model_periodic_layer, 'wb') as f:
    pickle.dump(
        rf_model_periodic,
        f,
        pickle.HIGHEST_PROTOCOL)
# -
#plotting feature importance
# NOTE(review): printing the *length of the file-name string* below looks
# like leftover debug output.
print(len(feature_importance_name_first_layer))
plot_feature_importances(rf_model_periodic, features_periodic, 'final_BHRF_model/feature_ranking_periodic_level.pdf')
# ### Stochastic classifier
# +
#Training Stochastic classifier
# Only difference from the other levels: max_features=0.2 instead of 'auto'.
rf_model_stochastic = RandomForestClassifier(
    n_estimators=500,
    max_features=0.2,#'auto',
    max_depth=None,
    n_jobs=-1,
    bootstrap=True,
    class_weight='balanced_subsample',
    criterion='entropy',
    min_samples_split=2,
    min_samples_leaf=1)
rf_model_stochastic.fit(X_train_stochastic, y_train_stochastic)

# Applying stochastic model to the test data
y_true_stochastic, y_pred_stochastic = y_test_original, rf_model_stochastic.predict(X_test_stochastic)
y_pred_proba_stochastic = rf_model_stochastic.predict_proba(X_test_stochastic)
classes_order_proba_stochastic = rf_model_stochastic.classes_
print(classes_order_proba_stochastic)

#Dumping trained model
features_stochastic = list(X_train_stochastic)
with open(model_stochastic_layer, 'wb') as f:
    pickle.dump(
        rf_model_stochastic,
        f,
        pickle.HIGHEST_PROTOCOL)
# -
#plotting feature importance
plot_feature_importances(rf_model_stochastic, features_stochastic, 'final_BHRF_model/feature_ranking_stochastic_level.pdf')
# ### Transient classifier
# +
#Training Transient classifier
rf_model_transient = RandomForestClassifier(
    n_estimators=500,
    max_features='auto',
    max_depth=None,
    n_jobs=-1,
    bootstrap=True,
    class_weight='balanced_subsample',
    criterion='entropy',
    min_samples_split=2,
    min_samples_leaf=1)
rf_model_transient.fit(X_train_transient, y_train_transient)

# Applying transient model to the test data
y_true_transient, y_pred_transient = y_test_original, rf_model_transient.predict(X_test_transient)
y_pred_proba_transient = rf_model_transient.predict_proba(X_test_transient)
classes_order_proba_transient = rf_model_transient.classes_
print(classes_order_proba_transient)

#Dumping trained model
features_transient = list(X_train_transient)
with open(model_transient_layer, 'wb') as f:
    pickle.dump(
        rf_model_transient,
        f,
        pickle.HIGHEST_PROTOCOL)
# The ordered feature list is saved so the deployed model can align columns.
with open(features_pickle, 'wb') as f:
    pickle.dump(
        features_transient,
        f,
        pickle.HIGHEST_PROTOCOL)
# +
#plotting feature importance
plot_feature_importances(rf_model_transient, features_transient, 'final_BHRF_model/feature_ranking_transient_level.pdf')
# -
# ## Putting al layers together
#
# +
# generating final probabilities
#multiplying probabilities of the top level with the other classifiers
# Each bottom-level probability is weighted by its branch probability from
# the top level, so the 15 final class probabilities sum to one per source.
# NOTE: 'prob_trainsient' (sic) is a local typo, kept for byte-stability.
prob_periodic = y_pred_proba_periodic*y_pred_proba_hier[:,np.where(classes_order_proba_hierarchical=='Periodic')[0][0]].T[:, np.newaxis]
prob_stochastic = y_pred_proba_stochastic*y_pred_proba_hier[:,np.where(classes_order_proba_hierarchical=='Stochastic')[0][0]].T[:, np.newaxis]
prob_trainsient = y_pred_proba_transient*y_pred_proba_hier[:,np.where(classes_order_proba_hierarchical=='Transient')[0][0]].T[:, np.newaxis]

#obtaining final probabilities matrix
prob_final = np.concatenate((prob_stochastic,prob_trainsient,prob_periodic),axis=1)
# Sanity check: rows should sum to ~1.
print(np.sum(prob_final,axis=1),np.mean(np.sum(prob_final,axis=1)),np.std(np.sum(prob_final,axis=1)))

#getting the ordered name of classes for prob_final
prob_final_class_names = np.concatenate((classes_order_proba_stochastic,classes_order_proba_transient,classes_order_proba_periodic))
print(prob_final_class_names)
# Final prediction: the class with the maximum combined probability.
class_final_proba = np.amax(prob_final,axis=1)
class_final_index = np.argmax(prob_final,axis=1)
class_final_name = [prob_final_class_names[x] for x in class_final_index]
# +
# generating confusion matrix for bottom level
cnf_matrix = metrics.confusion_matrix(y_test_original, class_final_name,labels=label_order)
print(cnf_matrix)
plot_confusion_matrix(cnf_matrix,label_order, 'conf_matrix_multiclass_level.pdf')
print("Accuracy:", "%0.2f" % metrics.accuracy_score(y_test_original, class_final_name))
print("Balanced accuracy:","%0.2f" % metrics.balanced_accuracy_score(y_test_original, class_final_name))
print("macro precision: ","%0.2f" % metrics.precision_score(y_test_original, class_final_name, average='macro'))
print("macro recall: ","%0.2f" % metrics.recall_score(y_test_original, class_final_name, average='macro'))
print("macro F1: ","%0.2f" % metrics.f1_score(y_test_original, class_final_name, average='macro'))
print(metrics.classification_report(y_test_original, class_final_name, digits=2))
| training_deployed_BRF_alerts_20200609.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Artificial Dermatologist
# ---
# This notebook is an easy way to develop and test the model for the dermatology engine.
# Download Data
# ---
# %mkdir data; cd data
# !mkdir train; cd train
# !wget -O train.tar.gz https://s3-us-west-1.amazonaws.com/udacity-dlnfd/datasets/skin-cancer/train.zip
# !tar -zxf train.tar.gz
# !cd ..
# !mkdir test; cd test
# !wget -O test.tar.gz https://s3-us-west-1.amazonaws.com/udacity-dlnfd/datasets/skin-cancer/test.zip
# !tar -zxf test.tar.gz
# !cd ..
# !mkdir valid; cd valid
# !wget -O valid.tar.gz https://s3-us-west-1.amazonaws.com/udacity-dlnfd/datasets/skin-cancer/valid.zip
# !tar -zxf valid.tar.gz
# !cd ..
# !cd ..
# Load Data and Preprocess data
# ---
# +
# Folder layout created by the download cell above.
data_dir = 'data/'
train_dir = 'train/'
test_dir = 'test/'
valid_dir = 'valid/'
# Class sub-folder names, as expected by torchvision's ImageFolder.
classes = ['melanoma', 'nevus', 'seborrheic_keratosis']
import pickle as pkl
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
from torchvision import models, transforms
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
# %matplotlib inline
# +
# Train on the GPU when CUDA is available, otherwise fall back to the CPU.
train_on_gpu = torch.cuda.is_available()
message = ('CUDA is available! Training on GPU ...' if train_on_gpu
           else 'CUDA is not available. Training on CPU ...')
print(message)
# -
def get_data_loader(batch_size, data_dir):
    """Build a shuffling DataLoader over an ImageFolder with Inception-sized augmentation.

    Images are resized so the short side is 299 px, randomly cropped to
    299x299 (the Inception v3 input size), randomly flipped and rotated,
    then converted to tensors.
    """
    augmentation = transforms.Compose([
        transforms.Resize(299),
        transforms.RandomCrop(299),          # for inception network
        transforms.RandomHorizontalFlip(),   # random horizontal flip
        transforms.RandomRotation(359),      # random rotation of 1-359 degrees
        transforms.ToTensor(),
    ])
    dataset = datasets.ImageFolder('./' + data_dir, augmentation)
    return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, num_workers=0)
# One loader per split, all with the same batch size and augmentation.
batch_size = 128
train_loader = get_data_loader(batch_size, data_dir+train_dir)
valid_loader = get_data_loader(batch_size, data_dir+valid_dir)
test_loader = get_data_loader(batch_size, data_dir+test_dir)
# +
# display a lot of this drawn from solutions from Udacity Deep Learning Course
def imshow(img):
    """Show a CHW tensor image with matplotlib (converted to HWC numpy)."""
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))

# BUGFIX: `dataiter.next()` relied on the Python-2-style iterator method,
# which was removed in Python 3; use the builtin next() instead.
dataiter = iter(train_loader)
images, _ = next(dataiter)

# Show the first 20 images of the batch in a 2 x 10 grid.
fig = plt.figure(figsize=(20, 4))
plot_size = 20
for idx in np.arange(plot_size):
    # BUGFIX: integer division -- add_subplot rejects a float column count.
    ax = fig.add_subplot(2, plot_size//2, idx+1, xticks=[], yticks=[])
    imshow(images[idx])
# -
def scale(x, feature_range=(-1, 1)):
    """Linearly map x from [0, 1] into feature_range (default (-1, 1), matching tanh)."""
    low, high = feature_range
    return x * (high - low) + low
# +
# Sanity check: after scaling, pixel values should span roughly [-1, 1].
img = images[0]
scaled_img = scale(img)
print('Min: ', scaled_img.min())
print('Max: ', scaled_img.max())
# -
# Model
# ---
# inspired by the use of the Google Inception v3 model by [Stanford researchers](https://www.nature.com/articles/nature21056.epdf?referrer_access_token=_snzJ5POVSgpHutcNN4lEtRgN0jAjWel9jnR3ZoTv0NXpMHRAJy8Qn10ys2O4tuP9jVts1q2g1KBbk3Pd3AelZ36FalmvJLxw1ypYW0UxU7iShiMp86DmQ5Sh3wOBhXDm9idRXzicpVoBBhnUsXHzVUdYCPiVV0Slqf-Q25Ntb1SX_HAv3aFVSRgPbogozIHYQE3zSkyIghcAppAjrIkw1HtSwMvZ1PXrt6fVYXt-dvwXKEtdCN8qEHg0vbfl4_m&tracking_referrer=edition.cnn.com)
# +
import torch.nn as nn

# Load ImageNet-pretrained Inception v3 and replace its final fully-connected
# layer with a 3-way head for the skin-lesion classes.
model = models.inception_v3(pretrained=True)
print(model)
model.fc = nn.Linear(model.fc.in_features, len(classes))
# NOTE(review): inception_v3 also has an auxiliary classifier
# (model.AuxLogits.fc) that is left at 1000 outputs here -- confirm whether
# aux_logits should be disabled or its head replaced as well.
if train_on_gpu:
    model.cuda()
print(model)
# +
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(list(filter(lambda p: p.requires_grad, model.parameters())),lr=0.001, momentum=0.9) #lambda from https://medium.com/@14prakash/almost-any-image-classification-problem-using-pytorch-i-am-in-love-with-pytorch-26c7aa979ec4
# -
# Training
# ---
# Training loop: track per-epoch train/validation loss and checkpoint the
# model whenever the validation loss improves.
epochs = 5
valid_loss_min = np.Inf  # best validation loss seen so far
for epoch in range(1, epochs+1): #reused from one of my previous Udacity solutions
    train_loss = 0.0
    valid_loss = 0.0
    model.train()
    for data, target in train_loader:
        if train_on_gpu:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        # NOTE(review): inception_v3 with aux_logits returns (logits, aux)
        # in train mode on some torchvision versions -- confirm that
        # `output` is a plain tensor before feeding it to the criterion.
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        # loss.item() is the batch mean; weight by batch size so the sum
        # divides out correctly below.
        train_loss += loss.item()*data.size(0)
    model.eval()
    # no_grad: validation needs no gradients, saving memory and time.
    with torch.no_grad():
        for data, target in valid_loader:
            if train_on_gpu:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            loss = criterion(output, target)
            valid_loss += loss.item()*data.size(0)
    train_loss = train_loss/len(train_loader.dataset)
    valid_loss = valid_loss/len(valid_loader.dataset)
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch, train_loss, valid_loss))
    if valid_loss <= valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            valid_loss))
        torch.save(model.state_dict(), 'artificial-derma-model.pt')
        valid_loss_min = valid_loss
# BUG FIX: the checkpoint is saved as 'artificial-derma-model.pt' but the
# original reloaded 'artificiald-derma-model.pt' (typo), which raised
# FileNotFoundError. Load the same file that was saved.
model.load_state_dict(torch.load('artificial-derma-model.pt'))
# Testing
# ---
# +
# Evaluate the trained model on the held-out test set, accumulating the
# overall loss plus per-class accuracy counts.
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval()
for data, target in test_loader:
    if train_on_gpu:
        data, target = data.cuda(), target.cuda()
    output = model(data)
    loss = criterion(output, target)
    test_loss += loss.item()*data.size(0)
    _, pred = torch.max(output, 1)
    correct_tensor = pred.eq(target.data.view_as(pred))
    # np.atleast_1d keeps indexing valid even for a batch of size 1, where
    # np.squeeze would collapse the array to a 0-d scalar.
    correct = np.atleast_1d(np.squeeze(
        correct_tensor.numpy() if not train_on_gpu else correct_tensor.cpu().numpy()))
    # BUG FIX: iterate over the actual batch size -- the final batch is
    # usually smaller than `batch_size`, so the original
    # `range(batch_size)` loop raised an IndexError there.
    for i in range(target.size(0)):
        label = target.data[i]
        class_correct[label] += correct[i].item()
        class_total[label] += 1
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
    if class_total[i] > 0:
        print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
            classes[i], 100 * class_correct[i] / class_total[i],
            np.sum(class_correct[i]), np.sum(class_total[i])))
    else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
    100. * np.sum(class_correct) / np.sum(class_total),
    np.sum(class_correct), np.sum(class_total)))
# -
# source notebook: .ipynb_checkpoints/Artificial Dermatologist-checkpoint.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Training the rough Heston model part 2
#
# In this notebook we train a neural network for the rough Heston model for expiries in the range (0.008,0.03].
#
# Be aware that the datasets are rather large.
#
# ### Load, split and scale the datasets
# +
import os, pandas as pd, numpy as np
wd = os.getcwd()
# Load contract grid:
logMoneyness = pd.read_csv(wd + '\\data\\logMoneyness.txt', delimiter=",", header = None).values
expiries = pd.read_csv(wd + '\\data\\expiries.txt', delimiter=",", header = None).values
# Set useful parameters:
nIn = 12
nOut = 150
nXi = 9
# Load training data:
data_train = pd.read_csv(wd + '\\data\\training_and_test_data\\rheston\\rheston_training_data_2.csv', delimiter=",").values
x_train = data_train[:,:nIn]
y_train = data_train[:,nIn:nIn+nOut]
data_train = None
# Load test data:
data_test = pd.read_csv(wd + '\\data\\training_and_test_data\\rheston\\rheston_test_data_2.csv', delimiter=",").values
x_valid = data_test[:,:nIn]
y_valid = data_test[:,nIn:nIn+nOut]
data_test = None
# Normalise data:
from sklearn.preprocessing import StandardScaler
tmp1 = np.reshape(np.array([0.50,1.25,0.00]), (1, 3))
tmp2 = np.reshape(np.array([0.00,0.10,-1.00]), (1, 3))
ub = np.concatenate((tmp1,np.tile(1,(1,nXi))),1)
lb = np.concatenate((tmp2,np.tile(0.0025,(1,nXi))),1)
def myscale(x):
    """Map the first nIn raw inputs into [-1, 1] using the global bounds.

    Uses the module-level arrays ub/lb: each component is centred on the
    midpoint of its [lb, ub] interval and divided by the half-range.
    """
    scaled = np.zeros(nIn)
    for j in range(nIn):
        midpoint = (ub[0, j] + lb[0, j]) * 0.5
        half_range = (ub[0, j] - lb[0, j]) * 0.5
        scaled[j] = (x[j] - midpoint) / half_range
    return scaled
def myinverse(x):
    """Invert myscale: map scaled values in [-1, 1] back to raw parameters.

    Relies on the same module-level bound arrays ub/lb as myscale.
    """
    raw = np.zeros(nIn)
    for j in range(nIn):
        half_range = (ub[0, j] - lb[0, j]) * 0.5
        midpoint = (ub[0, j] + lb[0, j]) * 0.5
        raw[j] = x[j] * half_range + midpoint
    return raw
# Scale inputs:
x_train_mod = np.array([myscale(x) for x in x_train])
x_valid_mod = np.array([myscale(x) for x in x_valid])
# Scale and normalise output:
scale_y = StandardScaler()
y_train_mod = scale_y.fit_transform(y_train)
y_valid_mod = scale_y.transform(y_valid)
# -
# ### Define utility functions
# +
import keras
from keras.layers import Activation
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects
keras.backend.set_floatx('float64')
def GetNetwork(nIn,nOut,nNodes,nLayers,actFun):
    """Create a fully-connected feed-forward Keras model.

    nIn/nOut  -- input and output dimensions.
    nNodes    -- width of every hidden layer.
    nLayers   -- number of hidden layers.
    actFun    -- activation for the hidden layers; the output is linear.
    """
    inputs = keras.layers.Input(shape=(nIn,))
    # First hidden layer off the input, then stack the remaining nLayers-1.
    hidden = keras.layers.Dense(nNodes, activation=actFun)(inputs)
    for _ in range(nLayers - 1):
        hidden = keras.layers.Dense(nNodes, activation=actFun)(hidden)
    outputs = keras.layers.Dense(nOut, activation='linear')(hidden)
    return keras.models.Model(inputs=inputs, outputs=outputs)
def TrainNetwork(nn,batchsize,numEpochs,objFun,optimizer,xTrain,yTrain,xTest,yTest):
    """Compile and fit *nn*, returning the trained model together with the
    per-epoch training-loss and validation-loss histories."""
    nn.compile(loss=objFun, optimizer=optimizer)
    history = nn.fit(
        xTrain, yTrain,
        batch_size=batchsize,
        validation_data=(xTest, yTest),
        epochs=numEpochs,
        verbose=True,
        shuffle=1,
    )
    record = history.history
    return nn, record['loss'], record['val_loss']
def root_mean_squared_error(y_true, y_pred):
    """Keras loss: root of the mean squared residual between prediction and truth."""
    residual = y_pred - y_true
    return K.sqrt(K.mean(K.square(residual)))
# -
# ### Define and train neural network
# <span style="color:red">This section can be skipped! Just go straight to "Load network" and load the already trained model</span>
# +
# Define model:
model = GetNetwork(nIn,nOut,200,3,'elu')
# Set seed
import random
random.seed(455165)
# Train network
model,loss1,vloss1 = TrainNetwork(model,32,500,root_mean_squared_error,'adam',x_train_mod,y_train_mod,x_valid_mod,y_valid_mod)
model,loss2,vloss2 = TrainNetwork(model,5000,200,root_mean_squared_error,'adam',x_train_mod,y_train_mod,x_valid_mod,y_valid_mod)
# -
# ### Save network
# <span style="color:red">This section can be skipped! Just go straight to "Load network" and load the already trained model</span>
#
# +
# Save model:
model.save(wd + '\\data\\neural_network_weights\\rheston\\rheston_model_2.h5')
# Save weights (and scalings) in JSON format:
# - You need to install 'json-tricks' first.
# - We need this file for proper import into Matlab, R... etc.
weights_and_more = model.get_weights()
weights_and_more.append(0.5*(ub + lb))
weights_and_more.append(np.power(0.5*(ub - lb),2))
weights_and_more.append(scale_y.mean_)
weights_and_more.append(scale_y.var_)
import codecs, json
for idx, val in enumerate(weights_and_more):
weights_and_more[idx] = weights_and_more[idx].tolist()
json_str = json.dumps(weights_and_more)
text_file = open(wd + "\\data\\neural_network_weights\\rheston\\rheston_weights_2.json", "w")
text_file.write(json_str)
text_file.close()
# -
# ### Load network
# Load already trained neural network:
model = keras.models.load_model(wd + '\\data\\neural_network_weights\\rheston\\rheston_model_2.h5',
custom_objects={'root_mean_squared_error': root_mean_squared_error})
# ### Validate approximation
# +
# Specify test sample to plot:
sample_ind = 5006
# Print parameters of test sample:
print("Model Parameters (H,nu,rho,xi1,xi2,...): ",myinverse(x_valid_mod[sample_ind,:]))
import scipy, matplotlib.pyplot as plt
npts = 25
x_sample = x_valid_mod[sample_ind,:]
y_sample = y_valid_mod[sample_ind,:]
prediction = scale_y.inverse_transform(model.predict(x_valid_mod))
plt.figure(1,figsize=(14,12))
j = -1
for i in range(0,13):
j = j + 1
plt.subplot(4,4,j+1)
plt.plot(logMoneyness[i*npts:(i+1)*npts],y_valid[sample_ind,i*npts:(i+1)*npts],'b',label="True")
plt.plot(logMoneyness[i*npts:(i+1)*npts],prediction[sample_ind,i*npts:(i+1)*npts],'--r',label=" Neural network")
plt.title("Maturity=%1.3f "%expiries[i*npts])
plt.xlabel("log-moneyness")
plt.ylabel("Implied volatility")
plt.legend()
plt.tight_layout()
plt.show()
# -
# source notebook: code/neural_networks/train_rheston_2.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="f2824acc" colab={"base_uri": "https://localhost:8080/", "height": 439} outputId="31985400-eaa3-4068-bac4-4495d9c743b2"
import seaborn as sns
df = sns.load_dataset('titanic')
df
# + colab={"base_uri": "https://localhost:8080/"} id="zOdblSVh5BnN" outputId="c72a9ab0-a78d-4353-bf16-9e1a638e7ea6"
X = df[['fare', 'pclass']]
Y = df[['survived']]
X.shape, Y.shape
# + colab={"base_uri": "https://localhost:8080/"} id="EvKctgKqBVHI" outputId="00762688-f443-4652-f981-81d9a773d6b5"
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
X.shape
# + id="LSq4PZkrBjBB"
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, Y)
# + colab={"base_uri": "https://localhost:8080/"} id="te6ia1I1DCXI" outputId="e5f92d22-04ff-4057-f366-3ac93293b07f"
from sklearn.linear_model import LogisticRegression
logR = LogisticRegression()
logR.fit(x_train, y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="YwaZXSpQDYm5" outputId="3c428b16-a5d1-40d1-90b5-8c6c068161c7"
logR.score(x_train, y_train)
# + [markdown] id="Gagyc2RpFdoF"
# ##### 원핫 인코딩 스코어 : 0.7709580838323353
#
# ##### 낫 원핫 인코딩 스코어 : 0.6796407185628742
#
# + id="unoMZAapDegB"
# source notebook: 0702_ML16_titanic_without_onehot_encoding_LogR.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Building the best classifier based on below problem statement:
# <b>Problem: To predict whether a person will be interested in the company proposed Health plan/policy given the information about:
# * Demographics (city, age, region etc.)
# * Information regarding holding policies of the customer
# * Recommended Policy Information
# <b>This Solution comprises of the following sub divisions:
# * EDA (Exploratory Data Analysis)
# * Feature Engineering
# * Feature Selection
# * Scaling Dataset
# * Performing all these steps above for Test Data
# * Multiple Model Training
# * Predictions from Multiple Models
# * Finding best solution based on data and model estimation
# ## 1. Exploratory Data Analysis:
# ### Import required libraries:
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
pd.set_option('display.max_columns',None)
# ### Load data for visualization:
train_df = pd.read_csv('train_data.csv')
train_df.head()
train_df.columns
train_df.info()
# <b>From the above information, we could see that there are 3 different categories of data present:
# * int64 : Integer values for the 6 columns ID, Region_code, Upper_age, Lower_age, Reco_Policy_cat, Response. ID filed would be removed from further calculations as it does not contribute to the predictions
# * float64 : Floating values for the 2 columns Holding_Policy_Type, Reco_Policy_Premium
# * object : Categorical values for 6 columns City_Code, Accomodation_Type, Reco_Insurance_Type, Is_Spouse, Health Indicator, Holding_Policy_Duration.
train_df.describe()
# ### Categorical Variables:
cat_vars = [var for var in train_df.columns if train_df[var].dtypes=='O']
for var in cat_vars:
print(var)
# ### Missing Values:
mis_vars = [var for var in train_df.columns if train_df[var].isnull().sum()>0]
train_df[mis_vars].isnull().sum()
# ## 2. Feature Engineering:
# ### Implementing Mean & Mode Imputation:
train_df['Health Indicator'].mode()[0]
train_df['Health Indicator'] = train_df['Health Indicator'].fillna(train_df['Health Indicator'].mode()[0])
train_df['Health Indicator'].isnull().sum()
train_df.isnull().sum()
train_df['Holding_Policy_Duration'].head()
train_df['Holding_Policy_Duration'].mode()[0]
train_df['Holding_Policy_Duration'] = train_df['Holding_Policy_Duration'].fillna(train_df['Holding_Policy_Duration'].mode()[0])
train_df['Holding_Policy_Duration'].isnull().sum()
np.around(train_df['Holding_Policy_Type'].mean())
train_df['Holding_Policy_Type'] = train_df['Holding_Policy_Type'].fillna(np.around(train_df['Holding_Policy_Type'].mean()))
train_df['Holding_Policy_Type'].isnull().sum()
# Converting Holding_Policy_Duration to float data: strip the trailing '+'
# from open-ended durations (e.g. '14+') before casting.
# BUG FIX: pass regex=True explicitly -- pandas >= 2.0 changed the default
# of Series.str.replace to regex=False, under which the pattern '[+]'
# is taken literally and nothing is stripped, breaking astype(float).
train_df.Holding_Policy_Duration = train_df.Holding_Policy_Duration.str.replace('[+]', '', regex=True)
train_df['Holding_Policy_Duration'].head()
train_df['Holding_Policy_Duration'] = train_df['Holding_Policy_Duration'].astype(float)
[var for var in train_df.columns if train_df[var].isnull().sum()>0]
# ### Numerical Variables:
# +
num_vars = [var for var in train_df.columns if train_df[var].dtypes!='O']
for var in num_vars:
print(var)
# -
train_df[num_vars].head()
final_cat_vars = [var for var in train_df.columns if train_df[var].dtypes=='O']
for var in final_cat_vars:
print(train_df[var].value_counts())
# ### Label Encoding categorical variables:
from sklearn.preprocessing import LabelEncoder
final_cat_vars
# +
city_code_le = LabelEncoder()
city_code_labels = city_code_le.fit_transform(train_df['City_Code'])
city_code_mappings = {index: label for index, label in
enumerate(city_code_le.classes_)}
print(city_code_mappings)
Accomodation_Type_le = LabelEncoder()
Accomodation_Type_labels = Accomodation_Type_le.fit_transform(train_df['Accomodation_Type'])
Accomodation_Type_mappings = {index: label for index, label in
enumerate(Accomodation_Type_le.classes_)}
print(Accomodation_Type_mappings)
Reco_Insurance_Type_le = LabelEncoder()
Reco_Insurance_Type_labels = Reco_Insurance_Type_le.fit_transform(train_df['Reco_Insurance_Type'])
Reco_Insurance_Type_mappings = {index: label for index, label in
enumerate(Reco_Insurance_Type_le.classes_)}
print(Reco_Insurance_Type_mappings)
Is_Spouse_le = LabelEncoder()
Is_Spouse_labels = Is_Spouse_le.fit_transform(train_df['Is_Spouse'])
Is_Spouse_mappings = {index: label for index, label in
enumerate(Is_Spouse_le.classes_)}
print(Is_Spouse_mappings)
Health_Indicator_le = LabelEncoder()
Health_Indicator_labels = Health_Indicator_le.fit_transform(train_df['Health Indicator'])
Health_Indicator_mappings = {index: label for index, label in
enumerate(Health_Indicator_le.classes_)}
print(Health_Indicator_mappings)
# -
train_df['City_Code_Labels'] = city_code_labels
train_df['Accomodation_Type_Labels'] = Accomodation_Type_labels
train_df['Reco_Insurance_Type_Labels'] = Reco_Insurance_Type_labels
train_df['Is_Spouse_Labels'] = Is_Spouse_labels
train_df['Health_Indicator_Labels'] = Health_Indicator_labels
train_df.head()
train_df.columns
# ## 3. Feature Selection:
train_df_1 = train_df.copy()
train_df_2 = train_df.copy()
# ### Dropping categorical variables:
train_df_1.drop(columns=['ID','City_Code','Accomodation_Type','Reco_Insurance_Type','Is_Spouse','Health Indicator'],axis=1,inplace=True)
train_df_1.head()
train_df_2.drop(columns=['ID','City_Code','Accomodation_Type','Reco_Insurance_Type','Is_Spouse','Health Indicator','City_Code_Labels','Region_Code','Reco_Policy_Cat'],axis=1,inplace=True)
train_df_2.head()
# ## 4. Scaling Dataset:
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler2 = MinMaxScaler()
X_train_1 = train_df_1.drop(['Response'],axis=1).values
y_train_1 = train_df_1['Response'].values
X_train_1.shape, y_train_1.shape
X_train_2 = train_df_2.drop(['Response'],axis=1).values
y_train_2 = train_df_2['Response'].values
X_train_2.shape, y_train_2.shape
X_train_1 = scaler.fit_transform(X_train_1)
X_train_2 = scaler2.fit_transform(X_train_2)
# ## 5. Repeating all operations for Test Data:
# Loading dataset
test_df = pd.read_csv('test_data.csv')
test_df.head()
test_df.info()
test_df.columns
# Variables with missing data
test_mis_vars = [var for var in test_df.columns if test_df[var].isnull().sum()>0]
test_df[test_mis_vars].isnull().sum()
# Mode Imputation for missing categorical data:
test_df['Health Indicator'] = test_df['Health Indicator'].fillna(test_df['Health Indicator'].mode()[0])
test_df['Holding_Policy_Duration'] = test_df['Holding_Policy_Duration'].fillna(test_df['Holding_Policy_Duration'].mode()[0])
test_df['Holding_Policy_Type'] = test_df['Holding_Policy_Type'].fillna(np.around(test_df['Holding_Policy_Type'].mean()))
test_df.isnull().sum()
# Converting Holding_Policy_Duration to float: strip the trailing '+'
# from open-ended durations (e.g. '14+') before casting.
# BUG FIX: pass regex=True explicitly -- pandas >= 2.0 changed the default
# of Series.str.replace to regex=False, under which '[+]' is literal and
# nothing is stripped, breaking the astype(float) below.
test_df.Holding_Policy_Duration = test_df.Holding_Policy_Duration.str.replace('[+]', '', regex=True)
test_df['Holding_Policy_Duration'] = test_df['Holding_Policy_Duration'].astype(float)
test_df['Holding_Policy_Duration'].head()
# Categorical variables in test data:
test_cat_vars = [var for var in test_df.columns if test_df[var].dtypes=='O']
test_cat_vars
# +
# Label Encoding for categorical variables:
test_City_Code_le = LabelEncoder()
test_City_Code_labels = test_City_Code_le.fit_transform(test_df['City_Code'])
test_City_Code_mappings = {index: label for index, label in
enumerate(test_City_Code_le.classes_)}
print(test_City_Code_mappings)
test_Accomodation_Type_le = LabelEncoder()
test_Accomodation_Type_labels = test_Accomodation_Type_le.fit_transform(test_df['Accomodation_Type'])
test_Accomodation_Type_mappings = {index: label for index, label in
enumerate(test_Accomodation_Type_le.classes_)}
print(test_Accomodation_Type_mappings)
test_Reco_Insurance_Type_le = LabelEncoder()
test_Reco_Insurance_Type_labels = test_Reco_Insurance_Type_le.fit_transform(test_df['Reco_Insurance_Type'])
test_Reco_Insurance_Type_mappings = {index: label for index, label in
enumerate(test_Reco_Insurance_Type_le.classes_)}
print(test_Reco_Insurance_Type_mappings)
test_Is_Spouse_le = LabelEncoder()
test_Is_Spouse_labels = test_Is_Spouse_le.fit_transform(test_df['Is_Spouse'])
test_Is_Spouse_mappings = {index: label for index, label in
enumerate(test_Is_Spouse_le.classes_)}
print(test_Is_Spouse_mappings)
test_Health_Indicator_le = LabelEncoder()
test_Health_Indicator_labels = test_Health_Indicator_le.fit_transform(test_df['Health Indicator'])
test_Health_Indicator_mappings = {index: label for index, label in
enumerate(test_Health_Indicator_le.classes_)}
print(test_Health_Indicator_mappings)
# -
test_df['City_Code_Labels'] = test_City_Code_labels
test_df['Accomodation_Type_Labels'] = test_Accomodation_Type_labels
test_df['Reco_Insurance_Type_Labels'] = test_Reco_Insurance_Type_labels
test_df['Is_Spouse_Labels'] = test_Is_Spouse_labels
test_df['Health_Indicator_Labels'] = test_Health_Indicator_labels
test_df.head()
test_df.info()
# Dropping Categorical Variables from test data:
test_df_1 = test_df.copy()
test_df_2 = test_df.copy()
test_df_1.drop(columns=['ID','City_Code','Accomodation_Type','Reco_Insurance_Type','Is_Spouse','Health Indicator'],axis=1,inplace=True)
test_df_1.head()
test_df_2.drop(columns=['ID','City_Code','Accomodation_Type','Reco_Insurance_Type','Is_Spouse','Health Indicator','City_Code_Labels','Region_Code','Reco_Policy_Cat'],axis=1,inplace=True)
test_df_2.head()
X_test_1 = test_df_1.values
X_test_2 = test_df_2.values
X_test_1 = scaler.transform(X_test_1)
X_test_2 = scaler2.transform(X_test_2)
# ### Checking for Imbalanced Data:
train_df['Response'].value_counts()
# The data above clearly shows 75% values belong to majority class while 25% belong to minority class.
# * Hence, this is an example of Imbalanced Dataset
# * We will use techniques like Bagging, Boosting, Over & Under sampling & Hybrid models to tackle the predictions
# <b>We will compare the performance of:
# * just re-sampling
# * just boosting or bagging
# * bagging + resampling
# * boosting + resampling
# * bagging + boosting + resampling
# ## 6. Multiple Model creation & training:
# ### Ensemble Algorithms: With & without resampling:
# +
from sklearn.ensemble import (
RandomForestClassifier,
AdaBoostClassifier,
)
from imblearn.ensemble import (
BalancedRandomForestClassifier,
RUSBoostClassifier,
)
from sklearn.metrics import roc_auc_score
from collections import Counter
# +
# function to train ada boost and evaluate performance
def run_adaboost(X_train, X_test, y_train):
    """Fit an AdaBoost classifier on the training data, print its
    train-set ROC-AUC, and return hard-label predictions for X_test."""
    booster = AdaBoostClassifier(n_estimators=200, random_state=100)
    booster.fit(X_train, y_train)
    print('Train set')
    train_scores = booster.predict_proba(X_train)
    # ROC-AUC is computed from the positive-class probability column.
    print(
        'AdaBoost roc-auc: {}'.format(roc_auc_score(y_train, train_scores[:, 1])))
    return booster.predict(X_test)
# -
# train model and store result
ada_preds = run_adaboost(X_train_1, X_test_1, y_train_1)
print()
ada_preds_data = pd.DataFrame({'ID':test_df.ID, 'Response':ada_preds})
ada_preds_data.head()
ada_preds_data.to_csv('Imbalanced-Ada-Boost.csv',index=False)
# ### Random Under Sampling:
class_0, class_1 = train_df_1['Response'].value_counts()
print(class_0, class_1)
# +
# import library
from imblearn.under_sampling import RandomUnderSampler
rus = RandomUnderSampler(random_state=42, replacement=True)# fit predictor and target variable
x_rus, y_rus = rus.fit_resample(X_train_1, y_train_1)
print('original dataset shape:', Counter(y_train_1))
print('Resample dataset shape', Counter(y_rus))
# -
def run_random_forest(X_train, X_test, y_train, n_est, max_dep):
    """Fit a RandomForest (n_est trees, max depth max_dep), print its
    train-set ROC-AUC, and return hard-label predictions for X_test."""
    forest = RandomForestClassifier(
        n_estimators=n_est, random_state=40, max_depth=max_dep, n_jobs=4)
    forest.fit(X_train, y_train)
    print('Train set')
    train_scores = forest.predict_proba(X_train)
    # ROC-AUC is computed from the positive-class probability column.
    print(
        'Random Forests roc-auc: {}'.format(roc_auc_score(y_train, train_scores[:, 1])))
    return forest.predict(X_test)
# Model training and predictions:
random_forest_model = run_random_forest(x_rus, X_test_1, y_rus, 500, 3)
# ### Random OverSampling:
# +
from imblearn.over_sampling import RandomOverSampler
ros = RandomOverSampler(random_state=42)
# fit predictor and target variable
x_ros, y_ros = ros.fit_resample(X_train_1, y_train_1)
print('Original dataset shape', Counter(y_train_1))
print('Resample dataset shape', Counter(y_ros))
# -
random_forest_model_ros = run_random_forest(x_ros, X_test_1, y_ros, 500, 3)
# ### SMOTE:
# +
from imblearn.over_sampling import SMOTE
smote = SMOTE()
# fit predictor and target variable
x_smote, y_smote = smote.fit_resample(X_train_1, y_train_1)
print('Original dataset shape', Counter(y_train_1))
print('Resample dataset shape', Counter(y_smote))
# -
random_forest_model_smote = run_random_forest(x_smote, X_test_1, y_smote, 200, 5)
# best model => run_random_forest(x_smote, X_test_1, y_smote, 500, 30) =>But Overfitting
random_forest_model_smote_data = pd.DataFrame({'ID':test_df.ID, 'Response':random_forest_model_smote})
random_forest_model_smote_data.to_csv('Smote-Random-Forest-Preds.csv',index=False)
random_forest_model_smote_data.head()
new_ada_preds = run_adaboost(x_smote, X_test_1, y_smote)
new_ada_preds_data = pd.DataFrame({'ID':test_df.ID, 'Response':new_ada_preds})
new_ada_preds_data.to_csv('New-AdaBoost-Preds.csv',index=False)
new_ada_preds_data.head()
# source notebook: Health-Insurance-Predictions-EDA-ML-Model.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + slideshow={"slide_type": "skip"}
"""
IPython Notebook v4.0 para python 2.7
Librerías adicionales: Ninguna.
Contenido bajo licencia CC-BY 4.0. Código bajo licencia MIT. (c) <NAME>.
"""
# Configuracion para recargar módulos y librerías
# %reload_ext autoreload
# %autoreload 2
from IPython.core.display import HTML
HTML(open("style/iwi131.css", "r").read())
# + [markdown] slideshow={"slide_type": "slide"}
# <header class="w3-container w3-teal">
# <img src="images/utfsm.png" alt="" align="left"/>
# <img src="images/inf.png" alt="" align="right"/>
# </header>
# <br/><br/><br/><br/><br/>
# # IWI131
# ## Programación de Computadores
#
# ### <NAME>
#
# http://progra.usm.cl/
#
# https://www.github.com/usantamaria/iwi131
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Soluciones a Certamen 2, 1S 2014, Casa Central
# + [markdown] slideshow={"slide_type": "slide"}
# ## Pregunta 1 [25%]
# (a) Realice el ruteo de los siguientes programas e indique qué es lo que imprimen.
# Cada vez que el valor de una variable cambie, escríbalo en una nueva fila de la tabla.
# Recuerde que si una variable es de tipo string, debe colocar su valor entre comillas simples ’ ’.
# Si una variable almacena una función coloque el nombre de ésta como valor (sin comillas).
#
# <img src="images/2014.png" alt="" align="middle"/>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Pregunta 1.b : Impresiones
# Indique lo que imprimen los siguientes programas.
# + slideshow={"slide_type": "slide"}
r,s = (2014,3,12),(2014,1,1)
t = (2014,2,1)
print r > s and s < t
# + slideshow={"slide_type": "slide"}
# DIGRESION: COMPARACION DE TUPLAS DEL MISMO LARGO
# Se verifican elementos en orden.
# El primer elemento que sea mayor, gana.
t1 = (0,1,2,3,4)
t2 = (10,0,0,0)
print t1<t2
# + slideshow={"slide_type": "-"}
# DIGRESION: COMPARACION DE TUPLAS DE DISTINTO LARGO
# Se verifican elementos en orden.
# El primer elemento que sea mayor, gana
t1 = (0,1,2,3)
t2 = (0,1,2,3,4,5)
print t1<t2
# + slideshow={"slide_type": "slide"}
w = {'uno':[1,3],'dos':[2,4],
'tres':[3,6]}
print w['uno'] + w['tres']
# + slideshow={"slide_type": "slide"}
def funcion1(a):
    # Reverse the list *a* in place, then hand the same object back.
    a[:] = a[::-1]
    return a
x = {1:[1, 0], 0:[0, 1]}
r = funcion1(x[0])[1]
print r
# + slideshow={"slide_type": "slide"}
def funcion2(x):
    # Recursively reverse *x*: a single character is its own reverse;
    # otherwise, last character first followed by the reversed prefix.
    if len(x) == 1:
        return x
    return x[-1] + funcion2(x[:-1])
print funcion2('FTW')
# + [markdown] slideshow={"slide_type": "slide"}
# ## Pregunta 2 [35%]
# El servicio de inteligencia de la UTFSM ha detectado una amenaza inminente a sus
# instalaciones por parte de un grupo terrorista que busca impedir que la universidad se convierta en el mejor centro educacional del mundo. Dada la gravedad de esta amenaza, se ha solicitado que la división de agentes “IWI-131” analice los datos obtenidos por los infiltrados que el servicio de inteligencia posee en otras universidades.
#
# Los datos con los que se trabajará se encuentran en un diccionario llamado `terroristas` (variable global) que tiene por llave el identificador de cada terrorista y, por valor, una lista de tuplas que indica las universidades en las que ha sido visto el terrorista junto con la fecha correspondiente.
#
# ```Python
# terroristas = {
# 2352: [('Stanfox', '2010-05-02'),
# ('Hardyard', '2010-06-07'),
# ('<NAME>', '2010-05-02')],
# 1352: [('Stanfox', '2010-05-02'),
# ('Stanfox', '2011-06-08')],
# 352: [('Hardyard', '2009-03-03')],
# 22: [('<NAME>', '2012-11-16')]}
# ```
#
# Un diccionario llamado experticias (variable global) que tiene por llave el identificador de cada terrorista y, por valor, la experticia de dicho terrorista.
#
# ```Python
# experticias = { 2352:'TNT', 1352:'TNT',
# 352:'rayos laser', 22:'teletransportacion'}
# ```
#
# + slideshow={"slide_type": "slide"}
# CARGAR LOS DATOS
terroristas = {
2352: [('Stanfox', '2010-05-02'),
('Hardyard', '2010-06-07'),
('<NAME>', '2010-05-02')],
1352: [('Stanfox', '2010-05-02'),
('Stanfox', '2011-06-08')],
352: [('Hardyard', '2009-03-03')],
22: [('<NAME>', '2012-11-16')]}
experticias = { 2352:'TNT', 1352:'TNT',
352:'rayos laser', 22:'teletransportacion'}
# + [markdown] slideshow={"slide_type": "slide"}
#
# ## Pregunta 2.a
# Desarrolle la función `terroristas_se_conocen(terrorista1, terrorista2)` que reciba
# como parámetros los identificadores de dos terroristas y que retorne `True` si ambos se conocen o `False` si no. Dos terroristas se conocen si ambos han sido vistos en el mismo lugar en la misma fecha.
#
# ```Python
# >>> terroristas_se_conocen(2352, 1352)
# True
# >>> terroristas_se_conocen(2352, 352)
# False
# ```
#
# ***Estrategia de solución:***
# * ¿Qué estructura tienen los datos de entrada?
# * ¿Que estructura deben tener los datos de salida?
# * ¿Cómo proceso los inputs para generar el output deseado?
# + slideshow={"slide_type": "slide"}
def terroristas_se_conocen(terrorista1, terrorista2):
    # Two terrorists know each other when their (place, date) sighting
    # sets intersect: same place, same day. Reads the global `terroristas`.
    avistamientos1 = set(terroristas[terrorista1])
    avistamientos2 = set(terroristas[terrorista2])
    return bool(avistamientos1 & avistamientos2)
print terroristas_se_conocen(2352, 1352)
print terroristas_se_conocen(2352, 352)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Pregunta 2.b
# Desarrolle la función `terroristas_que_han_estado_en(universidad)` que reciba como
# parámetro el nombre de una universidad y que retorne un conjunto conformado por los identificadores de los terroristas que han sido visto en la universidad ingresada como parámetro.
#
# ```Python
# >>> terroristas_que_han_estado_en('Stanfox')
# set([1352, 2352])
# >>> terroristas_que_han_estado_en('Prinxton')
# set([])
# ```
#
# ***Estrategia de solución:***
# * ¿Qué estructura tienen los datos de entrada?
# * ¿Que estructura deben tener los datos de salida?
# * ¿Cómo proceso los inputs para generar el output deseado?
# + slideshow={"slide_type": "slide"}
def terroristas_que_han_estado_en(universidad):
    # Collect the id of every terrorist with at least one sighting at the
    # given university; the date of the sighting is irrelevant.
    vistos = set()
    for tid, avistamientos in terroristas.items():
        if any(lugar == universidad for lugar, _fecha in avistamientos):
            vistos.add(tid)
    return vistos
print terroristas_que_han_estado_en('Stanfox')
print terroristas_que_han_estado_en('Prinxton')
# + [markdown] slideshow={"slide_type": "slide"}
# ## Pregunta 2.c
# Desarrolle la función `terroristas_clave()` que retorne una lista de tuplas con los identificadores de los terroristas claves e informe si cada uno de ellos pertenece (`True`) o no (`False`) a una ”sleeper cell”.
#
# * Se considera que un terrorista es clave si es el único
# que posee cierta experticia.
# * Se considera que un terrorista pertenece a una ”sleeper cell” si es que no conoce a ningún
# otro terrorista.
#
# ```Python
# >>> terroristas_clave()
# [(22, True), (352, True)]
# ```
#
# ***Estrategia de solución:***
# * ¿Qué estructura tienen los datos de entrada?
# * ¿Que estructura deben tener los datos de salida?
# * ¿Cómo proceso los inputs para generar el output deseado?
# + slideshow={"slide_type": "slide"}
def sleeper_call(terrorista_id):
    # A terrorist belongs to a "sleeper cell" when he knows no other
    # terrorist in the database (see terroristas_se_conocen).
    for otro in terroristas:
        if otro == terrorista_id:
            continue
        if terroristas_se_conocen(otro, terrorista_id):
            return False
    return True
def terroristas_clave():
    """Return [(id, in_sleeper_cell)] for every "key" terrorist.

    A terrorist is key when nobody else shares their expertise (global
    `experticias` dict: id -> expertise); the boolean flags membership in a
    sleeper cell, as decided by `sleeper_call`.
    """
    # Invert the expertise mapping: expertise -> list of terrorist ids.
    por_experticia = {}
    for tid, exp in experticias.items():
        por_experticia.setdefault(exp, []).append(tid)
    # Key terrorists are the sole holders of an expertise.
    claves = [ids[0] for ids in por_experticia.values() if len(ids) == 1]
    return [(tid, sleeper_call(tid)) for tid in claves]
terroristas_clave()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Pregunta 3 [35%]
# La gran maratón de Chago City es una de las carreras más importantes a nivel mundial.
# Debido a la gran cantidad de competidores que reúne este evento, se han generado las siguientes estructuras para ayudar con la organización.
#
# * Diccionario con los inscritos, donde se almacena el número del corredor y los datos de éste.
#
# ```Python
# inscritos = { #num_corredor: (rut,nombre,apellido,id_categoria,edad)
# 1001: ('1111111-2', 'Carlos', 'Caszely', 2, 55),
# 1002: ('223244-4', 'Marcelo', 'Rios', 3, 45),
# 2129: ('3838292-1', 'Ivan', 'Zamorano', 4, 38),
# 4738: ('5940301-2', 'Erika', 'Olivera', 5, 48),
# 8883: ('3843993-1', 'Condor', 'ito', 3, 22),
# 231: ('9492922-2', 'Pepe', 'Antartico', 3, 30)
# }
# ```
#
# * Diccionario con las categorías, donde para cada identificador de categoría se almacena la distancia a correr y el premio asociado.
#
# ```Python
# categorias = { # id_categoria: (distancia, premio)
# 1: ('1k', 10000),
# 2: ('5k', 20000),
# 3: ('10k', 450000),
# 4: ('21k', 100000),
# 5: ('42k', 250000)
# }
# ```
#
# * Lista de resultados, donde se registra el número del corredor y el tiempo que logró.
# ```Python
# # [ (num_corredor, tiempo) ]
# resultados = [(1001, '00:30:12'), (1002, '00:55:43'),
# (2129, '01:45:23'), (4738, '03:05:09'),
# (8883, '00:31:33'), (231, '00:39:45')]
# ```
# + slideshow={"slide_type": "slide"}
# LOAD DATA: example structures for the marathon exercise (see statement above).
inscritos = {
    # num_corredor: (rut, nombre, apellido, id_categoria, edad)
    1001: ('1111111-2', 'Carlos', 'Caszely', 2, 55),
    1002: ('223244-4', 'Marcelo', 'Rios', 3, 45),
    2129: ('3838292-1', 'Ivan', 'Zamorano', 4, 38),
    4738: ('5940301-2', 'Erika', 'Olivera', 5, 48),
    8883: ('3843993-1', 'Condor', 'ito', 3, 22),
    231: ('9492922-2', 'Pepe', 'Antartico', 3, 30),
}
categorias = {
    # id_categoria: (distancia, premio)
    1: ('1k', 10000),
    2: ('5k', 20000),
    3: ('10k', 450000),
    4: ('21k', 100000),
    5: ('42k', 250000),
}
# List of (num_corredor, tiempo 'HH:MM:SS') results.
resultados = [
    (1001, '00:30:12'), (1002, '00:55:43'),
    (2129, '01:45:23'), (4738, '03:05:09'),
    (8883, '00:31:33'), (231, '00:39:45'),
]
# + slideshow={"slide_type": "slide"}
# OBSERVATION: the results come as a list of (runner, time) pairs.
resultados = [(1001, '00:30:12'), (1002, '00:55:43'),
              (2129, '01:45:23'), (4738, '03:05:09'),
              (8883, '00:31:33'), (231, '00:39:45')]
# Looking up one runner (e.g. 4738) in a list is O(n); converting the pair
# list to a dict gives O(1) lookup by runner number.
resultados_dict = dict(resultados)
# Fix: these were Python 2 print statements; parenthesised single-argument
# prints produce identical output in Python 2 and are valid Python 3.
print(resultados_dict[2129])
print(resultados_dict[231])
# + slideshow={"slide_type": "slide"}
# This works thanks to a pleasant symmetry in Python:
diccio = {"zero":0, "uno":1,"dos":2,"tres":3,"cuatro":4}
print(diccio)
# NOTE: in Python 2 .items() returns a list; in Python 3 it is a dict view.
l = diccio.items()
print(l)
d = dict(l)
print(d)
# That is, any list of the form [(key1, val1), ..., (keyn, valn)]
# can be turned into the dictionary {key1: val1, ..., keyn: valn}.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Pregunta 3.a
# Desarrolle la función `competidores_edad(inscritos, categorias, min_edad, max_edad)`
# que reciba el diccionario `inscritos`, el diccionario `categorias` y los valores enteros `min_edad` y `max_edad` (que representan la máxima y mínima edad).
#
# La función debe retornar una lista de tuplas de todos los competidores que se encuentren entre la edad mínima y máxima (incluyéndolos), donde cada tupla contenga el nombre, apellido y la distancia a correr de un individuo.
#
# ```Python
# >>> competidores_edad(inscritos,categorias,25,40)
# [('Pepe', 'Antartico', '10k'), ('Ivan', 'Zamorano', '21k')]
# ```
#
# ***Estrategia de solución:***
# * ¿Qué estructura tienen los datos de entrada?
# * ¿Que estructura deben tener los datos de salida?
# * ¿Cómo proceso los inputs para generar el output deseado?
# + slideshow={"slide_type": "slide"}
def competidores_edad(inscritos, categorias, min_edad, max_edad):
    """Return [(nombre, apellido, distancia)] for runners within the age range.

    Both bounds are inclusive. `inscritos` maps runner number to
    (rut, nombre, apellido, id_categoria, edad); `categorias` maps category
    id to (distancia, premio).
    """
    seleccionados = []
    for _rut, nombre, apellido, id_cat, edad in inscritos.values():
        if min_edad <= edad <= max_edad:
            distancia = categorias[id_cat][0]
            seleccionados.append((nombre, apellido, distancia))
    return seleccionados
print competidores_edad(inscritos,categorias,25,40)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Pregunta 3.b
#
# Desarrolle la función `tiempo_competidor(inscritos, resultados, rut)` que reciba el
# diccionario `inscritos`, la lista de tuplas `resultados` y el string `rut`.
# La función debe retornar el tiempo, como cadena de texto, de un competidor en particular.
#
# ```Python
# >>> tiempo_competidor(inscritos,resultados,'9492922-2')
# '00:39:45'
# ```
#
# ***Estrategia de solución:***
# * ¿Qué estructura tienen los datos de entrada?
# * ¿Que estructura deben tener los datos de salida?
# * ¿Cómo proceso los inputs para generar el output deseado?
# + slideshow={"slide_type": "slide"}
def obtener_numero_corredor(inscritos, rut_buscado):
    """Return the runner number whose RUT equals `rut_buscado`.

    Prints "Not found" and returns "" when no registered runner matches.
    """
    for num_corredor, datos in inscritos.items():
        if datos[0] == rut_buscado:  # datos[0] is the RUT
            return num_corredor
    # Fix: was a Python 2 print statement; the parenthesised single-argument
    # call prints the same text in Python 2 and is valid Python 3.
    print("Not found")
    return ""
def tiempo_competidor(inscritos, resultados, rut):
    """Return the finish time ('HH:MM:SS' string) of the runner with `rut`.

    Looks up the runner number via `obtener_numero_corredor`, then converts
    the (runner, time) pair list into a dict for O(1) lookup.
    """
    num_corredor = obtener_numero_corredor(inscritos, rut)
    resultados_dict = dict(resultados)
    return resultados_dict[num_corredor]
# Fix: was a Python 2 print statement; parenthesised call is version-neutral.
print(tiempo_competidor(inscritos,resultados,'9492922-2'))
# + [markdown] slideshow={"slide_type": "slide"}
# ## Pregunta 3.c
#
# Desarrolle la función `ganador_categoria(inscritos, categorias, resultados, distancia)` que reciba el diccionario `inscritos`, el diccionario `categorias`, la lista de tuplas
# `resultados` y el string `distancia`. La función debe retornar una tupla con el ganador de la categoría, indicando el nombre, apellido y premio obtenido.
#
#
# ```Python
# >>> ganador_categoria(inscritos, categorias, resultados, '10k')
# ('Condor', 'ito', 450000)
# ```
#
# ***Estrategia de solución:***
# * ¿Qué estructura tienen los datos de entrada?
# * ¿Que estructura deben tener los datos de salida?
# * ¿Cómo proceso los inputs para generar el output deseado?
# + slideshow={"slide_type": "slide"}
def tiempo_en_segundos(tiempo_string):
    """Convert a 'HH:MM:SS' time string to an integer number of seconds.

    Parses via split(':') instead of fixed-position slicing, so it also
    accepts hour fields wider than two digits (e.g. '100:00:01'), which the
    original slicing version silently mis-parsed.
    """
    horas, minutos, segundos = (int(campo) for campo in tiempo_string.split(':'))
    return horas * 3600 + minutos * 60 + segundos
def ganador_categoria(inscritos, categorias, resultados, distancia):
    """Return (nombre, apellido, premio) of the fastest runner at `distancia`.

    If the distance does not exist, no runner matches category id 0 and the
    tuple ("", "", 0) is returned, mirroring the original behaviour.
    """
    # Locate the category id and prize for the requested distance
    # (keeps the last match, as the loop does not break).
    id_distancia, premio_distancia = 0, 0
    for idc, (dist, premio) in categorias.items():
        if dist == distancia:
            id_distancia, premio_distancia = idc, premio
    # Pair-list -> dict for O(1) lookup of each runner's time.
    tiempos = dict(resultados)
    # Scan the registered runners of that category for the smallest time.
    menor_tiempo = float("inf")
    ganador = ("", "")
    for num_corredor, (_rut, nombre, apellido, id_cat, _edad) in inscritos.items():
        if id_cat == id_distancia:
            segundos = tiempo_en_segundos(tiempos[num_corredor])
            if segundos < menor_tiempo:
                menor_tiempo = segundos
                ganador = (nombre, apellido)
    return (ganador[0], ganador[1], premio_distancia)
ganador_categoria(inscritos, categorias, resultados, '10k')
| ipynb/21-EjerciciosDeCertamen/Certamen2_2014_1S_CC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# + deletable=true editable=true
library(SNPRelate)
library(reshape2)
library(ggplot2)
library(Cairo)
# + [markdown] deletable=true editable=true
# # Format conversion
#
# SNPRelate requires gds, so convert the vcf to GDS format
# + deletable=true editable=true
#system("rm -f flowers.gds")
#snpgdsVCF2GDS("bwa_msdr_MR_ih_lc_nr503_F.vcf.gz", "flowers.gds",
# ignore.chr.prefix = c("scaffold_", "chromosome_"))
# + [markdown] deletable=true editable=true
# # PCA
#
# Flowers et al. state they used SNPrelate to perform PCA decomposition. Here we use default parameters on Flower's filtered VCF.
# + deletable=true editable=true
# snpgdsSummary("flowers.gds")
# + deletable=true editable=true
# Open the GDS genotype file converted from Flowers et al.'s filtered VCF.
geno <- snpgdsOpen("flowers.gds")
# + deletable=true editable=true
# PCA with SNPRelate defaults (matching the paper's stated approach); 12 threads.
pca <- snpgdsPCA(geno, num.thread=12, verbose = T)
# + [markdown] deletable=true editable=true
# # Plot
# + [markdown] deletable=true editable=true
# The names of lines in the VCF do not match what is given in the SRA database. Our metadata table (from the SRA) has line IDs like CC-1010, whereas the VCF has CR1010. The below converts VCF names to SRA names.
# + deletable=true editable=true
# VCF line names look like CR1010; SRA metadata uses CC-1010. Normalise.
sra_names = sub("CR", "CC-", pca$sample.id)
# + [markdown] deletable=true editable=true
# Import metadata from the kWIP analysis under 'writeups'
# + deletable=true editable=true
chlamy_meta = read.delim("../chlamy/chlamy_meta.tab")
# + [markdown] deletable=true editable=true
# Note that all the "sra names" from above conversion match the names in the SRA metadata
# + deletable=true editable=true
# Index of each VCF sample in the metadata table (NA would mean a mismatch).
m = match(sra_names, chlamy_meta$strain)
m
# + [markdown] deletable=true editable=true
# Reorder the metadata, assert the names match
# + deletable=true editable=true
# Reorder metadata rows into VCF sample order; the print is a visual check
# that strain and sra name agree pairwise.
chlamy_meta = chlamy_meta[m, ]
print(paste(chlamy_meta$strain, sra_names))
# + [markdown] deletable=true editable=true
# Assemble all data & metadata for plotting
# + deletable=true editable=true
# One row per sample: ids, origin region, sequencing depth, first 3 PCs.
plotdat = data.frame(sample=pca$sample.id,
                     sraname=sra_names,
                     region=chlamy_meta$origin,
                     mbases=chlamy_meta$MBases,
                     PC1=pca$eigenvect[,1],
                     PC2=pca$eigenvect[,2],
                     PC3=pca$eigenvect[,3])
# + deletable=true editable=true
# Quick-look PCA scatter coloured by geographic region.
ggplot(plotdat, aes(x=PC1, y=PC2)) +
    geom_point(aes(colour=region)) +
    theme_bw()
# + [markdown] deletable=true editable=true
# The above plot is upside-down from the flowers et al. plot. Reverse PC2 and try again
# + deletable=true editable=true
# PCA axes have arbitrary sign; flipping PC2 matches the published figure.
plotdat$PC2 = -plotdat$PC2
# + [markdown] deletable=true editable=true
# ### Proper plot
# + deletable=true editable=true
# Publication-style plot: fixed region colours, no grid.
cols = c("light blue", "blue", "dark green", "red" )
p = ggplot(plotdat, aes(x=PC1, y=PC2)) +
    geom_point(aes(colour=region), size=2) +
    scale_color_manual(values = cols, name="Region") +
    ggtitle("SNPrelate") +
    theme_bw() +
    theme(panel.grid = element_blank()
          #, axis.text = element_blank(), axis.ticks = element_blank()
         )
print(p)
# + deletable=true editable=true
# Save the figure as both PDF and SVG.
pdf("chlamy_snprelate.pdf", width=4, height=3)
print(p)
dev.off()
svg("chlamy_snprelate.svg", width=4, height=3)
print(p)
dev.off()
# + [markdown] deletable=true editable=true
# # SNP IBS <-> kWIP correlation
#
# Calculate the IBS matrix for comparison to kWIP's matrix (1-IBS ~ WIP)
#
# (Old code -- SNP Distance (IBD))
#
#
# ```R
# ibd <- snpgdsIBDMoM(geno, maf=0.05, missing.rate=0.05, num.thread=12)
# r = acast(snpgdsIBDSelection(ibd), ID1 ~ ID2, value.var = "kinship")
# r[lower.tri(r)] = t(r)[lower.tri(r)]
# write.table(r, "kinship.mat", sep="\t", quote=F)
# ```
# + deletable=true editable=true
# Identity-by-state matrix from the genotypes; rename samples to SRA style
# so rows/columns can be matched against the kWIP matrices.
ibs = snpgdsIBS(geno)
ibs.m = ibs$ibs
rownames(ibs.m) = colnames(ibs.m) = sub("CR", "CC-", ibs$sample.id)
ibs = ibs.m
# 1 - IBS turns the similarity into a distance (comparable to WIP).
ibs1m = 1-ibs
# -
wip = as.matrix(read.delim("../chlamy/kwip/flowers_wip.dist", row.names=1))
# Rename to match strain names in VCF
rownames(wip) = colnames(wip) = chlamy_meta$Sample_name[match(rownames(wip), chlamy_meta$Run)]
# Reorder WIP matrix to IBS matrix order
m = match(rownames(ibs), rownames(wip))
wip = wip[m,m]
# + deletable=true editable=true
ip = as.matrix(read.delim("../chlamy/kwip/flowers_ip.dist", row.names=1))
# Rename to match strain names in VCF
rownames(ip) = colnames(ip) = chlamy_meta$Sample_name[match(rownames(ip), chlamy_meta$Run)]
# Reorder ip matrix to IBS matrix order
m = match(rownames(ibs), rownames(ip))
ip = ip[m,m]
# -
# Sanity checks: all three matrices must share the same sample order.
all(rownames(wip) == rownames(ibs))
all(rownames(ip) == rownames(ibs))
image(wip)
image(ip)
image(ibs1m)
# Upper triangles only (the matrices are symmetric).
# NOTE(review): upper.tri(wip) is reused for ip — equivalent since the
# matrices have identical dimensions, but upper.tri(ip) would be clearer.
wip.t = wip[upper.tri(wip)]
ip.t = ip[upper.tri(wip)]
ibs.t = ibs1m[upper.tri(ibs1m)]
wip.d = as.dist(wip)
ip.d = as.dist(ip)
ibs.d = as.dist(ibs1m)
# Rank correlation between kWIP distances and 1-IBS.
cor(wip.t, ibs.t, method="spearman")
cor(ip.t, ibs.t, method="spearman")
| writeups/chlamy-snps/snprelate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pickle
# + jupyter={"outputs_hidden": true} tags=[]
# Load the trained dynamic topic model (gensim LdaSeqModel, pickled earlier).
# NOTE(review): pickle.load executes arbitrary code — only open trusted files.
with open('ldaseq1234.pickle', 'rb') as f:
    ldaseq = pickle.load(f)
print(ldaseq.print_topic_times(topic=0))
# + jupyter={"source_hidden": true} tags=[]
topicdis = [[0.04461942257217848,
0.08583100499534332,
0.0327237321141309,
0.0378249089831513,
0.08521717043434086,
0.03543307086614173,
0.054356108712217424,
0.04057658115316231,
0.0499745999491999,
0.04468292269917873,
0.028257556515113032,
0.026013885361104057,
0.0668021336042672,
0.07567098467530269,
0.08409533485733638,
0.026966387266107866,
0.04533909067818136,
0.028172889679112693,
0.04121158242316485,
0.06623063246126493],
[0.04375013958058825,
0.07278290193626193,
0.025437166402394087,
0.03566563190923912,
0.0600978180762445,
0.03541997007392188,
0.07258190588918417,
0.0305960649440561,
0.06355941666480559,
0.0459164303102039,
0.0295464189204279,
0.02733546240257275,
0.03622395426223284,
0.12723049780020992,
0.07838845836031891,
0.03517430823860464,
0.0370726042387833,
0.030216405744020368,
0.04841771445161579,
0.06458672979431404],
[0.047832813411448426,
0.07557888863526846,
0.01995992797179741,
0.03816987496512719,
0.13469781125567476,
0.03291993202972431,
0.07570569885109944,
0.030586624058434146,
0.049760328692079435,
0.04080752745441173,
0.02835476425980877,
0.02495625047553831,
0.044434299627177966,
0.08879251312485734,
0.07190139237616983,
0.028405488346141164,
0.034416292576529964,
0.028608384691470746,
0.04149230261989906,
0.06261888457734155],
[0.042617561579875174,
0.07770737052741912,
0.03601886702558483,
0.04750107199009005,
0.06608223355090762,
0.07060841393110677,
0.0826861689456382,
0.0338272428414884,
0.042951069607889844,
0.04888274810615084,
0.04173614750583639,
0.03728143313164038,
0.04371337367192339,
0.04190290151984373,
0.06603458954690553,
0.03363666682548001,
0.045190337795988376,
0.035590070989565965,
0.03327933679546429,
0.0727523941112011],
[0.05211050194283281,
0.022701868313195296,
0.04215681056049396,
0.03776547612710917,
0.06246340554638846,
0.05240325757172513,
0.03425240858040134,
0.060094746367168786,
0.04189066907968276,
0.03837760153297493,
0.031431308883802626,
0.08609676904242296,
0.04383350188960451,
0.11209879171767712,
0.06754670782988237,
0.03071272688561239,
0.0415446851546282,
0.02789162718901368,
0.0347314632458615,
0.07989567253952201],
[0.052505147563486614,
0.03777739647677877,
0.03743422557767102,
0.03311599176389842,
0.11201670098375657,
0.06963509494394875,
0.02916952642415923,
0.043239533287577216,
0.03854953099977122,
0.03260123541523679,
0.03546099290780142,
0.07958705101807367,
0.03165751544269046,
0.1153054221002059,
0.06637497140242507,
0.02304964539007092,
0.03955044612216884,
0.030942576069549303,
0.031457332418210936,
0.06056966369251887],
[0.03823109185601696,
0.0364105636723971,
0.03279255196570954,
0.033691293727243395,
0.15926164907590912,
0.061321841729271326,
0.036203161727427755,
0.03440567820436005,
0.03157118495644559,
0.0335069364428262,
0.03426741024104715,
0.07637000506982532,
0.03892243167258146,
0.11098308521915472,
0.05643637369221551,
0.026086555745033876,
0.036525786975157855,
0.04528275798497488,
0.033046043231783194,
0.04468359681061898],
[0.02800869900733102,
0.048055000175383215,
0.02597425374443158,
0.0358483285979866,
0.1538987688098495,
0.06082289803220036,
0.04098705671893087,
0.035585253779508226,
0.03213020449682556,
0.03448033954189905,
0.03277912238240556,
0.06162966080886738,
0.07131081412887158,
0.11022834894243923,
0.029727454488056405,
0.02874530849907047,
0.04032060051211898,
0.06248903854923007,
0.029253919814795328,
0.03772492896979901],
[0.04004854368932039,
0.019997889404812157,
0.015882228788518363,
0.04353102574926129,
0.10579358379062896,
0.01978682988602786,
0.030656395103419165,
0.02532714225411566,
0.055878007598142675,
0.033241874208526805,
0.02643520472773322,
0.05730265934993668,
0.05566694807935838,
0.20409455466441537,
0.05925495989869143,
0.02543267201350781,
0.0408400168847615,
0.03197551709582102,
0.033030814689742505,
0.07582313212325877],
[0.035613691698095196,
0.026543180407695613,
0.03375184990690791,
0.020337041103738004,
0.10770038669021817,
0.02291497589153578,
0.02597030601040722,
0.11419296319281998,
0.04516159831956843,
0.02897789659617129,
0.023344631689502078,
0.05060390509380818,
0.04430228672363584,
0.21196352699670598,
0.03948059387979186,
0.028643719864419725,
0.0347543801021626,
0.025779347877977754,
0.031078436052895404,
0.048885281901943]]
# -
# ## Fig.1.a.topic proportion over time (bar chart)
# + tags=[]
import matplotlib.pyplot as plt
from pandas import DataFrame  # public API path (was pandas.core.frame)
import numpy as np
fig, ax = plt.subplots(1, figsize=(32,16))
year = ['19Q1','19Q2','19Q3','19Q4','20Q1','20Q2','20Q3','20Q4','21Q1','21Q2']
col = ['#63b2ee','#76da91','#f8cb7f','#f89588','#7cd6cf','#9192ab','#7898e1','#efa666','#eddd86','#9987ce',
       '#95a2ff','#fa8080','#ffc076','#fae768','#87e885','#3cb9fc','#73abf5','#cb9bff','#90ed7d','#f7a35c']
# topicdis rows are time slices; transpose so each row is one topic's series.
topic2 = np.array(DataFrame(topicdis).T)
# Stacked bars, one segment per topic. Keep a running bottom instead of
# re-summing all previous topics every iteration (was O(n^2)).
# Fix: topic 0 previously used matplotlib's default colour instead of col[0],
# making its bars inconsistent with the legend colours.
bottom = np.zeros(len(year))
for i in range(20):
    plt.bar(year, topic2[i], color=col[i], bottom=bottom)
    bottom = bottom + topic2[i]
# x and y limits
#plt.xlim(-0.6, 2.5)
#plt.ylim(-0.0, 1.5)
# remove spines
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
# ax.spines['bottom'].set_visible(False)
# draw the grid behind the bars
ax.set_axisbelow(True)
ax.yaxis.grid(color='gray', linestyle='dashed', alpha=0.7)
# title and legend (order matches the bar-drawing order above)
legend_label = ['Topic1', 'Topic2', 'Topic3', 'Topic4','Topic5','Topic6','Topic7','Topic8','Topic9','Topic10','Topic11'
                ,'Topic12','Topic13','Topic14','Topic15','Topic16','Topic17','Topic18','Topic19','Topic20']
plt.legend(legend_label, ncol = 20, bbox_to_anchor=([1, 1, 0, 0]), frameon = False)
plt.title('Topic proportions over 2019 - 2021\n', loc='left', fontsize = 20)
plt.show()
# -
# ## Fig.1.b.topic proportion over time (line graph)
# Best topics found with random seed 1234 (0-based topic indices, chosen manually).
ntop = [0,7,8,14,19]
# +
year = ['19Q1','19Q2','19Q3','19Q4','20Q1','20Q2','20Q3','20Q4','21Q1','21Q2']
col = ['#63b2ee','#76da91','#f8cb7f','#f89588','#7cd6cf','#9192ab','#7898e1','#efa666','#eddd86','#9987ce',
       '#95a2ff','#fa8080','#ffc076','#fae768','#87e885','#3cb9fc','#73abf5','#cb9bff','#90ed7d','#fa8080']
fig, ax = plt.subplots(1, figsize=(32,16))
# Plot the proportion of each selected topic (see ntop) over the quarters.
for i in ntop:
    ys = [item[i] for item in topicdis]
    ax.plot(year, ys, label='Topic ' + str(i+1), color=col[i], linewidth=3)
# remove spines (deduplicated: the same three lines appeared twice)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
# draw the grid behind the lines
ax.set_axisbelow(True)
ax.yaxis.grid(color='gray', linestyle='dashed', alpha=0.7)
# Fix: passing the full 20-entry topic-name list to plt.legend() relabelled
# the five plotted lines as Topic1..Topic5 (matplotlib pairs labels with
# artists in order). Rely on the per-line labels set in ax.plot instead.
plt.legend(ncol=len(ntop), bbox_to_anchor=([1, 1.05, 0, 0]), frameon=False)
plt.title('Topic proportions over 2019 - 2021\n', loc='left', fontsize = 20)
plt.show()
# + [markdown] tags=[]
# ## Fig.2. topic key words over time
# -
import matplotlib.pyplot as plt
TOPIC_ID = 13  # topic whose key-word evolution is plotted below
topicEvolution0 = ldaseq.print_topic_times(topic=TOPIC_ID)
# +
fig, axes = plt.subplots(2,5, figsize=(30, 15), sharex=True)
axes = axes.flatten()
year = ['19Q1','19Q2','19Q3','19Q4','20Q1','20Q2','20Q3','20Q4','21Q1','21Q2']
# Fix: the suptitle was hard-coded to 'Topic0' although topic=13 was being
# plotted; derive the title from the topic id instead.
title = 'Topic%d' % TOPIC_ID
# One horizontal bar chart of (word, probability) per time slice.
for i in range(len(topicEvolution0)):
    value = [item[1] for item in topicEvolution0[i]]
    index = [item[0] for item in topicEvolution0[i]]
    ax = axes[i]
    ax.barh(index,value,height = 0.7)
    ax.set_title(year[i],
                 fontdict={'fontsize': 30})
    ax.invert_yaxis()  # highest-probability word on top
    ax.tick_params(axis='both', which='major', labelsize=20)
    for k in 'top right left'.split():
        ax.spines[k].set_visible(False)
fig.suptitle(title, fontsize=40)
plt.subplots_adjust(top=0.90, bottom=0.05, wspace=0.90, hspace=0.3)
plt.show()
# -
# * The drop in 2020 Q4 reflects delayed appearance of purchase patterns: items purchased in Q4 are mostly discussed online in 2021 Q1.
# ## Fig.3. Trend of key words under 1 topic over time
# Per-time-slice (word, probability) pairs for topic 0.
topicEvolution0 = ldaseq.print_topic_times(topic=0)
# Transform each time slice into a word -> probability dictionary
# so keywords can be looked up by name below.
topicEvolution0 = [dict(time_slice) for time_slice in topicEvolution0]
# The key words of interest under this topic, picked manually.
brand = ['nars', 'revlon', 'wetnwild', 'dior']
# +
year = ['19Q1','19Q2','19Q3','19Q4','20Q1','20Q2','20Q3','20Q4','21Q1','21Q2']
col = ['#63b2ee','#76da91','#f8cb7f','#f89588','#7cd6cf','#9192ab','#7898e1','#efa666','#eddd86','#9987ce',
       '#95a2ff','#fa8080','#ffc076','#fae768','#87e885','#3cb9fc','#73abf5','#cb9bff','#90ed7d','#fa8080']
fig, ax = plt.subplots(1, figsize=(32,16))
# Plot each brand keyword's probability under topic 0 over time, looking the
# brand up in the per-slice word->probability dictionaries.
for z in brand:
    ys = [slice_probs[z] for slice_probs in topicEvolution0]
    # Fix: the label previously reused the stale loop variable `i` left over
    # from an earlier cell ('Topic' + str(i+1)); label lines by brand name.
    ax.plot(year, ys, label=z, linewidth=3)
# remove spines (deduplicated: the same three lines appeared twice)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
# draw the grid behind the lines
ax.set_axisbelow(True)
ax.yaxis.grid(color='gray', linestyle='dashed', alpha=0.7)
# Fix: the 20-entry topic legend mislabelled the four brand lines; rely on
# the per-line labels, and title the figure for what it actually shows.
plt.legend(ncol=len(brand), bbox_to_anchor=([1, 1.05, 0, 0]), frameon=False)
plt.title('Brand keyword proportions over 2019 - 2021\n', loc='left', fontsize = 20)
plt.show()
| 4_DTM/.ipynb_checkpoints/load142857-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import requests
from bs4 import BeautifulSoup
from pathlib import Path
# +
def get_movies_request(url):
    """GET `url` and return the parsed BeautifulSoup document.

    Returns None (implicitly) when the response status is not OK.
    """
    response = requests.get(url)
    if not response.ok:
        return None
    return BeautifulSoup(response.content)
def download_image(url, img_name, directory):
    """Download `url` and save it as <directory>/<img_name>.jpg.

    The target directory is created if it does not already exist.
    """
    content = requests.get(url).content
    target = Path(f'{directory}')
    target.mkdir(exist_ok=True)
    (target / f'{img_name}.jpg').write_bytes(content)
def get_movie_data(movies_html):
    """Extract movie metadata from one IMDb search-results page.

    Returns a list of dicts with keys 'url', 'title' and 'genre'
    (genre is the comma-separated string from the listing).
    """
    # Fix: this previously read the global `soup` instead of the
    # `movies_html` parameter, so every call parsed the same page.
    result = movies_html.find('div', {'class': 'lister-list'})  # main div with all movies
    movie_elems = result.findAll('div', {'class': 'lister-item-content'})
    movies = []
    for movie in movie_elems:
        header = movie.find('h3', {'class': 'lister-item-header'})
        details = movie.find('p', {'class': 'text-muted'})
        url_elem = header.find('a')
        entry = {'url': url_elem['href'], 'title': url_elem.get_text()}
        genre = details.find('span', {'class': 'genre'})
        entry['genre'] = genre.get_text().strip(' ').strip('\n')
        movies.append(entry)
    return movies
# +
max_movies = input("how many movie posters do you want?")
# First results page (IMDb pages are 50 titles long).
request_url_one = f'https://www.imdb.com/search/title/?groups=top_{max_movies}&sort=user_rating'
soups = []
soups.append(get_movies_request(request_url_one))
# Fix: `number` was undefined (NameError); the pagination bound is the
# user-supplied max_movies.
for num in range(50, int(max_movies), 50):
    request_url_rest = f'https://www.imdb.com/search/title/?groups=top_1000&sort=user_rating,desc&start={num}&ref_=adv_nxt'
    soups.append(get_movies_request(request_url_rest))
print(len(soups))
# -
soups[-1].prettify()
# +
domain = 'http://imdb.com'
all_movies = []
for indx, soup in enumerate(soups):
    # Fix: get_movie_data was called twice per page (once into an unused
    # `data` variable), doubling the parsing work.
    all_movies += get_movie_data(soup)
    print(f'Done with soup: {indx+1}')
# -
print(len(all_movies))
# +
movie_directory = 'movie_posters'
# Store the images in a directory using the genres as part of the file pathway.
# Resume-index notes from a previous partial run:
#   723 = frost, nixon?lol
#   864 / 865 = '50/50'
# Fix: `movies` was undefined here; the list built above is `all_movies`.
for indx, movie in enumerate(all_movies[866:]):
    title = movie['title']
    url = domain + movie['url']
    genres = movie['genre']
    movie_url_request = requests.get(url)
    if movie_url_request.ok:
        movie_url_soup = BeautifulSoup(movie_url_request.content)
        poster_elem = movie_url_soup.find('div', {'class': 'poster'})
        img_url = poster_elem.find('img')['src']
        title_with_underscore = title.replace(' ', '_')
        # A movie with several genres is saved once per genre directory.
        for genre in genres.split(','):
            download_image(img_url, title_with_underscore, f'{movie_directory}/{genre.lower()}') # can change pixel in the url ex: https://m.media-amazon.com/images/M/MV5BMDFkYTc0MGEtZmNhMC00ZDIzLWFmNTEtODM1ZmRlYWMwMWFmXkEyXkFqcGdeQXVyMTMxODk2OTU@._V1_UX182_CR0,0,182,268_AL_.jpg
    print(f'Done with: {indx}')
print('yay!')
# -
| lesson_1_image_classifier/Movie Poster Image Classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/oguchi-ebube/Sugar-Instance-Packager/blob/master/SVM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="0oixZzeOtlF6" colab_type="code" outputId="3594ef12-a732-40fd-8c73-2d3c9d7bb2ee" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Run this cell to mount your Google Drive.
from google.colab import drive
drive.mount('/content/drive')
# + id="Nf0KsD4mt_qa" colab_type="code" colab={}
#import necessary libraries
import pandas as pd
import numpy as np
import re
import warnings
warnings.filterwarnings("ignore")
import seaborn as sns
sns.set(style="ticks",color_codes=True)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import gzip
import matplotlib.pyplot as plt
# + id="iuIlWyFquUnc" colab_type="code" colab={}
# !cp "/content/drive/My Drive/Colab Notebooks/load_data.py" load_data.py
import load_data
# !cp "/content/drive/My Drive/Colab Notebooks/data_helper_first.py" data_helper_first.py
import data_helper_first
# + id="6DYqy6xIuUr7" colab_type="code" colab={}
def parse_gz(path):
    """Yield one record per line of the gzip file at `path`.

    NOTE(review): each line is evaluated with eval() — only use on trusted
    files, since eval executes arbitrary code.
    """
    g = gzip.open(path, 'rb')
    for line in g:
        yield eval(line)
def convert_to_DF(path):
    """Load every record from the gzip file into a DataFrame, in file order."""
    records = {row: record for row, record in enumerate(parse_gz(path))}
    return pd.DataFrame.from_dict(records, orient='index')
# + id="FAV1AUA4uUwm" colab_type="code" outputId="b4a95911-c781-47f0-d8a8-799ccb51312c" colab={"base_uri": "https://localhost:8080/", "height": 34}
# #insert path
# Four review datasets mounted from Google Drive: three CSVs plus the
# gzipped Amazon Sports & Outdoors JSON reviews.
path_df1 = '/content/drive/My Drive/Colab Notebooks/Products_review.csv'
path_df2 = '/content/drive/My Drive/Colab Notebooks/Products_reviews.csv'
path_df3 = '/content/drive/My Drive/Colab Notebooks/customer_review.csv'
path_df4 = '/content/drive/My Drive/Colab Notebooks/reviews_Sports_and_Outdoors_5.json.gz'
# Read the data in the CSV file using pandas
df1 = pd.read_csv(path_df1)
#print(df3)
df2 = pd.read_csv(path_df2)
# 'unicode_escape' works around non-UTF-8 bytes in this particular CSV.
df3 = pd.read_csv(path_df3, encoding = 'unicode_escape')
#print(df3)
# !python load_data.py
df4 = convert_to_DF(path_df4)
# print(df4.columns)
# Align the Amazon columns with the CSVs: reviewText -> review, overall -> rating.
df4 = df4.rename(index=str, columns={"reviewText": "review"})
df4 = df4.rename(index=str, columns={"overall": "rating"})
#print(df4.columns)
# + id="c0lemKmruVCS" colab_type="code" colab={}
#concat the dataframes
df = pd.concat([df1, df2,df3,df4])
#df.drop(columns=['asin', 'asins', 'brand', 'categories', 'date', 'dateAdded','dateUpdated', 'helpful', 'imageURLs', 'keys', 'manufacturer','manufacturerNumber', 'name', 'primaryCategories', 'reviewTime', 'reviewerID', 'reviewerName', 'reviews.date','reviews.dateSeen', 'reviews.didPurchase', 'reviews.doRecommend','reviews.numHelpful', 'reviews.sourceURLs', 'reviews.title','reviews.username', 'sourceURLs', 'summary', 'title', 'unixReviewTime'])
# print(df[:5])
# print(df.columns)
# Binary sentiment label: rating >= 3 is positive (1), below is negative (0).
df["label"] = np.where(df.rating >=3, 1, 0)
#print(df['label'])
#Number of the smaller class (negative reviews)
num_to_sample = len(df[df.label == 0])
# Downsample both classes to the minority-class size for a balanced set.
# NOTE(review): sample() is unseeded, so the split is not reproducible —
# pass random_state if repeatability matters.
df_neg = df[df["label"] == 0].sample(n=num_to_sample)
df_pos = df[df["label"] == 1].sample(n=num_to_sample)
# + id="4iTD7CP0uVJ1" colab_type="code" colab={}
# Recombine the balanced classes into one frame.
df = pd.concat([df_neg, df_pos])
# Get the text from the dataframe
text = df["review"].values
#print(text)
# #clean strings
# clean_str comes from the project module data_helper_first (copied from Drive above).
text= [data_helper_first.clean_str(str(sent)) for sent in text]
# Create labels from the dataframe
labels = df["label"].values
df = pd.concat([pd.DataFrame(text),pd.DataFrame(labels)],axis=1)
#print(len(df)) 144634
#print(df[3000:])
# + id="tvlKoTP_1MEG" colab_type="code" colab={}
#print(text[:50])
# + id="XtTJYatScW1T" colab_type="code" colab={}
# Conventional aliases: X = cleaned review texts, y = 0/1 sentiment labels.
X = text
y = labels
# + id="XIsjiHNuVDcS" colab_type="code" colab={}
# Normalise every review: coerce to str, strip surrounding whitespace,
# then lower-case (same order of operations as before).
lower_text = [str(entry).strip().lower() for entry in X]
# + id="KKVYMVdGVVs2" colab_type="code" colab={}
# Strip punctuation: delete every character that is not word-like or
# whitespace. (The pattern is kept byte-identical; the '2' inside the class
# is redundant since \w already matches digits.)
punc_text = [re.sub(r'[^\w\s2]', "", entry) for entry in lower_text]
# + id="EQCIsWhUVV5h" colab_type="code" colab={}
#Word vectorization
#Initialize the TF-IDF vectorizer
# NOTE(review): ngram_range=(3,3) uses ONLY trigrams (no unigrams/bigrams) —
# confirm that is intentional.
tfidf = TfidfVectorizer(sublinear_tf=True, min_df=2,max_df = 0.7,max_features =200000,norm='l2', encoding='latin-1',ngram_range=(3,3),stop_words='english')
# + id="xlumO9mhWfRO" colab_type="code" outputId="e2c7bd93-706e-4b60-87c7-1e4b23be9045" colab={"base_uri": "https://localhost:8080/", "height": 67}
#transform independent variable using TF-IDF vectorizer
print("\n")
X_tfidf = tfidf.fit_transform(punc_text)
print(X_tfidf.shape)
#print("After vectorized text data\n\n",X_tfidf)
# + id="Ur-XVrp_dX7w" colab_type="code" colab={}
#Split the data into train and testing
# 70/30 split with a fixed seed for reproducibility of the split itself.
X_train, X_test, Y_train, Y_test = train_test_split(X_tfidf, y, test_size=0.3, random_state = 42)
# + id="DaKDMovRWfxe" colab_type="code" colab={}
# #Print training data
# print("\n")
# print("Training data\n\n",X_train,"\n",Y_train)
# print("\n\n")
# + id="ECcgbOKxXBHQ" colab_type="code" colab={}
# #Print testing data
# print("Testing data\n\n",X_test)
# print("\n\n")
# + id="ROkqlTUBXBXV" colab_type="code" colab={}
#Build the SVM model
# Linear support vector classifier with sklearn defaults.
clf = LinearSVC()
# + id="fzZgK54sXBi9" colab_type="code" outputId="d640c467-fc56-4c8a-8216-d8f5f25fa10e" colab={"base_uri": "https://localhost:8080/", "height": 84}
#Fit train and test into the model
clf.fit(X_train, Y_train)
# + id="f1nEiveDXBxN" colab_type="code" colab={}
#Predict the result
y_pred = clf.predict(X_test)
# + id="cvfhiY8FJOgq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="71a80970-5486-419c-9a1d-83c5c183ff06"
# Mean accuracy on the 30% held-out test set.
clf.score(X_test,Y_test)
# + id="oyEDYqldXCGz" colab_type="code" outputId="594f7864-70ab-4fdc-85af-9396cc927278" colab={"base_uri": "https://localhost:8080/", "height": 319}
#classification report & confusion matrix
print("Confusion Matrix\n",confusion_matrix(Y_test,y_pred))
print("\n")
print("Classification Report\n",classification_report(Y_test,y_pred))
print("\n")
print("Accuracy : ",accuracy_score(Y_test,y_pred)*100)
# + id="8185pt3rW-GK" colab_type="code" colab={}
import pickle
# + id="Bj2FkIrvW_ah" colab_type="code" colab={}
# Persist the trained classifier to Drive so it can be reloaded for inference.
with open('/content/drive/My Drive/Colab Notebooks/training_models/svm_trained_model.pkl', 'wb') as f:
    pickle.dump(clf, f)
# + id="fIgepELRW_m7" colab_type="code" colab={}
# + id="vtHWMLXRh58L" colab_type="code" colab={}
# # !pip install scikit-plot
# + id="5UNOKL0rQOEI" colab_type="code" colab={}
# #Precision-recall graph
# plt.rcParams["figure.figsize"] = [16,10]
# import scikitplot as skplt
# probas = clf.predict(X_test)
# skplt.metrics.plot_precision_recall((Y_test,y_pred),probas)
# plt.title("Precision & Recall graph for SVM model")
# plt.show()
# + id="80atVPzUPQQp" colab_type="code" outputId="8a77f247-6b8f-4c01-ca02-e8b461e97bc6" colab={"base_uri": "https://localhost:8080/", "height": 34}
import numpy as np
# map class indices back to human-readable sentiment labels
label = {0:'negative', 1:'positive'}
example = ["i hate hate these make my feet hurt"]
# transform (NOT fit_transform): reuse the vocabulary learned on the training corpus
Testing = tfidf.transform(example)
print(label[clf.predict(Testing)[0]])
# + id="SFvNkIscSQU2" colab_type="code" colab={}
| SVM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] ipub={"ignore": true} slideshow={"slide_type": "slide"}
# <font size=10>Classification and Regression from linear and logistic regression to neural networks</font>
#
#
# -
# https://github.com/anacost/project2-FYS-STK4155
# ## Abstract
#
# The data used is the so-called Ising model and the methods are based on the article https://arxiv.org/abs/1803.08823.
# First, the dataset for the Ising model is produced. Some results from the article are reproduced using self-programmed code for the regression analysis of the one-dimensional Ising model. Then logistic regression for binary classification is performed on the phase of the two-dimensional Ising model.
# Finally, a neural network is programmed and applied to regression and classification on both one-dimensional and two-dimensional models in order to compare the results with the regression analyses.
#
# ## Introduction
#
# The linear regression analyses were reproduced with the codes developed in project1. Linear regression was applied in the estimation of the coupling constant of the one-dimensional Ising model. Ordinary least square, Ridge and Lasso regression analyses were compared. The performance of these methods was plotted.
# In this project, logistic regression is programmed for binary classification of the phase of the two-dimensional Ising model, as well. The error of the logistic regression was calculated.
#
# For the last two parts of the project, a neural network program was developed for regression and classification of the one-dimensional and two-dimensional Ising models, respectively.
#
# The neural networks were programmed using numpy.maskedarrays. The weights and biases resulting from the neural networks are numpy.maskedarrays. Numpy.maskedarrays are supposed to perform better than lists to calculate the weights and the biases in the neural network. It is possible to create arrays of different shapes since a mask is created to mask/hide the elements that are not used in the calculation.
#
#
# The codes developed and some other files are available in the repository https://github.com/anacost/project2-FYS-STK4155.
# + ipub={"init_cell": true}
import sklearn
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
# %matplotlib inline
import numpy as np
# -
# ## Part a) Producing the data for the one-dimensional Ising model
# +
#import scipy.sparse as sp
np.random.seed(12)
import warnings
#Comment this to turn on warnings
warnings.filterwarnings('ignore')
### define Ising model params
# system size
L=40
# create 10000 random Ising states
# each row is one spin configuration of L spins drawn uniformly from {-1, +1}
states=np.random.choice([-1, 1], size=(10000,L))
def ising_energies(states,L):
    """Return the nearest-neighbour Ising energies of the given states.

    Builds the coupling matrix J with J[i, (i+1) % L] = -1 (periodic chain,
    uniform unit coupling) and evaluates E = sum_{ij} s_i J_ij s_j per sample.
    """
    coupling = np.zeros((L, L))
    for site in range(L):
        coupling[site, (site + 1) % L] -= 1.0
    # batched quadratic form over the trailing spin axis of `states`
    return np.einsum('...i,ij,...j->...', states, coupling, states)
# calculate Ising energies
energies=ising_energies(states,L)
# reshape Ising states into RL samples: S_iS_j --> X_p
# outer product of each configuration with itself -> all pairwise products s_i*s_j
states=np.einsum('...i,...j->...ij', states, states)
shape=states.shape
# flatten each (L, L) product matrix into a length L*L feature row
states=states.reshape((shape[0],shape[1]*shape[2]))
# build final data set
Data=[states,energies]
# define number of samples
n_samples=400
# define train and test data sets
# first 400 rows train, next 200 rows test (noise terms left commented out)
X_train=Data[0][:n_samples]
Y_train=Data[1][:n_samples] #+ np.random.normal(0,4.0,size=X_train.shape[0])
X_test=Data[0][n_samples:3*n_samples//2]
Y_test=Data[1][n_samples:3*n_samples//2] #+ np.random.normal(0,4.0,size=X_test.shape[0])
# -
# ## Part b) Estimating the coupling constant of the one-dimensional Ising model
# + ipub={"code": {"asfloat": true, "caption": "Code example", "format": {}, "label": "code:example_code", "placement": "H", "widefigure": false}, "figure": {"caption": "OLS, Ridge, Lasso", "label": "fig:OLSRidgeLasso"}}
from sklearn import linear_model
# define error lists
# For each of OLS / Ridge / LASSO we track, per regularisation strength:
# R2 ("errors"), MSE, bias and variance, on both train and test sets.
train_errors_leastsq = []
test_errors_leastsq = []
train_MSE_leastsq = []
test_MSE_leastsq = []
train_bias_leastsq = []
test_bias_leastsq = []
train_var_leastsq = []
test_var_leastsq = []
train_errors_ridge = []
test_errors_ridge = []
train_MSE_ridge = []
test_MSE_ridge = []
train_bias_ridge = []
test_bias_ridge = []
train_var_ridge = []
test_var_ridge = []
train_errors_lasso = []
test_errors_lasso = []
train_MSE_lasso = []
test_MSE_lasso = []
train_bias_lasso = []
test_bias_lasso = []
train_var_lasso = []
test_var_lasso = []
# set regularisation strength values
# 10 lambdas, logarithmically spaced from 1e-4 to 1e5
lmbdas = np.logspace(-4, 5, 10)
#Initialize coeffficients for OLS, ridge regression and Lasso
coefs_leastsq = []
coefs_ridge = []
coefs_lasso=[]
# set up Lasso Regression model
lasso = linear_model.Lasso()
# NOTE(review): the OLS branch below does not depend on lmbda, so it is
# recomputed identically on every iteration -- it could be hoisted out of the loop.
for _,lmbda in enumerate(lmbdas):
    ### ordinary least squares
    # prepend an intercept column of ones to the design matrix
    xb = np.c_[np.ones((X_train.shape[0],1)),X_train]
    #fit model/singularity :
    # pinv handles the singular X^T X (pairwise spin features are redundant)
    beta_ols = np.linalg.pinv(xb.T @ xb) @ xb.T @ Y_train
    coefs_leastsq.append(beta_ols) # store weights
    # use the coefficient of determination R^2 as the performance of prediction.
    fitted_train = xb @ beta_ols
    xb_test = np.c_[np.ones((X_test.shape[0],1)),X_test]
    fitted_test = xb_test @ beta_ols
    R2_train = 1 - np.sum( (fitted_train - Y_train)**2 )/np.sum( (Y_train - np.mean(Y_train))**2 )
    R2_test = 1 - np.sum( (fitted_test - Y_test)**2 )/np.sum((Y_test - np.mean(Y_test))**2)
    MSE_train = np.sum((fitted_train - Y_train)**2)/len(Y_train)
    MSE_test = np.sum((fitted_test - Y_test)**2)/len(Y_test)
    var_train = np.sum((fitted_train - np.mean(fitted_train))**2)/len(Y_train)
    var_test = np.sum((fitted_test - np.mean(fitted_test))**2)/len(Y_test)
    bias_train = np.sum((Y_train - np.mean(fitted_train))**2)/len(Y_train)
    bias_test = np.sum((Y_test - np.mean(fitted_test))**2)/len(Y_test)
    train_errors_leastsq.append(R2_train)
    test_errors_leastsq.append(R2_test)
    train_MSE_leastsq.append(MSE_train)
    test_MSE_leastsq.append(MSE_test)
    train_bias_leastsq.append(bias_train)
    test_bias_leastsq.append(bias_test)
    train_var_leastsq.append(var_train)
    test_var_leastsq.append(var_test)
    ### apply Ridge regression
    # NOTE(review): the identity includes the intercept column, so the bias
    # term is regularized too -- confirm this is intended.
    I3 = np.eye(xb.shape[1])
    beta_ridge = (np.linalg.inv(xb.T @ xb + lmbda*I3) @ xb.T @ Y_train).flatten()
    coefs_ridge.append(beta_ridge[1:]) # store weights
    # use the coefficient of determination R^2 as the performance of prediction.
    fitted_train = xb @ beta_ridge
    fitted_test = xb_test @ beta_ridge
    R2_train = 1 - np.sum( (fitted_train - Y_train)**2 )/np.sum( (Y_train - np.mean(Y_train))**2 )
    R2_test = 1 - np.sum( (fitted_test - Y_test)**2 )/np.sum((Y_test - np.mean(Y_test))**2)
    MSE_train = np.sum((fitted_train - Y_train)**2)/len(Y_train)
    MSE_test = np.sum((fitted_test - Y_test)**2)/len(Y_test)
    var_train = np.sum((fitted_train - np.mean(fitted_train))**2)/len(Y_train)
    var_test = np.sum((fitted_test - np.mean(fitted_test))**2)/len(Y_test)
    bias_train = np.sum((Y_train - np.mean(fitted_train))**2)/len(Y_train)
    bias_test = np.sum((Y_test - np.mean(fitted_test))**2)/len(Y_test)
    train_errors_ridge.append(R2_train)
    test_errors_ridge.append(R2_test)
    train_MSE_ridge.append(MSE_train)
    test_MSE_ridge.append(MSE_test)
    train_bias_ridge.append(bias_train)
    test_bias_ridge.append(bias_test)
    train_var_ridge.append(var_train)
    test_var_ridge.append(var_test)
    ### apply Lasso regression
    lasso.set_params(alpha=lmbda) # set regularisation parameter
    lasso.fit(X_train, Y_train) # fit model
    coefs_lasso.append(lasso.coef_) # store weights
    # use the coefficient of determination R^2 as the performance of prediction.
    test_pred = np.array(lasso.predict(X_test))
    train_pred = np.array(lasso.predict(X_train))
    var_train = np.sum((train_pred - np.mean(train_pred))**2)/len(Y_train)
    var_test = np.sum((test_pred - np.mean(test_pred))**2)/len(Y_test)
    bias_train = np.sum((Y_train - np.mean(train_pred))**2)/len(Y_train)
    bias_test = np.sum((Y_test - np.mean(test_pred))**2)/len(Y_test)
    train_errors_lasso.append(lasso.score(X_train, Y_train))
    test_errors_lasso.append(lasso.score(X_test,Y_test))
    train_MSE_lasso.append(sklearn.metrics.mean_squared_error(Y_train, train_pred))
    test_MSE_lasso.append(sklearn.metrics.mean_squared_error(Y_test, test_pred))
    train_bias_lasso.append(bias_train)
    test_bias_lasso.append(bias_test)
    train_var_lasso.append(var_train)
    test_var_lasso.append(var_test)
### plot Ising interaction J
# uses the coefficients from the LAST lambda of the loop above (lmbda = 1e5)
J_leastsq=np.array(beta_ols[1:]).reshape((L,L))
J_ridge=np.array(beta_ridge[1:]).reshape((L,L))
J_lasso=np.array(lasso.coef_).reshape((L,L))
cmap_args=dict(vmin=-1., vmax=1., cmap='seismic')
fig, axarr = plt.subplots(nrows=1, ncols=3)
axarr[0].imshow(J_leastsq,**cmap_args)
axarr[0].set_title('$\\mathrm{OLS}$',fontsize=16)
axarr[0].tick_params(labelsize=16)
axarr[1].imshow(J_ridge,**cmap_args)
axarr[1].set_title('$\\mathrm{Ridge},\ \\lambda=%.4f$' %(lmbda),fontsize=16)
axarr[1].tick_params(labelsize=16)
im = axarr[2].imshow(J_lasso,**cmap_args)
axarr[2].set_title('$\\mathrm{LASSO},\ \\lambda=%.4f$' %(lmbda),fontsize=16)
axarr[2].tick_params(labelsize=16)
# shared colorbar attached to the last panel
divider = make_axes_locatable(axarr[2])
cax = divider.append_axes("right", size="5%", pad=0.5)
cbar=fig.colorbar(im, cax=cax)
cbar.ax.set_yticklabels(np.arange(-1.0, 1.0+0.25, 0.25),fontsize=14)
cbar.set_label('$J_{i,j}$',labelpad=-40, y=1.12,fontsize=16,rotation=0)
fig.subplots_adjust(right=1.)
plt.show();
#fig.savefig('lasso_ridge'+str()+'.png') ;
# + ipub={"code": {"asfloat": true, "caption": "Performance", "format": {}, "label": "code:performance", "placement": "H", "widefigure": false}, "figure": {"caption": "Performance", "label": "fig:performance"}}
# Plot our performance on both the training and test data
# R2 score vs regularisation strength (log x-axis); solid = train, dashed = test
plt.semilogx(lmbdas, train_errors_leastsq, 'b',label='Train (OLS)')
plt.semilogx(lmbdas, test_errors_leastsq,'--b',label='Test (OLS)')
plt.semilogx(lmbdas, train_errors_ridge,'r',label='Train (Ridge)',linewidth=1)
plt.semilogx(lmbdas, test_errors_ridge,'--r',label='Test (Ridge)',linewidth=1)
plt.semilogx(lmbdas, train_errors_lasso, 'g',label='Train (LASSO)')
plt.semilogx(lmbdas, test_errors_lasso, '--g',label='Test (LASSO)')
fig = plt.gcf()
fig.set_size_inches(10.0, 6.0)
#plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
#           linewidth=3, label='Optimum on test')
plt.legend(loc='lower left',fontsize=16)
plt.ylim([-0.01, 1.01])
plt.xlim([min(lmbdas), max(lmbdas)])
plt.xlabel(r'$\lambda$',fontsize=16)
plt.ylabel('Performance',fontsize=16)
plt.tick_params(labelsize=16)
plt.show()
# -
# The results agree with the corresponding results from https://arxiv.org/abs/1803.08823.
#
# ## Understanding the results
# (from https://arxiv.org/abs/1803.08823)
# Let us make a few remarks: (i) the (inverse, see [Scikit documentation](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.linear_model)) regularization parameter $\lambda$ affects the Ridge and LASSO regressions at scales, separated by a few orders of magnitude. Notice that this is different for the data considered in Notebook 3 __Section VI: Linear Regression (Diabetes)__. Therefore, it is considered good practice to always check the performance for the given model and data with $\lambda$. (ii) at $\lambda\to 0$ and $\lambda\to\infty$, all three models overfit the data, as can be seen from the deviation of the test errors from unity (dashed lines), while the training curves stay at unity. (iii) While the OLS and Ridge regression test curves are monotonic, the LASSO test curve is not -- suggesting the optimal LASSO regularization parameter is $\lambda\approx 10^{-2}$. At this sweet spot, the Ising interaction weights ${\bf J}$ contain only nearest-neighbor terms (as did the model the data was generated from).
#
# Gauge degrees of freedom: recall that the uniform nearest-neighbor interactions strength $J_{j,k}=J$ which we used to generate the data was set to unity, $J=1$. Moreover, $J_{j,k}$ was NOT defined to be symmetric (we only used the $J_{j,j+1}$ but never the $J_{j,j-1}$ elements). The colorbar on the matrix elements plot above suggest that the OLS and Ridge regression learn uniform symmetric weights $J=-0.5$. There is no mystery since this amounts to taking into account both the $J_{j,j+1}$ and the $J_{j,j-1}$ terms, and the weights are distributed symmetrically between them. LASSO, on the other hand, can break this symmetry (see matrix elements plots for $\lambda=0.001$ and $\lambda=0.01$). Thus, we see how different regularization schemes can lead to learning equivalent models but in different gauges. Any information we have about the symmetry of the unknown model that generated the data has to be reflected in the definition of the model and the regularization chosen.
# + ipub={"code": {"asfloat": true, "caption": "Performance MSE", "format": {}, "label": "code:performanceMSE", "placement": "H", "widefigure": false}, "figure": {"caption": "Performance MSE", "label": "fig:performanceMSE"}}
# Plot our performance on both the training and test data
# MSE vs regularisation strength for all three methods
# NOTE(review): ylim is inherited from the R2 plot; MSE values above 1.01 are
# clipped out of view -- confirm this is intended.
plt.semilogx(lmbdas, train_MSE_leastsq, 'b',label='Train (OLS)')
plt.semilogx(lmbdas, test_MSE_leastsq,'--b',label='Test (OLS)')
plt.semilogx(lmbdas, train_MSE_ridge,'r',label='Train (Ridge)',linewidth=1)
plt.semilogx(lmbdas, test_MSE_ridge,'--r',label='Test (Ridge)',linewidth=1)
plt.semilogx(lmbdas, train_MSE_lasso, 'g',label='Train (LASSO)')
plt.semilogx(lmbdas, test_MSE_lasso, '--g',label='Test (LASSO)')
fig = plt.gcf()
fig.set_size_inches(10.0, 6.0)
#plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
#           linewidth=3, label='Optimum on test')
plt.legend(loc='lower left',fontsize=16)
plt.ylim([-0.01, 1.01])
plt.xlim([min(lmbdas), max(lmbdas)])
plt.xlabel(r'$\lambda$',fontsize=16)
plt.ylabel('Performance-MSE',fontsize=16)
plt.tick_params(labelsize=16)
plt.show()
# + ipub={"code": {"asfloat": true, "caption": "Performance bias/variance", "format": {}, "label": "code:performancebiasvar", "placement": "H", "widefigure": false}, "figure": {"caption": "Performance bias/variance", "label": "fig:performancebiasvar"}}
# Plot our bias-variance on both the training and test data
# all three methods overlaid: solid/dashed = bias, dotted/point = variance
plt.semilogx(lmbdas, train_bias_leastsq, 'b',label='Bias-Train (OLS)')
plt.semilogx(lmbdas, test_bias_leastsq,'--b',label='Bias-Test (OLS)')
plt.semilogx(lmbdas, train_bias_ridge,'r',label='Bias-Train (Ridge)',linewidth=1)
plt.semilogx(lmbdas, test_bias_ridge,'--r',label='Bias-Test (Ridge)',linewidth=1)
plt.semilogx(lmbdas, train_bias_lasso, 'g',label='Bias-Train (LASSO)')
plt.semilogx(lmbdas, test_bias_lasso, '--g',label='Bias-Test (LASSO)')
plt.semilogx(lmbdas, train_var_leastsq, ':b',label='Variance-Train (OLS)')
plt.semilogx(lmbdas, test_var_leastsq,'.b',label='Variance-Test (OLS)')
plt.semilogx(lmbdas, train_var_ridge,':r',label='Variance-Train (Ridge)',linewidth=1)
plt.semilogx(lmbdas, test_var_ridge,'.r',label='Variance-Test (Ridge)',linewidth=1)
plt.semilogx(lmbdas, train_var_lasso, ':g',label='Variance-Train (LASSO)')
plt.semilogx(lmbdas, test_var_lasso, '.g',label='Variance-Test (LASSO)')
fig = plt.gcf()
fig.set_size_inches(10.0, 6.0)
#plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
#           linewidth=3, label='Optimum on test')
plt.legend(loc='lower left',fontsize=16)
#plt.ylim([-0.01, 1.01])
plt.xlim([min(lmbdas), max(lmbdas)])
plt.xlabel(r'$\lambda$',fontsize=16)
plt.ylabel('Bias-Variance',fontsize=16)
plt.tick_params(labelsize=16)
plt.show()
# + ipub={"code": {"asfloat": true, "caption": "Performance OlS bias/variance", "format": {}, "label": "code:performanceOLSbiasvar", "placement": "H", "widefigure": false}, "figure": {"caption": "Performance OLS bias/variance", "label": "fig:performanceOLSbiasvar"}}
# Plot our bias-variance on both the training and test data
# OLS only (the other methods' lines are kept commented for quick toggling)
plt.semilogx(lmbdas, train_bias_leastsq, 'b',label='Bias-Train (OLS)')
plt.semilogx(lmbdas, test_bias_leastsq,'--b',label='Bias-Test (OLS)')
#plt.semilogx(lmbdas, train_bias_ridge,'r',label='Bias-Train (Ridge)',linewidth=1)
#plt.semilogx(lmbdas, test_bias_ridge,'--r',label='Bias-Test (Ridge)',linewidth=1)
#plt.semilogx(lmbdas, train_bias_lasso, 'g',label='Bias-Train (LASSO)')
#plt.semilogx(lmbdas, test_bias_lasso, '--g',label='Bias-Test (LASSO)')
plt.semilogx(lmbdas, train_var_leastsq, ':b',label='Variance-Train (OLS)')
plt.semilogx(lmbdas, test_var_leastsq,'.b',label='Variance-Test (OLS)')
#plt.semilogx(lmbdas, train_var_ridge,':r',label='Variance-Train (Ridge)',linewidth=1)
#plt.semilogx(lmbdas, test_var_ridge,'.r',label='Variance-Test (Ridge)',linewidth=1)
#plt.semilogx(lmbdas, train_var_lasso, ':g',label='Variance-Train (LASSO)')
#plt.semilogx(lmbdas, test_var_lasso, '.g',label='Variance-Test (LASSO)')
fig = plt.gcf()
fig.set_size_inches(10.0, 6.0)
#plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
#           linewidth=3, label='Optimum on test')
plt.legend(loc='lower left',fontsize=16)
#plt.ylim([-0.01, 1.01])
plt.xlim([min(lmbdas), max(lmbdas)])
plt.xlabel(r'$\lambda$',fontsize=16)
plt.ylabel('Bias-Variance',fontsize=16)
plt.tick_params(labelsize=16)
plt.show()
# + ipub={"code": {"asfloat": true, "caption": "Performance Ridge bias/variance", "format": {}, "label": "code:performanceRidgebiasvar", "placement": "H", "widefigure": false}, "figure": {"caption": "Performance Ridge bias/variance", "label": "fig:performanceRidgebiasvar"}}
# Plot our bias-variance on both the training and test data
# Ridge only (other methods commented out for quick toggling)
#plt.semilogx(lmbdas, train_bias_leastsq, 'b',label='Bias-Train (OLS)')
#plt.semilogx(lmbdas, test_bias_leastsq,'--b',label='Bias-Test (OLS)')
plt.semilogx(lmbdas, train_bias_ridge,'r',label='Bias-Train (Ridge)',linewidth=1)
plt.semilogx(lmbdas, test_bias_ridge,'--r',label='Bias-Test (Ridge)',linewidth=1)
#plt.semilogx(lmbdas, train_bias_lasso, 'g',label='Bias-Train (LASSO)')
#plt.semilogx(lmbdas, test_bias_lasso, '--g',label='Bias-Test (LASSO)')
#plt.semilogx(lmbdas, train_var_leastsq, ':b',label='Variance-Train (OLS)')
#plt.semilogx(lmbdas, test_var_leastsq,'.b',label='Variance-Test (OLS)')
plt.semilogx(lmbdas, train_var_ridge,':r',label='Variance-Train (Ridge)',linewidth=1)
plt.semilogx(lmbdas, test_var_ridge,'.r',label='Variance-Test (Ridge)',linewidth=1)
#plt.semilogx(lmbdas, train_var_lasso, ':g',label='Variance-Train (LASSO)')
#plt.semilogx(lmbdas, test_var_lasso, '.g',label='Variance-Test (LASSO)')
fig = plt.gcf()
fig.set_size_inches(10.0, 6.0)
#plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
#           linewidth=3, label='Optimum on test')
plt.legend(loc='lower left',fontsize=16)
#plt.ylim([-0.01, 1.01])
plt.xlim([min(lmbdas), max(lmbdas)])
plt.xlabel(r'$\lambda$',fontsize=16)
plt.ylabel('Bias-Variance',fontsize=16)
plt.tick_params(labelsize=16)
plt.show()
# + ipub={"code": {"asfloat": true, "caption": "Performance Lasso bias/variance", "format": {}, "label": "code:performanceLassobiasvar", "placement": "H", "widefigure": false}, "figure": {"caption": "Performance Lasso bias/variance", "label": "fig:performanceLassobiasvar"}}
# Plot our bias-variance on both the training and test data
# LASSO only (other methods commented out for quick toggling)
#plt.semilogx(lmbdas, train_bias_leastsq, 'b',label='Bias-Train (OLS)')
#plt.semilogx(lmbdas, test_bias_leastsq,'--b',label='Bias-Test (OLS)')
#plt.semilogx(lmbdas, train_bias_ridge,'r',label='Bias-Train (Ridge)',linewidth=1)
#plt.semilogx(lmbdas, test_bias_ridge,'--r',label='Bias-Test (Ridge)',linewidth=1)
plt.semilogx(lmbdas, train_bias_lasso, 'g',label='Bias-Train (LASSO)')
plt.semilogx(lmbdas, test_bias_lasso, '--g',label='Bias-Test (LASSO)')
#plt.semilogx(lmbdas, train_var_leastsq, ':b',label='Variance-Train (OLS)')
#plt.semilogx(lmbdas, test_var_leastsq,'.b',label='Variance-Test (OLS)')
#plt.semilogx(lmbdas, train_var_ridge,':r',label='Variance-Train (Ridge)',linewidth=1)
#plt.semilogx(lmbdas, test_var_ridge,'.r',label='Variance-Test (Ridge)',linewidth=1)
plt.semilogx(lmbdas, train_var_lasso, ':g',label='Variance-Train (LASSO)')
plt.semilogx(lmbdas, test_var_lasso, '.g',label='Variance-Test (LASSO)')
fig = plt.gcf()
fig.set_size_inches(10.0, 6.0)
#plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
#           linewidth=3, label='Optimum on test')
plt.legend(loc='lower left',fontsize=16)
#plt.ylim([-0.01, 1.01])
plt.xlim([min(lmbdas), max(lmbdas)])
plt.xlabel(r'$\lambda$',fontsize=16)
plt.ylabel('Bias-Variance',fontsize=16)
plt.tick_params(labelsize=16)
plt.show()
# -
# ## Part c) Determine the phase of the two-dimensional Ising model.
# +
import glob
import os
import numpy as np
import pickle
from sklearn.model_selection import train_test_split
np.random.seed(1) # shuffle random seed generator
# Ising model parameters
L=40 # linear system size
J=-1.0 # Ising interaction
T=np.linspace(0.25,4.0,16) # set of temperatures
T_c=2.26 # Onsager critical temperature in the TD limit
##### prepare training and test data sets
###### define ML parameters
num_classes=2
train_to_test_ratio=0.5 # training samples
# path to data directory
path_to_data=os.path.expanduser('.')+'/data/'
# load data
file_name = "Ising2DFM_reSample_L40_T=All.pkl" # this file contains 16*10000 samples taken in T=np.arange(0.25,4.0001,0.25)
# context manager closes the handle (the original left it open)
with open(path_to_data+file_name,'rb') as file:
    data = pickle.load(file) # pickle reads the file and returns the Python object (1D array, compressed bits)
data = np.unpackbits(data).reshape(-1, 1600) # Decompress array and reshape for convenience
# BUG FIX: np.unpackbits returns uint8, so the -1 assignments below would wrap
# to 255 (or raise on recent NumPy). Cast to a signed integer type first.
data = data.astype('int')
#data[np.where(data==0)]=-1 # map 0 state to -1 (Ising variable can take values +/-1)
file_name = "Ising2DFM_reSample_L40_T=All_labels.pkl" # this file contains 16*10000 samples taken in T=np.arange(0.25,4.0001,0.25)
with open(path_to_data+file_name,'rb') as file:
    labels = pickle.load(file) # binary phase labels (1D array)
# divide data into ordered, critical and disordered
# samples are sorted by temperature: T < T_c ordered, near T_c critical, T > T_c disordered
X_ordered=data[:70000,:]
Y_ordered=labels[:70000]
X_critical=data[70000:100000,:]
Y_critical=labels[70000:100000]
X_disordered=data[100000:,:]
Y_disordered=labels[100000:]
X_ordered[np.where(X_ordered==0)]=-1 # map 0 state to -1 (Ising variable can take values +/-1)
X_critical[np.where(X_critical==0)]=-1 # map 0 state to -1 (Ising variable can take values +/-1)
X_disordered[np.where(X_disordered==0)]=-1 # map 0 state to -1 (Ising variable can take values +/-1)
del data,labels
# define training and test data sets
# the critical samples are excluded from training/testing and kept aside
X=np.concatenate((X_ordered,X_disordered))
Y=np.concatenate((Y_ordered,Y_disordered))
# pick random data points from ordered and disordered states
# to create the training and test sets
test_size = 1. - train_to_test_ratio
X_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=test_size) #train_size=train_to_test_ratio)
# full data set
X=np.concatenate((X_critical,X))
Y=np.concatenate((Y_critical,Y))
print('X_train shape:', X_train.shape)
print('Y_train shape:', Y_train.shape)
print()
print(X_train.shape[0], 'train samples')
print(X_critical.shape[0], 'critical samples')
print(X_test.shape[0], 'test samples')
# cache the split so the neural-network notebooks reuse the exact same data
with open("inputNN", 'wb') as file:
    pickle.dump({'X_train': X_train, 'Y_train': Y_train, 'X_test': X_test, 'Y_test': Y_test}, file)
# -
###### apply logistic regression
import logisRegresANA
import importlib
importlib.reload(logisRegresANA)
# self-programmed logistic regression; presumably returns the fitted weight
# vector -- verify against logisRegresANA.py
weights = logisRegresANA.logistic_reg(X_train, Y_train, epochs= 100, lr=0.001)
# simptest evaluates the fitted weights on a given data set (classification error)
error_train = logisRegresANA.simptest(weights, X_train, Y_train)
error_test = logisRegresANA.simptest(weights, X_test, Y_test)
# ## Part d) Regression analysis of the one-dimensional Ising model using neural networks.
# %cat project2re.py
# Following are the biases and the weights from the neural network with one hidden layer and 10 neurons in the hidden layer.
# Load the regression network's trained parameters (10 hidden neurons).
file = open("RstateNN", 'rb')
data = pickle.load(file)
file.close()
biases = data['biases']
weights = data['weights']
#print('biases ',biases)
#print('weights ', weights)
# The resulting biases and weights are numpy.MaskedArray to handle different shapes inside the array.
#evaluating the results:
import importlib
import logisRegresANA
importlib.reload(logisRegresANA)
# evaluater returns (R2, MSE, bias, variance) for the given set
R2_train, MSE_train, bias_train, var_train = logisRegresANA.evaluater(X_train, Y_train, biases, weights)
R2_test, MSE_test, bias_test, var_test = logisRegresANA.evaluater(X_test, Y_test, biases, weights)
print('In training, ')
print('R2 score is ', R2_train)
print('MSE is ', MSE_train)
print('bias is ', bias_train)
print('variance is ', var_train)
print('In the test set, ')
print('R2 score is ', R2_test)
print('MSE is ', MSE_test)
print('bias is ', bias_test)
print('variance is ', var_test)
# #### model with 20 neurons in one hidden layer:
# The model with 20 neurons in one hidden layer instead of 10 was also tested.
import pickle
# Same evaluation as above, but for the wider network (20 hidden neurons).
file = open("stateNN2", 'rb')
data = pickle.load(file)
file.close()
biases2 = data['biases']
weights2 = data['weights']
#print('biases ',biases2)
#print('weights ', weights2)
#evaluating the results:
import importlib
import logisRegresANA
importlib.reload(logisRegresANA)
R2_train, MSE_train, bias_train, var_train = logisRegresANA.evaluater(X_train,Y_train,biases2,weights2)
R2_test, MSE_test, bias_test, var_test = logisRegresANA.evaluater(X_test,Y_test,biases2,weights2)
print('In training, ')
print('R2 score is ', R2_train)
print('MSE is ', MSE_train)
print('bias is ', bias_train)
print('variance is ', var_train)
print('In the test set, ')
print('R2 score is ', R2_test)
print('MSE is ', MSE_test)
print('bias is ', bias_test)
print('variance is ', var_test)
# It is visible that the MSE is high in both neural networks for regression. The R2 score is low.
# ## Part e) Classifying the Ising model phase using neural networks.
# %cat project2.py
# ### Following are the biases and the weights from the neural network with one hidden layer and 10 neurons in the hidden layer.
import pickle
# Load the classification network's trained parameters (10 hidden neurons).
file = open("stateNN", 'rb')
data = pickle.load(file)
file.close()
biases = data['biases']
weights = data['weights']
#print('biases ',biases)
#print('weights ', weights)
# Restore the cached train/test split saved by the data-preparation cell.
file = open("inputNN", 'rb')
data = pickle.load(file)
file.close()
X_train = data['X_train']
Y_train = data['Y_train']
X_test = data['X_test']
Y_test = data['Y_test']
#evaluating the results:
import importlib
import logisRegresANA
importlib.reload(logisRegresANA)
# evaluate returns the classification accuracy on the given set
print('Accuracy in training is ',logisRegresANA.evaluate(X_train, Y_train, biases, weights))
print('Accuracy in test is ',logisRegresANA.evaluate(X_test, Y_test, biases, weights))
# #### model with 20 neurons in one hidden layer:
# The model with 20 neurons in one hidden layer instead of 10, resulted in higher accuracy.
import pickle
# Same evaluation as above, but for the wider network (20 hidden neurons).
file = open("stateNN2", 'rb')
data = pickle.load(file)
file.close()
biases2 = data['biases']
weights2 = data['weights']
#print('biases ',biases2)
#print('weights ', weights2)
# Restore the cached train/test split saved by the data-preparation cell.
file = open("inputNN", 'rb')
data = pickle.load(file)
file.close()
X_train = data['X_train']
Y_train = data['Y_train']
X_test = data['X_test']
Y_test = data['Y_test']
#evaluating the results:
import importlib
import logisRegresANA
importlib.reload(logisRegresANA)
print('Accuracy in training is ',logisRegresANA.evaluate(X_train, Y_train, biases2, weights2))
print('Accuracy in test is ',logisRegresANA.evaluate(X_test, Y_test, biases2, weights2))
#
#
#
# ## Part f) Critical evaluation of the various algorithms.
# ### Regression
# The neural network for regression was trained with ten (10) neurons in one hidden layer. It was assumed that a larger number of neurons would reduce the errors, so a wider network was also tested.
#
# ### Classification
# The accuracy of the binary classification with the neural network was higher than with the logistic regression. The neural network was trained with only ten (10) neurons in one hidden layer, first. Supposing that a higher amount of neurons or an additional hidden layer could perform even better, a second neural network with twenty (20) neurons in one hidden layer was trained. The accuracy increased in training from 0.618 to 0.708 with the double amount of neurons in the hidden layer.
#
# Both neural networks for classification perform better than the logistic regression, which had an accuracy of 0.53. For regression, the neural networks outperform the linear regression methods on some of the error measures.
#
# The self-programmed neural network does not run in my personal computer for the datasets proposed for the Ising model. An alternative solution was to run the codes in Abel.
# Therefore, in both cases, regression and classification, the neural networks were trained in Abel. An example slurm script is available in the repository.
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="I852UGrI5TKS"
from google.colab import drive
# Mount Google Drive so the dataset and the saved model are reachable under /content/drive.
drive.mount('/content/drive')
# + id="pZ-M2oI45ioi"
import os
from keras import models
import cv2
from google.colab.patches import cv2_imshow
# + id="B1IZwBKf5pcO"
# Dataset link: https://drive.google.com/drive/folders/1RNOg9Au19eSA5xKd-Sx_GdAm6k1j-fCQ?usp=sharing
# All subsequent relative paths (model file, test_data/...) resolve inside this folder.
os.chdir('./drive/My Drive/signature_data_one_shot')
# + colab={"base_uri": "https://localhost:8080/"} id="PD8FipE05q8W" outputId="9db87789-6cf9-4f65-a7d7-cb01239c489b"
# !ls
# + [markdown] id="IqRRtRWn5xJd"
# #Loading trained model
# + id="EF1bz0jY5sbY"
# Load the pre-trained one-shot signature-verification Keras model from Drive.
model = models.load_model('signature_forgery_one_shot.h5')
# + [markdown] id="Xn7v2ioU0hPG"
# #Helper functions
# + id="CMWLtFHN8upk"
def show_images(path_img_1, path_img_2):
    """Display the two signature images (loaded as grayscale) in the Colab output."""
    for image_path in (path_img_1, path_img_2):
        cv2_imshow(cv2.imread(image_path, cv2.IMREAD_GRAYSCALE))
# + id="Tr5V7ufl8T5h"
def _load_signature(path):
    """Read one signature image as grayscale, shaped and scaled for the model.

    Returns a float32 array of shape (1, 268, 650, 1) with values in [0, 1].
    """
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    # batch of one 268x650 single-channel image, normalized to [0, 1]
    img = img.reshape((1, 268, 650, 1))
    return img.astype('float32') / 255


def check_forgery(path_img_1, path_img_2):
    """Classify whether two signature images come from the same signer.

    The siamese model outputs a similarity score in [0, 1]; scores of 0.5
    or above are treated as genuine.
    """
    img1 = _load_signature(path_img_1)
    img2 = _load_signature(path_img_2)
    if model.predict((img1, img2))[0][0] >= 0.5:
        return 'Genuine Signatures'
    else:
        return 'Forged Signatures'
# + [markdown] id="V4dLeqJj7hGJ"
# #Checking Forgery using the model
# + colab={"base_uri": "https://localhost:8080/"} id="v3CiK8ap7mxH" outputId="45dc8d35-bd52-4cfe-cd44-8edd34949731"
# !ls test_data
# + colab={"base_uri": "https://localhost:8080/"} id="eHtvZZdI73H0" outputId="ffcbc3ff-225e-470e-89a5-4398343599ad"
# 063 contains genuine signatures of a same person
# !ls test_data/063
# + colab={"base_uri": "https://localhost:8080/"} id="eHQlRNva78MX" outputId="b5a97d99-3cc4-400f-982c-32d6d00db85f"
# 063_forg contains forged signatures of the same person
# !ls test_data/063_forg
# + colab={"base_uri": "https://localhost:8080/", "height": 553} id="BWVFbg5f85d6" outputId="66ad24a1-694a-474f-a776-70e779dc0e82"
# visualizing the two images
show_images('test_data/063/01_063.png', 'test_data/063/02_063.png')
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="5ANzfYOc9Kd9" outputId="84d8ced2-f64a-4ca0-895b-fd86701c21d2"
# Checking Forgery
check_forgery('test_data/063/01_063.png', 'test_data/063/02_063.png')
# + colab={"base_uri": "https://localhost:8080/", "height": 553} id="mCyUG_-k-df-" outputId="d8d90583-86d2-4319-c619-ac47eed52ed8"
show_images('test_data/063/01_063.png', 'test_data/063_forg/01_0104063.PNG')
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="AapWvCE1-hT-" outputId="bb78057e-edb9-44da-cc8c-b342a2f3c8b1"
check_forgery('test_data/063/01_063.png', 'test_data/063_forg/01_0104063.PNG')
# + colab={"base_uri": "https://localhost:8080/"} id="z7P4RDDaAagP" outputId="57e2712c-4920-4257-ff5c-adaa392c7e45"
# !ls test_data/064
# + colab={"base_uri": "https://localhost:8080/"} id="jp9LtGODAbMB" outputId="fa753fd9-aabf-4d97-c561-d3e7c058824f"
# !ls test_data/064_forg
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="NipOShH6AciC" outputId="a199f404-6757-4cdb-8ba4-3dd665db8203"
check_forgery('test_data/064/05_064.png', 'test_data/064_forg/01_0203064.PNG')
| checking_forgery.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SF-133 Budget Execution
# ## Background
#
# A handful of required DATA Act elements are found on the [Report on Budget Execution and Budgetary Resources (SF-133)](http://www.whitehouse.gov/sites/default/files/omb/assets/a11_current_year/s130.pdf).
#
# This notebook examines the quarterly SF-133s published by OMB and matches them to the following DATA Act elements:
# * Amount of budget authority appropriated
# * Obligated amount
# * Unobligated amount
# * ~~Amount of other budgetary resources~~
# * Outlay
#
# This notebook **does not** attempt to recreate the SF-133 from individual agency account trial balance submissions (via the USSGL <--> SF-133 crosswalk).
import pandas as pd
from pyquery import PyQuery as pq
from lxml import etree
from urllib.parse import urljoin
from slugify import slugify
# ## Get SF-133 Raw Data
#
# OMB maintains copies of SF-133s on its public MAX portal:
# [Public MAX portal for Reports on Budget Execution and Budgetary Resources](https://max.omb.gov/maxportal/document/SF133/Budget/FY%202014%20-%20SF%20133%20Reports%20on%20Budget%20Execution%20and%20Budgetary%20Resources.html)
try:
    # Use the cached concatenation written by a previous run when available.
    sf133 = pd.read_csv('data/sf133_all.csv')
except FileNotFoundError:
    # The original bare `except:` swallowed every error (including
    # KeyboardInterrupt); only a missing cache file should trigger re-download.
    # OMB MAX portal pages listing the SF-133 spreadsheets for each fiscal year.
    pages = {
        '2013' : 'https://max.omb.gov/maxportal/document/SF133/Budget/FY%202013%20-%20SF%20133%20Reports%20on%20Budget%20Execution%20and%20Budgetary%20Resources.html',
        '2014' : 'https://max.omb.gov/maxportal/document/SF133/Budget/FY%202014%20-%20SF%20133%20Reports%20on%20Budget%20Execution%20and%20Budgetary%20Resources.html',
        # bug fix: the '2015' entry previously pointed at the FY 2013 page
        '2015' : 'https://max.omb.gov/maxportal/document/SF133/Budget/FY%202015%20-%20SF%20133%20Reports%20on%20Budget%20Execution%20and%20Budgetary%20Resources.html'
    }
    #generate list of quarterly SF-133 URLs
    #(quarterly reports are .xls and monthly reports are .xlsx,
    #which makes it easier to grab the right ones)
    urls = []
    for year in pages:
        html = pq(pages[year])
        links = html('a[href$="xls"]')
        for link in links:
            urls.append(urljoin(html.base_url, pq(link).attr['href']))
    #concatenate the quarterly SF-133s
    sf133 = pd.DataFrame()
    for url in urls:
        # NOTE(review): `sheetname` was renamed `sheet_name` in pandas 0.21 —
        # change this if running on a modern pandas.
        df = pd.read_excel(url, sheetname=0)
        print ('adding {} rows from {}'.format(len(df.index), url))
        sf133 = pd.concat([sf133, df])
    #cleanup column names to snake_case so later attribute access is uniform
    sf133.columns = [slugify(
        x, to_lower = True, separator = '_') for x in sf133.columns.values]
    #save concatenated sf133 so later runs can skip the download
    sf133.to_csv('data/sf133_all.csv', index=False)
# line numbers arrive as mixed types; normalize to str for the crosswalk join
sf133.lno = sf133.lno.astype(str)
# ## SF133 <--> DATA Act Crosswalk
#
# We've already done some work to match SF-133 lines to required
# DATA Act elements. This information (along with some detailed mapping
# info to the US General Ledger) is in a .csv file that ships with this notebook.
#
# Most information in the file is related to the General Ledger mapping, so we'll
# clean things up to get only what we want: the name of a DATA Act
# element and its corresponding line on the SF-133
# Load the SF-133 <--> DATA Act / general-ledger mapping that ships with the notebook.
crosswalk = pd.read_csv('data/data_act_gl_mapping.csv')
# Normalize column headers to snake_case (matches the sf133 frame's columns).
crosswalk.columns = [slugify(
    x, to_lower=True, separator='_') for x in crosswalk.columns.values]
# 'data_act_element' holds "number - name" strings; split once on the first
# " - " into separate element-number and element-name columns.
crosswalk['da_element_number'], crosswalk['da_element_name'] = zip(
    *crosswalk['data_act_element'].apply(lambda x: x.split(' - ', 1)))
# Keep only the element <--> SF-133 line pairs; the remaining columns are
# general-ledger detail not needed here.
crosswalk = crosswalk[['da_element_number', 'da_element_name', 'sf_133_line_number']]
crosswalk.drop_duplicates(inplace=True)
# Now we're left with a handful of DATA Act elements that can be
# derived from the SF-133
crosswalk
# ## Get DATA Act-Related Subset of SF-133
#
# Create a subset of the SF-133 limited to DATA Act lines.
# Inner join (pd.merge default) of SF-133 rows to the crosswalk on line
# number, keeping only lines that map to a DATA Act element.
sf133_data_act = pd.merge(
    sf133, crosswalk,
    left_on = 'lno',
    right_on = 'sf_133_line_number'
)
# ## Explore Data Act-related Subset of SF-133
#
# (more detailed analysis coming soon)
# ### Columns & Sample Data
sf133_data_act.columns
subset = pd.concat([sf133_data_act.head(15), sf133_data_act.tail(15)])
subset
| Budget Execution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Performance data structures operations
# ## List versus set membership
# List membership is O(n): each `in` test scans up to a million elements.
value_list = list(range(1_000_000))
# %timeit 2_013_301 in value_list
# Set membership is O(1) on average thanks to hashing — same data, same test.
value_set = set(range(1_000_000))
# %timeit 2_013_301 in value_set
| Python/DataStructures/performance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Week 06, Worksheet 1: Files
#
# `string`s are all sorts of fun. That is, until we run out of things to come up with -- which happens pretty fast. (Even I can entertain myself for only so long: just about 1 minute.)
#
# More often than not, using `string`s (and the practice of programming at-large) is the practice of working with files, or information stored _outside_ of a program. Here, we use the `open` function to access the file. Our use of this function features two parts:
#
# ```python
# # 1. File's path (as string)
# # |
# open(filename, mode)
# # |
# # 2. File mode (see table below)
# ```
# 1. The `path` represents the location of the file
# 2. The `mode` tells the function what kinds of work we might do on or with the file
#
# Modes include:
#
# |Mode |Purpose |
# |-----|--------|
# |`r` |Read a file|
# |`w` |Write to a file|
# |`a` |Append to a file|
#
# This week, we'll focus on simply reading and writing to a file. First, let's read from a file:
# Fun fact: if you recognize any of the names in this poem, you've clearly seen the musical _Cats_.
# <NAME> wrote the book on which that musical is based; this is its first poem.
cat_poem = open("eliot_the_naming_of_cats.txt","r")
# Once our file is open, like any object in Python, we gain the ability to use certain _methods_ of it. In order to access the content, we need to use one of two methods which allow us to do so:
#
# 1. `read`
# 2. `readlines`
#
# (As in anything programming, there are many more ways to complete the above actions; as you extend your knowledge in programming -- particularly Python -- you'll discover them and likely opt to use other methods.)
#
# <div class="alert alert-block alert-warning">
# Using any one of these methods will <em>automatically consume the contents of the file</em>. This means that once one of these methods is used, the only way to access the file's contents is to <b>open</b> it again.
# </div>
#
# Below, we'll see what using the `read` method looks like.
text = cat_poem.read()
text
# Perhaps that wasn't quite what you expected -- however, sometimes we get exactly what we ask for. This is one of those times.
#
# `read` pulls in the exact contents of the file without respect to things like _control characters_ -- another important part of `string`s. Recall the use of `\t` in the opening weeks of the course. Here, we see a new one, `\n`, which indicates that a _new line_ should occur _exactly at that point_.
#
# Should we `print` the variable, we'll see the result of our _control character_ `\n`.
print(text)
# _Much_ better. But, I told you there were other ways to do this, so there must be some benefit. Let's look at `readlines`.
cat_poem = open("eliot_the_naming_of_cats.txt","r")
lines = cat_poem.readlines()
print(lines)
# Curious. We get a `list` of `string`s containing all of the lines in the file _including_ the `\n` control characters. This means that we can use useful _data structure_ operations on it like, for example:
# +
# Counting the number of lines in the poem
print("There are", len(lines), "lines in the poem.")
# Getting the second-to-last line
print(lines[-2])
# Getting a slice of the list from a spot in the middle
print(lines[10:15])
# -
# ### Formatting
#
# Let's say, for argument's sake, we want to print the poem as poems are traditionally seen: line by line with line numbers to the left of the line, separated by some space. Ok, here we go:
print("The Naming of Cats")
print(" <NAME>")
print()
line_num = 1
for line in lines:
print(line_num,line)
line_num += 1
# Cool and all, but what's with that space between the lines?
#
# Oh! We forgot that every line actually has an `\n` character after it. To rid ourselves of this `\n`, we'll use a new method: `rstrip`.
#
# _And_, that number to the left looks a bit close. We _could_ solve this using our typical approach to `print`ing things. Or, we could learn a _new_ way to format our strings: the `f-string`.
#
# The `f-string` allows us to create a _template_ `string` -- something that holds the variables we want to `print` and the formatting we want to use to `print` them using slightly modified syntax:
# +
line_num = 1
print("The Naming of Cats")
# See what I did here with \n?
print(" <NAME>\n")
for line in lines:
print(f"{line_num}\t{line.rstrip()}")
line_num += 1
# -
# But, even that's a bit clunky -- _there're too many line numbers!_. Let's print a number to the left of every _five_ (5).
# +
line_num = 1
print("The Naming of Cats")
print(" <NAME>\n")
for line in lines:
# The "%" is called the modulus -- it asks if there's any remainder after division
if line_num % 5 == 0:
print(f"{line_num}\t{line.rstrip()}")
else:
print(f"\t{line.rstrip()}")
line_num += 1
# -
# Much better!
#
# ## Final exercise
#
# Now, it's up to you to do the same to the poem contained in the file `w_b_yeats_the_cat_and_the_moon.txt`. Its title is "The Cat and the Moon," and its author: the famous Irish poet W.B. Yeats (pronounced "Yates"). Hopefully you learn something about this meditation on cat behavior.
#
# To reiterate our problem, we need to:
#
# * `open` the file
# * `print` the title and author on separate lines, followed by a blank new line
# * Then, using our new `f-string`s:
# * `print` the file's contents without spaces between the lines
# * `print` a line number to the left of the line _every five (5) lines_.
# * This will use the new thing we've seen -- the modulus `%` operator.
# * This should be separated by a tab
# * All lines, regardless of being numbered should be spaced in by one tab
# +
# A context manager guarantees the file is closed after reading
# (the original `open` was never paired with a `close`).
with open("w_b_yeats_the_cat_and_the_moon.txt", "r") as poem:
    poem_lines = poem.readlines()

print("The Cat and the Moon")
print("W.B. Yeats\n")

# enumerate(..., start=1) replaces the manual line counter
for line_num, line in enumerate(poem_lines, start=1):
    # number every fifth line; every line is indented by one tab
    if line_num % 5 == 0:
        print(f"{line_num}\t{line.rstrip()}")
    else:
        print(f"\t{line.rstrip()}")
| worksheets/Week 06 - Worksheet 1 - Syntax - File IO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# <hr style="margin-bottom: 40px;">
#
# # NumPy exercises
#
# +
# Import the numpy package under the name np
import numpy as np
# Print the numpy version and the configuration
print(np.__version__)
# -
# 
#
# ## Array creation
# ### Create a numpy array of size 10, filled with zeros.
# your code goes here
# + [solution]
#np.array([0] * 10)
np.zeros(10)
# -
# 
#
# ### Create a numpy array with values ranging from 10 to 49
# your code goes here
# + [solution]
np.arange(10,50)
# -
# 
#
# ### Create a numpy matrix of 2*2 integers, filled with ones.
# your code goes here
# + [solution]
np.ones([2,2], dtype=np.int)
# -
# 
#
# ### Create a numpy matrix of 3*2 float numbers, filled with ones.
# your code goes here
# + [solution]
np.ones([3,2], dtype=np.float)
# -
# 
#
# ### Given the X numpy array, create a new numpy array with the same shape and type as X, filled with ones.
# your code goes here
# + [solution]
# np.int was removed from NumPy (1.24); the builtin int gives the same dtype.
X = np.arange(4, dtype=int)
np.ones_like(X)
# -
# 
#
# ### Given the X numpy matrix, create a new numpy matrix with the same shape and type as X, filled with zeros.
# your code goes here
# + [solution]
# np.int was removed from NumPy (1.24); the builtin int gives the same dtype.
X = np.array([[1, 2, 3], [4, 5, 6]], dtype=int)
np.zeros_like(X)
# -
# 
#
# ### Create a numpy matrix of 4*4 integers, filled with fives.
# your code goes here
# + [solution]
np.ones([4,4], dtype=np.int) * 5
# -
# 
#
# ### Given the X numpy matrix, create a new numpy matrix with the same shape and type as X, filled with sevens.
# your code goes here
# + [solution]
# np.int was removed from NumPy (1.24); the builtin int gives the same dtype.
X = np.array([[2, 3], [6, 2]], dtype=int)
np.ones_like(X) * 7
# -
# 
#
# ### Create a 3*3 identity numpy matrix with ones on the diagonal and zeros elsewhere.
# +
# your code goes here
# + [solution]
#np.eye(3)
np.identity(3)
# -
# 
#
# ### Create a numpy array, filled with 3 random integer values between 1 and 10.
# your code goes here
# + [solution]
np.random.randint(10, size=3)
# -
# 
#
# ### Create a 3\*3\*3 numpy matrix, filled with random float values.
# your code goes here
# + [solution]
# np.random.random gives uniform floats in [0, 1), matching the old comment;
# np.random.randn (the previous active line) draws from the standard normal.
np.random.random((3, 3, 3))
# -
# 
#
# ### Given the X python list convert it to an Y numpy array
# your code goes here
# + [solution]
X = [1, 2, 3]
print(X, type(X))
Y = np.array(X)
print(Y, type(Y)) # different type
# -
# 
#
# ### Given the X numpy array, make a copy and store it on Y.
# your code goes here
# + [solution]
# np.int was removed from NumPy (1.24); the builtin int gives the same dtype.
X = np.array([5, 2, 3], dtype=int)
print(X, id(X))
Y = np.copy(X)
print(Y, id(Y)) # different id
# -
# 
#
# ### Create a numpy array with numbers from 1 to 10
# your code goes here
# + [solution]
np.arange(1, 11)
# -
# 
#
# ### Create a numpy array with the odd numbers between 1 to 10
# your code goes here
# + [solution]
np.arange(1, 11, 2)
# -
# 
#
# ### Create a numpy array with numbers from 1 to 10, in descending order.
# your code goes here
# + [solution]
np.arange(1, 11)[::-1]
# -
# 
#
# ### Create a 3*3 numpy matrix, filled with values ranging from 0 to 8
# your code goes here
# + [solution]
np.arange(9).reshape(3,3)
# -
# 
#
# ### Show the memory size of the given Z numpy matrix
# your code goes here
# + [solution]
Z = np.zeros((10,10))
print("%d bytes" % (Z.size * Z.itemsize))
# -
# 
#
# ## Array indexation
#
# ### Given the X numpy array, show it's first element
# your code goes here
# + [solution]
X = np.array(['A','B','C','D','E'])
X[0]
# -
# 
#
# ### Given the X numpy array, show it's last element
# your code goes here
# + [solution]
X = np.array(['A','B','C','D','E'])
#X[len(X)-1]
X[-1]
# -
# 
#
# ### Given the X numpy array, show it's first three elements
# your code goes here
# + [solution]
X = np.array(['A','B','C','D','E'])
X[0:3] # remember! elements start at zero index
# -
# 
#
# ### Given the X numpy array, show all middle elements
# your code goes here
# + [solution]
X = np.array(['A','B','C','D','E'])
X[1:-1]
# -
# 
#
# ### Given the X numpy array, show the elements in reverse position
# your code goes here
# + [solution]
X = np.array(['A','B','C','D','E'])
X[::-1]
# -
# 
#
# ### Given the X numpy array, show the elements in an odd position
# your code goes here
# + [solution]
X = np.array(['A','B','C','D','E'])
#X[[0, 2, -1]]
X[::2]
# -
# 
#
# ### Given the X numpy matrix, show the first row elements
# your code goes here
# + [solution]
X = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
])
X[0]
# -
# 
#
# ### Given the X numpy matrix, show the last row elements
# your code goes here
# + [solution]
X = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
])
X[-1]
# -
# 
#
# ### Given the X numpy matrix, show the first element on first row
# your code goes here
# + [solution]
X = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
])
#X[0][0]
X[0, 0]
# -
# 
#
# ### Given the X numpy matrix, show the last element on last row
# your code goes here
# + [solution]
X = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
])
#X[-1][-1]
X[-1, -1]
# -
# 
#
# ### Given the X numpy matrix, show the middle row elements
# your code goes here
# + [solution]
X = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
])
#X[1:-1][1:-1] wrong!
X[1:-1, 1:-1]
# -
# 
#
# ### Given the X numpy matrix, show the first two elements on the first two rows
# your code goes here
# + [solution]
X = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
])
#X[:2][:2] wrong!
#X[0:2, 0:2]
X[:2, :2]
# -
# 
#
# ### Given the X numpy matrix, show the last two elements on the last two rows
# your code goes here
# + [solution]
X = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
])
X[2:, 2:]
# -
# 
#
# ## Array manipulation
#
# ### Convert the given integer numpy array to float
# your code goes here
# + [solution]
X = [-5, -3, 0, 10, 40]
# np.float was removed from NumPy (1.24); the builtin float gives float64.
np.array(X, float)
# -
# 
#
# ### Reverse the given numpy array (first element becomes last)
# your code goes here
# + [solution]
X = [-5, -3, 0, 10, 40]
X[::-1]
# -
# 
#
# ### Order (sort) the given numpy array
# your code goes here
# + [solution]
X = [0, 10, -5, 40, -3]
X.sort()
X
# -
# 
#
# ### Given the X numpy array, set the fifth element equal to 1
# your code goes here
# + [solution]
X = np.zeros(10)
X[4] = 1
X
# -
# 
#
# ### Given the X numpy array, change the 50 with a 40
# your code goes here
# + [solution]
X = np.array([10, 20, 30, 50])
X[3] = 40
X
# -
# 
#
# ### Given the X numpy matrix, change the last row with all 1
# your code goes here
# + [solution]
X = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
])
X[-1] = np.array([1, 1, 1, 1])
X
# -
# 
#
# ### Given the X numpy matrix, change the last item on the last row with a 0
# your code goes here
# + [solution]
X = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
])
X[-1, -1] = 0
X
# -
# 
#
# ### Given the X numpy matrix, add 5 to every element
# your code goes here
# + [solution]
X = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
])
X + 5
# -
# 
#
# ## Boolean arrays _(also called masks)_
#
# ### Given the X numpy array, make a mask showing negative elements
# your code goes here
# + [solution]
X = np.array([-1,2,0,-4,5,6,0,0,-9,10])
mask = X <= 0
mask
# -
# 
#
# ### Given the X numpy array, get the negative elements
# your code goes here
# + [solution]
X = np.array([-1, 2, 0, -4, 5, 6, 0, 0, -9, 10])
mask = X <= 0
X[mask]
# -
# 
#
# ### Given the X numpy array, get numbers higher than 5
# your code goes here
# + [solution]
X = np.array([-1, 2, 0, -4, 5, 6, 0, 0, -9, 10])
mask = X > 5
X[mask]
# -
# 
#
# ### Given the X numpy array, get numbers higher than the elements mean
# your code goes here
# + [solution]
X = np.array([-1, 2, 0, -4, 5, 6, 0, 0, -9, 10])
mask = X > X.mean()
X[mask]
# -
# 
#
# ### Given the X numpy array, get numbers equal to 2 or 10
# your code goes here
# + [solution]
X = np.array([-1, 2, 0, -4, 5, 6, 0, 0, -9, 10])
mask = (X == 2) | (X == 10)
X[mask]
# -
# 
#
# ## Logic functions
#
# ### Given the X numpy array, return True if none of its elements is zero
# your code goes here
# + [solution]
X = np.array([-1, 2, 0, -4, 5, 6, 0, 0, -9, 10])
X.all()
# -
# 
#
# ### Given the X numpy array, return True if any of its elements is zero
# your code goes here
# + [solution]
X = np.array([-1, 2, 0, -4, 5, 6, 0, 0, -9, 10])
X.any()
# -
# 
#
# ## Summary statistics
# ### Given the X numpy array, show the sum of its elements
# your code goes here
# + [solution]
X = np.array([3, 5, 6, 7, 2, 3, 4, 9, 4])
#np.sum(X)
X.sum()
# -
# 
#
# ### Given the X numpy array, show the mean value of its elements
# your code goes here
# + [solution]
X = np.array([1, 2, 0, 4, 5, 6, 0, 0, 9, 10])
#np.mean(X)
X.mean()
# -
# 
#
# ### Given the X numpy matrix, show the sum of its columns
# your code goes here
# + [solution]
X = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
])
X.sum(axis=0) # remember: axis=0 columns; axis=1 rows
# -
# 
#
# ### Given the X numpy matrix, show the mean value of its rows
# your code goes here
# + [solution]
X = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]
])
X.mean(axis=1) # remember: axis=0 columns; axis=1 rows
# -
# 
#
# ### Given the X numpy array, show the max value of its elements
# your code goes here
# + [solution]
X = np.array([1, 2, 0, 4, 5, 6, 0, 0, 9, 10])
#np.max(X)
X.max()
# -
# 
| pandas/freecodecamp/freecodecamp-intro-to-numpy-master/freecodecamp-intro-to-numpy-master/3. NumPy exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import math
import re
from datetime import datetime
import warnings
warnings.filterwarnings("ignore")
# ### Online and Offline Training data
df_on = pd.read_csv('DataSets/ccf_online_stage1_train.csv')
df_off = pd.read_csv('DataSets/ccf_offline_stage1_train.csv')
print("Online Training Data Sample\nShape:"+str(df_on.shape))
df_on.head()
print("Offline Training Data Sample\nShape:"+str(df_off.shape))
df_off.head()
# ### Test Data(Offline)
df_test = pd.read_csv('DataSets/ccf_offline_stage1_test_revised.csv')
print("Testing Data(Offline) Sample\nShape:"+str(df_test.shape))
df_test.head()
# #### Converting Date to DateTime format
# +
#Online Training Data
# Raw dates are stored as yyyymmdd values; parse into datetime64 columns.
df_on['Date'] = pd.to_datetime(df_on["Date"],format='%Y%m%d')
df_on['Date_received'] = pd.to_datetime(df_on["Date_received"],format='%Y%m%d')
#Offline Training Data
df_off['Date'] = pd.to_datetime(df_off["Date"],format='%Y%m%d')
df_off['Date_received'] = pd.to_datetime(df_off["Date_received"],format='%Y%m%d')
#Test Data
# Only Date_received is parsed here — presumably the test file carries no
# redemption 'Date' column; verify against the dataset schema.
df_test['Date_received'] = pd.to_datetime(df_test["Date_received"],format='%Y%m%d')
# -
# ### Removing Duplicates from Online and Offline Training Data
# +
#Removing duplicates and giving frequency counts(Count) to each row
#Online
# Sentinel string (assumed absent from real data) lets groupby keep rows whose
# key columns are NaN; it is swapped back to NaN after the aggregation.
x = 'g8h.|$hTdo+jC9^@'
df_on_unique = (df_on.fillna(x).groupby(['User_id', 'Merchant_id', 'Action', 'Coupon_id', 'Discount_rate',
                        'Date_received', 'Date']).size().reset_index()
                        .rename(columns={0 : 'Count'}).replace(x, np.nan))  # np.NaN alias was removed in NumPy 2.0
df_on_unique["Date_received"]=pd.to_datetime(df_on_unique["Date_received"])
df_on_unique["Date"]=pd.to_datetime(df_on_unique["Date"])
print("Online Training Data Shape:"+str(df_on_unique.shape))
# +
#Offline
x = 'g8h.|$hTdo+jC9^@' #garbage value for nan values
# Same sentinel trick as the online frame: fill NaN so groupby keeps the rows,
# then restore NaN afterwards (np.nan — the np.NaN alias was removed in NumPy 2.0).
df_off_unique = (df_off.fillna(x).groupby(['User_id', 'Merchant_id', 'Coupon_id', 'Discount_rate', 'Distance',
                        'Date_received', 'Date']).size().reset_index()
                        .rename(columns={0 : 'Count'}).replace(x, np.nan))
df_off_unique["Date_received"]=pd.to_datetime(df_off_unique["Date_received"])
df_off_unique["Date"]=pd.to_datetime(df_off_unique["Date"])
print("Offline Training Data Shape:"+str(df_off_unique.shape))
# -
# #### Filling Nan for Distance (OFFLINE)
df_off_unique['Distance'].fillna(df_off_unique['Distance'].mean(), inplace=True)
df_off_unique['Distance'] = df_off_unique.Distance.astype(int)
# ### Converting Discount Ratio to Rate
# +
#Function to convert discount ratio to discount rate
def convert_discount(discount):
    """Map each discount descriptor to [original_price, money_off, rate].

    Two formats are handled:
      * "a:b"  -- spend a, get b off; rate = (a - b) / a, rounded to 3 places
      * "0.x"  -- a direct discount rate, normalized to an original price of 100
    Descriptors in any other format are skipped. (The original zip-based
    pairing could silently attach values to the wrong key when a descriptor
    was skipped; building the dict per key fixes that.)
    """
    discounts = {}
    for item in discount:
        if ':' in item:
            original, off = (int(part) for part in item.split(':'))
            rate = round((original - off) / original, 3)
            discounts[item] = [original, off, rate]
        elif '.' in item:
            rate = float(item)
            # round() guards against float truncation:
            # int(100 - 100*0.9) would give 9 instead of 10
            off = 100 - int(round(100 * rate))
            discounts[item] = [100, off, rate]
    return discounts
# convert_discount(list(df_of['Discount_rate']))
# -
#ONLINE DATA
#ONLINE DATA
# Explicit .copy() (as the offline branch already does) so the .loc
# assignments below modify an owned frame, not a view (SettingWithCopyWarning).
df_on_coupon = df_on_unique[(df_on_unique['Coupon_id'].isna()==False) & (df_on_unique['Coupon_id']!='fixed')].copy()
discounts_online = list(df_on_coupon['Discount_rate'].unique())
# Keep the raw descriptor in 'Discount'; replace 'Discount_rate' with the
# [original_price, money_off, rate] triple, then spread it into three columns.
df_on_coupon.loc[:,('Discount')] = df_on_coupon.loc[:,('Discount_rate')]
df_on_coupon.loc[:,('Discount_rate')] = df_on_coupon.loc[:,('Discount')].map(convert_discount(discounts_online))
df_on_coupon[['Original_price','Discounted_price','Rate']] = pd.DataFrame(df_on_coupon.Discount_rate.values.tolist(), index= df_on_coupon.index)
df_on_coupon.head()
#OFFLINE DATA
df_off_coupon = df_off_unique[(df_off_unique['Coupon_id'].isna()==False)].copy()
discounts_offline = list(df_off_coupon['Discount_rate'].unique())
df_off_coupon.loc[:,('Discount')] = df_off_coupon.loc[:,('Discount_rate')]
df_off_coupon['Discount_rate'] = df_off_coupon['Discount'].map(convert_discount(discounts_offline))
df_off_coupon[['Original_price','Discounted_price','Rate']] = pd.DataFrame(df_off_coupon.Discount_rate.values.tolist(), index= df_off_coupon.index)
df_off_coupon.head()
# ### Training Data (Online + Offline)
# DataFrame.append was deprecated and removed in pandas 2.0;
# pd.concat is the supported way to stack the online and offline frames.
df_train = pd.concat([df_on_unique, df_off_unique], sort=False)
df_train = df_train.sort_values(by = ['User_id'] )
# drop=True discards the old index directly (replaces reset_index + del).
df_train = df_train.reset_index(drop=True)
print("Training Data(Offline+Online) \nShape:"+str(df_train.shape))
df_train.head()
df_train_coupon = pd.concat([df_on_coupon, df_off_coupon], sort=False)
# ## DISCOUNT ANALYSIS
# +
# Coupons Released and redeemed and Discount Rate(OFFLINE)
fig,(ax1,ax2) = plt.subplots(nrows=1,ncols=2,figsize=(20,7))
plt.subplot(121)
ax1 = sns.countplot(df_off_coupon['Rate'])
ax1.set_xticklabels(ax1.get_xticklabels(),rotation=90)
for p in ax1.patches:
ax1.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.1, p.get_height()+50))
plt.xlabel('Discount Rate')
plt.ylabel('Count of Coupons released')
plt.title('Number of coupons released for each discount rate(OFFLINE)')
plt.subplot(122)
df_off_redeem_coupon= df_off_coupon[df_off_coupon['Date'].isna()==False]
ax2 = sns.countplot(df_off_redeem_coupon['Rate'])
ax2.set_xticklabels(ax2.get_xticklabels(),rotation=90)
for p in ax2.patches:
ax2.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.1, p.get_height()+50))
plt.xlabel('Discount Rate')
plt.ylabel('Count of Coupons redeemed')
plt.title('Number of coupons redeemed for each discount rate(OFFLINE)')
plt.show()
# +
# Coupons Released and redeemed and Discount Rate(ONLINE)
fig,(ax1,ax2) = plt.subplots(nrows=1,ncols=2,figsize=(20,7))
plt.subplot(121)
ax1 = sns.countplot(df_on_coupon['Rate'])
ax1.set_xticklabels(ax1.get_xticklabels(),rotation=90)
for p in ax1.patches:
ax1.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.1, p.get_height()+50))
plt.xlabel('Discount Rate')
plt.ylabel('Count of Coupons released')
plt.title('Number of coupons released for each discount rate(ONLINE)')
plt.subplot(122)
df_on_redeem_coupon= df_on_coupon[df_on_coupon['Date'].isna()==False]
ax2 = sns.countplot(df_on_redeem_coupon['Rate'])
ax2.set_xticklabels(ax2.get_xticklabels(),rotation=90)
for p in ax2.patches:
ax2.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.1, p.get_height()+50))
plt.xlabel('Discount Rate')
plt.ylabel('Count of Coupons redeemed')
plt.title('Number of coupons redeemed for each discount rate(ONLINE)')
plt.show()
# -
# Select the two columns with a list — groupby(...)['a','b'] tuple-indexing
# was deprecated and later removed in pandas.
discount_redemption = pd.DataFrame(df_train_coupon.groupby(['Rate'])[['Coupon_id','Date']].count()).reset_index()
discount_redemption.columns = ['Rate','Rate_Releases','Rate_Redeemed']
# Redemption ratio per discount rate: coupons used / coupons issued.
discount_redemption['Rate_Ratio'] = discount_redemption['Rate_Redeemed']/discount_redemption['Rate_Releases']
discount_redemption
discount_redemption.to_csv('DataSets/DatasetsCreated/rate_level.csv',index=False)
# ## Date Analysis
# #### Count of coupons released each day (OFFLINE)
plt.figure(figsize=(35,20))
ax = sns.countplot(df_off_coupon['Date_received'])
ax.set_xticklabels(ax.get_xticklabels(),rotation=90)
plt.xlabel('Dates : Jan 1, 2016 - Jun 30,2016 (182 days)')
plt.ylabel('Count of Coupon Released')
plt.title('Count of coupons Released each day')
plt.show()
# #### Count of coupons redeemed each day (OFFLINE)
plt.figure(figsize=(35,20))
ax = sns.countplot(df_off_redeem_coupon['Date'])
ax.set_xticklabels(ax.get_xticklabels(),rotation=90)
plt.xlabel('Dates : Jan 1, 2016 - Jun 30,2016 (182 days)')
plt.ylabel('Count of Coupon Redeemed')
plt.title('Count of coupons redeemed each day')
plt.show()
# #### Count of coupons released each day (ONLINE)
plt.figure(figsize=(35,20))
ax = sns.countplot(df_on_coupon['Date_received'])
ax.set_xticklabels(ax.get_xticklabels(),rotation=90)
plt.xlabel('Dates : Jan 1, 2016 - Jun 30,2016 (182 days)')
plt.ylabel('Count of Coupon Released')
plt.title('Count of coupons Released each day')
plt.show()
# #### Count of coupons redeemed each day (ONLINE)
plt.figure(figsize=(35,20))
ax = sns.countplot(df_on_redeem_coupon['Date'])
ax.set_xticklabels(ax.get_xticklabels(),rotation=90)
plt.xlabel('Dates : Jan 1, 2016 - Jun 30,2016 (182 days)')
plt.ylabel('Count of Coupon Redeemed')
plt.title('Count of coupons redeemed each day')
plt.show()
# ### Weekdays or Weekends for Date Received (Offline)
# +
#Receive Date
# dt.dayofweek: Monday=0 ... Sunday=6, so < 5 encodes a weekday (0), else weekend (1).
df_off_coupon.loc[:,('Weekend')] = np.where((df_off_coupon.loc[:,('Date_received')] .dt.dayofweek) < 5,0,1)
df_off_coupon.loc[:,('DayOfWeek')] = df_off_coupon.loc[:,('Date_received')].dt.dayofweek
df_off_coupon.loc[:,('Month')] = (df_off_coupon.loc[:,('Date_received')]).dt.month
#Purchase Date - same calendar features, suffixed "_p", based on the redemption date.
df_off_redeem_coupon.loc[:,('Weekend_p')] = np.where((df_off_redeem_coupon.loc[:,('Date')] .dt.dayofweek) < 5,0,1)
df_off_redeem_coupon.loc[:,('DayOfWeek_p')] = df_off_redeem_coupon.loc[:,('Date')].dt.dayofweek
df_off_redeem_coupon.loc[:,('Month_p')] = (df_off_redeem_coupon.loc[:,('Date')]).dt.month
df_off_redeem_coupon.head()
# +
# Side-by-side comparison: weekday/weekend split of releases vs redemptions.
fig,(ax1,ax2) = plt.subplots(nrows=1,ncols=2,figsize=(20,10))
plt.subplot(121)
# plt.figure(figsize=(7,4))
ax1 = sns.countplot(df_off_coupon['Weekend'])
ax1.set_xticklabels(['Weekdays','Weekend'],rotation=90)
# Annotate each bar with its count, slightly above the bar top.
for p in ax1.patches:
    ax1.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.1, p.get_height()+50))
plt.title('Number of Releases and Weekends')
plt.subplot(122)
# plt.figure(figsize=(7,4))
ax2 = sns.countplot(df_off_redeem_coupon['Weekend_p'])
ax2.set_xticklabels(['Weekdays','Weekend'],rotation=90)
for p in ax2.patches:
    ax2.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.1, p.get_height()+50))
plt.title('Count of Redemption and Weekends')
plt.show()
# +
# Side-by-side comparison per day of week (labels assume Monday=0 ordering).
fig,(ax1,ax2) = plt.subplots(nrows=1,ncols=2,figsize=(20,10))
plt.subplot(121)
# plt.figure(figsize=(7,4))
ax1 = sns.countplot(df_off_coupon['DayOfWeek'])
ax1.set_xticklabels(['MON','TUE','WED','THUR','FRI','SAT','SUN'],rotation=90)
for p in ax1.patches:
    ax1.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.1, p.get_height()+50))
plt.title('Number of Releases and Days')
plt.subplot(122)
# plt.figure(figsize=(7,4))
ax2 = sns.countplot(df_off_redeem_coupon['DayOfWeek_p'])
ax2.set_xticklabels(['MON','TUE','WED','THUR','FRI','SAT','SUN'],rotation=90)
for p in ax2.patches:
    ax2.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.1, p.get_height()+50))
plt.title('Count of Redemption and Days')
plt.show()
# +
# Side-by-side comparison per month (data covers Jan-Jun 2016 only).
fig,(ax1,ax2) = plt.subplots(nrows=1,ncols=2,figsize=(20,10))
plt.subplot(121)
# plt.figure(figsize=(7,4))
ax1 = sns.countplot(df_off_coupon['Month'])
ax1.set_xticklabels(['JAN','FEB','MAR','APR','MAY','JUN'],rotation=90)
for p in ax1.patches:
    ax1.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.1, p.get_height()+50))
plt.title('Number of Releases and Months')
plt.subplot(122)
# plt.figure(figsize=(7,4))
ax2 = sns.countplot(df_off_redeem_coupon['Month_p'])
ax2.set_xticklabels(['JAN','FEB','MAR','APR','MAY','JUN'],rotation=90)
for p in ax2.patches:
    ax2.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.1, p.get_height()+50))
plt.title('Count of Redemption and Months')
plt.show()
# -
# ### MONTHLY PURCHASES WITH COUPON
# Keep only rows that actually have a purchase date, then count per month.
df_train_purchase = df_train[df_train['Date'].notna()].copy().reset_index(drop=True)
df_train_purchase.loc[:,('PurchaseMonth')] = (df_train_purchase.loc[:,('Date')]).dt.month
df_train_purchase.head()
# Select the two columns with a list: tuple-style multi-column selection on a
# GroupBy ( ['a','b'] without the outer list ) was deprecated and later removed
# in pandas. count() ignores NaN, so Date_received counts coupon-backed purchases.
monthly_purchase = df_train_purchase.groupby('PurchaseMonth')[['Date_received','Date']].count().reset_index()
monthly_purchase.columns = ['Month','PurchasesWithCoupon','TotalPurchases']
monthly_purchase['Purchase_Ratio'] = monthly_purchase['PurchasesWithCoupon']/monthly_purchase['TotalPurchases']
monthly_purchase['PurchasesWithCoupon'].plot()
monthly_purchase['TotalPurchases'].plot()
plt.show()
# +
fig,(ax1,ax2) = plt.subplots(nrows=1,ncols=2,figsize=(15,6))
plt.subplot(121)
# plt.figure(figsize=(7,4))
ax1 = monthly_purchase['PurchasesWithCoupon'].plot()
ax1.set_xticklabels(['JAN','FEB','MAR','APR','MAY','JUN'],rotation=90)
# NOTE(review): Series.plot() draws a line, which creates no patches, so this
# annotation loop does nothing here — likely copied from the bar-plot cells above.
for p in ax1.patches:
    ax1.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.1, p.get_height()+50))
plt.title('PurchasesWithCoupon')
plt.subplot(122)
# plt.figure(figsize=(7,4))
ax2 =monthly_purchase['TotalPurchases'].plot()
ax2.set_xticklabels(['JAN','FEB','MAR','APR','MAY','JUN'],rotation=90)
for p in ax2.patches:
    ax2.annotate('{:.0f}'.format(p.get_height()), (p.get_x()+0.1, p.get_height()+50))
plt.title('TotalPurchases')
plt.show()
# -
monthly_purchase
# ## Date Level Features
# One row per release date, with engineered calendar features.
date_level = pd.DataFrame(df_off_coupon.groupby(['Date_received']).size().reset_index(name='ReleasesCount'))
# Flag "important" days: more than 10,000 coupons released.
date_level['ImpDay'] = [1 if x>10000 else 0 for x in date_level['ReleasesCount']]
date_level.loc[:,('Weekend')] = np.where((date_level.loc[:,('Date_received')].dt.dayofweek) < 5,0,1)
date_level.loc[:,('DayOfWeek')] = date_level.loc[:,('Date_received')].dt.dayofweek
date_level.head()
# Count of distinct coupon ids released per date.
unique_coupons_train = pd.DataFrame(df_off_coupon.groupby(['Date_received'])['Coupon_id'].nunique()
                                    .reset_index(name='UniqueReleasesCount'))
unique_coupons_train.head()
date_level = date_level.merge(unique_coupons_train,how='left',on='Date_received')
date_level.head()
date_level.columns
date_level['ReleasesCount'].describe()
# ## Date Level Features - TEST Data
# Same per-date feature construction as the training set, built from df_test.
date_level_test = pd.DataFrame(df_test.groupby(['Date_received']).size().reset_index(name='ReleasesCount'))
# Flag "important" days: more than 10,000 coupons released.
date_level_test['ImpDay'] = [1 if x>10000 else 0 for x in date_level_test['ReleasesCount']]
date_level_test.loc[:,('Weekend')] = np.where((date_level_test.loc[:,('Date_received')].dt.dayofweek) < 5,0,1)
date_level_test.loc[:,('DayOfWeek')] = date_level_test.loc[:,('Date_received')].dt.dayofweek
date_level_test.head()
unique_coupons_test = pd.DataFrame(df_test.groupby(['Date_received'])['Coupon_id'].nunique()
                                   .reset_index(name='UniqueReleasesCount'))
unique_coupons_test.head()
date_level_test = date_level_test.merge(unique_coupons_test,how='left',on='Date_received')
date_level_test.head()
date_level_test.columns
# DataFrame.append was deprecated and removed in pandas 2.0; pd.concat is the
# supported replacement and produces the same stacked frame.
date_level = pd.concat([date_level, date_level_test], sort=False)
date_level.to_csv('DataSets/DatasetsCreated/date_level.csv',index=False)
| Approach1/RateDateAnalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # K-Means from scratch visualised with 1D, 2D and 3D data
# > K-Means clustering algorithm implemented from scratch and the clustering process/progression visualised for 1D, 2D and 3D data
#
# - toc: false
# - branch: master
# - badges: true
# - comments: true
# - categories: [machine learning, jupyter]
# - image: images/kmeans/thumbnail.png
# - search_exclude: false
# ## Concept
# K-Means is an unsupervised clustering algorithm which is analogous to supervised classification algorithms. Due to the name, the K-Means algorithm is often confused with the supervised KNN (K Nearest Neighbours) algorithm, which is used for both classification and regression problems.
#
# As the name suggests, the K-Means algorithm comprises "**K**" "**Means**", which correspond to the number of clusters that the algorithm tries to find in the unlabeled data. Though the working of the algorithm is quite simple, the challenge lies in scaling the algorithm for large datasets and picking an appropriate measure for *distance*.
#
# Before we go into any more details, let us have a look at the steps that comprise the K-Means algorithm-
# 1. Inputs to the algorithm-
# 1. Data to cluster
# 2. Number of clusters to identify
# 3. Convergence criteria i.e. when to stop
# 1. Usually a tolerance value is given which is used to observe when the "Mean" position doesn't change any more
# 2. Other option is the maximum number of iterations to carry out
# 4. Function to use as a **Measure** - usually cartesian distance is used but when data is text or any other abstract data, special measures have to be chosen
# 2. Based on the data space i.e. the bounds (minimums and maximums) and the number of clusters to identify, that many random **Mean** points in the same space as the data are generated
# 3. Distance of all the data samples/points in the data space, say $\mathbb{R}^n$, with respect to **K** number of **Means** are calculated (i.e. if there are 10 samples and 2 clusters to find, the number of distance measures calculated are 20(1x10 + 1x10))
# 4. Based on the distances each data sample is associated to their closest **Mean**
# 5. Based on the associations made in step 4 the **Mean** values are updated by calculating the mean of all the values associated with one particular **Mean**. This is done for **K** number of **Means**
# 6. The steps 3 to 5 are then repeated either until the algorithm exceeds the maximum number of allowed iterations or until the values of **K Means** have settled down i.e. the change after update is less than the tolerance value specified in the input
#
# Next, we turn to the implementation and coding part of the algorithm and discuss the results.
# ## Code start
# ### Handling the imports
# +
# %matplotlib widget
import time
import IPython
import numpy as np
import pandas as pd
import numpy.linalg as LA
import matplotlib.pyplot as plt
from matplotlib import animation
from mpl_toolkits.mplot3d import Axes3D
from IPython.display import Video
plt.rcParams['figure.figsize'] = [8.0, 8.0]
plt.rcParams['figure.dpi'] = 100
# -
# ### K-Means class to hold data, methods to compute the distances, update the means, and plot the progress and results
class kmeans:
    """K-Means clustering implemented from scratch, with per-iteration
    matplotlib visualisation for 1D, 2D and 3D data.

    Sample data is generated internally (one Gaussian blob per cluster);
    `kmeans_init` draws the initial state and `kmeans_iter` performs and
    draws one assignment/update step (designed to be driven by
    matplotlib.animation.FuncAnimation).
    """
    def __init__(self, dimensions, sample_size, clusters, tolerance, max_iters):
        """
        Use the initialisation parameters as attributes of the object and create
        a matplotlib figure canvas on which each iteration progress can be drawn
        :param dimensions: Dimension of data
        :param sample_size: Not necessary for real data, here used for generating sample data
        :param clusters: Number of clusters to identify in the data, control the number of Means
        :param tolerance: The tolerance value
        :param max_iters: Maximum iterations to execute
        """
        self.dimensions = dimensions
        self.sample_size = sample_size
        self.clusters = clusters
        self.tolerance = tolerance
        self.max_iters = max_iters
        # One plotting colour per cluster; repeats after the first six entries.
        self.colors = ['y', 'r', 'b', 'm', 'k', 'c', 'b', 'm', 'k', 'c']
        if self.dimensions == 1:
            self.fig, self.ax = plt.subplots(1,1)
            self.sample_pts = np.array([[]])
            self.ax.grid(True)
        elif self.dimensions == 2:
            self.fig, self.ax = plt.subplots(1,1)
            self.sample_pts = np.array([[], []])
            self.ax.grid(True)
        elif self.dimensions == 3:
            self.fig = plt.figure(1, figsize=(8, 6))
            self.sample_pts = np.array([[], [], []])
            self.ax = Axes3D(self.fig, rect=(0.0, 0.0, .95, 1.0), elev=48, azim=134)
    def kmeans_init(self):
        """
        Generate sample data and draw the initial state of the data and display the initial position
        of the Means
        """
        ##################################################################################################################################
        # Creating clusters using normal distribution and random variance and mean
        # every cluster will have equal number of points
        ##################################################################################################################################
        for i in range(0, self.clusters):
            # Reseed per blob so each cluster gets a reproducible spread/offset.
            np.random.seed(int((-i) ** 2))
            tmp = np.random.randn(1, (self.sample_size // self.clusters) * self.dimensions) * np.random.randint(1, 10) + np.random.randint(-100, 100)
            self.sample_pts = np.hstack((self.sample_pts, tmp.reshape(self.dimensions, self.sample_size // self.clusters)))
        np.random.seed(22)
        self.previous_means = np.random.randn(self.clusters, self.dimensions) * np.random.randint(1, 12) # Randomly selected means i.e., cluster centers
        # print(f'Starting means are: {self.previous_means}')
        self.new_means = np.zeros((self.clusters, self.dimensions)) # To store the new means after every iteration
        ##################################################################################################################################
        # plot initial means and all data samples to see the distribution
        ##################################################################################################################################
        if self.dimensions == 1:
            self.ax.scatter(self.previous_means[:, 0], np.zeros((self.clusters, 1)), marker='o', c='r', label='Initial Means')
            self.ax.scatter(self.sample_pts[0, :], np.zeros((1, self.sample_size)),
                            marker='*') # Plotting all the points to see the clusters
        elif self.dimensions == 2:
            self.ax.scatter(self.previous_means[:, 0], self.previous_means[:, 1], marker='o', c='r', label='Initial Means')
            self.ax.scatter(self.sample_pts[0, :], self.sample_pts[1, :], marker='*') # Plotting all the points to see the clusters
        elif self.dimensions == 3:
            self.ax.scatter(self.previous_means[:, 0], self.previous_means[:, 1], self.previous_means[:, 2], marker='o', c='r',
                            label='Initial Means', depthshade=False)
            self.ax.scatter(self.sample_pts[0, :], self.sample_pts[1, :], self.sample_pts[2, :],
                            marker='*') # Plotting all the points to see the clusters
        self.ax.legend(loc='upper right')
    ##################################################################################################################################
    # Loop till convergence
    ##################################################################################################################################
    def kmeans_iter(self, iteration_count):
        """
        Iteration part of the algorithm which iterates until the tolerance criteria is met while
        limiting the maximum number of iterations to prevent infinite loops when the algorithm
        cannot associate a Mean value with a cluster
        """
        # Skip frame 0 (FuncAnimation's init frame) and stop once converged or
        # once max_iters is reached.
        if (abs(self.previous_means - self.new_means) > self.tolerance).any() and (iteration_count < self.max_iters) and (iteration_count != 0):
            print(f'Iteration number {iteration_count}')
            if iteration_count != 1:
                self.previous_means = self.new_means.copy()
            dist = pd.DataFrame()
            ##################################################################################################################################
            # Compute distances of all points with respect to each mean
            ##################################################################################################################################
            for i in range(0, self.clusters):
                # distance_to_mean_1_iter_1 naming used
                dist['dtm_' + str(i + 1) + f'_iter_{iteration_count}'] = LA.norm(
                    self.sample_pts - self.previous_means[i, :].T.reshape(self.dimensions, 1), axis=0)
            # Assign a data sample to the mean it is nearest to by extracting the digit in the name of the index i.e., column
            # name where the minimum value is found
            # dtm_{1}_iter_1
            # NOTE: .str[4] takes the single character after 'dtm_', so this
            # only works for at most 9 clusters.
            dist['assign_to_mean'] = dist.idxmin(axis=1).str[4]
            ##################################################################################################################################
            # compute the new means based on the classes assigned
            ##################################################################################################################################
            for i in range(0, self.clusters):
                indices = dist.assign_to_mean[dist.assign_to_mean == str(i + 1)]
                if self.dimensions > 1:
                    if len(indices.index) != 0:
                        self.new_means[i, :] = np.mean(self.sample_pts[:, indices.index], axis=1)
                    else:
                        # Re-initialise a mean if it is not associated with any data sample
                        self.new_means[i, :] = np.random.randn(1, self.dimensions) * 100
                else:
                    if len(indices.index) != 0:
                        self.new_means[i, 0] = np.mean(self.sample_pts[0, indices.index])
                    else:
                        # Re-initialise a mean if it is not associated with any data sample
                        self.new_means[i, 0] = np.random.randn(1, self.dimensions) * 100
            # print(f'New means are:{self.new_means}')
            ##################################################################################################################################
            # Plot the movement of the means
            ##################################################################################################################################
            if self.dimensions == 1:
                for i in range(0, self.clusters):
                    self.ax.plot([self.previous_means[i, 0], self.new_means[i, 0]],
                                 [0, 0], label='mean movement' if iteration_count == 1 else "", c=self.colors[i])
                    self.ax.scatter(self.new_means[i, 0], 0, marker='o', c='g', label='new Means' if i == 0 and iteration_count == 1 else "")
            elif self.dimensions == 2:
                for i in range(0, self.clusters):
                    self.ax.plot([self.previous_means[i, 0], self.new_means[i, 0]],
                                 [self.previous_means[i, 1], self.new_means[i, 1]],
                                 label='mean movement' if iteration_count == 1 else "", c=self.colors[i])
                    self.ax.scatter(self.new_means[i, 0], self.new_means[i, 1], marker='o', c='g',
                                    label='new Means' if i == 0 and iteration_count == 1 else "")
            elif self.dimensions == 3:
                for i in range(0, self.clusters):
                    self.ax.plot([self.previous_means[i, 0], self.new_means[i, 0]],
                                 [self.previous_means[i, 1], self.new_means[i, 1]],
                                 [self.previous_means[i, 2], self.new_means[i, 2]],
                                 label='mean movement' if iteration_count == 1 else "", c=self.colors[i])
                    self.ax.scatter(self.new_means[i, 0], self.new_means[i, 1], self.new_means[i, 2], marker='o', c='g',
                                    label='new Means' if i == 0 and iteration_count == 1 else "")
            self.ax.legend(loc='upper right')
            # iteration_count += 1
            # self.fig.canvas.draw()
            ##################################################################################################################################
            # Plot the clustering results upon convergence
            ##################################################################################################################################
            if (abs(self.previous_means - self.new_means) < self.tolerance).all():
                cluster_pts = []
                division = self.sample_size // self.clusters
                if self.dimensions == 1:
                    for i in range(0, self.clusters):
                        indices = dist.assign_to_mean[dist.assign_to_mean == str(i + 1)]
                        cluster_pts.append(len(indices.index))
                        self.ax.scatter(self.sample_pts[0, indices.index], np.zeros((1, cluster_pts[i])), marker='*',
                                        label=f'predicted cluster {i + 1}')
                        self.ax.scatter(self.sample_pts[0, i * division:(i + 1) * division - 1], np.zeros((1, division - 1)),
                                        marker='o', facecolors='none', edgecolors=self.colors[i], s=200, linewidths=2,
                                        label=f'real cluster {i + 1}')
                elif self.dimensions == 2:
                    for i in range(0, self.clusters):
                        indices = dist.assign_to_mean[dist.assign_to_mean == str(i + 1)]
                        cluster_pts.append(len(indices.index))
                        self.ax.scatter(self.sample_pts[0, indices.index], self.sample_pts[1, indices.index], marker='*',
                                        label=f'predicted cluster {i + 1}')
                        self.ax.scatter(self.sample_pts[0, i * division:(i + 1) * division - 1],
                                        self.sample_pts[1, i * division:(i + 1) * division - 1],
                                        marker='o', facecolors='none', edgecolors=self.colors[i], s=200, linewidths=2,
                                        label=f'real cluster {i + 1}')
                elif self.dimensions == 3:
                    for i in range(0, self.clusters):
                        indices = dist.assign_to_mean[dist.assign_to_mean == str(i + 1)]
                        cluster_pts.append(len(indices.index))
                        self.ax.scatter(self.sample_pts[0, indices.index], self.sample_pts[1, indices.index], self.sample_pts[2, indices.index],
                                        marker='*',
                                        label=f'predicted cluster {i + 1}')
                        self.ax.scatter(self.sample_pts[0, i * division:(i + 1) * division - 1],
                                        self.sample_pts[1, i * division:(i + 1) * division - 1],
                                        self.sample_pts[2, i * division:(i + 1) * division - 1],
                                        marker='o', label=f'real cluster {i + 1}', s=40)
                        # facecolors='none', edgecolors=self.colors[i], s=200, linewidths=2)
                ##################################################################################################################################
                # set title with the clustering results and show legend
                ##################################################################################################################################
                if self.dimensions < 3:
                    self.ax.set_title('Number of points in each cluster are: ' + str(cluster_pts))
                    self.ax.legend(loc='upper right')
                else:
                    self.ax.text2D(0.05, 0.95, 'Number of points in each cluster are: ' + str(cluster_pts), transform=self.ax.transAxes)
                    self.ax.legend(loc='upper right')
# ### Animation parameters
# +
# Half a frame per second, so each K-Means iteration is visible for 2 seconds.
fps = 0.5
# Requires ffmpeg to be installed and discoverable by matplotlib.
Writer = animation.writers['ffmpeg']
writer = Writer(fps=fps, metadata=dict(artist='Sai'), bitrate=1800)
# -
# ### 1D example of K-Means in action
# +
max_iterations = 5
kmeans1d = kmeans(dimensions=1, sample_size=100, clusters=2, tolerance=1e-8, max_iters=max_iterations)
# FuncAnimation drives kmeans_iter once per frame and the result is saved as a video.
animation.FuncAnimation(kmeans1d.fig, kmeans1d.kmeans_iter, init_func=kmeans1d.kmeans_init ,frames=max_iterations, interval=(1/fps)*1000, repeat=False).save('../images/kmeans/kmeans_1D.mp4', writer=writer);
# -
# In the video/animation below initially the data points in 1D space belonging to two clusters along with the initial random generated means are shown. After which the movement of means is shown and once the mean values are converged the results are shown. Since the data is being manually generated here, it is possible to verify how accurate the results are. As a last step for the verification, the predicted/identified clusters along with the supposed real clusters details are shown.
#
# In this particular case the K-Means algorithm clusters accurately with 50 1D data samples of each of the two clusters.
Video("../images/kmeans/kmeans_1D.mp4", embed=True)
# ### 2D example of K-Means in action
# +
max_iterations = 15
# The model must be bound to the same name the animation call uses below;
# the original cell created `kmeansnn2d` but animated `kmeans2d`, which
# raised a NameError.
kmeans2d = kmeans(dimensions=2, sample_size=600, clusters=4, tolerance=1e-8, max_iters=max_iterations)
animation.FuncAnimation(kmeans2d.fig, kmeans2d.kmeans_iter, init_func=kmeans2d.kmeans_init ,frames=max_iterations, interval=(1/fps)*1000, repeat=False).save('../images/kmeans/kmeans_2D.mp4', writer=writer);
# -
# Like in the previous case, here instead of 1D data samples, 2D data samples are being used to cluster with the objective of identifying 4 clusters in the data. The movement of one of the **Mean** values is haphazard/chaotic because when a **Mean** cannot associate with any of the data samples, the position of the **Mean** is again randomly initialised; this randomness ensures that exactly 4 clusters will be found even if the starting positions of the **Mean** values are not ideal.
#
# In this case we also get to see that the clustering is not 100% accurate, as the final clustering results associate 150, 150, 152 and 148 data samples with each cluster, which ideally should have been an even 150 for all 4 clusters.
Video("../images/kmeans/kmeans_2D.mp4", embed=True)
# ### 3D example of K-Means in action
# +
max_iterations = 8
kmeans3d = kmeans(dimensions=3, sample_size=300, clusters=3, tolerance=1e-8, max_iters=max_iterations)
# Same animation-driven loop as the 1D/2D demos, saved to a third video.
animation.FuncAnimation(kmeans3d.fig, kmeans3d.kmeans_iter, init_func=kmeans3d.kmeans_init ,frames=max_iterations, interval=(1/fps)*1000, repeat=False).save('../images/kmeans/kmeans_3D.mp4', writer=writer);
# -
# Like in previous cases, here instead of 1D or 2D data samples 3D data samples are being used to cluster with the objective of identifying 3 clusters in the data.
Video("../images/kmeans/kmeans_3D.mp4", embed=True)
# ## Remarks
# In this post/jupyter notebook the distance measure used is the "cartesian distance", but other distance measures like "cosine" distance can also be used.
# In this post/jupyter notebook the data used is also randomly generated numerical data, but when data is textual the implementation details vary.
| _notebooks/2021-01-15-K-Means.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pyhanlp import HanLP, JClass
CoNLLWord = JClass("com.hankcs.hanlp.corpus.dependency.CoNll.CoNLLWord")
NLPTokenizer = JClass("com.hankcs.hanlp.tokenizer.NLPTokenizer")
def describe_rel(word, result):
    """Append a human-readable description of *word*'s dependency relation
    to *result* (a list of strings).

    Subject and object relations get short labels, punctuation is skipped,
    and any other relation is rendered generically with its POS tag.
    """
    rel = word.DEPREL
    if rel == "标点符号":
        # Punctuation carries no semantic role — nothing to report.
        return
    if rel == "主谓关系":
        line = "\tactor: {}".format(word.LEMMA)
    elif rel == "动宾关系":
        line = "\tobject: {}".format(word.LEMMA)
    else:
        line = "\trel.{}({}): {}".format(word.POSTAG, word.DEPREL, word.LEMMA)
    result.append(line)
def get_pinyin(sentence):
    """Return *sentence*'s pinyin with tone marks, syllables joined by spaces."""
    # Load the Java Pinyin class (kept for parity with the original cell).
    Pinyin = JClass("com.hankcs.hanlp.dictionary.py.Pinyin")
    syllables = ["%s" % p.getPinyinWithToneMark()
                 for p in HanLP.convertToPinyinList(sentence)]
    return " ".join(syllables)
def parse_tree(sentence):
    """Dependency-parse *sentence* and return a multi-line summary string.

    Lines produced, in order: the core (root) word, descriptions of every
    word attached directly to the root, the NLP tokenization, the pinyin,
    the traditional-Chinese conversion, and the original sentence.
    """
    tree = HanLP.parseDependency(sentence)
    lines = []
    root_id = 0
    # First pass: locate the root word of the sentence.
    for node in tree.iterator():
        if node.HEAD == CoNLLWord.ROOT:
            root_id = node.ID
            lines.append("core: {} - {}".format(node.POSTAG, node.LEMMA))
    # Second pass: describe every word whose head is the root.
    for node in tree.iterator():
        if node.HEAD.ID == root_id:
            describe_rel(node, lines)
    lines.append("⊕ " + str(NLPTokenizer.analyze(sentence)))
    lines.append("ﺴ " + get_pinyin(sentence))
    lines.append("☫ " + HanLP.convertToTraditionalChinese(sentence))
    lines.append("% " + sentence)
    return '\n'.join(lines)
# Demo: parse a sample sentence and print the summary.
raw="苹果电脑可以运行开源阿尔法狗代码吗"
result=parse_tree(raw)
print(result)
# -
# NOTE(review): the cell below repeats the cell above verbatim (duplicated notebook cell).
raw="苹果电脑可以运行开源阿尔法狗代码吗"
result=parse_tree(raw)
print(result)
| notebook/procs-hanlp-pylib.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MNIST
# # 1. Training & Loading
# +
import numpy as np
import torch
from torch import nn
import flow
import train
import utils
import math
import h5py
# Set gobal variables.
rootFolder = "./demo/MNIST_relax_True_shift_True_T_300_depthLevel_1_l16_M3_H680/"
device = torch.device("cpu")
dtype = torch.float32
dataset = "./database/mnist.npz"
# Load paremeters
with h5py.File(rootFolder+"/parameter.hdf5","r") as f:
n = int(np.array(f["n"]))
numFlow = int(np.array(f["numFlow"]))
lossPlotStep = int(np.array(f["lossPlotStep"]))
hidden = int(np.array(f["hidden"]))
nlayers = int(np.array(f["nlayers"]))
nmlp = int(np.array(f["nmlp"]))
lr = int(np.array(f["lr"]))
batchSize = int(np.array(f["batchSize"]))
epochs = int(np.array(f["epochs"]))
K = int(np.array(f["K"]))
# Build the target.
from utils import MDSampler,load
dataset = load(dataset).to(device).to(dtype)
target = MDSampler(dataset)
# Rebuild the model.
def innerBuilder(num):
    """Build one RNVP flow over *num* dimensions with alternating random masks.

    Even layers draw a fresh random mask with exactly half the positions
    active; each odd layer uses the complement of the preceding layer's mask.

    :param num: dimensionality of the flow's input.
    :return: a flow.RNVP instance with `nlayers` coupling layers.
    """
    maskList = []
    b = None
    # Original code reused the loop variable `i` for the permutation-index
    # tensor, shadowing the layer counter; use a distinct name instead.
    for layer in range(nlayers):
        if layer % 2 == 0:
            b = torch.zeros(num)
            # Choose a random half of the positions to be active in this mask.
            idx = torch.randperm(b.numel()).narrow(0, 0, b.numel() // 2)
            b[idx] = 1
            b = b.reshape(1, num)
        else:
            b = 1 - b  # complement of the previous layer's mask
        maskList.append(b)
    maskList = torch.cat(maskList, 0).to(torch.float32)
    # Translation networks end with identity; scale networks end with a
    # learnable ScalableTanh, matching the original construction order.
    tNets = [utils.SimpleMLPreshape([num] + [hidden] * nmlp + [num],
                                    [nn.Softplus()] * nmlp + [None])
             for _ in range(nlayers)]
    sNets = [utils.SimpleMLPreshape([num] + [hidden] * nmlp + [num],
                                    [nn.Softplus()] * nmlp + [utils.ScalableTanh(num)])
             for _ in range(nlayers)]
    return flow.RNVP(maskList, tNets, sNets)
from utils import flowBuilder
f = flowBuilder(n,numFlow,innerBuilder,1).to(device).to(dtype)
# Load saving.
import os
import glob
# Pick the most recently written checkpoint from the savings folder.
name = max(glob.iglob(rootFolder+"savings/"+'*.saving'), key=os.path.getctime)
print("load saving at "+name)
saved = torch.load(name,map_location=device)
f.load(saved);
# -
# # 2. Analysis
# +
# Calculate modes in the latent space.
d0 = f.layerList[0].elements[:n]
d1 = f.layerList[0].elements[n:]
omega = (1/(torch.exp(d0+d1))).detach()
from matplotlib import pyplot as plt
from utils import logit_back,logit
from copy import deepcopy
# Sort frequencies ascending; idx maps sorted order back to latent dims.
omega, idx = torch.sort(omega)
original = target.sample(1)
# First panel of the strip: the original 28x28 image.
saveList = [original[:,:784].reshape(28,28)]
z = f.forward(original)[0].detach()
# Keep only the nslow slowest latent modes; resample the rest from noise.
for nslow in [5,10,15,20,25,30,35]:
    # NOTE(review): `noise` is never used, but torch.randn advances the global
    # RNG, so removing it would change the samples drawn below.
    noise = torch.randn(nslow)
    zz = deepcopy(z)
    zz[:,idx[nslow:784]] = f.layerList[0].inverse(torch.randn(original.shape))[0][:,idx[nslow:784]]
    saveList.append(f.inverse(zz)[0].detach()[:,:784].reshape(28,28))
# Concatenate all panels horizontally into one strip image.
imgs = torch.cat(saveList,1)
plt.figure(figsize=(12,4))
plt.imshow(logit_back(imgs),cmap="gray")
plt.show()
# -
| 4_MNIST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="c83c668a-da49-440c-852f-552321c46abc" _execution_state="idle" _uuid="032819ba65351cd09b3a6f98b57cbf72bbdb7d49"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
#read the data from the kc_house_data and drop the columns which are not needed
dataFrame = pd.read_csv('../input/kc_house_data.csv',nrows=1000)#read the CSV file, only the first 1000 rows
Cols = ['price','sqft_living'] #these are the columns which are needed
dataFrame = dataFrame[Cols] #consider only those columns which are required and drop the other columns
dataFrame[['price']] = dataFrame[['price']]/1000 # express price in thousands of dollars
print(dataFrame.head())#print the data
print('no of dataset:',len(dataFrame))#number of rows
#data_points = dataFrame.as_matrix() #convert the data to the matrix form
#simple scatter plot of the raw data in 2D
plt.scatter(dataFrame['sqft_living'],dataFrame['price'])
plt.title(' sqft_living vs price ')
plt.xlabel('sqft_living area')
plt.ylabel('price k $')
plt.show()
#b,m are the constants of the linear regression equation y = m*x + b
init_consts = np.array([0,0])#initial parameters of best fit: b=0, m=0
criteria = 8000 # initial convergence measure (large so the loop runs at least once)
epsi = 1e-5 #epsilon: convergence tolerance
N = len(dataFrame.index)#length of dataset
total_living = sum(dataFrame['sqft_living'])#sum of all sqft_living
sq_total_living = sum(np.power(dataFrame['sqft_living'],2))# sum of sqft_living^2
#Initialize hessian matrix of the least-squares objective (constant for linear regression)
H = [[-N,-total_living],[-total_living,-sq_total_living]]
#update newton method to give new points
def newton_method_update(old_consts, H, J):
    """Return one Newton step: old_consts - pinv(H) @ J.

    :param old_consts: current parameter vector (b, m)
    :param H: Hessian matrix of the objective
    :param J: Jacobian (gradient) vector at old_consts
    """
    step = np.dot(np.linalg.pinv(H), J)
    return np.array(old_consts - step)
price = np.array(dataFrame['price'])#convert to array
living_sqft = np.array(dataFrame['sqft_living'])#convert to array
new_consts = init_consts#initialise the parameters (b, m)
# Newton iterations: repeat until the parameter update is smaller than epsilon.
while criteria > epsi:
    old_consts = new_consts
    # Jacobian (gradient) of the least-squares objective w.r.t. (b, m).
    J_position1 = np.nansum(price) - N * old_consts[0] - total_living * old_consts[1]
    J_position2 = np.nansum(price * living_sqft) - total_living * old_consts[0] - sq_total_living * old_consts[1]
    J = np.array([J_position1,J_position2])
    new_consts = newton_method_update(old_consts, H, J)
    criteria = np.linalg.norm(new_consts - old_consts)#convergence measure, checked each pass
# Best-fit parameters: intercept b = new_consts[0], slope m = new_consts[1].
print(new_consts)
# Plot the fitted line over the actual regressor (sqft_living), not price:
# the model is price = m * sqft_living + b, so evaluating/plotting it at
# x = price (as the original did) drew the line over the wrong domain.
plt.plot(living_sqft, new_consts[1] * living_sqft + new_consts[0], color='red')
# Raw data: sqft_living vs price.
plt.scatter(dataFrame['sqft_living'],dataFrame['price'])
plt.title(' sqft_living vs price ')
plt.xlabel('sqft_living area')
plt.ylabel('price k $')
plt.show()
# + _execution_state="idle" _uuid="36fa84375881d3c73da3d62a1fb5e260bef27db0"
| downloaded_kernels/house_sales/kernel_152.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import math
import torch
import gpytorch
import spectralgp
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import seaborn as sns
# %matplotlib inline
torch.set_default_dtype(torch.float64)
# #### Set up the GPyTorch Model with Spectral GP kernel
# Using the same framework as standard GPyTorch models, we merely drop-in the spectral GP kernel as the covar module
#
# The `initialize_from_data` method does some pre-training on the latent model using the log-periodogram of data as training targets.
#
# For specifics on the components of GPyTorch models we refer to the [GPyTorch Documentation](https://gpytorch.readthedocs.io/en/latest/index.html)
class SpectralModel(gpytorch.models.ExactGP):
    """Exact GP regression model whose covariance is a spectral GP kernel."""
    def __init__(self, train_x, train_y, likelihood, **kwargs):
        super(SpectralModel, self).__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = spectralgp.kernels.SpectralGPKernel(**kwargs)
        # Pre-train the latent spectral model using the data's log-periodogram
        # as training targets (see the markdown above).
        self.covar_module.initialize_from_data(train_x, train_y, **kwargs)
    def forward(self, x):
        # Standard GPyTorch forward: return the prior distribution at x.
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
# #### Generate training data and Build GP Model
# Generate points in [0, 5] and a sine wave to serve as the response, then split into training and test data.
#
# Pass this data into the GP model from above along with a likelihood
# +
nx = 200     # number of grid points on [0, 5]
split = 150  # first `split` points are training data
full_x = torch.linspace(0, 5, nx)
full_y = torch.sin(2 * full_x)
train_x = full_x[:split]
train_y = full_y[:split]
# split - nx is negative, so these slices take the last (nx - split) points.
test_x = full_x[(split - nx):]
test_y = full_y[(split - nx):]
# -
# Near-noiseless likelihood: the noise prior is boxed into [1e-8, 1e-4].
likelihood = gpytorch.likelihoods.GaussianLikelihood(noise_prior=gpytorch.priors.SmoothedBoxPrior(1e-8, 1e-4))
# nomg is forwarded to the spectral kernel; presumably the number of
# frequency (omega) grid points -- TODO confirm against spectralgp docs.
model = SpectralModel(train_x, train_y, likelihood, nomg=100)
# #### Set up sampling factories
#
# In the inference procedure we consider fixing the latent GP observation and doing gradient descent updates on the hyperparameters, then fixing the hyperparameters and using elliptical slice sampling to update the latent GP.
#
# The `ss_factory` generates a "factory" that fixes the latent GP and computes the loss function of the hyperparameters
# Set up Alternating Sampler
# Per the notebook text above: alternate between elliptical slice sampling of
# the latent log-spectral density and gradient updates of the hyperparameters.
n_iters = 10     # alternation rounds (totalSamples)
ess_iters = 5    # inner samples per round (numInnerSamples)
optim_iters = 5  # outer samples per round (numOuterSamples)
alt_sampler = spectralgp.samplers.AlternatingSampler(
    [model], [likelihood],
    spectralgp.sampling_factories.ss_factory, [spectralgp.sampling_factories.ess_factory],
    totalSamples=n_iters, numInnerSamples=ess_iters, numOuterSamples=optim_iters
)
# Trailing semicolon suppresses the return value in notebook output.
alt_sampler.run();
# #### Process the outputs
# The alternating sampler provides a good set of hyperparameters (already attached to the model) and samples of the spectral density of the covariance function.
#
# To generate predictions we look at the last samples of the alternating sampler and generate covariance functions from each of these samples.
#
# In the below plotting we show 10 predictions: one prediction for each of the last 10 sampled log-spectral densities taken from the alternating sampler.
# +
model.eval()
# Number of posterior spectral-density samples used for prediction.  Defined
# once up front: previously the literal 10 was hard-coded independently in the
# slice and in all three result buffers, so they could silently drift apart.
n_samples = 10
# Last n_samples latent (log-spectral-density) draws from the sampler.
spectrum_samples = alt_sampler.gsampled[0][0, :, -n_samples:].detach()
predictions = torch.zeros(len(full_x), n_samples)  # predictive mean per sample
upper_bds = torch.zeros(len(full_x), n_samples)    # upper conf. bd per sample
lower_bds = torch.zeros(len(full_x), n_samples)    # lower conf. bd per sample
with torch.no_grad():
    for ii in range(n_samples):
        # Install the ii-th sampled spectrum as the kernel's latent params.
        model.covar_module.set_latent_params(spectrum_samples[:, ii])
        model.set_train_data(train_x, train_y)  # to clear out the cache
        pred_dist = model(full_x)
        lower_bds[:, ii], upper_bds[:, ii] = pred_dist.confidence_region()
        predictions[:, ii] = pred_dist.mean
# -
alt_sampler.gsampled[0].shape
# #### Now Generate the Plot
# +
sns.set_style("whitegrid")
# matplotlib.cm.get_cmap was deprecated in matplotlib 3.7 and removed in 3.9;
# plt.get_cmap returns the same Colormap object on all supported versions.
colors = plt.get_cmap("tab10")
## plot the predictions ##
plt.plot(full_x.numpy(), predictions[:, 0].detach().numpy(), label="Predictions",
color=colors(0), linewidth=2)
plt.plot(full_x.numpy(), predictions.detach().numpy(), linewidth=2,
color=colors(0))
## Shade region +/- 2 SD around the mean ##
plt.fill_between(full_x.numpy(), lower_bds[:, 0].detach().numpy(),
upper_bds[:, 0].detach().numpy(),
color=colors(0), alpha=0.03, label = r"$\pm 2$ SD")
for ii in range(n_samples):
plt.fill_between(full_x.numpy(), lower_bds[:, ii].detach().numpy(),
upper_bds[:, ii].detach().numpy(),
color=colors(0), alpha=0.03)
## plot data ##
plt.plot(train_x.numpy(), train_y.numpy(), color=colors(1),
linewidth=2, label="Train Data")
plt.plot(test_x.numpy(), test_y.numpy(), color=colors(1),
linestyle="None", marker=".", markersize=12,
label="Test Data")
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Predictions and Data")
plt.legend()
plt.show()
# -
plt.plot(model.covar_module.omega.numpy(), spectrum_samples.exp().numpy(), label = 'Posterior Samples')
plt.xlabel('Omega')
plt.ylabel('Density')
plt.xlim((0, 7))
plt.ylim((0,1))
plt.vlines(2/(2*3.14159),ymin=0, ymax=10, label = 'True Period')
plt.legend()
import numpy as np
print(np.trapz(spectrum_samples.exp().t().numpy(), model.covar_module.omega.numpy()))
| notebooks/regression_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# REMEMBER: FIRST CREATE A COPY OF THIS FILE WITH A UNIQUE NAME AND DO YOUR WORK THERE. AND MAKE SURE YOU COMMIT YOUR CHANGES TO THE `hw3_submissions` BRANCH.
# # Assignment 3 | Cleaning and Exploring Data with Pandas
#
#
# <img src="data/scoreCard.jpg" width=250>
#
# In this assignment, you will investigate restaurant food safety scores for restaurants in San Francisco. Above is a sample score card for a restaurant. The scores and violation information have been made available by the San Francisco Department of Public Health.
# ## Loading Food Safety Data
#
#
# There are 2 files in the data directory:
# 1. business.csv containing food establishments in San Francisco
# 1. inspections.csv containing restaurant inspection records
#
# Let's start by loading them into Pandas dataframes. One of the files, business.csv, has encoding (ISO-8859-1), so you will need to account for that when reading it.
# ### Question 1
#
# #### Question 1a
# Read the two files noted above into two pandas dataframes named `bus` and `ins`, respectively. Print the first 5 rows of each to inspect them.
#
import pandas as pd
# + tags=["solution"]
bus = pd.read_csv('data/businesses.csv', encoding='ISO-8859-1')
ins = pd.read_csv('data/inspections.csv')
# -
bus.head()
ins.head()
# ## Examining the Business data
#
# From its name alone, we expect the `businesses.csv` file to contain information about the restaurants. Let's investigate this dataset.
# ### Question 2
#
# #### Question 2a: How many records are there?
# + tags=["solution"]
len(bus)
# -
# #### Question 2b: How many unique business IDs are there?
bus['business_id'].nunique()
# #### Question 2c: What are the 5 most common businesses by name, and how many are there in San Francisco?
bus['name'].value_counts()[:5]
# + [markdown] tags=["written"]
# ## Zip code
#
# Next, let's explore some of the variables in the business table. We begin by examining the postal code.
#
# ### Question 3
#
# #### Question 3a
# How are the zip code values stored in python (i.e. data type)?
#
# To answer this you might want to examine a particular entry.
# + tags=["solution"]
bus['postal_code']
# +
# they are stored as objects as it says above
# -
# #### Question 3b
#
# What are the unique values of postal_code?
bus['postal_code'].value_counts()
# #### Question 3c
#
# Let's say we decide to exclude the businesses that have no zipcode for our analysis (which might include food trucks for example). Use the list of valid 5-digit zip codes below to create a new dataframe called bus_valid, with only businesses whose postal_codes show up in this list of valid zipcodes. How many businesses are there in this new dataframe?
validZip = ["94102", "94103", "94104", "94105", "94107", "94108",
"94109", "94110", "94111", "94112", "94114", "94115",
"94116", "94117", "94118", "94121", "94122", "94123",
"94124", "94127", "94131", "94132", "94133", "94134"]
bus_valid_index = bus['postal_code'].isin(validZip)
bus_valid = bus[bus_valid_index]
print(bus_valid.reset_index())
# +
# this is with the code that was provided, I just called it 'second try' because I wasn't sure if you wanted us to print the list
# with only the postal codes that are valid, or print all the businesses but leave the invalid zip codes blank
bus_valid_secondtry = bus[bus['postal_code'].isin(validZip)]
print(bus_valid_secondtry)
# -
# ## Latitude and Longitude
#
# Another aspect of the data we want to consider is the prevalence of missing values. If many records have missing values then we might be concerned about whether the nonmissing values are representative of the population.
#
# ### Question 4
#
# Consider the longitude and latitude in the business DataFrame.
#
# #### Question 4a
#
# How many businesses are missing longitude values, working with only the businesses that are in the list of valid zipcodes?
# + tags=["solution"]
bus_valid[pd.isnull(bus_valid['longitude'])]
# -
# #### Question 4b
#
# Create a new dataframe with one row for each valid zipcode. The dataframe should include the following three columns:
#
# 1. `postal_code`: Contains the zip codes in the `validZip` variable above.
# 2. `null_lon`: The number of businesses in that zipcode with missing `longitude` values.
# 3. `not_null_lon`: The number of businesses without missing `longitude` values.
# +
# One row per valid zip code.  The zip codes also become the index so that the
# groupby results below (which are indexed by postal_code) align on assignment.
newdf = pd.DataFrame()
newdf['postal_code'] = validZip
newdf.index = validZip
#newdf.loc[:, 'postal_code'] = list(bus_valid['postal_code'])
# Businesses with a missing longitude, counted per zip code.
null = bus_valid[pd.isnull(bus_valid['longitude'])]
newdf.loc[:, 'null_lon'] = null.groupby('postal_code')['business_id'].nunique()
# Businesses with a longitude present, counted per zip code.
notnull = bus_valid[pd.notnull(bus_valid['longitude'])]
newdf.loc[:, 'not_null_lon'] = notnull.groupby('postal_code')['business_id'].nunique()
# Zip codes absent from a group show up as NaN rather than 0 here.
print(newdf)
# -
# #### 4c. Do any zip codes appear to have more than their 'fair share' of missing longitude?
#
# To answer this, you will want to compute the proportion of missing longitude values for each zip code, and print the proportion missing longitude, and print the top five zipcodes in descending order of proportion missing postal_code.
#
# +
# Proportion missing = missing / total for the zip code.  Dividing by only the
# non-missing count (as before) gives an odds ratio, not a proportion.
# fillna(0) guards zip codes where a groupby above produced no row (NaN).
miss = newdf['null_lon'].fillna(0)
have = newdf['not_null_lon'].fillna(0)
newdf['prop_miss_pc'] = miss / (miss + have)
print(newdf)
# The question asks for the top five in *descending* order of the missing
# proportion, i.e. the largest values -- nlargest, not nsmallest.
top = newdf.nlargest(n=5, columns=['prop_miss_pc'])
top.head(5)
# -
# # Investigate the inspection data
#
# Let's now turn to the inspection DataFrame. Earlier, we found that `ins` has 4 columns, these are named `business_id`, `score`, `date` and `type`. In this section, we determine the granularity of `ins` and investigate the kinds of information provided for the inspections.
#
# ### Question 5
#
# #### Question 5a
# As with the business data, assess whether there is one inspection record for each business, by counting how many rows are in the data and how many unique businesses there are in the data. If they are exactly the same number, it means there is only one inspection per business, clearly.
# + tags=["solution"]
len(ins)
# -
ins['business_id'].nunique()
# #### Question 5b
#
# What values does `type` take on? How many occurrences of each value is in the DataFrame? Create a new dataframe named `ins2` by copying `ins` and keeping only records with values of `type` that occur more than 10 times in the original table. In other words, eliminate records that have values of `type` that occur rarely (< 10 times). Check the result to make sure rare types are eliminated.
# + tags=["solution", "written"]
ins['type'].value_counts()
# +
#ins2['type'] = pd.ins.groupby('business_id')['type'].apply(lambda x > 10)
#ins2 = ins[ins['type'] > 10].groupby(['business_id']).nunique()
#ins['type']
# -
# #### Question 5c
#
# Since the data was stored in a .csv file, the dates are formatted as strings such as `20160503`. Once we read in the data, we would like to have dates in an appropriate format for analysis. Add a new column called `year` by capturing the first four characters of the date column.
#
# Hint: we have seen multiple ways of doing this in class, includings `str` operations, `lambda` functions, `datetime` operations, and others. Choose the method that works best for you :)
# +
ins.date = ins.date.astype(str)
ins.loc[:,'year'] = ins['date'].str[:4]
ins
#hi Irene, i think my data is messed up, sorry about that... this code works, but my data is changed for some reason
# + [markdown] tags=["written"]
# #### Question 5d
#
# What range of years is covered in this data set? Are there roughly same number of inspections each year? Try dropping records for any years with less than 50 inspections and store the result in a new dataframe named `ins3`.
# + tags=["solution", "written"]
ins['year'].value_counts()
# -
# Let's examine only the inspections for one year: 2016. This puts businesses on a more equal footing because [inspection guidelines](https://www.sfdph.org/dph/eh/Food/Inspections.asp) generally refer to how many inspections should occur in a given year.
# ### Question 6
#
# #### Question 6a
#
# Merge the business and 2016 inspections data, keeping all businesses regardless of whether they show up in the inspections file. Show the first several rows of the resulting dataframe.
# The question asks to merge the businesses with the *2016* inspections while
# keeping all businesses regardless of whether they were inspected.  The
# previous call used pd.merge's default inner join over all years, which
# dropped uninspected businesses.  A left join keeps them (with NaNs).
bus_ins = pd.merge(bus, ins[ins['year'] == '2016'], how='left')
bus_ins.head()
# #### Question 6b
# Print the 20 lowest rated businesses names, their addresses, and their ratings.
# 20 lowest inspection scores, with name and address for context.
lowest = bus_ins.loc[:,['name', 'address', 'score']].nsmallest(n=20, columns=['score'])
lowest.head(20)
# ## Done!
#
# Now commit this notebook to your `hw3_submissions` branch, push it to your GitHub repo, and open a PR!
| assignments/assignment_3/assignment_3_daisylewis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Insight Project --Birding Big Year--
#
# In this project I intend to determine the cheapest way to win the Big Year competition by the American Birding Association (ABA), following their rules. As part of their rules they give the list of eligible birds (1116). All the birds have to be seen with in 12:00 AM, January 1st to 11:59 PM, December 31st of the same year.
#
#
# +
import numpy as np
from datetime import datetime
import geopandas as gpd
from shapely.geometry import Point
import os
import struct
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.cluster import KMeans
import pandas as pd
from pandas.io.json import json_normalize, read_json
def save_fig(name, figure=None):
    """Save a figure to *name* as a tightly-cropped 80-dpi JPEG.

    ``figure`` defaults to the module-level ``fig`` (whatever figure the
    notebook last bound to that name), which preserves the original
    one-argument call sites; pass a Figure explicitly to avoid relying on
    that global.
    """
    target = fig if figure is None else figure
    target.savefig(name, dpi=80, bbox_inches='tight', pad_inches=0.02, format='jpg')
# %matplotlib inline
# -
# # Our goal
#
# The list of birds from ABA that have to be found. ABA also give us a way to rank the birds. That information will be used to map our route and determine hotspots. ABA 1-2 will be consider low priority and 3-5 high priority.
# Load the ABA checklist: bird name, 4-letter code, and ABA rarity code (1-5).
dfbirdList = pd.read_csv('./ABA_Checklist-8.0.6a.csv', usecols=[1,3,4], skiprows=1)
dfbirdList.rename(columns={' including detailed species accounts': 'Bird name', 'Unnamed: 3': '4 letter code',
                           'Unnamed: 4': 'ABA Rarity'}, inplace=True)
dfbirdList.dropna(inplace=True)
# drop=True discards the old index directly, replacing the previous
# reset_index() + drop(['index']) two-step.
dfbirdList.reset_index(drop=True, inplace=True)
# Binary rarity flag per the text above: ABA 1-2 -> 0 (low), 3-5 -> 1 (high).
dfbirdList['ABA TF Rarity'] = dfbirdList['ABA Rarity']//3
listOfBirds = list(dfbirdList['Bird name'].unique())
dfbirdList.head(5)
# # Now the ebird Data
#
# I will start with a single state, the State of WY. Since the ebird API limits the type of request I can make, I have a downloaded csv file. I'm using the last two full years of data.
# +
dfAll = pd.read_csv('./ebd_US-WY_201801_201912_relApr-2020/ebd_US-WY_201801_201912_relApr-2020.txt'
,delimiter="\t")
# dfAll = pd.read_csv('./ebd_US-WI_201001_201812_relApr-2020/ebd_US-WI_201001_201812_relApr-2020.txt'
# ,delimiter="\t")
# -
# I add certain conditions to satisfy completeness of the data: public locations and only bird species (i.e. no hybrids). `dfReduce` will contain all the information I will be using.
# Keep only approved, species-level records from complete checklists at
# public hotspots (LOCALITY TYPE 'H'), per the text above.
dfAll = dfAll[(dfAll['CATEGORY'] == 'species') & (dfAll['LOCALITY TYPE'] == 'H')
              & (dfAll['ALL SPECIES REPORTED'] == 1) & (dfAll['APPROVED'] == 1)]
dfReduce = dfAll.filter(['SAMPLING EVENT IDENTIFIER', 'COMMON NAME', 'LOCALITY', 'TIME OBSERVATIONS STARTED',
                         'LATITUDE', 'LONGITUDE', 'OBSERVATION DATE', 'ALL SPECIES REPORTED'])
dfReduce['OBSERVATION DATE'] = pd.to_datetime(dfReduce['OBSERVATION DATE'])
dfReduce['YEAR WEEK'] = dfReduce['OBSERVATION DATE'].dt.strftime('%W')
# BUG FIX: strftime('%j') produced zero-padded *strings*; the numeric
# comparisons later in the notebook (YEAR DAY >= 0, < 7) and the model
# features require integers.  dt.dayofyear yields them directly.
dfReduce['YEAR DAY'] = dfReduce['OBSERVATION DATE'].dt.dayofyear
# Attach ABA rarity info; birds missing from the ABA list get NaNs.
dfReduce = dfReduce.merge(dfbirdList, left_on='COMMON NAME', right_on='Bird name', how = 'left')
dfReduce.drop(['Bird name'], axis=1, inplace=True)
# ### Now lets find the rare birds by percentils
#
# The rare birds are going to be determined as the bottom 20%, in terms of sightings (averaged over time). We then also need to find which locations host these birds.
# Per-species sighting counts across all retained checklist rows.
dfPercentil = dfReduce.groupby(['COMMON NAME']).sum().filter(['ALL SPECIES REPORTED']).reset_index()
dfPercentil.rename(columns = {'ALL SPECIES REPORTED':'POS OBS'}, inplace=True)
# "PROVABILITY" (sic) = share of all checklist rows that report the species.
dfPercentil['PROVABILITY'] = dfPercentil['POS OBS']/dfReduce.shape[0]
dfPercentil.head(5) #one hot encoding sckit lern pivot them
# +
# dfPercentil2 = dfReduce.groupby(['LOCALITY']).sum().filter(['ALL SPECIES REPORTED']).reset_index()
# dfPercentil2.rename(columns = {'ALL SPECIES REPORTED':'TOTAL OBS'}, inplace=True)
# dfPercentil = dfPercentil.merge(dfPercentil2, left_on='LOCALITY', right_on='LOCALITY', how = 'left')
# -
# Flag the rarest species: 1 when the sighting rate is at or below the
# threshold.  NOTE(review): the text above says "bottom 20%", but
# .quantile(.1) is the 10th percentile -- confirm which cutoff is intended.
dfPercentil['Percentil TF Rarity'] = list(map(lambda x: 0 if x > dfPercentil.quantile(.1)[0] else 1,
                                              dfPercentil['PROVABILITY']))
dfReduce = dfReduce.merge(dfPercentil, left_on='COMMON NAME', right_on='COMMON NAME', how = 'left')
# Go/Nogo = 1 when the species is rare by either the ABA code or the
# percentile criterion.
dfReduce['Go/Nogo'] = list(map(lambda x, y: 1 if (x ==1 or y ==1) else 0,
                               dfReduce['ABA TF Rarity'],dfReduce['Percentil TF Rarity']))
dfReduce.head(5)
# # Is time for some plots!
country = gpd.read_file('/Users/casanova/DocumentsHere/Insight/gz_2010_us_040_00_5m.json')
bidName = '<NAME>'
state = 'Wyoming'
# +
# fig, ax = plt.subplots(1, figsize=(10,15))
# base = country[country['NAME'].isin([state]) == True].plot(ax=ax, color='#3B3C6E', alpha = 0.5)
# im = ax.scatter(bidsOut[bidsOut['COMMON NAME'] == bidName]['LONGITUDE'],
# bidsOut[bidsOut['COMMON NAME'] == bidName]['LATITUDE'],
# c = list(bidsOut[bidsOut['COMMON NAME'] == bidName]['POSITIVE OBS']))
# ax.set_ylabel(r'Latitude [$^o$]')
# ax.set_xlabel(r'Longitude [$^o$]')
# plt.text(-107, 45.01, r'{} on {}'.format(bidName,day), color='k',size=15)
# divider = make_axes_locatable(ax)
# cax = divider.append_axes('right', size='5%', pad=0.05)
# fig.colorbar(im, cax = cax, orientation='vertical')
# plt.show()
# save_fig('/Users/casanova/DocumentsHere/Insight/{}{}{}.jpg'.format(state,bidName,day))
# -
# # Now the weather data
#
# The data will come from PRISM
# +
def parse_header(hdr):
    """Parse a PRISM .hdr file into a dict of string key/value fields.

    Each line is split at its first space; the remainder (whitespace-
    stripped) becomes the value.  Later duplicate keys overwrite earlier
    ones.  The file handle is now closed deterministically (it was
    previously opened without ever being closed).
    """
    with open(hdr) as fh:
        contents = fh.read()
    header = {}
    for li in contents.strip().splitlines():
        key, _, value = li.partition(" ")
        header[key] = value.strip()
    return header
def parse_bil(path, rows, cols, dtype):
    """Parse a PRISM .bil binary grid into a (rows, cols) float array.

    dtype is the header PIXELTYPE: "FLOAT" means little-endian float32;
    anything else is treated as unsigned 16-bit int (the BIL spec default
    when no type is given).  Values <= -6000 -- PRISM's missing-data
    sentinel -- are mapped to NaN.

    The file is now closed via a context manager, and the original
    O(rows*cols) Python fill loop is replaced by a vectorized reshape +
    boolean mask with identical results.
    """
    rows, cols = int(rows), int(cols)
    n = rows * cols
    if dtype == "FLOAT":
        fmt = "<%df" % (n,)
    else:
        fmt = "<%dH" % (n,)  # unsigned int per spec default
    with open(path, "rb") as fi:
        z = struct.unpack(fmt, fi.read())
    values = np.asarray(z, dtype=float).reshape(rows, cols)
    # Magic number for 'void'/missing cells, depending on the format.
    values[values <= -6000] = np.nan
    return values
def geoloc_rc(r, c, hdr=None):
    """Convert grid (row, col) indices to (lat, lon) degrees.

    hdr defaults to the module-level ``hdrInfo`` parsed earlier, so existing
    two-argument call sites are unchanged; passing it explicitly makes the
    function testable.

    BUG FIX: the longitude origin was previously read from ULYMAP (the *Y*
    map origin); it must come from ULXMAP.
    """
    if hdr is None:
        hdr = hdrInfo
    lat = float(hdr['ULYMAP']) - r * float(hdr['YDIM'])
    lon = float(hdr['ULXMAP']) + c * float(hdr['XDIM'])
    return lat, lon
def geoloc_ll(lat, lon, hdr=None):
    """Convert (lat, lon) degrees to fractional grid (row, col) indices.

    Inverse of geoloc_rc.  hdr defaults to the module-level ``hdrInfo``.

    BUG FIXES: the second parameter was named ``long`` while the body used
    the undefined name ``lon`` (guaranteed NameError), and the column was
    computed from ULYMAP instead of ULXMAP.
    """
    if hdr is None:
        hdr = hdrInfo
    r = -(lat - float(hdr['ULYMAP'])) / float(hdr['YDIM'])
    c = (lon - float(hdr['ULXMAP'])) / float(hdr['XDIM'])
    return r, c
def findweather_from_coord(Plat, Plon, WeatherMat, hdr=None):
    """Look up the weather grid cell nearest to (Plat, Plon).

    Returns (value, row, col) of the nearest-neighbor cell of WeatherMat.
    hdr defaults to the module-level ``hdrInfo``, preserving the original
    three-argument call sites.

    BUG FIX: np.linspace's ``num`` argument was passed as a float
    (float(hdrInfo['NROWS'])), which raises TypeError on modern NumPy;
    it must be an integer.
    """
    if hdr is None:
        hdr = hdrInfo
    nrows = int(hdr['NROWS'])
    ncols = int(hdr['NCOLS'])
    lat0 = float(hdr['ULYMAP'])
    lon0 = float(hdr['ULXMAP'])
    # Grid axis coordinates: latitude decreases down rows, longitude
    # increases across columns.
    latRange = np.linspace(lat0, lat0 - float(hdr['YDIM']) * nrows, nrows)
    lonRange = np.linspace(lon0, lon0 + float(hdr['XDIM']) * ncols, ncols)
    Pr = np.argmin(np.abs(latRange - Plat))
    Pc = np.argmin(np.abs(lonRange - Plon))
    return WeatherMat[Pr, Pc], Pr, Pc
# -
hdrInfo = parse_header('../../Downloads/PRISM_ppt_stable_4kmD2_20180101_20181231_bil/PRISM_ppt_stable_4kmD2_20180101_bil.hdr')
hdrInfo
out1 = parse_bil('../../Downloads/PRISM_ppt_stable_4kmD2_20180101_20181231_bil/PRISM_ppt_stable_4kmD2_20180401_bil.bil',
int(hdrInfo['NROWS']), int(hdrInfo['NCOLS']), hdrInfo['PIXELTYPE'])
findweather_from_coord(43.0731, -89.4012, out1)
fig,ax = plt.subplots(1)
ax.imshow((out1))
rect = mpatches.Rectangle((854-10, 164-10),20,20,linewidth=2,edgecolor='r',facecolor='none')
ax.add_patch(rect)
plt.show()
# # Is time for some ML
#
# I'll be using a Logistic Regression to determine if I should go to a place or not. The go/no-go label has already been introduced above, and that is my external parameter. The way it is currently set up is that the bottom-20-percentile and 2-5 ABA rarity code birds select locations that I should visit.
# +
# Targets and features restricted to the first week of the year
# (YEAR DAY in [0, 7)).
# NOTE(review): these comparisons require YEAR DAY to be numeric; if it is
# still the string output of strftime('%j') from the cell above, comparing
# against integers raises a TypeError -- confirm the column dtype.
y = dfReduce[(dfReduce['YEAR DAY'] >= 0) & (dfReduce['YEAR DAY'] < 7)]['Go/Nogo'].values
use_columns = ['LATITUDE', 'LONGITUDE','YEAR DAY']
# ,'YEAR DAY'] # 'TIME OBSERVATIONS STARTED'
# dfReduce['TIME OBSERVATIONS STARTED'] = pd.to_datetime(dfReduce['TIME OBSERVATIONS STARTED'],format= '%H:%M:%S' )
X = dfReduce[(dfReduce['YEAR DAY'] >= 0) & (dfReduce['YEAR DAY'] < 7)][use_columns]
# -
# BUG FIX: train_test_split and LogisticRegression were used without ever
# being imported in this notebook (only KMeans was imported above).
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=432545)
# Heavy weighting of class 1 compensates for the rare positive (Go) class.
model = LogisticRegression(class_weight={0:2,1:60})
model.fit(X_train, y_train)
print(model.score(X_test,y_test))
# +
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = np.array(X)[:, 0].min() - .5, np.array(X)[:, 0].max() + .5
y_min, y_max = np.array(X)[:, 1].min() - .5, np.array(X)[:, 1].max() + .5
h = .02 # step size in the mesh
xx, yy, tt = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h), np.arange(1, 366, 1))
# xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = model.predict(np.c_[xx.ravel(), yy.ravel(), tt.ravel()])
Z = Z.reshape(xx.shape)
# -
plt.pcolormesh(xx, yy,Z, cmap=plt.cm.Reds)
plt.scatter(dfReduce[dfReduce['Go/Nogo'] == 0]['LATITUDE'],dfReduce[dfReduce['Go/Nogo'] == 0]['LONGITUDE'],
c=dfReduce[dfReduce['Go/Nogo'] == 0]['Go/Nogo'].values, cmap=plt.cm.Paired)
plt.scatter(dfReduce[dfReduce['Go/Nogo'] == 1]['LATITUDE'],dfReduce[dfReduce['Go/Nogo'] == 1]['LONGITUDE'],
c=dfReduce[dfReduce['Go/Nogo'] == 1]['Go/Nogo'].values, cmap=plt.cm.Greens)
plt.show()
model.coef_
model.intercept_
Z.min()
Z.max()
import statsmodels.api as sm
logit_model=sm.Logit(y,X)
result=logit_model.fit()
print(result.summary2())
y_pred = model.predict(X_test)
print('Accuracy of logistic regression classifier on test set: {:.2f}'.format(model.score(X_test, y_test)))
from sklearn.metrics import confusion_matrix
confusion_matrix = confusion_matrix(y_test, y_pred)
print(confusion_matrix)
# +
# import seaborn as sns
# plt.figure(figsize=(10,5))
# sns.scatterplot(data=dfReduce,x='LATITUDE',y='LONGITUDE',hue='Go/Nogo')
# -
# BUG FIX: roc_auc_score was never imported (only confusion_matrix was);
# LogisticRegression is imported here too so this cell runs on its own.
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import LogisticRegression

# AUC of two class-weighting schemes, evaluated on the training data itself.
roc_auc_score(y,LogisticRegression(C=1e9,class_weight={0:1,1:90}).fit(X,y).predict(X))
roc_auc_score(y,LogisticRegression(C=1e9,class_weight='balanced').fit(X,y).predict(X))
# +
import numpy as np
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random input and output data
x = np.random.randn(N, D_in)
y = np.random.randn(N, D_out)
# Randomly initialize weights
w1 = np.random.randn(D_in, H)
w2 = np.random.randn(H, D_out)
# -
x.shape
# # Let do the k-mean clustering
kmeans = KMeans(init='k-means++', n_clusters=11, n_init=10)
dfKMeans = dfReduce.filter(['LATITUDE', 'LONGITUDE', 'LOCALITY']).drop_duplicates()
kmeans.fit(dfKMeans.filter(['LATITUDE', 'LONGITUDE']))
centroids = kmeans.cluster_centers_
centroids[:,1]
# +
plotter = dfReduce.filter(['LATITUDE', 'LONGITUDE']).drop_duplicates()
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
x_min, x_max = np.min(plotter['LATITUDE']), np.max(plotter['LATITUDE'])
y_min, y_max = np.min(plotter['LONGITUDE']), np.max(plotter['LONGITUDE'])
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
# -
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower', alpha = 0.5)
plt.scatter(plotter['LATITUDE'],plotter['LONGITUDE'], marker = '+')
plt.scatter(centroids[:,0], centroids[:,1])
plt.show()
kmeans.labels_[:5]
kmeans.predict([[42.024710,-110.589578]])
dfKMeans['K-cluster'] = np.array(kmeans.labels_)
dfReduce.head(5)
dfKMeans.head(5)
# +
dfKCluster = dfReduce.merge(dfKMeans.filter(['LOCALITY','K-cluster']),
left_on='LOCALITY', right_on='LOCALITY', how = 'left').filter(['COMMON NAME','ALL SPECIES REPORTED','YEAR WEEK', 'K-cluster'])
# dfReduce.drop(['Bird name'], axis=1, inplace=True)
# -
dfKCluster.head(5)
dfPercentilK1 = dfKCluster.groupby(['COMMON NAME','K-cluster']).sum().filter(['ALL SPECIES REPORTED']).reset_index()
dfPercentilK1.rename(columns = {'ALL SPECIES REPORTED':'POS OBS'}, inplace=True)
# dfPercentilK1['PROVABILITY'] = dfPercentilK1['POS OBS']/dfReduce.shape[0]
dfPercentilK1.head(5)
# BUG FIX: this cell previously assigned the per-cluster totals to
# dfPercentilK1 -- clobbering the per-species counts built just above -- and
# then renamed a dfPercentilK2 that had never been assigned (NameError).
# The totals belong in dfPercentilK2, which the merge below expects.
dfPercentilK2 = dfKCluster.groupby(['K-cluster']).sum().filter(['ALL SPECIES REPORTED']).reset_index()
dfPercentilK2.rename(columns = {'ALL SPECIES REPORTED':'TOT OBS'}, inplace=True)
dfPercentilK2.head(5)
# Per-species, per-cluster sighting probability = species count in the
# cluster / total observations in the cluster.
df = dfPercentilK1.merge(dfPercentilK2, left_on='K-cluster', right_on='K-cluster', how = 'left')
df['POS PROB'] = df['POS OBS']/df['TOT OBS']
# df.set_index('COMMON NAME', inplace=True)
df.head(5)
df[df['COMMON NAME']=='American Avocet']['POS PROB'].quantile(.5)
# NOTE(review): this maps over dfPercentil (the statewide per-species table),
# not over df -- their lengths differ, so the assignment will likely raise or
# mislabel rows.  It presumably should threshold df['POS PROB'] instead;
# confirm the intent before relying on 'PROB TF'.
df['PROB TF'] = list(map(lambda x: 0 if x > dfPercentil.quantile(.1)[0] else 1,
                         dfPercentil['PROVABILITY']))
np.unique(df['COMMON NAME'].values)
df.shape[0]
df.loc[0]
for i in range(0,df.shape[0]):
| flaskexample/Project/Insight Project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Intro to pandas - Solutions
# <hr style="clear:both">
#
# This notebook is part of a series of exercises for the CIVIL-226 Introduction to Machine Learning for Engineers course at EPFL. Copyright (c) 2021 [VITA](https://www.epfl.ch/labs/vita/) lab at EPFL
# Use of this source code is governed by an MIT-style license that can be found in the LICENSE file or at https://www.opensource.org/licenses/MIT
#
# **Author(s):** [<NAME>](mailto:<EMAIL>)
# <hr style="clear:both">
# [pandas](https://pandas.pydata.org/) is a fast, powerful and flexible package for data manipulation and analysis in Python, built on top of NumPy.
#
# It provides:
# - a fast and efficient DataFrame object for data manipulation, with integrated indexing
# - tools for reading and writing data between in-memory data and various file formats
# - easy handling of missing data
# - easy conversion to and from NumPy arrays
# - [and much more](https://pandas.pydata.org/about/index.html)
#
# Pandas has quickly become a fundamental package for data science in Python. In this tutorial, we'll cover the basics of this package and show how it can be used to handle real-world data for ML applications.
#
# In addition, we'll also briefly cover the seaborn package, which we'll use to generate informative plots from pandas data.
#
#
# <img src="images/stack_overflow_traffic.png" width=500></img>
#
# Source: https://stackoverflow.blog/2017/09/14/python-growing-quickly/
#
# **Note:** Unlike previous tutorials, there is no code to write here. Just read through it and run the cells. For a more hands-on tutorial, we recommend the [pandas course on Kaggle](https://www.kaggle.com/learn/pandas).
#
# **Note:** This solution is exactly identical to the exercise, except that the cells are now executed.
import numpy as np
import pandas as pd
# ## 1. Basics
# A **DataFrame** is the primary data structure in Pandas. It is a data table composed of rows and columns.
#
# You can also refer to the 2 dimensions of a DataFrame as axes, with axis 0 corresponding to the row index, and axis 1 to the column index.
#
# Each column of a DataFrame can be of a different type such as integers, floats, booleans, datetime or even `object`, which can hold any Python object
#
# <img src="images/dataframe.png" width=400></img>
#
# In this part, we'll cover basic pandas operations.
# ### Creating a DataFrame
# +
# Six people: column 0 is age in years, column 1 is height in cm.
data = np.array([[21, 184], [19, 168], [36, 178], [34, 175], [63, 159], [25, 165]])

# df is an abbreviation of DataFrame
df = pd.DataFrame(data=data, columns=["age", "height (cm)"])

# Show DataFrame
df
# -
# ### Accessing specific columns
#
# Accessing a single object returns a [Series](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.html), which is a one-dimensional ndarray with axis labels.
# Accessing only the age column (as a pd.Series object)
df["age"]
# ### Adding columns
# +
df["sex"] = ["M", "F", "M", "F", "F", "M"]
df["height (m)"] = df["height (cm)"] / 100
# Show updated DataFrame
df
# -
# ### Removing columns
df = df.drop(columns="height (m)")
df
# By default, operations in pandas are not in-place (i.e. they return a copy, and don't modify the original object). This can be changed by adding `inplace=False` as a parameter.
# ### Adding rows
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat with a one-row frame is the supported equivalent.
df = pd.concat([df, pd.DataFrame([{"age": 29, "height (cm)": 172, "sex": "F"}])], ignore_index=True)
df
# ### Boolean indexing / slicing
#
# More info: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html
# In the df DataFrame, show me only the rows in which the "sex" column is "F"
df[df["sex"] == "F"]
# In the df DataFrame, show me only the rows in which the "sex" column is "M" AND (&) the "age" is below 30
df[(df["sex"] == "M") & (df["age"] < 30)]
# In the df DataFrame, show me only the rows in which the "sex" column is "F" OR (|) in which (the "sex" column is "M" AND (&) the age is below 30)
df[(df["sex"] == "F") | ((df["sex"] == "M") & (df["age"] < 30))]
# ### Sorting
# Sort values by age in ascending order
df.sort_values(by="age")
# Sort values first by sex, then by age in descending order
df.sort_values(by=["sex", "age"], ascending=False)
# ### Grouping
#
# More info: https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html
df.groupby("sex")["height (cm)"].mean()
# ## 2. I/O
# Pandas supports reading from and writing to many data formats, such as CSV, JSON, Pickle, Excel, and more.
# ### Writing
# Here's how to write our current DataFrame `df` to a file.
#
# Here are some of the formats you can write to:
# - `.to_csv`
# - `.to_json`
# - `.to_excel`
# - `.to_pickle`
# - `.to_clipboard`
# - `.to_markdown`
# - `.to_latex` (very useful for papers / reports)
# index=False means we don't want to add our index to the CSV file
df.to_csv("demo_df.csv", index=False)
# ### Reading
# Now, we'll load a real-world dataset which contains data for 891 of the Titanic's passengers.
titanic = pd.read_csv("data/titanic.csv")
# Here are some of the formats you can read from:
# - `pd.read_csv`
# - `pd.read_json`
# - `pd.read_excel`
# - `pd.read_pickle`
# - `pd.read_clipboard`
#
# More info about I/O in pandas: https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html
# ## 3. Exploratory data analysis
# Let's suppose we want to use our dataset to create a model that predicts which passengers survived the Titanic shipwreck.
#
# <img src="images/titanic.jpg" width=400></img>
#
# What do we know about the Titanic? It's a boat. It hit an iceberg. It sank. This is definitely not enough information to build a solid classifier.
#
#
# This is where exploratory data analysis comes into play. It helps us understand how our data looks like, and how it can be processed and manipulated into something meaningful.
# ### Preview
#
# When dataframes are large, it's not feasible to view the entirety of rows. The `head()`, `tail()` and `sample()` functions can be used to glance at a few of the rows of the datasets and better understand how the data looks like.
#
# - `.head(n)` returns the first n rows
# - `.tail(n)` returns the last n rows
# - `.sample(n)` returns a random sample of the rows (can also be `.sample(frac=m)` to return a fraction of the total number of rows)
titanic.head(5)
titanic.tail(5)
titanic.sample(5)
# ### Shape and column information
# `shape` works just like it does in NumPy. Here, the first value is the number of rows and the second is the number of columns.
titanic.shape
# `info()` prints a concise summary of the DataFrame. It gives, for each column, its type and the number of columns that are non-null (not `NaN`). It also provides the memory usage of the DataFrame.
titanic.info()
# ### Descriptive statistics
# `describe()` generates descriptive statistics, such as the mean, standard deviation, mean, max and quartiles.
#
# By default, it only analyzes the numeric columns of a DataFrame, but this can be changed by adding `include="all"` as a parameter.
titanic.describe()
titanic.describe(include="all")
# ### Unique values
# Some of these columns can be a bit obscure, using `.unique()` can shed some light about which values are contained in these columns.
titanic["who"].unique()
titanic["embarked"].unique()
titanic["embark_town"].unique()
titanic["alive"].unique()
titanic["deck"].unique()
# ### Redundant information
# Looking at this data, it seems like "survived" and "alive" are quite similar, but are written in a different way. Let's see if that's the case.
titanic[(titanic["survived"] == 1) & (titanic["alive"] == "yes")]
titanic[(titanic["survived"] == 1) & (titanic["alive"] == "no")]
# Using `.all()`, we can check if these two columns actually encode the same information.
((titanic["alive"] == "yes") == (titanic["survived"] == 1)).all()
((titanic["alive"] == "no") == (titanic["survived"] == 0)).all()
# This also applies for the columns "embarked" and "embark_town", as well as "pclass" and "class", so we'll only keep one of each. In addition, the "adult_male" column can be directly obtained from the "who" column, so we'll remove it and work on this reduced DataFrame for the rest of the exercise.
keep_cols = ["survived", "pclass", "sex", "age", "sibsp", "parch", "fare", "embark_town", "deck", "who", "alone"]
titanic = titanic[keep_cols].copy()
titanic.columns
# Let's now clarify what these columns mean:
# - **survived**: Survival of the passenger (0 = No, 1 = Yes)
# - **pclass**: Ticket class (1= 1st, 2 = 2nd, 3 = 3rd)
# - **sex**: Sex
# - **age**: Age
# - **sibsp**: # of siblings / spouses aboard the Titanic
# - **parch**: # of parents / children aboard the Titanic
# - **fare**: Passenger fare
# - **embark_town**: Port of embarkation (Southampton, Cherbourg, Queenstown)
# - **deck**: Ship deck (A to F)
# - **who**: man, woman or child
# - **alone**: Whether the passenger is alone or not
# ## 4. Plotting
# Let's now see how we can generate informative plots from DataFrames.
#
# Plots are a great way to get some insight on the data you're working with, as it can help you uncover relations between different features and visualize distributions.
# Import plotting packages
import matplotlib.pyplot as plt
import seaborn as sns
# ### Plotting with pandas
# Pandas offers plotting functionality with the `.plot` functions, which wrap around matplotlib.pyplot's `plot()`.
#
# More info about plotting with pandas can be found at: https://pandas.pydata.org/pandas-docs/stable/user_guide/visualization.html
#
# Here are two simple examples:
titanic.plot.scatter(x="age", y="fare", alpha=0.5)
titanic["age"].hist(bins=20, alpha=0.5)
plt.xlabel("age")
plt.ylabel("count")
# ### Plotting with seaborn
# Seaborn is a data visualization library based on matplotlib, which works very nicely with pandas DataFrames, allowing you to very quickly generate complex, informative (and aesthetically pleasing) plots. In this section, we'll show some of the plots that can be generated with seaborn.
#
# For a more in-depth seaborn tutorial, check out the official tutorial: https://seaborn.pydata.org/tutorial.html
#
# Let's improve on the two previous plots by adding the "sex" column as hue.
sns.scatterplot(data=titanic, x="age", y="fare", hue="sex", alpha=0.5)
# #### Distributions
# Visualizing distributions is a good way to find heavy tails and other key information about a feature's distribution, which can help you decide whether or not to truncate / scale features.
# Visualize distribution with a histogram
# KDE = Kernel Density Estimation
sns.histplot(data=titanic, x="age", hue="sex", kde=True)
# Empirical Cumulative Distribution Function (ECDF) plots are another great way to visualize distributions.
sns.ecdfplot(data=titanic, x="age")
# #### Categorical data
#
# Now, let's use a variety of plots offered by seaborn (such as count plots, box plots and violin plots) to gain a better insight on some of the features.
sns.countplot(data=titanic, x="pclass", hue="who")
sns.boxplot(data=titanic, x="pclass", y="age")
sns.violinplot(data=titanic, x="pclass", y="age")
# Seaborn also computes confidence intervals using [bootstrapping](https://en.wikipedia.org/wiki/Bootstrapping_(statistics))
sns.pointplot(data=titanic, y="survived", x="pclass", hue="sex")
# The previous plot reveals two key features for predicting which passengers survived the shipwreck.
# #### Multi-plot grids
#
# More advanced (and harder to plot), but can offer very insightful visualizations.
grid = sns.PairGrid(data=titanic, y_vars="survived", x_vars=["pclass", "who", "alone"])
grid.map(sns.pointplot)
# That's all for seaborn! For more examples of plots that can be generated with this library, check out the [seaborn example gallery](https://seaborn.pydata.org/examples/index.html).
# ## 5. Data cleaning / wrangling
# Let's now cover how to use pandas to clean / transform our data into a proper dataset for machine learning tasks.
# Quick preview of the dataset (with columns removed from part 3)
titanic
# ### Binning
# Binning features is very easy with `cut` and `qcut`.
#
# - `pd.cut` bins values into discrete intervals. These bins are equal-width bins (uniform binning) when providing an `int` for the `bins` parameters, but these bins can be whichever values you want by providing a sequence of scalars instead.
# - `pd.qcut` bins values using quantiles (quantile binning) instead.
titanic["age_group"] = pd.cut(x=titanic["age"], bins=5)
titanic["age_group"]
titanic["fare_group"] = pd.qcut(x=titanic["fare"], q=5)
titanic["fare_group"]
# ### Missing values
# Let's make a copy of the dataframe called `titanic_ml` to prevent destructive changes
titanic_ml = titanic.copy()
# Find which columns have missing values
titanic_ml.isna().any()
# Alternative approach: check non-null count, which also informs us of how many values are NaN
titanic_ml.info()
# #### Imputation
# Imputation can be done using `fillna`.
# Mean imputation for age (just as an example, there are many other approaches that are valid)
titanic_ml["age"] = titanic_ml["age"].fillna(titanic["age"].mean())
# #### Deletion
#
# `dropna()` can be used for deletion.
# - The `subset` parameter can be used to only consider missing values in a few specific columns.
# - `axis=0` drops rows, `axis=1` drops columns.
# Only 2 rows don't have NaN for embark_town, let's drop it
titanic_ml = titanic_ml.dropna(subset=["embark_town"], axis=0)
# Drop all the columns containing NaN
titanic_ml = titanic_ml.dropna(axis=1)
# What does our data look like now?
titanic_ml.info()
# ### One-hot encoding
# For most ML algorithms, we want our data to be entirely numerical, this requires encoding categorical features.
#
# One-hot encoding can be performed using `pd.get_dummies()`.
titanic_ml = pd.get_dummies(titanic_ml)
titanic_ml
titanic_ml.info()
# ### To NumPy
# All our columns are now numeric, we can further convert them all to the same data type (if needed) using `astype()` and then to NumPy using `to_numpy()`.
X = titanic_ml.drop(columns="survived").astype(float).to_numpy()
y = titanic_ml["survived"].to_numpy()
X[0:3]
X.shape
y[0:3]
y.shape
# And there we go! We covered the basics of pandas, as well as all the steps needed to go from a raw dataset to one usable by ML algorithms.
#
# This processed dataset can now be used for classification with whichever package you desire (e.g. NumPy, as was done in previous weeks, or PyTorch and scikit-learn as we'll see later on).
#
# Pandas is a very flexible package with many use cases, feel free to check out the additional resources to learn more about it.
# ## Additional pandas resources
#
# - Pandas Cheatsheet: https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf **<- VERY USEFUL**
# - Pandas User Guide: https://pandas.pydata.org/pandas-docs/stable/user_guide/index.html
# - API Reference: https://pandas.pydata.org/pandas-docs/stable/reference/index.html#api
# - Chapter 3 of the Python Data Science Handbook: https://jakevdp.github.io/PythonDataScienceHandbook/03.00-introduction-to-pandas.html
# - Kaggle Pandas course: https://www.kaggle.com/learn/pandas
#
| exercises/05-pandas/intro_to_pandas_Sol.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.5 ('the_bridge_22')
# language: python
# name: python3
# ---
# # 01 - Caracol y el pozo
#
# Un caracol cae en el fondo de un pozo de 125 cm. Cada día el caracol sube 30 cm. pero por la noche, mientras duerme, resbala 20 cm debido a que las paredes son húmedas. ¿Cuantos días tarda en escapar del pozo?
#
# TIP: https://www.vix.com/es/btg/curiosidades/59215/acertijos-matematicos-el-caracol-en-el-pozo-facil
#
# TIP: http://puzzles.nigelcoldwell.co.uk/sixtytwo.htm
# ## Solución
# +
# Asigna los datos del problema a variables con nombres representativos
# altura del pozo, avance diario, retroceso nocturno, distancia acumulada
# Problem data: well height, daily climb, nightly slide (all in cm).
altura_pozo = 125; avance_diario = 30; retroceso = 20


def dias_para_escapar(altura, avance, retroceso):
    """Return the number of days the snail needs to escape the well.

    Each day the snail climbs `avance` cm; if it has not yet reached
    `altura` cm by the end of the day, it slides back `retroceso` cm
    during the night.
    """
    distancia_acumulada = 0
    dia = 0
    while True:
        distancia_acumulada += avance  # daytime climb
        dia += 1
        # Check BEFORE the nightly slide: the snail escapes at the rim.
        # (Bug fix: the original compared against a hard-coded 125
        # instead of the altura_pozo variable it had just defined.)
        if distancia_acumulada >= altura:
            return dia
        distancia_acumulada -= retroceso  # nightly slide


dia = dias_para_escapar(altura_pozo, avance_diario, retroceso)
print(f"El caracol ha tardado en escapar del pozo {dia} dias")
# -
# ## Objetivos
#
# 1. Tratamiento de variables
# 2. Uso de bucle **while**
# 3. Uso de condicionales **if-else**
# 4. Imprimir por consola
| 01_PREWORK/week03/pra/01-snail-and-well/your-solution-here/snail-and-well-solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Работа с файлами и pandas
# ## 1. Библиотека os
pwd
# cd ../Downloads
pwd
# cd /Users/randomwalk/Desktop
pwd
import os # library for interacting with the computer's operating system
# NOTE: the calls below are destructive/stateful — they assume the named
# files exist in the current working directory and will raise otherwise.
os.remove('grrra.java')
os.mkdir('sem08')
os.replace('com.json', 'sem08/com.json')
os.replace('myFile.txt', 'sem08/myFile.txt')
os.listdir('sem08')
# ## 2. Разные расширения
#
# __.txt__ - просто тупо текст, записанный внутри файлика
pwd
# !cat 'sem08/myFile.txt' # скорее всего не работает на windows
f = open('/Users/randomwalk/Desktop/sem08/myFile.txt')
s = f.read() # reads the whole file at once
# s = f.readline() # reads one line per call
# s = f.readlines() # reads all lines as a list
s
# +
# \n - line break
# \t - tab (4 spaces)
# \r - carriage return to the start of the line (a legacy thing)
# -
f.close()
# Постоянно следить за тем, какие файлы открыты, а какие закрыты - влом!
# +
# r - read
# w - write (перезапись файла)
# a - append (дозаписать в файл новые строчки)
# -
with open('sem08/myFile.txt', 'r') as f:
s = f.read()
s
with open('sem08/myFile.txt', 'w') as f:
f.write('Привет, Мир!')
with open('sem08/myFile.txt', 'a') as f:
f.write('\nЕееее')
# !cat sem08/myFile.txt
with open('sem08/myFile.txt', 'r') as f:
s = f.read()
s
print(s)
# __.json -__ с точки зрения питона просто тупо словари, но с ограничениями и некоторым отличием
# +
import json
with open('sem08/com.json', 'r') as f:
s = json.load(f)
s.keys()
# -
m = s['tresholds'][:3]
m
with open('sem08/com_2.json', 'w') as f:
json.dump(m, f)
# !cat sem08/com_2.json
dct = {'Маша':10, 'lol': 15}
with open('sem08/file.json', 'w') as f:
json.dump(dct, f)
# !cat sem08/file.json
# __.pickle -__ бинарный формат данных, то есть поток байтов
# +
import pickle # Do not save files with pickle if they are larger than 2GB
dct = {'Маша':10, 'lol': 15}
with open('sem08/file.pickle', 'wb') as f:
    pickle.dump(dct, f)
# -
# !cat sem08/file.pickle
# SECURITY NOTE: only unpickle data you trust — pickle.load can execute
# arbitrary code embedded in the byte stream.
with open('sem08/file.pickle', 'rb') as f:
    s = pickle.load(f)
s
# __.csv__, __.tsv__, __.xslx__ - таблички
#
# - csv - значения разделенные через запятую
# - tsv - значения разделённые знаками табуляции
# !head dap_2021.tsv
import pandas as pd
df = pd.read_csv("dap_2021.tsv", sep='\t', decimal=',')
df.shape
df.head()
df.tail()
# +
# Таблица состоит из трёх частей
# Имена колонок
# Имена строчек
# Значения
# -
df.columns # Имена колонок
df.columns.values # превратили имена колонок в numpy вектор
df['cw01'].mean()
df[['hw01', 'hw02', 'cw01', 'cw02']].mean()
df.index # Имена строчек
df.loc[40]
# +
# df = df.set_index('Login')
df.set_index('Login', inplace=True)
# Назад откатиться можно df.reset_index( )
# -
df.head()
df.loc['dap_econ_2021_5']
# Значения - нумпаевская матрица
df.values
# ## 3. Работа с табличкой
df_s = df[['hw01', 'hw02', 'cw01', 'cw02']]
df_s.head()
df_s.describe()
df_s['hw01'].hist()
df_s['hw01'] >= 100
df_s[df_s['hw01'] >= 100].shape
# __Задание:__
#
# Прорешать тетрадку с pandas: https://nbviewer.jupyter.org/github/hse-econ-data-science/dap_2021_spring/blob/main/sem08_pandas/sem08_pandas_intro.ipynb
# ## 4. Очень хорошие вопросы
# __А чем отличаются методы от функций__
# +
# любая переменная в python - это объект
class Game():
    """Toy class used to illustrate the difference between methods and
    plain functions: ``play`` is a function bound to the object."""

    def __init__(self, size):
        # Remember the configured size on the instance.
        self.size = size

    def play(self):
        # A method: a function defined inside the class interface.
        print(self.size)
# -
x = Game(20)
x.size
x.play()
y = Game(30)
y.size
y.play()
# +
# если функция прописана внутри интерфейса, она - метод
# ессли функциия прописана вне интерфейса - функция
# -
import numpy as np
x = np.array([1,2,3,4,5])
x.sum() # метод
np.sum(x) # функция
# __Что такое кодировка?__
a = '<NAME>!'
a = a.encode('cp1251')
a
a = a.decode('cp1251')
a
a = 'Привет, мир!'
a = a.encode('utf-8')
a
a = a.decode('utf-8')
a
with open('sem08/myFile.txt', 'r', encoding='utf-8') as f:
s = f.read()
s
#
| sem08_pandas/sem08_204.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as sts
plt.style.use("seaborn") # If not compiling, remove this line.
# # Analyzing distribution
# For this task I chose [arcsine distribution](https://en.wikipedia.org/wiki/Arcsine_distribution).
#
# $$f(x) = \dfrac{1}{\pi\sqrt{x(1-x)}}$$
#
# Mean: $\dfrac{1}{2}$
#
# Variance: $\dfrac{1}{8}$
# Initializing mean and variance constants.
mean = 1/2
variance = 1/8
# +
# Initializing distribution.
arcsine = sts.arcsine()
# Taking 1000 random values from distribution.
sample = arcsine.rvs(1000)
# +
# Plotting histogramm of our 1000 values.
plt.hist(sample, bins=30, density=True, label="Values form distribution")
# Plotting theoretical probability density function.
x = np.linspace(arcsine.ppf(0.05), arcsine.ppf(0.95), 1000)
y = arcsine.pdf(x)
plt.plot(x, y, label="Theoretical probability density")
plt.ylabel('fraction of samples')
plt.xlabel('$x$')
plt.legend(loc='center')
plt.show()
# -
# # Estimating mean value distribution
# For each $n$ generating 1000 samples with sizes $n$.
# As I have mean and variance as constants, I just have to divide variance by $n$.
#
# $\bar{p_n} \approx \sim N(\mathbb{E}X, \dfrac{\mathbb{D}X}{n}) = N(\dfrac{1}{2}, \dfrac{\dfrac{1}{8}}{n}) $
# +
# Number of values in each sample.
n = [5, 10, 50, 100]
# List with size 1000 of means of n-sized sample list.
means = [[np.mean(arcsine.rvs(n_local)) for i in range(1000)] for n_local in n]
# Normal distributions for each n.
norms = [sts.norm(mean, np.sqrt(variance/n_local)) for n_local in n]
# -
# Building histogramms of samples distributions and normal distribution above them.
# +
# Initial data.
nrows = 2
ncols = 2
figsize = (10, 10)
bins = 20

# Initializing plots.
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex='all', sharey='all', figsize=figsize)

# Iterating through each subplot and drawing a histogram of each
# sample-mean distribution with the matching normal density on top.
for number, axe in enumerate(axes.reshape(-1)):
    axe.hist(means[number], bins=bins, density=True, label="Sample's mean's distribution")
    # BUG FIX: the original plotted norms[number].pdf(x), where `x` was
    # the stale arcsine grid left over from an earlier cell, against a
    # freshly computed linspace — the density must be evaluated on the
    # same grid that is used for the x-axis.
    grid = np.linspace(norms[number].ppf(1e-10), norms[number].ppf(1 - 1e-10), 1000)
    axe.plot(grid, norms[number].pdf(grid), 'r', label="Normal distributions")
    axe.set_title("Number of values in samples: {}".format(n[number]))
    axe.legend(loc="upper right")

# Drawing xtitle and ytitle.
fig.text(0.5, 0.06, '$x$', ha='center')
fig.text(0.04, 0.5, 'fraction of samples', va='center', rotation='vertical')
plt.show()
# -
# # Conclusion
# The difference between the obtained normal distributions lies in how accurately they approximate the dispersion of the sample means: the accuracy rises as $n$ rises.
| MathematicsAndPython/CentralLimitTheorem/CentralLimitTheorem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: DESI master
# language: python
# name: desi-master
# ---
# ### Denali Analysis Plots
import os, pdb
from glob import glob
import numpy as np
import fitsio
from astropy.table import Table
# +
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
sns.set(context='talk', style='ticks', palette='deep', font_scale=1.3)#, rc=rc)
colors = sns.color_palette()
pref = {'ELG': {'color': colors[0], 'marker': 's'},
'LRG': {'color': colors[3], 'marker': '^'},
'QSO': {'color': colors[4], 'marker': 'x'},
'BGS_ANY': {'color': colors[8], 'marker': 'o'}
}
# %matplotlib inline
# -
specprod = 'denali'
reduxdir = os.path.join(os.getenv('DESI_ROOT'), 'spectro', 'redux', specprod)
fastspecdir = os.path.join(os.getenv('DESI_ROOT'), 'spectro', 'fastspecfit', specprod, 'tiles')
figdir = os.path.join(os.getenv('DESI_ROOT'), 'users', 'ioannis', 'talks', '2021', '21apr-denali')
#figdir = os.path.join(os.getenv('HOME'), 'research', 'talks', '2021', '21Apr-denali')
print(fastspecdir)
print(figdir)
# ### Read the tiles file and the fastspecfit results
# +
def read_tileinfo():
    """Read the Denali tile summary CSV, drop tiles whose survey is
    'unknown', and return the table sorted by TILEID.

    Relies on the module-level ``reduxdir`` (spectro reduction root).
    """
    tileinfo = Table.read(os.path.join(reduxdir, 'tiles-denali.csv'))
    tileinfo = tileinfo[tileinfo['SURVEY'] != 'unknown']
    tileinfo = tileinfo[np.argsort(tileinfo['TILEID'])]
    return tileinfo
tileinfo = read_tileinfo()
print(len(tileinfo), tileinfo.colnames)
#tileinfo
# +
#specfile = os.path.join(fastspecdir, 'merged', 'fastspec-{}-cumulative.fits'.format(specprod))
#meta = Table(fitsio.read(specfile, 'METADATA'))
# +
def read_fastspecfit(tileinfo):
    """Read the merged fastspec/fastphot catalogs for this production.

    Parameters
    ----------
    tileinfo : astropy.table.Table
        Tile summary table; only its TILEID column is used.

    Returns
    -------
    (phot, spec, meta) : astropy Tables, row-matched on TARGETID, with
        convenience magnitude columns and boolean target-class columns
        (BGS_ANY/ELG/LRG/QSO) added.
    """
    from desitarget.targets import main_cmx_or_sv
    specfile = os.path.join(fastspecdir, 'merged', 'fastspec-{}-cumulative.fits'.format(specprod))
    photfile = os.path.join(fastspecdir, 'merged', 'fastphot-{}-cumulative.fits'.format(specprod))
    spec = Table(fitsio.read(specfile, 'FASTSPEC'))
    meta = Table(fitsio.read(specfile, 'METADATA'))
    phot = Table(fitsio.read(photfile, 'FASTPHOT'))
    # The two catalogs must be row-matched for the joint analysis below.
    assert(np.all(spec['TARGETID'] == phot['TARGETID']))
    print('Read {} objects from {}'.format(len(spec), specfile))
    print('Read {} objects from {}'.format(len(phot), photfile))
    ngal = len(spec)
    # convenience magnitudes and targeting variables
    # 22.5 - 2.5*log10(flux) converts the catalog fluxes to magnitudes
    # (presumably nanomaggies with an AB zeropoint of 22.5 — confirm);
    # non-positive fluxes are left at 0.
    for band in ('G', 'R', 'Z', 'W1'):
        phot['{}MAG'.format(band)] = np.zeros(ngal, 'f4')
        good = np.where(meta['FLUX_{}'.format(band)] > 0)[0]
        phot['{}MAG'.format(band)][good] = 22.5 - 2.5 * np.log10(meta['FLUX_{}'.format(band)][good])
    for band in ('G', 'R', 'Z'):
        phot['{}FIBERMAG'.format(band)] = np.zeros(ngal, 'f4')
        good = np.where(meta['FIBERFLUX_{}'.format(band)] > 0)[0]
        phot['{}FIBERMAG'.format(band)][good] = 22.5 - 2.5 * np.log10(meta['FIBERFLUX_{}'.format(band)][good])
    # Boolean per-class target masks, filled tile by tile because the
    # targeting-bit column/mask names can differ per tile (main vs SV).
    for targ in ['BGS_ANY', 'ELG', 'LRG', 'QSO']:
        spec[targ] = np.zeros(ngal, bool)
        phot[targ] = np.zeros(ngal, bool)
    for tile in tileinfo['TILEID']:
        I = np.where(meta['TILEID'] == tile)[0]
        if len(I) == 0:
            continue
        (desicol, bgscol, mwscol), (desimask, bgsmask, mwsmask), survey = main_cmx_or_sv(meta[I])
        #print(tile, survey)
        for targ in ['BGS_ANY', 'ELG', 'LRG', 'QSO']:
            # `&` binds tighter than `!=`, so this tests (bits & mask) != 0.
            phot[targ][I] = meta[desicol][I] & desimask.mask(targ) != 0
            spec[targ][I] = meta[desicol][I] & desimask.mask(targ) != 0
    print()
    for targ in ['BGS_ANY', 'ELG', 'LRG', 'QSO']:
        print(' {}: {}'.format(targ, np.sum(phot[targ])))
    return phot, spec, meta
allphot, allspec, allmeta = read_fastspecfit(tileinfo)
#spec
# -
# ### BGS Analysis
# +
def zhist(png=None):
    """Plot the redshift histogram of BGS_ANY targets, overlaying the
    subset whose redrock SPECTYPE is not 'GALAXY'.

    Reads the module-level ``allspec``/``allmeta`` tables and the
    ``pref`` color dictionary. If ``png`` is given, save the figure.
    """
    targ = 'BGS_ANY'
    # Boolean target-class columns were added by read_fastspecfit().
    itarg = np.where(allspec[targ])[0]
    itarg_nongal = np.where(allspec[targ] * (allmeta['SPECTYPE'] != 'GALAXY'))[0]
    #itarg_dchi2 = np.where(allspec[targ] * (allmeta['DELTACHI2'] < 40))[0]
    zlim = (-0.02, 0.7)
    fig, ax = plt.subplots(figsize=(8, 6))
    _ = ax.hist(allmeta['Z'][itarg], label='{} (N={})'.format(targ, len(itarg)),
                bins=500, alpha=0.5, color=pref[targ]['color'])
    #_ = ax.hist(allmeta['Z'][itarg_dchi2], label=r'{} & $\Delta\chi^2>40$ (N={})'.format(
    #    targ, len(itarg_dchi2)), range=zlim, bins=75, alpha=0.5, color='k')
    _ = ax.hist(allmeta['Z'][itarg_nongal], label=r"{} & spectype!='GALAXY' (N={})".format(
        targ, len(itarg_nongal)), bins=500, alpha=0.5, color='k')
    #ax.xaxis.set_major_locator(ticker.MultipleLocator(0.5))
    #ax.yaxis.set_major_locator(ticker.MultipleLocator(500))
    ax.set_xlim(zlim)
    ax.set_xlabel('Redshift')
    ax.set_ylabel('Number of Targets')
    ax.legend(fontsize=12)#, loc='upper right')
    fig.subplots_adjust(left=0.18, bottom=0.2, top=0.95, right=0.95)
    if png:
        print('Writing {}'.format(png))
        fig.savefig(png)
zhist(png=os.path.join(figdir, 'zhist.png'))
# +
def qa_tiledepth(tileinfo, png=None):
    """Histogram the effective bright-time depth of the BGS tiles.

    Parameters
    ----------
    tileinfo : astropy.table.Table
        Tile table with FAPRGRM and BGS_EFFTIME_BRIGHT columns.
    png : str, optional
        If given, save the figure to this path.

    Returns
    -------
    The subset of ``tileinfo`` whose FAPRGRM contains 'bgs'.
    """
    target, efftimecol = 'bgs', 'BGS_EFFTIME_BRIGHT'
    # Substring match against the fiber-assignment program name.
    thesetiles = tileinfo[[target in faprogram for faprogram in tileinfo['FAPRGRM']]]
    # Indices of the deepest quartile of tiles.
    deep = np.where(thesetiles[efftimecol] > np.percentile(thesetiles[efftimecol], 75))[0]
    xlim = (0, 60)
    fig, ax = plt.subplots(figsize=(6, 6))
    # Effective time is in seconds; plot in minutes.
    _ = ax.hist(thesetiles[efftimecol]/60, range=xlim,
                bins=20, label='BGS Tiles (N={})'.format(
                len(thesetiles)))
    #_ = ax.hist(thesetiles[efftimecol][deep]/60, range=xlim,
    #            bins=20, label='Deepest 25%', alpha=0.8)
    #ax.set_xlabel('{} (sec)'.format(efftimecol))
    ax.set_xlabel('Effective Bright Time (minutes)')
    ax.set_ylabel('Number of Tiles')
    ax.legend(fontsize=14)
    # NOTE(review): `deeptiles` (deepest tiles, sorted deepest first) is
    # computed but never used or returned — dead code left from an
    # earlier iteration?
    deep = deep[np.argsort(thesetiles[efftimecol][deep])][::-1]
    deeptiles = thesetiles[deep]
    fig.subplots_adjust(left=0.18, bottom=0.2, top=0.95, right=0.95)
    if png:
        print('Writing {}'.format(png))
        fig.savefig(png)
    return thesetiles
bgstiles = qa_tiledepth(tileinfo, png=os.path.join(figdir, 'bgs-tiledepth.png'))
# -
# ### Broadband photometry
# +
def restphot(png=None):
targ = 'BGS_ANY'
itarg = np.where(allspec[targ] * (allmeta['DELTACHI2'] > 40) *
(allmeta['SPECTYPE'] == 'GALAXY') *
(allmeta['Z'] < 0.7) *
(allphot['CONTINUUM_CHI2'] < 1e2))[0]
spec = allspec[itarg]
phot = allphot[itarg]
meta = allmeta[itarg]
zlim = (0, 0.7)
absmaglim = (-14, -26)
grlim = (-0.2, 1.2)
ss = 10
label = 'All BGS'
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
#ax1.scatter(meta['Z'], phot['ABSMAG_R'], s=ss,
# color=pref[targ]['color'], marker=pref[targ]['marker'],
# alpha=0.7, label=label)
im = ax1.hexbin(meta['Z'], phot['ABSMAG_R'],
mincnt=5, gridsize=80, bins='log',
extent=np.hstack((zlim, absmaglim)))
#ax.set_xlim(0.01, 200)
ax1.set_ylim(absmaglim)
ax1.set_xlabel('Redshift')
ax1.set_ylabel(r'M$_{0.0_{r}}$')
#ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%g'))
ax1.yaxis.set_major_locator(ticker.MultipleLocator(3))
#ax2.scatter(phot['ABSMAG_R'], phot['ABSMAG_G']-phot['ABSMAG_R'],
# s=ss, color=pref[targ]['color'], marker=pref[targ]['marker'],
# alpha=0.7)
im = ax2.hexbin(phot['ABSMAG_R'], phot['ABSMAG_G']-phot['ABSMAG_R'],
mincnt=5, gridsize=80, bins='log',
extent=np.hstack((absmaglim, grlim)))
ax2.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
ax2.xaxis.set_major_locator(ticker.MultipleLocator(3))
ax2.set_xlim(absmaglim)
ax2.set_ylim(grlim)
ax2.set_ylabel(r'$^{0.0}(g - r)$')
ax2.set_xlabel(r'M$_{0.0_{r}}$')
txt = '\n'.join((
#'N={}'.format(len(itarg)),
"spectype=='GALAXY'",
r'$0<z<0.7$',
r'$\Delta\chi^2>40$',
r'$\chi_{{fastphot}}<100$'))
ax1.text(0.94, 0.04, txt, ha='right', va='bottom',
transform=ax1.transAxes, fontsize=12)
ax1.text(0.04, 0.94, 'N={}'.format(len(itarg)), ha='left', va='top',
transform=ax1.transAxes, fontsize=12)
#ax1.legend(loc='lower right', markerscale=3, fontsize=16)
from matplotlib.patches import Rectangle
ax2.add_patch(Rectangle((-24, 0.7), 1, 0.2, # red, bright
facecolor='none', edgecolor='k'))
ax2.add_patch(Rectangle((-21, 0.7), 1, 0.2, # red, faint
facecolor='none', edgecolor='k'))
ax2.add_patch(Rectangle((-23, 0.2), 1, 0.2, # blue, bright
facecolor='none', edgecolor='k'))
ax2.add_patch(Rectangle((-18, 0.2), 1, 0.2, # blue, faint
facecolor='none', edgecolor='k'))
fig.subplots_adjust(wspace=0.3, bottom=0.2, left=0.1, right=0.95, top=0.95)
if png:
print('Writing {}'.format(png))
fig.savefig(png)
restphot(png=os.path.join(figdir, 'bgs-restphot.png'))
# -
# ### 4000-A break & line-emission
# +
def ewha_vs_d4000(png=None):
targ = 'BGS_ANY'
itarg = np.where(allspec[targ] * (allmeta['DELTACHI2'] > 40) *
(allmeta['SPECTYPE'] == 'GALAXY') *
(allmeta['Z'] < 0.7) *
(allphot['CONTINUUM_CHI2'] < 1e2) *
(allspec['HALPHA_AMP'] * np.sqrt(allspec['HALPHA_AMP_IVAR']) > 3) *
(allspec['HALPHA_EW'] > 0))[0]
ewha = np.log10(allspec['HALPHA_EW'][itarg])
d4000 = allphot['D4000_MODEL'][itarg]
itarg2 = np.where(allspec[targ] * (allmeta['DELTACHI2'] > 40) *
(allmeta['SPECTYPE'] == 'GALAXY') *
(allmeta['Z'] < 9800/6570-1) *
(allphot['CONTINUUM_CHI2'] < 1e2))[0]
print(len(itarg), len(itarg2), len(itarg)/len(itarg2))
d4000lim = (0.9, 2.2)
ewhalim = (0, 3.0)
fig, ax1 = plt.subplots(figsize=(8, 6))
im = ax1.hexbin(d4000, ewha,
mincnt=5, gridsize=80, bins='log',
extent=np.hstack((d4000lim, ewhalim)))
ax1.set_xlim(d4000lim)
ax1.set_ylim(ewhalim)
ax1.set_xlabel(r'$D_{n}(4000)$ [photometric]')
ax1.set_ylabel(r'$\log_{10}\ \mathrm{EW}(\mathrm{H}\alpha\ (\AA)$')
ax1.xaxis.set_major_locator(ticker.MultipleLocator(0.2))
txt = '\n'.join((
#'N={}'.format(len(itarg)),
r'S/N(H$\alpha)_{\mathrm{amp}}>3$',
r'EW(H$\alpha)>0$'))
ax1.text(0.96, 0.96, txt, ha='right', va='top',
transform=ax1.transAxes, fontsize=18)
ax1.text(0.02, 0.96, 'N={}'.format(len(itarg)), ha='left', va='top',
transform=ax1.transAxes, fontsize=18)
fig.subplots_adjust(left=0.18, bottom=0.2, top=0.95, right=0.95)
if png:
print('Writing {}'.format(png))
fig.savefig(png)
ewha_vs_d4000(png=os.path.join(figdir, 'ewha-vs-d4000.png'))
# -
# ### Build fun stacked spectra!
def quick_coadd(flux3d, ivar3d):
    """Inverse-variance-weighted coadd of a stack of spectra.

    Parameters
    ----------
    flux3d, ivar3d : ndarray, shape (nspectra, nwave)
        Individual fluxes and their inverse variances.

    Returns
    -------
    flux, ivar : ndarray, shape (nwave,)
        Weighted-mean flux and total inverse variance per pixel; pixels
        with zero total weight keep a flux of zero.
    """
    coadd_ivar = ivar3d.sum(axis=0)
    coadd_flux = np.zeros_like(coadd_ivar)
    nonzero = np.nonzero(coadd_ivar > 0)[0]
    weighted_sum = (ivar3d[:, nonzero] * flux3d[:, nonzero]).sum(axis=0)
    coadd_flux[nonzero] = weighted_sum / coadd_ivar[nonzero]
    return coadd_flux, coadd_ivar
# +
#stackfiles = np.array(glob(os.path.join(fastspecdir, 'stacks', 'bgs-*.fits')))
def build_stacks(alltiles, targ='BGS_ANY'):
from desitarget.targets import main_cmx_or_sv
def sample_cuts(sample_number, allmeta, allspec, allphot):
(desicol, bgscol, mwscol), (desimask, bgsmask, mwsmask), survey = main_cmx_or_sv(allmeta)
itarg = ( (allmeta[desicol] & desimask.mask(targ) != 0) *
(allmeta['DELTACHI2'] > 40) *
(allmeta['SPECTYPE'] == 'GALAXY') *
(allmeta['Z'] > 0.2) * (allmeta['Z'] < 0.4) *
(allspec['CONTINUUM_CHI2'] < 20) *
(allphot['CONTINUUM_CHI2'] < 1e2)
)
if sample_number == 0:
I = np.where(
itarg *
(allphot['D4000_MODEL'] > 1.8) * (allphot['D4000_MODEL'] < 2.0) * # red
(allspec['HALPHA_AMP'] * np.sqrt(allspec['HALPHA_AMP_IVAR']) < 5) *
(allspec['HALPHA_EW'] < 5))[0] # dead
elif sample_number == 1:
I = np.where(
itarg *
(allphot['D4000_MODEL'] > 1.7) * (allphot['D4000_MODEL'] < 2.0) * # red
(allspec['HALPHA_AMP'] * np.sqrt(allspec['HALPHA_AMP_IVAR']) > 5) *
(allspec['HALPHA_EW'] > 5))[0] # active
elif sample_number == 2:
I = np.where(
itarg *
(allphot['D4000_MODEL'] > 1.1) * (allphot['D4000_MODEL'] < 1.2) * # blue
(allspec['HALPHA_AMP'] * np.sqrt(allspec['HALPHA_AMP_IVAR']) > 5) *
(allspec['HALPHA_EW'] > 10))[0] # active
else:
pass
return I
nsample = 3
samples = [{'wave': [], 'flux': [], 'ivar': []} for x in np.arange(nsample)]
for thistile in alltiles:
stackfile = os.path.join(fastspecdir, 'stacks', 'bgs-{}-restflux.fits'.format(thistile['TILEID']))
#print('Reading {}'.format(stackfile))
flux = fitsio.read(stackfile, ext='FLUX')
ivar = fitsio.read(stackfile, ext='IVAR')
wave = fitsio.read(stackfile, ext='WAVE')
allphot = Table(fitsio.read(stackfile, ext='FASTPHOT'))
allspec = Table(fitsio.read(stackfile, ext='FASTSPEC'))
allmeta = Table(fitsio.read(stackfile, ext='METADATA'))
# select the sample of interest
for sample_number in np.arange(len(samples)):
I = sample_cuts(sample_number, allmeta, allspec, allphot)
print(stackfile, sample_number, len(I))
if len(I) > 0:
phot = allphot[I]
spec = allspec[I]
meta = allmeta[I]
if len(samples[sample_number]['wave']) == 0: # just need one
samples[sample_number]['wave'] = wave
samples[sample_number]['flux'].append(flux[I, :])
samples[sample_number]['ivar'].append(ivar[I, :])
# now stack the arrays, for convenience
for sample_number in np.arange(len(samples)):
samples[sample_number]['flux'] = np.vstack(samples[sample_number]['flux'])
samples[sample_number]['ivar'] = np.vstack(samples[sample_number]['ivar'])
return samples
samples = build_stacks(bgstiles)
# -
for sample in samples:
print(sample['flux'].shape)
wave = samples[0]['wave']
flux_red, ivar_red = quick_coadd(samples[0]['flux'], samples[0]['ivar'])
flux_blue, ivar_blue = quick_coadd(samples[2]['flux'], samples[2]['ivar'])
# +
xlim = (2800, 7200)
fig, ax = plt.subplots(1, 2, figsize=(12, 5))
ax[0].plot(wave, flux_red / np.interp(5500, wave, flux_red), color='firebrick')
ax[1].plot(wave, flux_blue / np.interp(5500, wave, flux_red), color='dodgerblue')
ax[0].set_ylim(0, 1.2)
ax[1].set_ylim(0, 5.4)
ax[0].yaxis.set_major_locator(ticker.MultipleLocator(0.5))
for xx in ax:
xx.set_xlim(xlim)
#xx.set_yticklabels([])
#xx.set_xlabel('Rest-frame Wavelength ($\AA$)')
xx.xaxis.set_major_locator(ticker.MultipleLocator(1000))
#ax[0].set_ylabel(r'$F_{\lambda}$ (normalized)')
ax[0].set_ylabel(r'$F_{\lambda}\ /\ F_{5500}$')
txt = '\n'.join((
r'EW(H$\alpha)<5\ \AA$',
r'$1.8<D_{n}(4000)<2$'))
ax[0].text(0.96, 0.03, txt, ha='right', va='bottom',
transform=ax[0].transAxes, fontsize=14)
txt = '\n'.join((
r'EW(H$\alpha)>10\ \AA$',
r'$1.1<D_{n}(4000)<1.2$'))
ax[1].text(0.96, 0.97, txt, ha='right', va='top',
transform=ax[1].transAxes, fontsize=14)
txt = '\n'.join(('N=8965', r'$0.2 < z < 0.4$'))
ax[0].text(0.03, 0.97, txt, ha='left', va='top',
transform=ax[0].transAxes, fontsize=14)
txt = '\n'.join(('N=1714', r'$0.2 < z < 0.4$'))
ax[1].text(0.03, 0.97, txt, ha='left', va='top',
transform=ax[1].transAxes, fontsize=14)
fig.text(0.5, 0.1, r'Rest-frame Wavelength ($\AA$)',
ha='center', va='top', fontsize=21)
fig.subplots_adjust(wspace=0.15, bottom=0.2, right=0.95, top=0.95)
fig.savefig(os.path.join(figdir, 'bgs-stacked-red-blue.png'))
# -
sample = samples[1]
flux, ivar = quick_coadd(sample['flux'], sample['ivar'])
xlim = (3800, 7000)
wave = sample['wave']
I = (wave > xlim[0]) * (wave < xlim[1])
plt.plot(wave[I], flux[I])
sample = samples[2]
flux, ivar = quick_coadd(sample['flux'], sample['ivar'])
xlim = (3800, 7000)
wave = sample['wave']
I = (wave > xlim[0]) * (wave < xlim[1])
plt.plot(wave[I], flux[I])
stop
# #### Select mildly deep BGS tiles
# +
expbgs_all = expinfo['TARGETS'] == 'BGS+MWS'
expbgs_deep = (expinfo['TARGETS'] == 'BGS+MWS') * (expinfo['EFFTIME_BRIGHT'] > 120)
_ = plt.hist(expinfo['EFFTIME_BRIGHT'][expbgs_all], bins=20, range=(0, 300))
expbgs = expinfo[expbgs_deep]
thesetiles = np.array(list(set(expbgs['TILEID'])))
I = np.isin(allphot['TILEID'], thesetiles)
phot = allphot[I]
spec = allspec[I]
print(len(spec), len(allspec))
print(thesetiles)
print(np.array(list(set(spec['TILEID']))))
# +
xlim = (18, 22.5)
ylim = (-1, 1)
I = np.where(
(spec['ZWARN'] == 0) * (spec['SPECTYPE'] != 'STAR') *
#(spec['CONTINUUM_SNR'][:, 1] > 5) *
(phot['FLUX_G'] > 0) * (phot['FLUX_R'] > 0) * (phot['FLUX_Z'] > 0) *
(phot['FLUX_W1'] > 0) * (phot['FIBERTOTFLUX_R'] > 0) *
(phot['D4000_MODEL'] > 0.5) * (phot['D4000_MODEL'] < 2.5)
)[0]
print(len(I))
schlegel_color = (phot['ZMAG'][I] - phot['W1MAG'][I]) - 3/2.5 * (phot['GMAG'][I] - phot['RMAG'][I]) + 1.2
fig, ax = plt.subplots(figsize=(10, 7))
im = ax.hexbin(phot['RFIBERMAG'][I], schlegel_color, mincnt=5,
gridsize=80, vmin=0.5, vmax=2.5, bins='log',
extent=np.hstack((xlim, ylim)), C=phot['D4000_MODEL'][I], reduce_C_function=np.mean)
#cax = fig.add_axes([0.9, 0.15, 0.025, 0.7])
cb = fig.colorbar(im, label='D4000')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xlabel(r'$r$ fiber')
ax.set_ylabel(r'$(z - w1) - 3 (g - r) / 2.5 + 1.2$')
# +
def zhist_bins(deltaz=0.1, zmin=0.0, zmax=2, target='QSO', binedges=False):
    """Return redshift bins for the given target class.

    Known targets (LRG, BGS, QSO) override zmin/zmax with canonical
    ranges; any other target keeps the caller-supplied limits. With
    binedges=True the left bin edges are returned, otherwise the bin
    centers (edges shifted by deltaz/2).
    """
    canonical = {'LRG': (0.0, 1.2), 'BGS': (0.0, 0.5), 'QSO': (0.0, 3.0)}
    key = target.strip()
    if key in canonical:
        zmin, zmax = canonical[key]
    edges = np.arange(zmin, zmax, deltaz)
    return edges if binedges else edges + deltaz / 2

def zhist_type(data, target='QSO'):
    """Histogram redshifts into the per-target bins.

    Returns (hist, bins) where ``bins`` are the left bin edges used to
    derive the histogram's range.
    """
    bins = zhist_bins(target=target, binedges=True)
    hist, _ = np.histogram(data, bins=len(bins), range=(bins.min(), bins.max()))
    return hist, bins
def qso_mgii(png=None):
    """Plot, vs. redshift, the fraction of QSO targets with/without a
    significant MgII 2800 detection, also split by Redrock SPECTYPE.

    Uses module-level globals: allspec, desi_mask, zhist_type, sns, plt.
    If `png` is given the figure is also written to that path.
    """
    # all QSO targets
    iqso = np.where(allspec['SV1_DESI_TARGET'] & desi_mask.mask('QSO') != 0)[0]
    # MgII "detected" at threshold 3 -- NOTE(review): this is amp*ivar,
    # not amp*sqrt(ivar) as used for Halpha elsewhere; confirm the
    # intended significance definition
    iqso_mgii = np.where(((allspec['SV1_DESI_TARGET'] & desi_mask.mask('QSO') != 0) *
                          ((allspec['MGII_2800_AMP'] * allspec['MGII_2800_AMP_IVAR']) > 3.0)))[0]
    iqso_nomgii = np.where(((allspec['SV1_DESI_TARGET'] & desi_mask.mask('QSO') != 0) *
                            ((allspec['MGII_2800_AMP'] * allspec['MGII_2800_AMP_IVAR']) <= 3.0)))[0]
    #iqso_redrock = np.where((allspec['SV1_DESI_TARGET'] & desi_mask.mask('QSO') != 0) *
    #                        (allspec['SPECTYPE'] == 'QSO'))[0]
    iqso_redrock_qso_mgii = np.where((allspec['SV1_DESI_TARGET'] & desi_mask.mask('QSO') != 0) *
                                     (allspec['SPECTYPE'] == 'QSO') *
                                     ((allspec['MGII_2800_AMP'] * allspec['MGII_2800_AMP_IVAR']) > 3.0))[0]
    iqso_redrock_notqso_mgii = np.where((allspec['SV1_DESI_TARGET'] & desi_mask.mask('QSO') != 0) *
                                        (allspec['SPECTYPE'] != 'QSO') *
                                        ((allspec['MGII_2800_AMP'] * allspec['MGII_2800_AMP_IVAR']) > 3.0))[0]
    print('Number of QSOs: {}'.format(len(iqso)))
    #print('Number of QSOs with MgII: {}'.format(len(iqso_mgii)))
    #print('Number with QSOs with spectype==QSO: {}'.format(len(iqso_redrock)))
    #print('Number with QSOs with spectype==QSO and MgII: {}'.format(len(iqso_redrock_mgii)))
    with sns.plotting_context(context='talk', font_scale=1.0):
        fig, ax = plt.subplots(figsize=(9, 7))
        # histogram each subsample on the shared QSO redshift bins
        allhist, zbins = zhist_type(allspec['Z'][iqso])
        mgii_hist, _ = zhist_type(allspec['Z'][iqso_mgii])
        nomgii_hist, _ = zhist_type(allspec['Z'][iqso_nomgii])
        redrock_qso_mgii_hist, _ = zhist_type(allspec['Z'][iqso_redrock_qso_mgii])
        redrock_notqso_mgii_hist, _ = zhist_type(allspec['Z'][iqso_redrock_notqso_mgii])
        zgood = np.where(allhist > 0)[0]  # skip empty bins (avoid divide-by-zero)
        ax.plot(zbins[zgood], mgii_hist[zgood]/allhist[zgood],
                ls='-', lw=2, label='MgII')
        ax.plot(zbins[zgood], nomgii_hist[zgood]/allhist[zgood],
                ls='-', lw=2, label='No MgII')
        ax.plot(zbins[zgood], redrock_qso_mgii_hist[zgood]/allhist[zgood],
                ls='-', lw=2, label='MgII & spectype==QSO')
        ax.plot(zbins[zgood], redrock_notqso_mgii_hist[zgood]/allhist[zgood],
                ls='-', lw=2, label='MgII & spectype!=QSO')
        ax.set_ylim(0, 1.05)
        ax.set_ylabel('Fraction of QSO Targets')
        # presumably the redshift window where MgII 2800 lies within the
        # ~3600-9900 A spectral coverage -- TODO confirm
        ax.axvline(x=3600/2800-1, ls='--', lw=1, color='gray')
        ax.axvline(x=9900/2800-1, ls='--', lw=1, color='gray')
        ax.legend(fontsize=14)
    if png:
        print('Writing {}'.format(png))
        fig.savefig(png)
qso_mgii(png=os.path.join(figdir, 'frac-qso-mgii.png'))
# +
#stackfiles = np.array(glob(os.path.join(fastspecdir, 'stacks', 'bgs-*.fits')))
def build_stacks(alltiles, targ='BGS_ANY'):
    """Gather rest-frame stacked spectra for four BGS subsamples
    (red/blue x bright/faint, cut in absolute magnitude Mr and
    rest-frame g-r color) across all the given tiles.

    Parameters
    ----------
    alltiles : iterable of rows carrying a 'TILEID' column.
    targ : targeting bit name passed to the desitarget mask.

    Returns
    -------
    (wave, flux_red_bright, ivar_red_bright, flux_red_faint,
     ivar_red_faint, flux_blue_bright, ivar_blue_bright,
     flux_blue_faint, ivar_blue_faint) — `wave` is shared by all stacks.
    """
    from desitarget.targets import main_cmx_or_sv

    def sample_cuts(sample_number, allmeta, allspec, allphot):
        # NOTE(review): this helper is never called below and does not
        # return I; it also references desicol/desimask which only exist
        # inside the tile loop.  Kept for reference.
        itarg = ( (allmeta[desicol] & desimask.mask(targ) != 0) *
                  (allmeta['DELTACHI2'] > 40) *
                  (allmeta['SPECTYPE'] == 'GALAXY') *
                  (allmeta['Z'] > 0.05) * (allmeta['Z'] < 0.7) *
                  (allphot['CONTINUUM_CHI2'] < 1e2)
                )
        if sample_number == '1':
            I = np.where(
                itarg *
                (allphot['D4000_MODEL'] > 1.7) * (allphot['D4000_MODEL'] < 2.0) * # red
                (allspec['HALPHA_AMP'] * np.sqrt(allspec['HALPHA_AMP_IVAR']) < 5) *
                (allspec['HALPHA_EW'] < 5))[0] # dead
        elif sample_number == '2':
            I = np.where(
                itarg *
                (allphot['D4000_MODEL'] > 1.7) * (allphot['D4000_MODEL'] < 2.0) * # red
                (allspec['HALPHA_AMP'] * np.sqrt(allspec['HALPHA_AMP_IVAR']) > 5) *
                (allspec['HALPHA_EW'] > 5))[0] # active
        elif sample_number == '3':
            I = np.where(
                itarg *
                # BUGFIX: the original lacked the trailing '*' below, which made
                # the next parenthesized array a (failing) function call
                (allphot['D4000_MODEL'] > 1.1) * (allphot['D4000_MODEL'] < 1.3) * # blue
                (allspec['HALPHA_AMP'] * np.sqrt(allspec['HALPHA_AMP_IVAR']) > 5) *
                (allspec['HALPHA_EW'] > 5))[0] # active
        else:
            pass

    wave_red_bright, flux_red_bright, ivar_red_bright = [], [], []
    wave_red_faint, flux_red_faint, ivar_red_faint = [], [], []
    wave_blue_bright, flux_blue_bright, ivar_blue_bright = [], [], []
    wave_blue_faint, flux_blue_faint, ivar_blue_faint = [], [], []
    for thistile in alltiles:
        stackfile = os.path.join(fastspecdir, 'stacks', 'bgs-{}-restflux.fits'.format(thistile['TILEID']))
        #print('Reading {}'.format(stackfile))
        flux = fitsio.read(stackfile, ext='FLUX')
        ivar = fitsio.read(stackfile, ext='IVAR')
        wave = fitsio.read(stackfile, ext='WAVE')
        allphot = Table(fitsio.read(stackfile, ext='FASTPHOT'))
        allspec = Table(fitsio.read(stackfile, ext='FASTSPEC'))
        allmeta = Table(fitsio.read(stackfile, ext='METADATA'))
        (desicol, bgscol, mwscol), (desimask, bgsmask, mwsmask), survey = main_cmx_or_sv(allmeta)
        # well-measured galaxies of the requested target class
        ibgs = allmeta[desicol] & desimask.mask(targ) != 0
        itarg = np.where(ibgs * (allmeta['DELTACHI2'] > 40) *
                         (allmeta['SPECTYPE'] == 'GALAXY') *
                         (allmeta['Z'] > 0.05) * (allmeta['Z'] < 0.7) *
                         (allphot['CONTINUUM_CHI2'] < 1e2))[0]
        phot = allphot[itarg]
        spec = allspec[itarg]
        meta = allmeta[itarg]
        Mr = phot['ABSMAG_R']
        gr = phot['ABSMAG_G'] - phot['ABSMAG_R']
        if False:  # debugging scatter of the color-magnitude plane
            plt.scatter(Mr, gr, s=10)
            plt.xlim(-15, -25)
            plt.ylim(0, 1.2)
            plt.show()
            pdb.set_trace()
        #ax2.add_patch(Rectangle((-24, 0.7), 1, 0.2, # red, bright
        #                        facecolor='none', edgecolor='k'))
        #ax2.add_patch(Rectangle((-21, 0.7), 1, 0.2, # red, faint
        #                        facecolor='none', edgecolor='k'))
        #ax2.add_patch(Rectangle((-23, 0.2), 1, 0.2, # blue, bright
        #                        facecolor='none', edgecolor='k'))
        #ax2.add_patch(Rectangle((-18, 0.2), 1, 0.2, # blue, faint
        #                        facecolor='none', edgecolor='k'))
        # color-magnitude boxes defining the four subsamples
        red_bright = np.where((Mr > -24) * (Mr < -22) * (gr > 0.8) * (gr < 1.2))[0]
        red_faint = np.where((Mr > -21) * (Mr < -20) * (gr > 0.8) * (gr < 1.2))[0]
        blue_bright = np.where((Mr > -23) * (Mr < -22) * (gr > 0.2) * (gr < 0.4))[0]
        blue_faint = np.where((Mr > -18) * (Mr < -17) * (gr > 0.2) * (gr < 0.4))[0]
        print(stackfile, len(red_bright), len(red_faint), len(blue_bright), len(blue_faint))
        if len(red_bright) > 0:
            wave_red_bright.append(wave)
            flux_red_bright.append(flux[red_bright, :])
            ivar_red_bright.append(ivar[red_bright, :])
        if len(red_faint) > 0:
            wave_red_faint.append(wave)
            flux_red_faint.append(flux[red_faint, :])
            ivar_red_faint.append(ivar[red_faint, :])
        if len(blue_bright) > 0:
            wave_blue_bright.append(wave)
            flux_blue_bright.append(flux[blue_bright, :])
            ivar_blue_bright.append(ivar[blue_bright, :])
        if len(blue_faint) > 0:
            wave_blue_faint.append(wave)
            flux_blue_faint.append(flux[blue_faint, :])
            ivar_blue_faint.append(ivar[blue_faint, :])
    wave = wave_red_bright[0] # all the same
    flux_red_bright = np.vstack(flux_red_bright)
    flux_red_faint = np.vstack(flux_red_faint)
    flux_blue_bright = np.vstack(flux_blue_bright)
    flux_blue_faint = np.vstack(flux_blue_faint)
    ivar_red_bright = np.vstack(ivar_red_bright)
    ivar_red_faint = np.vstack(ivar_red_faint)
    ivar_blue_bright = np.vstack(ivar_blue_bright)
    ivar_blue_faint = np.vstack(ivar_blue_faint)
    return (wave,
            flux_red_bright, ivar_red_bright,
            flux_red_faint, ivar_red_faint,
            flux_blue_bright, ivar_blue_bright,
            flux_blue_faint, ivar_blue_faint)
# unpack the stacked-spectra arrays for all BGS tiles
(wave, flux_red_bright, ivar_red_bright, flux_red_faint, ivar_red_faint,
 flux_blue_bright, ivar_blue_bright, flux_blue_faint, ivar_blue_faint) = build_stacks(bgstiles)
| 2021/21apr-denali/figures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Interpolation
# <a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://licensebuttons.net/l/by/4.0/80x15.png" /></a><br />This notebook by <NAME> is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>.
# All code examples are also licensed under the [MIT license](http://opensource.org/licenses/MIT).
import numpy as np
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from IPython.display import clear_output, display
# Polynomial interpolation is an ancient and efficient way of representing data, and it is fundamental to advancing the understanding of scientific problems.
#
# __Problem:__ Suppose that points $(x, y)$ are taken from a given function $y = f(x)$, or perhaps from an experiment where $x$ denotes a physical variable and $y$ denotes the reaction rate. Finding a polynomial through the set of data amounts to
#
# * replacing the __infinite__ amount of information (a function) with a rule that can be evaluated in a __finite__ number of steps.
# * usually, the polynomial cannot match the function exactly at new inputs $x$, but it may be close enough to solve practical problems.
# ## Interpolating Polynomial
#
# __Main Theorem of Polynomial Interpolation__
#
# Let $(x_1, y_1),\ldots, (x_n, y_n)$ be $n$ points in the plane with distinct $x$. Then, there exists one and only one polynomial $P(x)$ of degree $n-1$ or less
# that satisfies $P(x_j) = y_j$ for $j = 1,\ldots, n$.
#
# * __Lagrange interpolating polynomial__
# $$ P_{n-1}(x) = y_1 L_1(x) + \cdots + y_n L_n(x),$$
# where
# $$ L_k(x) = \frac{(x - x_1)\cdots(x - x_{k-1})(x-x_{k+1})\cdots(x - x_n)}{(x_k - x_1)\cdots(x_k-x_{k-1})(x_k-x_{k+1})\cdots(x_k - x_n)}.$$
#
# * __Newton's divided differences__
# $$ P_{n-1}(x) = f[x_1] + f[x_1, x_2](x-x_1) + \cdots + f[x_1,\ldots,x_n](x - x_1)\cdots(x - x_{n-1}),$$
# where
# \begin{align*}
# f[x_k] & = f(x_k) \\
# f[x_k, x_{k+1}] & = \frac{f[x_{k+1}] - f[x_k]}{x_{k+1}-x_k} \\
# f[x_k, x_{k+1}, x_{k+2}] & = \frac{f[x_{k+1},x_{k+2}] - f[x_k, x_{k+1}]}{x_{k+2} - x_k} \\
# & \cdots
# \end{align*}
# +
# standard polynomial interpolation (Newton)
def newton(x, y):
    """Return the Newton divided-difference coefficients for the
    polynomial interpolating the points (x[j], y[j]).

    c[i] = f[x_0, ..., x_i]; evaluate the polynomial with newton_eval.
    """
    n = np.size(x)
    v = np.zeros([n, n])  # divided-difference table; column k = order k
    v[:, 0] = y           # zeroth order: the data values themselves
    for i in range(1, n):
        for j in range(n - i):
            v[j, i] = (v[j + 1, i - 1] - v[j, i - 1]) / (x[j + i] - x[j])
    # the top row holds f[x_0], f[x_0,x_1], ..., f[x_0,...,x_{n-1}]
    return v[0, :].copy()
def newton_eval(x_eval, x, c):
    """Evaluate the Newton-form polynomial with coefficients c (from
    newton) and nodes x at the point x_eval, via Horner's nested scheme.
    """
    n = np.size(x)
    # BUGFIX: with a single node the nested scheme below would return
    # c[0]*(x_eval - x[-1]) + c[0] instead of the constant c[0]
    if n == 1:
        return c[0]
    p = c[n-1]*(x_eval - x[n-2])
    for i in range(1, n-1):
        p = (p + c[n-1-i])*(x_eval - x[n-2-i])
    p = p + c[0]
    return p
def newton_plot(x, y, num):
    """Scatter the data points (x_j, y_j) and overlay their Newton
    interpolant sampled at num+1 evenly spaced abscissae."""
    plt.plot(x, y, 'ro', markersize=12, linewidth=3)
    coeffs = newton(x, y)
    grid = np.linspace(np.min(x), np.max(x), num + 1)
    curve = np.array([newton_eval(t, x, coeffs) for t in grid])
    plt.plot(grid, curve, 'k', linewidth=3)
# -
# **Example**
# Interpolate the points $(0, 1), (2,2), (3,4)$.
# Worked example: quadratic through (0, 1), (2, 2), (3, 4)
n = 3
x = np.array([0, 2, 3])
y = np.array([1, 2, 4])
c = newton(x,y)  # divided-difference coefficients
newton_eval(-1, x, c)  # extrapolate the interpolant at x = -1
newton_plot(x,y,10)  # draw the points plus the interpolating curve
def Interpolating_demo(f, a, b, n):
    """Plot f on [a, b] (blue) together with its Newton interpolant (black)
    through n+1 equally spaced nodes (red dots)."""
    dense = np.linspace(a, b, 201)
    f_dense = np.array([f(t) for t in dense])
    plt.plot(dense, f_dense, 'b', linewidth=3)
    nodes = np.linspace(a, b, n + 1)
    node_vals = np.array([f(t) for t in nodes])
    plt.plot(nodes, node_vals, 'ro', markersize=12, linewidth=3)
    coeffs = newton(nodes, node_vals)
    p_dense = np.array([newton_eval(t, nodes, coeffs) for t in dense])
    plt.plot(dense, p_dense, 'k', linewidth=3)
# **Example**
# Interpolate the function $f(x) = \sin x$ at $4$ equally spaced points on $[0,\pi/2]$.
# n = 3 -> 4 nodes -> cubic interpolant
Interpolating_demo(np.sin, 0, np.pi/2, 3)
# **Question:** How about the performance of $P(x)$ outside the given points (range)?
def Interpolating_demo1(f, a, b, a1, b1, n):
    """Interpolate f with n+1 equispaced nodes on [a, b], but plot both f
    and the interpolant over the wider window [a1, b1] to expose the
    extrapolation error outside the node range."""
    dense = np.linspace(a1, b1, 401)
    f_dense = np.array([f(t) for t in dense])
    plt.plot(dense, f_dense, 'b', linewidth=3)
    nodes = np.linspace(a, b, n + 1)
    node_vals = np.array([f(t) for t in nodes])
    plt.plot(nodes, node_vals, 'ro', markersize=12, linewidth=3)
    coeffs = newton(nodes, node_vals)
    p_dense = np.array([newton_eval(t, nodes, coeffs) for t in dense])
    plt.plot(dense, p_dense, 'k', linewidth=3)
# extrapolation demo: nodes only on [0, pi/2], plotted over [-2pi, 2pi]
Interpolating_demo1(np.sin, 0, np.pi/2, -2*np.pi, 2*np.pi, 3)
# +
a = -np.pi
b = np.pi
# slider: vary the number of equispaced interpolation nodes for sin on [-pi, pi]
w = interactive(Interpolating_demo, f=fixed(np.sin), a=fixed(a), b=fixed(b), n=widgets.IntSlider(min=1,max=20,value=1))
display(w)
# -
# ## Runge phenomenon
# Interpolate $f(x) = 1/(1 + 12 x^2)$ at evenly spaced points in $[-1,1]$.
# +
def fun(x):
    """Runge's example function: f(x) = 1 / (1 + 12 x^2)."""
    denominator = 1 + 12 * x ** 2
    return 1 / denominator
# slider demo of the Runge phenomenon: with equispaced nodes the
# oscillation near the interval ends grows with n
w = interactive(Interpolating_demo, f=fixed(fun), a=fixed(-1), b=fixed(1), n=widgets.IntSlider(min=1,max=40,value=1))
display(w)
# -
# ## Chebyshev Interpolation
#
# ### Chebyshev Polynomial
# +
def poly_f(x0, x):
    """Evaluate the nodal polynomial prod_i (x0 - x[i])."""
    value = 1.
    for node in x:
        value = value * (x0 - node)
    return value
def Chebyshev_demo2(f, a, b, n):
    """Mark the n+1 Chebyshev points on [a, b] (red) and draw f(., nodes)
    on a dense grid (blue); used with poly_f to show the nodal polynomial."""
    k = np.arange(n + 1)
    nodes = np.cos((2 * k + 1) * np.pi / (2 * (n + 1)))  # Chebyshev points on [-1, 1]
    nodes = nodes * (b - a) / 2 + (a + b) / 2            # mapped to [a, b]
    node_vals = np.array([f(t, nodes) for t in nodes])
    plt.plot(nodes, node_vals, 'ro', markersize=12, linewidth=3)
    dense = np.linspace(a, b, 201)
    dense_vals = np.array([f(t, nodes) for t in dense])
    plt.plot(dense, dense_vals, 'b', linewidth=3)
# -
# slider: nodal polynomial through Chebyshev points on [-1, 1]
w = interactive(Chebyshev_demo2, f=fixed(poly_f), a=fixed(-1), b=fixed(1), n=widgets.IntSlider(min=1,max=20,value=1))
display(w)
def Interpolating_demo2(f, a, b, n):
    """Plot f on [a, b] (blue) and its Newton interpolant (black) through
    n+1 Chebyshev nodes (red dots)."""
    dense = np.linspace(a, b, 201)
    f_dense = np.array([f(t) for t in dense])
    plt.plot(dense, f_dense, 'b', linewidth=3)
    k = np.arange(n + 1)
    nodes = np.cos((2 * k + 1) * np.pi / (2 * (n + 1)))  # Chebyshev points on [-1, 1]
    nodes = nodes * (b - a) / 2 + (a + b) / 2            # mapped to [a, b]
    node_vals = np.array([f(t) for t in nodes])
    plt.plot(nodes, node_vals, 'ro', markersize=12, linewidth=3)
    coeffs = newton(nodes, node_vals)
    p_dense = np.array([newton_eval(t, nodes, coeffs) for t in dense])
    plt.plot(dense, p_dense, 'k', linewidth=3)
# Chebyshev nodes tame the Runge phenomenon for the same function
w = interactive(Interpolating_demo2, f=fixed(fun), a=fixed(-1), b=fixed(1), n=widgets.IntSlider(min=1,max=20,value=1))
display(w)
# * $f(x) = e^{|x|}$
# +
def fun1(x):
    """Exponential of the absolute value: e^{|x|} (kink at x = 0)."""
    magnitude = np.abs(x)
    return np.exp(magnitude)
# e^{|x|} is not differentiable at 0, so even Chebyshev interpolation converges slowly
w = interactive(Interpolating_demo2, f=fixed(fun1), a=fixed(-1), b=fixed(1), n=widgets.IntSlider(min=1,max=20,value=1))
display(w)
# -
# ## Piecewise-Linear Interpolation
def Interpolating_demo3(f, a, b, n):
    """Plot f on [a, b] with its piecewise-linear interpolant through
    n+1 equispaced nodes."""
    dense = np.linspace(a, b, 201)
    f_dense = np.array([f(t) for t in dense])
    plt.plot(dense, f_dense, 'b', linewidth=3)
    nodes = np.linspace(a, b, n + 1)
    node_vals = np.array([f(t) for t in nodes])
    plt.plot(nodes, node_vals, 'ro', markersize=12, linewidth=3)
    # connecting consecutive nodes with straight segments IS the interpolant
    plt.plot(nodes, node_vals, 'k', linewidth=3)
# piecewise-linear interpolation of the Runge function: no oscillation
w = interactive(Interpolating_demo3, f=fixed(fun), a=fixed(-1), b=fixed(1), n=widgets.IntSlider(min=1,max=40,value=1))
display(w)
# ## Cubic Splines
# +
def CubicSpline_coeff(h, x, y):
    """Solve for the node slopes m of a cubic spline through equally
    spaced nodes x (spacing h) with values y.

    Tridiagonal system:
        2 m_0   +   m_1              = 3 (y_1     - y_0    ) / h
        m_{i-1} + 4 m_i + m_{i+1}    = 3 (y_{i+1} - y_{i-1}) / h
                  m_{n-2} + 2 m_{n-1} = 3 (y_{n-1} - y_{n-2}) / h
    """
    n = np.size(x)
    b = np.zeros(n) + 4.
    b[0] = 2.; b[n-1] = 2.
    a = np.zeros(n-1) + 1.
    A = np.diag(b) + np.diag(a, -1) + np.diag(a, 1)
    dy = np.zeros(n)
    # BUGFIX: interior nodes run from i = 1; the original loop started at
    # 2, leaving dy[1] = 0 and producing a wrong spline for >= 3 nodes
    for i in range(1, n-1):
        dy[i] = y[i+1] - y[i-1]
    dy[0] = y[1] - y[0]
    dy[n-1] = y[n-1] - y[n-2]
    dy = 3*dy/h
    m = np.linalg.solve(A, dy)
    return m
def CubicHermite(x, x1, x2, y1, y2, m1, m2):
    """Evaluate the cubic Hermite interpolant on [x1, x2] matching the
    endpoint values y1, y2 and endpoint slopes m1, m2."""
    u = (x - x2) / (x1 - x2)  # equals 1 at x1, 0 at x2
    v = (x - x1) / (x2 - x1)  # equals 0 at x1, 1 at x2
    value_part = y1 * (1 + 2 * v) * u ** 2 + y2 * (1 + 2 * u) * v ** 2
    slope_part = m1 * (x - x1) * u ** 2 + m2 * (x - x2) * v ** 2
    return value_part + slope_part
# -
def Interpolating_demo4(f, a, b, n):
    """Plot f on [a, b] together with its cubic-spline interpolant on
    n+1 equispaced nodes (slopes from CubicSpline_coeff, each piece
    rendered with CubicHermite)."""
    dense = np.linspace(a, b, 201)
    f_dense = np.array([f(t) for t in dense])
    plt.plot(dense, f_dense, 'b', linewidth=3)
    nodes = np.linspace(a, b, n + 1)
    node_vals = np.array([f(t) for t in nodes])
    h = (b - a) / n
    slopes = CubicSpline_coeff(h, nodes, node_vals)
    for left in range(n):
        seg_x = np.linspace(nodes[left], nodes[left + 1], 11)
        seg_y = np.array([CubicHermite(t, nodes[left], nodes[left + 1],
                                       node_vals[left], node_vals[left + 1],
                                       slopes[left], slopes[left + 1])
                          for t in seg_x])
        plt.plot(seg_x, seg_y, 'k', linewidth=3)
    plt.plot(nodes, node_vals, 'ro', markersize=12, linewidth=3)
# cubic-spline interpolation of the Runge function
w = interactive(Interpolating_demo4, f=fixed(fun), a=fixed(-1), b=fixed(1), n=widgets.IntSlider(min=1,max=40,value=1))
display(w)
def Interpolating_demo5(f, x, y):
    """Plot f over [min(x), max(x)] (blue), the given (possibly noisy)
    samples (x, y) (red dots), and the Newton interpolant through those
    samples (black)."""
    dense = np.linspace(np.min(x), np.max(x), 201)
    f_dense = np.array([f(t) for t in dense])
    plt.plot(dense, f_dense, 'b', linewidth=3)
    plt.plot(x, y, 'ro', markersize=12, linewidth=3)
    coeffs = newton(x, y)
    p_dense = np.array([newton_eval(t, x, coeffs) for t in dense])
    plt.plot(dense, p_dense, 'k', linewidth=3)
# # Does polynomial interpolation __always__ good?
# +
def fun1(x):
    """Free-fall distance: (1/2) g x^2 with g = 9.8."""
    g = 9.8
    return 0.5 * g * x * x
m = 15
x = np.linspace(0,1,m)
y = np.zeros(m)
for i in range(m):
    y[i] = fun1(x[i])
# add uniform noise: a degree-14 interpolant will chase the noise, not the trend
y = y + 0.1*np.random.rand(m)
plt.plot(x, y, 'ro', markersize=12, linewidth=3)
# -
Interpolating_demo5(fun1, x, y)
# +
def fun2(x):
    """Straight line through the origin with slope 3.5."""
    slope = 3.5
    return slope * x
m = 20
x = np.linspace(-1,1,m)
y = np.zeros(m)
for i in range(m):
    y[i] = fun2(x[i])
# even tiny noise on a straight line makes a high-degree interpolant overfit
y = y + 0.01*np.random.rand(m)
plt.plot(x, y, 'ro', markersize=12, linewidth=3)
# -
Interpolating_demo5(fun2, x, y)
# # Interpolation
| Numerical_Analysis/.ipynb_checkpoints/Interpolation-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### For the nerdy Valentine...
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# classic parametric heart curve
t = np.arange(0, 2*np.pi, 0.1)
x = 16 * np.sin(t)**3
y = 13 * np.cos(t) - 5 * np.cos(2*t) - 2*np.cos(3*t) - np.cos(4*t)
plt.plot(x, y)
# fill between the curve and its top (y = 5) to paint the heart red
plt.fill_between(x, y, y2=5, color='red', data=None)
sns.despine()
plt.axis('off')
plt.show()
| jupyter_notebooks/matplotlib/Heart.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Import prerequisite packages
import io
import requests
import pandas as pd
# ## Define helper method(s)
#
# `create_df_from_remote_csv` fetches a CSV file from a remote URL and creates a Pandas DataFrame from it. If an error is encountered (file not found, not a csv file, etc) the method returns None.
def create_df_from_remote_csv(url):
    """Fetch a CSV file from *url* and return it as a pandas DataFrame.

    Returns None (after printing an error message) when `url` is None,
    the HTTP request does not return 200, or the response is not CSV.
    """
    if url is None:
        return None
    response = requests.get(url)
    if response.status_code == 200:
        # .get() avoids a KeyError when the header is absent, and comparing
        # only the media type accepts e.g. "text/csv; charset=utf-8"
        content_type = response.headers.get('content-type', '')
        if content_type.split(';')[0].strip().lower() == 'text/csv':
            response.encoding = 'utf-8'
            data = pd.read_csv(io.StringIO(response.text))
            return data
        else:
            print('Error. '
                  'The file is encoded using unsupported content-type {}'
                  .format(content_type))
    else:
        print('Error. The file could not be downloaded. '
              'Returned HTTP status code: {}'
              .format(response.status_code))
    return None
# ## Create a Pandas DataFrame from a remote URL
#
# Load the Iris data set and create a DataFrame.
# Load the Iris data set from datahub.io into a DataFrame (None on failure)
df = create_df_from_remote_csv(
    "https://datahub.io/machine-learning/iris/r/iris.csv")
# ## Preview the DataFrame
#
# Display the first few rows of the data set if it was successfully loaded.
if df is not None:
    # Print first few data rows
    print(df.head())
else:
    print("Data file couldn't be loaded into a DataFrame.")
# ### Preview using the code snippet
# +
# TODO: insert the inspect_dataframe code snippet in this cell
# -
# NOTE: raises NameError until the snippet above defines inspect_dataframe
inspect_dataframe(df)
| binder/getting-started/getting_started_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=[]
# %matplotlib widget
from plastic_app.bs_model_explorer import BSModelExplorer
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# set up the model explorer: slip increment 2, model parameters K=3,
# gamma=0, and 100 load steps (semantics defined in plastic_app)
bs = BSModelExplorer(delta_s=2)
bs.bs_model.trait_set(K=3, gamma=0)
bs.n_steps=100
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# launch the interactive widget UI
bs.interact()
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# -
| tour4_plastic_bond/bs_ep_ikh.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import pandas as pd
import sys
# +
import exman
def last(s):
    """Return the final element of a pandas Series (or anything exposing
    a .values array); used as a groupby aggregation below."""
    all_values = s.values
    return all_values[-1]
from itertools import product
def plot_alot(x, ys, data, hue, col, row, ylims=None, ylabel=None, std=True):
    """Grid of subplots (row x col facets) of mean +/- std of each metric
    in `ys` versus `x`, with one colored curve per value of `hue`.

    Empty strings for `col`/`row` collapse that facet dimension.  Legend
    labels embed the per-x mean values of each curve.
    """
    row_grid = sorted(data[row].unique()) if row else [None]
    col_grid = sorted(data[col].unique()) if col else [None]
    hue_grid = sorted(data[hue].unique())
    i = 0  # running subplot index
    if ylims is None:
        ylims = data[ys].values.min(), data[ys].values.max()
    for row_i in row_grid:
        for col_i in col_grid:
            plt.subplot(len(row_grid), len(col_grid), i + 1)
            plt.ylim(*ylims)
            if i % len(col_grid) == 0:  # y-label only on the leftmost column
                plt.ylabel(ylabel if ylabel else ys[0])
            i += 1
            # select the rows belonging to this facet
            cond = [True] * data.shape[0]
            if col:
                cond = cond & (data[col] == col_i)
            if row:
                cond = cond & (data[row] == row_i)
            tmp = data[cond]
            # one curve per (hue value, metric); metrics alternate solid/dashed
            for (hue_i, color), (y, ls) in product(zip(hue_grid, sns.color_palette(n_colors=len(hue_grid))), zip(ys, ['-', '--'])):
                t = tmp[tmp[hue] == hue_i]
                t = t.groupby(x).agg({y: ('mean', 'std')}).reset_index()
                xpos, xlabels = np.arange(len(t[x])), t[x].values
                # legend label lists the mean value at every x position
                label = '{} ('.format(hue_i)
                for k, yv in enumerate(t[y]['mean'].values):
                    label += '{:.3f}'.format(yv)
                    label += ', ' if k != len(t[y].values) - 1 else ''
                label += ')'
                if std:
                    (_, caps, _) = plt.errorbar(xpos, t[y]['mean'], yerr=t[y]['std'], ecolor=color, label=label,
                                                color=color, elinewidth=2., linewidth=3., marker='o',
                                                ms=8, alpha=.8, capsize=8, linestyle=ls)
                else:
                    plt.plot(xpos, t[y]['mean'], marker='o', ms=8, lw=3, ls=ls, c=color, label=label)
            plt.legend()
            plt.xlabel(x)
            # plt.title('{} = {} | {} = {}'.format(row, row_i, col, col_i))
            plt.xticks(xpos, xlabels)
# -
logs = pd.read_csv('notMNIST-MNIST-classification-logs.csv', index_col=0)
# +
# collapse the per-step training logs to one row per run, keeping the
# last and best value of each metric
df = logs.groupby(['num_examples', 'lr', 'hid_dim', 'prior_list', 'init_list', 'root', 'seed', 'kl_weight']).agg({
    'ens[10]_test_acc': (last, max),
    'samp_test_acc': (last, max),
    'det_test_acc': (last, max),
    'KL': (last, min),
    'train_loss': (last, min),
    'test_nll': (last, min),
    'train_nll': (last, min),
})
df = df.reset_index()
# flatten the (metric, agg) MultiIndex columns into 'metric_agg' names
df.columns = ['_'.join(col).strip('_') for col in df.columns.values]
bnn = df.copy()
bnn['type'] = bnn['prior_list']
bnn['test_acc_last'] = bnn['ens[10]_test_acc_last']
bnn['test_acc_max'] = bnn['ens[10]_test_acc_max']
# -
logs = pd.read_csv('notMNIST-MNIST-classification-logs-vanilla-l2.csv', index_col=0)
# +
# same last/best aggregation for the deterministic L2-regularised baseline
df = logs.groupby(['num_examples', 'lr', 'hid_dim', 'l2', 'init_list', 'root']).agg({
    'test_acc': (last, max),
    'test_nll': (last, min)
})
df = df.reset_index()
df.columns = ['_'.join(col).strip('_') for col in df.columns.values]
# det = df[df['init_list'] == "['vae', 'xavier']"]
df['type'] = "['det']"
# keep only VAE-initialised runs with small learning rates
det = df[(df['init_list'] == "['vae', 'vae']") & (df['lr'] <= 1e-3)]
# -
det['num_examples'].unique()
# +
# final comparison plot: test accuracy of BNN variants vs. the deterministic
# baseline, as a function of the number of training examples
x='num_examples'
ys=['test_acc_last']
# DataFrame.append was removed in pandas 2.0; pd.concat is the supported
# (and behavior-equivalent) replacement
data=pd.concat([bnn, det]).query('num_examples > 10')
hue='type'
col=''
row=''
plt.figure(figsize=(12, 8))
plot_alot(x, ys, data, hue, col, row, ylims=(0.71, .965))
| experiments/notMNIST-MNIST-Classification-results.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import itertools
from ezc3d import c3d
import matplotlib.pyplot as plt
import pyomeca
#import dtw
from pathlib import Path
from math import sqrt
from pyomeca import Markers, Analogs
from tqdm import tqdm
#DATA_FOLDER = Path(r"C:\Users\GMultimedia\Desktop\Praktyki_2020")
#MARKERS1_C3D = DATA_FOLDER / "2020-08-05-B0444-S01-E01-T01.c3d"
#Markers.from_c3d(MARKERS1_C3D)
#c = c3d(r"C:\Users\GMultimedia\Desktop\Praktyki_2020\2020-08-05-B0444-S01-E01-T01.c3d")
#print(c['parameters']['POINT']['USED']['value'][0]); # Print the number of points used
#point_data = c['data']['points']
#points_residuals = c['data']['meta_points']['residuals']
#analog_data = c['data']['analogs']
#point_data.shape
#data_path = (r"C:\Users\GMultimedia\Desktop\Praktyki_2020\2020-08-05-B0444-S01-E01-T01.c3d")
#markers = ["B0444:LFHD"]
#wykres = Markers.from_c3d(data_path, usecols=markers)
#wykres.plot()
# +
from pyomeca import Markers
# motion-capture trial (C3D); marker names carry a "B0444:" subject prefix
data_path = r"C:\Users\GMultimedia\Desktop\Praktyki_2020\2020-08-05-B0444-S01-E01-T01.c3d"
markers = Markers.from_c3d(data_path, prefix_delimiter=":")
# -
markers
# x/y/z trajectories of the LBHD (left back head) marker over time
markers.sel(axis="x", channel="LBHD").plot.line(x="time");
plt.show()
markers.sel(axis="y", channel="LBHD").plot.line(x="time");
plt.show()
markers.sel(axis="z", channel="LBHD").plot.line(x="time");
plt.show()
markers.attrs
# +
from ezc3d import c3d
data_path = r"C:\Users\GMultimedia\Desktop\Praktyki_2020\2020-08-05-B0444-S01-E01-T01.c3d"
file = c3d(data_path)
# "Ilość ramek to" is Polish for "the number of frames is"
print("Ilość ramek to:", file['parameters']['POINT']['FRAMES']['value'][0])
# -
file['parameters']
# +
# dump the C3D header and parameter metadata for inspection
# Print the header
c3d_to_compare= c3d(data_path)
print("# ---- HEADER ---- #")
print(f"Number of points = {c3d_to_compare['header']['points']['size']}")
print(f"Point frame rate = {c3d_to_compare['header']['points']['frame_rate']}")
print(f"Index of the first point frame = {c3d_to_compare['header']['points']['first_frame']}")
print(f"Index of the last point frame = {c3d_to_compare['header']['points']['last_frame']}")
print("")
print(f"Number of analogs = {c3d_to_compare['header']['analogs']['size']}")
print(f"Analog frame rate = {c3d_to_compare['header']['analogs']['frame_rate']}")
print(f"Index of the first analog frame = {c3d_to_compare['header']['analogs']['first_frame']}")
print(f"Index of the last analog frame = {c3d_to_compare['header']['analogs']['last_frame']}")
print("")
print("")
# Print the parameters
print("# ---- PARAMETERS ---- #")
print(f"Number of points = {c3d_to_compare['parameters']['POINT']['USED']['value'][0]}")
print(f"Name of the points = {c3d_to_compare['parameters']['POINT']['LABELS']['value']}")
print(f"Point frame rate = {c3d_to_compare['parameters']['POINT']['RATE']['value'][0]}")
print(f"Number of frames = {c3d_to_compare['parameters']['POINT']['FRAMES']['value'][0]}")
# print(f"My point new Param = {c3d_to_compare['parameters']['POINT']['NewParam']['value']}")
# +
print("")
print(f"Number of analogs = {c3d_to_compare['parameters']['ANALOG']['USED']['value'][0]}")
print(f"Name of the analogs = {c3d_to_compare['parameters']['ANALOG']['LABELS']['value']}")
print(f"Analog frame rate = {c3d_to_compare['parameters']['ANALOG']['RATE']['value'][0]}")
print("")
#print(f"My NewGroup new Param = {c3d_to_compare['parameters']['NewGroup']['NewParam']['value']}")
print("")
print("")
# Print the data
print("# ---- DATA ---- #")
print(f" = {c3d_to_compare['data']['points'][0:2, :, :]}")
#print(f" = {c3d_to_compare['data']['analogs']}")
# -
# inspect the EVENT parameter group: contexts, labels, and times
c3d_to_compare['parameters']['EVENT']
c3d_to_compare['parameters']['EVENT']['CONTEXTS']['value']
c3d_to_compare['parameters']['EVENT']['LABELS']['value']
c3d_to_compare['parameters']['EVENT']['TIMES']['value'][1]
markers
def read_labels(data_path):
    """Return [event labels, event times in seconds] from a C3D file."""
    parsed = c3d(data_path)
    event_group = parsed['parameters']['EVENT']
    labels = event_group['LABELS']['value']
    times = event_group['TIMES']['value'][1]
    return [labels, times]
read_labels(data_path)
# seconds -> frame index at a 200 Hz point rate (NOTE(review): confirm rate)
3.31999993 * 200
import data_procesing as dp
import numpy as np
import importlib
# +
importlib.reload(dp)
data_path = r"C:\Users\GMultimedia\Desktop\Praktyki_2020\2020-08-05-B0444-S01-E01-T01.c3d"
eventy = dp.read_labels(data_path)  # eventy = [labels, times]
print(eventy[1])
print(eventy[0][15])
print(eventy[0][:])
eventy[0].index('Foot Strike')
# indices of generic "Event" labels (movement starts)
indxE = [i for i, x in enumerate(eventy[0]) if x == "Event"]
print(indxE)
# indices of "Foot Strike" labels (movement ends)
indxFS = [i for i, x in enumerate(eventy[0]) if x == "Foot Strike"]
print(indxFS)
# Czas is Polish for "time": collect the times of each label kind
CzasFS = np.zeros(len(indxFS))
for i in range(len(indxFS)):
    print(indxFS[i])
    CzasFS[i] = eventy[1][indxFS[i]]
print('Czasy Foot Strikeów',CzasFS)
CzasE = np.zeros(len(indxE))
for i in range(len(indxE)):
    print(indxE[i])
    CzasE[i] = eventy[1][indxE[i]]
print('Czasy Eventów:',CzasE)
print(eventy[1])
# NOTE(review): sorts in place; the sorted times are reused below
eventy[1].sort()
print(eventy[1])
# -
p=np.zeros(20)  # movement-start times
d=np.zeros(20)  # movement-end times
j=0
# pair each "Event" time with the immediately following "Foot Strike" time
for i in range(len(eventy[1])):
    if not i == len(eventy[1])-1:
        pierwszy = eventy[1][i]  # pierwszy = "first" (Polish)
        drugi = eventy[1][i+1]   # drugi = "second"
        #print('pierwszy', pierwszy)
        #print('drugi', drugi)
        #print('CZASY E',CzasE)
        if pierwszy in CzasE:
            if drugi in CzasFS:
                p[j]=pierwszy
                d[j]=drugi
                print('Początek ruchu', pierwszy, 'Koniec ruchu', drugi)
                j+=1
# cast the times to frame indices (NOTE(review): truncates, assumes a
# unit conversion was applied upstream -- confirm)
p=p.astype(int)
d=d.astype(int)
# dead code kept for reference: the start/strike/end triple variant
"""j=0
for i in range(len(eventy[1])):
    if not i >= len(eventy[1])-2:
        pierwszy = eventy[1][i]
        drugi = eventy[1][i+1]
        trzeci = eventy[1][i+2]
        #print('pierwszy', pierwszy)
        #print('drugi', drugi)
        #print('CZASY E',CzasE)
        if pierwszy in CzasE:
            if drugi in CzasFS:
                if trzeci in CzasE:
                    p[j]=pierwszy
                    d[j]=trzeci
                    j+=1
                    print('Początek ruchu', pierwszy, 'Uderzenie', drugi, 'Koniec ruchu', trzeci)
p=p.astype(int)
d=d.astype(int)"""
# +
from numpy import *
import math
import matplotlib.pyplot as plt
# sanity-check plot: sin, cos and their sum
t = linspace(0, 2*math.pi, 400)
a = sin(t)
b = cos(t)
c = a + b
plt.plot(t, a, 'r') # plotting t, a separately
plt.plot(t, b, 'b') # plotting t, b separately
plt.plot(t, c, 'g') # plotting t, c separately
plt.show()
#plt.plot(markers[i][0],label="base model")
markers.shape
# raw x-coordinate of marker 0 over the first 4300 frames
plt.plot(markers[0][0][0:4300])
#plt.xlabel('time [s]')
#plt.ylabel('distance [mm]')
#plt.legend(loc="upper right")
#plt.show()
# +
markers.shape
print(markers[0][0][:])
print(p)
# overlay the 10 extracted movement segments of marker 40
for i in range(10):
    plt.plot(markers[0][40][p[i]:d[i]])
# +
markers.shape
print(markers[0][0][:])
print(p)
# min-max normalisation formula sketched below:
#z = x- minx
# __________
# maxx - minx
numer_markera=55  # numer_markera = "marker number" (Polish)
for i in range(10):
    #markers[0][numer_markera][p[i]:d[i]]=(markers[0][numer_markera][p[i]:d[i]]-min(markers[0][numer_markera][p[i]:d[i]]))/(max(markers[0][numer_markera][p[i]:d[i]])-min(markers[0][numer_markera][p[i]:d[i]]))
    # rescale every segment to a common 0..100 time axis for comparison
    t_konc=100          # t_konc = "end time"
    dl_ciagu=d[i]-p[i]  # dl_ciagu = "sequence length"
    x=np.linspace(0,t_konc, dl_ciagu)
    plt.plot(x, markers[0][numer_markera][p[i]:d[i]])
# +
markers.shape
print(markers[0][0][:])
print(p)
#z = x- minx
# __________
# maxx - minx
numer_markera=36
for i in range(1):
    # min-max normalise the segment in place before plotting
    markers[0][numer_markera][p[i]:d[i]]=(markers[0][numer_markera][p[i]:d[i]]-min(markers[0][numer_markera][p[i]:d[i]]))/(max(markers[0][numer_markera][p[i]:d[i]])-min(markers[0][numer_markera][p[i]:d[i]]))
    t_konc=100
    dl_ciagu=d[i]-p[i]
    x=np.linspace(0,t_konc, dl_ciagu)
    plt.plot(x, markers[0][numer_markera][p[i]:d[i]])
plt.show()
# +
# frame-to-frame differences (discrete velocity) of the first segment
for i in range(1):
    #[markers[0][i][p[i]:d[i]]-markers[0][i][p[i+1]:d[i+1]]] #for i in range(Len(markers[0][i][p[i]:d[i]])-1)]
    output_diffrence=np.diff(markers[0][36][p[i]:d[i]])
    #print(output_diffrence)
    plt.plot(output_diffrence)
# -
for i in range(1):
    plt.plot(markers[0][numer_markera][p[i]:d[i]])
plt.show()
| RDCLabs/dzielenie_na_kawalki.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Simply importing the needed classes.
# Notice the AAAx and BBBx dicts are optimal guesses for a few Q curves as interpolated from our CDS bootstrapping class
# This computation is expensive and does not always converge for our intial guess of x0Vas so I have copied them here to save some computations.
#
# +
from pandas import DataFrame
import numpy as np
import pandas as pd
from datetime import date
import math
from dateutil.relativedelta import relativedelta
from random import shuffle
import random
import fractions
AAAx={'3M': [1.0015824271136229, 0.07118430651357378, -0.16068883479216692, 0.0073983085859183105, 3.1083459253964976, -4.971784090683851, -0.4774848528659512, -0.10058722679096088, -0.32595880089361595, 1.2498165670968577, 3.4947594489126534, 0.7693240320536217, 1.3561952580367567, 6.371501575362355, 1.5717830330107334, 3.0431872392927932], '6M': [0.6253740242837578, 0.07187788235360676, 0.002756754524306165, 0.0007534565001362353, -1.9788331302293565, 1.3633887485139464, 5.119926963331688, 3.1051517704782445, 0.7634682512381973, -0.2440315461962444, -1.625294304111004, 1.1807892914373608, 1.5803472042649411, 2.2546258881657137, -0.6220529111275982, -3.918280795179225], '1Y': [0.02268430209412819, 0.12335315163831377, 0.0019492996048123179, 0.001628657655447479, 2.012129584631548, -0.14425637029306565, 3.0201995002610156, 2.147972541679386, -0.5128642176120338, 2.2747902950169627, -0.20546619851504466, 1.5945520333717365, 1.1372771020777144, 3.5153776822797216, 0.9602982736891876, -2.470770239032655], '3Y': [26.4868013103451, 0.10123386920113561, 0.007172027822595987, -0.0011729920248976869, 4.671838150691669, 2.0943942967130518, 1.8784163354679428, 2.829205309274365, 0.6419078923238758, 1.9913439793507237, 0.9155288227819725, 0.2038138762167537, 5.345533516522538, 3.7619427230742546, 0.1152302416309914, 2.657152673978014]}
BBBx={'3M': [2.2676030271568077, 0.06869592728485677, -0.002415215219504258, 0.0010910153202821262, 2.076053981582788, -2.4830012835412374, 1.4792817746843325, 2.227857983492404, -0.3936126755070518, -0.16392645500488395, 1.285584627035015, 3.041436386446073, 3.2291187114730233, 3.3449348319234886, -2.054285553987237, 0.906769966943711]}
class MC_Vasicek_Sim(object):
""" Monte Carlo simulator for interest rates under the Vasicek
model.
Attributes
----------
kappa (float): Vasicek perameter: 'speed of reversion'.
theta (float): Vasicek perameter: 'long term mean level'.
sigma (float): Vasicek perameter: 'volatility'
r0 (float): Vasicek perameter: 'initial value'.
t_step (float): The time difference between the 'steps' in the
simulation. Represents 'dt' in the Vasicek model. Should always
be set to 1 day.
simNumber (int): The number of times the simulation is to execute.
datelist (list): A list of strings that are date-formatted (e.g.
'2016-10-17').
datelistlong (list): A list of days between (and including)
min(datelist) and max(datelist). Each element is of type
datetime.date.
ntimes (list): The length of datelistlong.
libor (pandas DataFrame): A (1 + ntimes, simNumber) shaped array
that contains the simulated discount curves. The zeroth column
contains the mean curve. The type of each element is
numpy.float64. The row labels are dates corresponding to
nodes in the simulation.
smallLibor (pandas DataFrame): A matrix subset of the
libor array. But it only contains rows corresponding to the
dates in `datelist` instead of `datelistlong`.
liborAvg (numpy ndarray): A vector containing the mean
simulated libor values. It is also the zeroth column of
`libor`.
"""
def __init__(self, datelist,x, simNumber,t_step):
"""Perameters
----------
datelist (list): A list of strimgs that are date-formatted,
e.g. '2012-04-16'.
x (tuple): A 4-tuple containing the Vasicek SDE perameters:
kappa, theta, sigma, r0.
simNumber (int): The number of simulations that is to be
executed.
"""
#SDE parameters - Vasicek SDE
# dr(t) = k(θ − r(t))dt + σdW(t)
self.kappa = x[0]
self.theta = x[1]
self.sigma = x[2]
self.r0 = x[3]
self.simNumber = simNumber
self.t_step = t_step
#internal representation of times series - integer multiples of t_step
self.datelist = datelist
#creation of a fine grid for Monte Carlo integration
#Create fine date grid for SDE integration
minDay = min(datelist)
maxDay = max(datelist)
self.datelistlong = pd.date_range(minDay, maxDay).tolist()
self.datelistlong = [x.date() for x in self.datelistlong]
self.ntimes = len(self.datelistlong)
self.libor=[]
self.smallLibor = []
self.liborAvg=pd.DataFrame()
def getLibor(self):
"""Executes the simulations and returns the simulated libor curves.
Returns
-------
A large 2D pandoc DataFrame. Each column represents a simulated value of
the libor curve at a given point in time. Each row corresponds to a
date in `datelonglist`. The zeroth column contains the mean value of
the simulated libor curves. The row labels are the elements of
datelonglist.
"""
rd = np.random.standard_normal((self.ntimes,self.simNumber)) # array of numbers for the number of samples
r = np.zeros(np.shape(rd))
nrows = np.shape(rd)[0]
sigmaDT = self.sigma* np.sqrt(self.t_step)
#calculate r(t)
r[1,:] = self.r0+r[1,:]
for i in np.arange(2,nrows):
r[i,:] = r[i-1,:]+ self.kappa*(self.theta-r[i-1,:])*self.t_step + sigmaDT*rd[i,:]
#calculate integral(r(s)ds)
integralR = r.cumsum(axis=0)*self.t_step
#calculate Libor
self.libor = np.exp(-integralR)
self.liborAvg=np.average(self.libor,axis=1)
self.libor=np.c_[self.liborAvg,self.libor]
self.libor = pd.DataFrame(self.libor,index=self.datelistlong)
return self.libor
# -
# genUnderlyings generates a stripped down version of an underlying with the important information stored in a tuple.
# The computation later gets very slow the larger the number of underlyings.
def genUnderlyings(notional,R,start,freq,quality,number):
out=[]
for i in range(0,number):
out.append((notional,start,freq,quality,R))
return out
class Scheduler(object):
def __init__(self):
pass
def extractDelay(self, freq):
if type(freq) == list:
freq = freq[0]
if (freq == 'Date'): return relativedelta(days=+ 1)
x = self.only_numerics(freq)
if (x == ''):
freqValue = 100
else:
freqValue = np.int(x)
if (freq.upper().find('D') != -1): delta = relativedelta(days=+ freqValue)
if (freq.upper().find('W') != -1): delta = relativedelta(weeks=+ freqValue)
if (freq.find('M') != -1): delta = relativedelta(months=+ freqValue)
if (freq.find('Y') != -1): delta = relativedelta(years=+ freqValue)
if (freq.find('ZERO') != -1): delta = relativedelta(years=+ freqValue)
return delta
def only_numerics(self, seq):
seq_type = type(seq)
return seq_type().join(filter(seq_type.isdigit, seq))
#
# The book calls this type of function an exact function. The underlying principle is that we can build the conditional loss distribution by using the fact that, when the underlying credits are independent, there is a natural recursive algorithm for the calculation. Note the base cases: the probability that 0 credits default in a portfolio of 0 credits is 1, while the probability of one or more defaults occurring in a portfolio of 0 credits is 0.
# Naturally, the probability that the portfolio survives is simply the multiplication of the underlying survival probabilities. So the rest is easily computed by recursion.
#
# In this function we used our Monte Carlo simulator to give us the Q(0,Maturity) for each of the underlyings. Then
# f(k,j) calculates the probability of k defaults in a portfolio of j credits under our homogeneous loss. As the book points out, this is easily extendable to inhomogeneous cases using a greatest common denominator.
#
#
class ExactFunc(object):
def __init__(self,start,underlyings):
myScheduler=Scheduler()
myDelays=[]
freqs=['3M','6M','1Y','3Y']
for i in range(0,len(freqs)):
myDelays.append(myScheduler.extractDelay(freqs[i]))
AAA={}
for i in range(0,len(freqs)):
vas=MC_Vasicek_Sim(x=AAAx[freqs[i]],datelist=[start,myDelays[i]+start],t_step=1/365.,simNumber=500)
AAA[freqs[i]]=vas.getLibor()[0].loc[myDelays[i]+start]
BBB={'3M': MC_Vasicek_Sim(x=BBBx[freqs[0]],datelist=[start,myDelays[0]+start],t_step=1/365.,simNumber=500).getLibor()[0].loc[myDelays[0]+start]}
self.probs={'AAA': AAA, 'BBB':BBB}
self.underlyings=underlyings
def f(self,k,j):
'''
The recursion relation for the homogenous portfolio
takes in k: an int for numer of defaults
and j: number of underlyings you want to consider in the calculation k cannnot be greater than j
'''
if(j==0 and k==0):
return 1
if(j==0 and k>0):
return 0
if(k==0 and j>0):
return self.f(k,j-1)*self.probs[self.underlyings[j][3]][self.underlyings[j][2]]
else:
return self.f(k,j-1)*(self.probs[self.underlyings[j][3]][self.underlyings[j][2]])+self.f(k-1,j-1)*(1-self.probs[self.underlyings[j][3]][self.underlyings[j][2]])
'''
Helper functions
'''
def gcd(self,x, y):
while y != 0:
(x, y) = (y, x % y)
return x
def totalGCD(self):
g=(1-self.underlyings[0][4])*self.underlyings[0][0]
for i in range(1,len(self.underlyings)):
g=self.gcd(g,((1-self.underlyings[i][4])*self.underlyings[i][0]))
return g
def getLossVec(self):
g=self.totalGCD()
n=[]
for i in range(0,len(self.underlyings)):
n.append(((1-self.underlyings[i][4])*self.underlyings[i][0])/g)
return n
def fprime(self,k,j,vec):
'''
recursion relation for inhomogenous portfolio takes
k an int representing number of defaulted credits
j an int representing number of underlyings we wish to consider
vec a list of length of underlyings with the underlyings Loss given default scaled by gcd so
each entry is an int
'''
if(j==0 and k==0):
return 1
if(j==0 and k>0):
return 0
if(0<k and vec[j]>k):
return self.fprime(k,j-1,vec)*self.probs[self.underlyings[j][3]][self.underlyings[j][2]]
if(vec[j]<= k and k<=np.array(vec[0:j]).sum()):
return self.fprime(k,j-1,vec)*(self.probs[self.underlyings[j][3]][self.underlyings[j][2]])+self.fprime(k-vec[j],j-1,vec)*(1-self.probs[self.underlyings[j][3]][self.underlyings[j][2]])
else:
return self.fprime(k,j-1,vec)*self.probs[self.underlyings[j][3]][self.underlyings[j][2]]
'''
methods to get number of defaults required to break tranche upperstrike not used just informative
'''
def getTrancheNumb(self,K):
sum=np.array(self.getLossVec()).sum()
losses=self.getLossVec()
totalLoss=0
for i in range(0,len(losses)):
totalLoss=totalLoss+losses[i]/sum
if(totalLoss >= K):
return i
def threshold(self,K):
sum=np.array(self.getLossVec()).sum()
return math.floor(sum*K)
trim_start=date(2005,1,10)
credits=genUnderlyings(1,.4,trim_start,'3M','AAA',10)+genUnderlyings(1,.4,trim_start,'6M','AAA',10)+genUnderlyings(1,.4,trim_start,'1Y','AAA',10)+genUnderlyings(1,.4,trim_start,'3Y','AAA',5)+genUnderlyings(1,.4,trim_start,'3M','BBB',5)
ex=ExactFunc(underlyings=credits,start=trim_start)
y=[]
x=[]
for i in range(0,7):
y.append(ex.f(i,len(ex.underlyings)-1))
x.append(i*.6)
# +
# %matplotlib inline
import matplotlib.pyplot as plt
plt.xlabel("Portfolio Loss %")
plt.ylabel("Probability")
plt.bar(x,y)
# -
# Here we are inserting a list of underlyings with a random recovery. After some experimentation, the random recovery
# can cause the sum of the losses of the credits to get extremely large. There is a discussion in the book about this issue, so we consider only a minimal case where a few credits have recovery different from .4. But now we will look at the loss in terms of "loss units." We create a method to determine how many units lost, out of the total, will breach the upper strike, so we can limit our iterations over defaults to just those numbers.
#
randR=genUnderlyings(100,.4,trim_start,'3M','AAA',20)+genUnderlyings(100,.4,trim_start,'6M','AAA',10)+genUnderlyings(100,.4,trim_start,'1Y','AAA',10)+genUnderlyings(100,round(random.uniform(.25,.5),2),trim_start,'3Y','AAA',1)+genUnderlyings(100,.3,trim_start,'3M','BBB',1)
shuffle(randR)
exactRandR=ExactFunc(underlyings=randR,start=trim_start)
z=[]
w=[]
for i in range(0,exactRandR.threshold(.1)):
z.append(exactRandR.fprime(i,len(exactRandR.underlyings)-1,exactRandR.getLossVec()))
w.append(i)
# +
# %matplotlib inline
import matplotlib.pyplot as plt
plt.xlabel("Portfolio Loss In Dollars")
plt.ylabel("Probability")
plt.bar(w,z)
| JupyterFiles/ExactDistributionHomogenous.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from IPython.display import display
from IPython.display import HTML
import IPython.core.display as di # Example: di.display_html('<h3>%s:</h3>' % str, raw=True)
# This line will hide code by default when the notebook is exported as HTML
di.display_html('<script>jQuery(function() {if (jQuery("body.notebook_app").length == 0) { jQuery(".input_area").toggle(); jQuery(".prompt").toggle();}});</script>', raw=True)
# This line will add a button to toggle visibility of code blocks, for use with the HTML export version
di.display_html('''<button onclick="jQuery('.input_area').toggle(); jQuery('.prompt').toggle();">Hide/Show code</button>''', raw=True)
# +
import pandas as pd
#from pandas_datareader import data
import pandas_datareader.data as web
#for math
import numpy as np
import math
#for inline charts
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# -
def print_msg (msg, typ):
if typ == 'h':
font_weight = '400;'
font_size = '1.5em'
else:
font_weight = '300;'
font_size = '1.4em'
return HTML('<p style="font-weight:'+font_weight +
'font-size: '+ font_size+'"> '+ msg + ' </p>')
portfolio1 = ['AAPL', 'DIS', 'MSFT', 'COST', 'ADBE']
portfolio2 = ['AAL', 'ALK', 'UAL', 'CUK', 'ACB', 'MRO', 'GE', 'ZNGA']
#Get 10 year stock data
portfolio = web.DataReader(portfolio1,
start='2010-01-01',
end='2020-01-01',
data_source='yahoo')['Adj Close']
print_msg("Daily Closing Price", 'h')
portfolio.plot(figsize=(15,5), title='Daily Closing Price')
print_msg("5 year cummulative returns", 'h')
# +
daily_return = portfolio.pct_change()
cum_return = (1 + daily_return).cumprod()
cum_return['2015-01-01':'2020-01-01'].plot(figsize=(18,5), title='5 year cummulative return')
# -
print_msg("MPT calculations", 'h')
risk_free_rate = 0.553/100
#10 year average return by calculating return on day 1 of every month
annual_return_monthly = portfolio.resample('M').ffill().pct_change(periods=12)
re = annual_return_monthly.mean() - risk_free_rate
re * 100
reT = re.T
ones = pd.Series([1 for num in range(re.count())])
onesT = ones.T
sigma = annual_return_monthly.cov()
inv = pd.DataFrame(np.linalg.pinv(sigma.values), sigma.columns, sigma.index)
tangencynum = np.dot(inv, re)
tangencyden1 = np.dot(onesT, inv)
tangencyden2 = np.dot(tangencyden1, re)
print_msg("Tangency Portfolio", 'h')
tangency_portfolio_weights = pd.DataFrame(tangencynum, sigma.index, ['weights'])/tangencyden2
round(tangency_portfolio_weights*100, 2)
tangency_portfolio_return = np.dot(tangencynum/tangencyden2, annual_return_monthly.mean().T)
tangency_portfolio_std = math.sqrt(np.dot(np.dot(tangencynum/tangencyden2, sigma),tangency_portfolio_weights))
msg = "Expected average weighted return of the portfolio = {0}%".format(round(tangency_portfolio_return * 100, 2))
print_msg(msg, 'h')
msg = "Expected average weighted return of the portfolio = {0}%".format(round(tangency_portfolio_std * 100, 2))
print_msg(msg, 'h')
| Stock analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
DATA_PATH = '../data/'
import pandas as pd
import pandas_profiling
# Read New York City property sales data
df = pd.read_csv('../data/mushrooms.csv')
# Change column names: replace hyphens with underscores
df.columns = [col.replace('-', '_') for col in df]
# +
import numpy as np
cap_shape_map = {'b':' bell','c': 'conical', 'x': 'convex', 'f': 'flat', 'k': 'knobbed', 's': 'sunken'}
cap_surface_map = {'f': 'fibrous', 'g': 'grooves','y': 'scaly','s': 'smooth'}
color_map = {'k': 'black', 'n': 'brown', 'b': 'buff',
'h': 'chocolate', 'c': 'cinnamon',
'g': 'gray', 'r': 'green', 'o': 'orange',
'p': 'pink', 'u': 'purple', 'e': 'red',
'w': 'white', 'y': 'yellow'}
bruises_map = {'t': True, 'f': False}
odor_map = {'a': 'almond', 'l': 'anise',
'c': 'creosote', 'y': 'fishy',
'f': 'foul', 'm': 'musty',
'n': 'none', 'p': 'pungent',
's': 'spicy'}
gill_attachment_map = {'a': 'attached', 'd': 'descending', 'f': 'free', 'n': 'notched'}
gill_spacing_map = {'c': 'close','w': 'crowded', 'd': 'distant'}
gill_size_map = {'b': 'broad', 'n': 'narrow'}
stalk_shape_map = {'e': 'enlarging', 't': 'tapering'}
stalk_root_map = {'b': 'bulbous', 'c': 'club', 'u': 'cup', 'e': 'equal', 'z': 'rhizomorphs', 'r': 'rooted', '?': np.NaN}
stalk_surface_map = {'f': 'fibrous', 'y': 'scaly', 'k': 'silky', 's': 'smooth'}
ring_number_map = {'n': 0, 'o': 1, 't': 2}
ring_type_map = {'c': 'cobwebby', 'e': 'evanescent',
'f': 'flaring', 'l': 'large',
'n': 'none', 'p': 'pendant',
's': 'sheathing', 'z': 'zone'}
population_map = {'a': 'abundant','c': 'clustered', 'n': 'numerous', 's': 'scattered', 'v': 'several', 'y': 'solitary'}
habitat_map = {'g': 'grasses','l': 'leaves', 'm': 'meadows', 'p': 'paths', 'u': 'urban', 'w': 'waste', 'd': 'woods'}
class_map = {'p': 'poisonous', 'e': 'edible'}
# -
df_map = df.copy()
df_map.columns
# +
color_cols = ['cap_color',
'gill_color',
'stalk_color_above_ring',
'stalk_color_below_ring',
'veil_color',
'spore_print_color']
for c in color_cols:
df_map[c] = df_map[c].map(color_map)
# -
df_map['class'] = df_map['class'].map(class_map)
df_map['cap_shape'] = df_map['cap_shape'].map(cap_shape_map)
df_map['cap_surface'] = df_map['cap_surface'].map(cap_surface_map)
df_map['bruises'] = df_map['bruises'].map(bruises_map)
df_map['odor'] = df_map['odor'].map(odor_map)
df_map['gill_attachment'] = df_map['gill_attachment'].map(gill_attachment_map)
df_map['gill_spacing'] = df_map['gill_spacing'].map(gill_spacing_map)
df_map['gill_size'] = df_map['gill_size'].map(gill_size_map)
df_map['stalk_shape'] = df_map['stalk_shape'].map(stalk_shape_map)
df_map['stalk_root'] = df_map['stalk_root'].map(stalk_root_map)
df_map['ring_number'] = df_map['ring_number'].map(ring_number_map)
df_map['ring_type'] = df_map['ring_type'].map(ring_type_map)
df_map['population'] = df_map['population'].map(population_map)
df_map['habitat'] = df_map['habitat'].map(habitat_map)
# +
stalk_cols = ['stalk_surface_above_ring', 'stalk_surface_below_ring']
for c in stalk_cols:
df_map[c] = df_map[c].map(stalk_surface_map)
# -
df_map.describe(exclude='number')
df_map.isnull().sum()
df_map.to_csv('../data/mushrooms_mapped.csv')
| notebooks/Mapping_for_Mushroom_csv.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter 11 -- Hash tables
# +
from objects import DoublyLinkedList
class ChainedHashTable(object):
def __init__(self, size):
self.size = size
self.node = DoublyLinkedList
self.data = [self.node() for i in range(size)]
def delete(self, item):
key = self.shorthash(item)
self.data[key].delete(item)
def insert(self, item):
key = self.shorthash(item)
self.data[key].insert(item)
def search(self, item):
key = self.shorthash(item)
return self.data[key].search(item)
def shorthash(self, item):
return hash(item) % self.size
# -
table = ChainedHashTable(10)
for name in ['alice', 'bob', 'cat', 'dillon']:
table.insert(name)
print(table.data)
table.search('bob'), table.delete('alice'), table.search('bob')
# +
class AddressedHashTable(object):
def __init__(self, size):
self.size = size
self.data = [None for i in range(self.size)]
def delete(self, item):
key = self.search(item)
self.data[key] = 'DELETED'
def insert(self, item):
for key in self.probhash(item):
if self.data[key] in [None, 'DELETED']:
self.data[key] = item
break
def probhash(self, item):
raise NotImplementedError
def search(self, item):
for key in self.probhash(item):
if self.data[key] == None:
return None
elif self.data[key] == item:
return key
class LinearHashTable(AddressedHashTable):
def __init__(self, size):
super(LinearHashTable, self).__init__(size)
def probhash(self, item):
for probe in range(self.size):
yield (hash(item) + probe) % self.size
# -
table = LinearHashTable(10)
for name in ['alice', 'bob', 'cat', 'dillon']:
table.insert(name)
print(table.data)
table.search('bob'), table.delete('alice'), table.search('bob')
class DoubleHashTable(AddressedHashTable):
def __init__(self, size):
super(DoubleHashTable, self).__init__(size)
def probhash(self, item):
for probe in range(self.size):
yield (hash(item) + probe * hash(item)) % self.size
table = DoubleHashTable(10)
for name in ['alice', 'bob', 'cat', 'dillon']:
table.insert(name)
print(table.data)
table.search('bob'), table.delete('alice'), table.search('bob')
| chapter_11.ipynb |
# ---
# title: "Python classes"
# date: 2020-04-12T14:41:32+02:00
# author: "<NAME>"
# type: technical_note
# draft: false
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Self in Python class
# +
class car():
# init method or constructor
def __init__(self, model, color):
self.model = model
self.color = color
def show(self):
print("Model is", self.model )
print("color is", self.color )
# both objects have different self which
# contain their attributes
audi = car("audi a4", "blue")
ferrari = car("ferrari 488", "green")
audi.show() # same output as car.show(audi)
ferrari.show() # same output as car.show(ferrari)
# Behind the scene, in every instance method
# call, python sends the instances also with
# that method call like car.show(audi)
| courses/datacamp/notes/python/basics/classes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os, time
import collections
import platform
import numpy as np
import matplotlib.pyplot as plt
import keras
import warnings;
warnings.filterwarnings('ignore');
import pandas as pd
import tensorflow as tf
from keras.layers import Dense, Dropout, Input
from sklearn.metrics import confusion_matrix, accuracy_score
from matplotlib.pyplot import cm
from keras.models import Model
from keras.models import Sequential, load_model
from keras.optimizers import SGD
from keras.utils import to_categorical
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from keras.models import model_from_json
from keras.callbacks import Callback
from sklearn.metrics import roc_curve, auc, roc_auc_score, precision_recall_fscore_support
# init_notebook_mode(connected=True)
# %matplotlib inline
# load data
output_path = './Results/'
model_path = output_path + "Models/"
data_path = './Data/'
allClinical = pd.read_excel(data_path + "allClinical.xlsx",index_col=0)
segment = pd.read_excel(data_path + "allSegment.xlsx",index_col=0)
block_data = pd.read_excel(data_path + "allBlockData.xlsx",index_col=0)
# load training data
data = block_data.sample(frac=1,random_state=11)
study_frame,test_frame, c,d = train_test_split(data.ix[:,:-1],data.ix[:,-1],stratify=data.ix[:,'IsMS'], test_size=0.2, random_state=9)
block_study = data[data.ID.isin(study_frame.ID.tolist())]
block_study.ix[:,1:-8] = (block_study.ix[:,1:-8] - block_study.ix[:,1:-8].mean())/block_study.ix[:,1:-8].std()
block_study.reset_index(drop=True, inplace=True)
# top ten radiomics features + gender
op = ['GLCM_Imc2', 'GLRLM_RunLengthNonUniformity', 'GLSZM_GrayLevelNonUniformity',
'GLSZM_GrayLevelVariance', 'WAVELET_LLL_glcm_Idm', 'WAVELET_LLL_glcm_Id','WAVELET_LLH_glcm_Idm', 'WAVELET_LLH_glcm_Id',
'WAVELET_LLH_glrlm_RunLengthNonUniformity', 'WAVELET_HHH_glrlm_RunEntropy', 'Gender']
# +
# the neural network model
from keras.layers import GaussianNoise, GaussianDropout
from keras.optimizers import RMSprop,Adam
from keras.regularizers import l1,l2
def NeuralModel(input_dim):
model = Sequential()
model.add(Dense(1024, input_dim=input_dim, activation='relu', kernel_initializer='he_normal', kernel_regularizer=l2(0.05),name="DenseLayer_1024"))
model.add(GaussianNoise(0.02,name="GaussianNoise_0.02"))
model.add(Dense(512, activation='relu',kernel_initializer='he_normal',kernel_regularizer=l2(0.03),name="DenseLayer_512"))
model.add(GaussianDropout(0.05, name="GaussianDropout_0.05"))
model.add(Dense(256, activation='relu',kernel_initializer='he_normal',kernel_regularizer=l2(0.03),name="DenseLayer_256"))
model.add(Dense(128, activation='relu',kernel_initializer='he_normal',kernel_regularizer=l2(0.03),name="DenseLayer_128"))
model.add(Dense(64, activation='relu',kernel_initializer='he_normal',kernel_regularizer=l2(0.03),name="DenseLayer_64"))
model.add(GaussianNoise(0.01,name="GaussianNoise_0.01"))
model.add(Dense(2,activation='softmax',kernel_initializer='he_normal',kernel_regularizer=l2(0.03),name='Output'))
model.compile(optimizer=SGD(lr=0.001, momentum=0.9, decay=0.00001),loss='categorical_crossentropy',metrics=['accuracy'])
return model
# -
# save the structure of the neural network
model_json = NeuralModel(len(op)).to_json()
with open(model_path +"model_block.json", "w") as json_file:
json_file.write(model_json)
# +
# train and save the model under cross-validation
from keras.layers import GaussianNoise, GaussianDropout
from keras.models import model_from_json
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
ftypes = ['IsMS'] #,'IsCO','IsVO','IsCTVO','IsIR']
for ftype in ftypes:
print(ftype)
mcp_save = ModelCheckpoint(model_path + ftype[2:]+ '_block_model.h5', save_best_only=True, monitor='val_loss', mode='min', verbose=0)
cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=9)
index = 0
for tr, te in cv.split(block_study.ix[:,:], block_study.ix[:,-1]):
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.9, patience=5, verbose=0, epsilon=1e-4, mode='min')
x_train, x_test, y_train, y_test = block_study.ix[tr,op], block_study.ix[te,op],block_study.ix[:,ftype][tr], block_study.ix[:,ftype][te]
x_train.reset_index(drop=True, inplace=True)
x_test.reset_index(drop=True, inplace=True)
y_train.reset_index(drop=True, inplace=True)
y_test.reset_index(drop=True, inplace=True)
model = NeuralModel(len(op))
history = model.fit(x_train, to_categorical(y_train), validation_data=(x_test, to_categorical(y_test)),epochs=50, batch_size=128, verbose=0, callbacks=[mcp_save])
pred = model.predict(x_test)
print(accuracy_score(np.argmax(pred,axis=1), y_test))
index += 1
# -
| DeepAdiposeBlock.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.1
# language: julia
# name: julia-1.6
# ---
# # Evaly liquid money calculator
# +
using Printf
using JSON
using Plots
# parse the product list from json file
products = JSON.parsefile("./data/products.json")
no_of_products = count(i -> (i["name"] != ""), products)
@printf("%d products loaded.", no_of_products)
# -
# ## After how many days you start a new campaign?
campaign_duration = parse(Int64, readline())
# ## How many orders per campaign?
orders = parse(Int64, readline())
# ## How much delivery duration in days
delivery_duration = parse(Int64, readline())
# ## How many years the business should run
lifetime = parse(Int64, readline()) * 365
# +
# calculate debit / credit in a single campaign
debit_per_campaign = 0
credit_per_campaign = 0
i = no_of_products
while i <= orders
for product in products
debit_per_campaign += parse(Int64, product["offer_price"])
credit_per_campaign += parse(Int64, product["regular_price"])
end
i += no_of_products
end
@printf("Customers gave %d and we need to manage %d", debit_per_campaign, credit_per_campaign)
# +
x = []
y = []
bank = i = j = total_campaigns = total_delivery = 0
while i <= lifetime
# take orders
bank += debit_per_campaign
# deliver the products
if j + delivery_duration <= i
bank -= credit_per_campaign
total_delivery += 1
j += delivery_duration
end
append!(x, i)
append!(y, bank)
total_campaigns += 1
i += campaign_duration
end
for n in total_delivery:total_campaigns
i += campaign_duration
bank -= credit_per_campaign
append!(x, i)
append!(y, bank)
end
# -
plot(x, y, label="Liquid Money")
| Evaly Calculator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: jharman
# language: python
# name: jharman
# ---
# # Phylogenetics Tutorial
# **Run a [phylogenetics](https://github.com/harmslab/phylogenetics "https://github.com/harmslab/phylogenetics") project, all in one place!**
#
# *This tutorial will cover:*
#
# 1. Project initialization and data input/output
#
# 2. BLASTing and aligning sequences
#
# 3. Building a phylogenetic tree
#
# 4. Reconstructing ancestral proteins (ASR)
#
# 5. Quality control and evaluation for each of the steps above
# +
# import packages
import phylogenetics as phy
import phylogenetics.tools as tools
import phylopandas as ph
import pandas as pd
from phylovega import TreeChart
# -
#
#
# ### 1. Inititialize a phylogenetics project
# This creates a folder in your current working directory (here called 'project1') where your project data will be automatically saved.
#
# This also initializes the `project` object (you can name it whatever you want).
#
# The `project` object is a [phylopandas dataframe](https://github.com/Zsailer/phylopandas "https://github.com/Zsailer/phylopandas") that you can view within the notebook at any point.
#
# As you proceed, it will house all of your sequence, alignment, tree, and ancestor data.
# +
# intitialize project object and create project folder
project = phy.PhylogeneticsProject(project_dir='tutorial', overwrite=True)
# -
#
#
# ### 2. Read in your starting sequence(s)
#
# You'll need at least one sequence to start with. In this example, we're reading in a single sequence - a human protein called MD2.
# +
# read in seed sequence(s) to project object
project.read_data("md2_seed_sequence.txt", schema="fasta")
# -
#
#
# ### 3. Use [BLAST](https://blast.ncbi.nlm.nih.gov/Blast.cgi "https://blast.ncbi.nlm.nih.gov/Blast.cgi") to search for orthologs similar to your seed sequence(s)
#
# The default search returns 100 hits with an e-value cutoff of 0.01 and default BLAST gap penalties.
#
# These parameters can be modified as you wish (to view options, run `project.compute_blast?` cell below or check out [Biopython's NCBI BLAST](https://biopython.org/DIST/docs/api/Bio.Blast.NCBIWWW-module.html "https://biopython.org/DIST/docs/api/Bio.Blast.NCBIWWW-module.html") module)
# +
# run BLAST search with default settings, returning 100 hits
project.compute_blast(hitlist_size=100)
# -
#
# ### 4. Build a phylogenetic tree using [PhyML](http://www.atgc-montpellier.fr/phyml/ "http://www.atgc-montpellier.fr/phyml/")
# ### 5. Reconstruct ancestral proteins using [PAML](http://abacus.gene.ucl.ac.uk/software/paml.html "http://abacus.gene.ucl.ac.uk/software/paml.html")
# \####### start working here ######
#
# \- add docs to alignment, clustering, tree building, ASR, gblocks, df_editor, etc.
#
# \- rd 1 tutorial - blast, align, tree, ancestors
#
# \- rd 2 - add in QC:
#
# 1) look at alignment (outsource to aliview)
#
# \- remove bad seqs (df_editor)
#
# 3) look at tree (outsource to figtree? use viewer?)
#
# \- remove long branches (df_editor)
#
# 4) ancestors - look at PP, compare before and after QC
project.compute_clusters()
project.compute_alignment()
project.compute_gblocks()
project.compute_tree()
project.compute_reconstruction()
# +
# Visualize tree and ancestors using phylovega
from phylovega import TreeChart
# Construct Vega Specification
chart = TreeChart.from_phylopandas(
project.data,
height_scale=300,
# Node attributes
node_size=300,
node_color="#ccc",
# Leaf attributes
leaf_labels="id",
# Edge attributes
edge_width=2,
edge_color="#000",
)
chart
| examples/detailed_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/bundickm/Study-Guides/blob/master/Unit_1_Sprint_2_Statistics_Study_Guide.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="GTv68Uw5Zk-P" colab_type="text"
# This study guide should reinforce and provide practice for all of the concepts you have seen in the past week. There are a mix of written questions and coding exercises, both are equally important to prepare you for the sprint challenge as well as to be able to speak on these topics comfortably in interviews and on the job.
#
# If you get stuck or are unsure of something, remember the 20 minute rule. If that doesn't help, then research a solution with Google and Stack Overflow. Only once you have exhausted these methods should you turn to your Team Lead - they won't be there on your SC or during an interview. That being said, don't hesitate to ask for help if you truly are stuck.
#
# Have fun studying!
# + [markdown] id="VvSCoixx7rRe" colab_type="text"
# # Resources
#
# [Scipy Stats Documentation](https://docs.scipy.org/doc/scipy/reference/stats.html)
# + [markdown] id="sDkirKu1B-Lw" colab_type="text"
# # General Terms
# + [markdown] id="iY916675DAXf" colab_type="text"
# Define the following terms. *Double click the text to edit the markdown cells.*
# <br/><br/>
#
# **Normal Distribution:** `Your Answer Here`
#
# **Standard Deviation:** `Your Answer Here`
#
# **Z-Score:** `Your Answer Here`
#
# **P-Value:** `Your Answer Here`
#
# **Null Hypothesis:** `Your Answer Here`
#
# **Sample:** `Your Answer Here`
#
# **Statistical Signifigance:** `Your Answer Here`
# + [markdown] id="KTiR7Fh6FPH0" colab_type="text"
# # T-Test
# + [markdown] id="L-NzA2VTFapj" colab_type="text"
# Answer the following questions as though you are explaining it to a non-technical person. *Double click the text to edit the markdown cells.*
# <br/><br/>
#
# 1. What is a T-Test? What is it used for?
#
# ` Your Answer Here `
#
# 2. What is the difference between the normal distribution and the t-distribution?
#
# ` Your Answer Here `
#
# 3. What is the difference between a 1-sample and a 2-sample t-test?
#
# ` Your Answer Here `
# + [markdown] id="_tZDJesBHeDB" colab_type="text"
# We are scientists running a drug trial and wanting to know whether our drug reduced patient symptoms. Below are the results (just random numbers), explain in 2-3 sentences whether or not the drug was effective. How can we tell that from the t-test?
#
# ```
# Your Answer Here
# ```
#
# What is likely our null hypothesis?
#
# ```
# Your Answer Here
# ```
# + id="0ggDf6GE4mVU" colab_type="code" outputId="6017d08b-d355-48e5-8eac-08739bcdaed3" colab={"base_uri": "https://localhost:8080/", "height": 35}
from scipy import stats
import numpy as np
import pandas as pd
# Get our "results" with random numbers (seeded for reproducibility)
np.random.seed(42)
with_drug = stats.norm.rvs(loc=5, scale=10, size=500)
without_drug = stats.norm.rvs(loc=5, scale=10, size=500)
# See if our drug made a difference.
# BUG FIX: the original called stats.ttest_ind(rvs1, rvs2), but `rvs1` and
# `rvs2` are never defined anywhere in this notebook (NameError); the two
# samples generated above are the intended arguments.
stats.ttest_ind(with_drug, without_drug)
# + [markdown] id="5KJ4ZpQQPoIv" colab_type="text"
# Here is a dataframe of movie ratings. Divide the dataframe by gender and then use t-tests to show which movies have a statistically significant difference in rating when divided by gender. Give a sentence explanation of the results.
# + id="_HtmwEHBHTEb" colab_type="code" outputId="ca730152-2e64-4c81-fdce-cd083fdd8098" colab={"base_uri": "https://localhost:8080/", "height": 206}
# Movie ratings collected from ten viewers, tagged with their gender.
movie_ratings = {
    'gender': ['m', 'f', 'f', 'm', 'm', 'm', 'f', 'f', 'm', 'f'],
    'jurassic park': [10, 9, 10, 9, 9, 10, 10, 10, 9, 9],
    'love actually': [6, 9, 10, 7, 6, 7, 10, 10, 5, 8],
    'pacific rim': [10, 3, 4, 8, 9, 8, 5, 4, 9, 3],
}
df = pd.DataFrame(movie_ratings)
df.head()
# + id="bNDXqu-ZRDNe" colab_type="code" colab={}
# Divide the dataframe here
# + [markdown] id="ReEWvQbmQrGz" colab_type="text"
# **Jurassic Park**
#
# Explanation of results:
#
# ```
# Your Answer Here
# ```
# + id="iOIwQT5zPX59" colab_type="code" colab={}
# T-Test Code Here
# + [markdown] id="8GTFaWm-Q5RL" colab_type="text"
# **Love Actually**
#
# Explanation of results:
#
# ```
# Your Answer Here
# ```
# + id="zlGdfuVhQ8e3" colab_type="code" colab={}
# T-Test Code Here
# + [markdown] id="JIZU8lzyQ80N" colab_type="text"
# **Pacific Rim**
#
# Explanation of results:
#
# ```
# Your Answer Here
# ```
# + id="KCN4M4SORBCZ" colab_type="code" colab={}
# T-Test Code Here
# + [markdown] id="hn-JhlRxRXQK" colab_type="text"
# # Confidence Interval
# + [markdown] id="zVKjVPipS9Ko" colab_type="text"
# Answer the following question as though you are explaining it to a non-technical person. *Double click the text to edit the markdown cells.*
# <br/><br/>
#
# 1. What is a confidence interval?
#
# ` Your Answer Here `
# + [markdown] id="Ozcajm5PXPLc" colab_type="text"
# Using the movie rating data, graph the ratings with a confidence interval. After graphing the ratings with the confidence interval, write a brief explanation of how to interpret the graph.
#
# ```
# Your interpretation here
# ```
# + id="1Wg7BLdGXXMq" colab_type="code" colab={}
import matplotlib.pyplot as plt
# Your Graph Code Here
# + [markdown] id="2kdB0Bcxaw3h" colab_type="text"
# # Chi Squared
# + [markdown] id="DOmy8rAhbnXj" colab_type="text"
# Answer the following questions as though you are explaining it to a non-technical person. *Double click the text to edit the markdown cells.*
# <br/><br/>
#
# 1. What is a Chi Squared Test? What is it used for?
#
# ` Your Answer Here `
#
# 2. What type of data is it used on?
#
# ` Your Answer Here `
#
# 3. What is a contingency table?
#
# ` Your Answer Here `
#
# 4. Define Degrees of Freedom
#
# ` Your Answer Here `
# + [markdown] id="J8VTCMJBiSu_" colab_type="text"
# Use the `grades` dataframe below to complete the following:
# - Create at least 2 contingency tables
# - Use chi-squared tests to find 2 features that are independent of each other.
# - Write a brief interpretation of the results
# - Use chi-squared tests to find 2 features that are dependent to each other.
# - Write a brief interpretation of the results
# + id="Xm4saRNNbGQd" colab_type="code" outputId="b11f0a4a-6f74-4f62-ff4c-d8fc53827324" colab={"base_uri": "https://localhost:8080/", "height": 206}
# Student results under three different grading schemes, plus a standing flag.
grades = pd.DataFrame({'good_standing':[True, True, False, False, False, True, True, False, True, True],
                       'grade_1':['A', 'B', 'A', 'C', 'A', 'A', 'D', 'A', 'B', 'B'],
                       'grade_2':['Pass', 'Pass', 'Fail', 'Fail', 'Fail','Pass', 'Pass', 'Fail', 'Pass', 'Fail'],
                       'grade_3':[10, 5, 6, 10, 9, 9, 8, 7, 3, 9]})
# BUG FIX: the original displayed `df.head()` (the movie-ratings frame from an
# earlier cell); this cell should preview the `grades` frame it just built.
grades.head()
# + id="mwcJfWhzh6gJ" colab_type="code" colab={}
# Contingency Table 1
# + id="q5AEI6Lgkcfm" colab_type="code" colab={}
# Contingency Table 2
# + id="JuK6pVIkkel1" colab_type="code" colab={}
# Chi Squared, independent features
# + id="ZsZrdkOHki-B" colab_type="code" colab={}
# Chi Squared, dependent features
# + [markdown] id="5g6IXrsppE_j" colab_type="text"
# # Bayesian Statisics
# + [markdown] id="MjPRgVbxp_eN" colab_type="text"
# Answer the following questions as though you are explaining it to a non-technical person. *Double click the text to edit the markdown cells.*
# <br/><br/>
#
# 1. What is the difference between Bayesian and Frequentist Statistics?
#
# ` Your Answer Here `
#
# 2. What is a prior belief? How is it used in Bayesian Statistics?
#
# ` Your Answer Here `
#
# 3. What is the law of total probability?
#
# ` Your Answer Here `
#
# 4. What is the law of conditional probability?
#
# ` Your Answer Here `
#
# 5. Give an example of when you might use bayesian statistics. Do not use an example given during the lecture or assignment.
#
# ` Your Answer Here `
# + [markdown] id="8N39IjRS7Jix" colab_type="text"
# # Graphing
# + [markdown] id="r3GRbrZI7NIP" colab_type="text"
# Use any of the dataframes above and make two additional visualizations to explore the data. Make sure to include axis labels and title for each graph.
# + id="ywKWLarY7khK" colab_type="code" colab={}
# + id="TYVX3IYZ7kmO" colab_type="code" colab={}
| Unit_1_Sprint_2_Statistics_Study_Guide.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: dl_tf2
# language: python
# name: dl_tf2
# ---
# # Tensorboard strikes again
#
# In this notebook we will check some more TensorBoard features.
#
# ## The data
# We will use our old friend MNIST for its simplicity.
#
# %load_ext tensorboard
# +
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
tf.keras.backend.clear_session() # For easy reset of notebook state.
from tensorflow import keras
from tensorflow.keras.datasets import mnist
from time import time
# -
# <font color=red><b>Load the dataset and preprocess it.
# </font>
...
# <font color=red><b>Build a simple model (no more than 4 layers) using the functional api
# </font>
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Input, Dense, Flatten, Dropout
...
# <font color=red><b>Compile the model
# </font>
...
# <font color=red><b>Compile the model
# </font>
from tensorflow.keras.callbacks import TensorBoard
...
# <font color=red><b>Train the model, using the given tensorboard callback
# </font>
...
# <font color=red><b>Embed the tensorboard visualizer inside the notebook
# </font>
# %tensorboard --logdir ...
# The next lines are not going to work with this tensorflow version. They would show the embeddings projections with each class's values, but there is an incompatibility between tf versions.
# +
log_dir='/home/fer/data/formaciones/afi/tensorboard_log/tensorboard_example1/{}'.format(time())
tensorboard = TensorBoard(
log_dir=log_dir,
embeddings_freq=1,
embeddings_layer_names=['features'],
embeddings_metadata='metadata.tsv',
embeddings_data=x_test)
model.fit(x=x_train,
y=y_train,
epochs=10,
validation_data=(x_test, y_test),
callbacks=[tensorboard])
# -
| Training/tensorboardDistributionsTraining.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.metrics import mean_squared_error, mean_absolute_error
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
import sys
sys.path.append("..")
from source.clean import general_cleaner, drop_columns
from source.transf_category import recode_cat, make_ordinal
from source.transf_numeric import tr_numeric
import source.transf_univ as dfp
import source.utility as ut
import source.report as rp
# +
# Load the training data and derive a log-transformed sale-price target.
df_train = pd.read_csv('../data/train.csv')
df_train['Target'] = np.log1p(df_train.SalePrice)
# drop the extreme GrLivArea outliers (>4500 sqft) before modelling
df_train = df_train[df_train.GrLivArea < 4500].copy().reset_index(drop=True)
del df_train['SalePrice']
# train/test split stratified by neighbourhood (project helper)
train_set, test_set = ut.make_test(df_train,
                                   test_size=0.2, random_state=654,
                                   strat_feat='Neighborhood')
y = train_set['Target'].copy()
del train_set['Target']
y_test = test_set['Target']
del test_set['Target']
# shared CV folds reused across all models below
folds = KFold(5, shuffle=True, random_state=541)
# +
# Lasso branch: numeric features (median-imputed, custom numeric transforms)
# and categorical features (mode-imputed, ordinal-encoded quality columns,
# recoded and one-hot-encoded) are processed separately and joined.
numeric_lasso = Pipeline([('fs', dfp.feat_sel('numeric')),
                          ('imp', dfp.df_imputer(strategy='median')),
                          ('transf', tr_numeric(lot=False,
                                                bedroom=False,
                                                SF_room=False))])
cat_lasso = Pipeline([('fs', dfp.feat_sel('category')),
                      ('imp', dfp.df_imputer(strategy='most_frequent')),
                      ('ord', make_ordinal(['BsmtQual', 'KitchenQual', 'ExterQual', 'HeatingQC'],
                                           extra_cols=['BsmtExposure', 'BsmtCond', 'ExterCond'],
                                           include_extra='include')),
                      ('recode', recode_cat()),
                      ('dummies', dfp.dummify(drop_first=True))])
processing_lasso = dfp.FeatureUnion_df(transformer_list=[('cat', cat_lasso),
                                                         ('num', numeric_lasso)])
# full pipeline: clean -> preprocess -> standard-scale -> drop columns -> Lasso
lasso_pipe = Pipeline([('gen_cl', general_cleaner()),
                       ('proc', processing_lasso),
                       ('scaler', dfp.df_scaler(method='standard')),
                       ('dropper', drop_columns(lasso=True)),
                       ('lasso', Lasso(alpha=0.001, tol=0.005))])
# +
# Random-forest branch: same preprocessing layout as the Lasso pipeline,
# but with robust scaling and a forest-specific column dropper.
numeric_forest = Pipeline([('fs', dfp.feat_sel('numeric')),
                           ('imp', dfp.df_imputer(strategy='median')),
                           ('transf', tr_numeric(SF_room=False,
                                                 bedroom=False,
                                                 lot=False))])
cat_forest = Pipeline([('fs', dfp.feat_sel('category')),
                       ('imp', dfp.df_imputer(strategy='most_frequent')),
                       ('ord', make_ordinal(['BsmtQual', 'KitchenQual', 'ExterQual', 'HeatingQC'],
                                            extra_cols=['BsmtExposure', 'BsmtCond', 'ExterCond'],
                                            include_extra='include')),
                       ('recode', recode_cat()),
                       ('dummies', dfp.dummify(drop_first=True))])
processing_forest = dfp.FeatureUnion_df(transformer_list=[('cat', cat_forest),
                                                          ('num', numeric_forest)])
# full pipeline: clean -> preprocess -> robust-scale -> drop columns -> forest
forest_pipe = Pipeline([('gen_cl', general_cleaner()),
                        ('proc', processing_forest),
                        ('scaler', dfp.df_scaler(method='robust')),
                        ('dropper', drop_columns(forest=True)),
                        ('forest', RandomForestRegressor(n_estimators=1500, max_depth=30,
                                                         max_features='sqrt',
                                                         n_jobs=-1, random_state=32))])
# -
class AveragingModels(BaseEstimator, RegressorMixin, TransformerMixin):
    """Ensemble regressor that predicts the mean of several base models.

    Parameters
    ----------
    models : iterable of estimators
        Base regressors; each is cloned before fitting so the caller's
        originals are left untouched.
    """

    def __init__(self, models):
        self.models = models

    def fit(self, X, y):
        """Fit a fresh clone of every base model on (X, y)."""
        self.models_ = [clone(estimator) for estimator in self.models]
        for fitted in self.models_:
            fitted.fit(X, y)
        return self

    def predict(self, X):
        """Average the predictions of all fitted base models column-wise."""
        stacked = np.column_stack([fitted.predict(X) for fitted in self.models_])
        return stacked.mean(axis=1)
# +
short_lasso = Pipeline([('scaler', dfp.df_scaler(method='standard')),
('dropper', drop_columns(lasso=True)),
('lasso', Lasso(alpha=0.001, tol=0.005))])
short_forest = Pipeline([('scaler', dfp.df_scaler(method='robust')),
('dropper', drop_columns(forest=True)),
('forest', RandomForestRegressor(n_estimators=1500, max_depth=30,
max_features='sqrt',
n_jobs=-1, random_state=32))])
start_pipe = Pipeline([('gen_cl', general_cleaner()),
('proc', processing_forest)])
avg_pipe = Pipeline([('start', start_pipe),
('models', AveragingModels(models = (short_lasso, short_forest)))])
avg_oof = ut.cv_score(train_set, y, folds, avg_pipe, imp_coef=False)
avg_oof
# +
print(f'RMSE: {round(np.sqrt(mean_squared_error(y, avg_oof)), 4)}')
print(f'MAE: {round(mean_absolute_error(np.expm1(y), np.expm1(avg_oof)), 4)}')
rp.plot_predictions(train_set, y, avg_oof)
# -
class StackingAveragedModels(BaseEstimator, RegressorMixin, TransformerMixin):
    """Stacked ensemble: base models generate out-of-fold predictions that
    become the features of a meta-model.

    NOTE(review): fit() indexes with X[train_index] — this is positional
    only for numpy arrays; with a pandas DataFrame, X[...] selects columns.
    Confirm callers pass arrays (or .values) before reuse.
    """
    def __init__(self, base_models, meta_model, n_folds=5):
        self.base_models = base_models   # estimators stacked at level 0
        self.meta_model = meta_model     # level-1 estimator trained on OOF preds
        self.n_folds = n_folds           # CV folds used to build OOF predictions
    # We again fit the data on clones of the original models
    def fit(self, X, y):
        # one list of fitted clones per base model (one clone per fold)
        self.base_models_ = [list() for x in self.base_models]
        self.meta_model_ = clone(self.meta_model)
        kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156)
        # Train cloned base models then create out-of-fold predictions
        # that are needed to train the cloned meta-model
        out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models)))
        for i, model in enumerate(self.base_models):
            for train_index, holdout_index in kfold.split(X, y):
                instance = clone(model)
                self.base_models_[i].append(instance)
                instance.fit(X[train_index], y[train_index])
                y_pred = instance.predict(X[holdout_index])
                out_of_fold_predictions[holdout_index, i] = y_pred
        # Now train the cloned meta-model using the out-of-fold predictions as new feature
        self.meta_model_.fit(out_of_fold_predictions, y)
        return self
    #Do the predictions of all base models on the test data and use the averaged predictions as
    #meta-features for the final prediction which is done by the meta-model
    def predict(self, X):
        # each column: the per-fold clones of one base model averaged together
        meta_features = np.column_stack([
            np.column_stack([model.predict(X) for model in base_models]).mean(axis=1)
            for base_models in self.base_models_ ])
        return self.meta_model_.predict(meta_features)
| houseprice/notebooks_source/09 - Stacking and ensembling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Import-Libraries" data-toc-modified-id="Import-Libraries-1"><span class="toc-item-num">1 </span>Import Libraries</a></span></li><li><span><a href="#Plotting-Routine" data-toc-modified-id="Plotting-Routine-2"><span class="toc-item-num">2 </span>Plotting Routine</a></span></li><li><span><a href="#Load-Empirical-Data" data-toc-modified-id="Load-Empirical-Data-3"><span class="toc-item-num">3 </span>Load Empirical Data</a></span></li><li><span><a href="#Generate-Random-Call-Data" data-toc-modified-id="Generate-Random-Call-Data-4"><span class="toc-item-num">4 </span>Generate Random Call Data</a></span><ul class="toc-item"><li><span><a href="#Interarrival-Data" data-toc-modified-id="Interarrival-Data-4.1"><span class="toc-item-num">4.1 </span>Interarrival Data</a></span></li><li><span><a href="#Call-Length" data-toc-modified-id="Call-Length-4.2"><span class="toc-item-num">4.2 </span>Call Length</a></span></li></ul></li><li><span><a href="#Call-Centre-with-Variable-Capacity" data-toc-modified-id="Call-Centre-with-Variable-Capacity-5"><span class="toc-item-num">5 </span>Call Centre with Variable Capacity</a></span></li><li><span><a href="#Manual-Optimisation-Experiments" data-toc-modified-id="Manual-Optimisation-Experiments-6"><span class="toc-item-num">6 </span>Manual Optimisation Experiments</a></span><ul class="toc-item"><li><span><a href="#First-Step" data-toc-modified-id="First-Step-6.1"><span class="toc-item-num">6.1 </span>First Step</a></span></li><li><span><a href="#Second-Step" data-toc-modified-id="Second-Step-6.2"><span class="toc-item-num">6.2 </span>Second Step</a></span></li><li><span><a href="#Third-Step" data-toc-modified-id="Third-Step-6.3"><span class="toc-item-num">6.3 </span>Third Step</a></span></li><li><span><a href="#One-more-step..." data-toc-modified-id="One-more-step...-6.4"><span class="toc-item-num">6.4 </span>One more step...</a></span></li><li><span><a href="#And-one-more..." 
data-toc-modified-id="And-one-more...-6.5"><span class="toc-item-num">6.5 </span>And one more...</a></span></li></ul></li><li><span><a href="#Optimisation-Using-GA" data-toc-modified-id="Optimisation-Using-GA-7"><span class="toc-item-num">7 </span>Optimisation Using GA</a></span><ul class="toc-item"><li><span><a href="#Representing-Shift-Schedules" data-toc-modified-id="Representing-Shift-Schedules-7.1"><span class="toc-item-num">7.1 </span>Representing Shift Schedules</a></span></li><li><span><a href="#Cost-Function" data-toc-modified-id="Cost-Function-7.2"><span class="toc-item-num">7.2 </span>Cost Function</a></span></li><li><span><a href="#Generate-Random-Schedules" data-toc-modified-id="Generate-Random-Schedules-7.3"><span class="toc-item-num">7.3 </span>Generate Random Schedules</a></span></li><li><span><a href="#Binary-Representation-of-Schift-Schedules" data-toc-modified-id="Binary-Representation-of-Schift-Schedules-7.4"><span class="toc-item-num">7.4 </span>Binary Representation of Schift Schedules</a></span></li><li><span><a href="#Genetic-Operations" data-toc-modified-id="Genetic-Operations-7.5"><span class="toc-item-num">7.5 </span>Genetic Operations</a></span></li><li><span><a href="#Incremental-Optimisation" data-toc-modified-id="Incremental-Optimisation-7.6"><span class="toc-item-num">7.6 </span>Incremental Optimisation</a></span></li><li><span><a href="#Seed-the-optimisation-with-an-idea" data-toc-modified-id="Seed-the-optimisation-with-an-idea-7.7"><span class="toc-item-num">7.7 </span>Seed the optimisation with an idea</a></span></li></ul></li><li><span><a href="#Alternative-Run" data-toc-modified-id="Alternative-Run-8"><span class="toc-item-num">8 </span>Alternative Run</a></span></li><li><span><a href="#Another-Run" data-toc-modified-id="Another-Run-9"><span class="toc-item-num">9 </span>Another Run</a></span></li><li><span><a href="#References" data-toc-modified-id="References-10"><span class="toc-item-num">10 
</span>References</a></span></li></ul></div>
# -
# # Import Libraries
# +
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy.stats as stats
import math
import numpy as np
import random
import simpy
# + [markdown] heading_collapsed=true
# # Plotting Routine
# + hidden=true
def plotOverTime(data=None, f=None, style=None, scale=1,
                 title=None, ax=None, xlabel=None, ylabel=None):
    """Plot a histogram and/or a curve over a 24-hour x-axis.

    Parameters
    ----------
    data : optional array of time points (hours); histogrammed into 24
        one-hour bins after multiplying by `scale`.
    f : optional callable t -> value, drawn as a curve over 0..24
        (values multiplied by `scale`).
    style : matplotlib line style for the curve (default 'r-').
    scale : multiplier applied to both the data and the curve values.
    title, xlabel, ylabel : optional axis annotations.
    ax : optional existing axes to draw on; a new 12x5 figure is created
        when omitted, so calls can be layered onto the same plot.

    Returns the axes drawn on.
    """
    if ax is None:
        fig = plt.figure()
        fig.set_figwidth(12)
        fig.set_figheight(5)
        ax = fig.gca()
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    ax.set_xlim(0, 24)
    plt.xticks(ticks=range(24))
    if title is not None:
        ax.set_title(title)
    if data is not None:
        # one bin per hour of the day
        plt.hist(data*scale, bins=np.linspace(0,24,25))
    if style is None:
        style='r-'
    if f is not None:
        X=np.linspace(0, 24, 1000)
        Y=[f(x)*scale for x in X]
        plt.plot(X, Y, style)
    return ax
# + hidden=true
def plot(data, xmin, xmax, pdf=None, bins=None,
         μ=None, σ=None,
         title=None, xlabel=None, ylabel=None):
    """Histogram `data` on [xmin, xmax] with density normalisation,
    optionally overlaying a pdf curve, plus a dashed line at the mean.

    Parameters
    ----------
    data : array-like of numbers to histogram.
    xmin, xmax : x-axis limits.
    pdf : optional callable x -> density, drawn as a red curve.
    bins : histogram bin specification passed through to plt.hist.
    μ : optional mean for the dashed marker line; computed from `data`
        when omitted.  (FIX: the original always recomputed the mean,
        silently ignoring this parameter.)
    σ : accepted for backward compatibility; not used by this routine.
    title : optional plot title.
    xlabel, ylabel : accepted for backward compatibility; not used.
    """
    fig = plt.figure()
    fig.set_figwidth(10)
    fig.set_figheight(5)
    ax = fig.gca()
    # fall back to the sample mean only when the caller did not supply μ
    if μ is None:
        μ = np.mean(data)
    ax.set_xlim(xmin, xmax)
    if title is not None:
        plt.title(title)
    plt.hist(data, bins=bins, density=True)
    # parameters for positioning the μ label inside the axes
    dx = (xmax-xmin)*0.02
    ymin, ymax = plt.ylim()
    ypos = ymin+0.9*(ymax-ymin)
    # plot the probability density function if one is given
    if pdf is not None:
        X = list(np.linspace(xmin, xmax, 100))
        Y = [pdf(x) for x in X]
        plt.plot(X, Y, lw=2, color='red')
    ax.axvline(x=μ, color='red', linestyle='dashed', lw=2)
    plt.text(μ+dx, ypos, f"μ={μ:3.2f}", color='red', fontsize=14)
    plt.grid(True)
# + hidden=true
def poissonPlot(data, pdf=None, title=None):
    """Histogram `data` with an exponential (or custom) density overlay.

    Parameters
    ----------
    data : pandas Series of non-negative values (e.g. inter-arrival times).
    pdf : optional callable x -> density; defaults to the exponential
        density with rate 1/mean(data).
    title : optional title; the sample size is appended automatically.
    """
    μ = data.mean()
    n = data.count()
    # FIX: the original named this local `max`, shadowing the builtin.
    xmax = data.mean()*10
    fig = plt.figure()
    fig.set_figwidth(10)
    fig.set_figheight(5)
    ax = fig.gca()
    ax.set_xlim(0, xmax)
    if title is not None:
        ax.set_title(title+" (n={:,})".format(n))
    # one extra bin up to the sample maximum catches the long tail
    bins = list(np.linspace(0, xmax, 100))+[data.max()]
    data.hist(ax=ax, bins=bins, density=True)
    x = np.linspace(0, xmax, 100)
    # FIX: the loop variable no longer reuses the name of the iterable `x`
    if pdf is None:
        y = [ 1/μ*math.exp(-xi/μ) for xi in x]
    else:
        y = [ pdf(xi) for xi in x]
    plt.plot(x, y, lw=3, color='red')
    ax.axvline(x=μ, color='red')
    plt.text(μ+0.2,0.9*y[0],'μ='+'%2.2f' % μ, color='red', fontsize=14)
    plt.grid(True)
# -
# # Load Empirical Data
# The file `Calls.csv` contains the number of calls per hour recorded over a typical day.
callData = pd.read_csv('Calls.csv')
callData
def arrivals(x):
    """Average number of calls per hour at time-of-day x (hours; wraps at 24).

    Looks the hour up in the global `callData` frame loaded from Calls.csv.
    """
    hour = int(math.floor(x % 24))
    return callData.at[hour, 'Calls']
plotOverTime(f=arrivals,
xlabel='Time [h]', ylabel='Calls / h',
title="Average number of calls per hour")
# # Generate Random Call Data
# `calls` generates 24h of call data, the interarrival time of which varies over time according to the function f that returns the average number of calls per hour. The call length is normally distributed with mean `length` and coefficient of variation `cv`.
# +
def calls(f, length, cv, days=1):
    """Generate random call records covering `days`*24 hours.

    Arrivals form a non-homogeneous Poisson process whose rate at time t
    is f(t) (mean calls per hour); call lengths are normal with mean
    `length` and coefficient of variation `cv`.

    Returns a DataFrame indexed by call number with columns:
    time (time of day, wrapped to 0-24), iat (inter-arrival time, hours)
    and lgt (call length, hours).
    """
    records = []
    t = 0
    horizon = 24*days
    while t < horizon:
        # draw both variates every iteration, mirroring the original RNG stream
        dt = stats.expon(scale=1/f(t)).rvs()
        cl = stats.norm(loc=length, scale=length*cv).rvs()
        t += dt
        if t < horizon:
            records.append((t % 24, dt, cl))
    return pd.DataFrame(records, columns=['time', 'iat', 'lgt'])
np.random.seed(42)
Calls = calls(arrivals, 90/3600, 0.1) # call length 90secs [in hours]
# -
len(Calls)
Calls
# ## Interarrival Data
plotOverTime(data=Calls['time'], f=arrivals,
xlabel='Time [h]', ylabel='Calls / h',
title=f"Average number of calls per hour")
# +
print(f"Min. Interarrival Time: {Calls['iat'].min()*3600:11.4f}s")
print(f"Max. Interarrival Time: {Calls['iat'].max()*3600:11.4f}s")
print()
print(f" ... 5% Quantile: {Calls['iat'].quantile(q=0.05)*3600:11.4f}s")
print(f" ... 25% Quantile: {Calls['iat'].quantile(q=0.25)*3600:11.4f}s")
print(f" ... 50% Quantile: {Calls['iat'].quantile(q=0.50)*3600:11.4f}s")
print(f" ... 75% Quantile: {Calls['iat'].quantile(q=0.75)*3600:11.4f}s")
print(f" ... 95% Quantile: {Calls['iat'].quantile(q=0.95)*3600:11.4f}s")
print()
print(f"Mean Interarrival Time: {Calls['iat'].mean()*3600:11.4f}s")
# -
poissonPlot(Calls['iat']*3600, title="Inter-Arrival Times in seconds")
# ## Call Length
print(f"Min. Call Length: {Calls['lgt'].min()*3600:11.4f}s")
print(f"Max. Call Length: {Calls['lgt'].max()*3600:11.4f}s")
print(f"Mean Call Length: {Calls['lgt'].mean()*3600:11.4f}s")
plot(Calls['lgt']*3600, xmin=0, xmax=150, bins=50,
title="Call Length in seconds")
# # Call Centre with Variable Capacity
# We use a description for dynamic call center capacity based on the times when capacity changes:
times= [0, 8, 14, 17, 23]
capacity=[3, 10, 3, 20, 3]
# The cost function is the number of person hours spent:
# +
times=[0, 8, 16]
capacity=[1, 2, 1]
def personhours(times, capacity):
    """Total person-hours of a staffing plan.

    `times` lists the hours at which the staffing level changes and
    `capacity` the level holding from each change point until the next
    one; the last level runs until 24:00.
    """
    spans = [(times[i+1] if i+1 < len(times) else 24) - times[i]
             for i in range(len(times))]
    return sum(span*staff for span, staff in zip(spans, capacity))
personhours(times, capacity)
# -
LABOUR_COST = 10 # €/hour
TELEFON_COST = 0.05 # €/min
# `callCentreFlex` runs the call centre simulation based on the shift_times and shift_capacity:
def callCentreFlex(cd, shift_times, shift_capacity):
    """Simulate a call centre whose staffing level changes over the day.

    Parameters
    ----------
    cd : DataFrame of call data with columns 'iat' (inter-arrival time)
        and 'lgt' (call length), both in hours, as produced by calls().
    shift_times : hours at which the staffing level changes.
    shift_capacity : staffing level from each change point onwards
        (same length as shift_times).

    Returns a DataFrame with one row per call: 'calling at',
    'answered at', 'finished at' plus derived 'wait time', 'call time'
    and 'flow time' (all in simulated hours).

    NOTE(review): each call is served by the resource of the shift that
    was active when it ARRIVED, even if a new shift starts mid-call, and
    queue lengths are tracked per-shift resource.
    """
    assert(len(shift_times) == len(shift_capacity))
    N = len(cd)
    # summary statistics of the input (computed but not used below)
    iarr = cd['iat'].mean()
    proc_μ = cd['lgt'].mean()
    proc_σ = cd['lgt'].std()
    # Prepare a DataFrame to record observations
    sd = pd.DataFrame()
    sd['calling at'] = [None]*N
    sd['answered at'] = [None]*N
    sd['finished at'] = [None]*N
    env = simpy.Environment()
    # one simpy Resource per shift regime
    staff = [ simpy.Resource(env, capacity=c) for c in shift_capacity ]
    shift = 0
    # initialise variables describing the queue
    maxql=0 # maximum length of queue
    qt=0 # "integral" of ql from 0 to lastT
    lastT=0 # last time qt was updated
    # initialise variables describing the server
    maxs=0 # max number of customers served at anyone time
    s=0 # current number of customers being served
    st=0 # "integral" of s from 0 to env.now
    def calls(n):
        # generator process: spawn one call() process per row of cd
        shift = 0
        for i in range(n):
            # wait for the next call
            yield env.timeout(cd.at[i, 'iat'])
            # check if a new shift starts
            if shift+1<len(shift_times) and env.now>shift_times[shift+1]:
                shift += 1
            c = call(i, shift)
            env.process(c)
    def call(i, shift):
        # generator process: life cycle of a single call
        sd.at[i, 'calling at'] = env.now
        nonlocal qt, lastT, maxql, s, st, maxs
        # length of the queue before the customer enters
        ql = len(staff[shift].queue)
        if ql==maxql:
            maxql = ql+1
        qt += ql*(env.now-lastT)
        lastT=env.now
        req = staff[shift].request()
        yield req
        # length of the queue just before the customer leaves
        ql = len(staff[shift].queue)+1
        qt += ql*(env.now-lastT)
        lastT=env.now
        sd.at[i, 'answered at'] = env.now
        # Wait while the clerk is dealing with you
        s += 1
        if s>maxs:
            maxs = s
        proc = cd.at[i, 'lgt']
        yield env.timeout(proc)
        sd.at[i, 'finished at'] = env.now
        staff[shift].release(req)
        s -= 1
        st += proc/shift_capacity[shift]
    env.process(calls(len(cd)))
    env.run()
    # derived per-call statistics
    sd['wait time']=sd['answered at']-sd['calling at']
    sd['call time']=sd['finished at']-sd['answered at']
    sd['flow time']=sd['finished at']-sd['calling at']
    return sd
def waiting_times(sd):
    """Report cost and waiting-time statistics for one simulation run.

    Prints the mean waiting time and the labour/phone/total cost, then
    plots the min/mean/max waiting time per hour of the day.

    NOTE(review): the cost figures use the module-level `times` and
    `capacity` globals — they must still match the plan that produced
    `sd`; confirm before reusing this function.
    """
    w = sd['wait time']
    f = sd['flow time']
    global times, capacity
    labourCost=personhours(times, capacity)*LABOUR_COST
    phoneCost=f.sum()*60*TELEFON_COST
    print(f"Mean Waiting Time: {w.mean()*60:5.2f}min")
    print(f"Cost: {personhours(times, capacity):8d} person hours = {labourCost:7.2f}€")
    print(f" {int(f.sum()*60):8,d} phone minutes = {phoneCost:7.2f}€")
    print(f" total cost = {labourCost+phoneCost:7.2f}€")
    # aggregate waiting-time statistics per hour of day
    wt=pd.DataFrame(index=range(24))
    for i in range(24):
        sdi=sd[sd['calling at']%24>=i]
        sdx=sdi[sdi['calling at']%24<i+1]
        wt.at[i, 'mean']=0 if len(sdx)==0 else sdx['wait time'].mean()
        wt.at[i, 'max']=0 if len(sdx)==0 else sdx['wait time'].max()
        wt.at[i, 'min']=0 if len(sdx)==0 else sdx['wait time'].min()
    # step functions over time-of-day, for plotting with plotOverTime
    def mean_waiting_time(x):
        return wt.at[int(math.floor(x%24)),'mean']
    def max_waiting_time(x):
        return wt.at[int(math.floor(x%24)),'max']
    def min_waiting_time(x):
        return wt.at[int(math.floor(x%24)),'min']
    ax=plotOverTime(f=mean_waiting_time, style='b-', scale=60,
                    xlabel='Time [h]', ylabel='Waiting Time [min]',
                    title="Waiting times over the day")
    ax=plotOverTime(f=max_waiting_time, style='r-', scale=60, ax=ax)
    ax=plotOverTime(f=min_waiting_time, style='g-', scale=60, ax=ax)
# A first attempt, just to see if it works...
times= [0, 8, 14, 17, 23]
capacity=[3, 10, 3, 20, 3]
waiting_times(callCentreFlex(Calls, times, capacity))
# # Manual Optimisation Experiments
# ## First Step
# We begin with a wild guess, actually the same as before:
times= [0, 8, 14, 17, 23]
capacity=[3, 10, 3, 20, 3]
waiting_times(callCentreFlex(Calls, times, capacity))
# It seems we need to increase the capacity between 14:00 and 17:00, which is currently only 3:
# ## Second Step
times= [0, 8, 14, 17, 23]
capacity=[3, 10, 4, 20, 3]
waiting_times(callCentreFlex(Calls, times, capacity))
times= [0, 7, 8, 12, 14, 17, 23]
capacity=[2, 6, 12, 7, 4, 20, 2]
waiting_times(callCentreFlex(Calls, times, capacity))
# It seems we need to increase the capacity between 8:00 and 12:00, which is currently 10, but we leave it between 12:00 and 14:00, that means we need a new shift regime:
# ## Third Step
times= [0, 8, 12, 14, 17, 23]
capacity=[3, 14, 10, 6, 20, 3]
waiting_times(callCentreFlex(Calls, times, capacity))
# We may be able to reduce the capacity between 0:00 and 7:00 and between 20:00 and 23:00:
# ## One more step...
times= [0, 7, 8, 12, 14, 17, 20, 23]
capacity=[2, 3, 14, 10, 6, 20, 6, 3]
waiting_times(callCentreFlex(Calls, times, capacity))
# If we increase marginally between 7:00 and 8:00 and between 17:00 and 20:00:
# ## And one more...
times= [0, 7, 8, 12, 14, 17, 20, 23]
capacity=[2, 4, 14, 10, 6, 21, 6, 3]
waiting_times(callCentreFlex(Calls, times, capacity))
times= [0, 7, 8, 10, 14, 17, 20, 23]
capacity=[2, 4, 14, 10, 6, 21, 6, 3]
waiting_times(callCentreFlex(Calls, times, capacity))
# # Optimisation Using GA
# ## Representing Shift Schedules
class Shift:
    """One staffed shift: `staff` people working `hours` hours from `start` o'clock."""
    def __init__(self, start, hours, staff):
        # start: starting hour of day (0-23); hours: shift length; staff: headcount
        self.start = start
        self.hours = hours
        self.staff = staff
    def __str__(self):
        return f"Shift from {self.start:02d}:00 "\
               f" {self.hours:d}h staff: {self.staff:2d}"
# We choose a representation of a shift schedule as an 24h array of shift lengths and staffing levels indexed by starting time.
class Schedule:
    """A 24-hour staffing schedule.

    Internally two 24-element arrays indexed by starting hour:
    `hours[h]` is the length of the shift starting at hour h and
    `staff[h]` its staffing level (both 0 when no shift starts at h).
    """
    def __init__(self, hours=None, staff=None):
        self.hours = hours if hours is not None else [ 0 for h in range(24) ]
        self.staff = staff if staff is not None else [ 0 for h in range(24) ]
    def add(self, newshifts):
        """Add Shift objects; a shift starting at an already-occupied hour
        overwrites the existing one (with a warning)."""
        for shift in newshifts:
            h = shift.start
            if self.staff[h]>0:
                # BUG FIX: the original printed str(s) where `s` is undefined,
                # raising NameError whenever two shifts shared a start hour.
                print("WARNING: conflicting shift", str(shift))
            self.hours[h]=shift.hours
            self.staff[h]=shift.staff
    def capacity(self):
        """Return the staffing level for each of the 24 hours
        (shifts wrap past midnight via modulo-24 indexing)."""
        N = [0 for h in range(24)]
        for h in range(24):
            if self.staff[h]>0:
                for i in range(self.hours[h]):
                    N[(h+i)%24] += self.staff[h]
        return N
    def parameters(self):
        """Return (times, capacity) change-point lists in the format
        expected by callCentreFlex / personhours."""
        N = self.capacity()
        start = 0
        staff = N[0]
        t = [start]
        c = [staff]
        for h in range(1, 24):
            if N[h]!=staff:
                start = h
                staff = N[h]
                t.append(start)
                c.append(staff)
        return t, c
    def print(self):
        """Print every shift in the schedule, one per line."""
        for h in range(24):
            if self.staff[h]>0:
                print(str(Shift(h, self.hours[h], self.staff[h])))
# +
times= [0, 8, 14, 17, 23]
capacity=[3, 10, 3, 20, 3]
s = Schedule()
s.add([Shift(6,8,3), Shift(14,8,3), Shift(22,8,3), Shift(8,6,7), Shift(17, 6, 17) ])
print(s.capacity())
t, c = s.parameters()
print(t)
print(c)
s.print()
# -
# ## Cost Function
def f(schedule):
    """GA cost function for a Schedule.

    Simulates the call centre on the global `Calls` data and returns
    labour cost of all person-hours plus phone cost of the total
    customer flow time.

    Side effect: overwrites the module-level `times`/`capacity` pair.
    """
    global times, capacity
    times, capacity = schedule.parameters()
    # print("f: ", hours, staff)
    sd = callCentreFlex(Calls, times, capacity)
    labourCost=sum(schedule.capacity())*LABOUR_COST
    phoneCost=sd['flow time'].sum()*60*TELEFON_COST
    # print(f"f(personHours {sum(schedule.capacity()):d})={labourCost + phoneCost:8.1f}")
    return labourCost + phoneCost
f(s)
# ## Generate Random Schedules
# We generate random schedules based on the pattern low-peak1-mid-peak2-low.
# The capacity values are generated by (sorted) random choice between 1 and 31.
# The time points are generated as a (sorted) random sample of time points between 0 and 23.
def randomSchedule():
    """Generate a random Schedule following a low/peak1/mid/peak2/low pattern.

    Three 8-hour base shifts cover the whole day at the lowest capacity
    C[0]; two peak shifts and one mid shift add extra staff between four
    randomly chosen change points T[0]..T[3].
    """
    T = sorted(random.sample(range(24), k=4))
    # shifts should not be longer than 8 hours
    while T[1]-T[0]>8 or T[2]-T[1]>8 or T[3]-T[2]>8:
        T = sorted(random.sample(range(24), k=4))
    C = sorted(random.sample(range(1, 22), k=4))
    # extra staff on top of the base level C[0]
    peak1 = Shift(T[0], T[1]-T[0], C[2]-C[0])
    peak2 = Shift(T[2], T[3]-T[2], C[3]-C[0])
    mid = Shift(T[1], T[2]-T[1], C[1]-C[0])
    # generate the base schedule so that the shift start times for
    # three 8 hour shifts do not coincide with the peak and mid times
    avoid = [t%8 for t in T]
    start=[i for i in range(8)]
    random.shuffle(start)
    while start[0] in avoid:
        random.shuffle(start)
    base = [ Shift(start[0]+i*8, 8, C[0]) for i in range(3) ]
    s=Schedule()
    s.add(base+[peak1, mid, peak2])
    return s
for i in range(5):
print(randomSchedule().parameters())
# ## Binary Representation of Schift Schedules
# We choose a representation of a shift schedule as an 24h array of shift lengths and staffing levels indexed by starting time:
#
# * The shift length (in hours minus 1) is represented by 3 bits, allowing for shift lengths from 1 hour to 8 hours.
# * The number of staff is represented by 5 bits, allowing for shifts of upto 31 staff members.
# +
def binary(n, s):
    """Return the lowest n bits of s as a big-endian 0/1 list."""
    bits = []
    for _ in range(n):
        bits.append(s & 1)
        s >>= 1
    return bits[::-1]
def integer(l):
    """Inverse of binary(): interpret a big-endian bit list as an integer."""
    value = 0
    for bit in l:
        value = 2*value + bit
    return value
# +
def chromosome(p):
    """Encode a (Schedule, fitness) pair as a 24*8 = 192-bit chromosome.

    Each hour of the day becomes one 8-bit gene: 3 bits for the shift
    length minus 1 (1..8 hours) followed by 5 bits for the staffing
    level (0..31).  The fitness component of the pair is ignored.
    """
    schedule, _ = p  # only the Schedule itself is encoded
    bits = []
    for h in range(24):
        if schedule.staff[h]>0:
            bits += binary(3, schedule.hours[h]-1) + binary(5, schedule.staff[h])
        else:
            # hour without a shift: all-zero gene
            bits += binary(3, 0) + binary(5, 0)
    return bits
def schedule(bits):
    """Decode a 192-bit chromosome back into a Schedule (inverse of chromosome())."""
    hours = [ 0 for h in range(24) ]
    staff = [ 0 for h in range(24) ]
    for h in range(24):
        # bits h*8..h*8+2 encode (shift length - 1); bits h*8+3..h*8+7 the staff
        staff[h] = integer(bits[h*8+3:h*8+8])
        if staff[h]>0:
            hours[h] = integer(bits[h*8:h*8+3])+1
        else:
            hours[h] = 0
    return Schedule(hours, staff)
# -
s = Schedule()
s.add([Shift(6,8,3), Shift(14,8,3), Shift(22,8,3), Shift(8,6,7), Shift(17, 6, 17) ])
t, c = s.parameters()
print(s.capacity())
print(t)
print(c)
fs = f(s)
X = chromosome((s, fs))
ss=schedule(X)
print(ss.capacity())
tt, cc = ss.parameters()
print(tt)
print(cc)
random.seed(0)
for i in range(1000):
s = randomSchedule()
X = chromosome((s, 10000))
t = schedule(X)
if s.capacity()!=t.capacity():
print("problem", i)
print(s.parameters())
print(t.parameters())
print(s.capacity())
print(t.capacity())
# ## Genetic Operations
# When manipulating genetic material it is possible that genetic defects are generated. We need a test to detect such defective chromosomes:
def defect(s):
    """True when the bit string decodes to a schedule that leaves some hour unstaffed."""
    hourly = schedule(s).capacity()
    return any(level == 0 for level in hourly)
# Crossover respects the grouping of genetic information in chunks of 8 bits. When this would result in illegal data, the crossover is not executed.
def crossover2(a, b, p):
    """Two-point crossover on 8-bit group boundaries, applied with probability p.

    The two cut points are chosen at distinct group boundaries. If either
    child would be a defective schedule, the swap is abandoned and the
    parents are returned unchanged.
    """
    assert len(a) == len(b)
    if random.random() > p:
        return a, b
    groups = len(a) // 8
    g1 = random.randint(1, groups - 2)
    g2 = random.randint(g1 + 1, groups - 1)
    cut1, cut2 = 8 * g1, 8 * g2
    child1 = a[:cut1] + b[cut1:cut2] + a[cut2:]
    child2 = b[:cut1] + a[cut1:cut2] + b[cut2:]
    if defect(child1) or defect(child2):
        return a, b
    return child1, child2
# Mutation increments or decrements the number of staff in a shift or the length of a shift, provided that this does not result in an illegal schedule.
# +
def modifyStaff(staff):
    """Randomly nudge a staffing level by -1, 0, or +1.

    The result is kept inside the representable 5-bit range 1..31;
    a step that would leave that range is discarded.
    """
    candidate = staff + random.randint(-1, 1)
    return candidate if 0 < candidate < 32 else staff
def modifyHours(hours):
    """Randomly nudge an encoded shift-length value by -1, 0, or +1.

    The result is kept strictly between 0 and 8 (i.e. in 1..7);
    a step that would leave that range is discarded.
    """
    candidate = hours + random.randint(-1, 1)
    return candidate if 0 < candidate < 8 else hours
def flipgroup(x):
    """Mutate one 8-bit gene group: perturb its length field, then its staff field."""
    new_hours = modifyHours(integer(x[0:3]))
    new_staff = modifyStaff(integer(x[3:8]))
    return binary(3, new_hours) + binary(5, new_staff)
def flipgroups(n, b):
    """Return a copy of chromosome b with n distinct, randomly chosen 8-bit groups mutated."""
    mutated = b.copy()
    for g in random.sample(range(len(mutated) // 8), n):
        mutated[g * 8:(g + 1) * 8] = flipgroup(mutated[g * 8:(g + 1) * 8])
    return mutated
def mutate(X, p):
    """With probability p, mutate a random number of gene groups of chromosome X.

    A mutation that would produce a defective schedule is rejected,
    in which case X is returned unchanged.
    """
    if random.random() > p:
        return X
    candidate = flipgroups(random.randint(1, len(X) // 8), X)
    return X if defect(candidate) else candidate
# -
# ## Incremental Optimisation
def initialPopulation(size, seed=()):
    """Build generation 0: evaluate any seed schedules, then top up with random ones.

    Parameters:
        size: total population size.
        seed: optional iterable of Schedule objects to include verbatim.
              The default is an immutable tuple — the original ``seed=[]``
              was the mutable-default-argument anti-pattern.

    Returns:
        (generation_counter, population) where generation_counter is 0 and
        population is a list of (schedule, fitness) pairs sorted by fitness
        in descending order (the best, i.e. lowest-f, schedule ends up last).
    """
    population = [(s, f(s)) for s in seed]
    for i, (_, fitness) in enumerate(population):
        print(f"{i:3d}: {fitness:8.1f} (seed)")
    # Fill the remainder with freshly generated random schedules.
    # (Local name deliberately avoids shadowing the module-level schedule().)
    for i in range(len(population), size):
        candidate = randomSchedule()
        population.append((candidate, f(candidate)))
        print(f"{i:3d}: {population[-1][1]:8.1f}")
    population = sorted(population, key=lambda x: x[1], reverse=True)
    return 0, population
def selectParents(population):
    """Pick two distinct parents, favouring fitter (lower-f) individuals.

    Selection weight for an individual with fitness f is ceil(max f) - f,
    so the worst individual receives (near-)zero weight. Sampling is
    repeated until two different individuals are drawn.
    """
    fitnesses = [fitness for _, fitness in population]
    ceiling = math.ceil(max(fitnesses))
    weights = [ceiling - fitness for fitness in fitnesses]
    pair = random.choices(population, weights=weights, k=2)
    while pair[0] == pair[1]:
        pair = random.choices(population, weights=weights, k=2)
    return pair[0], pair[1]
def incarnation(bits):
    """Decode a chromosome into a Schedule and pair it with its fitness value."""
    decoded = schedule(bits)
    return decoded, f(decoded)
def GA(pp, crossoverRate=0.1, mutationRate=0.1, generations=20):
    """Run a steady-state genetic algorithm for a number of generations.

    pp is a (generation_counter, population) pair. Each generation breeds
    two children from two weighted-selected parents, then drops the two
    worst individuals (the population stays sorted descending by fitness,
    so the two highest-f entries at the front are removed).

    Returns the updated (generation_counter, population) pair.
    """
    gen0, population = pp
    for gen in range(gen0, gen0 + generations):
        parentA, parentB = selectParents(population)
        childX, childY = crossover2(chromosome(parentA), chromosome(parentB), crossoverRate)
        offspringC = incarnation(mutate(childX, mutationRate))
        offspringD = incarnation(mutate(childY, mutationRate))
        population = sorted(population + [offspringC, offspringD],
                            key=lambda x: x[1], reverse=True)[2:]
        print(f"Generation {gen:4d}: {population[0][1]:8.1f} {population[-1][1]:8.1f}")
    return gen0 + generations, population
def solution(pp):
    # Report the best schedule found so far and plot its waiting times.
    # pp is (generation_counter, population); the population is sorted by
    # fitness in descending order, so the last entry is the best (lowest f).
    func = pp[1][-1][1]  # NOTE(review): fitness value is extracted but unused here.
    schedule = pp[1][-1][0]
    # Publish the chosen parameters through module-level globals so the
    # simulation helpers (callCentreFlex / waiting_times) can see them.
    global times, capacity
    times, capacity = schedule.parameters()
    waiting_times(callCentreFlex(Calls, times, capacity))
# ## Seed the optimisation with an idea
# Seed the GA with a hand-designed schedule, then run it in several bursts,
# inspecting the best solution after each burst.
s1 = Schedule()
s1.add([Shift(6,8,3), Shift(14,8,3), Shift(22,8,3), Shift(8,6,7), Shift(17, 6, 17) ])
f(s1)
random.seed(0)
population = initialPopulation(20, seed=[s1])
solution(population)
population = GA(population, crossoverRate=0.5, mutationRate=0.5, generations=50)
solution(population)
population = GA(population, crossoverRate=0.5, mutationRate=0.5, generations=10)
population = GA(population, crossoverRate=0.5, mutationRate=0.5, generations=10)
solution(population)
population = GA(population, crossoverRate=0.5, mutationRate=0.5, generations=50)
solution(population)
population = GA(population, crossoverRate=0.5, mutationRate=0.5, generations=50)
solution(population)
# population is (generation, individuals); the best individual sits at the end.
s = population[1][-1][0]
times, capacity = s.parameters()
print(times)
print(capacity)
# # Alternative Run
# Alternative run: no seed schedule, one long GA run from a random population.
random.seed(1)
population = initialPopulation(20)
solution(population)
population = GA(population, crossoverRate=0.5, mutationRate=0.5, generations=5000)
solution(population)
# Extract the best individual (last entry of the fitness-sorted population).
s = population[1][-1][0]
times, capacity = s.parameters()
print(times)
print(capacity)
s.capacity()
sum(s.capacity())
# # Another Run
# Larger population, shorter run, same random seed.
random.seed(1)
population = initialPopulation(50)
population = GA(population, crossoverRate=0.5, mutationRate=0.5, generations=100)
# # References
# [scipy.stats](https://docs.scipy.org/doc/scipy/reference/stats.html)
| 9,10_SingleServerApplication/Call Centre Optimisation v3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 행렬 미분
#
# 함수도 행렬의 원소가 될 수 있다. 이는 함수의 종속 변수 $y$가 행렬인 경우라고 볼 수 있다.
#
# $$ f(x) = \begin{bmatrix} x^2 & 2x \\ \log x & e^{x^2} \end{bmatrix} = y $$
#
# 반대로 여러개의 입력을 가지는 다변수 함수는 함수의 독립 변수가 벡터인 경우로 볼 수 있다.
#
# $$ f(x_1, x_2) = f\left(\begin{bmatrix} x_1 \\ x_2 \end{bmatrix}\right) = f(x)$$
#
# 이처럼 행렬을 입력 혹은 출력으로 가지는 함수를 미분하는 것을 행렬 미분(실제로는 편미분)이라고 한다. 데이터 분석에서는 주로 분모 중심 표현법(Denominator-layout notation)을 사용한다.
# ---
# ## 스칼라를 벡터로 미분
#
# - gradient vector
# $$ \nabla y = \frac{\partial y}{\partial \mathbf{x}}
# = \begin{bmatrix}
# \dfrac{\partial y}{\partial x_1}\\
# \dfrac{\partial y}{\partial x_2}\\
# \vdots\\
# \dfrac{\partial y}{\partial x_N}\\
# \end{bmatrix}$$
# practice
# - 1 $$ f(x, y, z) = x + y + z $$
#
# $$ \nabla f = \begin{bmatrix} 1 \\ 1 \\ 1 \end{bmatrix}$$
# - 2 $$ f(x, y, z) = xyz $$
#
# $$ \nabla f = \begin{bmatrix} yz \\ xz \\ xy
# \end{bmatrix}$$
#
#
# 그레디언트 벡터는 2차원 상에서 countour plot 으로 나타낼 수 있으며, contour plot 상에 그레디언트 벡터를 화살표로 나타낸 것을 quiver plot이라고 한다. 또한 다음과 같은 특징이 있다.
# - 그레디언트 벡터의
# - 방향은 함수 곡면의 기울기가 가장 큰 방향을 가리킨다.
# - 방향은 등고선(isoline)의 방향과 직교한다.
# - 크기는 기울기를 의미한다.
# ---
# ### 미분 규칙 1: 선형 모형
# - 선형 모형을 미분하면 가중치 벡터가 된다.
#
# $$ \frac{\partial w^Tx}{\partial x} = \frac{\partial x^Tw}{\partial x} = w $$
#
#
#
# ### 미분 규칙 2: 이차 형식
# - 이차 형식을 미분하면 행렬과 벡터의 곱으로 나타난다.
#
# $$ \frac{\partial x^TAx}{
# \partial x} = (A + A^T)x $$
#
# 증명
# - https://datascienceschool.net/view-notebook/8595892721714eb68be24727b5323778/
# ### 벡터를 스칼라로 미분
# 함수의 종속 변수 $y$가 다차원벡터이고 독립 변수 $x$가 스칼라인 경우는 함수가 여러 개라고 보는 것과 마찬가지이다.
#
# $$\mathbf{y} =
# \begin{bmatrix}
# y_1 \\
# y_2 \\
# \vdots\\
# y_M \\
# \end{bmatrix}
# = \mathbf{f}(x)$$
#
# 벡터를 스칼라로 미분하는 경우에는 결과를 행 벡터로 표시한다.
#
# $$
# \frac{\partial \mathbf{y}}{\partial x} = \left[
# \frac{\partial y_1}{\partial x}
# \frac{\partial y_2}{\partial x}
# \cdots
# \frac{\partial y_M}{\partial x}
# \right]$$
# ### 벡터를 벡터로 미분
#
# 함수의 종속, 독립 변수가 모두 벡터인 경우, 각각의 조합에 대해 모두 미분이 존재. 따라서 도함수(derivative)는 행렬 형태가 되며 이를 자코비안 행렬(Jacobian matrix)이라고 한다. **자코비안 행렬의 경우** 벡터를 스칼라로, 스칼라를 벡터로 미분하는 경우와 **행/열의 방향이 다르다는 점에 유의**
#
# $$
# \mathbf J = \frac{d\mathbf y}{d\mathbf x} =
# \begin{bmatrix}
# \dfrac{\partial y_1}{\partial \mathbf x}^T \\ \vdots \\ \dfrac{\partial y_M}{\partial \mathbf x}^T
# \end{bmatrix} =
# \begin{bmatrix}
# \nabla y_1^T \\ \nabla y_2^T \\ \vdots \\ \nabla y_M^T \\
# \end{bmatrix} =
# \begin{bmatrix}
# \dfrac{\partial y_1}{\partial x_1} & \cdots & \dfrac{\partial y_1}{\partial x_N}\\
# \vdots & \ddots & \vdots\\
# \dfrac{\partial y_M}{\partial x_1} & \cdots & \dfrac{\partial y_M}{\partial x_N}
# \end{bmatrix}
# $$
# - practice
# $$f_1(x) =
# \begin{bmatrix}
# \sum_i^3 x_i \\
# \prod_i^3 x_i
# \end{bmatrix}$$
#
# $$ J =
# \dfrac{df_1(x)}{dx} =
# \begin{bmatrix}
# 1 & 1& 1\\
# x_2x_3&x_1x_3&x_1x_2
# \end{bmatrix}
# $$
#
#
# $$f(x) =
# \begin{bmatrix}
# \sum_i^N x_i \\
# \prod_i^N x_i
# \end{bmatrix}$$
#
# $$ J =
# \dfrac{df(x)}{dx} =
# \begin{bmatrix}
# 1 & 1 & \cdots & 1\\
# \dfrac{\partial \prod_i^N x_i}{\partial x_1} & \dfrac{\partial \prod_i^N x_i}{\partial x_2} & \cdots & \dfrac{\partial \prod_i^N x_i}{\partial x_N}
# \end{bmatrix}
# $$
#
#
# 다변수 함수의 2차 도함수는 그레디언트 벡터를 독립 변수 벡터로 미분한 것으로 다음과 같이 행렬로 나타낼 수 있으며, 해시안 행렬(Hessian matrix)이라고 한다. 일반적으로 대칭행렬이 되는 것이 특징.
#
# $$
# H = \begin{bmatrix}
# \dfrac{\partial^2 f}{\partial x_1^2} & \dfrac{\partial^2 f}{\partial x_2\,\partial x_1} & \cdots & \dfrac{\partial^2 f}{\partial x_N\,\partial x_1} \\
# \dfrac{\partial^2 f}{\partial x_1\,\partial x_2} & \dfrac{\partial^2 f}{\partial x_2^2} & \cdots & \dfrac{\partial^2 f}{\partial x_N\,\partial x_2} \\
# \vdots & \vdots & \ddots & \vdots \\
# \dfrac{\partial^2 f}{\partial x_1\,\partial x_N} & \dfrac{\partial^2 f}{\partial x_2\,\partial x_N} & \cdots & \dfrac{\partial^2 f}{\partial x_N^2}
# \end{bmatrix} $$
# practice
# - H?
# $$f(x) = \sum_i^N x_i^2 $$
#
# $$
# H = \begin{bmatrix}
# 2 & 0 & \cdots & 0 \\
# 0 & 2 & \cdots & 0 \\
# \vdots & \vdots &\ddots & \vdots \\
# 0 & 0 & \cdots & 2 \\
# \end{bmatrix} $$
# ---
# ## 미분 규칙 3: 행렬 곱의 대각성분
#
# -
# $$\dfrac{\partial \text{tr} (\mathbf{B}\mathbf{A})}{\partial \mathbf{A}} = \mathbf{B}^T$$
#
# (증명)
#
#
# $$\text{tr}(\mathbf{B}\mathbf{A}) = \sum_{i=1}^N \sum_{j=1}^N b_{ji} a_{ij}$$
#
# $$\dfrac{\partial \text{tr} (\mathbf{B}\mathbf{A})}{\partial a_{ij}} = b_{ji}$$
#
#
# 추가
# - trace는 scalar 값이고, $ \text{tr}(AB) = \text{tr}(BA) $를 충족한다. 따라서 행렬의 순서에 관계없이 앞이나 뒤의 행렬로 미분할 경우, 다른 행렬의 전치 행렬이 나온다.
# ### 미분 규칙 4: 행렬식의 로그
#
# 행렬식은 스칼라이고, 이 값의 로그 값 또한 스칼라. 이 값을 원래 행렬로 미분하면 원래 행렬의 역행렬의 전치 행렬이 된다.
#
# $$ \frac{\partial \log \det A}{\partial A} = (A^{-1})^T $$
#
# (증명)
#
# - 규칙1. $ \dfrac{\partial w^Tx}{\partial x} = \dfrac{\partial x^Tw}{\partial x} = w $
# - 행렬식 정의 $A^{-1} = \dfrac{1}{\det A} C^T$
#
#
# 1 | 행렬식 정의
# $$ \det A = \sum_{j} a_{ij} C_{ij} \qquad \text{(cofactor expansion along any row } i\text{)} $$
# $$ \frac{\partial\det A}{\partial A} = C $$
#
#
#
# 2 | 행렬식과 역행렬 관계$$ C^T = \det A (A)^{-1} $$
# $$ C = \det A (A^{-1})^T $$
#
#
# 3 | 로그 함수 대입$$ f(x) = \det A $$
#
#
# $$
# \begin{eqnarray}
# \frac{d}{dx} \log f(x)
# &=& \frac{f'(x)}{f(x)}
# &=& \frac{C}{\det A}
# &=& \frac{\det A(A^{-1})^T}{\det A}
# &=& \left(A^{-1}\right)^T
# \end{eqnarray}$$
#
#
| Past/DSS/Math/180128_2_matrix_derivative.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
# <script>
# window.dataLayer = window.dataLayer || [];
# function gtag(){dataLayer.push(arguments);}
# gtag('js', new Date());
#
# gtag('config', 'UA-59152712-8');
# </script>
#
# # Start-to-Finish Example: Unit Testing `GiRaFFE_NRPy`: Induction Equation
#
# ## Author: <NAME>
#
# ## This module validates the routine to compute the flux of $\epsilon_{ijk} v^j B^k$ for `GiRaFFE`.
#
# **Notebook Status:** <font color='red'><b>In-Progress</b></font>
#
# **Validation Notes:** This module will validate the routines in [Tutorial-GiRaFFE_NRPy-Induction_Equation](Tutorial-GiRaFFE_NRPy-Induction_Equation.ipynb).
#
# ### NRPy+ Source Code for this module:
# * [GiRaFFE_NRPy/Afield_flux.py](../../edit/in_progress/GiRaFFE_NRPy/Afield_flux.py) [\[**tutorial**\]](Tutorial-GiRaFFE_NRPy-Afield_flux.ipynb) Generates the symbolic expressions needed to compute the flux term of the induction equation right-hand side.
#
# ## Introduction:
#
# This notebook validates our algorithm to compute the flux of $\epsilon_{ijk} v^j B^k$ through cell faces, which contributes to the right-hand side of the evolution equation for $A_i$, for use in `GiRaFFE_NRPy`. Because the original `GiRaFFE` used staggered grids and we do not, we can not trivially do a direct comparison to the old code. Instead, we will compare the numerical results with the expected analytic results.
#
# It is, in general, good coding practice to unit test functions individually to verify that they produce the expected and intended output. Here, we expect our functions to produce the correct cross product between the velocity and magnetic field in an arbitrary spacetime. To that end, we will choose functions with relatively simple analytic forms.
#
# In this test, we will generate analytic forms for the magnetic field and three-velocity. We will need to do this in a $7 \times 7 \times 7$ cube in order to run the PPM routine on the data to generate the inputs to compute the non-gauge terms of the RHS of the induction equation, unless there's a way to usefully spoof the left- and right-face values for the HLLE solver. We care about this here, because we are comparing against an analytic expression and not the old code.
#
# When this notebook is run, the difference between the approximate and exact right-hand sides will be output to text files that can be found in the same directory as this notebook. These will be read in in [Step 3](#convergence), and used there to confirm second order convergence of the algorithm.
# <a id='toc'></a>
#
# # Table of Contents
# $$\label{toc}$$
#
# This notebook is organized as follows
#
# 1. [Step 1](#setup): Set up core functions and parameters for unit testing the A2B algorithm
# 1. [Step 1.a](#magnetic) Set analytic magnetic field
# 1. [Step 1.b](#velocity) Set analytic Valencia three-velocity
# 1. [Step 1.c](#free_parameters) Set free parameters in the code
# 1. [Step 2](#mainc): `Induction_Equation_unit_test.c`: The Main C Code
# 1. [Step 2.a](#compile_run): Compile and run the code
# 1. [Step 3](#convergence): Code validation: Verify that relative error in numerical solution converges to zero at the expected order
# 1. [Step 4](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
# <a id='setup'></a>
#
# # Step 1: Set up core functions and parameters for unit testing the A2B algorithm \[Back to [top](#toc)\]
#
# $$\label{setup}$$
#
# We'll start by appending the relevant paths to `sys.path` so that we can access sympy modules in other places. Then, we'll import NRPy+ core functionality and set up a directory in which to carry out our test. We must also set the desired finite differencing order.
# +
import os, sys # Standard Python modules for multiplatform OS-level functions
# First, we'll add the parent directory to the list of directories Python will check for modules.
nrpy_dir_path = os.path.join("..")
if nrpy_dir_path not in sys.path:
sys.path.append(nrpy_dir_path)
nrpy_dir_path = os.path.join("..","..")
if nrpy_dir_path not in sys.path:
sys.path.append(nrpy_dir_path)
from outputC import outCfunction, outputC # NRPy+: Core C code output module
import finite_difference as fin # NRPy+: Finite difference C code generation module
import NRPy_param_funcs as par # NRPy+: Parameter interface
import grid as gri # NRPy+: Functions having to do with numerical grids
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import reference_metric as rfm # NRPy+: Reference metric support
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
# Output directories for the generated C sources and validation results.
out_dir = "Validation/"
cmd.mkdir(out_dir)
subdir = "Afield_flux"
cmd.mkdir(os.path.join(out_dir,subdir))
# Module name under which C parameters are registered with NRPy+.
thismodule = "Start_to_Finish_UnitTest-GiRaFFE_NRPy-Afield_flux"
# When True, a max(0,5x) jump is added to v and B to exercise the shock-capturing path.
Use_Shock_Data = True
# -
# <a id='velocity'></a>
#
# ## Step 1.a: Valencia three-velocity \[Back to [top](#toc)\]
# $$\label{velocity}$$
#
# Here, we'll generate some functions for the velocity. Let's choose arctangents, since those have asymptotes that can be easily manipulated to prevent accidentally setting superluminal speeds.
# \begin{align}
# \bar{v}^x &= \frac{1}{5\pi} \arctan(ax + by + cz) \\
# \bar{v}^y &= \frac{1}{5\pi} \arctan(bx + cy + az) \\
# \bar{v}^z &= \frac{1}{5\pi} \arctan(cx + ay + bz) \\
# \end{align}
# If we want to add a jump at the origin, we can simply add $\max(0,x)$ to the argument of the arctangent. This will add a shock in the $x$-direction. The maximum will be described without the use of if statements using the `Min_Max_and_Piecewise_Expressions` module.
# +
import Min_Max_and_Piecewise_Expressions as noif
# NOTE(review): sympy is referenced below as `sp` but is not imported in this
# cell — confirm an earlier cell (or an imported NRPy+ module) provides it.
a,b,c = par.Cparameters("REAL",thismodule,["a","b","c"],1e300) # Note that this default value allows us to set
# these directly in the C code
M_PI = par.Cparameters("#define",thismodule,["M_PI"], "")
# Use Cartesian coordinates for this flat-space unit test.
par.set_parval_from_str("reference_metric::CoordSystem","Cartesian")
rfm.reference_metric()
x = rfm.xxCart[0]
y = rfm.xxCart[1]
z = rfm.xxCart[2]
# Offsets shift the arctangent arguments so the components differ at the origin.
offset = sp.sympify(5)
args = ixp.zerorank1()
args[0] = a*(x-offset) + b*y + c*z
args[1] = b*x + c*(y-offset) + a*z
args[2] = c*x + a*y + b*(z-offset)
if Use_Shock_Data:
    for i in range(3):
        # max(0,5x) adds a kink at x=0 without using an if statement.
        args[i] += noif.max_noif(0,5*x)
ValenciavU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","ValenciavU")
for i in range(3):
    # atan is bounded by pi/2, so (1/5)/pi * atan keeps |v| < 1/10 (subluminal).
    ValenciavU[i] = (sp.Rational(1,5)/M_PI)*sp.atan(args[i])
# -
# <a id='magnetic'></a>
#
# ## Step 1.b: Magnetic field \[Back to [top](#toc)\]
# $$\label{magnetic}$$
#
# We'll also need some functions for the magnetic field. Exponentials sound fun.
# \begin{align}
# B^x &= \exp(ey+fz) \\
# B^y &= \exp(fz+dx) \\
# B^z &= \exp(dx+ey) \\
# \end{align}
# In this case, we'll add $\max{0,x}$ to the field to add the jump.
# Analytic magnetic-field components: each exponential omits its own direction.
d,e,f = par.Cparameters("REAL",thismodule,["d","e","f"],1e300) # Note that this default value allows us to set
# these directly in the C code
BU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","BU")
BU[0] = sp.exp(sp.Rational(1,10)*(e*y+f*z))
BU[1] = sp.exp(sp.Rational(1,10)*(f*z+d*x))
BU[2] = sp.exp(sp.Rational(1,10)*(d*x+e*y))
if Use_Shock_Data:
    # Same jump as the velocity: add max(0,5x) to each component.
    for i in range(3):
        BU[i] += noif.max_noif(0,5*x)
# <a id='functions'></a>
#
# ## Step 1.c: Generate C functions to write the test data \[Back to [top](#toc)\]
# $$\label{functions}$$
#
# +
# Emit C functions that fill the BU and ValenciavU grid functions with the
# analytic sample data defined above, evaluated at every grid point.
# NOTE(review): lhrh is used without a visible import in this chunk —
# presumably re-exported by outputC; verify.
BU_to_print = [\
lhrh(lhs=gri.gfaccess("out_gfs","BU0"),rhs=BU[0]),\
lhrh(lhs=gri.gfaccess("out_gfs","BU1"),rhs=BU[1]),\
lhrh(lhs=gri.gfaccess("out_gfs","BU2"),rhs=BU[2]),\
]
desc = "Calculate sample magnetic field data"
name = "calculate_BU"
outCfunction(
outfile = os.path.join(out_dir,name+".h"), desc=desc, name=name,
params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs",
body = fin.FD_outputC("returnstring",BU_to_print,params="outCverbose=False").replace("IDX4","IDX4S"),
loopopts="AllPoints,Read_xxs")
ValenciavU_to_print = [\
lhrh(lhs=gri.gfaccess("out_gfs","ValenciavU0"),rhs=ValenciavU[0]),\
lhrh(lhs=gri.gfaccess("out_gfs","ValenciavU1"),rhs=ValenciavU[1]),\
lhrh(lhs=gri.gfaccess("out_gfs","ValenciavU2"),rhs=ValenciavU[2]),\
]
desc = "Calculate sample velocity data"
name = "calculate_ValenciavU"
outCfunction(
outfile = os.path.join(out_dir,name+".h"), desc=desc, name=name,
params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs",
body = fin.FD_outputC("returnstring",ValenciavU_to_print,params="outCverbose=False").replace("IDX4","IDX4S"),
loopopts="AllPoints,Read_xxs")
# -
# <a id='free_parameters'></a>
#
# ## Step 1.d: Set free parameters in the code \[Back to [top](#toc)\]
# $$\label{free_parameters}$$
#
# We also need to create the files that interact with NRPy's C parameter interface.
# +
# Step 3.d.i: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h
NGHOSTS = par.Cparameters("int",thismodule,["NGHOSTS"], 3)
# Step 3.d.ii: Set free_parameters.h
# The triple-quoted block below is written verbatim into the generated C file;
# its content must not be altered here.
with open(os.path.join(out_dir,"free_parameters.h"),"w") as file:
    file.write("""
// Override parameter defaults with values based on command line arguments and NGHOSTS.
// We'll use this grid. It has one point and one ghost zone.
params.NGHOSTS = 3;
params.Nxx0 = atoi(argv[1]);
params.Nxx1 = atoi(argv[2]);
params.Nxx2 = atoi(argv[3]);
params.Nxx_plus_2NGHOSTS0 = params.Nxx0 + 2*params.NGHOSTS;
params.Nxx_plus_2NGHOSTS1 = params.Nxx1 + 2*params.NGHOSTS;
params.Nxx_plus_2NGHOSTS2 = params.Nxx2 + 2*params.NGHOSTS;
// Step 0d: Set up space and time coordinates
// Step 0d.i: Declare \Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]:
const REAL xxmin[3] = {-0.1,-0.1,-0.1};
const REAL xxmax[3] = { 0.1, 0.1, 0.1};
params.dxx0 = (xxmax[0] - xxmin[0]) / ((REAL)params.Nxx0);
params.dxx1 = (xxmax[1] - xxmin[1]) / ((REAL)params.Nxx1);
params.dxx2 = (xxmax[2] - xxmin[2]) / ((REAL)params.Nxx2);
//printf("dxx0,dxx1,dxx2 = %.5e,%.5e,%.5e\\n",params.dxx0,params.dxx1,params.dxx2);
params.invdx0 = 1.0 / params.dxx0;
params.invdx1 = 1.0 / params.dxx1;
params.invdx2 = 1.0 / params.dxx2;
\n""")
# Generates declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h
par.generate_Cparameters_Ccodes(os.path.join(out_dir))
# -
# <a id='module'></a>
#
# ## Step 1.e: Generate `GiRaFFE_NRPy` Files \[Back to [top](#toc)\]
# $$\label{module}$$
#
# Here, we generate the functions we want to test by calling the function found [here](../../edit/in_progress/GiRaFFE_NRPy/GiRaFFE_NRPy_PPM.py) and documented in [this tutorial](Tutorial-Start_to_Finish-GiRaFFE_NRPy-PPM.ipynb).
import GiRaFFE_NRPy.GiRaFFE_NRPy_PPM as PPM
# Generate the PPM reconstruction C sources into Validation/Afield_flux/.
PPM.GiRaFFE_NRPy_PPM(os.path.join(out_dir,subdir))
# <a id='electric_field'></a>
#
# ## Step 1.f: Calculate the $E_i$ field contribution to $\partial_t A_i$ \[Back to [top](#toc)\]
# $$\label{electric_field}$$
#
# Here, we generate the functions necessary to calculate the electric flux on the cell faces, which is the algorithm we are specifically trying to test.
# +
import GiRaFFE_NRPy.Afield_flux as Af
# We will pass values of the gridfunction on the cell faces into the function. This requires us
# to declare them as C parameters in NRPy+. We will denote this with the _face infix/suffix.
alpha_face = gri.register_gridfunctions("AUXEVOL","alpha_face")
gamma_faceDD = ixp.register_gridfunctions_for_single_rank2("AUXEVOL","gamma_faceDD","sym01")
beta_faceU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","beta_faceU")
# We'll need some more gridfunctions, now, to represent the reconstructions of BU and ValenciavU
# on the right and left faces
Valenciav_rU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","Valenciav_rU",DIM=3)
B_rU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","B_rU",DIM=3)
Valenciav_lU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","Valenciav_lU",DIM=3)
B_lU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","B_lU",DIM=3)
# AD is the evolved vector potential whose right-hand side we are testing.
AD = ixp.register_gridfunctions_for_single_rank1("EVOL","AD",DIM=3)
# C prologue spliced into each generated kernel: load the face-valued
# gridfunctions into locals and zero the A_i right-hand-side accumulators.
# (String content is emitted verbatim into C — do not edit casually.)
Memory_Read = """const double alpha_face = auxevol_gfs[IDX4S(ALPHA_FACEGF, i0,i1,i2)];
const double gamma_faceDD00 = auxevol_gfs[IDX4S(GAMMA_FACEDD00GF, i0,i1,i2)];
const double gamma_faceDD01 = auxevol_gfs[IDX4S(GAMMA_FACEDD01GF, i0,i1,i2)];
const double gamma_faceDD02 = auxevol_gfs[IDX4S(GAMMA_FACEDD02GF, i0,i1,i2)];
const double gamma_faceDD11 = auxevol_gfs[IDX4S(GAMMA_FACEDD11GF, i0,i1,i2)];
const double gamma_faceDD12 = auxevol_gfs[IDX4S(GAMMA_FACEDD12GF, i0,i1,i2)];
const double gamma_faceDD22 = auxevol_gfs[IDX4S(GAMMA_FACEDD22GF, i0,i1,i2)];
const double beta_faceU0 = auxevol_gfs[IDX4S(BETA_FACEU0GF, i0,i1,i2)];
const double beta_faceU1 = auxevol_gfs[IDX4S(BETA_FACEU1GF, i0,i1,i2)];
const double beta_faceU2 = auxevol_gfs[IDX4S(BETA_FACEU2GF, i0,i1,i2)];
const double Valenciav_rU0 = auxevol_gfs[IDX4S(VALENCIAV_RU0GF, i0,i1,i2)];
const double Valenciav_rU1 = auxevol_gfs[IDX4S(VALENCIAV_RU1GF, i0,i1,i2)];
const double Valenciav_rU2 = auxevol_gfs[IDX4S(VALENCIAV_RU2GF, i0,i1,i2)];
const double B_rU0 = auxevol_gfs[IDX4S(B_RU0GF, i0,i1,i2)];
const double B_rU1 = auxevol_gfs[IDX4S(B_RU1GF, i0,i1,i2)];
const double B_rU2 = auxevol_gfs[IDX4S(B_RU2GF, i0,i1,i2)];
const double Valenciav_lU0 = auxevol_gfs[IDX4S(VALENCIAV_LU0GF, i0,i1,i2)];
const double Valenciav_lU1 = auxevol_gfs[IDX4S(VALENCIAV_LU1GF, i0,i1,i2)];
const double Valenciav_lU2 = auxevol_gfs[IDX4S(VALENCIAV_LU2GF, i0,i1,i2)];
const double B_lU0 = auxevol_gfs[IDX4S(B_LU0GF, i0,i1,i2)];
const double B_lU1 = auxevol_gfs[IDX4S(B_LU1GF, i0,i1,i2)];
const double B_lU2 = auxevol_gfs[IDX4S(B_LU2GF, i0,i1,i2)];
REAL A_rhsD0 = 0; REAL A_rhsD1 = 0; REAL A_rhsD2 = 0;
"""
# C epilogue: accumulate the computed contributions into the AD right-hand side.
Memory_Write = """rhs_gfs[IDX4S(AD0GF,i0,i1,i2)] += A_rhsD0;
rhs_gfs[IDX4S(AD1GF,i0,i1,i2)] += A_rhsD1;
rhs_gfs[IDX4S(AD2GF,i0,i1,i2)] += A_rhsD2;
"""
# Index tokens used to shift the Memory_Read loads by one cell for the
# "+1" face variant (via string replacement below).
indices = ["i0","i1","i2"]
indicesp1 = ["i0+1","i1+1","i2+1"]
# For each flux direction, generate a pair of C kernels that add the
# E-field contribution -1/4 * E_flux to the two A_i components transverse
# to that direction (one kernel per face of the cell).
for flux_dirn in range(3):
    Af.calculate_E_i_flux(flux_dirn,True,alpha_face,gamma_faceDD,beta_faceU,\
Valenciav_rU,B_rU,Valenciav_lU,B_lU)
    E_field_to_print = [\
-sp.Rational(1,4)*Af.E_fluxD[(flux_dirn+1)%3],\
-sp.Rational(1,4)*Af.E_fluxD[(flux_dirn+2)%3],\
]
    E_field_names = [\
"A_rhsD"+str((flux_dirn+1)%3),\
"A_rhsD"+str((flux_dirn+2)%3),\
]
    # NOTE(review): this desc says "left face" although the generated function
    # is the "_right" variant — looks like a copy/paste slip in the generated
    # C documentation string; confirm intended wording.
    desc = "Calculate the electric flux on the left face in direction " + str(flux_dirn) + "."
    name = "calculate_E_field_D" + str(flux_dirn) + "_right"
    outCfunction(
outfile = os.path.join(out_dir,subdir,name+".h"), desc=desc, name=name,
params ="const paramstruct *params,REAL *xx[3],const REAL *auxevol_gfs,REAL *rhs_gfs",
body = Memory_Read \
+outputC(E_field_to_print,E_field_names,"returnstring",params="outCverbose=False").replace("IDX4","IDX4S")\
+Memory_Write,
loopopts ="InteriorPoints",
rel_path_for_Cparams=os.path.join("../"))
    # The "_left" variant reads its inputs shifted one cell along flux_dirn
    # (string replacement of "iN" with "iN+1" in the memory-read prologue).
    desc = "Calculate the electric flux on the left face in direction " + str(flux_dirn) + "."
    name = "calculate_E_field_D" + str(flux_dirn) + "_left"
    outCfunction(
outfile = os.path.join(out_dir,subdir,name+".h"), desc=desc, name=name,
params ="const paramstruct *params,REAL *xx[3],const REAL *auxevol_gfs,REAL *rhs_gfs",
body = Memory_Read.replace(indices[flux_dirn],indicesp1[flux_dirn]) \
+outputC(E_field_to_print,E_field_names,"returnstring",params="outCverbose=False").replace("IDX4","IDX4S")\
+Memory_Write,
loopopts ="InteriorPoints",
rel_path_for_Cparams=os.path.join("../"))
# -
# <a id='exact_flux'></a>
#
# ## Step 1.g: Calculate the *exact* flux of $\epsilon_{ijk} v^j B^k$ \[Back to [top](#toc)\]
# $$\label{exact_flux}$$
#
# Here, we generate a function to analytically calculate the electric flux on the cell faces for comparison. We'll need to import the Levi-Civita tensor for this.
# +
import WeylScal4NRPy.WeylScalars_Cartesian as weyl
# import GRHD.equations as GH
# GH.compute_sqrtgammaDET(gamma_faceDD)
# Build the exact (analytic) RHS: A_rhs_i = -eps_{ijk} v_drift^j B^k,
# with the drift velocity v^i = alpha*v_Valencia^i - beta^i.
LeviCivitaDDD = ixp.LeviCivitaSymbol_dim3_rank3()
driftvU = ixp.zerorank1()
for i in range(3):
    # TODO: don't use _face metric gridfunctions once we're in curved space!
    driftvU[i] = alpha_face*ValenciavU[i]-beta_faceU[i]
A_rhsD = ixp.zerorank1()
for i in range(3):
    for j in range(3):
        for k in range(3):
            A_rhsD[i] += -LeviCivitaDDD[i][j][k]*driftvU[j]*BU[k]
A_rhsD_to_print = [\
lhrh(lhs=gri.gfaccess("rhs_gfs","AD0"),rhs=A_rhsD[0]),\
lhrh(lhs=gri.gfaccess("rhs_gfs","AD1"),rhs=A_rhsD[1]),\
lhrh(lhs=gri.gfaccess("rhs_gfs","AD2"),rhs=A_rhsD[2]),\
]
desc = "Calculate analytic electric field, part of the right-hand side of AD"
name = "calculate_E_exactD"
outCfunction(
outfile = os.path.join(out_dir,name+".h"), desc=desc, name=name,
params ="const paramstruct *params,REAL *xx[3],const REAL *auxevol_gfs,REAL *rhs_gfs",
body = fin.FD_outputC("returnstring",A_rhsD_to_print,params="outCverbose=False").replace("IDX4","IDX4S"),
loopopts ="AllPoints,Read_xxs")
# -
# <a id='mainc'></a>
#
# # Step 2: `Induction_Equation_unit_test.c`: The Main C Code \[Back to [top](#toc)\]
# $$\label{mainc}$$
#
# Now that we have our vector potential and analytic magnetic field to compare against, we will start writing our unit test. We'll also import common C functionality, define `REAL`, the number of ghost zones, and the faces, and set the standard macros for NRPy+ style memory access.
# +
# %%writefile $out_dir/Afield_flux_unit_test.c
// These are common packages that we are likely to need.
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "string.h" // Needed for strncmp, etc.
#include "stdint.h" // Needed for Windows GCC 6.x compatibility
#include <time.h> // Needed to set a random seed.
#define REAL double
#include "declare_Cparameters_struct.h"
// Free parameters of the analytic v and B fields; set from rand() in main()
// and read by the generated calculate_* functions.
REAL a,b,c,d,e,f;
// Standard NRPy+ memory access:
#define IDX3S(i,j,k) ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * (k) ) )
#define IDX4S(g,i,j,k) \
( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) )
#define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \
for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++)
# -
# We'll now define the gridfunction names.
# +
# %%writefile -a $out_dir/Afield_flux_unit_test.c
// Let's also #define the NRPy+ gridfunctions
// NOTE(review): these indices must stay in sync with the order in which the
// gridfunctions were registered in the Python cells above — verify on change.
#define VALENCIAVU0GF 0
#define VALENCIAVU1GF 1
#define VALENCIAVU2GF 2
#define BU0GF 3
#define BU1GF 4
#define BU2GF 5
#define VALENCIAV_RU0GF 6
#define VALENCIAV_RU1GF 7
#define VALENCIAV_RU2GF 8
#define B_RU0GF 9
#define B_RU1GF 10
#define B_RU2GF 11
#define VALENCIAV_LU0GF 12
#define VALENCIAV_LU1GF 13
#define VALENCIAV_LU2GF 14
#define B_LU0GF 15
#define B_LU1GF 16
#define B_LU2GF 17
#define GAMMA_FACEDD00GF 18
#define GAMMA_FACEDD01GF 19
#define GAMMA_FACEDD02GF 20
#define GAMMA_FACEDD11GF 21
#define GAMMA_FACEDD12GF 22
#define GAMMA_FACEDD22GF 23
#define BETA_FACEU0GF 24
#define BETA_FACEU1GF 25
#define BETA_FACEU2GF 26
#define ALPHA_FACEGF 27
#define FLUXD0GF 28
#define FLUXD1GF 29
#define FLUXD2GF 30
#define NUM_AUXEVOL_GFS 31
// Evolved gridfunctions: the three components of the vector potential A_i.
#define AD0GF 0
#define AD1GF 1
#define AD2GF 2
#define NUM_EVOL_GFS 3
# -
# Now, we'll handle the different A2B codes. There are several things to do here. First, we'll add `#include`s to the C code so that we have access to the functions we want to test. We must also create a directory and copy the files to that directory. We will choose to do this in the subfolder `A2B` relative to this tutorial.
#
# +
# %%writefile -a $out_dir/Afield_flux_unit_test.c
// Some specific definitions needed for this file
// Bundles a gridfunction pointer with its valid ghost-zone extents,
// as expected by the PPM reconstruction routine below.
typedef struct __gf_and_gz_struct__ {
REAL *gf;
int gz_lo[4],gz_hi[4];
} gf_and_gz_struct;
// Ordering of the six primitives handed to the reconstruction routine.
const int VX=0,VY=1,VZ=2,BX=3,BY=4,BZ=5;
const int NUM_RECONSTRUCT_GFS = 6;
#include "Afield_flux/reconstruct_set_of_prims_PPM_GRFFE_NRPy.c"
#include "Afield_flux/loop_defines_reconstruction_NRPy.h"
#include "calculate_BU.h"
#include "calculate_ValenciavU.h"
#include "calculate_E_exactD.h"
// These are the functions we want to test.
#include "Afield_flux/calculate_E_field_D0_right.h"
#include "Afield_flux/calculate_E_field_D0_left.h"
#include "Afield_flux/calculate_E_field_D1_right.h"
#include "Afield_flux/calculate_E_field_D1_left.h"
#include "Afield_flux/calculate_E_field_D2_right.h"
#include "Afield_flux/calculate_E_field_D2_left.h"
# -
# Now, we'll write the main method. First, we'll set up the grid. In this test, we cannot use only one point. As we are testing a three-point stencil, we can get away with a minimal $3 \times 3 \times 3$ grid. Then, we'll write the A fields. After that, we'll calculate the magnetic field two ways.
# +
# %%writefile -a $out_dir/Afield_flux_unit_test.c
int main(int argc, const char *argv[]) {

    // Set up the NRPy+ parameter struct: defaults, then free parameters,
    // then the non-pointer convenience definitions.
    paramstruct params;
#include "set_Cparameters_default.h"
    // Step 0c: Set free parameters, overwriting Cparameters defaults
    // by hand or with command-line input, as desired.
#include "free_parameters.h"
#include "set_Cparameters-nopointer.h"

    // We define our grid slightly differently from usual: the outermost ghostzone
    // points coincide with xxmin and xxmax instead of the grid interior. That way
    // the ghostzone points have identical positions at both test resolutions, so
    // we can run convergence tests on them.
    // Step 0d.ii: Set up uniform coordinate grids.
    REAL *xx[3];
    xx[0] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS0);
    xx[1] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS1);
    xx[2] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS2);
    for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) xx[0][j] = xxmin[0] + (j-NGHOSTS)*dxx0;
    for(int j=0;j<Nxx_plus_2NGHOSTS1;j++) xx[1][j] = xxmin[1] + (j-NGHOSTS)*dxx1;
    for(int j=0;j<Nxx_plus_2NGHOSTS2;j++) xx[2][j] = xxmin[2] + (j-NGHOSTS)*dxx2;
    //for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) printf("x[%d] = %.5e\n",j,xx[0][j]);

    // Storage for the auxiliary gridfunctions, the numerically computed RHS,
    // and the exact (analytic) RHS we compare against.
    REAL *auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS2 * Nxx_plus_2NGHOSTS1 * Nxx_plus_2NGHOSTS0);
    REAL *rhs_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS2 * Nxx_plus_2NGHOSTS1 * Nxx_plus_2NGHOSTS0);
    REAL *rhs_exact_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS2 * Nxx_plus_2NGHOSTS1 * Nxx_plus_2NGHOSTS0);

    // Flat-space face-interpolated quantities: identity spatial metric, zero shift, unit lapse.
    // NOTE(review): the LOOP_REGION bounds are passed with the i2 extent first; all three
    // extents are equal on this cubic test grid, so it is harmless here — confirm the
    // argument order against the LOOP_REGION macro definition.
    LOOP_REGION(0,Nxx_plus_2NGHOSTS2,0,Nxx_plus_2NGHOSTS1,0,Nxx_plus_2NGHOSTS0) {
        auxevol_gfs[IDX4S(GAMMA_FACEDD00GF,i0,i1,i2)] = 1.0; // Flat Space
        auxevol_gfs[IDX4S(GAMMA_FACEDD01GF,i0,i1,i2)] = 0.0; // Flat Space
        auxevol_gfs[IDX4S(GAMMA_FACEDD02GF,i0,i1,i2)] = 0.0; // Flat Space
        auxevol_gfs[IDX4S(GAMMA_FACEDD11GF,i0,i1,i2)] = 1.0; // Flat Space
        auxevol_gfs[IDX4S(GAMMA_FACEDD12GF,i0,i1,i2)] = 0.0; // Flat Space
        auxevol_gfs[IDX4S(GAMMA_FACEDD22GF,i0,i1,i2)] = 1.0; // Flat Space
        auxevol_gfs[IDX4S(BETA_FACEU0GF,i0,i1,i2)] = 0.0; // Flat Space
        auxevol_gfs[IDX4S(BETA_FACEU1GF,i0,i1,i2)] = 0.0; // Flat Space
        auxevol_gfs[IDX4S(BETA_FACEU2GF,i0,i1,i2)] = 0.0; // Flat Space
        auxevol_gfs[IDX4S(ALPHA_FACEGF,i0,i1,i2)] = 1.0; // Flat Space
    }

    // Random integer coefficients in [-10,9] for the test field configuration.
    // rand() is deliberately left unseeded so runs are reproducible.
    a = (double)(rand()%20-10);
    b = (double)(rand()%20-10);
    c = (double)(rand()%20-10);
    d = (double)(rand()%20-10);
    e = (double)(rand()%20-10);
    f = (double)(rand()%20-10);
    //printf("a,b,c,d,e,f = %f,%f,%f,%f,%f,%f\n",a,b,c,d,e,f);

    // Calculate the initial data and the exact solution.
    // (Bug fix: these calls previously read "¶ms", an HTML-entity mangling of "&params".)
    calculate_BU(&params,xx,auxevol_gfs);
    calculate_ValenciavU(&params,xx,auxevol_gfs);
    //for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) xx[0][j] -= 0.5*dxx0;
    calculate_E_exactD(&params,xx,auxevol_gfs,rhs_exact_gfs);

    // Set up the pointers and constants for the reconstruction procedure.
    gf_and_gz_struct in_prims[NUM_RECONSTRUCT_GFS], out_prims_r[NUM_RECONSTRUCT_GFS], out_prims_l[NUM_RECONSTRUCT_GFS];
    int which_prims_to_reconstruct[NUM_RECONSTRUCT_GFS],num_prims_to_reconstruct;
    const int Nxxp2NG012 = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2;
    REAL temporary[Nxxp2NG012];

    // Each reconstructed primitive gets an input pointer plus right/left face outputs,
    // all aliased into the single auxevol_gfs allocation.
    int ww=0;
    in_prims[ww].gf      = auxevol_gfs + Nxxp2NG012*VALENCIAVU0GF;
    out_prims_r[ww].gf   = auxevol_gfs + Nxxp2NG012*VALENCIAV_RU0GF;
    out_prims_l[ww].gf   = auxevol_gfs + Nxxp2NG012*VALENCIAV_LU0GF;
    ww++;
    in_prims[ww].gf      = auxevol_gfs + Nxxp2NG012*VALENCIAVU1GF;
    out_prims_r[ww].gf   = auxevol_gfs + Nxxp2NG012*VALENCIAV_RU1GF;
    out_prims_l[ww].gf   = auxevol_gfs + Nxxp2NG012*VALENCIAV_LU1GF;
    ww++;
    in_prims[ww].gf      = auxevol_gfs + Nxxp2NG012*VALENCIAVU2GF;
    out_prims_r[ww].gf   = auxevol_gfs + Nxxp2NG012*VALENCIAV_RU2GF;
    out_prims_l[ww].gf   = auxevol_gfs + Nxxp2NG012*VALENCIAV_LU2GF;
    ww++;
    in_prims[ww].gf      = auxevol_gfs + Nxxp2NG012*BU0GF;
    out_prims_r[ww].gf   = auxevol_gfs + Nxxp2NG012*B_RU0GF;
    out_prims_l[ww].gf   = auxevol_gfs + Nxxp2NG012*B_LU0GF;
    ww++;
    in_prims[ww].gf      = auxevol_gfs + Nxxp2NG012*BU1GF;
    out_prims_r[ww].gf   = auxevol_gfs + Nxxp2NG012*B_RU1GF;
    out_prims_l[ww].gf   = auxevol_gfs + Nxxp2NG012*B_LU1GF;
    ww++;
    in_prims[ww].gf      = auxevol_gfs + Nxxp2NG012*BU2GF;
    out_prims_r[ww].gf   = auxevol_gfs + Nxxp2NG012*B_RU2GF;
    out_prims_l[ww].gf   = auxevol_gfs + Nxxp2NG012*B_LU2GF;
    ww++;

    // Prims are defined AT ALL GRIDPOINTS, so we set the # of ghostzones to zero:
    for(int i=0;i<NUM_RECONSTRUCT_GFS;i++) for(int j=1;j<=3;j++) { in_prims[i].gz_lo[j]=0; in_prims[i].gz_hi[j]=0; }
    // Left/right variables are not yet defined, yet we set the # of gz's to zero by default:
    for(int i=0;i<NUM_RECONSTRUCT_GFS;i++) for(int j=1;j<=3;j++) { out_prims_r[i].gz_lo[j]=0; out_prims_r[i].gz_hi[j]=0; }
    for(int i=0;i<NUM_RECONSTRUCT_GFS;i++) for(int j=1;j<=3;j++) { out_prims_l[i].gz_lo[j]=0; out_prims_l[i].gz_hi[j]=0; }

    ww=0;
    which_prims_to_reconstruct[ww]=VX; ww++;
    which_prims_to_reconstruct[ww]=VY; ww++;
    which_prims_to_reconstruct[ww]=VZ; ww++;
    which_prims_to_reconstruct[ww]=BX; ww++;
    which_prims_to_reconstruct[ww]=BY; ww++;
    which_prims_to_reconstruct[ww]=BZ; ww++;
    num_prims_to_reconstruct=ww;

    // In each direction, perform the PPM reconstruction procedure.
    // Then, add the fluxes to the RHS as appropriate.
    for(int flux_dirn=0;flux_dirn<3;flux_dirn++) {
        // This function is housed in the file: "reconstruct_set_of_prims_PPM_GRFFE_NRPy.c"
        reconstruct_set_of_prims_PPM_GRFFE_NRPy(&params, auxevol_gfs, flux_dirn+1, num_prims_to_reconstruct,
                                                which_prims_to_reconstruct, in_prims, out_prims_r, out_prims_l, temporary);
        if(flux_dirn==0) {
            calculate_E_field_D0_right(&params,xx,auxevol_gfs,rhs_gfs);
            calculate_E_field_D0_left(&params,xx,auxevol_gfs,rhs_gfs);
        }
        else if(flux_dirn==1) {
            calculate_E_field_D1_right(&params,xx,auxevol_gfs,rhs_gfs);
            calculate_E_field_D1_left(&params,xx,auxevol_gfs,rhs_gfs);
        }
        else {
            calculate_E_field_D2_right(&params,xx,auxevol_gfs,rhs_gfs);
            calculate_E_field_D2_left(&params,xx,auxevol_gfs,rhs_gfs);
        }
    }

    // Write the numerical-vs-exact differences at the central point; the filename
    // encodes the resolution so the convergence script can pair the two runs.
    char filename[100];
    sprintf(filename,"out%d-numer.txt",Nxx0);
    FILE *out2D = fopen(filename, "w");
    // We print the difference between approximate and exact numbers.
    int i0 = Nxx_plus_2NGHOSTS0/2;
    int i1 = Nxx_plus_2NGHOSTS1/2;
    int i2 = Nxx_plus_2NGHOSTS2/2;
    printf("Numerical: %.15e\n",rhs_gfs[IDX4S(AD0GF,i0,i1,i2)]);
    printf("Analytic:  %.15e\n",rhs_exact_gfs[IDX4S(AD0GF,i0,i1,i2)]);
    printf("Numerical: %.15e\n",rhs_gfs[IDX4S(AD1GF,i0,i1,i2)]);
    printf("Analytic:  %.15e\n",rhs_exact_gfs[IDX4S(AD1GF,i0,i1,i2)]);
    printf("Numerical: %.15e\n",rhs_gfs[IDX4S(AD2GF,i0,i1,i2)]);
    printf("Analytic:  %.15e\n\n",rhs_exact_gfs[IDX4S(AD2GF,i0,i1,i2)]);
    //printf("i0,i1,i2 = %d,%d,%d\n",i0,i1,i2);
    fprintf(out2D,"%.16e\t%.16e\t%.16e\t %e %e %e\n",
            rhs_exact_gfs[IDX4S(AD0GF,i0,i1,i2)]-rhs_gfs[IDX4S(AD0GF,i0,i1,i2)],
            rhs_exact_gfs[IDX4S(AD1GF,i0,i1,i2)]-rhs_gfs[IDX4S(AD1GF,i0,i1,i2)],
            rhs_exact_gfs[IDX4S(AD2GF,i0,i1,i2)]-rhs_gfs[IDX4S(AD2GF,i0,i1,i2)],
            xx[0][i0],xx[1][i1],xx[2][i2]
            );
    fclose(out2D);

    // Release all heap allocations before exit.
    free(auxevol_gfs); free(rhs_gfs); free(rhs_exact_gfs);
    free(xx[0]); free(xx[1]); free(xx[2]);
    return 0;
}
# -
# <a id='compile_run'></a>
#
# ## Step 2.a: Compile and run the code
#
# $$\label{compile_run}$$
#
# Now that we have our file, we can compile it and run the executable.
# +
import time
# Compile the generated C unit test with the NRPy+ cmdline helper
# (cmd and out_dir are defined earlier in this notebook).
print("Now compiling, should take ~2 seconds...\n")
start = time.time()
cmd.C_compile(os.path.join(out_dir,"Afield_flux_unit_test.c"), os.path.join(out_dir,"Afield_flux_unit_test"))
end = time.time()
print("Finished in "+str(end-start)+" seconds.\n\n")
print("Now running...\n")
start = time.time()
# Run at two resolutions (Nxx = 2 and 4 per direction) so the cell below
# can compute a convergence order from the two error-output files.
# !./Validation/Afield_flux_unit_test 2 2 2
# To do a convergence test, we'll also need a second grid with twice the resolution.
# !./Validation/Afield_flux_unit_test 4 4 4
end = time.time()
print("Finished in "+str(end-start)+" seconds.\n\n")
# -
# <a id='convergence'></a>
#
# # Step 3: Code validation: Verify that relative error in numerical solution converges to zero at the expected order \[Back to [top](#toc)\]
# $$\label{convergence}$$
#
# For testing purposes, we have only checked these algorithms on a small grid. By construction, we have only guaranteed ourselves output from the functions we are testing at a point, so we will simply print the convergence order at that point after processing our outputs below.
# +
import numpy as np
import matplotlib.pyplot as plt
# Pointwise errors written by the coarse (Nxx=2) and fine (Nxx=4) runs.
errors_coarse = np.loadtxt("out2-numer.txt")
errors_fine = np.loadtxt("out4-numer.txt")
print("The following quantities converge at the listed order (should be ~1 for Shock data, ~2 otherwise):")
# Doubling the resolution should shrink the error by 2**order, so the
# observed order is log2 of the error ratio, per A-field component.
for comp in range(3):
    print("A_rhsD"+str(comp)+": "+str(np.log2(np.abs(errors_coarse[comp]/errors_fine[comp]))))
# -
# <a id='latex_pdf_output'></a>
#
# # Step 4: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
#
# $$\label{latex_pdf_output}$$
#
# The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
# [Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.pdf](Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-A2B.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
# Convert this notebook into a LaTeX'd PDF in the NRPy+ tutorial root.
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-Afield_flux")
| in_progress/Tutorial-Start_to_Finish_UnitTest-GiRaFFE_NRPy-Afield_flux.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
import string
# Load the Yelp reviews dataset and take a first look.
yelp= pd.read_csv('yelp.csv')
yelp.head()
yelp.info()
yelp.describe()
# New feature: length (in characters) of each review's text.
yelp['text length'] = yelp['text'].apply(len)
yelp.head()
# Distribution of review length per star rating.
g = sns.FacetGrid(yelp, col="stars")
g.map(plt.hist, "text length")
sns.boxplot(x='stars', y = 'text length', data = yelp)
sns.countplot(x='stars', data = yelp)
# Mean of the numeric columns grouped by star rating, and their correlations.
stars = yelp.groupby('stars').mean()
stars
stars.corr()
sns.heatmap(stars.corr(),cmap = 'coolwarm', annot = True)
# Binary classification subset: keep only 1-star and 5-star reviews.
yelp_class = yelp[(yelp.stars == 1) | (yelp.stars == 5)]
#yelp_class.head()
X = yelp_class['text']
y = yelp_class['stars']
# Baseline model: bag-of-words features + Multinomial Naive Bayes.
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
X = cv.fit_transform(X)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=101)
from sklearn.naive_bayes import MultinomialNB
nb = MultinomialNB()
nb.fit(X_train, y_train)
predictions = nb.predict(X_test)
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test, predictions))
print ('\n')
print(classification_report(y_test, predictions))
# Second experiment: the same model inside a Pipeline, adding a TF-IDF step.
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
# +
pipeline = Pipeline ([
    ('cv', CountVectorizer()),
    ('tfidf', TfidfTransformer()),
    ('classifier', MultinomialNB())
])
# Re-split from the raw text so the pipeline handles vectorization itself.
X = yelp_class['text']
y = yelp_class['stars']
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.3,random_state=101)
# -
pipeline.fit(X_train, y_train)
pred = pipeline.predict(X_test)
print(classification_report(y_test,pred))
print('\n')
print(confusion_matrix(y_test, pred))
# Control experiment: identical pipeline but WITHOUT the TF-IDF step.
X = yelp_class['text']
y = yelp_class['stars']
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.3,random_state=101)
pipeline_2 = Pipeline ([
    ('cv2', CountVectorizer()),
    ('classifier2', MultinomialNB())
])
pipeline_2.fit(X_train, y_train)
pred_2 = pipeline_2.predict(X_test)
print(classification_report(y_test,pred_2))
print('\n')
print(confusion_matrix(y_test, pred_2))
# +
# Worked example of the text-cleaning steps packaged in text_process below.
mess = 'Sample message! Notice: it has punctuation.'
# Check characters to see if they are in punctuation
nopunc = [char for char in mess if char not in string.punctuation]
# Join the characters again to form the string.
nopunc = ''.join(nopunc)
# -
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
stopwords.words('english')[0:10] # Show some stop words
nopunc.split()
# Keep only words that are not English stopwords (case-insensitive).
clean_mess = [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]
clean_mess
def text_process(mess):
    """
    Takes in a string of text, then performs the following:
    1. Remove all punctuation
    2. Remove all stopwords
    3. Returns a list of the cleaned text
    """
    # Drop punctuation characters, then rebuild the string.
    nopunc = ''.join(char for char in mess if char not in string.punctuation)
    # Perf fix: build the stopword set ONCE. The original called
    # stopwords.words('english') (and scanned the resulting list) for
    # every single word; a set gives one fetch and O(1) membership tests.
    stop_words = set(stopwords.words('english'))
    return [word for word in nopunc.split() if word.lower() not in stop_words]
| NLP_Yelp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.5 64-bit (''pydeep'': virtualenv)'
# language: python
# name: python36564bitpydeepvirtualenvfedb6568ef3d4e13bf936bb2af8dcf94
# ---
# # Transfer Function - Satlin
# ## Loading modules
import ipyvolume as ipv
import numpy as np
from ipywidgets import interactive
import matplotlib.pyplot as plt
# ## Helper functions
# +
def na(x):
    """Shorthand for np.array: convert *x* to a NumPy array."""
    return np.array(x)
def draw_grid(x,y,z):
    """Draw a black wireframe plane at constant height *z* over the (x, y) mesh."""
    ipv.plot_wireframe(x, y, z * np.ones(x.shape), color="black")
def show_point(x,y,z):
    """Plot a single blue sphere marker at the point (x, y, z)."""
    # ipv.scatter expects array arguments, so wrap each scalar coordinate.
    px, py, pz = (na([float(coord)]) for coord in (x, y, z))
    ipv.scatter(px, py, pz, color="blue", marker="sphere", size=5)
# -
# ## Define Neuron
def neuron(f,input,weight,bias):
    """Single neuron: apply transfer function *f* to the net input w*p + b."""
    net_input = input * weight + bias
    return f(net_input)
# ## Define Neural Transfer Function: satlin
# +
def satlin(n):
    """Saturating linear transfer function.

    a = 0 for n < 0, a = n for 0 <= n <= 1, a = 1 for n > 1.
    Returns a new array; the input is never modified. Implemented with
    np.clip, which also generalizes the original (array-only copy-and-mask
    version) to accept scalars.
    """
    return np.clip(n, 0, 1)

# The transfer function used by every plotting cell below.
neural_function = satlin
# +
# %matplotlib inline
def f(weight,bias):
    """Plot the neuron response a = satlin(weight*p + bias) for p in [-2, 2]."""
    plt.figure(2)
    inputs = np.linspace(-2, 2, num=1000)
    response = neuron(neural_function, inputs, weight, bias)
    plt.plot(inputs, response)
    plt.ylim(-3, 3)
    plt.xlim(-2, 2)
    plt.show()
# Interactive sliders for the weight and bias; the widget's last child is its output area.
interactive_plot = interactive(f, weight=(-1,1,0.1), bias=(-3, 3, 0.1))
output = interactive_plot.children[-1]
output.layout.height = '350px'
interactive_plot
# +
# Integer mesh over [-5, 4] x [-5, 4] for the 3-D response surface.
a = np.arange(-5, 5)
U, V = np.meshgrid(a, a)

def f(weight1,weight2,bias):
    """Render the 3-D response surface a = satlin(w1*X + w2*Y + b) over the mesh."""
    X = U
    Y = V
    # Bug fix: removed the dead local Z = X+Y+bias*ones(...) — it ignored the
    # weights and was never referenced after being computed.
    a = neural_function(weight1 * X + weight2 * Y + bias*np.ones(Y.shape))
    ipv.figure(width=400,height=600, offline=True)
    ipv.plot_surface(X, Y, a, color="orange")
    ipv.plot_wireframe(X, Y, a, color="red")
    show_point(0,0,0)
    draw_grid(X,Y,0)
    ipv.show()
# Sliders for the two input weights and the bias of the 3-D surface plot.
interactive_plot = interactive(f, weight1=(-1,1,0.1), weight2=(-1,1,0.1), bias=(-3, 3, 0.1))
output = interactive_plot.children[-1]
interactive_plot
# -
| src/neuralnetworkdesign_hagan/chapter02/TranferFunctions/Transfer_function_satlin.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] azdata_cell_guid="5353c044-9920-478b-b1f8-e98119b73a21"
# Migrate a Database to a Azure SQL Managed Instance
# =============================================
#
# Description
# -----
#
# Copies the database from an on-premises SQL instance to an Azure SQL Managed Instance.
| SQL-Hybrid-Cloud-Toolkit/content/offline-migration/db-to-MI.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Collect the ids of all songs on a NetEase Cloud Music artist page.
# -*- coding:utf-8 -*-
import requests
import json
import re
import urllib.request  # Bug fix: urllib.request is used below but was never imported in this cell.
from bs4 import BeautifulSoup
# Artist page URL (artist id 6460).
url = 'http://music.163.com/artist?id=' + str(6460)
# A browser-like header is needed or the site serves a stripped page.
headers = {"Host":" music.163.com",
           "User-Agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
           # Unnecessary header fields can affect the response encoding, so they are commented out.
           "Accept":" text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
           #"Accept-Language":" zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
           #"Referer":"http://music.163.com/",
           #"Connection": "keep-alive",
           #"Upgrade-Insecure-Requests": "1"
           }
# A bare URL does not get a useful response; a Request with headers is required.
request = urllib.request.Request(url,headers=headers)
response = urllib.request.urlopen(request)
# Decode the raw bytes to text ('ignore' drops undecodable bytes).
html = response.read().decode('utf-8','ignore')
soup = BeautifulSoup(html)
singer_name = soup.select("#artist-name")
print(singer_name)
# The hidden <ul class="f-hide"> carries one <a href="/song?id=NNNN"> per song.
r = soup.find('ul', {'class': 'f-hide'}).find_all('a')
r = (list(r))
music_id_set=[]
for each in r:
    song_name = each.text
    song_id = each.attrs["href"]
    # href has the form "/song?id=NNNN"; characters from index 9 on are the numeric id.
    music_id_set.append(song_id[9:])
print(music_id_set)
# +
# Fetch the lyrics of one song by its id via the public lyric API.
lrc_url = 'http://music.163.com/api/song/lyric?' + 'id=' + str(531051217) + '&lv=1&kv=1&tv=-1'
lyric = requests.get(lrc_url)
json_obj = lyric.text
j = json.loads(json_obj)
lrc = j['lrc']['lyric']
# Strip the "[mm:ss.xx]" timestamp tags, then the surrounding whitespace.
pat = re.compile(r'\[.*\]')
lrc = re.sub(pat, "", lrc)
lrc = lrc.strip()
print(lrc)
# +
# Scrape the lyrics of every song in a NetEase Cloud Music playlist in one pass.
import json
import requests
import re
import urllib.request  # Fix: "import urllib" alone does not bind the urllib.request submodule.
from bs4 import *
url = "http://music.163.com/playlist?id=63306090"
# A browser-like header is needed or the site serves a stripped page.
headers = {"Host":" music.163.com",
           "User-Agent":" Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0",
           # Unnecessary header fields can affect the response encoding, so they are commented out.
           "Accept":" text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
           #"Accept-Language":" zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
           #"Referer":"http://music.163.com/",
           #"Connection": "keep-alive",
           #"Upgrade-Insecure-Requests": "1"
           }
# A bare URL does not get a useful response; a Request with headers is required.
request = urllib.request.Request(url,headers=headers)
response = urllib.request.urlopen(request)
# Decode the raw bytes to text ('ignore' drops undecodable bytes).
html = response.read().decode('utf-8','ignore')
soup = BeautifulSoup(html)
# Write the lyrics of every song in the playlist to lx_songlist.txt.
f=open('lx_songlist.txt','w',encoding='utf-8')
for item in soup.ul.children:
    # Each entry's link has the form "/song?id=NNNN".
    song_id = item('a')[0].get("href",None)
    # Extract the numeric part of song_id with a regex.
    pat = re.compile(r'[0-9].*$')
    sid = re.findall(pat,song_id)[0]
    # This URL is the real lyric endpoint.
    url = "http://music.163.com/api/song/lyric?"+"id="+str(sid)+"&lv=1&kv=1&tv=-1"
    html = requests.post(url)
    json_obj = html.text
    # The lyric payload is JSON; parse it, falling back when a song has no lyrics.
    j = json.loads(json_obj)
    try:
        lyric = j['lrc']['lyric']
    except KeyError:
        lyric = "无歌词"
    # Strip the "[mm:ss.xx]" timestamp tags.
    pat = re.compile(r'\[.*\]')
    lrc = re.sub(pat,"",lyric)
    lrc = lrc.strip()
    #print(lrc)
    f.write(lrc)
f.close()
# -
| get_lyrics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/2series/rockwall_analytics/blob/master/winery_s2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="DsfrJlEeD13a" colab_type="text"
# # Building a wineRatingPredictor
# ## Part 2: Set Metric & Baseline, Selection & Tuning of Models
# ## 17 May 2020
# ## About <NAME>
# > As a Data Scientist and former head of global fintech research at Malastare.ai, I find fulfillment tackling challenges to solve complex problems using data
#
# 
#
# ML workflow for building a **wineRatingPredictor**:
#
# 1. Data Preprocessing
# 2. EDA
# 3. Feature Engineering & Feature Selection
# 4. Set Evaluation Metric & Establish Baseline
# 5. Selecting Models based on Evaluation Metric
# 6. Perform Hyperparameter Tuning on the Selected Model
# 7. Train & Evaluate the Model
# 8. Interpret Model Predictions
# 9. Draw Conclusions
#
# ## This notebook covers steps 4 to 6
# + id="hb3QLQbJDoVf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="9ea160cf-d328-4c6b-9b7b-851ccdb87e8e"
# manipulation libraries
import pandas as pd
import numpy as np
# visualization libraries
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# to display visuals in the notebook
# %config InlineBackend.figure_format='retina'
# to enable high resolution plots
# normalization and random-search and error metric
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
# potential machine Learning Models
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
import lightgbm as lgb
# to save ML models
# import pickle
# + id="wqfOaAhWEYP_" colab_type="code" colab={}
# functions to use in the notebook
def fit_evaluate_model(model, X_train, y_train, X_valid, y_valid):
    """Fit *model* on the training split and return its MSE on the validation split."""
    model.fit(X_train, y_train)
    predictions = model.predict(X_valid)
    return mean_squared_error(y_valid, predictions)
def convert_features_to_array(features):
    """Convert a feature DataFrame into a 2-D (rows x columns) NumPy array."""
    shape = (len(features), len(features.columns))
    return np.array(features).reshape(shape)
def convert_target_to_array(target):
    """Flatten a single-column target DataFrame into a 1-D NumPy array."""
    return np.array(target).reshape(-1)
# + id="koEKSO82HeTO" colab_type="code" colab={}
# Load the train/validation splits produced by the preprocessing notebook (part 1).
X_train = pd.read_csv('X_train.csv')
y_train = pd.read_csv('y_train.csv')
X_valid = pd.read_csv('X_valid.csv')
y_valid = pd.read_csv('y_valid.csv')
# + id="3mHN33aeHyOe" colab_type="code" colab={}
# Convert the DataFrames to plain NumPy arrays for scikit-learn.
X_train_array = convert_features_to_array(X_train)
X_valid_array = convert_features_to_array(X_valid)
y_train_array = convert_target_to_array(y_train)
y_valid_array = convert_target_to_array(y_valid)
# + [markdown] id="LoQfDJsLX9zY" colab_type="text"
# # 4. Set Evaluation Metric & Establish Baseline
#
# ### **Set Evaluation Metric**
#
# 
#
# > Gradient descent is the process of gradually decreasing the cost function (i.e. MSE) by tweaking parameter(s) iteratively until you have reached a minimum
#
# **MSE (Mean square error)** is the average of the squared error that is used as the loss function for least squares regression: It is the sum, over all the data points, of the square of the difference between the predicted and actual target variables, divided by the number of data points
#
# ## Steps to find the MSE
#
# 1. Find the equation for the regression line
# \begin{equation*} \hat{Y}_i = \hat{\beta}_0 + \hat{\beta}_1 X_i + \hat{\epsilon}_i \end{equation*}
#
# 2. Insert X values in the equation found in step 1 in order to get the respective Y values i.e.
# \begin{equation*} \hat{Y}_i \end{equation*}
#
# 3. Now subtract the new Y values $$(i.e. \hat{Y}_i)$$ from the original Y values. Thus, found values are the error terms. It is also known as the vertical distance of the given point from the regression line
# \begin{equation*} Y_i - \hat{Y}_i \end{equation*}
#
# 4. Square the errors found in step 3
# \begin{equation*} {(Y_i - \hat{Y}_i)}^2 \end{equation*}
#
# 5. Sum up all the squares
# \begin{equation*} \sum_{i=1}^{N}(Y_i - \hat{Y}_i)^2 \end{equation*}
#
# 6. Divide the value found in step 5 by the total number of observations
# \begin{equation*} MSE = \frac{1}{N}\sum_{i=1}^{N}(Y_i - \hat{Y}_i)^2 \end{equation*}
#
# *In other words, we are evaluating our model by looking at the measure of how large the squared errors (residuals) are spread out*
#
# **MSE** is selected as an *error metric*, because it is interpretable, it is analogous to variance and it also aligns with our selected algorithm's error minimization criteria
#
# On the other hand, this *error metric* is sensitive to extreme values or outliers, as it takes the square of the differences between the actual and predicted values, in the presence of extreme values and outliers difference that grows quadratically
#
# ### **Establish Baseline**
# Prior to model selection we are going to construct a *baseline metric* with MSE
#
# A *baseline metric* can be explained as generating a naive guess of the target value
#
# > Before you resort to ML, set up a baseline to solve the problem as if you know zero data science. You may be amazed at how effective this baseline is. It can be as simple as recommending the top N popular items or other rule-based logic. This baseline can also serve as a good benchmark for ML
#
# **We are going to select and tune the models that beats our baseline by a significant margin**
#
# *Should our models be unable to beat our baseline, then ML may not be the best approach to solving this problem, or the entire data pre-processing pipeline needs re-consideration*
#
# **For our regression problem, a simple baseline is to predict the variance of the mean of the training set to the validation set. This approach aligns with our evaluation metric MSE as well!**
# + id="v5BtxLFjH-eU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e7ceaffb-016e-4670-cb97-a24122f00b28"
# Naive baseline prediction: the mean of the training targets (a constant guess).
baseline = (np.mean(y_train_array))
baseline
# + id="akDsI7_7aX2n" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f57554a1-df1c-4876-865c-59ef999293fc"
# Baseline MSE: mean squared difference between that constant and the
# validation targets. Every candidate model must beat this number.
mse_baseline = (np.mean(np.square(baseline - y_valid_array)))
print("Baseline error is:", round(mse_baseline, 2))
# + [markdown] id="yGEVrU1Hb5EO" colab_type="text"
# This shows the average variance between training points and validation points is 9.01. i.e, the sum of squared residuals on average of training points to validation points is 9.01
#
# # 5. Selecting Models based on Evaluation Metric
# We are going to try 1 Linear Algorithm, 2 Distance-based Algorithms, and 3 Tree-based algorithms
#
# # + Linear regression
# # + K-nearest neighbors
# # + Support vector machines
# # + Random forests regressor
# # + Extra trees regressor
# # + Light gradient boosting machines
#
# We are going to train our models using the training set, and compare their performances by looking at the **MSE** for predictions on the validation set
#
# For now, we will not go into the details of each model, we will observe the run time and the **MSE** reported by each model. We will elaborate on the best model in the last notebook
#
# ### **Linear Regression Algo**
# + id="UOWeByXlbjtm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="5849e3c3-35ed-468f-fed5-03a46852612d"
# we begin our exploration with small samples
# before scaling it to production volume
# hence, its logical to gauge the time on how long the algo runs
# %%time
# Ordinary least-squares linear regression (no hyperparameters to tune).
lr = LinearRegression()
mse_lr=fit_evaluate_model(lr,
                          X_train_array,
                          y_train_array,
                          X_valid_array,
                          y_valid_array)
print("MSE of linear regression:", mse_lr)
# + [markdown] id="_0L0pS3schK9" colab_type="text"
# This algo beats our baseline metric, 9.01, indicating that it may be a candidate for a good predictor
# + [markdown] id="84CjJKREi8L8" colab_type="text"
# ### **Normalize Datasets for KNN and SVM**
# Distance-based algos uses the euclidian distance to train models, thus varying ranges causes distance-based algos to generate inaccurate predictions
#
# In order to differentiate between distance-based models, we are going to scale down the features with normalization
# + id="g27EGxUacaXv" colab_type="code" colab={}
# create scaler
scaler = StandardScaler()
# Fit the scaler on the training set only (avoids leaking validation
# statistics), then apply the same transform to both splits.
X_train_array_scaled = (scaler.fit_transform(X_train_array, y_train_array))
# transform validation set
X_valid_array_scaled = scaler.transform(X_valid_array)
# + [markdown] id="s2vIlDjbdNjp" colab_type="text"
# ### **KNN Regressor**
# + id="hyhNK9wndJ6i" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="e90a203b-ef26-4e60-a258-505e7ea8c7ee"
# we begin our exploration with small samples
# before scaling it to production volume
# hence, its logical to gauge the time on how long the algo runs
# %%time
# Distance-based model, so it is trained on the *scaled* features.
knn = KNeighborsRegressor()
mse_knn = fit_evaluate_model(knn,
                             X_train_array_scaled,
                             y_train_array,
                             X_valid_array_scaled,
                             y_valid_array)
print("MSE of knn regressor:", mse_knn)
# + [markdown] id="Tfzbb9aPmNPH" colab_type="text"
# This algo performs better than our linear regression algo, indicating that it may be a candidate for a good predictor
# + [markdown] id="069scz-KdYTI" colab_type="text"
#
#
#
# ### **SVM Regressor**
# + id="quTQHhm1dUSi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="641cf7e2-5154-4e19-8d2a-fdd812466524"
# we begin our exploration with small samples
# before scaling it to production volume
# hence, its logical to gauge the time on how long the algo runs
# %%time
# Support-vector regression, also distance-based, so scaled features again.
svm = SVR()
mse_svm = fit_evaluate_model(svm,
                             X_train_array_scaled,
                             y_train_array,
                             X_valid_array_scaled,
                             y_valid_array)
print('MSE of Support Vector Machines:', mse_svm)
# + [markdown] id="AcLthnciduGa" colab_type="text"
# This algo performed better than the KNN at a higher run-time. All in all MSE decreased 35% showing that this algorithm might be a candidate for building a good predictor
#
# ### **Random Forest Regressor**
# + id="UeJ3A12HdqXD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="001f1ef9-dd7e-4d4a-c5ac-8ce2a4a6279f"
# we begin our exploration with small samples
# before scaling it to production volume
# hence, its logical to gauge the time on how long the algo runs
# %%time
# Tree ensembles are insensitive to feature scale, so unscaled arrays are used.
rf = RandomForestRegressor(random_state=42)
mse_rf = fit_evaluate_model(rf,
                            X_train_array,
                            y_train_array,
                            X_valid_array,
                            y_valid_array)
print("MSE of Random Forests", mse_rf)
# + [markdown] id="JZBotF4deDcj" colab_type="text"
# This algo performed better than SVM at less run-times. It decreased the MSE 44%, and displaces SVM from the good-predictor list
#
# ### **Extra Trees Regressor**
# + id="VqJrZdP9eAaz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="0d587d79-429d-471c-a100-581dbe462622"
# we begin our exploration with small samples
# before scaling it to production volume
# hence, its logical to gauge the time on how long the algo runs
# %%time
# Extra-Trees: like a random forest but with fully randomized split thresholds.
xrf = ExtraTreesRegressor(random_state=42)
mse_xrf = fit_evaluate_model(xrf,
                             X_train_array,
                             y_train_array,
                             X_valid_array,
                             y_valid_array)
# Bug fix: this previously printed "MSE of Random Forests" (copy-paste from the RF cell).
print("MSE of Extra Trees", mse_xrf)
# + [markdown] id="p66m8Kw5eXkg" colab_type="text"
# This algo performed worse than the Random Forest and SVM, thus indicating it is not a good candidate for a good predictor
#
# ### **Light Gradient Boosting Regressor**
# + id="SsI_R8_oeTGE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="e87a366d-1efc-49e9-f291-61f960f30224"
# we begin our exploration with small samples
# before scaling it to production volume
# hence, its logical to gauge the time on how long the algo runs
# %%time
# Gradient-boosted trees (LightGBM) with default settings.
lgbm = lgb.LGBMRegressor(random_state=42)
mse_lgbm = fit_evaluate_model(lgbm,
                              X_train_array,
                              y_train_array,
                              X_valid_array,
                              y_valid_array)
print("MSE of light gradient boosting algorithm", mse_lgbm)
# + [markdown] id="cA5GrIcaew2c" colab_type="text"
# This algo has the best performance of all tree models. It lowered the baseline MSE by 45% and indicates that it is a potential candidate for a good predictor at a lower run time
#
# # Visualize Model Selection Output
# + id="tXUQJ2_PeuZH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 645} outputId="417eb76b-4c93-4008-c94a-317a66d9de7c"
# create dataframe of mse and model and sort values
# (model order here must match the order the mse_* variables were computed in)
performance_comparison = pd.DataFrame({'Model':
                                       ['Linear Regression', 'K-Nearest Neighbor',
                                        'Support Vector Machines', 'Random Forest',
                                        'Extra Trees', 'LightGBM'],
                                       'MSE':
                                       [mse_lr, mse_knn, mse_svm,
                                        mse_rf, mse_xrf, mse_lgbm]})
performance_comparison = performance_comparison.sort_values(by='MSE', ascending=True)
# Horizontal bar chart, best (lowest MSE) model at the top.
plt.figure(figsize=(10,10))
ax = sns.barplot(x='MSE', y='Model', data=performance_comparison, palette='PuRd_d')
# title arrange labels
plt.yticks(size=14)
plt.xticks(size=14)
plt.title('MSE for Different Model Selection', size=16)
# + [markdown] id="92m2_zKKBHht" colab_type="text"
# Random forest and Light GBM algos showed they are potentially good predictors for our **wineRatingPredictor**
#
# Both algos decreased baseline MSE by more than 40%
#
# Next, we attempt to make additional improvements to RF and LGBM using Random Search with Cross Validation, and Hyperparameter Tuning
#
# # 6. Perform Hyperparameter Tuning on Selected Model
# We are going to search for the best set of parameters with **random search and k-fold cross validation**
#
# **Random search** is the process of randomly combining the defined parameters and comparing the defined score, *MSE, for this problem*, with iterations. So, the most optimal dataset might be different than the *random search* results. However, this search algo is fast and run-time efficient
#
# **K-fold cv** is the method used to assess the performance of hyperparameters on the entire dataset. Rather than splitting the dataset into 2 static subsets of training and validation sets, the dataset is divided equally for the given K. The model is trained with K-1 subsets and tested on Kth subset iteratively. This process makes each process more robust
# + id="rgtrBcoOA7UI" colab_type="code" colab={}
# add back df to perform Random Search + CV
# add back df to perform Random Search + CV
# Re-join the train and validation splits: cross-validation creates its own
# folds, so the manual split is no longer needed.
X = pd.concat([X_train, X_valid])
y = pd.concat([y_train, y_valid])
# NOTE(review): the convert_* helpers are defined in earlier notebook cells.
X_array = convert_features_to_array(X)
y_array = convert_target_to_array(y)
# + [markdown] id="O4AZVtUJEd7e" colab_type="text"
# ### **Light GBM**
# Parameters to be optimized
#
# # + **boosting type**: tree optimization method, default is gdbt
# # + **n_estimators**: number of trees to be used in the model, default is 100
# # + **learning_rate**: how fast algorithm optimizes the built trees, default is 0.1
# # + **colsample_bytree**: sub-sample ratio of columns when constructing each tree, default is 1
# + id="bHTuKBX5EaFY" colab_type="code" colab={}
# define search parameters
# define search parameters
# Candidate values for the LightGBM randomized search.
boosting_type = ['gbdt', 'dart', 'goss', 'rf']
n_estimators = [100, 200, 300, 500, 1000]
learning_rate = [0.01, 0.02, 0.05, 0.1]
colsample_bytree = [0.5, 0.6, 0.7, 0.8, 0.9, 1]
# define the grid for hyperparameters to search
hyperparameter_grid = {'boosting_type': boosting_type,
                       'n_estimators': n_estimators,
                       'learning_rate': learning_rate,
                       'colsample_bytree': colsample_bytree}
# + id="1uWnIrgmFNSx" colab_type="code" colab={}
# create randomized search object
# create randomized search object
# 25 random parameter combinations, each scored with 4-fold CV on negated MSE
# (sklearn maximizes scores, hence the negation); n_jobs=-1 uses every core.
lgbm_random_cv = RandomizedSearchCV(estimator=lgbm,
                                    param_distributions=hyperparameter_grid,
                                    cv=4,
                                    n_iter=25,
                                    scoring='neg_mean_squared_error',
                                    n_jobs=-1,
                                    verbose=1,
                                    return_train_score=True,
                                    random_state=42)
# NOTE(review): fits on the DataFrame X / Series y, not on the X_array/y_array
# built above — confirm this is intentional.
lgbm_random_cv.fit(X, y)
# + id="5-_Rn4XeFXbs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 115} outputId="1c5b8586-56b7-47d2-9b8c-08ab1594fbf4"
# Display the estimator refit with the best CV parameters.
lgbm_random_cv.best_estimator_
# + [markdown] id="J8HGMH7EFxsL" colab_type="text"
# After hyperparameter tuning, the best set of hyperparameters is determined as:
#
# # + **boosting_type**: gbdt
# # + **n_estimators**: 1000
# # + **colsample_bytree**: 0.5
# # + **learning_rate**: 0.02
#
# ### **Revisit MSE**
# + id="I0WxzSk4Fp_0" colab_type="code" colab={}
# Extract the best estimator found by the randomized search.
lgbm_random_cv_model = lgbm_random_cv.best_estimator_
# + id="noOVmHWFG38k" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 66} outputId="c176ae86-a002-402b-d44f-3b9c5c174e71"
# we begin our exploration with small samples
# before scaling it to production volume
# hence, its logical to gauge the time on how long the algo runs
# %%time
# (Removed a dead `lgbm = lgb.LGBMRegressor(random_state=42)` rebind: the
# tuned `lgbm_random_cv_model` is what gets evaluated below, so the rebind
# was unused and misleading.)
mse_lgbm_rcv = fit_evaluate_model(lgbm_random_cv_model,
                                  X_train_array,
                                  y_train_array,
                                  X_valid_array,
                                  y_valid_array)
print('MSE of light gradient boosting algorithm', mse_lgbm_rcv)
# + [markdown] id="y5OLN0uZHFK_" colab_type="text"
# MSE for lgbm decreased from 4.97 to 4.85, which is a 3% improvement compared to light gbm built with default parameters, at a higher run-time
#
# ### **Random Forest**
# Parameters to be optimized
#
# # + **n_estimators**: number of trees to be used in the model, default is 100
# # + **min_samples_split**: minimum number of samples required to split an internal node, default value is 2
# # + **min_samples_leaf**: minimum number of samples required to be at a leaf node, default value is 1
# # + **max_features**: number of features to consider when looking for the best split, default value is auto
# + id="pgY2jvY2G_W2" colab_type="code" colab={}
# define search parameters
# Candidate values for the Random Forest randomized search.
n_estimators = [100, 200, 300, 500, 1000]
min_samples_split = [2, 4, 6, 10]
min_samples_leaf = [1, 2, 4, 6, 8]
max_features = ['auto', 'sqrt', 'log2', None]
# define the grid of hyperparameters to search
hyperparameter_grid = {'n_estimators': n_estimators,
                       'min_samples_split': min_samples_split,
                       'min_samples_leaf': min_samples_leaf,
                       'max_features': max_features}
# + id="zEiLM0B9HpgG" colab_type="code" colab={}
# create randomized search object
# create randomized search object
# Same search setup as for LightGBM: 25 random combinations, 4-fold CV,
# negated MSE as the score to maximize.
# NOTE(review): `rf` is the Random Forest estimator created in an earlier cell.
rf_random_cv = RandomizedSearchCV(estimator=rf,
                                  param_distributions=hyperparameter_grid,
                                  cv=4,
                                  n_iter=25,
                                  scoring='neg_mean_squared_error',
                                  n_jobs=-1,
                                  verbose=1,
                                  return_train_score=True,
                                  random_state=42)
rf_random_cv.fit(X, y)
# + id="ENxRG0wVHx0z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 132} outputId="8e9cfab8-5cd6-42cb-a2fa-35109da43017"
# Display the estimator refit with the best CV parameters.
rf_random_cv.best_estimator_
# + [markdown] id="jZ7caPrNIGMr" colab_type="text"
# After hyperparameter tuning, the best set of hyperparameters is determined as
#
# # + **n_estimators**: 200
# # + **min_samples_split**: 4
# # + **min_samples_leaf**: 2
# # + **max_features**: sqrt
#
# ### **Revisit MSE**
# + id="RWv0vP4UJyH4" colab_type="code" colab={}
# Extract the best Random Forest found by the randomized search and score it
# on the same train/validation split used for the default models.
rf_random_cv_model = rf_random_cv.best_estimator_
# + id="-vss_slaJ4Fd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 66} outputId="313d105e-b623-4905-8bc4-ebb4874d940d"
# %%time
mse_rf_rcv = fit_evaluate_model(rf_random_cv_model,
                                X_train_array,
                                y_train_array,
                                X_valid_array,
                                y_valid_array)
print('MSE of random forests algorithm', mse_rf_rcv)
# + [markdown] id="44WNS6noKC5o" colab_type="text"
# # Conclusion
# We started out with the baseline MSE of 9.01. Linear regression, KNN, SVM, and the extra trees regressor could only beat the baseline metric by a slight margin, thus eliminating them from the potential good predictor model list
#
# *RF and light GBM* have beaten the baseline MSE by more than 40% which is a significant improvement. Thus they are selected as potential good predictors
#
# Hyperparameter tuning was performed on the potential good predictors, where we searched for more suitable parameters for a given feature set. Light GBM's MSE improved from 4.97 to 4.85, and the tuned model run-time resulted in 1.17 seconds
#
# On the other hand, RF MSE improved more after the hyperparameter tuning. Its MSE decreased from 5.41 to 4.99 and the tuned model run-time resulted in 1.13 seconds which is lower than the initial RF model
#
# Although tuned light GBM's MSE is lower than the tuned RF model, I'll select the RF algo as my predictor. My goal was to illustrate a good prediction for wine points are possible by using **machine learning**, and my tuned RF model has already satisfied that requirement
#
# Other important considerations for our model selection are:
# # + the improvement on the MSE between the initial and tuned model,
# # + algorithm explainability and
# # + model run-times
#
# Let's save our fine-tuned RF model as output for this file, and continue by training our model on the entire training set in the last notebook
# + id="bugZKOA6J9h7" colab_type="code" colab={}
# filename = 'random_forests_model.sav'
# pickle.dump(rf_random_cv_model, open(filename, 'wb'))
# + [markdown] id="zHqLowBHD0mP" colab_type="text"
# 
# + id="mVV7MFlsTlpW" colab_type="code" colab={}
| Winery/winery_s2.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,Rmd
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# +
library(keras) # FashionMNIST dataset
library(nnet) # Neural networks
library(caret) # Cross Validation - loads nnet directly on trainControl
library(doParallel) # Parallel cross-validation.
# -
# # Dataset
# First we load the dataset from keras package. _Check legacyLoad.R to see how to load the dataset without using the package._
# Download/load FashionMNIST through the keras package; str() shows the
# nested train/test structure.
fashion <- dataset_fashion_mnist()
str(fashion)
# attach() exposes `train` and `test` as top-level names for the rest of
# the notebook.
attach(fashion) # So we can access test and train directly!
# We get the following structure:
#
# - train: Training dataset
# + x: the predictors, 28x28 pixels image in grayscale.
# + y: the response
# - test: Testing dataset (with x and y)
#
# We can see the images with the following function:
# ## dataset visualization
# +
# image() draws matrices in column order; rotate() flips them so the garment
# appears upright.
rotate <- function(x) t(apply(x, 2, rev))

# Draw one 28x28 grayscale sample; extra args are passed through to image().
show_image <- function(imgarray, col=gray(12:1/12), ...) {
  image(rotate(matrix(imgarray, nrow=28)), col=col, ...)
}

show_image(train$x[2,,])
show_image(train$x[10,,])
# -
# ## Response reencode
# Notice that in y we have an integer from 0 to 9 (10 classes). They are in fact the following:
# - 0: T-shirt/top
# - 1: Trouser
# - 2: Pullover
# - 3: Dress
# - 4: Coat
# - 5: Sandal
# - 6: Shirt
# - 7: Sneaker
# - 8: Bag
# - 9: Ankle boot
#
# We recode the response variable to factor.
# +
# Human-readable labels, ordered to match the integer codes 0..9.
classString <- c("T-shirt/top","Trouser", "Pullover", "Dress", "Coat", "Sandal",
                 "Shirt","Sneaker", "Bag","Ankle boot")
# y+1 because 0 is the first class and in R we start indexing at 1!
train$yFactor <- as.factor(classString[train$y+1])
test$yFactor <- as.factor(classString[test$y+1])
# -
# For the CNN we use one hot encoding to produce a vector of 10 values per sample, with a one on the class (probablity of belonging to a given class).
# nnet::class.ind builds the one-hot (indicator) matrix from the factor.
train$yOneHot <- class.ind(train$yFactor)
test$yOneHot <- class.ind(test$yFactor)
str(train$yOneHot)
train$y[1:10]
train$yOneHot[1:10,]
# *class.ind* reorders the classes alphabetically, therefore we need to revert this order to the original provided. We use *match* over the column names to get a vector of the reorder to match the column names to **classString**.
colnames(train$yOneHot)
classString
# m maps each entry of classString to its column position in the one-hot matrix.
(m <- match(classString, colnames(test$yOneHot)))
train$yOneHot <- train$yOneHot[,m]
test$yOneHot <- test$yOneHot[,m]
# Now the order is correct
colnames(train$yOneHot)
classString
# ## Add missing dimension
# Convolutional layers will expect the input to have 4 dimensions:
# - Sample dimension
# - Height dimension
# - Width dimension
# - Channel dimension
#
# In our case we have only one channel as the image is grayscale. If it's a color image we would have 3 or 4 channels (Red, Green, Blue and Alpha (transparency)). We need to add the missing dimension, however this will not modify the data.
# Append the trailing channel dimension: (n, 28, 28) -> (n, 28, 28, 1).
# Only the array's shape metadata changes; the data itself is untouched.
dim(train$x) <- c(dim(train$x),1)
dim(test$x) <- c(dim(test$x),1)
# ## Create a dataset for nnet
# Now we prepare join the X and the Y in a data.frame.
nnetData <- data.frame(train$x, class=train$yFactor)
nnetDataTest <- data.frame(test$x, class=test$yFactor)
# # Training a Neural Network
# We can train the model directly as follows, but we will use _caret's_
# _trainControl_ for CrossValidation.
# Single hidden layer of 50 units; MaxNWts is raised because 784 inputs x 50
# units exceeds nnet's default weight cap.
model.nnet <- nnet(class ~ ., data=nnetData, size=50, maxit=300,decay=0.5, MaxNWts = 39760)
# Specifically a 5 fold cross-validation. We don't go for a 10 fold
# cross-validation as it will take a lot of time to compute.
## specify 5-CV
K <- 5
trc <- trainControl (method="repeatedcv", number=K, repeats=1)
# Weight-decay (regularization) candidates on a log scale from 1e-3 to 1.
(decays <- 10^seq(-3,0,by=0.25))
# We now specify that we want to execute the cross validation in parallel:
# Use all cores except one (recommended if you want to use your computer for something else). Or half of the length of the decays (RAM issues).
cores <- min(detectCores()-1, ceiling(length(decays)/2))
registerDoParallel(cores = cores)
# Beware with the number of cores used, it will impact in the RAM usage. ~10GB per thread with 60K Fashion MNIST samples. **Don't execute this training if you don't have a big machine, just load the model.**
#
# The cross-validation process will take about 30 hours using 7 cores of a Intel(R)
# Xeon(R) CPU E5-2630 v4 @ 2.20GHz and about 80 GB of RAM.
#
# Remember that we're training (number_of_decay_param x number_of_folds) = 13 x 5
# = 65 models.
## WARNING: this takes some time
# caret::train with method='nnet': fixed size=50, tuneGrid over the decay
# values; the 5-fold CV scheme comes from `trc` defined above.
model.5CV <- train (class ~ ., data=nnetData, method='nnet', maxit = 300, trace = FALSE,
                    tuneGrid = expand.grid(.size=50,.decay=decays), trControl=trc, MaxNWts=39760)
# Save model
save(model.5CV, file="nnet.mod")
load("nnet.mod")
model.5CV
# The best model we got has an accuracy of 84%. Not bad at all for a 10 class
# classification problem.
pred <- predict(model.5CV,nnetDataTest)
# Confusion matrix: rows are true classes, columns are predictions.
(t <- table(nnetDataTest$class, pred))
(accuracy <- sum(diag(t))/sum(t))
# Test also gives us 84% of accuracy. Nice model :)
#
# # Convolutional Neural Networks
#
# ## Model architecture definition: LeNet
#
# Now we have to define the CNN architecture. In this case we use LeNet, proposed
# by LeCun et al. (Gradient-based learning applied to document recognition.
# Proceedings of the IEEE, november 1998).
#
# It is composed by two packs of convolutional-activation(tanh)-pooling layers and
# two fully connected layers with a softmax layer at the end.
#
# In Keras, as in most of the packages, we define layers as objects and the
# connections between those objects. In this case we implicitly connect everything
# using the %>% operator.
# LeNet: two conv/tanh/max-pool blocks followed by two dense layers.
lenet <- keras_model_sequential() %>%
  # First convolutional block
  layer_conv_2d(filters=20, kernel_size=c(5,5), activation="tanh",
                input_shape=c(28,28,1), padding="same") %>% # We define here the input size
  layer_max_pooling_2d(pool_size=c(2,2),strides=c(2,2)) %>%
  # Second convolutional block
  # NOTE(review): input_shape on a non-first layer is redundant — confirm it
  # can be dropped without changing the built model.
  layer_conv_2d(filters=50, kernel_size=c(5,5), activation="tanh",
                input_shape=c(28,28,1), padding="same") %>%
  layer_max_pooling_2d(pool_size=c(2,2),strides=c(2,2)) %>%
  # Flatten the matrix to a vector for the fully connected layers
  layer_flatten() %>%
  # First fully connected block
  layer_dense(units=500, activation="tanh") %>%
  # Second fully connected block
  layer_dense(units=10, activation="softmax")
# This last layer will produce the final classification (probability of
# belonging to a class). 10 different units, 10 different classes.
# Now we check the architecture we have defined:
lenet
# Notice that we're adjusting 1 million parameters this time. With the nnet one layer network we were training just 39.760 parameters.
#
# Last thing we have to do is to specify which optimizaton algorithm and metrics
# we want to use with the compile step.
# SGD with learning-rate decay, momentum, and gradient-norm clipping.
sgd <- optimizer_sgd(
  lr=0.05,
  decay=0.001,
  momentum=0.8,
  clipnorm=1.
)
# Categorical cross-entropy matches the one-hot encoded labels built above.
lenet %>% compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics = "accuracy"
)
# ## Model training
#
# Now we're going to train the network using CPU (if you're not using
# tensorflow-gpu). Mind that if you want to use GPUs you need to have the GPU
# version of the package and the required Nvidia packages (check PlaidML for
# non-Nvidia GPUs).
# Train for 10 epochs, holding out 20% of the training data for validation.
lenet %>% fit(
  train$x,
  train$yOneHot,
  batch_size=50,
  validation_split=0.2,
  epochs=10
)
# Takes about 3 minutes with 40 cores and 18 GB of RAM. It may take less with GPUs!
# And now we save the trained model for convenience:
lenet %>% save_model_hdf5("lenet-FashionMNIST.h5")
# ## Predicting using the model
#
# Predicting the label for the test set
# Reload the saved model and predict class probabilities for the test set.
lenet <- load_model_hdf5("lenet-FashionMNIST.h5")
lenet
pred_prob <- predict(lenet, test$x)
head(pred_prob)
# For each element we get the probability of that element to be of each class, therefore we search for the value that is maximum in each row and then we create the confusion matrix.
# +
# which.max over each row picks the most probable class index (1..10).
predClass <- apply(pred_prob,1,which.max)
predClass <- classString[predClass] # And change the integers by their class tag
trueClass <- test$yFactor
# Now we do a confusion matrix and analyze it
(cMatrix <- table(trueClass,predClass))
# -
# Accuracy = correctly classified (diagonal) over all test samples.
correctClass <- sum(diag(cMatrix))
total <- sum(cMatrix)
(accuracy <- correctClass/total)
# We're getting about 90% accuracy, which may be improved with further tuning of the network. Notice that, for example, there are 30 ankle boots classified as sneakers, which can have similar shapes. Also, 113 pullovers are classified as coats.
| keras/training/FashionMNIST-training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Encoder as a retriever
# Sometimes the user's query does not match any document, especially for small corpora. It is where neural search becomes very interesting. The encoder can play the role of a spare wheel to find documents when traditional retrievers have not found anything.
from cherche import retrieve, rank, data
from sentence_transformers import SentenceTransformer
# Let's load a dummy dataset
# Load the demo corpus; entries expose at least "id", "title" and "article",
# the fields the retrievers below are configured on.
documents = data.load_towns()
documents[:2]
# First, we will perform a search with a TfIdf to show that the model's ability to retrieve documents may be limited.
# Lexical TfIdf retriever over the "article" and "title" fields, top-10 hits.
retriever = retrieve.TfIdf(key="id", on=["article", "title"], documents=documents, k=10)
retriever
# There is a single document that match the query "food" using default TfIdf.
# Lexical matching only: returns documents literally containing "food".
retriever("food")
# We can now compare these results with the `retrieve.Encoder` using Sentence Bert. The `add` method takes time because the retriever will compute embeddings for every document. Once this is done, it saves the embeddings in the `all-mpnet-base-v2.pkl` file. It will not be computed twice.
# +
# Dense retriever: embeds documents with Sentence-BERT and retrieves by
# vector similarity; add() computes embeddings once and caches them in the
# .pkl file given by `path`.
retriever = retrieve.Encoder(
    key = "id",
    on = ["title", "article"],
    encoder = SentenceTransformer("sentence-transformers/all-mpnet-base-v2").encode,
    k = 5,
    path = "all-mpnet-base-v2.pkl"
)

retriever.add(documents=documents)
# -
# As can be seen, the encoder recalls more documents, even if they do not systematically contain the word "food". These documents seem relevant.
retriever("food")
# Pipe the retriever output through the document store to get full documents.
(retriever + documents)("food")
# We can create a fancy neural search pipeline to benefit from TfIdf precision and Sentence Transformers recall using union operator `|`.
encoder = SentenceTransformer("sentence-transformers/all-mpnet-base-v2").encode
# +
# Precision pipeline
# TfIdf narrows to 30 candidates, then the encoder re-ranks and keeps 5.
precision = (
    retrieve.TfIdf(key="id", on=["article", "title"], documents=documents, k = 30) +
    rank.Encoder(key="id", on=["title", "article"], encoder=encoder, k=5, path="all-mpnet-base-v2.pkl")
)

# Recall pipeline
recall = retrieve.Encoder(key="id", on=["title", "article"], encoder=encoder, k=5, path="all-mpnet-base-v2.pkl")

# Union: precision results come first, then recall-only extras.
search = precision | recall
search.add(documents=documents)
# -
# Our pipeline will first propose documents from the `precision` pipeline and then documents proposed by the `recall` pipeline.
search("food")
# Map the returned ids back to full documents for display.
search += documents
search("food")
| docs/examples/encoder_retriever.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:old_env]
# language: python
# name: conda-env-old_env-py
# ---
# cd C:/Users/sadan/OneDrive/GitHub/PR-AAAI22-SDU-ST1-AE/results/finetuned_models/model_all_xb_v1_512/output
#clean stopword from inside the form
def clean_stops_inform(t):
    """Strip a leading 'a' and a trailing 'is' token from every long form.

    :param t: list of str, candidate long forms.
    :return: list of str, cleaned long forms (order preserved).
    """
    tokens = [form.split() for form in t]
    for ls in tokens:
        # Guard against empty token lists: the original crashed with an
        # IndexError on empty strings (and on the single-token form "a").
        if ls and ls[0] == 'a':
            del ls[0]
        if ls and ls[-1] == 'is':
            del ls[-1]
    return [' '.join(ls) for ls in tokens]
# # Adding to Constantin's function
# +
# improved function
import pandas as pd
import nltk
from nltk.corpus import stopwords

#en_stop_words = set(stopwords.words('english'))
en_stop_words = (stopwords.words('english'))
# Extra connector phrases treated as stopwords when filtering long forms.
sw = ['is a', 'and a', 'and', 'on a', 'is'] # added some other words
stop_words = en_stop_words +sw
# df = pd.read_csv('eng_scientific_dev_xb_512.tsv', sep='\t')

def string_to_list(string):
    """Parse a stringified list like "['a', 'b']" back into a list of str."""
    return [e.strip()[1:-1] for e in string[1:-1].split(",")]

def get_reduced_list(pred_list):
    """Keep only maximal long-form predictions (drop any form contained in
    another form), discard forms made solely of stopwords/short words, then
    strip leading 'a'/trailing 'is' tokens via clean_stops_inform."""
    new_lf_pred = []
    for form1 in string_to_list(pred_list):
        flag = True
        # Skip forms with no informative word left after filtering.
        if not [x for x in form1.split() if x not in stop_words and len(x) > 3]: # deleted words that are less than 3 char
            continue
        for form2 in string_to_list(pred_list):
            if form1 == form2:
                continue
            # form1 is a substring of a longer prediction -> drop it.
            if form1 in form2:
                flag = False
                break
        if flag:
            new_lf_pred.append(form1)
    return clean_stops_inform(new_lf_pred)

# NOTE(review): `data` is not defined earlier in this notebook — confirm
# which DataFrame this loop is meant to iterate.
for index, row in data.iterrows():
    print(row["long-forms-text"], "\n\t\t", row["LF_Pred"])
    print("\t\t", get_reduced_list(row["LF_Pred"]))
# -
# # Compare to Constantin function
# +
# Original (Constantin's) version, kept for comparison with the improved one
# above: same substring-based reduction, but no short-word filter and no
# leading/trailing stopword stripping.
import pandas as pd
import nltk
from nltk.corpus import stopwords

en_stop_words = set(stopwords.words('english'))
#df = pd.read_csv('eng_scientific_dev_xb_512.tsv', sep='\t')

def string_to_list(string):
    """Parse a stringified list like "['a', 'b']" back into a list of str."""
    return [e.strip()[1:-1] for e in string[1:-1].split(",")]

def get_reduced_list(pred_list):
    """Keep only maximal long-form predictions that contain at least one
    non-stopword token."""
    new_lf_pred = []
    for form1 in string_to_list(pred_list):
        flag = True
        if not [x for x in form1.split() if x not in en_stop_words]:
            continue
        for form2 in string_to_list(pred_list):
            if form1 == form2:
                continue
            if form1 in form2:
                flag = False
                break
        if flag:
            new_lf_pred.append(form1)
    return new_lf_pred

# NOTE(review): `data` is not defined earlier in this notebook — confirm
# which DataFrame this loop is meant to iterate.
for index, row in data.iterrows():
    print(row["long-forms-text"], "\n\t\t", row["LF_Pred"])
    print("\t\t", get_reduced_list(row["LF_Pred"]))
# -
# # Applying function to the data
# !mkdir cleaned_longform
# cd cleaned_longform/
# Load the per-language dev predictions (tab-separated despite the .csv
# extension).
eng_leg = pd.read_csv('eng_legal_dev_xb_512.csv', sep='\t')
dan = pd.read_csv('dan_dev_xb_512.csv', sep='\t')
esp = pd.read_csv('esp_dev_xb_512.csv', sep='\t')
fre = pd.read_csv('fre_dev_xb_512.csv', sep='\t')
per = pd.read_csv('per_dev_xb_512.csv', sep='\t')
vie = pd.read_csv('vie_dev_xb_512.csv', sep='\t')
# cd cleaned_longform/
# NOTE(review): `clean_df` is not defined anywhere in this notebook —
# presumably it lives in another file; confirm before running.
clean_df(eng_leg,'cl_eng_leg_dev')
clean_df(dan,'cl_dan_dev')
clean_df(esp,'cl_esp_dev')
clean_df(fre,'cl_fre_dev')
clean_df(per,'cl_per_dev')
clean_df(vie,'cl_vie_dev')
# # creating Jsons with the new indices
# Apply the long-form reduction to every row; the result goes into `cl_lf`.
data['cl_lf'] = data['LF_Pred'].apply(get_reduced_list)
data.head(10)
# # Get indices for long forms
def get_index(keywords,texts):
    """Locate each keyword's first case-insensitive occurrence in each text.

    :param keywords: list of str, phrases to look for.
    :param texts: list of str, passages to scan.
    :return: flat list of [start, end] character spans; a keyword absent
             from a passage contributes nothing for that passage.
    """
    spans = []
    for passage in texts:
        lowered = passage.lower()
        for kw in keywords:
            start = lowered.find(kw.lower())  # -1 when the keyword is absent
            if start == -1:
                continue
            spans.append([start, start + len(kw)])
    return spans
# Wrap each text in a one-element list because get_index expects a list of
# texts per call.
tx = [[t] for t in data.text.to_list()]
tx[10]
lf = data.cl_lf.tolist()
# +
# Compute [start, end] spans of each cleaned long form inside its own text.
indeces = []
for l , t in zip(lf,tx):
    indeces.append(get_index(l,t))
# -
data['cl_lf_ind'] = indeces
data.head(10)
data['ID']= data['ID'].astype(str)
# Keep only the columns the scorer expects and rename to its schema.
# NOTE(review): renaming with inplace=True on a column-selection of `data`
# may raise a SettingWithCopyWarning — confirm the rename sticks.
newdata = data[['text','AN_Pred_idxs','cl_lf_ind','ID']]
newdata.rename(columns={'AN_Pred_idxs':'acronyms', 'cl_lf_ind':'long-forms'}, inplace=True)
newdata.head()
# NOTE(review): `literal_eval` is used but never imported in this notebook —
# it needs `from ast import literal_eval`.
newdata['acronyms'] = newdata['acronyms'].apply(literal_eval)
# load and then parse and then save
import json
jsond = newdata.to_json( orient='records', indent=4)
parsed = json.loads(jsond)
# parsed
# Write the records as a pretty-printed JSON file for the scorer.
with open('cleaneng_sciwithnewind.json', 'w') as fout:
    json.dump(parsed , fout,indent=4,)
# !python scorer.py -g "data/english/scientific/dev.json" -p cleaneng_sciwithnewind.json -v
# # JASON FOR OTHER FILES
# cd results/finetuned_models/model_all_xb_v1_512/output/cleaned_longform
# +
# Reload the cleaned per-language files produced by clean_df above.
eng_leg = pd.read_csv('cl_eng_leg_dev.csv')
dan = pd.read_csv('cl_dan_dev.csv')
esp = pd.read_csv('cl_esp_dev.csv')
fre = pd.read_csv('cl_fre_dev.csv')
per = pd.read_csv('cl_per_dev.csv')
vie = pd.read_csv('cl_vie_dev.csv')
# +
# not sure how the cleaning went with other languages needs checking
# not sure about the indices
# -
eng_leg.head()
# +
def process_df (df):
    """Re-index cleaned long forms and reshape `df` into the scorer schema.

    Expects columns 'lf_cleaned' and 'AN_Pred_idxs' (stringified lists),
    'ID', and 'text'.  Returns a frame with columns
    ['text', 'acronyms', 'long-forms', 'ID'].

    NOTE(review): relies on `literal_eval` (never imported in this notebook)
    and on `get_index` defined above.
    """
    df['lf_cleaned'] = df['lf_cleaned'].apply(literal_eval)
    df['AN_Pred_idxs'] = df['AN_Pred_idxs'].apply(literal_eval)
    df['ID'] = df['ID'].astype(str)
    # get_index expects one list of texts per call, hence the wrapping.
    tx = [[t] for t in df.text.to_list()]
    lf = df.lf_cleaned.tolist()
    indeces = []
    for l , t in zip(lf,tx):
        indeces.append(get_index(l,t))
    df['cl_lf_ind'] = indeces
    newdf = df[['text','AN_Pred_idxs','cl_lf_ind','ID']]
    newdf.rename(columns={'AN_Pred_idxs':'acronyms', 'cl_lf_ind':'long-forms'}, inplace=True)
    # newdf['acronyms'] = newdf['acronyms'].apply(literal_eval)
    # newdf['long-forms'] = newdf['long-forms'].apply(literal_eval)
    # newdf['ID'] = newdf['ID'].astype(str)
    return newdf
# -
# Process the English legal split and dump it for the scorer.
eng_leg_dfn = process_df(eng_leg)
eng_leg_dfn.head()
eng_leg_dfn['ID']=eng_leg_dfn['ID'].astype(str)
eng_leg_dfn.info()
jsond = eng_leg_dfn.to_json( orient='records', indent=4)
parsed = json.loads(jsond)
# parsed
with open('test_legal2.json', 'w') as fout:
    json.dump(parsed , fout,indent=4,)
# NOTE(review): `pwd` is an IPython magic — only valid inside a notebook.
pwd
# cd ..
# !python scorer.py -g "data/english/legal/dev.json" -p test_legal2.json -v
# Same processing for the remaining languages.
dan_df = process_df(dan)
esp_df = process_df(esp)
fre_df = process_df(fre)
per_df = process_df(per)
vie_df = process_df(vie)
# cd results/finetuned_models/model_all_xb_v1_512/output/cleaned_longform
jsond = vie_df.to_json( orient='records', indent=4)
parsed = json.loads(jsond)
# parsed
with open('cl_vie_dev.json', 'w') as fout:
    json.dump(parsed , fout,indent=4,)
# ls
| cleaning_lf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# default_exp hagerstrand
# -
# # hagerstrand
#
# > API details.
#hide
from nbdev.showdoc import *
# export
import sys
from random import randint
from random import uniform
import numpy as np
from scipy.spatial.distance import cdist
from skimage import data, io, filters
# spatial_diffusion()/random_diffusion() recurse once per iteration, so the
# default recursion limit is raised for long simulations.
sys.setrecursionlimit(11500)
# ## Diffusion
#
# Description....
# export
class Diffusion(object):
    """Base class for Hägerstrand-style diffusion simulations.

    Holds the shared state (population per cell, MIF, adopter bookkeeping);
    the simulation space itself is created by subclasses.

    :param mif_size: int, side of the square MIF (Mean Information Field).
    :param pob: int, population in each cell.
    :param initial_diff: list of (int, int), coordinates of initial diffusers.
    :param p0: float, probability of self-diffusion (the MIF's center cell).
    :param max_iter: int, maximum number of iterations.
    """

    def __init__(self, mif_size=5, pob=20, initial_diff=[(50, 50)],
                 p0=0.3, max_iter=15):
        self._pob = pob
        self._p0 = p0
        self.max_iter = max_iter
        self.mif_size = mif_size  # (was assigned twice in the original; duplicate removed)
        self.iteration = 0
        self._infected_pop = []   # list of (flat cell index, settler index)
        self._tmp_adopted = []    # adopters picked up during one iteration
        self._clean = False       # True once a simulation has produced results
        self._initial_diff = initial_diff
        self.time_series = []     # number of new adopters per iteration

    def initialize_mif(self, mif_size):
        """Build the MIF as a flattened cumulative probability distribution."""
        x = np.linspace(0.5, mif_size - 0.5, mif_size)
        y = np.linspace(0.5, mif_size - 0.5, mif_size)
        xv, yv = np.meshgrid(x, y)
        points = np.array(list(zip(np.ravel(xv), np.ravel(yv))))
        center = np.array([[mif_size / 2 + 0.5, mif_size / 2 + 0.5]])
        dist = cdist(center, points)
        dist = dist / np.sum(dist)
        # The center cell has to be overwritten so it carries the user's p0.
        # NOTE(review): for mif_size=5 the cell centers run 0.5..4.5, so the
        # geometric middle is 2.5, while mif_size/2 + 0.5 = 3.0; the reference
        # point (and the [3, 3] overwrite below) look shifted by one cell —
        # confirm against the intended Hägerstrand MIF.
        dist.reshape(mif_size, mif_size)[int(mif_size / 2 + 0.5), int(mif_size / 2 + 0.5)] = self._p0
        dist = dist / np.sum(dist)
        return np.cumsum(dist)

    def _mif2delta(self, index):
        """Return the (row, col) offset inside the MIF for a flat index."""
        return np.unravel_index(index, (self.mif_size, self.mif_size))

    def _select_from_mif(self):
        """Sample a MIF cell by inverse-transform sampling on the cumsum."""
        rnd = uniform(0, 1)
        index = np.nonzero(self._mif > rnd)[0][0]
        return self._mif2delta(index)

    def _clean_adopters(self):
        """Reset all adopter bookkeeping before a new simulation run."""
        self._infected_pop = []
        self._tmp_adopted = []
        self._pop_array = np.zeros((len(np.ravel(self.space)), self._pob),
                                   dtype=bool)
        self.time_series = []
        for c in self._initial_diff:
            # NOTE(review): `self.space` and `_space2pop_index` are provided
            # by subclasses such as SimpleDiffusion.
            self.space[c[0], c[1]] = 1
            # We also modify the original settlers:
            index = self._space2pop_index(c)
            self._pop_array[index][0] = True
            self._infected_pop.append((index, 0))
        self._clean = False
# ## SimpleDiffusion
#
# Describe....
# export
class SimpleDiffusion(Diffusion):
"""Simple model of spatial diffusion based on Hägerstrand.
1.- Homogeneous and isotropic space
2.- A single initial diffuser
3.- ....other assumptions...
:param N: int Number of rows in simulation space.
:param M: int Number of columns in simulation space.
:param mif_size: int MIF matrix (square) size (must be non).
:param pob: int population in each cell.
:param initial_diff: [(int,int)] Coordinate list of start diffusers.
:param p0: float Probability of self-diffusion.
:param max_iter: int Maximum number of iterations.
:attribute space: np.array(M,N,dtype=int) Available space.
:attribute _pop_array: np.array(M*N,pob,dtype=bool) array of population in each cell
:attribute _infected_pop: list (space_idx,int) List of the adopting cell indices.
The first entry is the flattened index of the cell
in the space array and the second is the number of
the settler in pop_array. That is, the list of addresses
of each infected resident.
:attribute results: np.array((M,N,max_iter)) Save the results of each iteration.
:attribute time_series: list int Propagations for each iteration.
:attribute _clean: bool Indicates if we have saved results.
"""
def __init__(self,N=100,M=100,mif_size=5,pob=20,initial_diff=[(50,50)],
p0=0.3, max_iter=15):
super().__init__(mif_size, pob, initial_diff, p0, max_iter)
# super(SimpleDiffusion,self).__init__(mif_size,pob,initial_diff,
# p0, max_iter)
self.M = M
self.N = N
self.space = np.zeros((self.N,self.M),dtype=int)
self._pop_array = np.zeros((len(np.ravel(self.space)),pob),
dtype=bool)
self.result = np.zeros((M,N,max_iter),dtype=int)
for c in initial_diff:
if c[0] > M or c[1] > N:
raise ValueError("The coordinates on the starting difusors do not belong to the space")
#Modificamos también a los pobladores originales:
index = self._space2pop_index(c)
self._pop_array[index][0] = True
self._infected_pop.append((index,0))
if self.mif_size%2 == 0:
raise ValueError("MIF size must be non")
else:
self._mif = self.initialize_mif(self.mif_size)
def initialize_mif(self,mif_size):
return super(SimpleDiffusion,self).initialize_mif(self.mif_size)
def _propagate(self,pob_adress):
"""It propagates towards the inhabitant in pob_adress if it is non-adopter.
:param pob_adress: (int,int) the address of the inhabitant to propagate.
The first entry is the index (flattened) in space and
the second is the number of the settler in the cell
"""
#checo si es no-adoptante
if self._pop_array[pob_adress[0]][pob_adress[1]] == False:
self._pop_array[pob_adress[0]][pob_adress[1]] = True
self._tmp_adopted.append(pob_adress)
#print "infecté al " + str(pob_adress)
else:
pass
def _space2pop_index(self,index):
"""Transform the index of space into the index of the pop_array.
:param index (int,int) the index to transform
"""
# print(type(index), index)
return np.ravel_multi_index(index,dims=(self.M,self.N))
def _pop2space_index(self,index):
"""Return the tuple (i,j) that corresponds to the flattened index."""
return np.unravel_index(index, (self.M,self.N))
def _mif2delta(self,index):
"""Returns a tuple with the increments to get to the propagated frame."""
return super(SimpleDiffusion,self)._mif2delta(index)
def _random_adress(self):
"""Returns a random address (pob_adress)."""
return (randint(0,(self.M*self.N) - 1),randint(0,self._pob - 1))
def _select_from_mif(self):
"""Returns an address (pob_adress) from the MIF."""
return super(SimpleDiffusion,self)._select_from_mif()
def _get_propagation_adress(self,adress):
"""Returns a pop_adress address propagated by the MIF"""
#print "Propagó: " + str(adress)
delta = self._select_from_mif()
delta = (delta[0] - int(self.mif_size/2+0.5),delta[1] - int(self.mif_size/2+0.5))
space_adress = self._pop2space_index(adress[0])
prop_space_adress = (space_adress[0] + delta[0],
space_adress[1] + delta[1])
try:
habitant = randint(0,self._pob - 1)
return (self._space2pop_index(prop_space_adress),habitant)
except ValueError:
return self._get_propagation_adress(adress)
def _clean_adopters(self):
"""Clean and initialize before a new simulation."""
return super(SimpleDiffusion,self)._clean_adopters()
def spatial_diffusion(self):
    """Propagate the Hagerstrand way, one recursive step per iteration.

    Runs until either ``max_iter`` iterations have executed or every
    inhabitant has adopted, then prints a summary, resets the counters and
    returns None.
    """
    # If we already have results, we must clean and initialize
    if self._clean:
        self._clean_adopters()
    # BUG FIX: the original tested `iteration == (max_iter or saturation)`.
    # With a truthy max_iter the `or` short-circuits, so the population-
    # saturation condition was never evaluated.
    if (self.iteration == self.max_iter or
            np.sum(self._pop_array) >= self.M * self.N * self._pob):
        print("finished")
        print("There are %i adopters out of a total of %i inhabitants" \
              % (np.sum(self._pop_array), self.M * self.N * self._pob))
        print("The total number of iterations performed is: %i" % self.iteration)
        self.iteration = 0
        self._clean = True
        return None
    else:
        # Every current adopter attempts to infect one MIF-drawn neighbour.
        for adress in self._infected_pop:
            propagated_adress = self._get_propagation_adress(adress)
            self._propagate(propagated_adress)
        self._infected_pop.extend(self._tmp_adopted)
        # Snapshot the per-cell adopter counts for this iteration.
        self.result[:, :, self.iteration] = np.sum(self._pop_array,
                                                   axis=1).reshape(self.M, self.N)
        self.time_series.append(len(self._tmp_adopted))
        self.iteration += 1
        self._tmp_adopted = []
        return self.spatial_diffusion()
def random_diffusion(self):
    """Randomly propagates in space, one recursive step per iteration.

    Each adopter infects one uniformly random inhabitant per iteration;
    terminates after ``max_iter`` iterations or full saturation, printing a
    summary and resetting counters before returning None.
    """
    # If we already have results, we must clean and initialize
    if self._clean:
        self._clean_adopters()
    # BUG FIX: was `iteration == (max_iter or saturation)`; `or` short-
    # circuited on a truthy max_iter so saturation was never checked.
    if (self.iteration == self.max_iter or
            np.sum(self._pop_array) >= self.M * self.N * self._pob):
        print("finished")
        print("There are %i adopters out of a total of %i inhabitants" \
              % (np.sum(self._pop_array), self.M * self.N * self._pob))
        print("The total number of iterations performed is: %i" % self.iteration)
        self.iteration = 0
        self._clean = True
        return None
    else:
        for adress in self._infected_pop:
            rand_adress = self._random_adress()
            if adress == rand_adress:
                # TODO: a single redraw can still return the same address
                rand_adress = self._random_adress()
            self._propagate(rand_adress)
        self._infected_pop.extend(self._tmp_adopted)
        self.result[:, :, self.iteration] = np.sum(self._pop_array,
                                                   axis=1).reshape(self.M, self.N)
        self.time_series.append(len(self._tmp_adopted))
        self.iteration += 1
        self._tmp_adopted = []
        return self.random_diffusion()
def mixed_diffusion(self, proportion=0.5):
    """Mix the two types of diffusion.

    In each iteration every adopter is randomly assigned, according to
    ``proportion``, to diffuse spatially (via the MIF) or randomly.

    :param proportion: float Proportion of adopters who diffuse spatially.
    :raises ValueError: if ``proportion`` is outside [0, 1].
    """
    if proportion < 0 or proportion > 1:
        raise ValueError("The proportion must be between 0 and 1.")
    # If we already have results, we must clean and initialize
    if self._clean:
        self._clean_adopters()
    # BUG FIX: was `iteration == (max_iter or saturation)`; `or` short-
    # circuited on a truthy max_iter so saturation was never checked.
    if (self.iteration == self.max_iter or
            np.sum(self._pop_array) >= self.M * self.N * self._pob):
        print("finished")
        print("There are %i adopters out of a total of %i inhabitants" \
              % (np.sum(self._pop_array), self.M * self.N * self._pob))
        print("The total number of iterations performed is: %i" % self.iteration)
        self.iteration = 0
        self._clean = True
        return None
    else:
        for adress in self._infected_pop:
            rnd = uniform(0, 1)
            if rnd <= proportion:
                # Spatial (MIF-driven) propagation
                propagated_adress = self._get_propagation_adress(adress)
                self._propagate(propagated_adress)
            else:
                # Random propagation anywhere in the grid
                rand_adress = self._random_adress()
                if adress == rand_adress:
                    # TODO: a single redraw can still return the same address
                    rand_adress = self._random_adress()
                self._propagate(rand_adress)
        self._infected_pop.extend(self._tmp_adopted)
        self.result[:, :, self.iteration] = np.sum(self._pop_array,
                                                   axis=1).reshape(self.M, self.N)
        self.time_series.append(len(self._tmp_adopted))
        self.iteration += 1
        self._tmp_adopted = []
        return self.mixed_diffusion(proportion)
# Smoke-run: 50x50 grid, MIF size 9, 20 inhabitants per cell, one initial
# diffuser at (20,20), p0=0.3, 15 iterations; run both diffusion variants.
s = SimpleDiffusion(50,50,9,20,[(20,20)],0.3,15)
s.spatial_diffusion()
s.random_diffusion()
# ## Advanced Diffusion
#
# This part of the code defines a class based on the Hägerstrand model that inherits shared behaviour from SimpleDiffusion; it simulates how a number of adopters emerge from the total inhabitants of a heterogeneous space.
# export
class AdvancedDiffusion(Diffusion):
    """Hägerstrand-based spatial diffusion model, with heterogeneous space.

    1.- Isotropic space
    2.- A single initial diffuser
    3.- .... Other assumptions ...

    :param N: int Number of rows and columns in the simulation space.
    :param mif_size: int MIF matrix size (square) (must be odd).
    :param pob: int maximum population at each cell.
    :param density: int Number of initial population nuclei.
    :param amplitud: float Gaussian filter width to blur the population.
    :param initial_diff: [(int,int)] Coordinate list of start diffusers
    :param p0: float Auto-difussion probability
    :param max_iter: int Maximum number of iterations
    :attribute space: np.array(N,N,dtype=int) Available space
    :attribute _pop_array: np.array(N*N,pob,dtype=bool) inhabitants per cell
    :attribute _infected_pop: list (space_idx,int) List of adopter addresses.
        The first entry is the flattened index of the cell in the space matrix
        and the second is the number of the settler in pop_array.
    :attribute result: np.array((N,N,max_iter)) Saved result of each iteration.
    :attribute time_series: list int Propagation count of each iteration.
    :attribute _clean: bool Indicates if there are saved results.
    """

    def __init__(self, N=100, mif_size=5, pob=20, initial_diff=None,
                 p0=0.3, max_iter=25, density=20, amplitud=4.0):
        # Avoid the mutable default argument; None means the historical
        # default of a single diffuser at (50, 50).
        if initial_diff is None:
            initial_diff = [(50, 50)]
        super(AdvancedDiffusion, self).__init__(mif_size, pob, initial_diff,
                                                p0, max_iter)
        self.N = N
        self.density = density
        self.amplitud = amplitud
        # Scatter density**2 random population nuclei over the grid and blur
        # them into a smooth heterogeneous population surface.
        self.space = np.zeros((self.N, self.N), dtype=int)
        points = self.N * np.random.random((2, self.density ** 2))
        self.space[(points[0]).astype(int), (points[1]).astype(int)] = 1
        self.space = filters.gaussian(self.space,
                                      sigma=self.N / (self.amplitud * self.density))
        # We rescale to the value of the maximum pop and convert to integer:
        self.space *= self._pob / self.space.max()
        self.space = self.space.astype(int)
        self._pop_array = np.zeros((len(np.ravel(self.space)), self._pob),
                                   dtype=bool)
        self.result = np.zeros((self.N, self.N, max_iter), dtype=int)
        for c in initial_diff:
            # BUG FIX: row/column index N itself is already out of bounds,
            # so reject it too (was `>`).
            if c[0] >= self.N or c[1] >= self.N:
                raise ValueError("Coordinates of initial diffusers do not fall in space")
            # Mark the original settlers as adopters:
            index = self._space2pop_index(c)
            self._pop_array[index][0] = True
            self._infected_pop.append((index, 0))
        if self.mif_size % 2 == 0:
            raise ValueError("MIF size must be odd")
        else:
            self._mif = self.initialize_mif(self.mif_size)

    def _space2pop_index(self, index):
        """Transform the index of space into the index of the pop_array.

        :param index: (int, int) index to transform
        """
        return np.ravel_multi_index(index, dims=(self.N, self.N))

    def _pop2space_index(self, index):
        """Returns the tuple (i, j) that corresponds to the flattened index."""
        return np.unravel_index(index, (self.N, self.N))

    def _mif2delta(self, index):
        """Returns the increments to get to the propagated frame (base impl)."""
        return super(AdvancedDiffusion, self)._mif2delta(index)

    def _select_from_mif(self):
        """Returns an address (pob_adress) from the MIF (base impl)."""
        return super(AdvancedDiffusion, self)._select_from_mif()

    def _random_adress(self):
        """Returns a random address (pob_adress).

        BUG FIX: blurred cells can have capacity 0, which made
        ``randint(0, -1)`` raise an uncaught ValueError; redraw instead.
        """
        i = randint(0, self.N - 1)
        j = randint(0, self.N - 1)
        try:
            habitant = randint(0, self.space[i, j] - 1)
        except ValueError:
            # Empty cell: draw another one.
            return self._random_adress()
        return (self._space2pop_index((i, j)), habitant)

    def _get_propagation_adress(self, adress):
        """Returns an address propagated from the MIF (pop_adress)."""
        delta = self._select_from_mif()
        # Re-center the MIF draw so deltas are offsets around the source cell.
        delta = (delta[0] - int(self.mif_size/2+0.5),
                 delta[1] - int(self.mif_size/2+0.5))
        space_adress = self._pop2space_index(adress[0])
        prop_space_adress = (space_adress[0] + delta[0],
                             space_adress[1] + delta[1])
        try:
            habitant = randint(0, self.space[prop_space_adress[0], prop_space_adress[1]])
            return (self._space2pop_index(prop_space_adress), habitant)
        except ValueError:
            # Out-of-bounds or empty target cell: redraw recursively.
            return self._get_propagation_adress(adress)

    def _propagate(self, pob_adress):
        """Propagates to the inhabitant in pob_adress if it is a non-adopter.

        :param pob_adress: (int,int) The address of the inhabitant to
            propagate: flattened cell index, then settler slot in the cell.
        """
        try:
            if self._pop_array[pob_adress[0]][pob_adress[1]] == False:
                self._pop_array[pob_adress[0]][pob_adress[1]] = True
                self._tmp_adopted.append(pob_adress)
            else:
                pass
        except IndexError:
            # The habitant slot exceeds the cell capacity: ignore the attempt.
            pass

    def _clean_adopters(self):
        """Clean and initialize before starting a new simulation (base impl)."""
        return super(AdvancedDiffusion, self)._clean_adopters()

    def spatial_diffusion(self):
        """Propagates Hagerstrand-like until max_iter or saturation."""
        # If we have results already, we must clean and initialize
        if self._clean:
            self._clean_adopters()
        # BUG FIX: was `iteration == (max_iter or saturation)`, which never
        # evaluated the saturation term; it also referenced self.M, an
        # attribute this class does not define (SimpleDiffusion-only).
        if (self.iteration == self.max_iter or
                np.sum(self._pop_array) >= self.N * self.N * self._pob):
            print("Done")
            print("There are %i adoptants from a total of %i inhabitants" \
                  % (np.sum(self._pop_array), self.N * self.N * self._pob))
            print("The total number of iterations performed is %i" % self.iteration)
            self.iteration = 0
            self._clean = True
            return None
        else:
            for adress in self._infected_pop:
                propagated_adress = self._get_propagation_adress(adress)
                self._propagate(propagated_adress)
            self._infected_pop.extend(self._tmp_adopted)
            self.result[:, :, self.iteration] = np.sum(self._pop_array,
                                                       axis=1).reshape(self.N, self.N)
            self.time_series.append(len(self._tmp_adopted))
            self.iteration += 1
            self._tmp_adopted = []
            return self.spatial_diffusion()

    def random_diffusion(self):
        """Propagates randomly in space until max_iter or saturation."""
        # If we already have results, we must clean and initialize
        if self._clean:
            self._clean_adopters()
        # BUG FIX: same termination-precedence bug as spatial_diffusion.
        if (self.iteration == self.max_iter or
                np.sum(self._pop_array) >= self.N * self.N * self._pob):
            print("Done")
            print("There are %i adoptants from a total of %i inhabitantes" \
                  % (np.sum(self._pop_array), self.N * self.N * self._pob))
            print("The total number of iterations performed is %i" % self.iteration)
            self.iteration = 0
            self._clean = True
            return None
        else:
            for adress in self._infected_pop:
                rand_adress = self._random_adress()
                if adress == rand_adress:
                    # TODO: a single redraw can still return the same address
                    rand_adress = self._random_adress()
                self._propagate(rand_adress)
            self._infected_pop.extend(self._tmp_adopted)
            self.result[:, :, self.iteration] = np.sum(self._pop_array,
                                                       axis=1).reshape(self.N, self.N)
            self.time_series.append(len(self._tmp_adopted))
            self.iteration += 1
            self._tmp_adopted = []
            return self.random_diffusion()
# Smoke-run: 100x100 heterogeneous grid, MIF size 5, up to 25 inhabitants per
# cell, one diffuser at (50,50), p0=0.3, 25 iterations, density 25, width 2.8.
ad = AdvancedDiffusion(100,5,25,[(50,50)],0.3,25,25,2.8)
ad.spatial_diffusion()
| 00_hagerstrand.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %autosave 0
# + code_folding=[0]
#Import Packages
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import interp
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import log_loss
from sklearn.metrics import precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.model_selection import KFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeavePOut
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import validation_curve
from numpy import random
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.tree import DecisionTreeClassifier
# + code_folding=[0]
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        # Divide each row by its total so cells become per-class rates.
        row_totals = cm.sum(axis=1)[:, np.newaxis]
        cm = cm.astype('float') / row_totals
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    positions = np.arange(len(classes))
    plt.xticks(positions, classes, rotation=45)
    plt.yticks(positions, classes)

    cell_fmt = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    # Annotate every cell, flipping the text colour on dark backgrounds.
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(col, row, format(cm[row, col], cell_fmt),
                 horizontalalignment="center",
                 color="white" if cm[row, col] > cutoff else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
# + code_folding=[0]
def printcfm(y_test, y_pred, title='confusion matrix'):
    """Compute and plot the non-normalized confusion matrix for a prediction."""
    matrix = confusion_matrix(y_test, y_pred)
    np.set_printoptions(precision=2)
    # Plot non-normalized confusion matrix
    plt.figure()
    plot_confusion_matrix(matrix, classes=['Sem Perda','Perda'],
                          title=title)
# + code_folding=[0]
def plotRoc(y_real, y_pred_prob):
    """Plot the ROC curve for predicted probabilities and annotate its AUC."""
    false_pos, true_pos, _thresholds = roc_curve(y_real, y_pred_prob)
    area = roc_auc_score(y_real, y_pred_prob)
    # Dashed diagonal = performance of a random classifier.
    plt.plot([0, 1], [0, 1], 'k--')
    plt.plot(false_pos, true_pos)
    plt.text(1, 0.5, "AUC: %3.3f" % (area), {'color': 'C2', 'fontsize': 18}, va="center", ha="right")
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve')
    plt.show()
# + code_folding=[0]
# Set pandas display options so wide/long frames render fully in the notebook
pd.options.display.max_rows=350
pd.options.display.max_columns=60
# + code_folding=[0]
# df=pd.read_csv('baseProjeto_over.csv', index_col=0)
# df
#df.columns
# X=df[['ATRIB_MAX1',
# 'ATRIB_DIST1', 'DIFP', 'MGP1', 'MGP2', 'MGP3', 'MGP4', 'MGP5', 'MGP6',
# 'MGP7', 'MGP8', 'MGP9', 'MGP10', 'MGP11', 'MGP12', 'MGP13', 'MGP14']]
# X.head()
# X.info()
# #cat=['MGP1_sim', 'MGP2_sim', 'MGP3_sim', 'MGP4_sim',
# 'MGP5_sim', 'MGP6_sim', 'MGP7_sim', 'MGP8_sim', 'MGP9_sim', 'MGP10_sim',
# 'MGP11_sim', 'MGP12_sim', 'MGP13_sim', 'MGP14_sim',]
# #X[cat] = X[cat].astype('category')
# X.info()
# y = df['Perda30']
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=42, stratify=y)
#sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
#for train_index, test_index in sss.split(X, y):
# print("TRAIN:", train_index, "TEST:", test_index)
# X_train, X_test = X[train_index], X[test_index]
# #y_train, y_test = y[train_index], y[test_index]
# + code_folding=[0]
# train=pd.read_csv('baseProjetoTrainOver.csv', index_col=0)
# test=pd.read_csv('baseProjetoTest.csv', index_col=0)
# + code_folding=[0]
# train=pd.read_csv('baseProjetoTrainOverFase1.csv', index_col=0)
# test=pd.read_csv('baseProjetoTestFase1.csv', index_col=0)
dfFase1=pd.read_csv('baseProjeto_entradaModelo_fase1.csv', index_col=0)
# X_train = dfFase1[['ATRIB_MAX1',
# 'ATRIB_DIST1', 'DIFP', 'MGP1', 'MGP2', 'MGP3', 'MGP4', 'MGP5', 'MGP6',
# 'MGP7', 'MGP8', 'MGP9', 'MGP10', 'MGP11', 'MGP12', 'MGP13', 'MGP14']]
# # X_test = dfFase1[['ATRIB_MAX1',
# # 'ATRIB_DIST1', 'DIFP', 'MGP1', 'MGP2', 'MGP3', 'MGP4', 'MGP5', 'MGP6',
# # 'MGP7', 'MGP8', 'MGP9', 'MGP10', 'MGP11', 'MGP12', 'MGP13', 'MGP14']]
# y_train = dfFase1['Perda30']
# # y_test = dfFase1['Perda30']
# +
# Using all phases data and sppliting between train and test
# # train=pd.read_csv('baseProjetoTrainOver.csv', index_col=0)
# # test=pd.read_csv('baseProjetoTest.csv', index_col=0)
# Using phase 1 data and sppliting between train and test
# Phase-1 data: oversampled training set and held-out test set
train=pd.read_csv('baseProjetoTrainOverFase1.csv', index_col=0)
test=pd.read_csv('baseProjetoTestFase1.csv', index_col=0)
# Feature columns: attribute measures plus the 14 MGP indicators
X_train = train[['ATRIB_MAX1',
'ATRIB_DIST1', 'DIFP', 'MGP1', 'MGP2', 'MGP3', 'MGP4', 'MGP5', 'MGP6',
'MGP7', 'MGP8', 'MGP9', 'MGP10', 'MGP11', 'MGP12', 'MGP13', 'MGP14']]
X_test = test[['ATRIB_MAX1',
'ATRIB_DIST1', 'DIFP', 'MGP1', 'MGP2', 'MGP3', 'MGP4', 'MGP5', 'MGP6',
'MGP7', 'MGP8', 'MGP9', 'MGP10', 'MGP11', 'MGP12', 'MGP13', 'MGP14']]
# Binary target column
y_test = test['Perda30']
y_train = train['Perda30']
# -
X_train = X_train.reset_index(drop=True)
y_train = y_train.reset_index(drop=True)
# Class-balance bar plots (train should look balanced after oversampling)
y_train.value_counts().plot(kind='bar', title='Count (Perda30)');
y_test.value_counts().plot(kind='bar', title='Count (Perda30)');
# <br>
# ## Neural Network - Scaled with StandardScaller
# StandardScaler + MLP pipeline; scaling first keeps the network well-conditioned
steps = [('scaler', StandardScaler()),(('neural', MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(50, 32), random_state=42, max_iter=500, warm_start=True)))]
pipeline = Pipeline(steps)
# hidden_layer_sizes=(n1, n2,..., nx) <br>
# n1 = number of neurons in hidden layer_1 <br>
# nx = number of neurons in hidden layer_x <br>
neural_scaled = pipeline.fit(X_train, y_train)
# +
#for i in range(len(y_pred)):
# print(y_pred_prob[i],y_pred[i])
# +
# cv_scores = cross_val_score(pipeline, X, y, cv=5)
# +
# print(cv_scores)
# print("Average 5-Fold CV Score: {}".format(np.mean(cv_scores)))
# -
# <br>
# ## Neural Network - Scaled with MinMaxScaller
# MinMaxScaler + MLP pipeline (rebinds `pipeline`, replacing the scaled one)
steps = [('scaler', MinMaxScaler()),(('neural', MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(50, 32), random_state=42, max_iter=1000, warm_start=True)))]
pipeline = Pipeline(steps)
# hidden_layer_sizes=(n1, n2,..., nx) <br>
# n1 = number of neurons in hidden layer_1 <br>
# nx = number of neurons in hidden layer_x <br>
#neural_scaled = pipeline.fit(X_train, y_train)
pipeline.fit(X_train, y_train)
# NOTE(review): this prints the StandardScaler pipeline fitted earlier, not the
# MinMax pipeline just fitted — confirm which model was meant to be displayed.
print(neural_scaled)
y_pred = pipeline.predict(X_train)
accuracy_score(y_train, y_pred)
y_pred_prob = pipeline.predict_proba(X_train)[:,1]
# Out-of-fold scores/predictions via 5-fold CV on the training set
y_scores = cross_val_predict(pipeline, X_train, y_train, cv=5, method='predict_proba' )
y_train_pred = cross_val_predict(pipeline, X_train, y_train, cv=5)
# hack to work around issue #9589 in Scikit-Learn 0.19.0
if y_scores.ndim == 2:
    y_scores = y_scores[:, 1]
# print(y_scores)
# print(np.mean(y_scores))
# +
# for i in range(len(y_pred)):
#     print(y_pred_prob[i],y_pred[i], y_scores[i])
# -
plotRoc(y_train, y_scores)
# NOTE(review): both arguments here are predictions (CV vs. in-sample), not the
# ground truth `y_train` — verify this comparison is intentional.
printcfm(y_train_pred, y_pred, title='confusion matrix')
print(classification_report(y_train_pred, y_pred))
# +
# cv_scores = cross_val_score(pipeline, X_train, y_train, cv=5)
# print(cv_scores)
# -
# ## Fine-tunning the model.
# To turn on Fine-tunning: <br>
# define ft = 1
ft = 0
# ### 1 - Grid Search
# Grid search over solver / alpha / layer sizes, gated by the `ft` flag
if ft == 1 :
    rn = MLPClassifier(max_iter=1000, random_state=42)
    parameters = {'solver': ['lbfgs','adam','sgd'], 'alpha': 10.0 ** -np.arange(1, 7),
                  'hidden_layer_sizes': [x for x in itertools.product((5,10,20,30,60,100),repeat=3)]
                 }
    cv = GridSearchCV(rn, param_grid=parameters, verbose=3, n_jobs=-1)
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    # NOTE(review): fit_transform on the test set re-fits the scaler; using
    # scaler.transform(X_test) would avoid leaking test statistics — confirm.
    X_test_scaled = scaler.fit_transform(X_test)
    # rf.fit(X_train, y_train);
    cv.fit(X_train_scaled, y_train);
if ft == 1:
    print("Best params: ", cv.best_params_,)
    print("Best Score: %3.3f" %(cv.best_score_))
    y_pred = cv.predict(X_train_scaled)
    final_model =cv.best_estimator_
    print(final_model)
# ### Best Model Result (11/2019) - 38 Wells
# Using cross validation
#
# MLPClassifier(activation='relu', alpha=0.01, batch_size='auto', beta_1=0.9,
# beta_2=0.999, early_stopping=False, epsilon=1e-08,
# hidden_layer_sizes=(10, 30, 5), learning_rate='constant',
# learning_rate_init=0.001, max_iter=1000, momentum=0.9,
# nesterovs_momentum=True, power_t=0.5, random_state=42, shuffle=True,
# solver='adam', tol=0.0001, validation_fraction=0.1, verbose=False,
# warm_start=False)
# ### Best Model Result (11/2018) - 38 Wells
#
# MLPClassifier(activation='relu', alpha=0.001, batch_size='auto', beta_1=0.9,
# beta_2=0.999, early_stopping=False, epsilon=1e-08,
# hidden_layer_sizes=(60, 10, 30), learning_rate='constant',
# learning_rate_init=0.001, max_iter=1000, momentum=0.9,
# nesterovs_momentum=True, power_t=0.5, random_state=42, shuffle=True,
# solver='lbfgs', tol=0.0001, validation_fraction=0.1, verbose=False,
# warm_start=False)
# ### Best Model Result (11/2018) - 89 Wells
# MLPClassifier(activation='relu', alpha=0.01, batch_size='auto', beta_1=0.9,
# beta_2=0.999, early_stopping=False, epsilon=1e-08,
# hidden_layer_sizes=(30, 100, 5), learning_rate='constant',
# learning_rate_init=0.001, max_iter=1000, momentum=0.9,
# nesterovs_momentum=True, power_t=0.5, random_state=42, shuffle=True,
# solver='lbfgs', tol=0.0001, validation_fraction=0.1, verbose=False,
# warm_start=False)
# ### Best Model Result (09/2018) - 89 Wells
# MLPClassifier(activation='relu', alpha=alpha, batch_size='auto', beta_1=0.9,
# beta_2=0.999, early_stopping=False, epsilon=1e-08,
# hidden_layer_sizes=(5, 60), learning_rate='constant',
# learning_rate_init=0.001, max_iter=1000, momentum=0.9,
# nesterovs_momentum=True, power_t=0.5, random_state=42, shuffle=True,
# solver='lbfgs', tol=0.0001, validation_fraction=0.1, verbose=False,
# warm_start=False)
# ## Regularization of the best model
# # <font color = 'red'> Fill alpha value </fontcolor>
# L2 regularization strength chosen after the grid-search experiments above
alpha=0.1
# ##### Alpha is a parameter for regularization term, aka penalty term, that combats overfitting by constraining the size of the weights. Increasing alpha may fix high variance (a sign of overfitting) by encouraging smaller weights, resulting in a decision boundary plot that appears with lesser curvatures. Similarly, decreasing alpha may fix high bias (a sign of underfitting) by encouraging larger weights, potentially resulting in a more complicated decision boundary.
# <br>
steps = [('scaler', StandardScaler()),(('neural', MLPClassifier(activation='relu', alpha=alpha, batch_size='auto', beta_1=0.9,
       beta_2=0.999, early_stopping=False, epsilon=1e-08,
       hidden_layer_sizes=(50, 32), learning_rate='constant',
       learning_rate_init=0.001, max_iter=1000, momentum=0.9,
       nesterovs_momentum=True, power_t=0.5, random_state=42, shuffle=True,
       solver='adam', tol=0.0001, validation_fraction=0.1, verbose=False,
       warm_start=False)))]
pipeline = Pipeline(steps)
#neural_scaled = pipeline.fit(X_train, y_train)
pipeline.fit(X_train, y_train)
# ## Predicting the Classes in Trainning Set
y_train_pred = pipeline.predict(X_train)
y_train_prob = pipeline.predict_proba(X_train)[:,1]
acc_train = accuracy_score(y_train, y_train_pred)
# NOTE(review): auc_train is computed twice; the probability-based value two
# lines below overwrites this label-based one.
auc_train = roc_auc_score(y_train, y_train_pred)
plotRoc(y_train, y_train_prob)
auc_train = roc_auc_score(y_train, y_train_prob)
printcfm(y_train, y_train_pred, title='confusion matrix')
print(classification_report(y_train, y_train_pred))
# ## Precision/Recall Tradeoff
# +
#y_scores = cross_val_predict(pipeline, X_train, y_train, cv=3, method='predict_proba' )
# print(y_scores)
# print(np.mean(y_scores))
# +
#y_pred_prob
# +
# y_scores.shape
# +
# # hack to work around issue #9589 in Scikit-Learn 0.19.0
# if y_scores.ndim == 2:
# y_scores = y_scores[:, 1]
# -
precisions, recalls, thresholds = precision_recall_curve(y_train, y_scores)
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    """Overlay precision and recall as functions of the decision threshold."""
    # precision/recall arrays carry one extra trailing element, hence [:-1].
    for values, line_style, series_name in ((precisions[:-1], "b--", "Precision"),
                                            (recalls[:-1], "g-", "Recall")):
        plt.plot(thresholds, values, line_style, label=series_name)
    plt.xlabel("Threshold")
    plt.legend(loc="upper left")
    plt.ylim([0, 1])
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.show()
# 3-fold CV accuracy of the regularized pipeline
cv_scores = cross_val_score(pipeline, X_train, y_train, cv=3)
print(cv_scores)
print(np.mean(cv_scores))
# +
def plot_precision_vs_recall(precisions, recalls):
    """Trace precision against recall for the whole threshold sweep."""
    line_style, line_width = "b-", 2
    plt.plot(recalls, precisions, line_style, linewidth=line_width)
    plt.xlabel("Recall", fontsize=16)
    plt.ylabel("Precision", fontsize=16)
    plt.axis([0, 1.01, 0, 1])
plt.figure(figsize=(8, 6))
plot_precision_vs_recall(precisions, recalls)
plt.show()
# +
# precisions, recalls, thresholds = precision_recall_curve(y_train, y_pred_prob)
# def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
# plt.plot(thresholds, precisions[:-1], "b--", label="Precision")
# plt.plot(thresholds, recalls[:-1], "g-", label="Recall")
# plt.xlabel("Threshold")
# plt.legend(loc="upper left")
# plt.ylim([0, 1])
# plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
# plt.show()
# -
# ## Varying the Threshold for train set
predict_mine = np.where(y_train_prob > 0.5, 1, 0)
# NOTE(review): variable is named `_90` but the cutoff used is 0.5 — confirm
# which threshold was intended here.
y_train_pred_90 = (y_scores > .5)
precision = precision_score(y_train, y_train_pred_90)
recall = recall_score(y_train, y_train_pred_90)
print(precision, recall)
printcfm(y_train, predict_mine, title='confusion matrix')
print(classification_report(y_train, predict_mine))
# # Evaluating the model with Cross-Validation
y_pred_prob = pipeline.predict_proba(X_train)[:,1]
# Out-of-fold probabilities and labels, 5-fold CV
y_scores = cross_val_predict(pipeline, X_train, y_train, cv=5, verbose=3, method='predict_proba')
y_train_pred = cross_val_predict(pipeline, X_train, y_train, cv=5, verbose=3)
# hack to work around issue #9589 in Scikit-Learn 0.19.0
if y_scores.ndim == 2:
    y_scores = y_scores[:, 1]
# print(y_scores)
# print(np.mean(y_scores))
plotRoc(y_train, y_scores)
auc_cv = roc_auc_score(y_train, y_scores)
# auc_train = roc_auc_score(y_train, y_train_pred)
# auc_train
printcfm(y_train, y_train_pred, title='confusion matrix')
print(classification_report(y_train, y_train_pred))
# # Evaluating the model with LOO
loo = LeaveOneOut()
loo.get_n_splits(dfFase1)
# NOTE(review): this loop shadows the `train`/`test` DataFrames loaded earlier
# with index arrays, and prints one line per sample — confirm it is desired.
for train, test in loo.split(dfFase1):
    print("%s %s" % (train, test))
cv=loo
y_pred_prob = pipeline.predict_proba(X_train)[:,1]
y_scores = cross_val_predict(pipeline, X_train, y_train, cv=cv, verbose=10, method='predict_proba', n_jobs=-1)
y_train_pred = cross_val_predict(pipeline, X_train, y_train, cv=cv, verbose=10)
# hack to work around issue #9589 in Scikit-Learn 0.19.0
if y_scores.ndim == 2:
    y_scores = y_scores[:, 1]
# print(y_scores)
# print(np.mean(y_scores))
plotRoc(y_train, y_scores)
auc_LoO = roc_auc_score(y_train, y_scores)
auc_LoO
printcfm(y_train, y_train_pred, title='confusion matrix')
print(classification_report(y_train, y_train_pred))
# # Evaluating the model with Repeated K fold
# + code_folding=[0]
def perform_repeated_cv(X, y, model):
    """Run repeated 4-fold cross-validation and plot one ROC curve per repeat.

    :param X: pd.DataFrame feature matrix (positionally indexed like y)
    :param y: pd.Series binary target
    :param model: estimator exposing fit / predict / predict_proba
    :return: (accuracy_scores, precision_scores, recall_scores, auc_scores,
              result_pred) — the four score arrays hold one entry per
              repetition; result_pred is y concatenated with one prediction
              column per repetition.
    """
    # set random seed for repeatability (numpy RNG drives the shuffling;
    # `random` here is numpy.random, imported at the top of the file)
    random.seed(1)
    # number of repetitions of the whole CV procedure
    n_reps = 45
    accuracy_scores = np.zeros(n_reps)
    precision_scores = np.zeros(n_reps)
    recall_scores = np.zeros(n_reps)
    auc_scores = np.zeros(n_reps)
    result_pred = y
    tprs = []
    aucs = []
    mean_fpr = np.linspace(0, 1, 100)
    fig = plt.figure(figsize=(20, 10))
    for u in range(n_reps):
        # Randomly shuffle the dataset before splitting into folds.
        indices = np.arange(X.shape[0])
        np.random.shuffle(indices)
        X = X.iloc[indices]
        y = y.iloc[indices]
        # Out-of-fold predictions / probabilities for every sample.
        y_predicted = np.zeros(y.shape)
        probas = np.zeros(y.shape)
        # BUG FIX: the original passed random_state=142 without shuffle=True.
        # Older scikit-learn silently ignored it; modern versions raise a
        # ValueError. Dropping it preserves the (unshuffled) behavior.
        kf = KFold(n_splits=4)
        for train_idx, test_idx in kf.split(X):
            # Split the dataset into training and testing folds.
            fold_X_train = X.iloc[train_idx]
            fold_X_test = X.iloc[test_idx]
            fold_y_train = y.iloc[train_idx]
            clf = model
            clf.fit(fold_X_train, fold_y_train)
            # Collect out-of-fold labels and positive-class probabilities.
            y_predicted[test_idx] = clf.predict(fold_X_test)
            probas[test_idx] = clf.predict_proba(fold_X_test)[:, 1]
        df_pred = pd.DataFrame(y_predicted, index=y.index, columns=[u])
        result_pred = pd.concat([result_pred, df_pred], axis=1)
        # Compute the ROC curve for this repetition (probability-based AUC).
        fpr, tpr, thresholds = roc_curve(y, probas)
        # BUG FIX: np.interp replaces scipy.interp, removed in SciPy >= 1.6.
        tprs.append(np.interp(mean_fpr, fpr, tpr))
        tprs[-1][0] = 0.0
        roc_auc = roc_auc_score(y, probas)
        aucs.append(roc_auc)
        plt.plot(fpr, tpr, lw=1, alpha=0.3,
                 label='ROC fold %d (AUC = %0.2f)' % (u, roc_auc))
        # Record scores for this repetition.
        accuracy_scores[u] = accuracy_score(y, y_predicted)
        precision_scores[u] = precision_score(y, y_predicted)
        recall_scores[u] = recall_score(y, y_predicted)
        auc_scores[u] = roc_auc_score(y, probas)
    plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
             label='Chance', alpha=.8)
    # Mean ROC across repetitions, with a +/- 1 std. dev. band.
    mean_tpr = np.mean(tprs, axis=0)
    mean_tpr[-1] = 1.0
    mean_auc = np.mean(aucs)
    std_auc = np.std(aucs)
    plt.plot(mean_fpr, mean_tpr, color='b',
             label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
             lw=2, alpha=.8)
    std_tpr = np.std(tprs, axis=0)
    tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
    tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
    plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
                     label=r'$\pm$ 1 std. dev.')
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic example')
    plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),
               fancybox=True, shadow=True, ncol=5)
    plt.show()
    return accuracy_scores, precision_scores, recall_scores, auc_scores, result_pred
# + code_folding=[]
# Run the repeated CV defined above on the regularized pipeline
accuracy_scores, precision_scores, recall_scores, auc_scores, result_pred = perform_repeated_cv(X_train, y_train, pipeline)
# -
print(accuracy_scores, accuracy_scores.size)
print(precision_scores, recall_scores)
print(auc_scores, auc_scores.size)
# AUC per repetition
fig = plt.figure(figsize=(20, 10))
plt.plot(auc_scores, '--o')
plt.legend(loc='lower right')
plt.ylabel('AUC', fontsize=20);
plt.xlabel('Repetições', fontsize=20);
plt.tick_params(axis='both', which='major', labelsize=20);
plt.tick_params(axis='both', which='minor', labelsize=18);
#plt.xlim([0, 18])
#plt.ylim([0.5, 1])
plt.legend(('Acurácia', 'AUC'), loc='lower right', prop={'size': 20})
plt.show()
auc_scores.mean()
auc_scores.std()
# NOTE(review): label says "Accuracy" but the values printed are AUC scores.
print("Accuracy: %0.2f (+/- %0.2f)" % (np.mean(auc_scores), np.std(auc_scores)))
# +
#result_pred.to_csv('result_kfold_MLP.csv', encoding='utf-8')
# -
# # Predicting the Classes in Test Set
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_test)
y_pred_prob = pipeline.predict_proba(X_test)[:,1]
plotRoc(y_test, y_pred_prob)
auc_test = roc_auc_score(y_test, y_pred_prob)
printcfm(y_test, y_pred, title='confusion matrix')
print(classification_report(y_test, y_pred))
# + [markdown] code_folding=[]
# ## Varying the Threshold for test set
# -
# NOTE(review): a cutoff of 0.0 labels every sample positive — presumably an
# experiment with the threshold; confirm the intended value.
predict_mine = np.where(y_pred_prob > .0, 1, 0)
printcfm(y_test, predict_mine, title='confusion matrix')
print(classification_report(y_test, predict_mine))
# ## Results
print("alpha: ", alpha)
print("AUC Train: %3.3f" % (auc_train))
print("AUC Repeated k-fold: %0.2f (+/- %0.2f)" % (np.mean(auc_scores), np.std(auc_scores)))
print("AUC LoO: %3.3f" % (auc_LoO))
print("AUC test: %3.3f" % (auc_test))
print("AUC cv: %3.3f" % (auc_cv))
#print("Accuracy Train: %3.2f%%" % (acc_train*100))
#print("Accuracy Test %3.2f%%" % (acc_test*100))
# # Draft
# + code_folding=[]
# validation curve off
# Toggle: set vc = 1 to run the (slow) validation-curve cells below.
vc = 0
# -
if vc == 1:
    #X=np.concatenate((X_train_scaled,X_test_scaled),axis=0)
    # Scale training features only; the curves are computed on train data.
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    X=X_train_scaled
    # print(X)
    #y=np.append(y_train,y_test)
    y=y_train
    # print(y)
    # Candidate regularisation strengths: 10^-10 .. 10^9.
    param_range = 10.0 ** np.arange(-10, 10)
    # Same MLP configuration as the main experiment (fixed random_state).
    final_model = MLPClassifier(activation='relu', alpha=alpha, batch_size='auto', beta_1=0.9,
                                beta_2=0.999, early_stopping=False, epsilon=1e-08,
                                hidden_layer_sizes=(50, 32), learning_rate='constant',
                                learning_rate_init=0.001, max_iter=1000, momentum=0.9,
                                nesterovs_momentum=True, power_t=0.5, random_state=42, shuffle=True,
                                solver='adam', tol=0.0001, validation_fraction=0.1, verbose=False,
                                warm_start=False)
# + code_folding=[]
if vc == 1:
    # Validation curve (ROC AUC) over the alpha regularisation range.
    print(__doc__)
    plt.rcParams["figure.figsize"] = (20,10)
    plt.rcParams.update({'font.size': 20})
    plt.grid(True,which="both", linestyle='--')
    train_scores, test_scores = validation_curve(
        final_model, X, y, param_name="alpha", param_range=param_range,
        cv=10, scoring="roc_auc", n_jobs=-1)
    # Mean and spread across the 10 CV folds, one value per alpha.
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    # FIX: the title previously said "RF" although the estimator is an MLP.
    plt.title("Validation Curve with MLP")
    plt.xlabel("alpha")
    plt.ylabel("AUC")
    #plt.ylim(0.0, 1.1)
    #plt.xlim(-1, 22)
    lw = 2
    # Training vs. cross-validation score, each with a +/- 1 std band.
    plt.semilogx(param_range, train_scores_mean, label="Training score",
                 color="darkorange", lw=lw)
    plt.fill_between(param_range, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.2,
                     color="darkorange", lw=lw)
    plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
                 color="navy", lw=lw)
    plt.fill_between(param_range, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.2,
                     color="navy", lw=lw)
    plt.legend(loc="best")
    plt.show()
# -
if vc == 1:
    # Same validation curve, but scored with accuracy instead of ROC AUC.
    print(__doc__)
    plt.rcParams["figure.figsize"] = (20,10)
    plt.rcParams.update({'font.size': 20})
    plt.grid(True,which="both", linestyle='--')
    train_scores, test_scores = validation_curve(
        final_model, X, y, param_name="alpha", param_range=param_range,
        cv=10, scoring="accuracy", n_jobs=-1)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    # FIX: title said "SVC" and the x-label said gamma, but the estimator is
    # an MLP and the swept hyper-parameter is alpha.
    plt.title("Validation Curve with MLP")
    plt.xlabel("alpha")
    plt.ylabel("Accuracy")
    #plt.ylim(0.0, 1.1)
    #plt.xlim(-1, 22)
    lw = 2
    # Legend labels are Portuguese: "Treino" = training, "Validação cruzada" = CV.
    plt.semilogx(param_range, train_scores_mean, label="Treino",
                 color="darkorange", lw=lw)
    plt.fill_between(param_range, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.2,
                     color="darkorange", lw=lw)
    plt.semilogx(param_range, test_scores_mean, label="Validação cruzada",
                 color="navy", lw=lw)
    plt.fill_between(param_range, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.2,
                     color="navy", lw=lw)
    plt.legend(loc="best")
    plt.show()
# Leftover scratch expression from the draft above.
np.arange(-1, 10)
# ## Export results
export = 1
# Rebuild a frame with features + ground truth, then append the predictions.
MLP_df = pd.concat([X_test, y_test], axis=1) # features and actual
MLP_df['Predicted'] = y_pred # creates a predicted column to the complete_df, now you'll have features, actual, and predicted
MLP_df
if export == 1:
    MLP_df.to_csv('MLP_results.csv', encoding='utf-8')
| Model-Study/mlModelsMlpKFoldLooRepeated.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Precision-Recall
#
#
# Example of Precision-Recall metric to evaluate classifier output quality.
#
# Precision-Recall is a useful measure of success of prediction when the
# classes are very imbalanced. In information retrieval, precision is a
# measure of result relevancy, while recall is a measure of how many truly
# relevant results are returned.
#
# The precision-recall curve shows the tradeoff between precision and
# recall for different threshold. A high area under the curve represents
# both high recall and high precision, where high precision relates to a
# low false positive rate, and high recall relates to a low false negative
# rate. High scores for both show that the classifier is returning accurate
# results (high precision), as well as returning a majority of all positive
# results (high recall).
#
# A system with high recall but low precision returns many results, but most of
# its predicted labels are incorrect when compared to the training labels. A
# system with high precision but low recall is just the opposite, returning very
# few results, but most of its predicted labels are correct when compared to the
# training labels. An ideal system with high precision and high recall will
# return many results, with all results labeled correctly.
#
# Precision ($P$) is defined as the number of true positives ($T_p$)
# over the number of true positives plus the number of false positives
# ($F_p$).
#
# $P = \frac{T_p}{T_p+F_p}$
#
# Recall ($R$) is defined as the number of true positives ($T_p$)
# over the number of true positives plus the number of false negatives
# ($F_n$).
#
# $R = \frac{T_p}{T_p + F_n}$
#
# These quantities are also related to the ($F_1$) score, which is defined
# as the harmonic mean of precision and recall.
#
# $F1 = 2\frac{P \times R}{P+R}$
#
# Note that the precision may not decrease with recall. The
# definition of precision ($\frac{T_p}{T_p + F_p}$) shows that lowering
# the threshold of a classifier may increase the denominator, by increasing the
# number of results returned. If the threshold was previously set too high, the
# new results may all be true positives, which will increase precision. If the
# previous threshold was about right or too low, further lowering the threshold
# will introduce false positives, decreasing precision.
#
# Recall is defined as $\frac{T_p}{T_p+F_n}$, where $T_p+F_n$ does
# not depend on the classifier threshold. This means that lowering the classifier
# threshold may increase recall, by increasing the number of true positive
# results. It is also possible that lowering the threshold may leave recall
# unchanged, while the precision fluctuates.
#
# The relationship between recall and precision can be observed in the
# stairstep area of the plot - at the edges of these steps a small change
# in the threshold considerably reduces precision, with only a minor gain in
# recall.
#
# **Average precision** (AP) summarizes such a plot as the weighted mean of
# precisions achieved at each threshold, with the increase in recall from the
# previous threshold used as the weight:
#
# $\text{AP} = \sum_n (R_n - R_{n-1}) P_n$
#
# where $P_n$ and $R_n$ are the precision and recall at the
# nth threshold. A pair $(R_k, P_k)$ is referred to as an
# *operating point*.
#
# AP and the trapezoidal area under the operating points
# (:func:`sklearn.metrics.auc`) are common ways to summarize a precision-recall
# curve that lead to different results. Read more in the
# `User Guide <precision_recall_f_measure_metrics>`.
#
# Precision-recall curves are typically used in binary classification to study
# the output of a classifier. In order to extend the precision-recall curve and
# average precision to multi-class or multi-label classification, it is necessary
# to binarize the output. One curve can be drawn per label, but one can also draw
# a precision-recall curve by considering each element of the label indicator
# matrix as a binary prediction (micro-averaging).
#
# <div class="alert alert-info"><h4>Note</h4><p>See also :func:`sklearn.metrics.average_precision_score`,
# :func:`sklearn.metrics.recall_score`,
# :func:`sklearn.metrics.precision_score`,
# :func:`sklearn.metrics.f1_score`</p></div>
#
#
# In binary classification settings
# --------------------------------------------------------
#
# Create simple data
# ..................
#
# Try to differentiate the two first classes of the iris data
#
#
# +
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
import numpy as np
# Binary task: first two iris classes, padded with 200*n_features noisy
# columns so the problem is not trivially separable.
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Limit to the two first classes, and split into training and test
X_train, X_test, y_train, y_test = train_test_split(X[y < 2], y[y < 2],
                                                    test_size=.5,
                                                    random_state=random_state)
# Create a simple classifier
classifier = svm.LinearSVC(random_state=random_state)
classifier.fit(X_train, y_train)
# decision_function gives signed margins, usable as ranking scores for PR.
y_score = classifier.decision_function(X_test)
# -
# Compute the average precision score
# ...................................
#
#
# +
from sklearn.metrics import average_precision_score
average_precision = average_precision_score(y_test, y_score)
print('Average precision-recall score: {0:0.2f}'.format(
      average_precision))
# -
# Plot the Precision-Recall curve
# ................................
#
#
# +
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
from inspect import signature
precision, recall, _ = precision_recall_curve(y_test, y_score)
# In matplotlib < 1.5, plt.fill_between does not have a 'step' argument
step_kwargs = ({'step': 'post'}
               if 'step' in signature(plt.fill_between).parameters
               else {})
# Step-wise PR curve with shaded area under it.
plt.step(recall, precision, color='b', alpha=0.2,
         where='post')
plt.fill_between(recall, precision, alpha=0.2, color='b', **step_kwargs)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(
          average_precision))
# -
# In multi-label settings
# ------------------------
#
# Create multi-label data, fit, and predict
# ...........................................
#
# We create a multi-label dataset, to illustrate the precision-recall in
# multi-label settings
#
#
# +
from sklearn.preprocessing import label_binarize
# Use label_binarize to be multi-label like settings
Y = label_binarize(y, classes=[0, 1, 2])
n_classes = Y.shape[1]
# Split into training and test
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.5,
                                                    random_state=random_state)
# We use OneVsRestClassifier for multi-label prediction
from sklearn.multiclass import OneVsRestClassifier
# Run classifier
classifier = OneVsRestClassifier(svm.LinearSVC(random_state=random_state))
classifier.fit(X_train, Y_train)
y_score = classifier.decision_function(X_test)
# -
# The average precision score in multi-label settings
# ....................................................
#
#
# +
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
# For each class: one PR curve and one AP value, keyed by class index.
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
    precision[i], recall[i], _ = precision_recall_curve(Y_test[:, i],
                                                        y_score[:, i])
    average_precision[i] = average_precision_score(Y_test[:, i], y_score[:, i])
# A "micro-average": quantifying score on all classes jointly
precision["micro"], recall["micro"], _ = precision_recall_curve(Y_test.ravel(),
                                                                y_score.ravel())
average_precision["micro"] = average_precision_score(Y_test, y_score,
                                                     average="micro")
print('Average precision score, micro-averaged over all classes: {0:0.2f}'
      .format(average_precision["micro"]))
# -
# Plot the micro-averaged Precision-Recall curve
# ...............................................
#
#
#
# +
# Micro-averaged PR curve over all classes jointly.
plt.figure()
plt.step(recall['micro'], precision['micro'], color='b', alpha=0.2,
         where='post')
plt.fill_between(recall["micro"], precision["micro"], alpha=0.2, color='b',
                 **step_kwargs)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title(
    'Average precision score, micro-averaged over all classes: AP={0:0.2f}'
    .format(average_precision["micro"]))
# -
# Plot Precision-Recall curve for each class and iso-f1 curves
# .............................................................
#
#
#
# +
from itertools import cycle
# setup plot details
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
plt.figure(figsize=(7, 8))
f_scores = np.linspace(0.2, 0.8, num=4)
lines = []
labels = []
# Grey iso-F1 contours: solve F1 = 2PR/(P+R) for P given R = x.
for f_score in f_scores:
    x = np.linspace(0.01, 1)
    y = f_score * x / (2 * x - f_score)
    l, = plt.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2)
    plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
# Only one legend entry for all iso-F1 lines (last handle from the loop).
lines.append(l)
labels.append('iso-f1 curves')
l, = plt.plot(recall["micro"], precision["micro"], color='gold', lw=2)
lines.append(l)
labels.append('micro-average Precision-recall (area = {0:0.2f})'
              ''.format(average_precision["micro"]))
# One coloured PR curve per class.
for i, color in zip(range(n_classes), colors):
    l, = plt.plot(recall[i], precision[i], color=color, lw=2)
    lines.append(l)
    labels.append('Precision-recall for class {0} (area = {1:0.2f})'
                  ''.format(i, average_precision[i]))
fig = plt.gcf()
fig.subplots_adjust(bottom=0.25)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(lines, labels, loc=(0, -.38), prop=dict(size=14))
plt.show()
| 01 Machine Learning/scikit_examples_jupyter/model_selection/plot_precision_recall.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
pwd
# +
import pandas as pd
import os
import shutil
import numpy as np
from tqdm import tqdm # pretty pretty
import ants
import nibabel as nib
# -
# Master scan list; keep only compressed NIfTI files (.nii.gz).
df = pd.read_csv('../Data/general_csv.csv') #grab master list
df['is_nii_gz'] = [file.endswith('.nii.gz') for file in df['local_paths'].values]
df = df[df['is_nii_gz']]
print(df.shape)
df.head(2)
def safe_mkdir(path):
    """Create directory *path* if it does not exist (no-op when present).

    Uses os.makedirs(..., exist_ok=True) instead of the original
    exists-then-mkdir pair: this avoids the check-then-create race and
    also creates missing intermediate directories.
    """
    os.makedirs(path, exist_ok=True)
def write_json(data, filepath):
    """Serialise *data* as JSON text and write it to *filepath*."""
    import json
    payload = json.dumps(data)
    with open(filepath, 'w') as fh:
        fh.write(payload)
# Slice the master list down to one NDAR collection and persist it.
study_name = 'Mapping Thalamocortical Networks Across Development in ASD'
study_df = df[df['collection_title']==study_name] # Slice
study_df.to_csv('../Data/DS2075.csv')
print(study_df.shape)
study_df.head(2)
study_subjects = np.unique(study_df['subjectkey'].values)
nsubjects = len(study_subjects)
#print(nsubjects)
#nsubjects = len(study_df)
# All rows of the slice share one collection_id; grab it from the first row.
collection_id = study_df['collection_id'].values[0]
collection_id
ndar_root = '../../ndar_fmri/' # What to take scans
bids_root = '../../' # Where to put scans
def check_has_anat_and_epi(sub):
    """Return True when subject *sub* has at least one fMRI and one T1 scan.

    NOTE(review): reads the module-level ``study_df`` DataFrame; image
    modality is taken from its 'image_description' column.
    """
    rows = study_df.iloc[study_df['subjectkey'].values == sub]
    descriptions = rows['image_description'].values
    has_epi = (descriptions == 'fMRI').sum() > 0
    has_anat = (descriptions == 'T1').sum() > 0
    return has_epi and has_anat
# Keep only subjects that have both an anatomical (T1) and a functional scan.
has_anat_and_epi = np.array([check_has_anat_and_epi(s) for s in study_subjects])
use_subjects = study_subjects[has_anat_and_epi]
nsubjects = len(use_subjects)
print(nsubjects)
# NOTE(review): bare `s` below is leftover debug output (last loop variable).
s
# +
# Dry run for the first subject: resolve source paths and BIDS destinations.
s = 0
sub = use_subjects[s]
sub_df = study_df.iloc[study_df['subjectkey'].values==sub]
fmri_idx = sub_df['image_description'].values=='fMRI'
anat_idx = sub_df['image_description'].values=='T1'
# First matching scan of each modality; [2::] strips the leading './'.
epi_fn = sub_df.iloc[fmri_idx]['local_paths'].values[0]
anat_fn = sub_df.iloc[anat_idx]['local_paths'].values[0]
epi_path = os.path.join(ndar_root,epi_fn[2::])
anat_path = os.path.join(ndar_root,anat_fn[2::])
#ants.slice_image(ants.image_read(epi_path),axis=3,idx=0).plot_ortho(flat=True)
#ants.image_read(anat_path).plot_ortho(flat=True)
# BIDS naming: sub-XXX_task-rest_bold.nii.gz / sub-XXX_T1w.nii.gz (1-based ids).
epi_dest = os.path.join(bids_root,f'ds-{collection_id}',f'sub-{s+1:03d}','func',f'sub-{s+1:03d}_task-rest_bold.nii.gz')
anat_dest = os.path.join(bids_root,f'ds-{collection_id}',f'sub-{s+1:03d}','anat',f'sub-{s+1:03d}_T1w.nii.gz')
print('done')
# -
# MAKE THE BIDS DIRECTORY
safe_mkdir(os.path.join(bids_root,f'ds-{collection_id}'))
# One sub-XXX/{func,anat} tree per subject (1-based, zero-padded ids).
for s in range(1,nsubjects+1):
    safe_mkdir(os.path.join(bids_root,f'ds-{collection_id}',f'sub-{s:03d}'))
    safe_mkdir(os.path.join(bids_root,f'ds-{collection_id}',f'sub-{s:03d}','func'))
    safe_mkdir(os.path.join(bids_root,f'ds-{collection_id}',f'sub-{s:03d}','anat'))
# Convert every subject: copy T1 + BOLD into the BIDS tree and patch headers.
for s in tqdm(range(0,nsubjects)):
    sub = use_subjects[s]
    sub_df = study_df.iloc[study_df['subjectkey'].values==sub]
    fmri_idx = sub_df['image_description'].values=='fMRI'
    anat_idx = sub_df['image_description'].values=='T1'
    # First scan of each modality; [2::] strips the leading './'.
    epi_fn = sub_df.iloc[fmri_idx]['local_paths'].values[0]
    anat_fn = sub_df.iloc[anat_idx]['local_paths'].values[0]
    epi_path = os.path.join(ndar_root,epi_fn[2::])
    anat_path = os.path.join(ndar_root,anat_fn[2::])
    #ants.slice_image(ants.image_read(epi_path),axis=3,idx=0).plot_ortho(flat=True)
    #ants.image_read(anat_path).plot_ortho(flat=True)
    epi_dest = os.path.join(bids_root,f'ds-{collection_id}',f'sub-{s+1:03d}','func',f'sub-{s+1:03d}_task-rest_bold.nii.gz')
    anat_dest = os.path.join(bids_root,f'ds-{collection_id}',f'sub-{s+1:03d}','anat',f'sub-{s+1:03d}_T1w.nii.gz')
    ## This really should've worked... smh
    #shutil.copyfile(src=anat_path, dst=anat_dest, follow_symlinks=True)
    #shutil.copyfile(src=epi_path, dst=epi_dest, follow_symlinks=True)
    # World Famous Aglinskas hax
    # Round-trip through ANTs instead of a raw file copy (rewrites headers).
    t1 = ants.image_read(anat_path)
    bold = ants.image_read(epi_path)
    t1.to_filename(anat_dest) # [ERR] _T1w.nii[.gz] files must have exactly three dimensions. (code: 95 - T1W_FILE_WITH_TOO_MANY_DIMENSIONS)
    bold.to_filename(epi_dest)
    # NOTE(review): assumes the last element of ANTs' spacing tuple for a 4-D
    # image is the TR in seconds — confirm for this dataset.
    bold_json = {"RepetitionTime" : bold.spacing[-1],
                 "TaskName" : 'rest'}
    # Write BOLD .json
    write_json(bold_json,epi_dest.replace('.nii.gz','.json')) ### [ERR] You have to define 'RepetitionTime' for this file. (code: 10 - REPETITION_TIME_MUST_DEFINE)
    # Holy shit it's still putting up a fight... ## [ERR] Repetition time was not defined in seconds, milliseconds or microseconds in the scan's header. (code: 11 - REPETITION_TIME_UNITS)
    # Re-save the BOLD file with explicit units (mm / sec) so the BIDS
    # validator accepts the repetition time stored in the header.
    im = nib.load(epi_dest)
    header = im.header.copy()
    header.set_xyzt_units(xyz='mm', t='sec')
    nib.nifti1.Nifti1Image(im.get_fdata(), None, header=header).to_filename(epi_dest)
    assert nib.load(epi_dest).header.get_xyzt_units()==('mm', 'sec'),'timing missing from header'
# +
## write the dataset_description.json
import json
##data = {'Name' : study_df['collection_title'].values[0] ,
##'BIDSVersion' : '20.2.0'}
data = {
    "Name" : study_df["collection_title"].values[0] ,
    "RepetitionTime": 2.0,
    "SliceTiming" : 2.0 ,
    "TaskName" : "taskrest" ,
    "BIDSVersion" : "20.2.0"
    #'RepetitionTime': study_df['mri_repetition_time_pd'].values[0]
}
json_string = json.dumps(data)
print(json_string)
# FIX: the original called json.dump(json_string, outfile), serialising the
# already-encoded string a second time; the file then contained one quoted,
# escaped JSON string instead of a JSON object, which is invalid as a BIDS
# dataset_description.json. Dump the dict itself.
with open(os.path.join(bids_root,f'ds-{collection_id}','dataset_description.json'), 'w') as outfile:
    json.dump(data, outfile)
# -
# Spot-check a few converted T1 volumes (display only).
ants.image_read(os.path.join('~/ds-2075/sub-034/anat/sub-034_T1w.nii.gz')).plot_ortho(flat=True)
ants.image_read(os.path.join('~/ds-2075/sub-001/anat/sub-001_T1w.nii.gz')).plot_ortho(flat=True)
ants.image_read(os.path.join('~/ds-2075/sub-050/anat/sub-050_T1w.nii.gz')).plot_ortho(flat=True)
| Code/new-organize-subjects2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Variation due to `word2vec`'s random initialisation
#
# - fairly small at the word analogy task
# +
# %cd ~/NetBeansProjects/ExpLosion/
from itertools import chain
from notebooks.common_imports import *
from gui.output_utils import *
from gui.user_code import pretty_names, pairwise_significance_exp_ids
# Monkey-patch seaborn's bootstrap with the project's my_bootstrap
# (affects confidence intervals in every factorplot/tsplot call below).
sns.timeseries.algo.bootstrap = my_bootstrap
sns.categorical.bootstrap = my_bootstrap
# -
def get(corpus='amazon_grouped-tagged', rep=0, avg=False, reorder=False,
        composers=('Add', 'Mult', 'Left', 'Right'), k=(3,)):
    """Return the ids of Experiment objects matching the given settings.

    Parameters mirror fields of the Experiment ORM model; `composers` and
    `k` feed `__in` lookups, so any iterable works (callers may still pass
    lists).

    Fixes vs. the original: the mutable list defaults (shared between all
    calls) are now immutable tuples, and the duplicated
    'expansions__use_similarity' key has been removed from the dict literal
    (the repeated key silently overwrote the first occurrence).
    """
    query_dict = {
        'expansions__use_similarity': 0,
        'expansions__neighbour_strategy': 'linear',
        'expansions__vectors__dimensionality': 100,
        'document_features_ev': 'AN+NN',
        'document_features_tr': 'J+N+AN+NN',
        'expansions__allow_overlap': False,
        'expansions__entries_of': None,
        'expansions__vectors__algorithm': 'word2vec',
        'expansions__vectors__composer__in': composers,
        'expansions__vectors__unlabelled': 'wiki',
        'expansions__decode_handler': 'SignifiedOnlyFeatureHandler',
        'expansions__noise': 0,
        'expansions__k__in': k,
        'expansions__vectors__unlabelled_percentage': 15,
        'expansions__vectors__rep': rep,
        'expansions__vectors__avg': avg,
        'expansions__vectors__reorder': reorder,
        'labelled': corpus}
    return [foo.id for foo in Experiment.objects.filter(**query_dict)]
# Accuracy per composer for three independent word2vec runs (rep 0..2).
ids = list(chain.from_iterable(get(rep=r) for r in [0, 1, 2]))
print(ids)
df = dataframe_from_exp_ids(ids, fields_to_include={'View':'expansions__vectors__rep',
                                                    'Composer': 'expansions__vectors__composer'})
with sns.color_palette("cubehelix", 4):
    g = sns.factorplot(data=df, x='Composer', y='Accuracy', hue='View',
                       hue_order='0 1 2'.split(),
                       kind='bar', ci=68, aspect=2);
plt.savefig('plot-w2v_random_init_var.pdf', format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.1)
# Significance of run-to-run differences for the Add composer only.
ids = list(chain.from_iterable(get(rep=r, composers=['Add']) for r in [0, 1, 2]))
ids
# are the differences significant
sign_df, _, _ = get_demsar_params(ids, ['expansions__vectors__rep'])
sign_df
# # Repeats on R2 corpus
# Does the smaller R2 dataset find differences between repeats on a sample of approx the same size?
# ### There is a difference up to 4%, but it is not significant
ids = list(chain.from_iterable(get(corpus='reuters21578/r8-tagged-grouped', rep=r) for r in [0, 1, 2]))
print(ids)
get_demsar_params(ids, ['expansions__vectors__rep'])[0]
# +
# Compare the standard run (rep 0), the 3-way average (rep 3, avg), and the
# reordered ("dice") runs 2..5, at k=3 and k=30.
ids = get(rep=0, composers=['Add'], k=[3,30]) +\
    get(rep=3, avg=True, composers=['Add'], k=[3,30]) +\
    list(chain.from_iterable(get(rep=i, reorder=True, composers=['Add'], k=[3,30]) for i in [2,3,4,5]))
print(ids)
# NOTE(review): DataFrame.convert_objects was removed in pandas 0.21 —
# pd.to_numeric / astype is the modern replacement.
df = dataframe_from_exp_ids(ids, fields_to_include={'rep':'expansions__vectors__rep',
                                                    'avg':'expansions__vectors__avg',
                                                    'dice':'expansions__vectors__reorder',
                                                    'k': 'expansions__k',
                                                    'Composer': 'expansions__vectors__composer'}).convert_objects(convert_numeric=True)
# Derive a single categorical 'method' label from the rep/avg/dice columns.
df['method'] = 'avg3'
df.loc[df.rep==0, 'method'] = 'std'
for i in [2,3,4,5]:
    df.loc[(df.rep==i) & (df.dice==1), 'method'] = 'dice%d'%i
df = df.drop('avg dice rep'.split(), axis=1)
df['Method'] = df.method
with sns.color_palette("cubehelix", 6):
    g = sns.factorplot(data=df, x='k', y='Accuracy', hue='Method',
                       kind='bar', ci=68, aspect=2);
    g.set(ylim=(.3, None))
plt.savefig('plot-w2v_random_init_boost.pdf', format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.1)
# -
Experiment.objects.get(expansions__k=30, expansions__vectors__composer='Add',
                       expansions__vectors__reorder=False).id
# 55= k=30, 100% wiki
# 75= k=3, 15%wiki
get_ci(55)[1], get_ci(370)[1]
#
# # Are the differences significant?
ids1 = [i for i in ids if Experiment.objects.get(id=i).expansions.vectors.composer=='Add']
print(ids1)
get_demsar_params(ids, ['expansions__vectors__composer',
                        'expansions__vectors__rep'])[0]
# # Compare unigram vectors between multiple runs
# How many of the top `n` neighbours of some entries are the same across multiple runs
# +
from glob import glob
from discoutils.thesaurus_loader import Vectors as V
from random import sample
from itertools import combinations
# Load every repeated 15%-wiki unigram vector file and precompute neighbours.
pattern = '/lustre/scratch/inf/mmb28/FeatureExtractionToolkit/word2vec_vectors/word2vec-wiki-15perc.unigr.strings.rep*'
files = sorted(glob(pattern))
thes = [V.from_tsv(f) for f in files]
for t in thes:
    t.init_sims(n_neighbors=100)
# -
def dice(n1, n2):
    """Sørensen–Dice coefficient of two collections (intersected as sets).

    The denominator keeps the raw collection lengths (duplicates count),
    matching the original formula. Returns 0.0 when both collections are
    empty instead of raising ZeroDivisionError.
    """
    total = len(n1) + len(n2)
    if total == 0:
        return 0.0
    return 2 * len(set(n1) & set(n2)) / total
# Vocabulary overlap (Dice over the entry sets) for every thesaurus pair.
for i,j in combinations(range(len(thes)), 2):
    print(i, j, dice(thes[i].keys(), thes[j].keys()))
def dice_loop(words, thes, log=False):
    """Dice neighbour overlap for every pair of thesauri in *thes*.

    Returns a list of [pair_id, word, dice] rows; words whose neighbour
    list is empty in either thesaurus of a pair are skipped. Pair ids are
    1-based; any index beyond the third is labelled 'A'.
    """
    rows = []
    for first, second in combinations(range(len(thes)), 2):
        pair_id = '%d & %r' % (first + 1, second + 1 if second < 3 else 'A')
        if log:
            print('Doing pair', pair_id, flush=True)
        for word in words:
            left = [entry[0] for entry in thes[first].get_nearest_neighbours(word)]
            right = [entry[0] for entry in thes[second].get_nearest_neighbours(word)]
            if left and right:
                rows.append([pair_id, word, dice(left, right)])
    return rows
# Dice overlap of top-100 neighbour lists for 5000 randomly sampled words.
sampled_words = sample(list(thes[0].keys()), 5000)
dice_data = dice_loop(sampled_words, thes, log=True)
# +
# One overlap histogram per thesaurus pair.
df3 = pd.DataFrame(dice_data, columns='Views Word Dice'.split())
with sns.axes_style("white"):
    g = sns.FacetGrid(df3, col="Views", col_wrap=3);
    g.map(sns.distplot, 'Dice', kde=True);
    for ax in g.axes.flat:
        sparsify_axis_labels(ax)
        ax.set_xlim(0, 1.01)
        ax.set_yticklabels([])
    sns.despine(left=True, bottom=True)
plt.savefig('plot-w2v_random_init_neigh_overlap.pdf', format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.1)
# -
# # Observations
# Neighbours tend to be quite different over multiple runs, but the overall accuracy of the classification task changes very little
# # Qualitative analysis
# See below
# Seems to me good neighbours (which seem sensible) tend to be the same across repeated runs
def multiway_dice(entry, thesauri):
    """Mean Dice overlap of *entry*'s neighbours across all thesaurus pairs."""
    scores = pd.DataFrame(dice_loop([entry], thesauri),
                          columns=['Pair', 'Word', 'Dice'])
    return scores['Dice'].mean()
# Qualitative neighbour comparison for hand-picked words, plus multiway Dice.
df = compare_neighbours(thes, [0, 1, 2, 4, 5],
                        words=['balkans/N', 'lesbian/J', 'ottawa/N', 'sneaker/N', 'essay/N', 'falsify/V', 'inborn/J'])
df['mw_dice'] = [multiway_dice(feat, thes) for feat in df.index]
df.to_csv('compare_repeated_w2v.csv')
# NOTE(review): DataFrame.sort was removed in pandas 0.20 — sort_values('mw_dice')
# is the modern call.
df.sort('mw_dice')
print(pd.DataFrame(df.stack()).to_latex())
df = compare_neighbours(thes, [0, 1, 2, 4, 5])
df.head()
# Union of entries across the first three runs; count how many runs contain each.
all_feats = set.union(*[set(v.keys()) for v in thes[:3]])
len(all_feats)
from collections import Counter
Counter(sum(f in v for v in thes[:3]) for f in all_feats)
# Single run trained on 100% of wiki, for comparison with the 15% runs.
pattern = '/lustre/scratch/inf/mmb28/FeatureExtractionToolkit/word2vec_vectors/word2vec-wiki-100perc.unigr.strings.rep0'
v_avg = V.from_tsv(pattern)
v_avg.init_sims(n_neighbors=10)
len(v_avg)
# Entries gained/shared relative to the first 15% run; nouns among the new ones.
new_entries = set(v_avg.keys()) - set(thes[0].keys())
old_entries = set(v_avg.keys()) & set(thes[0].keys())
new_nouns = [x for x in new_entries if x.endswith('/N')]
len(new_entries), len(old_entries), len(new_nouns)
new_entries
v_avg.get_nearest_neighbours('measured/J')
| notebooks/w2v_repeats_effect_of_random_initialisation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
from preproc.filters import markov_filter, rnn_filter
from eda.tools import seq_to_num, acc_score, prep_submit
from models.baseline import Baseline
from models.diff_table import DiffTable
from models.markov_chain import MarkovChain
from models.linear_model import LinearModel
from models.Nonlinear_model import NonLinearModel
from models.lin_reg import LinReg
from models.pipeline import Pipeline
from models.rnn import RNN
# Load the integer-sequence train/test splits (sequence strings -> arrays).
df_train = pd.read_csv("data/train.csv", index_col=0)
df_test = pd.read_csv('data/test.csv', index_col=0)
X_train, y_train = seq_to_num(df_train.Sequence, pad=False)
X_test, y_test = seq_to_num(df_test.Sequence, pad=False)
# Model pipeline: each entry is (name, model, optional input filter).
models = [
    ('DT', DiffTable(), None),
    ('LRR', LinearModel(), None),
    ('NLRR', NonLinearModel(), None),
    ('MC', MarkovChain(), markov_filter),
    ('RNN', RNN(), rnn_filter),
    ('LR', LinReg(), None)
]
pipe = Pipeline(models, verbose=True)
# Evaluate on train: `ind` are the indices the pipeline managed to predict.
ind, pred = pipe.predict(X_train)
acc_score(y_train[ind], pred[ind])
len(ind)
# Kaggle validation set: use a Baseline fallback so every row gets a value.
df_validate = pd.read_csv('data/kaggle_test.csv', index_col=0)
X_val = seq_to_num(df_validate.Sequence, pad=False, target_split=False)
# NOTE(review): the models list is rebuilt before each run, presumably because
# the model objects are stateful after fitting — confirm before deduplicating.
models = [
    ('DT', DiffTable(), None),
    ('LRR', LinearModel(), None),
    ('NLRR', NonLinearModel(), None),
    ('MC', MarkovChain(), markov_filter),
    ('RNN', RNN(), rnn_filter),
    ('LR', LinReg(), None)
]
pipe = Pipeline(models, fallback=Baseline(), verbose=True)
pred = pipe.predict(X_val)
prep_submit(pred, 'submit_with_linreg.csv')
# Fill rows the prefix-trie submission could not predict (Last == '0').
df_after_trie = pd.read_csv('submit/result_prefic_trie_for_test.csv', index_col=0)
not_predicted_ind = df_after_trie[df_after_trie.Last.map(lambda x: x == '0')].index
df_val = pd.read_csv('data/kaggle_test.csv', index_col=0)
X_val = seq_to_num(df_val.Sequence, target_split=False, pad=False)
X_val_skipped = X_val[X_val.index.isin(not_predicted_ind)]
models = [
    ('DT', DiffTable(), None),
    ('LRR', LinearModel(), None),
    ('NLRR', NonLinearModel(), None),
    ('MC', MarkovChain(), markov_filter),
    ('RNN', RNN(), rnn_filter),
    ('LR', LinReg(), None)
]
X_val_skipped[:1].tolist()
pipe = Pipeline(models, fallback=Baseline(), verbose=True)
pred = pipe.predict(X_val_skipped)
# Overwrite only the previously unpredicted rows, then export the merged file.
df_after_trie[df_after_trie.Last == '0'] = np.expand_dims(pred, 1)
prep_submit(df_after_trie.Last.astype(np.float64), 'collect_them_all.csv')
| eda/pipe_all.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 500)
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import ipywidgets as widgets
import IPython
from IPython.display import display, clear_output
import warnings
resolution = 300 #dpi
tick_size = 18
fontlabel_size = 18
# Figure dimensions are given in mm; divide by 25.4 to get matplotlib inches.
figure_width = 377 / 25.4 #conversion to mm is 25.4
figure_height = 233 / 25.4 #conversion to mm is 25.4
figure_size = (figure_width, figure_height)
# Global matplotlib style applied to every plot in this notebook.
params = {
    'lines.markersize' : 2,
    'axes.labelsize': fontlabel_size,
    'legend.fontsize': fontlabel_size,
    'xtick.labelsize': tick_size,
    'ytick.labelsize': tick_size,
    'figure.figsize': figure_size,
    'xtick.direction': 'in',    # direction: {in, out, inout}
    'ytick.direction': 'in',    # direction: {in, out, inout}
    'axes.spines.top': False,
    'axes.spines.right': False,
    'xtick.major.pad': 8,
    'ytick.major.pad': 8,
    # NOTE(review): 'serif,' contains a stray trailing comma — likely a typo
    # for 'serif'; matplotlib may ignore or warn about the empty family.
    'font.family' : 'serif,',
    'ytick.labelsize' : fontlabel_size,
    'xtick.labelsize' : fontlabel_size,
    'axes.linewidth' : 1.2
}
plt.rcParams.update(params)
# +
# Mass-fraction-of-solute grid used for all polynomial fits below.
mfs_range = np.linspace(0,1,1000)
# CRC Handbook aqueous-solution property table, tab-separated, '#' comments.
filepath_CRC_data = 'CRC Solution Properties.txt'
df_CRC_data = pd.read_csv(filepath_CRC_data, sep = '\t', comment= '#', encoding='latin-1').drop('Row',axis=1)
#df_CRC_data.fillna('Empty', inplace=True)
# One group per solute, keyed by CAS registry number.
CAS_numbers = df_CRC_data.CAS_Reg_No.unique()
grouped_CRC_data = df_CRC_data.groupby('CAS_Reg_No')
# +
#Descriptors of solutes
solutions = ['SubHeader',
             'Solute',
             'Synonym',
             'CAS_Reg_No',
             'Mol_wt',
             'data',
             'MFS_molal',
             'MFS_molar',
             'Density',
             'Refractive_index_n',
             'T_freeze_supression',
             'Viscosity_dynamic']
#Different ways of expressing concentration
conc_measurements = ['Mass_prct',
                     'Mass_fraction',
                     'Molality_m',
                     'Molarity_c']
#Concentrative properties of solution
solution_properties = ['Density',
                       'Refractive_index_n',
                       'T_freeze_supression',
                       'Viscosity_dynamic']
three_lists = [solutions,
               conc_measurements,
               solution_properties]
#turn lists into dicts
solutions = dict.fromkeys(solutions)
conc_measurements = dict.fromkeys(conc_measurements)
solution_properties = dict.fromkeys(solution_properties)
# Axis-label strings for each concentration measure and property.
conc_measurements['Mass_prct'] = 'W/W Mass / %'
conc_measurements['Mass_fraction'] = 'Mass Fraction Solute'
conc_measurements['Molality_m'] = 'Molality / mol/kg'
conc_measurements['Molarity_c'] = 'Molarity / mol/L'
solution_properties['Density'] = 'Density / kg/L'
solution_properties['Refractive_index_n'] = 'Refractive Index'
solution_properties['T_freeze_supression'] = '-$\delta$T / K'
solution_properties['Viscosity_dynamic'] = 'Dynamic Viscosity / Pas'
# -
# Fill the solutions dict: 'data' gets per-solute sub-frames, fit-result slots
# are initialised to None, and scalar descriptors are read from the first row.
# NOTE(review): the None-slot branch reads solutions['CAS_Reg_No'], so it
# relies on dict insertion order (CAS_Reg_No filled before MFS_molal etc.).
for key in solutions:
    solutions[key] = []
    if key == 'data':
        for number in CAS_numbers:
            solutions[key].append(grouped_CRC_data.get_group(number))
        continue
    if key == 'MFS_molal' or key == 'MFS_molar' or key == 'Density' or key == 'Refractive_index_n' or key == 'T_freeze_supression'or key == 'Viscosity_dynamic':
        solutions[key] = [None] * len(solutions['CAS_Reg_No'])
        continue
    for number in CAS_numbers:
        solutions[key].append(grouped_CRC_data.get_group(number).iloc[0][key])
# +
def GUI():
    # Interactive notebook explorer: pick a solute, an x-axis concentration
    # measure and a y-axis property; the observer handlers re-render both the
    # MFS-to-molality/molarity plots and the property fit into `output`.
    # NOTE(review): depends on module-level `widgets`, `IPython`, `plt`, `np`,
    # `warnings`, `solutions`, `conc_measurements`, `solution_properties` and
    # `mfs_range` being defined elsewhere in the notebook.
    solute_dropdown = widgets.Dropdown(options = solutions['Solute'], value = 'Sodium chloride')
    conc_dropdown = widgets.Dropdown(options = conc_measurements.keys(), value = 'Mass_fraction')
    property_dropdown = widgets.Dropdown(options = solution_properties.keys(), value = 'Density')
    output = widgets.Output()
    def show_poly_fit(solute_name, x_series , y_series, order = 3):
        '''
        Takes the solute name and chosen poperties to plot and performs a poly fit
        '''
        # Closure twin of the module-level show_poly_fit, drawing into `output`.
        data = solutions['data'][solutions['Solute'].index(solute_name)]
        mfs_to_molal = solutions['MFS_molal'][solutions['Solute'].index(solute_name)]
        mfs_to_molar = solutions['MFS_molar'][solutions['Solute'].index(solute_name)]
        with output:
            fig, ax = plt.subplots(constrained_layout=True)
            '''# move the toolbar to the bottom
            fig.canvas.toolbar_position = 'bottom'''
            ax.grid(True)
            line_data, = ax.plot(data[x_series], data[y_series], color = 'k', lw = 4, label = "Reference Data")
            # The polynomial is always fitted against mass fraction; the x-axis
            # of the overlay is converted via the stored molal/molar fits.
            try:
                poly_fit = np.polyfit(data['Mass_fraction'],
                                      data[y_series],
                                      order)
                poly_function = np.poly1d(poly_fit)
                # Show the fit against the chosen concentration measure.
                if x_series == 'Mass_prct':
                    line_fit, = ax.plot(100 * mfs_range, poly_function(mfs_range), ls = ':', lw = 3, color = 'b', label = 'Mass % Fit')
                elif x_series == 'Molality_m':
                    line_fit, = ax.plot(mfs_to_molal(mfs_range), poly_function(mfs_range), ls = ':', lw = 3, color = 'magenta', label = 'Molality Fit')
                elif x_series == 'Molarity_c':
                    line_fit, = ax.plot(mfs_to_molar(mfs_range), poly_function(mfs_range), ls = ':', lw = 3, color = 'cyan', label = 'Molarity Fit')
                elif x_series == 'Mass_fraction':
                    line_fit, = ax.plot(mfs_range, poly_function(mfs_range), ls = ':', lw = 3, color = 'r', label = 'MFS Fit')
                solutions[y_series][solutions['Solute'].index(solute_name)] = poly_function
            except Exception as e:
                print(e)
                warnings.warn('Failed to parameterise data}')
                # Failed fits are recorded as None so later cells can skip them.
                solutions[y_series][solutions['Solute'].index(solute_name)] = None
                pass
            plt.legend()
            ax.set_xlabel(x_series)
            ax.set_ylabel(y_series)
            plt.show()
        return
    def show_mol_ratio(solute_name, order = 3):
        # Plot MFS vs molality/molarity and cache cubic fits into `solutions`.
        data = solutions['data'][solutions['Solute'].index(solute_name)]
        with output:
            fig, (ax0, ax1) = plt.subplots( 1, 2, constrained_layout=True)
            ax0.set_xlabel(conc_measurements['Mass_fraction'])
            ax1.set_xlabel(conc_measurements['Mass_fraction'])
            ax0.set_ylabel(conc_measurements['Molality_m'])
            ax1.set_ylabel(conc_measurements['Molarity_c'])
            line_a, = ax0.plot(data.Mass_fraction, data.Molality_m, color = 'k', lw = 4)
            line_b, = ax1.plot(data.Mass_fraction, data.Molarity_c, color = 'k', lw = 4)
            try:
                molal_fit = np.poly1d(np.polyfit(data.Mass_fraction, data.Molality_m, order))
                molar_fit = np.poly1d(np.polyfit(data.Mass_fraction, data.Molarity_c, order))
                line_fit_a, = ax0.plot(mfs_range, molal_fit(mfs_range), ls = ':', lw = 3, color = 'dodgerblue', label = 'MFS to Molality Fit')
                line_fit_b, = ax1.plot(mfs_range, molar_fit(mfs_range), ls = ':', lw = 3, color = 'dodgerblue', label = 'MFS to Molarity Fit')
                plt.legend()
                plt.show()
                solutions['MFS_molal'][solutions['Solute'].index(solute_name)] = molal_fit
                solutions['MFS_molar'][solutions['Solute'].index(solute_name)] = molar_fit
                return
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt;
                # the module-level twin of this function has the same issue.
                plt.show()
                warnings.warn("Failed to parameterise MFS to either Molality or Molarity. Consider interpolating from experimental data if possible")
                solutions['MFS_molal'][solutions['Solute'].index(solute_name)] = None
                solutions['MFS_molar'][solutions['Solute'].index(solute_name)] = None
                return
        return
    def solute_dropdown_handler(change):
        # Re-render both plot groups for the newly selected solute.
        with output:
            show_mol_ratio(change.new)
            show_poly_fit(change.new, conc_dropdown.value, property_dropdown.value)
            IPython.display.clear_output(wait=True)
        return
    def conc_dropdown_handler(change):
        # Re-render using the currently selected solute and the new x-measure.
        with output:
            show_mol_ratio(solute_dropdown.value)
            show_poly_fit(solute_dropdown.value, conc_dropdown.value, property_dropdown.value)
            IPython.display.clear_output(wait=True)
        return
    def property_dropdown_handler(change):
        # Re-render using the currently selected solute and the new y-property.
        with output:
            show_mol_ratio(solute_dropdown.value)
            show_poly_fit(solute_dropdown.value, conc_dropdown.value, property_dropdown.value)
            IPython.display.clear_output(wait=True)
        return
    # Wire the handlers to value changes and lay out the widgets.
    solute_dropdown.observe(solute_dropdown_handler, names = 'value')
    conc_dropdown.observe(conc_dropdown_handler, names = 'value')
    property_dropdown.observe(property_dropdown_handler, names = 'value')
    input_widgets = widgets.HBox([solute_dropdown, conc_dropdown, property_dropdown])
    display(input_widgets)
    display(output)
# -
GUI()
# +
def show_poly_fit(solute_name, x_series , y_series, order = 3):
    """Plot reference data for one solute and overlay a polynomial fit.

    The polynomial is always fitted against mass fraction; when another
    x-series is requested, the stored MFS->molality/molarity fit converts
    the x-axis of the overlay. The resulting poly1d (or None on failure)
    is stored back into ``solutions[y_series]`` for this solute.

    Parameters:
        solute_name: entry of solutions['Solute'] to plot.
        x_series: column/key for the x axis ('Mass_prct', 'Mass_fraction',
            'Molality_m' or 'Molarity_c').
        y_series: property column to fit (e.g. 'Density').
        order: polynomial order of the fit (default cubic).
    """
    import warnings  # local import so the function works even if the notebook cell importing warnings was skipped
    # Hoisted: one list lookup instead of five identical .index() scans.
    idx = solutions['Solute'].index(solute_name)
    data = solutions['data'][idx]
    mfs_to_molal = solutions['MFS_molal'][idx]
    mfs_to_molar = solutions['MFS_molar'][idx]
    fig, ax = plt.subplots(constrained_layout=True)
    ax.grid(True)
    ax.plot(data[x_series], data[y_series], color = 'k', lw = 4, label = "Reference Data")
    try:
        poly_function = np.poly1d(np.polyfit(data['Mass_fraction'], data[y_series], order))
        # Overlay the fit against the chosen concentration measure.
        # NOTE: mfs_to_molal/mfs_to_molar may be None if show_mol_ratio failed
        # earlier; the resulting TypeError is caught below like any fit failure.
        if x_series == 'Mass_prct':
            ax.plot(100 * mfs_range, poly_function(mfs_range), ls = ':', lw = 3, color = 'b', label = 'Mass % Fit')
        elif x_series == 'Molality_m':
            ax.plot(mfs_to_molal(mfs_range), poly_function(mfs_range), ls = ':', lw = 3, color = 'magenta', label = 'Molality Fit')
        elif x_series == 'Molarity_c':
            ax.plot(mfs_to_molar(mfs_range), poly_function(mfs_range), ls = ':', lw = 3, color = 'cyan', label = 'Molarity Fit')
        elif x_series == 'Mass_fraction':
            ax.plot(mfs_range, poly_function(mfs_range), ls = ':', lw = 3, color = 'r', label = 'MFS Fit')
        solutions[y_series][idx] = poly_function
    except Exception as e:
        print(e)
        warnings.warn('Failed to parameterise data')  # fixed stray '}' in the original message
        # Record the failure as None so later cells can skip this solute.
        solutions[y_series][idx] = None
    plt.legend()
    ax.set_xlabel(x_series)
    ax.set_ylabel(y_series)
    plt.show()
def show_mol_ratio(solute_name, order = 3):
    """Plot MFS vs molality and MFS vs molarity for one solute and store
    polynomial MFS->molality / MFS->molarity fits into ``solutions``.

    On failure the raw data is still shown, a warning is emitted and both
    fit slots are set to None.

    Parameters:
        solute_name: entry of solutions['Solute'] to plot.
        order: polynomial order of the conversion fits (default cubic).
    """
    import warnings  # local import so the function works even if the notebook cell importing warnings was skipped
    # Hoisted: one list lookup instead of five identical .index() scans.
    idx = solutions['Solute'].index(solute_name)
    data = solutions['data'][idx]
    fig, (ax0, ax1) = plt.subplots( 1, 2, constrained_layout=True)
    ax0.set_xlabel(conc_measurements['Mass_fraction'])
    ax1.set_xlabel(conc_measurements['Mass_fraction'])
    ax0.set_ylabel(conc_measurements['Molality_m'])
    ax1.set_ylabel(conc_measurements['Molarity_c'])
    ax0.plot(data.Mass_fraction, data.Molality_m, color = 'k', lw = 4)
    ax1.plot(data.Mass_fraction, data.Molarity_c, color = 'k', lw = 4)
    try:
        molal_fit = np.poly1d(np.polyfit(data.Mass_fraction, data.Molality_m, order))
        molar_fit = np.poly1d(np.polyfit(data.Mass_fraction, data.Molarity_c, order))
        ax0.plot(mfs_range, molal_fit(mfs_range), ls = ':', lw = 3, color = 'dodgerblue', label = 'MFS to Molality Fit')
        ax1.plot(mfs_range, molar_fit(mfs_range), ls = ':', lw = 3, color = 'dodgerblue', label = 'MFS to Molarity Fit')
        plt.legend()
        plt.show()
        solutions['MFS_molal'][idx] = molal_fit
        solutions['MFS_molar'][idx] = molar_fit
    except Exception:  # was a bare except:, which would also swallow KeyboardInterrupt
        plt.show()
        warnings.warn("Failed to parameterise MFS to either Molality or Molarity. Consider interpolating from experimental data if possible")
        solutions['MFS_molal'][idx] = None
        solutions['MFS_molar'][idx] = None
# +
# Solutes to parameterise in bulk: each gets MFS->molality/molarity fits plus
# a density-vs-molarity polynomial stored back into `solutions`.
compounds_list = ['Sodium chloride', 'Potassium chloride',
                  'Potassium iodide',
                  'Sodium nitrate', 'Potassium nitrate',
                  'Sodium sulfate', 'Potassium sulfate', 'Magnesium sulfate']
for name in compounds_list:
    print (name)
    show_mol_ratio(name)
    show_poly_fit(name, 'Molarity_c', 'Density')
# +
# Overlay every fitted MFS->molarity curve on its reference points and print
# each solute's predicted molarity at MFS = 0.05 as a sanity check.
for name in compounds_list:
    plt.plot(mfs_range, solutions['MFS_molar'][solutions['Solute'].index(name)](mfs_range), ':', label = name)
    plt.scatter(solutions['data'][solutions['Solute'].index(name)].Mass_fraction,
                solutions['data'][solutions['Solute'].index(name)].Molarity_c,
                s = 20)
    print(name, solutions['MFS_molar'][solutions['Solute'].index(name)](0.05))
plt.xlim(0,0.6)
plt.ylim(0,15)
plt.xlabel('MFS')
plt.ylabel('Molarity / mol/L')
plt.legend()
plt.show()
# -
# Spot-check two fitted conversions (bare expressions: shown as cell output).
solutions['MFS_molar'][solutions['Solute'].index('Sodium nitrate')](0.05)
solutions['MFS_molar'][solutions['Solute'].index('Potassium nitrate')](0.0595)
def get_properties(name, quant_term, property_name):
    """Convenience wrapper: fit/plot the MFS-to-molality/molarity ratios for
    *name*, then fit/plot *property_name* against *quant_term*."""
    # Order matters: show_mol_ratio stores the MFS->molality/molarity fits
    # that show_poly_fit needs for its x-axis conversion.
    show_mol_ratio(name)
    show_poly_fit(name, quant_term, property_name)
| src/Solution Properties.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json
import gensim
import pandas as pd
import nltk
from nltk.corpus import stopwords
from sklearn.decomposition import PCA
from matplotlib import pyplot
import numpy as np

# Context manager closes the handle even if json.load raises; JSON is UTF-8
# by specification (RFC 8259), so decode explicitly instead of relying on the
# platform's locale default.
with open("./ads.json", encoding="utf-8") as f:
    data = json.load(f)
# Dead code from an earlier line-delimited ad format, kept for reference.
# NOTE(review): the `l` it builds is referenced by a later cell, which fails
# with NameError while this stays commented out.
'''
l = []
i = 0
for obj in f.readlines():
    obj = obj.strip('\n')
    obj = obj.replace('&', '')
    obj = obj.replace('•', '')
    obj = obj.replace('.', '')
    #print(obj)
    json_obj = json.loads(obj)
    l.append(json_obj)
    if i < 5:
        print(json_obj['number'])
        print(json_obj['desc'])
        i += 1
'''
# +
# Markup and control sequences to strip from the raw ad descriptions.
symbols_remove = [ '<p>', '</p>', '</em>', '<em>', '<b>', '</b>', '<br>', '<div>', '</div>',
                   '<strong>', '</strong>', '\n*', '\n', '\t', '\xa0', '"' ]
#./test.txt
# Append every cleaned description to the Swedish corpus file; mode 'a' keeps
# the content of earlier runs. The context manager guarantees the file is
# closed even if a malformed record raises part-way through.
with open('./wordvectors/data/sv.txt', 'a', encoding='utf-8') as f:
    for d in data:
        text = d['description']['text']
        for sym in symbols_remove:
            text = text.replace(sym, '')
        text = text.replace('&', 'och')  # presumably an HTML-escaped ampersand -> Swedish 'och'
        text = text.replace('/', ' ')
        text = text.replace('\r', ' ')
        f.write(text + '\r\n')
# -
# Last processed description (bare expression: shown as cell output).
text
# NOTE(review): `l` is only defined inside the commented-out parsing block
# above, so this cell fails with NameError unless that code is restored.
data = {'Number':[l[0]['number']], 'Description':[l[0]['desc'][4:]]}
df = pd.DataFrame(data)
eng_data = {'Number':[], 'Description':[]}
# Split ads by language with a crude heuristic: descriptions containing ' is '
# are treated as English; everything else stays in the Swedish frame.
for i in range(1,len(l)-1):
    if ' is ' in l[i]['desc'][4:]:
        eng_data['Number'].append(l[i]['number'])
        eng_data['Description'].append(l[i]['desc'][4:])
    else:
        # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
        # collect rows in a list and pd.concat once when this is revived.
        df = df.append({'Number':l[i]['number'], 'Description':l[i]['desc'][4:]}, ignore_index=True)
df_eng = pd.DataFrame(eng_data)
print(df_eng.tail(2))
print(df.tail(2))
# Load the pretrained Swedish word2vec model from disk.
model = gensim.models.Word2Vec.load('./wordvectors/data/sv.bin')
sentences = []
# Append each lower-cased description (plus its ad number as an extra token)
# to the corpus file while building the in-memory token lists used below.
f = open('./wordvectors/data/sv.txt', 'a+')
for index, row in df.iterrows():
    #if index > 5:
    #    break
    desc = row['Description'].lower()
    str_num = str(row['Number'])
    f.write(desc + " " + str_num + '\r\n')
    sent = desc.split()
    # The ad number token lets an ad be located in the embedding space later.
    sent.append(str_num)
    print(sent)
    sentences.append(sent)
f.close()
# word2vec training hyperparameters
vector_size = 300
window_size = 5
vocab_size = 20000
num_negative = 5
def get_min_count(sents, k=None):
    '''
    Find the word2vec `min_count` that truncates the vocabulary to the top-k
    tokens.

    Args:
        sents: A list of lists. E.g., [["I", "am", "a", "boy", "."], ["You", "are", "a", "girl", "."]]
        k: Optional vocabulary-size cutoff; defaults to the module-level
            `vocab_size`, so existing callers are unaffected.
    Returns:
        min_count: A uint. Should be set as the parameter value of word2vec `min_count`.
    '''
    # Counter is the stdlib parent class of nltk.FreqDist and behaves
    # identically for most_common counts, so no nltk dependency is needed.
    from collections import Counter
    from itertools import chain
    if k is None:
        k = vocab_size
    fdist = Counter(chain.from_iterable(sents))
    # The count of the top-kth word. most_common(k) returns everything when k
    # exceeds the vocabulary size, so [-1] is safe for any nonempty corpus.
    return fdist.most_common(k)[-1][1]
min_count = get_min_count(sentences)
# Train a fresh model on the ad corpus, then copy its vocabulary/weight layout
# into the pretrained model via reset_from.
# NOTE(review): `size=` is the gensim<4 keyword (renamed `vector_size` in
# gensim 4.x) — confirm the installed gensim version.
model2 = gensim.models.Word2Vec(sentences, size=vector_size, min_count=min_count, negative= num_negative, window=window_size)
model.reset_from(model2)
#nltk.download()
from nltk.corpus import stopwords
# Shadows the imported module name with the Swedish stopword list (a list).
stopwords = stopwords.words('swedish')
sent_wo_stop = []
for s in sentences:
    sent_wo_stop.append([word for word in s if word not in stopwords])
print(sent_wo_stop[0])
print(sentences[0])
#model.wv.similarity('47', 'nischat')
#model.wv.similar_by_vector()
# NOTE(review): `mean_vector` is defined in a later cell — this line only
# works if the notebook cells were executed out of order.
model['driven'] == mean_vector(model, ['driven'])
model.wv.similarity('Hej', 'hej')
# Project all word vectors to 2-D with PCA and annotate the first ~30 words.
X = model.wv.__getitem__(model.wv.vocab)
pca = PCA(n_components=2)
result = pca.fit_transform(X)
# create a scatter plot of the projection
pyplot.scatter(result[0:29, 0], result[0:29, 1])
words = list(model.wv.vocab)
for i, word in enumerate(words):
    if i > 30:
        break
    pyplot.annotate(word, xy=(result[i, 0], result[i, 1]))
pyplot.show()
def mean_vector(model, sentence):
    """Embed *sentence* as the element-wise mean of its word vectors.

    Out-of-vocabulary words are ignored; if no word of the sentence is in the
    model's vocabulary, an empty list is returned (callers test len(...) > 0).
    """
    vocab = model.wv.vocab
    known = [w for w in sentence if w in vocab]
    if not known:
        return []
    return np.mean(model.wv.__getitem__(known), axis=0)
# Soft-skill phrases (Swedish); each is tokenised into a word list so it can
# be embedded like an ad sentence.
soft_skills1 = ['stark arbetsmoral', 'pålitlig', 'positiv attityd', 'driven', 'lagspelare', 'organiserad',
                'presterar bra under stress', 'effektiv kommunikatör', 'flexibel', 'självsäker']
soft_skills = [phrase.split() for phrase in soft_skills1]
print(soft_skills)
# Combined corpus: ad sentences plus the tokenised soft-skill phrases.
sent = sentences + soft_skills
# +
# Embed every stopword-free ad as the mean of its word vectors; ads with no
# in-vocabulary words are dropped (mean_vector returns [] for them).
means = []
sentes = []
i = 0
for s in sent_wo_stop:#sentences:
    s_new = mean_vector(model, s)
    if len(s_new) > 0:
        means.append(s_new)
        sentes.append(s)
    i += 1
# -
# Same embedding for the soft-skill phrases, lower-casing the first word when
# it is capitalised.
# NOTE(review): s[0].isupper() is True only when the whole first word is
# upper-case, not merely title-case — confirm this is the intended check.
means_soft_skills = []
softs = []
i = 0
for s in soft_skills:
    if s[0].isupper():
        s = [x.lower() for x in s]
    s_new = mean_vector(model, s)
    if len(s_new) > 0:
        means_soft_skills.append(s_new)
        softs.append(soft_skills1[i])
    i += 1
# Bare expressions: shown as cell output.
softs
len(sent)
# +
# 2-D PCA of the ad embeddings, annotated with each ad's number token
# (the last token of every sentence).
pca = PCA(n_components=2)
result1 = pca.fit_transform(means)
pyplot.figure(num=None, figsize=(18, 16), dpi=100, facecolor='w', edgecolor='k')
pyplot.scatter(result1[:, 0], result1[:, 1])
for i, s in enumerate(sentes):
    pyplot.annotate(s[-1], xy=(result1[i, 0], result1[i, 1]))
pyplot.show()
# +
# 2-D PCA of the soft-skill embeddings, annotated with the original phrases.
pca = PCA(n_components=2)
result1 = pca.fit_transform(means_soft_skills)
pyplot.figure(num=None, figsize=(10, 8), dpi=100, facecolor='w', edgecolor='k')
pyplot.scatter(result1[:, 0], result1[:, 1])
for i, s in enumerate(softs):
    pyplot.annotate(s, xy=(result1[i, 0], result1[i, 1]))
pyplot.show()
# -
result1
len(means)
| extract_ads.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Logistic Regression of Classic Titanic Survivor Dataset
# ## Import Libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# ## The Data
train=pd.read_csv("train.csv")
train.head()
# # Exploratory Data Analysis
train.describe()
train.info()
# # Exploratory data analysis
# Count missing values per column, then visualise them as a heatmap.
train.isnull().sum()
sns.heatmap(train.isnull(),yticklabels=False)
sns.set_style('whitegrid')
# Survival counts overall, by sex and by passenger class.
sns.countplot(x='Survived',data=train,palette='RdBu_r')
sns.countplot(x='Survived',hue='Sex',data=train,palette='RdBu_r')
sns.countplot(x='Survived', hue='Pclass',data=train)
sns.displot(train['Age'].dropna(),kde=False,bins=10,alpha = 0.5)
sns.countplot(x='SibSp',data=train)
train['Fare'].hist(color='green',bins=40,figsize=(8,4))
# # Data Cleaning
# Age varies with passenger class; this boxplot motivates the
# class-conditional imputation implemented in impute_age below.
plt.figure(figsize=(12,6))
sns.boxplot(x='Pclass', y='Age', data=train)
def impute_age(cols):
    """Fill a missing Age with a typical age for the passenger's class.

    Intended for ``train[['Age', 'Pclass']].apply(impute_age, axis=1)``.

    Parameters:
        cols: a row slice (pandas Series) with 'Age' and 'Pclass' entries.
    Returns:
        The original Age when present, otherwise a class-dependent constant
        (ages read off the Pclass/Age boxplot above).
    """
    # Label-based access: positional cols[0]/cols[1] on a Series is deprecated
    # and will raise in future pandas versions.
    Age = cols['Age']
    Pclass = cols['Pclass']
    if pd.isnull(Age):
        if Pclass ==1:
            return 40
        elif Pclass ==2:
            return 30
        else:
            return 24
    else:
        return Age
# Apply class-conditional age imputation row-wise.
train['Age'] = train[['Age', 'Pclass']].apply(impute_age, axis = 1)
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')
# Drop the Cabin column entirely (presumably too sparse — see heatmap above).
train.drop('Cabin',axis=1,inplace=True)
train.head()
# ## Converting Categorical Features
train.dropna(inplace=True)
train.info()
# One-hot encode Sex and Embarked.
# NOTE(review): drop_first=False keeps every dummy level, which introduces
# perfect multicollinearity among the dummies; consider drop_first=True.
sex=pd.get_dummies(train['Sex'],drop_first=False)
embark=pd.get_dummies(train['Embarked'],drop_first=False)
train.drop(['Sex','Embarked','Name','Ticket'],axis=1,inplace=True)
train=pd.concat([train,sex,embark],axis=1)
train.head()
sns.heatmap(train.corr(),annot=True,cmap='RdYlGn',linewidths=0.2)
fig=plt.gcf()
fig.set_size_inches(16,10)
plt.show()
# # Building model
#
# ## Train Test Split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(train.drop('Survived',axis=1),
                                                    train['Survived'], test_size=0.30,
                                                    random_state=101)
# ## Training and Predicting
from sklearn.linear_model import LogisticRegression
logmodel = LogisticRegression(max_iter= 1000)
logmodel.fit(X_train,y_train)
predictions = logmodel.predict(X_test)
# NOTE(review): this first `accuracy` is actually the confusion matrix — the
# variable name is misleading; it is overwritten with the real accuracy below.
from sklearn.metrics import confusion_matrix
accuracy=confusion_matrix(y_test,predictions)
accuracy
from sklearn.metrics import accuracy_score
accuracy=accuracy_score(y_test,predictions)
accuracy
predictions
# ## Evaluation
from sklearn.metrics import classification_report
print(classification_report(y_test,predictions))
| Titanic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mobilenet
#
# MobileNet is a simple but efficient and not very computationally intensive convolutional neural networks for mobile vision applications. MobileNet is widely used in many real-world applications which includes object detection, fine-grained classifications, face attributes, and localization.
#
# MobileNet introduces **Depth-wise separable convolutions** namely,
#
# + Depth-wise convolution
# + Point-wise convolution
#
#
# The Depth-wise separable convolution is comprising of two layers, the depth-wise convolution, and the point-wise convolution. Basically the first layer is used to filter the input channels and the second layer is used to combine them to create a new feature.
#
# In the figure below we can see the working of these convolutions and how they decrease the cost of computation.
#
# 
#
# The entire Network Structure
#
# 
#
# Left: Standard Convolution followed by batch normalization and RELU. Right: Depthwise convolution layer and pointwise convolution layer, each followed by batch normalization and RELU.
#
# 
#
# From the above image, we can see that every convolution layer followed by a batch normalization and a ReLU. Also, a final average pooling is been introduced just before the fully connected layer to reduce the spatial dimension to 1.
# Note that the above architecture has 28 layers by counting widthwise and pointwise convolution as separate layers.
#
# ### Parameters of MobileNet
#
# Although the base MobileNet architecture is already small and computationally not very intensive, it has two different global hyperparameters to effectively reduce the computational cost further. One is the width multiplier and the other is the resolution multiplier.
#
# + **Width Multiplier: Thinner Models**
# For further reduction of computational cost, they introduced a simple parameter called the Width Multiplier, also referred to as α.
# For each layer, the width multiplier α will be multiplied with the input and the output channels(N and M) in order to narrow a network. Computational Cost: Depthwise separable convolution with width multiplier
# Here α will vary from 0 to 1, with typical values of [1, 0.75, 0.5 and 0.25]. When α = 1, called as baseline MobileNet and α < 1, called as reduced MobileNet. Width Multiplier has the effect of reducing computational cost by α².
#
#
# + **Resolution Multiplier: Reduced Representation**
# The second parameter to reduce the computational cost effectively. Also known as ρ.
# For a given layer, the resolution multiplier ρ will be multiplied with the input feature map. Now we can express the computational cost by applying width multiplier and resolution multiplier
#
#
| CNN Architectures/2017_MobileNet.ipynb |
# +
"""
Weddings II - Record RSVPs
"""
# Create a method in Guest to record a guests's rsvp to your invitation. It should record whether they have any dietary restrictions (e.g. vegetarian, kosher, halal, etc.) and whether they're bringing a plus one. If they are bringing a plus one, it should record the name of the plus one and his/her dietary restrictions if any. These values should be stored in instance attributes.
# Try out this method on at least one instance of Guest and at least one instance of Bridesmaid.
class Guest():
    """A wedding guest: contact details, invitation status, and RSVP state.

    RSVP-related attributes stay None until record_rsvp() is called.
    """

    def __init__(self, name, phone, invite_sent = False):
        self.name = name
        self.phone = phone
        self.invite_sent = invite_sent
        # RSVP state, filled in by record_rsvp().
        self.diet = None
        self.rsvp = None
        self.plus_one = None
        self.plus_one_name = None
        self.plus_one_diet = None

    def send_invite(self):
        """Mark this guest's invitation as sent."""
        self.invite_sent = True
        return None

    def record_rsvp(self, rsvp, diet, plus_one, plus_one_name = None, plus_one_diet = None):
        """Record a guest's RSVP.

        Parameters:
            rsvp: 'yes' or 'no' in any letter case (generalizes the original,
                which only accepted 'yes'/'Yes'/'no'/'No'); anything else
                leaves self.rsvp unchanged.
            diet: dietary restriction (e.g. 'vegetarian'), or None.
            plus_one: 'yes'/'no' in any letter case; the plus one's name and
                diet are recorded only when the answer is yes.
            plus_one_name: name of the plus one, if any.
            plus_one_diet: dietary restriction of the plus one, if any.
        """
        answer = (rsvp or '').strip().lower()
        if answer == 'no':
            self.rsvp = False
        elif answer == 'yes':
            self.rsvp = True
        # Diet is recorded regardless of the rsvp answer (original behavior).
        self.diet = diet
        companion = (plus_one or '').strip().lower()
        if companion == 'no':
            self.plus_one = False
        elif companion == 'yes':
            self.plus_one = True
            self.plus_one_name = plus_one_name
            self.plus_one_diet = plus_one_diet
class Bridesmaid(Guest):
    """A bridesmaid guest; shares all Guest state and behavior."""

    def __init__(self, name, phone, invite_sent = False):
        # Reuse Guest's initializer instead of duplicating the attribute
        # setup (the original copied all nine assignments verbatim).
        super().__init__(name, phone, invite_sent)
# Demo: two plain guests and one bridesmaid; exercise record_rsvp and print
# the recorded state for each.
klauss = Guest('<NAME>', 3748807716)
michelle = Guest('<NAME>', 9205150102)
angelika = Bridesmaid('<NAME>', 2019352087)
# Klauss is a vegetarian, and his plus one Vincent keeps halal.
klauss.record_rsvp('yes', 'Vegetarian', 'Yes', '<NAME>', 'Halal')
print('Klauss')
print(klauss.rsvp)
print(klauss.diet)
print(klauss.plus_one)
print(klauss.plus_one_name)
print(klauss.plus_one_diet)
print('\n')
# Michelle has no dietary restrictions and is not bringing a plus one.
michelle.record_rsvp('Yes', None, 'No')
print('Michelle')
print(michelle.rsvp)
print(michelle.diet)
print(michelle.plus_one)
print(michelle.plus_one_name)
print(michelle.plus_one_diet)
print('\n')
# Angelika keeps kosher, but her plus one Claude has no dietary restrictions.
angelika.record_rsvp('Yes', 'Kosher', 'yes', '<NAME>', None)
print('Angelika')
print(angelika.rsvp)
print(angelika.diet)
print(angelika.plus_one)
print(angelika.plus_one_name)
print(angelika.plus_one_diet)
| pset_classes/wedding_guests/solutions/nb/p2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
from tqdm import tqdm
# -
# Load the date-corrected sheet: strip the '*' markers from accession IDs and
# keep only the rows flagged as corrected by Brandt.
df = pd.read_csv('../data/20160411_Minto_Run2 Sequences.corrected_dates_20160506.csv', index_col=0)
# df[df['Dates_Corrected_By_Brandt']]
df['Sequence Accession'] = df['Sequence Accession'].str.strip('*')
df.set_index('Sequence Accession', inplace=True)
df = df[df['Date_Corrected_By_Brandt'] == '*']
df.shape
# The "corrected" data are only correct w.r.t. the "Date_Corrected_By_Brandt" column. The "Date_still_incorrect" column has been taken care of on the other side.
# Load the backup (IRD) sheet with the same accession index so rows can be
# matched one-to-one for the date transfer below.
ird_df = pd.read_csv('../data/20160411_Minto_Run2 Sequences.backup_20160518.csv')
ird_df['Sequence Accession'] = ird_df['Sequence Accession'].str.strip('*')
ird_df.set_index('Sequence Accession', inplace=True)
ird_df.head()
# Copy every corrected Collection Date into the backup sheet by accession.
for r, d in tqdm(df.iterrows()):
    # DataFrame.set_value was deprecated in pandas 0.21 and removed in 1.0;
    # .at is the fast scalar-setter replacement with identical semantics here.
    ird_df.at[r, 'Collection Date'] = d['Collection Date']
# Show the last updated row (bare expression: notebook cell output).
ird_df.loc[r]
| notebooks/01-explore-corrected.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.11 64-bit (''oneNeuron'': conda)'
# name: python3
# ---
# +
import pandas as pd
from utils.model import Perceptron
from utils.all_utils import prepare_data, save_model, save_plot
# Truth table for logical OR: two binary inputs x1/x2 and target y.
OR = {
    "x1": [0,0,1,1],
    "x2": [0,1,0,1],
    "y": [0,1,1,1],
}
df = pd.DataFrame(OR)
df
# Split the frame into feature matrix X and target vector y.
X,y = prepare_data(df)
ETA = 0.3 # learning rate; choose a value between 0 and 1
EPOCHS = 10
# Train a single perceptron on the OR data, report its loss, then persist the
# model and a decision-boundary plot.
model_OR = Perceptron(eta=ETA, epochs=EPOCHS)
model_OR.fit(X, y)
_ = model_OR.total_loss()
save_model(model_OR, filename="or.model")
save_plot(df, "or.png", model_OR)
| Demo-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # `yacman` features and usage
#
# This short tutorial show you the features of `yacman` package in action.
#
# First, let's prepare some data to work with
# +
import yaml
# Example payload used throughout this tutorial, expressed three ways: as a
# Python dict, as an equivalent YAML string, and as a YAML file on disk.
yaml_dict = {'cfg_version': 0.1, 'lvl1': {'lvl2': {'lvl3': {'entry': ['val1', 'val2']}}}}
yaml_str = """\
cfg_version: 0.1
lvl1:
  lvl2:
    lvl3:
      entry: ["val1","val2"]
"""
filepath = "test.yaml"
with open(filepath, 'w') as f:
    # NOTE(review): yaml.dump returns None when given a stream, so `data`
    # ends up None here; the assignment is effectively unused.
    data = yaml.dump(yaml_dict, f)
import yacman
# -
# ## `YacAttMap` object creation
#
# There are multiple ways to initialize an object of `YacAttMap` class:
# 1. **Read data from a YAML-formatted file**
# Way 1: read data from a YAML-formatted file on disk.
yacmap = yacman.YacAttMap(filepath=filepath)
yacmap
# Way 2: read data from an in-memory `entries` mapping.
yacmap = yacman.YacAttMap(entries=yaml_dict)
yacmap
# Way 3: read data from a YAML-formatted string.
yacmap = yacman.YacAttMap(yamldata=yaml_str)
yacmap
# ## File locks; race-free writing
# Instances of `YacAttMap` class support race-free writing and file locking, so that **it's safe to use them in multi-user contexts**
#
# They can be created with or without write capabilities. Writable objects create a file lock, which prevents other processes managed by `yacman` from updating the source config file.
#
# `writable` argument in the object constructor can be used to toggle writable mode. The source config file can be updated on disk (using `write` method) only if the `YacAttMap` instance is in writable mode
# +
yacmap = yacman.YacAttMap(filepath=filepath, writable=False)
try:
    # Read-only objects refuse to write back to the source file.
    yacmap.write()
except OSError as e:
    print("Error caught: {}".format(e))
# -
# The write capabilities can be granted to an object:
yacmap = yacman.YacAttMap(filepath=filepath, writable=False)
yacmap.make_writable()
yacmap.write()
# Or withheld:
yacmap.make_readonly()
# If a file is currently locked by other `YacAttMap` object. The object will not be made writable/created with write capabilities until the lock is gone. If the lock persists, the action will fail (with a `RuntimeError`) after a selected `wait_time`, which is 10s by default:
# +
yacmap = yacman.YacAttMap(filepath=filepath, writable=True)
try:
    # A second writable handle on the same file must wait for the lock;
    # wait_max=1 makes the demo fail fast instead of the default 10 s.
    yacmap1 = yacman.YacAttMap(filepath=filepath, writable=True, wait_max=1)
except RuntimeError as e:
    print("\nError caught: {}".format(e))
yacmap.make_readonly()
# -
# Lastly, the `YacAttMap` class instances **can be used in a context manager**. This way the source config file will be locked, possibly updated (depending on what the user chooses to do), safely written to and unlocked with a single line of code:
# +
yacmap = yacman.YacAttMap(filepath=filepath)
with yacmap as y:
    # Mutations inside the context are written back and the lock released
    # automatically on exit.
    y.test = "test"
yacmap1 = yacman.YacAttMap(filepath=filepath)
yacmap1
# -
# ## Key aliases in `AliasedYacAttMap`
#
# `AliasedYacAttMap` is a child class of `YacAttMap` that supports top-level key aliases.
#
# ### Defining the aliases
#
# There are two ways the aliases can be defined at the object construction stage:
#
# 1. By passing a literal aliases dictionary
# 2. By passing a function to be executed on the object itself that returns the dictionary
#
# In any case, the resulting aliases mapping has to follow the format presented below:
# Aliases must map each top-level key to a list of alias strings.
aliases = {
    "key_1": ["first_key", "key_one"],
    "key_2": ["second_key", "key_two", "fav_key"],
    "key_3": ["third_key", "key_three"]
}
# #### Literal aliases dictionary
# The `aliases` argument below is a plain dict mapping keys to lists of
# aliases; this exact format is strictly enforced.
aliased_yacmap = yacman.AliasedYacAttMap(entries={'key_1': 'val_1', 'key_2': 'val_2', 'key_3': 'val_3'},
                                         aliases=aliases)
print(aliased_yacmap)
# With aliases set, the object can be keyed by the literal key or any alias.
aliased_yacmap["key_1"] == aliased_yacmap["first_key"]
aliased_yacmap["key_two"] == aliased_yacmap["fav_key"]
# #### Aliases-returning function
# Here `aliases` is a callable taking the object itself and returning the
# aliases mapping — useful when the aliases live inside the entries:
entries={
    'key_1': {'value': 'val_1', 'aliases': ['first_key']},
    'key_2': {'value': 'val_2', 'aliases': ['second_key']},
    'key_3': {'value': 'val_3', 'aliases': ['third_key']}
}
aliased_yacmap = yacman.AliasedYacAttMap(entries=entries,
                                         aliases=lambda x: {k: v.__getitem__("aliases", expand=False) for k, v in x.items()})
print(aliased_yacmap)
aliased_yacmap["key_1"] == aliased_yacmap["first_key"]
| docs/usage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os

from setuptools import setup

# Build the README path from this file's *directory*. The original joined
# onto os.path.realpath(__file__) itself, producing ".../setup.py/README.rst"
# — a path that can never exist, so the open() always failed.
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'README.rst')) as readme:
    README = readme.read()

# Allow setup.py to be run from any path (cd to this file's parent directory).
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='django-resumator',
    version='1.1.6',
    packages=['resumator'],
    install_requires=[
        'django-solo>=1.1.0',
        'Pillow>=3.0.0',
    ],
    include_package_data=True,
    license='MIT License',
    description='A lightweight Django app to create Web-based resumes.',
    long_description=README,
    url='https://github.com/AmmsA/django-resumator',
    author='<NAME>',
    author_email='<EMAIL>',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
# -
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment 3: Hello Vectors
#
# Welcome to this week's programming assignment on exploring word vectors.
# In natural language processing, we represent each word as a vector consisting of numbers.
# The vector encodes the meaning of the word. These numbers (or weights) for each word are learned using various machine
# learning models, which we will explore in more detail later in this specialization. Rather than make you code the
# machine learning models from scratch, we will show you how to use them. In the real world, you can always load the
# trained word vectors, and you will almost never have to train them from scratch. In this assignment, you will:
#
# - Predict analogies between words.
# - Use PCA to reduce the dimensionality of the word embeddings and plot them in two dimensions.
# - Compare word embeddings by using a similarity measure (the cosine similarity).
# - Understand how these vector space models work.
#
#
#
# ## 1.0 Predict the Countries from Capitals
#
# In the lectures, we have illustrated the word analogies
# by finding the capital of a country from the country.
# We have changed the problem a bit in this part of the assignment. You are asked to predict the **countries**
# that corresponds to some **capitals**.
# You are playing trivia against some second grader who just took their geography test and knows all the capitals by heart.
# Thanks to NLP, you will be able to answer the questions properly. In other words, you will write a program that can give
# you the country by its capital. That way you are pretty sure you will win the trivia game. We will start by exploring the data set.
#
# <img src = 'map.jpg' width="width" height="height" style="width:467px;height:300px;"/>
#
# ### 1.1 Importing the data
#
# As usual, you start by importing some essential Python libraries and then load the dataset.
# The dataset will be loaded as a [Pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/getting_started/dsintro.html),
# which is a very common method in data science.
# This may take a few minutes because of the large size of the data.
# +
# Run this cell to import packages.
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from utils import get_vectors
# +
# Load the space-delimited capitals analogy dataset and name its columns:
# each row pairs (city1, country1) with (city2, country2).
data = pd.read_csv('capitals.txt', delimiter=' ')
data.columns = ['city1', 'country1', 'city2', 'country2']
# print first five elements in the DataFrame
data.head(5)
# -
# ***
#
# ### To Run This Code On Your Own Machine:
# Note that because the original google news word embedding dataset is about 3.64 gigabytes,
# the workspace is not able to handle the full file set. So we've downloaded the full dataset,
# extracted a sample of the words that we're going to analyze in this assignment, and saved
# it in a pickle file called `word_embeddings_capitals.p`
#
# If you want to download the full dataset on your own and choose your own set of word embeddings,
# please see the instructions and some helper code.
#
# - Download the dataset from this [page](https://code.google.com/archive/p/word2vec/).
# - Search in the page for 'GoogleNews-vectors-negative300.bin.gz' and click the link to download.
# Copy-paste the code below and run it on your local machine after downloading
# the dataset to the same directory as the notebook.
#
# ```python
# import nltk
# from gensim.models import KeyedVectors
#
#
# embeddings = KeyedVectors.load_word2vec_format('./GoogleNews-vectors-negative300.bin', binary = True)
# f = open('capitals.txt', 'r').read()
# set_words = set(nltk.word_tokenize(f))
# select_words = words = ['king', 'queen', 'oil', 'gas', 'happy', 'sad', 'city', 'town', 'village', 'country', 'continent', 'petroleum', 'joyful']
# for w in select_words:
# set_words.add(w)
#
# def get_word_embeddings(embeddings):
#
# word_embeddings = {}
# for word in embeddings.vocab:
# if word in set_words:
# word_embeddings[word] = embeddings[word]
# return word_embeddings
#
#
# # Testing your function
# word_embeddings = get_word_embeddings(embeddings)
# print(len(word_embeddings))
# pickle.dump( word_embeddings, open( "word_embeddings_subset.p", "wb" ) )
# ```
#
# ***
# Now we will load the word embeddings as a [Python dictionary](https://docs.python.org/3/tutorial/datastructures.html#dictionaries).
# As stated, these have already been obtained through a machine learning algorithm.
word_embeddings = pickle.load(open("word_embeddings_subset.p", "rb"))
len(word_embeddings) # there should be 243 words that will be used in this assignment
# Each of the word embedding is a 300-dimensional vector.
print("dimension: {}".format(word_embeddings['Spain'].shape[0]))
# ### Predict relationships among words
#
# Now you will write a function that will use the word embeddings to predict relationships among words.
# * The function will take as input three words.
# * The first two are related to each other.
# * It will predict a 4th word which is related to the third word in a similar manner as the two first words are related to each other.
# * As an example, "Athens is to Greece as Bangkok is to ______"?
# * You will write a program that is capable of finding the fourth word.
# * We will give you a hint to show you how to compute this.
#
# A similar analogy would be the following:
#
# <img src = 'vectors.jpg' width="width" height="height" style="width:467px;height:200px;"/>
#
# You will implement a function that can tell you the capital of a country.
# You should use the same methodology shown in the figure above. To do this,
# you'll first compute the cosine similarity metric or the Euclidean distance.
# ### 1.2 Cosine Similarity
#
# The cosine similarity function is:
#
# $$\cos (\theta)=\frac{\mathbf{A} \cdot \mathbf{B}}{\|\mathbf{A}\|\|\mathbf{B}\|}=\frac{\sum_{i=1}^{n} A_{i} B_{i}}{\sqrt{\sum_{i=1}^{n} A_{i}^{2}} \sqrt{\sum_{i=1}^{n} B_{i}^{2}}}\tag{1}$$
#
# $A$ and $B$ represent the word vectors and $A_i$ or $B_i$ represent index i of that vector.
# * Note that if A and B are identical, you will get $cos(\theta) = 1$.
# * Otherwise, if they are the total opposite, meaning, $A= -B$, then you would get $cos(\theta) = -1$.
# * If you get $cos(\theta) =0$, that means that they are orthogonal (or perpendicular).
# * Numbers between 0 and 1 indicate a similarity score.
# * Numbers between -1 and 0 indicate a dissimilarity score.
#
# **Instructions**: Implement a function that takes in two word vectors and computes the cosine distance.
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li> Python's<a href="https://docs.scipy.org/doc/numpy/reference/" > NumPy library </a> adds support for linear algebra operations (e.g., dot product, vector norm ...).</li>
# <li>Use <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html" > numpy.dot </a>.</li>
# <li>Use <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html">numpy.linalg.norm </a>.</li>
# </ul>
# </p>
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def cosine_similarity(A, B):
    '''
    Input:
        A: a numpy array which corresponds to a word vector
        B: A numpy array which corresponds to a word vector
    Output:
        cos: numerical number representing the cosine similarity between A and B.
    '''
    ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
    # cosine similarity = (A . B) / (||A|| * ||B||)
    numerator = np.dot(A, B)
    denominator = np.linalg.norm(A) * np.linalg.norm(B)
    cos = numerator / denominator
    ### END CODE HERE ###
    return cos
# +
# feel free to try different words
king = word_embeddings['king']
queen = word_embeddings['queen']
cosine_similarity(king, queen)
# -
# **Expected Output**:
#
# $\approx$ 0.6510956
# ### 1.3 Euclidean distance
#
# You will now implement a function that computes the similarity between two vectors using the Euclidean distance.
# Euclidean distance is defined as:
#
# $$ \begin{aligned} d(\mathbf{A}, \mathbf{B})=d(\mathbf{B}, \mathbf{A}) &=\sqrt{\left(A_{1}-B_{1}\right)^{2}+\left(A_{2}-B_{2}\right)^{2}+\cdots+\left(A_{n}-B_{n}\right)^{2}} \\ &=\sqrt{\sum_{i=1}^{n}\left(A_{i}-B_{i}\right)^{2}} \end{aligned}$$
#
# * $n$ is the number of elements in the vector
# * $A$ and $B$ are the corresponding word vectors.
# * The more similar the words, the more likely the Euclidean distance will be close to 0.
#
# **Instructions**: Write a function that computes the Euclidean distance between two vectors.
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li>Use <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html" > numpy.linalg.norm </a>.</li>
# </ul>
# </p>
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def euclidean(A, B):
    """
    Input:
        A: a numpy array which corresponds to a word vector
        B: A numpy array which corresponds to a word vector
    Output:
        d: numerical number representing the Euclidean distance between A and B.
    """
    ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
    # Euclidean distance is the L2 norm of the difference vector:
    # sqrt(sum_i (A_i - B_i)^2)
    difference = A - B
    d = np.sqrt(np.sum(difference * difference))
    ### END CODE HERE ###
    return d
# Test your function
euclidean(king, queen)
# **Expected Output:**
#
# 2.4796925
# ### 1.4 Finding the country of each capital
#
# Now, you will use the previous functions to compute similarities between vectors,
# and use these to find the capital cities of countries. You will write a function that
# takes in three words, and the embeddings dictionary. Your task is to find the
# capital cities. For example, given the following words:
#
# - 1: Athens 2: Greece 3: Baghdad,
#
# your task is to predict the country 4: Iraq.
#
# **Instructions**:
#
# 1. To predict the capital you might want to look at the *King - Man + Woman = Queen* example above, and implement that scheme into a mathematical function, using the word embeddings and a similarity function.
#
# 2. Iterate over the embeddings dictionary and compute the cosine similarity score between your vector and the current word embedding.
#
# 3. You should add a check to make sure that the word you return is not any of the words that you fed into your function. Return the one with the highest score.
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def get_country(city1, country1, city2, embeddings):
    """
    Input:
        city1: a string (the capital city of country1)
        country1: a string (the country of capital1)
        city2: a string (the capital city of country2)
        embeddings: a dictionary where the keys are words and values are their embeddings
    Output:
        country: a tuple (word, similarity) holding the most likely country for
            city2 and its cosine similarity score. Returns '' if the embeddings
            dictionary contains no candidate words.
    """
    ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
    # the three input words must never be returned as the prediction
    group = set((city1, country1, city2))
    # look up the three known embeddings
    city1_emb = embeddings[city1]
    country1_emb = embeddings[country1]
    city2_emb = embeddings[city2]
    # analogy vector for country 2: country1 - city1 + city2
    # (same scheme as King - Man + Woman = Queen)
    vec = country1_emb - city1_emb + city2_emb
    # cosine similarity lies in [-1, 1], so -1 is a safe initial lower bound
    similarity = -1
    country = ''
    # scan the whole vocabulary for the embedding closest to the analogy vector
    # (iterate items() to avoid a second dictionary lookup per word)
    for word, word_emb in embeddings.items():
        # skip the input words themselves
        if word not in group:
            cur_similarity = cosine_similarity(vec, word_emb)
            # keep the best-scoring candidate seen so far
            if cur_similarity > similarity:
                similarity = cur_similarity
                country = (word, similarity)
    ### END CODE HERE ###
    return country
# Testing your function, note to make it more robust you can return the 5 most similar words.
get_country('Athens', 'Greece', 'Cairo', word_embeddings)
# **Expected Output:**
#
# ('Egypt', 0.7626821)
# ### 1.5 Model Accuracy
#
# Now you will test your new function on the dataset and check the accuracy of the model:
#
# $$\text{Accuracy}=\frac{\text{Correct # of predictions}}{\text{Total # of predictions}}$$
#
# **Instructions**: Write a program that can compute the accuracy on the dataset provided for you. You have to iterate over every row to get the corresponding words and feed them into you `get_country` function above.
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li>Use <a href="https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.iterrows.html" > pandas.DataFrame.iterrows </a>.</li>
# </ul>
# </p>
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def get_accuracy(word_embeddings, data):
    '''
    Input:
        word_embeddings: a dictionary where the key is a word and the value is its embedding
        data: a pandas dataframe containing all the country and capital city pairs
    Output:
        accuracy: the accuracy of the model
    '''
    ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
    # count how many rows the analogy model predicts correctly
    num_correct = 0
    for _, row in data.iterrows():
        # unpack the two (capital, country) pairs of this row
        city1, country1 = row['city1'], row['country1']
        city2, country2 = row['city2'], row['country2']
        # predict country2 from the other three words
        predicted_country2, _ = get_country(city1, country1, city2, word_embeddings)
        # a prediction counts only when it matches the labelled country exactly
        if predicted_country2 == country2:
            num_correct += 1
    # accuracy = correct predictions / total number of rows
    accuracy = num_correct / len(data)
    ### END CODE HERE ###
    return accuracy
# **NOTE: The cell below takes about 30 SECONDS to run.**
accuracy = get_accuracy(word_embeddings, data)
print(f"Accuracy is {accuracy:.2f}")
# **Expected Output:**
#
# $\approx$ 0.92
# # 3.0 Plotting the vectors using PCA
#
# Now you will explore the distance between word vectors after reducing their dimension.
# The technique we will employ is known as
# [*principal component analysis* (PCA)](https://en.wikipedia.org/wiki/Principal_component_analysis).
# As we saw, we are working in a 300-dimensional space in this case.
# Although from a computational perspective we were able to perform a good job,
# it is impossible to visualize results in such high dimensional spaces.
#
# You can think of PCA as a method that projects our vectors in a space of reduced
# dimension, while keeping the maximum information about the original vectors in
# their reduced counterparts. In this case, by *maximum infomation* we mean that the
# Euclidean distance between the original vectors and their projected siblings is
# minimal. Hence vectors that were originally close in the embeddings dictionary,
# will produce lower dimensional vectors that are still close to each other.
#
# You will see that when you map out the words, similar words will be clustered
# next to each other. For example, the words 'sad', 'happy', 'joyful' all describe
# emotion and are supposed to be near each other when plotted.
# The words: 'oil', 'gas', and 'petroleum' all describe natural resources.
# Words like 'city', 'village', 'town' could be seen as synonyms and describe a
# similar thing.
#
# Before plotting the words, you need to first be able to reduce each word vector
# with PCA into 2 dimensions and then plot it. The steps to compute PCA are as follows:
#
# 1. Mean normalize the data
# 2. Compute the covariance matrix of your data ($\Sigma$).
# 3. Compute the eigenvectors and the eigenvalues of your covariance matrix
# 4. Multiply the first K eigenvectors by your normalized data. The transformation should look something as follows:
#
# <img src = 'word_embf.jpg' width="width" height="height" style="width:800px;height:200px;"/>
# **Instructions**:
#
# You will write a program that takes in a data set where each row corresponds to a word vector.
# * The word vectors are of dimension 300.
# * Use PCA to change the 300 dimensions to `n_components` dimensions.
# * The new matrix should be of dimension `m, n_components`.
#
# * First de-mean the data
# * Get the eigenvalues using `linalg.eigh`. Use `eigh` rather than `eig` since R is symmetric. The performance gain when using `eigh` instead of `eig` is substantial.
# * Sort the eigenvectors and eigenvalues by decreasing order of the eigenvalues.
# * Get a subset of the eigenvectors (choose how many principle components you want to use using `n_components`).
# * Return the new transformation of the data by multiplying the eigenvectors with the original data.
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li>Use <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html" > numpy.mean(a,axis=None) </a> : If you set <code>axis = 0</code>, you take the mean for each column. If you set <code>axis = 1</code>, you take the mean for each row. Remember that each row is a word vector, and the number of columns are the number of dimensions in a word vector. </li>
# <li>Use <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.cov.html" > numpy.cov(m, rowvar=True) </a>. This calculates the covariance matrix. By default <code>rowvar</code> is <code>True</code>. From the documentation: "If rowvar is True (default), then each row represents a variable, with observations in the columns." In our case, each row is a word vector observation, and each column is a feature (variable). </li>
# <li>Use <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eigh.html" > numpy.linalg.eigh(a, UPLO='L') </a> </li>
# <li>Use <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.html" > numpy.argsort </a> sorts the values in an array from smallest to largest, then returns the indices from this sort. </li>
# <li>In order to reverse the order of a list, you can use: <code>x[::-1]</code>.</li>
# <li>To apply the sorted indices to eigenvalues, you can use this format <code>x[indices_sorted]</code>.</li>
# <li>When applying the sorted indices to eigen vectors, note that each column represents an eigenvector. In order to preserve the rows but sort on the columns, you can use this format <code>x[:,indices_sorted]</code></li>
# <li>To transform the data using a subset of the most relevant principle components, take the matrix multiplication of the eigenvectors with the original data. </li>
# <li>The data is of shape <code>(n_observations, n_features)</code>. </li>
# <li>The subset of eigenvectors are in a matrix of shape <code>(n_features, n_components)</code>.</li>
# <li>To multiply these together, take the transposes of both the eigenvectors <code>(n_components, n_features)</code> and the data (n_features, n_observations).</li>
# <li>The product of these two has dimensions <code>(n_components,n_observations)</code>. Take its transpose to get the shape <code>(n_observations, n_components)</code>.</li>
# </ul>
# </p>
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def compute_pca(X, n_components=2):
    """
    Input:
        X: of dimension (m,n) where each row corresponds to a word vector
        n_components: Number of components you want to keep.
    Output:
        X_reduced: data of dimension (m, n_components) -- the projection of X
            onto its first n_components principal components
    """
    ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
    # mean center the data (column-wise: each column is one feature)
    X_demeaned = X - np.mean(X, axis=0)
    # covariance matrix of the features; rowvar=False because observations
    # are rows and variables are columns
    covariance_matrix = np.cov(X_demeaned, rowvar=False)
    # eigh (not eig) because the covariance matrix is symmetric
    eigen_vals, eigen_vecs = np.linalg.eigh(covariance_matrix)
    # eigh returns eigenvalues in increasing order; we want decreasing,
    # so the largest-variance directions come first
    idx_sorted_decreasing = np.argsort(eigen_vals)[::-1]
    # reorder the eigenvector *columns* to match the sorted eigenvalues
    eigen_vecs_sorted = eigen_vecs[:, idx_sorted_decreasing]
    # keep only the top n_components principal directions
    eigen_vecs_subset = eigen_vecs_sorted[:, :n_components]
    # project: (n_components, n) @ (n, m), transposed back to (m, n_components)
    X_reduced = (np.dot(eigen_vecs_subset.T, X_demeaned.T)).T
    ### END CODE HERE ###
    return X_reduced
# Testing your function
np.random.seed(1)
X = np.random.rand(3, 10)
X_reduced = compute_pca(X, n_components=2)
print("Your original matrix was " + str(X.shape) + " and it became:")
print(X_reduced)
# **Expected Output:**
#
# Your original matrix was: (3,10) and it became:
#
# <table>
# <tr>
# <td>
# 0.43437323
# </td>
# <td>
# 0.49820384
# </td>
# </tr>
# <tr>
# <td>
# 0.42077249
# </td>
# <td>
# -0.50351448
# </td>
# </tr>
# <tr>
# <td>
# -0.85514571
# </td>
# <td>
# 0.00531064
# </td>
# </tr>
# </table>
#
# Now you will use your pca function to plot a few words we have chosen for you.
# You will see that similar words tend to be clustered near each other.
# Sometimes, even antonyms tend to be clustered near each other. Antonyms
# describe the same thing but just tend to be on the other end of the scale
# They are usually found in the same location of a sentence,
# have the same parts of speech, and thus when
# learning the word vectors, you end up getting similar weights. In the next week
# we will go over how you learn them, but for now let's just enjoy using them.
#
# **Instructions:** Run the cell below.
# +
words = ['oil', 'gas', 'happy', 'sad', 'city', 'town',
'village', 'country', 'continent', 'petroleum', 'joyful']
# given a list of words and the embeddings, it returns a matrix with all the embeddings
X = get_vectors(word_embeddings, words)
print('You have 11 words each of 300 dimensions thus X.shape is:', X.shape)
# +
# We have done the plotting for you. Just run this cell.
result = compute_pca(X, 2)
plt.scatter(result[:, 0], result[:, 1])
for i, word in enumerate(words):
plt.annotate(word, xy=(result[i, 0] - 0.05, result[i, 1] + 0.1))
plt.show()
# -
# **What do you notice?**
#
# The word vectors for 'gas', 'oil' and 'petroleum' appear related to each other,
# because their vectors are close to each other. Similarly, 'sad', 'joyful'
# and 'happy' all express emotions, and are also near each other.
| 1 - NLP with Classification and Vector Spaces/Week3/C1_W3_Assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Text Classification
import csv
import collections
import numpy as np
# Load documents...
# +
# Index 0 = 'ham' (legitimate SMS), index 1 = 'spam'.
classes=('ham', 'spam')
textos = []  # raw SMS texts
y = []  # binary labels aligned with textos (1 = spam, 0 = ham)
with open("./SMSSpamCollection.txt", encoding="utf-8") as csvfile:
    reader = csv.DictReader(csvfile, delimiter="\t")
    for i, row in enumerate(reader):
        # encode the label: 1 for spam, 0 for ham
        if row["class"] == classes[1]:
            y.append(1)
        else:
            y.append(0)
        textos.append(row["text"])
# -
# Tokenization
# +
from nltk.tokenize import TweetTokenizer
tokenizer = TweetTokenizer()
docs = []
# Each document becomes a bag-of-words Counter: token -> in-document count.
for t in textos:
    doc = collections.Counter()
    for w in tokenizer.tokenize(t):
        doc[w] += 1
    docs.append(doc)
# -
docs[0]
# Compute IDFs...
# +
voc_length = 3000  # maximum vocabulary size kept for the document vectors
tf = collections.Counter()  # corpus-wide term frequency per token
df = collections.Counter()  # document frequency: number of docs containing the token
for d in docs:
    for w in d:
        tf[w] += d[w]
        df[w] += 1
idfs = {}
# idf(w) = log(N / df(w)); only tokens seen more than twice get a score
for w in tf:
    if tf[w] > 2:
        idfs[w] = np.log(len(docs)/df[w])
# keep the voc_length tokens with the highest IDF (i.e. the rarest ones)
voc = sorted(idfs, key=idfs.get, reverse=True)[:voc_length]
# -
# Represent the documents using the new vocabulary...
# map each vocabulary token (in alphabetical order) to a fixed column index
indice = {}
for i,w in enumerate(sorted(voc)):
    indice[w] = i
docrep = []
# dense count vector per document; out-of-vocabulary tokens are dropped
for d in docs:
    valores = np.zeros([len(voc)])
    for w in d:
        if w in indice:
            valores[ indice[w] ] = d[w]
    docrep.append ( valores )
# ## Logistic Regression
# +
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
# 5-fold cross-validated accuracy of logistic regression on the count vectors
lr = LogisticRegression()
scores = cross_val_score(lr, docrep, y, cv=5)
print("Folds accuracy: ", scores)
# -
# ## Multinomial Naive Bayes
# +
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import cross_val_score
# 5-fold cross-validated accuracy of multinomial naive Bayes
nb = MultinomialNB()
scores = cross_val_score(nb, docrep, y, cv=5)
print("Folds accuracy: ", scores)
# +
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
# NOTE(review): max_iter=10 is far below LinearSVC's default of 1000, so the
# solver will very likely stop before converging (sklearn emits a
# ConvergenceWarning) -- confirm whether this low iteration cap is intentional.
svmc = LinearSVC(max_iter=10)
scores = cross_val_score(svmc, docrep, y, cv=5)
print("Folds accuracy: ", scores)
# -
| #8 NLP/TextClassification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from qiskit import *
from qiskit.tools.visualization import plot_histogram
import math
import random
def randomGen(n):
    """Return a list of n+1 random bits (0 or 1), one per qubit of the circuit."""
    return [random.randint(0, 1) for _ in range(n + 1)]
def xH(a):
    """Prepare the input state on the global circuit `cir`.

    Applies X to qubit 0 (the output qubit) and then a Hadamard to every
    one of the n+1 qubits, followed by a barrier.

    NOTE(review): relies on the module-level `cir` and `n`; the parameter `a`
    is bound to `qbits` but never used afterwards -- confirm whether the
    parameter can be removed.
    """
    qbits= a
    cir.x(0)
    for i in range(n+1):
        cir.h(i)
    cir.barrier()
    return cir
def unknown(a):
    """Append the hidden-bit-string oracle for `a` to the global circuit `cir`,
    then a closing Hadamard layer and measurement of qubits 1..n.

    Returns the circuit drawing.

    NOTE(review): operates on the module-level `cir`; the bit list is consumed
    in reversed order (the last generated bit drives the X on qubit 0).
    """
    # Work on a reversed copy. The original called a.reverse(), which mutated
    # the caller's list in place as a hidden side effect; a copy produces the
    # same circuit without touching the caller's data.
    a = a[::-1]
    # the (reversed) first bit flips the output qubit directly
    if a[0] == 1:
        cir.x(0)
    # every remaining 1-bit contributes a CNOT from its qubit onto qubit 0
    for i in range(1,len(a)):
        #print(i)
        if a[i]== 1:
            cir.cx(i,0)
    cir.barrier()
    # closing Hadamard layer on all qubits
    for i in range(len(a)):
        cir.h(i)
    cir.barrier()
    # measure qubits 1..n into the matching classical bits
    qinput=[]
    for i in range(1,len(a)):
        qinput.append(i)
    cir.measure(qinput, qinput)
    return cir.draw()
# +
# Build and simulate the oracle circuit for a randomly chosen hidden bit string.
n = int(input("enter number of bits:" ))
# n+1 qubits / classical bits: qubit 0 is the output qubit, 1..n hold the query
cir=QuantumCircuit(n+1, n+1)
bits = randomGen(n)
print(bits)
# NOTE(review): xH and unknown both mutate the global `cir` created above.
xH(bits)
print (unknown(bits))
# run 1024 shots on the QASM simulator and report the outcome counts
simulator= Aer.get_backend('qasm_simulator')
results= execute(cir, backend = simulator, shots= 1024).result()
counts=results.get_counts()
print(counts)
# -
#
#
| QuantumCircuitIntro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.7 64-bit (''env'': venv)'
# name: python3
# ---
# +
from pprint import pprint
from gensim.corpora.dictionary import Dictionary
from news_nlp import get_tfidf
from news_nlp_gensim import preprocess, get_doc_similarity_scores
from news import json_dir
import os
import json
urls = list()  # every article URL, in load order
url_dict = dict()  # url -> document text
docs = list()  # document texts aligned with `urls`
for f in os.listdir(json_dir):
    fp = os.path.join(json_dir, f)
    # only pick up JSON files that already went through the NLP pipeline
    if os.path.isfile(fp) and '--nlp--applied' in fp:
        print(f"loading {fp}")
        with open(fp, 'r', encoding='utf8') as fin:
            n_list = json.load(fin)
        for n in n_list:
            # pprint(n)
            # the "document" is the article header concatenated with its summary
            doc = n['ni']['header'] + ' '+ n['ni']['summary']
            urls.append(n['ni']['url'])
            url_dict[n['ni']['url']] = doc
            docs.append(doc)
# keep (index, text) pairs so a similarity run can exclude a doc by index
docs_idx = [[i,d] for i,d in enumerate(docs)]
# +
d_scores = list()  # [tfidf_score, gensim_score] pairs across all comparisons
# DOCUMENT SIMILARITY
found_ids = set()  # indices of docs already assigned to some group
found_groups = list()  # list of groups; each group is a list of [idx, doc]
def process_doc(docs_idx, idx, doc):
    """Score `doc` (at index `idx`) against every other document and record
    the top 5 matches.

    Appends to the module-level `d_scores`, `found_ids` and `found_groups`.
    NOTE(review): this function is run from multiple threads (see below) and
    mutates that shared state without a lock -- list.append/set.add are
    atomic under CPython's GIL, but confirm this is acceptable.
    """
    # compare against every document except this one
    these_docs = [
        [i,d] for i,d in docs_idx if i != idx
        # and i not in found_ids
    ]
    this_group = [[idx, doc]]
    found_ids.add(idx)
    if these_docs:
        ds_tfidf = get_tfidf(doc ,[i_d[1] for i_d in these_docs])
        # gensim scoring is currently disabled; use a neutral 1 everywhere
        ds_gensim = [1] * len(ds_tfidf)
        # ds_gensim = get_doc_similarity_scores(
        #     preprocess(doc), [preprocess(i_d[1]) for i_d in these_docs]
        # )
        # print(document_scores[0])
        # combine (index, tfidf, gensim, text) rows for ranking
        i_s_d = [
            [i_d[0], s_tfidf, s_gensim, i_d[1]]
            for i_d, s_tfidf, s_gensim
            in zip(these_docs, ds_tfidf, ds_gensim)
            # if s_tfidf * s_gensim > 0.1
            # if s_tfidf * s_gensim > 0.2
        ]
        # rank by the product of the two similarity scores, best first
        i_s_d.sort(reverse=True, key=lambda x: x[1] * x[2])
        if i_s_d:
            print(doc[:100])
            # record only the 5 most similar documents
            c = 0
            for i, s_tfidf, s_gensim, d in i_s_d:
                c += 1
                if c > 5:
                    break
                d_scores.append([s_tfidf, s_gensim])
                found_ids.add(i)
                # this_group.append([i, d])
                print("--", i, round(s_tfidf * s_gensim,3), d[:100])
                # input("enter to continue...")
    found_groups.append(this_group)
t_list = list()
from threading import Thread
# Fan out one thread per document, joining in batches of 8 so that at most
# 8 similarity computations run concurrently.
for idx, doc in enumerate(docs):
    # process_doc(docs_idx, idx, doc)
    t = Thread(target=process_doc, args=[docs_idx, idx, doc])
    t_list.append(t)
    t.start()
    if len(t_list) >= 8:
        for t_ in t_list:
            t_.join()
        t_list = list()
# join any threads left over from the final partial batch
for t_ in t_list:
    t_.join()
print(f"found {len(docs_idx)} docs, {len(found_groups)} groups")
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Scatter the (tfidf, gensim) score pairs collected above.
X = np.array(d_scores)
print(X.shape)
plt.scatter(X[:,0], X[:,1], s = 50, c = 'b')
plt.show()
# +
from sklearn.cluster import KMeans
# Cluster the score pairs into 2 groups (roughly: similar vs dissimilar).
Kmean = KMeans(n_clusters=2)
Kmean.fit(X)
print(Kmean.cluster_centers_)
plt.scatter(X[:,0], X[:,1], s = 50, c = 'b')
for i in Kmean.cluster_centers_:
    # mark each cluster centre with a green square
    plt.scatter(i[0], i[1], s = 50, c = 'g', marker='s')
    print(i)
plt.show()
# +
# Find the smallest tfidf score among the points KMeans put in cluster 1.
# NOTE(review): this assumes label 1 is the "high similarity" cluster, but
# KMeans label numbering is arbitrary -- confirm before relying on it.
min_ds = 1
for l, ds in zip(Kmean.labels_, d_scores):
    if l == 1:
        # print(ds)
        if min_ds > ds[0]:
            min_ds = ds[0]
print("final KMeans min for high ds", min_ds)
Y = Kmean.labels_
# +
from sklearn import svm
# Fit a linear SVM on the 2-D score pairs using the KMeans labels as targets,
# to recover a decision threshold between the two clusters.
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
w = clf.coef_[0]
# x-intercept of the separating hyperplane (decision boundary position)
x_0 = -clf.intercept_[0]/w[0]
# NOTE(review): `margin` is set to the raw coefficient w[0]; the geometric
# margin of a linear SVM is 1/||w||, so confirm this is what was intended.
margin = w[0]
print(clf.intercept_ / w[0] * -1)
print(x_0)
# +
# Split the score pairs by cluster label for plotting.
pos = list()
neg = list()
for l, ds in zip(Kmean.labels_, d_scores):
    if l == 1:
        pos.append(ds)
    else:
        neg.append(ds)
pos = np.array(pos)
neg = np.array(neg)
print(pos.shape, neg.shape)
plt.figure()
x_min, x_max = np.floor(X.min()), np.ceil(X.max())
y_min, y_max = -3, 3
yy = np.linspace(y_min, y_max)
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
# Predict over a grid (second coordinate zeroed) to shade the two
# decision regions of the fitted classifier.
Z = clf.predict(np.c_[XX.ravel(), np.zeros(XX.size)]).reshape(XX.shape)
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
# vertical decision boundary plus the two margin lines at x_0 +/- margin
plt.plot(x_0*np.ones(shape=yy.shape), yy, 'k-')
plt.plot(x_0*np.ones(shape=yy.shape) - margin, yy, 'k--')
plt.plot(x_0*np.ones(shape=yy.shape) + margin, yy, 'k--')
plt.scatter(pos, .3 + np.zeros(shape=pos.shape), s=80, marker='o', facecolors='none')
plt.scatter(neg, -.3 + np.zeros(shape=neg.shape), s=80, marker='^', facecolors='none')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.show()
| news_kmeans_discover.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Import required packages and functions and set the session seed
import numpy as np
np.random.seed(1234)
# NOTE(review): `from tensorflow import set_random_seed` only exists in
# TensorFlow 1.x; under TF 2.x this import fails (tf.random.set_seed there).
from tensorflow import set_random_seed
set_random_seed(1234)
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
# NOTE(review): the standalone `keras` import below shadows the
# `tensorflow.keras` import above; mixing the two packages is fragile --
# confirm which backend the notebook is meant to use.
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Conv2D, MaxPooling2D
from keras.layers import Dropout, SpatialDropout2D
from keras.applications import VGG19
from keras.applications.vgg19 import preprocess_input
from keras.models import Model
from keras.datasets import fashion_mnist
from keras.utils import to_categorical
from keras import models
from keras import layers
from keras import optimizers
# Load the Fashion MNIST data from Keras
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Normalize the image data by dividing through the maximum pixel value (=255)
train_images = train_images / train_images.max()
test_images = test_images / test_images.max()
# Build a simple three-layer (1 hidden layer) model
# The input size is 28 x 28 pixels and is flattened to a vector of length 784
# The activation function is RELU (rectified linear unit) and performs the
# multiplication of input and weights (plus bias)
# The output (softmax) layer returns probabilities for all ten classes
three_layer_model = Sequential()
three_layer_model.add(Flatten(input_shape = (28, 28)))
three_layer_model.add(Dense(128, activation = 'relu'))
three_layer_model.add(Dense(10, activation = 'softmax'))
# Compile the model with accuracy metric and adam optimizer
# Sparse categorical cross-entropy is the loss function for integer labels
# Fit the model using 70 percent of the data and 10 epochs
three_layer_model.compile(loss = 'sparse_categorical_crossentropy',
                          optimizer = 'adam', metrics = ['accuracy'])
three_layer_model.fit(train_images, train_labels, epochs = 10,
                      validation_split = 0.3, verbose = 2)
# Compute and print the test loss and accuracy
# NOTE(review): loss is not a percentage, so `test_loss * 100` mislabels the
# value (scaling accuracy by 100 is fine).
test_loss, test_acc = three_layer_model.evaluate(test_images, test_labels)
print("Model with three layers and ten epochs -- Test loss:", test_loss * 100)
print("Model with three layers and ten epochs -- Test accuracy:", test_acc * 100)
# Similarly as before, build a five-layer (3 hidden layers) model
# (Flatten input layer, three Dense/ReLU hidden layers, softmax output)
five_layer_model = Sequential()
five_layer_model.add(Flatten(input_shape = (28, 28)))
five_layer_model.add(Dense(128, activation = 'relu'))
five_layer_model.add(Dense(128, activation = 'relu'))
five_layer_model.add(Dense(128, activation = 'relu'))
five_layer_model.add(Dense(10, activation = 'softmax'))
# Compile the model with accuracy metric and adam optimizer
# Fit the model using 70 percent of the data and 10 epochs
five_layer_model.compile(loss = 'sparse_categorical_crossentropy',
                         optimizer = 'adam', metrics = ['accuracy'])
five_layer_model.fit(train_images, train_labels, epochs = 10,
                     validation_split = 0.3, verbose = 2)
# Compute and print the test loss and accuracy
# NOTE(review): loss is not a percentage; the * 100 scaling only makes sense
# for the accuracy value.
test_loss, test_acc = five_layer_model.evaluate(test_images, test_labels)
print("Model with five layers and ten epochs -- Test loss:", test_loss * 100)
print("Model with five layers and ten epochs -- Test accuracy:", test_acc * 100)
# Similarly as before, build a ten-layer (8 hidden layers) model.
# The eight identical 128-unit ReLU layers are appended in a loop rather than
# written out one by one; the resulting architecture is unchanged.
ten_layer_model = Sequential()
ten_layer_model.add(Flatten(input_shape = (28, 28)))
for _ in range(8):
    ten_layer_model.add(Dense(128, activation = 'relu'))
ten_layer_model.add(Dense(10, activation = 'softmax'))
# Compile with the adam optimizer and accuracy metric, then fit for 10
# epochs on 70 percent of the training data (30 percent held out).
ten_layer_model.compile(loss = 'sparse_categorical_crossentropy',
                        optimizer = 'adam', metrics = ['accuracy'])
ten_layer_model.fit(train_images, train_labels, epochs = 10,
                    validation_split = 0.3, verbose = 2)
# Evaluate on the held-out test set and report loss and accuracy.
test_loss, test_acc = ten_layer_model.evaluate(test_images, test_labels)
print("Model with ten layers and ten epochs -- Test loss:", test_loss * 100)
print("Model with ten layers and ten epochs -- Test accuracy:", test_acc * 100)
# Compile the model with accuracy metric and adam optimizer
# Fit the model using 70 percent of the data and 50 epochs
# NOTE(review): this reuses `three_layer_model`, which was already trained for
# 10 epochs above, so fit() *continues* training -- the model ends up with 60
# epochs total, not a fresh 50-epoch run. Rebuild the model first if a clean
# comparison is intended.
three_layer_model_50_epochs = three_layer_model.fit(train_images, train_labels,
                                                    epochs = 50, validation_split = 0.3,
                                                    verbose = 2)
# Compute and print the test loss and accuracy
# NOTE(review): loss is not a percentage; the * 100 scaling mislabels it.
test_loss, test_acc = three_layer_model.evaluate(test_images, test_labels)
print("Model with three layers and fifty epochs -- Test loss:", test_loss * 100)
print("Model with three layers and fifty epochs -- Test accuracy:", test_acc * 100)
# +
# Plot loss as function of epochs (History object returned by fit above)
plt.subplot(1, 2, 1)
plt.plot(three_layer_model_50_epochs.history['val_loss'], 'blue')
plt.plot(three_layer_model_50_epochs.history['loss'], 'red')
plt.legend(['Cross-validation', 'Training'], loc = 'upper left')
plt.ylabel('Loss')
plt.xlabel('Epoch')
# Plot accuracy as function of epochs
# NOTE(review): the keys 'acc'/'val_acc' are standalone-Keras names; under
# tf.keras 2.x they are 'accuracy'/'val_accuracy' -- confirm which backend runs.
plt.subplot(1, 2, 2)
plt.plot(three_layer_model_50_epochs.history['val_acc'], 'blue')
plt.plot(three_layer_model_50_epochs.history['acc'], 'red')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.subplots_adjust(wspace = .35)
# Include plot title and show the plot
plt.suptitle('Model loss and accuracy over epochs for a three-layer neural network')
plt.show()
# -
# Calculate and print predictions versus actual labels for the first 10 images
# (argmax over the 10 softmax probabilities gives the predicted class index)
predictions = three_layer_model.predict(test_images)
for i in range(10):
    print("Prediction " + str(i) + ": " + str(np.argmax(np.round(predictions[i]))))
    print("Actual " + str(i) + ": " + str(test_labels[i]))
# Reload the data for a convolutional neural network
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Reshape the data to the correct format (the last 1 stands for greyscale)
# NOTE(review): the sample counts 60000/10000 are hard-coded; reshape would
# fail if the dataset size ever changed (could use train_images.shape[0]).
train_images = train_images.reshape(60000, 28, 28, 1)
test_images = test_images.reshape(10000, 28, 28, 1)
# Convert the image data to numeric data and normalize them
train_images = train_images.astype('float32')
test_images = test_images.astype('float32')
train_images = train_images / train_images.max()
test_images = test_images / test_images.max()
# One-hot encode the label data
# Convert every number to a vector of the length of the number of categories
# The vector has zero everywhere except a one on the position of the number it
# represents. Example: 3 = [0 0 0 1 0 0 0 0 0 0]
train_labels_bin = to_categorical(train_labels)
test_labels_bin = to_categorical(test_labels)
# Build a convolutional neural network with two convolutional layers
conv_model = Sequential()
conv_model.add(Conv2D(128, (3, 3), input_shape = (28, 28, 1)))
conv_model.add(Activation('relu'))
conv_model.add(MaxPooling2D(pool_size = (2, 2)))
conv_model.add(Conv2D(128, (3, 3)))
conv_model.add(Activation('relu'))
conv_model.add(MaxPooling2D(pool_size = (2, 2)))
conv_model.add(Flatten())
# NOTE(review): this Dense(128) has no activation, so it is purely linear;
# confirm whether a ReLU was intended here.
conv_model.add(Dense(128))
conv_model.add(Dense(10))
conv_model.add(Activation('softmax'))
# Compile and fit the model with adam optimizer and accuracy metric
# Categorical cross-entropy is the loss function for one-hot encoded labels and
# batch size equal to the number of neurons in the convolutional layers and 10 epochs
conv_model.compile(loss = "categorical_crossentropy",
                   optimizer = 'adam', metrics = ['accuracy'])
conv_model.fit(train_images, train_labels_bin, batch_size = 128,
               epochs = 10, verbose = 2)
# Compute and print the test loss and accuracy
# NOTE(review): loss is not a percentage; the * 100 scaling mislabels it.
test_loss, test_acc = conv_model.evaluate(test_images, test_labels_bin)
print("Convolutional model ten epochs -- Test loss:", test_loss * 100)
print("Convolutional model ten epochs -- Test accuracy:", test_acc * 100)
# Build a convolutional neural network with two convolutional layers
# Decrease number of neurons and add dropout to reduce overfitting
conv_model_reduce_overfit = Sequential()
conv_model_reduce_overfit.add(Conv2D(64, (3, 3), input_shape = (28, 28, 1)))
conv_model_reduce_overfit.add(Activation('relu'))
conv_model_reduce_overfit.add(MaxPooling2D(pool_size = (2, 2)))
conv_model_reduce_overfit.add(Dropout(0.5))
conv_model_reduce_overfit.add(Conv2D(64, (3, 3)))
# SpatialDropout2D drops whole feature maps; placing it before the ReLU is
# equivalent to after, since ReLU is applied element-wise.
conv_model_reduce_overfit.add(SpatialDropout2D(0.5))
conv_model_reduce_overfit.add(Activation('relu'))
conv_model_reduce_overfit.add(MaxPooling2D(pool_size = (2, 2)))
conv_model_reduce_overfit.add(Flatten())
# NOTE(review): this Dense(64) has no activation (linear) -- confirm intent.
conv_model_reduce_overfit.add(Dense(64))
conv_model_reduce_overfit.add(Dropout(0.5))
conv_model_reduce_overfit.add(Dense(10))
conv_model_reduce_overfit.add(Activation('softmax'))
# Compile and fit the model with adam optimizer and accuracy metric
# Categorical cross-entropy is the loss function for one-hot encoded labels and
# batch size equal to the number of neurons in the convolutional layers and 10 epochs
# Add early stopping to avoid overfitting
conv_model_reduce_overfit.compile(loss = "categorical_crossentropy",
                                  optimizer = 'adam', metrics = ['accuracy'])
# Stop when validation loss has not improved for 3 consecutive epochs.
conv_callback = keras.callbacks.EarlyStopping(monitor = 'val_loss', patience = 3)
conv_model_reduce_overfit.fit(train_images, train_labels_bin, validation_split = 0.3,
                              epochs = 10, verbose = 2, callbacks = [conv_callback], batch_size = 64)
# Compute and print the test loss and accuracy
# NOTE(review): loss is not a percentage; the * 100 scaling mislabels it.
test_loss, test_acc = conv_model_reduce_overfit.evaluate(test_images, test_labels_bin)
print("Convolutional model ten epochs reduced overfit -- Test loss:", test_loss * 100)
print("Convolutional model ten epochs reduced overfit -- Test accuracy:", test_acc * 100)
# Calculate and print predictions versus actual labels for the first 10 images
predictions = conv_model_reduce_overfit.predict(test_images)
for i in range(10):
    print("Prediction " + str(i) + ": " + str(np.argmax(np.round(predictions[i]))))
    print("Actual " + str(i) + ": " + str(test_labels[i]))
| simple_neural_network_fashion_mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Configure root logging so library INFO messages show up in the notebook.
import logging
import importlib
importlib.reload(logging) # see https://stackoverflow.com/a/21475297/1469195
log = logging.getLogger()
log.setLevel('INFO')
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
                   level=logging.INFO, stream=sys.stdout)
# +
# %%capture
# Make the local research packages importable (paths are machine-specific).
import os
import site
os.sys.path.insert(0, '/home/schirrmr/code/reversible/')
os.sys.path.insert(0, '/home/schirrmr/braindecode/code/braindecode/')
os.sys.path.insert(0, '/home/schirrmr/code/explaining/reversible//')
# %load_ext autoreload
# %autoreload 2
import numpy as np
# Logging is configured a second time here because %%capture above swallows
# the first cell's output; the settings are identical.
import logging
log = logging.getLogger()
log.setLevel('INFO')
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
                   level=logging.INFO, stream=sys.stdout)
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import cm
# %matplotlib inline
# %config InlineBackend.figure_format = 'png'
matplotlib.rcParams['figure.figsize'] = (12.0, 1.0)
matplotlib.rcParams['font.size'] = 14
import seaborn
seaborn.set_style('darkgrid')
from reversible2.sliced import sliced_from_samples
from numpy.random import RandomState
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import copy
import math
import itertools
import torch as th
from braindecode.torch_ext.util import np_to_var, var_to_np
from reversible2.splitter import SubsampleSplitter
from reversible2.view_as import ViewAs
from reversible2.affine import AdditiveBlock
from reversible2.plot import display_text, display_close
# Let cuDNN pick the fastest convolution algorithms for fixed input sizes.
th.backends.cudnn.benchmark = True
# +
# Load high-gamma EEG data for subject 4 (22 sensors, resampled to 256 Hz,
# 500-1500 ms window) and convert it to a signal/target train/valid split.
from reversible2.high_gamma import load_train_test, to_signal_target
train_inputs, test_inputs = load_train_test(subject_id=4, car=True,n_sensors=22,final_hz=256,
                                           start_ms=500, stop_ms=1500,half_before=True,
                                           only_load_given_sensors=False)
cuda = True
train_set, valid_set = to_signal_target(train_inputs, test_inputs)
# -
from reversible2.pad import ZeroPadChans
from reversible2.scale import ScaleAndShift
# ## Scale automatically
from reversible2.models import deep_invertible
from reversible2.graph import Node
from reversible2.invert import invert
# Network hyperparameters derived from the loaded data. The original cell
# used these names before they were defined (they only appeared in the
# training cell below), referenced `t_out` before assigning it, and
# contained a `t_out.shja` typo plus stray expressions (`176*32`, `m`)
# that raised NameError/AttributeError when run top to bottom.
n_chans = train_set.X.shape[1]
input_time_length = train_set.X.shape[2]
n_chan_pad = 0
filter_length_time = 11
# Build the invertible feature network and wrap it as a graph node.
model = deep_invertible(
    n_chans, input_time_length, n_chan_pad, filter_length_time)
feature_model = Node(None, model)
# Sanity check: a forward pass followed by the inverse must reproduce the
# input up to numerical tolerance.
t_out = feature_model(train_inputs[0][:2])
inverted = feature_model.invert(t_out)
assert th.allclose(train_inputs[0][:2], inverted, rtol=1e-3, atol=1e-4)
# +
# Train the invertible network as a classifier n_iters times and collect the
# final epoch metrics of each run.
from braindecode.torch_ext.modules import Expression
from reversible2.rfft import RFFT
from braindecode.torch_ext.optimizers import AdamW
from reversible2.models import deep_invertible
from reversible2.scale import scale_to_unit_var
n_chans = train_set.X.shape[1]
n_classes = 2
input_time_length = train_set.X.shape[2]
n_iters = 5
dfs = []
for _ in range (n_iters):
    n_chan_pad = 0
    filter_length_time = 11
    model = deep_invertible(n_chans,  input_time_length,  n_chan_pad,  filter_length_time)
    # Use the first two output dims at time index 0 as class scores.
    model.add_module("select_dims", Expression(lambda x: x[:,:2,0]))
    model.add_module("softmax", nn.LogSoftmax(dim=1))
    from reversible2.models import WrappedModel
    model = WrappedModel(model)
    model.cuda()
    # Temporarily hook all scaling modules so one forward pass calibrates
    # their factors to unit variance...
    for module in model.network.modules():
        if hasattr(module, 'log_factor'):
            module._forward_hooks.clear()
            module.register_forward_hook(scale_to_unit_var)
    model.network(train_inputs[0].cuda());
    # ...then remove the hooks again so training runs without recalibration.
    for module in model.network.modules():
        if hasattr(module, 'log_factor'):
            module._forward_hooks.clear()
    # Train a deep copy so the calibrated template stays untouched.
    from copy import deepcopy
    model_to_train = deepcopy(model)
    lr = 1 * 0.001
    weight_decay = 0.5 * 0.001
    optimizer = AdamW(model_to_train.parameters(), lr=lr,
                      weight_decay=weight_decay)
    max_epochs = 50
    model_to_train.compile(loss=F.nll_loss, optimizer=optimizer, iterator_seed=1, )
    model_to_train.fit(train_set.X, train_set.y, epochs=max_epochs, batch_size=64,
               scheduler='cosine',
               validation_data=(valid_set.X, valid_set.y), )
    dfs.append(model_to_train.epochs_df)
# -
# Show the last epoch of each run. NOTE(review): the same cell is repeated
# three times below -- leftover notebook re-executions, not distinct results.
import pandas as pd
pd.concat([df.iloc[-1:] for df in dfs])
import pandas as pd
pd.concat([df.iloc[-1:] for df in dfs])
import pandas as pd
pd.concat([df.iloc[-1:] for df in dfs])
| notebooks/simpler-invnet-20-june-2019/Deep4_Invertible_Chain_Clean.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Each cell loads one semicolon-delimited raw CSV of birthday wishes and
# displays it to eyeball the format. NOTE(review): files III-V (suffixes
# 3-5) are skipped here -- confirm whether that is intentional.
# ## Bday wishes for a friend I
wishes_for_friend_raw = pd.read_csv('../data/raw/bday_wishes_for_friend.csv', delimiter=";")
wishes_for_friend_raw
wishes_for_friend_raw.iloc[0].text
# ## Bday wishes for a friend II
wishes_for_friend_raw_2 = pd.read_csv('../data/raw/bday_wishes_for_friend_2.csv', delimiter=";")
wishes_for_friend_raw_2
# ## Bday wishes for a friend VI
wishes_for_friend_raw_6 = pd.read_csv('../data/raw/bday_wishes_for_friend_6.csv', delimiter=";")
wishes_for_friend_raw_6
# ## Bday wishes for a friend VII
wishes_for_friend_raw_7 = pd.read_csv('../data/raw/bday_wishes_for_friend_7.csv', delimiter=";")
wishes_for_friend_raw_7
# ## Bday wishes for a friend VIII
wishes_for_friend_raw_8 = pd.read_csv('../data/raw/bday_wishes_for_friend_8.csv', delimiter=";")
wishes_for_friend_raw_8
| notebooks/Validate_Data_Format.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import face_recognition
# ! dir
# Locate all faces in an image containing multiple people and crop one out.
multiple_faces = face_recognition.load_image_file("emma-both-law.png")
multiple_faces.shape
import matplotlib.pyplot as plt
plt.imshow(multiple_faces)
# face_locations returns one (top, right, bottom, left) box per detected face.
locs = face_recognition.face_locations(multiple_faces, model="cnn")
locs
# NOTE(review): indexing locs[2] assumes at least three faces were detected
# in this particular image -- this raises IndexError otherwise.
y1, x2, y2, x1 = locs[2]
face1 = multiple_faces[y1:y2, x1:x2, :]
plt.imshow(face1)
plt.show()
# Compare face encodings: same person (two Emma Stone photos) should match,
# a different person (Jennifer Lawrence) should not.
emma_st1 = face_recognition.load_image_file("emma_stone1.jpg")
emma_st_em1 = face_recognition.face_encodings(emma_st1)[0]
emma_st2 = face_recognition.load_image_file("emma_stone2.jpg")
emma_st_em2 = face_recognition.face_encodings(emma_st2)[0]
# compare_faces expects a *list* of known encodings as its first argument.
# Passing a bare 128-d encoding (as the original did) compares element-wise
# and prints 128 booleans instead of a single match result.
results = face_recognition.compare_faces([emma_st_em2], emma_st_em1)
print(results)
emma_st2 = face_recognition.load_image_file("jen_law1.jpg")
emma_st_em2 = face_recognition.face_encodings(emma_st2)[0]
results = face_recognition.compare_faces([emma_st_em2], emma_st_em1)
print(results)
# database: map each known person's name to one reference face encoding
face_database = {
    "emma_stone": face_recognition.face_encodings( face_recognition.load_image_file("emma_stone1.jpg") )[0],
    "emma_watson": face_recognition.face_encodings( face_recognition.load_image_file("emma-watson1.jpg") )[0],
    "jen_law": face_recognition.face_encodings( face_recognition.load_image_file("jen_law1.jpg") )[0],
}
# +
# recognition: encode an unseen photo and report the first database entry it
# matches (compare_faces is called correctly here, with a one-element list).
random_enc = face_recognition.face_encodings( face_recognition.load_image_file("emma_stone3.jpg") )[0]
for k in face_database:
    print(k) # name
    if face_recognition.compare_faces( [face_database[k]], random_enc )[0] == True:
        print(f"Face matched with {k}")
        break
# -
| face-recognition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# metadata:
# interpreter:
# hash: 59e43299af438c040623fe1ed75598134e5c49a9e638af191f2054cd46fed4b5
# name: python3
# ---
import pandas as pd
# +
#importing identified aca posts (pre-processed title/text columns expected)
posts = pd.read_csv('../data/processed/aca_posts_2.csv')
posts.head()
# +
#now doing it for post titles
from wordcloud import WordCloud
# Join titles and post bodies into one comma-separated corpus.
# Previously the title string was immediately overwritten by the body
# string, so titles never actually reached the word cloud even though the
# comments said both were joined.
long_string = ','.join(list(posts['processed_title'].values))
long_string = long_string + ',' + ','.join(list(posts['processed_text'].values))
#Create a WordCloud object
wordcloud = WordCloud(background_color="white", max_words=5000, contour_width=3, contour_color='steelblue')
# Generate a word cloud
wordcloud.generate(long_string)
# Visualize the word cloud
wordcloud.to_image()
# -
# Imports for word-frequency counting and plotting.
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re
sns.set_style('whitegrid')
# %matplotlib inline
# +
# Helper function
def plot_10_most_common_words(count_data, count_vectorizer, n_top=10):
    """Bar-plot the most frequent terms in a vectorized corpus.

    Parameters
    ----------
    count_data : sparse matrix, shape (n_docs, n_terms)
        Output of ``count_vectorizer.fit_transform``.
    count_vectorizer : CountVectorizer
        Fitted vectorizer supplying the vocabulary.
    n_top : int, optional
        How many top terms to plot. Defaults to 10, preserving the
        original hard-coded behaviour.
    """
    words = count_vectorizer.get_feature_names()
    # Sum term counts over all documents (row by row to keep memory low).
    total_counts = np.zeros(len(words))
    for t in count_data:
        total_counts += t.toarray()[0]
    # Pair terms with counts and keep the n_top most frequent.
    top_terms = sorted(zip(words, total_counts), key=lambda x: x[1], reverse=True)[:n_top]
    words = [w[0] for w in top_terms]
    counts = [w[1] for w in top_terms]
    x_pos = np.arange(len(words))
    plt.figure(2, figsize=(15, 15/1.6180))
    plt.subplot(title='{} most common words'.format(n_top))
    sns.set_context("notebook", font_scale=1.25, rc={"lines.linewidth": 2.5})
    # Keyword arguments work on both old and new seaborn (positional x/y
    # were removed in seaborn 0.12).
    sns.barplot(x=x_pos, y=counts, palette='husl')
    plt.xticks(x_pos, words, rotation=90)
    plt.xlabel('words')
    plt.ylabel('counts')
    plt.show()
# Initialise the count vectorizer with the English stop words
count_vectorizer = CountVectorizer(stop_words='english')
#making a column that is all text
posts['all_text'] = posts['processed_title'].str.cat(posts['processed_text'], sep = ' ')
#getting rid of 'insurance'
posts['all_text'] = posts['all_text'].map(lambda x: re.sub('insurance', '', x))
#getting rid of 'plan'
posts['all_text'] = posts['all_text'].map(lambda x: re.sub('plan', '', x))
#removing extra spaces
posts['all_text'] = posts['all_text'].replace(r" ", " ", regex=True)
# Fit and transform the processed titles
count_data = count_vectorizer.fit_transform(posts['all_text'])
# Visualise the 10 most common words
plot_10_most_common_words(count_data, count_vectorizer)
# +
# Silence sklearn deprecation chatter during the LDA sweep below.
import warnings
warnings.simplefilter("ignore", DeprecationWarning)
# Load the LDA model from sk-learn
from sklearn.decomposition import LatentDirichletAllocation as LDA
# Helper function
def print_topics(model, count_vectorizer, n_top_words):
    """Print the n_top_words highest-weighted terms of every LDA topic."""
    vocab = count_vectorizer.get_feature_names()
    for idx, weights in enumerate(model.components_):
        # Highest weights first: full descending argsort, then take the top n.
        top_indices = weights.argsort()[::-1][:n_top_words]
        print("\nTopic #%d:" % idx)
        print(" ".join(vocab[i] for i in top_indices))
# + tags=[]
# Sweep over candidate topic counts and print each fitted model's topics so
# the most interpretable granularity can be picked by inspection.
number_words = 15
#topics = [5, 10, 20, 50, 100, 250]
topics = [10, 15, 20, 25, 35, 50]
for topic in topics:
    # Create and fit the LDA mode;
    print('With {} number of topics'.format(str(topic)))
    lda = LDA(n_components = topic, n_jobs=-1)
    lda.fit(count_data)
    # Print the topics found by the LDA model
    print("Topics found via LDA:")
    print_topics(lda, count_vectorizer, number_words)
    print('******************************************************************************************************************************')
# -
# Spot-check one raw document before further cleaning.
posts['all_text'][0]
# +
# Extra text cleanup: expand "i'm", drop stray standalone "s" tokens and
# leftover apostrophes, then collapse double spaces again.
posts['all_text'] = posts['all_text'].map(lambda x: re.sub(r'i\'m', 'i am', x))
posts['all_text'] = posts['all_text'].map(lambda x: re.sub(' s ', ' ', x))
posts['all_text'] = posts['all_text'].map(lambda x: re.sub(r' \'', '', x))
posts['all_text'] = posts['all_text'].map(lambda x: re.sub(r'\'', '', x))
posts['all_text'] = posts['all_text'].replace(r"  ", " ", regex=True)
posts['all_text'][2]
# +
# Reference: the punctuation set available for further stripping.
import string
print(string.punctuation)
| notebooks/01-vm-title-and-post-exploring.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline
# NOTE(review): the %matplotlib inline magic is duplicated above -- harmless.
# %matplotlib inline
# Set up figure size and DPI for screen demo
plt.rcParams['figure.figsize'] = (6,4)
plt.rcParams['figure.dpi'] = 150
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
# Basic interact usage with integer slider (an int default yields a slider)
def f(x):
    plt.plot(np.arange(0,10), x*np.arange(0,10))
    plt.ylim(-30,30)
interact(f, x=1)
# Range & Step size: a (min, max, step) tuple yields a float slider
def f(x):
    plt.plot(np.arange(0,10), x*np.arange(0,10))
    plt.ylim(-30,30)
interact(f, x=(-3,3,0.5))
# Automatically choose appropriate widget (a bool default yields a checkbox)
rands = np.random.rand(100)
def f(x):
    if x:
        plt.plot(rands, 'b')
    else:
        plt.plot(rands, 'r')
interact(f, x=True)
# +
# interact as a decorator (a string default yields a text box)
@interact(x='Title of plot')
def f(x):
    plt.title(x)
# +
# Multiple widgets: one control per keyword argument
def f(a,b):
    plt.plot(np.arange(0,10), a*np.power(np.arange(0,10), b))
    plt.title("Power Law: $x=ay^b$")
interact(f, a=1, b=3)
# +
# Fixed value: fixed(2) pins b so no widget is created for it
def f(a,b):
    plt.plot(np.arange(0,10), a*np.power(np.arange(0,10), b))
    plt.title("Power Law: $x=ay^b$")
interact(f, a=1, b=fixed(2))
# +
# Dropdowns: a list of options yields a dropdown of its values
def f(colour):
    plt.plot(np.arange(0,10), np.power(np.arange(0,10), 5), c=colour)
    plt.title("Power Law: $x=ay^b$")
colours=['red', 'green', 'blue']
interact(f, colour=colours)
# +
# Dropdowns with dicts: keys are shown, values are passed to the function
def f(b):
    plt.plot(np.arange(0,10), np.power(np.arange(0,10), b))
    plt.title("Power Law: $x=ay^b$")
powers = {'one':1, 'two':2, 'three':3}
interact(f, b=powers)
# -
| Chapter05/Interactive plots in the Jupyter Notebook.ipynb |