code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 121} colab_type="code" executionInfo={"elapsed": 107521, "status": "ok", "timestamp": 1539617783349, "user": {"displayName": "\u0412\u0438\u043a\u0442\u043e\u0440 \u041e\u0432\u0435\u0440\u043a\u043e", "photoUrl": "", "userId": "04713510988786792129"}, "user_tz": -180} id="ZtLCrbxiIgJg" outputId="22a89fcc-6593-4d39-ecb2-afd3280db83c"
# Mount Google Drive so the dataset under /content/gdrive is reachable.
from google.colab import drive
drive.mount('/content/gdrive')
# + colab={} colab_type="code" id="iKCqU207IJrF"
import os
import numpy as np
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from google.colab import files
# + colab={} colab_type="code" id="JRGXpB-8IJrQ"
# Location of the ICHI14 accelerometer dataset on the mounted Drive;
# recordings are stored as one p<id>.npy file per patient.
data_path = os.path.join("/content/gdrive/My Drive/", "DRU-MAWI-project/ICHI14_dataset/data")
# Identifiers of the 45 recorded patients.
patient_list = ['002','003','005','007','08a','08b','09a','09b', '10a','011','013','014','15a','15b','016',
                '017','018','019','020','021','022','023','025','026','027','028','029','030','031','032',
                '033','034','035','036','037','038','040','042','043','044','045','047','048','049','051']
# + colab={} colab_type="code" id="rAkhERdxIJrT"
# Split by PATIENT (not by window) into ~70/15/15 train/valid/test so that
# no patient's data leaks across subsets.
train_patient_list, test_patient_list = train_test_split(patient_list, random_state=100, test_size=0.3)
test_patient_list, valid_patient_list = train_test_split(test_patient_list, random_state=100, test_size=0.5)
# + colab={"base_uri": "https://localhost:8080/", "height": 84} colab_type="code" executionInfo={"elapsed": 1362, "status": "ok", "timestamp": 1539617788011, "user": {"displayName": "\u0412\u0438\u043a\u0442\u043e\u0440 \u041e\u0432\u0435\u0440\u043a\u043e", "photoUrl": "", "userId": "04713510988786792129"}, "user_tz": -180} id="YHxtdjtQlXiO" outputId="c2b07401-bbef-4ef7-8c09-150ee360da72"
# Sanity-check the split sizes.
print(len(patient_list))
print(len(train_patient_list))
print(len(valid_patient_list))
print(len(test_patient_list))
# + colab={"base_uri": "https://localhost:8080/", "height": 87} colab_type="code" executionInfo={"elapsed": 1300, "status": "ok", "timestamp": 1539617789496, "user": {"displayName": "\u0412\u0438\u043a\u0442\u043e\u0440 \u041e\u0432\u0435\u0440\u043a\u043e", "photoUrl": "", "userId": "04713510988786792129"}, "user_tz": -180} id="oVyQga2AmI7R" outputId="d506aad9-bb9c-41a0-923c-cef0864c1a95"
# Show which patient went where.
print(train_patient_list)
print(valid_patient_list)
print(test_patient_list)
# + colab={} colab_type="code" id="wj-8RYiRL2We"
def change_labels(sample):
    """
    Collapse the polysomnography stages into a binary label, in place.

    Returns:
        sample - same recarray, with gt holding only 1 (awake) and 0 (sleep)
    """
    gt = sample.gt
    # Temporarily move raw label 0 out of the way so it ends up awake (1).
    gt[gt == 0] = 8
    # Sleep stages 1, 2, 3 and 5 become 0 (sleep).
    gt[np.isin(gt, (1, 2, 3, 5))] = 0
    # Stages 6, 7 and the relocated 8 become 1 (awake).
    gt[np.isin(gt, (6, 7, 8))] = 1
    return sample
#-------------------------------------------------------------------------
def decoder(sample):
    '''
    Expand a run-length-encoded recording into per-record rows.

    Returns:
        decoded_sample - accelerometer and ps data for each sensor record,
                         ndarray of shape (n_records, 4)
    '''
    # Each row is stored once with a repeat count in field `d`.
    expanded = np.repeat(sample, sample.d, axis=0)
    # Columns: x, y, z accelerometer channels, then the gt label.
    decoded_sample = np.column_stack(
        (expanded.x, expanded.y, expanded.z, expanded.gt)
    ).astype(np.float64)
    return decoded_sample
#-------------------------------------------------------------------------
def divide_by_windows(decoded_sample, window_len=60):
    """
    Split a decoded recording into fixed-length windows.

    Parameters:
        decoded_sample - ndarray of shape (n_records, 4): x, y, z, label
        window_len - length of each window in seconds, int
                     (assumes 100 records per second -- TODO confirm rate)
    Returns:
        X - accelerometer data, ndarray of shape (n_windows, window_len*100, 3)
        y - window labels, ndarray of shape (n_windows,); a window is 1 when
            at least half of its records have a non-zero label
    """
    window_len *= 100  # seconds -> number of records per window
    n_windows = decoded_sample.shape[0] // window_len
    # Drop the incomplete tail and reshape into windows instead of looping
    # over them in Python (same values, vectorized).
    windows = decoded_sample[:n_windows * window_len].reshape(n_windows, window_len, 4)
    X = windows[:, :, 0:3].astype(np.float64)
    ones = np.count_nonzero(windows[:, :, 3], axis=1)
    y = (ones >= window_len / 2).astype(np.float64)
    return X, y
#-------------------------------------------------------------------------
def get_one_patient_data(data_path, patient, window_len=60):
    """
    Load one patient's recording and turn it into labeled windows.

    Returns:
        X, y - windowed accelerometer data and labels for one patient
    """
    raw = np.load("%s/p%s.npy"%(data_path, patient)).view(np.recarray)
    decoded = decoder(change_labels(raw))
    return divide_by_windows(decoded, window_len)
#-------------------------------------------------------------------------
def get_data_for_model(data_path, patient_list, window_len=60):
    """
    Load and window the recordings of every patient in the list.

    Returns:
        X, y - stacked over all patients; X is ndarray of shape
               (n_records, n_features, n_channels=3)
    """
    per_patient = [get_one_patient_data(data_path, patient, window_len)
                   for patient in patient_list]
    X_all_data = np.concatenate([X for X, _ in per_patient], axis=0)
    y_all_data = np.concatenate([y for _, y in per_patient], axis=0)
    return X_all_data, y_all_data
#-------------------------------------------------------------------------
def get_dawnsampled_data(data_path, patient_list, window_len=60, dawnsample="pca", n_components=10, n_windows=10):
    """
    Load, window, optionally downsample and context-concatenate patient data.

    Parameters:
        data_path    - directory holding the p<id>.npy recordings
        patient_list - patient ids to load
        window_len   - window length in seconds (see divide_by_windows)
        dawnsample   - "pca", "mean", "max", "mode", "simple" or None -
                       per-window reduction; None (or an unrecognized value)
                       leaves the windows unreduced
        n_components - number of components kept by the reduction
        n_windows    - how many following windows are concatenated onto each
                       window to give the model temporal context
    Returns:
        X, y - X of shape (n_records, n_components * (n_windows + 1), 3);
               y trimmed so it stays aligned with the rows of X
    """
    reducers = {
        "pca": reduce_data_pca,
        "mean": reduce_data_mean,
        "max": reduce_data_max,
        "mode": reduce_data_mode,
        "simple": reduce_data_simple,
    }
    X_all_data = []
    y_all_data = []
    for patient in patient_list:
        X, y = get_one_patient_data(data_path, patient, window_len)
        # Bug fix: the original called dawnsample.lower() unconditionally and
        # crashed on the documented dawnsample=None option.
        if dawnsample is not None:
            reducer = reducers.get(dawnsample.lower())
            if reducer is not None:
                X = reducer(X, n_components=n_components)
        # Concatenate each window with its n_windows successors along the
        # feature axis; the last n_windows windows have no full context.
        X_new = np.zeros((X.shape[0] - n_windows, X.shape[1] * (n_windows + 1), X.shape[2]))
        for i in range(X.shape[0] - n_windows):
            X_new[i] = np.concatenate([X[i + j] for j in range(n_windows + 1)], axis=0)
        if n_windows != 0:
            # Bug fix: y[(n_windows//2):-(n_windows//2)] kept one label too
            # many for odd n_windows; trim to exactly len(X_new) labels,
            # centered on each combined window.
            y = y[n_windows // 2: n_windows // 2 + X_new.shape[0]]
        X_all_data.append(X_new)
        y_all_data.append(y)
    X_all_data = np.concatenate(X_all_data, axis=0)
    y_all_data = np.concatenate(y_all_data, axis=0)
    return X_all_data, y_all_data
def reduce_data_pca(X, n_components=300):
    """
    Reduce each accelerometer channel independently with PCA.

    Parameters:
        X - ndarray of shape (n_samples, n_features, 3)
    Returns:
        X_reduced - ndarray of shape (n_samples, n_components, 3)
    """
    transformed = []
    for channel in range(3):
        model = PCA(n_components)
        model.fit(X[:, :, channel])
        transformed.append(model.transform(X[:, :, channel]))
    # NOTE(review): the row-major reshape regroups the concatenated channel
    # blocks rather than keeping one channel per trailing-axis slot; kept
    # as-is to preserve the original behavior.
    X_reduced = np.concatenate(transformed, axis=1).reshape(X.shape[0], n_components, 3)
    return X_reduced
def reduce_data_max(X, n_components=600):
    """
    Downsample the feature axis by taking the max of equal-length segments.

    Parameters:
        X - ndarray of shape (n_samples, n_features, 3)
    Returns:
        X_reduced - ndarray of shape (n_samples, n_components, 3)
    """
    segment = X.shape[1] // n_components
    X_reduced = np.zeros((X.shape[0], n_components, 3))
    for k in range(n_components):
        X_reduced[:, k, :] = X[:, k * segment:(k + 1) * segment, :].max(axis=1)
    return X_reduced
def reduce_data_mean(X, n_components=600):
    """
    Downsample the feature axis by averaging equal-length segments.

    Parameters:
        X - ndarray of shape (n_samples, n_features, 3)
    Returns:
        X_reduced - ndarray of shape (n_samples, n_components, 3)
    """
    segment = X.shape[1] // n_components
    X_reduced = np.zeros((X.shape[0], n_components, 3))
    for k in range(n_components):
        X_reduced[:, k, :] = X[:, k * segment:(k + 1) * segment, :].mean(axis=1)
    return X_reduced
def reduce_data_mode(X, n_components=600):
    """
    Downsample the feature axis by taking the mode of equal-length segments.

    Parameters:
        X - ndarray of shape (n_samples, n_features, 3)
    Returns:
        X_reduced - ndarray of shape (n_samples, n_components, 3)
    """
    from scipy.stats import mode
    X_reduced = np.zeros((X.shape[0], n_components, 3))
    window_len = X.shape[1] // n_components
    for i in range(n_components):
        # Bug fix: scipy's mode() returns a ModeResult (mode, count) pair;
        # the original assigned the whole result object into the slice and
        # failed at runtime. Take only the modal values, and reshape so the
        # code works whether or not scipy keeps the reduced axis.
        m = mode(X[:, i * window_len: (i + 1) * window_len, :], axis=1).mode
        X_reduced[:, i, :] = np.asarray(m).reshape(X.shape[0], 3)
    return X_reduced
def reduce_data_simple(X, n_components=600):
    """
    Downsample by keeping only the first sample of each equal-length segment.

    Parameters:
        X - ndarray of shape (n_samples, n_features, 3)
    Returns:
        X_reduced - ndarray of shape (n_samples, n_components, 3)
    """
    segment = X.shape[1] // n_components
    picks = np.arange(n_components) * segment
    # Fancy indexing copies; cast to float64 to match the original output dtype.
    X_reduced = X[:, picks, :].astype(np.float64)
    return X_reduced
# + colab={} colab_type="code" id="VT7dhZvRIJrX"
# Raw (unreduced) windowed data per patient subset.
X_train, y_train = get_data_for_model(data_path, train_patient_list, window_len=60)
X_valid, y_valid = get_data_for_model(data_path, valid_patient_list, window_len=60)
X_test, y_test = get_data_for_model(data_path, test_patient_list, window_len=60)
# + colab={"base_uri": "https://localhost:8080/", "height": 67} colab_type="code" executionInfo={"elapsed": 844, "status": "ok", "timestamp": 1539617845057, "user": {"displayName": "\u0412\u0438\u043a\u0442\u043e\u0440 \u041e\u0432\u0435\u0440\u043a\u043e", "photoUrl": "", "userId": "04713510988786792129"}, "user_tz": -180} id="88XbUDyOIJrc" outputId="6656c6a8-dbc1-434f-8bf5-b7b247679903"
print(X_train.shape)
print(X_valid.shape)
print(X_test.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" executionInfo={"elapsed": 108897, "status": "ok", "timestamp": 1539617954190, "user": {"displayName": "\u0412\u0438\u043a\u0442\u043e\u0440 \u041e\u0432\u0435\u0440\u043a\u043e", "photoUrl": "", "userId": "04713510988786792129"}, "user_tz": -180} id="4oE0-ifHUdJJ" outputId="24a4fb83-042c-4955-fb49-85277605a2b1"
# %%time
# Rebuild the sets with PCA reduction (60 components per channel) and each
# window concatenated with its 12 successors -> 60 * 13 = 780 features.
X_train, y_train = get_dawnsampled_data(data_path, train_patient_list, window_len=60, dawnsample="pca", n_components=60, n_windows=12)
X_valid, y_valid = get_dawnsampled_data(data_path, valid_patient_list, window_len=60, dawnsample="pca", n_components=60, n_windows=12)
X_test, y_test = get_dawnsampled_data(data_path, test_patient_list, window_len=60, dawnsample="pca", n_components=60, n_windows=12)
# + colab={"base_uri": "https://localhost:8080/", "height": 84} colab_type="code" executionInfo={"elapsed": 908, "status": "ok", "timestamp": 1539617955150, "user": {"displayName": "\u0412\u0438\u043a\u0442\u043e\u0440 \u041e\u0432\u0435\u0440\u043a\u043e", "photoUrl": "", "userId": "04713510988786792129"}, "user_tz": -180} id="54b8IOL2IJrl" outputId="4958b1fe-8c9c-4617-df5f-6b5d16309866"
print(X_train.shape)
print(y_train.shape)
print(X_valid.shape)
print(X_test.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1989, "status": "ok", "timestamp": 1539617957396, "user": {"displayName": "\u0412\u0438\u043a\u0442\u043e\u0440 \u041e\u0432\u0435\u0440\u043a\u043e", "photoUrl": "", "userId": "04713510988786792129"}, "user_tz": -180} id="whTzvWqWNl7O" outputId="da1dd160-d33b-4c67-dbff-aa6d0b3d1621"
from keras.layers import Dense, Flatten, Dropout
from keras.layers import Conv1D, MaxPooling1D
from keras.models import Sequential
from keras.optimizers import SGD, Adam
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras.callbacks import ModelCheckpoint, EarlyStopping
# + colab={"base_uri": "https://localhost:8080/", "height": 1126} colab_type="code" executionInfo={"elapsed": 2592, "status": "ok", "timestamp": 1539617987006, "user": {"displayName": "\u0412\u0438\u043a\u0442\u043e\u0440 \u041e\u0432\u0435\u0440\u043a\u043e", "photoUrl": "", "userId": "04713510988786792129"}, "user_tz": -180} id="s-ze4t-Sk9BE" outputId="aa5b0a8b-1e95-41d8-9136-f7638b3cd1b2"
# 1-D CNN over the PCA-reduced windows (input 780 = 60 components x 13
# concatenated windows, 3 accelerometer channels): three conv-conv-pool
# stages with batch-norm and heavy dropout / L2 regularization, followed by
# two small dense layers and a sigmoid sleep/awake output.
NN = Sequential()
NN.add(Conv1D( 32, 10, input_shape=(780, 3), activation="relu", kernel_initializer="he_uniform", kernel_regularizer=l2(0.1)))
NN.add(BatchNormalization())
NN.add(Dropout(0.5))
NN.add(Conv1D( 32, 10, activation="relu", kernel_initializer="he_uniform", kernel_regularizer=l2(0.1)))
NN.add(BatchNormalization())
NN.add(MaxPooling1D( pool_size=4))
NN.add(Dropout(0.5))
# Second stage: double the filters, same 10-wide kernels.
NN.add(Conv1D( 64, 10, activation="relu", kernel_initializer="he_uniform", kernel_regularizer=l2(0.1)))
NN.add(BatchNormalization())
NN.add(Dropout(0.5))
NN.add(Conv1D( 64, 10, activation="relu", kernel_initializer="he_uniform", kernel_regularizer=l2(0.1)))
NN.add(BatchNormalization())
NN.add(MaxPooling1D( pool_size=4))
NN.add(Dropout(0.5))
# Third stage: 128 filters.
NN.add(Conv1D( 128, 10, activation="relu", kernel_initializer="he_uniform", kernel_regularizer=l2(0.1)))
NN.add(BatchNormalization())
NN.add(Dropout(0.5))
NN.add(Conv1D( 128, 10, activation="relu", kernel_initializer="he_uniform", kernel_regularizer=l2(0.1)))
NN.add(BatchNormalization())
NN.add(MaxPooling1D( pool_size=4))
NN.add(Dropout(0.5))
# Classifier head.
NN.add(Flatten())
NN.add(Dense(16, activation="relu", kernel_initializer="he_uniform", kernel_regularizer=l2(0.1)))
NN.add(BatchNormalization())
NN.add(Dropout(0.5))
NN.add(Dense(16, activation="relu", kernel_initializer="he_uniform", kernel_regularizer=l2(0.1)))
NN.add(BatchNormalization())
NN.add(Dropout(0.5))
NN.add(Dense(1, activation="sigmoid", kernel_initializer="glorot_uniform", kernel_regularizer=l2(0.1)))
NN.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
print(NN.summary())
# + colab={} colab_type="code" id="4GX6t1xvk80Q"
# Checkpoint the best model by validation accuracy; stop early when the
# validation loss has not improved for 5 epochs.
callbacks = [ModelCheckpoint('CNN_model_raw_data_weights.hdf5', monitor='val_acc', save_best_only=True), EarlyStopping(monitor='val_loss', patience=5)]
# + colab={"base_uri": "https://localhost:8080/", "height": 286} colab_type="code" executionInfo={"elapsed": 113827, "status": "ok", "timestamp": 1539618472158, "user": {"displayName": "\u0412\u0438\u043a\u0442\u043e\u0440 \u041e\u0432\u0435\u0440\u043a\u043e", "photoUrl": "", "userId": "04713510988786792129"}, "user_tz": -180} id="pjo6YtmNzqDD" outputId="dbb97863-bac2-4c5d-b76c-820afeb6e62b"
# %%time
NN.fit(X_train, y_train,
       batch_size=64,
       epochs=30,
       validation_data=(X_valid, y_valid),
       callbacks=callbacks,
       verbose=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" executionInfo={"elapsed": 3520, "status": "ok", "timestamp": 1539618148585, "user": {"displayName": "\u0412\u0438\u043a\u0442\u043e\u0440 \u041e\u0432\u0435\u0440\u043a\u043e", "photoUrl": "", "userId": "04713510988786792129"}, "user_tz": -180} id="oyksgYY55H01" outputId="b50c68a4-3526-49cc-bebc-2d27e6a0b937"
# Accuracy of the last-epoch weights on the held-out test patients.
scores = NN.evaluate(X_test, y_test)
print("Test accuracy =", scores[1])
# + colab={} colab_type="code" id="ikgszrntYSaS"
# Keep a copy of the final weights under a run-specific name.
NN.save_weights("CNN_12w_pca60_raw_data_weights.hdf5")
# + colab={} colab_type="code" id="g-FGHwxfgtjC"
files.download('CNN_model_raw_data_weights.hdf5')
# + colab={} colab_type="code" id="518uijixWFc0"
# Load best model
NN.load_weights("CNN_model_raw_data_weights.hdf5")
# + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" executionInfo={"elapsed": 2034, "status": "ok", "timestamp": 1539618156144, "user": {"displayName": "\u0412\u0438\u043a\u0442\u043e\u0440 \u041e\u0432\u0435\u0440\u043a\u043e", "photoUrl": "", "userId": "04713510988786792129"}, "user_tz": -180} id="8PmvsOTNWLvd" outputId="2e712cb9-837b-4e2f-bdc4-80c03efb8cb4"
# Re-evaluate with the best (checkpointed) weights.
scores = NN.evaluate(X_test, y_test)
print("Test accuracy =", scores[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" executionInfo={"elapsed": 1906, "status": "ok", "timestamp": 1539618159514, "user": {"displayName": "\u0412\u0438\u043a\u0442\u043e\u0440 \u041e\u0432\u0435\u0440\u043a\u043e", "photoUrl": "", "userId": "04713510988786792129"}, "user_tz": -180} id="ASQWAR4hWOXq" outputId="291df6df-e5ed-4fc9-96fb-a255761990b4"
scores = NN.evaluate(X_valid, y_valid)
print("Valid accuracy =", scores[1])
# + colab={} colab_type="code" id="ZEG-smEOq5Zr"
# Export the architecture as JSON and download it.
saved_model = NN.to_json()
with open("CNN_model_raw_data.json", "w") as json_file:
    json_file.write(saved_model)
files.download('CNN_model_raw_data.json')
# + [markdown] colab_type="text" id="5D5qO2J8APT2"
# pca 300, 10 windows: max test acc = 70% , ~20 min, ~ 30 epoch, EarlyStopping = 5
#
# max 300, 10 windows: max test acc = 0.6996, ~30 min, ~40 epoch, EarlyStopping = 10
#
# mean 300, 10 windows: max test acc = 0.5449, ~20 min, ~28 epoch, EarlyStopping = 5
#
# pca 350, 10 windows: max test acc = 0.698, ~10min, ~18 epoch, EarlyStopping = 5
#
# pca 60, 10 windows: max test acc = 0.7256, 2 min, 11 epoch, EarlyStopping = 5
#
# pca 60, 20 windows: max test acc =0.71, 5 min, 17 epoch, EarlyStopping = 5 , file 4
#
# pca 60, 60 windows: max test acc =0.7243, 7 min, 10 epoch, EarlyStopping = 5 , file 6
#
# pca 120, 16 windows: max test acc =0.7070, 9 min, 21 epoch, EarlyStopping = 7 , file 8
#
# after strong regularization
#
# pca 60, 12 windows: max test acc =0.7269, 10 min, 30 epoch, EarlyStopping = 10
#
# pca 300, 20 windows: max test acc = 0.6881, ~20 min, ~15 epoch, EarlyStopping = 10
#
# pca 60, 20 windows: max test acc =0.7222, 9 min, 25 epoch, EarlyStopping = 10
#
# + colab={} colab_type="code" id="cQzZjhtwmVIa"
| neural_networks/CNN_raw_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from sklearn.metrics import confusion_matrix
import time
from datetime import timedelta
import math
tf.__version__
# +
# Convolutional Layer 1.
filter_size1 = 5          # Convolution filters are 5 x 5 pixels.
num_filters1 = 16         # There are 16 of these filters.
# Convolutional Layer 2.
filter_size2 = 5          # Convolution filters are 5 x 5 pixels.
num_filters2 = 36         # There are 36 of these filters.
# Fully-connected layer.
fc_size = 128             # Number of neurons in fully-connected layer.
# -
# NOTE(review): tensorflow.examples.tutorials was removed in TF 2.x --
# this notebook targets TF 1.x.
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets('data/MNIST/', one_hot=True)
print("Size of:")
print("- Training-set:\t\t{}".format(len(data.train.labels)))
print("- Test-set:\t\t{}".format(len(data.test.labels)))
print("- Validation-set:\t{}".format(len(data.validation.labels)))
# Class-numbers derived from the one-hot labels, used for evaluation.
data.test.cls = np.argmax(data.test.labels, axis=1)
# +
# We know that MNIST images are 28 pixels in each dimension.
img_size = 28
# Images are stored in one-dimensional arrays of this length.
img_size_flat = img_size * img_size
# Tuple with height and width of images used to reshape arrays.
img_shape = (img_size, img_size)
# Number of colour channels for the images: 1 channel for gray-scale.
num_channels = 1
# Number of classes, one class for each of 10 digits.
num_classes = 10
# -
def plot_images(images, cls_true, cls_pred=None):
    """
    Plot nine images in a 3x3 grid with true (and optionally predicted)
    classes as the x-axis labels.
    """
    assert len(images) == len(cls_true) == 9
    # 3x3 grid of sub-plots with a little breathing room between them.
    fig, axes = plt.subplots(3, 3)
    fig.subplots_adjust(hspace=0.3, wspace=0.3)
    for idx, ax in enumerate(axes.flat):
        ax.imshow(images[idx].reshape(img_shape), cmap='binary')
        if cls_pred is None:
            xlabel = "True: {0}".format(cls_true[idx])
        else:
            xlabel = "True: {0}, Pred: {1}".format(cls_true[idx], cls_pred[idx])
        ax.set_xlabel(xlabel)
        # Tick marks add nothing for image data.
        ax.set_xticks([])
        ax.set_yticks([])
    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()
# +
# Sanity-check the data pipeline by plotting a few test images.
# Get the first images from the test-set.
images = data.test.images[0:9]
# Get the true classes for those images.
cls_true = data.test.cls[0:9]
# Plot the images and labels using our helper-function above.
plot_images(images=images, cls_true=cls_true)
# -
def new_weights(shape):
    """TF Variable of the given shape, initialized from a truncated normal (stddev 0.05)."""
    initial = tf.truncated_normal(shape, stddev=0.05)
    return tf.Variable(initial)
def new_biases(length):
    """TF Variable of `length` biases, all initialized to 0.05."""
    initial = tf.constant(0.05, shape=[length])
    return tf.Variable(initial)
def new_conv_layer(input,              # The previous layer.
                   num_input_channels, # Num. channels in prev. layer.
                   filter_size,        # Width and height of each filter.
                   num_filters,        # Number of filters.
                   use_pooling=True):  # Use 2x2 max-pooling.
    """
    Build a convolutional layer on top of `input`.

    Returns:
        layer   - output tensor: conv + bias, optional 2x2 max-pool, ReLU.
        weights - the filter Variable, returned so it can be plotted later.
    """
    # Shape of the filter-weights for the convolution.
    # This format is determined by the TensorFlow API.
    shape = [filter_size, filter_size, num_input_channels, num_filters]
    # Create new weights aka. filters with the given shape.
    weights = new_weights(shape=shape)
    # Create new biases, one for each filter.
    biases = new_biases(length=num_filters)
    # Create the TensorFlow operation for convolution.
    # Note the strides are set to 1 in all dimensions.
    # The first and last stride must always be 1,
    # because the first is for the image-number and
    # the last is for the input-channel.
    # But e.g. strides=[1, 2, 2, 1] would mean that the filter
    # is moved 2 pixels across the x- and y-axis of the image.
    # The padding is set to 'SAME' which means the input image
    # is padded with zeroes so the size of the output is the same.
    layer = tf.nn.conv2d(input=input,
                         filter=weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME')
    # Add the biases to the results of the convolution.
    # A bias-value is added to each filter-channel.
    layer += biases
    # Use pooling to down-sample the image resolution?
    if use_pooling:
        # This is 2x2 max-pooling, which means that we
        # consider 2x2 windows and select the largest value
        # in each window. Then we move 2 pixels to the next window.
        layer = tf.nn.max_pool(value=layer,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')
    # Rectified Linear Unit (ReLU).
    # It calculates max(x, 0) for each input pixel x.
    # This adds some non-linearity to the formula and allows us
    # to learn more complicated functions.
    layer = tf.nn.relu(layer)
    # Note that ReLU is normally executed before the pooling,
    # but since relu(max_pool(x)) == max_pool(relu(x)) we can
    # save 75% of the relu-operations by max-pooling first.
    # We return both the resulting layer and the filter-weights
    # because we will plot the weights later.
    return layer, weights
def flatten_layer(layer):
    """
    Flatten a 4-D convolutional output into 2-D for fully-connected layers.

    Returns:
        layer_flat   - tensor reshaped to [num_images, num_features].
        num_features - img_height * img_width * num_channels.
    """
    # Get the shape of the input layer.
    layer_shape = layer.get_shape()
    # The shape of the input layer is assumed to be:
    # layer_shape == [num_images, img_height, img_width, num_channels]
    # The number of features is: img_height * img_width * num_channels
    # We can use a function from TensorFlow to calculate this.
    num_features = layer_shape[1:4].num_elements()
    # Reshape the layer to [num_images, num_features].
    # Note that we just set the size of the second dimension
    # to num_features and the size of the first dimension to -1
    # which means the size in that dimension is calculated
    # so the total size of the tensor is unchanged from the reshaping.
    layer_flat = tf.reshape(layer, [-1, num_features])
    # The shape of the flattened layer is now:
    # [num_images, img_height * img_width * num_channels]
    # Return both the flattened layer and the number of features.
    return layer_flat, num_features
def new_fc_layer(input,          # The previous layer.
                 num_inputs,     # Num. inputs from prev. layer.
                 num_outputs,    # Num. outputs.
                 use_relu=True): # Use Rectified Linear Unit (ReLU)?
    """
    Build a fully-connected layer: input @ weights + biases, optional ReLU.

    Returns the output tensor.
    """
    weights = new_weights(shape=[num_inputs, num_outputs])
    biases = new_biases(length=num_outputs)
    # Matrix multiplication of the input and weights, plus the bias-values.
    layer = tf.matmul(input, weights) + biases
    return tf.nn.relu(layer) if use_relu else layer
# +
# Placeholder for the flattened input images, reshaped to 4-D for the
# convolutional layers.
x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')
x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])
# One-hot true labels and the derived class-numbers.
# NOTE(review): `dimension=` is deprecated in later TF 1.x releases in
# favor of `axis=`.
y_true = tf.placeholder(tf.float32, shape=[None, 10], name='y_true')
y_true_cls = tf.argmax(y_true, dimension=1)
# -
# First convolutional layer (with pooling).
layer_conv1, weights_conv1 = \
    new_conv_layer(input=x_image,
                   num_input_channels=num_channels,
                   filter_size=filter_size1,
                   num_filters=num_filters1,
                   use_pooling=True)
layer_conv1
# Second convolutional layer.
layer_conv2, weights_conv2 = \
    new_conv_layer(input=layer_conv1,
                   num_input_channels=num_filters1,
                   filter_size=filter_size2,
                   num_filters=num_filters2,
                   use_pooling=True)
layer_conv2
# Flatten the conv output for the fully-connected layers.
layer_flat, num_features = flatten_layer(layer_conv2)
layer_flat
num_features
# First fully-connected layer (ReLU).
layer_fc1 = new_fc_layer(input=layer_flat,
                         num_inputs=num_features,
                         num_outputs=fc_size,
                         use_relu=True)
layer_fc1
# Output layer: raw logits for the 10 digit classes.
layer_fc2 = new_fc_layer(input=layer_fc1,
                         num_inputs=fc_size,
                         num_outputs=num_classes,
                         use_relu=False)
layer_fc2
# Softmax probabilities and predicted class-numbers.
y_pred = tf.nn.softmax(layer_fc2)
y_pred_cls = tf.argmax(y_pred, dimension=1)
# Cross-entropy is computed on the logits, then averaged over the batch.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,
                                                        labels=y_true)
cost = tf.reduce_mean(cross_entropy)
# Adam optimizer and the accuracy metric.
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Create the session and initialize all graph variables.
session = tf.Session()
session.run(tf.initialize_all_variables())
train_batch_size = 64
# +
# Counter for total number of iterations performed so far.
total_iterations = 0

def optimize(num_iterations):
    """
    Run `num_iterations` optimization steps on random training batches.

    Uses the module-level `session`, `optimizer`, `accuracy` and `data`,
    updates the global iteration counter, prints training accuracy every
    100 iterations, and reports total wall-clock time at the end.
    """
    # Ensure we update the global variable rather than a local copy.
    global total_iterations
    # Start-time used for printing time-usage below.
    start_time = time.time()
    for i in range(total_iterations,
                   total_iterations + num_iterations):
        # Get a batch of training examples.
        # x_batch now holds a batch of images and
        # y_true_batch are the true labels for those images.
        x_batch, y_true_batch = data.train.next_batch(train_batch_size)
        # NOTE(review): per-iteration debug prints below are very noisy for
        # large num_iterations; consider removing them.
        print('x_batch.shape: ', x_batch.shape)
        print('y_true_batch.shape: ', y_true_batch.shape)
        print('y_true_batch: ', y_true_batch)
        # Put the batch into a dict with the proper names
        # for placeholder variables in the TensorFlow graph.
        feed_dict_train = {x: x_batch,
                           y_true: y_true_batch}
        # Run the optimizer using this batch of training data.
        # TensorFlow assigns the variables in feed_dict_train
        # to the placeholder variables and then runs the optimizer.
        session.run(optimizer, feed_dict=feed_dict_train)
        # Print status every 100 iterations.
        if i % 100 == 0:
            # Calculate the accuracy on the training-set.
            acc = session.run(accuracy, feed_dict=feed_dict_train)
            # Message for printing.
            msg = "Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}"
            # Print it.
            print(msg.format(i + 1, acc))
    # Update the total number of iterations performed.
    total_iterations += num_iterations
    # Ending time.
    end_time = time.time()
    # Difference between start and end-times.
    time_dif = end_time - start_time
    # Print the time-usage.
    print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))
# -
def plot_example_errors(cls_pred, correct):
    """
    Plot the first nine mis-classified test-set images.

    cls_pred - predicted class-number for every image in the test-set.
    correct  - boolean array: prediction equals the true class.
    """
    # Images where the prediction disagrees with the truth.
    wrong = np.logical_not(correct)
    wrong_images = data.test.images[wrong]
    wrong_pred = cls_pred[wrong]
    wrong_true = data.test.cls[wrong]
    # Show the first nine of them via the shared helper.
    plot_images(images=wrong_images[0:9],
                cls_true=wrong_true[0:9],
                cls_pred=wrong_pred[0:9])
def plot_confusion_matrix(cls_pred):
    """
    Print and plot the confusion matrix for the whole test-set.

    cls_pred - predicted class-numbers for all test images; true classes
               come from the module-level `data`.
    """
    # Get the true classifications for the test-set.
    cls_true = data.test.cls
    # Get the confusion matrix using sklearn.
    cm = confusion_matrix(y_true=cls_true,
                          y_pred=cls_pred)
    # Print the confusion matrix as text.
    print(cm)
    # Plot the confusion matrix as an image.
    plt.matshow(cm)
    # Make various adjustments to the plot.
    plt.colorbar()
    tick_marks = np.arange(num_classes)
    plt.xticks(tick_marks, range(num_classes))
    plt.yticks(tick_marks, range(num_classes))
    plt.xlabel('Predicted')
    plt.ylabel('True')
    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()
# +
# Split the test-set into smaller batches of this size.
test_batch_size = 256

def print_test_accuracy(show_example_errors=False,
                        show_confusion_matrix=False):
    """
    Compute and print classification accuracy on the whole test-set.

    Predictions are made in batches of `test_batch_size` to bound memory
    use. Optionally plots example mis-classifications and the confusion
    matrix. Uses the module-level `session`, graph tensors and `data`.
    """
    # Number of images in the test-set.
    num_test = len(data.test.images)
    # Allocate an array for the predicted classes which
    # will be calculated in batches and filled into this array.
    cls_pred = np.zeros(shape=num_test, dtype=np.int)
    # Now calculate the predicted classes for the batches.
    # We will just iterate through all the batches.
    # There might be a more clever and Pythonic way of doing this.
    # The starting index for the next batch is denoted i.
    i = 0
    while i < num_test:
        # The ending index for the next batch is denoted j.
        j = min(i + test_batch_size, num_test)
        # Get the images from the test-set between index i and j.
        images = data.test.images[i:j, :]
        # Get the associated labels.
        labels = data.test.labels[i:j, :]
        # Create a feed-dict with these images and labels.
        feed_dict = {x: images,
                     y_true: labels}
        # Calculate the predicted class using TensorFlow.
        cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)
        # Set the start-index for the next batch to the
        # end-index of the current batch.
        i = j
    # Convenience variable for the true class-numbers of the test-set.
    cls_true = data.test.cls
    # Create a boolean array whether each image is correctly classified.
    correct = (cls_true == cls_pred)
    # Calculate the number of correctly classified images.
    # When summing a boolean array, False means 0 and True means 1.
    correct_sum = correct.sum()
    # Classification accuracy is the number of correctly classified
    # images divided by the total number of images in the test-set.
    acc = float(correct_sum) / num_test
    # Print the accuracy.
    msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})"
    print(msg.format(acc, correct_sum, num_test))
    # Plot some examples of mis-classifications, if desired.
    if show_example_errors:
        print("Example errors:")
        plot_example_errors(cls_pred=cls_pred, correct=correct)
    # Plot the confusion matrix, if desired.
    if show_confusion_matrix:
        print("Confusion Matrix:")
        plot_confusion_matrix(cls_pred=cls_pred)
# -
# Baseline accuracy with freshly initialized (untrained) weights.
print_test_accuracy()
# One optimization step.
optimize(num_iterations=1)
print_test_accuracy()
# 100 iterations in total (1 + 99).
optimize(num_iterations=99)
print_test_accuracy(show_example_errors=True)
# 1,000 iterations in total.
optimize(num_iterations=900)
print_test_accuracy(show_example_errors=True)
# 10,000 iterations in total; also show the confusion matrix.
optimize(num_iterations=9000)
print_test_accuracy(show_example_errors=True,
                    show_confusion_matrix=True)
# Release the TensorFlow resources held by the session.
session.close()
| deep-learning/convolutional-neural-networks/CNN_Tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="qrlRNGGwPO-A" colab={"base_uri": "https://localhost:8080/"} outputId="a8c2da9e-6d5a-4b05-bd40-e296157a8eb7"
# Mount Google Drive and make the project's helper modules importable.
from google.colab import drive
drive.mount('/content/drive')
# + id="lqVbGIfxPOaJ"
import sys
# Directory that holds nlp_preprocessing and topic_modeling (imported below).
sys.path.append('/content/drive/MyDrive/Unbiased_news/python_modules')
# + id="3MiLTrsbc1RN" colab={"base_uri": "https://localhost:8080/"} outputId="90df18e8-3882-48fd-c125-2e23ffaffff0"
import nltk
# Tokenizer models and the stop-word list used by the preprocessing helpers.
nltk.download('punkt')
nltk.download('stopwords')
import pandas as pd
import numpy as np
import random
from collections import defaultdict
from ast import literal_eval
from collections import Counter
import re
import unicodedata
# Project-local helper modules (made importable via sys.path above).
from nlp_preprocessing import *
from topic_modeling import *
from sklearn.feature_extraction.text import TfidfVectorizer, ENGLISH_STOP_WORDS, CountVectorizer
import spacy
import pickle
from sklearn.metrics.pairwise import cosine_similarity
# NOTE(review): the 'en' shortcut link was removed in spaCy 3.x; newer
# environments need spacy.load('en_core_web_sm') -- confirm the pinned version.
sp_nlp = spacy.load('en')
# Show full frames when inspecting data; keep column width at the default.
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
# pd.set_option('display.max_colwidth', None)
pd.reset_option('display.max_colwidth')
# %load_ext autoreload
# %autoreload 2
# + [markdown] id="olT6zBMaCDYS"
# ## Classification Ideas
#
# Vader for sentiment - built on social media
# textblob sentiment -
# compound output for
#
# word complexity - count words longer than 5 letters or avg length
# no. of words in document
#
# # # ! and ? in doc for sensationalism
#
# spacy tagging on proper nouns. NER.
#
# NER on doc & occurance %
#
# sentiment on headlines
# NER for headlines and checking sentiment on those NER in the article
#
# passive & active voice
#
#
# + [markdown] id="SeMQSeLEc1RS"
# # Data Import
# + colab={"base_uri": "https://localhost:8080/", "height": 618} id="PJatsJNqc1RS" outputId="99fe4be2-ff85-48c6-80a2-c94149d523ec"
# df = pd.read_csv("Data/data_NLP_round1.csv")
df = pd.read_csv("/content/drive/MyDrive/Unbiased_news/Data/data_NLP_round1.csv")
df.head()
# + [markdown] id="7fClcjS0c1RT"
# Since each news article can contain slightly different unicode formatting, its best to convert everything to ascii format, to make it easier to work the data. All incomptabile characters will be converted or dropped. Since we are working with English, the hope is that a majority of the data is retained.
# **But we can come to this later to see how much data is being dropped.**
# + colab={"base_uri": "https://localhost:8080/", "height": 77} id="K0zV6lRpc1RT" outputId="1cc4d838-e60f-4a46-8750-6bb232fd6110"
# Ensuring everything is in ascii format and removing any wierd formatings.
df['text_ascii'] = df.text.map(lambda x: unicodedata.normalize('NFKD', x).encode('ascii', 'ignore').decode('ascii'))
df[['text','text_ascii']].sample()
# + colab={"base_uri": "https://localhost:8080/"} id="ZfA0dCbgMv2t" outputId="2c4b8341-ddf4-4930-8ccb-ad29d41087a1"
df.news_source.unique()
# + [markdown] id="NStM2_tec1RU"
# # Pre-processing to work on
#
# 1. Better cleaning process - Post lemma and pre lemma? what else??
# 1. Compound term extraction - incl. punctuation separated & space separated
# 1. Named entity extraction & linkage (eg: hong_kong vs hong kong)
# + [markdown] id="rzP3KWFeCDYV"
# # Breaking Into Sentences
#
# Let's split by sentences.
# + id="3ZnlbS90CDYV"
def sent_split(article):
    """Split an article into sentences.

    Uses NLTK's sentence tokenizer, then additionally breaks each
    sentence on blank lines ('\\n\\n'), since paragraph breaks are not
    sentence boundaries to NLTK.
    """
    pieces = []
    for sentence in nltk.sent_tokenize(article):
        pieces += sentence.split('\n\n')
    return pieces
# Boilerplate phrases that mark a sentence as non-content
# (ads, sign-up prompts, copyright lines, reporting credits, ...).
_BOILERPLATE_PHRASES = (
    'click here', 'sign up here', 'sign up for daily', 'sign up for the',
    'contributed to this', 'all rights reserved', 'reported from',
    'contributed reporting', 'want fox news', 'the washington times, llc',
    'sign up for our', 'daily to your inbox',
)

def simple_cleaning(text_sent):
    """Return True if the sentence looks like real article content.

    A sentence is rejected (False) when it is empty, is exactly 'ad',
    contains any known boilerplate phrase, or has 5 or fewer alphabetic
    words once non-letter characters are stripped.
    """
    text_sent = text_sent.lower()
    if text_sent == '' or text_sent == 'ad':
        return False
    # Bug fix: the original tested `find('all rights reserved') > 0`, which
    # missed sentences *starting* with the phrase (index 0); `in` matches
    # at any position, consistent with every other phrase check.
    if any(phrase in text_sent for phrase in _BOILERPLATE_PHRASES):
        return False
    # Reject very short sentences: keep only letters/whitespace, then count words.
    if len(re.sub(r'[^a-z\s]', '', text_sent).split()) <= 5:
        return False
    return True
# + colab={"base_uri": "https://localhost:8080/"} id="We2f8bTgZKFA" outputId="993e9a71-2c44-4f58-ee09-b3bbea79222c"
df.columns
# + id="Z6Qc_OCrCDYV"
# Sentence-level pipeline: explode each article into one row per sentence,
# drop boilerplate/short sentences, and re-attach article metadata.
df_sentences = df[['number','global_bias','title','date','news_title','news_link','bias','news_source','text_ascii']].copy(deep=True)
# # Splitting each para into a list of paras
df_sentences['text_sent_list'] = df_sentences.text_ascii.map(sent_split)
# # Exploding the paragraphs into a dataframe, where each row has a paragraph
df_sentences_col = pd.DataFrame(df_sentences.text_sent_list.explode())
df_sentences_col.rename(columns={'text_sent_list':'text_sent'}, inplace=True)
# # # Cleaning up some portions of the expansion
# # df_sentences_col = df_sentences_col[~(df_sentences_col.text_sent == 'AD')]
# # df_sentences_col = df_sentences_col[~(df_sentences_col.text_sent.str.contains('click here', case=False))]
# # df_sentences_col = df_sentences_col[~(df_sentences_col.text_sent.str.contains('sign up here', case=False))]
# # df_sentences_col = df_sentences_col[~(df_sentences_col.text_sent.str.contains('sign up for daily', case=False))]
# # df_sentences_col = df_sentences_col[~(df_sentences_col.text_sent.str.contains('sign up for the', case=False))]
# # df_sentences_col = df_sentences_col[~(df_sentences_col.text_sent.str.contains('contributed to this', case=False))]
# # df_sentences_col = df_sentences_col[~(df_sentences_col.text_sent.str.contains('All rights reserved', case=False))]
# # df_sentences_col = df_sentences_col[~(df_sentences_col.text_sent.str.contains('reported from', case=False))]
# # df_sentences_col = df_sentences_col[~(df_sentences_col.text_sent.str.contains('contributed reporting', case=False))]
# # df_sentences_col = df_sentences_col[~(df_sentences_col.text_sent.isna())]
# # df_sentences_col = df_sentences_col[~(df_sentences_col.text_sent == '')]
# Keep only sentences that pass the boilerplate/length filter above.
# NOTE(review): simple_cleaning runs *before* the isna() filter below; if
# explode() ever yields NaN (an article whose sentence list is empty),
# .lower() inside simple_cleaning would raise — confirm text_ascii is
# always non-empty.
df_sentences_col = df_sentences_col[df_sentences_col.text_sent.map(simple_cleaning)]
df_sentences_col = df_sentences_col[~(df_sentences_col.text_sent.isna())]
# # Joining the exploded dataframe back, so that other metadata can be associated with it
df_sentences = df_sentences.join(df_sentences_col, how='left').reset_index()
df_sentences.rename(columns={'index':'article'}, inplace=True)
df_sentences.drop(columns='text_sent_list', inplace=True)
# Dropping entire articles from left & right for which nothing got joined in the above statement.
article_nums_todrop = df_sentences[df_sentences.text_sent.isna()].number.tolist()
df_sentences = df_sentences[~(df_sentences.number.isin(article_nums_todrop))].reset_index(drop = True)
# getting paragraph numbering
df_sentences['text_count'] = df_sentences.groupby('article').cumcount()
del df_sentences_col
# + id="PMPbByDAAPoa"
df_sentences.loc[df_sentences.text_sent.map(lambda x: len(x.split()) == 6), 'text_sent']
# + colab={"base_uri": "https://localhost:8080/"} id="pEfYT0Ci-WTX" outputId="047a08b1-3d67-4082-f8ba-964ca46bf046"
df_sentences.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 363} id="CXZWhZI4TzfK" outputId="c0b797fd-458e-47df-ad59-f85592781f9c"
df_sentences.head()
# + id="CjboNdAEL0in"
df_sentences.to_csv("/content/drive/MyDrive/Unbiased_news/Data/sent_expanded_ready_for_modeling_v2.csv", index=False)
# + [markdown] id="Wl0lhMxAc1RU"
# # Breaking Into Paras
#
# Let's breakout each news article into paragraphs and expand this into a new dataframe.
# These paragraphs will be treated as individual documents that will be used to vectorize & topic model. Post which, for a given overall news headline, each paragraph from the left & right bias will be compared to see pair up paragraphs.
# + id="9gDD-7E9c1RU"
# Paragraph-level pipeline: explode each article into one row per paragraph,
# drop boilerplate paragraphs, and re-attach article metadata.
df_expanded = df[['number','global_bias','title','news_source','text_ascii']].copy(deep=True)
# Splitting each article into a list of paragraphs (blank-line separated).
df_expanded['text_paras_list'] = df_expanded.text_ascii.str.split('\n\n')
# Exploding the paragraphs into a dataframe, where each row has a paragraph.
df_expanded_col = pd.DataFrame(df_expanded.text_paras_list.explode())
df_expanded_col.rename(columns={'text_paras_list':'text_paras'}, inplace=True)
# Cleaning up boilerplate rows (ads, sign-up prompts, credits, ...).
df_expanded_col = df_expanded_col[~(df_expanded_col.text_paras == 'AD')]
df_expanded_col = df_expanded_col[~(df_expanded_col.text_paras.str.contains('click here', case=False))]
df_expanded_col = df_expanded_col[~(df_expanded_col.text_paras.str.contains('sign up here', case=False))]
df_expanded_col = df_expanded_col[~(df_expanded_col.text_paras.str.contains('sign up for daily', case=False))]
df_expanded_col = df_expanded_col[~(df_expanded_col.text_paras.str.contains('sign up for the', case=False))]
df_expanded_col = df_expanded_col[~(df_expanded_col.text_paras.str.contains('contributed to this', case=False))]
df_expanded_col = df_expanded_col[~(df_expanded_col.text_paras.str.contains('All rights reserved', case=False))]
df_expanded_col = df_expanded_col[~(df_expanded_col.text_paras.str.contains('reported from', case=False))]
df_expanded_col = df_expanded_col[~(df_expanded_col.text_paras.str.contains('contributed reporting', case=False))]
df_expanded_col = df_expanded_col[~(df_expanded_col.text_paras.isna())]
df_expanded_col = df_expanded_col[~(df_expanded_col.text_paras == '')]
# Joining the exploded dataframe back, so that other metadata can be associated with it.
df_expanded = df_expanded.join(df_expanded_col, how='left').reset_index()
df_expanded.rename(columns={'index':'article'}, inplace=True)
df_expanded.drop(columns='text_paras_list', inplace=True)
# Dropping entire articles from left & right for which nothing got joined in the above statement.
article_nums_todrop = df_expanded[df_expanded.text_paras.isna()].number.tolist()
# Fix: use drop=True (as the sentence pipeline does) so the old index is
# discarded instead of being added as a stray 'index' column.
df_expanded = df_expanded[~(df_expanded.number.isin(article_nums_todrop))].reset_index(drop=True)
# Getting paragraph numbering within each article.
df_expanded['para_count'] = df_expanded.groupby('article').cumcount()
# + [markdown] id="5tdFpPZZc1RV"
# # Pre-processing
# + [markdown] id="u9DhzFU0c1RV"
# ## Lemmatization
#
# Lemmatizing first helps preserve as much meaning of the word as possible, while separating out punctuation as needed. It also preserves entity names.
# **Only need to link compound words somehow**
# + colab={"base_uri": "https://localhost:8080/"} id="kqVlt7ojc1RV" outputId="7d2460e8-b4cb-42dc-8d87-6d57e4321405"
# %%time
df_sentences['text_sent_lemma'] = df_sentences.text_sent.map(spacy_lemmatization)
df_sentences[['text_sent', 'text_sent_lemma']].sample(2)
# df_expanded['text_paras_lemma'] = df_expanded.text_paras.map(spacy_lemmatization)
# df_expanded[['text_paras', 'text_paras_lemma']].sample(2)
# + id="HG_qRqYwc1RW" outputId="3a8c3446-9fa9-40d1-a861-c2b57a304398"
pd.set_option('display.max_colwidth', None)
print(df_expanded.sample()[['text_paras','text_paras_lemma']])
pd.reset_option('display.max_colwidth')
# + [markdown] id="Cm-DLUEfc1RX"
# ## Misc Cleaning
#
# Misc. cleaning of the documents. Currently this involves just removing email addresses, website links & any non-alphanumeric characters.
# + colab={"base_uri": "https://localhost:8080/", "height": 106} id="-UBBLPotc1RY" outputId="639c3f1c-ea62-4191-b3e1-15cb424b61c9"
df_sentences['text_sent_misc_clean'] = df_sentences.text_sent_lemma.map(cleaning)
df_sentences[['text_sent_lemma','text_sent_misc_clean']].sample(2)
# df_expanded['text_paras_misc_clean'] = df_expanded.text_paras_lemma.map(cleaning)
# df_expanded[['text_paras_lemma','text_paras_misc_clean']].sample(2)
# + id="XmvjETnmc1RY" outputId="da310b8d-7961-4feb-92aa-b2912b492256"
pd.set_option('display.max_colwidth', None)
print(df_expanded.loc[18300,['text_paras','text_paras_misc_clean']])
pd.reset_option('display.max_colwidth')
# + id="RHP0uxufc1RZ" outputId="d702d58d-7b5f-461b-8794-6da5d9cbc055"
pd.set_option('display.max_colwidth', None)
print(df_expanded.sample()[['text_paras','text_paras_misc_clean']])
pd.reset_option('display.max_colwidth')
# + [markdown] id="R88-KvxPU1f9"
# ## Remove Stop-words
#
# Apart from using SK Learn's stop words list, we add additional words that have to be removed from the corpus.
# The additional words are identified using an interative process of topic modeling and reviewing the top words showing up.
#
# + colab={"base_uri": "https://localhost:8080/"} id="Kx2POYEZc1RZ" outputId="03a26a61-ca04-4e2d-982d-edf890164776"
# %%time
custom_stop_words = ['ad', 'advertisement', '000', 'mr', 'ms', 'said', 'going', 'dont', 'think', 'know', 'want', 'like', 'im', 'thats', 'told', \
'lot', 'hes', 'really', 'say', 'added', 'come', 'great','newsletter','daily','sign','app',\
'click','app','inbox', 'latest', 'jr','everybody','`']
df_sentences['text_sent_stopwords'] = df_sentences.text_sent_misc_clean.map(lambda x: remove_stopwords(x, custom_words=custom_stop_words))
# df_expanded['text_paras_stopwords'] = df_expanded.text_paras_misc_clean.map(lambda x: remove_stopwords(x, custom_words=custom_stop_words))
# df_expanded['text_paras_stopwords'] = df_expanded.text_paras_stopwords.map(lambda x: remove_stopwords(x, remove_words_list = [], \
# custom_words = custom_stop_words))
# df_expanded[['text_paras_lemma','text_paras_stopwords']].sample(2)
df_sentences[['text_sent_misc_clean','text_sent_stopwords']].sample(2)
# + colab={"base_uri": "https://localhost:8080/"} id="5snHEeqx-vwx" outputId="201ae064-df0b-4a3d-92a7-32d387718983"
pd.set_option('display.max_colwidth', None)
print(df_expanded.sample()[['text_paras','text_paras_stopwords']])
pd.reset_option('display.max_colwidth')
# + [markdown] id="pACmmb0eVUaU"
# ## Remove Small Words
#
# All words less than 3 characters seem to not add much value. Hence, they shall be removed; unless its a number.
# + id="0Jbf4bysSoY9"
from nltk.tokenize import word_tokenize
def remove_small_words(text, length = 2):
    """
    Drop tokens of at most `length` characters, keeping digit-only tokens
    regardless of how short they are. Returns the surviving tokens joined
    by single spaces.
    """
    kept = [
        tok for tok in (t.strip() for t in word_tokenize(text))
        if len(tok) > length or tok.isdigit()
    ]
    return ' '.join(kept)
# + colab={"base_uri": "https://localhost:8080/", "height": 106} id="RYMyEDY7P7PT" outputId="a80ac58a-31c8-4413-b46a-a57320f7f1a9"
df_sentences['text_sent_no_small_words'] = df_sentences.text_sent_stopwords.map(remove_small_words)
df_sentences[['text_sent_misc_clean','text_sent_no_small_words']].sample(2)
# df_expanded['text_paras_no_small_words'] = df_expanded.text_paras_stopwords.map(remove_small_words)
# df_expanded[['text_paras_misc_clean','text_paras_no_small_words']].sample(2)
# + id="9p-_wj2hVR-r"
pd.set_option('display.max_colwidth', None)
print(df_expanded.sample()[['text_paras','text_paras_no_small_words']])
pd.reset_option('display.max_colwidth')
# + id="7NCpd3MdCDYe"
df_sentences['text_final'] = df_sentences['text_sent_no_small_words'].fillna(value=' ')
# df_expanded['text_final'] = df_expanded['text_paras_no_small_words'].fillna(value=' ')
# + colab={"base_uri": "https://localhost:8080/", "height": 669} id="gUuhDJGu4ECn" outputId="ca5c9396-0de6-472d-9781-1deeac95619a"
df_sentences.sample(5)
# + id="wcsbkcudCDYf"
df_sentences.to_csv("/content/drive/MyDrive/Unbiased_news/Data/sent_expanded_ready_for_modeling.csv", index=False)
# df_expanded.to_csv('Data/paras_expanded_ready_for_modeling.csv', index=False)
# + colab={"base_uri": "https://localhost:8080/"} id="g9LQNZa_CaeK" outputId="b51305f3-132e-4511-ed7c-ab57f2ec1e92"
df_expanded.columns
# + [markdown] id="_KqpvUz5CDYf"
# **Below, we are assigning last pre-processed column to a 'text_final' column so that downstream functions dont have to be changed. Just the below code to indicate the final column as needed.**
# + id="UVQKfZs1CDYg" outputId="90591fdc-6ceb-42b0-bbe8-8e39ea955792"
df_expanded.shape
# + id="awg5eWCPCDYg"
# df_expanded.to_csv("/content/drive/MyDrive/Unbiased_news/Data/paras_expanded_ready_for_modeling.csv", index=False)
df_expanded = pd.read_csv('Data/paras_expanded_ready_for_modeling.csv')
# + id="aDkhsovHBpk3"
# with open("/content/drive/MyDrive/Unbiased_news/Data/tfidf_vectorizer.pickle", 'wb') as model_file:
# pickle.dump(review_word_matrix_tfidf, model_file)
# + [markdown] id="g1DKQlc3CDYg"
# # Vectorizer & Topic Modeling
# + id="dxNtlK1DCDYh" outputId="e1536ba1-03fa-48e5-a4db-3cce1fe5466b"
# %%time
params = {'stop_words':'english','min_df': 10, 'max_df': 0.5, 'ngram_range':(1, 1),}
cv = CountVectorizer(**params)
review_word_matrix_cv = cv.fit_transform(df_expanded['text_final'])
review_vocab_cv = cv.get_feature_names()
lda_cv, score_cv, topic_matrix_cv, word_matrix_cv = lda_topic_modeling(review_word_matrix_cv, vocab = review_vocab_cv, n = 100)
# + colab={"base_uri": "https://localhost:8080/"} id="zN6yQeHEc1Ra" outputId="8ab4a222-e654-47e0-d359-6f00e2b18617"
# %%time
params = {'stop_words':'english','min_df': 10, 'max_df': 0.5, 'ngram_range':(1, 1),}
tfidf = TfidfVectorizer(**params)
review_word_matrix_tfidf = tfidf.fit_transform(df_expanded['text_final'])
review_vocab_tfidf = tfidf.get_feature_names()
lda_tfidf, score_tfidf, topic_matrix_tfidf, word_matrix_tfidf = lda_topic_modeling(review_word_matrix_tfidf, vocab = review_vocab_tfidf, n = 200)
# + [markdown] id="G4c4Oq2mc1Ra"
# ### Exploring The Topic Models
#
# Let's take a look at the topic model to see what we've got.
# + [markdown] id="6vfjjQjSc1Rb"
# Looking at the top words for each topics, there are a number of filler words which we could remove to make the topics a lot more senseful. Additionally, all numbers except for years can be removed too. Lastly, a way needs to be identified for detecting compound words, especially names of places, like Hong Kong, North America etc
# + id="enRD2a4D-0BI"
import pickle
with open("/content/drive/MyDrive/Unbiased_news/Data/lda_100_topics_model.pickle", 'wb') as model_file:
pickle.dump(lda_tfidf, model_file)
# + [markdown] id="o-T5dhmaCDYh"
# # Pairing the Articles
# + [markdown] id="Tl6DeOMhCDYi"
# ## LDA with 100 Topics & Count Vectorizer
# + id="jNOm-llPCDYi"
df_expanded_100_cv_topics = df_expanded.join(topic_matrix_cv)
# + id="IfVafQNBCDYi" outputId="60555fcc-6909-4ca7-af2c-6c6e1296b1d8"
pd.set_option('display.max_colwidth', None)
# article numbers - 3474, 5
one_topic = df_expanded_100_cv_topics[df_expanded_100_cv_topics.number == 3474].dropna(subset=['text_final'])
left_article = one_topic[one_topic.global_bias == 'From the Left']
right_article = one_topic[one_topic.global_bias == 'From the Right']
left_article_len = len(left_article)
right_article_len = len(right_article)
smaller_article,bigger_article = (left_article,right_article) if left_article_len < right_article_len else (right_article,left_article)
counter = 1
# Greedy pairing: for every paragraph of the shorter article, find the most
# similar paragraph of the longer one by cosine similarity over the topic
# vectors, then remove the matched paragraph so it cannot be reused.
for index, row in smaller_article.iterrows():
    # NOTE(review): this model was fit with 100 topics (n=100 above), but the
    # slice only covers 'topic_0':'topic_19' (20 topics) — looks like a stale
    # copy from a 20-topic run; confirm whether 'topic_99' was intended.
    X = bigger_article.loc[:,'topic_0':'topic_19']
    y = row.loc['topic_0':'topic_19'].values.reshape(1,-1)
    similarity_scores = cosine_similarity(X,y).flatten()
    indices = np.argsort(similarity_scores)
    print(f"*** Para {counter} *** ")
    print(row['text_paras'])
    # Show the top-3 match scores and paragraphs, best first.
    print(similarity_scores[indices[-1:-4:-1]])
    print(bigger_article.iloc[indices[-1:-4:-1]].loc[:,'text_paras'])
    # Drop the best match so subsequent paragraphs pair with the remainder.
    index_to_drop = bigger_article.index[indices[-1]]
    bigger_article.drop(index = index_to_drop, inplace = True)
    print('\n')
    counter += 1
pd.reset_option('display.max_colwidth')
# + [markdown] id="D-a1MYZVCDYi"
# ## LDA with 200 Topics
# + id="_geKfCtSCDYi" outputId="d22325d0-651d-4c91-b162-079fe82a8bbe"
df_expanded.shape
# + id="X7k79NsICDYi"
# df_expanded_20_topics = pd.read_csv("Data/paras_expanded_with_topics.csv")
df_expanded_200_topics = pd.read_csv("Data/paras_expanded_200_topics.csv")
# + id="SsP3ac88CDYj" outputId="c581df42-096a-4079-ac82-8c7ea98211c7"
pd.set_option('display.max_colwidth', None)
# article numbers - 3474, 5
one_topic = df_expanded_200_topics[df_expanded_200_topics.number == 5].dropna(subset=['text_final'])
left_article = one_topic[one_topic.global_bias == 'From the Left']
right_article = one_topic[one_topic.global_bias == 'From the Right']
left_article_len = len(left_article)
right_article_len = len(right_article)
smaller_article,bigger_article = (left_article,right_article) if left_article_len < right_article_len else (right_article,left_article)
counter = 1
for index, row in smaller_article.iterrows():
X = bigger_article.loc[:,'topic_0':'topic_19']
y = row.loc['topic_0':'topic_19'].values.reshape(1,-1)
similarity_scores = cosine_similarity(X,y).flatten()
indices = np.argsort(similarity_scores)
print(f"*** Para {counter} *** ")
print(row['text_paras'])
print(similarity_scores[indices[-1:-4:-1]])
print(bigger_article.iloc[indices[-1:-4:-1]].loc[:,'text_paras'])
index_to_drop = bigger_article.index[indices[-1]]
bigger_article.drop(index = index_to_drop, inplace = True)
print('\n')
counter += 1
pd.reset_option('display.max_colwidth')
# + [markdown] id="UsZcLGAtCDYj"
# ## LDA with 100 Topics
# + id="nxuaJZXKCDYj"
df_expanded_100_topics = pd.read_csv("Data/paras_expanded_100_topics.csv")
# + id="Fy-usTI3CDYj" outputId="858c1b1d-4fff-424f-c940-7cfe0c20497b"
pd.set_option('display.max_colwidth', None)
one_topic = df_expanded_100_topics[df_expanded_100_topics.number == 3474].dropna(subset=['text_final'])
left_article = one_topic[one_topic.global_bias == 'From the Left']
right_article = one_topic[one_topic.global_bias == 'From the Right']
left_article_len = len(left_article)
right_article_len = len(right_article)
smaller_article,bigger_article = (left_article,right_article) if left_article_len < right_article_len else (right_article,left_article)
counter = 1
for index, row in smaller_article.iterrows():
X = bigger_article.loc[:,'topic_0':'topic_99']
y = row.loc['topic_0':'topic_99'].values.reshape(1,-1)
similarity_scores = cosine_similarity(X,y).flatten()
indices = np.argsort(similarity_scores)
print(f"*** Para {counter} *** ")
print(row['text_paras'])
print(similarity_scores[indices[-1:-4:-1]])
print(bigger_article.iloc[indices[-1:-4:-1]].loc[:,'text_paras'])
index_to_drop = bigger_article.index[indices[-1]]
bigger_article.drop(index = index_to_drop, inplace = True)
print('\n')
counter += 1
pd.reset_option('display.max_colwidth')
# + [markdown] id="J_hxXtdGCDYj"
# ## Top2Vec Modeling
# + id="lc0L1YzrCDYk"
top2vec_model = Top2Vec.load("Data/top2vec_deep_learn_model")
# + id="Y97c_uxkCDYk" outputId="1ee714f6-1bdc-4ade-c458-5dc2af38dc3f"
top2vec_model.get_num_topics()
# + id="Vr1WlWX8CDYk"
topic_sizes, topic_nums = top2vec_model.get_topic_sizes()
# + id="SEOHAAhJCDYk"
df_expanded['text_paras'] = df_expanded.text_paras.fillna(value=' ')
# df_expanded_top2vec = df_expanded.join(pd.DataFrame(columns=topic_nums, index=df_expanded.index).add_prefix("topic_"))
# df_expanded_top2vec.loc[:,'topic_0':'topic_209'] = 0
# del df_expanded_no_nulls
# + id="At1vQ3_WCDYk" outputId="d11f40da-0ba3-4c37-9788-061df92d1233"
help(top2vec_model)
# + id="TsEHqjlZCDYk" outputId="52c3cfb7-cc52-48cd-bc8e-b341ffa38bb5"
top2vec_model_deeplearn = Top2Vec(documents = df_expanded.text_paras.tolist(), speed='deep-learn', document_ids = df_expanded.index.tolist(), workers = 4, )
# + id="JQeEe3WaCDYl"
# top2vec_model_deeplearn_sent_enc = Top2Vec(documents = df_expanded.text_paras.tolist(), embedding_model='universal-sentence-encoder',
# speed='deep-learn', document_ids = df_expanded.index.tolist(), workers = 4, )
# + id="P-0SeHfiCDYl" outputId="78ace202-80a8-48e5-c99d-a26d4cdfe045"
topic_sizes, topic_nums = top2vec_model_deeplearn.get_topic_sizes()
top2vec_model_deeplearn.get_num_topics()
# + id="EhpwCm9TCDYl"
df_expanded_top2vec_deep_learn = df_expanded.join(pd.DataFrame(columns=topic_nums, index=df_expanded.index).add_prefix("topic_"))
df_expanded_top2vec_deep_learn.loc[:,'topic_0':'topic_219'] = 0
# + id="uKzzQWWBCDYl"
documents, document_scores, document_ids = top2vec_model.search_documents_by_topic(topic_num=0, num_docs=15)
# documents
# + id="ibGSE7maCDYl" outputId="d96a9a97-de33-425d-acff-b96bdaacf3c9"
df_expanded_top2vec_deep_learn.loc[document_ids,'topic_0']
# + id="EbEl32asCDYm"
for i in topic_nums:
topic = 'topic_' + str(i)
documents, document_scores, document_ids = top2vec_model_deeplearn.search_documents_by_topic(topic_num=i, num_docs=topic_sizes[i])
df_expanded_top2vec_deep_learn.loc[document_ids,topic] = document_scores
# + id="URUHo3OFCDYm" outputId="91cd4430-ffe9-4418-936e-313a0ea56948"
pd.set_option('display.max_colwidth', None)
one_topic = df_expanded_top2vec_deep_learn[df_expanded_top2vec_deep_learn.number == 3474].dropna(subset=['text_final'])
left_article = one_topic[one_topic.global_bias == 'From the Left']
right_article = one_topic[one_topic.global_bias == 'From the Right']
left_article_len = len(left_article)
right_article_len = len(right_article)
smaller_article,bigger_article = (left_article,right_article) if left_article_len < right_article_len else (right_article,left_article)
counter = 1
for index, row in smaller_article.iterrows():
X = bigger_article.loc[:,'topic_0':'topic_99']
y = row.loc['topic_0':'topic_99'].values.reshape(1,-1)
similarity_scores = cosine_similarity(X,y).flatten()
indices = np.argsort(similarity_scores)
print(f"*** Para {counter} *** ")
print(row['text_paras'])
print(similarity_scores[indices[-1:-4:-1]])
print(bigger_article.iloc[indices[-1:-4:-1]].loc[:,'text_paras'])
index_to_drop = bigger_article.index[indices[-1]]
bigger_article.drop(index = index_to_drop, inplace = True)
print('\n')
counter += 1
pd.reset_option('display.max_colwidth')
# + id="Sd747FmCLv46"
| 2_NLP_Preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import re
import string
from tqdm import tqdm
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
stopwords_set = set(stopwords.words('english'))
# Translation table that deletes every ASCII punctuation character; built
# once at module load instead of interpolating string.punctuation (which
# contains regex metacharacters, unescaped) into a regex on every call.
_PUNCT_TABLE = str.maketrans('', '', string.punctuation)

def tokenize(document):
    """Lowercase, strip punctuation from, and drop stopwords from a document.

    Returns the surviving tokens in their original order. Tokens that
    become empty after punctuation removal are kept, matching the
    original behaviour.
    """
    tokens = [token.lower() for token in document.split()]
    tokens = [token.translate(_PUNCT_TABLE) for token in tokens]
    return [token for token in tokens if token not in stopwords_set]
def get_similarity(headline,body):
    """Number of distinct (cleaned, non-stopword) tokens shared by the
    headline and the body."""
    shared = set(tokenize(headline)) & set(tokenize(body))
    return len(shared)
def process_data(bodies_path,stances_path,folder_prefix="fnc-1"):
    """Load the FNC-1 bodies and stances CSVs, merge them on 'Body ID',
    and return (X, Y): X is the headline/body token-overlap count with
    shape (n, 1); Y is 1 for related stances, 0 for 'unrelated'."""
    df_bodies = pd.read_csv('/'.join((folder_prefix, bodies_path)))
    df_stances = pd.read_csv('/'.join((folder_prefix, stances_path)))
    df_all = pd.merge(df_stances, df_bodies, on="Body ID")
    labels = [0 if stance == "unrelated" else 1 for stance in df_all["Stance"]]
    overlaps = []
    # Row layout after the merge: headline, body id, stance, article body.
    for headline, _body_id, _stance, article_body in tqdm(df_all.values):
        overlaps.append(get_similarity(headline, article_body))
    return np.array(overlaps).reshape(-1, 1), np.array(labels)
X_train, Y_train = process_data('train_bodies.csv','train_stances.csv')
X_test, Y_test = process_data('competition_test_bodies.csv','competition_test_stances.csv')
# +
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# Fix: accuracy_score is used below but was never imported in this notebook.
from sklearn.metrics import accuracy_score

# Compare several baseline classifiers on the single overlap feature.
algorithms = [
    ("Decision Tree",DecisionTreeClassifier()),
    ("Logistic Regression",LogisticRegression(solver="lbfgs")),
    ("Naive Bayes",MultinomialNB()),
    ("LDA",LinearDiscriminantAnalysis()),
    ("Kneighbors",KNeighborsClassifier(3))
]
for name,algorithm in algorithms:
    algorithm.fit(X_train,Y_train)
    print(name)
    # Report both test and train accuracy to spot over/under-fitting.
    accuracy_test = accuracy_score(Y_test,algorithm.predict(X_test))
    accuracy_train = accuracy_score(Y_train,algorithm.predict(X_train))
    print(f"Test : {accuracy_test}")
    print(f"Train : {accuracy_train}")
    print("="*50)
# -
# ### PR Curve
#
# For our model , we would like to have a high precision model, because it is more costly to missclasify an article as related if it was actually unrelated then the inverse. So we're aiming for around ~ 98
# +
import matplotlib.pyplot as plt
# Refit logistic regression as the final model for threshold selection.
log_reg = LogisticRegression(solver="lbfgs")
log_reg.fit(X_train,Y_train)
def find_matching_point(thresholds,precisions,recalls,desired_precision):
    """Return the first (threshold, precision, recall) triple whose precision
    exceeds ``desired_precision``, or None when no point qualifies.

    Note: sklearn's precision_recall_curve returns one more precision/recall
    value than thresholds; zip() silently ignores that extra final point.
    """
    for threshold,precision, recall in zip(thresholds,precisions,recalls):
        if precision > desired_precision:
            return (threshold,precision,recall)
    # Explicit None (previously an implicit fall-through) so callers can
    # detect that the desired precision is unattainable before unpacking.
    return None
# Fix: precision_recall_curve is used below but was never imported
# in this notebook.
from sklearn.metrics import precision_recall_curve

# Probability of the positive ("related") class for each test example.
preds = log_reg.predict_proba(X_test)[:,1]
precisions, recalls, thresholds = precision_recall_curve(Y_test,preds)
plt.plot(recalls,precisions,label="Lr curve")
plt.legend()
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title("PR curve")
# Report the threshold/precision/recall point that reaches ~97% precision.
print(find_matching_point(thresholds,precisions,recalls,0.97))
plt.show()
# -
import pickle
with open("log_reg.pickle","wb") as f:
pickle.dump(log_reg,f)
threshold,*_ = find_matching_point(thresholds,precisions,recalls,desired_precision=0.97)
with open("threshold.txt","w") as f:
f.write(str(threshold))
print("DONE")
| backend/model/detect_unrelated.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="HlEQmz3DXHyq" colab_type="code" colab={}
# if you use Google Colab
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="M7BIPkIsXwBR" colab_type="text"
# ## Data Preperation
# + id="5SNVTJAkXxe4" colab_type="code" colab={}
def replace_split_to_tab(readed_file_path, written_file_path):
    """Convert a ' ||| '-delimited file into a tab-delimited file.

    Reads the whole input file, replaces every ' ||| ' separator with a
    tab, and writes the result to ``written_file_path``.
    """
    # Context managers close both handles (the original left the open()
    # handles unclosed); utf-8 is pinned to match the later read of the
    # written file, instead of relying on the platform default encoding.
    with open(readed_file_path, encoding='utf-8') as src:
        text = src.read()
    with open(written_file_path, 'w', encoding='utf-8') as dst:
        dst.write(text.replace(' ||| ', '\t'))
# + id="r-fJogQkZWIs" colab_type="code" colab={}
readed_file_path = 'path_to/data/train/atis.uw.train.txt'
written_file_path = 'path_to/data/train/atis.uw.train.tabed.txt'
replace_split_to_tab(readed_file_path, written_file_path)
# + id="mEdmoxZ0bJch" colab_type="code" colab={}
# Fix: the input path was assigned to a mistyped name (`_readed_file_path`),
# so the call below silently re-converted the *train* file (still bound to
# `readed_file_path` from the previous cell) into the test output path.
readed_file_path = 'path_to/data/test/atis.uw.test.txt'
written_file_path = 'path_to/data/test/atis.uw.test.tabed.txt'
replace_split_to_tab(readed_file_path, written_file_path)
# + id="q2c0gv8paxoo" colab_type="code" colab={}
with open(written_file_path, 'r', encoding='utf-8') as f:
lines = f.read().split('\n')
# + id="jZGb0Jhsa1v1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1577971405953, "user_tz": -180, "elapsed": 928, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="b84f9632-4526-4ae8-e8f7-37224071c116"
lines[155]
# + id="tGizanILa3AV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1577971484264, "user_tz": -180, "elapsed": 954, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="2987ee0f-902b-4284-c5c0-305cdeab8c09"
len(lines)
# + id="IJcYEAnxa4P8" colab_type="code" colab={}
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
# + id="4E_F4HwOa5lk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1577971488489, "user_tz": -180, "elapsed": 1006, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="597337da-c416-4615-a2d0-103e9bddc4c2"
num_samples = 10000
num_samples
# + id="gtRj_ym0biIt" colab_type="code" colab={}
# Build parallel input/target text lists and collect the character
# vocabularies. '\t' marks sequence start and '\n' sequence end for the
# decoder targets (standard char-level seq2seq convention).
for line in lines[: min(num_samples, len(lines) - 1)]:
    input_text, target_text = line.split('\t')
    target_text = '\t' + target_text + '\n'
    input_texts.append(input_text)
    target_texts.append(target_text)
    # set.update replaces the redundant per-character "not in" checks —
    # sets already deduplicate.
    input_characters.update(input_text)
    target_characters.update(target_text)
# + id="qSSoAE6TbkyV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1577971507517, "user_tz": -180, "elapsed": 1049, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="158ebd23-dfb1-4d6d-df95-11e669694255"
input_texts[155]
# + id="n8_kXRD8bns9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1577971516798, "user_tz": -180, "elapsed": 937, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="091c2e33-d6e0-44b3-e2a8-a60cc652281c"
target_texts[155]
# + id="4oL3UVJsby_F" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 107} executionInfo={"status": "ok", "timestamp": 1577971563215, "user_tz": -180, "elapsed": 955, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="9e7d0e1e-d018-41c7-e07a-c25a408084f5"
# Sort the vocabularies so that token indices are deterministic across runs.
input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)
# The longest sequences fix the time dimension of the one-hot tensors
# allocated below.
max_encoder_seq_length = max([len(txt) for txt in input_texts])
max_decoder_seq_length = max([len(txt) for txt in target_texts])
print('Number of samples:', len(input_texts))
print('Number of unique input tokens:', num_encoder_tokens)
print('Number of unique output tokens:', num_decoder_tokens)
print('Max sequence length for inputs:', max_encoder_seq_length)
print('Max sequence length for outputs:', max_decoder_seq_length)
# + id="oC-MQEUtb1y9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} executionInfo={"status": "ok", "timestamp": 1577971575280, "user_tz": -180, "elapsed": 1196, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="238f151e-cdce-42df-a146-1eb13e3f10d1"
print(input_characters)
# + id="V1H-RW0db3Yd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} executionInfo={"status": "ok", "timestamp": 1577971585182, "user_tz": -180, "elapsed": 1032, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="fb950bd0-f7e9-4b7b-cca0-a83c1c1b67ae"
print(target_characters)
# + id="EBa7jrTjb861" colab_type="code" colab={}
# Map each character to its index in the sorted vocabulary.
input_token_index = {char: i for i, char in enumerate(input_characters)}
target_token_index = {char: i for i, char in enumerate(target_characters)}
# + id="6BXjyJeab_KQ" colab_type="code" colab={}
import numpy as np
# Pre-allocated one-hot tensors of shape (sample, timestep, token).
encoder_input_data = np.zeros(
    (len(input_texts), max_encoder_seq_length, num_encoder_tokens),
    dtype='float32')
decoder_input_data = np.zeros(
    (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
    dtype='float32')
# Teacher-forcing target: same shape as the decoder input, shifted one step.
decoder_target_data = np.zeros(
    (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
    dtype='float32')
# + id="ttOreCwVb_hK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1577971627351, "user_tz": -180, "elapsed": 649, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="b0491746-8557-4031-9856-bf03f9a000ae"
encoder_input_data.shape
# + id="45mPFyvicAwm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1577971628974, "user_tz": -180, "elapsed": 657, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="3e007d85-2d59-4bd3-835c-512831fb57e2"
decoder_input_data.shape
# + id="DdIjuRsQcGLO" colab_type="code" colab={}
# One-hot encode every (source, target) pair, position by position.
for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
    for t, char in enumerate(input_text):
        encoder_input_data[i, t, input_token_index[char]] = 1.
    for t, char in enumerate(target_text):
        # decoder_target_data is ahead of decoder_input_data by one timestep
        decoder_input_data[i, t, target_token_index[char]] = 1.
        if t > 0:
            # decoder_target_data will be ahead by one timestep
            # and will not include the start character.
            decoder_target_data[i, t - 1, target_token_index[char]] = 1.
# + id="2_LsLXEdcIuG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1577971715819, "user_tz": -180, "elapsed": 685, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="96cfd2fc-1998-4f6b-e08e-14d2ee8d5bb0"
encoder_input_data[155].shape
# + [markdown] id="q8wb8I9AkhAT" colab_type="text"
# ## Building the Model
# + id="uyzcpeFHkhnb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 82} executionInfo={"status": "ok", "timestamp": 1577973860124, "user_tz": -180, "elapsed": 2556, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="cf806b0b-5f0b-41c7-c7cf-c78f56f61905"
import keras, tensorflow
from keras.models import Model
from keras.layers import Input, LSTM, Dense
import numpy as np
# + id="A0m0gQwhkk0z" colab_type="code" colab={}
batch_size = 64 # batch size for training
epochs = 100 # number of epochs to train for
latent_dim = 256 # latent dimensionality of the encoding space
# + id="y7Tpz4Dvkoeu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 145} executionInfo={"status": "ok", "timestamp": 1577973879836, "user_tz": -180, "elapsed": 2050, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="e0669274-738a-442b-ec87-4197659ffd2a"
# Encoder: consumes the whole input sequence; only its final LSTM states
# (h, c) are kept and passed to the decoder.
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]
# + id="1LRJgnKzkrmN" colab_type="code" colab={}
# Decoder: initialized from the encoder states, emits a softmax over the
# target vocabulary at every timestep (return_sequences=True).
decoder_inputs = Input(shape=(None, num_decoder_tokens))
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                     initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
# + id="jIsSD8PrlCiF" colab_type="code" colab={}
model = Model(inputs=[encoder_inputs, decoder_inputs],
              outputs=decoder_outputs)
# + [markdown] id="Iemc4k8KlEuH" colab_type="text"
# ## Training the Model
# + id="9r9FgecslGGG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 469} executionInfo={"status": "ok", "timestamp": 1577974004083, "user_tz": -180, "elapsed": 969, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="158f4adf-fbc6-43a6-d589-da5188255ecb"
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.summary()
# + id="ci1NYztWlJvl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"status": "ok", "timestamp": 1577986925044, "user_tz": -180, "elapsed": 2787418, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="77059f90-3872-4c42-8470-c6cbf753e6fd"
# Train with teacher forcing; 20% of the samples are held out for validation.
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
          batch_size=batch_size,
          epochs=epochs,
          validation_split=0.2)
# Persist the trained weights for the inference section below.
model.save('path_to/results/seq2seq_text2sql.h5')
# + [markdown] id="hBBWa7qpWbam" colab_type="text"
# ## Testing the Model
# + id="yMt3cjiDWfpW" colab_type="code" colab={}
# Rebuild the training graph and load the saved weights for inference.
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.load_weights('path_to/results/seq2seq_text2sql.h5')
# + id="2jwCZDWKWurq" colab_type="code" colab={}
# Inference encoder: maps an input sequence to its final LSTM states.
encoder_model = Model(encoder_inputs, encoder_states)
# Inference decoder: runs one step at a time, with the states fed in
# explicitly so the sampling loop can carry them between steps.
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
    decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
    [decoder_inputs] + decoder_states_inputs,
    [decoder_outputs] + decoder_states)
# + id="Uh6FUSHFWxQd" colab_type="code" colab={}
# reverse-lookup token index to turn sequences back to characters
reverse_input_char_index = dict(
    (i, char) for char, i in input_token_index.items())
reverse_target_char_index = dict(
    (i, char) for char, i in target_token_index.items())
# + id="X1-xYn7eW0zM" colab_type="code" colab={}
def decode_sequence(input_seq):
    """Greedily decode one encoded input into a target-character string.

    Runs the encoder once to obtain the initial LSTM state, then feeds the
    decoder its own previous prediction until it emits the stop character
    ('\\n') or the output grows past max_decoder_seq_length.
    """
    # Final encoder state seeds the decoder.
    state = encoder_model.predict(input_seq)

    # Length-1 one-hot sequence holding the start-of-sequence character.
    next_input = np.zeros((1, 1, num_decoder_tokens))
    next_input[0, 0, target_token_index['\t']] = 1.

    decoded = ''
    while True:
        probs, h, c = decoder_model.predict([next_input] + state)

        # Greedy sampling: most probable character at the last timestep.
        token = np.argmax(probs[0, -1, :])
        char = reverse_target_char_index[token]
        decoded += char

        # Stop on the end-of-sequence marker or when the output is too long.
        if char == '\n' or len(decoded) > max_decoder_seq_length:
            break

        # The character just produced becomes the next decoder input.
        next_input = np.zeros((1, 1, num_decoder_tokens))
        next_input[0, 0, token] = 1.
        state = [h, c]

    return decoded
# + id="iZWW-PklW_nJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 577} executionInfo={"status": "ok", "timestamp": 1577987113670, "user_tz": -180, "elapsed": 32880, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="e65170ef-594d-4206-abf5-77325875e2f3"
for seq_index in range(10):
input_seq = encoder_input_data[seq_index: seq_index + 1]
decoded_sentence = decode_sequence(input_seq)
print('-')
print('Input sentence:', input_texts[seq_index])
print('Decoded sentence:', decoded_sentence)
# + id="dU_F-EFWZfDa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 73} executionInfo={"status": "ok", "timestamp": 1577987764589, "user_tz": -180, "elapsed": 6301, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="001636b4-a6ed-49a8-bcb0-09be1207cba7"
input_sentence = "find me a flight from cincinnati to any airport in the new york city area"
test_sentence_tokenized = np.zeros(
(1, max_encoder_seq_length, num_encoder_tokens), dtype='float32')
for t, char in enumerate(input_sentence):
test_sentence_tokenized[0, t, input_token_index[char]] = 1.
print(input_sentence)
print(decode_sequence(test_sentence_tokenized))
# + [markdown] id="mjTeu5soXYpA" colab_type="text"
# ## Evaluation
# + id="3Okog26TXgnr" colab_type="code" colab={}
def load_lined_text(file_path):
    """Read a UTF-8 text file and return its content split on newlines.

    Note: like str.split('\\n'), a trailing newline in the file yields a
    final empty-string entry; callers filter malformed lines themselves.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original relied on the garbage collector to close it).
    with open(file_path, encoding='utf-8') as f:
        return f.read().split('\n')
# + id="d1ckV7yOXqLg" colab_type="code" colab={}
test_data = load_lined_text('path_to/data/test/atis.uw.test.tabed.txt')
# + id="mbMBq45QXZ9H" colab_type="code" colab={}
# Build the validation encoder input from the held-out test file.
val_input_texts = []
val_target_texts = []
#line_ix = 12000
for line in test_data:
    try:
        input_text, target_text = line.split('\t')
        val_input_texts.append(input_text)
        val_target_texts.append(target_text)
    except ValueError:
        # Narrowed from a bare `except:`: only skip lines that do not split
        # into exactly (question, query) -- e.g. the empty trailing line
        # produced by splitting the file on '\n'.
        continue
val_encoder_input_data = np.zeros(
    (len(val_input_texts), max([len(txt) for txt in val_input_texts]),
     num_encoder_tokens), dtype='float32')
# NOTE(review): a character absent from the training vocabulary would raise
# KeyError here (unchanged from the original) -- confirm the test set is
# covered by input_token_index.
for i, input_text in enumerate(val_input_texts):
    for t, char in enumerate(input_text):
        val_encoder_input_data[i, t, input_token_index[char]] = 1.
# + id="DqzfETjbaC5x" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 757} executionInfo={"status": "ok", "timestamp": 1577987914706, "user_tz": -180, "elapsed": 34009, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCGmpRPF21sBF4FQoQvkbo3t1wJPMnRFLdhNhED=s64", "userId": "07466696460023830380"}} outputId="a99ca2fa-455b-4b66-8970-fd42505be52c"
for seq_index in range(10):
input_seq = val_encoder_input_data[seq_index: seq_index + 1]
decoded_sentence = decode_sequence(input_seq)
print('-')
print('Input sentence:', val_input_texts[seq_index])
print('Decoded sentence:', decoded_sentence[:-1])
print('Ground Truth sentence:', val_target_texts[seq_index])
| code/text2sql_learning_atis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ocean interior carbon storage: Changes in preformed vs regenerated carbon over the historic period (1850-2014)
# +
# %matplotlib inline
import xarray as xr
import intake
import numpy as np
from cmip6_preprocessing.preprocessing import read_data
import warnings
import matplotlib.pyplot as plt
# util.py is in the local directory
# it contains code that is common across project notebooks
# or routines that are too extensive and might otherwise clutter
# the notebook design
import util
# -
# ## Connect to `DASK` cluster
# +
from dask.distributed import Client
client = Client("tcp://10.32.78.2:36773")
client
# -
# ## Define functions for use later:
# - _check_data_issues_ : parse errata reported by modeling centers from es-doc.org
# - _calc_o2sat_gsw_ : calculate saturated oxygen concentration using the Gibbs Seawater Toolbox (TEOS10)
# - _plot_atlantic_pacific_sections_ : plot arbitrary Atlantic and Pacific sections of scalar quantities or trends
# - _compute_slope_ : calculate linear slope of input data
# +
# Functions
def check_data_issues(catolog,issue_location="https://errata.es-doc.org/1/issue/retrieve-all",isurl=True):
    """Warn about known ES-DOC errata that affect entries of an intake catalog.

    Args:
        catolog: intake-esm catalog; must expose a ``.df`` table with the
            standard CMIP6 columns (activity_id, institution_id, source_id,
            experiment_id, member_id, table_id, variable_id, grid_label).
            Parameter name kept as-is for keyword-call compatibility.
        issue_location: URL of the ES-DOC errata service, or the path to a
            JSON file previously downloaded from it.
        isurl: if True treat ``issue_location`` as a URL, otherwise as a file.

    Emits one UserWarning per catalog entry matched by a reported issue.
    """
    import urllib.request, json
    import fnmatch
    import warnings
    if isurl:
        # Read the "es-doc.org" website (kinda slow if you need to access several times)
        with urllib.request.urlopen(issue_location) as url:
            esdoc_data = json.loads(url.read())
    else:
        # Read a JSON file downloaded from the "es-doc.org" website
        with open(issue_location) as json_file:
            esdoc_data = json.load(json_file)
    # How many issues
    print("Read in {} issues from es-doc.org".format(esdoc_data['count']))
    # Columns that, joined with dots, form the CMIP6 dataset identifier.
    id_columns = ['activity_id', 'institution_id', 'source_id', 'experiment_id',
                  'member_id', 'table_id', 'variable_id', 'grid_label']
    for ivar in range(len(catolog.df)):
        row = catolog.df.iloc[ivar]
        var = '.'.join(['CMIP6'] + [str(row[col]) for col in id_columns])
        # Reported dataset ids carry an extra version suffix, hence the
        # wildcard match on both sides.
        for issue_ix in range(0, esdoc_data['count']):
            issue = dict(esdoc_data['issues'][issue_ix])
            problem = fnmatch.filter(issue['datasets'], '*'+var+'*')
            if problem:
                # Bug fix: the Status field previously repeated the severity
                # (the template used "{1}" twice); it now shows the status.
                warning_string='''DATA ISSUE REPORTED\nFrom: {0}; Issue Severity: {1}; Status: {2}.\n{3}:\n{4}.'''
                warnings.warn((warning_string.format(
                    issue['institute'].upper(),
                    issue['severity'],
                    issue['status'],
                    issue['title'],
                    issue['description'])))
def calc_o2sat_gsw(practical_salinity,potential_temperature):
    """Oxygen saturation concentration in mol/m3 from practical salinity and
    potential temperature, via the Gibbs SeaWater (TEOS-10) toolbox.

    gsw.O2sol_SP_pt is applied elementwise through xarray.apply_ufunc with
    dask='parallelized', so it works lazily on dask-backed arrays.
    """
    import gsw
    # The triple-quoted block below is the disabled full TEOS-10 route via
    # absolute salinity / conservative temperature; kept for reference.
    '''
    #Calculate Absolute Salinity
    gsw_as=xr.apply_ufunc(gsw.SA_from_SP,
                          practical_salinity,
                          pressure,
                          longitude,
                          latitude,
                          dask='parallelized',
                          output_dtypes=[float])
    #Calculate Conservative Temperature
    gsw_ct=xr.apply_ufunc(gsw.CT_from_t,
                          gsw_as,
                          potential_temperature,
                          pressure,
                          dask='parallelized',
                          output_dtypes=[float])
    #Calculate Oxygen Saturation
    gsw_o2sat=xr.apply_ufunc(gsw.O2sol,
                          gsw_as,
                          gsw_ct,
                          pressure,
                          longitude,
                          latitude,
                          dask='parallelized',
                          output_dtypes=[float])
    '''
    gsw_o2sat=xr.apply_ufunc(gsw.O2sol_SP_pt,
                          practical_salinity,
                          potential_temperature,
                          dask='parallelized',
                          output_dtypes=[float])
    return gsw_o2sat*1.0245e-3 # Convert umol/kg to mol/m3 (1e-6 mol/umol x ~1024.5 kg/m3 reference density)
def plot_atlantic_pacific_sections(data_dict,varname,units=None,fac=1,lev=np.arange(0,11,1),colors='viridis',cextend='both',plot_trend=False):
    """Contour Atlantic (~340E) and Pacific (~200E) meridional sections.

    One row of panels per model in ``data_dict``: either the mean of the
    last 12 time steps over members, or (``plot_trend=True``) the linear
    per-year trend computed with ``compute_slope``.

    Args:
        data_dict: dict mapping model name -> xarray Dataset holding
            ``varname`` with 2-D lon/lat coordinates and a ``lev`` depth axis.
        varname: name of the variable to plot.
        units: string appended to the panel titles.
        fac: scale factor applied to the plotted field (unit conversion).
        lev: contour levels.
        colors: matplotlib colormap name.
        cextend: colorbar 'extend' setting.
        plot_trend: plot the linear trend instead of the recent-time mean.

    Returns:
        (fig, axes) from matplotlib.pyplot.subplots.
        (An unreachable second ``return xloca, xlocp`` was removed.)

    NOTE(review): the axes[A, 0] indexing assumes len(data_dict) > 1, since
    plt.subplots returns a 1-D axes array for a single row -- confirm callers.
    """
    import matplotlib.pyplot as plt
    fig, axes = plt.subplots(ncols=2, nrows=len(data_dict.keys()),figsize=[15, 3*len(data_dict.keys())])
    A = 0
    for model in data_dict.keys():
        # Find mean longitude of arbitrary sections in the Atlantic (~340E) and Pacific (~200E).
        # Bug fix: these lookups previously read the global dic_dict instead of
        # the data_dict argument, so sections plotted from other dicts (e.g.
        # bgc_dict) were located on the wrong model grid.
        if data_dict[model].lon.min()<0:
            long=data_dict[model].lon.values
            lonmean=np.mean(np.where(long<0, long+360, long),axis=0)
        else:
            lonmean=data_dict[model].lon.mean('y').values
        xloca=np.abs(lonmean-340).argmin()
        xlocp=np.abs(lonmean-200).argmin()
        alatgrid,levgrid=np.meshgrid(data_dict[model].lat.isel(x=xloca),data_dict[model].lev)
        platgrid,levgrid=np.meshgrid(data_dict[model].lat.isel(x=xlocp),data_dict[model].lev)
        if plot_trend:
            # Monthly-data slope x 12 -> trend per year.
            aslope = compute_slope(data_dict[model][varname].isel(x=xloca)
                                   .chunk({'time': -1,'y': 100}))
            aslope = aslope.mean('member_id')*12 # in mol/m^3/year
            pslope = compute_slope(data_dict[model][varname].isel(x=xlocp)
                                   .chunk({'time': -1,'y': 100}))
            pslope = pslope.mean('member_id')*12 # in mol/m^3/year
            cba=axes[A,0].contourf(alatgrid,levgrid,(aslope*fac),cmap=colors,levels=lev,extend=cextend)
            cbp=axes[A,1].contourf(platgrid,levgrid,(pslope*fac),cmap=colors,levels=lev,extend=cextend)
            cbara=fig.colorbar(cba,ax=axes[A,0],ticks=lev[::2],extend=cextend)
            cbara.solids.set_edgecolor("face")
            cbarp=fig.colorbar(cbp,ax=axes[A,1],ticks=lev[::2],extend=cextend)
            cbarp.solids.set_edgecolor("face")
            axes[A,0].set_title(' '.join(['Atlantic',model,varname.upper(),'trend',units]))
            axes[A,0].set_facecolor('black')
            axes[A,1].set_title(' '.join(['Pacific' ,model,varname.upper(),'trend',units]))
            axes[A,1].set_facecolor('black')
        else:
            # Mean over the last 12 time steps and all ensemble members.
            cba=axes[A,0].contourf(alatgrid,levgrid,data_dict[model][varname]
                                   .isel(time=slice(-13,-1)).mean({'time','member_id'}).isel(x=xloca)*fac,
                                   levels=lev,cmap=colors,extend=cextend)
            cbp=axes[A,1].contourf(platgrid,levgrid,data_dict[model][varname]
                                   .isel(time=slice(-13,-1)).mean({'time','member_id'}).isel(x=xlocp)*fac,
                                   levels=lev,cmap=colors,extend=cextend)
            cbara=fig.colorbar(cba,ax=axes[A,0],ticks=lev[::2],extend=cextend)
            cbara.solids.set_edgecolor("face")
            cbarp=fig.colorbar(cbp,ax=axes[A,1],ticks=lev[::2],extend=cextend)
            cbarp.solids.set_edgecolor("face")
            axes[A,0].set_title(' '.join(['Atlantic',model,varname.upper(),units]))
            axes[A,0].set_facecolor('black')
            axes[A,1].set_title(' '.join(['Pacific' ,model,varname.upper(),units]))
            axes[A,1].set_facecolor('black')
        #if model == 'MIROC-ES2L':
        #    # This model is half Arctic!!
        #    axes[A,0].set_xlim(75,data_dict[model][varname].y.min())
        #    axes[A,1].set_xlim(data_dict[model][varname].y.min(),75)
        #else:
        #    axes[A,0].set_xlim(data_dict[model][varname].y.max(),data_dict[model][varname].y.min())
        axes[A,0].set_xlim(80,-90)
        axes[A,0].set_ylim(6000,0)
        axes[A,0].set_xlabel("")
        axes[A,0].set_ylabel("")
        axes[A,1].set_xlim(-90,60)
        axes[A,1].set_ylim(6000,0)
        axes[A,1].set_xlabel("")
        axes[A,1].set_ylabel("")
        A +=1
    plt.subplots_adjust(hspace=0.3)
    plt.show()
    return fig, axes
def _compute_slope(y):
"""
Private function to compute slopes at each grid cell using
polyfit.
"""
x = np.arange(len(y))
return np.polyfit(x, y, 1)[0] # return only the slope
def compute_slope(da):
    """Compute the linear trend (slope per time step) at every grid cell.

    Args:
        da: xarray DataArray with a 'time' dimension (dask-backed OK).

    Returns:
        xarray DataArray of slopes with the 'time' dimension reduced away.
    """
    # apply_ufunc maps the plain-numpy _compute_slope over every 1-D time
    # series: input_core_dims marks 'time' as the reduced dimension,
    # vectorize loops the helper over all remaining dims, and
    # dask='parallelized' runs it chunk-by-chunk (which is why the output
    # dtype must be declared up front).
    return xr.apply_ufunc(
        _compute_slope,
        da,
        vectorize=True,
        input_core_dims=[['time']],
        dask='parallelized',
        output_dtypes=[float],
    )
# -
# ## Open the ESM Datastore, either on the Google Cloud or Cheyenne at NCAR
# +
# Use the local glade catalog on NCAR machines, otherwise the Pangeo cloud copy.
if util.is_ncar_host():
    col = intake.open_esm_datastore("../catalogs/glade-cmip6.json")
else:
    #col = intake.open_esm_datastore("../catalogs/pangeo-cmip6_update_2019_10_18.json")
    # "noQC" catalog is not quality-controlled, hence the errata checks below.
    col = intake.open_esm_datastore("../catalogs/pangeo-cmip6-noQC.json")
#import pprint
uni_dict = col.unique(['source_id', 'experiment_id', 'table_id'])
#pprint.pprint(uni_dict, compact=True)
# -
# ## Start reading CMIP6 data first for `dic` and then `o2`, `thetao`, and `so` (salinity)
# +
# read all data with dic from regridded models
# (Cant use "cmip6_preprocessing.preprocessing.read_data"
# because "lon" is not a variable, even though it's a coordinate)
models = set(uni_dict['source_id']['values']) # all the models
cat = col.search(experiment_id = ['historical'],
grid_label = 'gr',
table_id = 'Omon',
variable_id = 'dissic')
# Check the ES-DOC website for CMIP6 data issues if using the noQC catolog
if col.esmcol_path.find('noQC')!=-1:
check_data_issues(cat,issue_location="../catalogs/es-doc-retrieve-all-issues-20191029.json",isurl=False)
models = models.intersection({model for model in cat.df.source_id.unique().tolist()})
models=models-{'GISS-E2-1-G-CC','GISS-E2-1-G','AWI-CM-1-1-MR'}
models = list(models)
print(models)
# Load the data
dset_dict = cat.to_dataset_dict(zarr_kwargs={'consolidated': True, 'decode_times': False},
cdf_kwargs ={'chunks':{'time':50}, 'decode_times': False})
dic_dict={}
for model in dset_dict.keys():
dic_dict[dset_dict[model].source_id] =dset_dict[model].rename({'lon':'x','lat':'y'})
lon ,lat = np.meshgrid(dset_dict[model].lon.values,dset_dict[model].lat.values)
dic_dict[dset_dict[model].source_id]['lon']=xr.DataArray(lon, dims=['y', 'x'])
dic_dict[dset_dict[model].source_id]['lat']=xr.DataArray(lat, dims=['y', 'x'])
# read all data with dic native models
models = set(uni_dict['source_id']['values']) # all the models
cat = col.search(experiment_id = ['historical'],
grid_label = 'gn',
table_id = 'Omon',
variable_id = 'dissic')
# Check the ES-DOC website for CMIP6 data issues if using the noQC catolog
if col.esmcol_path.find('noQC')!=-1:
check_data_issues(cat,issue_location="../catalogs/es-doc-retrieve-all-issues-20191029.json",isurl=False)
models = models.intersection({model for model in cat.df.source_id.unique().tolist()})
models=models-{'GISS-E2-1-G-CC','GISS-E2-1-G','AWI-CM-1-1-MR'}
models = list(models)
print(models)
with warnings.catch_warnings(): # these lines just make sure that the warnings dont clutter your notebook
warnings.simplefilter("ignore")
dset_dict = read_data(col,
experiment_id = ['historical'],
grid_label = ['gn'],
variable_id = ['dissic'],
table_id = ['Omon'],
source_id = models,
required_variable_id = ['dissic'])
# Append or overwrite the regridded values with those on native grid
for model in dset_dict.keys():
dic_dict[model]=dset_dict[model]
# Correct CESM2 has depth in cm
dic_dict['CESM2']['lev']=dic_dict['CESM2']['lev']/100
print(dic_dict.keys())
# +
models = set(uni_dict['source_id']['values']) # all the models
# read all data with O2 from regridded models
# (Cant use "cmip6_preprocessing.preprocessing.read_data"
# because "lon" is not a variable, even though it's a coordinate)
bgc_dict={}
cat = col.search(experiment_id =['historical'],
grid_label = 'gr',
variable_id = 'o2',
table_id = 'Omon')
# Check the ES-DOC website for CMIP6 data issues if using the noQC catolog
if col.esmcol_path.find('noQC')!=-1:
check_data_issues(cat,issue_location="../catalogs/es-doc-retrieve-all-issues-20191029.json",isurl=False)
models = models.intersection({model for model in cat.df.source_id.unique().tolist()}).intersection({model for model in dic_dict})
models = models-{'CESM2-WACCM','GFDL-CM4','GISS-E2-1-G-CC','GISS-E2-1-G','AWI-CM-1-1-MR'}
models = list(models)
print(models)
# Load the data
dset_dict = cat.to_dataset_dict(zarr_kwargs={'consolidated': True, 'decode_times': False},
cdf_kwargs= {'chunks':{'time':50}, 'decode_times': False})
for model in dset_dict.keys():
if dset_dict[model].source_id in models:
bgc_dict[dset_dict[model].source_id] =dset_dict[model].rename({'lon':'x','lat':'y'})
lon ,lat = np.meshgrid(dset_dict[model].lon.values,dset_dict[model].lat.values)
bgc_dict[dset_dict[model].source_id]['lon']=xr.DataArray(lon, dims=['y', 'x'])
bgc_dict[dset_dict[model].source_id]['lat']=xr.DataArray(lat, dims=['y', 'x'])
# read all data with O2 native models
models = set(uni_dict['source_id']['values']) # all the models
cat = col.search(experiment_id =['historical'],
grid_label = 'gn',
variable_id = 'o2',
table_id = 'Omon')
# Check the ES-DOC website for CMIP6 data issues if using the noQC catolog
if col.esmcol_path.find('noQC')!=-1:
check_data_issues(cat,issue_location="../catalogs/es-doc-retrieve-all-issues-20191029.json",isurl=False)
models = models.intersection({model for model in cat.df.source_id.unique().tolist()}).intersection({model for model in dic_dict})
models = models-{'CESM2-WACCM','GFDL-CM4','GISS-E2-1-G-CC','GISS-E2-1-G','AWI-CM-1-1-MR'}
models = list(models)
with warnings.catch_warnings(): # these lines just make sure that the warnings dont clutter your notebook
warnings.simplefilter("ignore")
dset_dict = read_data(col,
experiment_id = ['historical'],
grid_label = ['gn'],
variable_id = ['o2'],
table_id = ['Omon'],
source_id = models,
required_variable_id = ['o2'])
# Append or overwrite the regridded values with those on native grid
for model in dset_dict.keys():
if model in models:
bgc_dict[model]=dset_dict[model]
print(bgc_dict.keys())
# Get DIC into bgc_dict for calculating carbon components
for model in bgc_dict.keys():
bgc_dict[model]['dissic']=dic_dict[model]['dissic']
# +
# read all data with thetao, and so
theta_dict={}
cat = col.search(experiment_id = ['historical'],
grid_label = 'gr',
variable_id = 'thetao',
table_id = 'Omon')
# Check the ES-DOC website for CMIP6 data issues if using the noQC catolog
if col.esmcol_path.find('noQC')!=-1:
check_data_issues(cat,issue_location="../catalogs/es-doc-retrieve-all-issues-20191029.json",isurl=False)
models = set(uni_dict['source_id']['values']) # all the models
models = models.intersection({model for model in cat.df.source_id.unique().tolist()}).intersection({model for model in bgc_dict})
models = models-{'CESM2-WACCM','GFDL-CM4','GISS-E2-1-G-CC','GISS-E2-1-G','AWI-CM-1-1-MR'}
models = list(models)
# Load the data
dset_dict = cat.to_dataset_dict(zarr_kwargs={'consolidated': True, 'decode_times': False},
cdf_kwargs={'chunks': {'time':50}, 'decode_times': False})
for model in dset_dict.keys():
if dset_dict[model].source_id in models:
theta_dict[dset_dict[model].source_id]=dset_dict[model].rename({'lon':'x','lat':'y'})
lon ,lat = np.meshgrid(dset_dict[model].lon.values,dset_dict[model].lat.values)
theta_dict[dset_dict[model].source_id]['lon']=xr.DataArray(lon, dims=['y', 'x'])
theta_dict[dset_dict[model].source_id]['lat']=xr.DataArray(lat, dims=['y', 'x'])
# read all data with theta native models (excluding GFDL, which didnt report O2 on native grid)
cat = col.search(experiment_id = ['historical'],
grid_label = 'gn',
variable_id = 'thetao',
table_id = 'Omon')
# Check the ES-DOC website for CMIP6 data issues if using the noQC catolog
if col.esmcol_path.find('noQC')!=-1:
check_data_issues(cat,issue_location="../catalogs/es-doc-retrieve-all-issues-20191029.json",isurl=False)
models = set(uni_dict['source_id']['values']) # all the models
models = models.intersection({model for model in cat.df.source_id.unique().tolist()}).intersection({model for model in bgc_dict})
models = models-{'CESM2-WACCM','GFDL-CM4','GISS-E2-1-G-CC','GISS-E2-1-G','AWI-CM-1-1-MR'}
models = list(models)
with warnings.catch_warnings(): # these lines just make sure that the warnings dont clutter your notebook
warnings.simplefilter("ignore")
dset_dict = read_data(col,
experiment_id = ['historical'],
grid_label = 'gn',
variable_id = 'thetao',
table_id = 'Omon',
source_id = models,
required_variable_id = ['thetao'])
# Append or overwrite the regridded values with those on native grid
for model in dset_dict.keys():
if model in models:
theta_dict[model]=dset_dict[model]
print(theta_dict.keys())
salt_dict={}
cat = col.search(experiment_id = ['historical'],
grid_label = 'gr',
variable_id = 'so',
table_id = 'Omon')
# Check the ES-DOC website for CMIP6 data issues if using the noQC catolog
if col.esmcol_path.find('noQC')!=-1:
check_data_issues(cat,issue_location="../catalogs/es-doc-retrieve-all-issues-20191029.json",isurl=False)
models = set(uni_dict['source_id']['values']) # all the models
models = models.intersection({model for model in cat.df.source_id.unique().tolist()}).intersection({model for model in bgc_dict})
models = models-{'CESM2-WACCM','GFDL-CM4','GISS-E2-1-G-CC','GISS-E2-1-G','AWI-CM-1-1-MR'}
models = list(models)
# Load the data
dset_dict = cat.to_dataset_dict(zarr_kwargs={'consolidated': True, 'decode_times': False},
cdf_kwargs={'chunks': {'time':30}, 'decode_times': False})
for model in dset_dict.keys():
if dset_dict[model].source_id in models:
salt_dict[dset_dict[model].source_id] =dset_dict[model].rename({'lon':'x','lat':'y'})
lon ,lat = np.meshgrid(dset_dict[model].lon.values,dset_dict[model].lat.values)
salt_dict[dset_dict[model].source_id]['lon']=xr.DataArray(lon, dims=['y', 'x'])
salt_dict[dset_dict[model].source_id]['lat']=xr.DataArray(lat, dims=['y', 'x'])
# read all data with so native models (excluding GFDL, which didnt report O2 on native grid)
cat = col.search(experiment_id = ['historical'],
grid_label = 'gn',
variable_id = 'so',
table_id = 'Omon')
# Check the ES-DOC website for CMIP6 data issues if using the noQC catolog
if col.esmcol_path.find('noQC')!=-1:
check_data_issues(cat,issue_location="../catalogs/es-doc-retrieve-all-issues-20191029.json",isurl=False)
models = set(uni_dict['source_id']['values']) # all the models
models = models.intersection({model for model in cat.df.source_id.unique().tolist()}).intersection({model for model in bgc_dict})
models = models-{'CESM2-WACCM','GFDL-CM4','GISS-E2-1-G-CC','GISS-E2-1-G','AWI-CM-1-1-MR'}
models = list(models)
with warnings.catch_warnings(): # these lines just make sure that the warnings dont clutter your notebook
warnings.simplefilter("ignore")
dset_dict = read_data(col,
experiment_id = ['historical'],
grid_label = ['gn'],
variable_id = ['so'],
table_id = ['Omon'],
source_id = models,
required_variable_id = ['so'])
# Append or overwrite the regridded values with those on native grid
for model in dset_dict.keys():
if model in models:
salt_dict[model]=dset_dict[model]
print(salt_dict.keys())
# -
# ## Plot Atlantic and Pacific meridional sections of DIC for each model
fig, axes = plot_atlantic_pacific_sections(dic_dict,"dissic",units="[mmol/m3]",fac=1e3,lev=np.arange(2000,2420,20))
# ## Calculate Saturated Oxygen for each model
for model in bgc_dict.keys():
    print(model)
    # O2 at saturation from each model's salinity and potential temperature.
    bgc_dict[model]['gsw_o2sat']=calc_o2sat_gsw( salt_dict[model]['so'],
                                                 theta_dict[model]['thetao'])
#                                                  bgc_dict[model]['lat'],
#                                                  bgc_dict[model]['lon'],
#                                                  bgc_dict[model]['lev'])
    bgc_dict[model]['gsw_o2sat'] = bgc_dict[model]['gsw_o2sat'].chunk({'time':5})
    # Apparent oxygen utilization: saturation minus simulated O2.
    bgc_dict[model]['aou'] = (bgc_dict[model]['gsw_o2sat']-bgc_dict[model].o2).chunk({'time':5})
    # Regenerated carbon from AOU via a C:O2 ratio of 117/170
    # (presumably a Redfield-type stoichiometry -- confirm the reference).
    bgc_dict[model]['creg'] = bgc_dict[model]['aou'].chunk({'time':5})*(117/170)
    # Preformed carbon is the DIC remainder.
    bgc_dict[model]['cpre'] = (bgc_dict[model]['dissic']-bgc_dict[model]['creg']).chunk({'time':5})
# ## Plot regenerated carbon (`CREG=R_c:o2.AOU`) and preformed carbon (`CPRE=DIC-CREG`)
fig, axes = plot_atlantic_pacific_sections(bgc_dict,"creg",units="[mmol/m3]",fac=1e3,lev=np.arange(0,320,20))
fig, axes = plot_atlantic_pacific_sections(bgc_dict,"cpre",units="[mmol/m3]",fac=1e3,lev=np.arange(2000,2420,20))
# ## Compute linear trend in `DIC`, `Cpre`, and `Creg`.
fig, axes = plot_atlantic_pacific_sections(dic_dict,"dissic",plot_trend=True,
units="[mmol/m3/yr]",fac=1e3,lev=np.arange(-0.25,0.30,0.05),colors='RdBu_r')
fig, axes = plot_atlantic_pacific_sections(bgc_dict,"cpre",plot_trend=True,
units="[mmol/m3/yr]",fac=1e3,lev=np.arange(-0.25,0.30,0.05),colors='RdBu_r')
fig, axes = plot_atlantic_pacific_sections(bgc_dict,"creg",plot_trend=True,
units="[mmol/m3/yr]",fac=1e3,lev=np.arange(-0.25,0.30,0.05),colors='RdBu_r')
# ## Next Steps:
#
# 1. Try same analysis for climate change experiments (ScenarioMIP).
# 1. Calculate proper zonal basin averages.
# 1. Extend the partitioning of `Cpre` to calculate saturated carbon, `Csat`, and disequilibrium carbon, `Cres` after Williams & Follows (2010).
# 1. Correct AOU for surface `O2` undersaturation.
# 1. Link to internal anthropogenic CO2 uptake and air-sea CO2 fluxes.
| notebooks/jml_dissic_prereg_allmodels.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: aoc
# language: python
# name: aoc
# ---
# +
class Constraint:
    """Wraps a single predicate so it can be applied to password candidates."""

    def __init__(self, func):
        # Predicate evaluated by holds_true.
        self.func = func

    def holds_true(self, *args, **kwargs):
        """Evaluate the wrapped predicate with the given arguments."""
        return self.func(*args, **kwargs)
class Password:
    """A candidate password value plus the constraints it must satisfy."""

    def __init__(self, value, constraints=None):
        self.value = value
        # Avoid a mutable default argument: fall back to an empty rule list.
        self.constraints = constraints if constraints else []

    def checks_out(self):
        """Return True iff every constraint holds for this password's value."""
        for constraint in self.constraints:
            if not constraint.holds_true(self.value):
                return False
        return True
def has_n_digits(x, n=6):
    """Return True iff str(x) is exactly *n* characters long."""
    digits = str(x)
    return len(digits) == n
def is_in_range(x, lower=130254, upper=678275):
    """Return True when int(x) falls inside the inclusive [lower, upper] interval."""
    value = int(x)
    if value < lower:
        return False
    return value <= upper
def has_bigram_of_same_digits(x):
    """Return True when str(x) contains two identical adjacent characters."""
    text = str(x)
    for left, right in zip(text, text[1:]):
        if left == right:
            return True
    return False
def digits_dont_decrease_left_to_right(x):
    """Return True when the characters of str(x) never decrease from left to right."""
    text = str(x)
    return all(a <= b for a, b in zip(text, text[1:]))
# One Constraint per puzzle rule; Password.checks_out requires all of them.
constraints = [
    Constraint(func=has_n_digits),
    Constraint(func=is_in_range),
    Constraint(func=has_bigram_of_same_digits),
    Constraint(func=digits_dont_decrease_left_to_right),
]
# Puzzle input range (inclusive).
lower, upper = 130254, 678275
# The walrus operator binds the Password instance inside the filter so the
# passing candidates themselves can be collected.
passwords = [pw for i in range(lower, upper+1) if (pw := Password(value=i, constraints=constraints)).checks_out()]
# Part 1 answer: the number of valid passwords in the range.
len(passwords)
| puzzles/day_4/part_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ##### Installing NumPy into the Python interpreter
# ##### ----- pip install numpy -----
# ###### Arrays in NumPy: NumPy's main object is the homogeneous multidimensional array.
#
# ###### It is a table of elements (usually numbers), all of the same type, indexed by a tuple of positive integers.
# ###### In NumPy dimensions are called axes. The number of axes is rank.
# ###### NumPy’s array class is called ndarray. It is also known by the alias array.
#
# +
# created a class methods that contain all important operations
import pandas as pd
import numpy as np
import pylab as p
from IPython.display import Image
# Machine-specific paths — update these before running on another machine.
df = pd.read_csv("/Users/tulasiram/pon<EMAIL> - Google Drive/My Drive/Machine_learning/python learning/input/car data.csv")
# NOTE(review): rebinding `Image` to an instance shadows the imported Image
# class, so Image(...) cannot be called again later in this module.
Image = Image('/Users/tulasiram/<EMAIL> - Google Drive/My Drive/Machine_learning/python learning/input/NumPy.jpeg')
class Numpy:
    """Walk through core NumPy operations using columns of a cars dataset.

    Parameters
    ----------
    train : pandas.DataFrame
        Dataset expected to contain at least the ``Selling_Price``,
        ``Present_Price`` and ``Year`` columns.
    Img : IPython.display.Image
        Illustration shown when the object is constructed.
    """

    def __init__(self, train, Img):
        self.train = train
        self.Img = Img
        display(self.Img)
        display("This is the dataset", self.train.head(10))

    def demonstrate(self):
        """Run every demo section in order; each section prints/displays its results."""

        def basic_array():
            # Basic ndarray attributes: type, rank, shape, size and dtype.
            arr = np.array(self.train.Selling_Price)
            print("Array is of type: ", type(arr))
            print("No. of dimensions: ", arr.ndim)
            print("Shape of array: ", arr.shape)
            print("Size of array: ", arr.size)
            print("Array stores elements of type: ", arr.dtype)
        basic_array()

        def array_creation():
            # Ways of constructing arrays from Python containers and helpers.
            print("----### array creation techniques using datatypes ###--- ")
            # Creating array from list
            my_list = list(self.train.Selling_Price)
            list_array = np.array(my_list)
            display("Array created using passed list:", list_array)
            # Creating array from tuple
            my_tuple = tuple(self.train.Selling_Price)
            tuple_array = np.array(my_tuple)
            display("Array created using passed tuple:", tuple_array)
            # Creating array from set (sets are unordered, so element order
            # in the resulting array is arbitrary)
            my_set = set(self.train.Selling_Price)
            set_array = np.array(my_set)
            display("\nArray created using passed set:", set_array)
            # Creating array from dictionary
            my_dict = dict(self.train.Selling_Price)
            # FIX: previously passed `my_set` here, so the "dictionary"
            # example actually re-displayed the set array.
            dict_array = np.array(my_dict)
            display("\nArray created using passed dictionary:", dict_array)
            # using np.arange
            display("array aranged using np.arange():" ,np.arange(1, 2, 0.1))
            #using np.zeros
            b = np.zeros(2, dtype = int)
            display("Matrix b : \n", b)
            # using np.append
            x = self.train["Selling_Price"].to_numpy()
            y = self.train["Present_Price"].to_numpy()
            res = np.append(x, y)
            display("Appending two arrays using np.array()", res)
            # using np.linspace
            display("plotting Selling price values of 10 elements which are between 0 and 2")
            # NOTE(review): the endpoints are boolean masks (Series == 0 /
            # Series == 2), not scalars, so linspace interpolates between the
            # two masks — confirm this is the intended demonstration.
            x1 = np.linspace(self.train.Selling_Price == 0, self.train.Selling_Price == 2, 10)
            y1 = np.ones(10)
            p.plot(x1, y1, '*')
            p.xlim(0, 1.2)
            # Using MeshGrid
            display(" ---- MeshGrid Function -----")
            x = np.linspace(self.train.Selling_Price == 1,self.train.Selling_Price == 10, 10)
            y = np.linspace(self.train.Present_Price == 1,self.train.Present_Price == 10, 10)
            # The meshgrid function returns two 2-dimensional arrays
            x_1, y_1 = np.meshgrid(x, y)
            display(x_1)
            display(y_1)
            ## generating 2-d Guassian Array
            # NOTE(review): dst is built from x and y (not x_1/y_1), so this
            # is not evaluated on the meshgrid — verify which was intended.
            dst = np.sqrt(x*x+y*y)
            # Initializing sigma and muu
            sigma = 1
            muu = 0.000
            # Calculating Gaussian array
            gauss = np.exp(-( (dst-muu)**2 / ( 2.0 * sigma**2 ) ) )
            display("2D Gaussian array :\n", gauss)
            # using numpy.core.fromrecords() method
            # NOTE(review): the second positional argument of fromrecords is
            # `dtype`; passing Present_Price there looks unintended — confirm.
            g = np.core.records.fromrecords(self.train.Selling_Price,self.train.Present_Price)
            display(g[0])
        array_creation()

        def numpy_manipulation():
            # Copying, viewing, swapping, stacking, joining, de-duplicating,
            # searching, and datetime arithmetic.
            arr = np.array(self.train.Selling_Price)
            # creating copy of same array (deep copy: owns its own data)
            a = arr.copy()
            display("original array : ", arr)
            display("copied array : ", a)
            # viewing the array (shallow copy: shares the underlying buffer)
            v = arr.view()
            display("array view : ", v)
            # swapping rows of an array (fancy indexing swaps rows 0 and 2)
            my_array = self.train[["Selling_Price","Present_Price"]].to_numpy()
            my_array[[2, 0]] = my_array[[0, 2]]
            # FIX: the old message said "last column and first column", but the
            # operation swaps the first and third ROWS.
            display("After swapping the first and third rows:",my_array)
            # Stacking the array horizontally
            out_arr = np.hstack((my_array[0:10]))
            display("Output horizontally stacked array:\n ", out_arr)
            # Stacking the array vertically
            out_arr = np.vstack((my_array[0:10]))
            print ("Output vertically stacked array:\n ", out_arr)
            # Joining the arrays
            arr1 = np.array(self.train["Selling_Price"])
            arr2 = np.array(self.train["Present_Price"])
            array_new = np.concatenate((arr1, arr2))
            display("Joined arrays are:", array_new)
            # you can also do it along a chosen axis
            array_axis = np.concatenate((arr1, arr2), axis=0)
            display("Joined arrays on axis are:", array_axis)
            # stack() function of NumPy joins two or more arrays along a new axis
            array_new = np.stack((arr1, arr2), axis=0)
            print(array_new)
            # numpy.block is used to create nd-arrays from nested blocks of lists
            block_1 = np.array([[1, 1], [1, 1]])
            block_2 = np.array([[2, 2, 2], [2, 2, 2]])
            block_new = np.block([block_1, block_2])
            print(block_new)
            # Filtering unique values in numpy
            arr = np.array(self.train.Year)
            g = np.unique(arr)
            display("The unique values in year column :", g)
            # Searching array for specific values
            display("arr = {}".format(arr))
            # looking for year 2014 in arr and storing its index in i
            i = np.where(arr == 2014)
            display("i = {}".format(i))
            # Working with date and time
            # creating a date
            today = np.datetime64('2022-02-23')
            print("Date is:", today)
            print("Year is:", np.datetime64(today, 'Y'))
            # creating array of dates in a month
            dates = np.arange('2022-02', '2022-03', dtype='datetime64[D]')
            print("\nDates of February, 2022:\n", dates)
            print("Today is February:", today in dates)
            # arithmetic operation on dates
            dur = np.datetime64('2022-05-22') - np.datetime64('2020-05-22')
            print("\nNo. of days:", dur)
            print("No. of weeks:", np.timedelta64(dur, 'W'))
            # sorting dates
            a = np.array(['2022-02-22', '2020-10-13', '2019-05-22'], dtype='datetime64')
            print("\nDates in sorted order:", np.sort(a))
        numpy_manipulation()

        def array_indexing():
            # Slicing and boolean-mask indexing.
            print(" ---### Array Indexing ###--- ")
            arr = self.train[["Selling_Price","Present_Price"]].to_numpy()
            # Slicing: first 10 rows, first 2 columns
            temp = arr[:10,:2]
            display("Array with first 10 elements:", temp)
            # boolean array indexing example
            cond = arr > 10 # cond is a boolean array
            temp = arr[cond]
            display("\nElements greater than 10:\n", temp)
        array_indexing()

        def numpy_vectors():
            # Dot product and scalar multiplication of two column vectors.
            vec1 = np.array(self.train.Selling_Price)
            display("First Vector : " + str(vec1))
            vec2 = np.array(self.train.Present_Price)
            display("Second Vector : " + str(vec2))
            scalar = 2
            # Vector Dot Product
            dot_product = vec1.dot(vec2)
            print("Dot Product : " + str(dot_product))
            # multiplying vector and scaler
            scalar_mul = vec1 * scalar
            display("Scalar Multiplication : " + str(scalar_mul))
        numpy_vectors()

        def arithematic_operations():
            # Elementwise arithmetic broadcasts the scalar across the array.
            list_array = np.array(self.train.Selling_Price)
            # add 1 to every element
            a = list_array+1
            display("Adding 1 to every element:", a)
            # subtract 3 from each element
            b = list_array-3
            display("Subtracting 3 from each element:", b)
            # multiply each element by 10
            c = list_array*10
            display("Multiplying each element by 10:", c)
            # square each element
            d = list_array**2
            display("Squaring each element:", d)
            # transpose of array (no-op for a 1-D array)
            e = list_array.T
            display("Transpose of array:\n", e)
        arithematic_operations()

        def unary_operations():
            # Reductions over a 1-D array: max, min, sum, cumulative sum.
            # NOTE(review): the "Row-wise"/"Column-wise" wording in the display
            # strings is misleading for a 1-D array; axis=0 is the only axis.
            arr = np.array(self.train.Selling_Price)
            # maximum element of array
            a = arr.max(axis=0)
            display("Row-wise maximum elements:", a)
            # minimum element of array
            b = arr.min(axis=0)
            display("Column-wise minimum elements:", b)
            # sum of array elements
            c = arr.sum()
            print ("Sum of all array elements:", c)
            # cumulative sum along each row
            d = arr.cumsum()
            print ("Cumulative sum along each row:\n", d)
        unary_operations()

        def binary_operators():
            # Elementwise binary operations between two equal-length vectors.
            a = np.array(self.train.Selling_Price)
            b = np.array(self.train.Present_Price)
            # add arrays
            display("Arrays sum:\n", a + b)
            # multiply arrays (elementwise multiplication)
            display("Array multiplication:\n", a*b)
            # matrix multiplication
            # This is one dimensional so it yields one dimensional matrix
            display("Matrix one dimensional multiplication:\n", a.dot(b))
        binary_operators()

        def universal_functions():
            # NumPy ufuncs operate elementwise at C speed.
            # create an array of sine values
            a = np.array([0, np.pi/2, np.pi])
            display("Sine values of array elements:", np.sin(a))
            # exponential values
            a = np.array([0, 1, 2, 3])
            display("Exponent of array elements:", np.exp(a))
            # square root of array values
            display("Square root of array elements:", np.sqrt(a))
        universal_functions()

        def sorting_arrays():
            # Sorting along different axes and with an explicit algorithm.
            x = self.train[["Selling_Price","Present_Price"]].to_numpy()
            # axis=None flattens before sorting
            display("Array elements in sorted order:\n", np.sort(x, axis = None))
            # sort array row-wise
            display("Row-wise sorted array:\n",np.sort(x, axis = 1))
            # specify sort algorithm
            display("Column wise sort by applying merge-sort:\n", np.sort(x, axis = 0, kind = 'mergesort'))
        sorting_arrays()
# Instantiate the demo with the cars dataset and the illustration, then run it.
obj = Numpy(df,Image)
obj.demonstrate()
| 08_NumPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Introduction to TensorBoard
#
# __TensorBoard__ is a visualization software that comes with any standard TensorFlow installation. In Google’s words: “The computations you'll use TensorFlow for (like training a massive deep neural network) can be complex and confusing. To make it easier to understand, debug, and optimize TensorFlow programs, we've included a suite of visualization tools called TensorBoard.”
#
# TensorFlow programs can range from very simple to super complex problems (using thousands of computations), and they all have two basic components, Operations and Tensors. As explained in the previous tutorials, the idea is that you create a model that consists of a set of operations, feed data into the model, and the tensors will flow between the operations until you get an output tensor, your result.
#
# When fully configured, TensorBoard window will look something like this:
#
# <img src="files/files/3_1.png" width="500" height="1000" >
#
# ___Fig1. ___ TensorBoard appearance
#
#
# TensorBoard was created as a way to help you understand the flow of tensors in your model so that you can debug and optimize it. It is generally used for two main purposes:
#
# __1. Visualizing the Graph__
#
# __2. Writing Summaries to Visualize Learning__
#
# We'll cover this two main usages of TensorBoard in this tutorial. Learning to use TensorBoard early and often will make working with TensorFlow much more enjoyable and productive.
# ## 1. Visualizing the Graph
#
# While powerful, TensorFlow computation graphs can become extremely complicated. Visualizing the graph can help you understand and debug it. Here's an example of the visualization at work from the TensorFlow website.
#
# <img src="files/files/3_2.gif" width="500" height="1000" >
#
# ___Fig2. ___ Visualization of a TensorFlow graph
#
# To make our TensorFlow program __TensorBoard-activated__, we need to add a very few lines of code to it. This will export the TensorFlow operations into a file, called __event file__ (or event log file). TensorBoard is able to read this file and give insight into the model graph and its performance.
#
# Now let's write a simple TensorFlow program and visualize its computation graph with TensorBoard.
#
# ### Example 1:
# Let's create two constants and add them together. Constant tensors can be defined simply by defining their value:
#
# +
import tensorflow as tf
# Build a tiny TF1-style static graph: two constants and their sum.
# (tf.Session is TensorFlow 1.x API; in TF2 use tf.compat.v1 or eager mode.)
# create graph
a = tf.constant(2)
b = tf.constant(3)
c = tf.add(a, b)
# launch the graph in a session
with tf.Session() as sess:
    print(sess.run(c))
# -
# To visualize the program with TensorBoard, we need to write log files of the program. To write event files, we first need to create a __writer__ for those logs, using this code:
writer = tf.summary.FileWriter([logdir], [graph])
# where __[logdir]__ is the folder where you want to store those log files. You can choose [logdir] to be something meaningful such as './graphs'. The second argument __[graph]__ is the graph of the program we're working on. There are two ways to get the graph:
# 1. Call the graph using __tf.get_default_graph()__, which returns the default graph of the program
# 2. set it as __sess.graph__ which returns the session's graph (note that this requires us to already have created a session).
#
# We'll show both ways in the following example; however, the second way is more common. Either way, make sure to create a writer only after you’ve defined your graph. Otherwise, the graph visualized on TensorBoard would be incomplete.
#
# Let's add the writer to the first example and visualize the graph.
#
# +
import tensorflow as tf
# Same graph as Example 1, now with a FileWriter so TensorBoard can render it.
# create graph
a = tf.constant(2)
b = tf.constant(3)
c = tf.add(a, b)
# creating the writer out of the session
# writer = tf.summary.FileWriter('./graphs', tf.get_default_graph())
# launch the graph in a session
with tf.Session() as sess:
    # or creating the writer inside the session (uses the session's graph)
    writer = tf.summary.FileWriter('./graphs', sess.graph)
    print(sess.run(c))
# -
# Now if you run this code, it creates a directory inside your current directory (beside your Python code) which contains the __event file__.
#
# <img src="files/files/3_3.png" width="300" height="600" >
#
# ___Fig3. ___ Created directory which contains the event file
#
#
# Next, go to Terminal and make sure that the present working directory is the same as where you ran your Python code. For example, here we can switch to the directory using
#
# $ cd ~/Desktop/tensorboard
#
# Then run:
#
# $ tensorboard --logdir="./graphs" --port 6006
#
# This will generate a link for you. ctrl+left click on that link (or simply copy it into your browser or just open your browser and go to http://localhost:6006/. This will show the TensorBoard page which will look like:
#
# <img src="files/files/3_4.png" width="500" height="1000" >
#
# ___Fig4. ___ TensorBoard page visualizing the graph generated in Example 1
#
# “Const” and “Const_1” in the graph correspond to a and b, and the node “Add” corresponds to c. The names we give them (a, b, and c) are just __Python-names__ which are for us to access them when we write code. They mean nothing for the internal TensorFlow. To make TensorBoard understand the names of your ops, you have to explicitly name them.
#
# Let's modify the code one more time and add the names:
#
#
#
# +
import tensorflow as tf
# Same graph as before, with explicit op names so TensorBoard shows "a", "b"
# and "addition" instead of "Const"/"Const_1"/"Add".
# create graph
# FIX: `name` must be passed as a keyword — the second positional argument of
# tf.constant is `dtype`, so tf.constant(2, "a") raises a dtype error.
a = tf.constant(2, name="a")
b = tf.constant(3, name="b")
c = tf.add(a, b, name="addition")
# creating the writer out of the session
# writer = tf.summary.FileWriter('./graphs', tf.get_default_graph())
# launch the graph in a session
with tf.Session() as sess:
    # or creating the writer inside the session
    writer = tf.summary.FileWriter('./graphs', sess.graph)
    print(sess.run(c))
# -
# <img src="files/files/3_5.png" width="500" height="1000" >
#
# ___Fig5. ___ TensorBoard page visualizing the graph generated in Example 1 with modified names
#
# __*Note:__ If you run your code several times with the same [logdir], there will be multiple event files in your [logdir]. TF will show only the latest graph and display the warning of multiple event files. To get rid of the warning, delete the event files you no longer need or save them in different [logdir] folders.
#
#
# ## 2. Writing Summaries to Visualize Learning
#
# So far we only focused on how to visualize the graph in TensorBoard. In this second part, we are now going to use a special operation called __summary__ to visualize the model parameters (like weights and biases of a neural network), metrics (like loss or accuracy value), and images (like input images to a network).
#
# __Summary__ is a special TensorBoard operation that takes in a regular tensor and outputs the summarized data to your disk (i.e. in the event file). Basically, there are three main types of summaries:
#
# __1. tf.summary.scalar:__ used to write a single scalar-valued tensor (like classificaion loss or accuracy value)
#
# __2. tf.summary.histogram:__ used to plot histogram of all the values of a non-scalar tensor (like weight or bias matrices of a neural network)
#
# __3. tf.summary.image:__ used to plot images (like input images of a network, or generated output images of an autoencoder or a GAN)
#
# In the following sections, we'll go through each of the above summary types in more details.
#
#
# ### 2.1. tf.summary.scalar:
# It's for writing the values of a scalar tensor that changes over time or iterations. In the case of neural networks (say a simple network for classification task), it's usually used to monitor the changes of loss function or classification accuracy.
#
# Let's run a simple example to get the point.
#
# ### Example 2:
# Randomly pick 100 values from a standard Normal distribution, _N(0, 1)_, and plot them one after the other.
#
# One way to do so is to simply create a variable and initialize it from a normal distribution (with mean=0 and std=1), then run a for loop in the session and initialize it 100 times. The code will be as follows and the required steps to write the summary is explained in the code:
#
# +
import tensorflow as tf
# Example 2: write a scalar summary for 100 re-initializations of a variable.
# NOTE(review): re-running this cell in the same process raises a ValueError
# because the variable 'x_scalar' already exists in the default graph.
# create the scalar variable
x_scalar = tf.get_variable('x_scalar', shape=[], initializer=tf.truncated_normal_initializer(mean=0, stddev=1))
# step 1: create the scalar summary
first_summary = tf.summary.scalar(name='My_first_scalar_summary', tensor=x_scalar)
init = tf.global_variables_initializer()
# launch the graph in a session
with tf.Session() as sess:
    # 2. creating the writer inside the session
    writer = tf.summary.FileWriter('./graphs', sess.graph)
    for step in range(100):
        # loop over several initializations of the variable
        sess.run(init)
        # step 3: evaluate the scalar summary
        summary = sess.run(first_summary)
        # step 4: add the summary to the writer (i.e. to the event file)
        writer.add_summary(summary, step)
    print('Done with writing the scalar summary')
# -
# Let's pull up TensorBoard and checkout the result. Like before, you need to open terminal and type:
#
# $ tensorboard --logdir="./graphs" --port 6006
#
# where _"./graphs"_ is the name of directory you saved the event file into. If you open TensorBoard, you'll see a new tab named __"scalars"__ next to the earlier discussed __"graphs"__ tab (compare Fig. 5 with Fig. 6). The whole window looks like:
#
# <img src="files/files/3_6.png" width="500" height="1000" >
#
# ___Fig6. ___ TensorBoard page visualizing the written scalar summary
#
# As you see in the figure, the plot panel came under "My_first_scalar_summary" name which we determined in our code. The x-axis and y-axis shows the 100 steps and the corresponding values (random values from a standard normal dist.) of the variable respectively.
#
#
# ### 2.2. tf.summary.histogram:
# It's for plotting the histogram of the values of a non-scalar tensor. This gives us a view of how does the histogram (and the distribution) of the tensor values change over time or iterations. In the case of neural networks, it's commonly used to monitor the changes of weights and biases distributions. It's very useful in detecting irregular behavior of the network parameters (like when many of the weights shrink to almost zero or grow largely).
#
# Now let's go back to our previous example and add a histogram summary to it.
#
# ### Example 3:
# Continue the previous example by adding a matrix of size 30x40, whose entries come from a standard normal distribution. Initialize this matrix 100 times and plot the distribution of its entries over time.
#
# +
import tensorflow as tf
# Example 3: scalar + histogram summaries over 100 re-initializations.
# NOTE(review): re-running this cell in one process raises a ValueError because
# the variables 'x_scalar'/'x_matrix' already exist in the default graph.
# create the variables
x_scalar = tf.get_variable('x_scalar', shape=[], initializer=tf.truncated_normal_initializer(mean=0, stddev=1))
x_matrix = tf.get_variable('x_matrix', shape=[30, 40], initializer=tf.truncated_normal_initializer(mean=0, stddev=1))
# step 1: create the summaries
scalar_summary = tf.summary.scalar('My_scalar_summary', x_scalar)
histogram_summary = tf.summary.histogram('My_histogram_summary', x_matrix)
init = tf.global_variables_initializer()
# launch the graph in a session
with tf.Session() as sess:
    # 2. creating the writer inside the session
    writer = tf.summary.FileWriter('./graphs', sess.graph)
    for step in range(100):
        # loop over several initializations of the variable
        sess.run(init)
        # step 3: evaluate the merged summaries
        summary1 = sess.run(scalar_summary)
        # step 4: add the summary to the writer (i.e. to the event file)
        writer.add_summary(summary1, step)
        # repeat step 3 and 4 for the histogram summary
        summary2 = sess.run(histogram_summary)
        writer.add_summary(summary2, step)
    print('Done writing the summaries')
# -
#
| 1_Basics/3_Introduction_to_Tensorboard.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:islaenv]
# language: python
# name: conda-env-islaenv-py
# ---
# +
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
from CASutils import mapplot_utils as mymaps
from CASutils import colorbar_utils as cbars
# -
# Output directory for the figures produced below.
plotpath="/project/cas/islas/python_plots/snowpaper/FIGURES/checkswe/"
# DJF snow water equivalent (SWE) from the CESM1 and CESM2 large ensembles.
lens1 = xr.open_dataset("/project/cas/islas/python_savs/snowpaper/DATA_SORT/LENS1/SWE/swe_lens1_djf.nc")
lens2 = xr.open_dataset("/project/cas/islas/python_savs/snowpaper/DATA_SORT/LENS2/SWE/swe_lens2_djf.nc")
# Ensemble- and time-mean climatologies, loaded eagerly into memory.
lens1m = lens1.mean(['member','time']).load()
lens2m = lens2.mean(['member','time']).load()
# +
fig = plt.figure(figsize=(16,16))
# Three NH panels: CESM1 and CESM2 climatologies, plus their difference.
ax1 = mymaps.contourmap_bothcontinents_fill_nh_pos(fig, lens1m.swe, lens1m.lon, lens1m.lat, 10, -500,500,
                                                   'SWE, CESM1',0.05,0.32,0.8,0.95)
ax2 = mymaps.contourmap_bothcontinents_fill_nh_pos(fig, lens2m.swe, lens2m.lon, lens2m.lat, 10, -500, 500,
                                                   'SWE, CESM2',0.37,0.64,0.8,0.95)
ax3 = mymaps.contourmap_bothcontinents_fill_nh_pos(fig, np.array(lens2m.swe) - np.array(lens1m.swe), lens1m.lon, lens1m.lat, 5, -100, 100,
                                                   'SWE, CESM2$-$CESM1',0.69,0.97,0.8,0.95)
# Two colorbars: one-sided for the climatologies, diverging for the difference.
ax = cbars.plotcolorbar(fig, 10,-500,500,'SWE (mm)',0.05,0.64,0.77,0.78,posneg='pos')
ax = cbars.plotcolorbar(fig, 5, -100, 100, 'SWE (mm)',0.69,0.97,0.77,0.78)
fig.savefig(plotpath+"SWE.jpeg", bbox_inches='tight', facecolor='white')
# Leftover commented-out panels from a snow-fraction figure:
#ax2 = maps.contourmap_bothcontinents_fill_nh_pos(fig, np.array(fsno_clm5_djf), np.array(fsno_clm5_djf.lon), np.array(fsno_clm5_djf.lat),0.1,-1,1,
#                '(b) Snow fraction, CAM6_CLM5',0.37,0.64,0.8,0.95)
#ax3 = maps.contourmap_bothcontinents_fill_nh_pos(fig, np.array(fsno_clm5_djf) - np.array(fsno_snowd_djf), np.array(fsno_snowd_djf.lon), np.array(fsno_snowd_djf.lat),0.1,-1,1,
#                '(c) Snow fraction, CAM6_CLM5$-$SNWDENS', 0.69,0.97,0.8,0.95)
# -
# ### ESA_CCI
# ESA CCI SWE observations: open all files lazily as one dataset.
dat = xr.open_mfdataset("/project/mojave/observations/SWE/ESA_CCI/data/*.nc")
# Label every timestep with its "YYYY-MM" string so monthly means can be formed.
monyearstr = xr.DataArray(dat.indexes['time'].strftime('%Y-%m'), coords=dat.time.coords, name='monyearstr')
datmonthly = dat.groupby(monyearstr).mean('time', skipna=True)
# +
ystart=1979 ; yend=2014 ; nyears=yend-ystart+1
for iyear in np.arange(ystart,yend+1,1):
    print(iyear)
    # DJF mean: December of iyear plus January/February of iyear+1.
    # FIX: this previously selected "-11" (November), which skipped December
    # entirely — inconsistent with both the variable name and the GlobSnow
    # DJF computation further down.
    djfdat = (datmonthly.sel(monyearstr=str(iyear)+"-12") + datmonthly.sel(monyearstr=str(iyear+1)+"-01") + datmonthly.sel(monyearstr=str(iyear+1)+"-02"))/3.
    if (iyear == ystart):
        # Allocate the output array on the first pass, once the grid is known.
        swedjf = xr.DataArray(np.zeros([nyears, datmonthly.lat.size, datmonthly.lon.size]),
                              coords=[np.arange(ystart,yend+1,1), datmonthly.lat, datmonthly.lon],
                              dims=['year','lat','lon'], name='swedjf')
    swedjf[iyear-ystart,:,:] = djfdat.swe
# -
# Climatological (multi-year) mean of the DJF SWE.
swedjfm = swedjf.mean('year')
# +
fig = plt.figure(figsize=(16,16))
# Compare the ESA CCI observations against the two model climatologies.
ax1 = mymaps.contourmap_bothcontinents_fill_nh_pos(fig, swedjfm, swedjfm.lon, swedjfm.lat, 10, -300,300,
                                                   'SWE, ESA_CCI',0.05,0.32,0.8,0.95)
x1 = mymaps.contourmap_bothcontinents_fill_nh_pos(fig, lens1m.swe, lens1m.lon, lens1m.lat, 10, -300,300,
                                                  'SWE, CESM1',0.37,0.64,0.8,0.95)
ax2 = mymaps.contourmap_bothcontinents_fill_nh_pos(fig, lens2m.swe, lens2m.lon, lens2m.lat, 10, -300, 300,
                                                   'SWE, CESM2',0.69,0.97,0.8,0.95)
# Single shared one-sided colorbar for all three panels.
ax = cbars.plotcolorbar(fig, 10,-300,300,'SWE (mm)',0.2,0.82,0.765,0.775,posneg='pos')
fig.savefig(plotpath+'swe_comparison.jpeg', bbox_inches='tight', facecolor='white')
# -
print(swedjfm)
ystart=1979 ; yend=2014 ; nyears=yend-ystart+1
# NOTE(review): unfinished scratch cell — the glob pattern is empty, so
# open_mfdataset will raise; fill in the path or delete this cell.
for iyear in np.arange(ystart,yend+1,1):
    dat = xr.open_mfdataset("")
# +
ystart=1979 ; yend=2014 ; nyears=yend-ystart+1
years = np.arange(ystart,yend+1,1)
# 1981 is skipped below, so drop it from the year coordinate as well.
# NOTE(review): the reason for excluding 1981 is not shown here — confirm
# (presumably missing GlobSnow files for that winter).
years = years[years != 1981]
count=0
for iyear in np.arange(ystart,yend+1,1):
    if (iyear != 1981):
        dec = xr.open_dataset("/project/mojave/observations/GlobSnow/v3.0/"+str(iyear)+'12_northern_hemisphere_monthly_swe_0.25grid.nc')
        jan = xr.open_dataset("/project/mojave/observations/GlobSnow/v3.0/"+str(iyear+1)+'01_northern_hemisphere_monthly_swe_0.25grid.nc')
        feb = xr.open_dataset("/project/mojave/observations/GlobSnow/v3.0/"+str(iyear+1)+'02_northern_hemisphere_monthly_swe_0.25grid.nc')
        # Day-weighted DJF mean (31+31+28 = 90 days; leap Februaries are
        # approximated with 28 days).
        djf = (31/90.)*dec.swe + (31./90.)*jan.swe + (28./90.)*feb.swe
        if (iyear == ystart):
            # Allocate on the first pass; nyears-1 because 1981 is excluded.
            globsnow = xr.DataArray(np.zeros([nyears-1, djf.y.size, djf.x.size]),
                                    coords=[years, djf.y, djf.x], dims=['year','y','x'], name='swe')
        globsnow[count,:,:] = djf
        count=count+1
#dat = xr.open_mfdataset("/project/mojave/observations/GlobSnow/v3.0/*.nc", concat_dim=['time',None,None])
# -
# Target grid: the CESM1 lat/lon mesh.
grid_out = xr.Dataset({'lat':(['lat'], lens1.lat)}, {'lon': (['lon'], lens1.lon)})
# Rename the projected x/y coordinates so xESMF can find lon/lat.
globsnow = globsnow.rename({'x':'lon', 'y':'lat'})
import xesmf as xe
# FIX: removed a stray `globsnow.rename({""})` call (invalid argument — rename
# requires a mapping, not a set) and corrected the misspelled keyword
# `periodig` to `periodic` (tells xESMF the grid wraps in longitude).
regridder = xe.Regridder(globsnow, grid_out, 'bilinear', periodic=True, reuse_weights=False,
                         filename='wgtfile.nc')
print(globsnow)
print(dec.crs)
| FIGURES/checkswe/checkswe.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# ## 1. Downloading spaCy models
#
# The first step is to download the spaCy model. The model has been pre-trained on annotated English corpora. You only have to run these code cells below the first time you run the notebook; after that, you can skip right to step 2 and carry on from there. (If you run them again later, nothing bad will happen; it’ll just download again.) You can also run spaCy in other notebooks on your computer in the future, and you’ll be able to skip the step of downloading the models.
# -
#Imports the module you need to download and install the spaCy models
import sys
#Installs the English spaCy model
# !{sys.executable} -m pip install https://github.com/explosion/spacy-models/releases/download/en_core_web_trf-3.1.0/en_core_web_trf-3.1.0.tar.gz
# ## 2. Importing spaCy and setting up NLP
#
# Run the code cell below to import the spaCy module, and create a function to load the English model and run the NLP algorithms (includes named-entity recognition).
# +
#Imports spaCy
import spacy
#Imports the English model
import en_core_web_trf
# -
#Sets up a function so you can run the English model on texts
# Loads the full transformer-based English pipeline downloaded in step 1.
nlp = en_core_web_trf.load()
# nlp = spacy.load("en_core_web_trf", disable=["tagger", "attribute_ruler", "lemmatizer"])
# ## 3. Importing other modules
#
# There’s various other modules that will be useful in this notebook. The code comments explain what each one is for. This code cell imports all of those.
# +
#io is used for opening and writing files
import io
#glob is used to find all the pathnames matching a specified pattern (here, all text files)
import glob
#os is used to navigate your folder directories (e.g. change folders to where you files are stored)
import os
# for handling data frames, etc.
import pandas as pd
# Import the spaCy visualizer
from spacy import displacy
# Import the Entity Ruler for making custom entities
from spacy.pipeline import EntityRuler
from spacy.language import Language # type: ignore
import requests
import csv
import pathlib
# # ! pip install spacy-lookup
# allows you to add custom entities for NER
#from spacy_lookup import Entity
# -
# ## 4. Directory setup
#
# Assuming you’re running Jupyter Notebook from your computer’s home directory, this code cell gives you the opportunity to change directories, into the directory where you’re keeping your project files. I've put just a few of the ANSP volumes into a folder called `subset`.
# +
#Define the file directory here
# (machine-specific path — update before running on another machine)
filedirectory = '/Users/thalassa/streamlit/streamlit-ansp'
#Change the working directory to the one you just defined
os.chdir(filedirectory)
# -
# Custom gazetteers for the ANSP corpus: taxon names and habitat terms.
species = pd.read_json("/Users/thalassa/streamlit/streamlit-ansp/data/ansp-taxa.json")
habitats = pd.read_json("/Users/thalassa/streamlit/streamlit-ansp/data/ansp-habitat.json")
# +
# Iterate through species and habitat dictionary to turn values into lists
species_dict = dict(species)
for key, val in species_dict.items():
    species_dict[key] = [val,]
habitats_dict = dict(habitats)
for key, val in habitats_dict.items():
    habitats_dict[key] = [val,]
@Language.factory(name="species_entity")
def create_species_entity(nlp: Language, name: str):
    # NOTE(review): `Entity` comes from spacy_lookup, whose import is commented
    # out above — this factory will raise NameError until it is restored.
    return Entity(name=name, keywords_dict=species_dict, label="TAXA")
@Language.factory(name="habitat_entity")
def create_habitat_entity(nlp: Language, name: str):
    # habitats_list = list(habitats.Habitat)
    return Entity(name=name, keywords_dict=habitats_dict, label="HABITAT")
# -
#ruler = EntityRuler(nlp)
#nlp.add_pipe(ruler)
# Register the two custom factories in the pipeline, then persist the whole
# pipeline to disk under "ansp_ner".
nlp.add_pipe("species_entity")
nlp.add_pipe("habitat_entity")
nlp.to_disk("ansp_ner")
# Inspect the resulting component order.
nlp.pipeline
# + [markdown] tags=[]
# ## 5. Define an entity ruler
#
# We need to tell the pipeline where we want to add in the new pipeline component. We want it to add the new entity ruler *before* we do the NER step.
# -
# Insert a pattern-based EntityRuler ahead of the statistical NER component,
# so ruler matches take precedence over model predictions.
ruler = nlp.add_pipe("entity_ruler", before='ner') # <- this is directly from spacy documentation
# Load the new pattern (your list of custom entities) by adding them from the properly formatted jsonl file.
ruler.from_disk("/Users/thalassa/Rcode/blog/data/ansp-entity-ruler.jsonl")
| scripts/ansp-create-entity-ruler.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Import packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import seaborn as sns
# ## Load Data
# +
## load data
# Seattle and Boston Airbnb open-data dumps (calendar / listings / reviews).
# NOTE: the "settle"/"clendar" misspellings are kept — these names are used
# throughout the rest of the notebook.
df_settle_clendar_raw = pd.read_csv('data/seattle/calendar.csv')
df_settle_listings_raw = pd.read_csv('data/seattle/listings.csv')
df_settle_reviews_raw = pd.read_csv('data/seattle/reviews.csv')
df_boston_airbnb_calendar_raw = pd.read_csv('data/boston-airbnb-open-data/calendar.csv')
df_boston_airbnb_listings_raw = pd.read_csv('data/boston-airbnb-open-data/listings.csv')
df_boston_airbnb_reviews_raw = pd.read_csv('data/boston-airbnb-open-data/reviews.csv')
# -
# ## Explore Data
# ### df_settle_clendar_raw
# Small documentation table describing the calendar columns (meanings in Chinese).
info_dic = {'Feature':pd.Series(['listing_id','date','available','price']),
            'Meaning':pd.Series(['清单id','日期','是否有效','价格']),
           }
df_settle_clendar_raw_info = pd.DataFrame(info_dic)
df_settle_clendar_raw_info
# Quick look at dtypes/row counts and the first rows of the calendar table.
df_settle_clendar_raw.info()
df_settle_clendar_raw.head()
## get all the NaN values in price
df_settle_clendar_raw[df_settle_clendar_raw['price'].isnull()].head()
# After explore the NaN value of df_settle_clendar_raw,it only appear in unavailable line.So we don't need to drop it.
# ### df_settle_listings_raw
# - The table of df_seattle_listings_raw's meaning:
#
# |Feature|Meaning|Relation|
# |:-:|:-:|:-:|
# |id|id|PK|
# |listing_url|清单url||
# |scrape_id|||
# |last_scraped|||
# |name|清单名||
# |summary|摘要||
# |space|空间||
# |description|描述||
# |experiences_offered|体验||
# |neighborhood_overview|邻居||
# |notes|记录||
# |transit|中转||
# |thumbnail_url|简介url||
# |medium_url|视频url||
# |picture_url|图片url||
# |xl_picture_url|x1图片url||
# |host_id|租客id||
# |host_url|租客url||
# |host_name|租客姓名||
# |host_since|租客||
# |host_location|租客地址||
# |host_about|租客信息||
# |host_response_time|租客响应时间||
# |host_response_rate|租客响应率||
# |host_acceptance_rate|租客验收率||
# |host_is_superhost|租客是会员||
# |host_thumbnail_url|租客简介url||
# |host_picture_url|租客图片url||
# |host_neighbourhood|租客邻居||
# |host_listings_count|租客清单数||
# |host_total_listings_count|租客清单总数||
# |host_verifications|租客验证信息||
# |host_has_profile_pic|租客是否有头像||
# |host_identity_verified|租客账户是否验证||
# |street|街道||
# |neighbourhood|邻居||
# |neighbourhood_cleansed|||
# |neighbourhood_group_cleansed|||
# |city|城市||
# |state|街道||
# |zipcode|邮编||
# |market|市场||
# |smart_location|智能定位||
# |country_code|国家代码||
# |country|国家||
# |latitude|纬度||
# |longitude|经度||
# |is_location_exact|位置是否准确||
# |property_type|房屋种类||
# |room_type|房型||
# |accommodates|容纳空间||
# |bathrooms|浴室||
# |bedrooms|卧室||
# |beds|床||
# |bed_type|床类型||
# |amenities|设施||
# |square_feet|平方||
# |price|价格||
# |weekly_price|价格(周)||
# |monthly_price|价格(月)||
# |security_deposit|保证金||
# |cleaning_fee|清洁费||
# |guests_included|客人数||
# |extra_people|额外的人||
# |minimum_nights|最少的夜晚||
# |maximum_nights|最多的夜晚||
# |calendar_updated|日历已更新||
# |has_availability|有空房||
# |availability_30|可租(30天)||
# |availability_60|可租(60天)||
# |availability_90|可租(90天)||
# |availability_365|可租(365天)||
# |calendar_last_scraped|||
# |number_of_reviews|评论数||
# |first_review|第一条评论||
# |last_review|最后一条评论||
# |review_scores_rating|评论分数||
# |review_scores_accuracy|评论分数(准确度)||
# |review_scores_cleanliness|评论分数(清洁度)||
# |review_scores_checkin|评论分数(入住)||
# |review_scores_communication|评论分数(交流)||
# |review_scores_location|评论分数(位置)||
# |review_scores_value|评论分数(价值)||
# |requires_license|需要许可证||
# |license|许可证||
# |jurisdiction_names|辖区名称||
# |instant_bookable|即时预订||
# |cancellation_policy|取消政策||
# |require_guest_profile_picture|需要租客头像||
# |require_guest_phone_verification|需要租客手机通过验证||
# |calculated_host_listings_count|计算租客清单数量||
# |reviews_per_month|每个月的评论||
#
# Peek at the listings table and inspect one full record vertically.
df_settle_listings_raw.head()
df_settle_listings_raw.loc[0]
# ### df_settle_reviews_raw
df_settle_reviews_raw.info()
# Documentation table for the reviews columns; listing_id joins back to the
# listings table's id column (marked in 'Relation').
info_dic = {'Feature':pd.Series(['listing_id','id','date','reviewer_id','reviewer_name','comments']),
            'Meaning':pd.Series(['清单id','id','日期','审核人id','审核人名','备注']),
            'Relation':pd.Series(["df_seattle_listings_raw['id']",'PK','','','','']),
           }
df_settle_reviews_raw_info = pd.DataFrame(info_dic)
df_settle_reviews_raw_info
df_settle_reviews_raw.head()
# ## Clean Up
## available settle
# Fraction of calendar rows flagged available ('t').
df_settle_clendar_raw.loc[df_settle_clendar_raw['available']=='t'].shape[0]/df_settle_clendar_raw.shape[0]
# 67.061% house available
# +
# Error Data
# Sanity check: a row with a missing price should never be marked available.
na_price = df_settle_clendar_raw[pd.isna(df_settle_clendar_raw['price'])]
error = na_price.loc[na_price['available']=='t']
print ('There are {} error data'.format(error.shape[0]))
# +
# Parse dates and count available ('t') vs unavailable rows per month.
df_settle_clendar_raw['date'] = pd.to_datetime(df_settle_clendar_raw['date'])
df = df_settle_clendar_raw.set_index('date')
df['count'] = 1  # unit column so resample().sum() yields row counts
df_t = df[df['available']=='t']
df_f = df[df['available']!='t']
df_t_period = df_t.resample('M').sum().to_period('M')
df_t_list = df_t_period.loc[:,['count']]
df_t_list.head()
# -
df_f_period = df_f.resample('M').sum().to_period('M')
df_f_list = df_f_period.loc[:,['count']]
df_f_list.head()
df_f_list.plot(title='2016/01-2017/01 unavailable house')
df_t_list.plot(title='2016/01-2017/01 available house')
# Join priced calendar rows onto their listing metadata (calendar.listing_id -> listings.id).
merge_pd = df_settle_clendar_raw.dropna().merge(df_settle_listings_raw,left_on='listing_id',right_on='id',how='inner')
merge_pd.head()
def get_NaN_feature_names(df)->None:
    """Print the names of the columns in *df* that contain at least one NaN.

    :param df: pandas DataFrame to inspect
    :return: None (the column list is printed, not returned)
    """
    columns_with_nan = []
    for column in df.columns:
        if df[column].isnull().any():
            columns_with_nan.append(column)
    print(columns_with_nan)
# List which Seattle listings columns still contain NaNs (printed, not returned).
get_NaN_feature_names(df_settle_listings_raw)
# #### features have NaN values:
# - 'summary', 'space', 'neighborhood_overview', 'notes', 'transit', 'thumbnail_url',
# - 'medium_url', 'xl_picture_url', 'host_name', 'host_since', 'host_location', 'host_about',
# - 'host_response_time', 'host_response_rate', 'host_acceptance_rate', 'host_is_superhost',
# - 'host_thumbnail_url', 'host_picture_url', 'host_neighbourhood', 'host_listings_count',
# - 'host_total_listings_count', 'host_has_profile_pic', 'host_identity_verified', 'neighbourhood',
# - 'zipcode', 'property_type', 'bathrooms', 'bedrooms', 'beds', 'square_feet', 'weekly_price',
# - 'monthly_price', 'security_deposit', 'cleaning_fee', 'first_review', 'last_review',
# - 'review_scores_rating', 'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin',
# - 'review_scores_communication', 'review_scores_location', 'review_scores_value', 'license', 'reviews_per_month'
# ### which feature don't need to drop?
# 1. summary
# 2. space
# 3. neighborhood_overview
# 4. notes
# 5. transit
# 6. thumbnail_url
# 7. medium_url
# 8. xl_picture_url
# 9. host_since
# 10. host_about
# 11. host_response_time
# 12. host_response_rate
# 13. host_acceptance_rate
# 14. host_is_superhost
# 15. host_thumbnail_url
# 15. host_picture_url
# 16. host_listings_count
# 17. host_total_listings_count
# 18. host_has_profile_pic
# 19. host_identity_verified
# 20. neighbourhood
# 21. zipcode
# 22. weekly_price
# 23. monthly_price
# 24. reviews_per_month
#
# ### which feature need to drop or replace ?
#
# 1. host_name
# 2. host_location
# 3. host_is_superhost
# 4. property_type
# 5. bathrooms
# 6. bedrooms
# 7. beds
# 8. square_feet
# 9. security_deposit
# 10. cleaning_fee
# 11. first_review
# 12. last_review
# 13. review_scores_rating
# 14. review_scores_accuracy
# 15. review_scores_cleanliness
# 16. review_scores_checkin
# 17. review_scores_communication
# 18. review_scores_location
# 19. review_scores_value
# 20. license
# +
## replace NaN features
# host_is_superhost: encode 'f'/'t' as 0/1 and treat a missing value as "not
# a superhost". Uses np.nan — the np.NaN alias was removed in NumPy 2.0.
df_settle_listings_raw['host_is_superhost'].replace(['f', 't', np.nan], [0, 1, 0], inplace = True)
# Missing fees/rooms/deposits are interpreted as zero; fillna(0) is the
# idiomatic equivalent of replace([np.NaN], [0]).
df_settle_listings_raw['cleaning_fee'].fillna(0, inplace = True)
df_settle_listings_raw['bathrooms'].fillna(0, inplace = True)
df_settle_listings_raw['bedrooms'].fillna(0, inplace = True)
df_settle_listings_raw['security_deposit'].fillna(0, inplace = True)
## drop NaN features
# Rows missing any of these identity/review fields are discarded outright.
df_settle_listings_raw.dropna(subset=['host_name','host_location','property_type',
                                      'beds','first_review','last_review',
                                      'review_scores_rating','review_scores_accuracy','review_scores_cleanliness',
                                      'review_scores_checkin','review_scores_communication','review_scores_location',
                                      'review_scores_value'],how='any',inplace=True)
# license and square_feet are almost entirely missing, so drop the columns.
df_settle_listings_raw.drop(['license','square_feet'], axis = 1, inplace = True)
# -
# ## explore Boston data
#
# ### df_boston_airbnb_listings_raw:
# List which Boston listings columns contain NaNs before cleaning.
get_NaN_feature_names(df_boston_airbnb_listings_raw)
# #### We need to drop or replace NaN as df_settle_listings_raw do!
# drop NaN feature
# Mirror the Seattle cleanup: drop rows missing key identity/review fields and
# drop columns that are mostly empty in the Boston dump.
df_boston_airbnb_listings_raw.dropna(subset=['host_location','city','zipcode','market','property_type',
                                             'beds','first_review','last_review','review_scores_rating',
                                             'review_scores_accuracy','review_scores_cleanliness','review_scores_checkin',
                                             'review_scores_communication','review_scores_location','review_scores_value'],how='any',inplace=True)
df_boston_airbnb_listings_raw.drop(['license','jurisdiction_names','neighbourhood','neighbourhood_group_cleansed','square_feet','has_availability'], axis = 1, inplace = True)
# replace NaN feature
# Missing rooms/deposits/fees are interpreted as zero; fillna(0) replaces the
# original replace([np.NaN], [0]) (the np.NaN alias was removed in NumPy 2.0).
df_boston_airbnb_listings_raw['bathrooms'].fillna(0, inplace = True)
df_boston_airbnb_listings_raw['bedrooms'].fillna(0, inplace = True)
df_boston_airbnb_listings_raw['security_deposit'].fillna(0, inplace = True)
df_boston_airbnb_listings_raw['cleaning_fee'].fillna(0, inplace = True)
# Reviews table: count NaNs per column.
df_boston_airbnb_reviews_raw.isnull().sum()
# ## Question 1: Is there any relationship between 'price' and 'review_score_rate'?
# +
# Columns used for the price-vs-review-scores analysis.
feature_list = ['id', 'city', 'price', 'security_deposit', 'review_scores_rating',
                'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin',
                'review_scores_communication', 'review_scores_location', 'review_scores_value']
# .copy() so the in-place price conversion done later (trans_price, replace)
# operates on independent frames instead of potential views of the raw tables
# (avoids SettingWithCopyWarning without changing any values).
df_seattle = df_settle_listings_raw[feature_list].copy()
df_boston = df_boston_airbnb_listings_raw[feature_list].copy()
df_seattle.head()
# -
df_boston.head()
# +
def trans_price(df_input):
    """Convert the ``price`` and ``security_deposit`` columns from dollar
    strings such as ``"$1,000.00"`` to floats, in place.

    :param df_input: dataframe containing the two dollar-formatted columns
    :return: the same dataframe (mutated in place)
    """
    for money_col in ('price', 'security_deposit'):
        # Strip the dollar sign and thousands separators, then parse as numeric.
        cleaned = df_input[money_col].replace(r'\$', '', regex = True).replace(r',', '', regex = True)
        df_input[money_col] = pd.to_numeric(cleaned)
    return df_input
# NOTE(review): "df_settle" is a new name (typo-ish variant of df_seattle); the
# later cells still work because trans_price mutates df_seattle in place and
# returns the same frame.
df_settle = trans_price(df_seattle)
df_settle.head()
# -
df_boston = trans_price(df_boston)
df_boston.head()
## check data type
# Confirm that price/security_deposit are now numeric dtypes.
df_seattle.info()
# Total of all Seattle nightly prices (also used to scale marker sizes below).
df_sum = np.sum(df_seattle['price'])
df_sum
# +
## plot
# Scatter of price vs. overall review score; marker size is proportional to
# each listing's share of the total price mass, colors are random.
df_sum = np.sum(df_seattle['price'])
rng = np.random.RandomState(0)  # fixed seed so the coloring is reproducible
colors = rng.rand(df_seattle['price'].shape[0])
sizes = 1000000 * df_seattle['price']/df_sum
plt.scatter(df_seattle['price'], df_seattle['review_scores_rating'],marker='o',c=colors,s=sizes,alpha=0.5,cmap='viridis')
plt.xlabel('Price')
plt.ylabel('Review Scores Rating')
plt.colorbar()
plt.title('Price/Review Scores Rating Plt')
plt.show()
# -
# ### Answer Q1
# Through the above pictures about 'Price and Review Scores Rating',it shows that the house which price is higher and it's review scores rating's distribute is almost get higher at the same time.And you will find the price of house lower than $400,most of their score are between 70 and 90,which is very high.So,every penny of it,if you want to find a house with high quality, you had better use more money,it's worth it.
# ## Data Modeling Question 2
# At this session,we try to find out some patterns between 'price' and other's 'scores'.I removed columns like 'id',and 'city' from the data,because them are unnecessary.We use a loop to plot the scatter between the price and any other score features.
#
# ### Question 2:Is there any pattern between other scores with price ?
#
#remove some columns,which are not required.
# Copy first: the original ``feature_plt = feature_list`` aliased the list, so
# the two remove() calls below also stripped 'id'/'city' out of feature_list
# itself as a side effect.
feature_plt = feature_list.copy()
feature_plt.remove('id')
feature_plt.remove('city')
feature_plt
# plot
# Pairwise scatter plots of the remaining features.
# NOTE(review): the ``break`` exits the inner loop after its first iteration,
# so only adjacent pairs (f1, f1+1) are actually plotted — presumably a
# deliberate cap on the number of figures; confirm.
max_val = len(feature_plt)
for f1 in range(max_val):
    for f2 in range(f1 + 1, max_val):
        plt.scatter(df_seattle[feature_plt[f1]], df_seattle[feature_plt[f2]])
        plt.xlabel(feature_plt[f1])
        plt.ylabel(feature_plt[f2])
        plt.title('{}/{} plot'.format(feature_plt[f1], feature_plt[f2]))
        plt.show()
        break
# ### Answer Q2
#
# From the plot image,all the features are between the 'price' and 'scores', It seems like there's only price/security_deposit plot have effects with others.From the plot of price/security_deposit,it seems that not have any relationship with 'price'.Perhaps the expensive price houses were not care about the security_deposit,they face to higher level's consumers.
# ## Data Modeling Question 3
# At this section, we will try to find some pattern about the location and the price.
#
# ### Question 3:Is there any pattern between the location and the price?
# +
## from 'country','price' columns,find out some relationships
# Keep only city and price; .copy() so the in-place replace below edits an
# independent frame rather than a view (avoids SettingWithCopyWarning, same values).
df_seattle = df_seattle[['city', 'price']].copy()
df_boston = df_boston[['city', 'price']].copy()
# Normalize spelling variants of the Seattle city names ('西雅图' is Chinese for Seattle).
df_seattle['city'].replace(['Ballard, Seattle', 'Seattle ', '西雅图'],
                           ['Ballard Seattle', 'Seattle', 'Seattle'], inplace = True)
set(df_seattle['city'])
# -
## we need to transfer data,because they have different name,but means the same place.
# Fold the Boston spelling/casing variants into one canonical name each.
df_boston['city'].replace(['ALLSTON', 'Boston ', 'Boston (Charlestown)', 'Boston (Jamaica Plain)',
                           'Brighton ', 'Jamaica Plain ', 'Jamaica Plain (Boston)', 'Jamaica Plain, Boston',
                           'Jamaica Plain, MA', 'Mission Hill, Boston', 'ROXBURY CROSSING', 'Roslindale, Boston',
                           'boston', 'dorchester, boston ', 'east Boston '],
                          ['Allston', 'Boston', 'Charlestown', 'Jamaica Plain',
                           'Brighton', 'Jamaica Plain', 'Jamaica Plain', 'Jamaica Plain',
                           'Jamaica Plain', 'Mission Hill', 'Roxbury Crossing', 'Roslindale',
                           'Boston', 'Dorchester', 'East Boston'], inplace = True)
set(df_boston['city'])
# +
# One hot
# Give every distinct city name an integer id: Seattle's cities are numbered
# first, then Boston's continue the same running counter.
city_dict = {}
val = 0
for city_group in (set(df_seattle.city.values), set(df_boston.city.values)):
    for city in city_group:
        city_dict[city] = val
        val += 1
city_dict
# -
# Stack both cities' frames and map city names to their integer ids.
df_data = pd.concat([df_seattle,df_boston],ignore_index=True)
df_data.replace(city_dict,inplace=True)
df_data.head()
## plot use bar model
# NOTE(review): the 'seaborn-white' style name was removed in Matplotlib 3.6
# (renamed 'seaborn-v0_8-white') — confirm the installed matplotlib version.
plt.style.use('seaborn-white')
plt.bar(df_data['city'],df_data['price'])
plt.xlabel('City')
plt.ylabel('Price')
plt.title('City/Price Plot')
plt.show()
# #### Answer Q3
# In this question,we find that there are many expensive houses in class 3 or class 5.One of them is Seattle,the other one is the center of Boston.Both of them have a convenient transportation and high cost of living.Of course,living in the center of city,you need more money to support living expenses and rent.
| airbnb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Scalability Experiment (Section 5.3)
#
# The experiment is designed to compare the execution time of different coarsening schemes over increasingly large graphs.
#
# * For consistency, we use a regular graph of increasing size (vertices, edges) but always the same degree
# * The reduction is fixed to 0.5. The execution time will only slightly increase for larger ratios (since the problem that has to be solved becomes easier at consecutive levels where the graph is smaller)
# * If the execution time exceeds a budget (set to 100 sec), computation is skipped.
#
# The code accompanies paper [Graph reduction with spectral and cut guarantees](http://www.jmlr.org/papers/volume20/18-680/18-680.pdf) by <NAME> published at JMLR/2019 ([bibtex](http://www.jmlr.org/papers/v20/18-680.bib)).
#
# This work was kindly supported by the Swiss National Science Foundation (grant number PZ00P2 179981).
#
# 15 March 2019
#
# [<NAME>](https://andreasloukas.blog)
#
# [](https://zenodo.org/badge/latestdoi/175851068)
#
# Released under the Apache license 2.0
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# Widen the notebook container so the figures below have room.
# NOTE(review): importing display/HTML from IPython.core.display is deprecated
# in newer IPython in favour of IPython.display — confirm the pinned version.
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:80% !important; }</style>"))
# +
from graph_coarsening.coarsening_utils import *
import graph_coarsening.graph_lib as graph_lib
import graph_coarsening.graph_utils as graph_utils
import numpy as np
import scipy as sp
from scipy import io
from scipy.linalg import circulant
import time
import os
import matplotlib
import matplotlib.pylab as plt
import pygsp as gsp
from pygsp import graphs, filters
# Render pygsp plots through matplotlib rather than the default backend.
gsp.plotting.BACKEND = 'matplotlib'
# -
# Experiment parameters
# 30 graph sizes, log-spaced between 1e2 and 1e6 vertices. dtype=int (builtin)
# replaces np.int: that alias was deprecated in NumPy 1.20 and removed in 1.24,
# so the original line raises AttributeError on modern NumPy.
N_all = np.logspace(2, 6, 30, dtype=int)
methods = ['heavy_edge', 'variation_edges', 'variation_neighborhoods', 'algebraic_JC', 'affinity_GS', 'kron']
K_all = [10,80] #[10, 20, 40]
r = 0.5              # fixed reduction ratio for every run
budget = 100         # don't run anything that takes longer than this (in seconds)
n_iterations = 10    # timing repetitions averaged per (N, K, method)
deg = 10             # constant degree of the regular test graphs
algorithm = 'greedy'
n_methods = len(methods)
# print(deg*N_all/2)
# ### The actual experiment code (this will take long)
# If one needs to just see the results, skip running this part.
# +
rerun_all = False          # set True to re-run the (slow) timing experiment
rewrite_results = False    # set True to overwrite the saved .npz results
if rerun_all:
    # timings[N, K, method, iteration]; np.nan marks skipped/failed runs.
    # (np.nan replaces np.NaN — that alias was removed in NumPy 2.0.)
    timings = np.zeros((len(N_all), len(K_all), n_methods, n_iterations)) * np.nan
    skip = np.zeros(len(methods))
    for NIdx, N in enumerate(N_all):
        G = graph_lib.models(N, 'regular', k=deg)
        for KIdx, K in enumerate(K_all):
            for methodIdx,method in enumerate(methods):
                # Once a method exceeds the time budget it is skipped for all
                # larger graphs (the loop visits N in increasing order).
                if skip[methodIdx] == 1 :
                    timings[NIdx, KIdx, methodIdx] = np.nan
                    print('skipping: {}, {}, {}'.format(N, method, K))
                    continue
                timing = 0
                for iteration in range(n_iterations):
                    if method == 'kron':
                        start = time.time()
                        _, tmp = kron_coarsening(G, r=r, m=None)
                        end = time.time()
                        # Identity comparison: `is None` instead of `== None`
                        # (PEP 8; == can be overridden and misbehave).
                        if tmp is None:
                            print('kron failed... skipping')
                            continue
                    else:
                        start = time.time()
                        _, _, Call, _ = coarsen(G, K=K, r=r, max_levels=4, method=method, algorithm=algorithm)
                        end = time.time()
                        if len(Call) >= 4: print('warning: too many levels for {}, r:{}, K:{}'.format(method, r, K) )
                    timings[NIdx, KIdx, methodIdx, iteration] = end-start
                timing = np.mean(timings[NIdx, KIdx, methodIdx, :])
                skip[methodIdx] = 1 if (timing > budget) else 0
        print('N = {}, done!'.format(N))
        # Stop early once every method has blown the budget.
        if sum(skip) == len(methods): break
    if rewrite_results:
        filepath = os.path.join('..', 'results', 'experiment_scalability.npz')
        print('.. saving to "' + filepath + '"')
        np.savez(filepath, methods=methods, K_all=K_all, N_all=N_all, timings=timings, deg=deg, budget=budget)
print('done!')
# -
# ### Load results
# +
# Load the saved timings (written by the experiment cell above).
filepath = os.path.join('..', 'results', 'experiment_scalability.npz')
data = np.load(filepath)
methods, K_all, N_all, timings, deg, budget = data['methods'], data['K_all'], data['N_all'], data['timings'], data['deg'], data['budget']
# -
# ### Visualize them
#
# The produced figures are used in the paper
# +
# One figure per K: mean execution time vs. number of edges, one line per
# coarsening method, log-log axes, with the time budget drawn as a dashed line.
matplotlib.rcParams.update({'font.size': 25})
from matplotlib import cm
colors = [ cm.ocean(x) for x in np.linspace(0, 0.95, len(methods)+1)]
colors[1] = [0.8,0,0]
colors[-2] = (np.array([127, 77, 34])/255).tolist()
size = 2.7*2.7;
print('The figures are drawn in the following in order:')
for KIdx, K in enumerate(K_all):
    fig, axes = plt.subplots(1, 1, figsize=(1.618*size, size));
    # Reversed so the legend order matches the paper's figures.
    for methodIdx,method in reversed(list(enumerate(methods))):
        lineWidth = 1.5; marker = 's'
        method = method.replace('_', ' ')
        # Map each internal method name to its display label, color and line style.
        if method == 'heavy edge':
            method = 'heavy edge'
            cIdx, line, marker = 0, ':', 's'
        elif 'variation edges' in method:
            method = 'local var. (edges)'
            cIdx, line, marker, lineWidth = 2, '-', 'o', 1.5
        elif (method == 'variation neighborhoods') or (method == 'variation neighborhood'):
            method = 'local var. (neigh)'
            cIdx, line, marker, lineWidth = 1, '-', 'o', 1.5
        elif 'algebraic' in method:
            method = 'algebraic dist.'
            cIdx, line = 3, ':'
        elif 'affinity' in method:
            method = 'affinity'
            cIdx, line = 4, ':'
        elif method == 'kron':
            method = 'kron'
            cIdx, line, marker = 5, '--', 'x'
        else:
            continue
        style = line + marker
        color = colors[cIdx]
        # Average over iterations; hide points that exceeded the budget.
        # NOTE(review): np.NaN and the camel-case `lineWidth` kwarg were removed/
        # rejected by newer NumPy (2.0) / Matplotlib — confirm pinned versions.
        tmp = np.mean(timings[:,KIdx,methodIdx,:], 1)
        tmp[tmp>budget] = np.NaN
        axes.plot(N_all*deg/2, tmp, style, label='{}'.format(method), color=color, lineWidth=lineWidth, markersize=6)
    # Horizontal budget line; x-axis is the edge count M = N*deg/2.
    axes.plot(np.array([10, N_all[-1]])*deg/2, [budget, budget], 'k:')
    axes.set_xscale('log')
    axes.set_yscale('log')
    axes.set_xlabel('number of edges (M)')
    axes.set_ylabel('execution time (sec)')
    axes.set_ylim([0.02, budget+30])
    axes.set_xlim([300, N_all[-1]])
    legend0 = axes.legend(fontsize=22, loc='lower right', edgecolor=[1,1,1])
    axes.text(500, 63, 'max execution time', fontsize=21)
    axes.spines['right'].set_visible(False)
    axes.spines['top'].set_visible(False)
    fig.tight_layout()
    print('* experiment_scalability_K='+ str(K))
    # fig.savefig(os.path.join('..', 'results', 'experiment_scalability_K='+ str(K) +'.pdf'))
# -
| examples/experiment_scalability.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Docker Exercise 03
#
# This exercise we're going to look at building our own images from a Dockerfile
#
# ### Create a file named `Dockerfile` with the following contents
# %%writefile Dockerfile
FROM nginx
COPY html /usr/share/nginx/html
# ### Create a directory named `html`
#
# **Hint:** `mkdir -p` ensures that if the directory exists, you don't get an error message when creating it again.
# + language="bash"
# mkdir -p html
# -
# In the `html` directory place a file named `hello.html` with the following contents
# %%writefile html/hello.html
<html>
<body>Hello Docker World</body>
</html>
# ### Create our Docker image from our Dockerfile
#
# Use `docker build` using the `Dockerfile` in the current directory and assign it the tag `my_http`
# + language="bash"
#
# -
# ### Check that our image has been created
#
# Use `docker images`
# + language="bash"
#
# -
# You can also list image information by an image tag using `docker images <image tag>`
#
# Use `docker images my_http`
# + language="bash"
#
# -
# ### Run the newly created docker image on host port 8080
#
# **Hint:** The `my_http` image exposes (opens) port 80 in the container.
# + language="bash"
#
# -
# ### Request `hello.html`
#
# Using `curl`, request `localhost:8080/hello.html` and confirm that you see the `Hello Docker World` message.
# + language="bash"
# curl localhost:8080/hello.html
# -
# ### Find the image ID
#
# Use `docker images <image name>` to query for the `my_http` image. You can use the `-aq` to fetch just the image ID.
# + language="bash"
#
# -
# ### Attempt to remove the image
#
# Use `docker rmi <image ID>` to remove an image with a specific ID.
#
# Use the image ID from the last cell where you figured out how to find the image ID for the `my_http` image.
#
# You should expect that the command will return an error:
# ```
# Error response from daemon: conflict
# ```
# + language="bash"
#
# -
# ### Stop the container
#
# Since the image is used by `my_http`, a running container, you must first stop the running container and then you can force remove the image.
# + language="bash"
#
# -
# ### Force the image removal
#
# Use `docker rmi <image tag> --force`
# + language="bash"
#
# -
# Check the images and confirm that you force removed the image.
# + language="bash"
#
# -
# ### Resources
#
# * [Docker Reference - builder](https://docs.docker.com/engine/reference/builder/)
# * [Docker Reference - build](https://docs.docker.com/engine/reference/commandline/build/)
# * [Docker Reference - images](https://docs.docker.com/engine/reference/commandline/images/)
# * [Docker Reference - rmi](https://docs.docker.com/engine/reference/commandline/rmi/)
| doc0/Exercise03/Exercise03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="5CQ2c3-7HIBK"
# # Document Summarization
#
# The idea of document summarization is a
# bit different from keyphrase extraction or topic modeling. In this case, the end result
# is still in the form of some document, but with a few sentences based on the length we
# might want the summary to be. This is similar to an abstract or an executive summary
# in a research paper. The main objective of automated document summarization is
# to perform this summarization without involving human input, except for running
# computer programs. Mathematical and statistical models help in building and
# automating the task of summarizing documents by observing their content and context.
#
# There are two broad approaches to document summarization using automated
# techniques. They are described as follows:
# - __Extraction-based techniques:__ These methods use mathematical
# and statistical concepts like SVD to extract some key subset of the
# content from the original document such that this subset of content
# contains the core information and acts as the focal point of the entire
# document. This content can be words, phrases, or even sentences.
# The end result from this approach is a short executive summary of a
# couple of lines extracted from the original document. No new content
# is generated in this technique, hence the name extraction-based.
# - __Abstraction-based techniques:__ These methods are more complex
# and sophisticated. They leverage language semantics to create
# representations and use natural language generation (NLG)
# techniques where the machine uses knowledge bases and semantic
# representations to generate text on its own and create summaries
# just like a human would write them. Thanks to deep learning, we can
# implement these techniques easily but they require a lot of data and
# compute.
#
# We will cover extraction based methods here due to constraints of needed a lot of data + compute for abstraction based methods. But you can leverage the seq2seq models you learnt in language translation on an appropriate dataset to build deep learning based abstractive summarizers
# + [markdown] colab_type="text" id="PMpQzV7lHIBK"
# # Install necessary dependencies
# + colab={"base_uri": "https://localhost:8080/", "height": 101} colab_type="code" id="7TKqj2jxDWjN" outputId="fb3de689-67c8-4189-d77d-cb942285663f"
# Fetch the NLTK sentence/word tokenizer models and the stopword lists used below.
import nltk
nltk.download('punkt')
nltk.download('stopwords')
# + [markdown] colab_type="text" id="Kqo1pul-HIBP"
# # Get Text Document
#
# We use the description of a very popular role-playing game (RPG) Skyrim from
# Bethesda Softworks for summarization.
# + colab={} colab_type="code" id="-sSrjtPxCZeq"
# Sample document to summarize: a Wikipedia-style description of the game
# Skyrim. The raw literal keeps its original line breaks; they are normalized
# away in the next cell.
DOCUMENT = """
The Elder Scrolls V: Skyrim is an action role-playing video game developed by Bethesda Game Studios
and published by Bethesda Softworks. It is the fifth main installment in The Elder Scrolls series,
following The Elder Scrolls IV: Oblivion.
The game's main story revolves around the player character's quest to defeat Alduin the World-Eater,
a dragon who is prophesied to destroy the world. The game is set 200 years after the events of Oblivion
and takes place in the fictional province of Skyrim. Over the course of the game, the player completes
quests and develops the character by improving skills. The game continues the open-world tradition of
its predecessors by allowing the player to travel anywhere in the game world at any time, and to ignore
or postpone the main storyline indefinitely.
The team opted for a unique and more diverse open world than Oblivion's Imperial Province of Cyrodiil,
which game director and executive producer <NAME> considered less interesting by comparison.
The game was released to critical acclaim, with reviewers particularly mentioning the character advancement
and setting, and is considered to be one of the greatest video games of all time.
The Elder Scrolls V: Skyrim is an action role-playing game, playable from either a first or
third-person perspective. The player may freely roam over the land of Skyrim which is an open world
environment consisting of wilderness expanses, dungeons, cities, towns, fortresses, and villages.
Players may navigate the game world more quickly by riding horses or by utilizing a fast-travel system
which allows them to warp to previously discovered locations. The game's main quest can be completed or
ignored at the player's preference after the first stage of the quest is finished. However, some quests
rely on the main storyline being at least partially completed. Non-player characters (NPCs) populate the
world and can be interacted with in a number of ways: the player may engage them in conversation,
marry an eligible NPC, kill them or engage in a nonlethal "brawl". The player may
choose to join factions which are organized groups of NPCs — for example, the Dark Brotherhood, a band
of assassins. Each of the factions has an associated quest path to progress through. Each city and town
in the game world has jobs that the player can engage in, such as farming.
Players have the option to develop their character. At the beginning of the game, players create
their character by selecting their sex and choosing between one of several races including humans,
orcs, elves, and anthropomorphic cat or lizard-like creatures and then customizing their character's
appearance. Over the course of the game, players improve their character's skills which are numerical
representations of their ability in certain areas. There are eighteen skills divided evenly among the
three schools of combat, magic, and stealth. When players have trained skills enough to meet the
required experience, their character levels up. Health is depleted primarily when the player
takes damage and the loss of all health results in death. Magicka is depleted by the use of spells,
certain poisons and by being struck by lightning-based attacks. Stamina determines the player's
effectiveness in combat and is depleted by sprinting, performing heavy "power attacks"
and being struck by frost-based attacks. Skyrim is the first entry in The Elder Scrolls to
include dragons in the game's wilderness. Like other creatures, dragons are generated randomly in
the world and will engage in combat with NPCs, creatures and the player. Some dragons may attack
cities and towns when in their proximity. The player character can absorb the souls of dragons
in order to use powerful spells called "dragon shouts" or "Thu'um". A regeneration
period limits the player's use of shouts in gameplay.
Skyrim is set around 200 years after the events of The Elder Scrolls IV: Oblivion, although it is
not a direct sequel. The game takes place in Skyrim, a province of the Empire on the continent of
Tamriel, amid a civil war between two factions: the Stormcloaks, led by <NAME>, and the
Imperial Legion, led by General Tullius. The player character is a Dragonborn, a mortal born with
the soul and power of a dragon. Alduin, a large black dragon who returns to the land after being
lost in time, serves as the game's primary antagonist. Alduin is the first dragon created by Akatosh,
one of the series' gods, and is prophesied to destroy and consume the world.
"""
# + colab={} colab_type="code" id="-Sp4ZBphDBTy"
import re
# Collapse the multi-line literal onto a single line: newlines become spaces,
# runs of spaces shrink to one, and the ends are trimmed.
DOCUMENT = re.sub(r'\n|\r', ' ', DOCUMENT)
DOCUMENT = re.sub(r' +', ' ', DOCUMENT)
DOCUMENT = DOCUMENT.strip()
# + [markdown] colab_type="text" id="guFZ9QjsHIBT"
# # Summarization with Gensim
#
# Let’s look at an implementation of document summarization by leveraging Gensim’s
# summarization module. It is pretty straightforward.
# + colab={"base_uri": "https://localhost:8080/", "height": 154} colab_type="code" id="fJfaklbaDGm3" outputId="3ed760e5-145a-451d-ceee-f9b3e6bf3c21"
# NOTE(review): gensim.summarization was removed in gensim 4.0 — this cell
# requires gensim < 4; confirm the pinned version.
from gensim.summarization import summarize
# Extractive summary keeping ~20% of the sentences, returned as one string.
print(summarize(DOCUMENT, ratio=0.2, split=False))
# + colab={"base_uri": "https://localhost:8080/", "height": 67} colab_type="code" id="7H83_YvADI2j" outputId="f26cee81-c7da-4619-840e-fd7d4c5cba3c"
# Alternative: cap the summary length at roughly 75 words instead of a ratio.
print(summarize(DOCUMENT, word_count=75, split=False))
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="N4ObNmxqDL1N" outputId="724bcf9e-0723-4ada-984a-e4f8a87413df"
# Split the document into sentences — the units TextRank will rank below.
sentences = nltk.sent_tokenize(DOCUMENT)
len(sentences)
# + [markdown] colab_type="text" id="mgHBIH5jHIBb"
# This summarization implementation from Gensim is based on a variation of
# a popular algorithm called TextRank.
# + [markdown] colab_type="text" id="6Bb_eyoiHIBc"
# # Basic Text pre-processing
# + colab={"base_uri": "https://localhost:8080/", "height": 84} colab_type="code" id="HWTCv0YQDPYR" outputId="5a6da8ed-8a97-4061-b0dc-4c012e3ed02c"
import numpy as np
stop_words = nltk.corpus.stopwords.words('english')
def normalize_document(doc):
    """Lower-case *doc*, strip non-letter characters, and drop English stopwords.

    Parameters
    ----------
    doc : str
        Raw sentence/document text.

    Returns
    -------
    str
        Space-joined remaining tokens.
    """
    # Remove everything except ASCII letters and whitespace.
    # BUG FIX: the original passed re.I|re.A as the 4th *positional* argument
    # of re.sub, which is `count` (max substitutions), not `flags` — so only
    # the first 258 matches were removed on long documents.
    doc = re.sub(r'[^a-zA-Z\s]', '', doc, flags=re.I | re.A)
    doc = doc.lower()
    doc = doc.strip()
    # tokenize document
    tokens = nltk.word_tokenize(doc)
    # filter stopwords out of document (stop_words is the module-level NLTK list)
    filtered_tokens = [token for token in tokens if token not in stop_words]
    # re-create document from filtered tokens
    doc = ' '.join(filtered_tokens)
    return doc
normalize_corpus = np.vectorize(normalize_document)
norm_sentences = normalize_corpus(sentences)
norm_sentences[:3]
# + [markdown] colab_type="text" id="7e1PYqJwHIBe"
# # Text Representation with Feature Engineering
#
# We will be vectorizing our normalized sentences using the TF-IDF feature engineering
# scheme. We keep things simple and don’t filter out any words based on document
# frequency. But feel free to try that out and maybe even leverage n-grams as features.
# + colab={"base_uri": "https://localhost:8080/", "height": 360} colab_type="code" id="-aGDxNQyDpiY" outputId="e3adf4bc-fd1d-4c0e-e5ac-3fab58b34b6f"
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
tv = TfidfVectorizer(min_df=0., max_df=1., use_idf=True)
dt_matrix = tv.fit_transform(norm_sentences)
dt_matrix = dt_matrix.toarray()
vocab = tv.get_feature_names()
td_matrix = dt_matrix.T
print(td_matrix.shape)
pd.DataFrame(np.round(td_matrix, 2), index=vocab).head(10)
# + [markdown] colab_type="text" id="5jF5QcMhHIBt"
# # Extractive Summarization with TextRank
#
# The TextRank summarization algorithm internally uses the popular PageRank
# algorithm, which is used by Google for ranking websites and pages. This is used by the
# Google search engine when providing relevant web pages based on search queries. To
# understand TextRank better, we need to understand some of the concepts surrounding
# PageRank. The core algorithm in PageRank is a graph-based scoring or ranking
# algorithm, where pages are scored or ranked based on their importance.
#
# Websites and
# pages contain further links embedded in them which link to more pages having more
# links and this continues across the Internet. This can be represented as a graph-based
# model where vertices indicate the web pages and edges indicate links among them. This
# can be used to form a voting or recommendation system such that when one vertex links
# to another one in the graph it is basically casting a vote.
#
# Vertex importance is decided
# not only on the number of votes or edges but also the importance of the vertices that are
# connected to it and their importance.
#
# 
#
# We can see that vertex denoting Page C has a higher score than
# Page E even if it has fewer edges compared to Page E, because Page B is an important
# page connected to Page C.
#
# For textrank we will follow a similar process leveraging pagerank
#
# 
#
# - Tokenize and extract sentences from the document to be
# summarized.
# - Decide on the number of sentences, k, that we want in the final
# summary
# - Build a document-term feature matrix using weights like TF-IDF
# or Bag of Words.
# - Compute a document similarity matrix by multiplying the matrix
# by its transpose.
# - Use these documents (sentences in our case) as the vertices and
# the similarities between each pair of documents as the weight
# or score coefficient we talked about earlier and feed them to the
# PageRank algorithm.
# - Get the score for each sentence.
# - Rank the sentences based on score and return the top k sentences.
# + [markdown] colab_type="text" id="S-mNKhMsHIBv"
# # Build Similarity Matrix
# + colab={"base_uri": "https://localhost:8080/", "height": 151} colab_type="code" id="iwy27oUUFVwC" outputId="56fccc85-ff77-4adb-8315-bbf82a05b1fb"
similarity_matrix = np.matmul(dt_matrix, dt_matrix.T)
print(similarity_matrix.shape)
np.round(similarity_matrix, 3)
# + [markdown] colab_type="text" id="i2FQm7IOHIBz"
# # Build Similarity Graph
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="3XKLLMxVFZFu" outputId="f079e279-a5fa-472b-d0e4-9f799299e784"
import networkx
similarity_graph = networkx.from_numpy_array(similarity_matrix)
similarity_graph
# + colab={"base_uri": "https://localhost:8080/", "height": 357} colab_type="code" id="s8hGp-gQFbOs" outputId="8af1bb9c-101f-4843-8c57-d13c49da3020"
import matplotlib.pyplot as plt
# %matplotlib inline
plt.figure(figsize=(12, 6))
networkx.draw_networkx(similarity_graph, node_color='lime')
# + [markdown] colab_type="text" id="sWZXi1dcHIB4"
# # Get Sentence Importance Scores
# + colab={"base_uri": "https://localhost:8080/", "height": 185} colab_type="code" id="l9M9Wz2lFdej" outputId="4c927c91-e819-4fc2-ee89-43b673c5ea48"
scores = networkx.pagerank(similarity_graph)
ranked_sentences = sorted(((score, index) for index, score
in scores.items()),
reverse=True)
ranked_sentences[:10]
# + colab={} colab_type="code" id="62NO_yc0Ff5C"
num_sentences = 7
top_sentence_indices = [ranked_sentences[index][1]
for index in range(num_sentences)]
top_sentence_indices.sort()
# + colab={"base_uri": "https://localhost:8080/", "height": 134} colab_type="code" id="idmgKnTuFiyg" outputId="00e1d260-5fd3-4e83-f619-3322051e7ae1"
print('\n'.join(np.array(sentences)[top_sentence_indices]))
# -
# # Extractive Summarization with Transformers
#
# This method utilizes the HuggingFace transformers library to run extractive summarizations.
#
# This works by first embedding the sentences, then running a clustering algorithm, finding the sentences that are closest to the cluster's centroids.
#
# This library also uses coreference techniques, utilizing the https://github.com/huggingface/neuralcoref library to resolve words in summaries that need more context. The greedyness of the neuralcoref library can be tweaked in the CoreferenceHandler class.
#
# __Library Repo:__ https://github.com/dmmiller612/bert-extractive-summarizer
# __Paper:__ https://arxiv.org/abs/1906.04165
#
#
# ### Transformer Training Process (Already Pre-trained)
#
# 
#
#
# ### What is a Transformer?
#
# The Transformer, a model architecture eschewing recurrence and instead relying entirely on an attention mechanism to draw global dependencies between input and output. It is a stacked layer of encoders and decoders.
#
# 
#
# __Source:__ http://jalammar.github.io/illustrated-transformer/
#
# ### Transformer Architecture
#
# Stacked encoder - decoder architecture with multi-head attention blocks
#
# 
#
# __Source:__ https://arxiv.org/abs/1706.03762
# + colab={"base_uri": "https://localhost:8080/", "height": 910} colab_type="code" id="6aLmYA6rSYJ8" outputId="ae58cbe5-7588-4b89-d541-526d68576678"
# !pip install bert-extractive-summarizer
# -
# # Extractive Summarization with BERT
# + colab={} colab_type="code" id="jJJAWqEUU9eQ"
from summarizer import Summarizer
# + colab={"base_uri": "https://localhost:8080/", "height": 163, "referenced_widgets": ["1f401437d3404455b9dbda6581a61c79", "331add4e908b4924b9b4ed50627afb47", "5d6a111852bf42e9ba2b32a91e51ee82", "a8bf49f73e8745e8af9e2bafbf5abc7d", "3bad1165621f465a8ff02aac647bfacd", "93b627b0de0e442d826e8f6b602d8266", "c3b52cd977a34549a4bb8cad0b0d0492", "8c16fc5cabf84576b0497d8c5ec4bf5f", "b04e9d434d664bdf8fb7b6708425531a", "52552930706644788f6b75993e987dcd", "<KEY>", "<KEY>", "aa9f895e46314cee96e0516796a2fbdf", "c4665637a00445e99229b48e3b9f8a81", "<KEY>", "43d509b1e0a540268584ddb1f4acef9d", "<KEY>", "511d230f150849e1ba922314af202f20", "5b0be9e31a414a0e8cc86895ff2ea60c", "<KEY>", "dc51a4fb0e404501a904d69cb2773c33", "d69e9b5a12684b768b99c4ef5215c27a", "912e8d399880419fb6b064035be05e53", "<KEY>"]} colab_type="code" id="xp4Auu_BXtyb" outputId="11611cb0-3520-4452-f15f-658c2ca8c49c"
sm = Summarizer(model='bert-large-uncased')
# + colab={} colab_type="code" id="d7QD8rKqYQO6"
result = sm(body=DOCUMENT, ratio=0.2)
# + colab={"base_uri": "https://localhost:8080/", "height": 171} colab_type="code" id="hSl5Vl4xYqzo" outputId="8c86972b-c909-465f-97bd-1b77a282baf5"
result = '\n'.join(nltk.sent_tokenize(result))
print(result)
# -
# # Extractive Summarization with DistilBERT
# + colab={"base_uri": "https://localhost:8080/", "height": 163, "referenced_widgets": ["1ba1072e79e14c3dbd134764d3904683", "9b46ff190f0d4ad29ff9dd9b85387057", "9be8605d578b42e782eee3eef797681d", "b6c5243da2594489aac2f8c503e8ad1f", "8a153cdf4ce9469aada14217244c0379", "d6eb0da5cf6341c0b9e2f3ab4447c50e", "6495ec0c3271432295cb6665e06ae904", "cd1ec870e6354cb99f88a8a4ca80b199", "3d8fd25e762c4412bac96400a343e271", "51baa55b729e49a3a80ce3a99e5d5111", "<KEY>", "d4e0dd0584d84a698379991d895e61f8", "a102d01d5e9f463ba4577eff6d2da89c", "<KEY>", "<KEY>", "<KEY>", "2d916a60a98d40da93de42abec443f82", "a5fe63c9e98442b182a8d2aab56dd8d4", "<KEY>", "6eae2311f7b54e748979408e43deb8d7", "<KEY>", "<KEY>", "<KEY>", "<KEY>"]} colab_type="code" id="AtiMxvIuYsBL" outputId="d45db970-d1f0-4eca-d692-694d3bc5239d"
sm = Summarizer(model='distilbert-base-uncased')
# + colab={} colab_type="code" id="trI0dTLtZSCE"
result = sm(body=DOCUMENT, ratio=0.2)
# + colab={"base_uri": "https://localhost:8080/", "height": 171} colab_type="code" id="584vOKjTZoTP" outputId="60698654-2f8e-4620-dfe6-fb46a9b8066d"
result = '\n'.join(nltk.sent_tokenize(result))
print(result)
# -
# # Abstractive Summarization with BART
#
# BART: Bidirectional and Auto-Regressive Transformers
#
# BART, a denoising autoencoder for pretraining sequence-to-sequence models. BART is trained by
#
# - (1) corrupting text with an arbitrary noising function
# - (2) learning a model to reconstruct the original text.
#
# It uses a standard Transformer-based neural machine translation architecture which, despite its simplicity, can be seen as generalizing BERT (due to the bidirectional encoder), GPT (with the left-to-right decoder), and many other more recent pretraining schemes.
#
# 
#
# __Source:__ https://arxiv.org/abs/1910.13461
# + colab={} colab_type="code" id="nBBFMqrSZ5DF"
from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig
BART_PATH = 'bart-large'
# + colab={} colab_type="code" id="6ryVxfzgalPB"
bart_model = BartForConditionalGeneration.from_pretrained(BART_PATH, output_past=True)
bart_tokenizer = BartTokenizer.from_pretrained(BART_PATH, output_past=True)
# + colab={} colab_type="code" id="AXzPFvcHa9R5"
input_tokenized = bart_tokenizer.encode(DOCUMENT, return_tensors='pt')
# + colab={} colab_type="code" id="o7XQkhu0fDVX"
input_tokenized = input_tokenized.to(device)
# + colab={} colab_type="code" id="YVLonzINcaER"
summary_ids = bart_model.generate(input_tokenized,
num_beams=4,
no_repeat_ngram_size=3,
length_penalty=2.0,
min_length=30,
max_length=300,
early_stopping=True)
# + colab={} colab_type="code" id="yjYg04OEfnK7"
output = [bart_tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids]
# + colab={"base_uri": "https://localhost:8080/", "height": 154} colab_type="code" id="NZ53XCihikeO" outputId="1ffb2163-31aa-4958-e2ce-7239777ec10c"
sentences = nltk.sent_tokenize(output[0])
summary = '\n'.join(sentences[:num_sentences])
print(summary)
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="n3Hnpzs_f28u" outputId="a87ae13c-4702-4ef9-fa70-84f70b0fa652"
'''
The game's main story revolves around the player character's quest to defeat Alduin the World-Eater,
a dragon who is prophesied to destroy the world.
...
...
Alduin, a large black dragon who returns to the land after being
lost in time, serves as the game's primary antagonist. Alduin is the first dragon created by Akatosh,
one of the series' gods, and is prophesied to destroy and consume the world.
'''
| Notebooks/03_NLP Applications/08_NLP_Applications_Text_Summarization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py35]
# language: python
# name: conda-env-py35-py
# ---
# # Learning BN parameters using EM
#
# In this exercise we will work with real data on Traffic Accidents in the UK during 2015. The idea is to assume a BN structure and learn the parameters using EM.
#
# We first load the data
import numpy as np
from lib import Message_passing_BN as mp
import warnings
import pickle
warnings.filterwarnings('ignore')
from IPython.display import Image
# +
pickle_file='accident_data.pickle'
with open(pickle_file, 'rb') as f:
data= pickle.load(f)
#Labels contain the name of each variable
#Samples contain the variable values per accident
Samples=data[0]
labels=data[1]
print("%d accidents in the data base, each characterized by %d variables\n" %(Samples.shape[0],Samples.shape[1]))
print("The variables are")
print(labels)
# -
# The encoding for all variables in the data can be accessed at this [Brief guide to road accidents and safety data](https://data.gov.uk/dataset/road-accidents-safety-data/resource/394c8dac-1a7d-49b0-97ff-62df43b00f42)
#
# For simplicity, we will work with the following set of four variables:
#
# * Day_of_Week (1-Sunday, 2-Monday, ...)
#
# * Weather_Conditions
# 1 Fine no high winds
# 2 Raining no high winds
# 3 Snowing no high winds
# 4 Fine + high winds
# 5 Raining + high winds
# 6 Snowing + high winds
# 7 Fog or mist
# 8 Other
# 9 Unknown (this will be labelled as -1)
# -1 Data missing or out of range
#
# * Accident Severity
# 1 Fatal
# 2 Serious
# 3 Slight
# * Did_Police_Officer_Attend_Scene_of_Accident
# 1 Yes
# 2 No
# 3 No - accident was reported using a self completion form (self rep only)
#
# For each accident, we assume we do not observe all the variables (otherwise it does not make sense to run the EM algorithm). We now simply declare certain variables, chosen at random, as unknown. Also, we transform the data to represent indexes in the corresponding alphabet (from 0 to max-1)
# +
Samples_matrix=Samples[:,[0,4,5,6]]
#We label unknown weather as -1
Samples_matrix[Samples_matrix[:,1]==9,1]=-1
#We index from 0
Samples_matrix=Samples_matrix-1
for i in range(Samples.shape[0]):
num_hidden=np.random.randint(1,5,1)-1
index_hidden=np.random.randint(0,3,num_hidden)
Samples_matrix[i,index_hidden]=-1
# -
# # Exercise 1
#
# Use EM to train the following network
Image(filename='files/Mod_1.png')
# First step is to generate random initial CPDs
# +
v_card=[7,8,3,3] #Vector of cardinalities
CPD_d=np.random.rand(v_card[0],1)
CPD_d/=CPD_d.sum()
CPD_w=np.random.rand(v_card[1],1)
CPD_w/=CPD_w.sum()
#p(severity|day,weather)
length_table=v_card[0]*v_card[1]*v_card[2]
CPD_severity=np.zeros([length_table,1])
for i in range(v_card[0]*v_card[1]):
aux=np.random.rand(v_card[2],1)
aux/=aux.sum()
CPD_severity[i*v_card[2]:(i+1)*v_card[2]]=aux
#p(Police Attendng|Severity)
length_table=v_card[2]*v_card[3]
CPD_attending=np.zeros([length_table,1])
for i in range(v_card[2]):
aux=np.random.rand(v_card[3],1)
aux/=aux.sum()
CPD_attending[i*v_card[3]:(i+1)*v_card[3]]=aux
# -
# Now, we create the factor graph using the provided library
# +
#Variable nodes
node_day=mp.create_var_node(ID=0,cardinality=v_card[0],neighbor_order=[0,2],observed_value_index=-1)
node_weather=mp.create_var_node(ID=1,cardinality=v_card[1],neighbor_order=[1,2],observed_value_index=-1)
node_severity=mp.create_var_node(ID=2,cardinality=v_card[2],neighbor_order=[2,3],observed_value_index=-1)
node_attending=mp.create_var_node(ID=3,cardinality=v_card[3],neighbor_order=[3],observed_value_index=-1)
list_var_nodes=[node_day,node_weather,node_severity,node_attending]
#Factor nodes
list_neigh_factor=[[node_day],[node_weather],[node_day,node_weather,node_severity],[node_severity,node_attending]]
factor_0=mp.create_factor_node(ID=0,neighbors=list_neigh_factor[0],CPD=CPD_d)
factor_1=mp.create_factor_node(ID=1,neighbors=list_neigh_factor[1],CPD=CPD_w)
factor_2=mp.create_factor_node(ID=2,neighbors=list_neigh_factor[2],CPD=CPD_severity)
factor_3=mp.create_factor_node(ID=3,neighbors=list_neigh_factor[3],CPD=CPD_attending)
list_factor_nodes=[factor_0,factor_1,factor_2,factor_3]
# +
# The following function simply runs BP for a given observation
# The following function simply runs BP for a given observation
def run_BP(data_index,Samples_matrix,list_var_nodes,list_factor_nodes,BP_iterations):
    """Run loopy belief propagation for one data sample.

    Clamps each variable node to its observed value in row ``data_index`` of
    ``Samples_matrix`` (-1 encodes "unobserved"), resets all factor messages,
    then alternates factor->variable and variable->factor updates for
    ``BP_iterations`` rounds.  All message state lives inside the node/factor
    dicts, so this function returns nothing.

    NOTE(review): relies on the module-level global ``list_neigh_factor``
    built in the graph-construction cell above — confirm it is in scope
    before calling.
    """
    #We update those variables that have been observed in the data_index-th Sample
    for index,node in enumerate(list_var_nodes):
        mp.initialize_variable(node,Samples_matrix[data_index,index])
    # Reset factor-to-variable messages before every new sample.
    for index,factor_node in enumerate(list_factor_nodes):
        mp.initialize_factor_msgs(factor_node,neighbors=list_neigh_factor[index])
    #We run message passing
    for l in range(BP_iterations):
        #Factor update
        for factor_node in list_factor_nodes:
            mp.update_factor_to_var(factor_node)
        #Variable update
        for var_node in list_var_nodes:
            mp.update_var_to_factor(var_node)
# -
# Before defining the main EM Loop, we define first two intermediate functions to process the $i$-th data given the current estimate to the CPD tables. Given the observation, the goal is to compute the joint marginals
#
# p(day), p(weather), p(severity,day,weather), p(police_attending, severity)
#
# +
# The following function is used to compute joint marginals p(x_i,x_pa_i)
# The following function is used to compute joint marginals p(x_i,x_pa_i)
def compute_joint_probabilities(list_var_nodes,list_factor_nodes,v_card):
    """Compute the joint marginals needed for the E-step, from current BP messages.

    Returns a list of four normalized tables:
    ``[p(day), p(weather), p(day,weather,severity), p(severity,attending)]``.
    Must be called right after :func:`run_BP` so the factors' ``input_msgs``
    reflect the current observation.

    NOTE(review): messages are combined via ``np.exp(msg_1 + msg_2 + ...)``,
    which implies the library stores variable-to-factor messages in the log
    domain — confirm against ``Message_passing_BN``.
    """
    list_joints=[]
    #p(day) — single-variable marginal straight from the variable node
    list_joints.append(mp.compute_var_marginal(list_var_nodes[0]))
    #p(weather)
    list_joints.append(mp.compute_var_marginal(list_var_nodes[1]))
    #p(day,weather,severity): weight each CPD entry by the incoming messages
    factor=list_factor_nodes[2]
    msg_1=factor['input_msgs'][0]['table'].reshape(-1,)
    msg_2=factor['input_msgs'][1]['table'].reshape(-1,)
    msg_3=factor['input_msgs'][2]['table'].reshape(-1,)
    aux=np.zeros([factor['CPD'].shape[0],1])
    for position,cpd_value in enumerate(factor['CPD']):
        # Map flat CPD position -> (day, weather, severity) index triple.
        indexes=mp.CPD_position_to_variable_index(position,[v_card[0],v_card[1],v_card[2]],v_card[0]*v_card[1]*v_card[2])
        aux[position]+=cpd_value*np.exp(msg_1[indexes[0]]+msg_2[indexes[1]]+msg_3[indexes[2]])
    # Normalize to a proper joint distribution.
    list_joints.append(aux/np.sum(aux))
    #p(severity,attending): only the severity message is needed here
    factor=list_factor_nodes[3]
    msg_1=factor['input_msgs'][0]['table'].reshape(-1,)
    aux=np.zeros([factor['CPD'].shape[0],1])
    for position,cpd_value in enumerate(factor['CPD']):
        indexes=mp.CPD_position_to_variable_index(position,[v_card[2],v_card[3]],v_card[2]*v_card[3])
        aux[position]+=cpd_value*np.exp(msg_1[indexes[0]])
    list_joints.append(aux/np.sum(aux))
    return list_joints
# -
# ## EM main Loop
# +
# EM over the accident data: E-step accumulates expected configuration
# counts via belief propagation; M-step renormalizes them into CPDs.
EM_iterations=5
BP_iterations=7 #num_variables+1
N_samples=300 #up to Samples_matrix.shape[0]
for iteration in range(EM_iterations):
    print("EM iteration=%d" %(iteration))
    #E-Step --> Compute the Expected Number of Configurations. This is a loop over the data that could
    #be done in parallel (or even in a noisy way)
    Nd=np.zeros(CPD_d.shape)
    Nw=np.zeros(CPD_w.shape)
    Ns=np.zeros(CPD_severity.shape)
    Na=np.zeros(CPD_attending.shape)
    for i in range(N_samples):
        #We run BP with the observations for the i-th data
        run_BP(i,Samples_matrix,list_var_nodes,list_factor_nodes,BP_iterations)
        #Given MP messages, we update the counting matrices Nx (E-STEP)
        joint_probs=compute_joint_probabilities(list_var_nodes,list_factor_nodes,v_card)
        Nd+=joint_probs[0]
        Nw+=joint_probs[1]
        Ns+=joint_probs[2]
        Na+=joint_probs[3]
    #Normalization (M-STEP)
    CPD_d=Nd/np.sum(Nd)
    CPD_w=Nw/np.sum(Nw)
    # Conditional tables are normalized per parent configuration:
    # each contiguous slice of length v_card[2] (resp. v_card[3]) is one
    # conditional distribution p(child | parents=j).
    for j in range(v_card[0]*v_card[1]):
        aux=Ns[j*v_card[2]:(j+1)*v_card[2]]
        CPD_severity[j*v_card[2]:(j+1)*v_card[2]]=aux/np.sum(aux)
    for j in range(v_card[2]):
        aux=Na[j*v_card[3]:(j+1)*v_card[3]]
        CPD_attending[j*v_card[3]:(j+1)*v_card[3]]=aux/np.sum(aux)
    #Finally, update the factors so the next E-step uses the new CPDs
    factor_0['CPD']=CPD_d.reshape(CPD_d.shape[0],)
    factor_1['CPD']=CPD_w.reshape(CPD_w.shape[0],)
    factor_2['CPD']=CPD_severity.reshape(CPD_severity.shape[0],)
    factor_3['CPD']=CPD_attending.reshape(CPD_attending.shape[0],)
# -
CPD_attending
# ## Exercise 2
#
# Use EM to train the following model, which now includes a binary latent variable $Z$ that **we never observe**. The use of latent variable models is very frequent in Bayesian Reasoning and can help to reduce the number of parameters to learn, as we assume there exists an underlying latent variable that explains different variables.
Image(filename='files/Mod_2.png')
# +
## YOUR CODE HERE
# -
factor=list_factor_nodes[3]
msg=factor['input_msgs'][0]['table']
msg
| Notebooks/Exercise_EM_Traffic_Accident_Data_Solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import requests
import io
import numpy as np
from datetime import date, timedelta
import re
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
#from github import Github
#import github
import torch
import torch.nn as nn
# Import tensor dataset & data loader
from torch.utils.data import TensorDataset, DataLoader
# Import nn.functional
import torch.nn.functional as F
import torch.optim as optim
from typing import Union, Tuple
import os
import sys
import time
from collections import OrderedDict
from sklearn.preprocessing import MinMaxScaler
from statistics import mean
from sklearn.metrics import mean_absolute_error,mean_squared_error, r2_score
import math
import random
import imageio
#from sklearn.metrics import mean_absolute_percentage_error
matplotlib.style.use('seaborn')
# %matplotlib inline
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1, response_variable_index=0, number_feature=6):
    """Turn a 2-D array into sliding-window supervised samples.

    Each sample is the window ``dataset[i:i+look_back, :number_feature]``;
    its target is ``dataset[i+look_back, response_variable_index]`` (the
    value of the response column immediately after the window).

    Returns a pair ``(X, Y)`` of numpy arrays with shapes
    ``(n, look_back, number_feature)`` and ``(n,)``.
    """
    n_windows = len(dataset) - look_back - 1
    windows = [dataset[start:start + look_back, :number_feature]
               for start in range(n_windows)]
    targets = [dataset[start + look_back, response_variable_index]
               for start in range(n_windows)]
    return np.array(windows), np.array(targets)
def data_preparation(df, scaling_range=(0,1), time_step=5, number_feature=6,
                     response_variable_index=3, data_split_ratio=0.8, Suffle=True,
                     multi_feature=None):
    """Scale *df*, window it into supervised samples, and build torch loaders.

    Parameters
    ----------
    df : pandas.DataFrame or 2-D array
        Raw time-series data; cast to float32 and Min-Max scaled.
    scaling_range : tuple
        ``feature_range`` forwarded to ``MinMaxScaler``.
    time_step : int
        Window length (``look_back`` of :func:`create_dataset`).
    number_feature : int
        Number of leading columns used as input features.
    response_variable_index : int
        Column index of the prediction target.
    data_split_ratio : float
        Fraction of rows assigned to the training split.
    Suffle : bool
        Whether the training DataLoader shuffles batches (name kept for
        backward compatibility with existing callers).
    multi_feature : bool or None
        BUG FIX: the original referenced an undefined global
        ``multi_feature`` and raised NameError.  When None (default), it is
        inferred as ``number_feature > 1``; single-feature data gets a
        trailing axis of size 1 so the shape is [samples, time steps, 1].

    Returns
    -------
    (DataLoader, (Tensor, Tensor), MinMaxScaler)
        Training loader, test ``(inputs, targets)`` pair, and the fitted
        scaler (needed to invert predictions later).
    """
    if multi_feature is None:
        multi_feature = number_feature > 1
    df = df.astype('float32')
    # normalize the dataset
    scaler = MinMaxScaler(feature_range=scaling_range)
    dataset = scaler.fit_transform(df.copy())
    X, Y = create_dataset(dataset, time_step,
                          response_variable_index=response_variable_index,
                          number_feature=number_feature)
    # split into train and test sets; numpy clamps slices past the end of X,
    # so using len(dataset) as the upper bound (as the original did) is
    # equivalent to slicing to the end
    train_size = int(len(dataset) * data_split_ratio)
    trainX, testX = X[:train_size], X[train_size:]
    trainY, testY = Y[:train_size], Y[train_size:]
    print(trainX.shape)
    # reshape input to be [samples, time steps, features]
    if not multi_feature:
        trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
        testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
    X_train = trainX
    X_test = testX
    y_train = trainY.reshape(-1, 1)
    print(X_train.shape, y_train.shape)
    # wrap the training split in a shuffling DataLoader
    inputs = torch.from_numpy(X_train)
    targets = torch.from_numpy(y_train)
    train_ds = TensorDataset(inputs, targets)
    batch_size = 16
    train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=Suffle)
    # the test split is returned as a plain (inputs, targets) tensor pair
    y_test = testY.reshape(-1, 1)
    inputs = torch.from_numpy(X_test)
    targets = torch.from_numpy(y_test)
    test_ds = (inputs, targets)
    return train_loader, test_ds, scaler
| Implementation/Pytorch/data_preparation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import csv
from lightfm.data import Dataset
dataset = Dataset()
dataset.fit( (x['User-ID'] for x in csv.DictReader(open("data/aux_data_headers_2.csv"),delimiter=";")),
(x['ISBN'] for x in csv.DictReader(open("data/aux_data_headers_2.csv"),delimiter=";")))
print('ready')
# +
#num_users, num_items = dataset.interactions_shape()
#print('Num users: {}, num_items {}.'.format(num_users, num_items))
#print(dataset._user_id_mapping)
#print(dataset._item_id_mapping)
#print(dataset._user_feature_mapping)
#print(dataset._item_feature_mapping)
# +
num_users, num_items = dataset.interactions_shape()
print('Num users: {}, num_items {}.'.format(num_users, num_items))
print('ready')
# +
(interactions, weights) = dataset.build_interactions(( (x['User-ID'], x['ISBN'], float(x['Book-Rating']))
for x in csv.DictReader(open("data/aux_data_headers_2.csv"),delimiter=";" ) ))
print('ready')
# +
print(type(interactions))
print('ready')
# -
print(repr(interactions))
print(interactions.todense()[251,0])
print(interactions.todense()[8,20])
print(interactions.todense()[70,15])
print(interactions.todense()[9,5])
# +
from lightfm.lightfm import LightFM
model = LightFM(loss='bpr')
model.fit(interactions)
# +
from lightfm.evaluation import precision_at_k
from lightfm.datasets import fetch_movielens
import numpy as np
def sample_recommendation(model,data, user_ids):
    """Print known interactions and model recommendations for some users.

    NOTE(review): ``data.todense()[np.argsort(-scores)]`` indexes *rows*
    (users) of the interaction matrix by item-score order, and
    ``known_positives[:5]`` slices a 1-row dense matrix (yielding the whole
    row, not five items) — both look like demo-code shortcuts rather than
    true top-5 item lookups; verify the intended output before reuse.
    """
    n_user, n_items = data.shape #define sizes
    for user_id in user_ids:
        known_positives = data.todense()[user_id] # get the already known items
        scores = model.predict(user_id, np.arange(n_items)) # make prediction for new items
        top_items = data.todense()[np.argsort(-scores)] # sort best recommendations
        print("user %s" % user_id)
        print("     known positives:")
        for x in known_positives[:5]:
            print("         %s" % x)
        print("     recommended:")
        for x in top_items[:5]:
            print("         %s" % x)
        print("     scores:")
        # item indices in descending score order
        print("     %s"% np.argsort(-scores))
test_precision = precision_at_k(model, interactions, k=10).mean()
print('presicion: ',test_precision)
sample_recommendation(model,interactions,[4, 25, 450])
# -
| projectDS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Manufactured solution for antiplane shear problem
#
# The following manufactured solution is almost identictial to the one presented in
#
# <NAME>., and <NAME> (2014), An efficient numerical method for earthquake cycles in heterogeneous media: Alternating subbasin and surface-rupturing events on faults crossing a sedimentary basin, J. Geophys. Res. Solid Earth, 119, 3290–3316, doi:10.1002/2013JB010614.
#
# The only difference is that the shear modulus is constant.
#
# Note that the manufactured solution is going to exactly solve the following problem:
#
# $$
# \begin{aligned}
# -\frac{\partial}{\partial x_i}\left(\mu\frac{\partial u}{\partial x_i}\right) &= 0 & \text{ in } & \Omega\\
# u &= u^* & \text{ on } & \Gamma_D \\
# u &= S / 2 & \text{ on } & \Gamma_F \\
# \mu\frac{\partial u}{\partial x_i}n_i &= \sigma_nf(V,\psi) + \eta V & \text{ on } & \Gamma_F \\
# \frac{d\psi}{dt} &= g(V,\psi) + s(\vec x, t) & \text{ on } & \Gamma_F \\
# \frac{dS}{dt} &= V & \text{ on } & \Gamma_F
# \end{aligned},
# $$
# where
# $$
# \begin{aligned}
# f(V, \psi) &= a \cdot \mathrm{asinh}\left(\frac{V}{2.0 V_0} \exp\left(\frac{\psi}{a}\right)\right) \\
# g(V, \psi) &= \frac{b V_0}{L} \left(\exp\left(\frac{f_0-\psi}{b}\right) - \frac{V}{V_0}\right) \\
# s(\vec{x}, t) &= - g(V^*, \psi^*) + \frac{d\psi^*}{dt}
# \end{aligned}
# $$
from sympy import *
init_printing()
# We first define symbols and general functions, which are going to be specified later.
x, y, t = symbols('x y t')
H, L_x, t_e, t_w = symbols('H L_x t_e t_w')
V0, V_p, V_min = symbols('V0 V_p V_min')
delta, tau_inf, a, sn, eta = symbols('delta tau_inf a sn eta')
K = Function('K')(t)
phi = Function('phi')(x, y)
V_star = Function('V^*')(t)
tau_star = Function('tau^*')(t)
mu = symbols('mu')
# $u^*$ is solution for the out-of-plane displacement.
u_star = delta / 2 * K * phi + V_p / 2 * t * (1 - phi) + tau_inf / mu.subs(x, L_x) * x
u_star
# The following force term needs to be added to the right-hand-side of the Poisson problem. Due to the choice of $\phi$ and the constant shear modulus it is going to be zero.
force = - (mu * u_star.diff(x)).diff(x) - (mu * u_star.diff(y)).diff(y)
force.simplify()
# $S^*$ is the on-fault displacement.
S_star = 2 * u_star.subs(x, 0)
S_star.simplify().subs(t, 0)
# $\psi^*$ is the state variable.
psi_star = a * log((2*V0/V_star) * sinh((tau_star - eta*V_star)/(a*sn)))
psi_star
# We need the time derivative $\frac{d\psi^*}{dt}$
psi_star.diff(t).simplify()
# $\tau^*$ is the on fault traction.
tau_star = mu * u_star.diff(x).subs(x, 0)
tau_star.simplify()
# Time derivative of traction: $\frac{d \tau^*}{dt}$
tau_star.diff(t).simplify()
# Slip-rate $V^*$
V_star = S_star.diff(t)
V_star.simplify()
# Acceleration $\frac{dV^*}{dt}$
V_star.diff(t).simplify()
# The following lines define the functions $K$, $\phi$, and necessary time derivatives.
K = 1/pi * (atan((t-t_e)/t_w) + pi/2) + V_min / delta * t
K
K.diff(t)
K.diff(t).diff(t).simplify()
phi = H*(H+x) / ((H+x)**2 + y**2)
phi
phi.diff(x).simplify()
# We show that $\mathop{}\!\mathbin\bigtriangleup \phi = 0$, therefore no additional force term for the Poisson equation is required.
(phi.diff(x).diff(x) + phi.diff(y).diff(y)).simplify()
| notebooks/antiplane_shear_mms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.6 64-bit
# language: python
# name: python37664bit5432155c863f47538fac2a404dac50c9
# ---
# # Glassdoor Company Scraper
#
#
# This scraper downloads firms reviews from Glassdoor website.
# The scope is to create a small DB for research purposes.
#
# The notebook is organized with the following sections:
#
# - Setup of the env (install libraries, set up variables and credentials, ...)
# - Sign in with your credentials
# - Download of the index (with Selenium and Chrome Browser libraries)
# - Parse DOM of the web pages and download the reviews
# - Store the data on CSV files
# ### Setup of the env
#
# Install and import of python libraries
# !pip3 install selenium
# !pip3 install pprint
# !pip3 install pandas
import requests
import pprint
import pandas as pd
import time
from selenium import webdriver as wd
import selenium
import json
# Set the following variables to download data:
#
# - locations array: to download all firms from different place
# - max_page: max number of pages to index and download the firms
# - sleep_time: to be polite with glassdoor (number of seconds between different request)
#
#
#
locations = ['milano','roma']
max_page = 1
max_page_reviews = 2
sleep_time = 1
# This notebook uses Chrome Driver to simulate user interaction with glassdoor.
# To set up Chrome Driver on your laptop please refer to https://chromedriver.chromium.org/downloads
#
# The notebook is tested with
# `ChromeDriver 85.0.4183.87`
#
# Please set up `chromedriver_path` to your Chrome Driver folder.
# For example:
#
# ~~~~~
# chromedriver_path = '/Users/mauropelucchi/Downloads/chromedriver2'
# ~~~~~
chromedriver_path = '/Users/mauropelucchi/Downloads/chromedriver'
# ### Glassdoor credentials
#
# To obtain firms reviews you have to sign in to Glassdoor.
# Please provide your credentials here:
username = "******"
password = "******"
# # Sign in to Glassdoor
#
# `get_browser` method sets the browser and start Chrome Driver
#
# `sign_in` simulates the user login to glassdoor:
#
# - Click the "cookie accept button"
# - Digit your username
# - Digit your password
# - Click the login button
#
# +
# from https://github.com/MatthewChatham/glassdoor-review-scraper/blob/master/main.py
def get_browser():
    """Start a Chrome WebDriver session with console logging reduced."""
    opts = wd.ChromeOptions()
    opts.add_argument('log-level=3')
    return wd.Chrome(chromedriver_path, options=opts)
browser = get_browser()
def sign_in():
print(f'Signing in to {username}')
url = 'https://www.glassdoor.it/profile/login_input.htm'
browser.get(url)
time.sleep(4)
cookie_btn = browser.find_element_by_id('onetrust-accept-btn-handler')
cookie_btn.click()
email_field = browser.find_element_by_name('username')
password_field = browser.find_element_by_name('password')
submit_btn = browser.find_element_by_xpath('//button[@type="submit"]')
email_field.send_keys(username)
password_field.send_keys(password)
submit_btn.click()
time.sleep(1)
sign_in()
# -
# # Get firm data
#
# `get_firm_data` function gets a response and produces a dict with
#
# ~~~~
# {'company_name': ' Accenture ',
# 'link': '/Panoramica/Lavorando-in-Accenture-EI_IE4138.13,22.htm',
# 'rating': 3.8}
# ~~~~
#
#
# You can use this function to obtain the dataset of reviews for a single firm following these steps:
#
# - Set up the link to Glassdoor company page
# ~~~~~
# company_url = "https://www.glassdoor.it/Panoramica/Lavorando-in-Intesa-Sanpaolo-EI_IE10537.13,28.htm"
# ~~~~~
# - Run `get_firm_data(company_url)`
# - Store result on a csv
#
#
# For example:
# ~~~~~
# company_url = "https://www.glassdoor.it/Panoramica/Lavorando-in-Intesa-Sanpaolo-EI_IE10537.13,28.htm"
# reviews = get_firm_data(company_url)
# df = pd.DataFrame.from_dict(reviews)
# df = df.to_csv('reviews.csv')
# ~~~~~
# +
def get_firms():
doc_firms = browser.find_elements_by_class_name('eiHdrModule')
print(len(doc_firms))
my_firms = []
for d_firm in doc_firms:
my_firm = {"company_name": "", "rating": 0, "link": ""}
my_firm['company_name'] = d_firm.find_element_by_class_name("tightAll").text
try:
my_firm['rating'] = float(d_firm.find_element_by_class_name("ratingsSummary").text.replace(",","."))
except:
my_firm['rating'] = d_firm.find_element_by_class_name("ratingsSummary").text.replace(",",".")
my_firm['link'] = d_firm.find_element_by_class_name("tightAll").get_attribute('href').replace("Panoramica","Recensioni")
my_firms.append(my_firm)
my_firms_final = []
for my_firm in my_firms:
my_firm['reviews'] = get_firm_data(my_firm['link'])
my_firms_final.append(my_firm)
return my_firms_final
def get_firm_data(link, language="ita"):
reviews = []
link = link.replace("Panoramica","Recensioni")
if (not "iso3Language" in link):
print(link.replace("Panoramica","Recensioni") + "?filter.iso3Language=" + language + "&filter.employmentStatus=REGULAR&filter.employmentStatus=PART_TIME")
browser.get(link.replace("Panoramica","Recensioni") + "?filter.iso3Language=" + language + "&filter.employmentStatus=REGULAR&filter.employmentStatus=PART_TIME")
else:
print(link.replace("Panoramica","Recensioni"))
browser.get(link.replace("Panoramica","Recensioni"))
if link.endswith(".htm"):
page_link = link
print(f"Max page {max_page_reviews}")
for page_number in range(1, max_page_reviews+1):
#.replace(".htm","") + "_P" + str(page_number) + ".htm"
print(page_link)
reviews.extend(get_firm_reviews(page_link, language))
if(len(browser.find_elements_by_css_selector('[aria-label="Next"]')) > 0):
# page_link = browser.find_element_by_class_name("pagination__ArrowStyle__nextArrow").get_attribute('href')
browser.find_elements_by_css_selector('[aria-label="Next"]')[0].click()
else:
break
else:
reviews.extend(get_firm_reviews(link, language))
return reviews
def get_salary_data(link):
salaries = []
if link.endswith(".htm"):
page_link = link
print(f"Max page {max_page_reviews}")
for page_number in range(1, max_page_reviews+1):
#.replace(".htm","") + "_P" + str(page_number) + ".htm"
print(page_link)
salaries.extend(get_firm_salary(page_link))
if(len(browser.find_elements_by_class_name("pagination__ArrowStyle__nextArrow")) > 0):
page_link = browser.find_element_by_class_name("pagination__ArrowStyle__nextArrow").get_attribute('href')
else:
break
else:
salaries.extend(get_firm_salary(link))
return salaries
def get_firm_benefits(link):
benefits = []
if link.endswith(".htm"):
page_link = link
print(f"Max page {max_page_reviews}")
for page_number in range(1, max_page_reviews+1):
#.replace(".htm","") + "_P" + str(page_number) + ".htm"
print(page_link)
benefits.extend(get_firm_benefit(page_link))
if (len(browser.find_elements_by_css_selector(".next a")) > 0):
page_link = browser.find_element_by_css_selector(".next a").get_attribute('href')
else:
break
else:
benefits.extend(get_firm_benefit(link))
return benefits
def get_firm_reviews(link, language):
time.sleep(5)
reviews = []
doc_reviews = browser.find_elements_by_class_name('empReview')
for doc_rev in doc_reviews:
btn = doc_rev.find_elements_by_class_name('v2__EIReviewDetailsV2__continueReading')
try:
if (len(btn) > 0):
btn[0].click()
except:
pass
main_text = doc_rev.find_element_by_class_name('mainText').text.replace('\n',' ')
date = doc_rev.find_element_by_class_name('date').text.replace('\n',' ')
reviewer = doc_rev.find_element_by_class_name('authorInfo').text.replace('\n',' ')
benefits_obj = doc_rev.find_elements_by_css_selector('[data-test="pros"]')
drawbacks_obj = doc_rev.find_elements_by_css_selector('[data-test="cons"]')
tips_obj = doc_rev.find_elements_by_css_selector('[data-test="advice-management"]')
benefits = ""
drawbacks = ""
tips = ""
if (len(benefits_obj) > 0):
benefits = benefits_obj[0].text.replace('\n',' ')
if (len(drawbacks_obj) > 0):
drawbacks = drawbacks_obj[0].text.replace('\n',' ')
if (len(tips_obj) > 0):
tips = tips_obj[0].text.replace('\n',' ')
ratings = doc_rev.find_elements_by_css_selector(".subRatings ul li .gdBars")
balance = ratings[0].get_attribute('title') if len(ratings) > 0 else ''
culture = ratings[1].get_attribute('title') if len(ratings) > 1 else ''
opportunity = ratings[2].get_attribute('title') if len(ratings) > 2 else ''
salary = ratings[3].get_attribute('title') if len(ratings) > 3 else ''
executives = ratings[4].get_attribute('title') if len(ratings) > 4 else ''
review = {"main_text": main_text, \
"date": date, \
"reviewer": reviewer, \
"benefits": benefits, \
"drawbacks": drawbacks, \
"tips": tips, \
"balance": balance, \
"culture": culture, \
"opportunity": opportunity, \
"salary": salary, \
"executives": executives
}
reviews.append(review)
return reviews
# -
# +
def get_firm_salary(link):
browser.get(link.replace("Panoramica","Stipendi"))
time.sleep(5)
salaries = []
doc_salaries = browser.find_elements_by_css_selector('#SalariesRef > div')
for doc_rev in doc_salaries:
if (len(doc_rev.find_elements_by_css_selector('strong.d-block')) > 1):
value = doc_rev.find_elements_by_css_selector('strong.d-block')[1].text
occupation = doc_rev.find_elements_by_css_selector('.m-0')[0].text.replace('\n',' ')
salary = {"occupation": occupation, \
"value": value
}
salaries.append(salary)
return salaries
def get_firm_benefit(link):
browser.get(link.replace("Panoramica","Benefit"))
time.sleep(5)
benefits = []
doc_benefits = browser.find_elements_by_css_selector('li.benefitReview')
for doc_rev in doc_benefits:
if (len(doc_rev.find_elements_by_css_selector('.authorInfo')) > 0):
authorInfo = doc_rev.find_elements_by_css_selector('.authorInfo')[0].text.replace('\n',' ')
description = doc_rev.find_elements_by_css_selector('.description')[0].text.replace('\n',' ')
benefit = {"authorInfo": authorInfo, \
"description": description}
benefits.append(benefit)
return benefits
# -
# ## Download a list of companies by locations
# `download_index` downloads the index pages from Glassdoor and calls `get_firms` to build a list of firm with its review
# +
def download_index(location):
results = []
for page_number in range(1,max_page+1):
page_index = f"https://www.glassdoor.it/Recensioni/{location}-recensioni-SRCH_IL.0,6_IM1058_IP{page_number}.htm"
current_firms = []
print(f"Download data from {page_index} - Page {page_number}")
browser.get(page_index)
current_firms = get_firms()
results.extend(current_firms)
time.sleep(sleep_time)
return results
# -
total_firms = []
for location in locations:
total_firms.extend(download_index(location))
# Review the downloaded data:
pprint.pprint(total_firms)
# Store the data on a json file:
with open('my_data1.json', 'w') as fp:
json.dump(total_firms, fp)
# ## Download reviews of companies
#
# For example, we can apply this notebook to download reviews of major banks or hotels to select the best place where work.
#
# Here is an example to build a dataset for some companies:
#
#
# +
max_page_reviews = 5
company_url = "https://www.glassdoor.it/Panoramica/Lavorando-in-Enel-EI_IE10910.13,17.htm"
reviews_ita = get_firm_data(company_url, "ita")
reviews_eng = get_firm_data(company_url, "eng")
salaries = get_salary_data(company_url)
benefits = get_firm_benefits(company_url)
df = pd.DataFrame.from_dict(reviews_ita)
df = df.to_csv('enel_ita.csv')
df = pd.DataFrame.from_dict(reviews_eng)
df = df.to_csv('enel_eng.csv')
df_salaries = pd.DataFrame.from_dict(salaries)
df_salaries = df_salaries.to_csv('enel_salaries.csv')
df_benefits = pd.DataFrame.from_dict(benefits)
df_benefits = df_benefits.to_csv('enel_benefits.csv')
# -
company_url = "https://www.glassdoor.it/Panoramica/Lavorando-in-UniCredit-Group-EI_IE10546.13,28.htm"
reviews = get_firm_data(company_url)
df = pd.DataFrame.from_dict(reviews)
df = df.to_csv('unicredit.csv')
company_url = "https://www.glassdoor.it/Panoramica/Lavorando-in-Deutsche-Bank-EI_IE3150.13,26.htm"
reviews = get_firm_data(company_url)
df = pd.DataFrame.from_dict(reviews)
df = df.to_csv('deutsche_bank.csv')
| Glassdoor_scraping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # waveform-studies
#
# ## about
#
# waveform-studies is a project by [<NAME>](https://montoyamoraga.io/).
#
# waveform-studies is a project started on February 2020 while being a graduate student at MIT Media Lab and a research assistant with the groups Opera of the Future and Future Sketches, respectively led by <NAME> and <NAME>.
#
# waveform-studies is made using Jupyter notebooks.
#
# we will review sampling rate, speculative sound synthesis, and fundamental waveforms used in arts, along with proposals for generating new ones.
# ## study-00
#
# this is a study about sine waves in time
# import packages
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as signal
# +
# create x domain, between 0 and 4*PI
x = np.linspace(0, 4*np.pi, 256, endpoint=True)
# calculate sine wave
ySine = np.sin(x)
# create plot and show it
plt.plot(x, ySine)
plt.title('sine wave')
plt.xlabel('x')
plt.ylabel('amplitude')
plt.show()
# -
# # study-01
#
# this is a study about square waves in time
# +
# create x domain, between 0 and 4*PI
x = np.linspace(0, 4*np.pi, 256, endpoint=True)
# calculate square wave
ySquare = signal.square(5 * x)
# create plot and show it
plt.plot(x, ySquare)
plt.title('square wave')
plt.xlabel('x')
plt.ylabel('amplitude')
plt.show()
# -
# ## study-02
#
# this is a study about sawtooth waves over time
# +
# create x domain, between 0 and 4*PI
x = np.linspace(0, 4*np.pi, 256, endpoint=True)
# calculate square wave
ySawtooth = signal.sawtooth(5 * x)
# create plot and show it
plt.plot(x, ySawtooth)
plt.title('sawtooth wave')
plt.xlabel('x')
plt.ylabel('amplitude')
plt.show()
# -
# draft ideas:
# study about sine wave over time and frequency
# study about square wave over time and frequency, duty cycle
# study about triangle wave over time and frequency
# study about sawtooth wave over time and frequency
# study about wave scaling over time
# study about squishing sine waves and duty cycles
| waveform-studies.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Custom Sequences (Part 1)
# We'll focus first on how to create a custom sequence type that supports indexing, slicing (read only) and iteration. We'll look into mutable custom sequences in an upcoming video.
# First we should understand how the `__getitem__` method works for iteration and retrieving individual elements from a sequence:
my_list = [0, 1, 2, 3, 4, 5]
my_list.__getitem__(0)
my_list.__getitem__(5)
# But if our index is out of bounds:
my_list.__getitem__(6)
# we get an IndexError.
# Technically, the `list` object's `__getitem__` method also supports negative indexing and slicing:
my_list.__getitem__(-1)
my_list.__getitem__(slice(0,6,2))
my_list.__getitem__(slice(None, None, -1))
# #### Mimicking Python's `for` loop using the `__getitem__` method
my_list = [0, 1, 2, 3, 4, 5]
for item in my_list:
print(item ** 2)
# Now let's do the same thing ourselves without a for loop:
index = 0
while True:
try:
item = my_list.__getitem__(index)
except IndexError:
# reached the end of the sequence
break
# do something with the item...
print(item ** 2)
index += 1
# #### Implementing a custom Sequence
# Custom objects can support slicing - we'll see this later in this course, but for now we'll take a quick peek ahead.
#
# To make a custom classes support indexing (and slicing) we only need to implement the `__getitem__` method which receives the index (or slice) we are interested in.
class MySequence:
def __getitem__(self, index):
print(type(index), index)
my_seq = MySequence()
my_seq[0]
my_seq[100]
my_seq[0:2]
my_seq[0:10:2]
# As you can see, the `__getitem__` method receives an index number of type `int` when we use `[n]` and a `slice` object when we use `[i:j]` or `[i:j:k]`.
# As we saw in a previous lecture, given the bounds for a slice, and the length of the sequence we are slicing, we can always define a `range` that will generate the desired indices.
#
# We also saw that the `slice` object has a method, `indices`, that precisely tells us the start/stop/step values we would need for an equivalent `range`, given the length of the sequence we are slicing.
#
# Let's recall a simple example first:
l = 'python'
len(l)
s = slice(0, 6, 2)
l[s]
s.start, s.stop, s.step
s.indices(6)
list(range(0, 6, 2))
# This matches exactly the indices that were selected from the sequence `'python'`
# ### Example
# So, why am I re-emphasizing this equivalence between the indices in a `slice` and an equivalent `range` object?
# Let's say we want to implement our own sequence type and we want to support slicing.
# For this example we'll create a custom Fibonacci sequence type.
# First recall that the `__getitem__` will receive either an integer (for simple indexing), or a slice object:
class Fib:
def __getitem__(self, s):
print(type(s), s)
f = Fib()
f[2]
f[2:10:2]
# We'll use that to implement both indexing and slicing for our custom Fibonacci sequence type.
# We'll make our sequence type bounded (i.e. we'll have to specify the size of the sequence). But we are not going to pre-generate the entire sequence of Fibonacci numbers, we'll only generate the ones that are being requested as needed.
class Fib:
def __init__(self, n):
self._n = n
def __getitem__(self, s):
if isinstance(s, int):
# single item requested
print(f'requesting [{s}]')
else:
# slice being requested
print(f'requesting [{s.start}:{s.stop}:{s.step}]')
f = Fib(10)
f[3]
f[:5]
# Let's now add in what the equivalent range would be:
class Fib:
def __init__(self, n):
self._n = n
def __getitem__(self, s):
if isinstance(s, int):
# single item requested
print(f'requesting [{s}]')
else:
# slice being requested
print(f'requesting [{s.start}:{s.stop}:{s.step}]')
idx = s.indices(self._n)
rng = range(*idx)
print(f'\trange({idx[0]}, {idx[1]}, {idx[2]}) --> {list(rng)}')
f = Fib(10)
f[3:5]
f[::-1]
# Next step is for us to actually calculate the n-th Fibonacci number, we'll use memoization as well (see lecture on decorators and memoization if you need to refresh your memory on that):
from functools import lru_cache
@lru_cache(2**10)
def fib(n):
if n < 2:
return 1
else:
return fib(n-1) + fib(n-2)
fib(0), fib(1), fib(2), fib(3), fib(4), fib(5), fib(50)
# Now, let's make this function part of our class:
class Fib:
def __init__(self, n):
self._n = n
def __getitem__(self, s):
if isinstance(s, int):
# single item requested
print(f'requesting [{s}]')
else:
# slice being requested
print(f'requesting [{s.start}:{s.stop}:{s.step}]')
idx = s.indices(self._n)
rng = range(idx[0], idx[1], idx[2])
print(f'\trange({idx[0]}, {idx[1]}, {idx[2]}) --> {list(rng)}')
@staticmethod
@lru_cache(2**32)
def _fib(n):
if n < 2:
return 1
else:
return fib(n-1) + fib(n-2)
# The next step is to implement the `__getitem__` method. Let's start by implementing the simple indexing:
class Fib:
def __init__(self, n):
self._n = n
def __getitem__(self, s):
if isinstance(s, int):
# single item requested
return self._fib(s)
else:
# slice being requested
print(f'requesting [{s.start}:{s.stop}:{s.step}]')
idx = s.indices(self._n)
rng = range(idx[0], idx[1], idx[2])
print(f'\trange({idx[0]}, {idx[1]}, {idx[2]}) --> {list(rng)}')
@staticmethod
@lru_cache(2**32)
def _fib(n):
if n < 2:
return 1
else:
return fib(n-1) + fib(n-2)
# Let's test that out:
f = Fib(100)
f[0], f[1], f[2], f[3], f[4], f[5], f[50]
# But we still have a few problems.
#
# First we do not handle negative values, and we also will return results for indices that should technically be out of bounds, so we can't really iterate through this sequence yet as we would end up with an infinite iteration!
f[200], f[-5]
# So we first need to raise an `IndexError` exception when the index is out of bounds, and we also need to remap negative indices (for example `-1` should correspond to the last element of the sequence, and so on)
class Fib:
def __init__(self, n):
self._n = n
def __getitem__(self, s):
if isinstance(s, int):
# single item requested
if s < 0:
s = self._n + s
if s < 0 or s > self._n - 1:
raise IndexError
return self._fib(s)
else:
# slice being requested
print(f'requesting [{s.start}:{s.stop}:{s.step}]')
idx = s.indices(self._n)
rng = range(idx[0], idx[1], idx[2])
print(f'\trange({idx[0]}, {idx[1]}, {idx[2]}) --> {list(rng)}')
@staticmethod
@lru_cache(2**32)
def _fib(n):
if n < 2:
return 1
else:
return fib(n-1) + fib(n-2)
f = Fib(10)
f[9], f[-1]
f[10]
f[-100]
for item in f:
print(item)
# We still don't support slicing though...
f[0:2]
# So let's implement slicing as well:
class Fib:
def __init__(self, n):
self._n = n
def __getitem__(self, s):
if isinstance(s, int):
# single item requested
if s < 0:
s = self._n + s
if s < 0 or s > self._n - 1:
raise IndexError
return self._fib(s)
else:
# slice being requested
idx = s.indices(self._n)
rng = range(idx[0], idx[1], idx[2])
return [self._fib(n) for n in rng]
@staticmethod
@lru_cache(2**32)
def _fib(n):
if n < 2:
return 1
else:
return fib(n-1) + fib(n-2)
f = Fib(10)
f[0:5]
f[5::-1]
list(f)
f[::-1]
# One other thing, is that the built-in `len` function will not work with our class:
f = Fib(10)
len(f)
# That's an easy fix, we just need to implement the `__len__` method:
class Fib:
def __init__(self, n):
self._n = n
def __len__(self):
return self._n
def __getitem__(self, s):
if isinstance(s, int):
# single item requested
if s < 0:
s = self._n + s
if s < 0 or s > self._n - 1:
raise IndexError
return self._fib(s)
else:
# slice being requested
idx = s.indices(self._n)
rng = range(idx[0], idx[1], idx[2])
return [self._fib(n) for n in rng]
@staticmethod
@lru_cache(2**32)
def _fib(n):
if n < 2:
return 1
else:
return fib(n-1) + fib(n-2)
f = Fib(10)
len(f)
# One thing I want to point out here: we did not need to use inheritance! There was no need to inherit from another sequence type. All we really needed was to implement the `__getitem__` and `__len__` methods.
# The other thing I want to mention, is that I would not use recursion for production purposes for a Fibonacci sequence, even with memoization - partly because of the cost of recursion and the limit to the recursion depth that is possible.
#
# Also, when we look at generators, and more particularly generator expressions, we'll see better ways of doing this as well.
#
# I really wanted to show you a simple example of how to create your own sequence types.
| python-tuts/1-intermediate/01 - Sequences/06 - Custom Sequences - Part 1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="M_kWbS_Etv8p" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="85d7ab03-bddc-4d65-ad27-a0847c1bd6af"
# !pip install -i https://test.pypi.org/simple/ Vampyr-MTL-Max-JJ==0.0.5
# + id="8X3cHmFBuZFx" colab_type="code" colab={}
from Vampyr_MTL import functions
import numpy as np
import math
from scipy import linalg
import plotly.express as px
# + id="dUT71azDugzz" colab_type="code" colab={}
clus_var = 900
task_var = 16
nois_var = 150
clus_num = 2
clus_task_num = 10
task_num = clus_num * clus_task_num
sample_size = 100
dimension = 20
comm_dim = 2
clus_dim = math.floor((dimension - comm_dim)/2)
# generate cluster model
cluster_weight = np.random.randn(dimension, clus_num)* clus_var
for i in range(clus_num):
bll = np.random.permutation(range(dimension-clus_num))<=clus_dim
blc = np.array([False]*clus_num)
bll = np.hstack((bll, blc))
cluster_weight[:,i][bll]=0
cluster_weight[-1-comm_dim:, :]=0
W = np.tile(cluster_weight, (1, clus_task_num))
cluster_index = np.tile(range(clus_num), (1, clus_task_num)).T
# generate task and intra-cluster variance
W_it = np.random.randn(dimension, task_num) * task_var
for i in range(task_num):
bll = np.hstack(((W[:-1-comm_dim+1,i]==0).reshape(1,-1), np.zeros((1,comm_dim))==1))
W_it[:,i][bll.flatten()]=0
W = W+W_it
W = W + np.random.randn(dimension, task_num)*nois_var
X = [0]*task_num
Y = [0]*task_num
for i in range(task_num):
X[i] = np.random.randn(sample_size, dimension)
xw = X[i] @ W[:,i]
s= xw.shape
xw = xw + np.random.randn(s[0]) * nois_var
Y[i] = np.sign(xw)
# + id="40-Sq38Zu-HZ" colab_type="code" colab={}
from Vampyr_MTL.functions.MTL_Cluster_Least_L21 import MTL_Cluster_Least_L21
from Vampyr_MTL.evaluations.utils import opts as op
# + id="ojUwdBqvukqx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="1a79479a-cd41-40a5-f01d-b0e1f046033c"
opts = op(1500,2)
clf = MTL_Cluster_Least_L21(opts,2)
clf.fit(X, Y)
corr = clf.analyse()
# + id="zJ1_PvWC4y-k" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="acfd785e-4eb6-418b-dee6-94e074578c6e"
fig = px.imshow(corr, color_continuous_scale='Bluered_r')
fig.update_layout(
title={
'text': "predict",
})
fig.show()
OrderedTrueModel = np.zeros(W.shape)
clus_task_num = task_num//clus_num
for i in range(clus_num):
clusModel = W[:, i:task_num:clus_num]
OrderedTrueModel[:, (i)*clus_task_num: (i+1)* clus_task_num] = clusModel
corr2 = 1-np.corrcoef(OrderedTrueModel)
fig2 = px.imshow(corr2, color_continuous_scale='Bluered_r')
fig2.update_layout(
title={
'text': "real",
})
fig2.show()
# + id="813EhfVk9LYr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="42c6a76d-ca60-4e74-92f8-ade8605199df"
fig = px.imshow(clf.get_weights(), color_continuous_scale='Bluered_r')
fig.update_layout(
title={
'text': "predict",
})
fig.show()
fig2 = px.imshow(W, color_continuous_scale='Bluered_r')
fig2.update_layout(
title={
'text': "real",
})
fig2.show()
| MD_MTL/example/CMTL_L21_examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W2D4_DynamicNetworks/W2D4_Tutorial3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# -
# # Bonus Tutorial: Extending the Wilson-Cowan Model
# **Week 2, Day 4: Dynamic Networks**
#
# **By Neuromatch Academy**
#
# __Content creators:__ <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# __Content reviewers:__ <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# __Content editors:__
#
# __Production editors:__ <NAME>
# **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**
#
# <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>
# ---
# # Tutorial Objectives
# In the previous tutorial, you became familiar the **Wilson-Cowan** rate model. Here we will dive into some deeper analyses of this model.
#
# Bonus steps:
#
# - Find and plot the **fixed points** of the Wilson-Cowan model.
# - Investigate the stability of the Wilson-Cowan model by linearizing its dynamics and examining the **Jacobian matrix**.
# - Learn how the Wilson-Cowan model can reach an oscillatory state.
#
# Applications of Wilson-Cowan model:
# - Visualize the behavior of an Inhibition-stabilized network.
# - Simulate working memory using the Wilson-Cowan model.
#
# \\
# Reference paper:
#
# _[<NAME> and <NAME> (1972) Excitatory and inhibitory interactions in localized populations of model neurons. Biophysical Journal 12](https://doi.org/10.1016/S0006-3495(72)86068-5)_
# ---
# # Setup
# + cellView="both"
# Imports
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt # root-finding algorithm
# + cellView="form"
# @title Figure Settings
import ipywidgets as widgets # interactive display
# %config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# + cellView="form"
# @title Plotting Functions
def plot_FI_inverse(x, a, theta):
f, ax = plt.subplots()
ax.plot(x, F_inv(x, a=a, theta=theta))
ax.set(xlabel="$x$", ylabel="$F^{-1}(x)$")
def plot_FI_EI(x, FI_exc, FI_inh):
plt.figure()
plt.plot(x, FI_exc, 'b', label='E population')
plt.plot(x, FI_inh, 'r', label='I population')
plt.legend(loc='lower right')
plt.xlabel('x (a.u.)')
plt.ylabel('F(x)')
plt.show()
def my_test_plot(t, rE1, rI1, rE2, rI2):
plt.figure()
ax1 = plt.subplot(211)
ax1.plot(pars['range_t'], rE1, 'b', label='E population')
ax1.plot(pars['range_t'], rI1, 'r', label='I population')
ax1.set_ylabel('Activity')
ax1.legend(loc='best')
ax2 = plt.subplot(212, sharex=ax1, sharey=ax1)
ax2.plot(pars['range_t'], rE2, 'b', label='E population')
ax2.plot(pars['range_t'], rI2, 'r', label='I population')
ax2.set_xlabel('t (ms)')
ax2.set_ylabel('Activity')
ax2.legend(loc='best')
plt.tight_layout()
plt.show()
def plot_nullclines(Exc_null_rE, Exc_null_rI, Inh_null_rE, Inh_null_rI):
plt.figure()
plt.plot(Exc_null_rE, Exc_null_rI, 'b', label='E nullcline')
plt.plot(Inh_null_rE, Inh_null_rI, 'r', label='I nullcline')
plt.xlabel(r'$r_E$')
plt.ylabel(r'$r_I$')
plt.legend(loc='best')
plt.show()
def my_plot_nullcline(pars):
Exc_null_rE = np.linspace(-0.01, 0.96, 100)
Exc_null_rI = get_E_nullcline(Exc_null_rE, **pars)
Inh_null_rI = np.linspace(-.01, 0.8, 100)
Inh_null_rE = get_I_nullcline(Inh_null_rI, **pars)
plt.plot(Exc_null_rE, Exc_null_rI, 'b', label='E nullcline')
plt.plot(Inh_null_rE, Inh_null_rI, 'r', label='I nullcline')
plt.xlabel(r'$r_E$')
plt.ylabel(r'$r_I$')
plt.legend(loc='best')
def my_plot_vector(pars, my_n_skip=2, myscale=5):
EI_grid = np.linspace(0., 1., 20)
rE, rI = np.meshgrid(EI_grid, EI_grid)
drEdt, drIdt = EIderivs(rE, rI, **pars)
n_skip = my_n_skip
plt.quiver(rE[::n_skip, ::n_skip], rI[::n_skip, ::n_skip],
drEdt[::n_skip, ::n_skip], drIdt[::n_skip, ::n_skip],
angles='xy', scale_units='xy', scale=myscale, facecolor='c')
plt.xlabel(r'$r_E$')
plt.ylabel(r'$r_I$')
def my_plot_trajectory(pars, mycolor, x_init, mylabel):
pars = pars.copy()
pars['rE_init'], pars['rI_init'] = x_init[0], x_init[1]
rE_tj, rI_tj = simulate_wc(**pars)
plt.plot(rE_tj, rI_tj, color=mycolor, label=mylabel)
plt.plot(x_init[0], x_init[1], 'o', color=mycolor, ms=8)
plt.xlabel(r'$r_E$')
plt.ylabel(r'$r_I$')
def my_plot_trajectories(pars, dx, n, mylabel):
"""
Solve for I along the E_grid from dE/dt = 0.
Expects:
pars : Parameter dictionary
dx : increment of initial values
n : n*n trjectories
mylabel : label for legend
Returns:
figure of trajectory
"""
pars = pars.copy()
for ie in range(n):
for ii in range(n):
pars['rE_init'], pars['rI_init'] = dx * ie, dx * ii
rE_tj, rI_tj = simulate_wc(**pars)
if (ie == n-1) & (ii == n-1):
plt.plot(rE_tj, rI_tj, 'gray', alpha=0.8, label=mylabel)
else:
plt.plot(rE_tj, rI_tj, 'gray', alpha=0.8)
plt.xlabel(r'$r_E$')
plt.ylabel(r'$r_I$')
def plot_complete_analysis(pars):
plt.figure(figsize=(7.7, 6.))
# plot example trajectories
my_plot_trajectories(pars, 0.2, 6,
'Sample trajectories \nfor different init. conditions')
my_plot_trajectory(pars, 'orange', [0.6, 0.8],
'Sample trajectory for \nlow activity')
my_plot_trajectory(pars, 'm', [0.6, 0.6],
'Sample trajectory for \nhigh activity')
# plot nullclines
my_plot_nullcline(pars)
# plot vector field
EI_grid = np.linspace(0., 1., 20)
rE, rI = np.meshgrid(EI_grid, EI_grid)
drEdt, drIdt = EIderivs(rE, rI, **pars)
n_skip = 2
plt.quiver(rE[::n_skip, ::n_skip], rI[::n_skip, ::n_skip],
drEdt[::n_skip, ::n_skip], drIdt[::n_skip, ::n_skip],
angles='xy', scale_units='xy', scale=5., facecolor='c')
plt.legend(loc=[1.02, 0.57], handlelength=1)
plt.show()
def plot_fp(x_fp, position=(0.02, 0.1), rotation=0):
plt.plot(x_fp[0], x_fp[1], 'ko', ms=8)
plt.text(x_fp[0] + position[0], x_fp[1] + position[1],
f'Fixed Point1=\n({x_fp[0]:.3f}, {x_fp[1]:.3f})',
horizontalalignment='center', verticalalignment='bottom',
rotation=rotation)
# + cellView="form"
# @title Helper functions
def default_pars(**kwargs):
pars = {}
# Excitatory parameters
pars['tau_E'] = 1. # Timescale of the E population [ms]
pars['a_E'] = 1.2 # Gain of the E population
pars['theta_E'] = 2.8 # Threshold of the E population
# Inhibitory parameters
pars['tau_I'] = 2.0 # Timescale of the I population [ms]
pars['a_I'] = 1.0 # Gain of the I population
pars['theta_I'] = 4.0 # Threshold of the I population
# Connection strength
pars['wEE'] = 9. # E to E
pars['wEI'] = 4. # I to E
pars['wIE'] = 13. # E to I
pars['wII'] = 11. # I to I
# External input
pars['I_ext_E'] = 0.
pars['I_ext_I'] = 0.
# simulation parameters
pars['T'] = 50. # Total duration of simulation [ms]
pars['dt'] = .1 # Simulation time step [ms]
pars['rE_init'] = 0.2 # Initial value of E
pars['rI_init'] = 0.2 # Initial value of I
# External parameters if any
for k in kwargs:
pars[k] = kwargs[k]
# Vector of discretized time points [ms]
pars['range_t'] = np.arange(0, pars['T'], pars['dt'])
return pars
def F(x, a, theta):
"""
Population activation function, F-I curve
Args:
x : the population input
a : the gain of the function
theta : the threshold of the function
Returns:
f : the population activation response f(x) for input x
"""
# add the expression of f = F(x)
f = (1 + np.exp(-a * (x - theta)))**-1 - (1 + np.exp(a * theta))**-1
return f
def dF(x, a, theta):
"""
Derivative of the population activation function.
Args:
x : the population input
a : the gain of the function
theta : the threshold of the function
Returns:
dFdx : Derivative of the population activation function.
"""
dFdx = a * np.exp(-a * (x - theta)) * (1 + np.exp(-a * (x - theta)))**-2
return dFdx
def F_inv(x, a, theta):
"""
Args:
x : the population input
a : the gain of the function
theta : the threshold of the function
Returns:
F_inverse : value of the inverse function
"""
# Calculate Finverse (ln(x) can be calculated as np.log(x))
F_inverse = -1/a * np.log((x + (1 + np.exp(a * theta))**-1)**-1 - 1) + theta
return F_inverse
def get_E_nullcline(rE, a_E, theta_E, wEE, wEI, I_ext_E, **other_pars):
"""
Solve for rI along the rE from drE/dt = 0.
Args:
rE : response of excitatory population
a_E, theta_E, wEE, wEI, I_ext_E : Wilson-Cowan excitatory parameters
Other parameters are ignored
Returns:
rI : values of inhibitory population along the nullcline on the rE
"""
# calculate rI for E nullclines on rI
rI = 1 / wEI * (wEE * rE - F_inv(rE, a_E, theta_E) + I_ext_E)
return rI
def get_I_nullcline(rI, a_I, theta_I, wIE, wII, I_ext_I, **other_pars):
  """
  Solve for rE along the I nullcline (drI/dt = 0) at the given rI values.

  Args:
    rI : response of the inhibitory population
    a_I, theta_I, wIE, wII, I_ext_I : Wilson-Cowan inhibitory parameters
    Other keyword arguments are accepted and ignored (enables `**pars` calls)

  Returns:
    rE : excitatory activity along the I nullcline at each rI
  """
  # Setting drI/dt = 0 gives rI = F(wIE*rE - wII*rI + I_ext_I), i.e.
  # F_inv(rI) = wIE*rE - wII*rI + I_ext_I; rearranged for rE:
  return 1 / wIE * (wII * rI + F_inv(rI, a_I, theta_I) - I_ext_I)
def EIderivs(rE, rI,
             tau_E, a_E, theta_E, wEE, wEI, I_ext_E,
             tau_I, a_I, theta_I, wIE, wII, I_ext_I,
             **other_pars):
  """Time derivatives for E/I variables (dE/dt, dI/dt) at state (rE, rI)."""
  # Net synaptic drive to each population.
  input_E = wEE * rE - wEI * rI + I_ext_E
  input_I = wIE * rE - wII * rI + I_ext_I

  # Each rate relaxes towards the activation produced by its drive.
  drEdt = (-rE + F(input_E, a_E, theta_E)) / tau_E
  drIdt = (-rI + F(input_I, a_I, theta_I)) / tau_I

  return drEdt, drIdt
def simulate_wc(tau_E, a_E, theta_E, tau_I, a_I, theta_I,
                wEE, wEI, wIE, wII, I_ext_E, I_ext_I,
                rE_init, rI_init, dt, range_t, **other_pars):
  """
  Integrate the Wilson-Cowan equations with the forward Euler method.

  Args:
    Parameters of the Wilson-Cowan model (time constants, gains, thresholds,
    connection weights, external drives, initial rates, time step and grid);
    extra keyword arguments are ignored so `simulate_wc(**pars)` works.

  Returns:
    rE, rI (arrays) : Activity of excitatory and inhibitory populations
  """
  # Pre-allocate the activity traces and set the initial conditions.
  n_steps = range_t.size
  rE = np.zeros(n_steps)
  rI = np.zeros(n_steps)
  rE[0] = rE_init
  rI[0] = rI_init

  # One external-drive value per time step (also accepts an already
  # time-varying drive array of length n_steps).
  I_ext_E = I_ext_E * np.ones(n_steps)
  I_ext_I = I_ext_I * np.ones(n_steps)

  # Forward-Euler update of both populations.
  for step in range(n_steps - 1):
    input_E = wEE * rE[step] - wEI * rI[step] + I_ext_E[step]
    input_I = wIE * rE[step] - wII * rI[step] + I_ext_I[step]
    rE[step + 1] = rE[step] + dt / tau_E * (-rE[step] + F(input_E, a_E, theta_E))
    rI[step + 1] = rI[step] + dt / tau_I * (-rI[step] + F(input_I, a_I, theta_I))

  return rE, rI
# -
# The helper functions included:
#
# - Parameter dictionary: `default_pars(**kwargs)`. You can use:
# - `pars = default_pars()` to get all the parameters, and then you can execute `print(pars)` to check these parameters.
# - `pars = default_pars(T=T_sim, dt=time_step)` to set a different simulation time and time step
# - After `pars = default_pars()`, use `par['New_para'] = value` to add a new parameter with its value
# - Pass to functions that accept individual parameters with `func(**pars)`
# - F-I curve: `F(x, a, theta)`
# - Derivative of the F-I curve: `dF(x, a, theta)`
# - Inverse of F-I curve: `F_inv`
# - Nullcline calculations: `get_E_nullcline`, `get_I_nullcline`
# - Derivatives of E/I variables: `EIderivs`
# - Simulate the Wilson-Cowan model: `simulate_wc`
# ---
# # Section 1: Fixed points, stability analysis, and limit cycles in the Wilson-Cowan model
#
# *Correction to video: this is now the first part of the second bonus tutorial, not the last part of the second tutorial*
# + cellView="form"
# @title Video 1: Fixed points and their stability
from ipywidgets import widgets

# Bilibili mirror of the lecture video, rendered inside an Output widget.
out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  class BiliVideo(IFrame):
    # Thin IFrame wrapper that builds the Bilibili player URL from a video id.
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)
  video = BiliVideo(id="BV1Pf4y1d7dx", width=854, height=480, fs=1)
  print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
  display(video)

# YouTube version of the same video.
out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id="jIx26iQ69ps", width=854, height=480, fs=1, rel=0)
  print('Video available at https://youtube.com/watch?v=' + video.id)
  display(video)

# Present both players as tabs, YouTube first.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
# As in Tutorial 2, we will be looking at the Wilson-Cowan model, with coupled equations representing the dynamics of the excitatory or inhibitory population:
#
# \begin{align}
# \tau_E \frac{dr_E}{dt} &= -r_E + F_E(w_{EE}r_E -w_{EI}r_I + I^{\text{ext}}_E;a_E,\theta_E)\\
# \tau_I \frac{dr_I}{dt} &= -r_I + F_I(w_{IE}r_E -w_{II}r_I + I^{\text{ext}}_I;a_I,\theta_I) \qquad (1)
# \end{align}
#
# $r_E(t)$ represents the average activation (or firing rate) of the excitatory population at time $t$, and $r_I(t)$ the activation (or firing rate) of the inhibitory population. The parameters $\tau_E$ and $\tau_I$ control the timescales of the dynamics of each population. Connection strengths are given by: $w_{EE}$ (E $\rightarrow$ E), $w_{EI}$ (I $\rightarrow$ E), $w_{IE}$ (E $\rightarrow$ I), and $w_{II}$ (I $\rightarrow$ I). The terms $w_{EI}$ and $w_{IE}$ represent connections from inhibitory to excitatory population and vice versa, respectively. The transfer functions (or F-I curves) $F_E(x;a_E,\theta_E)$ and $F_I(x;a_I,\theta_I)$ can be different for the excitatory and the inhibitory populations.
#
# ## Section 1.1: Fixed Points of the E/I system
#
# The intersection points of the two nullcline curves are the fixed points of the Wilson-Cowan model in Equation $(1)$.
#
# In the next exercise, we will find the coordinate of all fixed points for a given set of parameters.
#
# We'll make use of two functions, similar to ones we saw in Tutorial 1, which use a root-finding algorithm to find the fixed points of the system with Excitatory and Inhibitory populations.
# + cellView="form"
# @markdown Execute to visualize nullclines
# Set parameters
pars = default_pars()
# Grids of rE / rI values over which each nullcline is evaluated
Exc_null_rE = np.linspace(-0.01, 0.96, 100)
Inh_null_rI = np.linspace(-.01, 0.8, 100)
# Compute nullclines
Exc_null_rI = get_E_nullcline(Exc_null_rE, **pars)
Inh_null_rE = get_I_nullcline(Inh_null_rI, **pars)
# Plot both nullclines in the (rE, rI) phase plane
plot_nullclines(Exc_null_rE, Exc_null_rI, Inh_null_rE, Inh_null_rI)
# + cellView="form"
# @markdown *Execute the cell to define `my_fp` and `check_fp`*
def my_fp(pars, rE_init, rI_init):
  """
  Find a fixed point of the Wilson-Cowan system near (rE_init, rI_init),
  using scipy's root finder on the right-hand side of the equations.
  """
  # Unpack the model parameters needed by the right-hand side.
  tau_E, a_E, theta_E = pars['tau_E'], pars['a_E'], pars['theta_E']
  tau_I, a_I, theta_I = pars['tau_I'], pars['a_I'], pars['theta_I']
  wEE, wEI = pars['wEE'], pars['wEI']
  wIE, wII = pars['wIE'], pars['wII']
  I_ext_E, I_ext_I = pars['I_ext_E'], pars['I_ext_I']

  def wc_rhs(state):
    # Right-hand side of the Wilson-Cowan equations at candidate (rE, rI).
    rE, rI = state
    drEdt = (-rE + F(wEE * rE - wEI * rI + I_ext_E, a_E, theta_E)) / tau_E
    drIdt = (-rI + F(wIE * rE - wII * rI + I_ext_I, a_I, theta_I)) / tau_I
    return np.array([drEdt, drIdt])

  # A fixed point is a root of the right-hand side.
  return opt.root(wc_rhs, np.array([rE_init, rI_init])).x
def check_fp(pars, x_fp, mytol=1e-6):
  """
  Verify that x_fp is a fixed point: (drE/dt)^2 + (drI/dt)^2 < mytol.

  Args:
    pars  : parameter dictionary
    x_fp  : candidate fixed point (rE, rI)
    mytol : tolerance on the squared speed, default 10^{-6}

  Returns:
    True when the derivatives at x_fp are numerically zero, else False
  """
  drEdt, drIdt = EIderivs(x_fp[0], x_fp[1], **pars)
  speed_squared = drEdt**2 + drIdt**2
  return speed_squared < mytol

help(my_fp)
# -
# ### Coding Exercise 1.1: Find the fixed points of the Wilson-Cowan model
#
# From the above nullclines, we notice that the system features three fixed points with the parameters we used. To find their coordinates, we need to choose proper initial value to give to the `opt.root` function inside of the function `my_fp` we just defined, since the algorithm can only find fixed points in the vicinity of the initial value.
#
# In this exercise, you will use the function `my_fp` to find each of the fixed points by varying the initial values. Note that you can choose the values near the intersections of the nullclines as the initial values to calculate the fixed points.
# +
pars = default_pars()
######################################################################
# TODO: Provide initial values to calculate the fixed points
# Check that each x_fp is a true fixed point with check_fp(pars, x_fp)
# Hint: vary the initial values to locate each of the three fixed points
raise NotImplementedError('student exercise: find fixed points')
######################################################################
my_plot_nullcline(pars)
# Find the first fixed point (replace the `...` with rE/rI initial guesses)
x_fp_1 = my_fp(pars, ..., ...)
if check_fp(pars, x_fp_1):
  plot_fp(x_fp_1)
# Find the second fixed point
x_fp_2 = my_fp(pars, ..., ...)
if check_fp(pars, x_fp_2):
  plot_fp(x_fp_2)
# Find the third fixed point
x_fp_3 = my_fp(pars, ..., ...)
if check_fp(pars, x_fp_3):
  plot_fp(x_fp_3)
# + cellView="both"
# to_remove solution
pars = default_pars()

with plt.xkcd():
  my_plot_nullcline(pars)

  # Find the first fixed point: start near the low-activity intersection
  x_fp_1 = my_fp(pars, 0.1, 0.1)
  if check_fp(pars, x_fp_1):
    plot_fp(x_fp_1)

  # Find the second fixed point: start near the middle intersection
  x_fp_2 = my_fp(pars, 0.3, 0.3)
  if check_fp(pars, x_fp_2):
    plot_fp(x_fp_2)

  # Find the third fixed point: start near the high-activity intersection
  x_fp_3 = my_fp(pars, 0.8, 0.6)
  if check_fp(pars, x_fp_3):
    plot_fp(x_fp_3)
# -
# ## Section 1.2: Stability of a fixed point and eigenvalues of the Jacobian Matrix
#
# First, let's rewrite the system $(1)$ as:
#
# \begin{align}
# &\frac{dr_E}{dt} = G_E(r_E,r_I)\\[0.5mm]
# &\frac{dr_I}{dt} = G_I(r_E,r_I)
# \end{align}
# where
#
# \begin{align}
# &G_E(r_E,r_I) = \frac{1}{\tau_E} [-r_E + F_E(w_{EE}r_E -w_{EI}r_I + I^{\text{ext}}_E;a,\theta)]\\[1mm]
# &G_I(r_E,r_I) = \frac{1}{\tau_I} [-r_I + F_I(w_{IE}r_E -w_{II}r_I + I^{\text{ext}}_I;a,\theta)]
# \end{align}
#
# By definition, $\displaystyle\frac{dr_E}{dt}=0$ and $\displaystyle\frac{dr_I}{dt}=0$ at each fixed point. Therefore, if the initial state is exactly at the fixed point, the state of the system will not change as time evolves.
#
# However, if the initial state deviates slightly from the fixed point, there are two possibilities:
#
# 1. The trajectory will be attracted back to the fixed point
# 2. The trajectory will diverge from the fixed point.
#
# These two possibilities define the type of fixed point, i.e., stable or unstable. Similar to the 1D system studied in the previous tutorial, the stability of a fixed point $(r_E^*, r_I^*)$ can be determined by linearizing the dynamics of the system (can you figure out how?). The linearization will yield a matrix of first-order derivatives called the Jacobian matrix:
#
# \begin{equation}
# J=
# \left[ {\begin{array}{cc}
# \displaystyle{\frac{\partial}{\partial r_E}}G_E(r_E^*, r_I^*) & \displaystyle{\frac{\partial}{\partial r_I}}G_E(r_E^*, r_I^*)\\[1mm]
# \displaystyle\frac{\partial}{\partial r_E} G_I(r_E^*, r_I^*) & \displaystyle\frac{\partial}{\partial r_I}G_I(r_E^*, r_I^*) \\
# \end{array} } \right] \quad (7)
# \end{equation}
#
# \\
#
# The eigenvalues of the Jacobian matrix calculated at the fixed point will determine whether it is a stable or unstable fixed point.
#
# \\
#
# We can now compute the derivatives needed to build the Jacobian matrix. Using the chain and product rules the derivatives for the excitatory population are given by:
#
# \\
#
# \begin{align}
# &\frac{\partial}{\partial r_E} G_E(r_E^*, r_I^*) = \frac{1}{\tau_E} [-1 + w_{EE} F_E'(w_{EE}r_E^* -w_{EI}r_I^* + I^{\text{ext}}_E;\alpha_E, \theta_E)] \\[1mm]
# &\frac{\partial}{\partial r_I} G_E(r_E^*, r_I^*)= \frac{1}{\tau_E} [-w_{EI} F_E'(w_{EE}r_E^* -w_{EI}r_I^* + I^{\text{ext}}_E;\alpha_E, \theta_E)]
# \end{align}
#
# \\
#
# The same applies to the inhibitory population.
# ### Coding Exercise 1.2: Compute the Jacobian Matrix for the Wilson-Cowan model
#
# Here, you can use `dF(x,a,theta)` defined in the `Helper functions` to calculate the derivative of the F-I curve.
# +
def get_eig_Jacobian(fp,
                     tau_E, a_E, theta_E, wEE, wEI, I_ext_E,
                     tau_I, a_I, theta_I, wIE, wII, I_ext_I, **other_pars):
  """Compute eigenvalues of the Wilson-Cowan Jacobian matrix at fixed point.

  Args:
    fp : fixed point (rE*, rI*)
    Remaining arguments are Wilson-Cowan parameters; extras are ignored.

  Returns:
    evals : the two eigenvalues of the 2x2 Jacobian evaluated at fp
  """
  # Initialization
  rE, rI = fp
  J = np.zeros((2, 2))
  ###########################################################################
  # TODO for students: compute J and disable the error
  raise NotImplementedError("Student exercise: compute the Jacobian matrix")
  ###########################################################################
  # Compute the four elements of the Jacobian matrix (Equations (8)-(11))
  J[0, 0] = ...
  J[0, 1] = ...
  J[1, 0] = ...
  J[1, 1] = ...
  # Compute and return the eigenvalues
  evals = np.linalg.eig(J)[0]
  return evals
# Compute eigenvalues of the Jacobian at each fixed point found above
eig_1 = get_eig_Jacobian(x_fp_1, **pars)
eig_2 = get_eig_Jacobian(x_fp_2, **pars)
eig_3 = get_eig_Jacobian(x_fp_3, **pars)
print(eig_1, 'Stable point')
print(eig_2, 'Unstable point')
print(eig_3, 'Stable point')
# + cellView="both"
# to_remove solution
def get_eig_Jacobian(fp,
                     tau_E, a_E, theta_E, wEE, wEI, I_ext_E,
                     tau_I, a_I, theta_I, wIE, wII, I_ext_I, **other_pars):
  """Compute eigenvalues of the Wilson-Cowan Jacobian matrix at fixed point.

  Args:
    fp : fixed point (rE*, rI*)
    Remaining arguments are Wilson-Cowan parameters; extras are ignored.

  Returns:
    evals : the two eigenvalues of the 2x2 Jacobian evaluated at fp
  """
  # Initialization
  rE, rI = fp
  J = np.zeros((2, 2))

  # Compute the four elements of the Jacobian matrix, Equations (8)-(11):
  # row 0 differentiates G_E, row 1 differentiates G_I.
  J[0, 0] = (-1 + wEE * dF(wEE * rE - wEI * rI + I_ext_E,
                           a_E, theta_E)) / tau_E

  J[0, 1] = (-wEI * dF(wEE * rE - wEI * rI + I_ext_E,
                       a_E, theta_E)) / tau_E

  J[1, 0] = (wIE * dF(wIE * rE - wII * rI + I_ext_I,
                      a_I, theta_I)) / tau_I

  J[1, 1] = (-1 - wII * dF(wIE * rE - wII * rI + I_ext_I,
                           a_I, theta_I)) / tau_I

  # Compute and return the eigenvalues
  evals = np.linalg.eig(J)[0]
  return evals

# Compute eigenvalues of the Jacobian at each fixed point found above
eig_1 = get_eig_Jacobian(x_fp_1, **pars)
eig_2 = get_eig_Jacobian(x_fp_2, **pars)
eig_3 = get_eig_Jacobian(x_fp_3, **pars)

print(eig_1, 'Stable point')
print(eig_2, 'Unstable point')
print(eig_3, 'Stable point')
# -
# As is evident, the stable fixed points correspond to the negative eigenvalues, while unstable point corresponds to at least one positive eigenvalue.
# The sign of the eigenvalues is determined by the connectivity (interaction) between excitatory and inhibitory populations.
#
# Below we investigate the effect of $w_{EE}$ on the nullclines and the eigenvalues of the dynamical system.
#
# \* _Critical change is referred to as **pitchfork bifurcation**_.
# ## Section 1.3: Effect of `wEE` on the nullclines and the eigenvalues
# ### Interactive Demo 1.3: Nullclines position in the phase plane changes with parameter values
#
# How do the nullclines move for different values of the parameter $w_{EE}$? What does this mean for fixed points and system activity?
# + cellView="form"
# @title
# @markdown Make sure you execute this cell to enable the widget!
def plot_nullcline_diffwEE(wEE):
  """
  Plot nullclines and two sample E/I simulations for a given value of wEE.

  Args:
    wEE : recurrent excitatory-to-excitatory connection weight
  """
  pars = default_pars(wEE=wEE)
  # plot the E, I nullclines
  Exc_null_rE = np.linspace(-0.01, .96, 100)
  Exc_null_rI = get_E_nullcline(Exc_null_rE, **pars)
  Inh_null_rI = np.linspace(-.01, .8, 100)
  Inh_null_rE = get_I_nullcline(Inh_null_rI, **pars)
  plt.figure(figsize=(12, 5.5))
  # Left half of the figure: phase plane with both nullclines.
  plt.subplot(121)
  plt.plot(Exc_null_rE, Exc_null_rI, 'b', label='E nullcline')
  plt.plot(Inh_null_rE, Inh_null_rI, 'r', label='I nullcline')
  plt.xlabel(r'$r_E$')
  plt.ylabel(r'$r_I$')
  plt.legend(loc='best')
  # Top-right panel: simulation from the first initial condition.
  plt.subplot(222)
  pars['rE_init'], pars['rI_init'] = 0.2, 0.2
  rE, rI = simulate_wc(**pars)
  plt.plot(pars['range_t'], rE, 'b', label='E population', clip_on=False)
  plt.plot(pars['range_t'], rI, 'r', label='I population', clip_on=False)
  plt.ylabel('Activity')
  plt.legend(loc='best')
  plt.ylim(-0.05, 1.05)
  plt.title('E/I activity\nfor different initial conditions',
            fontweight='bold')
  # Bottom-right panel: simulation from a second initial condition.
  plt.subplot(224)
  pars['rE_init'], pars['rI_init'] = 0.4, 0.1
  rE, rI = simulate_wc(**pars)
  plt.plot(pars['range_t'], rE, 'b', label='E population', clip_on=False)
  plt.plot(pars['range_t'], rI, 'r', label='I population', clip_on=False)
  plt.xlabel('t (ms)')
  plt.ylabel('Activity')
  plt.legend(loc='best')
  plt.ylim(-0.05, 1.05)
  plt.tight_layout()
  plt.show()
_ = widgets.interact(plot_nullcline_diffwEE, wEE=(6., 10., .01))
# +
# to_remove explanation
"""
- For low values of wEE there is only one fixed point and it is stable so initial
conditions do not matter and the system always converges to the only fixed point
- For high values of wEE we have three fixed points of which two are stable and
one is unstable (or saddle). Now it matters where the initial conditions are. If
the initial conditions are in the attractor region of the high activity fixed
point then the system will converge to that (the bottom example).
""";
# -
# We can also investigate the effect of different $w_{EI}$, $w_{IE}$, $w_{II}$, $\tau_{E}$, $\tau_{I}$, and $I_{E}^{\text{ext}}$ on the stability of fixed points. In addition, we can also consider the perturbation of the parameters of the gain curve $F(\cdot)$.
# ## Section 1.4: Limit cycle - Oscillations
#
# For some values of interaction terms ($w_{EE}, w_{IE}, w_{EI}, w_{II}$), the eigenvalues can become complex. When at least one pair of eigenvalues is complex, oscillations arise.
# The stability of oscillations is determined by the real part of the eigenvalues (+ve real part oscillations will grow, -ve real part oscillations will die out). The size of the complex part determines the frequency of oscillations.
#
# For instance, if we use a different set of parameters, $w_{EE}=6.4$, $w_{EI}=4.8$, $w_{IE}=6.$, $w_{II}=1.2$, and $I_{E}^{\text{ext}}=0.8$, then we shall observe that the E and I population activity start to oscillate! Please execute the cell below to check the oscillatory behavior.
# + cellView="form"
# @title
# @markdown Make sure you execute this cell to see the oscillations!
# Parameter set that produces oscillatory E/I activity (see text above).
pars = default_pars(T=100.)
pars['wEE'], pars['wEI'] = 6.4, 4.8
pars['wIE'], pars['wII'] = 6.0, 1.2
pars['I_ext_E'] = 0.8
pars['rE_init'], pars['rI_init'] = 0.25, 0.25

rE, rI = simulate_wc(**pars)
# Plot the E and I population time courses.
plt.figure(figsize=(8, 5.5))
plt.plot(pars['range_t'], rE, 'b', label=r'$r_E$')
plt.plot(pars['range_t'], rI, 'r', label=r'$r_I$')
plt.xlabel('t (ms)')
plt.ylabel('Activity')
plt.legend(loc='best')
plt.show()
# -
# We can also understand the oscillations of the population behavior using the phase plane. By plotting a set of trajectories with different initial states, we can see that these trajectories will move in a circle instead of converging to a fixed point. This circle is called "limit cycle" and shows the periodic oscillations of the $E$ and $I$ population behavior under some conditions.
#
# Let's plot the phase plane using the previously defined functions.
# + cellView="form"
# @markdown Execute to visualize phase plane
# Same oscillatory parameter set as the previous cell.
pars = default_pars(T=100.)
pars['wEE'], pars['wEI'] = 6.4, 4.8
pars['wIE'], pars['wII'] = 6.0, 1.2
pars['I_ext_E'] = 0.8

plt.figure(figsize=(7, 5.5))
my_plot_nullcline(pars)

# Find the correct fixed point
x_fp_1 = my_fp(pars, 0.8, 0.8)
if check_fp(pars, x_fp_1):
  plot_fp(x_fp_1, position=(0, 0), rotation=40)

# Overlay trajectories from several initial conditions plus the vector field.
my_plot_trajectories(pars, 0.2, 3,
                     'Sample trajectories \nwith different initial values')

my_plot_vector(pars)

plt.legend(loc=[1.01, 0.7])
plt.xlim(-0.05, 1.01)
plt.ylim(-0.05, 0.65)
plt.show()
# -
# ### Interactive Demo 1.4: Limit cycle and oscillations.
#
# From the above examples, the change of model parameters changes the shape of the nullclines and, accordingly, the behavior of the $E$ and $I$ populations from steady fixed points to oscillations. However, the shape of the nullclines is unable to fully determine the behavior of the network. The vector field also matters. To demonstrate this, here, we will investigate the effect of time constants on the population behavior. By changing the inhibitory time constant $\tau_I$, the nullclines do not change, but the network behavior changes substantially from steady state to oscillations with different frequencies.
#
# Such a dramatic change in the system behavior is referred to as a **bifurcation**.
#
# \\
# Please execute the code below to check this out.
# + cellView="form"
# @title
# @markdown Make sure you execute this cell to enable the widget!
def time_constant_effect(tau_i=0.5):
  """
  Plot the phase plane (nullclines, fixed point, trajectories, vector field)
  and a sample E/I simulation for a given inhibitory time constant tau_i.
  """
  pars = default_pars(T=100.)
  pars['wEE'], pars['wEI'] = 6.4, 4.8
  pars['wIE'], pars['wII'] = 6.0, 1.2
  pars['I_ext_E'] = 0.8

  pars['tau_I'] = tau_i

  # Nullclines do not depend on the time constants, only on the weights/drives.
  Exc_null_rE = np.linspace(0.0, .9, 100)
  Inh_null_rI = np.linspace(0.0, .6, 100)

  Exc_null_rI = get_E_nullcline(Exc_null_rE, **pars)
  Inh_null_rE = get_I_nullcline(Inh_null_rI, **pars)

  plt.figure(figsize=(12.5, 5.5))

  plt.subplot(121)  # nullclines
  plt.plot(Exc_null_rE, Exc_null_rI, 'b', label='E nullcline', zorder=2)
  plt.plot(Inh_null_rE, Inh_null_rI, 'r', label='I nullcline', zorder=2)
  plt.xlabel(r'$r_E$')
  plt.ylabel(r'$r_I$')

  # fixed point
  x_fp_1 = my_fp(pars, 0.5, 0.5)
  plt.plot(x_fp_1[0], x_fp_1[1], 'ko', zorder=2)

  # NOTE(review): eigenvalues are computed but not displayed anywhere.
  eig_1 = get_eig_Jacobian(x_fp_1, **pars)

  # trajectories from a 5x5 grid of initial conditions
  for ie in range(5):
    for ii in range(5):
      pars['rE_init'], pars['rI_init'] = 0.1 * ie, 0.1 * ii
      rE_tj, rI_tj = simulate_wc(**pars)
      plt.plot(rE_tj, rI_tj, 'k', alpha=0.3, zorder=1)

  # vector field (every n_skip-th point of a 20x20 grid)
  EI_grid_E = np.linspace(0., 1.0, 20)
  EI_grid_I = np.linspace(0., 0.6, 20)
  rE, rI = np.meshgrid(EI_grid_E, EI_grid_I)
  drEdt, drIdt = EIderivs(rE, rI, **pars)
  n_skip = 2
  plt.quiver(rE[::n_skip, ::n_skip], rI[::n_skip, ::n_skip],
             drEdt[::n_skip, ::n_skip], drIdt[::n_skip, ::n_skip],
             angles='xy', scale_units='xy', scale=10, facecolor='c')
  plt.title(r'$\tau_I=$'+'%.1f ms' % tau_i)

  plt.subplot(122)  # sample E/I trajectories
  pars['rE_init'], pars['rI_init'] = 0.25, 0.25
  rE, rI = simulate_wc(**pars)
  plt.plot(pars['range_t'], rE, 'b', label=r'$r_E$')
  plt.plot(pars['range_t'], rI, 'r', label=r'$r_I$')

  plt.xlabel('t (ms)')
  plt.ylabel('Activity')
  plt.title(r'$\tau_I=$'+'%.1f ms' % tau_i)
  plt.legend(loc='best')
  plt.tight_layout()
  plt.show()
_ = widgets.interact(time_constant_effect, tau_i=(0.2, 3, .1))
# -
# Both $\tau_E$ and $\tau_I$ feature in the Jacobian of the two population network (eq 7). So here it seems that by increasing $\tau_I$ the eigenvalues corresponding to the stable fixed point are becoming complex.
#
# Intuitively, when $\tau_I$ is smaller, inhibitory activity changes faster than excitatory activity. As inhibition exceeds a certain value, the high inhibition suppresses the excitatory population, but that in turn means that the inhibitory population gets smaller input (from the exc. connection). So inhibition decreases rapidly. But this means that excitation recovers -- and so on ...
# ---
# # Section 2: Inhibition-stabilized network (ISN)
#
# ## Section 2.1: Inhibition-stabilized network
#
# As described above, one can obtain the linear approximation around the fixed point as
#
# \begin{equation}
# \frac{d}{dr} \vec{R}=
# \left[ {\begin{array}{cc}
# \displaystyle{\frac{\partial G_E}{\partial r_E}} & \displaystyle{\frac{\partial G_E}{\partial r_I}}\\[1mm]
# \displaystyle\frac{\partial G_I}{\partial r_E} & \displaystyle\frac{\partial G_I}{\partial r_I} \\
# \end{array} } \right] \vec{R},
# \end{equation}
#
# \\
#
#
# where $\vec{R} = [r_E, r_I]^{\rm T}$ is the vector of the E/I activity.
#
# Let's direct our attention to the excitatory subpopulation which follows:
#
# \\
#
#
# \begin{equation}
# \frac{dr_E}{dt} = \frac{\partial G_E}{\partial r_E}\cdot r_E + \frac{\partial G_E}{\partial r_I} \cdot r_I
# \end{equation}
#
# \\
#
# Recall that, around fixed point $(r_E^*, r_I^*)$:
#
# \\
#
# \begin{align}
# &\frac{\partial}{\partial r_E}G_E(r_E^*, r_I^*) = \frac{1}{\tau_E} [-1 + w_{EE} F'_{E}(w_{EE}r_E^* -w_{EI}r_I^* + I^{\text{ext}}_E; \alpha_E, \theta_E)] \qquad (8)\\[1mm]
# &\frac{\partial}{\partial r_I}G_E(r_E^*, r_I^*) = \frac{1}{\tau_E} [-w_{EI} F'_{E}(w_{EE}r_E^* -w_{EI}r_I^* + I^{\text{ext}}_E; \alpha_E, \theta_E)] \qquad (9)\\[1mm]
# &\frac{\partial}{\partial r_E}G_I(r_E^*, r_I^*) = \frac{1}{\tau_I} [w_{IE} F'_{I}(w_{IE}r_E^* -w_{II}r_I^* + I^{\text{ext}}_I; \alpha_I, \theta_I)] \qquad (10)\\[1mm]
# &\frac{\partial}{\partial r_I}G_I(r_E^*, r_I^*) = \frac{1}{\tau_I} [-1-w_{II} F'_{I}(w_{IE}r_E^* -w_{II}r_I^* + I^{\text{ext}}_I; \alpha_I, \theta_I)] \qquad (11)
# \end{align} \\
#
#
# From Equation (9), it is clear that $\displaystyle{\frac{\partial G_E}{\partial r_I}}$ is negative since the $\displaystyle{\frac{dF}{dx}}$ is always positive. It can be understood by that the recurrent inhibition from the inhibitory activity ($I$) can reduce the excitatory ($E$) activity. However, as described above, $\displaystyle{\frac{\partial G_E}{\partial r_E}}$ has negative terms related to the "leak" effect, and positive term related to the recurrent excitation. Therefore, it leads to two different regimes:
#
# - $\displaystyle{\frac{\partial}{\partial r_E}G_E(r_E^*, r_I^*)}<0$, **noninhibition-stabilized
# network (non-ISN) regime**
#
# - $\displaystyle{\frac{\partial}{\partial r_E}G_E(r_E^*, r_I^*)}>0$, **inhibition-stabilized
# network (ISN) regime**
# ### Coding Exercise 2.1: Compute $\displaystyle{\frac{\partial G_E}{\partial r_E}}$
# Implement the function to calculate the $\displaystyle{\frac{\partial G_E}{\partial r_E}}$ for the default parameters, and the parameters of the limit cycle case.
# +
def get_dGdE(fp, tau_E, a_E, theta_E, wEE, wEI, I_ext_E, **other_pars):
  """
  Compute dG_E/dr_E at a fixed point (the J[0, 0] Jacobian element).

  Args:
    fp : fixed point (E, I), array
    Other arguments are parameters of the Wilson-Cowan model

  Returns:
    dGdrE : derivative of G_E with respect to r_E, evaluated at fp
  """
  rE, rI = fp
  ##########################################################################
  # TODO for students: compute dGdrE and disable the error
  raise NotImplementedError("Student excercise: compute the dG/dE, Eq. (13)")
  ##########################################################################
  # Calculate the J[0,0]
  dGdrE = ...
  return dGdrE
# Get fixed points (initial guesses near each nullcline intersection)
pars = default_pars()
x_fp_1 = my_fp(pars, 0.1, 0.1)
x_fp_2 = my_fp(pars, 0.3, 0.3)
x_fp_3 = my_fp(pars, 0.8, 0.6)
# Compute dGdE at each fixed point
dGdrE1 = get_dGdE(x_fp_1, **pars)
dGdrE2 = get_dGdE(x_fp_2, **pars)
dGdrE3 = get_dGdE(x_fp_3, **pars)
print(f'For the default case:')
print(f'dG/drE(fp1) = {dGdrE1:.3f}')
print(f'dG/drE(fp2) = {dGdrE2:.3f}')
print(f'dG/drE(fp3) = {dGdrE3:.3f}')
print('\n')
# Repeat for the oscillatory (limit cycle) parameter set
pars = default_pars(wEE=6.4, wEI=4.8, wIE=6.0, wII=1.2, I_ext_E=0.8)
x_fp_lc = my_fp(pars, 0.8, 0.8)
dGdrE_lc = get_dGdE(x_fp_lc, **pars)
print('For the limit cycle case:')
print(f'dG/drE(fp_lc) = {dGdrE_lc:.3f}')
# +
# to_remove solution
def get_dGdE(fp, tau_E, a_E, theta_E, wEE, wEI, I_ext_E, **other_pars):
  """
  Compute dG_E/dr_E at a fixed point (the J[0, 0] Jacobian element).

  Args:
    fp : fixed point (E, I), array
    Other arguments are parameters of the Wilson-Cowan model

  Returns:
    dGdrE : derivative of G_E with respect to r_E, evaluated at fp
  """
  rE, rI = fp

  # Calculate the J[0,0] entry, Equation (8): leak term plus recurrent
  # excitation scaled by the F-I curve slope at the fixed point.
  dGdrE = (-1 + wEE * dF(wEE * rE - wEI * rI + I_ext_E, a_E, theta_E)) / tau_E

  return dGdrE

# Get fixed points (initial guesses near each nullcline intersection)
pars = default_pars()
x_fp_1 = my_fp(pars, 0.1, 0.1)
x_fp_2 = my_fp(pars, 0.3, 0.3)
x_fp_3 = my_fp(pars, 0.8, 0.6)

# Compute dGdE at each fixed point
dGdrE1 = get_dGdE(x_fp_1, **pars)
dGdrE2 = get_dGdE(x_fp_2, **pars)
dGdrE3 = get_dGdE(x_fp_3, **pars)

print(f'For the default case:')
print(f'dG/drE(fp1) = {dGdrE1:.3f}')
print(f'dG/drE(fp2) = {dGdrE2:.3f}')
print(f'dG/drE(fp3) = {dGdrE3:.3f}')

print('\n')

# Repeat for the oscillatory (limit cycle) parameter set
pars = default_pars(wEE=6.4, wEI=4.8, wIE=6.0, wII=1.2, I_ext_E=0.8)
x_fp_lc = my_fp(pars, 0.8, 0.8)
dGdrE_lc = get_dGdE(x_fp_lc, **pars)

print('For the limit cycle case:')
print(f'dG/drE(fp_lc) = {dGdrE_lc:.3f}')
# -
# **SAMPLE OUTPUT**
# ```
# For the default case:
# dG/drE(fp1) = -0.650
# dG/drE(fp2) = 1.519
# dG/drE(fp3) = -0.706
#
#
# For the limit cycle case:
# dG/drE(fp_lc) = 0.837
# ```
# ## Section 2.2: Nullcline analysis of the ISN
#
# Recall that the E nullcline follows
#
# \\
#
# \begin{align}
# r_E = F_E(w_{EE}r_E -w_{EI}r_I + I^{\text{ext}}_E;a_E,\theta_E).
# \end{align}
#
# \\
#
#
# That is, the firing rate $r_E$ can be a function of $r_I$. Let's take the derivative of $r_E$ over $r_I$, and obtain
#
# \\
#
# \begin{align}
# &\frac{dr_E}{dr_I} = F_E' \cdot (w_{EE}\frac{dr_E}{dr_I} -w_{EI}) \iff \\
# &(1-F_E'w_{EE})\frac{dr_E}{dr_I} = -F_E' w_{EI} \iff \\
# &\frac{dr_E}{dr_I} = \frac{F_E' w_{EI}}{F_E'w_{EE}-1}.
# \end{align}
#
# \\
#
#
# That is, in the `rI`-`rE` phase plane, we can obtain the slope along the E nullcline as
#
# \\
#
#
# $$\frac{dr_I}{dr_E} = \frac{F_E'w_{EE}-1}{F_E' w_{EI}} \qquad (12)$$
#
# Similarly, we can obtain the slope along the I nullcline as
#
# \\
#
# $$\frac{dr_I}{dr_E} = \frac{F_I'w_{IE}}{F_I' w_{II}+1} \qquad (13)$$
#
# \\
#
#
# Then, we can find that $\Big{(} \displaystyle{\frac{dr_I}{dr_E}} \Big{)}_{\rm I-nullcline} >0$ in Equation (13).
#
# \\
#
# However, in Equation (12), the sign of $\Big{(} \displaystyle{\frac{dr_I}{dr_E}} \Big{)}_{\rm E-nullcline}$ depends on the sign of $(F_E'w_{EE}-1)$. Note that, $(F_E'w_{EE}-1)$ is the same as what we show above (Equation (8)). Therefore, we can have the following results:
#
# - $\Big{(} \displaystyle{\frac{dr_I}{dr_E}} \Big{)}_{\rm E-nullcline}<0$, **noninhibition-stabilized
# network (non-ISN) regime**
#
# - $\Big{(} \displaystyle{\frac{dr_I}{dr_E}} \Big{)}_{\rm E-nullcline}>0$, **inhibition-stabilized
# network (ISN) regime**
#
# \\
#
# In addition, it is important to point out the following two conclusions: \\
#
#
# **Conclusion 1:** The stability of a fixed point can determine the relationship between the slopes Equations (12) and (13). As discussed above, the fixed point is stable when the Jacobian matrix ($J$ in Equation (7)) has two eigenvalues with a negative real part, which indicates a positive determinant of $J$, i.e., $\text{det}(J)>0$.
#
# From the Jacobian matrix definition and from Equations (8-11), we can obtain:
#
# $ J=
# \left[ {\begin{array}{cc}
# \displaystyle{\frac{1}{\tau_E}(w_{EE}F_E'-1)} & \displaystyle{-\frac{1}{\tau_E}w_{EI}F_E'}\\[1mm]
# \displaystyle {\frac{1}{\tau_I}w_{IE}F_I'}& \displaystyle {\frac{1}{\tau_I}(-w_{II}F_I'-1)} \\
# \end{array} } \right] $
#
# \\
#
# Note that, if we let
#
# \\
#
# $ T=
# \left[ {\begin{array}{cc}
# \displaystyle{\tau_E} & \displaystyle{0}\\[1mm]
# \displaystyle 0& \displaystyle \tau_I \\
# \end{array} } \right] $,
# $ F=
# \left[ {\begin{array}{cc}
# \displaystyle{F_E'} & \displaystyle{0}\\[1mm]
# \displaystyle 0& \displaystyle F_I' \\
# \end{array} } \right] $, and
# $ W=
# \left[ {\begin{array}{cc}
# \displaystyle{w_{EE}} & \displaystyle{-w_{EI}}\\[1mm]
# \displaystyle w_{IE}& \displaystyle -w_{II} \\
# \end{array} } \right] $
#
# \\
#
# then, using matrix notation, $J=T^{-1}(F W - I)$ where $I$ is the identity matrix, i.e., $I = \begin{bmatrix}
# 1 & 0 \\
# 0 & 1
# \end{bmatrix}.$
#
# \\
#
# Therefore, $\det{(J)}=\det{(T^{-1}(F W - I))}=(\det{(T^{-1})})(\det{(F W - I)}).$
#
# Since $\det{(T^{-1})}>0$, as time constants are positive by definition, the sign of $\det{(J)}$ is the same as the sign of $\det{(F W - I)}$, and so
#
# $$\det{(FW - I)} = (F_E' w_{EI})(F_I'w_{IE}) - (F_I' w_{II} + 1)(F_E'w_{EE} - 1) > 0.$$
#
# \\
#
# Then, combining this with Equations (12) and (13), we can obtain
# $$\frac{\Big{(} \displaystyle{\frac{dr_I}{dr_E}} \Big{)}_{\rm I-nullcline}}{\Big{(} \displaystyle{\frac{dr_I}{dr_E}} \Big{)}_{\rm E-nullcline}} > 1. $$
#
#
# Therefore, at the stable fixed point, I nullcline has a steeper slope than the E nullcline.
#
#
# **Conclusion 2:** Effect of adding input to the inhibitory population.
#
# While adding the input $\delta I^{\rm ext}_I$ into the inhibitory population, we can find that the E nullcline (Equation (5)) stays the same, while the I nullcline has a pure left shift: the original I nullcline equation,
#
# \\
#
# \begin{equation}
# r_I = F_I(w_{IE}r_E-w_{II}r_I + I^{\text{ext}}_I ; \alpha_I, \theta_I)
# \end{equation}
#
# \\
#
# remains true if we take $I^{\text{ext}}_I \rightarrow I^{\text{ext}}_I +\delta I^{\rm ext}_I$ and $r_E\rightarrow r_E'=r_E-\frac{\delta I^{\rm ext}_I}{w_{IE}}$ to obtain
#
# \\
#
# \begin{equation}
# r_I = F_I(w_{IE}r_E'-w_{II}r_I + I^{\text{ext}}_I +\delta I^{\rm ext}_I; \alpha_I, \theta_I)
# \end{equation}
#
# \\
#
# Putting these points together, we obtain the phase plane pictures shown below. After adding input to the inhibitory population, it can be seen in the trajectories above and the phase plane below that, in an **ISN**, $r_I$ will increase first but then decay to the new fixed point in which both $r_I$ and $r_E$ are decreased compared to the original fixed point. However, by adding $\delta I^{\rm ext}_I$ into a **non-ISN**, $r_I$ will increase while $r_E$ will decrease.
# ### Interactive Demo 2.2: Nullclines of Example **ISN** and **non-ISN**
#
# In this interactive widget, we inject excitatory ($I^{\text{ext}}_I>0$) or inhibitory ($I^{\text{ext}}_I<0$) drive into the inhibitory population when the system is at its equilibrium (with parameters $w_{EE}=6.4$, $w_{EI}=4.8$, $w_{IE}=6.$, $w_{II}=1.2$, $I_{E}^{\text{ext}}=0.8$, $\tau_I = 0.8$, and $I^{\text{ext}}_I=0$). How does the firing rate of the $I$ population change with excitatory vs inhibitory drive into the inhibitory population?
# + cellView="form"
# @title
# @markdown Make sure you execute this cell to enable the widget!
# Oscillation-capable weights, but with tau_I = 0.8 (steady-state regime here).
pars = default_pars(T=50., dt=0.1)
pars['wEE'], pars['wEI'] = 6.4, 4.8
pars['wIE'], pars['wII'] = 6.0, 1.2
pars['I_ext_E'] = 0.8
pars['tau_I'] = 0.8
def ISN_I_perturb(dI=0.1):
  """Step the external drive to the I population by dI at t = T/2 and plot."""
  Lt = len(pars['range_t'])
  # Time-varying drive: zero for the first half, dI for the second half.
  pars['I_ext_I'] = np.zeros(Lt)
  pars['I_ext_I'][int(Lt / 2):] = dI
  pars['rE_init'], pars['rI_init'] = 0.6, 0.26
  rE, rI = simulate_wc(**pars)
  # Upper panel: the injected current trace.
  plt.figure(figsize=(8, 1.5))
  plt.plot(pars['range_t'], pars['I_ext_I'], 'k')
  plt.xlabel('t (ms)')
  plt.ylabel(r'$I_I^{\mathrm{ext}}$')
  plt.ylim(pars['I_ext_I'].min() - 0.01, pars['I_ext_I'].max() + 0.01)
  plt.show()
  # Lower panel: E/I activity; dashed lines mark pre-perturbation levels.
  plt.figure(figsize=(8, 4.5))
  plt.plot(pars['range_t'], rE, 'b', label=r'$r_E$')
  plt.plot(pars['range_t'], rE[int(Lt / 2) - 1] * np.ones(Lt), 'b--')
  plt.plot(pars['range_t'], rI, 'r', label=r'$r_I$')
  plt.plot(pars['range_t'], rI[int(Lt / 2) - 1] * np.ones(Lt), 'r--')
  plt.ylim(0, 0.8)
  plt.xlabel('t (ms)')
  plt.ylabel('Activity')
  plt.legend(loc='best')
  plt.show()
_ = widgets.interact(ISN_I_perturb, dI=(-0.2, 0.21, .05))
# +
# to_remove explanation
"""
Discussion:
Here we observe a paradoxical effect; if we inject excitatory current to the I
population, the r_I goes down, whereas when we inject inhibitory current, the r_I
increases. Recall that we inject a constant excitatory current to the E population,
which also drives, indirectly, the I population. When Iext>0, the r_I increases
but this drives E to a low state, which in turn leads to rI decrease. Whereas,
when Iext<0, the effect is negative on I population for a short amount of time,
which is sufficient to drive the E population to a high steady state, and then due
to E to I connections, the I population activity is increased.
""";
# -
# ---
# # Section 3: Fixed point and working memory
# The input into the neurons measured in the experiment is often very noisy ([link](http://www.scholarpedia.org/article/Stochastic_dynamical_systems)). Here, the noisy synaptic input current is modeled as an Ornstein-Uhlenbeck (OU) process, which has been discussed several times in the previous tutorials.
#
# + cellView="form"
# @markdown Make sure you execute this cell to enable the function my_OU and plot the input current!
def my_OU(pars, sig, myseed=False):
    """
    Generate an Ornstein-Uhlenbeck (OU) noise input current.

    Args:
        pars   : parameter dictionary; must contain 'dt', 'range_t', 'tau_ou'
        sig    : noise amplitude
        myseed : random seed (int), or False for an unseeded (fresh) draw

    Returns:
        I_ou : 1D numpy array of the same length as pars['range_t']
    """
    # Retrieve simulation parameters
    dt, range_t = pars['dt'], pars['range_t']
    Lt = range_t.size
    tau_ou = pars['tau_ou']  # OU time constant [ms]

    # Set random seed. Compare against False explicitly so the valid seed 0
    # is not silently treated as "no seed" (0 is falsy).
    if myseed is not False:
        np.random.seed(seed=myseed)
    else:
        np.random.seed()

    # Initialize
    noise = np.random.randn(Lt)
    I_ou = np.zeros(Lt)
    I_ou[0] = noise[0] * sig

    # Generate the OU process by Euler-Maruyama integration:
    # dI = -(I / tau_ou) dt + sig * sqrt(2 dt / tau_ou) * N(0, 1)
    for it in range(Lt - 1):
        I_ou[it + 1] = (I_ou[it]
                        + dt / tau_ou * (0. - I_ou[it])
                        + np.sqrt(2 * dt / tau_ou) * sig * noise[it + 1])

    return I_ou
# Demo: generate and plot one OU sample path with the default parameters.
pars = default_pars(T=50)
pars['tau_ou'] = 1.  # OU time constant [ms]
sig_ou = 0.1  # noise amplitude
I_ou = my_OU(pars, sig=sig_ou, myseed=2020)  # fixed seed -> reproducible trace

plt.figure(figsize=(8, 5.5))
plt.plot(pars['range_t'], I_ou, 'b')
plt.xlabel('Time (ms)')
plt.ylabel(r'$I_{\mathrm{OU}}$')
plt.show()
# -
#
#
# With the default parameters, the system fluctuates around a resting state with the noisy input.
#
# + cellView="form"
# @markdown Execute this cell to plot activity with noisy input current
# Drive both populations with independent OU noise and plot the resulting
# fluctuations of the E/I rates around the resting state.
pars = default_pars(T=100)
pars['tau_ou'] = 1.  # [ms]
sig_ou = 0.1
# Independent noise realizations for E and I (different seeds).
pars['I_ext_E'] = my_OU(pars, sig=sig_ou, myseed=20201)
pars['I_ext_I'] = my_OU(pars, sig=sig_ou, myseed=20202)
pars['rE_init'], pars['rI_init'] = 0.1, 0.1

rE, rI = simulate_wc(**pars)

plt.figure(figsize=(8, 5.5))
ax = plt.subplot(111)
ax.plot(pars['range_t'], rE, 'b', label='E population')
ax.plot(pars['range_t'], rI, 'r', label='I population')
ax.set_xlabel('t (ms)')
ax.set_ylabel('Activity')
ax.legend(loc='best')
plt.show()
# -
# ## Interactive Demo 3: Short pulse induced persistent activity
# Then, let's use a brief 10-ms positive current to the E population when the system is at its equilibrium. When this amplitude (SE below) is sufficiently large, a persistent activity is produced that outlasts the transient input. What is the firing rate of the persistent activity, and what is the critical input strength? Try to understand the phenomena from the above phase-plane analysis.
# + cellView="form"
# @title
# @markdown Make sure you execute this cell to enable the widget!
def my_inject(pars, t_start, t_lag=10.):
    """
    Build a unit-amplitude rectangular current pulse on the simulation grid.

    Args:
        pars    : parameter dictionary with 'dt' (step, ms) and 'range_t'
        t_start : time at which the pulse switches on [ms]
        t_lag   : pulse duration [ms] (default 10 ms)

    Returns:
        1D numpy array, same length as pars['range_t']: 1.0 while the
        pulse is on, 0.0 elsewhere.
    """
    dt = pars['dt']
    n_steps = pars['range_t'].size

    # Convert the pulse window from milliseconds to sample indices.
    on = int(t_start / dt)
    off = on + int(t_lag / dt)

    # All-zero baseline, then switch the pulse window on.
    pulse = np.zeros(n_steps)
    pulse[on:off] = 1.
    return pulse
# Simulation with OU noise on the I population plus a brief pulse on E.
pars = default_pars(T=100)
pars['tau_ou'] = 1.  # [ms]
sig_ou = 0.1
pars['I_ext_I'] = my_OU(pars, sig=sig_ou, myseed=2021)
pars['rE_init'], pars['rI_init'] = 0.1, 0.1

# 10-ms unit pulse starting at t = 20 ms; L_pulse = number of "on" samples.
I_pulse = my_inject(pars, t_start=20., t_lag=10.)
L_pulse = sum(I_pulse > 0.)


def WC_with_pulse(SE=0.):
    # Run the network with noisy E input plus a pulse of amplitude SE, and
    # plot both population rates; the red bar marks when the stimulus is on.
    # NOTE(review): mutates the enclosing `pars` dict on every call.
    pars['I_ext_E'] = my_OU(pars, sig=sig_ou, myseed=2022)
    pars['I_ext_E'] += SE * I_pulse

    rE, rI = simulate_wc(**pars)

    plt.figure(figsize=(8, 5.5))
    ax = plt.subplot(111)
    ax.plot(pars['range_t'], rE, 'b', label='E population')
    ax.plot(pars['range_t'], rI, 'r', label='I population')
    ax.plot(pars['range_t'][I_pulse > 0.], 1.0*np.ones(L_pulse), 'r', lw=3.)
    ax.text(25, 1.05, 'stimulus on', horizontalalignment='center',
            verticalalignment='bottom')
    ax.set_ylim(-0.03, 1.2)
    ax.set_xlabel('t (ms)')
    ax.set_ylabel('Activity')
    ax.legend(loc='best')
    plt.show()


_ = widgets.interact(WC_with_pulse, SE=(0.0, 1.0, .05))
# +
# to_remove explanation
"""
Discussion:
When a system has more than one fixed points, depending on the input strength,
the network will settle in one of the fixed points. In this case, we have two
fixed points, one of the fixed points corresponds to high activity. So when input
drives the network to the high activity fixed points, the network activity will
remain there -- it is a stable fixed point. Because the network retains its
activity (persistent activity) even after the input has been removed, we can
take the persistent activity as working memory.
""";
# -
# Explore what happened when a second, brief current is applied to the inhibitory population.
| tutorials/W2D4_DynamicNetworks/W2D4_Tutorial3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# name: python3
# ---
# +
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import glob
import geopandas
import shapely
import fiona
import geopandas as gp
from datetime import datetime
# +
# Load every CSV in the "fox" directory (column 4 parsed as a datetime)
# and stack them into one DataFrame.
dataframes = []
for f in os.listdir("fox"):
    print(f)
    df = pd.read_csv(os.path.join("fox", f), parse_dates=[4])
    dataframes.append(df)
fox_df = pd.concat(dataframes)
# +
# Promote to a GeoDataFrame: one point geometry per (Longitude, Latitude) row.
fox_gdf = gp.GeoDataFrame(fox_df, geometry=geopandas.points_from_xy(fox_df.Longitude, fox_df.Latitude))
fox_gdf.head()
# +
# Plot settings for the camp map.
colors = 9
cmap = 'plasma'  # NOTE(review): cmap and scheme are defined but never passed to .plot(); confirm.
figsize = (16, 10)
plotvar = 'Name of camp'
scheme = 'equal interval'
# NOTE(review): both a column (plotvar) and color='black' are given — the fixed
# color takes precedence over per-camp colouring; confirm which is intended.
ax = fox_gdf.plot(plotvar, figsize=figsize, k = colors, legend=True, color='black')
# -
# Paths for the Queensland locality (suburb) boundary data.
# Use a forward slash so the literal avoids the invalid "\q" escape sequence
# (a SyntaxWarning in modern Python); forward slashes also work on Windows.
zip_file_name = "maps/qld_locality_polygon_shp.zip"
shp_file_name = "QLD_LOCALITY_POLYGON_shp"
def unzip_zipfile(zipped_file_path, put_it_here="."):
    """
    Extract every member of a zip archive.

    Args:
        zipped_file_path : path to the .zip file
        put_it_here      : destination directory (default: current directory)

    Returns:
        The destination directory, so callers can keep a handle on where
        the files were extracted.
    """
    import zipfile
    # `with` guarantees the archive handle is closed even if extraction fails.
    with zipfile.ZipFile(zipped_file_path, 'r') as zip_of_suburbs:
        zip_of_suburbs.extractall(put_it_here)
    return put_it_here
# +
print("unzipping")
# NOTE(review): the return value of unzip_zipfile is never used afterwards;
# the call matters only for its extraction side effect.
map_file = unzip_zipfile(zip_file_name)
print("done")
# +
# Read the suburb polygons; override the CRS recorded in the shapefile.
burbs = gp.GeoDataFrame.from_file(shp_file_name)
burbs.set_crs(epsg=5234, inplace=True, allow_override=True)
burbs.head(7)

# Drop metadata columns we don't need (errors="ignore" tolerates missing ones).
cols_to_drop = ["QLD_LOCA_1", "QLD_LOCA_3", "QLD_LOCA_4", "QLD_LOCA_6", "DT_RETIRE"]
burbs.drop(cols_to_drop, axis=1, inplace=True, errors="ignore")
burbs.head(7)
# -
# Overlay the camp points on the suburb boundaries.
base = burbs.boundary.plot(figsize=(160,20), alpha=0.1, edgecolor='black')
fox_gdf.plot(plotvar, figsize=(160,20), k = colors, legend=False, ax=base)
plt.title("Known Camp Locations", fontsize=25)
plt.xlabel('Longitude', fontsize=20)
plt.ylabel('Latitude', fontsize=20)
plt.show()
| flyingfox maps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np

# Load the raw signal/background event arrays.
names = ['sig_m350GeV', 'sig_m1TeV', 'bg']
data = [np.load('../raw_data/' + name + '.npy') for name in names]
sig350G, sig1T, bg = data
# Split the background into three 1M-event chunks (these are views, not copies).
bg1, bg2, bg3 = [bg[0:1000000], bg[1000000:2000000], bg[2000000:3000000]]

import sklearn
assert sklearn.__version__ >= "0.20"
# +
# Label signal events with 1 (extra last column).
sig350G_labelled = np.concatenate((sig350G, np.ones((len(sig350G),1))), axis=1)
# NOTE(review): bg_shuffle is an alias of bg, so the in-place shuffle below
# also reorders bg (and therefore the bg1/bg2/bg3 views) — confirm intended.
bg_shuffle = bg
np.random.shuffle(bg_shuffle)
bg_shuffle_labelled = np.concatenate((bg_shuffle, np.zeros((len(bg_shuffle),1))), axis=1)
# Balanced dataset: all signal plus an equal number of shuffled background events.
m350G_data = np.concatenate((sig350G_labelled, bg_shuffle_labelled[:len(sig350G)]), axis=0)
# +
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler

# Two alternative feature-scaling pipelines.
stand_pipe = Pipeline([('std_scaler', StandardScaler())])
minmax_pipe = Pipeline([('minmax_scaler', MinMaxScaler())])
# +
from sklearn.model_selection import train_test_split

# 80/20 train/test splits of the raw and scaled datasets (fixed seed).
# NOTE(review): the scalers are fit on the full dataset before splitting, so
# scaling statistics leak from test into train — confirm this is acceptable.
train, test = train_test_split(m350G_data, test_size=0.2, random_state=42)
train_stand, test_stand = train_test_split(stand_pipe.fit_transform(m350G_data), test_size=0.2, random_state=42)
train_minmax, test_minmax = train_test_split(minmax_pipe.fit_transform(m350G_data), test_size=0.2, random_state=42)
# +
# above data had been imported outside of this notebook, but below data was initially
# imported here first
path = '/Users/elijahsheridan/MG5_aMC_v2_6_5/b_meson_pheno/ttbarzp-ml/raw_data/full_33_data.dat'
data = np.loadtxt(path)  # plain-text table -> ndarray
save_path = '/Users/elijahsheridan/MG5_aMC_v2_6_5/b_meson_pheno/ttbarzp-ml/train_data/full_33/full_33_data'
np.save(save_path, data)  # cache as .npy for faster reloads
# -
# Reload the cached array and split it into seven contiguous 1M-event slices,
# one per physics process, saved individually.
fdata = np.load('../train_data/full_33/full_33_data.npy')
mil = 1000000
save_path = '../train_data/full_33/'
names = ['sig350Gm', 'sig1Tm', 'sig1p5Tm', 'sig2TeVm', 'bgh', 'bg4t', 'bgnoh']
fdatas = [fdata[i*mil:(i+1)*mil] for i in range(7)]
for name, data in zip(names, fdatas):
    np.save(save_path + name, data)

# f for full, as in 33 component
fsig350Gm, fsig1Tm, fsig1p5Tm, fsig2TeVm, fbgh, fbg4t, fbgnoh = fdatas
# +
""" START HERE """
import numpy as np

# Load the seven per-process arrays saved above.
path = '../train_data/full_33/'
names = ['sig350Gm', 'sig1Tm', 'sig1p5Tm', 'sig2TeVm', 'bgh', 'bg4t', 'bgnoh']
fsig350Gm, fsig1Tm, fsig1p5Tm, fsig2TeVm, fbgh, fbg4t, fbgnoh = [np.load(path + name + '.npy') for name in names]
# +
# Append a label column: 1 for each signal mass point, 0 for background.
fsig350Gm_labelled = np.concatenate((fsig350Gm, np.ones((len(fsig350Gm),1))), axis=1)
fsig1Tm_labelled = np.concatenate((fsig1Tm, np.ones((len(fsig1Tm), 1))), axis=1)
fsig1p5Tm_labelled = np.concatenate((fsig1p5Tm, np.ones((len(fsig1p5Tm),1))), axis=1)
fsig2Tm_labelled = np.concatenate((fsig2TeVm, np.ones((len(fsig2TeVm),1))), axis=1)

# Pool and shuffle all three background samples, then label them 0.
fbg_shuffle = np.concatenate((fbgh, fbg4t, fbgnoh))
np.random.shuffle(fbg_shuffle)
fbg_shuffle_labelled = np.concatenate((fbg_shuffle, np.zeros((len(fbg_shuffle),1))), axis=1)

# One balanced signal-vs-background dataset per signal mass point.
f350Gm_data = np.concatenate((fsig350Gm_labelled, fbg_shuffle_labelled[:len(fsig350Gm_labelled)]), axis=0)
f1Tm_data = np.concatenate((fsig1Tm_labelled, fbg_shuffle_labelled[:len(fsig1Tm_labelled)]), axis=0)
f1p5Tm_data = np.concatenate((fsig1p5Tm_labelled, fbg_shuffle_labelled[:len(fsig1p5Tm_labelled)]), axis=0)
f2Tm_data = np.concatenate((fsig2Tm_labelled, fbg_shuffle_labelled[:len(fsig2Tm_labelled)]), axis=0)
# +
from sklearn.model_selection import train_test_split

# 80/20 splits with a shared seed so the mass points get comparable splits.
train_f350Gm, test_f350Gm = train_test_split(f350Gm_data, test_size=0.2, random_state=43)
train_f1Tm, test_f1Tm = train_test_split(f1Tm_data, test_size=0.2, random_state=43)
train_f1p5Tm, test_f1p5Tm = train_test_split(f1p5Tm_data, test_size=0.2, random_state=43)
train_f2Tm, test_f2Tm = train_test_split(f2Tm_data, test_size=0.2, random_state=43)
# -
# Min-max-scaled variants (minmax_pipe is defined in an earlier cell).
# NOTE(review): 'train_f2tm_minmax' (lower-case t) breaks the naming pattern
# of the other variables — possible typo.
train_f350Gm_minmax, test_f350Gm_minmax = train_test_split(minmax_pipe.fit_transform(f350Gm_data), test_size=0.2, random_state=42)
train_f2tm_minmax, test_f2Tm_minmax = train_test_split(minmax_pipe.fit_transform(f2Tm_data), test_size=0.2, random_state=42)

print(len(f350Gm_data))
print(len(f2Tm_data))
| notebooks/legacy/data_preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import time
from sklearn.metrics import classification_report, confusion_matrix, ConfusionMatrixDisplay
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
# Load the preprocessed IoT-23 dataset and drop the stale CSV index column.
filepath = "../Data Preprocessing/iot23_combined.csv"
df = pd.read_csv(filepath)
del df['Unnamed: 0']
df
df['label'].value_counts()
df.columns

# Feature matrix: numeric flow statistics plus one-hot protocol/state flags.
X = df[['duration', 'orig_bytes', 'resp_bytes', 'missed_bytes', 'orig_pkts', 'orig_ip_bytes', 'resp_pkts', 'resp_ip_bytes', 'proto_icmp', 'proto_tcp', 'proto_udp', 'conn_state_OTH', 'conn_state_REJ', 'conn_state_RSTO', 'conn_state_RSTOS0', 'conn_state_RSTR', 'conn_state_RSTRH', 'conn_state_S0', 'conn_state_S1', 'conn_state_S2', 'conn_state_S3', 'conn_state_SF', 'conn_state_SH', 'conn_state_SHR']]
Y = df['label']
# 80/20 split with a fixed seed for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=10, test_size=0.2)
# +
# Hyper-parameter grid for the random forest (4*2*4*3 = 96 combinations).
tuned_parameters = {
    "n_estimators": [10, 50, 100, 200],
    "criterion": ['gini', 'entropy'],
    "min_samples_split": [2, 4, 8, 16],
    "max_features": ['sqrt', 'log2', None]
}

print(tuned_parameters)
# -
# Exhaustive cross-validated search, using all CPU cores (n_jobs=-1).
clf = GridSearchCV(RandomForestClassifier(), tuned_parameters, scoring="accuracy", n_jobs=-1, verbose=1)
clf.fit(X_train, Y_train)
# +
# Summarize the grid search: best configuration and per-candidate CV scores.
print("Best score found on development set:", clf.best_score_)
print("\nBest parameters set found on development set:", clf.best_params_)
print("\nRefit time (in seconds):", clf.refit_time_)
print("\nGrid scores on development set:")
means = clf.cv_results_["mean_test_score"]
stds = clf.cv_results_["std_test_score"]
params = clf.cv_results_["params"]
for mean, std, param in zip(means, stds, params):
    # +/- shows two standard deviations across the CV folds.
    print("%0.3f (+/-%0.06f) for %r" % (mean, std * 2, param))
# +
# Error-bar plot of mean CV accuracy (+/- 2 std) for every grid candidate.
x = [str(x) for x in params]
y = means
err = stds * 2
step = 1  # plot every candidate; raise to thin out the x axis

fig, ax = plt.subplots(figsize=(16,16))
# NOTE(review): the 'seaborn-whitegrid' style name was removed in
# matplotlib >= 3.6 (renamed 'seaborn-v0_8-whitegrid') — confirm version.
plt.style.use('seaborn-whitegrid')
plt.errorbar(x[::step], y[::step], yerr=err[::step], fmt='o', color='black', ecolor='lightsalmon', elinewidth=3, capsize=0);
plt.xlabel('Parameter(s)')
plt.xticks(rotation=90, ha='right')
plt.ylabel('Accuracy')
# Pad the y-limits to the nearest 0.1 beyond the observed score range.
min_y = np.floor(10*np.min(y[::step]))/10 - 0.05
max_y = np.ceil(10*np.max(y[::step]))/10 + 0.05
plt.ylim(min_y,max_y)
plt.yticks(np.linspace(min_y, max_y, int(1+(max_y-min_y)*10*2)))
plt.title('GridSearchCV results')
# -
# Final evaluation of the refit best model on the held-out test split.
print("Detailed classification report:")
print(" - The model is trained on the full development set.")
print(" - The scores are computed on the full evaluation set.")
Y_true, Y_pred = Y_test, clf.predict(X_test)
print(classification_report(Y_true, Y_pred))

# Display labels: every class in the data except C&C-Mirai.
labels = df['label'].unique().tolist()
labels.remove('C&C-Mirai')
print(labels)
# +
# Row-normalized confusion matrix (each row sums to 1).
# NOTE(review): confusion_matrix orders classes sorted-alphabetically, while
# `labels` keeps first-appearance order — the tick labels may be misaligned
# with the matrix; confirm, or pass labels= to confusion_matrix explicitly.
cm = confusion_matrix(Y_true, Y_pred, normalize='true')
disp = ConfusionMatrixDisplay(cm, display_labels=labels)

fig, ax = plt.subplots(figsize=(10,10))
disp.plot(ax=ax,
          cmap=plt.cm.Blues,
          xticks_rotation=90,
          values_format='.1f')
| Optimization/SKLearn/Random Forest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (cmdh)
# language: python
# name: club_mahindra_data_hack
# ---
# +
import sys
# Make the project package importable when running from the notebooks folder.
sys.path.append('C:/Users/visha/Documents/GitHub/Club_Mahindra_Data_Hack/Club_Mahindra_Data_Hack')

# Import standard libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
color = sns.color_palette()

# Import local libraries
from src.data_preprocessing import preprocess_data

# Show up to 100 columns when displaying DataFrames.
pd.options.display.max_columns = 100

import warnings
warnings.filterwarnings("ignore")  # NOTE(review): silences all warnings globally
# -
# ### Load Data
# Load the prepared (feature-engineered) splits from disk.
train_prepared = pd.read_csv(r'../data/processed/prepared_data/train_prepared.csv')
val_prepared = pd.read_csv(r'../data/processed/prepared_data/val_prepared.csv')
test_prepared = pd.read_csv(r'../data/processed/prepared_data/test_prepared.csv')

# ### Pre-process Data

# separate features from targets
train_prepared_X, train_prepared_y = preprocess_data.separate_features_and_targets(train_prepared)
val_prepared_X, val_prepared_y = preprocess_data.separate_features_and_targets(val_prepared)
test_prepared_X, test_prepared_y = preprocess_data.separate_features_and_targets(test_prepared)

# separate numerical features from categorical features
train_num_feature_names, train_cat_feature_names = preprocess_data.separate_num_cat_features(train_prepared_X)
val_num_feature_names, val_cat_feature_names = preprocess_data.separate_num_cat_features(val_prepared_X)
test_num_feature_names, test_cat_feature_names = preprocess_data.separate_num_cat_features(test_prepared_X)

# transform features to make it ready to be fed to machine learning models
# NOTE(review): each split is transformed independently; if the transformer is
# fit per-split, train/val/test may end up on inconsistent scales or one-hot
# columns — confirm transform_all_features handles this correctly.
train_preprocessed = preprocess_data.transform_all_features(train_prepared_X, train_num_feature_names, train_cat_feature_names)
val_preprocessed = preprocess_data.transform_all_features(val_prepared_X, val_num_feature_names, val_cat_feature_names)
test_preprocessed = preprocess_data.transform_all_features(test_prepared_X, test_num_feature_names, test_cat_feature_names)

# Convert the arrays into pandas DataFrame
train_preprocessed = pd.DataFrame(train_preprocessed)
val_preprocessed = pd.DataFrame(val_preprocessed)
test_preprocessed = pd.DataFrame(test_preprocessed)

# Save the preprocessed features to project drive
train_preprocessed.to_csv(r'../data/processed/preprocessed_data/train_preprocessed.csv', index=False)
val_preprocessed.to_csv(r'../data/processed/preprocessed_data/val_preprocessed.csv', index=False)
test_preprocessed.to_csv(r'../data/processed/preprocessed_data/test_preprocessed.csv', index=False)

# Save target values to project drive
train_prepared_y.to_csv(r'../data/processed/target_values/train_target_values.csv', index=False)
val_prepared_y.to_csv(r'../data/processed/target_values/val_target_values.csv', index=False)
test_prepared_y.to_csv(r'../data/processed/target_values/test_target_values.csv', index=False)
| notebooks/2.0-rs-data_processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from PIL import Image
import pytesseract
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
import time
import cv2
import gradio as gr
# Point pytesseract at the local Tesseract OCR binary (Windows install path).
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
# +
import pandas as pd
from glob import glob
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import re
import seaborn as sn
from tensorflow.keras.layers import Input,Dense,Bidirectional,Conv2D,MaxPooling2D,Flatten,concatenate,GlobalAveragePooling2D,BatchNormalization,Lambda,Add,Multiply
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam,SGD
import tensorflow.keras.backend as K
from transformers import AutoTokenizer, TFAutoModel, TFBertModel, logging
from utilities import *
logging.set_verbosity_error()
# -
def class_block(inputs):
    """Classification head: BN -> Dense(512) -> Dense(128) -> softmax over 3 sentiment classes."""
    layers = tf.keras.layers  # local alias keeps the pipeline readable

    x = layers.BatchNormalization()(inputs)
    # Two fully-connected blocks, each followed by 20% dropout.
    for width in (512, 128):
        x = layers.Dense(width, activation='relu')(x)
        x = layers.Dropout(0.2)(x)
    # 3 output units: one probability per sentiment class.
    return layers.Dense(3, activation='softmax', name='outputs')(x)
# +
# Sequence lengths: 50 tokens for the tweet text, 10 for the OCR'd image text.
seq_len=50
seq_len2=10

# Pretrained Italian sentiment BERT + ImageNet-pretrained ResNet50V2 backbone.
# NOTE(review): `tf` is not imported in this notebook — presumably it comes
# from `from utilities import *`; verify.
model= "unideeplearning/polibert_sa"
tokenizer = AutoTokenizer.from_pretrained(model)
bert = TFBertModel.from_pretrained(model)
CNN = tf.keras.applications.ResNet50V2(include_top=False, weights='imagenet', input_tensor=None, input_shape=(224,224,3), pooling=False, classes=3)
# +
# Two token streams (tweet text and OCR text) are concatenated and fed to BERT.
input_ids = tf.keras.layers.Input(shape=(seq_len,), name='input_ids', dtype='int32')
mask = tf.keras.layers.Input(shape=(seq_len,), name='attention_mask', dtype='int32')
input_ids2 = tf.keras.layers.Input(shape=(seq_len2,), name='input_ids2', dtype='int32')
mask2 = tf.keras.layers.Input(shape=(seq_len2,), name='attention_mask2', dtype='int32')
input_ids_c=tf.keras.layers.concatenate([input_ids, input_ids2])
mask_c=tf.keras.layers.concatenate([mask, mask2])
image_inputs=tf.keras.layers.Input(shape=(224,224,3),name="images")

text_embeddings = bert(input_ids_c, attention_mask=mask_c)[0]  # we only keep tensor 0 (last_hidden_state)
text_1d = tf.keras.layers.GlobalMaxPool1D()(text_embeddings)  # reduce tensor dimensionality
image_embeddings=CNN(image_inputs)
image_1d=GlobalAveragePooling2D()(image_embeddings)

# Fuse pooled text and image features, then classify.
# NOTE(review): `model` was the HF model name string above and is rebound to
# the keras Model here — shadowing worth renaming.
X=tf.keras.layers.concatenate([text_1d, image_1d])
y=class_block(X)
model = tf.keras.Model(inputs=[input_ids, mask,input_ids2, mask2, image_inputs], outputs=y)

# freeze the DistilBERT layer
# NOTE(review): freezing by positional index (layers[7]/[8]) is fragile if the
# graph changes; also the comment says DistilBERT but the model is BERT.
model.layers[7].trainable = False
model.layers[8].trainable = False
# -
model.load_weights('saves/final_weights')
def preprocess_finale(im):
    """Prepare an image for OCR: denoise, convert to grayscale, then inverse-binarize."""
    # Edge-preserving smoothing (diameter 5, sigmaColor 55, sigmaSpace 60).
    smoothed = cv2.bilateralFilter(im, 5, 55, 60)
    gray = cv2.cvtColor(smoothed, cv2.COLOR_BGR2GRAY)
    # Threshold type 1 == cv2.THRESH_BINARY_INV: pixels > 240 become 0, the rest 255.
    _, binarized = cv2.threshold(gray, 240, 255, 1)
    return binarized
# +
# Tesseract config: LSTM engine (--oem 3), sparse-text page segmentation
# (--psm 11), characters restricted to upper-case letters and space.
custom_config = r"--oem 3 --psm 11 -c tessedit_char_whitelist='ABCDEFGHIJKLMNOPQRSTUVWXYZ '"
labels=['negative', 'neutral', 'positive']

def ext_text(tweet_text, tweet_image):
    """Gradio handler: OCR the image, tokenize both texts, run the fused model.

    Returns (preprocessed image, extracted text, {label: probability}).
    """
    text1=tweet_text
    im= tweet_image
    # OCR on the thresholded image (Italian language pack).
    img1 = preprocess_finale(im)
    text = pytesseract.image_to_string(img1, lang='ita', config=custom_config)
    text = text.replace('\n', '')
    # Tokenize tweet text (50 tokens) and OCR text (10 tokens) separately.
    toktx=tokenize(text1, tokenizer, SEQ_LEN=50)
    toktx2=tokenize(text, tokenizer, SEQ_LEN=10)
    image = preprocess_image(im,labels=None, prediction=True)
    prediction= model.predict([toktx,toktx2,image])
    # Map each sentiment label to its softmax probability.
    return img1, text, {labels[i]: float(prediction[0][i]) for i in range(0,3)}
# -
# Gradio UI: text + image in; preprocessed image, OCR text and sentiment out.
iface = gr.Interface(fn=ext_text, inputs=[gr.inputs.Textbox(), gr.inputs.Image()],
                     outputs=[gr.outputs.Image(label='Preprocessed image'),
                              gr.outputs.Textbox(label='Extracted Text'),
                              gr.outputs.Label(num_top_classes=3, label='Predicted sentiment')],
                     examples=[('', 'examples/example'+str(i)+'.png') for i in range(1,6)]
                    )
iface.launch(share=True)  # share=True exposes a temporary public URL
| notebook/demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python + QuTiP
# language: python
# name: qutip-env
# ---
# # Optimizing QAOA
#
# Consider a quantum alternating operator ansatz(QAOA) consisting of two types of driving Hamiltonian. We consider the following form of control sequence:
# $$
# U(\{\alpha_i, \beta_i| i=1,\ldots n \}) = e^{i\beta_n h^{(2)}} e^{i\alpha_n h^{(1)}}\cdots e^{i\beta_1 h^{(2)}}e^{i\alpha_1 h^{(1)}}.
# $$
# Suppose we change only one of the parameters. We are interested in the smoothness of the energy landscape. Suppose that the energy landscape can be approximated well by a few Fourier coefficients. That is, there exists a rank-$r$ approximation of the function.
# $$
# \inf_{\{a_i \}} |E(\alpha_i) - \sum_{i=1} a_{j_i} \cos \alpha_i| \leq \epsilon.
# $$
# Let the approximation be $\tilde{E}(\alpha_i)$. Then the global minimum of the approximation is by definition at most $\epsilon$ apart from the global minimum of the original function.
#
# ## Comparison
#
# There are two quantities to consider. The first is the sample complexity: how many samples do we need to find the global minimum up to an error $\epsilon$? The second is the total computation time. What is the actual amount of time to compute the global minimum?
#
# In this note, we compare three different methods, under two assumptions.
#
# ### Assumptions
#
# In order to compare different optimization methods, we will need to make a few nontrivial assumptions.
#
# #### Periodicity
# 1. Periodic case: In certain cases, we are promised that $E(\alpha_i)= E(\alpha_i + \pi)$. In this case, we are only interested in $\alpha_i \in [0,\pi]$.
#
# 2. Aperiodic case: The periodicity assumption will be invalid in general. In this case, we will need to introduce a cutoff for the maximum value of $\alpha_i$. In the experiment, it will make sense to use $\alpha_i \in [0, 1/\tau_d]$, where $\tau_d$ is the coherence time.
#
# #### Continuity
# We will assume that there is a finite Lipschitz constant L:
# $$
# |E(x) - E(y)| \leq L|x-y|.
# $$
#
# #### Convexity
# Of course $E(\alpha_i)$ will not be convex in general, but we can still assume that it is and then compare different methods, at least for comparing gradient-based methods.
#
# #### Sparse Fourier coefficients
# Let us assume that $E(\alpha_i)$ has only $k$ Fourier coefficients.
#
# ### Methods
# 1. Brute-force search: Divide up the interval
#
# 2. Gradient descent: Estimate the gradient at a given point, and then perform gradient descent. Note that there will be a stochastic noise.
#
# 3. Fitting-based method: Measure energy for a few values of $\alpha_i$ and fit to a function with sparse Fourier coefficient.
#
# ### Sample Complexity
#
# Gradient descent algorithm, applied to a convex and differentiable function with a finite Lipschitz constant, converges in time $1/\epsilon$.
#
# 1. If we apply the gradient descent algorithm, we probably need to estimate the gradient with a statistical noise of at least $\epsilon$. So the total sample complexity would scale as $O(\frac{1}{\epsilon^3})$. Actually, because one must use $O(1/\epsilon)$ iterations, a sum of noise over these steps may fluctuate with a prefactor $O(\sqrt{1/\epsilon})$. In order to suppress this contribution up to a $O(\epsilon)$ error, the statistical accuracy for estimating gradient must be $O(\epsilon^{3/2})$. In that case, the total sample complexity would scale as $O(\frac{1}{\epsilon^4})$.
#
#
# 2. Suppose we have a promise that there is a rank-$r$ approximation. Then we have a sample complexity of probably $O(\frac{r}{\epsilon^2})$.
#
# ### Time Estimate
#
# 1. If we estimate the gradient directly, we have $T_{\text{total}} = O(\frac{\tau_q}{\epsilon^3})$, where $\tau_q$ is the time for one-shot measurement. But according to a more conservative analysis given above, the total might scale as $T_{\text{total}} = O(\frac{\tau_q}{\epsilon^4})$.
#
# 2. If we fit the function, then we have $T_{\text{total}} = O(\frac{\tau_q r}{\epsilon^2}) + O(\frac{r\tau_c}{\epsilon})$, where $\tau_c$ is the time for elementary arithmetic operations involving trigonometric functions. Assuming that $\tau_c \leq \tau_q$, the second term becomes negligible. So we see that there is an advantage in fitting the function to some fixed form.
#
import numpy as np
import scipy.linalg as la
from scipy import sparse
import matplotlib.pyplot as plt
import scipy.fftpack as spfft
import cvxpy as cvx
import sklearn.linear_model as lm
# # Experiment
#
# As a test example, suppose we are trying to optimize the following objective function.
# $$
# E(\theta) = \langle \psi| (e^{-i\theta Z})^{\otimes k}O(e^{i\theta Z})^{\otimes k}|\psi \rangle
# $$
# for a random observable $O$ and a random state $| \psi\rangle$. Here $|\psi\rangle$ is chosen uniformly at random over the Hilbert space and $O= U(Z\otimes I \otimes \cdots \otimes I)U^{\dagger}$.
# Number of qubits
n= 8
# Define Z (the Pauli-Z matrix)
Z= np.array([[1,0],[0,-1]])
# +
# Random Unitary 1
# Haar-random unitary via QR decomposition of a complex Gaussian matrix;
# the diagonal phases of R are folded back in to make the distribution uniform.
X = (np.random.randn(2**n, 2**n) + 1j * np.random.randn(2**n, 2**n))/np.sqrt(2)
Q,R = np.linalg.qr(X)
R = np.diag(np.diag(R)/abs(np.diag(R)))
U1 = Q @ R

# Random Unitary 2 (same construction, independent draw)
X = (np.random.randn(2**n, 2**n) + 1j * np.random.randn(2**n, 2**n))/np.sqrt(2)
Q,R = np.linalg.qr(X)
R = np.diag(np.diag(R)/abs(np.diag(R)))
U2 = Q @ R
# +
itt = 50  # number of theta samples over [0, pi)

# Create an observable: Z on the first qubit, identity on the rest
# (conjugation by U2 happens at measurement time below).
I = np.array([[1,0],[0,1]])
ob = Z
for k in range(n-1):
    ob = sparse.kron(ob, I)

# Initialize the record of E(theta) values.
Record = []
for i in range(itt):
    # Create a state |psi> = U1 |0...0>
    psi = np.zeros(2**n)
    psi[0] = 1
    psi = U1 @ psi

    # Create a single-qubit rotation e^{i theta Z}
    theta = np.pi * i / itt
    ctrl_loc = la.expm(1j * theta * Z)

    # Create a control unitary: the rotation applied to every qubit
    ctrl = 1
    for j in range(n):
        ctrl = sparse.kron(ctrl, ctrl_loc)

    # Apply the control
    psi = ctrl @ psi

    # Measure the expectation value (real-valued by Hermiticity of the observable)
    answer = np.real(psi.conj().T @ U2 @ ob @ U2.conj().T @ psi)
    Record.append(answer)
# -
# Inspect the (real part of the) Fourier spectrum of E(theta).
myfft=np.real(np.fft.fft(Record))
plt.plot(myfft)
plt.show()
myfft
# One can see that the number of Fourier coefficient is small, but this was to some extent expected. The number of $e^{i\theta Z}$ term is at most $16$, so the sparsity may be coming from the fact that $16$ is a small number. In order to test how sparse the Fourier coefficients are, we can imagine identifying some of the $\alpha_i$s to be equal to each other.
# +
itt = 100   # theta resolution
layers = 5  # repeated applications of the rotation layer

# Create an observable: Z on the first qubit, identity on the rest.
I = np.array([[1,0],[0,1]])
ob = Z
for k in range(n-1):
    ob = sparse.kron(ob, I)

# Initialize the record. Same sweep as above, but the rotation layer is
# applied `layers` times, probing how the Fourier content grows with depth.
Record = []
for i in range(itt):
    # Create a state
    psi = np.zeros(2**n)
    psi[0] = 1
    psi = U1 @ psi

    # Create a single unitary
    theta = np.pi * i / itt
    ctrl_loc = la.expm(1j * theta * Z)

    # Create a control unitary
    ctrl = 1
    for j in range(n):
        ctrl = sparse.kron(ctrl, ctrl_loc)

    # Create an ansatz: apply ctrl `layers` times, then undo U2
    psi_ansatz = psi
    for j in range(layers):
        psi_ansatz = ctrl @ psi_ansatz
    psi_ansatz = U2.conj().T @ psi_ansatz

    # Measure
    answer = np.real(psi_ansatz.conj().T @ ob @ psi_ansatz)
    Record.append(answer)
# -
plt.plot(Record)
# ## Complexified CVX
def DFT_matrix(N):
    """Return the N x N unnormalized DFT matrix: W[j, k] = omega**(j*k), omega = exp(-2*pi*i/N)."""
    root = np.exp(-2 * np.pi * 1J / N)   # primitive N-th root of unity
    j, k = np.meshgrid(np.arange(N), np.arange(N))
    # Vandermonde structure: each entry is the root raised to j*k.
    return np.power(root, j * k)
dft_mat = DFT_matrix(itt)
ift_mat = np.conj(dft_mat)/itt  # inverse DFT matrix (conjugate / N)
rec_ft = dft_mat@Record         # Fourier coefficients of the sampled landscape

# Complexifying the results, i.e. v -> [Re(v), Im(v)], so a real-valued
# solver can treat real and imaginary parts as separate coordinates.
comp_ift_mat = np.block([[np.real(ift_mat),-np.imag(ift_mat)],[np.imag(ift_mat),np.real(ift_mat)]])
comp_record = np.block([np.real(Record),np.imag(Record)])
comp_record_ft = np.block([np.real(rec_ft),np.imag(rec_ft)])
def reconstruct_sparse_complex(m_vecs,y_vals,verb=False):
    """L1-minimization (basis pursuit) recovery of a sparse complex spectrum.

    Args:
        m_vecs : (m, 2*itt) real-ified measurement rows (Re/Im stacked)
        y_vals : (m,) sampled values
        verb   : pass verbose=True to the cvxpy solver

    Returns:
        Complex itt-vector reassembled from the two halves of the real solution.
    """
    vx = cvx.Variable(2*itt)  # NOTE(review): relies on the notebook-global `itt`
    objective = cvx.Minimize(cvx.norm(vx, 1))
    # NOTE(review): `m_vecs*vx` is the old cvxpy matrix-multiply syntax;
    # newer cvxpy versions require `m_vecs @ vx`.
    constraints = [m_vecs*vx == y_vals]
    prob = cvx.Problem(objective, constraints)
    result = prob.solve(verbose=verb)
    return np.squeeze(np.array(vx.value))[:itt]+1j*np.squeeze(np.array(vx.value))[itt:]

# Compressed-sensing experiment: recover the spectrum from m random samples and
# record the error in Fourier space and in the time domain.
cost_record_complex = []
cost_record_complex_time = []
for m in range(10,itt):
    rand_indx = np.random.choice(np.arange(itt),m,replace=False)
    rand_indx = np.append(rand_indx,itt+rand_indx)  # matching Re/Im row pairs
    recon_results = reconstruct_sparse_complex(comp_ift_mat[rand_indx],comp_record[rand_indx])
    cost_record_complex.append(np.linalg.norm(rec_ft-recon_results))
    cost_record_complex_time.append(np.linalg.norm((ift_mat@recon_results)-np.array(Record)))

plt.plot(np.arange(10,itt),cost_record_complex)
# plt.yscale('log')
# ## Cosine transform
# Discrete cosine transform pair (orthonormal), a real-valued sparse basis.
idct_matrix = spfft.idct(np.identity(itt), norm='ortho', axis=0)
dct_matrix = spfft.dct(np.identity(itt), norm='ortho', axis=0)
rec_ct = dct_matrix@Record  # DCT coefficients of the sampled landscape

def reconstruct_sparse_cosine(m_vecs,y_vals,verb=False):
    """Basis-pursuit recovery of a sparse DCT coefficient vector.

    Args:
        m_vecs : (m, itt) rows of the inverse-DCT matrix at the sampled points
        y_vals : (m,) sampled landscape values
        verb   : pass verbose=True to the cvxpy solver
    """
    vx = cvx.Variable(itt)  # NOTE(review): relies on the notebook-global `itt`
    objective = cvx.Minimize(cvx.norm(vx, 1))
    # NOTE(review): old cvxpy `*` syntax; newer versions need `m_vecs @ vx`.
    constraints = [m_vecs*vx == y_vals]
    prob = cvx.Problem(objective, constraints)
    result = prob.solve(verbose=verb)
    return np.squeeze(np.array(vx.value))

# Recovery error vs number of random samples m.
cost_record_cos = []
cost_record_cos_time = []
for m in range(10,itt):
    rand_indx = np.random.choice(np.arange(itt),m,replace=False)
    recon_results = reconstruct_sparse_cosine(idct_matrix[rand_indx],np.array(Record)[rand_indx])
    cost_record_cos.append(np.linalg.norm(rec_ct-recon_results))
    cost_record_cos_time.append(np.linalg.norm(idct_matrix@recon_results-np.array(Record)))
# +
plt.plot(np.arange(10,itt),cost_record_cos)
# plt.yscale('log')
# -
# Compare the last reconstruction against the true landscape.
plt.plot(idct_matrix@recon_results)
plt.plot(Record)

# Time-domain errors: complex-DFT vs cosine recovery.
plt.plot(np.arange(10,itt),cost_record_complex_time,label='complex')
plt.plot(np.arange(10,itt),cost_record_cos_time,label='cos')
plt.legend()
# ## LASSO
# LASSO (L1-regularized least squares) as an alternative sparse recovery.
cost_record_lasso = []
cost_record_lasso_time = []
for m in range(10,itt):
    rand_indx = np.random.choice(np.arange(itt),m,replace=False)
    # Very small alpha -> weak shrinkage, close to pure basis pursuit.
    clf = lm.Lasso(alpha=0.00001, max_iter=10000, tol=0.0001,fit_intercept=False)
    # clf = lm.LassoLars(alpha=0.001,fit_intercept=False)
    clf.fit(idct_matrix[rand_indx],np.array(Record)[rand_indx])
    cost_record_lasso.append(np.linalg.norm(rec_ct-clf.coef_))
    cost_record_lasso_time.append(np.linalg.norm(idct_matrix@clf.coef_-np.array(Record)))

# plt.plot(cost_record_lasso)
plt.plot(cost_record_lasso_time)

# Last LASSO reconstruction vs the true landscape.
plt.plot(idct_matrix@clf.coef_)
plt.plot(Record)
# ## IHT
# Scratch check of two-condition while-loop semantics (not used elsewhere).
itemp = 0
jtemp = 0
while jtemp<5 and itemp <10:
    itemp+=1
    jtemp+=1
def iterative_ht(m_vecs, y_vals, sparsity, tol=1e-3, max_iter=200, verbose=True):
    """Iterative hard thresholding (IHT) for sparse recovery.

    Repeats a unit-step gradient update followed by hard thresholding to the
    `sparsity` largest-magnitude entries, until the residual norm drops below
    `tol` or `max_iter` iterations elapse.

    :param m_vecs: (m, n) sensing matrix
    :param y_vals: (m,) observations
    :param sparsity: int, number of nonzero entries to keep per iteration
    :param tol: float, residual-norm stopping threshold
    :param max_iter: int, iteration cap
    :param verbose: bool, print a progress bar while iterating
    :return: (n,) recovered sparse vector
    """
    x_vec = np.zeros(m_vecs.shape[1])
    ic = 0
    while np.linalg.norm(y_vals - m_vecs @ x_vec) > tol and ic < max_iter:
        ic += 1
        # gradient step on ||y - Mx||^2 with unit step size
        x_vec = x_vec + m_vecs.T @ (y_vals - m_vecs @ x_vec)
        # hard threshold: zero everything but the `sparsity` largest magnitudes
        x_vec[np.argsort(-np.abs(x_vec))[sparsity:]] = 0
        if verbose:
            # BUG FIX: original used undefined name `i`; `ic` is the loop counter
            # (ic after increment equals the 1-based iteration number).
            workdone = ic / max_iter
            print("\rProgress: [{0:50s}] {1:.1f}%".format('#' * int(workdone * 50), workdone*100), end="", flush=True)
    return x_vec
# IHT sweep over sample counts, mirroring the cvx and LASSO experiments above
cost_record_iht = []
cost_record_iht_time = []
for m in range(10,itt):
    rand_indx = np.random.choice(np.arange(itt),m,replace=False)
    # sparsity=25 — presumably chosen to match the signal's DCT sparsity; confirm
    x_sol = iterative_ht(idct_matrix[rand_indx],np.array(Record)[rand_indx],sparsity = 25,verbose=False)
    cost_record_iht.append(np.linalg.norm(rec_ct-x_sol))
    cost_record_iht_time.append(np.linalg.norm(idct_matrix@x_sol-np.array(Record)))

plt.plot(cost_record_iht)

# Compare the three solvers' time-domain reconstruction errors
plt.plot(np.arange(10,itt),cost_record_cos_time,label='cvx')
plt.plot(np.arange(10,itt),cost_record_lasso_time,label='lasso')
plt.plot(np.arange(10,itt),cost_record_iht_time,label='iht')
plt.legend()
| ceo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Goals:
# - Increase cumulative profit after brokerage
# -
#
# ### Best time to Buy and Sell Stock
# - You have an array for which the ith element is the price of a given stock on day i.
#
#
# - Design an **algorithm to find the maximum profit**. You may complete at most two transactions.
#
#
# - **Note** : You may not engage in multiple transactions at the same time **(i.e. you must sell the stock before you buy again)**
#
#
# **Example 1:**
#
# **Input**: [3,3,5,0,0,3,1,4]
#
# **Output**: 6
#
# **Explanation**: Buy on day 4 (price=0) and sell on day 6 (price=3), profit = 3-0=3.
#
# Then buy on day 7 (price=1) and sell on day 8 (price=4), profit = 4-1=3.
#
# - Only two transactions are allowed at most
| 28_Stock_Market/1_Basic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import sys
from os.path import join as oj
sys.path.insert(1, oj(sys.path[0], 'train_model'))
from train_model import sent_util
import torch
from torchtext import data, datasets
# +
# To train model, first run 'train.py' from train_model dir
# get model
# Load a trained sentiment model from a saved snapshot
# (filename encodes dev accuracy/loss and the iteration it was saved at)
snapshot_dir = 'train_model/results/'
snapshot_file = oj(snapshot_dir,
                   'best_snapshot_devacc_84.9770642201835_devloss_0.5329645872116089_iter_7000_model.pt')
model = sent_util.get_model(snapshot_file)

# get data: vocab fields plus SST train/dev iterators (see sent_util)
inputs, answers, train_iterator, dev_iterator = sent_util.get_sst()
# -
# Find sentence used in figure 2
batch_nums = list(range(6920))
# FIX: renamed from `data` — that name shadowed the `torchtext.data` module
# imported at the top of the notebook.
batches = sent_util.get_batches(batch_nums, train_iterator, dev_iterator)
high_level_comp_ind = None  # index of the target sentence, set below
for ind in range(6919):
    text = batches[ind].text.data[:, 0]
    words = [inputs.vocab.itos[i] for i in text]
    if words[0] == 'it' and words[1] == "'s" and words[2] == 'easy':
        high_level_comp_ind = ind
        break
# NOTE(review): if the sentence is not found, high_level_comp_ind stays None
# and the CD calls below will fail — intended as a hard error.

# Produce CD importance scores for phrases used in figure 2
pos, pos_irrel = sent_util.CD(batches[high_level_comp_ind], model, start = 0, stop = 15)
print(' '.join(words[:16]), pos[0] - pos[1])
neg, neg_irrel = sent_util.CD(batches[high_level_comp_ind], model, start = 16, stop = 26)
print(' '.join(words[16:]), neg[0] - neg[1])

# Sanity check: CD is a decomposition, so an effective way to check for bugs is to verify that the decomposition holds (up to numerical errors)
print(pos + pos_irrel)
linear_bias = model.hidden_to_label.bias.data.cpu().numpy()
print((model(batches[high_level_comp_ind]).data.cpu().numpy() - linear_bias)[0])
| demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Planning Search Agent
#
# Notebook version of the project [Implement a Planning Search](https://github.com/udacity/AIND-Planning) from [Udacity's Artificial Intelligence Nanodegree](https://www.udacity.com/course/artificial-intelligence-nanodegree--nd889) <br>
#
# **Goal**: Solve deterministic logistics planning problems for an Air Cargo transport system using a planning search agent
#
#
# All problems are in the Air Cargo domain. They have the same action schema defined, but different initial states and goals:
#
# ```
# Action(Load(c, p, a),
# PRECOND: At(c, a) ∧ At(p, a) ∧ Cargo(c) ∧ Plane(p) ∧ Airport(a)
# EFFECT: ¬ At(c, a) ∧ In(c, p))
# Action(Unload(c, p, a),
# PRECOND: In(c, p) ∧ At(p, a) ∧ Cargo(c) ∧ Plane(p) ∧ Airport(a)
# EFFECT: At(c, a) ∧ ¬ In(c, p))
# Action(Fly(p, from, to),
# PRECOND: At(p, from) ∧ Plane(p) ∧ Airport(from) ∧ Airport(to)
# EFFECT: ¬ At(p, from) ∧ At(p, to))
# ```
# ## Planning Graph nodes
# +
from planning_agent.aimacode.planning import Action
from planning_agent.aimacode.search import Problem
from planning_agent.aimacode.utils import expr
from planning_agent.lp_utils import decode_state
class PgNode():
    """ Base class for planning graph nodes.

    includes instance sets common to both types of nodes used in a planning graph
    parents: the set of nodes in the previous level
    children: the set of nodes in the subsequent level
    mutex: the set of sibling nodes that are mutually exclusive with this node
    """

    def __init__(self):
        self.parents = set()
        self.children = set()
        self.mutex = set()

    def is_mutex(self, other) -> bool:
        """ Boolean test for mutual exclusion

        :param other: PgNode
            the other node to compare with
        :return: bool
            True if this node and the other are marked mutually exclusive (mutex)
        """
        # Idiom: return the membership test directly instead of if/True/False.
        return other in self.mutex

    def show(self):
        """ helper print for debugging shows counts of parents, children, siblings

        :return:
            print only
        """
        print("{} parents".format(len(self.parents)))
        print("{} children".format(len(self.children)))
        print("{} mutex".format(len(self.mutex)))
class PgNode_s(PgNode):
    """
    A planning graph node representing a state (literal fluent) from a planning
    problem.

    Args:
    ----------
    symbol : str
        A string representing a literal expression from a planning problem
        domain.

    is_pos : bool
        Boolean flag indicating whether the literal expression is positive or
        negative.
    """

    def __init__(self, symbol: str, is_pos: bool):
        """ S-level Planning Graph node constructor

        :param symbol: expr
        :param is_pos: bool

        Instance variables calculated:
            literal: expr
                    fluent in its literal form including negative operator if applicable

        Instance variables inherited from PgNode:
            parents: set of nodes connected to this node in previous A level; initially empty
            children: set of nodes connected to this node in next A level; initially empty
            mutex: set of sibling S-nodes that this node has mutual exclusion with; initially empty
        """
        PgNode.__init__(self)
        self.symbol = symbol
        self.is_pos = is_pos
        self.literal = expr(self.symbol)
        if not self.is_pos:
            self.literal = expr('~{}'.format(self.symbol))

    def show(self):
        """helper print for debugging shows literal plus counts of parents, children, siblings

        :return:
            print only
        """
        print("\n*** {}".format(self.literal))
        PgNode.show(self)

    def __eq__(self, other):
        """equality test for nodes - compares only the literal for equality

        :param other: PgNode_s
        :return: bool
        """
        if isinstance(other, self.__class__):
            return (self.symbol == other.symbol) \
                   and (self.is_pos == other.is_pos)
        # BUG FIX: previously fell through and implicitly returned None for
        # non-PgNode_s operands; NotImplemented lets Python fall back to the
        # reflected comparison / identity test as the data model specifies.
        return NotImplemented

    def __hash__(self):
        return hash(self.symbol) ^ hash(self.is_pos)
class PgNode_a(PgNode):
    """A-type (action) Planning Graph node - inherited from PgNode
    """

    def __init__(self, action: Action):
        """A-level Planning Graph node constructor

        :param action: Action
            a ground action, i.e. this action cannot contain any variables

        Instance variables calculated:
            An A-level will always have an S-level as its parent and an S-level as its child.
            The preconditions and effects will become the parents and children of the A-level node
            However, when this node is created, it is not yet connected to the graph

            prenodes: set of *possible* parent S-nodes
            effnodes: set of *possible* child S-nodes
            is_persistent: bool   True if this is a persistence action, i.e. a no-op action

        Instance variables inherited from PgNode:
            parents: set of nodes connected to this node in previous S level; initially empty
            children: set of nodes connected to this node in next S level; initially empty
            mutex: set of sibling A-nodes that this node has mutual exclusion with; initially empty
        """
        PgNode.__init__(self)
        self.action = action
        self.prenodes = self.precond_s_nodes()
        self.effnodes = self.effect_s_nodes()
        # a persistence (no-op) action carries a literal through unchanged,
        # so its possible parents equal its possible children
        self.is_persistent = False
        if self.prenodes == self.effnodes:
            self.is_persistent = True

    def show(self):
        """helper print for debugging shows action plus counts of parents, children, siblings

        :return:
            print only
        """
        print("\n*** {}{}".format(self.action.name, self.action.args))
        PgNode.show(self)

    def precond_s_nodes(self):
        """precondition literals as S-nodes (represents possible parents for this node).
        It is computationally expensive to call this function; it is only called by the
        class constructor to populate the `prenodes` attribute.

        :return: set of PgNode_s
        """
        nodes = set()
        for p in self.action.precond_pos:
            n = PgNode_s(p, True)
            nodes.add(n)
        for p in self.action.precond_neg:
            n = PgNode_s(p, False)
            nodes.add(n)
        return nodes

    def effect_s_nodes(self):
        """effect literals as S-nodes (represents possible children for this node).
        It is computationally expensive to call this function; it is only called by the
        class constructor to populate the `effnodes` attribute.

        :return: set of PgNode_s
        """
        nodes = set()
        for e in self.action.effect_add:
            n = PgNode_s(e, True)
            nodes.add(n)
        for e in self.action.effect_rem:
            n = PgNode_s(e, False)
            nodes.add(n)
        return nodes

    def __eq__(self, other):
        """equality test for nodes - compares only the action name for equality

        :param other: PgNode_a
        :return: bool
        """
        if isinstance(other, self.__class__):
            return (self.action.name == other.action.name) \
                   and (self.action.args == other.action.args)
        # BUG FIX: previously fell through and implicitly returned None for
        # non-PgNode_a operands; NotImplemented is the data-model-correct value.
        return NotImplemented

    def __hash__(self):
        # NOTE(review): assumes self.action.args is hashable (a tuple in the
        # aimacode Action class) — confirm against that implementation.
        return hash(self.action.name) ^ hash(self.action.args)
# -
# ## Planning Graph
# +
def mutexify(node1: PgNode, node2: PgNode):
    """ adds sibling nodes to each other's mutual exclusion (mutex) set. These should be sibling nodes!

    :param node1: PgNode (or inherited PgNode_a, PgNode_s types)
    :param node2: PgNode (or inherited PgNode_a, PgNode_s types)
    :return:
        node mutex sets modified
    """
    # only nodes of the same level type may be mutexed with each other
    if type(node1) != type(node2):
        raise TypeError('Attempted to mutex two nodes of different types')
    # record the exclusion symmetrically on both nodes
    for a, b in ((node1, node2), (node2, node1)):
        a.mutex.add(b)
class PlanningGraph():
    """
    A planning graph as described in chapter 10 of the AIMA text. The planning
    graph can be used to reason about
    """

    def __init__(self, problem: Problem, state: str, serial_planning=True):
        """
        :param problem: PlanningProblem (or subclass such as AirCargoProblem or HaveCakeProblem)
        :param state: str (will be in form TFTTFF... representing fluent states)
        :param serial_planning: bool (whether or not to assume that only one action can occur at a time)

        Instance variable calculated:
            fs: FluentState
                the state represented as positive and negative fluent literal lists
            all_actions: list of the PlanningProblem valid ground actions combined with calculated no-op actions
            s_levels: list of sets of PgNode_s, where each set in the list represents an S-level in the planning graph
            a_levels: list of sets of PgNode_a, where each set in the list represents an A-level in the planning graph
        """
        self.problem = problem
        self.fs = decode_state(state, problem.state_map)
        self.serial = serial_planning
        self.all_actions = self.problem.actions_list + self.noop_actions(self.problem.state_map)
        self.s_levels = []
        self.a_levels = []
        self.create_graph()

    def noop_actions(self, literal_list):
        """create persistent action for each possible fluent

        "No-Op" actions are virtual actions (i.e., actions that only exist in
        the planning graph, not in the planning problem domain) that operate
        on each fluent (literal expression) from the problem domain. No op
        actions "pass through" the literal expressions from one level of the
        planning graph to the next.

        The no-op action list requires both a positive and a negative action
        for each literal expression. Positive no-op actions require the literal
        as a positive precondition and add the literal expression as an effect
        in the output, and negative no-op actions require the literal as a
        negative precondition and remove the literal expression as an effect in
        the output.

        This function should only be called by the class constructor.

        :param literal_list:
        :return: list of Action
        """
        action_list = []
        for fluent in literal_list:
            act1 = Action(expr("Noop_pos({})".format(fluent)), ([fluent], []), ([fluent], []))
            action_list.append(act1)
            act2 = Action(expr("Noop_neg({})".format(fluent)), ([], [fluent]), ([], [fluent]))
            action_list.append(act2)
        return action_list

    def create_graph(self):
        """ build a Planning Graph as described in Russell-Norvig 3rd Ed 10.3 or 2nd Ed 11.4

        The S0 initial level has been implemented for you.  It has no parents and includes all of
        the literal fluents that are part of the initial state passed to the constructor.  At the start
        of a problem planning search, this will be the same as the initial state of the problem.  However,
        the planning graph can be built from any state in the Planning Problem

        This function should only be called by the class constructor.

        :return:
            builds the graph by filling s_levels[] and a_levels[] lists with node sets for each level
        """
        # the graph should only be built during class construction
        if (len(self.s_levels) != 0) or (len(self.a_levels) != 0):
            raise Exception(
                'Planning Graph already created; construct a new planning graph for each new state in the planning sequence')

        # initialize S0 to literals in initial state provided.
        leveled = False
        level = 0
        self.s_levels.append(set())  # S0 set of s_nodes - empty to start
        # for each fluent in the initial state, add the correct literal PgNode_s
        for literal in self.fs.pos:
            self.s_levels[level].add(PgNode_s(literal, True))
        for literal in self.fs.neg:
            self.s_levels[level].add(PgNode_s(literal, False))
        # no mutexes at the first level

        # continue to build the graph alternating A, S levels until last two S levels contain the same literals,
        # i.e. until it is "leveled"
        while not leveled:
            self.add_action_level(level)
            self.update_a_mutex(self.a_levels[level])

            level += 1
            self.add_literal_level(level)
            self.update_s_mutex(self.s_levels[level])

            if self.s_levels[level] == self.s_levels[level - 1]:
                leveled = True

    def add_action_level(self, level):
        """ add an A (action) level to the Planning Graph

        :param level: int
            the level number alternates S0, A0, S1, A1, S2, .... etc the level number is also used as the
            index for the node set lists self.a_levels[] and self.s_levels[]
        :return:
            adds A nodes to the current level in self.a_levels[level]
        """
        self.a_levels.append(set())  # set of a_nodes
        for a in self.all_actions:
            a_node = PgNode_a(a)
            if set(a_node.prenodes).issubset(set(self.s_levels[level])):  # True: Valid A node
                for s_node in self.s_levels[level]:
                    if s_node in a_node.prenodes:  # search for the right parents
                        a_node.parents.add(s_node)
                        s_node.children.add(a_node)
                self.a_levels[level].add(a_node)

    def add_literal_level(self, level):
        """ add an S (literal) level to the Planning Graph

        :param level: int
            the level number alternates S0, A0, S1, A1, S2, .... etc the level number is also used as the
            index for the node set lists self.a_levels[] and self.s_levels[]
        :return:
            adds S nodes to the current level in self.s_levels[level]
        """
        self.s_levels.append(set())  # set of s_nodes
        for a in self.a_levels[level-1]:
            for s_node in a.effnodes:  # Valid S nodes
                a.children.add(s_node)
                s_node.parents.add(a)
                self.s_levels[level].add(s_node)

    def update_a_mutex(self, nodeset):
        """ Determine and update sibling mutual exclusion for A-level nodes

        Mutex action tests section from 3rd Ed. 10.3 or 2nd Ed. 11.4
        A mutex relation holds between two actions a given level
        if the planning graph is a serial planning graph and the pair are nonpersistence actions
        or if any of the three conditions hold between the pair:
           Inconsistent Effects
           Interference
           Competing needs

        :param nodeset: set of PgNode_a (siblings in the same level)
        :return:
            mutex set in each PgNode_a in the set is appropriately updated
        """
        nodelist = list(nodeset)
        for i, n1 in enumerate(nodelist[:-1]):
            for n2 in nodelist[i + 1:]:
                if (self.serialize_actions(n1, n2) or
                        self.inconsistent_effects_mutex(n1, n2) or
                        self.interference_mutex(n1, n2) or
                        self.competing_needs_mutex(n1, n2)):
                    mutexify(n1, n2)

    def serialize_actions(self, node_a1: PgNode_a, node_a2: PgNode_a) -> bool:
        """
        Test a pair of actions for mutual exclusion, returning True if the
        planning graph is serial, and if either action is persistent; otherwise
        return False.  Two serial actions are mutually exclusive if they are
        both non-persistent.

        :param node_a1: PgNode_a
        :param node_a2: PgNode_a
        :return: bool
        """
        #
        if not self.serial:
            return False
        if node_a1.is_persistent or node_a2.is_persistent:
            return False
        return True

    def inconsistent_effects_mutex(self, node_a1: PgNode_a, node_a2: PgNode_a) -> bool:
        """
        Test a pair of actions for inconsistent effects, returning True if
        one action negates an effect of the other, and False otherwise.

        HINT: The Action instance associated with an action node is accessible
        through the PgNode_a.action attribute. See the Action class
        documentation for details on accessing the effects and preconditions of
        an action.

        :param node_a1: PgNode_a
        :param node_a2: PgNode_a
        :return: bool
        """
        # Create 1 set with all the adding effects and 1 set with all the removing effects.
        # (a single action cannot result in inconsistent effects)
        # If the intersection (&) of the two sets is not empty, then at least one effect negates another
        effects_add = node_a1.action.effect_add + node_a2.action.effect_add
        effects_rem = node_a1.action.effect_rem + node_a2.action.effect_rem
        return bool(set(effects_add) & set(effects_rem))

    def interference_mutex(self, node_a1: PgNode_a, node_a2: PgNode_a) -> bool:
        """
        Test a pair of actions for mutual exclusion, returning True if the
        effect of one action is the negation of a precondition of the other.

        HINT: The Action instance associated with an action node is accessible
        through the PgNode_a.action attribute. See the Action class
        documentation for details on accessing the effects and preconditions of
        an action.

        :param node_a1: PgNode_a
        :param node_a2: PgNode_a
        :return: bool
        """
        # Similar implementation of inconsistent_effects_mutex but crossing the adding/removing effect of each action
        # with the negative/positive precondition of the other.
        # 4 sets are used for 2 separated intersections. The intersection of 2 large sets (pos_add and neg_rem) would
        # also result True for inconsistent_effects
        cross_pos = node_a1.action.effect_add + node_a2.action.precond_pos
        cross_neg = node_a1.action.precond_neg + node_a2.action.effect_rem
        cross_pos2 = node_a2.action.effect_add + node_a1.action.precond_pos
        cross_neg2 = node_a2.action.precond_neg + node_a1.action.effect_rem
        return bool(set(cross_pos) & set(cross_neg)) or bool(set(cross_pos2) & set(cross_neg2))

    def competing_needs_mutex(self, node_a1: PgNode_a, node_a2: PgNode_a) -> bool:
        """
        Test a pair of actions for mutual exclusion, returning True if one of
        the precondition of one action is mutex with a precondition of the
        other action.

        :param node_a1: PgNode_a
        :param node_a2: PgNode_a
        :return: bool
        """
        # Create a list with the parents of one action node that are mutually exclusive with the parents of the other
        # and return True if the list is not empty
        mutex = [i for i in node_a1.parents for j in node_a2.parents if i.is_mutex(j)]
        return bool(mutex)

    def update_s_mutex(self, nodeset: set):
        """ Determine and update sibling mutual exclusion for S-level nodes

        Mutex action tests section from 3rd Ed. 10.3 or 2nd Ed. 11.4
        A mutex relation holds between literals at a given level
        if either of the two conditions hold between the pair:
           Negation
           Inconsistent support

        :param nodeset: set of PgNode_a (siblings in the same level)
        :return:
            mutex set in each PgNode_a in the set is appropriately updated
        """
        nodelist = list(nodeset)
        for i, n1 in enumerate(nodelist[:-1]):
            for n2 in nodelist[i + 1:]:
                if self.negation_mutex(n1, n2) or self.inconsistent_support_mutex(n1, n2):
                    mutexify(n1, n2)

    def negation_mutex(self, node_s1: PgNode_s, node_s2: PgNode_s) -> bool:
        """
        Test a pair of state literals for mutual exclusion, returning True if
        one node is the negation of the other, and False otherwise.

        HINT: Look at the PgNode_s.__eq__ defines the notion of equivalence for
        literal expression nodes, and the class tracks whether the literal is
        positive or negative.

        :param node_s1: PgNode_s
        :param node_s2: PgNode_s
        :return: bool
        """
        # Mutual exclusive nodes have the same 'symbol' and different 'is_pos' attributes
        return (node_s1.symbol == node_s2.symbol) and (node_s1.is_pos != node_s2.is_pos)

    def inconsistent_support_mutex(self, node_s1: PgNode_s, node_s2: PgNode_s):
        """
        Test a pair of state literals for mutual exclusion, returning True if
        there are no actions that could achieve the two literals at the same
        time, and False otherwise.  In other words, the two literal nodes are
        mutex if all of the actions that could achieve the first literal node
        are pairwise mutually exclusive with all of the actions that could
        achieve the second literal node.

        HINT: The PgNode.is_mutex method can be used to test whether two nodes
        are mutually exclusive.

        :param node_s1: PgNode_s
        :param node_s2: PgNode_s
        :return: bool
        """
        # Get a list with the parents of one node that are not mutually exclusive with at least one parent of the other
        # Here the inconsistent is detected if the list is empty (none of the actions can lead to these pair of nodes at
        # the same time)
        compatible_parents_s1 = [a for a in node_s1.parents for b in node_s2.parents if not a.is_mutex(b)]
        return not bool(compatible_parents_s1)

    def h_levelsum(self) -> int:
        """The sum of the level costs of the individual goals (admissible if goals independent)

        :return: int
        """
        level_sum = 0
        # for each goal in the problem, determine the level cost, then add them together
        remaining_goals = set(self.problem.goal)  # remaining goals to find to determine the level cost
        # Search for all the goals simultaneously from level 0
        # BUG FIX: the range previously ran to len(self.s_levels) + 1, which
        # indexed one past the last S-level and raised IndexError (instead of
        # the intended "Goal not found" Exception) when a goal is unreachable.
        for level in range(len(self.s_levels)):
            literals = set([node.literal for node in self.s_levels[level]])  # literals found in the current level
            match = literals & remaining_goals  # set of goals found in literals (empty set if none)
            level_sum += len(match)*level  # add cost of the found goals (0 if none)
            remaining_goals -= match  # remove found goals from the remaining goals
            if not remaining_goals:  # return when all goals are found
                return level_sum
        raise Exception("Goal not found")
# -
# ## Air Cargo Problem
# +
from planning_agent.aimacode.logic import PropKB
from planning_agent.aimacode.planning import Action
from planning_agent.aimacode.search import Node, Problem
from planning_agent.aimacode.utils import expr
from planning_agent.lp_utils import FluentState, encode_state, decode_state
class AirCargoProblem(Problem):
    def __init__(self, cargos, planes, airports, initial: FluentState, goal: list):
        """
        :param cargos: list of str
            cargos in the problem
        :param planes: list of str
            planes in the problem
        :param airports: list of str
            airports in the problem
        :param initial: FluentState object
            positive and negative literal fluents (as expr) describing initial state
        :param goal: list of expr
            literal fluents required for goal test
        """
        self.state_map = initial.pos + initial.neg
        self.initial_state_TF = encode_state(initial, self.state_map)
        Problem.__init__(self, self.initial_state_TF, goal=goal)
        self.cargos = cargos
        self.planes = planes
        self.airports = airports
        self.actions_list = self.get_actions()

    def get_actions(self):
        """
        This method creates concrete actions (no variables) for all actions in the problem
        domain action schema and turns them into complete Action objects as defined in the
        aimacode.planning module. It is computationally expensive to call this method directly;
        however, it is called in the constructor and the results cached in the `actions_list` property.

        Returns:
        ----------
        list<Action>
            list of Action objects
        """

        def load_actions():
            """Create all concrete Load actions and return a list

            :return: list of Action objects
            """
            loads = []
            for c in self.cargos:
                for p in self.planes:
                    for a in self.airports:
                        precond_pos = [expr("At({}, {})".format(c, a)),
                                       expr("At({}, {})".format(p, a))]
                        precond_neg = []
                        effect_add = [expr("In({}, {})".format(c, p))]
                        effect_rem = [expr("At({}, {})".format(c, a))]
                        load = Action(expr("Load({}, {}, {})".format(c, p, a)),
                                      [precond_pos, precond_neg],
                                      [effect_add, effect_rem])
                        loads.append(load)
            return loads

        def unload_actions():
            """Create all concrete Unload actions and return a list

            :return: list of Action objects
            """
            unloads = []
            for c in self.cargos:
                for p in self.planes:
                    for a in self.airports:
                        precond_pos = [expr("In({}, {})".format(c, p)),
                                       expr("At({}, {})".format(p, a))]
                        precond_neg = []
                        effect_add = [expr("At({}, {})".format(c, a))]
                        effect_rem = [expr("In({}, {})".format(c, p))]
                        unload = Action(expr("Unload({}, {}, {})".format(c, p, a)),
                                        [precond_pos, precond_neg],
                                        [effect_add, effect_rem])
                        unloads.append(unload)
            return unloads

        def fly_actions():
            """Create all concrete Fly actions and return a list

            :return: list of Action objects
            """
            flys = []
            for fr in self.airports:
                for to in self.airports:
                    if fr != to:
                        for p in self.planes:
                            precond_pos = [expr("At({}, {})".format(p, fr)),
                                           ]
                            precond_neg = []
                            effect_add = [expr("At({}, {})".format(p, to))]
                            effect_rem = [expr("At({}, {})".format(p, fr))]
                            fly = Action(expr("Fly({}, {}, {})".format(p, fr, to)),
                                         [precond_pos, precond_neg],
                                         [effect_add, effect_rem])
                            flys.append(fly)
            return flys

        return load_actions() + unload_actions() + fly_actions()

    def actions(self, state: str) -> list:
        """ Return the actions that can be executed in the given state.

        :param state: str
            state represented as T/F string of mapped fluents (state variables)
            e.g. 'FTTTFF'
        :return: list of Action objects
        """
        possible_actions = []
        kb = PropKB()
        kb.tell(decode_state(state, self.state_map).pos_sentence())
        for action in self.actions_list:
            is_possible = True
            for clause in action.precond_pos:
                if clause not in kb.clauses:
                    is_possible = False
                    break  # PERF: one unmet positive precondition rules the action out
            if is_possible:
                for clause in action.precond_neg:
                    if clause in kb.clauses:
                        is_possible = False
                        break  # PERF: a present negative precondition rules the action out
            if is_possible:
                possible_actions.append(action)
        return possible_actions

    def result(self, state: str, action: Action):
        """ Return the state that results from executing the given
        action in the given state. The action must be one of
        self.actions(state).

        :param state: state entering node
        :param action: Action applied
        :return: resulting state after action
        """
        new_state = FluentState([], [])
        # Used the same implementation as cake example:
        old_state = decode_state(state, self.state_map)
        for fluent in old_state.pos:
            if fluent not in action.effect_rem:
                new_state.pos.append(fluent)
        for fluent in action.effect_add:
            if fluent not in new_state.pos:
                new_state.pos.append(fluent)
        for fluent in old_state.neg:
            if fluent not in action.effect_add:
                new_state.neg.append(fluent)
        for fluent in action.effect_rem:
            if fluent not in new_state.neg:
                new_state.neg.append(fluent)
        return encode_state(new_state, self.state_map)

    def goal_test(self, state: str) -> bool:
        """ Test the state to see if goal is reached

        :param state: str representing state
        :return: bool
        """
        kb = PropKB()
        kb.tell(decode_state(state, self.state_map).pos_sentence())
        for clause in self.goal:
            if clause not in kb.clauses:
                return False
        return True

    def h_1(self, node: Node):
        # note that this is not a true heuristic
        h_const = 1
        return h_const

    def h_pg_levelsum(self, node: Node):
        """
        This heuristic uses a planning graph representation of the problem
        state space to estimate the sum of all actions that must be carried
        out from the current state in order to satisfy each individual goal
        condition.
        """
        # requires implemented PlanningGraph class
        pg = PlanningGraph(self, node.state)
        pg_levelsum = pg.h_levelsum()
        return pg_levelsum

    def h_ignore_preconditions(self, node: Node):
        """
        This heuristic estimates the minimum number of actions that must be
        carried out from the current state in order to satisfy all of the goal
        conditions by ignoring the preconditions required for an action to be
        executed.
        """
        # Note: We assume that the number of steps required to solve the relaxed ignore preconditions problem
        # is equal to the number of unsatisfied goals.
        # Thus no action results in multiple goals and no action undoes the effects of other actions
        kb = PropKB()
        kb.tell(decode_state(node.state, self.state_map).pos_sentence())
        # Unsatisfied goals are the ones not found in the clauses of PropKB() for the current state
        count = len(set(self.goal) - set(kb.clauses))
        # print("Current_state: ", kb.clauses, " Goal state: ", self.goal)
        return count
# -
# ## Scenarios
# +
def air_cargo_p1() -> AirCargoProblem:
    """Build Air Cargo problem 1: 2 cargos, 2 planes, 2 airports."""
    cargos = ['C1', 'C2']
    planes = ['P1', 'P2']
    airports = ['JFK', 'SFO']
    # fluent strings are converted to expr in list order (order fixes state_map)
    positive = ['At(C1, SFO)', 'At(C2, JFK)', 'At(P1, SFO)', 'At(P2, JFK)']
    negative = ['At(C2, SFO)', 'In(C2, P1)', 'In(C2, P2)',
                'At(C1, JFK)', 'In(C1, P1)', 'In(C1, P2)',
                'At(P1, JFK)', 'At(P2, SFO)']
    init = FluentState([expr(s) for s in positive], [expr(s) for s in negative])
    goal = [expr(s) for s in ['At(C1, JFK)', 'At(C2, SFO)']]
    return AirCargoProblem(cargos, planes, airports, init, goal)
def air_cargo_p2() -> AirCargoProblem:
    """Build Air Cargo problem 2: 3 cargos, 3 planes, 3 airports."""
    cargos = ['C1', 'C2', 'C3']
    planes = ['P1', 'P2', 'P3']
    airports = ['SFO', 'JFK', 'ATL']
    # fluent strings are converted to expr in list order (order fixes state_map)
    positive = ['At(C1, SFO)', 'At(C2, JFK)', 'At(C3, ATL)',
                'At(P1, SFO)', 'At(P2, JFK)', 'At(P3, ATL)']
    negative = ['At(C1, JFK)', 'At(C1, ATL)',
                'At(C2, SFO)', 'At(C2, ATL)',
                'At(C3, SFO)', 'At(C3, JFK)',
                'In(C1, P1)', 'In(C1, P2)', 'In(C1, P3)',
                'In(C2, P1)', 'In(C2, P2)', 'In(C2, P3)',
                'In(C3, P1)', 'In(C3, P2)', 'In(C3, P3)',
                'At(P1, JFK)', 'At(P1, ATL)',
                'At(P2, SFO)', 'At(P2, ATL)',
                'At(P3, SFO)', 'At(P3, JFK)']
    init = FluentState([expr(s) for s in positive], [expr(s) for s in negative])
    goal = [expr(s) for s in ['At(C1, JFK)', 'At(C2, SFO)', 'At(C3, SFO)']]
    return AirCargoProblem(cargos, planes, airports, init, goal)
def air_cargo_p3() -> AirCargoProblem:
    """Build Air Cargo problem 3: 4 cargos, 2 planes, 4 airports."""
    cargos = ['C1', 'C2', 'C3', 'C4']
    planes = ['P1', 'P2']
    airports = ['SFO', 'JFK', 'ATL', 'ORD']
    # fluent strings are converted to expr in list order (order fixes state_map)
    positive = ['At(C1, SFO)', 'At(C2, JFK)', 'At(C3, ATL)', 'At(C4, ORD)',
                'At(P1, SFO)', 'At(P2, JFK)']
    negative = ['At(C1, JFK)', 'At(C1, ATL)', 'At(C1, ORD)',
                'At(C2, SFO)', 'At(C2, ATL)', 'At(C2, ORD)',
                'At(C3, JFK)', 'At(C3, SFO)', 'At(C3, ORD)',
                'At(C4, JFK)', 'At(C4, SFO)', 'At(C4, ATL)',
                'In(C1, P1)', 'In(C1, P2)',
                'In(C2, P1)', 'In(C2, P2)',
                'In(C3, P1)', 'In(C3, P2)',
                'In(C4, P1)', 'In(C4, P2)',
                'At(P1, JFK)', 'At(P1, ATL)', 'At(P1, ORD)',
                'At(P2, SFO)', 'At(P2, ATL)', 'At(P2, ORD)']
    init = FluentState([expr(s) for s in positive], [expr(s) for s in negative])
    goal = [expr(s) for s in ['At(C1, JFK)', 'At(C2, SFO)', 'At(C3, JFK)', 'At(C4, SFO)']]
    return AirCargoProblem(cargos, planes, airports, init, goal)
# -
# - Problem 1 initial state and goal:
# ```
# Init(At(C1, SFO) ∧ At(C2, JFK)
# ∧ At(P1, SFO) ∧ At(P2, JFK)
# ∧ Cargo(C1) ∧ Cargo(C2)
# ∧ Plane(P1) ∧ Plane(P2)
# ∧ Airport(JFK) ∧ Airport(SFO))
# Goal(At(C1, JFK) ∧ At(C2, SFO))
# ```
# - Problem 2 initial state and goal:
# ```
# Init(At(C1, SFO) ∧ At(C2, JFK) ∧ At(C3, ATL)
# ∧ At(P1, SFO) ∧ At(P2, JFK) ∧ At(P3, ATL)
# ∧ Cargo(C1) ∧ Cargo(C2) ∧ Cargo(C3)
# ∧ Plane(P1) ∧ Plane(P2) ∧ Plane(P3)
# ∧ Airport(JFK) ∧ Airport(SFO) ∧ Airport(ATL))
# Goal(At(C1, JFK) ∧ At(C2, SFO) ∧ At(C3, SFO))
# ```
# - Problem 3 initial state and goal:
# ```
# Init(At(C1, SFO) ∧ At(C2, JFK) ∧ At(C3, ATL) ∧ At(C4, ORD)
# ∧ At(P1, SFO) ∧ At(P2, JFK)
# ∧ Cargo(C1) ∧ Cargo(C2) ∧ Cargo(C3) ∧ Cargo(C4)
# ∧ Plane(P1) ∧ Plane(P2)
# ∧ Airport(JFK) ∧ Airport(SFO) ∧ Airport(ATL) ∧ Airport(ORD))
# Goal(At(C1, JFK) ∧ At(C3, JFK) ∧ At(C2, SFO) ∧ At(C4, SFO))
# ```
# ## Solving the problem
# +
import argparse
from timeit import default_timer as timer
from planning_agent.aimacode.search import InstrumentedProblem
from planning_agent.aimacode.search import (breadth_first_search, astar_search,
breadth_first_tree_search, depth_first_graph_search, uniform_cost_search,
greedy_best_first_graph_search, depth_limited_search,
recursive_best_first_search)
# Menu of (display name, problem factory) pairs selectable by 1-based index.
PROBLEMS = [["Air Cargo Problem 1", air_cargo_p1],
            ["Air Cargo Problem 2", air_cargo_p2],
            ["Air Cargo Problem 3", air_cargo_p3]]
# Menu of (display name, search function, heuristic attribute name) triples.
# The third field is "" for uninformed searches; otherwise it names a
# heuristic method looked up on the problem instance via getattr.
SEARCHES = [["breadth_first_search", breadth_first_search, ""],
            ['breadth_first_tree_search', breadth_first_tree_search, ""],
            ['depth_first_graph_search', depth_first_graph_search, ""],
            ['depth_limited_search', depth_limited_search, ""],
            ['uniform_cost_search', uniform_cost_search, ""],
            ['recursive_best_first_search', recursive_best_first_search, 'h_1'],
            ['greedy_best_first_graph_search', greedy_best_first_graph_search, 'h_1'],
            ['astar_search', astar_search, 'h_1'],
            ['astar_search', astar_search, 'h_ignore_preconditions'],
            ['astar_search', astar_search, 'h_pg_levelsum'],
            ]
class PrintableProblem(InstrumentedProblem):
    """InstrumentedProblem subclass whose repr renders the collected search
    statistics (expansions, goal tests, new nodes) as one fixed-width table
    row for the air cargo report output."""

    def __repr__(self):
        # Each counter is centered in a 10-character column.
        return f'{self.succs:^10d} {self.goal_tests:^10d} {self.states:^10d}'
def show_solution(node, elapsed_time):
    """Print a summary line (plan length + elapsed seconds) followed by one
    line per action of the solution plan.

    node: goal search node; its .solution() yields the action sequence.
    elapsed_time: wall-clock seconds spent by the search, reported verbatim.
    """
    plan = node.solution()
    print("Plan length: {} Time elapsed in seconds: {}".format(len(plan), elapsed_time))
    for step in plan:
        print("{}{}".format(step.name, step.args))
def run_search(problem, search_function, parameter=None):
    """Time one search on *problem*, then print instrumentation statistics
    and the resulting plan.

    parameter: optional extra argument (e.g. a bound heuristic method)
    forwarded to the search function; omitted from the call when None.
    """
    start = timer()
    instrumented = PrintableProblem(problem)
    node = (search_function(instrumented) if parameter is None
            else search_function(instrumented, parameter))
    elapsed = timer() - start
    print("\nExpansions Goal Tests New Nodes")
    print("{}\n".format(instrumented))
    show_solution(node, elapsed)
    print()
def main(p_choices, s_choices):
    """Run every selected search on every selected problem.

    p_choices / s_choices: iterables of 1-based indices (ints or digit
    strings) into the PROBLEMS and SEARCHES menus.
    """
    problems = [PROBLEMS[int(choice) - 1] for choice in p_choices]
    searches = [SEARCHES[int(choice) - 1] for choice in s_choices]
    for pname, make_problem in problems:
        for sname, search_fn, heuristic in searches:
            # Empty heuristic name means an uninformed search.
            suffix = " with {}".format(heuristic) if heuristic else ""
            print("\nSolving {} using {}{}...".format(pname, sname, suffix))
            problem_instance = make_problem()
            h_arg = getattr(problem_instance, heuristic) if heuristic else None
            run_search(problem_instance, search_fn, h_arg)
# Entry point: solve problems 1-3 with breadth_first_search (index 1) and
# A* with the levelsum heuristic (index 9).
if __name__=="__main__":
    main([1,2,3],[1,9])
| 3_planning_agent.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Plot style
sns.set()
# %pylab inline
# NOTE(review): `pylab` below is injected by the %pylab magic above, so this
# line only works inside IPython/Jupyter, not when run as a plain script.
pylab.rcParams['figure.figsize'] = (4, 4)
# Avoid inaccurate floating values (for inverse matrices in dot product for instance)
# See https://stackoverflow.com/questions/24537791/numpy-matrix-inversion-rounding-errors
np.set_printoptions(suppress=True)
# + language="html"
# <style>
# .pquote {
# text-align: left;
# margin: 40px 0 40px auto;
# width: 70%;
# font-size: 1.5em;
# font-style: italic;
# display: block;
# line-height: 1.3em;
# color: #5a75a7;
# font-weight: 600;
# border-left: 5px solid rgba(90, 117, 167, .1);
# padding-left: 6px;
# }
# .notes {
# font-style: italic;
# display: block;
# margin: 40px 10%;
# }
# img + em {
# text-align: center;
# display: block;
# color: gray;
# font-size: 0.9em;
# font-weight: 600;
# }
# </style>
# -
def plotVectors(vecs, cols, alpha=1):
    """
    Plot a set of 2-D vectors as arrows anchored at the origin.

    Parameters
    ----------
    vecs : array-like
        Coordinates of the vectors to plot, one vector per entry. For
        instance: [[1, 3], [2, 2]] can be used to plot 2 vectors.
    cols : array-like
        Colors of the vectors. For instance: ['red', 'blue'] will display the
        first vector in red and the second in blue.
    alpha : float
        Opacity of vectors
    """
    plt.figure()
    # Light gray axes through the origin, drawn behind the arrows (zorder=0).
    plt.axvline(x=0, color='#A9A9A9', zorder=0)
    plt.axhline(y=0, color='#A9A9A9', zorder=0)
    for i, vec in enumerate(vecs):
        # quiver takes (x_start, y_start, dx, dy); every arrow starts at (0, 0).
        arrow = np.concatenate([[0, 0], vec])
        plt.quiver([arrow[0]],
                   [arrow[1]],
                   [arrow[2]],
                   [arrow[3]],
                   angles='xy', scale_units='xy', scale=1, color=cols[i],
                   alpha=alpha)
# $$
# \newcommand\bs[1]{\boldsymbol{#1}}
# $$
# <span class='notes'>
# This content is part of a series following the chapter 2 on linear algebra from the [Deep Learning Book](http://www.deeplearningbook.org/) by <NAME>., <NAME>., and <NAME>. (2016). It aims to provide intuitions/drawings/python code on mathematical theories and is constructed as my understanding of these concepts. You can check the syllabus in the [introduction post](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-Introduction/).
# </span>
# # Introduction
#
# This chapter is quite heavy by its size and its content but I did what I could to make it more intuitive and visual. We will see how to represent systems of equations graphically, how to interpret the number of solutions of a system, what is linear combination and more. As usual, we will use Numpy/Matplotlib as a tool to experiment these concepts and hopefully gain a more concrete understanding.
# # 2.4 Linear Dependence and Span
#
# Since it is all about systems of linear equations, let's start again with the set of equations:
#
# $$\bs{Ax}=\bs{b}$$
#
# We saw in [2.2](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-2.2-Multiplying-Matrices-and-Vectors/) that this system corresponds to:
#
# $$
# A_{1,1}x_1 + A_{1,2}x_2 + \cdots + A_{1,n}x_n = b_1 \\\\
# A_{2,1}x_1 + A_{2,2}x_2 + \cdots + A_{2,n}x_n = b_2 \\\\
# \cdots \\\\
# A_{m,1}x_1 + A_{m,2}x_2 + \cdots + A_{m,n}x_n = b_m
# $$
#
# So we have multiple equations with multiple unknowns. We know $A_{1,1}...A_{m,n}$ and $b_1...b_n$. To solve the system we need to find the values of the variables $x_1...x_n$ that satisfies all equations.
# # Number of solutions
#
# The first thing to ask when we face such a system of equations is: what is the number of solutions ?
#
# Three cases can represent the number of solutions of the system of equations $\bs{Ax}=\bs{b}$.
#
# 1. No solution
# 2. 1 solution
# 3. An infinite number of solutions
#
# ## Why there can't be more than 1 solution and less than an infinite number of solutions ?
#
# ### Intuition
#
# Simply because we deal with **linear** systems! Two lines can't cross more than once.
#
# To be able to visualize it, let's take two dimensions and two equations. The solutions of the system correspond to the intersection of the lines. One option is that the two lines never cross (parallel). Another option is that they cross once. And finally, the last option is that they cross everywhere (superimposed):
#
# <img src="images/number-solutions-system-equations.png" width="700" alt="Examples of systems of equations with 0, 1 and an infinite number of solutions" title="System of equations with 0, 1 and an infinite number of solutions">
# <em>A system of equations has no solution, 1 solution or an infinite number of solutions</em>
#
# <span class='pquote'>
# Two lines can't cross more than once but can be either parallel or superimposed
# </span>
#
# ### Proof
#
# Let's imagine that $\bs{x}$ and $\bs{y}$ are two solutions of our system. This means that
#
# $$
# \begin{cases}
# \bs{Ax}=\bs{b}\\\\
# \bs{Ay}=\bs{b}
# \end{cases}
# $$
#
# In that case, we will see that $\bs{z}=\alpha \bs{x} + (1-\alpha) \bs{y}$ is also a solution for any value of $\alpha$. If $\bs{z}$ is a solution, we can say that $\bs{Az}=\bs{b}$. Indeed, if we plug $\bs{z}$ into the left hand side of the equation we obtain:
#
# $$
# \begin{align*}
# \bs{Az}&=\bs{A}(\alpha \bs{x} + (1-\alpha) \bs{y})\\\\
# &=\alpha\bs{Ax} + (1-\alpha)\bs{Ay}
# \end{align*}
# $$
#
# And since $\bs{Ax}=\bs{Ay}=\bs{b}$. This leads to:
#
# $$
# \begin{align*}
# \bs{Az}&=\bs{b}\alpha + \bs{b}(1-\alpha)\\\\
# &=\bs{b}\alpha + \bs{b}-\bs{b}\alpha\\\\
# &=\bs{b}
# \end{align*}
# $$
#
# So $\bs{z}$ is also a solution.
# # Matrix representation of the system
#
# As we saw it, the equation $\bs{Ax}=\bs{b}$ can be represented by a matrix $\bs{A}$ containing the weights of each variable and a vector $\bs{x}$ containing each variable (see [2.2](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-2.2-Multiplying-Matrices-and-Vectors/)). The product of $\bs{A}$ and $\bs{x}$ gives $\bs{b}$ that is another vector of size $m$:
#
# $$
# \begin{bmatrix}
# A_{1,1} & A_{1,2} & \cdots & A_{1,n} \\\\
# A_{2,1} & A_{2,2} & \cdots & A_{2,n} \\\\
# \cdots & \cdots & \cdots & \cdots \\\\
# A_{m,1} & A_{m,2} & \cdots & A_{m,n}
# \end{bmatrix}
# \begin{bmatrix}
# x_1 \\\\
# x_2 \\\\
# \cdots \\\\
# x_n
# \end{bmatrix}
# =
# \begin{bmatrix}
# b_1 \\\\
# b_2 \\\\
# \cdots \\\\
# b_m
# \end{bmatrix}
# $$
#
# Which corresponds to the set of linear equations
#
# $$
# A_{1,1}x_1 + A_{1,2}x_2 + \cdots + A_{1,n}x_n = b_1 \\\\
# A_{2,1}x_1 + A_{2,2}x_2 + \cdots + A_{2,n}x_n = b_2 \\\\
# \cdots \\\\
# A_{m,1}x_1 + A_{m,2}x_2 + \cdots + A_{m,n}x_n = b_m
# $$
#
# Here are some intuitions about what is represented by these matrices. The number of columns of $\bs{A}$ is the number of dimensions of our vector space. It is the number $n$ of directions we can travel by. The number of solutions of our linear system corresponds to the number of ways we can reach $\bs{b}$ by travelling through our $n$ dimensions.
#
# But to understand this, we need to underline that two possibilities exist to represent the system of equations: ***the row figure*** and ***the column figure***.
# # Graphical views: Row and column figures
#
#
# I recommend to look at [this video lesson of <NAME>](http://ia802205.us.archive.org/18/items/MIT18.06S05_MP4/01.mp4). It provides a very nice intuition about these two ways of looking at a system of linear equations.
#
#
# When you are looking to the matrix $\bs{A}$:
#
# $$
# \bs{A}=\begin{bmatrix}
# A_{1,1} & A_{1,2} & \cdots & A_{1,n} \\\\
# A_{2,1} & A_{2,2} & \cdots & A_{2,n} \\\\
# \cdots & \cdots & \cdots & \cdots \\\\
# A_{m,1} & A_{m,2} & \cdots & A_{m,n}
# \end{bmatrix}
# $$
#
# You can consider its rows or its columns separately. Recall that the values are the weights corresponding to each variable. Each row synthetizes one equation. Each column is the set of weights given to 1 variable.
#
# It is possible to draw a different graphical represention of the set of equations looking at the rows or at the columns.
# ## Graphical view 1: the row figure
#
# The row figure is maybe more usual because it is the representation used when we have only one equation. It can now be extended to an infinite number of equations and unknowns (even if it would be hard to represent a 9-dimensional hyperplane in a 10-dimensional space...).
#
# We said that the solutions of the linear system of equations are the sets of values of $x_1...x_n$ that satisfies all equations, that is to say, the values taken by the unknowns. For instance, in the case of $\bs{A}$ being a ($2 \times 2$) matrix ($n=m=2$) the equations correspond to lines in a 2-dimensional space and the solution of the system is the intersection of these lines.
#
# Note that associating one direction in space to one parameter is only one way to represent the equations. There are number of ways to represent more than 3 parameters systems. For instance, you can add colors to have the representation of a fourth dimension. It is all about **representation**.
#
# <img src="images/representing-features.png" width="900" alt="Different ways of representing features" title="Feature representation">
# <em>Graphical representations of features</em>
#
# ### Overdetermined and underdetermined systems
#
# A linear system of equations can be viewed as a set of $(n-1)$-dimensional hyperplanes in a *n*-dimensional space. So the linear system can be characterized with its number of equations ($m$) and the number of unknown variables ($n$).
#
# - If there are more equations than unknows the system is called **overdetermined**. In the following example we can see a system of 3 equations (represented by 3 lines) and 2 unknowns (corresponding to 2 dimensions). In this example there is no solution since there is no point belonging to the three lines:
#
# <img src="images/overdetermined-system-linear-equations.png" width="300" alt="Example of an overdetermined system of linear equations with no solution" title="Example of an overdetermined system of linear equations with no solution">
# <em>Example of an overdetermined system of linear equations with no solution</em>
#
# - If there is more unknowns than equations the system is called **underdetermined**. In the following picture, there is only 1 equation (1 line) and 2 dimensions. Each point that is on the line is a solution of the system. In this case there is an infinite number of solutions:
#
# <img src="images/underdetermined-system-linear-equations.png" width="300" alt="Example of an underdetermined system of linear equations with an infinite number of solutions" title="Example of an underdetermined system of linear equations with an infinite number of solutions">
# <em>Example of an underdetermined system of linear equations with an infinite number of solutions</em>
#
#
# Let's see few examples of these different cases to clarify that.
# ### Example 1.
#
# $m=1$, $n=2$: **1 equation and 2 variables**
#
# $$
# A_{1,1}x_1 + A_{1,2}x_2 = b_1
# $$
#
# The graphical interpretation of $n=2$ is that we have a 2-D space. So we can represent it with 2 axes. Since our hyperplane is of $n-1$-dimensional, we have a 1-D hyperplane. This is simply a line. As $m=1$, we have only one equation. This means that we have only one line characterizing our linear system.
#
# Note that the last equation can also be written in a way that may be more usual:
#
# $$
# y = ax + b
# $$
#
# with $y$ corresponding to $x_2$, $x$ corresponding to $x_1$, $a$ corresponding to $A_{1,1}$ and $A_{1,2}=1$.
#
# For this first example we will take the following equation:
#
# $$
# y = 2x + 1
# $$
#
# Let's draw the line of this equation with Numpy and Matplotlib (see BONUS in [2.3](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-2.3-Identity-and-Inverse-Matrices/) for light tips to plot equations).
# +
# Row figure of Example 1: the single equation y = 2x + 1 plotted as a line.
x = np.arange(-10, 10)
y = 2*x + 1
plt.figure()
plt.plot(x, y)
plt.xlim(-2, 10)
plt.ylim(-2, 10)
# draw axes
plt.axvline(x=0, color='#A9A9A9')
plt.axhline(y=0, color='#A9A9A9')
plt.show()
plt.close()
# -
# #### Solutions
#
# The solutions of this linear system correspond to the value of $x$ and $y$ such as $y=2x+1$. Graphically, it corresponds to each point on the line so there is an infinite number of solutions. For instance, one solution is $x=0$ and $y=1$, or $x=1$ and $y=3$ and so on.
# ### Example 2.
#
# *m*=2, *n*=2: **2 equations and 2 unknowns**
#
# $$
# A_{1,1}x_1 + A_{1,2}x_2 = b_1\\\\
# A_{2,1}x_1 + A_{2,2}x_2 = b_2
# $$
# The graphical interpretation of this system is that we still have lines in a 2-D space. However this time there are 2 lines since there are 2 equations.
#
# Let's take these equations as example:
#
# $$
# \begin{cases}
# y = 2x + 1\\\\
# y = 6x - 2
# \end{cases}
# $$
#
# +
# Row figure of Example 2: two equations, two lines; the unique solution of
# the system is their intersection point.
x = np.arange(-10, 10)
y = 2*x + 1
y1 = 6*x - 2
plt.figure()
plt.plot(x, y)
plt.plot(x, y1)
plt.xlim(-2, 10)
plt.ylim(-2, 10)
# draw axes
plt.axvline(x=0, color='#A9A9A9')
plt.axhline(y=0, color='#A9A9A9')
plt.show()
plt.close()
# -
# As we have seen, with 2 lines in a 2-D space, there are multiple possible cases. On the above figure, the two lines are crossing so there is one unique solution. If they are superimposed (same equation or equivalent, *cf*. linear dependence below) there are an infinite number of solutions since each point of the lines corresponds to an intersection. If they are parallel, there is no solution.
#
# The same thing can be observed with other values of $m$ (number of equations) and $n$ (number of dimensions). For instance, two 2-D planes in a 3-D space can be superposed (infinitely many solutions), or crossed (infinitely many solutions since their crossing is a line), or parallel (no solution).
# ### Example 3.
#
# *m*=3, *n*=2: **3 equations and 2 unknowns**
#
# $$
# A_{1,1}x_1 + A_{1,2}x_2 = b_1\\\\
# A_{2,1}x_1 + A_{2,2}x_2 = b_2\\\\
# A_{3,1}x_1 + A_{3,2}x_2 = b_3
# $$
#
# The same idea stands with more than 2 equations in a 2-D space. In that example we have the following 3 equations:
#
# $$
# \begin{cases}
# y = 2x + 1\\\\
# y = 6x - 2\\\\
# y = \frac{1}{10}x+6
# \end{cases}
# $$
# +
# Row figure of Example 3: an overdetermined system — 3 equations (lines) in
# 2 dimensions with no common intersection point, hence no solution.
x = np.arange(-10, 10)
y = 2*x + 1
y1 = 6*x - 2
y2 = 0.1*x+6
plt.figure()
plt.plot(x, y)
plt.plot(x, y1)
plt.plot(x, y2)
plt.xlim(-2, 10)
plt.ylim(-2, 10)
# draw axes
plt.axvline(x=0, color='#A9A9A9')
plt.axhline(y=0, color='#A9A9A9')
plt.show()
plt.close()
# -
# In the above case, there is 3 equations and no solution because there is no point in space that is on each of these lines.
# ## Linear combination
#
# Before going to the column figure, we need to talk about linear combination. The linear combination of 2 vectors corresponds to their weighted sum.
#
# ### Example 4.
#
# Let's take two vectors
#
# $$
# \vec{u}=
# \begin{bmatrix}
# 1 \\\\
# 3
# \end{bmatrix}
# $$
#
# and
#
# $$
# \vec{v}=
# \begin{bmatrix}
# 2 \\\\
# 1
# \end{bmatrix}
# $$
#
# These two vectors have 2 dimensions and thus contain coordinates in 2-D.
#
#
# The linear combination of $\vec{u}$ and $\vec{v}$ is
#
# $$
# a\vec{u}+b\vec{v}= a
# \begin{bmatrix}
# 1 \\\\
# 3
# \end{bmatrix} + b\begin{bmatrix}
# 2 \\\\
# 1
# \end{bmatrix}
# $$
#
# with $a$ and $b$ the weights of the vectors.
#
# Graphically, the vectors are added to reach a specific point in space. For example if $a=2$ and $b=1$:
#
# $$
# 2\vec{u}+\vec{v}= 2
# \begin{bmatrix}
# 1 \\\\
# 3
# \end{bmatrix} +
# \begin{bmatrix}
# 2 \\\\
# 1
# \end{bmatrix} =
# \begin{bmatrix}
# 2 \cdot 1 + 2 \\\\
# 2 \cdot 3 + 1
# \end{bmatrix} =
# \begin{bmatrix}
# 4 \\\\
# 7
# \end{bmatrix}
# $$
#
# The sum of $\vec{u}$ and $\vec{v}$ is a vector that will reach the point of coordinates $(4, 7)$. To show that on a plot, I will use the custom function `plotVectors()` that I defined at the beginning of [the notebook](https://github.com/hadrienj/deepLearningBook-Notes/blob/master/2.4%20Linear%20Dependence%20and%20Span/2.4%20Linear%20Dependence%20and%20Span.ipynb). It takes a set of coordinates and an array of colors as input and plot the corresponding vectors. So let's plot $\vec{u}$ and $\vec{v}$:
# Colors reused by the remaining figures of this notebook.
orange = '#FF9A13'
blue = '#1190FF'
# Draw u = (1, 3) in orange and v = (2, 1) in blue, both from the origin.
plotVectors([[1, 3], [2, 1]], [orange, blue])
plt.xlim(0, 5)
plt.ylim(0, 5)
# We will now add these vectors and their weights. This gives:
# +
# Weights of the vectors
a = 2
b = 1
# Each vector is stored as (x_start, y_start, dx, dy).
# u starts at the origin with direction (1, 3); v starts at (2, 6) — the tip
# of a*u — with direction (2, 1), so the chain of arrows ends at (4, 7).
u = [0,0,1,3]
v = [2,6,2,1]
# Three arrows: u from the origin, a*u from the origin, then b*v chained on.
plt.quiver([u[0], a*u[0], b*v[0]],
           [u[1], a*u[1], b*v[1]],
           [u[2], a*u[2], b*v[2]],
           [u[3], a*u[3], b*v[3]],
           angles='xy', scale_units='xy', scale=1, color=[orange, orange, blue])
plt.xlim(-1, 8)
plt.ylim(-1, 8)
# Draw axes
plt.axvline(x=0, color='#A9A9A9')
plt.axhline(y=0, color='#A9A9A9')
# Mark the target point (4, 7) reached by 2u + v.
plt.scatter(4,7,marker='x',s=50)
# Draw the name of the vectors
plt.text(-0.5, 2, r'$\vec{u}$', color=orange, size=18)
plt.text(0.5, 4.5, r'$\vec{u}$', color=orange, size=18)
plt.text(2.5, 7, r'$\vec{v}$', color=blue, size=18)
plt.show()
plt.close()
# -
# We can see that we end up with the coordinates ($4$, $7$).
# ## Span
#
# Take the vectors $\vec{u}$ and $\vec{v}$ from the previous example and think about all the points you can reach by their combination changing $a$ and $b$. This set of points is the span of the set of vectors $\{\vec{u}, \vec{v}\}$.
# +
# Defining u and v
u = [1, 3]
v = [2, 1]
# Plotting a sample of the set of points in the span of u and v: every
# integer combination a*u + b*v for a, b in [-10, 10).
for a in range(-10, 10):
    for b in range(-10, 10):
        plt.scatter(u[0] * a + v[0] * b, u[1] * a + v[1] * b, marker='.', color=blue)
# Defining x and y sizes
plt.xlim(-50, 50)
plt.ylim(-50, 50)
# Draw axes
plt.axvline(x=0, color='#A9A9A9')
plt.axhline(y=0, color='#A9A9A9')
plt.show()
plt.close()
# -
# ## Note on spaces and subspaces
#
# (For more details see Strang (2006), p.70)
#
# The space of a vector determines all the values that can be taken by this vector. The vector spaces are denoted $\mathbb{R}$ because the values are real numbers. If there are multiple dimensions the space is denoted $\mathbb{R}^n$ with $n$ corresponding to the number of dimensions. For instance $\mathbb{R}^2$ is the space of the usual $x$-$y$ plane where $x$ and $y$ values are real numbers.
#
# If you take a 2-dimensional plane in $\mathbb{R}^3$ (3-dimensional space), this plane is a **subspace** of your original $\mathbb{R}^3$ space. On the same manner, if you start with a $\mathbb{R}^2$ space and take a line in this space, this line is a subspace of the original space.
#
# The linear combination of vectors gives vectors in the original space. Every linear combination of vectors inside a space will stay in this space. For instance, if you take 2 lines in a $\mathbb{R}^2$ space, any linear combinations will give you a vector in the same $\mathbb{R}^2$ space.
#
# <span class='pquote'>
# The linear combination of vectors gives vectors in the original space
# </span>
# ## Graphical view 2: the column figure
#
# It is also possible to represent the set of equations by considering that the solution vector $\bs{b}$ corresponds to a linear combination of each columns multiplied by their weights.
#
# From the set of equations:
#
# $$
# A_{1,1}x_1 + A_{1,2}x_2 + A_{1,n}x_n = b_1 \\\\
# A_{2,1}x_1 + A_{2,2}x_2 + A_{2,n}x_n = b_2 \\\\
# \cdots \\\\
# A_{m,1}x_1 + A_{m,2}x_2 + A_{m,n}x_n = b_m
# $$
#
# The column form is then:
#
# $$
# x_1
# \begin{bmatrix}
# A_{1,1}\\\\
# A_{2,1}\\\\
# A_{m,1}
# \end{bmatrix}
# +
# x_2
# \begin{bmatrix}
# A_{1,2}\\\\
# A_{2,2}\\\\
# A_{m,2}
# \end{bmatrix}
# +
# x_n
# \begin{bmatrix}
# A_{1,n}\\\\
# A_{2,n}\\\\
# A_{m,n}
# \end{bmatrix}
# =
# \begin{bmatrix}
# b_1\\\\
# b_2\\\\
# b_m
# \end{bmatrix}
# $$
#
# On a graphical point of view, we have to travel from the origin (zero on every dimensions) to the point of coordinate $\bs{b}$. The columns of $\bs{A}$ give us the directions we can travel by and their weights are the length of the way in that direction.
#
# <span class='pquote'>
# The columns of $\bs{A}$ give us the directions we can travel by and their weights are the length of the way in each direction.
# </span>
# ### Example 5.
#
# $m=2$, $n=2$: 2 equations and 2 variables
#
# $$
# A_{1,1}x_1 + A_{1,2}x_2 = b_1\\\\
# A_{2,1}x_1 + A_{2,2}x_2 = b_2
# $$
#
# $$
# \begin{cases}
# y = \frac{1}{2}x+1\\\\
# y = -x + 4
# \end{cases}
# \Leftrightarrow
# \begin{cases}
# \frac{1}{2}x-y = -1\\\\
# x+y=4
# \end{cases}
# $$
#
# So here is the matrix $\bs{A}$:
#
# $$
# \bs{A}=
# \begin{bmatrix}
# \frac{1}{2} & -1 \\\\
# 1 & 1
# \end{bmatrix}
# $$
#
# The column figure gives us:
#
# $$
# x
# \begin{bmatrix}
# \frac{1}{2} \\\\
# 1
# \end{bmatrix}
# +
# y
# \begin{bmatrix}
# -1 \\\\
# 1
# \end{bmatrix}
# =
# \begin{bmatrix}
# -1 \\\\
# 4
# \end{bmatrix}
# $$
#
# The goal is to find the value of the weights ($x$ and $y$) for which the linear combination of the vector
#
# $$
# \begin{bmatrix}
# \frac{1}{2} \\\\
# 1
# \end{bmatrix}
# $$
#
# and
#
# $$
# \begin{bmatrix}
# -1 \\\\
# 1
# \end{bmatrix}
# $$
#
# gives the vector
#
# $$
# \begin{bmatrix}
# -1 \\\\
# 4
# \end{bmatrix}
# $$
#
# We will solve the system graphically by plotting the equations and looking for their intersection:
# +
# Row figure of Example 5: solve the system graphically — the intersection
# of y = x/2 + 1 and y = -x + 4 is the solution (2, 2).
x = np.arange(-10, 10)
y = 0.5*x + 1
y1 = -x + 4
plt.figure()
plt.plot(x, y)
plt.plot(x, y1)
plt.xlim(-2, 10)
plt.ylim(-2, 10)
# draw axes
plt.axvline(x=0, color='#A9A9A9')
plt.axhline(y=0, color='#A9A9A9')
plt.show()
plt.close()
# -
# We can see that the solution (the intersection of the lines representing our two equations) is $x=2$ and $y=2$. This means that the linear combination is the following:
#
# $$
# 2
# \begin{bmatrix}
# \frac{1}{2} \\\\
# 1
# \end{bmatrix}
# +
# 2
# \begin{bmatrix}
# -1 \\\\
# 1
# \end{bmatrix}
# =
# \begin{bmatrix}
# -1 \\\\
# 4
# \end{bmatrix}
# $$
#
# Let's say that
#
# $$
# \vec{u}=
# \begin{bmatrix}
# \frac{1}{2} \\\\
# 1
# \end{bmatrix}
# $$
#
# and
#
# $$
# \vec{v}=
# \begin{bmatrix}
# -1 \\\\
# 1
# \end{bmatrix}
# $$
#
# To talk in term of the column figure we can reach the point of coordinates $(-1, 4)$ if we add two times the vector $\vec{u}$ and two times the vector $\vec{v}$. Let's check that:
# +
# Column figure: chain 2 copies of u = (1/2, 1) then 2 copies of v = (-1, 1)
# to reach b = (-1, 4). Each entry is (x_start, y_start, dx, dy).
u = [0,0,0.5,1]                         # first u, from the origin
u_bis = [u[2],u[3],u[2],u[3]]           # second u, starting at the tip of the first
v = [2*u[2],2*u[3],-1,1]                # first v, starting at the tip of 2u = (1, 2)
v_bis = [2*u[2]-1,2*u[3]+1,v[2],v[3]]   # second v, ending at (-1, 4)
plt.quiver([u[0], u_bis[0], v[0], v_bis[0]],
           [u[1], u_bis[1], v[1], v_bis[1]],
           [u[2], u_bis[2], v[2], v_bis[2]],
           [u[3], u_bis[3], v[3], v_bis[3]],
           angles='xy', scale_units='xy', scale=1, color=[blue, blue, orange, orange])
# plt.rc('text', usetex=True)
plt.xlim(-1.5, 2)
plt.ylim(-0.5, 4.5)
# draw axes
plt.axvline(x=0, color='#A9A9A9')
plt.axhline(y=0, color='#A9A9A9')
# Mark the target point b = (-1, 4).
plt.scatter(-1,4,marker='x',s=50)
plt.text(0, 0.5, r'$\vec{u}$', color=blue, size=18)
plt.text(0.5, 1.5, r'$\vec{u}$', color=blue, size=18)
plt.text(0.5, 2.7, r'$\vec{v}$', color=orange, size=18)
plt.text(-0.8, 3, r'$\vec{v}$', color=orange, size=18)
plt.show()
plt.close()
# -
# We can see that it is working! We arrive to the point ($-1$, $4$).
# ## Determine if the system has one and only one solution for every value of $\bs{b}$
#
# We will now see how to determine if a system of equations has one and only one solution. Note that this is only the general cases. This can be split into two requirements:
#
# 1. The system must have at least one solution
# 2. Then, the system must have **only** one solution
#
# ### Requirement 1. Underdetermined system: the system must have at least one solution for each value of $\bs{b}$: $n\geq m$
#
# <span class='pquote'>
# An underdetermined system of equations is a system with less equations than unknowns
# </span>
#
# If we want our system to have one and only one solution a first requirement is that $n$ must not be bigger than $m$.
#
# Let's take the example of a ($2\times 3$) matrix that corresponds to a set of 2 equations with 3 unknowns variables:
#
#
# <div>
# $$
# \begin{cases}
# 8x+y+z=1\\\\
# x+y+z=1
# \end{cases}
# $$
# </div>
#
# <div>
# $$
# x
# \begin{bmatrix}
# 8 \\\\
# 1
# \end{bmatrix}
# +
# y
# \begin{bmatrix}
# 1 \\\\
# 1
# \end{bmatrix}
# +
# z
# \begin{bmatrix}
# 1 \\\\
# 1
# \end{bmatrix}
# =
# \begin{bmatrix}
# 1 \\\\
# 1
# \end{bmatrix}
# $$
# </div>
#
# Here is the representation of the planes plotted with the help of this [website](https://technology.cpm.org/general/3dgraph/):
#
# <img src="images/intersection-2-planes-line.png" alt="Plot showing two planes. The intersection of the two planes is a line" title="The intersection of the two planes is a line" width="500">
# <em>The intersection of the two planes is a line</em>
#
# We can see that in the best case the two planes are not parallel and there are solutions to the set of equations. It means that it exists some points that rely on both planes. But we can also see that there is inevitably an infinite number of points on the intersection (a line that we can see on the figure). We need a third plane to have a unique solution.
#
# ### Requirement 2. Overdetermined system: the system must have **only** one solution for each value of $\bs{b}$: $m\geq n$
#
#
# <span class='pquote'>
# An overdetermined system of equations is a system with more equations than unknowns
# </span>
#
# The column figure is helpful to understand why the linear system has usually no solution if $n$ (the number of unknowns) is smaller than $m$ (the number of equations). Let's add 1 equation to the above system in order to end up with a ($3\times2$) matrix (3 equations and 2 unknowns):
#
# <div>
# $$
# \begin{cases}
# y = \frac{1}{2}x+1\\\\
# y = -x + 4\\\\
# y = 7x + 2
# \end{cases}
# \Leftrightarrow
# \begin{cases}
# \frac{1}{2}x-y = -1\\\\
# x+y=4\\\\
# 7x-y=2
# \end{cases}
# $$
# </div>
#
# This corresponds to:
#
# <div>
# $$
# x
# \begin{bmatrix}
# \frac{1}{2} \\\\
# 1 \\\\
# 7
# \end{bmatrix}
# +
# y
# \begin{bmatrix}
# -1 \\\\
# 1 \\\\
# -1
# \end{bmatrix}
# =
# \begin{bmatrix}
# -1 \\\\
# 4 \\\\
# 2
# \end{bmatrix}
# $$
# </div>
#
# So we are still traveling in our 2-dimensional space (see the plot of the column space above) but the point that we are looking for is defined by 3 dimensions. There are cases where the third coordinate does not rely on our 2-dimensional $x$-$y$ plane. In that case no solution exists.
#
# <span class='pquote'>
# We are traveling in a 2D space but the solution is defined by 3 dimensions. If the third coordinate does not rely on our 2D $x$-$y$ plane then there is no solution.
# </span>
# ### Linear dependence
#
# The number of columns can thus provide information on the number of solutions. But the number that we have to take into account is the number of **linearly independent** columns. Columns are linearly dependent if one of them is a linear combination of the others. Thinking in the column picture, the direction of two linearly dependent vectors is the same. This doesn't add a dimension that we can use to travel and reach $\bs{b}$.
# Here is an example of linear system containing linear dependency:
#
# $$
# \begin{cases}
# y = 2x+6\\\\
# y = 2x
# \end{cases}
# \Leftrightarrow
# \begin{cases}
# 2x-y = -6\\\\
# 2x-y=0
# \end{cases}
# $$
#
# The row figure shows that the system has no solution:
# +
# Row figure of the linearly dependent system: the two lines are parallel
# (same slope, different intercepts), so the system has no solution.
x = np.arange(-10, 10)
y = 2*x + 6
y1 = 2*x
plt.figure()
plt.plot(x, y)
plt.plot(x, y1)
plt.xlim(-2, 10)
plt.ylim(-2, 10)
# draw axes
plt.axvline(x=0, color='#A9A9A9')
plt.axhline(y=0, color='#A9A9A9')
plt.show()
plt.close()
# -
# Since the lines are parallel, there is no point at their intersection.
# The column figure illustrates the point as well:
#
# $$
# x
# \begin{bmatrix}
# 2 \\\\
# 2
# \end{bmatrix}
# +
# y
# \begin{bmatrix}
# -1 \\\\
# -1
# \end{bmatrix}
# =
# \begin{bmatrix}
# -6 \\\\
# 0
# \end{bmatrix}
# $$
#
# +
# Column figure of the dependent system: the two column vectors (2, 2) and
# (-1, -1) point along the same line, so b = (-6, 0) is unreachable.
u = [0,0,2,2]
v = [0,0,-1,-1]
plt.quiver([u[0], v[0]],
           [u[1], v[1]],
           [u[2], v[2]],
           [u[3], v[3]],
           angles='xy', scale_units='xy', scale=1, color=[blue, orange])
plt.xlim(-7, 3)
plt.ylim(-2, 3)
# draw axes
plt.axvline(x=0, color='#A9A9A9')
plt.axhline(y=0, color='#A9A9A9')
# Mark the unreachable target b = (-6, 0).
plt.scatter(-6,0,marker='x',s=150)
plt.text(-6, 0.5, r'$b$', color='b', size=18)
plt.show()
plt.close()
# -
# We would like to go to $b$ but the only path we can take is the blue/orange line. The second equation doesn't provide us with a new direction to take since it is just a linear combination of the first one.
#
# Thus, an overdetermined system of independent equations has at most 1 solution.
# ### Square matrix
#
# How could we satisfy both requirements ($m\geq n$ and $n\geq m$): we must have $m=n$!
#
# The resulting of all of this is that the system needs a **square matrix** $\bs{A}$ ($m=n$) with linearly independant columns to have a unique solution for every values of $\bs{b}$.
#
# <span class='pquote'>
# The system needs a **square matrix** $\bs{A}$ ($m=n$) with linearly independant columns to have a unique solution for every values of $\bs{b}$
# </span>
#
# The inverse of a matrix exists only if the set of equations has one and only one solution for each value of $\bs{b}$ because:
#
# - The matrix $\bs{A}$ cannot have more than 1 inverse. Imagine that $\bs{A}$ has 2 inverses $\bs{B}$ and $\bs{C}$ such as $\bs{AB}=\bs{I}$ and $\bs{AC}=\bs{I}$. This would mean that $\bs{B}=\bs{C}$.
#
# - The solution of the system $\bs{Ax}=\bs{b}$ is $\bs{x}=\bs{A} ^{-1} \bs{b}$. So if there are multiple solutions, there are multiple inverses and the first point is not met.
# For more details about the row and the column figure, have a look at the books of <NAME> (there are some ressources [here](http://math.mit.edu/~gs/dela/dela_4-1.pdf)). There are tons of really great examples and graphical explanations! And the *1.2 Geometry of linear equations* in 'Linear algebra and its applications' also from Gilbert Strang.
# <span class='notes'>
# Feel free to drop me an email or a comment. The syllabus of this series can be found [in the introduction post](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-Introduction/). All the notebooks can be found on [Github](https://github.com/hadrienj/deepLearningBook-Notes).
# </span>
# # References
#
# ## Books and videos of Gilbert Strang
#
# - <NAME>. (2006). Linear Algebra and Its Applications, 4th Edition (4th edition). Belmont, CA: Cengage Learning.
#
# - <NAME>. (2014). Differential Equations and Linear Algebra (UK ed. edition). Wellesley, Mass: Wellesley-Cambridge.
#
# - [The column space of a matrix. Video from <NAME>](https://ocw.mit.edu/resources/res-18-009-learn-differential-equations-up-close-with-gilbert-strang-and-cleve-moler-fall-2015/differential-equations-and-linear-algebra/vector-spaces-and-subspaces/the-column-space-of-a-matrix/)
#
# ## System of equations
#
# - [Wikipedia - System of linear equations](https://en.wikipedia.org/wiki/System_of_linear_equations)
#
# ## Numpy
#
# - [Numpy arange()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.arange.html)
| 2.4 Linear Dependence and Span/2.4 Linear Dependence and Span.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Analysis Examples
# ## 1.USA.gov Data from Bitly
from numpy.random import randn
import numpy as np
np.random.seed(123)
import os
import matplotlib.pyplot as plt
import pandas as pd
plt.rc('figure', figsize=(10, 6))
np.set_printoptions(precision=4)
pd.options.display.max_rows = 20
# In [5]: path = 'datasets/bitly_usagov/example.txt'
#
# In [6]: open(path).readline()
# Out[6]: '{ "a": "Mozilla\\/5.0 (Windows NT 6.1; WOW64) AppleWebKit\\/535.11
# (KHTML, like Gecko) Chrome\\/17.0.963.78 Safari\\/535.11", "c": "US", "nk": 1,
# "tz": "America\\/New_York", "gr": "MA", "g": "A6qOVH", "h": "wfLQtf", "l":
# "orofrog", "al": "en-US,en;q=0.8", "hh": "1.usa.gov", "r":
# "http:\\/\\/www.facebook.com\\/l\\/7AQEFzjSi\\/1.usa.gov\\/wfLQtf", "u":
# "http:\\/\\/www.ncbi.nlm.nih.gov\\/pubmed\\/22415991", "t": 1331923247, "hc":
# 1331822918, "cy": "Danvers", "ll": [ 42.576698, -70.954903 ] }\n'
import json
path = 'datasets/bitly_usagov/example.txt'
records = [json.loads(line) for line in open(path)]
# In [18]: records[0]
# Out[18]:
# {'a': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko)
# Chrome/17.0.963.78 Safari/535.11',
# 'al': 'en-US,en;q=0.8',
# 'c': 'US',
# 'cy': 'Danvers',
# 'g': 'A6qOVH',
# 'gr': 'MA',
# 'h': 'wfLQtf',
# 'hc': 1331822918,
# 'hh': '1.usa.gov',
# 'l': 'orofrog',
# 'll': [42.576698, -70.954903],
# 'nk': 1,
# 'r': 'http://www.facebook.com/l/7AQEFzjSi/1.usa.gov/wfLQtf',
# 't': 1331923247,
# 'tz': 'America/New_York',
# 'u': 'http://www.ncbi.nlm.nih.gov/pubmed/22415991'}
# ### Counting Time Zones in Pure Python
time_zones = [rec['tz'] for rec in records]
time_zones = [rec['tz'] for rec in records if 'tz' in rec]
time_zones[:10]
def get_counts(sequence):
    """Return a dict mapping each distinct item in *sequence* to its frequency."""
    counts = {}
    for item in sequence:
        # dict.get supplies 0 the first time an item is seen
        counts[item] = counts.get(item, 0) + 1
    return counts
# +
from collections import defaultdict
def get_counts2(sequence):
    """Tally occurrences of each item in *sequence*.

    Returns a ``defaultdict(int)``, so looking up a missing key yields 0
    instead of raising ``KeyError``.
    """
    counts = defaultdict(int)
    for item in sequence:
        counts[item] = counts[item] + 1
    return counts
# -
counts = get_counts(time_zones)
counts['America/New_York']
len(time_zones)
def top_counts(count_dict, n=10):
    """Return the *n* most frequent entries as (count, key) pairs, ascending by count."""
    pairs = sorted((count, key) for key, count in count_dict.items())
    return pairs[-n:]
top_counts(counts)
from collections import Counter
counts = Counter(time_zones)
counts.most_common(10)
# ### Counting Time Zones with pandas
import pandas as pd
frame = pd.DataFrame(records)
frame.info()
frame['tz'][:10]
tz_counts = frame['tz'].value_counts()
tz_counts[:10]
clean_tz = frame['tz'].fillna('Missing')
clean_tz[clean_tz == ''] = 'Unknown'
tz_counts = clean_tz.value_counts()
tz_counts[:10]
plt.figure(figsize=(10, 4))
import seaborn as sns
subset = tz_counts[:10]
sns.barplot(y=subset.index, x=subset.values)
frame['a'][1]
frame['a'][50]
frame['a'][51][:50] # long line
results = pd.Series([x.split()[0] for x in frame.a.dropna()])
results[:5]
results.value_counts()[:8]
cframe = frame[frame.a.notnull()]
cframe = cframe.copy()
cframe['os'] = np.where(cframe['a'].str.contains('Windows'),
'Windows', 'Not Windows')
cframe['os'][:5]
by_tz_os = cframe.groupby(['tz', 'os'])
agg_counts = by_tz_os.size().unstack().fillna(0)
agg_counts[:10]
# Use to sort in ascending order
indexer = agg_counts.sum(1).argsort()
indexer[:10]
count_subset = agg_counts.take(indexer[-10:])
count_subset
agg_counts.sum(1).nlargest(10)
plt.figure()
# Rearrange the data for plotting
count_subset = count_subset.stack()
count_subset.name = 'total'
count_subset = count_subset.reset_index()
count_subset[:10]
sns.barplot(x='total', y='tz', hue='os', data=count_subset)
# +
def norm_total(group):
    """Add a 'normed_total' column: each row's total as a share of the group sum."""
    group_sum = group['total'].sum()
    group['normed_total'] = group['total'] / group_sum
    return group
results = count_subset.groupby('tz').apply(norm_total)
# -
plt.figure()
sns.barplot(x='normed_total', y='tz', hue='os', data=results)
g = count_subset.groupby('tz')
results2 = count_subset.total / g.total.transform('sum')
# ## MovieLens 1M Dataset
# +
import pandas as pd
# Make display smaller
pd.options.display.max_rows = 10
unames = ['user_id', 'gender', 'age', 'occupation', 'zip']
users = pd.read_table('datasets/movielens/users.dat', sep='::',
header=None, names=unames)
rnames = ['user_id', 'movie_id', 'rating', 'timestamp']
ratings = pd.read_table('datasets/movielens/ratings.dat', sep='::',
header=None, names=rnames)
mnames = ['movie_id', 'title', 'genres']
movies = pd.read_table('datasets/movielens/movies.dat', sep='::',
header=None, names=mnames)
# -
users[:5]
ratings[:5]
movies[:5]
ratings
data = pd.merge(pd.merge(ratings, users), movies)
data
data.iloc[0]
mean_ratings = data.pivot_table('rating', index='title',
columns='gender', aggfunc='mean')
mean_ratings[:5]
ratings_by_title = data.groupby('title').size()
ratings_by_title[:10]
active_titles = ratings_by_title.index[ratings_by_title >= 250]
active_titles
# Select rows on the index
mean_ratings = mean_ratings.loc[active_titles]
mean_ratings
mean_ratings = mean_ratings.rename(index={'Seven Samurai (The Magnificent Seven) (Shichinin no samurai) (1954)':
'Seven Samurai (Shichinin no samurai) (1954)'})
top_female_ratings = mean_ratings.sort_values(by='F', ascending=False)
top_female_ratings[:10]
# ### Measuring Rating Disagreement
mean_ratings['diff'] = mean_ratings['M'] - mean_ratings['F']
sorted_by_diff = mean_ratings.sort_values(by='diff')
sorted_by_diff[:10]
# Reverse order of rows, take first 10 rows
sorted_by_diff[::-1][:10]
# Standard deviation of rating grouped by title
rating_std_by_title = data.groupby('title')['rating'].std()
# Filter down to active_titles
rating_std_by_title = rating_std_by_title.loc[active_titles]
# Order Series by value in descending order
rating_std_by_title.sort_values(ascending=False)[:10]
# ## US Baby Names 1880–2010
# In [4]: names.head(10)
# Out[4]:
# name sex births year
# 0 Mary F 7065 1880
# 1 Anna F 2604 1880
# 2 Emma F 2003 1880
# 3 Elizabeth F 1939 1880
# 4 Minnie F 1746 1880
# 5 Margaret F 1578 1880
# 6 Ida F 1472 1880
# 7 Alice F 1414 1880
# 8 Bertha F 1320 1880
# 9 Sarah F 1288 1880
# !head -n 10 datasets/babynames/yob1880.txt
import pandas as pd
names1880 = pd.read_csv('datasets/babynames/yob1880.txt',
names=['name', 'sex', 'births'])
names1880
names1880.groupby('sex').births.sum()
# +
years = range(1880, 2011)
pieces = []
columns = ['name', 'sex', 'births']
for year in years:
path = 'datasets/babynames/yob%d.txt' % year
frame = pd.read_csv(path, names=columns)
frame['year'] = year
pieces.append(frame)
# Concatenate everything into a single DataFrame
names = pd.concat(pieces, ignore_index=True)
# -
names
total_births = names.pivot_table('births', index='year',
columns='sex', aggfunc=sum)
total_births.tail()
total_births.plot(title='Total births by sex and year')
def add_prop(group):
    """Attach a 'prop' column: births as a fraction of the group's total births."""
    births_total = group['births'].sum()
    group['prop'] = group['births'] / births_total
    return group
names = names.groupby(['year', 'sex']).apply(add_prop)
names
names.groupby(['year', 'sex']).prop.sum()
def get_top1000(group):
    """Return the 1,000 rows of *group* with the highest birth counts."""
    ranked = group.sort_values(by='births', ascending=False)
    return ranked.head(1000)
grouped = names.groupby(['year', 'sex'])
top1000 = grouped.apply(get_top1000)
# Drop the group index, not needed
top1000.reset_index(inplace=True, drop=True)
# pieces = []
# for year, group in names.groupby(['year', 'sex']):
# pieces.append(group.sort_values(by='births', ascending=False)[:1000])
# top1000 = pd.concat(pieces, ignore_index=True)
top1000
# ### Analyzing Naming Trends
boys = top1000[top1000.sex == 'M']
girls = top1000[top1000.sex == 'F']
total_births = top1000.pivot_table('births', index='year',
columns='name',
aggfunc=sum)
total_births.info()
subset = total_births[['John', 'Harry', 'Mary', 'Marilyn']]
subset.plot(subplots=True, figsize=(12, 10), grid=False,
title="Number of births per year")
# #### Measuring the increase in naming diversity
plt.figure()
table = top1000.pivot_table('prop', index='year',
columns='sex', aggfunc=sum)
table.plot(title='Sum of table1000.prop by year and sex',
yticks=np.linspace(0, 1.2, 13), xticks=range(1880, 2020, 10))
df = boys[boys.year == 2010]
df
prop_cumsum = df.sort_values(by='prop', ascending=False).prop.cumsum()
prop_cumsum[:10]
prop_cumsum.values.searchsorted(0.5)
df = boys[boys.year == 1900]
in1900 = df.sort_values(by='prop', ascending=False).prop.cumsum()
in1900.values.searchsorted(0.5) + 1
# +
def get_quantile_count(group, q=0.5):
group = group.sort_values(by='prop', ascending=False)
return group.prop.cumsum().values.searchsorted(q) + 1
diversity = top1000.groupby(['year', 'sex']).apply(get_quantile_count)
diversity = diversity.unstack('sex')
# -
fig = plt.figure()
diversity.head()
diversity.plot(title="Number of popular names in top 50%")
# #### The “last letter” revolution
# +
# extract last letter from name column
get_last_letter = lambda x: x[-1]
last_letters = names.name.map(get_last_letter)
last_letters.name = 'last_letter'
table = names.pivot_table('births', index=last_letters,
columns=['sex', 'year'], aggfunc=sum)
# -
subtable = table.reindex(columns=[1910, 1960, 2010], level='year')
subtable.head()
subtable.sum()
letter_prop = subtable / subtable.sum()
letter_prop
# +
import matplotlib.pyplot as plt
fig, axes = plt.subplots(2, 1, figsize=(10, 8))
letter_prop['M'].plot(kind='bar', rot=0, ax=axes[0], title='Male')
letter_prop['F'].plot(kind='bar', rot=0, ax=axes[1], title='Female',
legend=False)
# -
plt.subplots_adjust(hspace=0.25)
letter_prop = table / table.sum()
dny_ts = letter_prop.loc[['d', 'n', 'y'], 'M'].T
dny_ts.head()
plt.close('all')
fig = plt.figure()
dny_ts.plot()
# #### Boy names that became girl names (and vice versa)
all_names = pd.Series(top1000.name.unique())
lesley_like = all_names[all_names.str.lower().str.contains('lesl')]
lesley_like
filtered = top1000[top1000.name.isin(lesley_like)]
filtered.groupby('name').births.sum()
table = filtered.pivot_table('births', index='year',
columns='sex', aggfunc='sum')
table = table.div(table.sum(1), axis=0)
table.tail()
fig = plt.figure()
table.plot(style={'M': 'k-', 'F': 'k--'})
# ## USDA Food Database
# {
# "id": 21441,
# "description": "KENTUCKY FRIED CHICKEN, Fried Chicken, EXTRA CRISPY,
# Wing, meat and skin with breading",
# "tags": ["KFC"],
# "manufacturer": "Kentucky Fried Chicken",
# "group": "Fast Foods",
# "portions": [
# {
# "amount": 1,
# "unit": "wing, with skin",
# "grams": 68.0
# },
#
# ...
# ],
# "nutrients": [
# {
# "value": 20.8,
# "units": "g",
# "description": "Protein",
# "group": "Composition"
# },
#
# ...
# ]
# }
import json
db = json.load(open('datasets/usda_food/database.json'))
len(db)
db[0].keys()
db[0]['nutrients'][0]
nutrients = pd.DataFrame(db[0]['nutrients'])
nutrients[:7]
info_keys = ['description', 'group', 'id', 'manufacturer']
info = pd.DataFrame(db, columns=info_keys)
info[:5]
info.info()
pd.value_counts(info.group)[:10]
# +
nutrients = []
for rec in db:
fnuts = pd.DataFrame(rec['nutrients'])
fnuts['id'] = rec['id']
nutrients.append(fnuts)
nutrients = pd.concat(nutrients, ignore_index=True)
# -
nutrients
nutrients.duplicated().sum() # number of duplicates
nutrients = nutrients.drop_duplicates()
col_mapping = {'description' : 'food',
'group' : 'fgroup'}
info = info.rename(columns=col_mapping, copy=False)
info.info()
col_mapping = {'description' : 'nutrient',
'group' : 'nutgroup'}
nutrients = nutrients.rename(columns=col_mapping, copy=False)
nutrients
ndata = pd.merge(nutrients, info, on='id', how='outer')
ndata.info()
ndata.iloc[30000]
fig = plt.figure()
result = ndata.groupby(['nutrient', 'fgroup'])['value'].quantile(0.5)
result['Zinc, Zn'].sort_values().plot(kind='barh')
# +
by_nutrient = ndata.groupby(['nutgroup', 'nutrient'])
get_maximum = lambda x: x.loc[x.value.idxmax()]
get_minimum = lambda x: x.loc[x.value.idxmin()]
max_foods = by_nutrient.apply(get_maximum)[['value', 'food']]
# make the food a little smaller
max_foods.food = max_foods.food.str[:50]
# -
max_foods.loc['Amino Acids']['food']
# ## 2012 Federal Election Commission Database
fec = pd.read_csv('datasets/fec/P00000001-ALL.csv')
fec.info()
fec.iloc[123456]
unique_cands = fec.cand_nm.unique()
unique_cands
unique_cands[2]
parties = {'<NAME>': 'Republican',
'<NAME>': 'Republican',
'<NAME>': 'Republican',
'<NAME>': 'Republican',
'<NAME>': 'Republican',
'<NAME>': 'Republican',
'Obama, Barack': 'Democrat',
'<NAME>': 'Republican',
'<NAME>': 'Republican',
'<NAME>': 'Republican',
"<NAME>. 'Buddy' III": 'Republican',
'<NAME>': 'Republican',
'<NAME>': 'Republican'}
fec.cand_nm[123456:123461]
fec.cand_nm[123456:123461].map(parties)
# Add it as a column
fec['party'] = fec.cand_nm.map(parties)
fec['party'].value_counts()
(fec.contb_receipt_amt > 0).value_counts()
fec = fec[fec.contb_receipt_amt > 0]
fec_mrbo = fec[fec.cand_nm.isin(['Obama, Barack', '<NAME>'])]
# ### Donation Statistics by Occupation and Employer
fec.contbr_occupation.value_counts()[:10]
# +
occ_mapping = {
'INFORMATION REQUESTED PER BEST EFFORTS' : 'NOT PROVIDED',
'INFORMATION REQUESTED' : 'NOT PROVIDED',
'INFORMATION REQUESTED (BEST EFFORTS)' : 'NOT PROVIDED',
'C.E.O.': 'CEO'
}
# If no mapping provided, return x
f = lambda x: occ_mapping.get(x, x)
fec.contbr_occupation = fec.contbr_occupation.map(f)
# +
emp_mapping = {
'INFORMATION REQUESTED PER BEST EFFORTS' : 'NOT PROVIDED',
'INFORMATION REQUESTED' : 'NOT PROVIDED',
'SELF' : 'SELF-EMPLOYED',
'SELF EMPLOYED' : 'SELF-EMPLOYED',
}
# If no mapping provided, return x
f = lambda x: emp_mapping.get(x, x)
fec.contbr_employer = fec.contbr_employer.map(f)
# -
by_occupation = fec.pivot_table('contb_receipt_amt',
index='contbr_occupation',
columns='party', aggfunc='sum')
over_2mm = by_occupation[by_occupation.sum(1) > 2000000]
over_2mm
plt.figure()
over_2mm.plot(kind='barh')
def get_top_amounts(group, key, n=5):
    """Sum contribution amounts by *key* within *group*; return the *n* largest totals."""
    sums = group.groupby(key)['contb_receipt_amt'].sum()
    return sums.sort_values(ascending=False).head(n)
grouped = fec_mrbo.groupby('cand_nm')
grouped.apply(get_top_amounts, 'contbr_occupation', n=7)
grouped.apply(get_top_amounts, 'contbr_employer', n=10)
# ### Bucketing Donation Amounts
bins = np.array([0, 1, 10, 100, 1000, 10000,
100000, 1000000, 10000000])
labels = pd.cut(fec_mrbo.contb_receipt_amt, bins)
labels
grouped = fec_mrbo.groupby(['cand_nm', labels])
grouped.size().unstack(0)
plt.figure()
bucket_sums = grouped.contb_receipt_amt.sum().unstack(0)
normed_sums = bucket_sums.div(bucket_sums.sum(axis=1), axis=0)
normed_sums
normed_sums[:-2].plot(kind='barh')
# ### Donation Statistics by State
grouped = fec_mrbo.groupby(['cand_nm', 'contbr_st'])
totals = grouped.contb_receipt_amt.sum().unstack(0).fillna(0)
totals = totals[totals.sum(1) > 100000]
totals[:10]
percent = totals.div(totals.sum(1), axis=0)
percent[:10]
# ## Conclusion
| ch14.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pixyzの確率分布の記述方法
#
# ここではまず,Pixyzにおける確率モデルの実装方法について説明します.
# Distribution API document: https://docs.pixyz.io/en/latest/distributions.html
# +
from __future__ import print_function
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
torch.manual_seed(1)
# -
from pixyz.utils import print_latex
# ## 1. 深層ニューラルネットワークを用いない確率分布の定義
# ### 1.1 シンプルな確率分布の定義
# ガウス分布を作るためには,`Normal`をインポートして,平均(loc)と標準偏差(scale)を定義します.
# +
from pixyz.distributions import Normal
x_dim = 50
p1_nor_x = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=['x'], features_shape=[x_dim], name='p_{1}')
# -
# なお``var``には,変数の名前を設定します.ここでは`"x"`を設定しています.
#
# また,features_shapeでは次元数を指定します.ここではfeatures_shapeが50となっていますから,50次元のサンプルを生成する形になります.
#
# 上記で定義したp1の情報は次のようにみることができます.
print(p1_nor_x.distribution_name)
print(p1_nor_x.prob_text)
# distribution_nameでは,確率分布の名前を確認できます.
#
# prob_textでは,確率分布の形をテキストで出力できます.ここでテキストに書かれている確率変数は,上記のvarで指定したものです.
#
# また,p1を丸ごとprintすると,以下のように表示されます.
print(p1_nor_x)
# print_latexを利用するとLaTex表記で定義した確率分布が表示されます
# 注: 数式のtex形式への出力に外部ライブラリのsympy使用しており,sympyの影響で数式の結果に支障を与えないが,数式の順序が入れ替わることがある
# print_latex(A +B)の出力が
# B+Aになることがあったりする
print_latex(p1_nor_x)
# 次に,定義した分布からサンプリングしてみましょう. サンプリングは,`sample()`によって実行します.
p1_nor_x_samples = p1_nor_x.sample()
print(p1_nor_x_samples)
print('--------------------------------------------------------------------------')
print(p1_nor_x_samples["x"])
# 出力はdict形式になっています.
#
# サンプリング結果を確認したい変数について指定することで,中身を確認できます(ただし,この例では変数は"x"のみです).
#
# なお,サンプリング結果は,PyTorchのtensor形式になっています.
# ### 1.2 条件付確率分布の定義
# 次に条件付確率分布の定義の仕方を正規分布の例で見ていきます
#
# 正規分布ではパラメータは平均$\mu$と分散$\sigma^2$がありますが,今回は平均が条件付けられた正規分布を取り上げたいと思います
# $p(x|\mu_{var}) = \cal N(x; \mu=\mu_{var}, \sigma^2=1)$
#
# 分布の条件付き変数の設定はcond_varで行います
# ここではmu_varという変数を正規分布の平均に設定したいため
# cond_var=['mu_var']
# loc='mu_var'
# とします
x_dim = 50
p1_nor_x__mu = Normal(loc='mu_var', scale=torch.tensor(1.), var=['x'], cond_var=['mu_var'], features_shape=[x_dim])
print(p1_nor_x__mu)
print_latex(p1_nor_x__mu)
# これで平均が$\mu_{var}$で条件付けされる正規分布が定義できました
# 試しに$\mu_{var}=0$としてxをサンプリングしてみます
# sampleメソッドにdict形式で変数を指定します
p1_nor_x__mu.sample({"mu_var": 0})
# 次に$\mu_{var}$自体も何らかの確率分布に従う変数とし,その確率分布を定めます
# ここでは仮にベルヌーイ分布とします
# $p(\mu_{var}) = \cal B(\mu_{var};p=0.3)$
from pixyz.distributions import Bernoulli
p2_ber_mu = Bernoulli(probs=torch.tensor(0.3), var=['mu_var'], features_shape=[x_dim])
print(p2_ber_mu)
print(p2_ber_mu.sample())
print_latex(p2_ber_mu)
# Pixyzでは分布の積は,掛け算で表すことができます
# 定義した$p(\mu_{var})$と$p(x|\mu_{var})$を掛け合わせて同時分布$p(x, \mu_{var})$を定義します
# $p(x, \mu_{var}) = p(x|\mu_{var}) p(\mu_{var})$
#
p_joint_mu_x = p1_nor_x__mu * p2_ber_mu
print(p_joint_mu_x)
print_latex(p_joint_mu_x)
# 同時分布でも今までと同様にsampleメソッドでサンプリングを行うことができます
# 全ての変数とその値がdict形式で出力されます
p_joint_mu_x.sample()
# ## 2. 深層ニューラルネットワークと組み合わせた確率分布の設定
# 次に, 確率分布のパラメータを深層ニューラルネットワークで定義します.
#
# 例えば,ガウス分布の平均$\mu$と分散$\sigma^2$は, パラメータ$\theta$を持つ深層ニューラルネットワークによって,$\mu=f(x;\theta)$および$\sigma^2=g(x;\theta)$と定義できます.
#
# したがって,ガウス分布は${\cal N}(\mu=f(x;\theta),\sigma^2=g(x;\theta))$となります.
#
# $p(a) = {\cal N}(a; \mu=f(x;\theta),\sigma^2=g(x;\theta))$を定義してみましょう
#
# Pixyzでは,次のようなクラスを記述することで,これを実現できます.
# +
a_dim = 20  # dimensionality of the output variable a
class ProbNorAgivX(Normal):
    r"""Conditional Gaussian distribution p(a|x) with NN-parameterized moments.

    p(a|x) = N(a; \mu=f(x;\theta), \sigma^2=g(x;\theta))

    loc (mean) and scale (std) are produced by small fully connected
    networks applied to the conditioning variable x.
    """
    def __init__(self):
        # var: name of the output variable; cond_var: name of the conditioning variable
        super(ProbNorAgivX, self).__init__(var=['a'], cond_var=['x'])
        self.fc1 = nn.Linear(x_dim, 10)       # shared hidden layer
        self.fc_loc = nn.Linear(10, a_dim)    # head producing the mean
        self.fc_scale = nn.Linear(10, a_dim)  # head producing the pre-softplus scale
    def forward(self, x):
        # Returns the distribution parameters as a dict, per the Pixyz API.
        # softplus keeps the scale strictly positive.
        h1 = F.relu(self.fc1(x))
        return {'loc': self.fc_loc(h1), 'scale': F.softplus(self.fc_scale(h1))}
p_nor_a__x = ProbNorAgivX()
# -
# まず, ガウス分布クラスを継承することで,ガウス分布のパラメータを深層ニューラルネットワークで定義することを明示します.
#
# 次に,コンストラクタで,利用するニューラルネットワークを記述します.これは,通常のPyTorchと同じです.
#
# 唯一異なる点は,superの引数にvarとcond_varの名前を指定している点です.
#
# varは先程見たように,出力する変数の名前を指定します.一方,cond_varではニューラルネットワークの入力変数の名前を指定します.これは,ここで定義する分布において,条件付けられる変数とみなすことができます.
#
# forwardについても,通常のPyTorchと同じです.ただし,注意点が2つあります.
#
# * 引数の名前と数は,cond_varで設定したものと同じにしてください. 例えば,cond_var=["x", "y"]とした場合は,forward(self, x, y)としてください.
# * 戻り値は,それぞれの確率分布のパラメータになります.上記の例ではガウス分布なので,平均と分散を指定しています.
#
# そして最後に,定義した確率分布クラスのインスタンスを作成します.
#
# 次に,先程の例と同様,確率分布の情報を見てみましょう.
print(p_nor_a__x)
print_latex(p_nor_a__x)
# p2の分布は,xで条件付けた形になっています.これらの表記は,superの引数で設定したとおりになっています.
# 次に,先程の例のように,サンプリングしてみましょう.
#
# 注意しなければならないのは,先ほどと異なり,条件づけた変数xがあるということです.
#
# x_samplesをxとしてサンプリングしましょう.
x_samples = torch.Tensor([[-0.3030, -1.7618, 0.6348, -0.8044, -1.0371, -1.0669, -0.2085,
-0.2155, 2.2952, 0.6749, 1.7133, -1.7943, -1.5208, 0.9196,
-0.5484, -0.3472, 0.4730, -0.4286, 0.5514, -1.5474, 0.7575,
-0.4068, -0.1277, 0.2804, 1.7460, 1.8550, -0.7064, 2.5571,
0.7705, -1.0739, -0.2015, -0.5603, -0.6240, -0.9773, -0.1637,
-0.3582, -0.0594, -2.4919, 0.2423, 0.2883, -0.1095, 0.3126,
-0.3417, 0.9473, 0.6223, -0.4481, -0.2856, 0.3880, -1.1435,
-0.6512]])
p_nor_a__x_samples = p_nor_a__x.sample({'x': x_samples})
print(p_nor_a__x_samples)
print(p_nor_a__x_samples['a'])
print(p_nor_a__x_samples['x'])
# 出力には,aとxの2つのサンプルがあります.
#
# aが今回計算したサンプルで,xについては,引数として与えたサンプルがそのまま入っています.
# ### Next Tutorial
# 02-LossAPITutorial.ipynb
| tutorial/Japanese/01-DistributionAPITutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot
import matplotlib.pylab as plt
# -
dat = pd.read_csv("twitter_release.csv")
dat['date'] = pd.to_datetime(dat['date'])
dat.head()
release = dat.groupby(dat['date'].dt.date).agg(sum).reset_index()
release.to_csv("../twitter_0321.csv")
| twitter_data/final_process.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import pandas as pd
import numpy as np
import sklearn
from sklearn.model_selection import KFold
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# %matplotlib inline
import pandas as pd
from sklearn.ensemble import RandomForestClassifier, forest
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
from IPython.display import display
import numpy as np
import scipy
import re
# -
majority = pd.read_csv('majority.csv')
print('majority set loaded')
print(majority.shape)
test = pd.read_csv('test114.csv')
testing, drop = test.drop('Unnamed: 0', axis=1), test['Unnamed: 0']
X_test, Y_test = testing.drop('HasDetections', axis=1), testing['HasDetections']
# +
from sklearn import metrics
print(metrics.confusion_matrix(Y_test, majority))
# -
print(metrics.classification_report(Y_test, majority))
import numpy as np
from sklearn.metrics import roc_auc_score
print(roc_auc_score(Y_test, majority))
| LGBM(114)/Voting result(114).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Python statistics essential training - 03_07_proportions
# Standard imports
import numpy as np
import scipy.stats
import pandas as pd
# +
import matplotlib
import matplotlib.pyplot as pp
import pandas.plotting
from IPython import display
from ipywidgets import interact, widgets
# %matplotlib inline
# -
import re
import mailbox
import csv
smoking = pd.read_csv('whickham.csv')
smoking['ageGroup'] = pd.cut(smoking.age,[0,30,40,53,64],labels=['0-30','30-40','40-53','53-64'])
bysmoker = smoking.groupby("smoker").outcome.value_counts(normalize=True)
byage = smoking.groupby(['ageGroup','smoker']).outcome.value_counts(normalize=True)
bysmoker
byage
pp.figure(figsize=(10,4))
pp.subplot(1,2,1); smoking.outcome.value_counts().plot(kind='pie', colors=['C0', 'C1']); pp.title('outcome')
pp.subplot(1,2,2); smoking.smoker.value_counts().plot(kind='pie', colors=['C2', 'C3']); pp.title('smoker')
bysmoker.unstack().plot(kind='bar', stacked=True)
byage.unstack().plot(kind='bar', stacked=True)
byage.unstack().drop('Dead', axis=1).unstack()
byage2 = byage.unstack().drop('Dead', axis=1).unstack()
byage2.columns = ['No', 'Yes']
byage2.columns.name = 'smoker'
byage2.plot(kind='bar')
| Python Statistics/Exercise Files/chapter3/03_07/03_07_proportions_begin.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# * ver si conviene usar `generators` (ver proyecto de EEG prediction)
# * leer desde timescale db
# * diseño de la red "optimizable" (~neuroevolución)
#
#
# Recursos interesantes:
#
# * https://www.liip.ch/en/blog/time-series-prediction-a-short-comparison-of-best-practices
# * https://github.com/ageron/handson-ml2/blob/master/15_processing_sequences_using_rnns_and_cnns.ipynb
# * https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/
# * https://towardsdatascience.com/time-series-analysis-visualization-forecasting-with-lstm-77a905180eba
# +
import pandas as pd
from datetime import datetime
from matplotlib import pyplot
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot as plt
import seaborn as sns
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error,mean_absolute_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.callbacks import ModelCheckpoint,EarlyStopping
from keras.models import load_model, model_from_json
import pickle
import glob
from satlomasproc.configuration import LSTMTrainingScriptConfig
from satlomasproc.data import read_time_series_from_csv
from satlomasproc.feature import (
get_dataset_from_series,
get_interest_variable
)
from satlomasproc.model_hyperopt import (
get_lstm_nnet_opt
)
from satlomasproc.model import (train_val_test_split)
import ipdb
# convert series to supervised learning
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Reframe a time-series DataFrame as a supervised-learning table.

    Each output row pairs the `n_in` past observations (lag columns named
    ``col(t-i)``) with the `n_out` current/future observations (columns
    ``col(t)``, ``col(t+i)``).

    Parameters
    ----------
    data : pandas.DataFrame
        Observations, one column per variable.
    n_in : int
        Number of lag steps to use as input (t-n_in .. t-1).
    n_out : int
        Number of steps to use as output (t .. t+n_out-1).
    dropnan : bool
        If True, drop rows containing NaNs introduced by the shifting.

    Returns
    -------
    pandas.DataFrame
        Lagged input columns followed by the output columns.
    """
    df = data
    cols, names = [], []
    # input sequence (t-n_in, ..., t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += ['%s(t-%d)' % (var_name, i) for var_name in df.columns]
    # forecast sequence (t, t+1, ..., t+n_out-1)
    for i in range(0, n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += ['%s(t)' % (var_name) for var_name in df.columns]
        else:
            names += ['%s(t+%d)' % (var_name, i) for var_name in df.columns]
    # put it all together
    agg = concat(cols, axis=1)
    agg.columns = names
    # drop rows with NaN values produced by shifting at the series edges
    if dropnan:
        agg.dropna(inplace=True)
    return agg
# +
# design network
model = Sequential()
#model.add(LSTM(2*n_hours, input_shape=(train_X.shape[1], train_X.shape[2]),return_sequences=True))
model.add(LSTM(2*n_hours, input_shape=(n_hours, 1),return_sequences=True))
model.add(Dropout(rate=0.2))
model.add(LSTM(3*n_hours, return_sequences=True))
model.add(Dropout(rate=0.2))
model.add(LSTM(2*n_hours))
model.add(Dropout(rate=0.2))
model.add(Dense(1))
#model_loss = 'mae'
model_loss = 'mean_squared_error'
model.compile(loss=model_loss, optimizer='adam')
#model.compile(loss='model_loss', optimizer='adam', metrics=['accuracy'])
# fit network
# que corte despues de no mejorar mucho
early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='min',restore_best_weights=True)
# que use el mejor modelo
checkpoint = ModelCheckpoint(out_model_name, save_best_only=True, monitor='val_loss', mode='min',verbose=2)
history = model.fit(train_X, train_y, epochs=100, validation_data=(val_X, val_y), verbose=1, shuffle=False,callbacks=[checkpoint,early_stopping])
print(model.summary())
with open('history.pickle', 'wb') as file_pi:
pickle.dump(history, file_pi)
# +
# plot history
read_prev = True
filename_hyperopt_history = glob.glob('../models/*_hyperopt_history_*.pickle')[-1]
filename_history = '../models/esp:10_eps:1000_loss:mean_squared_error_opt:adam_pstps:5_sensor:A620_var:temp_basenet:4.4_midnet:4.2_history_2020-02-27_23:55:00.pickle'
print(filename_hyperopt_history)
print(filename_history)
if read_prev:
try:
del history
except:
pass
with open(filename_history, 'rb') as file_pi:
history = pickle.load(file_pi)
try:
del history_hyper
except:
pass
with open(filename_hyperopt_history, 'rb') as file_pi:
history_hyper = pickle.load(file_pi)
plt.figure(figsize=(15,10))
plt.plot(history.history['loss'], label='Train Mean Squared Error')
plt.plot(history.history['val_loss'], label='Validation Mean Squared Error')
plt.plot(history_hyper.history['loss'], label='Train Mean Squared Error - Hyperopted')
plt.plot(history_hyper.history['val_loss'], label='Validation Mean Squared Error - Hyperopted')
plt.legend()
plt.savefig('traininig_curve.png')
plt.show()
# +
out_model_name = glob.glob('../models/*_hyperopt_model_*.hdf5')[-1]
print(out_model_name)
#out_model_name = '../models/esp:10_eps:2_loss:mean_squared_error_opt:adam_pstps:3_sensor:A620_var:temp_basenet:4.4_midnet:4.2_hyperoptpars:[1, 2, 3][2, 3][0.1, 0.8]3_hyperopt_model_2020-02-25_10:13:29.hdf5'
model_hyper = load_model(out_model_name)
print(model_hyper.summary())
# -
layer_idx = 2
model_hyper.layers[layer_idx].input_shape
# +
out_model_name_simple = '../models/esp:10_eps:1000_loss:mean_squared_error_opt:adam_pstps:5_sensor:A620_var:temp_basenet:4.4_midnet:4.2_model_2020-02-27_23:55:00.hdf5'
model_simple = load_model(out_model_name_simple)
print(model_simple.summary())
model_simple.layers[3].rate
# +
# load dataset usando satlomas
config_file = '../config_train_lstm_temp.json'
script_config = LSTMTrainingScriptConfig(config_file)
n_past_steps = script_config.n_past_steps
input_csv = script_config.input_csv
date_col = script_config.date_col
hr_col = script_config.hr_col
target_var = script_config.numeric_var
sensor_var = script_config.sensor_var
sensor = script_config.target_sensor
# read the raw data
input_csv_nb = '../{}'.format(input_csv)
raw_dataset = read_time_series_from_csv(input_csv_nb,date_col,hr_col,target_var,sensor_var)
raw_dataset.head()
# get the time series dataset
time_series_dset = get_interest_variable(raw_dataset,sensor_var,date_col,hr_col,target_var,sensor)
time_series_dset.head()
# get the final dataset
sup_dataset,scaler = get_dataset_from_series(time_series_dset,n_past_steps)
print(scaler)
sup_dataset.head()
# split the dataset in train , test and validation
n_features = time_series_dset.shape[1]
dataset_splits = train_val_test_split(sup_dataset,n_past_steps,n_features,target_var)
train_X = dataset_splits['trainset']['X']
test_X = dataset_splits['testset']['X']
train_y = dataset_splits['trainset']['y']
test_y = dataset_splits['testset']['y']
# +
# make a prediction
def make_prediction(model):
    """Predict on the train and test splits, undo the feature scaling, and
    print MAE in the original units.

    Relies on module-level globals: ``train_X``/``test_X``, ``train_y``/
    ``test_y``, ``n_past_steps``, ``n_features`` and the fitted ``scaler``.

    Returns
    -------
    (inv_yhat, inv_y) : test predictions and test ground truth, both
    inverse-transformed back to the original scale.
    """
    print(model.summary())
    yhat = model.predict(test_X)
    tr_yhat = model.predict(train_X)
    # flatten the (samples, timesteps, features) windows back to 2-D so the
    # last feature columns can be stacked next to the predictions below
    test_X_2d = test_X.reshape((test_X.shape[0], n_past_steps*n_features))
    train_X_2d = train_X.reshape((train_X.shape[0], n_past_steps*n_features))
    # invert scaling for the forecasts: the scaler was fit on the full
    # (n_samples, n_features) matrix, so rebuild such a matrix with the
    # prediction in column 0 and the remaining feature columns alongside
    inv_yhat = concatenate((yhat, test_X_2d[:, -1*(n_features-1):]), axis=1)
    inv_yhat = scaler.inverse_transform(inv_yhat)
    inv_yhat = inv_yhat[:,0]
    tr_inv_yhat = concatenate((tr_yhat, train_X_2d[:, -1*(n_features-1):]), axis=1)
    tr_inv_yhat = scaler.inverse_transform(tr_inv_yhat)
    tr_inv_yhat = tr_inv_yhat[:,0]
    # invert scaling for the actual target values
    test_y_2d = test_y.reshape((len(test_y), 1))
    inv_y = concatenate((test_y_2d, test_X_2d[:, -1*(n_features-1):]), axis=1)
    inv_y = scaler.inverse_transform(inv_y)
    inv_y = inv_y[:,0]
    train_y_2d = train_y.reshape((len(train_y), 1))
    # BUG FIX: the filler feature columns must come from train_X_2d, not
    # from the (n, 1) train_y_2d vector — the original fed the wrong
    # shape/values into inverse_transform, corrupting the reported train MAE
    tr_inv_y = concatenate((train_y_2d, train_X_2d[:, -1*(n_features-1):]), axis=1)
    tr_inv_y = scaler.inverse_transform(tr_inv_y)
    tr_inv_y = tr_inv_y[:,0]
    # MAE in the original (unscaled) units
    tr_mae = mean_absolute_error(tr_inv_y, tr_inv_yhat)
    print('Train MAE: %.3f' % tr_mae)
    mae = mean_absolute_error(inv_y, inv_yhat)
    print('Test MAE: %.3f' % mae)
    return inv_yhat,inv_y
predictions_hyper , inv_y = make_prediction(model_hyper)
predictions_simple , inv_y = make_prediction(model_simple)
# +
# #%matplotlib inline
def plot_predictions(real,hyper,simple):
    """Plot ground truth vs. the hyperopt and simple model predictions.

    Shows the first 750 hourly steps, saves the figure to
    'prediction_plot.png', and displays it. Reads the module-level
    globals ``target_var``, ``plt`` and ``sns``.
    """
    inv_y = real
    inv_yhat = hyper
    inv_yhat_simple = simple
    # plotting the actual values and both predictions together
    total_time_steps = 750
    time_steps=[x for x in range(total_time_steps)]
    plt.figure(figsize=(15,7))
    #plt.plot(time_steps, Y_test[0][:total_time_steps], marker='.', label="actual")
    #plt.plot(time_steps, test_predict[:,0][:total_time_steps], 'r', label="prediction")
    plt.plot(time_steps, inv_y[:total_time_steps], marker='.', label="Valor real")
    plt.plot(time_steps, inv_yhat[:total_time_steps], 'r', label="Predicción Hyperoptimized")
    plt.plot(time_steps, inv_yhat_simple[:total_time_steps], 'g', label="Predicción Simple")
    # plt.tick_params(left=False, labelleft=True) #remove ticks
    #plt.tight_layout()
    sns.despine(top=True)
    plt.subplots_adjust(left=0.07)
    plt.ylabel(target_var, size=15)
    plt.xlabel('Horas', size=15)
    plt.legend(fontsize=15)
    plt.savefig('prediction_plot.png')
    plt.show()
# Compare both models against the real series over the first 750 steps.
plot_predictions(inv_y,predictions_hyper,predictions_simple)
# +
# Load the stored training results of the two simple-model runs.
results_train_simple_old = pd.read_csv('../results/A620_temp_results_2020-01-08_23:29:54.csv')
results_train_simple_new = pd.read_csv('../results/esp:10_eps:1000_loss:mean_squared_error_opt:adam_pstps:5_sensor:A620_var:temp_basenet:4.4_midnet:4.2_results_2020-02-27_23:55:00.csv')
# TODO add the results of the hyperopt version
# FIX: DataFrame.append is deprecated and removed in pandas >= 2.0;
# pd.concat produces the same stacked frame.
full_results = pd.concat([results_train_simple_old, results_train_simple_new])
full_results
| notebooks/meteorologica/sensor_predictions_evaluation_server.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Selenium
# ## Basic usage
# +
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
# Create a Chrome browser object.
browser = webdriver.Chrome()
try:
    # Open the Baidu home page.
    browser.get('https://www.baidu.com')
    # Find the search input box by its id 'kw'.
    input = browser.find_element_by_id('kw')
    # Type the keyword 'Python' into the input box.
    input.send_keys('Python')
    # Press ENTER to submit the search.
    input.send_keys(Keys.ENTER)
    # Wait object with a 10-second timeout.
    wait = WebDriverWait(browser,10)
    # Block until the results container (id 'content_left') is present.
    wait.until(EC.presence_of_element_located((By.ID, 'content_left')))
    # Print the browser's current URL.
    print(browser.current_url)
    # BUG FIX: get_cookies is a method; without the parentheses the original
    # printed the bound method object instead of the cookie list, despite the
    # accompanying comment saying it prints the cookies.
    print(browser.get_cookies())
    # Print the page source.
    print(browser.page_source)
finally:
    # Close the browser.
    browser.close()
# -
# ## Declaring browser objects
# +
from selenium import webdriver
# Selenium ships drivers for several browsers; each line below starts a
# different one (only the last successful assignment is kept).
# NOTE(review): PhantomJS support was removed in later Selenium releases —
# confirm the installed Selenium version before running this cell.
browser = webdriver.Chrome()
browser = webdriver.Firefox()
browser = webdriver.Edge()
browser = webdriver.PhantomJS()
browser = webdriver.Safari()
# -
# ## Visiting a page
# +
from selenium import webdriver
# Create a Chrome browser object.
browser = webdriver.Chrome()
# Open the Taobao home page.
browser.get('https://taobao.com')
# Print the page source.
print(browser.page_source)
# Close the browser.
browser.close()
# -
# ## Finding elements
# ### A single element
# +
from selenium import webdriver
# Create a Chrome browser object.
browser = webdriver.Chrome()
# Open the Taobao home page.
browser.get('https://www.taobao.com')
# Locate the search input box (id 'q') by its id attribute.
input_first = browser.find_element_by_id('q')
# Locate the same input box with a CSS selector.
input_second = browser.find_element_by_css_selector('#q')
# Locate the same input box with an XPath expression.
input_third = browser.find_element_by_xpath('//*[@id="q"]')
# Print the three (identical) lookup results.
print(input_first,input_second,input_third)
# Close the browser.
browser.close()
# -
# * find_element_by_name
# * find_element_by_xpath
# * find_element_by_link_text
# * find_element_by_partial_link_text
# * find_element_by_tag_name
# * find_element_by_class_name
# * find_element_by_css_selector
# +
from selenium import webdriver
from selenium.webdriver.common.by import By
browser = webdriver.Chrome()
browser.get('https://www.taobao.com')
# Equivalent generic form: find_element(By.<strategy>, value).
input_first = browser.find_element(By.ID, 'q')
print(input_first)
browser.close()
# -
# ### Multiple elements
# +
from selenium import webdriver
# Create a Chrome browser object.
browser = webdriver.Chrome()
# Open the Taobao home page.
browser.get('https://www.taobao.com')
# Find all the li tags under the ul with class 'service-bd' in the left
# navigation bar (find_elements returns a list; a CSS selector is used).
# BUG FIX: the original selector '.service-bd' matched only the ul itself,
# contradicting both this description and the By.CSS_SELECTOR cell below,
# which uses '.service-bd li' and is documented as "same as above".
lis = browser.find_elements_by_css_selector('.service-bd li')
# Print the list of elements.
print(lis)
# Close the browser.
browser.close()
# +
from selenium import webdriver
from selenium.webdriver.common.by import By
browser = webdriver.Chrome()
browser.get('https://www.taobao.com')
lis = browser.find_elements(By.CSS_SELECTOR, '.service-bd li')# same result as above
print(lis)
browser.close()
# -
# * find_elements_by_name
# * find_elements_by_xpath
# * find_elements_by_link_text
# * find_elements_by_partial_link_text
# * find_elements_by_tag_name
# * find_elements_by_class_name
# * find_elements_by_css_selector
# ## Interacting with elements
# Call interaction methods on the elements obtained above
# +
from selenium import webdriver
import time
# Create a Chrome browser object.
browser = webdriver.Chrome()
# Open the Taobao home page.
browser.get('https://www.taobao.com')
# Find the search input box (id 'q').
input = browser.find_element_by_id('q')
# Type the keyword 'OnePlus' into the box.
input.send_keys('OnePlus')
# Pause the browser for 1 second.
time.sleep(1)
# Clear the input box.
input.clear()
# Type a different keyword (the brand name in Chinese) into the box.
input.send_keys('一加')
# Find the search button by its class name 'btn-search'.
button = browser.find_element_by_class_name('btn-search')
# Click the button to launch the search.
button.click()
# -
# More operations: http://selenium-python.readthedocs.io/api.html#module-selenium.webdriver.remote.webelement
# ## Action chains
# Queue actions on a chain and run them serially
# +
from selenium import webdriver
from selenium.webdriver import ActionChains
# Create a Chrome browser object.
browser = webdriver.Chrome()
# Demo page containing a drag-and-drop widget.
url = 'http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'
# Open the page.
browser.get(url)
# Switch into the frame named 'iframeResult'.
browser.switch_to.frame('iframeResult')
# The drag source (id 'draggable'), found with a CSS selector.
source = browser.find_element_by_css_selector('#draggable')
# The drop target (id 'droppable'), found with a CSS selector.
target = browser.find_element_by_css_selector('#droppable')
# Create an action chain bound to this browser.
actions = ActionChains(browser)
# Queue a drag-and-drop of source onto target.
actions.drag_and_drop(source,target)
# Execute the queued actions.
actions.perform()
# Close the browser.
browser.close()
# -
# More operations: http://selenium-python.readthedocs.io/api.html#module-selenium.webdriver.common.action_chains
# ## Executing JavaScript
# +
from selenium import webdriver
# Create a Chrome browser object.
browser = webdriver.Chrome()
# Open the Zhihu explore page.
browser.get('https://www.zhihu.com/explore')
# Scroll to the bottom of the page via execute_script.
browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
# Pop an alert via JavaScript (note: the JS is passed as a quoted string).
browser.execute_script('alert("To Bottom")')
# -
# ## Getting element information
# ### Getting attributes
# +
from selenium import webdriver
from selenium.webdriver import ActionChains
# Create a Chrome browser object.
browser = webdriver.Chrome()
# Target URL.
url = 'https://www.zhihu.com/explore'
# Open it.
browser.get(url)
# Find the Zhihu logo element by its id.
logo = browser.find_element_by_id('zh-top-link-logo')
# Print the element itself.
print(logo)
# Print the element's 'class' attribute via get_attribute.
print(logo.get_attribute('class'))
# -
# ### Getting the text value
# +
from selenium import webdriver
browser = webdriver.Chrome()
url = 'https://www.zhihu.com/explore'
browser.get(url)
# Find the "add question" input by its class name.
input = browser.find_element_by_class_name('zu-top-add-question')
# Print the element's visible text.
print(input.text)
# -
# ### Getting the id, location, tag name and size
# +
from selenium import webdriver
browser = webdriver.Chrome()
# Target URL.
url = 'https://www.zhihu.com/explore'
# Open it.
browser.get(url)
# Find the "add question" input by its class name.
input = browser.find_element_by_class_name('zu-top-add-question')
# Print the element's internal id.
print(input.id)
# Print the element's on-page location.
print(input.location)
# Print the element's tag name.
print(input.tag_name)
# Print the element's rendered size.
print(input.size)
# -
# ## Frame
# +
import time
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
browser = webdriver.Chrome()
url = 'http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'
browser.get(url)
# Enter the child frame; element lookups are now scoped to it.
browser.switch_to.frame('iframeResult')
source = browser.find_element_by_css_selector('#draggable')
print(source)
try:
    # The logo lives in the parent frame, so this lookup fails inside the child.
    logo = browser.find_element_by_class_name('logo')
except NoSuchElementException:
    print('NO LOGO')
# Back in the parent frame the logo can be found.
browser.switch_to.parent_frame()
logo = browser.find_element_by_class_name('logo')
print(logo)
print(logo.text)
# -
# ## Waits
# ### Implicit wait
# With an implicit wait, if WebDriver does not find an element in the DOM it keeps polling until the configured timeout and only then raises a no-such-element error. In other words, when an element is not immediately present the driver re-checks the DOM for a while before giving up; the default wait is 0.
# +
from selenium import webdriver
browser = webdriver.Chrome()
# Set an implicit wait of 10 seconds for all lookups.
browser.implicitly_wait(10)
browser.get('https://www.zhihu.com/explore')
input = browser.find_element_by_class_name('zu-top-add-question')
print(input)
# -
# ### Explicit wait
# An explicit wait has no fixed duration: it lasts until the specified element appears or another given condition is met (bounded by the timeout).
# +
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Create a browser object.
browser = webdriver.Chrome()
# Open the Taobao home page.
browser.get('https://www.taobao.com')
# Wait object with a 10-second timeout.
wait = WebDriverWait(browser,10)
# Wait until the element with id 'q' is present in the DOM (note the double
# parentheses: the locator is passed as a tuple).
input = wait.until(EC.presence_of_element_located((By.ID, 'q')))
# Wait until the '.btn-search' button is clickable.
button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '.btn-search')))
# Print both elements.
print(input,button)
# -
# * title_is — the title equals the given text
# * title_contains — the title contains the given text
# * presence_of_element_located — element present in the DOM; takes a locator tuple such as (By.ID, 'p')
# * visibility_of_element_located — element visible; takes a locator tuple
# * visibility_of — element visible; takes an element object
# * presence_of_all_elements_located — all matching elements present
# * text_to_be_present_in_element — the element's text contains the given string
# * text_to_be_present_in_element_value — the element's value contains the given string
# * frame_to_be_available_and_switch_to_it — frame available; switch into it
# * invisibility_of_element_located — element not visible
# * element_to_be_clickable — element clickable
# * staleness_of — whether an element is still attached to the DOM; useful to detect page refreshes
# * element_to_be_selected — element selectable; takes an element object
# * element_located_to_be_selected — element selectable; takes a locator tuple
# * element_selection_state_to_be — takes an element and a state; True when they match
# * element_located_selection_state_to_be — takes a locator tuple and a state; True when they match
# * alert_is_present — whether an alert is shown
# Details: http://selenium-python.readthedocs.io/api.html#module-selenium.webdriver.support.expected_conditions
# ## Forward and back
# +
import time
from selenium import webdriver
browser = webdriver.Chrome()
# Visit Baidu.
browser.get('https://www.baidu.com')
# Visit Taobao.
browser.get('https://www.taobao.com')
# Visit qq.com (originally https://www.python.org/, replaced because that
# site is blocked from the author's network).
browser.get('https://www.qq.com')
# Go back in history. Question: which page does this return to?
browser.back()
# Sleep for 3 seconds.
time.sleep(3)
# Go forward in history. Question: which page does this land on?
browser.forward()
# Close the browser.
browser.close()
# -
# ## Cookies
# +
from selenium import webdriver
browser = webdriver.Chrome()
browser.get('https://www.zhihu.com/explore')
# Fetch and print all cookies.
print(browser.get_cookies())
print('------------------------------------------------------------------------------------------------------')
# add_cookie (note: no trailing 's') adds a single cookie.
browser.add_cookie({'name': 'name', 'domain': 'www.zhihu.com', 'value': 'germey'})
# Fetch and print the cookies again to show the addition.
print(browser.get_cookies())
print('------------------------------------------------------------------------------------------------------')
# delete_all_cookies removes every cookie.
browser.delete_all_cookies()
# Fetch and print the cookies once more to show the deletion.
print(browser.get_cookies())
# -
# ## Tab management
# +
import time
from selenium import webdriver
# Create a browser object.
browser = webdriver.Chrome()
# Visit Taobao.
browser.get('https://www.taobao.com')
# Open a new tab by executing the JS window.open().
browser.execute_script('window.open()')
# Print the list of window handles.
print(browser.window_handles)
# Switch to the second tab (index 1).
browser.switch_to.window(browser.window_handles[1])
# Visit Baidu in the second tab.
browser.get('https://www.baidu.com')
# Sleep for 1 second.
time.sleep(1)
# Switch back to the first tab (index 0).
browser.switch_to.window(browser.window_handles[0])
# Visit vitan.me in the first tab.
browser.get('https://www.vitan.me')
# -
# ## Exception handling
# +
from selenium import webdriver
browser = webdriver.Chrome()
browser.get('https://www.baidu.com')
# Deliberately look up a non-existent id to raise NoSuchElementException.
browser.find_element_by_id('hello')
# +
from selenium import webdriver
from selenium.common.exceptions import TimeoutException, NoSuchElementException
browser = webdriver.Chrome()
try:
    browser.get('https://www.baidu.com')
except TimeoutException:
    print('Time Out')
try:
    browser.find_element_by_id('hello')
except NoSuchElementException:
    print('No Element')
finally:
    browser.close()
# -
# Detailed docs: http://selenium-python.readthedocs.io/api.html#module-selenium.common.exceptions
| Crawl/Class/selenium/selenium.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Fix the NumPy and TensorFlow RNG seeds so runs are reproducible.
from numpy.random import seed
seed(1)
import tensorflow
tensorflow.random.set_seed(2)
# + tags=[]
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import scipy as sp
import sklearn
from sklearn.decomposition import TruncatedSVD
from matplotlib import pyplot as plt
from IPython.display import clear_output
import powerlaw
import tensorflow_addons as tfa
import copy
import weightwatcher as ww
import imageio
from datetime import datetime
import io
import cv2
# Suppress the powerlaw package warnings
# "powerlaw.py:700: RuntimeWarning: divide by zero encountered in true_divide"
# "powerlaw.py:700: RuntimeWarning: invalid value encountered in true_divide"
import warnings
# Silence powerlaw's divide-by-zero / invalid-value RuntimeWarnings.
warnings.simplefilter(action='ignore', category=RuntimeWarning)
import random
import logging
logging.basicConfig(level=logging.INFO)
# weightwatcher logs verbosely during the per-epoch SVD analysis; mute it.
logger = logging.getLogger("weightwatcher")
logger.setLevel(logging.CRITICAL)
# + tags=[]
# Model / data parameters
num_classes = 10
inputShape = (28, 28, 1)
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# shuffle training set and its labels accordingly
# (one shared index permutation keeps images and labels aligned)
trainingIndexes = np.arange(0,y_train.shape[0]).tolist()
random.shuffle(trainingIndexes)
x_train = x_train[trainingIndexes,:,:,:]
y_train = y_train[trainingIndexes,:]
# +
# LENET model
# LeNet-style CNN for 28x28x1 MNIST digits: two conv+maxpool stages
# followed by three dense layers ending in a 10-way softmax.
model = keras.Sequential(
    [
        keras.layers.InputLayer(input_shape=inputShape),
        layers.Conv2D(filters=32, kernel_size=(5,5), padding='same', activation='relu'),
        layers.MaxPool2D(strides=2),
        layers.Conv2D(filters=48, kernel_size=(5,5), padding='valid', activation='relu'),
        layers.MaxPool2D(strides=2),
        layers.Flatten(),
        layers.Dense(256, activation='relu'),
        layers.Dense(84, activation='relu'),
        layers.Dense(10, activation='softmax'),
    ]
)
model.summary()
# + tags=[]
# define a function which returns an image as numpy array from figure
def get_img_from_fig(fig, dpi=180):
    """Render a matplotlib figure into an RGB numpy array.

    The figure is saved as PNG into an in-memory buffer, decoded with
    OpenCV, and converted from OpenCV's BGR channel order to RGB.
    """
    with io.BytesIO() as png_buffer:
        fig.savefig(png_buffer, format="png", dpi=dpi)
        raw_bytes = np.frombuffer(png_buffer.getvalue(), dtype=np.uint8)
    decoded = cv2.imdecode(raw_bytes, 1)
    return cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB)
# + tags=[]
# Training-run configuration.
trainingSize = 2000
testSize = 10000
batch_size = 128
epochs = 30
learningRate = .001
# Strategy used by weightwatcher's SVD smoothing to choose which singular
# components to keep; the trailing comment lists the other options.
selectComponentsMethod = "randomize_percentage" #"localization_ratio" #"percentage" #"classic" #"percentage" #"powerlaw_spikes" #"mp_spikes" #"powerlaw_xmin"
percentageKept = 40
# + tags=[]
# updatable plot
# a minimal example (sort of)
class PlotLosses(keras.callbacks.Callback):
    """Per-epoch live-plotting callback.

    After each epoch it records the train loss, evaluates the test loss,
    builds an SVD-smoothed clone of the global model via weightwatcher and
    records that clone's loss on a small train subset, then redraws the
    figure and appends it as one frame of an MP4 video.

    Relies on module-level globals: model, x_train/y_train, x_test/y_test,
    trainingSize, testSize, selectComponentsMethod, percentageKept,
    get_img_from_fig, ww, imageio, plt, clear_output, datetime.
    """
    def on_train_begin(self, logs={}):
        # NOTE(review): mutable default logs={} mirrors old Keras examples;
        # Keras passes its own dict, so the shared default is not mutated here.
        self.i = 0
        self.x = []
        self.losses = []
        self.test_losses = []
        self.estimatedLosses = []
        self.logs = []
        now = datetime.now()
        date_time = now.strftime("%m-%d-%Y-%H-%M-%S")
        # One video frame per epoch, written to a timestamped mp4 file.
        self.writer = imageio.get_writer("training-" + date_time + ".mp4", format = "FFMPEG", mode='I', fps = 1)
    def on_epoch_end(self, epoch, logs={}):
        clear_output(wait=True)
        # Subplot grid depends on the smoothing method: 2x3 for the spectral
        # methods, 1x4 for the ratio/percentage ones.
        if selectComponentsMethod == "powerlaw_xmin" or selectComponentsMethod == "powerlaw_spikes":
            self.fig, self.axes = plt.subplots(2,3,figsize=(25,10))
        elif selectComponentsMethod == "mp_spikes":
            self.fig, self.axes = plt.subplots(2,3,figsize=(25,10))
        elif selectComponentsMethod == "localization_ratio":
            self.fig, self.axes = plt.subplots(1,4, figsize=(30,10))
        elif selectComponentsMethod == "percentage" or selectComponentsMethod == "randomize_percentage":
            self.fig, self.axes = plt.subplots(1,4, figsize=(30,10))
        self.logs.append(logs)
        self.x.append(self.i)
        self.losses.append(logs.get('loss'))
        self.test_losses.append(model.evaluate(x_test[0:testSize], y_test[0:testSize], verbose=0)[0])
        # careful! Python is dangerous :) as opposed to MATLAB, it can modify the contents of an object from inside a function even if that wasn't asked for.
        # so before we begin, do a deep copy (keras clone) of the model and work with that
        modelToSmooth = keras.models.clone_model(model)
        modelToSmooth.build(model.input_shape)
        modelToSmooth.compile(loss=model.loss, optimizer=model.optimizer, metrics=["accuracy"])
        modelToSmooth.set_weights(model.get_weights())
        watcher = ww.WeightWatcher(model=modelToSmooth)
        # Smooth the clone's weights; resultFunction[0] is the smoothed model,
        # resultFunction[1] presumably the number of components kept — TODO confirm
        # against the weightwatcher API.
        if selectComponentsMethod == "powerlaw_xmin" or selectComponentsMethod == "powerlaw_spikes":
            resultFunction = watcher.unifiedSVDSmoothing(methodSelectComponents = selectComponentsMethod, doPlot = True, axes = [self.axes[0,1],self.axes[0,2],self.axes[1,0],self.axes[1,1]])
        elif selectComponentsMethod == "mp_spikes":
            resultFunction = watcher.unifiedSVDSmoothing(methodSelectComponents = selectComponentsMethod, doPlot = True, axes = [self.axes[0,1],self.axes[0,2],self.axes[1,0],self.axes[1,1]])
        elif selectComponentsMethod == "localization_ratio":
            resultFunction = watcher.unifiedSVDSmoothing(methodSelectComponents = selectComponentsMethod, doPlot = True, axes = [self.axes[1],self.axes[2],self.axes[3]]) #, smoothBias = False, normalizeVectors = False)
        elif selectComponentsMethod == "percentage" or selectComponentsMethod == "randomize_percentage":
            resultFunction = watcher.unifiedSVDSmoothing(methodSelectComponents = selectComponentsMethod, percent = percentageKept / 100, doPlot = True, axes = [self.axes[1],self.axes[2],self.axes[3]])
        # Loss of the smoothed model on the first 100 training samples.
        self.estimatedLosses.append(resultFunction[0].evaluate(x_train[0:100], y_train[0:100], verbose=0)[0])
        self.i += 1
        # Draw the three loss curves on the first axis of whichever grid was built.
        if selectComponentsMethod == "powerlaw_xmin" or selectComponentsMethod == "powerlaw_spikes":
            self.axes[0,0].plot(self.x, self.losses, label="loss")
            self.axes[0,0].plot(self.x, self.test_losses, label="test_loss")
            self.axes[0,0].plot(self.x, self.estimatedLosses, label="SVDestimate_" + str(resultFunction[1]) + "c")
            self.axes[0,0].title.set_text("Epoch " + str(epoch) + ", training size N" + str(trainingSize) + ", test size N" + str(testSize) + ", using " + selectComponentsMethod) #, fontdict=None, loc='center')
            self.axes[0,0].legend()
        elif selectComponentsMethod == "mp_spikes":
            self.axes[0,0].plot(self.x, self.losses, label="loss")
            self.axes[0,0].plot(self.x, self.test_losses, label="test_loss")
            self.axes[0,0].plot(self.x, self.estimatedLosses, label="SVDestimate_" + str(resultFunction[1]) + "c")
            self.axes[0,0].title.set_text("Epoch " + str(epoch) + ", training size N" + str(trainingSize) + ", test size N" + str(testSize) + ", using " + selectComponentsMethod) #, fontdict=None, loc='center')
            self.axes[0,0].legend()
        elif selectComponentsMethod == "localization_ratio":
            self.axes[0].plot(self.x, self.losses, label="loss")
            self.axes[0].plot(self.x, self.test_losses, label="test_loss")
            self.axes[0].plot(self.x, self.estimatedLosses, label="SVDestimate_" + str(resultFunction[1]) + "c")
            self.axes[0].title.set_text("Epoch " + str(epoch) + ", training size N" + str(trainingSize) + ", test size N" + str(testSize) + ", using " + selectComponentsMethod) #, fontdict=None, loc='center')
            self.axes[0].legend()
        elif selectComponentsMethod == "percentage" or selectComponentsMethod == "randomize_percentage":
            self.axes[0].plot(self.x, self.losses, label="loss")
            self.axes[0].plot(self.x, self.test_losses, label="test_loss")
            self.axes[0].plot(self.x, self.estimatedLosses, label="SVDestimate_" + str(resultFunction[1]) + "c")
            self.axes[0].title.set_text("Epoch " + str(epoch) + ", training size N" + str(trainingSize) + ", test size N" + str(testSize) + ", using " + selectComponentsMethod) #, fontdict=None, loc='center')
            self.axes[0].legend()
        plt.show()
        # Convert the finished figure to an RGB array and append a video frame.
        data = get_img_from_fig(self.fig)
        self.writer.append_data(data)
    def on_train_end(self, epoch, logs={}):
        # NOTE(review): Keras calls on_train_end(logs); the logs dict lands in
        # the 'epoch' parameter here — it works, but the signature is unusual.
        self.writer.close()
plot_losses = PlotLosses()
# + tags=[]
opt = keras.optimizers.Adam(learning_rate=learningRate)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
# Train on the first trainingSize samples; the callback redraws the plot and
# records a video frame after each epoch.
model.fit(x_train[0:trainingSize], y_train[0:trainingSize], batch_size=batch_size, epochs=epochs, validation_split=0, callbacks=[plot_losses])
# -
| trainWithUnifiedSVDsmoothing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Reference model - test set: age, temperature
# ## Table of contents
# 1. [Linear Regression](#LinearRegression)
# 2. [MLP (Dense)](#MLP)
# 3. [AE combined latent](#AE_combined)
# 4. [AE OTU latent](#AE_latentOTU)
# Make the project's Src directory importable, then pull in its helper
# modules (data loading, transfer learning, layers, losses, metrics, results).
import sys
sys.path.append('../../Src/')
from data import *
from transfer_learning import *
from test_functions import *
from layers import *
from utils import *
from loss import *
from metric import *
from results import *
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
# Load the OTU and metadata tables, already split into train/test, keeping
# only the 'age' and 'Temperature' domain (metadata) features.
df_microbioma_train, df_microbioma_test, _, _, \
df_domain_train, df_domain_test, _, _, otu_columns, domain_columns = \
read_df_with_transfer_learning_subset_fewerDomainFeatures( \
metadata_names=['age','Temperature'], \
otu_filename='../../Datasets/otu_table_all_80.csv', \
metadata_filename='../../Datasets/metadata_table_all_80.csv')
print(df_domain_train.shape)
print(df_domain_test.shape)
# +
# Sanity check: compare the mean of each domain feature across the splits.
print('TRAIN:')
print('age:' + str(df_domain_train.loc[:,'age'].mean()))
print('Temperature:' + str(df_domain_train.loc[:,'Temperature'].mean()))
print('TEST:')
print('age:' + str(df_domain_test.loc[:,'age'].mean()))
print('Temperature:' + str(df_domain_test.loc[:,'Temperature'].mean()))
# -
# ### Get numpy transfer_learning objects
data_microbioma_train = df_microbioma_train.to_numpy(dtype=np.float32)
data_microbioma_test = df_microbioma_test.to_numpy(dtype=np.float32)
data_domain_train = df_domain_train.to_numpy(dtype=np.float32)
data_domain_test = df_domain_test.to_numpy(dtype=np.float32)
# # 1. Linear regression <a name="LinearRegression"></a>
# +
def model(shape_in, shape_out, output_transform):
    """Build a single linear Dense layer (i.e. a linear regression) from
    shape_in inputs to shape_out outputs, optionally wrapped by
    output_transform.  Note: the local variable 'model' shadows this
    function's own name inside the body (harmless, but easy to misread).
    """
    in_layer = layers.Input(shape=(shape_in,))
    net = in_layer
    net = layers.Dense(shape_out, activation='linear')(net)
    if output_transform is not None:
        net = output_transform(net)
    out_layer = net
    model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
    return model
def compile_model(model, optimizer, reconstruction_error, input_transform, output_transform):
    """Compile *model* with metrics derived from the given transforms
    (the [0][3:] slice drops the first metrics of the experiment set).
    """
    metrics = get_experiment_metrics(input_transform, output_transform)[0][3:]
    model.compile(optimizer=optimizer, loss=reconstruction_error, metrics=metrics)
# -
def model_fn():
    """Factory used by train(): builds and compiles a 2-feature -> 717-OTU
    linear model; returns a 4-tuple — presumably (model, encoder, predictive
    model, decoder) with no encoder/decoder here — TODO confirm against train().
    """
    m = model(shape_in=2,
              shape_out=717,
              output_transform=None)
    compile_model(model=m,
                  optimizer=optimizers.Adam(lr=0.001),
                  reconstruction_error=LossMeanSquaredErrorWrapper(CenterLogRatio(), None),
                  input_transform=CenterLogRatio(),
                  output_transform=None)
    return m, None, m, None
latent_space = 0
# 5-fold cross-validated training of the linear model.
results, modelsLR = train(model_fn,
                          data_microbioma_train,
                          data_domain_train,
                          latent_space=latent_space,
                          folds=5,
                          epochs=100,
                          batch_size=64,
                          learning_rate_scheduler=None,
                          verbose=-1)
print_results(results)
# Evaluate the cross-validated models on the held-out test set.
predictions = test_model(modelsLR, CenterLogRatio, None, otu_columns, data_microbioma_test, data_domain_test)
#save_predictions(predictions, 'experiment_testSet_linear_regresion_2var.txt')
# # 2. MLP (Dense) <a name="MLP"></a>
# +
def model(shape_in, shape_out, output_transform, layers_list, activation_fn):
    """Build a dense feed-forward network: one hidden Dense layer per entry
    of layers_list (with activation_fn), then a linear output layer of
    shape_out units, optionally wrapped by output_transform.
    """
    in_layer = layers.Input(shape=(shape_in,))
    net = in_layer
    for s in layers_list:
        net = layers.Dense(s, activation=activation_fn)(net)
    net = layers.Dense(shape_out, activation='linear')(net)
    if output_transform is not None:
        net = output_transform(net)
    out_layer = net
    model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
    return model
def compile_model(model, optimizer, reconstruction_error, input_transform, output_transform):
    """Compile *model* with metrics derived from the given transforms
    (same helper as in the linear-regression section above).
    """
    metrics = get_experiment_metrics(input_transform, output_transform)[0][3:]
    model.compile(optimizer=optimizer, loss=reconstruction_error, metrics=metrics)
# -
def model_fn():
    """Factory used by train(): 2-feature -> 717-OTU MLP with hidden layers
    [128, 512] and tanh activations.
    """
    m = model(shape_in=2,
              shape_out=717,
              output_transform=None,
              layers_list=[128,512],
              activation_fn='tanh')
    compile_model(model=m,
                  optimizer=optimizers.Adam(lr=0.01),
                  reconstruction_error=LossMeanSquaredErrorWrapper(CenterLogRatio(), None),
                  input_transform=CenterLogRatio(),
                  output_transform=None)
    return m, None, m, None
latent_space=0
# 5-fold cross-validated training of the MLP.
results, modelsMLP = train(model_fn,
                           data_microbioma_train,
                           data_domain_train,
                           latent_space=latent_space,
                           folds=5,
                           epochs=100,
                           batch_size=64,
                           learning_rate_scheduler=None,
                           verbose=-1)
print_results(results)
# Evaluate the cross-validated models on the held-out test set.
predictions = test_model(modelsMLP, CenterLogRatio, None, otu_columns, data_microbioma_test, data_domain_test)
#save_predictions(predictions, 'experiment_testSet_MLP_2var.txt')
# # 3. Auto-encoder combined latent <a name="AE_combined"></a>
# ### Get microbioma train data and numpy train objects
# Reload the datasets so this section is independent of the cells above.
df_microbioma_train, df_microbioma_test, _, _, \
df_domain_train, df_domain_test, _, _, otu_columns, domain_columns = \
read_df_with_transfer_learning_subset_fewerDomainFeatures( \
metadata_names=['age','Temperature'], \
otu_filename='../../Datasets/otu_table_all_80.csv', \
metadata_filename='../../Datasets/metadata_table_all_80.csv')
data_microbioma_train = df_microbioma_train.to_numpy(dtype=np.float32)
data_microbioma_test = df_microbioma_test.to_numpy(dtype=np.float32)
data_domain_train = df_domain_train.to_numpy(dtype=np.float32)
data_domain_test = df_domain_test.to_numpy(dtype=np.float32)
data_domain_train.shape
# ### To create auto-encoder combined model
# Train the selected model (the best one from those with the smallest latent space (10)): no.351
experiment_metrics, models, results = perform_experiment_2(cv_folds=5,
                                                           epochs=100,
                                                           batch_size=64,
                                                           learning_rate=0.001,
                                                           optimizer=optimizers.Adam,
                                                           learning_rate_scheduler=None,
                                                           input_transform=Percentage,
                                                           output_transform=tf.keras.layers.Softmax,
                                                           reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                                           latent_space=10,
                                                           layers=[512,256],
                                                           activation='tanh',
                                                           activation_latent='tanh',
                                                           data_microbioma_train=data_microbioma_train,
                                                           data_domain_train=data_domain_train,
                                                           show_results=True,
                                                           device='/CPU:0')
# Evaluate the cross-validated auto-encoder models on the test set.
predictions = test_model_cv_predictions(models, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
#save_predictions(predictions, 'experiment_testSet_AE_combinedLatent_5CV_2var.txt')
# # 4. Auto-encoder OTU latent <a name="AE_latentOTU"></a>
# ### Get microbioma train data and numpy train objects
# Reload the datasets so this section is independent of the cells above.
df_microbioma_train, df_microbioma_test, _, _, \
df_domain_train, df_domain_test, _, _, otu_columns, domain_columns = \
read_df_with_transfer_learning_subset_fewerDomainFeatures( \
metadata_names=['age','Temperature'], \
otu_filename='../../Datasets/otu_table_all_80.csv', \
metadata_filename='../../Datasets/metadata_table_all_80.csv')
data_microbioma_train = df_microbioma_train.to_numpy(dtype=np.float32)
data_microbioma_test = df_microbioma_test.to_numpy(dtype=np.float32)
data_domain_train = df_domain_train.to_numpy(dtype=np.float32)
data_domain_test = df_domain_test.to_numpy(dtype=np.float32)
data_microbioma_train.shape
# Train the selected model (the best one from those with the smallest latent space (10)): no.351
# Note cv_folds=0 and data_domain_train=None: the auto-encoder is trained on
# the microbiome alone, without the domain features.
experiment_metrics, models, results = perform_experiment_2(cv_folds=0,
                                                           epochs=100,
                                                           batch_size=64,
                                                           learning_rate=0.001,
                                                           optimizer=optimizers.Adam,
                                                           learning_rate_scheduler=None,
                                                           input_transform=Percentage,
                                                           output_transform=tf.keras.layers.Softmax,
                                                           reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                                           latent_space=10,
                                                           layers=[512,256],
                                                           activation='tanh',
                                                           activation_latent='tanh',
                                                           data_microbioma_train=data_microbioma_train,
                                                           data_domain_train=None,
                                                           show_results=True,
                                                           device='/CPU:0')
# ### To get encoders and decoders to use in transfer learning model
model, encoder, _ ,decoder = models[0]
df_domain_train.shape
# ### To predict latent space for samples in domain->latent model
latent_train = encoder.predict(data_microbioma_train)
latent_test = encoder.predict(data_microbioma_test)
# ### To build model to predict latent space
def model_fn_latent():
    """Factory for the domain->latent regressor: a tanh MLP mapping the two
    domain features to the auto-encoder's latent space, trained with MSE.
    Relies on the module-level latent_train for the output width.
    """
    in_layer = layers.Input(shape=(2,))
    net = layers.Dense(128, activation='tanh')(in_layer)
    net = layers.Dense(64, activation='tanh')(net)
    net = layers.Dense(32, activation='tanh')(net)
    net = layers.Dense(16, activation='tanh')(net)
    out_layer = layers.Dense(latent_train.shape[1], activation=None)(net) # 'tanh already'
    model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
    model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.keras.losses.MeanSquaredError(),
                  metrics=[tf.keras.metrics.MeanSquaredError()])
    return model
# Train the domain->latent regressor (no ensemble).
result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                                  latent_train,
                                                  latent_train,
                                                  data_domain_train,
                                                  data_domain_train,
                                                  epochs=100,
                                                  batch_size=16,
                                                  verbose=-1)
print_results_noEnsemble(result_latent)
# Test only Dense(domain->latent)
predictions = test_model_tl_latent(model_latent, latent_test, data_domain_test)
#save_predictions(predictions, 'experiment_testSet_domain-latent_AE_OTUlatent_2var.txt)
# ### Domain -> latent -> microbiome. Test set
predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
| Notebooks/Auxiliary/model_reference_2domainFeatures_age-temperature.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Keras_CIFAR10_CNN_模型增强
# ## 导入模块
# +
# %matplotlib inline
import os
import PIL
import gzip
import tarfile
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from IPython import display
from functools import partial
from sklearn.preprocessing import normalize
from keras import backend
from keras.utils import np_utils, plot_model
from keras.callbacks import TensorBoard, ModelCheckpoint
from keras.callbacks import LearningRateScheduler, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
from keras.models import Sequential, Model
from keras.layers import Dense, Conv2D, MaxPool2D, Input, AveragePooling2D
from keras.layers import Activation, Dropout, Flatten, BatchNormalization
from keras.regularizers import l2
from keras.optimizers import Adam
import warnings
warnings.filterwarnings('ignore')
np.random.seed(42)
# -
# ## 准备数据
# +
# Working directory into which the CIFAR-10 archive is extracted.
data_dir = r"dataset"
# Kaggle-provided archive (copied and untarred by the shell magics below).
cifar10_file = r"../input/cifar-10-python.tar.gz"
if not os.path.isdir(data_dir):
    os.mkdir(data_dir)
# !cp ../input/cifar-10-python.tar.gz dataset/
# !tar xzvf dataset/cifar-10-python.tar.gz -C dataset/
# +
# Load the CIFAR-10 python-pickle batches into train/val/test arrays.
cifar10_dir = r"cifar-10-batches-py"
filepath = os.path.join(data_dir, cifar10_dir)
files = os.listdir(filepath)
# First pass: seed X_train/y_train from batch 1 ("data_batch_1" is the only
# filename containing "1").  The context manager fixes the original
# file-handle leak: the handle was only closed on the matching branch.
for fn in files:
    fp = os.path.join(filepath, fn)
    if "1" in fn:
        with open(fp, 'rb') as f:
            data_b = pickle.load(f, encoding="bytes")
        y_train = np_utils.to_categorical(data_b[b"labels"])
        X_train = np.array(data_b[b"data"])
# Second pass: stack the remaining data batches onto the training arrays,
# read the human-readable label names from the meta file, and load the
# evaluation split from the test batch.
for fn in files:
    fp = os.path.join(filepath, fn)
    with open(fp, 'rb') as f:
        if "data" in fn and "1" not in fn:
            data_b = pickle.load(f, encoding="bytes")
            imgs = np.array(data_b[b"data"])
            t_1hot = np_utils.to_categorical(data_b[b"labels"])
            y_train = np.vstack((y_train, t_1hot))
            X_train = np.vstack((X_train, imgs))
        elif "meta" in fn:
            meta = pickle.load(f, encoding="bytes")
            label_dict = [label.decode() for label in meta[b"label_names"]]
        elif "test" in fn:
            test_b = pickle.load(f, encoding="bytes")
            X_test = test_b[b"data"]
            labels = test_b[b"labels"]
            y_test = np_utils.to_categorical(labels)
"""规范化数据以及切分训练集、验证集和测试集"""
# Normalise each feature to [0, 1], reshape NCHW -> NHWC for Keras, and
# carve the first 10000 training images off as a validation split.
X_train = normalize(X_train, axis=0, norm="max")
X_train = X_train.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1).astype("float32")
X_test = normalize(X_test, axis=0, norm="max")
X_test = X_test.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1).astype("float32")
x_train = X_train[10000:]
t_train = y_train[10000:]
x_val = X_train[:10000]
t_val = y_train[:10000]
print("all files : \n", files)
print("\nall labels : \n", label_dict)
print("\nimgs of trainset : ", x_train.shape)
print("labels of trainset : ", t_train.shape)
print("imgs of valset : ", x_val.shape)
print("labels of valset : ", t_val.shape)
print("imgs of testset : ", X_test.shape)
print("labels of testset : ", y_test.shape)
# -
# ## 定义模型
def myCNN():
    """Build the CIFAR-10 CNN: two conv blocks (Conv-Conv-BN-Pool-Dropout)
    followed by a 512-unit dense head and a 10-way softmax output.

    Returns:
        An uncompiled Keras ``Model`` mapping (32, 32, 3) images to 10
        class probabilities.
    """
    inputs = Input(shape=(32, 32, 3), name="input")
    # Block 1: two 3x3 conv layers with 32 filters each.
    x = Conv2D(32, kernel_size=(3, 3), padding="same", activation="relu", name="conv1")(inputs)
    x = Conv2D(32, kernel_size=(3, 3), padding="same", activation="relu", name="conv2")(x)
    x = BatchNormalization()(x)
    x = MaxPool2D(pool_size=(2, 2), name="pool1")(x)
    x = Dropout(0.5)(x)
    # Block 2: two 3x3 conv layers with 64 filters each.
    x = Conv2D(64, kernel_size=(3, 3), padding="same", activation="relu", name="conv3")(x)
    x = Conv2D(64, kernel_size=(3, 3), padding="same", activation="relu", name="conv4")(x)
    x = BatchNormalization()(x)
    x = MaxPool2D(pool_size=(2, 2), name="pool2")(x)
    x = Dropout(0.5)(x)
    # Classifier head.
    x = Flatten()(x)
    x = Dense(512, activation="relu", name="dense1")(x)
    x = Dropout(0.5)(x)
    outputs = Dense(10, activation="softmax", name="output")(x)
    return Model(inputs=inputs, outputs=outputs)
# Instantiate the network and print its layer-by-layer summary.
model = myCNN()
print(model.summary())
# Render the architecture diagram to disk and display it inline.
if not os.path.isdir(r"model_img"):
    os.mkdir(r"model_img")
plot_model(model, r"model_img/CNN_Dropout_BN.png")
img = PIL.Image.open(r"model_img/CNN_Dropout_BN.png")
display.display(img)
# ## 训练模型
# +
"""训练模型并保存模型及训练历史
保存模型单独创建一个子文件夹modeldir, 保存训练历史则为单个文件hisfile"""
# (Train the models and persist both weights and histories: checkpoints go
#  into the `modeldir` sub-folder, histories into the single file `hisfile`.)
models_name = "Keras_CIFAR10_CNN_Dropout_BN"  # common prefix of all model names
factor_list = [""]  # hyper-parameter variants explored in this run
model_list = []  # model file names
for i in range(len(factor_list)):
    modelname = models_name + factor_list[i] + ".h5"
    model_list.append(modelname)
# Create the sub-directory where model checkpoints are saved.
if not os.path.isdir("saved_model"):
    os.mkdir("saved_model")
modeldir = r"saved_model"
# Create the directory where the training history is saved.
if not os.path.isdir("train_history"):
    os.mkdir("train_history")
# Path of the training-history file.
hisfile = r"train_history/Keras_CIFAR10_CNN_Dropout_BN.train_history"
# Each model and its history are stored as a {modelname: train_history} pair;
# train_history is a dict with four keys: train/val loss and accuracy.
model_train_history = dict()
epochs=200
steps_per_epoch=1250
# Train each model variant with on-the-fly data augmentation, checkpointing
# on best validation accuracy, a stepped learning-rate schedule and simple
# early stopping.
for i in range(len(model_list)):
    model = myCNN()
    modelname = model_list[i]
    modelpath = os.path.join(modeldir, modelname)
    # Per-epoch [loss, acc] rows for the train and validation splits.
    train_his = np.array([]).reshape(-1, 2)
    val_his = np.array([]).reshape(-1, 2)
    datagen = ImageDataGenerator( rotation_range=20,
                                 shear_range=0.2,
                                 zoom_range=0.2,
                                 height_shift_range=0.2,
                                 width_shift_range=0.2,
                                 horizontal_flip=True)
    datagen.fit(x_train)
    lr = 0.001
    model.compile(loss="categorical_crossentropy",
                  optimizer=keras.optimizers.Adam(lr=lr),
                  metrics=["accuracy"])
    print("\ntraining model : ", modelname)
    # ck_epoch: epoch of the last checkpoint; max_val_acc: best val accuracy.
    ck_epoch, max_val_acc = 0, 0.0
    for epoch in range(epochs+1):
        # NOTE(review): this `i` shadows the outer model index; harmless in
        # Python (the outer for re-binds it each iteration) but confusing.
        i = 0
        tr_his = []
        # datagen.flow() yields batches forever, so count steps manually.
        for X, y in datagen.flow(x_train, t_train, batch_size=32):
            his = model.train_on_batch(X, y)
            tr_his.append(his)
            i += 1
            if i >= steps_per_epoch: break
        tr = np.mean(tr_his, axis=0)
        val = model.evaluate(x_val, t_val, verbose=0)
        train_his = np.vstack((train_his, tr))
        val_his = np.vstack((val_his, val))
        if epoch<10 or epoch%5==0:
            print("%4d epoch: train acc: %8f loss: %8f val acc: %8f loss: %8f"%(epoch, tr[1], tr[0], val[1], val[0]))
        # Checkpoint whenever validation accuracy improves.
        if val[1] > max_val_acc:
            model.save(modelpath)
            print("val acc improved from %6f to %6f"%(max_val_acc, val[1]))
            max_val_acc = val[1]
            ck_epoch = epoch
        # Learning-rate schedule: from epoch 80, decay every 40 epochs
        # (x0.2 on multiples of 80, x0.5 otherwise) while lr stays >= 1e-5.
        # NOTE(review): re-compiling builds a fresh Adam, which also resets
        # its moment estimates -- presumably intentional here; confirm.
        if epoch>=80 and epoch%40 == 0 and lr >= 1e-5:
            if epoch%80 == 0:
                lr *= 0.2
            else:
                lr *= 0.5
            model.compile(loss="categorical_crossentropy",
                          optimizer=keras.optimizers.Adam(lr=lr),
                          metrics=["accuracy"])
            print("lr : ", lr)
        # Early stopping: quit after 40 epochs with no improvement.
        if epoch-ck_epoch>40:
            print("Early stop !")
            break
    model_train_history[modelname] = {"acc": train_his[:, 1], "val_acc": val_his[:, 1],
                                      "loss": train_his[:, 0], "val_loss": val_his[:, 0]}
"""保存训练历史"""
# Save the per-model training histories; the with-statement closes the file
# even if pickling raises (the original handle stayed open on error).
with open(hisfile, 'wb') as fo:
    pickle.dump(model_train_history, fo)
# -
# ## 可视化训练过程
def show_train_history(saved_history, his_img_file):
    """Plot accuracy and loss curves (train vs. validation) for every model
    in ``saved_history``, save the figure to ``his_img_file`` and show it.

    Args:
        saved_history: dict mapping model filename -> history dict with keys
            "acc", "val_acc", "loss" and "val_loss".
        his_img_file: path the rendered figure is written to.
    """
    names = sorted(saved_history)
    fig, axes = plt.subplots(1, 2, figsize=(16, 5))
    axes = axes.flatten()
    # Shade step so each model gets a distinguishable colour pair.
    step = 0.9/len(saved_history)
    # Left panel: accuracy curves; right panel: loss curves.
    for panel, (train_key, val_key) in enumerate((("acc", "val_acc"), ("loss", "val_loss"))):
        shade = 0.05
        for name in names:
            history = saved_history[name]
            # name[21:-3] strips the shared filename prefix and ".h5" suffix.
            axes[panel].plot(history[train_key],
                             color=(0, 1-shade, 0),
                             linestyle="-",
                             label="train_"+name[21:-3])
            axes[panel].plot(history[val_key],
                             color=(shade, 0, 1-shade),
                             linestyle="-",
                             label=name[21:-3])
            shade += step
        axes[panel].set_title('Train History')
        axes[panel].set_ylabel(train_key)
        axes[panel].set_xlabel('Epoch')
    axes[0].legend(loc="lower right")
    axes[1].legend(loc="upper right")
    axes[0].set_ylim(0.0, 1.0)
    axes[1].set_ylim(0.0, 2)
    plt.suptitle("CNN_Dropout_BN")
    print("saved img: ", his_img_file)
    plt.savefig(his_img_file)
    plt.show()
# +
"""载入训练历史并可视化, 并且保存图片"""
# Load the saved training history, plot it, and save the resulting image.
if not os.path.isdir("his_img"):
    os.mkdir("his_img")
his_img_file = r"his_img/CNN_Dropout_BN.png"
# Fix: the original opened the history file and never closed it; the
# with-statement releases the handle deterministically.
with open(hisfile, "rb") as fo2:
    saved_history1 = pickle.load(fo2)
show_train_history(saved_history1, his_img_file)
# -
# ## 用测试集测试模型
# +
# Reload the best checkpoint (saved at the highest validation accuracy
# during training) and evaluate it once on the held-out test set.
smodel = load_model(modelpath)
print("\ntest model : ", os.path.basename(modelpath))
loss, acc = smodel.evaluate(X_test, y_test)
print("acc: %.4f \t loss: %.4f"%(acc, loss))
| Keras_CIFAR10_CNN_Dropout_BN.7885.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import CLUSTER_Node_Classification as cnc
# The following function allows you to train the provided models with a feature-augmented version of the datasets.
#
# cnc.perform_cluster_classification(Dataset, Model, random_seed)
#
# The available options for the dataset are: "SBM_CLUSTER", "SBM_CLUSTER_3Cl", "SBM_CLUSTER_4Cl",
# "SBM_CLUSTER_5Cl", "SBM_CLUSTER_34Cl", "SBM_CLUSTER_345Cl"
#
# The available models are "GAT", "GCN", "MoNet", "GraphSage" and "GatedGCN_E_PE".
#
# A random seed of choice needs to be chosen as well (the results in the paper are obtained by averaging over 41, 42, 43 and 44).
#
# An example (to reproduce the best results for the GraphSage model) is provided below:
# Example run: reproduce the best GraphSage result on the SBM_CLUSTER_34Cl
# dataset with random seed 44 (one of the four seeds averaged in the paper).
cnc.perform_cluster_classification("SBM_CLUSTER_34Cl", "GraphSage", 44)
| CLUSTER_Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/abdoulayegk/Python-Workshop/blob/master/notebook1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="e87b1866-2c79-4004-85db-d55413675fd5"
# # End to end introduction to machine learning using python
#
# Workshop lead: <NAME> [@abdoulayegk](http://twitter.com/abdoulayegk)<br>
# Notebook will be [abdulayegk]()<br>
# [Colab Notebook](https://colab.research.google.com/drive/14adHs2rjCAH0TXyvgCBOgEFRwCBzjh9f?usp=sharing)
# + [markdown] id="c1f7659d-f40f-43e8-9bc3-10f4aec48b85"
# # Overview
# The goal of this workshop is to give learners a general intro to machine learning and data science with Python using Pandas and Jupyter.
# We will first go through a general overview of Python, covering lists, tuples and dictionaries,
# then we go through the process of loading data from CSV files, inspecting and cleaning the data. As a second step, we will analyse the data and draw some insights about Chronic-kidney dataset.
#
# The workshop is structured as follows:
#
# - Intro and background
# - Part 0: Quick Jupyter exercise
# - Part 1: General overview of python
# - Part 2: Creation of dataframe and series using pandas
# - Part 3: Loading and inspecting data
# - Part 4: Data analysis
# - Part 5 Model building
# - Part 6: Summary
#
# **Note that this workshop is only intended as an introduction to some basic concepts of python for data science using Pandas. It is in no means intended to be comprehensive, and there are a lot of useful functions a beginner needs to know to do in-depth data analysis. I hope that this workshop sets you up for self-guided learning to master the full range of necessary Pandas tools.**
#
# ## How to follow along with the workshop
# - You can run every cell in the notebook as we go along using the shortcut Shift+Enter
# + [markdown] id="8fc04718-2fbb-4eb6-af67-8b4f3fa6c5c9"
# # Intro
#
# ## What is Jupyter (and the Jupyter ecosystem...)?
# - **IPython** is an **interactive Python shell** (just type "ipython" to start it)
# - **Jupyter** is a Python library that provides a **web-based UI** on top of ipython to create notebooks with code and output
# - **JupyterLab** provides some additional **features on top of Jupyter**, e.g. a file browser
# - **Binder** is a **web-based hub** for containers that contain your Python environment and renders notebooks based on a git repo
#
# ## Quick overview of python list, tuple and dictionary
# - **List** A list is a data structure in Python that is a mutable, or changeable, ordered sequence of elements. Each element or value that is inside of a list is called an item. Lists are defined by having values between square brackets [ ].
# - **Tuple** A tuple is a data structure that is an immutable, or unchangeable, ordered sequence of elements. Because tuples are immutable, their values cannot be modified. Tuples have values between parentheses ( ) separated by commas.
#
# - **Dictionary** The dictionary is Python’s built-in mapping type. Dictionaries map keys to values and these key-value pairs provide a useful way to store data in Python.
#
# Typically used to hold data that are related, such as the information contained in an ID or a user profile, dictionaries are constructed with curly braces on either side { }.
#
# ## What is Pandas/Matplotlib/Pyplot/Seaborn?
#
# - **Pandas** is a Python library for **data manipulation and analysis**. It offers data structures and operations for manipulating numerical tables and time series.
# - **Matplotlib** is a Python **2D plotting library**. Pyplot is a collection of command style functions in matplotlib that make matplotlib work like MATLAB. While we mostly use Seaborn, we sometimes fall back to using Pyplot functions for certain aspects of plotting.
# - **Seaborn** is a Python **data visualization** library based on matplotlib. It's kind of like a nicer version of Pyplot.
# - You can **use Pandas code in a regular Python script** of course. I'm just combining Jupyter + Pandas in this tutorial because notebooks are a great way to immediately see output!
# + [markdown] id="cc9377c0-8557-4c44-813b-8bc3aabac411"
# ### Notebooks are basically just interactive ipython terminals, often mixed in with markdown text:
# - Each input field you see is called a **cell**
# - Cells can be **either code or markdown**
# - You can execute any kind of Python code
# - **Variables persist** between cells
# - The notebook **doesn't care about the order of cells**, just the order of executing it in order to remember variables. However, "run all" executes your cells top to bottom.
#
# ### Notebooks have **two modes**: a) editing the cells and b) navigating the notebook (command mode):
# - You can **navigate** around the notebook in command mode by clicking cells or using the arrow keys
# - Depending on the environment you're using (Jupyter notebook, Jupyter lab, Google Colab...) there will be a different **visual cue** (e.g. a colored line) to indicate the mode a cell is in
# - In order to **edit a cell**, you can press **Enter** or double-click it.
# - To **execute** the cell content, press Shift+Enter to run the cell
# - To get **out of edit mode** and back into navigation mode, press the **Escape key**
# + [markdown] id="aa555fed-8cb2-46cb-a0e3-489ab3ab9ff0"
# ### Some helpful keyboard shortcuts:
# - The **default type for a cell is code**. In command mode, press *m* to make a cell markdown and *y* to make it code
# - Press *a* in command mode to create a new cell *above* the current one
# - Press *b* in command mode to create a new cell *below* the current one
# - *Tab* autocompletes methods (like in IPython)
# - *Shift+Tab* shows you the docstring for the outer function of the line your cursor is in
# - Press *dd* in command mode to delete a cell.
# - *Cmd+z* undoes operations in the highlighted cell, *z* undoes cell operations in the notebook (e.g. deleting a cell)
# + [markdown] id="77c7e001-abb9-4f5f-947b-d6c691840192"
# # Part 1: General Overview of python
# In this part we are going to go through the basics things we need to know before loading data for that we are going to start from looping in python and we will go till classes in python.<br>
# **Note this will be just very basics things we should know to follow along in this workshop if you want to go in deep then you should get a book for that**
# + id="85d165c2-f36b-4cc7-b48c-5303ae451fdd" outputId="269593e9-2d37-4f14-a843-4be79d0b10e9"
# To print text in python
print("Hello world!")
# + [markdown] id="a723c5b3-87c4-4880-919d-9e6e03dfb981"
# ## List
# ### The elements can be of any type.
# + id="39a7a73f-c909-4cd7-aba9-d6f40f679b96" outputId="5290b57a-4719-47ef-a371-eb39195e5741"
# An empty list
a = [] # an empty list
print(a)
print("\n\n") # leave two lines blank
b = [1, 2, 3, 4] # list of numbers
print(b)
print("\n\n")
fruits = ["Orange", "Banana", "Apple"] # list of fruits
print(fruits)
print("\n\n")
# you can mix different datatypes inside one list
mylist = [1, "Pineapple", 3.14, [2, 3, 4]]
print(mylist)
# + [markdown] id="b4cf06d3-7ae9-46e5-834e-0c580ebb09c6"
# ## Tuple
# ### A tuple is like a list, except its elements cannot be changed (immutable): t = ('a', 'b'); t[0] = 'c' raises an error.
# + id="e9daded0-8e20-4b4e-9210-c28d758b4936" outputId="decd0c1b-ab09-4064-e9d1-e8de618afc05"
# Tuple
t = () # an empty tuple
print(t)
mytuple1 = (1, 2, 3, 4, 5)
print(mytuple1)
# + id="93c027c8-d18f-4b7a-a75e-819e34de6bbf" outputId="54e70fd6-a7c5-4155-905e-b0021f43bd40"
# A list may also contain tuples (including an empty one).
bar = [(2, 3, 4), (), ("Banana", "orange")]
type(bar)
# + [markdown] tags=[] id="20d51444-faf2-4d45-a09c-3e24d8a01f0b"
# ### Dictionary
# A set of key -> value mappings.
# + id="6aab0c46-97ef-4bba-976e-bbfb644bb0ab" outputId="db2c8a5b-dace-4551-b5a5-ce3a8c4b30d7"
traduction = {"chien": "Dog", "Chat": "Cat", "Guinee": "Guinea"}
traduction
# + [markdown] id="256cb37d-6024-43de-b850-4033fd214d78"
# **NB**: keys must be unique -- for example, since there is already a key "chien", entering another "chien" key would overwrite it.
# + id="2daadbcb-ff0c-425b-ae46-56c95bca91b5" outputId="e30d001f-7ec6-4340-9c6e-9b2b95803712"
dic = {} # an empty dictionary
mydic = {"username": "abdoulayegk", "online": True, "followers": 987}
print(mydic)
# + id="8b576c8a-c9b1-4e6f-8f54-b94f2c154921" outputId="75f456a0-e97a-4b3d-8cc9-4e7351feed4b"
# Look a value up by its key.
mydic["username"]
# + id="27fdd2ff-5a66-458c-86f9-dc24941a8650" outputId="1a84e0ff-3dc5-45f1-b288-6c18044c61a1"
print(mydic["followers"])
# Returns 987
print(mydic["online"])
# Returns True
# + id="d2a57240-7850-43ad-9be4-80d25da417b3" outputId="b56000a1-5c7a-4626-ee0b-24f243a64c92"
# to print the key/value pairs
for key, value in mydic.items():
    print(key, "is the key for the value", value)
# + [markdown] id="0094c62d-ef4a-403a-95f2-a06deebfedbc"
# Using Methods to Access Elements <br>
# In addition to using keys to access values, we can also work with some built-in methods:<br>
#
# dict.keys() isolates keys <br>
# dict.values() isolates values<br>
# dict.items() returns items in a list format of (key, value) tuple pairs<br>
# + id="d627f536-94cc-470b-ab9d-7f9e44c7f1fe" outputId="6809d325-561a-462b-dcfc-f95105934b79"
# To return all the keys of our dictionary
print(mydic.keys())
# + id="5d607c21-306d-449a-9835-833954c62979" outputId="77e17460-2ad7-41fe-dff0-d22255a66cf4"
# To return all the values of a dictionary
print(mydic.values())
# + [markdown] id="52b0a900-01d7-41e2-bc93-3d5330f2630c"
# # Part 2: creation of Series and DataFrame in Pandas
# + [markdown] tags=[] id="27581ac2-ead3-48bf-819b-f7aa311417c5"
# ## What is a dataframe?
# * A **dataframe** is a **2-dimensional labeled data structure** with columns of potentially different types. You can think of it like a spreadsheet or SQL table, or a dict of Series objects. It is generally the most commonly used Pandas object.
# * Pandas borrows the concept of DataFrame from the statistical programming language R.
# * There are a lot of **different ways to read data** into a dataframe - from lists, dicts, CSVs, databases... In this example, we're loading data from a CSV file!
#
# **Let's take a look at the data to familiarize ourselves with the format and data types. In this example, I'm using some treatment data from the oncology domain, including treatment starts and the drugs patients are getting.**
# + [markdown] id="333782e4-7c26-4fc8-ac7a-76acadbfd48b"
# # Importer les bibliothèques nécessaires
# Avant de travailler avec des bibliothèques comme Pandas ou Numpy, il faut les importer ; et avant même cette étape, il faut installer ces bibliothèques. Si ce n’est pas encore fait sur votre machine, voici donc des [instructions](https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html) pour procéder à l’installation. Une fois que c’est fait, nous pouvons les importer
# + id="7311d787-63bb-4ea1-ad5c-4adca8a6e402"
# we first import series from pandas
from pandas import Series
# + [markdown] id="fbb54870-2cc0-41de-a801-edd5d1fd6dd2"
# - **Note we can create a series from a list**
# + id="8c8fd631-f85a-4e10-9d7a-a01937f1a281" outputId="19e7ef81-98ab-491d-c39b-a3598675d83d"
# let's first create a Series from a plain list
mylist = [20, 20, 30, 40]
s = Series(mylist)
s
# + id="636802ce-9c6c-4fa3-a606-f30e8b613784" outputId="5e41206a-264d-4734-a108-ddba2dd3b46c"
# Positional access via the default integer index.
s[2]
# + id="582a88cb-c239-4e14-a858-9b1f8861dd4e" outputId="0950394a-e8d3-4c13-e514-ef28f7f5a64f"
# you can also set your own index; the default index runs from 0 to n
ss = Series(
    [12, 13, 7, 80, 95], index=list("abcde")
)  # note we can also use index=['a','b',..]
ss
# # you can also change the datatype to float or unsigned int
# Series(
#     [20.5, 12, 34, 56, 100],
#     index=["a", "b", "c", "d", "e"],
# )
# + id="c66077b9-9ab5-43a6-807a-34837cb44c24" outputId="316d8a52-e7bd-4a6f-e81e-e949d043c25a"
# The dtype can be set explicitly; by default it is inferred.
s1 = Series([12, 13, 7, 80, 95], index=list("abcde"), dtype="int8")
s1
# + id="f63dc327-e6ba-43bc-82a3-e27f05258ee6" outputId="0418fad2-a680-4c04-d0a5-1b26ef68c00e"
# we can use integer indices of our choice; for that we use the range function
ex1 = Series([5, 3, 7, 8, 19], index=range(10, 15))
ex1
# + [markdown] id="37797f1e-0ba9-40b4-b6c9-dc69df494b8e"
# ## Slicing series
# + id="922d9736-b687-4871-8ffb-196beac877a8" outputId="e2115475-cf3e-48e0-b3ed-025e7920c56a"
# using the above example, to get the value at index 10 we can do the following
ex1[10]  # output should be 5
# + id="3ac62698-fc93-42e8-ab92-e22f3f7d1213" outputId="1030731d-d177-4594-980d-7cdcb4695558"
# using the string-based index we can get the same by using "" inside the bracket
# if we want to get the value at index e we can do
print(ss)
print("Value at index e is {}".format(ss["e"]))
# + id="2662e17f-5489-41d1-8142-6a3a5ee58fba" outputId="40a84d83-fe5c-4cfd-efb9-6794bc4ce335"
# you can also add an element to your series. Let's say I want to add 200 to ss
ss["f"] = 200
ss
# + id="856dcf6b-8332-4795-a658-6621bf98f0b3" outputId="8297d653-22e0-4bec-8709-259afcedb28f"
# you can use comparison operators on series (element-wise, returns booleans)
ss > 12
# # To make it look nicer and more readable this will return a series of integers
# ss[ss < 13]
# + id="13cca2b4-5c2b-43eb-b1dd-b1c3fe92d207" outputId="9f0caa2c-707a-447f-be9f-308e66f58dd0"
# What is the difference between the following two lines (list vs. Series)?
# `list * 2` repeats the list; `Series * 2` multiplies element-wise.
l = [12, 13, 7, 80, 95] * 2
print(l)
print("\n\n")
print(ss * 2)
# + [markdown] id="f7ee917f-2b82-4864-9078-de9b6a79987b"
# ### Dataframe
# + id="264b3e03-f4a5-4fc9-8716-5ea7564bea07"
from pandas import DataFrame
# + id="7e6353ee-08a4-464e-a5ec-6c925653e93d" outputId="026a5a99-d378-47df-ed86-d170e9fb0341"
# Build a toy dataframe from a dict of equal-length columns:
# capital city, census year and population (units not stated -- presumably
# millions; confirm against the workshop source).
data = {
    "capital": [
        "Delhi",
        "Delhi",
        "Delhi",
        "Delhi",
        "Delhi",
        "Conakry",
        "Conakry",
        "Conakry",
        "Conakry",
        "Conakry",
        "washington",
        "washington",
        "washington",
        "washington",
        "washington",
    ],
    "year": [
        2001,
        2004,
        2007,
        2010,
        2015,
        2001,
        2005,
        2008,
        2011,
        2019,
        2001,
        2003,
        2007,
        2009,
        2017,
    ],
    "pop": [
        2.45,
        2.99,
        3.01,
        3.50,
        4.24,
        2.47,
        2.73,
        2.85,
        2.99,
        3.11,
        2.11,
        3.00,
        3.67,
        3.73,
        3.97,
    ],
}
df = DataFrame(data)
df
# + id="eb712bf6-9dac-4832-b31e-091f63ea4616" outputId="bce5b0bd-7a7c-4d96-c9ff-641c83f18f82"
# we can see the capitals which are not Conakry
df[df["capital"] != "Conakry"]
# + id="1ea3222a-fb14-4577-a963-4e385f30c9e6"
# + id="4dadab51-5d2b-49ad-8802-34459ff9fc45" outputId="eb781a62-173e-4fe9-9ff9-1e70fe9a081d"
# create a new dataframe where year is greater than 2010
df[df["year"] > 2010]
# + id="34d62e1c-bd44-46fa-bad4-44414cd2f603" outputId="19ed4fc3-19c4-41a2-9990-5e53e4f425d7"
# we can also slice a dataframe; for example, to keep only capital and year:
df[
    ["capital", "year"]
].head()  # the head function will return the first 5 rows of your dataframe
# + id="4db60234-e7ad-4d5b-b413-96b4b169d35b"
# Persist the toy dataframe to disk (index column omitted).
df.to_csv("workshop.csv", index=False)
# + [markdown] id="ffe64b98-6c01-46c4-94d4-b2841c34a83d"
# # Part 3: Loading and inspecting a (csv)
#
# Before we can start answering questions about the data we need to do a little bit of exploratory analysis.The first thing we need to do when working with a new dataset is to get an idea of what the data looks like. We start by loading the data into memory. Pandas comes with a built-in `read_csv` function that we can use to read CSV files and load them directly to a pandas `DataFrame` object.
# - **Note the dataset is on my github account** [dataset](https://raw.githubusercontent.com/abdoulayegk/ml-workshop/main/kidney_disease.csv)
# + id="3f4e14d7-ae47-4c78-b6b7-1c20618ecb80"
# We need to import the libraries to start with
import warnings
import matplotlib.pyplot as plt
import missingno as ms  # missing-value visualisation
import numpy as np
import pandas as pd
import seaborn as sns
warnings.filterwarnings("ignore")
# This command makes charts show inline in a notebook
# %matplotlib inline
plt.style.use("ggplot")
# Making the figures show up a little larger than default size
plt.rcParams["figure.figsize"] = [10, 6]
# + [markdown] id="d0a2df99-48b1-40b1-8bd6-e02874ea70f9"
# ### Loading real world dataset
# + id="fc38bea1-528f-445e-90dc-45527bf5237f"
# Read data from a CSV into a dataframe
# This is the data we're going to be working with!
df = pd.read_csv("kidney_disease.csv")
# + id="b16bd6ac-db3a-4c80-abd8-d07c1997b295" outputId="b09f306d-236a-4ee4-ade5-4c1e7dedc50d"
# Just typing the name of the dataframe will print the entire output
# If there are too many rows, Jupyter will print the top few and
# bottom few rows with a "..." to indicate that there are more rows
df
# + [markdown] id="09c5f17d-02e6-403c-b1c9-19a62a834990"
# # Data Set Information:
#
# We use the following representation to collect the dataset
# 1. age - age
# 2. bp - blood pressure
# 3. sg - specific gravity
# 4. al - albumin
# 5. su - sugar
# 6. rbc - red blood cells
# 7. pc - pus cell
# 8. pcc - pus cell clumps
# 9. ba - bacteria
# 10. bgr - blood glucose random
# 11. bu - blood urea
# 12. sc - serum creatinine
# 13. sod - sodium
# 14. pot - potassium
# 15. hemo - hemoglobin
# 16. pcv - packed cell volume
# 17. wc - white blood cell count
# 18. rc - red blood cell count
# 19. htn - hypertension
# 20. dm - diabetes mellitus
# 21. cad - coronary artery disease
# 22. appet - appetite
# 23. pe - pedal edema
# 24. ane - anemia
# 25. class - class
# 26. id
# + [markdown] id="de3d000f-d54b-419d-9e62-9df4d8f39d35"
# ## Inspecting a dataframe using built-in functions
# * Most operations on a dataframe happen by applying a function to it using the "." notation, e.g. `my_dataframe.do_something()`
# * Let's look at some simple functions that we can apply to Pandas dataframes
# + [markdown] id="ea558ba5-6f93-458a-9a37-9bc9e8b19632"
# **Note**: It is very important to give your columns name a meaningful names.
# + id="a0b4abfc-ad14-41d7-81e1-4f3ffc4ab79b" outputId="4b204194-f7d3-4d44-eb0d-d51a7e5e49c8"
# let's see the column names of our dataset
df.columns.to_list()
# + id="5e6cc71e-f292-4f09-b0e7-5e1c4f3d88db" outputId="b5984f65-7d9f-4bc8-d4be-a647ea1311e7"
# The head(n) function shows the first n rows in a dataframe.
# If no n is specified, it defaults to the first 5 rows.
df.head()
# + id="044feb21-85d2-4019-9494-7b7224f44183" outputId="3a52a15d-9d79-4acf-95f7-42c71169324b"
# You can also use the sample() function to get n random rows in
# the dataframe
df.sample(5)
# + id="9f1b069b-4901-4f97-970f-6dbb27f6282d" outputId="57d58c08-fe5a-4c84-af37-c4185e7517c8"
# This method prints information about a DataFrame including the index dtype and columns, non-null values and memory usage
df.info()
# + id="26313768-8916-45cf-960a-3dcca1c80ba7"
# the variable classification is our target so let's rename it.
df = df.rename(columns={"classification": "target"})
# + id="7cd74add-66e3-4fbd-b339-e72820797657" outputId="2c196934-92d9-498f-c27a-cfb296dddc38"
# The describe function shows basic statistics for the numeric columns
# (transposed with .T so each row is one column's summary)
df.describe().T
# + id="685069ac-2b4c-4ef7-aaf0-de98fae79259" outputId="5f7c4542-2486-4b75-8335-56070528dc25"
# now let's see the shape of our dataset (number of rows and columns)
df.shape
# + [markdown] id="0d9e7168-3327-43ea-a423-c4c9369240c9"
# ## Other ways to inspect a dataframe
# * There are other operations you can do on a dataframe that don't follow the function notation
# * Let's look at a few examples:
# 1. len(df)
# 2. df.dtypes, etc
#
# + [markdown] id="52ec970e-297e-426f-8748-db2095880158"
# ## <span style="color:blue">*** DIY exercise ***</span>
# Create a new cell below and print the first ten rows of the "df" dataframe.
# + [markdown] id="9043fc0e-7658-443f-ba45-bfdc091f61de"
# # Part 4: Data Exploration
# Let's assume we've loaded the treatment related data from our dataset in order to provide them with some analytical insights around the types of disease a patient has.
# + [markdown] id="c24acea9-d4de-4b86-a7bf-ca897c609d5a"
# ## Accessing columns in a dataframe
# + id="e5ab8980-bdbc-4b50-ab4a-98b110110df3" outputId="0ec139d0-1fcf-4087-a00e-aaf7f9305d71"
# let's plot a graph to see missing values in our dataset
ms.matrix(df)
# + id="2e63e672-7862-4b8c-a307-fec0d8a0cf5e" outputId="823d59eb-65c0-4ef0-8ad1-0bb397f08017"
# Check the type to show that this indeed returns a Series object
type(df["wc"])
# + id="31584e78-a0ab-4230-90db-9561e793886c" outputId="b652273e-1ee3-4294-e526-f1781f1a7434"
# And this is how you access two columns of a dataframe.
# Note that this will return a dataframe again, not a series
# (because a series has only one column...)
# Also note the double square brackets
# because you're passing a *list* of columns as an argument
df[["wc", "pcc"]].head()
# + [markdown] tags=[] id="a7d8e72e-5116-4199-a670-d74482f15aaa"
# ## <span style="color:blue">*** DIY exercise ***</span>
# Create a new cell below and print the list of unique pc in the dataframe.
# + id="0c864c2e-2ea3-4f1b-8d92-f59efadc32e0"
# + [markdown] tags=[] id="2ead8d39-e41f-4452-9174-fcf47f0f2550"
# ## Accessing rows in a dataframe
# In addition to slicing by column, we often want to get the record where a column has a specific value, e.g. a specific age here. This can be done using the `.loc` function syntax and a boolean statement:
# + [markdown] id="e3871f1d-7ed3-4618-a0f3-fc3c4a3587fe"
# ### Loc
# + id="267e55b5-ddd7-4d08-9b64-ec16b5b70ceb" outputId="a6f35db4-d1ae-429a-8966-6d00ac0893e4"
# Access the record(s) where the value in the age column is 20
df.loc[df["age"] == 20]
# + id="1aeed7d0-da94-4cd1-9ff1-d541a7120a9a" outputId="6e6da304-6635-4fbc-f916-beb47f945474"
# You can also use boolean conditions in the selector
df.loc[(df["age"] == 20) & (df["pc"] == "normal")]
# + [markdown] id="a6f6dece-a3b1-40be-bb34-426220779838"
# ### iloc
# + id="b11e0cb9-9744-427d-84b8-a56025d49344" outputId="cf6b2d94-4f44-4ad1-8e98-3148b70d173e"
# All rows, columns at positions 2 and 3.
df.iloc[:, [2, 3]]
# + id="9d4215f8-3dd4-45cd-a227-6435c72bce23" outputId="6878af5b-19c5-4ba5-8ac3-fb769109b290"
# Rows 0 and 2, columns 1 and 3 (all selected by position).
df.iloc[[0, 2], [1, 3]]
# + id="252d5316-2192-4241-b8e4-6ab7a086b5f2" outputId="9256ab92-9363-4ccd-9f38-4a8843c5c6fa"
# A callable selector: the lambda receives the dataframe and returns positions.
df.iloc[:, lambda df: [0, 2]]
# + [markdown] id="3063edd2-701b-4a13-b9b6-ce69b2af605b"
# **loc()** fait référence l’étiquette.<br>
# **iloc()** fait référence à l’indice de position.
# + [markdown] id="6b20772a-39a4-411a-9015-8870a2ecbe70"
# ## Sorting dataframes
# Sorting the output of a dataframe can be helpful for visually inspecting or presenting data! Sorting by one or multiple columns is super easy using the `sort_values` function:
# + id="f1491c9f-3e34-422d-8101-a611665cae23" outputId="88d4520e-28a7-4d46-b4e7-67a93c8d4879"
# Sort by earliest treatment start date, i.e. in ascending order (default)
df.sort_values("age").head()
# + [markdown] id="f8972576-f486-470a-8790-4ba7422f8e5a"
# -**Note you can use ascending=False to sort in descending order also you can sort a whole DataFrame**
#
# + id="0ee09872-f66d-42a6-ba19-55af08437872"
# Use the inplace keyword to modify the dataframe
# Note that you can also sort by a list of columns
df.sort_values(["id", "age"], inplace=True)
# + id="58e011c8-ff08-487e-aa06-604c38d37142"
# we can use replace as you can see here we are using dictionary
df["target"].replace({"ckd": 1, "notckd": 0}, inplace=True)
# + [markdown] id="5f494abe-1848-4a23-8c56-2ef195cbd45f"
# in this case the change that we made is temporary because we didn't change in the official data
# + id="3affccca-cf4b-46b5-9b2f-3dcaad977080"
# use inplace=true to make the change in the original dataset
# + id="e091da8b-a010-4a8d-aa65-1ccf1ca5f963" outputId="54eb626e-f271-48eb-89b8-921cff44ce5c"
sns.catplot(x="target", kind="count", data=df)
# + id="181a1198-85bc-4ad2-8446-9d46f6647189" outputId="a4ca77d3-2877-4cde-dc50-a7570e45afa9"
df.rc.unique()
# + [markdown] id="17b6eda2-fd22-4d42-9a45-8affdfd0338c"
# You can see from the above cell that the values are strings of numbers; that is why the dtype is object, and we have to convert it to the appropriate numeric format
# + id="11058c4a-a1c7-4bd7-a607-b15ed5832e4c" outputId="d6e27c41-71b5-41d9-c833-17d857101fab"
df.pcv.unique()
# + id="d1a540a1-6350-4728-86cc-92a480a628a7" outputId="d75068e4-72f1-45cb-b902-01baea47cfdf"
df.wc.unique()
# + id="0425f4e6-f697-400f-a8fa-77af89b9d6a9" outputId="1256e4f1-d3f5-408a-d32f-d8d2a5d5d3d9"
df.sg.unique()
# + id="57ab3585-90f9-4de6-8cee-04cd5a45a1c2" outputId="09653122-5702-4f99-e190-b95db207ca6e"
df.wc.unique()
# + [markdown] id="0d8dcd35-f720-46c3-8aad-7a93f5058bf1"
# Our rc column is an object dtype; it also has some missing values and unknown placeholders such as "?". We are going to replace the placeholders, convert the columns, and then fill the missing values with the mean.
# + id="ebb11d27-9bea-49f3-b062-5432da3462af"
# To replace the string caracters with NaN
df.rc.replace("?", np.nan, inplace=True)
df.wc.replace(("?"), np.nan, inplace=True)
df.pcv.replace(("?"), np.nan, inplace=True)
# + id="600e2299-4fad-4f7a-8b17-fd567bfb92bf" outputId="756a5c38-aeb6-417a-834a-ffa4f1bd7869"
df.rbc.unique()
# + [markdown] id="513ef539-e939-4ca3-ae0a-c7db3d0d977f"
# If you notice, age is of float type, so it would be good to convert it to int, but for now let's leave it as it is.
# + id="01fd26aa-3732-4365-b7ec-2e79f1f2ac13" outputId="fdcfa513-9956-4cc0-de6f-d8b606817e63"
sns.distplot(df.age)
# + [markdown] id="bea73f72-aeea-4003-a847-87d2c60ed195"
# #### These are still object so we have to convert it to numerical.
# 1. pcv
# 2. wc
# 3. rc
# + id="3007aa21-0bb1-480a-a31f-110d79e33dde"
# now we have to change the datatype of pcv, wc and rc
df.wc = df.wc.astype("float64")
df.rc = df.rc.astype("float64")
df.pcv = df.pcv.astype("float64")
# + id="94d15e7e-d9d5-4203-859a-8d8d91897bb8"
# # Now let's fill missing values
# df.age.fillna(df.age.mean(), inplace=True)
# df.bp.fillna(df.bp.mean(), inplace=True)
# df.sg.fillna(df.sg.mean(), inplace=True)
# df.al.fillna(df.al.mean(), inplace=True)
# df.su.fillna(df.su.mode(), inplace=True)
# df.wc.fillna(df.wc.mean(), inplace=True)
# df.pcv.fillna(df.pcv.mean(), inplace=True)
# df.rc.fillna(df.rc.mean(), inplace=True)
# df.age.fillna(df.age.mean(), inplace=True)
# df.al.fillna(df.al.mode(), inplace=True)
# df.su.fillna(df.su.mean(), inplace=True)
# df.pot.fillna(df.pot.mean(), inplace=True)
# df.bu.fillna(df.bu.mean(), inplace=True)
# df.sod.fillna(df.sod.mean(), inplace=True)
# df.hemo.fillna(df.hemo.mean(), inplace=True)
# df.sc.fillna(df.sc.mean(), inplace=True)
# df.bgr.fillna(df.bgr.mean(), inplace=True)
# + [markdown] id="2a165e37-922b-4f4b-aff4-9f83670232aa"
# **NB** Il y a plusieurs façons de remplacer les valeurs manquantes :
# 1. fillna(0)
# 2. ffill()
# 3. bfill(), etc
# + id="864c1fd1-234d-483c-9045-1521ea81faad" outputId="a6963bc8-1a71-4859-970d-60204ec79dd6"
df.isna().sum()
# + id="f58c3e29-d8cb-4247-8a5f-f10ea47cc3c7"
df.rbc.unique()
df.rbc = df.rbc.map({"normal": 1, "abnormal": 0})
df.pc = df.pc.map({"normal": 1, "abnormal": 0})
df.pcc = df.pcc.map({"present": 1, "notpresent": 0})
df.ba = df.ba.map({"present": 1, "notpresent": 0})
df.htn = df.htn.map({"yes": 1, "no": 0})
df.dm = df.dm.map({"yes": 1, "no": 0})
df.cad = df.cad.map({"yes": 1, "no": 0})
df.appet = df.appet.map({"good": 1, "poor": 0})
df.pe = df.pe.map({"yes": 1, "no": 0})
df.ane = df.ane.map({"yes": 1, "no": 0})
# + [markdown] id="68acd344-1d25-4849-a12b-6ad5103ec82b"
# **NB** Sklearn provide a nice way of encoding features:
# - OneHotEncoder [Reference](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html)
#
#
# **Pandas also provide one nice dummy encoder function**
# - pd.get_dummes() [Reference](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.get_dummies.html)
# + id="2e51407a-af0c-4af0-aab5-a278a54bc070" outputId="d39c8214-b01e-48d0-c704-cc911aaff53d"
df.htn.unique()
# + id="70683e2c-bdcb-4113-9c2a-aff1118fc7ae" outputId="ce3e4eb0-4862-4e96-d10b-6f3dc5c658ad"
df.head()
# + id="8b25dfbf-54a2-40c1-a7c9-a20fcf0b7811" outputId="6ca8e005-0b06-4cbb-d3be-83a5266faebc"
df.isna().sum()
# + id="2370f6b5-af6b-4674-a90c-ee88bb0390be" outputId="087c151f-b5f2-4258-8819-b2e24a903fa1"
df.age.plot(kind="hist", bins=20)
plt.show()
# + id="4201cf84-badf-49ea-ad73-717c01a5cd43" outputId="f2c51790-548b-4764-d68d-394eb0b2ddf7"
print(df.target.value_counts(normalize=True) * 100)
# + id="173cf117-f6d8-40b2-884f-4829b6f20a1e" outputId="33644b81-a44c-41a4-f42d-667ff51aee94"
# Explore appetite vs target
plt.figure(figsize=(16, 6))
sns.countplot(x="appet", hue="target", data=df)
plt.xticks(fontweight="light", fontsize="x-large");
# + id="206fbc36-5ead-4260-94c2-60c98183490d" outputId="dad0617a-2f08-45bc-d1a2-d6b990785331"
# Explore pc(pus cell) vs target
plt.figure(figsize=(16, 6))
sns.countplot(x="pc", hue="target", data=df)
plt.xticks(fontweight="light", fontsize="x-large")
plt.show()
# + [markdown] id="e5630e25-b0e3-4169-97ac-b49fdaff90c3"
# ### Boxplot: boxplot is a method for graphically depicting groups of numerical data through their quartiles.
# + id="a8a82031-7ba8-4e78-aa83-ba1d56c9867d" outputId="05bcf284-c69a-4974-bf0b-ab278a44cce5"
sns.boxplot(x=df.age)
# + id="83dc94c2-73a0-488b-8523-6836d77700fd"
# From the above cell we can see that we have outliers (extreme values). We can use a boxplot as a way to visualize outliers in our data.
# + id="2800dc2c-cbb1-49fc-ad21-00c8ddd51f19" outputId="67175d0e-8ad0-477a-aab9-56ee74cc8d6d"
sns.heatmap(df.corr(), annot=True)
# + [markdown] id="a53325a3-1ce8-416f-8d75-786e2229278e"
# ### what is the total number of people who's age is greater than 20 and are suffering from the disease?
# + id="bf311c5d-03db-4a91-8551-7a703a606d84" outputId="08e8fc5a-9c87-4db2-a00d-54f7420020ff"
len(df[(df["age"] > 20) & (df["target"] == 1)])
# + [markdown] id="10092736-f721-493a-ae21-fc58dc1d95e4"
# We can see that 233 people have their age greater than 20 and are also suffering from the disease
# + [markdown] id="e9e8932b-b0b8-4b92-a3a0-6c9c9162de7c"
# ### what is the number of people who's appetite is good but suffering from the disease?
# + id="e9a96039-013e-4fd2-8395-3095891da427" outputId="f8d4a917-2f57-4897-ede9-a49f5aa19f8d"
len(df[(df["appet"] == 1) & (df["target"] == 1)])
# + [markdown] id="7ab4fabe-4575-4596-8027-b6b3a27af5dd"
# There appear to be 168 people with a good appetite who are nevertheless suffering from the disease
# + [markdown] tags=[] id="0de81f9d-71a3-43b7-82c6-3830ebfbd5d4"
# ## Query
# Query the columns of a DataFrame with a boolean expression.
#
# inplace: bool
# Whether the query should modify the data in place or return a modified copy.
#
# + id="1a7565ce-b5fc-454e-93b9-f05a542c64b1" outputId="87e20cf5-2438-4fb6-a356-fe9c179c1427"
df.query("age > 70")
# + [markdown] id="9e4b2eec-2734-4f73-a962-dbaa8e0afd9f"
# **Node** we can use this method to create a brand new dataframe or modify the original dataframe by using **inplace=True**.
# + [markdown] id="2c32d5f3-d231-463c-827c-31c7320ab05b"
# # Model building
# + id="962596b1-7445-490d-b922-c14543587196"
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder, StandardScaler
# + [markdown] id="5b427125-cf03-4177-9fce-a2974bce8c97"
# 
# + id="5bbeeccc-7e45-4574-b17c-835e912bfe21" outputId="f226cd1a-dc66-41e9-e1f4-5af52f4e955a"
df.head()
# + id="eb3ff493-7060-469b-9fb2-04bb315fdc2b"
# + [markdown] id="1ce8cf33-37ec-4efc-92f9-2304e27a9853"
# ### Using pandas get_dummies
# I don't always recommend using this; you can find the reasons behind it here: [link]('https://stackoverflow.com/questions/36631163/what-are-the-pros-and-cons-between-get-dummies-pandas-and-onehotencoder-sciki#56567037')
# + id="40b84228-4a5c-448a-9307-577b08d6d02d"
# Encoding the remainig features with pandas get_dummies
# df = pd.get_dummies(df)
# + id="d0330b11-b518-4129-b731-daba53421c9c"
# we are going to select the features and the target variable.
"""
Note that X is n dimmentional array or dataframe
y: 1D array or Series
"""
X = df.drop(["id", "target"], 1)
y = df.target
# + [markdown] id="d02eb1ca-f207-49dd-a28e-69aea3ff0604"
# 
# + id="2de5d752-55aa-45e7-8417-fb6398e3646a" outputId="5a11b73b-478c-4490-aa21-190e33932837"
X
# + [markdown] tags=[] id="7a7475df-9dd6-4f62-b53a-d3ffe4168768"
# ## Handling missing values
# we are going to use SimpleImputer from sklearn which is a great technique then we will scale the data.<br>
# **Standardize features by removing the mean and scaling to unit variance.**
#
# The standard score of a sample x is calculated as:
#
# z = (x - u) / s
# 1. SimpleImputer: https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html
# 2. StandardScaler: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html
# + id="ac31bea1-16bc-46b3-8f87-352bce5041ee"
# encoding the target with label encoder
encoder = LabelEncoder()
df.target = encoder.fit_transform(df.target)
# + id="196d5f2a-afe5-431a-a30d-30c13e34fc11"
# scaling the data
pipeline = Pipeline(
[("impute", SimpleImputer(strategy="mean")), ("scale", StandardScaler())]
)
X = pd.DataFrame(columns=X.columns, data=pipeline.fit_transform(X))
# + id="e0015d05-30dd-4844-bcfa-a5c8df46c51e"
# X
# + id="b90dbca3-4098-4bca-b4cb-6375d248b9c3"
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# + [markdown] id="9770d758-24c8-4aa1-a217-a29dfd959bdc"
# 
# + id="5664dd02-7dd3-4787-a2e9-2c8202da44b6" outputId="a98ea339-4926-4f59-b62a-fa3498f1105f"
lgr = LogisticRegression()
lgr.fit(X_train, y_train)
y_pred = lgr.predict(X_test)
y_pred[:10]
# + [markdown] id="7eefebce-576b-44d4-b279-ae7cf60c4b5d"
# 
# + id="54034425-253a-4319-8d76-2e5f8bb5fcdb"
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
# + id="050bf115-012c-43ca-a329-c961422bd7a0" outputId="f2bd1b02-fb4e-41d0-b160-0291d64c565a"
print(classification_report(y_test, y_pred))
# + id="314a2200-1cee-4074-8699-a39c89aca1d1" outputId="944a7daf-8ceb-412e-a706-f9ee1468f43a"
accuracy_score(y_test, y_pred)
# + id="fc909999-4f16-4fee-983e-b5bca6c4e5e4" outputId="c9490276-6516-46bd-fff0-de2deb432f84"
confusion_matrix(y_test, y_pred)
# + id="84f4f8e2-da18-4335-8902-b8cc6f7ecc5c"
from sklearn.model_selection import cross_val_score
# + id="c6febe2c-6235-4db1-b57b-db52a945af73" outputId="2074c373-b4d6-428d-94f1-8ccae1747dfc"
cross_val_score(lgr, X, y, scoring="accuracy").mean()
# + id="dbe3018c-16f3-4027-a193-54028df0150d"
# + id="805e42da-ca5d-4daa-8221-07273942723d" outputId="93610c6e-78cd-4316-f7b5-7f076767ac0f"
# define lists to collect scores
train_scores, test_scores = list(), list()
# define the tree depths to evaluate
values = [i for i in range(1, 51)]
# evaluate a decision tree for each depth
for i in values:
# configure the model
# model = KNeighborsClassifier(n_neighbors=i)
model = LogisticRegression()
# fit model on the training dataset
model.fit(X_train, y_train)
# evaluate on the train dataset
train_yhat = model.predict(X_train)
train_acc = accuracy_score(y_train, train_yhat)
train_scores.append(train_acc)
# evaluate on the test dataset
test_yhat = model.predict(X_test)
test_acc = accuracy_score(y_test, test_yhat)
test_scores.append(test_acc)
# summarize progress
print(">%d, train: %.3f, test: %.3f" % (i, train_acc, test_acc))
# plot of train and test scores vs number of neighbors
plt.plot(values, train_scores, "-o", label="Train")
plt.plot(values, test_scores, "-o", label="Test")
plt.legend()
plt.show()
# + [markdown] id="27618143-7e67-4b8e-9088-296e283695b9"
# ### Can take another example with KNN if we want.
# + [markdown] id="3d7599c5-fd82-4bb8-8216-e4354f8fdc82"
# **In many cases you will want to use different machine learning models and apply different preprocessing techniques; I highly encourage you to use cross-validation to avoid model overfitting**
# + [markdown] id="2282effd-a9c6-49a9-86ed-9298150e5bde"
# # Part 4: Summary!
# + [markdown] id="23df0d0b-c239-4a9a-b92f-98a21e68606f"
# We hope this workshop was useful for you. We've only touched on some of the **basic concepts** of Pandas, but we believe this will give you the foundations to keep exploring the data! We covered:
#
# - Basic operations in Jupyter notebooks
# - Dataframes and Series in Pandas, and loading data to a dataframe
# - Basic data inspection (head, describe, dtypes, accessing columns and rows, sorting)
# - count, nunique
# - Indexing in dataframes and reset_index
# - Plotting (bar plots, hist plots, boxplot, heatmap)
# - Model building and evaluation
# + [markdown] id="57e34e3d-36d1-4d20-958c-e0a258a4924a"
# **I would appreciate your feedback** <br>
# Email: <EMAIL> <br>
# github: abdoulayegk <br>
# twitter: @abdoulayegk
| notebook1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dynamic risk budgeting between PSP & LHP
# +
import pandas as pd
import numpy as np
import ashmodule as ash
import matplotlib.pyplot as plt
import seaborn as sns
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# -
# Simulate 10 years of short rates / zero-coupon prices (ash.cir -- presumably
# a Cox-Ingersoll-Ross short-rate simulator; confirm in ashmodule) and equity
# prices via ash.gbm (presumably geometric Brownian motion).
n_scenarios = 5000
rates, zc_prices = ash.cir(10,n_scenarios=n_scenarios,b=0.03,r_0 = 0.03,sigma = 0.02)
prices_eq = ash.gbm(10, n_scenarios=n_scenarios,mu=0.07,sigma=0.15)
# Convert the simulated price paths to per-period returns.
rets_eq = prices_eq.pct_change().dropna()
rets_zc = zc_prices.pct_change().dropna()
# Back-test a fixed 70/30 equity / zero-coupon mix.
rets_7030b = ash.bt_mix(rets_eq, rets_zc,allocator = ash.fixedmix_allocator, w1 = 0.7)
pd.concat([ash.terminal_stats(rets_zc, name = "ZC", floor = 0.75),
           ash.terminal_stats(rets_eq, name = "Eq", floor = 0.75),
           ash.terminal_stats(rets_7030b, name = "70/30", floor = 0.75)],
          axis = 1).round(2)
# Back-test a floor-based allocator with a 75% wealth floor; zc_prices[1:]
# aligns the price index with the returns (which lost the first row above).
rets_floor75 = ash.bt_mix(rets_eq, rets_zc, allocator=ash.floor_allocator, floor =0.75, zc_prices=zc_prices[1:])
pd.concat([ash.terminal_stats(rets_zc, name = "ZC", floor = 0.75),
           ash.terminal_stats(rets_eq, name = "Eq", floor = 0.75),
           ash.terminal_stats(rets_7030b, name = "70/30", floor = 0.75),
           ash.terminal_stats(rets_floor75, name ="Floor75%",floor=0.75)],
          axis = 1).round(2)
# this code is to test pd.concat for np array out of class
first_array = np.random.normal(loc = 0,scale= 5,size = 20)
second_array = np.random.normal(loc = 0,scale= 10,size = 20)
# NOTE(review): pd.concat expects a list/tuple of pandas objects; passing two
# raw ndarrays positionally raises TypeError (the 2nd positional argument is
# 'axis'), so this cell fails -- deliberately, per the "didn't work" note.
df = pd.concat(first_array,second_array)
df.head()
# didn't work
# try to convert before concat
# Wrapping each array in a Series works, but the default axis=0 stacks the
# 40 values vertically (see df.shape below).
df= pd.concat([pd.Series(first_array),pd.Series(second_array)])
df.head()
df.shape
# didn't work either
# Passing the arrays as 'data' yields one ROW per array (a 2x20 frame).
df = pd.DataFrame(data = [first_array,second_array])
df.shape
df.head()
#needs to convert to columns instead
# Converting each array to a one-column DataFrame and concatenating again
# stacks rows; axis=1 would be needed to place them side by side.
first_array = pd.DataFrame(data = np.random.normal(loc = 0,scale= 5,size = 20))
second_array = pd.DataFrame(data =np.random.normal(loc = 0,scale= 10,size = 20))
df = pd.concat([first_array,second_array])
df.shape
| Introduction to Portfolio Construction and Analysis with Python/W4/.ipynb_checkpoints/Dynamic Risk Budgeting-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import time
import sys
import numpy as np
import pandas as pd
# FIX: sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
# train_test_split and KFold now live in sklearn.model_selection.
from sklearn.model_selection import train_test_split, KFold
import multiprocessing
import os
from scipy import sparse
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import r2_score
from scipy.stats import spearmanr, pearsonr
from sklearn import ensemble
import datetime as dt
from datetime import date
import calendar
from calendar import weekday, day_name
import statsmodels.api as sm
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
import scipy as sp
from patsy import dmatrix
# NOTE(review): the four imports below duplicate the statsmodels/scipy
# imports above -- harmless, but one group could be dropped.
import statsmodels.api as sm
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
import scipy as sp
from sklearn.datasets import make_regression
# Extract the (store_nbr, item_nbr) columns of the training data and save
# them for later joins.
train = pd.read_csv("data/train.csv", sep=',')
cols = ['store_nbr', 'item_nbr']
st_it = pd.DataFrame(train, columns=cols)
st_it
# FIX: index=None only suppressed the index because None is falsy;
# index=False states the intent explicitly.
st_it.to_csv('data/store_item_nbrs.csv', sep=',', index=False)
# +
import pandas as pd
import numpy as np
def create_vaild_item_store_combinations(_df):
df = _df.copy()
df['log1p'] = np.log(df['units'] + 1)
g = df.groupby(["store_nbr", "item_nbr"])['log1p'].mean()
g = g[g > 0.0]
store_nbrs = g.index.get_level_values(0)
item_nbrs = g.index.get_level_values(1)
store_item_nbrs = sorted(zip(store_nbrs, item_nbrs), key = lambda t: t[1] * 10000 + t[0] )
with open(store_item_nbrs_path, 'wb') as f:
f.write("store_nbr,item_nbr\n")
for sno, ino in store_item_nbrs:
f.write("{},{}\n".format(sno, ino))
store_item_nbrs_path = 'data/store_item_nbrs.csv'
df_train = pd.read_csv("data/train.csv")
create_vaild_item_store_combinations(df_train)
# +
import pandas as pd
import numpy as np
import pickle
class SubmissionCreator(object):
    """Builds a Kaggle submission CSV from Vowpal Wabbit test/prediction files.

    NOTE(review): create_submission reads the module-level global
    ``prediction_dict`` rather than taking it as a parameter -- confirm
    that coupling is intended.
    """

    def create_id(self, row):
        """Return the submission id '<store_nbr>_<item_nbr>_<date>'."""
        date = row["date"]
        sno = row["store_nbr"]
        ino = row["item_nbr"]
        return "{}_{}_{}".format(sno, ino, date)

    def create_id2(self, row):
        """Return the internal id '<item_nbr>_<store_nbr>_<YYYYMMDD>' used in the VW files."""
        date = row["date"]
        s_no = row["store_nbr"]
        i_no = row["item_nbr"]
        return str(i_no) + "_" + str(s_no) + "_" + date[0:4] + date[5:7] + date[8:10]

    def create_prediction_dict(self, fname_test, fname_p):
        """Map each id2 in *fname_test* to a non-negative log1p prediction.

        Each test line carries, after its last '|', a space-separated tail
        whose fields include id2 (index 2), a 'notsold' flag (index 4) and a
        baseline value (last field); the matching line of *fname_p* holds the
        model's delta from that baseline.
        """
        d = dict()
        # FIX: use context managers so both files are closed even on error;
        # iterating the file objects avoids reading everything into memory.
        with open(fname_test) as f_test, open(fname_p) as f_p:
            for line_test, line_p in zip(f_test, f_p):
                p_from_baseline = float(line_p.strip())
                I = line_test.strip().split("|")[-1]
                id2 = I.split(" ")[2]
                notsold = I.split(" ")[4]
                baseline = float(I.split(" ")[-1])
                # NOTE(review): a prediction is only produced when the flag
                # string equals "True"; the naming ('notsold') suggests the
                # polarity may be inverted -- confirm against the VW export.
                if notsold == "True":
                    pred = p_from_baseline + baseline
                else:
                    pred = 0.0
                d[id2] = np.max([pred, 0.0])
        return d

    def create_submission(self, df_test, fname_submission):
        """Write the 'id,units' submission CSV for *df_test*."""
        df = df_test
        with open(fname_submission, "w") as fw:
            fw.write("id,units\n")
            for index, row in df.iterrows():
                id = self.create_id(row)
                id2 = self.create_id2(row)
                # FIX: dict.has_key() was removed in Python 3 -- use 'in'.
                if id2 in prediction_dict:
                    log1p = prediction_dict[id2]
                else:
                    log1p = 0.0
                # Undo the log1p transform to recover the unit count.
                units = np.exp(log1p) - 1
                fw.write("{},{}\n".format(id, units))
        # FIX: Python-2 print statement (syntax error on Py3) -> print().
        print("finished {}".format(fname_submission))
submission_creator = SubmissionCreator()
df_test = pd.read_csv("data/test.csv")
prediction_dict = submission_creator.create_prediction_dict("model/vwdata_test.vwtxt", "model/vwdata.predict.txt")
submission_creator.create_submission(df_test, "submission/p.csv")
# -
all_data_result = pd.read_csv('another_all_train_matrix.csv', sep=',')
model = sm.OLS.from_formula( "np.log1p(units) ~ C(item_nbr) + C(weekday) + "
"is_weekend + "
"tmax + tmin + depart + dewpoint + wetbulb + sunrise + "
"sunset + snowfall + preciptotal + stnpressure + sealevel + resultspeed + resultdir +"
"avgspeed", data=all_data_result)
result = model.fit()
# +
# NOTE(review): 'boston' is never defined in this notebook -- this cell looks
# copied from a Boston-housing example and raises NameError as written.
# Confirm the intended feature names / target column for all_data_result.
dfX0 = pd.DataFrame(all_data_result, columns=boston.feature_names)
dfX = sm.add_constant(dfX0)  # prepend the intercept column for statsmodels OLS
dfy = pd.DataFrame(boston.target, columns=["MEDV"])
dfy_predict = result.predict()  # in-sample predictions from the fitted OLS result
# +
test = pd.read_csv('data/test.csv')
weather = pd.read_csv('data/weather_processed.csv')
key = pd.read_csv('data/key.csv')
holidays = get_holidays("data/holidays.txt")
holiday_names = get_holiday_names("data/holiday_names.txt")
test_key = pd.merge(test, key, on=['store_nbr'], how='left')
test_merge = pd.merge(test_key, weather, on=['date', 'station_nbr'], how='left')
test_gg = preprocess(test_merge)
#test_gg.tail()
# -
model = sm.OLS.from_formula( "np.log1p(units) ~ C(item_nbr) + C(weekday) + C(is_weekend) + C(is_holiday_weekday) + C(is_holiday) + C(holiday_name) + C(is_holiday_weekend) + C(around_BlackFriday) ", data=all_data_result)
result = model.fit()
df = pd.DataFrame(result.predict(all_data_result))
| team_directory/ya/practice_0313.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Web Scraping: Extracting Data from the Web
# + [markdown] slideshow={"slide_type": "slide"}
# ## Some Import
# + slideshow={"slide_type": "-"}
# %matplotlib inline
from selenium import webdriver
import time,re,json,numpy as np
import pandas as pd
from collections import defaultdict,Counter
import matplotlib.pyplot as plt
# + [markdown] slideshow={"slide_type": "slide"}
# ## Initial Setup and Launch the browser to open the URL
# + slideshow={"slide_type": "-"}
url = "http://www.imdb.com/list/ls061683439/"
with open('./img/filmfare.json',encoding="utf-8") as f:
datatbl = json.load(f)
driver = webdriver.Chrome(datatbl['data']['chromedriver'])
driver.get(url)
# -
# # Beautiful Soup
from bs4 import BeautifulSoup
soup = BeautifulSoup(driver.page_source, 'lxml')
lstelem = soup.findAll("div", attrs={"class" : 'info'})
Movielist = []
for lst in lstelem:
Movielist.append((lst.find('b').contents[0]).text)
print("First 10 Movies in the list")
Movielist[0:10]
# + [markdown] slideshow={"slide_type": "slide"}
# # Getting Data
# + [markdown] slideshow={"slide_type": "slide"}
# ### Function to extract the data from Web using Selenium
# + slideshow={"slide_type": "-"}
def ExtractText(Xpath):
    """Return the text of every element matched by the XPath stored under
    datatbl['data'][Xpath], using the module-level Selenium driver.

    The director column needs special handling: its entries are fetched one
    by one at every other index position.
    """
    base_xpath = datatbl['data'][Xpath]
    if Xpath == 'Movies_Director_Xpath':
        # Director rows alternate with other entries, so take odd indices only.
        return [driver.find_element_by_xpath(base_xpath + '[%d]' % idx).text
                for idx in range(1, 123, 2)]
    return [elem.text for elem in driver.find_elements_by_xpath(base_xpath)]
# + [markdown] slideshow={"slide_type": "slide"}
# ### Let's extract all the required data like Ratings,Votes,Genre, Year of Release for the Best Movies
# + slideshow={"slide_type": "-"}
#Extracting Data from Web
Movies_Votes,Movies_Name,Movies_Ratings,Movies_RunTime=[[] for i in range(4)]
datarepo = [[]]*5
Xpath_list = ['Movies_Name_Xpath','Movies_Rate_Xpath','Movies_Runtime_Xpath','Movies_Votes_Xpath',
'Movies_Director_Xpath']
for i in range(5):
if(i==3):
driver.find_element_by_xpath(datatbl['data']['listview']).click()
if(i==4):
driver.find_element_by_xpath(datatbl['data']['detailview']).click()
datarepo[i] = ExtractText(Xpath_list[i])
driver.quit()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Data in List
# + slideshow={"slide_type": "-"}
# Movie Name List & Ratings
print(datarepo[0][:5])
print(datarepo[1][:5])
print(datarepo[3][:5])
print(datarepo[4][:5])
print("")
print(datarepo[3][:5])
# + [markdown] slideshow={"slide_type": "slide"}
# # Store Data in a Python Dictionary
# + slideshow={"slide_type": "-"}
# Result in a Python Dictionary
Years=range(2015,1954,-1)
result = defaultdict(dict)
for i in range(0,len(datarepo[0])):
result[i]['Movie Name']= datarepo[0][i]
result[i]['Year']= Years[i]
result[i]['Rating']= datarepo[1][i]
result[i]['Votes']= datarepo[3][i]
result[i]['RunTime']= datarepo[2][i]
result[i]['Genre']= datatbl['data']['Genre'][i]
result[i]['Director']= datarepo[4][i]
# + [markdown] slideshow={"slide_type": "slide"}
# # Let's see now how the data in dictionary looks like?
# + slideshow={"slide_type": "slide"}
print(json.dumps((result[0]),indent=4))
# -
print(json.dumps((result),indent=4))
# + [markdown] slideshow={"slide_type": "slide"}
# ## Let's clean the data
# + [markdown] slideshow={"slide_type": "slide"}
# * Replace the comma(,) in Vote Value and change the data type to int
# * Change the Data type for Rating and RunTime
# + slideshow={"slide_type": "-"}
for key,values in result.items():
values['Votes'] = int(values['Votes'].replace(",",""))
values['Rating']= float(values['Rating'])
values['Director']= values['Director'].replace('Director: ','')
try:
values['RunTime'] = int(re.findall(r'\d+',values['RunTime'])[-1])
except TypeError:
values['RunTime'] = np.NaN
except IndexError:
values['RunTime'] = np.NaN
# + [markdown] slideshow={"slide_type": "slide"}
# ## Now let's look at the data and see how it looks like
# + slideshow={"slide_type": "slide"}
print(json.dumps((result[0]),indent=4))
# -
# ## Movie details in dictionary
print(json.dumps((result),indent=4))
# + [markdown] slideshow={"slide_type": "slide"}
# # Data in Pandas Dataframe
# + slideshow={"slide_type": "-"}
# create dataframe
df = pd.DataFrame.from_dict(result,orient='index')
df = df[['Year', 'Movie Name', 'Rating', 'Votes','Genre','RunTime','Director']]
df.index = np.arange(1, 62)
df.head(10)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Let's use some of the Pandas functions now and start the Analysis
# + [markdown] slideshow={"slide_type": "slide"}
# ## No. of rows with missing values(NaN)
# + slideshow={"slide_type": "skip"}
df
# + slideshow={"slide_type": "slide"}
df[df['RunTime'].isnull()==True]
# + slideshow={"slide_type": "-"}
nans = df.shape[0] - df.dropna().shape[0]
print('%d rows have missing values' % nans)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Replace NaN with Mean
# + slideshow={"slide_type": "-"}
df=df.fillna(int(df['RunTime'].mean()))
df.ix[[41, 49,59]]
# + slideshow={"slide_type": "slide"}
df.info()
# + [markdown] slideshow={"slide_type": "slide"}
# # Movies with Highest Ratings
# + [markdown] slideshow={"slide_type": "slide"}
# ## The top five movies with Maximum Rating since 1955
# + slideshow={"slide_type": "-"}
#Highest Rating Movies
df1=df.sort_values('Rating',ascending=[False]).head(5)
df1.index = np.arange(1, 6)
df1
# + [markdown] slideshow={"slide_type": "slide"}
# ## Rating Trend for Best Movies from last 65 years
# + slideshow={"slide_type": "-"}
df.plot(x=df.Year,y=['Rating']);
# + [markdown] slideshow={"slide_type": "slide"}
# ## Movies with Lowest Ratings
# + slideshow={"slide_type": "-"}
df1=df.sort_values('Rating',ascending=[True]).head(5)
df1.index = np.arange(1, 6)
df1
# + [markdown] slideshow={"slide_type": "slide"}
# # Movies with Maximum Run time
# + [markdown] slideshow={"slide_type": "slide"}
# ## Top 10 movies with maximum Run time
# + slideshow={"slide_type": "-"}
#Movies with maximum Run Time
df1=df.sort_values(['RunTime'],ascending=[False]).head(10)
df1.index = np.arange(1, 11)
df1
# + [markdown] slideshow={"slide_type": "slide"}
# ## Best Movie Run time
# + [markdown] slideshow={"slide_type": "slide"}
# ## Let's plot a graph to see the movie run time trend from 1955 thru 2015
# + slideshow={"slide_type": "slide"}
df.plot(x=df.Year,y=['RunTime']);
# + [markdown] slideshow={"slide_type": "slide"}
# ## Mean of the Movie Run Time
# + slideshow={"slide_type": "-"}
df['RunTime'].mean()
# + [markdown] slideshow={"slide_type": "slide"}
# # Perform some analysis on the ratings of all the Best won movies
#
# * No. of Movies Greater than IMDB 7 ratings
# + slideshow={"slide_type": "-"}
df[(df['Rating']>=7)]['Rating'].count()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Movie Ratings Visualization using Bar Graph
# + slideshow={"slide_type": "-"}
import seaborn as sns
sns.set_style("white")
Rating_Histdic = defaultdict(dict)
Rating_Histdic['Btwn 6&7'] = df[(df['Rating']>=6)&(df['Rating']<7)]['Rating'].count()
Rating_Histdic['GTEQ 8'] = df[(df['Rating']>=8)]['Rating'].count()
Rating_Histdic['Btwn 7 & 8'] = df[(df['Rating']>=7)&(df['Rating']<8)]['Rating'].count()
plt.bar(range(len(Rating_Histdic)), Rating_Histdic.values(), align='center',color='b',width=0.4)
plt.xticks(range(len(Rating_Histdic)), Rating_Histdic.keys(), rotation=25);
# + [markdown] slideshow={"slide_type": "slide"}
# ## Percentage distribution of the Ratings in a Pie-Chart
# + slideshow={"slide_type": "-"}
sns.set_style("white")
sns.set_context("notebook")
Rating_Hist = []
import numpy as np
Rating_Hist.append(Rating_Histdic['Btwn 6&7'])
Rating_Hist.append(Rating_Histdic['GTEQ 8'])
Rating_Hist.append(Rating_Histdic['Btwn 7 & 8'])
labels = ['Btwn 6&7', 'GTEQ 8', 'Btwn 7 & 8']
colors = ['red', 'c', 'lightgreen']
plt.pie(Rating_Hist,labels=labels, colors=colors,autopct='%1.1f%%', shadow=True, startangle=90);
plt.figure(figsize=(8, 6));
# + [markdown] slideshow={"slide_type": "slide"}
# # Best Picture by Genre
# + [markdown] slideshow={"slide_type": "slide"}
# ## Let's analyze the Genre for the best won movies
# + slideshow={"slide_type": "-"}
Category=Counter(datatbl['data']['Genre'])
df1 = pd.DataFrame.from_dict(Category,orient='index')
df1 = df1.sort_values([0],ascending=[False]).head(5)
df1.plot(kind='barh',color=['g','c','m']);
# + [markdown] slideshow={"slide_type": "slide"}
# ## Directors whose movie won more than once for Best Film
# + [markdown] slideshow={"slide_type": "slide"}
# ## Movies for which they won best film award
# + slideshow={"slide_type": "skip"}
df['freq']= df.groupby('Director')['Director'].transform('count')
df2=df[df['freq']>1]
del df2['freq']
# + slideshow={"slide_type": "slide"}
df2.groupby(['Director','Year', 'Movie Name',
'Rating', 'Genre','Votes','RunTime']).count()[0:100]
# + [markdown] slideshow={"slide_type": "slide"}
# # Conclusion :
#
# * Movies with Ratings greater than 7
# * Run time more than 2hrs
# * Category Drama & Musical are most likely to be selcted for Best Picture
| WebScrapingnDataAnalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
#load data
data1 = pd.read_csv("output.csv", encoding = 'utf-8')
data2 = pd.read_csv("raw_data/train.csv", encoding = 'utf-8')
data1.head()
#Select only relevant columns
# Keep only the loan-behaviour features plus the default-indicator target.
data1 = data1[[
    'EMI',
    'Loan Amount',
    'Maximum amount sanctioned for any Two wheeler loan',
    'Age at which customer has taken the loan',
    'Rate of Interest',
    'Number of times 30 days past due in last 6 months',
    'Maximum MOB (Month of business with TVS Credit)',
    'Number of times 60 days past due in last 6 months',
    'Number of loans',
    'Maximum amount sanctioned in the Live loans',
    'Number of times 90 days past due in last 3 months',
    'Tenure',
    'Number of times bounced while repaying the loan',
    'Target variable ( 1: Defaulters / 0: Non-Defaulters)'
]]
data1.columns
data2.head()
data1.head()
| .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project: Univariate Linear Regression
#
# * **Business Understanding Phase:**
# In this Project we want to model how Profit in Bike Sharing Business Increases with the increase in Population in the City.
#
# * **Data Understanding Phase:**
# The Data Consists of a Profit in Dollars of Bike Sharing Business with respect to the Population in that City.
#
# ### Lets us take a look at the data
#
# importing necessary Libraries
import matplotlib.pyplot as plt #Data Visualization Library
import numpy as np #High-level library for linear algebra operations and manipulation
import pandas as pd #High-level library for data wrangling and manipulation
import seaborn as sns # High-level data visualization library built on Matplotlib
plt.style.use('ggplot') # Setting default style
# %matplotlib inline
# Importing data: one row per city with Population and Profit columns (both in 10,000s)
data = pd.read_csv('bike_sharing_data.txt')
data.head() # reading top 5 lines of Data
# Getting information about the data (dtypes, non-null counts)
data.info()
# **It can be observed that the data contains 2 Columns; Population and Profit, containing a total 97 instances**
# ## Visualizing the Data
# Scatter plot of the raw data: profit vs. population (both in units of 10,000)
fig, ax = plt.subplots(figsize = (10,8))
sns.scatterplot(x='Population', y = 'Profit', data = data, ax=ax)
ax.set_title('Profit in $10000s vs City Population in 10000s')
ax.set_xlabel('Population in 10000s')
ax.set_ylabel('Profit in $10000s')
# * **Data Preparation Phase:**
#
# In this phase we will prepare the data for modeling. First we will segregate the data in 2 parts, first part the independent variable/s or feature/s or predictor variable/s and the other - dependent variable or response variable. After this segregation we will add an intercept or bias variable equal to 1 for each instance in the dataset.
m = len(data) #number of instances
# Design matrix: a column of ones (intercept/bias term) next to Population -> shape (m, 2)
X= np.append(np.ones((m,1)), data.Population.values.reshape(m,1), axis=1) # adding the intercept term
y=data.Profit.values.reshape(m,1)
theta=np.zeros((2,1)) #our initial Theta (intercept and slope both start at zero)
# * **Model Preparation Phase:**
#
# During this phase we will build a univariate regression model to predict the Profit based on the population of cities
# ### Simple Linear Regression
#
# $h_{\theta}(x)$ is the hypothesis and given by the linear model
#
# $$h_{\theta}(x) = \theta^Tx = \theta_0x_0 + \theta_1x_1$$
# The $\theta$ values are "weights" and $x_0$ is the bias term
#
#
# The objective of linear regression is to minimize the cost function which is given by:
#
# $$J(\theta) = \frac{1}{2m} \sum_{i=1}^m (h_\theta(x^{(i)}) - y^{(i)} )^2$$
#
# where m is the number of samples in the data
def cost_function(X, y, theta):
    """Compute the mean-squared-error cost J(theta) for linear regression.

    Parameters
    ----------
    X : ndarray of shape (m, 2)
        Feature matrix including the bias column.
    y : ndarray of shape (m, 1)
        Response variable.
    theta : ndarray of shape (2, 1)
        Weight vector.

    Returns
    -------
    float
        J(theta) = (1 / 2m) * sum((X @ theta - y) ** 2).
    """
    n_samples = len(y)
    residual = X.dot(theta) - y
    # residual.T @ residual is a (1, 1) matrix; squeeze it down to a scalar.
    cost_matrix = residual.T.dot(residual) / (2 * n_samples)
    return np.squeeze(cost_matrix).tolist()
# #### Updating the weights
#
# To update the weight vector $\theta$, gradient descent is applied to iteratively improve your model's predictions.
# The gradient of the cost function $J$ with respect to one of the weights $\theta_j$ is:
#
# $$\nabla_{\theta_j}J(\theta) = \frac{1}{m} \sum_{i=1}^m(h^{(i)}-y^{(i)})x_j $$
# * 'i' is the index across all 'm' training examples.
# * 'j' is the index of the weight $\theta_j$, so $x_j$ is the feature associated with weight $\theta_j$
#
# * To update the weight $\theta_j$, we adjust it by subtracting a fraction of the gradient determined by $\alpha$:
# $$\theta_j = \theta_j - \alpha \times \nabla_{\theta_j}J(\theta) $$
# * The learning rate $\alpha$ is a value that we choose to control how big a single update will be.
#
# Hence, to computationally minimize the cost function $J(\theta)$, the update below is repeated until convergence is met
#
# $\theta_j := \theta_j - \alpha \frac{1}{m} \sum_{i=1}^m (h_{\theta}(x^{(i)}) - y^{(i)})x_j^{(i)}$ (simultaneously update $\theta_j$ for all $j$).
# Testing our cost function with the initial values of theta, which are [0, 0]
cost_function(X, y, theta)
def gradient_descent(X,y, theta, alpha, num_iters):
    '''
    Run batch gradient descent to fit the linear-regression weights.

    Input:
        X: matrix of features which is (m,2), including the bias column
        y: corresponding labels of the input matrix x, dimensions (m,1)
        theta: initial weight vector of dimension (2,1); the caller's array
               is NOT modified — optimization happens on a copy
        alpha: learning rate
        num_iters: number of iterations we want to train our model for
    Output:
        theta: the optimized weight vector, shape (2,1)
        costs: cost J(theta) recorded after every update
    '''
    m = len(y)
    # Work on a float copy so the caller's initial theta is left untouched
    # (the previous in-place `theta -= ...` silently mutated the argument,
    # and would also raise on an integer-dtype theta). Callers that rebind
    # the returned theta are unaffected.
    theta = theta.astype(float, copy=True)
    costs = []
    for _ in range(num_iters):
        h_theta = X.dot(theta)
        # Gradient of J(theta): (1/m) * X^T (h - y)
        gradient = np.dot(X.transpose(), (h_theta - y)) / m
        theta -= alpha * gradient
        costs.append(cost_function(X, y, theta))
    return theta, costs
# Fit the model (learning rate 0.01, 2000 iterations) and print the fitted hypothesis.
theta , costs = gradient_descent(X, y , theta, alpha = 0.01, num_iters = 2000)
print('h(x) = {} + {}x1'.format(str(round(theta[0,0], 2)), str(round(theta[1,0],2))))
# * **Model Evaluation Phase:**
# In this phase, we will see how the model has converged over the number of iterations and whether the regression is able to explain our model
#
# ### Visualising the Cost Function $J(\theta)$
# Training curve: cost J(theta) recorded after each gradient-descent update.
plt.figure(figsize = (10,8))
plt.plot(costs)
plt.xlabel('Number of Iterations')
plt.ylabel("$J(\Theta)$")
# NOTE(review): the title's "Decent" should read "Descent" (runtime string left as-is).
plt.title("Values of the Cost Function over Iterations of Gradient Decent")
# ### Visualizing our Model using Regression Line
theta = np.squeeze(theta)  # flatten (2,1) -> (2,) so theta[0]/theta[1] index scalars
plt.figure(figsize = (10,8))
sns.scatterplot(x='Population', y = 'Profit', data = data)
# Evaluate the fitted line h(x) = theta0 + theta1*x over the population range 5..24
x_value=[x for x in range(5,25)]
y_value = [(x*theta[1]+theta[0]) for x in x_value]
# NOTE(review): seaborn >= 0.12 removed positional x/y arguments; this call
# requires an older seaborn, or should become sns.lineplot(x=x_value, y=y_value).
sns.lineplot(x_value, y_value)
plt.xlabel("Population in 10000s")
plt.ylabel("Profit in $10000s")
plt.title("Linear Regression Fit")
# **Here we are able to see that the regression line is able explain the variablity of the response variable with respect to predictor variable which is City's Population in this case.**
# * **Model Deployment Phase:**
#
# In this phase, we draw inferences/prediction from the model on the unseen values
#
# ### Inference using the optimized $\theta$ values
#
# $h_\theta(x) = \theta^Tx$
def predict(x, theta):
    """Return the model prediction h_theta(x) = theta^T x for a single example.

    x includes the leading bias term (e.g. [1, population/10000]); theta is
    the 1-d optimized weight vector.
    """
    return theta.T @ x
# Predict for a city of 40,000 people (x = [1, 4]: bias term plus population
# in 10,000s), then scale from units of $10,000 back to dollars.
y_pred_1 = predict(np.array([1,4]), theta) *10000
print("For a population of 40000 people, the model predicts a profit of $"+str(round(y_pred_1,2)))
print('\n')
# Same for a city of 83,000 people.
y_pred_2 = predict(np.array([1, 8.3]), theta)*10000
print("For a population of 83000 people, the model predicts a profit of $"+str(round(y_pred_2,2)))
# ## The End ##
| Univariate Linear Regression from Scratch/Predicting_Profit.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.0
# language: julia
# name: julia-1.6
# ---
# # Chromosome Painting
# +
using Revise
using VCFTools
using MendelImpute
using VariantCallFormat
using Random
using StatsBase
using CodecZlib
using ProgressMeter
using BenchmarkTools
using GroupSlices
using LinearAlgebra
using DataFrames
using Plots
using DelimitedFiles
using JLSO
using StatsBase
# using ProfileView
# Pin BLAS to a single thread so timings are reproducible and parallelism is
# left to the application-level code.
BLAS.set_num_threads(1)
# -
# # Download population data
#
# Download [population code](ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/1000_genomes_project/data/) for each 1000 genomes sample. Different population code is explained [here](https://www.internationalgenome.org/category/population/) and their inclusion criteria is explained [here](https://www.coriell.org/1/NHGRI/About/Guidelines-for-Referring-to-Populations).
# +
# run this code in terminal
# wget -r -l3 -N --no-parent ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/1000_genomes_project/data/
# -
# # Get each sample's population and super-population origin
# Map each 1000 Genomes sample ID to its population code (e.g. "MXL", "PUR").
refID_to_population = thousand_genome_samples_to_population()
# # Separate admixed populations
#
# We will use samples from:
#
# - MXL: Mexican Ancestry in Los Angeles
# - PUR: Puerto Rican in Puerto Rico
# - ASW: African Ancestry in SW USA
# - CLM: Colombian in Medellin, Colombia
# - PEL: Peruvian in Lima, Peru
# - ACB: African Caribbean in Barbados
# - CHD: Chinese in Metropolitan Denver, Colorado, USA (actually NOT present in our data)
# - GIH: Gujarati Indians in Houston, Texas, USA
# - ITU: Indian Telugu in the U.K.
# - STU: Sri Lankan Tamil in the UK
#
# as targets to infer ancestry. These population show high degrees of recent admixture and are also highly heterogeneous in their admixture. Remaining populations will be used as the reference panel.
#
# We may wish to also exclude:
#
# Detailed population descriptions are found [here](https://www.coriell.org/1/NHGRI/About/Guidelines-for-Referring-to-Populations).
# need full sample ID list
# NOTE(review): hard-coded absolute paths below; adjust for your machine.
data = "/Users/biona001/.julia/dev/MendelImpute/data/1000_genome_phase3_v5/beagle_raw/chr18.1kg.phase3.v5a.vcf.gz"
sampleIDs = sampleID(data)
# check how many people remain in reference panel
admixed = ["MXL", "PUR", "CLM", "PEL", "ASW", "ACB", "GIH", "ITU", "STU"]
ref_idx = Int[]
sample_idx = Int[]
# Partition samples: admixed populations become imputation targets,
# everyone else goes into the reference panel.
for (i, id) in enumerate(sampleIDs)
    refID_to_population[id] ∈ admixed ? push!(sample_idx, i) : push!(ref_idx, i)
end
@show length(ref_idx)
@show length(sample_idx);
# Work inside the output directory so all generated files land there.
cd("/Users/biona001/.julia/dev/MendelImpute/data/1000_genome_phase3_v5/country_origin2")
# Build the chr18 target/reference panels for the chromosome-painting experiment:
# writes target.chr18.full.vcf.gz, ref.chr18.vcf.gz, and a typed-only target
# restricted to the top 50k ancestry-informative markers (AIMs), with a small
# fraction of genotypes masked/unphased. All file paths are relative to cwd.
function filter_and_mask(ref_idx, sample_idx, refID_to_population)
    # filter chromosome data for unique snps
    # (one-time preprocessing, kept for reference — chr18.uniqueSNPs.vcf.gz must already exist)
    # data = "../beagle_raw/chr18.1kg.phase3.v5a.vcf.gz"
    # full_record_index = .!find_duplicate_marker(data)
    # VCFTools.filter(data, full_record_index, 1:nsamples(data),
    #     des = "chr18.uniqueSNPs.vcf.gz")
    # summarize data
    total_snps, samples, _, _, _, maf_by_record, _ = gtstats("chr18.uniqueSNPs.vcf.gz")
    # generate target panel with all snps
    VCFTools.filter("chr18.uniqueSNPs.vcf.gz", 1:total_snps,
        sample_idx, des = "target.chr18.full.vcf.gz", allow_multiallelic=false)
    # also generate reference panel
    VCFTools.filter("chr18.uniqueSNPs.vcf.gz", 1:total_snps,
        ref_idx, des = "ref.chr18.vcf.gz", allow_multiallelic=false)
    # set top 50k ancestry informative marker as typed SNPs
    p = 50000
    aim_pvals = VCFTools.aim_select("chr18.uniqueSNPs.vcf.gz", refID_to_population)
    aim_rank = sortperm(aim_pvals)
    record_idx = falses(total_snps)
    [record_idx[aim_rank[i]] = true for i in 1:p] # typed SNPs are top aim markers
    VCFTools.filter("chr18.uniqueSNPs.vcf.gz", record_idx, sample_idx,
        des = "target.chr18.typedOnly.aim.vcf.gz", allow_multiallelic=false)
    # unphase and randomly mask entries in the target file.
    # NOTE(review): missingprop = 0.001 masks ~0.1% of entries, although the
    # original comment said 1% — confirm which rate was intended.
    n = length(sample_idx)
    masks = falses(p, n)
    missingprop = 0.001
    for j in 1:n, i in 1:p
        rand() < missingprop && (masks[i, j] = true)
    end
    mask_gt("target.chr18.typedOnly.aim.vcf.gz", masks,
        des="target.chr18.typedOnly.aim.masked.vcf.gz", unphase=true)
end
# Seed the global RNG so the random masking in filter_and_mask is reproducible.
Random.seed!(2020)
@time filter_and_mask(ref_idx, sample_idx, refID_to_population)
# Compress the reference panel into MendelImpute's JLSO format (ref.chr18.jlso);
# d is the compression parameter passed through to compress_haplotypes.
d = 1000
@time compress_haplotypes("ref.chr18.vcf.gz", "target.chr18.typedOnly.aim.masked.vcf.gz",
    "ref.chr18.jlso", d)
| manuscript/sec3.3/filter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Allegheny County Health Violation Analysis
#
# ## Program Objective
#
# Write a script that answers basic data-based questions concerning health violations in Allgheny county
#
# ## Questions to Pursue in Allegheny County Health Code Violations
#
# 1. What types of health violations are most common in three municipal areas of your choosing?
# 2. Which types of violations are mostly only considered "high" severity and not "medium" or "low" severity?
# 3. Which classification of restaurant (i.e. use the column 'description') has the most "high" severity violations? Does this vary by the municipalities of your choosing?
#
#
#
# +
import pandas as pd
import numpy as np
# NOTE(review): this binds `plt` to the top-level matplotlib package, not
# matplotlib.pyplot; `plt` is unused below, but confirm the intended import.
import matplotlib as plt
# %matplotlib inline
# show all columns in a dataframe
pd.set_option('display.max_columns', None)
df = pd.read_csv('data/allegheny_county_food_violations.csv')
# -
df.head()
# ## Data Assessment
#
# __Completeness__: Do we have all of the records that we should? Do we have missing records or not? Are there specific rows, columns, or cells missing?
#
# __Validity__: Does the data conform to a defined schema? A schema is a defined set of rules for data. These rules can be real-world constraints (e.g. negative height is impossible) and table-specific constraints (e.g. unique key constraints in tables).
#
# __Accuracy__: Inaccurate data is wrong data that is valid. It adheres to the defined schema, but it is still incorrect. An example of inaccurate data are typos.
#
# __Consistency__: Inconsistent data is both valid and accurate, but there are multiple correct ways of referring to the same thing. An example of inconsistent data is inconsistent capitalization in textual data.
# Check the data type of each column
df.info()
# Count null values in each column
df.isnull().sum()
# Count fully duplicated rows
df.duplicated().sum()
# ## Data Wrangling
# ### Convert columns with binary-like data into boolean data
#
# Some of the columns only contain two string values (e.g. "V" and "N" in the "rating" column). We can convert these to either a boolean or numeric data to make the column better suited for calculations downstream.
# Inspect columns with binary-like data: rating uses 'V'/'N', and the three
# severity flags appear to use 'T'/'F' (see the mappings applied below).
print(df['rating'].value_counts(), '\n')
print(df['low'].value_counts(), '\n')
print(df['medium'].value_counts(), '\n')
print(df['high'].value_counts())
# +
# Encode the rating column numerically: 'N' (no violation) -> 0, 'V' (violation) -> 1.
# Values outside the mapping become NaN, matching Series.map semantics.
df['rating'] = df['rating'].map({'N': 0, 'V': 1})

# Inspect results
df['rating'].value_counts()
# +
# Encode the three T/F severity flags as integers: 'T' -> 1, 'F' -> 0
mapping = {'T': 1, 'F': 0}
for level in ('low', 'medium', 'high'):
    df[level] = df[level].map(mapping)
# -
# Verify results
print(df['low'].value_counts(), '\n')
print(df['medium'].value_counts(), '\n')
print(df['high'].value_counts())
# ### Rename Columns
#
# The column names aren't intuitively named. Let's rename these so they are more easily understood what they are.
# +
# Rename columns that are unclear to self-describing names.
col = {
    "encounter": "inspection_id",
    "id": "restaurant_id",
    "bus_st_date": "start_date",
    "description": "facility_type",
    "description_new": "violation",
    "num": "street_num",
    "rating": "is_violation",
    "low": "violation_level_low",
    "medium": "violation_level_med",
    "high": "violation_level_high"
}
df = df.rename(columns=col)
# Verify changes
df.columns
# -
# ## Question 1: Investigate most common health violations
# ### Top Ten Common Health Violations
# Find the ten most common health violations (by raw row count)
df['violation'].value_counts().head(10)
# ## Question 2: Rank Facilities by the Amount of Health Violations
#
# Investigate which facilities have the highest number of health violations, and group facilities by municipality as well as health violation level.
# Sum the violation indicators per (facility, municipality) pair and rank
# facilities by their total number of violations.
cols = [
    'facility_name', 'is_violation', 'violation_level_low',
    'violation_level_med', 'violation_level_high', 'municipal',
]
df_viol_rank = (
    df[cols]
    .groupby(['facility_name', 'municipal'])
    .sum()
    .sort_values(by='is_violation', ascending=False)
)
df_viol_rank.head(20)
# ## Question 3: Which type of violations occur the most for each severity level?
# Aggregate each severity flag's count per violation type.
cols = ['violation', 'violation_level_low',
        'violation_level_med', 'violation_level_high']
df_violations_by_severity = (
    df[cols]
    .groupby(['violation'])
    .sum()
)
# #### Most common violations considered "low severity"
df_violations_by_severity['violation_level_low'].sort_values(ascending=False).head(10)
# #### Most common violations considered "medium severity"
df_violations_by_severity['violation_level_med'].sort_values(ascending=False).head(10)
# #### Most common violations considered "high severity"
df_violations_by_severity['violation_level_high'].sort_values(ascending=False).head(10)
# ## Question 4: Which classification of restaurant has the most "high" severity violations? Does this vary by the municipalities of your choosing?
# Get sum of high level violations by facility type within each municipality
cols = ['facility_type', 'municipal', 'violation_level_high']
df_viol_by_facility_type = (
    df[cols]
    .groupby(['municipal', 'facility_type'])
    .sum()
)
# Inspect the grouped dataframe
df_viol_by_facility_type.head()
# Within each municipality (index level 0), order facility types by their
# count of high-severity violations, highest first.
df_viol_by_municipal = (
    df_viol_by_facility_type
    .groupby(level=0, group_keys=False)
    .apply(lambda grp: grp.sort_values(by='violation_level_high', ascending=False))
)
# Inspect the grouped dataframe
df_viol_by_municipal.head()
# Reset index so the municipality becomes a regular column we can filter on
df_viol_by_municipal = df_viol_by_municipal.reset_index()
# #### Get the top 5 facility_types with the largest number of high level violations
# #### Robinson
df_viol_by_municipal.loc[df_viol_by_municipal['municipal'] == 'Robinson'].head()
# #### Pittsburgh-114
df_viol_by_municipal.loc[df_viol_by_municipal['municipal'] == 'Pittsburgh-114'].head()
# #### Dormont
df_viol_by_municipal.loc[df_viol_by_municipal['municipal'] == 'Dormont'].head()
# Based on the subset of municipalities, it is most likely that the facility type with the largest number of high level violations will be __restaurants with liquor__.
# ## Note to Self:
#
# Create a programmatic way to pull the first row of each grouped municipality. This will give us a survey of the facility types with the largest number of high violation levels for all municipalities. I was able to group by municipality and facility_type but couldn't figure out how to filter the resulting dataset (perhaps understanding multi-indexed dataframes more is necessary). The workaround was to reindex the dataframe and filter from there, but this made it difficult to understand how to filter the top row for each municipality.
| wk_10/health_violations/allegheny_health_violation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/heshumi/NNTI-WS2021-NLP-Project/blob/main/Task%202.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="vJoff6KegGK_"
# ## Task 2a) Binary neural sentiment classifier for Hindi dataset using pretrained Hindi word2vec model trained from task 1.
#
# **Note:** keep/upload the following files in the runtime before executing the notebook:
# `hindi_dataset.tsv`, `bengali_hatespeech.csv`, `bengali_hatespeech_sampled.csv`, `stopwords-bn.txt`, `stopwords-en.txt`, `stopwords-hi.txt`, `Word2Vec-hindi.model`, `Word2Vec-bg.model`, `task2_utils.py`. Note that
# `task2_utils.py` contains all the necessary classes and functions required in task 2; they are kept in a separate file as per the general guidelines of the project handout.
# + [markdown] id="S55mfENxGNOJ"
# #### Importing the packages
# + id="Zgc9KSPrcnCH"
# Imports
import pandas as pd
import numpy as np
from math import sqrt
import random
import time
import copy
import string
import torch.nn as nn
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, TensorDataset
import matplotlib.pyplot as plt
from task2_utils import * #separate .py file for functions and classes as instructed
# + [markdown] id="8GoTEhb8GUjf"
# #### Reading Hindi dataset for sentiment classification. Word2vec model for Hindi dataset is already trained in task 1 and will be loaded in a further cell.
# + colab={"base_uri": "https://localhost:8080/", "height": 198} id="H5vjly3Dc7rD" outputId="c3227149-fd52-424e-f9e7-b8880a446e6e"
# Load the Hindi dataset: 'text' (the sentence) and 'task_1' (the binary label).
data_hindi = pd.read_csv('hindi_dataset.tsv', sep='\t', usecols=['text', 'task_1'])
# NOTE: dev_hindi is an alias of data_hindi (no copy) — edits below mutate both.
dev_hindi = data_hindi
dev_hindi.head()
# + [markdown] id="pL-KWdcCGrW3"
# #### Preprocess the dataset. Here we remove the punctuations, stopwords and other non-influential words and symbols like 'http' and '@' from the beginning of a word and make word list candidates from the sentences.
# + colab={"base_uri": "https://localhost:8080/"} id="r4L-rIvUdH8l" outputId="3bc96f64-0e36-4ea7-e4d6-b4317c13fe9b"
# Load the Hindi stopword list as a set for fast membership tests.
with open('stopwords-hi.txt', 'r') as f:
    stopwords_hi = set(f.read().split())
# Punctuation characters to strip (includes the Devanagari danda '।').
punct = ':;?!-—-\"\'|।()[]{},./\\“'
# Tokenize and clean each sentence via preprocess() from task2_utils.
dev_hindi['text'] = dev_hindi['text'].apply(lambda x: preprocess(x, punct, stopwords_hi))
dev_hindi['text'][:5]
# + [markdown] id="7Wu2DZvGH8IH"
# #### Build the vocabulary available in the dataset
# + colab={"base_uri": "https://localhost:8080/"} id="NbAl-QjSdMfr" outputId="72684669-2f88-46a8-b292-aa95d8e500d2"
# Build the vocabulary (word -> count) from the cleaned sentences.
hindi_V = build_vocab(dev_hindi)
# Total token count. NOTE(review): `summ` is not read again in this notebook.
summ = sum(hindi_V.values())
len(hindi_V)
# + [markdown] id="GkMa5dsEIE_W"
# #### Developing a dictionary that represents the one hot encoding of the words in the vocabulary
# + id="6Jz0aynNdO7b"
# pad flag passed to build_onehot (task2_utils); 1 appears to reserve a
# one-hot slot for the padding token ' ' — confirm against build_onehot.
pad = 1
onehot_dict_hindi= build_onehot(hindi_V, pad)
# + colab={"base_uri": "https://localhost:8080/"} id="QdG_SeN_mkn8" outputId="b2bac588-8ec5-45b6-ee11-b1543e2e5724"
# print one hot of padding
print(onehot_dict_hindi[' '])
# + [markdown] id="A_95wHa1IUhe"
# #### Getting insights into the sentences in terms of the number of words per sentence.
# + colab={"base_uri": "https://localhost:8080/", "height": 432} id="HgqbFi99dUqj" outputId="9e209257-1bdc-4883-ffc7-37370f78a2d1"
# Distribution of sentence lengths (word counts) across the cleaned dataset;
# the histogram is saved to disk and summary statistics are displayed.
lengths = pd.Series([len(words) for words in dev_hindi['text']])
hist_ax = lengths.hist()
hist_ax.get_figure().savefig('Hindi_dataset_histogram.png')
lengths.describe()
# + [markdown] id="JCdvsYNSxU_I"
# #### From the histogram above we see that 75% of the sentences contain 23 words or fewer. We select 35 as the maximum number of words in a sentence to allow for outliers.
# + [markdown] id="4Abnnir1JTRX"
# #### Set the hyperparameters for Sentiment classification of Hindi Dataset
# + id="LWSCJgQ0yEU3"
# Set hyperparameters
batch_size = 64
min_sen_length=35
# NOTE(review): `epochs` is defined here but the training loop below uses N_EPOCHS = 50.
epochs = 10
embedding_size = 300
input_size = len(hindi_V) #For embedding
# + [markdown] id="f80Q7-jpxR2S"
# #### Then we create a custom Hindi dataset and data loader to train the binary classifier
# + id="VNN7X7sQx4hb"
# Build the torch Dataset from the cleaned dataframe (MyDataset from task2_utils).
dataset_hindi = MyDataset(dev_hindi, min_sen_length, onehot_dict_hindi)
# + [markdown] id="6H10YI0HZf5q"
# #### Split dataset to train, validation and test set and define Data loader for Hindi dataset
# + colab={"base_uri": "https://localhost:8080/"} id="vo7C8UKwZStq" outputId="45002935-0834-4786-a6a5-13b2916c3dfa"
# 70/15/15 split; the +1 terms compensate for integer truncation.
# NOTE(review): random_split raises unless the three sizes sum exactly to
# len(dataset_hindi) — this only balances for certain dataset lengths.
train_size, val_size, test_size = int(0.7 * len(dataset_hindi)), int(0.15 * len(dataset_hindi)+1), int(0.15 * len(dataset_hindi)+1)
train_dataset, val_dataset, test_dataset = torch.utils.data.random_split(dataset_hindi, [train_size, val_size, test_size])
trainloader_hindi = DataLoader(train_dataset, batch_size= batch_size, shuffle = False, num_workers=0)
valloader_hindi = DataLoader(val_dataset, batch_size= batch_size, shuffle = False, num_workers=0)
testloader_hindi = DataLoader(test_dataset, batch_size= batch_size, shuffle = False, num_workers=0)
# Sanity check: inspect the shape of one training batch.
i1, l1 = next(iter(trainloader_hindi))
print(i1.shape)
# + [markdown] id="hOJNK2r0dzG5"
# #### Create same Word2Vec model architecture as defined in task 1. We are not gonna train the Word2Vec model again rather will load the Hindi Word2Vec model as instructed in the next cell.
# + id="RtawnFPqcX6h"
# Instantiate the same Word2Vec architecture as in task 1 (weights loaded later).
word2vec_model = Word2Vec(input_size, embedding_size)
# + [markdown] id="Rvx_vnBaMZGO"
# #### Load the pretrained word2vec model. Also, add a new column to the first hidden layer for allwoing weights of padding. (Please make sure the pretrained model from task 1 'word2vec_model' is avaialable in the runtime)
# + id="mIFW0QHX4Tiw"
# word2vec_model.cuda()
# Load the task-1 weights onto CPU and switch the model to inference mode.
word2vec_model.load_state_dict(torch.load('/content/Word2Vec-hindi.model', map_location='cpu'))
word2vec_model.eval()
# Add a column to the first hidden layer matrix of the word2vec model for the padding embedding.
embeddings = torch.cat((word2vec_model.fc1.weight.detach(), torch.zeros(embedding_size).unsqueeze(1)), dim = 1) # init the padding embedding by zeros
biases = word2vec_model.fc1.bias.detach()
# + colab={"base_uri": "https://localhost:8080/"} id="CNWV_RHiAXZ3" outputId="4c5de98e-ded0-469a-bd54-89a96bafd6f3"
embeddings.shape
# + [markdown] id="T1LXnDp4NCIN"
# #### Set more hyperparameters to train binary sentiment classifier
# + id="aEzMu-N_g6i2"
# CNN hyperparameters: filter counts/sizes are passed through to CNN_HINDI.
n_filters = 35
filter_sizes = [8,5,3]
output_dim = 1
dropout = 0.5
N_EPOCHS = 50 #with early stopping
# + [markdown] id="w0ilEqsBQVvH"
# #### Convolution based model architecture for Hindi sentiment classification
# + [markdown] id="g6IrwXroSpjF"
# #### Create CNN model instance by importing CNN_HINDI defined in task2_utils file.
# + colab={"base_uri": "https://localhost:8080/"} id="5peQSsd0S4HE" outputId="dd6e0e8b-6d0e-49a3-e4ad-adb8c935ce40"
# Build the CNN classifier and use float64 to match the data pipeline.
cnn = CNN_HINDI(input_size, embedding_size, n_filters, filter_sizes, output_dim, dropout)
cnn.double()
# + [markdown] id="2ZjVqRcvSt_u"
# #### Copy the pre-trained embeddings weights and biases as the Hindi CNN model's embedding layer
# + colab={"base_uri": "https://localhost:8080/"} id="qMpcuZGldJwT" outputId="1a3a00fd-2c54-44da-b7e3-06522a8b3b10"
# In-place copy keeps the embedding parameters registered with the optimizer.
cnn.embedding.bias.data.copy_(biases)
cnn.embedding.weight.data.copy_(embeddings)
# + [markdown] id="g62JLXxITEbN"
# #### Define optimizer and criterion for sentiment classification training
# + id="EaxF-NwYnEHR"
optimizer = optim.Adam(cnn.parameters())
# BCEWithLogitsLoss combines a sigmoid with binary cross-entropy on raw logits.
criterion = nn.BCEWithLogitsLoss()
# + [markdown] id="ukQX0dynUjn2"
# #### Training the binary sentiment classification model
# + colab={"base_uri": "https://localhost:8080/"} id="ZCKBNczrthZs" outputId="78bee0da-d025-4927-debc-801b26bc7a97"
# Keeping loss and accuracy history of epochs for visualization
train_loss_history = []
train_acc_history = []
val_loss_history = []
val_acc_history = []
# Counter for early stopping: training stops if no better validation loss
# is achieved in 4 consecutive epochs.
stop_criterion = 0
best_valid_loss = float('inf') #Initially set to infinity
# Main training loop: one train + validation pass per epoch, checkpointing
# the best model (by validation loss) to cnn_hindi_model.pt.
for epoch in range(N_EPOCHS):
    # start time of the epoch
    start_time = time.time()
    train_loss, train_acc = train_cnn(cnn, trainloader_hindi, optimizer, criterion)
    valid_loss, valid_acc = evaluate_cnn(cnn, valloader_hindi, criterion)
    train_loss_history.append(train_loss)
    train_acc_history.append(train_acc)
    val_loss_history.append(valid_loss)
    val_acc_history.append(valid_acc)
    # End time
    end_time = time.time()
    # calculate elapsed time in mins and seconds
    required_time = end_time - start_time
    mins = int(required_time / 60)
    secs = int(required_time - (mins * 60))
    stop_criterion += 1
    # Saving the best model (and keeping an in-memory copy for evaluation)
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(cnn.state_dict(), 'cnn_hindi_model.pt')
        best_model_hindi = copy.deepcopy(cnn)
        stop_criterion = 0
    print(f'Epoch: {epoch+1:02} | Epoch Time: {mins}m {secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')
    if stop_criterion == 4:
        break
# + [markdown] id="y-a1RtfwjWbi"
# #### Visualize Loss and Accuracy for the model
# + colab={"base_uri": "https://localhost:8080/", "height": 316} id="swv9iYTnvPbA" outputId="1993ea3a-619b-4bcb-9bfe-30b680c23724"
# Side-by-side plots of loss (left) and accuracy (right) per epoch.
epoch_count = list(range(1, len(train_loss_history)+1))
x1 = epoch_count
# NOTE(review): x2 is assigned but never used below (x1 serves both subplots).
x2 = epoch_count
y1_train = train_loss_history
y1_val = val_loss_history
y2_train = train_acc_history
y2_val = val_acc_history
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12,4))
axes[0].plot(x1, y1_train, 'r--')
axes[0].plot(x1, y1_val, 'b--')
axes[0].legend(['Training Loss', 'Validation Loss'])
axes[0].set(xlabel='Epoch', ylabel='Loss')
axes[0].set_title('CNN model loss')
axes[1].plot(x1, y2_train, 'r--')
axes[1].plot(x1, y2_val, 'b--')
axes[1].legend(['Training Accuracy', 'Validation Accuracy'])
axes[1].set(xlabel='Epoch', ylabel='Accuracy')
axes[1].set_title('CNN model accuracy')
# y=-0.01 places the figure title just below the axes.
fig.suptitle('Hindi sentiment classifier loss and accuracy', y=-0.01)
plt.savefig('cnn.png')
# + [markdown] id="Ip8xX3x7akux"
# #### Evaluate model on test data
# + colab={"base_uri": "https://localhost:8080/"} id="J65RPPoOaj7z" outputId="22f21286-5af5-417e-8782-1ad348ae20fd"
# Evaluate the best checkpoint on the held-out test split.
best_model_hindi.eval()
test_acc_hindi, precision, recall, f1_score = evaluate_sentiment_classifier(best_model_hindi, testloader_hindi)
print(f'Accuracy of Binary sentiment classification for Hindi test dataset: {test_acc_hindi*100:.2f}%')
print(f'Precision of Hindi test dataset: {precision*100:.2f}%')
print(f'Recall of Hindi test dataset: {recall*100:.2f}%')
print(f'F1 score for Hindi test dataset: {f1_score*100:.2f}%')
# + [markdown] id="yz4CySW3q0zW"
# ## Task 2b) Preprocess the Bengali dataset so that it approximately matches the Hindi dataset in size and class distribution
# + [markdown] id="02P8ILCho4dG"
# **MUST READ**
# #### If you want to resample the Bengali dataset, you have to train the Bengali word2vec model again, which requires a lot of time and memory (we trained it on a cluster). Setting the `train_bg_word2vec` flag to 1 forces retraining of the Bengali word2vec model. Initially the flag is set to 0 so that the presampled Bengali data and presampled Bengali embedding are loaded instead. **Set `train_bg_word2vec = 1` if and only if you want to train word2vec for the Bengali dataset.**
# + id="kCET16qvsI3N"
# Flag: 0 = load the pre-sampled Bengali data and pretrained embeddings;
# 1 = resample and retrain word2vec for Bengali from scratch (takes hours).
train_bg_word2vec = 0
# + [markdown] id="t7e5vx4Rx7KG"
# #### Loading the already sampled Bengali dataset if train_bg_word2vec = 0 else sample again and you may need to train word2vec for Bengali dataeset if vocabulary size mismatch due to random sampling(though seed is used)
# + colab={"base_uri": "https://localhost:8080/"} id="dWvVuICyx7wG" outputId="d9adac13-b79d-4379-d3e8-ae01c9d3f785"
# Either load the pre-sampled Bengali dataset, or rebuild it by sampling
# 2500 hate + 2200 non-hate rows (roughly matching the Hindi dataset size).
if train_bg_word2vec==0:
    data = pd.read_csv('bengali_hatespeech_sampled.csv', usecols=['text', 'task_1'])
    dev_bg = data
    print(dev_bg.head())
else:
    df = pd.read_csv('bengali_hatespeech.csv', usecols=['sentence', 'hate'])
    #split data classes: first 10k rows are hate, last 20k are non-hate
    data_hate = df.iloc[:10000]
    data_not = df.iloc[-20000:]
    data_hate_sampled = data_hate.sample(n = 2500, random_state=1)
    data_not_sampled = data_not.sample(n = 2200, random_state=1)
    data_concat = pd.concat([data_hate_sampled, data_not_sampled], ignore_index=True)
    # Shuffle the combined rows.
    # NOTE(review): this sample(frac=1) has no random_state, so the final
    # shuffle — and hence the saved CSV — is not fully reproducible.
    dev = data_concat.sample(frac=1).reset_index(drop=True)
    dev.columns=['text', 'task_1']
    # Recode the numeric label to the HOF/NOT scheme used by the Hindi data.
    dev['task_1'] = dev['task_1'].replace([1],'HOF')
    dev['task_1'] = dev['task_1'].replace([0],'NOT')
    dev_bg = dev
    dev.to_csv('bengali_hatespeech_sampled.csv')
    print(dev_bg.head())
# + [markdown] id="j-2xMNnTxr6D"
# #### Preprocess the Bengali dataset. Here we remove the punctuations, stopwords and other non-influential words and symbols like 'http' and '@' from the beginning of a word and make word list candidates from the sentences.
# + colab={"base_uri": "https://localhost:8080/"} id="ek-obO1Rx9XQ" outputId="60b854da-2f5a-44bb-e624-567d8f350e38"
with open('stopwords-hi.txt', 'r') as f:
stopwords_hi = f.read().split()
with open('stopwords-bn.txt', 'r') as f:
stopwords_bn = f.read().split()
with open('stopwords-en.txt', 'r') as f:
stopwords = set(f.read().split() + stopwords_hi + stopwords_bn)
punct = ':;?!-—-\"\'|।()[]{},./\\“'
dev_bg['text'] = dev_bg['text'].apply(lambda x: preprocess(x, punct, stopwords))
dev_bg['text'][:5]
# + [markdown] id="_jwh0-Dt0cke"
# #### Build the vocabulary avaialable in the dataset
# + colab={"base_uri": "https://localhost:8080/"} id="T4iS8pr-0ckm" outputId="c0d28468-4f10-41ce-a576-bdaaa76c962b"
bg_V = build_vocab(dev_bg)
summ = sum(bg_V.values())
len(bg_V)
# + [markdown] id="S6vGO2bv-TMt"
# #### Developing a dictionary that represents the one hot encoding of the words in the vocabulary
# + id="aOPQNd3T1a89"
if train_bg_word2vec==0: #padding is 1 when we don't want to train word2vec but will load it later
    pad = 1
    onehot_dict_bg = build_onehot(bg_V, pad)
else:
    pad = 0
    onehot_dict_bg = build_onehot(bg_V, pad)
# + [markdown] id="YEgbtIGzjeIC"
# #### Set hyperparameters for training word2vec model for Bengali dataset
# + id="oCXKdlh61mol"
# Set hyperparameters
window_size = 5
embedding_size = 300
input_size = len(bg_V) # Onehot vector length
batch_size = 50
# More hyperparameters
learning_rate = 0.05
epochs = 100
# + [markdown] id="vldaDUt_kzjL"
# #### Create Bengali dataset instance for word2vec and use collate function for batches
# + id="Is0-eTrl2Ko1"
dataset_bg = BengaliDataset(dev_bg['text'], window_size, bg_V, summ, onehot_dict_bg)
dataloader = DataLoader(dataset_bg, collate_fn = my_collate, batch_size= batch_size, shuffle = False, num_workers=0)
# + [markdown] id="UeqWQepolVZi"
# #### Create word2vec model instance for Bengali if you want to train the model.
# + id="XqOQxjAT2iSk"
word2vec_bg_model = Word2Vec(input_size, embedding_size)
if train_bg_word2vec==1:
    print(word2vec_bg_model.fc1.weight.shape)
    # Train in double precision; start from uniformly-initialised weights.
    word2vec_bg_model.double()
    nn.init.uniform_(word2vec_bg_model.fc1.weight)
    nn.init.uniform_(word2vec_bg_model.fc2.weight)
# + [markdown] id="qEiQLp8Wl7ET"
# #### Define optimizer and loss for Word2Vec
# + id="eQncTHEQ2oX1"
# Define optimizer and loss
optimizer = torch.optim.Adam(word2vec_bg_model.parameters(), lr = learning_rate)
criterion = nn.CosineEmbeddingLoss()
# + [markdown] id="oflRqEkO3BVl"
# ## Task 2c) Bengali Embedding
# #### Run the below cell to train the word2vec model. It will take hours to completely train. If training word2vec for Bengali dataset is set to 1 then it will train otherwise, already trained model will be loaded
# + id="yCnG2MZS2-Yd"
if train_bg_word2vec==1: #For training
    train(epochs, dataloader, word2vec_bg_model, criterion, optimizer)
    print("Training finished")
    # BUG FIX: the original saved `model.state_dict()`, but `model` is not
    # defined in this notebook — the freshly trained network is
    # `word2vec_bg_model`, so save that one.
    torch.save(word2vec_bg_model.state_dict(), 'Word2Vec-bg.model')
else: #for loading already trained word2vec model 'Word2Vec-bg.model' that should be available in the runtime
    word2vec_bg_model = Word2Vec(input_size, embedding_size)
    word2vec_bg_model.load_state_dict(torch.load('/content/Word2Vec-bg.model', map_location='cpu'))
    word2vec_bg_model.eval()
# Add a column to the first hidden layer matrix of the word2vec model for the padding embedding.
embeddings = torch.cat((word2vec_bg_model.fc1.weight.detach(), torch.zeros(embedding_size).unsqueeze(1)), dim = 1) # init the padding embedding by zeros
biases = word2vec_bg_model.fc1.bias.detach()
# + [markdown] id="K-5EoGDXPBgX"
# ## Task 2d) Part-0: Build a Bengali sentiment classifier with Bengali embedding
# + [markdown] id="8jvZwjTY140x"
# #### One hot encoding with padding
# + colab={"base_uri": "https://localhost:8080/"} id="KzwcLnGD3Ee0" outputId="c0bdbb82-0ebe-41b8-9b5f-6cb26df04635"
# Rebuild the one-hot dictionary with a padding slot (pad=1).
pad = 1
onehot_dict_bg = build_onehot(bg_V, pad)
# The ' ' key is presumably the padding token's index — verify in build_onehot.
print(onehot_dict_bg[' '])
# + [markdown] id="Mr3entfI2Syl"
# #### Getting the insights of the sentences in sense of the number of words in a sentence for bengali Dataset
# + colab={"base_uri": "https://localhost:8080/", "height": 428} id="RAug556f3QM0" outputId="bafa9f40-e418-428d-ebb7-2e9815a5c2db"
# Histogram of tokenised sentence lengths, used to choose a padding length.
text_len = [len(x) for x in dev_bg['text']]
ax = pd.Series(text_len).hist()
fig = ax.get_figure()
fig.savefig('Bengali_dataset_histogram.png')
pd.Series(text_len).describe()
# + [markdown] id="k4HakIe9gPCE"
# #### From the Histogram above we see that 75% of the sentences are covered with 11 being the size of the words in sentences. We select 20 as the maximum number of words in a sentence considering factor like outliers for Bengali dataset.
# + [markdown] id="jFbL1pNR6Y9c"
# #### Set hyperparameters for sentiment analysis on bengali data using already trained hindi model
# + id="-uh1lW763R7M"
# Set hyperparameters
batch_size = 64
min_sen_length = 20  # sentences padded/truncated to 20 tokens (see histogram above)
embedding_size = 300
input_size = len(bg_V)
output_dim = 1  # single logit for binary classification
dropout = 0.5
n_filters = 35
filter_sizes = [8,5,3]  # one conv branch per n-gram width
# + [markdown] id="aRJJyFfd6Jpf"
# #### We create a custom Bengali dataset and data loader to train the binary classifier
# + id="uj723YID3Y8M"
dataset_bg = MyDataset(dev_bg, min_sen_length, onehot_dict_bg)
# + colab={"base_uri": "https://localhost:8080/"} id="d2psC3px3aps" outputId="bdfd1ca8-49ee-492e-b72c-37322acf01e0"
# 70/15/15 split.  ROBUSTNESS FIX: the original computed all three sizes with
# int() truncation, so they could sum to less than len(dataset_bg) and make
# random_split raise; assign the remainder to the test split instead.
train_size = int(0.7 * len(dataset_bg))
val_size = int(0.15 * len(dataset_bg))
test_size = len(dataset_bg) - train_size - val_size
train_dataset, val_dataset, test_dataset = torch.utils.data.random_split(dataset_bg, [train_size, val_size, test_size])
trainloader = DataLoader(train_dataset, batch_size= batch_size, shuffle = False, num_workers=0)
valloader = DataLoader(val_dataset, batch_size= batch_size, shuffle = False, num_workers=0)
testloader = DataLoader(test_dataset, batch_size= batch_size, shuffle = False, num_workers=0)
# Sanity-check one validation batch's input shape.
i1, l1 = next(iter(valloader))
print(i1.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="FAp3jMui3npt" outputId="6c212fe3-3905-4038-b4b5-005b6e1493b0"
embeddings.shape
# + [markdown] id="xlXnd_ryRxzI"
# #### Create CNN model instance by importing CNN_BENGALI defined in task2_utils file.
# + colab={"base_uri": "https://localhost:8080/"} id="jWEYvJbRRpk2" outputId="cbb6d944-b9ce-425e-9a02-ffa2ed1854f3"
cnn_bengali_bg = CNN_BENGALI(input_size, embedding_size, n_filters, filter_sizes, output_dim, dropout)
# Use double precision, matching the word2vec weights copied in below.
cnn_bengali_bg.double()
# + [markdown] id="3ekmITJlR9cv"
# #### Copying the biases and embeddings of Bengali word2vec model
# + colab={"base_uri": "https://localhost:8080/"} id="zkJjCxTqR9cw" outputId="73875ff0-0296-49a8-f05f-c33a1e64d0f5"
# Initialise the classifier's embedding layer from the trained word2vec weights.
cnn_bengali_bg.embedding.bias.data.copy_(biases)
cnn_bengali_bg.embedding.weight.data.copy_(embeddings)
# + [markdown] id="kxcr6FKYSaX3"
# #### Define optimizer and criterion for sentiment analysis training
# + id="vnsncUeGSaX4"
optimizer = optim.Adam(cnn_bengali_bg.parameters())
criterion = nn.BCEWithLogitsLoss()
# + [markdown] id="eQUlFLlTSzDv"
# #### Train CNN on Bengali data with Bengali Embedding
# + colab={"base_uri": "https://localhost:8080/"} id="Po09-TqySzDw" outputId="6bf5f006-04f4-498f-8e9a-81dc7e80c8ce"
N_EPOCHS = 50
best_valid_loss = float('inf') #Initially sets to infinity
cnn_bengali_bg.train()
# Keeping loss and accuracy history of epochs for visualization
train_loss_history = []
train_acc_history = []
val_loss_history = []
val_acc_history = []
# Counter for Early stopping. Stops if no better loss for validation is achieved in 4 consecutive epochs.
stop_criterion = 0
for epoch in range(N_EPOCHS):
    #start time of the epoch
    start_time = time.time()
    train_loss, train_acc = train_cnn(cnn_bengali_bg, trainloader, optimizer, criterion)
    valid_loss, valid_acc = evaluate_cnn(cnn_bengali_bg, valloader, criterion)
    train_loss_history.append(train_loss)
    train_acc_history.append(train_acc)
    val_loss_history.append(valid_loss)
    val_acc_history.append(valid_acc)
    #End time
    end_time = time.time()
    # calculate elapsed time in mins and seconds
    required_time = end_time - start_time
    mins = int(required_time / 60)
    secs = int(required_time - (mins * 60))
    stop_criterion += 1
    #Saving the best model
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(cnn_bengali_bg.state_dict(), 'cnn_bengali_bg_model.pt')
        # Keep an in-memory copy of the best weights for evaluation later.
        best_model_bengali_bg = copy.deepcopy(cnn_bengali_bg)
        stop_criterion = 0
    print(f'Epoch: {epoch+1:02} | Epoch Time: {mins}m {secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')
    if stop_criterion == 4:
        break
# + [markdown] id="HDCddtElTZt-"
# #### Visualize Loss and Accuracy for the sentiment classification model on Bengali data with Bengali embedding
# + colab={"base_uri": "https://localhost:8080/", "height": 316} id="6EQub1lHTZt-" outputId="6db9a2f6-497c-4787-c7a6-5a6b16e9fe40"
epoch_count = list(range(1, len(train_loss_history)+1))
x1 = epoch_count
x2 = epoch_count  # NOTE: x2 is assigned but never used below
y1_train = train_loss_history
y1_val = val_loss_history
y2_train = train_acc_history
y2_val = val_acc_history
# Left panel: loss curves; right panel: accuracy curves.
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12,4))
axes[0].plot(x1, y1_train, 'r--')
axes[0].plot(x1, y1_val, 'b--')
axes[0].legend(['Training Loss', 'Validation Loss'])
axes[0].set(xlabel='Epoch', ylabel='Loss')
axes[0].set_title('Bengali CNN loss with Bengali data')
axes[1].plot(x1, y2_train, 'r--')
axes[1].plot(x1, y2_val, 'b--')
axes[1].legend(['Training Accuracy', 'Validation Accuracy'])
axes[1].set(xlabel='Epoch', ylabel='Accuracy')
axes[1].set_title('Bengali CNN accuracy with Bengali data')
fig.suptitle('Bengali sentiment classifier loss and accuracy', y=-0.01)
plt.savefig('bengali_cnn_with_bengali_data.png')
# + [markdown] id="4qeo0FChUBhm"
# #### Evaluation on bengali test dataset of Bengali sentiment classifier
# + colab={"base_uri": "https://localhost:8080/"} id="yrXEHlmqUBhm" outputId="47696d47-e79c-4388-b199-e8fd21905870"
best_model_bengali_bg.eval()
test_acc_on_bengali_test_data, precision, recall, f1_score = evaluate_sentiment_classifier(best_model_bengali_bg, testloader)
print(f'Binary sentiment classification for Bengali test dataset with Bengali model: {test_acc_on_bengali_test_data*100:.2f}%')
print(f'Precision for Bengali test dataset: {precision*100:.2f}%')
print(f'Recall for Bengali test dataset: {recall*100:.2f}%')
print(f'F1 score for Bengali test dataset: {f1_score*100:.2f}%')
# + [markdown] id="TV-DpcLmRa_f"
# ## Task 2d) Part-1: Apply the classifier(Hindi Sentiment classifier) to Bengali data(Bengali Embedding that is completely unseen to the Hindi sentiment classifier).
# + [markdown] id="79ucOf4DJJkl"
# #### Create CNN model instance by importing CNN_BENGALI defined in task2_utils file.
# + colab={"base_uri": "https://localhost:8080/"} id="5qzpVHqjJ3Gf" outputId="8f33fc1f-1349-4611-88a9-ac1d199d3da9"
cnn_bengali_hi = CNN_BENGALI(input_size, embedding_size, n_filters, filter_sizes, output_dim, dropout)
cnn_bengali_hi.double()
# + [markdown] id="GUBjypOR7Qlx"
# #### Copying the biases and embeddings of Bengali word2vec model
# + colab={"base_uri": "https://localhost:8080/"} id="PzvkGE5lKQmr" outputId="dae31e37-1c48-4c84-9dca-6ad38aeb8801"
# Embedding layer comes from the Bengali word2vec...
cnn_bengali_hi.embedding.bias.data.copy_(biases)
cnn_bengali_hi.embedding.weight.data.copy_(embeddings)
# + [markdown] id="79GlWKvQ7byc"
# #### Copying the weights and biases from **HINDI** CNN model that is already trained for creating Bengali CNN model. That means `cnn_bengali_hi` model has Bengali word2vec as embedding layer and all other layers are from the Hindi CNN model.
# + colab={"base_uri": "https://localhost:8080/"} id="9rYFzGrfKXLL" outputId="19bfd4ab-c533-4f76-fc42-692f2ec4daae"
# ...while every other layer (convs + final linear) is transferred from the
# best trained Hindi classifier.
cnn_bengali_hi.conv_0.bias.data.copy_(best_model_hindi.conv_0.bias)
cnn_bengali_hi.conv_0.weight.data.copy_(best_model_hindi.conv_0.weight)
cnn_bengali_hi.conv_1.bias.data.copy_(best_model_hindi.conv_1.bias)
cnn_bengali_hi.conv_1.weight.data.copy_(best_model_hindi.conv_1.weight)
cnn_bengali_hi.conv_2.bias.data.copy_(best_model_hindi.conv_2.bias)
cnn_bengali_hi.conv_2.weight.data.copy_(best_model_hindi.conv_2.weight)
cnn_bengali_hi.fc.bias.data.copy_(best_model_hindi.fc.bias)
cnn_bengali_hi.fc.weight.data.copy_(best_model_hindi.fc.weight)
# + [markdown] id="DGC32fQGXtH3"
# #### Evaluation of Sentiment classifier while Bengali embedding is used on Bengali test data while using Hindi model's weights in other layers except embedding
# + colab={"base_uri": "https://localhost:8080/"} id="5bbhrqHePtuS" outputId="9c158f3c-7cf3-43ef-9ffd-f982caa664b5"
cnn_bengali_hi.eval()
acc_of_senti_model_on_bengali_test_data_without_retrain, precision, recall, f1_score = evaluate_sentiment_classifier(cnn_bengali_hi, testloader)
print(f'Binary sentiment classification for Bengali test dataset before retraining with Bengali Data: {acc_of_senti_model_on_bengali_test_data_without_retrain*100:.2f}%')
print(f'Precision for Bengali test dataset before retraining: {precision*100:.2f}%')
print(f'Recall for Bengali test dataset before retraining: {recall*100:.2f}%')
print(f'F1 score for Bengali test dataset before retraining: {f1_score*100:.2f}%')
# + [markdown] id="553JntRnfotP"
# ## Task 2d) Part-2: Retrain model with Bengali data
# + [markdown] id="BX-LPpDw8nAt"
# #### Define optimizer and criterion for sentiment analysis retraining
# + id="V9ZGbqfClvkJ"
optimizer = optim.Adam(cnn_bengali_hi.parameters())
criterion = nn.BCEWithLogitsLoss()
# + [markdown] id="4eBM0vCGhUvh"
# #### Retrain CNN on Bengali data
# + id="bMntW_24QJBa" colab={"base_uri": "https://localhost:8080/"} outputId="7d7e9678-3581-41d5-ccc1-8e3df2234260"
# Same training loop as Part-0, but fine-tuning the Hindi-initialised model.
N_EPOCHS = 50
best_valid_loss = float('inf') #Initially sets to infinity
cnn_bengali_hi.train()
# Keeping loss and accuracy history of epochs for visualization
train_loss_history = []
train_acc_history = []
val_loss_history = []
val_acc_history = []
# Counter for Early stopping. Stops if no better loss for validation is achieved in 4 consecutive epochs.
stop_criterion = 0
for epoch in range(N_EPOCHS):
    #start time of the epoch
    start_time = time.time()
    train_loss, train_acc = train_cnn(cnn_bengali_hi, trainloader, optimizer, criterion)
    valid_loss, valid_acc = evaluate_cnn(cnn_bengali_hi, valloader, criterion)
    train_loss_history.append(train_loss)
    train_acc_history.append(train_acc)
    val_loss_history.append(valid_loss)
    val_acc_history.append(valid_acc)
    #End time
    end_time = time.time()
    # calculate elapsed time in mins and seconds
    required_time = end_time - start_time
    mins = int(required_time / 60)
    secs = int(required_time - (mins * 60))
    stop_criterion += 1
    #Saving the best model
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(cnn_bengali_hi.state_dict(), 'cnn_bengali_retrained_model.pt')
        best_model_bengali_retrained = copy.deepcopy(cnn_bengali_hi)
        stop_criterion = 0
    print(f'Epoch: {epoch+1:02} | Epoch Time: {mins}m {secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')
    if stop_criterion == 4:
        break
# + [markdown] id="KnHMWcaVH9qe"
# #### Visualize Loss and Accuracy for the model with Bengali dataset after retraining
# + id="LVbKKZBMh-2p" colab={"base_uri": "https://localhost:8080/", "height": 316} outputId="99c5101c-287b-47cd-8134-304849bb6331"
epoch_count = list(range(1, len(train_loss_history)+1))
x1 = epoch_count
x2 = epoch_count  # NOTE: x2 is assigned but never used below
y1_train = train_loss_history
y1_val = val_loss_history
y2_train = train_acc_history
y2_val = val_acc_history
# Left panel: loss curves; right panel: accuracy curves.
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12,4))
axes[0].plot(x1, y1_train, 'r--')
axes[0].plot(x1, y1_val, 'b--')
axes[0].legend(['Training Loss', 'Validation Loss'])
axes[0].set(xlabel='Epoch', ylabel='Loss')
axes[0].set_title('Bengali CNN loss retrained with Bengali data')
axes[1].plot(x1, y2_train, 'r--')
axes[1].plot(x1, y2_val, 'b--')
axes[1].legend(['Training Accuracy', 'Validation Accuracy'])
axes[1].set(xlabel='Epoch', ylabel='Accuracy')
axes[1].set_title('Bengali CNN accuracy retrained with Bengali data')
fig.suptitle('Sentiment classifier loss and accuracy after retraining with Bengali data', y=-0.01)
plt.savefig('bengali_cnn_after_retrained.png')
# + [markdown] id="crhmKqKnws0t"
# #### Evaluation on bengali test dataset of Sentiment classifier after retraining with bengali Embedding
# + id="m65ARtQEoko-" colab={"base_uri": "https://localhost:8080/"} outputId="2178cc43-bfb0-4584-a4ac-e311a8920cc3"
best_model_bengali_retrained.eval()
test_acc_on_bengali_test_data_retrained, precision, recall, f1_score = evaluate_sentiment_classifier(best_model_bengali_retrained, testloader)
print(f'Binary sentiment classification for Bengali test dataset after retraining: {test_acc_on_bengali_test_data_retrained*100:.2f}%')
print(f'Precision for Bengali test dataset after retraining: {precision*100:.2f}%')
print(f'Recall for Bengali test dataset after retraining: {recall*100:.2f}%')
print(f'F1 score for Bengali test dataset after retraining: {f1_score*100:.2f}%')
| Task 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6.8 64-bit
# language: python
# name: python3
# ---
import numpy as np
import math
import random
import time
# ## P20-EX1
# Estimate pi by a left Riemann sum of the quarter circle y = sqrt(1 - x^2)
# on [0, 1]; that integral equals pi/4, so each sum is scaled by 4.
def f(x):
    """Height of the unit quarter circle at x (0 <= x <= 1)."""
    return math.sqrt(1 - x ** 2)

slices = [1000, 10000, 100000, 1000000]
ans = []  # one pi estimate per slice count, finer grids last
# Renamed the loop/accumulator variables: the original used `slice` and
# `sum`, shadowing the builtins of the same names.
for n_slices in slices:
    dx = 1.0 / n_slices
    total = 0.0
    for i in range(n_slices):
        total += f(i * dx) * dx
    ans.append(total * 4)
print(ans)
# ## P23-EX3
def F(a, b, c, d, n, f):
    """Monte Carlo estimate of the signed area between y = f(x) and y = 0.

    Samples n points uniformly from the rectangle [a, b] x [c, d]; a point
    below the curve and above the axis counts +1, a point above the curve
    and below the axis counts -1, and the hit ratio is scaled by the
    rectangle area (b - a) * (d - c).
    """
    assert a <= b and c <= d and n > 0
    hits = 0  # renamed from `sum`, which shadowed the builtin
    for _ in range(n):
        x = np.random.uniform(a, b)
        y = np.random.uniform(c, d)
        if f(x) > y and y >= 0.:
            hits += 1
        elif f(x) <= y and y < 0.:
            hits -= 1
    return float(hits) / n * (b - a) * (d - c)
# Sanity check: the expected value of the estimator is the signed area of
# x**3 - 2 over [-1, 2], analytically -2.25; 10**6 samples keep the noise low.
f = lambda x:x**3-2
print (F(-1,2,-3,6,1000000,f))
# ## P36-EX5 估计整数子集 $1\sim n$的大小
# +
def getK(X):
    """Draw uniform elements of X (with replacement) until one repeats.

    Returns the number of draws made before the first collision — the
    birthday-paradox statistic used by SetCount below.
    """
    def draw():
        return random.sample(X, 1)[0]

    seen = []
    count = 0
    current = draw()
    while True:
        count += 1
        seen.append(current)
        current = draw()
        if current in seen:
            return count
def SetCount(X, times=1000):
    """Estimate |X| from repeated collision counts (birthday paradox).

    Averages getK(X) over `times` independent trials and returns
    2 * mean^2 / pi, the classic estimator of the set size.
    """
    mean_k = sum(getK(X) for _ in range(times)) / times
    return 2 * mean_k * mean_k / math.pi
# -
# Estimate the size of {1..n} for a range of n and compare with the truth.
N = [1, 5, 10, 50, 100, 500, 1000, 5000, 10000]
ans = []
for n in N:
    X = list(np.arange(1, n+1))
    ans.append(SetCount(X))
print(ans)
# ## P67-EX7 搜索有序链表...
# +
class OrderedList():
    """A sorted doubly-linked list stored in parallel arrays.

    val[i] is the value at storage slot i; next[i] / pre[i] are the slot
    indices of the logical successor / predecessor.  The list is built
    sorted (0..len-1) and then physically shuffled so the storage order is
    random while the logical (linked) order stays sorted.
    """
    def __init__(self, len=10000) -> None:
        # `len` (shadowing the builtin) is the number of elements.
        self.len = len
        self.val = []
        self.next = []
        self.pre = []
        # Generate test data: values 0..len-1 in sorted linked order.
        for i in range(len):
            self.val.append(i)
            self.next.append(i+1)
            self.pre.append(i-1)
        # Close the links into a ring: last points to first and vice versa.
        self.next[-1] = 0
        self.pre[0] = len - 1
        self.head = 0
        # Shuffle the physical storage.
        self.shuffle()
    def shuffle(self):
        """Randomly permute storage slots, swapping values and re-linking
        both neighbours of each swapped pair so the logical order survives.

        NOTE(review): swapping a slot with itself or with an adjacent slot is
        the classic corruption case for pointer-swap shuffles — worth
        verifying (the benchmark below does assert every search succeeds).
        """
        for i in range(self.len):
            r = np.random.randint(self.len)
            # Keep `head` pointing at the slot that now holds the old head.
            if self.head == i:
                self.head = r
            elif self.head == r:
                self.head = i
            self.val[i], self.val[r] = self.val[r], self.val[i]
            self.next[self.pre[i]], self.next[self.pre[r]] = r, i
            self.pre[i], self.pre[r] = self.pre[r], self.pre[i]
            self.pre[self.next[i]], self.pre[self.next[r]] = r, i
            self.next[i], self.next[r] = self.next[r], self.next[i]
    # Search: starting from slot `ptr` (default: the head), follow links
    # until val[ptr] >= x; returns (ptr, number_of_slots_inspected).
    def __call__(self, x, ptr=None):
        if ptr is None:
            ptr = self.head
        assert ptr < self.len
        cnt = 1
        while x > self.val[ptr]:
            cnt += 1
            ptr = self.next[ptr]
            # Wrapping back to the head would mean x is not in the list.
            assert ptr != self.head
        return ptr, cnt
    def getRandomPtr(self):
        """Return a uniformly random storage slot index."""
        return np.random.randint(self.len)
# -
# Algorithm A: the deterministic baseline — search from the head of the list.
def A(x, ordered_list):
    """Find x by a plain linear scan; returns (ptr, probe_count)."""
    result = ordered_list(x)
    return result
# Algorithm D: probabilistic search, expected O(n) — probe one random node
# first and start the linear scan from the better of (head, probed node).
def D(x, ordered_list):
    """Search for x after a single random probe; returns (ptr, probe_count)."""
    probe = ordered_list.getRandomPtr()
    probed_val = ordered_list.val[probe]
    if probed_val == x:
        # Lucky hit: report zero extra probes, matching the original contract.
        return probe, 0
    if probed_val < x:
        start = ordered_list.next[probe]
    else:
        start = None  # probe overshot — fall back to scanning from the head
    return ordered_list(x, start)
# Algorithm B: deterministic, average O(sqrt(n)) — inspect the first sqrt(n)
# storage slots for the largest value <= x, then scan links from there.
def B(x, ordered_list):
    """Search for x after seeding the start pointer from sqrt(n) fixed probes."""
    best_ptr = ordered_list.head
    best_val = ordered_list.val[best_ptr]
    probes = int(math.sqrt(ordered_list.len))
    for idx in range(probes):
        candidate = ordered_list.val[idx]
        if best_val < candidate <= x:
            best_ptr = idx
            best_val = candidate
    return ordered_list(x, best_ptr)
# Algorithm C: the Sherwood variant of B — probe sqrt(n) slots chosen
# uniformly at random (without replacement) instead of the first sqrt(n).
def C(x, ordered_list):
    """Search for x after seeding the start pointer from sqrt(n) random probes."""
    best_ptr = ordered_list.head
    best_val = ordered_list.val[best_ptr]
    probes = int(math.sqrt(ordered_list.len))
    sampled = np.random.choice(ordered_list.len, size=probes, replace=False)
    for slot in sampled:
        candidate = ordered_list.val[slot]
        if best_val < candidate <= x:
            best_ptr = slot
            best_val = candidate
    return ordered_list(x, best_ptr)
# Performance benchmark: worst-case and average probe counts for A-D.
# +
# Renamed from `len`: rebinding the builtin at module level would break any
# later call to len() in this module.
list_len = 10000
test = OrderedList(list_len)
test_times = 5000
worst = {"A":0, "B":0, "C":0, "D":0}
average = {"A":0., "B":0., "C":0., "D":0.}
alg_s = {"A":A, "B":B, "C":C, "D":D}
for name, alg in alg_s.items():
    for i in range(test_times):
        x = np.random.randint(list_len)
        ptr, cnt = alg(x, test)
        # Every algorithm must locate the exact key.
        assert test.val[ptr] == x
        average[name] += cnt
        if cnt > worst[name]:
            worst[name] = cnt
    average[name] /= test_times
print(worst)
print(average)
# -
# ## P83-EX9
# (English translation of the note below: target_k = number of queens to
# build; k = rows already safely filled; col = columns holding a queen;
# diag45 / diag135 = current 45/135-degree diagonal conflict tables;
# cnt = visited-node count.)
'''
target_k:目标构建的k皇后
k:当前已放置的安全的前k行皇后
col:存在皇后的列
diag45:当前45度对角线冲突表
diag135:当前135度对角线冲突表
cnt:访问节点计数
'''
def backtrace(target_k, col, diag45, diag135, row=0):
    """Depth-first backtracking placement of queens on rows row..target_k-1.

    col / diag45 / diag135 are in-place stacks holding, for rows 0..row-1,
    the occupied columns and the 45/135-degree diagonals (c - r and c + r).
    Returns (success, visited_node_count); on success, `col` holds a full
    solution (col[r] is the queen's column on row r).
    """
    if row == target_k:
        return True, 0
    cnt = 0
    for _col in range(target_k):
        if _col not in col and _col - row not in diag45 \
                and _col + row not in diag135:
            col.append(_col)
            diag45.append(_col - row)
            diag135.append(_col + row)
            # Visiting this node: count it.
            cnt += 1
            isSuccess, _cnt = backtrace(target_k, col, diag45, diag135, row + 1)
            # Add the subtree's visit count.
            cnt += _cnt
            if isSuccess:
                return True, cnt
            col.pop()
            diag45.pop()
            diag135.pop()
    return False, cnt
# +
def QueensLV(target_k, stepVegas):
    """Greedy Las Vegas n-queens (the first `stepVegas` queens random).

    Each of the first stepVegas rows gets a queen drawn uniformly among its
    safe columns (reservoir sampling); the rest of the board is completed
    deterministically by backtrace().  Returns (success, node_count), where
    node_count includes the stepVegas random placements.
    """
    col = []
    diag45 = []
    diag135 = []
    k = 0  # rows placed so far == the row currently being filled
    nb = 0
    while True:
        nb = 0
        for _col in range(target_k):
            # BUG FIX: the original tested diagonals against `row`, a variable
            # initialised to 0 and never updated, so conflicts on every later
            # row were computed relative to row 0.  The current row is k.
            if _col not in col and _col - k not in diag45 \
                    and _col + k not in diag135:
                nb += 1
                # Reservoir sampling: keep the nb-th safe column with prob 1/nb,
                # giving a uniform choice among all safe columns of this row.
                if np.random.randint(nb) == 0:
                    col_candidate = _col
        if nb > 0:
            col.append(col_candidate)
            diag45.append(col_candidate - k)
            diag135.append(col_candidate + k)
            k += 1
        if nb == 0 or k >= stepVegas:
            break
    if nb > 0:
        isSuccess, cnt = backtrace(target_k, col, diag45, diag135, k)
        return isSuccess, cnt + stepVegas
    else:
        # Dead end during the random phase: report failure.
        return False, stepVegas
# -
# For each board size n, sweep stepVegas and keep the value minimising the
# expected total work t = s + (1 - p) * e / p, where p is the success rate,
# s the mean node count of successful runs and e that of failed runs.
times = 1000
epoch = 100
ans = {}
for n in range(12, 21):
    best_s = 0.
    best_e = 0.
    best_t = math.inf
    bestStepVeags = 0
    bestSuccessRate = 0.
    for stepVegas in range(1, n + 1):
        successRate = 0.
        s = 0.
        e = 0.
        for _ in range(times):
            isSuccess, cnt = QueensLV(n, stepVegas)
            if isSuccess:
                successRate += 1
                s += cnt
            else:
                e += cnt
        # The 1e-6 terms guard against division by zero when every run
        # succeeds or every run fails.
        successRate += 1e-6
        s /= successRate
        e /= times - successRate + 1e-6
        successRate /= times
        t = s + (1 - successRate) * e / successRate
        if t < best_t:
            best_t = t
            best_e = e
            best_s = s
            bestSuccessRate = successRate
            bestStepVeags = stepVegas
    ans[n] = (bestStepVeags, bestSuccessRate, best_s, best_e, best_t)
print(ans)
| code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.4 64-bit (''learn-env'': conda)'
# name: python3
# ---
# # Modeling Notebook with Balanced Dataset
# # Importing Packages
# +
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
import pickle
from tqdm import tqdm
tqdm.pandas(desc="progress-bar")
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
# %reload_ext autoreload
# %autoreload 2
import sys
sys.path.append("../py")
from utils import *
from preprocess import *
# -
# Load the balanced train / validation / test splits.  Context managers close
# the file handles deterministically; the original pickle.load(open(...))
# calls leaked them.
with open("../pickle/train_bal.pickle", "rb") as fh:
    train = pickle.load(fh)
with open("../pickle/val_bal.pickle", "rb") as fh:
    val = pickle.load(fh)
with open("../pickle/test_bal.pickle", "rb") as fh:
    test = pickle.load(fh)
train.head()
train.target.value_counts()
val.head()
# Normalise the raw tweet text with the project's preprocess() helper.
train.tweet = train.tweet.apply(lambda x: preprocess(x))
train.head()
val.tweet = val.tweet.apply(lambda x: preprocess(x))
val.head()
test.tweet = test.tweet.apply(lambda x: preprocess(x))
test.head()
# # Modeling
# +
# Vectorise with TF-IDF: fit on the training text only, then transform val.
X_tr = train.tweet.values
X_val = val.tweet.values
y_tr = train.target.values
y_val = val.target.values
vec = TfidfVectorizer()
tfidf_tr = vec.fit_transform(X_tr)
tfidf_val = vec.transform(X_val)
# -
# ## Multinomial Naive Bayes Model
nb = MultinomialNB().fit(tfidf_tr, y_tr)
y_pr_nb_val = nb.predict(tfidf_val)
get_metrics_confusion(tfidf_val, y_val, y_pr_nb_val, nb)
# ## Random Forest Classifier
rf = RandomForestClassifier(n_estimators=100, random_state=42).fit(tfidf_tr, y_tr)
y_pr_rf_val = rf.predict(tfidf_val)
get_metrics_confusion(tfidf_val, y_val, y_pr_rf_val, rf)
# ## Logistic Regression
log = LogisticRegression(random_state=42).fit(tfidf_tr, y_tr)
y_pr_log_val = log.predict(tfidf_val)
get_metrics_confusion(tfidf_val, y_val, y_pr_log_val, log)
# ## Support Vector Machine
svc = svm.LinearSVC(random_state=42).fit(tfidf_tr, y_tr)
y_pr_svc_val = svc.predict(tfidf_val)
# The SVC path uses the *_2 metric helpers — presumably because LinearSVC
# has no predict_proba (verify in ../py/utils.py).
get_metrics_2(tfidf_val, y_val, y_pr_svc_val, svc)
get_confusion(y_val, y_pr_svc_val)
# ## AdaBoost Classifier
abc = AdaBoostClassifier(
    DecisionTreeClassifier(max_depth=1),
    n_estimators=200,
    random_state=42
).fit(tfidf_tr, y_tr)
y_pr_abc_val = abc.predict(tfidf_val)
get_metrics_confusion(tfidf_val, y_val, y_pr_abc_val, abc)
# ## Gradient Boosting
gbc = GradientBoostingClassifier(random_state=42).fit(tfidf_tr, y_tr)
y_pr_gbc_val = gbc.predict(tfidf_val)
get_metrics_confusion(tfidf_val, y_val, y_pr_gbc_val, gbc)
# ## Metrics Dataframe
# Collect validation metrics for all six models into one table.
data = {'Accuracy': [accuracy(y_val, y_pr_nb_val),
                     accuracy(y_val, y_pr_rf_val),
                     accuracy(y_val, y_pr_log_val),
                     accuracy(y_val, y_pr_svc_val),
                     accuracy(y_val, y_pr_abc_val),
                     accuracy(y_val, y_pr_gbc_val)],
        'F1 Score': [f1(y_val, y_pr_nb_val),
                     f1(y_val, y_pr_rf_val),
                     f1(y_val, y_pr_log_val),
                     f1(y_val, y_pr_svc_val),
                     f1(y_val, y_pr_abc_val),
                     f1(y_val, y_pr_gbc_val)],
        'Recall': [recall(y_val, y_pr_nb_val),
                   recall(y_val, y_pr_rf_val),
                   recall(y_val, y_pr_log_val),
                   recall(y_val, y_pr_svc_val),
                   recall(y_val, y_pr_abc_val),
                   recall(y_val, y_pr_gbc_val)],
        'Precision': [precision(y_val, y_pr_nb_val),
                      precision(y_val, y_pr_rf_val),
                      precision(y_val, y_pr_log_val),
                      precision(y_val, y_pr_svc_val),
                      precision(y_val, y_pr_abc_val),
                      precision(y_val, y_pr_gbc_val)],
        'ROC-AUC': [auc(tfidf_val, y_val, nb),
                    auc(tfidf_val, y_val, rf),
                    auc(tfidf_val, y_val, log),
                    auc2(tfidf_val, y_val, svc),
                    auc(tfidf_val, y_val, abc),
                    auc(tfidf_val, y_val, gbc)],
        'PR AUC': [aps(tfidf_val, y_val, nb),
                   aps(tfidf_val, y_val, rf),
                   aps(tfidf_val, y_val, log),
                   aps2(tfidf_val, y_val, svc),
                   aps(tfidf_val, y_val, abc),
                   aps(tfidf_val, y_val, gbc)]}
metrics5 = pd.DataFrame(data=data, index = ['Multinomial Naive Bayes',
                                            'Random Forest',
                                            'Logistic Regression',
                                            'Support Vector Machine',
                                            'AdaBoost Classifier',
                                            'Gradient Boosting Classifier'])
metrics5.to_csv("../data/metrics/metrics5.csv")
metrics5
# NOTE(review): `model` is never defined in this notebook — the dump below
# raises NameError as written; presumably one of the fitted estimators
# (e.g. svc) was intended.
pkl_filename = "pickle_model.pkl"
with open(pkl_filename, 'wb') as file:
    pickle.dump(model, file)
# NOTE(review): this reloads the raw test split, discarding the preprocess()
# pass applied to `test` earlier in the notebook — confirm that evaluating on
# un-preprocessed text is intentional.
test = pickle.load(open("../pickle/test_bal.pickle", "rb"))
test.head()
# # Holdout Set
X_tt = test.tweet
y_tt = test.target
# Reuse the TF-IDF vocabulary fitted on the training split.
tfidf_tt = vec.transform(X_tt)
y_pr_nb_tt = nb.predict(tfidf_tt)
y_pr_rf_tt = rf.predict(tfidf_tt)
y_pr_log_tt = log.predict(tfidf_tt)
y_pr_svc_tt = svc.predict(tfidf_tt)
y_pr_abc_tt = abc.predict(tfidf_tt)
y_pr_gbc_tt = gbc.predict(tfidf_tt)
get_metrics_confusion(tfidf_tt, y_tt, y_pr_nb_tt, nb)
get_metrics_confusion(tfidf_tt, y_tt, y_pr_rf_tt, rf)
get_metrics_confusion(tfidf_tt, y_tt, y_pr_log_tt, log)
# ###
get_metrics_2(tfidf_tt, y_tt, y_pr_svc_tt, svc)
get_confusion(y_tt, y_pr_svc_tt)
get_metrics_confusion(tfidf_tt, y_tt, y_pr_abc_tt, abc)
get_metrics_confusion(tfidf_tt, y_tt, y_pr_gbc_tt, gbc)
# Same metrics table as metrics5, but on the holdout set.
data6 = {'Accuracy': [accuracy(y_tt, y_pr_nb_tt),
                      accuracy(y_tt, y_pr_rf_tt),
                      accuracy(y_tt, y_pr_log_tt),
                      accuracy(y_tt, y_pr_svc_tt),
                      accuracy(y_tt, y_pr_abc_tt),
                      accuracy(y_tt, y_pr_gbc_tt)],
         'F1 Score': [f1(y_tt, y_pr_nb_tt),
                      f1(y_tt, y_pr_rf_tt),
                      f1(y_tt, y_pr_log_tt),
                      f1(y_tt, y_pr_svc_tt),
                      f1(y_tt, y_pr_abc_tt),
                      f1(y_tt, y_pr_gbc_tt)],
         'Recall': [recall(y_tt, y_pr_nb_tt),
                    recall(y_tt, y_pr_rf_tt),
                    recall(y_tt, y_pr_log_tt),
                    recall(y_tt, y_pr_svc_tt),
                    recall(y_tt, y_pr_abc_tt),
                    recall(y_tt, y_pr_gbc_tt)],
         'Precision': [precision(y_tt, y_pr_nb_tt),
                       precision(y_tt, y_pr_rf_tt),
                       precision(y_tt, y_pr_log_tt),
                       precision(y_tt, y_pr_svc_tt),
                       precision(y_tt, y_pr_abc_tt),
                       precision(y_tt, y_pr_gbc_tt)],
         'ROC-AUC': [auc(tfidf_tt, y_tt, nb),
                     auc(tfidf_tt, y_tt, rf),
                     auc(tfidf_tt, y_tt, log),
                     auc2(tfidf_tt, y_tt, svc),
                     auc(tfidf_tt, y_tt, abc),
                     auc(tfidf_tt, y_tt, gbc)],
         'PR AUC': [aps(tfidf_tt, y_tt, nb),
                    aps(tfidf_tt, y_tt, rf),
                    aps(tfidf_tt, y_tt, log),
                    aps2(tfidf_tt, y_tt, svc),
                    aps(tfidf_tt, y_tt, abc),
                    aps(tfidf_tt, y_tt, gbc)]}
metrics6 = pd.DataFrame(data=data6, index = ['Multinomial Naive Bayes',
                                             'Random Forest',
                                             'Logistic Regression',
                                             'Support Vector Machine',
                                             'AdaBoost Classifier',
                                             'Gradient Boosting Classifier'])
metrics6.to_csv("../data/metrics/metrics6.csv")
metrics6
# # Hyperparameters Tuning
# ## Logistic Regression
logreg = LogisticRegression(random_state=42)
# 10-fold CV over regularisation strength, penalty and solver.
params = {'C': [0.001, 0.01, 0.1, 1, 10],
          'penalty': ['l1', 'l2', 'elasticnet'],
          'solver': ['liblinear', 'newton-cg', 'lbfgs', 'sag', 'saga']}
gslog = GridSearchCV(estimator = logreg,
                     param_grid = params,
                     cv = 10,
                     n_jobs = -1,
                     verbose=3).fit(tfidf_tr, y_tr)
y_pred_gslog = gslog.predict(tfidf_val)
print("Best: %f using %s" % (gslog.best_score_, gslog.best_params_))
get_metrics_confusion(tfidf_val, y_val, y_pred_gslog, gslog)
# Refit a standalone model with the winning parameters.
log_best = LogisticRegression(random_state=42,
                              C=10,
                              penalty='l2',
                              solver='lbfgs').fit(tfidf_tr, y_tr)
y_pred_log_best = log_best.predict(tfidf_val)
get_metrics_confusion(tfidf_val, y_val, y_pred_log_best, log_best)
# ## Random Forest Classifier
# Coordinate-wise tuning: each search fixes the winners of the previous one
# and sweeps the next hyperparameter.
rfc = RandomForestClassifier(random_state=42)
params = {'n_estimators': [50, 100, 200, 400, 600, 800, 1000]}
gsrfc = GridSearchCV(estimator = rfc,
                     param_grid = params,
                     cv = 5,
                     n_jobs = -1,
                     verbose=2).fit(tfidf_tr, y_tr)
y_pred_gsrfc = gsrfc.predict(tfidf_val)
print("Best: %f using %s" % (gsrfc.best_score_, gsrfc.best_params_))
rfc = RandomForestClassifier(random_state=42)
params = {'n_estimators': [500, 1000, 2000, 5000]}
gsrfc = GridSearchCV(estimator = rfc,
                     param_grid = params,
                     cv = 5,
                     n_jobs = -1,
                     verbose=2).fit(tfidf_tr, y_tr)
y_pred_gsrfc = gsrfc.predict(tfidf_val)
print("Best: %f using %s" % (gsrfc.best_score_, gsrfc.best_params_))
rfc = RandomForestClassifier(random_state=42)
params = {'n_estimators': [1000],
          'max_depth': [500, 1000, 1500, 2000],
         }
gsrfc = GridSearchCV(estimator = rfc,
                     param_grid = params,
                     cv = 5,
                     n_jobs = -1,
                     verbose=2).fit(tfidf_tr, y_tr)
y_pred_gsrfc = gsrfc.predict(tfidf_val)
print("Best: %f using %s" % (gsrfc.best_score_, gsrfc.best_params_))
rfc = RandomForestClassifier(random_state=42)
params = {'n_estimators': [1000],
          'max_depth': [1500],
          'min_samples_leaf': [1, 2, 4]
         }
gsrfc = GridSearchCV(estimator = rfc,
                     param_grid = params,
                     cv = 5,
                     n_jobs = -1,
                     verbose=2).fit(tfidf_tr, y_tr)
y_pred_gsrfc = gsrfc.predict(tfidf_val)
print("Best: %f using %s" % (gsrfc.best_score_, gsrfc.best_params_))
rfc = RandomForestClassifier(random_state=42)
params = {'n_estimators': [1000],
          'max_depth': [1500],
          'min_samples_leaf': [1],
          'min_samples_split': [2, 5, 10]
         }
gsrfc = GridSearchCV(estimator = rfc,
                     param_grid = params,
                     cv = 5,
                     n_jobs = -1,
                     verbose=2).fit(tfidf_tr, y_tr)
y_pred_gsrfc = gsrfc.predict(tfidf_val)
print("Best: %f using %s" % (gsrfc.best_score_, gsrfc.best_params_))
# Refit a standalone model with the winning parameters.
rfc_best = RandomForestClassifier(random_state=42,
                                  max_depth=1500,
                                  min_samples_leaf=1,
                                  min_samples_split=5,
                                  n_estimators=1000).fit(tfidf_tr, y_tr)
y_pred_rfc_best = rfc_best.predict(tfidf_val)
get_metrics_confusion(tfidf_val, y_val, y_pred_rfc_best, rfc_best)
# ## Support Vector Machine
from sklearn.svm import SVC


def _run_svm_gridsearch(param_grid):
    """Grid-search an SVC on the training TF-IDF matrix.

    Prints the best CV score/params and returns (fitted_search, val_preds).
    """
    search = GridSearchCV(SVC(),
                          param_grid,
                          refit=True,
                          verbose=1)
    search.fit(tfidf_tr, y_tr)
    predictions = search.predict(tfidf_val)
    print("Best: %f using %s" % (search.best_score_, search.best_params_))
    return search, predictions


# 1) Coarse sweep over the regularization strength C.
gssvm, y_pred_gssvm = _run_svm_gridsearch({'C': [0.1, 1, 10, 100]})
# 2) Fix C, tune the kernel coefficient gamma.
gssvm, y_pred_gssvm = _run_svm_gridsearch({'C': [10],
                                           'gamma': [1, 0.1, 0.01, 0.001]})
# 3) Fix C and gamma, compare kernels.
gssvm, y_pred_gssvm = _run_svm_gridsearch({'C': [10],
                                           'gamma': [0.1],
                                           'kernel': ['rbf', 'poly', 'sigmoid']})
get_metrics_2(tfidf_val, y_val, y_pred_gssvm, gssvm)
get_confusion(y_val, y_pred_gssvm)

# Refit the winning configuration as a standalone model and evaluate it.
svm_best = SVC(C=10, gamma=0.1, kernel='sigmoid').fit(tfidf_tr, y_tr)
y_pred_svm_best = svm_best.predict(tfidf_val)
get_metrics_2(tfidf_val, y_val, y_pred_svm_best, svm_best)
get_confusion(y_val, y_pred_svm_best)
import pickle

# Persist the winning SVM so it can be reloaded later without retraining.
with open('../pickle/best_model.pickle', 'wb') as f:
    pickle.dump(svm_best, f, protocol=pickle.HIGHEST_PROTOCOL)
# Collect validation-set metrics for the three tuned models into one table.
# The metric helpers (accuracy/f1/recall/precision/auc/aps) are defined
# earlier in the notebook; auc2/aps2 are presumably the variants used for
# classifiers without predict_proba (e.g. SVC) — TODO confirm.
data7 = {'Accuracy': [accuracy(y_val, y_pred_log_best),
                      accuracy(y_val, y_pred_gsrfc),
                      accuracy(y_val, y_pred_svm_best)],
         'F1 Score': [f1(y_val, y_pred_log_best),
                      f1(y_val, y_pred_gsrfc),
                      f1(y_val, y_pred_svm_best)],
         'Recall': [recall(y_val, y_pred_log_best),
                    recall(y_val, y_pred_gsrfc),
                    recall(y_val, y_pred_svm_best)],
         'Precision': [precision(y_val, y_pred_log_best),
                       precision(y_val, y_pred_gsrfc),
                       precision(y_val, y_pred_svm_best)],
         'ROC-AUC': [auc(tfidf_val, y_val, log_best),
                     auc(tfidf_val, y_val, gsrfc),
                     auc2(tfidf_val, y_val, svm_best)],
         'PR AUC': [aps(tfidf_val, y_val, log_best),
                    aps(tfidf_val, y_val, gsrfc),
                    aps2(tfidf_val, y_val, svm_best)]}
# One row per model, one column per metric.
metrics7 = pd.DataFrame(data=data7, index=['Logistic Regression w/GridsearchCV',
                                           'Random Forest w/GridsearchCV',
                                           'Support Vector Machine w/GridsearchCV'])
metrics7
# Save the comparison table for the report.
metrics7.to_csv("../data/metrics/metrics7.csv")
# +
# Combined vectorizer + classifier pipeline so that the text-preprocessing
# hyperparameters can be grid-searched jointly with the model.
pipeline = Pipeline([
    ('vect', CountVectorizer()),
    ('tfidf', TfidfTransformer()),
    ('clf', SVC()),
])
# Fixed: the original cell had a dangling 'vect__max_features' key and a
# duplicated closing brace (a SyntaxError). Candidate values follow the
# standard scikit-learn text-feature-extraction grid-search example.
parameters = {
    'vect__max_df': (0.5, 0.75, 1.0),
    'vect__max_features': (None, 5000, 10000, 50000),
}
# -
| notebooks/balanced_modeling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="ZkIHkjDWgySK"
# ##### Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# + cellView="form" colab={} colab_type="code" id="G5eriUZ9g1Ia"
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="iCUZvZvBB7VD"
# # Linear Mixed Effects Models
#
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/probability/examples/Linear_Mixed_Effects_Models"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Models.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Models.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/probability/tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Models.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="yDCkuCjq2DfW"
# A linear mixed effects model is a simple approach for modeling structured linear relationships (Harville, 1997; Laird and Ware, 1982). Each data point consists of inputs of varying type—categorized into groups—and a real-valued output. A linear mixed effects model is a _hierarchical model_: it shares statistical strength across groups in order to improve inferences about any individual data point.
#
# In this tutorial, we demonstrate linear mixed effects models with a real-world example in TensorFlow Probability. We'll use the JointDistributionCoroutine and Markov Chain Monte Carlo (`tfp.mcmc`) modules.
# + [markdown] colab_type="text" id="uiR4-VOt9NFX"
# ### Dependencies & Prerequisites
#
# + colab={} colab_type="code" id="coUnDhkpT5_6"
#@title Import and set ups{ display-mode: "form" }
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
dtype = tf.float64
# %config InlineBackend.figure_format = 'retina'
# %matplotlib inline
plt.style.use('ggplot')
# + [markdown] colab_type="text" id="7nnwjUdVoWN2"
# ### Make things Fast!
# + [markdown] colab_type="text" id="2CK9RaDcoYPG"
# Before we dive in, let's make sure we're using a GPU for this demo.
#
# To do this, select "Runtime" -> "Change runtime type" -> "Hardware accelerator" -> "GPU".
#
# The following snippet will verify that we have access to a GPU.
# + colab={"height": 33} colab_type="code" id="qP_4Xr8vpA42" outputId="c4b93511-6238-4736-9fa2-aeb90d72cb82"
# Verify that a GPU runtime is active before running the demo.
device_name = tf.test.gpu_device_name()
if device_name == '/device:GPU:0':
  print('SUCCESS: Found GPU: {}'.format(device_name))
else:
  print('WARNING: GPU device not found.')
# + [markdown] colab_type="text" id="FJRBc_S0ppfE"
# Note: if for some reason you cannot access a GPU, this colab will still work. (Training will just take longer.)
# + [markdown] colab_type="text" id="eikJTmPgB7VJ"
# ## Data
#
# We use the `InstEval` data set from the popular [`lme4` package in R](https://CRAN.R-project.org/package=lme4) (Bates et al., 2015). It is a data set of courses and their evaluation ratings. Each course includes metadata such as `students`, `instructors`, and `departments`, and the response variable of interest is the evaluation rating.
# + colab={} colab_type="code" id="lZ8OfS3cDMeG"
def load_insteval():
  """Loads the InstEval data set.

  It contains 73,421 university lecture evaluations by students at ETH
  Zurich with a total of 2,972 students, 2,160 professors and
  lecturers, and several student, lecture, and lecturer attributes.
  Implementation is built from the `observations` Python package.

  Returns:
    Tuple of np.ndarray `x_train` with 73,421 rows and 7 columns and
    dictionary `metadata` of column headers (feature names).
  """
  url = ('https://raw.github.com/vincentarelbundock/Rdatasets/master/csv/'
         'lme4/InstEval.csv')
  with requests.Session() as s:
    download = s.get(url)
    f = download.content.decode().splitlines()
  iterator = csv.reader(f)
  # First CSV column is the R row index; drop it from headers and rows.
  columns = next(iterator)[1:]
  # Fixed: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
  # the builtin `int` is the documented replacement.
  x_train = np.array([row[1:] for row in iterator], dtype=int)
  metadata = {'columns': columns}
  return x_train, metadata
# + [markdown] colab_type="text" id="Um0EhvaDQcVI"
# We load and preprocess the data set. We hold out 20% of the data so we can evaluate our fitted model on unseen data points. Below we visualize the first few rows.
# + colab={"height": 202} colab_type="code" id="YY_VbNt6fkcp" outputId="d56aef44-27f3-48a0-98ec-70a6c91426de"
# Load the raw InstEval matrix and label its columns.
data, metadata = load_insteval()
data = pd.DataFrame(data, columns=metadata['columns'])
# Replace the terse lme4 column names with descriptive ones.
data = data.rename(columns={'s': 'students',
                            'd': 'instructors',
                            'dept': 'departments',
                            'y': 'ratings'})
data['students'] -= 1  # start index by 0
# Remap categories to start from 0 and end at max(category).
data['instructors'] = data['instructors'].astype('category').cat.codes
data['departments'] = data['departments'].astype('category').cat.codes
# Hold out 20% of the rows for evaluating on unseen data.
train = data.sample(frac=0.8)
test = data.drop(train.index)
train.head()
# + [markdown] colab_type="text" id="qWttG6OaVFMO"
# We set up the data set in terms of a `features` dictionary of inputs and a `labels` output corresponding to the ratings. Each feature is encoded as an integer and each label (evaluation rating) is encoded as a floating point number.
# + colab={} colab_type="code" id="NzfVQJN9B7VQ"
def get_value(dataframe, key, dtype):
  """Return column `key` of `dataframe` as a NumPy array of `dtype`."""
  # Named function instead of a lambda assigned to a name (PEP 8 E731);
  # the call signature and behavior are unchanged.
  return dataframe[key].values.astype(dtype)

# Integer-encoded categorical features and float ratings for both splits.
features_train = {
    k: get_value(train, key=k, dtype=np.int32)
    for k in ['students', 'instructors', 'departments', 'service']}
labels_train = get_value(train, key='ratings', dtype=np.float32)

features_test = {k: get_value(test, key=k, dtype=np.int32)
                 for k in ['students', 'instructors', 'departments', 'service']}
labels_test = get_value(test, key='ratings', dtype=np.float32)
# + colab={"height": 84} colab_type="code" id="80ylfxWtB7VT" outputId="600c96dc-d220-4ac0-d054-5fddafa365e3"
# Category counts (+1 because the codes are 0-based) and the dataset size.
num_students = max(features_train['students']) + 1
num_instructors = max(features_train['instructors']) + 1
num_departments = max(features_train['departments']) + 1
num_observations = train.shape[0]

print("Number of students:", num_students)
print("Number of instructors:", num_instructors)
print("Number of departments:", num_departments)
print("Number of observations:", num_observations)
# + [markdown] colab_type="text" id="jMRMLuWwB7VX"
# ## Model
#
# A typical linear model assumes independence, where any pair of data points has a constant linear relationship. In the `InstEval` data set, observations arise in groups each of which may have varying slopes and intercepts. Linear mixed effects models, also known as hierarchical linear models or multilevel linear models, capture this phenomenon (<NAME>, 2006).
#
# Examples of this phenomenon include:
#
# # + __Students__. Observations from a student are not independent: some students may systematically give low (or high) lecture ratings.
# # + __Instructors__. Observations from an instructor are not independent: we expect good teachers to generally have good ratings and bad teachers to generally have bad ratings.
# # + __Departments__. Observations from a department are not independent: certain departments may generally have dry material or stricter grading and thus be rated lower than others.
#
# To capture this, recall that for a data set of $N\times D$ features $\mathbf{X}$ and $N$ labels $\mathbf{y}$, linear regression posits the model
#
# $$
# \begin{equation*}
# \mathbf{y} = \mathbf{X}\beta + \alpha + \epsilon,
# \end{equation*}
# $$
#
# where there is a slope vector $\beta\in\mathbb{R}^D$, intercept $\alpha\in\mathbb{R}$, and random noise $\epsilon\sim\text{Normal}(\mathbf{0}, \mathbf{I})$. We say that $\beta$ and $\alpha$ are "fixed effects": they are effects held constant across the population of data points $(x, y)$. An equivalent formulation of the equation as a likelihood is $\mathbf{y} \sim \text{Normal}(\mathbf{X}\beta + \alpha, \mathbf{I})$. This likelihood is maximized during inference in order to find point estimates of $\beta$ and $\alpha$ that fit the data.
#
# A linear mixed effects model extends linear regression as
#
# $$
# \begin{align*}
# \eta &\sim \text{Normal}(\mathbf{0}, \sigma^2 \mathbf{I}), \\
# \mathbf{y} &= \mathbf{X}\beta + \mathbf{Z}\eta + \alpha + \epsilon.
# \end{align*}
# $$
#
# where there is still a slope vector $\beta\in\mathbb{R}^P$, intercept $\alpha\in\mathbb{R}$, and random noise $\epsilon\sim\text{Normal}(\mathbf{0}, \mathbf{I})$. In addition, there is a term $\mathbf{Z}\eta$, where $\mathbf{Z}$ is a features matrix and $\eta\in\mathbb{R}^Q$ is a vector of random slopes; $\eta$ is normally distributed with variance component parameter $\sigma^2$. $\mathbf{Z}$ is formed by partitioning the original $N\times D$ features matrix in terms of a new $N\times P$ matrix $\mathbf{X}$ and $N\times Q$ matrix $\mathbf{Z}$, where $P + Q=D$: this partition allows us to model the features separately using the fixed effects $\beta$ and the latent variable $\eta$ respectively.
#
# We say the latent variables $\eta$ are "random effects": they are effects that vary across the population (although they may be constant across subpopulations). In particular, because the random effects $\eta$ have mean 0, the data label's mean is captured by $\mathbf{X}\beta + \alpha$. The random effects component $\mathbf{Z}\eta$ captures variations in the data: for example, "Instructor \#54 is rated 1.4 points higher than the mean."
# + [markdown] colab_type="text" id="7B6ROTDQdTjH"
# In this tutorial, we posit the following effects:
#
# # + Fixed effects: `service`. `service` is a binary covariate corresponding to whether the course belongs to the instructor's main department. No matter how much additional data we collect, it can only take on values $0$ and $1$.
# # + Random effects: `students`, `instructors`, and `departments`. Given more observations from the population of course evaluation ratings, we may be looking at new students, teachers, or departments.
#
# In the syntax of R's lme4 package (Bates et al., 2015), the model can be summarized as
#
# ```
# ratings ~ service + (1|students) + (1|instructors) + (1|departments) + 1
# ```
# where `x` denotes a fixed effect, `(1|x)` denotes a random effect for `x`, and `1` denotes an intercept term.
#
# We implement this model below as a JointDistribution. To have better support for parameter tracking (e.g., we want to track all the `tf.Variable` in `model.trainable_variables`), we implement the model template as `tf.Module`.
# + colab={} colab_type="code" id="GS7SjqREp9wC"
class LinearMixedEffectModel(tf.Module):
  """Linear mixed effects model over the InstEval ratings.

  The fixed effects (intercept, service effect) and the three random-effect
  scale parameters are trainable variables (optimized in the M-step);
  calling the instance on a feature dict returns a
  `tfd.JointDistributionSequential` over the student/instructor/department
  random effects plus the ratings likelihood.
  """

  def __init__(self):
    # Set up fixed effects and other parameters.
    # These are free parameters to be optimized in E-steps
    self._intercept = tf.Variable(0., name="intercept")  # alpha in eq
    self._effect_service = tf.Variable(0., name="effect_service")  # beta in eq
    # TransformedVariable with an Exp bijector keeps each stddev positive
    # while optimizing an unconstrained underlying variable.
    self._stddev_students = tfp.util.TransformedVariable(
        1., bijector=tfb.Exp(), name="stddev_students")  # sigma in eq
    self._stddev_instructors = tfp.util.TransformedVariable(
        1., bijector=tfb.Exp(), name="stddev_instructors")  # sigma in eq
    self._stddev_departments = tfp.util.TransformedVariable(
        1., bijector=tfb.Exp(), name="stddev_departments")  # sigma in eq

  def __call__(self, features):
    """Build the joint distribution conditioned on `features`.

    NOTE(review): relies on module-level num_students / num_instructors /
    num_departments; `scale_identity_multiplier` is deprecated in newer TFP
    releases in favor of an explicit `scale_diag` — confirm the pinned
    TFP version before upgrading.
    """
    model = tfd.JointDistributionSequential([
        # Set up random effects.
        tfd.MultivariateNormalDiag(
            loc=tf.zeros(num_students),
            scale_identity_multiplier=self._stddev_students),
        tfd.MultivariateNormalDiag(
            loc=tf.zeros(num_instructors),
            scale_identity_multiplier=self._stddev_instructors),
        tfd.MultivariateNormalDiag(
            loc=tf.zeros(num_departments),
            scale_identity_multiplier=self._stddev_departments),
        # This is the likelihood for the observed.
        # JointDistributionSequential passes previously sampled values to
        # the lambda in reverse declaration order, hence the argument order.
        lambda effect_departments, effect_instructors, effect_students: tfd.Independent(
            tfd.Normal(
                loc=(self._effect_service * features["service"] +
                     tf.gather(effect_students, features["students"], axis=-1) +
                     tf.gather(effect_instructors, features["instructors"], axis=-1) +
                     tf.gather(effect_departments, features["departments"], axis=-1) +
                     self._intercept),
                scale=1.),
            reinterpreted_batch_ndims=1)
    ])

    # To enable tracking of the trainable variables via the created distribution,
    # we attach a reference to `self`. Since all TFP objects sub-class
    # `tf.Module`, this means that the following is possible:
    # LinearMixedEffectModel()(features_train).trainable_variables
    # ==> tuple of all tf.Variables created by LinearMixedEffectModel.
    model._to_track = self
    return model
# Instantiate the model template once; its variables are shared by every
# distribution it builds.
lmm_jointdist = LinearMixedEffectModel()
# Conditioned on feature/predictors from the training data
lmm_train = lmm_jointdist(features_train)
# + colab={"height": 101} colab_type="code" id="WmaYmavUtpGh" outputId="fc84e2cc-e570-431a-8333-9945241c7ab6"
# Inspect the trainable parameters tracked through the joint distribution.
lmm_train.trainable_variables
# + [markdown] colab_type="text" id="3G_0t3jiZps2"
# As a Probabilistic graphical program, we can also visualize the model's structure in terms of its computational graph. This graph encodes dataflow across the random variables in the program, making explicit their relationships in terms of a graphical model (Jordan, 2003).
#
# As a statistical tool, we might look at the graph in order to better see, for example, that `intercept` and `effect_service` are conditionally dependent given `ratings`; this may be harder to see from the source code if the program is written with classes, cross references across modules, and/or subroutines. As a computational tool, we might also notice latent variables flow into the `ratings` variable via `tf.gather` ops. This may be a bottleneck on certain hardware accelerators if indexing `Tensor`s is expensive; visualizing the graph makes this readily apparent.
#
# + colab={"height": 84} colab_type="code" id="Ox_kZeuOygqn" outputId="b9b9ef7e-bc2a-47e5-c9f4-5b9b214b18ac"
lmm_train.resolve_graph()
# + [markdown] colab_type="text" id="ZPZTWsCeB7Va"
# ## Parameter Estimation
#
# Given data, the goal of inference is to fit the model's fixed effects slope $\beta$, intercept $\alpha$, and variance component parameter $\sigma^2$. The maximum likelihood principle formalizes this task as
#
# $$
# \max_{\beta, \alpha, \sigma}~\log p(\mathbf{y}\mid \mathbf{X}, \mathbf{Z}; \beta, \alpha, \sigma) = \max_{\beta, \alpha, \sigma}~\log \int p(\eta; \sigma) ~p(\mathbf{y}\mid \mathbf{X}, \mathbf{Z}, \eta; \beta, \alpha)~d\eta.
# $$
#
# In this tutorial, we use the Monte Carlo EM algorithm to maximize this marginal density (Dempster et al., 1977; Wei and Tanner, 1990).¹ We perform Markov chain Monte Carlo to compute the expectation of the conditional likelihood with respect to the random effects ("E-step"), and we perform gradient descent to maximize the expectation with respect to the parameters ("M-step"):
#
# # + For the E-step, we set up Hamiltonian Monte Carlo (HMC). It takes a current state—the student, instructor, and department effects—and returns a new state. We assign the new state to TensorFlow variables, which will denote the state of the HMC chain.
#
# # + For the M-step, we use the posterior sample from HMC to calculate an unbiased estimate of the marginal likelihood up to a constant. We then apply its gradient with respect to the parameters of interest. This produces an unbiased stochastic descent step on the marginal likelihood. We implement it with the Adam TensorFlow optimizer and minimize the negative of the marginal.
# + colab={} colab_type="code" id="U1Ro35iA7UPG"
def target_log_prob_fn(*x):
  """Joint log-prob of random-effect states `x`, labels pinned to training data."""
  # Named function instead of a lambda assigned to a name (PEP 8 E731).
  return lmm_train.log_prob(x + (labels_train,))

trainable_variables = lmm_train.trainable_variables
# Initialize the chain from a prior draw, dropping the sampled ratings
# (the last element of the joint sample).
current_state = lmm_train.sample()[:-1]
# + colab={"height": 33} colab_type="code" id="QQxq9acQ9MpO" outputId="25f274ac-6bd3-4cff-8c85-d00555c2a426"
# For debugging
target_log_prob_fn(*current_state)
# + colab={} colab_type="code" id="F7uOcwQFB7Vb"
# Set up E-step (MCMC).
hmc = tfp.mcmc.HamiltonianMonteCarlo(
    target_log_prob_fn=target_log_prob_fn,
    step_size=0.015,
    num_leapfrog_steps=3)

kernel_results = hmc.bootstrap_results(current_state)
@tf.function(autograph=False, experimental_compile=True)
def one_e_step(current_state, kernel_results):
  """Advance the HMC chain by one transition (the Monte Carlo E-step)."""
  # hmc.one_step already returns the (next_state, next_kernel_results) pair.
  return hmc.one_step(current_state=current_state,
                      previous_kernel_results=kernel_results)
optimizer = tf.optimizers.Adam(learning_rate=.01)

# Set up M-step (gradient descent).
@tf.function(autograph=False, experimental_compile=True)
def one_m_step(current_state):
  """Take one Adam step on the negative joint log-prob; return the loss."""
  with tf.GradientTape() as tape:
    loss = -target_log_prob_fn(*current_state)
  optimizer.apply_gradients(
      zip(tape.gradient(loss, trainable_variables), trainable_variables))
  return loss
# + [markdown] colab_type="text" id="6BaHczzpkt0k"
# We perform a warm-up stage, which runs one MCMC chain for a number of iterations so that training may be initialized within the posterior's probability mass. We then run a training loop. It jointly runs the E and M-steps and records values during training.
# + colab={} colab_type="code" id="XwZMt2uqVDzh"
num_warmup_iters = 1000
num_iters = 1500

num_accepted = 0  # running count of accepted HMC proposals

# Per-iteration traces of the sampled random effects and the training loss.
effect_students_samples = np.zeros((num_iters, num_students))
effect_instructors_samples = np.zeros((num_iters, num_instructors))
effect_departments_samples = np.zeros((num_iters, num_departments))
loss_history = np.zeros((num_iters,))
# + colab={"height": 134} colab_type="code" id="zxbcYtrUt3OG" outputId="aa6ba06b-2253-4619-8db4-df7db0f44600"
# Run warm-up stage.
# Burns in the chain so training starts inside the posterior's mass;
# acceptance rate is printed periodically as a sanity check.
for t in range(num_warmup_iters):
  current_state, kernel_results = one_e_step(current_state, kernel_results)
  num_accepted += kernel_results.is_accepted.numpy()
  if t % 500 == 0 or t == num_warmup_iters - 1:
    print("Warm-Up Iteration: {:>3} Acceptance Rate: {:.3f}".format(
        t, num_accepted / (t + 1)))

num_accepted = 0  # reset acceptance rate counter

# Run training.
# Each iteration alternates several E-steps (MCMC transitions) with one
# M-step (gradient update on the fixed effects / scales), recording the
# random-effect draws and loss for later analysis.
for t in range(num_iters):
  # run 5 MCMC iterations before every joint EM update
  for _ in range(5):
    current_state, kernel_results = one_e_step(current_state, kernel_results)
  loss = one_m_step(current_state)
  effect_students_samples[t, :] = current_state[0].numpy()
  effect_instructors_samples[t, :] = current_state[1].numpy()
  effect_departments_samples[t, :] = current_state[2].numpy()
  num_accepted += kernel_results.is_accepted.numpy()
  loss_history[t] = loss.numpy()
  if t % 500 == 0 or t == num_iters - 1:
    print("Iteration: {:>4} Acceptance Rate: {:.3f} Loss: {:.3f}".format(
        t, num_accepted / (t + 1), loss_history[t]))
# + [markdown] colab_type="text" id="gyo91h1oVx_L"
# You can also write the warmup for-loop into a `tf.while_loop`, and the training step into a `tf.scan` or `tf.while_loop` for even faster inference. For example:
# + colab={} colab_type="code" id="9WmwCZNQWqh7"
@tf.function(autograph=False, experimental_compile=True)
def run_k_e_steps(k, current_state, kernel_results):
  """Run `k` HMC transitions inside a single compiled tf.while_loop."""
  _, next_state, next_kernel_results = tf.while_loop(
      cond=lambda step, state, results: step < k,
      body=lambda step, state, results: (step + 1, *one_e_step(state, results)),
      loop_vars=(tf.constant(0), current_state, kernel_results))
  return next_state, next_kernel_results
# + [markdown] colab_type="text" id="r6U2zkdbHj5z"
# Above, we did not run the algorithm until a convergence threshold was detected. To check whether training was sensible, we verify that the loss function indeed tends to converge over training iterations.
# + colab={"height": 281} colab_type="code" id="HR4A6FLCwD7b" outputId="d03c1e63-b1d2-48c3-e49a-043edc06c74f"
# Training-loss curve; a flattening curve indicates convergence.
plt.plot(loss_history)
plt.ylabel(r'Loss $-\log$ $p(y\mid\mathbf{x})$')
plt.xlabel('Iteration')
plt.show()
# + [markdown] colab_type="text" id="Fz7FphO9LwVE"
# We also use a trace plot, which shows the Markov chain Monte Carlo algorithm's trajectory across specific latent dimensions. Below we see that specific instructor effects indeed meaningfully transition away from their initial state and explore the state space. The trace plot also indicates that the effects differ across instructors but with similar mixing behavior.
# + colab={"height": 281} colab_type="code" id="_NvaIhgrvY9o" outputId="45a53bbe-9fd3-45ae-f72d-2178629346cf"
# Trace plot for the first 7 instructor effects across MCMC iterations.
for i in range(7):
  plt.plot(effect_instructors_samples[:, i])
plt.legend([i for i in range(7)], loc='lower right')
plt.ylabel('Instructor Effects')
plt.xlabel('Iteration')
plt.show()
# + [markdown] colab_type="text" id="-xVCGWZoB7Vd"
# ## Criticism
#
# Above, we fitted the model. We now look into criticizing its fit using data, which lets us explore and better understand the model. One such technique is a residual plot, which plots the difference between the model's predictions and ground truth for each data point. If the model were correct, then their difference should be standard normally distributed; any deviations from this pattern in the plot indicate model misfit.
#
# We build the residual plot by first forming the posterior predictive distribution over ratings, which replaces the prior distribution on the random effects with its posterior given training data. In particular, we run the model forward and intercept its dependence on prior random effects with their inferred posterior means.²
# + colab={} colab_type="code" id="p4vreJekB7Vf"
# Build the joint distribution conditioned on the held-out features.
lmm_test = lmm_jointdist(features_test)

# Posterior mean of each random-effect vector, averaged over MCMC draws.
[
    effect_students_mean,
    effect_instructors_mean,
    effect_departments_mean,
] = [
    np.mean(x, axis=0).astype(np.float32) for x in [
        effect_students_samples,
        effect_instructors_samples,
        effect_departments_samples
    ]
]

# Get the posterior predictive distribution
# (random effects pinned at their posterior means; the final component is
# the ratings distribution).
(*posterior_conditionals, ratings_posterior), _ = lmm_test.sample_distributions(
    value=(
        effect_students_mean,
        effect_instructors_mean,
        effect_departments_mean,
    ))

ratings_prediction = ratings_posterior.mean()
# + [markdown] colab_type="text" id="zTQJ3d-Hv93z"
# Upon visual inspection, the residuals look somewhat standard-normally distributed. However, the fit is not perfect: there is larger probability mass in the tails than a normal distribution, which indicates the model might improve its fit by relaxing its normality assumptions.
#
# In particular, although it is most common to use a normal distribution to model ratings in the `InstEval` data set, a closer look at the data reveals that course evaluation ratings are in fact ordinal values from 1 to 5. This suggests that we should be using an ordinal distribution, or even Categorical if we have enough data to throw away the relative ordering. This is a one-line change to the model above; the same inference code is applicable.
# + colab={"height": 282} colab_type="code" id="0jIxfwuvEWLG" outputId="30c9d13f-c4d0-441a-d61b-e8bd57ecc057"
# Residuals of posterior-mean predictions vs. held-out ratings; roughly
# standard-normal residuals would indicate a good fit.
plt.title("Residuals for Predicted Ratings on Test Set")
plt.xlim(-4, 4)
plt.ylim(0, 800)
plt.hist(ratings_prediction - labels_test, 75)
plt.show()
# + [markdown] colab_type="text" id="wi4hnI8UxFD2"
# To explore how the model makes individual predictions, we look at the histogram of effects for students, instructors, and departments. This lets us understand how individual elements in a data point's feature vector tends to influence the outcome.
#
# Not surprisingly, we see below that each student typically has little effect on an instructor's evaluation rating. Interestingly, we see that the department an instructor belongs to has a large effect.
# + colab={"height": 282} colab_type="code" id="MU-L604RFkxg" outputId="a95dd688-9005-4cb6-802a-76e70d6d2efb"
# Histograms of the posterior-mean effects for each grouping factor.
plt.title("Histogram of Student Effects")
plt.hist(effect_students_mean, 75)
plt.show()
# + colab={"height": 282} colab_type="code" id="22qgTW7SGulD" outputId="3126539a-f08b-4dae-b766-c9c5b46d7395"
plt.title("Histogram of Instructor Effects")
plt.hist(effect_instructors_mean, 75)
plt.show()
# + colab={"height": 282} colab_type="code" id="lTd2_uodGu2F" outputId="ba12baae-5a36-4c29-d0ab-144e4dd4c419"
plt.title("Histogram of Department Effects")
plt.hist(effect_departments_mean, 75)
plt.show()
# + [markdown] colab_type="text" id="Ck3cPwIjvyqO"
# ## Footnotes
#
# ¹ Linear mixed effect models are a special case where we can analytically compute its marginal density. For the purposes of this tutorial, we demonstrate Monte Carlo EM, which more readily applies to non-analytic marginal densities such as if the likelihood were extended to be Categorical instead of Normal.
#
# ² For simplicity, we form the predictive distribution's mean using only one forward pass of the model. This is done by conditioning on the posterior mean and is valid for linear mixed effects models. However, this is not valid in general: the posterior predictive distribution's mean is typically intractable and requires taking the empirical mean across multiple forward passes of the model given posterior samples.
# + [markdown] colab_type="text" id="8pm6qMKvB7WB"
# ## Acknowledgments
#
# This tutorial was originally written in Edward 1.0 ([source](https://github.com/blei-lab/edward/blob/master/notebooks/linear_mixed_effects_models.ipynb)). We thank all contributors to writing and revising that version.
# + [markdown] colab_type="text" id="sHw7WpM1IzLO"
# ## References
#
# 1. <NAME> and <NAME> and <NAME> and <NAME>. Fitting Linear Mixed-Effects Models Using lme4. _Journal of Statistical Software_, 67(1):1-48, 2015.
#
# 2. <NAME>, <NAME>, and <NAME>. Maximum likelihood from incomplete data via the EM algorithm. _Journal of the Royal Statistical Society, Series B (Methodological)_, 1-38, 1977.
#
# 3. <NAME> and <NAME>. _Data analysis using regression and multilevel/hierarchical models._ Cambridge University Press, 2006.
#
# 4. <NAME>. Maximum likelihood approaches to variance component estimation and to related problems. _Journal of the American Statistical Association_, 72(358):320-338, 1977.
#
# 5. <NAME>. An Introduction to Graphical Models. Technical Report, 2003.
#
# 6. <NAME> and <NAME>. Random-effects models for longitudinal data. _Biometrics_, 963-974, 1982.
#
# 7. <NAME> and <NAME>. A Monte Carlo implementation of the EM algorithm and the poor man's data augmentation algorithms. _Journal of the American Statistical Association_, 699-704, 1990.
| site/en-snapshot/probability/examples/Linear_Mixed_Effects_Models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Fractional BIM
# + colab={"base_uri": "https://localhost:8080/"} id="5ssUqKOaPVaE" outputId="38c1005a-39f4-4307-e305-19a4c9819396"
# Install Pyomo and solvers for Google Colab
import sys
if "google.colab" in sys.modules:
# !wget -N -q https://raw.githubusercontent.com/jckantor/MO-book/main/tools/install_on_colab.py
# %run install_on_colab.py
# + id="SKIqjt5CPSJf"
import pyomo.environ as pyo
# + id="m33AGCU_PSJw"
def BIM_with_revenues_minus_costs():
    """Build the BIM LP maximizing profit = revenue - variable cost - fixed cost."""
    m = pyo.ConcreteModel('BIM')

    # Decision variables: units produced of each product.
    m.x1 = pyo.Var(domain=pyo.NonNegativeReals)
    m.x2 = pyo.Var(domain=pyo.NonNegativeReals)

    m.revenue = pyo.Expression(expr=12 * m.x1 + 9 * m.x2)
    m.variable_cost = pyo.Expression(expr=7 / 6 * m.x1 + 5 / 6 * m.x2)
    m.fixed_cost = 100

    m.profit = pyo.Objective(
        expr=m.revenue - m.variable_cost - m.fixed_cost,
        sense=pyo.maximize)

    # Raw-material availability constraints.
    m.silicon = pyo.Constraint(expr=m.x1 <= 1000)
    m.germanium = pyo.Constraint(expr=m.x2 <= 1500)
    m.plastic = pyo.Constraint(expr=m.x1 + m.x2 <= 1750)
    m.copper = pyo.Constraint(expr=4 * m.x1 + 2 * m.x2 <= 4800)
    return m
# + id="VIsJHE5oXTKw"
def BIM_with_revenues_over_costs():
    """Build the linearised fractional BIM model (maximise revenue/cost).

    The ratio objective revenue/(variable+fixed cost) is linearised with
    the substitution y = t*x: the denominator is pinned to 1 by the `frac`
    constraint, every resource constraint is scaled by t, and the objective
    reduces to the (linear) revenue in the y variables.
    Returns the (unsolved) Pyomo ConcreteModel.
    """
    m = pyo.ConcreteModel('BIM')

    # scaled production variables y = t*x plus the scaling variable t
    m.y1 = pyo.Var(within=pyo.NonNegativeReals)
    m.y2 = pyo.Var(within=pyo.NonNegativeReals)
    m.t = pyo.Var(within=pyo.NonNegativeReals)

    # economics in the scaled variables
    m.revenue = pyo.Expression(expr=12 * m.y1 + 9 * m.y2)
    m.variable_cost = pyo.Expression(expr=7 / 6 * m.y1 + 5 / 6 * m.y2)
    m.fixed_cost = 100

    # with the denominator normalised to 1, the ratio objective is just revenue
    m.profit = pyo.Objective(
        sense=pyo.maximize,
        expr=m.revenue,
    )

    # original resource constraints, each scaled by t
    m.silicon = pyo.Constraint(expr=m.y1 <= 1000 * m.t)
    m.germanium = pyo.Constraint(expr=m.y2 <= 1500 * m.t)
    m.plastic = pyo.Constraint(expr=m.y1 + m.y2 <= 1750 * m.t)
    m.copper = pyo.Constraint(expr=4 * m.y1 + 2 * m.y2 <= 4800 * m.t)

    # normalisation: total (scaled) cost equals 1
    m.frac = pyo.Constraint(expr=m.variable_cost + m.fixed_cost * m.t == 1)
    return m
# + colab={"base_uri": "https://localhost:8080/"} id="l5emuDb-2xFf" outputId="6909d47d-77be-4a3a-8b76-498362de06ea"
# Solve the linear (revenue minus costs) formulation with GLPK and report
# the optimal plan, its profit, revenue and total cost.
BIM_linear = BIM_with_revenues_minus_costs()
results = pyo.SolverFactory('glpk').solve(BIM_linear)
print('X=({:.1f},{:.1f}) value={:.3f} revenue={:.3f} cost={:.3f}'.format(
    pyo.value(BIM_linear.x1),
    pyo.value(BIM_linear.x2),
    pyo.value(BIM_linear.profit),
    pyo.value(BIM_linear.revenue),
    pyo.value(BIM_linear.variable_cost)+pyo.value(BIM_linear.fixed_cost)))
# + colab={"base_uri": "https://localhost:8080/"} id="FNqp7mptZDXl" outputId="b8b4f309-c735-4c44-9e38-4eed88357d7c"
# Solve the fractional (revenue over costs) formulation. The model was
# linearised with the substitution y = t*x, so every reported quantity is
# divided by t to recover the original decision variables.
BIM_fractional = BIM_with_revenues_over_costs()
results = pyo.SolverFactory('glpk').solve(BIM_fractional)
t = pyo.value(BIM_fractional.t)
print('X=({:.1f},{:.1f}) value={:.3f} revenue={:.3f} cost={:.3f}'.format(
    pyo.value(BIM_fractional.y1/t),
    pyo.value(BIM_fractional.y2/t),
    pyo.value(BIM_fractional.profit/(BIM_fractional.variable_cost+BIM_fractional.fixed_cost*t)),
    pyo.value(BIM_fractional.revenue/t),
    pyo.value(BIM_fractional.variable_cost/t)+pyo.value(BIM_fractional.fixed_cost)))
# -
| _build/jupyter_execute/notebooks/02/fractional-bim.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # More efficient broadcast of arrays with memmap
# Data movement is where IPython's naïve model suffers the most.
# But knowing about your cluster lets you make smarter decisions about data movement than a simple `rc[:].push`.
# +
import socket
import os, sys, re
import numpy as np
import ipyparallel as parallel
# -
# Connect to the running IPython cluster and ask every engine for its
# hostname, so data can later be pushed once per host instead of once
# per engine.
#rc = parallel.Client(profile='dirac')
rc = parallel.Client()
eall = rc[:]
engine_hosts = eall.apply_async(socket.gethostname).get_dict()
engine_hosts
# +
# Invert the mapping: hostname -> list of engine ids living on that host.
host_engines = {}
for eid, host in engine_hosts.items():
    if host not in host_engines:
        host_engines[host] = []
    host_engines[host].append(eid)
host_engines
# -
# Build a symmetric test matrix, time a no-op roundtrip as a latency
# baseline, then time the naive push of `data` to every engine.
sz = 256
data = np.random.random((sz,sz))
data = data.dot(data.T)
# %time _ = rc[:].apply_sync(lambda : None)
ar = rc[:].push({'data': data}, block=False)
ar.wait_interactive()
# %px import numpy as np
def array_to_file(A):
    """Write array *A* to a temporary ``.np`` file and return its path.

    The file is deliberately not deleted on close (``delete=False``) so
    other processes/engines on the same host can memmap it afterwards.
    """
    import tempfile
    with tempfile.NamedTemporaryFile(suffix='.np', delete=False) as tf:
        # Bug fix: save the argument, not the notebook-global `data`.
        # The original saved `data`, which is undefined on the remote
        # engines where this function is shipped and executed.
        np.save(tf, A)
        data_path = tf.name
    return data_path
@parallel.interactive
def load_memmap(name, path, mode='r+'):
    """Load a file on disk into the interactive namespace as a memmapped array.

    Intended to run on the engines; ``@parallel.interactive`` executes it in
    the user namespace there, and the memmap is bound to ``globals()[name]``
    so later ``%px`` statements can refer to it by name.

    NOTE(review): ``np.memmap`` is called without a dtype or offset, so the
    file is presumably viewed with numpy's default memmap dtype (raw bytes),
    not the float64 array that ``np.save`` wrote (which also has an .npy
    header) -- confirm whether dtype/offset handling is needed here.
    """
    globals()[name] = np.memmap(path, mode=mode)
def bcast_memmap(data, name, client, host_engines):
    """Broadcast a numpy array efficiently via the filesystem.

    - sends the data to each remote host only once (to the host's first
      engine), instead of once per engine
    - every engine then memmaps the on-disk file, so engines that share a
      host also share one copy of the bytes

    Parameters
    ----------
    data : numpy array to broadcast
    name : variable name to bind on each engine
    client : ipyparallel Client used for all engine communication
    host_engines : dict mapping hostname -> list of engine ids on that host

    Returns an AsyncResult covering all of the load operations.
    """
    # actually push the data, just once to each machine
    local_filename = None
    filenames_ars = {}
    for host, engines in host_engines.items():
        h0 = engines[0]
        if host == socket.gethostname():
            # Don't push at all to local engines; write the file directly.
            local_filename = array_to_file(data)
        else:
            # Bug fix: use the `client` argument rather than the
            # notebook-global `rc` the original silently depended on.
            filenames_ars[host] = client[h0].apply_async(array_to_file, data)

    # load the data on all engines into a memmapped array
    msg_ids = []
    for host, engines in host_engines.items():
        if host == socket.gethostname():
            filename = local_filename
        else:
            filename = filenames_ars[host].get()
        ar = client[engines].apply_async(load_memmap, name, filename)
        msg_ids.extend(ar.msg_ids)

    return parallel.AsyncResult(client, msg_ids=msg_ids)
# %%time
# Broadcast with the memmap strategy (one transfer per host) and then
# verify on every engine that `data` arrived by computing its 2-norm.
ar = bcast_memmap(data, 'data', rc, host_engines)
ar.wait_interactive()
# %px np.linalg.norm(data, 2)
# You can also do the same thing [with MPI](MPI Broadcast.ipynb).
| parallel/examples/memmap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Bioassay experiment
#
# In the development of drugs and other chemical compounds, acute toxicity tests or bioassay experiments are commonly performed on animals. Such experiments proceed by administering various dose levels of the compound to batches of animals. The animals’ responses are typically characterized by a dichotomous outcome: for example, alive or dead, tumor or no tumor. An experiment of this kind gives rise to data of the form
# $$(x_i,n_i,y_i);\ i = 1,\ldots,k,$$
# where $x_i$ represents the $i$th of $k$ dose levels (often measured on a logarithmic scale) given to $n_i$ animals, of which $y_i$ subsequently respond with positive outcome.
# Example from Gelman et al. 2014
# +
import sys
sys.path.append('../../Utilities')
import pystan
import stan_utility
import arviz as az
import numpy as np
import scipy.stats as stats
import pandas as pd
from scipy.special import expit # aka logistic
# +
# Plotting setup: a shared yellow/gold palette used by all figures below.
import matplotlib.pyplot as plt
import matplotlib as mpl
light="#FFFCDC"
light_highlight="#FEF590"
mid="#FDED2A"
mid_highlight="#f0dc05"
dark="#EECA02"
dark_highlight="#BB9700"
green="#00FF00"
light_grey="#DDDDDD"
# NOTE(review): plt.style.context() returns a context manager; called bare
# like this it presumably has no lasting effect -- plt.style.use(...) or a
# `with` block is likely what was intended.
plt.style.context('seaborn-white')
mpl.rcParams['figure.dpi']= 200
def ribbon_plot(xt, fs, ax=None, zorder=0):
    """Draw a quantile ribbon of the samples *fs* against covariate *xt*.

    Nested bands cover the 10-90, 20-80, 30-70 and 40-60 percentile
    intervals of ``fs`` (percentiles taken along axis 0), with the median
    line drawn on top. Plots on *ax* (current axes when None) and returns
    the axes used.
    """
    if ax is None:
        ax = plt.gca()
    quantiles = np.percentile(fs, [10, 20, 30, 40, 50, 60, 70, 80, 90], axis=0)
    # widest band first so each narrower band is painted over the last
    band_colors = (light, light_highlight, mid, mid_highlight)
    for lo, band_color in enumerate(band_colors):
        hi = 8 - lo
        ax.fill_between(xt, quantiles[lo, :], quantiles[hi, :], color=band_color, zorder=zorder)
    # median on top
    ax.plot(xt, quantiles[4, :], color=dark, zorder=zorder)
    return ax
# -
# ### Ribbon plot
# This is a visual statistic, showing how the quantiles of a sampled variate behave as a function of the covariate.
# Example:
# Demo: 1000 standard-normal draws repeated across 50 covariate points, so
# every vertical slice of the ribbon shows the same N(0,1) quantiles.
fig, axes = plt.subplots(1, 1, figsize=(7, 4))
covariate=np.linspace(-1,1)
np.random.seed(4052020)
variate=np.repeat(np.random.normal(size=1000)[:,None],50,axis=1)
axes=ribbon_plot(covariate,variate)
axes.annotate('Median',xy=(0,np.median(variate[:,0])),xytext=(-0.5, -0.5),arrowprops={'arrowstyle':'->'})
plt.show()
print(variate.shape)
print(covariate.shape)
# ## Data
# An example of real data from such an experiment is shown below: twenty animals were tested, five at each of four dose levels.
# data: x = log dose levels, n = animals per dose group, y = deaths observed
x = np.array([-0.86, -0.30, -0.05, 0.73])
n = np.array([5, 5, 5, 5])
y = np.array([0, 1, 3, 5])
pd.DataFrame({'Dose xi':x,'Dose of animals ni':n,'Number of deaths yi':y})
# +
# plot the data: empirical death proportion y/n at each log dose
fig, axes = plt.subplots(1, 1, figsize=(7, 4))
axes.scatter(x, y/n, 50, color='black')
axes.set_xlim((-1, 1))
axes.set_xlabel('dose log [g/ml]')
axes.set_ylabel('Proportion of deaths');
axes.set_xticks(x)
axes.set_yticks(y/n)
axes.set_title('Data as proportion of deaths has sigmoid like shape')
plt.show()
# -
# -
# ## Model
# - We can consider outcomes of the five animals within each group $i$ as exchangeable
# - it seems reasonable to model them as independent with equal probabilities,
# - which implies that the data points $y_i$ are binomially distributed:
#
# $$y_i|\theta_i\sim\mathrm{Binomial}(n_i,\theta_i)$$
# ### Relationship between dose and probability
#
# - The simplest model of the dose - response relation - that is, the relation of $\theta_i$ to $x_i$ - is linear: $$\theta_i = \alpha + \beta x_i$$
# - We need a restriction of probability to (0,1)
#
# $$
# \mathrm{logit}(\theta_i)=\alpha+\beta x_i
# $$
# ### Likelihood and posterior
# Likelihood for single experiment can be defined as
# $$
# p(y_i|\alpha,\beta,n_i,x_i)\propto[\mathrm{logit}^{-1}(\alpha+\beta x_i)]^{y_i}[1-\mathrm{logit}^{-1}(\alpha+\beta x_i)]^{n_i-y_i}
# $$
# And that leads to the posterior
# $$
# \begin{aligned}
# p(\alpha,\beta|y,n,x)\propto {}&p(\alpha,\beta|n,x)p(y_1,\ \ldots,\ y_i|\alpha,\beta,n,x)\\
# \propto {}&p(\alpha,\beta)\prod_{i=1}^k p(y_i|\alpha,\beta,n_i,x_i)
# \end{aligned}
# $$
# ### Prior for regression coefficients
# We are generally uninformed about prior. We however prefer to use weakly informative prior, just to provide some kind of regularization.
#
# Following "[Prior choice recommendation](https://github.com/stan-dev/stan/wiki/Prior-Choice-Recommendations)" page at [Stan Wiki](https://github.com/stan-dev/stan/wiki/)
#
# Prior for the regression coefficients in logistic regression (non-sparse case) $$\beta \sim t_\nu(0,s)$$
# where $s$ is chosen to provide weak information on the expected scale, and 3<$\nu$<7.
# ## Prior predictive checks
# Show the prior-predictive Stan program, compile it, and draw R parameter
# and prediction sets from the prior alone (Fixed_param: no MCMC adaptation,
# warmup=0, a single chain).
with open('bioassay_ppc.stan', 'r') as file:
    print(file.read())
model_ppc=stan_utility.compile_model('bioassay_ppc.stan')
R=1000
data_sim=dict(M=len(x),N=n,X=x)
sim=model_ppc.sampling(data=data_sim,algorithm="Fixed_param", iter=R, warmup=0, chains=1, refresh=R,
                       seed=29042020)
# Extract prior draws of the regression coefficients and simulated deaths.
params_sim=sim.extract()
alpha_sim=params_sim['alpha']
beta_sim=params_sim['beta']
y_sim=params_sim['y_sim']
# Scatter of the sampled (alpha, beta) pairs drawn from the prior.
fig, axes = plt.subplots(1, 1, figsize=(7, 4))
axes.scatter(alpha_sim, beta_sim, 20, color=dark_highlight)
axes.set_xlabel(r'$\alpha$')
axes.set_ylabel(r'$\beta$',rotation=0)
axes.set_title('Noninformative student-t prior gives large spread of parameters')
plt.show()
# +
# Histograms of prior-predictive death counts at each of the four doses;
# the dashed vertical line marks the actually observed count.
fig, axes = plt.subplots(2, 2, figsize=(7, 8), sharex=True,sharey=True,squeeze=False)
axes_flat=axes.flatten()
for k in range(4):
    ax = axes_flat[k]
    ax.hist(y_sim[:,k],bins=[0,1,2,3,4,5,6],color=dark,edgecolor=dark_highlight,density=True)
    ax.set_title('Dose of '+str(x[k])+' log g/ml')
    ax.plot([y[k],y[k]],[0,1],linestyle='--',color='black')
    ax.set_xticks([0,1,2,3,4,5,6])
    ax.set_yticks([])
fig.tight_layout()
plt.show()
# -
# Prior predicted outputs are strongly skewed for one or the other end, but actual measurements are also possible.
# +
# plot prior-predictive death-probability curves together with the data;
# expit maps the linear predictor alpha + beta*x to a probability
xt = np.linspace(-1, 1)
fs = expit(alpha_sim[:, None] + beta_sim[:, None]*xt)
# create figure
fig, axes = plt.subplots(2, 1, figsize=(7, 8), sharex=True)
# plot 10 first samples
ax = axes[0]
ax.plot(xt, fs[:10].T, color=mid, alpha=0.5,zorder=0)
ax.scatter(x, y/n, 50, color='black',zorder=1)
ax.set_xlim((-1, 1))
ax.set_ylabel('proportion of deaths')
ax.set_title('10 sample draws from prior predictive distribution')
ax.set_yticks(y/n)
ax.set_xticks(x)
# plot ribbon of quantiles from 10% to 90%
ax = axes[1]
ax=ribbon_plot(xt,fs,ax)
ax.scatter(x, y/n, 50, color='black')
ax.set_xlim((-1, 1))
ax.set_xlabel('dose log [g/ml]')
ax.set_ylabel('proportion of deaths')
ax.set_title('Ribbon of quantiles from prior predictive distribution')
ax.set_yticks(y/n)
ax.set_xticks(x)
fig.tight_layout()
# -
# ## Posterior inference
# Show the fitting Stan program, compile it, and sample the posterior
# (adapt_delta raised to 0.9 for a more careful step-size adaptation).
with open('bioassay_fit.stan', 'r') as file:
    print(file.read())
model = stan_utility.compile_model('bioassay_fit.stan')
data = dict(M = len(x),N = n, X=x,y=y)
fit = model.sampling(data=data,seed= 27042020,control={'adapt_delta':0.9})
# Run the helper's full set of sampler diagnostics on the fit.
stan_utility.check_all_diagnostics(fit)
params=fit.extract()
alpha=params['alpha']
beta=params['beta']
# +
# Posterior draws plotted on top of the (much more dispersed) prior draws.
fig, axes = plt.subplots(1, 1, figsize=(7, 4))
axes.scatter(alpha_sim, beta_sim, 20, color=mid_highlight)
axes.scatter(alpha, beta, 20, color=dark_highlight)
axes.set_xlabel(r'$\alpha$')
axes.set_ylabel(r'$\beta$',rotation=0)
axes.set_title('Samples from joint posterior are consistent with prior')
axes.text(8,30,'Posterior samples',color=dark_highlight)
axes.text(10,-37,'Prior samples',color=mid_highlight)
plt.show()
# -
# Marginal posterior histograms of alpha and beta, plus a joint KDE.
fig, axes = plt.subplots(1, 2, figsize=(7, 4))
ax=axes[0]
ax.hist(alpha,bins=20,color=dark,edgecolor=dark_highlight,density=True)
ax.set_title(r'$\alpha$')
ax.set_yticks(());
ax2=axes[1]
ax2.hist(beta,bins=20,color=dark,edgecolor=dark_highlight,density=True)
ax2.set_title(r'$\beta$')
ax2.set_yticks(());
plt.show()
axes=az.plot_joint(fit, var_names=['alpha','beta'],kind='kde')
# ### Posterior predictive distribution
# +
# Histograms of posterior-predictive death counts at each dose; the dashed
# vertical line marks the observed count.
y_sim=params['y_sim']
fig, axes = plt.subplots(2, 2, figsize=(7, 8), sharex=True,sharey=True,squeeze=False)
axes_flat=axes.flatten()
for k in range(4):
    ax = axes_flat[k]
    ax.hist(y_sim[:,k],bins=[0,1,2,3,4,5,6],color=dark,edgecolor=dark_highlight,density=True)
    ax.plot([y[k],y[k]],[0,1],linestyle='--',color='black')
    ax.set_title('Dose of '+str(x[k])+' log g/ml')
    ax.set_xticks([0,1,2,3,4,5,6])
    ax.set_yticks([])
fig.tight_layout()
plt.show()
# -
# +
# plot posterior death-probability curves together with the data
xt = np.linspace(-1, 1)
fs = expit(alpha[:, None] + beta[:, None]*xt)
# create figure
fig, axes = plt.subplots(2, 1, figsize=(7, 8), sharex=True)
# plot 10 first samples
ax = axes[0]
ax.plot(xt, fs[:10].T, color=mid, alpha=0.5,zorder=0)
ax.scatter(x, y/n, 50, color='black',zorder=1)
ax.set_xlim((-1, 1))
ax.set_ylabel('proportion of deaths')
ax.set_title('10 sample draws from posterior predictive distribution')
ax.set_yticks(y/n)
# plot ribbon of quantiles from 10% to 90% and median
ax = axes[1]
ax=ribbon_plot(xt,fs,ax)
ax.scatter(x, y/n, 50, color='black',zorder=1)
ax.set_xlim((-1, 1))
ax.set_xlabel('dose log [g/ml]')
ax.set_ylabel('proportion of deaths')
ax.set_title('Ribbon of quantiles from posterior predictive distribution')
ax.set_yticks(y/n)
ax.set_xticks(x)
fig.tight_layout()
# -
# ## Estimation of LD50
# A parameter of common interest in bioassay studies is the LD50 - the dose level at which the probability of death is 50%. In our logistic model, a 50% survival rate means
#
# $$
# \mathrm{LD50:}\ \mathrm{E}\left(\frac{y_i}{n_i}\right)=\mathrm{logit}^{-1}(\alpha+\beta x_i)=0.5
# $$
# This parameter makes sense only for $\beta>0$ as otherwise the increase of the dose reduces death risk.
# Keep only draws with beta > 0 (LD50 is meaningless otherwise, since the
# dose-response would be decreasing) and histogram the implied
# LD50 = -alpha/beta for those draws.
bpi = beta > 0
samp_ld50 = -alpha[bpi]/beta[bpi]
fig, axes = plt.subplots(1, 1, figsize=(7, 4))
axes.hist(samp_ld50, np.arange(-0.5, 0.51, 0.02),color=dark,edgecolor=dark_highlight)
axes.set_xlim([-0.5, 0.5])
axes.set_xlabel(r'LD50 = -$\alpha/\beta$')
axes.set_yticks(())
axes.set_title(r'LD50 estimate conditional on $\beta>0$')
plt.show()
# Posterior median and central 90% interval of the LD50 (log-dose scale).
print('Median: {:4.2f}'.format(np.median(samp_ld50)))
print('90% confidence interval: ',['{:4.2f}'.format(k) for k in np.percentile(samp_ld50,[5,95],axis=0)])
# In our case all samples of $\beta$ were positive, however if that was not the case using mean of $\alpha$ and $\beta$ to compute LD50 would be biased.
# Back-transform some log10 doses to g/ml -- presumably the interval
# endpoints and median from above; TODO confirm which values these are.
10**(-0.25),10**0.08
10**(-0.11)
| Data Analytics/Topic 4 - Multiparameter models/Bioassay/Bioassay.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf
import numpy as np
import random
from scipy.stats import pearsonr
from scipy.spatial import distance
import seaborn as sns
import matplotlib.pyplot as plt
def Euclidean_dist(A, B):
    """Element-wise Euclidean (Frobenius) distance between arrays A and B."""
    diff_sq = (A - B) * (A - B)
    total = 0
    # accumulate row sums exactly as the builtin nested sums would
    for row in diff_sq:
        total = total + sum(row)
    return total ** 0.5
def MAE(A, B):
    """Mean Absolute Error between matrices A and B.

    Bug fix: despite its name (and the original "## Mean Absolute Error"
    comment), the previous implementation averaged *squared* differences,
    i.e. it computed the mean squared error. This version averages the
    absolute differences, as the name promises.
    """
    C = A - B
    return np.abs(C).sum() / (C.shape[0] * C.shape[1])
def random_split_train_test(X0, training_dictionary_fraction, seed, dictionary_size=0.5, biased_training=0.):
    """Split the columns (samples) of X0 into a training set and a test set.

    Parameters
    ----------
    X0 : 2d array (genes x samples); the split is over columns
    training_dictionary_fraction : fraction of samples used for training
        (always at least 5 samples)
    seed : RNG seed so the split is reproducible
    dictionary_size : kept for API compatibility; NOTE(review): it is
        computed below but never used afterwards -- confirm intent
    biased_training : fraction of the training set chosen by correlation
        similarity to one randomly picked anchor sample, the remainder
        being drawn uniformly at random

    Returns
    -------
    (xa, xb) : the training columns and the remaining (test) columns.
    """
    training_dictionary_size = max(int(training_dictionary_fraction * X0.shape[1]), 5)
    if dictionary_size < 1:
        dictionary_size = dictionary_size * training_dictionary_size
    dictionary_size = int(dictionary_size)  # NOTE(review): unused below
    # Boolean sample mask. Bug fix: np.bool was deprecated in NumPy 1.20 and
    # removed in 1.24; the builtin bool is the supported dtype spelling.
    xi = np.zeros(X0.shape[1], dtype=bool)
    if biased_training > 0:
        np.random.seed(seed)
        i = np.random.randint(len(xi))
        # pick the samples most correlated with the random anchor sample i
        # (index 0 of the sort is the anchor itself, hence the [1:...] slice)
        dist = distance.cdist([X0[:, i]], X0.T, 'correlation')[0]
        didx = np.argsort(dist)[1:int(biased_training * training_dictionary_size) + 1]
    else:
        didx = []
    xi[didx] = True
    if biased_training < 1:
        # fill the rest of the training set uniformly at random
        remaining_idx = np.setdiff1d(range(len(xi)), didx)
        np.random.seed(seed)
        xi[np.random.choice(remaining_idx, training_dictionary_size - xi.sum(), replace=False)] = True
    xa = X0[:, xi]
    xb = X0[:, np.invert(xi)]
    return xa, xb
def compare_results(A, B):
    """Return [pearson similarity, euclidean distance, MAE] between A and B.

    Bug fix: the original wrote ``list(scalar)`` for each metric, and
    ``list()`` of a float raises TypeError; each scalar is now wrapped in
    a one-element list / appended directly.
    """
    # 1 - correlation *distance* equals the Pearson correlation coefficient
    results = [1 - distance.correlation(A.flatten(), B.flatten())]
    results.append(Euclidean_dist(A, B))
    results.append(MAE(A, B))
    return results
# Per-dataset RNG seeds: five fixed seeds per accession so the five
# replicate train/test splits are reproducible across runs.
seed_all = {"GSE71858": [272, 781, 692, 219, 292],  #
            "GSE60361": [283, 446, 562, 114, 739],  #
            "GSE62270": [629, 685, 953, 595, 378],  #
            "GSE48968": [623, 19, 621, 802, 557],  #
            "GSE52529": [550, 939, 76, 260, 328],  #
            "GSE77564": [475, 649, 316, 639, 741],
            "GSE78779": [152, 866, 808, 796, 184],  #
            "GSE10247": [702, 217, 944, 338, 701],  #
            "GSE69405": [317, 470, 798, 283, 695],
            "GSE45235": [282, 713, 521, 717, 517],  #
            "GSE25038": [480, 402, 413, 64, 574],
            "mass_cytomatry": [943, 800, 175, 486, 749]}
# +
tf.set_random_seed(1)
# Hyper Parameters
LR = 0.0001  # learning rate
Dropout_rate = 0.5
# GSE Data
data_path = "./Original_data/GSE78779.npy"
X = np.load(data_path)
training_dictionary_fraction = 0.05
genes, samples = X.shape
seeds = seed_all['GSE78779']
############################# Define architectures ##################################
# Symmetric autoencoder: genes -> 1280 -> 640 -> 256 -> 10 (bottleneck)
# and back out to `genes`, trained with MSE reconstruction loss (TF1 API).
# tf placeholder
tf_x = tf.placeholder(tf.float32, [None, genes])  # value in the range of (0, 1)
# encoder
# Dn0 = tf.layers.dropout(tf_x, rate=Dropout_rate, training=True)
en0 = tf.layers.dense(tf_x, 1280, tf.nn.leaky_relu)
en1 = tf.layers.dense(en0, 640, tf.nn.leaky_relu)
en2 = tf.layers.dense(en1, 256, tf.nn.leaky_relu)
encoded = tf.layers.dense(en2, 10)
# decoder
de0 = tf.layers.dense(encoded, 256, tf.nn.leaky_relu)
de1 = tf.layers.dense(de0, 640, tf.nn.leaky_relu)
de2 = tf.layers.dense(de1, 1280, tf.nn.leaky_relu)
decoded = tf.layers.dense(de2, genes, tf.nn.leaky_relu)
loss = tf.losses.mean_squared_error(labels=tf_x, predictions=decoded)
train = tf.train.AdamOptimizer(LR).minimize(loss)
# -
# NOTE(review): X is reloaded here from GSE48968 even though the graph was
# sized for GSE78779 and `seeds` still holds the GSE78779 seeds -- confirm
# which dataset this run is meant to use (the shapes must also match `genes`).
import numpy as np
data_path = "./Original_data/GSE48968.npy"
X = np.load(data_path)
X
# +
############################# Running ##################################
# Train the autoencoder on 2 of the 5 seeded train/test splits; every 100
# steps the training metrics (Pearson, Euclidean distance, MAE) are
# recomputed, and after 500 steps the held-out test set is evaluated.
# NOTE(review): a new tf.Session is created each iteration and never
# closed, so graph state/memory accumulates across iterations.
Results = {}
# seeds = random.sample(range(0, 1000), 5)
# seeds = [283, 446, 562, 114, 739]
print(seeds)
for i in range(2):
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    X_train, X_test = random_split_train_test(X, training_dictionary_fraction, seed=seeds[i])
    #print(xi)
    #np.savetxt("GSE60361_Xi.csv", xi, delimiter=',')
    print(X.shape) #
    print(X_train.shape) #
    print(X_test.shape) #
    print(X_train[0, 0:10])
    # transpose to samples x genes, matching the placeholder's [None, genes]
    X_train = np.transpose(X_train)
    X_test = np.transpose(X_test)
    for step in range(500):
        b_x = X_train
        _, encoded_, decoded_, loss_ = sess.run([train, encoded, decoded, loss], {tf_x: b_x})
        if step % 100 == 0:
            # print('------------------Step: %d' % step + '---------------')
            # print('train loss: %.4f' % loss_)
            # plotting decoded image (second row)
            decoded_data_train = sess.run(decoded, {tf_x: b_x})
            # train_p = (1 - distance.correlation(X_train.flatten(), decoded_data_train.flatten()))
            train_pp = pearsonr(X_train.flatten(), decoded_data_train.flatten())[0]
            train_ED = Euclidean_dist(X_train, decoded_data_train)
            train_MAE = MAE(X_train, decoded_data_train)
            # print('train Pearson: %.4f' % train_p)
            # print('train Pearson_: %.4f' % train_pp)
            # print('train Euclidean_dist: %e' % train_ED)
            # print('train MAE: %.4f' % train_MAE)
            encod = sess.run(encoded, {tf_x: b_x})
            # print(encod.shape)
    # print('------------------Test---------------')
    # held-out evaluation with the trained weights
    decoded_data_testing = sess.run(decoded, {tf_x: X_test})
    encoded_data = sess.run(encoded, {tf_x: X_test})
    # test_p = (1 - distance.correlation(X_test.flatten(), decoded_data.flatten()))
    test_pp = pearsonr(X_test.flatten(), decoded_data_testing.flatten())[0]
    test_ED = Euclidean_dist(X_test, decoded_data_testing)
    test_MAE = MAE(X_test, decoded_data_testing)
    # print('test Pearson: %.4f' % test_pp)
    # print('test Euclidean_dist: %e' % test_ED)
    # print('test MAE: %.4f' % test_MAE)
    # print('----------------------------------------')
    # Result = compare_results(X_test, decoded_data)
    # print(Result)
    decoded_data_testing = sess.run(decoded, {tf_x: X_test})
    print(decoded_data_testing.shape)
    result_train = 'DeepAE4 (training)_' + str(i)
    result_test = 'DeepAE4 (testing )_' + str(i)
    Results[result_train] = [train_pp, train_ED, train_MAE]
    Results[result_test] = [test_pp, test_ED, test_MAE]
    print('----------------End Iteration: %d' % i + '------------------------')
print(data_path)
for k, v in sorted(Results.items()):
    print('\t'.join([k] + [str(x) for x in v]))
# -
# Inspect the trained graph's variables and manually push an activation
# through the decoder layers (dense_4 .. dense_7) with plain numpy.
# NOTE(review): `chl` is not defined in this cell -- it is only created in
# the following cell's loop, so running the cells top-to-bottom raises
# NameError here.
tf.trainable_variables()
w1=tf.get_default_graph().get_tensor_by_name('dense/kernel:0')
Weights = sess.run(w1)
type(Weights)
Weights.shape
out_w1 = sess.run(tf.get_default_graph().get_tensor_by_name('dense_4/kernel:0'))
out_b1 = sess.run(tf.get_default_graph().get_tensor_by_name('dense_4/bias:0'))
chl1 = np.dot(out_w1.T, chl) + out_b1
out_w2 = sess.run(tf.get_default_graph().get_tensor_by_name('dense_5/kernel:0'))
out_b2 = sess.run(tf.get_default_graph().get_tensor_by_name('dense_5/bias:0'))
chl2 = np.dot(out_w2.T, chl1) + out_b2
out_w3 = sess.run(tf.get_default_graph().get_tensor_by_name('dense_6/kernel:0'))
out_b3 = sess.run(tf.get_default_graph().get_tensor_by_name('dense_6/bias:0'))
chl3 = np.dot(out_w3.T, chl2) + out_b3
out_w4 = sess.run(tf.get_default_graph().get_tensor_by_name('dense_7/kernel:0'))
out_b4 = sess.run(tf.get_default_graph().get_tensor_by_name('dense_7/bias:0'))
chl4 = np.dot(out_w4.T, chl3) + out_b4
print(chl1.shape)
print(out_w2.shape)
print(out_b2.shape)
print(chl2.shape)
print(chl3.shape)
print(chl4.shape)
# Scratch arithmetic: 10% of the gene counts of a few datasets.
19972*0.1
6789*0.1
10972*0.1
# +
# Probe each of the 10 latent dimensions with a one-hot code, decode it
# through the trained decoder weights (dense_4 .. dense_7), and record the
# indices of the highest-weighted output genes for every dimension.
import heapq
import csv

# The decoder weights do not change inside the loop, so fetch them from the
# session once instead of re-running sess.run 8 times per iteration.
out_w1 = sess.run(tf.get_default_graph().get_tensor_by_name('dense_4/kernel:0'))
out_b1 = sess.run(tf.get_default_graph().get_tensor_by_name('dense_4/bias:0'))
out_w2 = sess.run(tf.get_default_graph().get_tensor_by_name('dense_5/kernel:0'))
out_b2 = sess.run(tf.get_default_graph().get_tensor_by_name('dense_5/bias:0'))
out_w3 = sess.run(tf.get_default_graph().get_tensor_by_name('dense_6/kernel:0'))
out_b3 = sess.run(tf.get_default_graph().get_tensor_by_name('dense_6/bias:0'))
out_w4 = sess.run(tf.get_default_graph().get_tensor_by_name('dense_7/kernel:0'))
out_b4 = sess.run(tf.get_default_graph().get_tensor_by_name('dense_7/bias:0'))

top = []
for i in range(10):
    # one-hot activation of latent unit i.
    # Bug fix: np.int was removed in NumPy 1.24; the builtin int is the
    # supported dtype spelling.
    chl = np.zeros((10,), dtype=int)
    chl[i] = 1
    # manual forward pass through the four decoder layers
    chl1 = np.dot(out_w1.T, chl) + out_b1
    chl2 = np.dot(out_w2.T, chl1) + out_b2
    chl3 = np.dot(out_w3.T, chl2) + out_b3
    chl4 = np.dot(out_w4.T, chl3) + out_b4
    # indices of the 22814 largest decoded values for this latent dimension
    top10 = heapq.nlargest(22814, range(len(chl4)), chl4.take)
    top = np.hstack((top, top10))
np.savetxt("GSE78779_top.csv", top, delimiter=',')
print(top.shape)
# -
| Find_high_weighted_genes_from_key_hidden_dimensions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.2 64-bit
# language: python
# name: python38264bit735b8921aa184eefb4e65f4f9da620bb
# ---
# What's in a product:
# - how to get users,
# - are they going to pay,
# - how to keep them.
#
| _TODO/Finale, Product- Creating a user base, monetization, scaling and retention. .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Booleans are a distinct built-in type; the string keeps its own type.
a = True
b = False
c = 'ninjas'
print(type(a))
print(type(c))
# -
# ### Relational Operators
# Each comparison below evaluates to a bool (True/False).
a = 10
b = 20
print(a > b)
print(a >= b)
print(a < b)
print(a <= b)
print(a == b)
print(a != b)
# ### Logical Operators
# +
c1 = a > 10 # here a is 10 and b is 20
c2 = b > 10
# `and`/`or` combine boolean conditions; `not` negates one
r1 = c1 and c2
r2 = c1 or c2
r3 = not(c1)
print(r1)
print(r2)
print(r3)
# -
| 01.Python-Basics/03. Conditionals and Loops/01. Boolean-Datatype.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # set up cell
#
# This cell is just to import the libraries that I will be using
import os
import networkx as nx
import pandas as pd
import numpy as np
from stellargraph import StellarGraph
from stellargraph.data import BiasedRandomWalk
from gensim.models import Word2Vec
# +
dims = 30  # number of dimensions the node2vec embedding will use
G_graphml = nx.read_graphml( "/home/jonno/COVID_project/COVID_project_data/data_graphs/base_g.graphml")
# -
# # Create node2vec embedding
# +
# NOTE(review): the graph is read again here, making the load above redundant.
G_graphml = nx.read_graphml( "/home/jonno/COVID_project/COVID_project_data/data_graphs/base_g.graphml")
#no node features are included... mostly because this is node2vec
# Convert the networkx graph to a Stellargraph
G = StellarGraph.from_networkx(G_graphml)
# The features aren't used by node2vec but it makes changing to DGI easier
# Generate weighted, biased (p, q) random walks from every node; these act
# as the "sentences" fed to word2vec below.
rw = BiasedRandomWalk(G)
walks = rw.run(
    nodes=list(G.nodes()),  # root nodes
    length=30,  # maximum length of a random walk
    n=100,  # number of random walks per root node
    p=0.5,  # Defines (unormalised) probability, 1/p, of returning to source node
    q=2.0,  # Defines (unormalised) probability, 1/q, for moving away from source node
    weighted=True,
)
print("Number of random walks: {}".format(len(walks)))
str_walks = [[str(n) for n in walk] for walk in walks]
# NOTE(review): `size=` and `iter=` are gensim<4 keyword names (renamed to
# vector_size/epochs in gensim 4) -- confirm the pinned gensim version.
model = Word2Vec(str_walks, size=dims, window=10, min_count=0, sg=1, workers=1, iter=1)
node_ids = model.wv.index2word  # list of node IDs
node_embeddings = (
    model.wv.vectors
)  # numpy.ndarray of size number of nodes times embeddings dimensionality
node_embeddings_df = pd.DataFrame(data=node_embeddings)
# Save it all as a CSV to be loaded back into R
# -
node_embeddings_df.to_csv("/home/jonno/COVID_project/COVID_project_data/embedded_graphs/base_g_30.csv")
# NOTE(review): the remaining cells reference names never defined in this
# notebook (`target_file_path`, `result_save_path`, `main`); they appear to
# be pasted from a different analysis and will raise NameError if run as-is.
data2 = pd.read_pickle(target_file_path)
data3 = pd.read_pickle("/home/jonno/polycentric-mobility/demo_data.pkl")
print(data3)
# Collect the distinct values of the time_spent column.
output = set()
for time_spent in data3["time_spent"]:
    output.add(time_spent) #print(output) dates = list(output)
print(output)
data2.dtypes
import time
# Time a full run of the (externally defined) main pipeline.
start_time = time.time()
main(data_path = target_file_path, results_path = result_save_path)
print("--- %s seconds ---" % (time.time() - start_time))
| node2vec_mobility.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Exploratory Data Analysis (EDA) Round 2
#
# The purpose of this notebook is to do another pass at the fire risk data and see
# if we can't examine the entire thing easily, get a sense of how far back our dataset goes.
# To do this, we are going to use the offset function in the query (now that we know how to use it properly)
#
from __future__ import division, print_function # always future proof for python3
import pandas as pd
# for simplicity we'll store the url in a string that we'll then insert a new offset into each round
query_url = 'https://data.sfgov.org/resource/wbb6-uh78.json?$order=close_dttm%20DESC&$offset={}&$limit=1000'
df = pd.read_json(query_url.format('0'))
# I'm curious, how many pages of data do we have in this dataset, how many records?
# we could have done this programmatically by just continuing to go through the records
# and at a certain point we will need to do that, but for now
# we cheated and looked at the url https://data.sfgov.org/Public-Safety/Fire-Incidents/wr8u-xric
# and it shows we have 403,988 rows of data, so 403 pages that we'd need to page through if we wanted to create one
# big database.
# Certainly something we will do but for now let's grab the very last page and see what we've got
df = pd.read_json(query_url.format('403000'))
df.head()
# let's take a look at what cleanup work we need to do.
# Generally a field that is just labeled as an "object" is something we wish to clean up
# This code will eventually find its way into a pipeline and moved to src
df.info()
# +
# As you can see above in the second-to-last row, there are 10 float objects, 15 int objects, and 37 unspecified objects.
# We will want to fix that.
# first, what are the date values and all?
# df.describe(include='all')
# Uh-oh! The describe() function above failed with a type error,
# stating that there's an unhashable type of `dict` in our data. We'll need to find that and fix it
# before we can do a proper analysis
# let's look at a single record
df.iloc[0]
# -
# even though there are so many columns that some are hidden, we can see above that location is a dict field.
# so we should either consider removing it, or doing an eval to get it into the database
df.iloc[0]['location']
# +
# pretty simple, it's just a location type and then lat long. I'm not sure we need the type "Point" but let's store it for now
# just to see what we get
# doing a simple check on http://www.latlong.net/
# confirmed the general lat long for SF is 37.774929, -122.419416 respectively,
# so the data is most likely flipped, where longitude is first, and latitude the second value.
# let's use ast/eval to convert the location into separate columns
# we first should confirm that we are dealing with strings or dicts
type(df.iloc[0]['location'])
# -
#import ast # TODO: move this to the very top with the other imports
temp_df = df.join(pd.DataFrame(df["location"].to_dict()).T)
# we'll do the slow approach to this for now, just as an example, but later consider a faster solution
#for index, row in df.iterrows():
temp_df.head()
temp_df.iloc[0]['coordinates']
# let's quickly check if the `type` is necessary, or if it only contains point and we should just delete it
temp_df.type.value_counts(dropna=False)
# let's check if the null values are important?
temp_df[temp_df.type.isnull()].tail()
# Can we assume that if type is null, so is coordinates, in which case location may also be null?
temp_df[temp_df.type.isnull()].coordinates.value_counts(dropna=False)
# OK, so the above info shows we can drop type, and we can also drop location
# we'll also overwrite the original column and delete the temp_df
df = temp_df.drop(['type','location'],axis=1)
del temp_df
# TODO: when we circle back and do our next notebook and import, we should consider using int32, float32 to save memory
# TODO: use a publicly available database to fill in zipcode and lat lon with a geo lookup
# now that we've taken a look, remember that this is an OLD database, so it may have been overzealous to delete columns
# or decide that type wasn't useful.
# let's quickly do a check for the most recent rows (the newest page of records) and see what data we get there.
temp_df = pd.read_json(query_url.format('0'))
temp_df = temp_df.join(pd.DataFrame(temp_df["location"].to_dict()).T)
temp_df.iloc[-1]
temp_df.type.value_counts(dropna=False)
# it appears that the coordinates always seem to represent a point, or is null.
# so for now let's drop it
# one of the challenges with this dataset is that there are a LOT of columns
# we should determine which ones to trim down.
# since we now have the first 1,000 rows of data in `df` and the most recent records in `temp_df` let's concat the two into a single dataframe
temp_df = temp_df.drop(['type','location'],axis=1)
df = pd.concat([temp_df, df])
df.head()
# NOW let's try describe()
# we now have a problem because we have a list, which we created when we made coordinates
# let's fix that
# remember, first value is the longitude, and the second value is latitude
# don't do this for null values
mask = df.coordinates.notnull()
df.loc[mask, 'long'] = df[mask]['coordinates'].apply(lambda x: x[0])
df.loc[mask, 'lat'] = df[mask]['coordinates'].apply(lambda x: x[1])
df.iloc[-1]
# now we can delete the coordinates column
df = df.drop(['coordinates'],axis=1)
# again, let's try to describe the data
df.describe(include='all')
# yay! it worked! we are getting closer to clean data but a long way from it.
# one important thing we should do is look at the close_dttm since that is what we used to order the data
# but it appears our dates are not viewed as dates
df.info()
# fortunately there was a nice use of the dttm to identify what should be datetime objects
# let's use that to filter and then convert
for col in df.columns:
if 'dttm' in col:
print(col)
# so there are three.. hmm I thought there were more? Let's start with converting these and then go from there
# NOTE: a leftover exploratory line `df['alarm_dttm'] = pd.to_datetime?` (IPython help
# syntax, a SyntaxError in plain Python) was removed here so this script stays runnable.
# Convert the three dttm columns from strings to proper pandas datetime64 values.
df['alarm_dttm'] = pd.to_datetime(df['alarm_dttm'])
df['arrival_dttm'] = pd.to_datetime(df['arrival_dttm'])
df['close_dttm'] = pd.to_datetime(df['close_dttm'])
df.info()
# now that we are starting to clean up our data, it's time that we discard some of these unnecessary columns
# let's do a quick values count on each to see if we have any that are just blank field
for col in df.columns:
print("\n", col.title(), "\n")
print(df[col].value_counts(dropna=False), "\n")
print("*"*20)
df.tail()
# +
# OK, so here's a list of columns that for the time being we'll just remove so that we can get a better sense of the
# data and hopefully, more easily, grab the entire body of records, and not just these few rows
cols_to_drop = ["automatic_extinguishing_sytem_failure_reason",
"automatic_extinguishing_sytem_type",
"battalion",
"box",
"call_number",
"detector_effectiveness",
"detector_failure_reason",
"ems_personnel",
"ems_units",
"exposure_number",
"first_unit_on_scene",
"ignition_factor_secondary",
"mutual_aid",
"no_flame_spead",
"other_personnel",
"other_units",
"station_area",
"supervisor_district"]
df = df.drop(cols_to_drop, axis=1)
# -
df.info()
df.head()
df[df.zipcode.isnull()].iloc[-1]
# +
# after googling around I found this API for doing a reverse geo lookup, using the lat long to get the nearest zipcode
# we will do that now, but try to be polite about it.
# http://api.geonames.org/findNearbyPostalCodesJSON?lat=37.728947&lng=-122.466270&username=demo
df[df.zipcode.isnull()].groupby(['lat','long'])['address'].value_counts(dropna=False)
# -
grouped = df[df.zipcode.isnull()].groupby(['lat','long'])
for name, group in grouped:
print("")
print(name)
print(group['address'].value_counts(dropna=False))
# +
geo_url = "http://api.geonames.org/findNearbyPostalCodesJSON?lat={}&lng={}&username={}"
username = 'mikezawitkowski' # TODO: hide this in a config file not in source control
temp_df = pd.read_json(geo_url.format('37.616900999999999', '-122.38415999999999', username))
temp_df.head()
# -
temp_df.iloc[0]['postalCodes']
# +
# OK, so let's populate the missing zipcodes
grouped = df[df.zipcode.isnull()].groupby(['lat','long'])
geo_url = "http://api.geonames.org/findNearbyPostalCodesJSON?lat={}&lng={}&username={}"
username = 'mikezawitkowski' # TODO: hide this in a config file not in source control
for name, group in grouped:
lat, lon = name[0], name[1]
print("lat: {}, long: {}".format(lat, lon))
temp_df = pd.read_json(geo_url.format(lat,
lon,
username))
mask = ((df.lat == float(lat)) &
(df['long'] == float(lon)) &
(df.zipcode.isnull())
)
df.loc[mask, 'zipcode'] = temp_df.iloc[0]['postalCodes']['postalCode']
# -
df[df.zipcode.isnull()].shape # there are still 214 values that are missing a lat lon AND zip
mask = (df.zipcode.isnull())
geocolumns = ['address','city','neighborhood_district','zipcode','lat','long']
df[mask][geocolumns]
df[mask].iloc[0][geocolumns]
# let's try building a street2coordinates endpoint
address = ('+').join(df[mask].iloc[0]['address'].split())
address = address + '%2c+San+Francisco%2c+CA'
s2c_url = "http://www.datasciencetoolkit.org/street2coordinates/"
temp_df = pd.read_json(s2c_url + address)
temp_df
# "http://www.datasciencetoolkit.org/street2coordinates/2543+Graystone+Place%2c+Simi+Valley%2c+CA+93065"
temp_df.loc['latitude'][0]
# rename the data
df.loc[df.city == 'SF', 'city'] = 'San Francisco'
# +
mask = (df.zipcode.isnull())
grouped = df[mask].groupby(['address','city'])
for name, group in grouped:
if '/' in str(name[0]): # the sign that it's an intersection, deal with those separately
continue
street, city = name[0], name[1]
address = ('+').join(street.split())
address = address + '%2c+' + '+'.join(city.split()) + '%2c+CA'
s2c_url = "http://www.datasciencetoolkit.org/street2coordinates/"
try:
temp_df = pd.read_json(s2c_url + address)
except ValueError as err:
print(err)
print(name)
continue
mask = ((df.address == street) & (df.city == city))
df.loc[mask, 'lat'] = temp_df.loc['latitude'][0]
df.loc[mask, 'long'] = temp_df.loc['longitude'][0]
# -
df[df.zipcode.isnull() & (df.lat.isnull())]
# now, we go back and we fill in the zipcodes using lat long
geo_url = "http://api.geonames.org/findNearbyPostalCodesJSON?lat={}&lng={}&username={}"
df[df.zipcode.isnull()]
# +
# TODO NEXT
# it's clear from the above that the steps being used for cleaning are working fine.
# What's next is we need to refactor the above into a cleaning script
# and move that to src
# we also should figure out a more lightweight method of getting the
# the credentials from any .env file, perhaps a simple .json is fine here
# the biggest problem is how to make this work on someone else's library.
# Do we instruct one to download the dstk image?
# Is there a container that's lightweight that they can install?
# -
| notebooks/exploratory/0.3-EDA-second-pass.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Beginning Programming in Python
#
# ### Supplemental Topics: Plotting
# #### CSE20 - Spring 2021
#
#
# Interactive Slides: [https://tinyurl.com/cse20-spr21-plotting](https://tinyurl.com/cse20-spr21-plotting)
# + [markdown] slideshow={"slide_type": "slide"}
# # Plotting
#
# ### This material will not be on the final, however it is a helpful tool if you are working with data in any way.
#
# + [markdown] slideshow={"slide_type": "slide"}
# # The `matplotlib` package
#
# - The `matplotlib` package can be used to make a variety of different plots. For example:
# - Line plots
# - Bar charts
# - Histograms
# - and more
#
# More information and examples can be found on the website: https://matplotlib.org/
# + [markdown] slideshow={"slide_type": "slide"}
# # Installation
#
# - `matplotlib` can be installed using either `pip` or `conda`
#
# Run the cell below to install `matplotlib` and import the module we will be using
# -
# !pip install matplotlib
import matplotlib.pyplot as plt
# + [markdown] slideshow={"slide_type": "slide"}
# # Example: Line Plot
# +
xs = [0, 1, 2, 3, 4]
ys = [1, 2, 1, 3, 5]
plt.figure()
plt.plot(xs, ys)
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# # Example: Line Plot Customizations
# +
xs = [0, 1, 2, 3, 4, 5]
ys = [1, 2.2, 1.1, 3.5, 5, 6]
plt.figure()
plt.plot(xs, ys, color="red", label="loss")
plt.plot(xs, ys[::-1], "--", color="green", label="profit")
plt.legend()
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# # Example: Histogram
# +
import random
data = [random.randint(0, 10) for _ in range(int(50))]
plt.figure()
plt.hist(data)
plt.xlabel("X-Values")
plt.ylabel("Counts")
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# # Example: Bar Chart
# +
data = [12, 15, 8, 20, 45]
categories = ["A", "B", "C", "D", "E"]
plt.figure()
plt.bar(range(len(data)), data, color="r")
#plt.bar([1, 3, 5, 7, 9], data, color="r")
#plt.xticks(range(len(data)), categories)
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# # Example: Scatterplot
# +
xs = [3, 5, 4, 2, 7]
ys = [1, 8, 7, -5, 3]
zs = [0, 1, 2, 2, 4]
plt.figure()
plt.scatter(xs, ys)
#plt.scatter(xs, ys, c=zs)
#plt.colorbar()
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# # Example: Image
# +
data_2d = [
[0, 13, 5],
[4, 5, 3],
[2, 22, 4],
]
plt.figure()
plt.imshow(data_2d)
#plt.colorbar()
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# # What's Due Next?
#
# - Assignment 5 due June 6th 11:59 PM
| CSE20_Plotting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Download this page as a jupyter notebook at [Lesson 4](https://192.168.127.12/engr-1330-webroot/1-Lessons/Lesson04/ENGR-1330-Lesson04.ipynb)
# # Sequential Structures
# Copyright © 2021 <NAME> and <NAME>
#
# Last GitHub Commit Date: 13 July 2021
#
# ## Lesson 4 Program Flow Control:
# - Three structures: sequence, selection , repetition (loops)
# - Sequence structures
# - Selection structures
# - Structured FOR loops
# - Structured WHILE loops
# - Representing computational processes with flowcharts, a graphical abstraction
#
# ---
# Script block to identify host, user, and kernel
import sys
# ! hostname; ! whoami; ! pwd;
print(sys.executable)
# + language="html"
# <!-- Script Block to set tables to left alignment -->
# <style>
# table {margin-left: 0 !important;}
# </style>
# -
# ---
# ## Objectives
#
# 1) Develop awareness of loops, and their utility in automation.
# - To understand loop types available in Python.
# - To understand and implement loops in various examples and configurations.
#
# 2) Develop awareness of flowcharts as a tool for:
# - Post-development documentation
# - Pre-development program design
#
#
# ---
# ## Sequence
#
# Sequential processing are steps performed in sequence, one after another. A spreadsheet computation from top-to-bottom is a sequential process.
#
# **Reliability Example**
# Suppose we wish to estimate the reliability of a system composed of many identical parts used in multiple places in a design, for instance rivets on an airplane wing. Using a Bernoulli model (which you will see in your statistics class) we can estimate the collective reliability of the system (all the parts work as desired). The reliability is expressed as the fraction of time that no parts have failed; if the fraction is small we would want to either improve part reliability, or ensure redundancy so the system can function with broken parts.
#
# Let $p$ be the probability a single component is good and $N$ be the total number of components in the system that work together in a "series" context. The reliability, or the percentage of time that none of the components have failed is given by the Bernoulli equation:
#
# $$\% = (\frac{p}{100.0})^N \cdot 100.0 $$
#
# Suppose we want a script to read in a component probability and count, and estimate system reliability -- we can apply our problem solving protocol and JupyterLab to do so, and the task will be mostly sequential
#
# **Step 1 Problem Statement** Estimate the reliability of a component in an instrument relative to a group of components using a Bernoulli approximation.
#
# **Step 2 Input/Output Decomposition** Inputs are the reliability of a single component and the number of components working together in a system, output is estimate of system reliability, governing principle is the Bernoulli equation above.
#
# **Step 3 By-Hand Example**
# Suppose the system is a small FPGA with 20 transistors, each with reliability of 96-percent. The entire array reliability is
#
# $$\text{percentage} = (\frac{96.0}{100.0})^{20} \cdot 100.0 = 44.2\%$$
#
# **Step 4 Algorithm Development**
# Decompose the computation problem as:
#
# 1. Read reliability of a single component
# 2. Read how many components
# 3. Compute reliability by bernoulli model
# 4. Report result
#
# **Step 5 Scripting**
# Written as a sequence we can have
# Step 5 Scripting: read a single component's reliability (percent) and the
# component count, then apply the Bernoulli series-system model from above:
#   % = (p/100)^N * 100  -- the chance that all N parts work.
component = float(input('Component Reliability (percentage-numeric)?'))  # p, percent
howmany = int(input('Number of Components (integer-numeric)?'))  # N, parts in series
reliability = 100.0*(component/100.0)**howmany  # Bernoulli model
print('Component Reliability: ',round(component,1))
print('Number of Components : ',howmany)
# NOTE(review): 'Relability' is a typo in this user-facing string; left unchanged here
# because altering runtime output is out of scope for a documentation pass.
print('System Relability is : ',round(reliability,1),'%')
# **Step 6 Refinement**
# We have tested the script with the by-hand example, no refinement really needed here, but lets apply to new conditions
# The identical computation is repeated so it can be re-run with new inputs.
component = float(input('Component Reliability (percentage-numeric)?'))
howmany = int(input('Number of Components (integer-numeric)?'))
reliability = 100.0*(component/100.0)**howmany
print('Component Reliability: ',round(component,1))
print('Number of Components : ',howmany)
print('System Relability is : ',round(reliability,1),'%')
# ### References
#
# 1. Computational and Inferential Thinking <NAME> and <NAME>, Computational and Inferential Thinking, The Foundations of Data Science, Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND) Chapters 3-6 https://www.inferentialthinking.com/chapters/03/programming-in-python.html
#
# 2. Learn Python the Hard Way (Online Book) (https://learnpythonthehardway.org/book/) Recommended for beginners who want a complete course in programming with Python.
#
# 3. LearnPython.org (Interactive Tutorial) (https://www.learnpython.org/) Short, interactive tutorial for those who just need a quick way to pick up Python syntax.
#
# 4. <NAME> and <NAME> (2016) ALGORITHMS TO LIVE BY: The Computer Science of Human Decisions Henry Holt and Co. (https://www.amazon.com/Algorithms-Live-Computer-Science-Decisions/dp/1627790365)
#
# 4. <NAME>, <NAME>, <NAME>, <NAME> (Batu), <NAME>, <NAME>, and <NAME>. (2021) Computational Thinking and Data Science: A WebBook to Accompany ENGR 1330 at TTU, Whitacre College of Engineering, DOI (pending)[https://3.137.111.182/engr-1330-webroot/engr-1330-webbook/ctds-psuedocourse/site/](https://3.137.111.182/engr-1330-webroot/engr-1330-webbook/ctds-psuedocourse/site/)
#
# ## Readings
#
# 1. Learn Python in One Day and Learn It Well. Python for Beginners with Hands-on Project. (Learn Coding Fast with Hands-On Project Book -- Kindle Edition by LCF Publishing (Author), <NAME> [https://www.amazon.com/Python-2nd-Beginners-Hands-Project-ebook/dp/B071Z2Q6TQ/ref=sr_1_3?dchild=1&keywords=learn+python+in+a+day&qid=1611108340&sr=8-3](https://www.amazon.com/Python-2nd-Beginners-Hands-Project-ebook/dp/B071Z2Q6TQ/ref=sr_1_3?dchild=1&keywords=learn+python+in+a+day&qid=1611108340&sr=8-3)
#
# 2. Learn Python the Hard Way (Online Book) (https://learnpythonthehardway.org/book/) Recommended for beginners who want a complete course in programming with Python.
#
# 3. How to Learn Python for Data Science, The Self-Starter Way (https://elitedatascience.com/learn-python-for-data-science)
#
# 4. Flowcharts (QA/QC Perspective) [https://asq.org/quality-resources/flowchart](https://asq.org/quality-resources/flowchart)
#
# 5. Flowcharts - Wikipedia [https://en.wikipedia.org/wiki/Flowchart](https://en.wikipedia.org/wiki/Flowchart)
#
# 6. Psuedocode - Wikipedia [https://en.wikipedia.org/wiki/Pseudocode](https://en.wikipedia.org/wiki/Pseudocode)
| docs/1-programming/9-sequentialstructures/.ipynb_checkpoints/sequentialstructures-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: TensorFlow 2.3 on Python 3.6 (CUDA 10.1)
# language: python
# name: python3
# ---
# **도구 - 넘파이(NumPy)**
#
# *넘파이(NumPy)는 파이썬의 과학 컴퓨팅을 위한 기본 라이브러리입니다. 넘파이의 핵심은 강력한 N-차원 배열 객체입니다. 또한 선형 대수, 푸리에(Fourier) 변환, 유사 난수 생성과 같은 유용한 함수들도 제공합니다.*
#
# # 배열 생성
# `numpy`를 임포트해 보죠. 대부분의 사람들이 `np`로 알리아싱하여 임포트합니다:
import numpy as np
# ## `np.zeros`
# `zeros` 함수는 0으로 채워진 배열을 만듭니다:
np.zeros(5)
# 2D 배열(즉, 행렬)을 만들려면 원하는 행과 열의 크기를 튜플로 전달합니다. 예를 들어 다음은 $3 \times 4$ 크기의 행렬입니다:
np.zeros((3,4))
# ## 용어
#
# * 넘파이에서 각 차원을 **축**(axis) 이라고 합니다
# * 축의 개수를 **랭크**(rank) 라고 합니다.
# * 예를 들어, 위의 $3 \times 4$ 행렬은 랭크 2인 배열입니다(즉 2차원입니다).
# * 첫 번째 축의 길이는 3이고 두 번째 축의 길이는 4입니다.
# * 배열의 축 길이를 배열의 **크기**(shape)라고 합니다.
# * 예를 들어, 위 행렬의 크기는 `(3, 4)`입니다.
# * 랭크는 크기의 길이와 같습니다.
# * 배열의 **사이즈**(size)는 전체 원소의 개수입니다. 축의 길이를 모두 곱해서 구할 수 있습니다(가령, $3 \times 4=12$).
a = np.zeros((3,4))
a
a.shape
a.ndim # len(a.shape)와 같습니다
a.size
# ## N-차원 배열
# 임의의 랭크 수를 가진 N-차원 배열을 만들 수 있습니다. 예를 들어, 다음은 크기가 `(2,3,4)`인 3D 배열(랭크=3)입니다:
np.zeros((2,3,4))
# ## 배열 타입
# 넘파이 배열의 타입은 `ndarray`입니다:
type(np.zeros((3,4)))
# ## `np.ones`
# `ndarray`를 만들 수 있는 넘파이 함수가 많습니다.
#
# 다음은 1로 채워진 $3 \times 4$ 크기의 행렬입니다:
np.ones((3,4))
# ## `np.full`
# 주어진 값으로 지정된 크기의 배열을 초기화합니다. 다음은 `π`로 채워진 $3 \times 4$ 크기의 행렬입니다.
np.full((3,4), np.pi)
# ## `np.empty`
# 초기화되지 않은 $2 \times 3$ 크기의 배열을 만듭니다(배열의 내용은 예측이 불가능하며 메모리 상황에 따라 달라집니다):
np.empty((2,3))
# ## np.array
# `array` 함수는 파이썬 리스트를 사용하여 `ndarray`를 초기화합니다:
np.array([[1,2,3,4], [10, 20, 30, 40]])
# ## `np.arange`
# 파이썬의 기본 `range` 함수와 비슷한 넘파이 `arange` 함수를 사용하여 `ndarray`를 만들 수 있습니다:
np.arange(1, 5)
# 부동 소수도 가능합니다:
np.arange(1.0, 5.0)
# 파이썬의 기본 `range` 함수처럼 건너 뛰는 정도를 지정할 수 있습니다:
np.arange(1, 5, 0.5)
# 부동 소수를 사용하면 원소의 개수가 일정하지 않을 수 있습니다. 예를 들면 다음과 같습니다:
print(np.arange(0, 5/3, 1/3)) # 부동 소수 오차 때문에, 최댓값은 4/3 또는 5/3이 됩니다.
print(np.arange(0, 5/3, 0.333333333))
print(np.arange(0, 5/3, 0.333333334))
# ## `np.linspace`
# 이런 이유로 부동 소수를 사용할 땐 `arange` 대신에 `linspace` 함수를 사용하는 것이 좋습니다. `linspace` 함수는 지정된 개수만큼 두 값 사이를 나눈 배열을 반환합니다(`arange`와는 다르게 최댓값이 **포함**됩니다):
print(np.linspace(0, 5/3, 6))
# ## `np.rand`와 `np.randn`
# 넘파이의 `random` 모듈에는 `ndarray`를 랜덤한 값으로 초기화할 수 있는 함수들이 많이 있습니다.
# 예를 들어, 다음은 (균등 분포인) 0과 1사이의 랜덤한 부동 소수로 $3 \times 4$ 행렬을 초기화합니다:
np.random.rand(3,4)
# 다음은 평균이 0이고 분산이 1인 일변량 [정규 분포](https://ko.wikipedia.org/wiki/%EC%A0%95%EA%B7%9C_%EB%B6%84%ED%8F%AC)(가우시안 분포)에서 샘플링한 랜덤한 부동 소수를 담은 $3 \times 4$ 행렬입니다:
np.random.randn(3,4)
# 이 분포의 모양을 알려면 맷플롯립을 사용해 그려보는 것이 좋습니다(더 자세한 것은 [맷플롯립 튜토리얼](tools_matplotlib.ipynb)을 참고하세요):
# %matplotlib inline
import matplotlib.pyplot as plt
plt.hist(np.random.rand(100000), density=True, bins=100, histtype="step", color="blue", label="rand")
plt.hist(np.random.randn(100000), density=True, bins=100, histtype="step", color="red", label="randn")
plt.axis([-2.5, 2.5, 0, 1.1])
plt.legend(loc = "upper left")
plt.title("Random distributions")
plt.xlabel("Value")
plt.ylabel("Density")
plt.show()
# ## np.fromfunction
# 함수를 사용하여 `ndarray`를 초기화할 수도 있습니다:
# +
def my_function(z, y, x):
    """Elementwise value used with np.fromfunction: product of the last two
    index grids plus the first, i.e. x * y + z. Called once with full index
    arrays (not per element), so it works on scalars and ndarrays alike."""
    product = y * x
    return product + z
np.fromfunction(my_function, (3, 2, 10))
# -
# 넘파이는 먼저 크기가 `(3, 2, 10)`인 세 개의 `ndarray`(차원마다 하나씩)를 만듭니다. 각 배열은 축을 따라 좌표 값과 같은 값을 가집니다. 예를 들어, `z` 축에 있는 배열의 모든 원소는 z-축의 값과 같습니다:
#
# [[[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
# [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
#
# [[ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
# [ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]]
#
# [[ 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]
# [ 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]]]
#
# 위의 식 `x * y + z`에서 x, y, z는 사실 `ndarray`입니다(배열의 산술 연산에 대해서는 아래에서 설명합니다). 중요한 점은 함수 `my_function`이 원소마다 호출되는 것이 아니고 딱 **한 번** 호출된다는 점입니다. 그래서 매우 효율적으로 초기화할 수 있습니다.
# # 배열 데이터
# ## `dtype`
# 넘파이의 `ndarray`는 모든 원소가 동일한 타입(보통 숫자)을 가지기 때문에 효율적입니다. `dtype` 속성으로 쉽게 데이터 타입을 확인할 수 있습니다:
c = np.arange(1, 5)
print(c.dtype, c)
c = np.arange(1.0, 5.0)
print(c.dtype, c)
# 넘파이가 데이터 타입을 결정하도록 내버려 두는 대신 `dtype` 매개변수를 사용해서 배열을 만들 때 명시적으로 지정할 수 있습니다:
d = np.arange(1, 5, dtype=np.complex64)
print(d.dtype, d)
# 가능한 데이터 타입은 `int8`, `int16`, `int32`, `int64`, `uint8`|`16`|`32`|`64`, `float16`|`32`|`64`, `complex64`|`128`가 있습니다. 전체 리스트는 [온라인 문서](http://docs.scipy.org/doc/numpy/user/basics.types.html)를 참고하세요.
#
# ## `itemsize`
# `itemsize` 속성은 각 아이템의 크기(바이트)를 반환합니다:
e = np.arange(1, 5, dtype=np.complex64)
e.itemsize
# ## `data` 버퍼
# 배열의 데이터는 1차원 바이트 버퍼로 메모리에 저장됩니다. `data` 속성을 사용해 참조할 수 있습니다(사용할 일은 거의 없겠지만요).
f = np.array([[1,2],[1000, 2000]], dtype=np.int32)
f.data
# 파이썬 2에서는 `f.data`가 버퍼이고 파이썬 3에서는 memoryview입니다.
# +
if (hasattr(f.data, "tobytes")):
data_bytes = f.data.tobytes() # python 3
else:
data_bytes = memoryview(f.data).tobytes() # python 2
data_bytes
# -
# 여러 개의 `ndarray`가 데이터 버퍼를 공유할 수 있습니다. 하나를 수정하면 다른 것도 바뀝니다. 잠시 후에 예를 살펴 보겠습니다.
# # 배열 크기 변경
#
# ## 자신을 변경
#
# `ndarray`의 `shape` 속성을 지정하면 간단히 크기를 바꿀 수 있습니다. 배열의 원소 개수는 동일하게 유지됩니다.
g = np.arange(24)
print(g)
print("랭크:", g.ndim)
g.shape = (6, 4)
print(g)
print("랭크:", g.ndim)
g.shape = (2, 3, 4)
print(g)
print("랭크:", g.ndim)
# ## `reshape`
#
# `reshape` 함수는 동일한 데이터를 가리키는 새로운 `ndarray` 객체를 반환합니다. 한 배열을 수정하면 다른 것도 함께 바뀝니다.
g2 = g.reshape(4,6)
print(g2)
print("랭크:", g2.ndim)
# 행 1, 열 2의 원소를 999로 설정합니다(인덱싱 방식은 아래를 참고하세요).
g2[1, 2] = 999
g2
# 이에 상응하는 `g`의 원소도 수정됩니다.
g
# ## `ravel`
#
# 마지막으로 `ravel` 함수는 동일한 데이터를 가리키는 새로운 1차원 `ndarray`를 반환합니다:
g.ravel()
# # 산술 연산
#
# 일반적인 산술 연산자(`+`, `-`, `*`, `/`, `//`, `**` 등)는 모두 `ndarray`와 사용할 수 있습니다. 이 연산자는 원소별로 적용됩니다:
a = np.array([14, 23, 32, 41])
b = np.array([5, 4, 3, 2])
print("a + b =", a + b)
print("a - b =", a - b)
print("a * b =", a * b)
print("a / b =", a / b)
print("a // b =", a // b)
print("a % b =", a % b)
print("a ** b =", a ** b)
# 여기 곱셈은 행렬 곱셈이 아닙니다. 행렬 연산은 아래에서 설명합니다.
#
# 배열의 크기는 같아야 합니다. 그렇지 않으면 넘파이가 브로드캐스팅 규칙을 적용합니다.
# # 브로드캐스팅
# 일반적으로 넘파이는 동일한 크기의 배열을 기대합니다. 그렇지 않은 상황에는 브로드캐시틍 규칙을 적용합니다:
#
# ## 규칙 1
#
# 배열의 랭크가 동일하지 않으면 랭크가 맞을 때까지 랭크가 작은 배열 앞에 1을 추가합니다.
h = np.arange(5).reshape(1, 1, 5)
h
# 여기에 `(1,1,5)` 크기의 3D 배열에 `(5,)` 크기의 1D 배열을 더해 보죠. 브로드캐스팅의 규칙 1이 적용됩니다!
h + [10, 20, 30, 40, 50] # 다음과 동일합니다: h + [[[10, 20, 30, 40, 50]]]
# ## 규칙 2
#
# 특정 차원이 1인 배열은 그 차원에서 크기가 가장 큰 배열의 크기에 맞춰 동작합니다. 배열의 원소가 차원을 따라 반복됩니다.
k = np.arange(6).reshape(2, 3)
k
# `(2,3)` 크기의 2D `ndarray`에 `(2,1)` 크기의 2D 배열을 더해 보죠. 넘파이는 브로드캐스팅 규칙 2를 적용합니다:
k + [[100], [200]] # 다음과 같습니다: k + [[100, 100, 100], [200, 200, 200]]
# 규칙 1과 2를 합치면 다음과 같이 동작합니다:
k + [100, 200, 300] # 규칙 1 적용: [[100, 200, 300]], 규칙 2 적용: [[100, 200, 300], [100, 200, 300]]
# 또 매우 간단히 다음 처럼 해도 됩니다:
k + 1000 # 다음과 같습니다: k + [[1000, 1000, 1000], [1000, 1000, 1000]]
# ## 규칙 3
#
# 규칙 1 & 2을 적용했을 때 모든 배열의 크기가 맞아야 합니다.
try:
k + [33, 44]
except ValueError as e:
print(e)
# 브로드캐스팅 규칙은 산술 연산 뿐만 아니라 넘파이 연산에서 많이 사용됩니다. 아래에서 더 보도록 하죠. 브로드캐스팅에 관한 더 자세한 정보는 [온라인 문서](https://docs.scipy.org/doc/numpy-dev/user/basics.broadcasting.html)를 참고하세요.
# ## 업캐스팅
#
# `dtype`이 다른 배열을 합칠 때 넘파이는 (실제 값에 상관없이) 모든 값을 다룰 수 있는 타입으로 업캐스팅합니다.
k1 = np.arange(0, 5, dtype=np.uint8)
print(k1.dtype, k1)
k2 = k1 + np.array([5, 6, 7, 8, 9], dtype=np.int8)
print(k2.dtype, k2)
# 모든 `int8`과 `uint8` 값(-128에서 255까지)을 표현하기 위해 `int16`이 필요합니다. 이 코드에서는 `uint8`이면 충분하지만 업캐스팅되었습니다.
k3 = k1 + 1.5
print(k3.dtype, k3)
# # 조건 연산자
# 조건 연산자도 원소별로 적용됩니다:
m = np.array([20, -5, 30, 40])
m < [15, 16, 35, 36]
# 브로드캐스팅을 사용합니다:
m < 25 # m < [25, 25, 25, 25] 와 동일
# 불리언 인덱싱과 함께 사용하면 아주 유용합니다(아래에서 설명하겠습니다).
m[m < 25]
# # 수학 함수와 통계 함수
# `ndarray`에서 사용할 수 있는 수학 함수와 통계 함수가 많습니다.
#
# ## `ndarray` 메서드
#
# 일부 함수는 `ndarray` 메서드로 제공됩니다. 예를 들면:
a = np.array([[-2.5, 3.1, 7], [10, 11, 12]])
print(a)
print("평균 =", a.mean())
# 이 명령은 크기에 상관없이 `ndarray`에 있는 모든 원소의 평균을 계산합니다.
#
# 다음은 유용한 `ndarray` 메서드입니다:
for func in (a.min, a.max, a.sum, a.prod, a.std, a.var):
print(func.__name__, "=", func())
# 이 함수들은 선택적으로 매개변수 `axis`를 사용합니다. 지정된 축을 따라 원소에 연산을 적용하는데 사용합니다. 예를 들면:
c=np.arange(24).reshape(2,3,4)
c
c.sum(axis=0) # 첫 번째 축을 따라 더함, 결과는 3x4 배열
c.sum(axis=1) # 두 번째 축을 따라 더함, 결과는 2x4 배열
# 여러 축에 대해서 더할 수도 있습니다:
c.sum(axis=(0,2)) # 첫 번째 축과 세 번째 축을 따라 더함, 결과는 (3,) 배열
0+1+2+3 + 12+13+14+15, 4+5+6+7 + 16+17+18+19, 8+9+10+11 + 20+21+22+23
# ## 일반 함수
#
# 넘파이는 일반 함수(universal function) 또는 **ufunc**라고 부르는 원소별 함수를 제공합니다. 예를 들면 `square` 함수는 원본 `ndarray`를 복사하여 각 원소를 제곱한 새로운 `ndarray` 객체를 반환합니다:
a = np.array([[-2.5, 3.1, 7], [10, 11, 12]])
np.square(a)
# 다음은 유용한 단항 일반 함수들입니다:
print("원본 ndarray")
print(a)
for func in (np.abs, np.sqrt, np.exp, np.log, np.sign, np.ceil, np.modf, np.isnan, np.cos):
print("\n", func.__name__)
print(func(a))
# ## 이항 일반 함수
#
# 두 개의 `ndarray`에 원소별로 적용되는 이항 함수도 많습니다. 두 배열이 동일한 크기가 아니면 브로드캐스팅 규칙이 적용됩니다:
a = np.array([1, -2, 3, 4])
b = np.array([2, 8, -1, 7])
np.add(a, b) # a + b 와 동일
np.greater(a, b) # a > b 와 동일
np.maximum(a, b)
np.copysign(a, b)
# # 배열 인덱싱
#
# ## 1차원 배열
#
# 1차원 넘파이 배열은 보통의 파이썬 배열과 비슷하게 사용할 수 있습니다:
a = np.array([1, 5, 3, 19, 13, 7, 3])
a[3]
a[2:5]
a[2:-1]
a[:2]
a[2::2]
a[::-1]
# 물론 원소를 수정할 수 있죠:
a[3]=999
a
# 슬라이싱을 사용해 `ndarray`를 수정할 수 있습니다:
a[2:5] = [997, 998, 999]
a
# ## 보통의 파이썬 배열과 차이점
#
# 보통의 파이썬 배열과 대조적으로 `ndarray` 슬라이싱에 하나의 값을 할당하면 슬라이싱 전체에 복사됩니다. 위에서 언급한 브로드캐스팅 덕택입니다.
a[2:5] = -1
a
# 또한 이런 식으로 `ndarray` 크기를 늘리거나 줄일 수 없습니다:
try:
a[2:5] = [1,2,3,4,5,6] # 너무 길어요
except ValueError as e:
print(e)
# 원소를 삭제할 수도 없습니다:
try:
del a[2:5]
except ValueError as e:
print(e)
# 중요한 점은 `ndarray`의 슬라이싱은 같은 데이터 버퍼를 바라보는 뷰(view)입니다. 슬라이싱된 객체를 수정하면 실제 원본 `ndarray`가 수정됩니다!
a_slice = a[2:6]
a_slice[1] = 1000
a # 원본 배열이 수정됩니다!
a[3] = 2000
a_slice # 비슷하게 원본 배열을 수정하면 슬라이싱 객체에도 반영됩니다!
# 데이터를 복사하려면 `copy` 메서드를 사용해야 합니다:
another_slice = a[2:6].copy()
another_slice[1] = 3000
a # 원본 배열이 수정되지 않습니다
a[3] = 4000
another_slice # 마찬가지로 원본 배열을 수정해도 복사된 배열은 바뀌지 않습니다
# ## 다차원 배열
#
# 다차원 배열은 비슷한 방식으로 각 축을 따라 인덱싱 또는 슬라이싱해서 사용합니다. 콤마로 구분합니다:
b = np.arange(48).reshape(4, 12)
b
b[1, 2] # 행 1, 열 2
b[1, :] # 행 1, 모든 열
b[:, 1] # 모든 행, 열 1
# **주의**: 다음 두 표현에는 미묘한 차이가 있습니다:
b[1, :]
b[1:2, :]
# 첫 번째 표현식은 `(12,)` 크기인 1D 배열로 행이 하나입니다. 두 번째는 `(1, 12)` 크기인 2D 배열로 같은 행을 반환합니다.
# ## 팬시 인덱싱(Fancy indexing)
#
# 관심 대상의 인덱스 리스트를 지정할 수도 있습니다. 이를 팬시 인덱싱이라고 부릅니다.
b[(0,2), 2:5] # 행 0과 2, 열 2에서 4(5-1)까지
b[:, (-1, 2, -1)] # 모든 행, 열 -1 (마지막), 2와 -1 (다시 반대 방향으로)
# 여러 개의 인덱스 리스트를 지정하면 인덱스에 맞는 값이 포함된 1D `ndarray`를 반환됩니다.
b[(-1, 2, -1, 2), (5, 9, 1, 9)] # returns a 1D array with b[-1, 5], b[2, 9], b[-1, 1] and b[2, 9] (again)
# ## 고차원
#
# 고차원에서도 동일한 방식이 적용됩니다. 몇 가지 예를 살펴 보겠습니다:
c = b.reshape(4,2,6)
c
c[2, 1, 4] # 행렬 2, 행 1, 열 4
c[2, :, 3] # 행렬 2, 모든 행, 열 3
# 어떤 축에 대한 인덱스를 지정하지 않으면 이 축의 모든 원소가 반환됩니다:
c[2, 1] # 행렬 2, 행 1, 모든 열이 반환됩니다. c[2, 1, :]와 동일합니다.
# ## 생략 부호 (`...`)
#
# 생략 부호(`...`)를 쓰면 모든 지정하지 않은 축의 원소를 포함합니다.
c[2, ...] # 행렬 2, 모든 행, 모든 열. c[2, :, :]와 동일
c[2, 1, ...] # 행렬 2, 행 1, 모든 열. c[2, 1, :]와 동일
c[2, ..., 3] # 행렬 2, 모든 행, 열 3. c[2, :, 3]와 동일
c[..., 3] # 모든 행렬, 모든 행, 열 3. c[:, :, 3]와 동일
# ## 불리언 인덱싱
#
# 불리언 값을 가진 `ndarray`를 사용해 축의 인덱스를 지정할 수 있습니다.
b = np.arange(48).reshape(4, 12)
b
rows_on = np.array([True, False, True, False])
b[rows_on, :] # 행 0과 2, 모든 열. b[(0, 2), :]와 동일
cols_on = np.array([False, True, False] * 4)
b[:, cols_on] # 모든 행, 열 1, 4, 7, 10
# ## `np.ix_`
#
# 여러 축에 걸쳐서는 불리언 인덱싱을 사용할 수 없고 `ix_` 함수를 사용합니다:
b[np.ix_(rows_on, cols_on)]
np.ix_(rows_on, cols_on)
# `ndarray`와 같은 크기의 불리언 배열을 사용하면 해당 위치가 `True`인 모든 원소를 담은 1D 배열이 반환됩니다. 일반적으로 조건 연산자와 함께 사용합니다:
b[b % 3 == 1]
# # 반복
#
# `ndarray`를 반복하는 것은 일반적인 파이썬 배열을 반복한는 것과 매우 유사합니다. 다차원 배열을 반복하면 첫 번째 축에 대해서 수행됩니다.
c = np.arange(24).reshape(2, 3, 4) # 3D 배열 (두 개의 3x4 행렬로 구성됨)
c
for m in c:
print("아이템:")
print(m)
for i in range(len(c)): # len(c) == c.shape[0]
print("아이템:")
print(c[i])
# `ndarray`에 있는 모든 원소를 반복하려면 `flat` 속성을 사용합니다:
for i in c.flat:
print("아이템:", i)
# # 배열 쌓기
#
# 종종 다른 배열을 쌓아야 할 때가 있습니다. 넘파이는 이를 위해 몇 개의 함수를 제공합니다. 먼저 배열 몇 개를 만들어 보죠.
q1 = np.full((3,4), 1.0)
q1
q2 = np.full((4,4), 2.0)
q2
q3 = np.full((3,4), 3.0)
q3
# ## `vstack`
#
# `vstack` 함수를 사용하여 수직으로 쌓아보죠:
q4 = np.vstack((q1, q2, q3))
q4
q4.shape
# q1, q2, q3가 모두 같은 크기이므로 가능합니다(수직으로 쌓기 때문에 수직 축은 크기가 달라도 됩니다).
#
# ## `hstack`
#
# `hstack`을 사용해 수평으로도 쌓을 수 있습니다:
q5 = np.hstack((q1, q3))
q5
q5.shape
# q1과 q3가 모두 3개의 행을 가지고 있기 때문에 가능합니다. q2는 4개의 행을 가지고 있기 때문에 q1, q3와 수평으로 쌓을 수 없습니다:
try:
q5 = np.hstack((q1, q2, q3))
except ValueError as e:
print(e)
# ## `concatenate`
#
# `concatenate` 함수는 지정한 축으로도 배열을 쌓습니다.
q7 = np.concatenate((q1, q2, q3), axis=0) # vstack과 동일
q7
q7.shape
# 예상했겠지만 `hstack`은 `axis=1`으로 `concatenate`를 호출하는 것과 같습니다.
# ## `stack`
#
# `stack` 함수는 새로운 축을 따라 배열을 쌓습니다. 모든 배열은 같은 크기를 가져야 합니다.
q8 = np.stack((q1, q3))
q8
q8.shape
# # 배열 분할
#
# 분할은 쌓기의 반대입니다. 예를 들어 `vsplit` 함수는 행렬을 수직으로 분할합니다.
#
# 먼저 6x4 행렬을 만들어 보죠:
r = np.arange(24).reshape(6,4)
r
# 수직으로 동일한 크기로 나누어 보겠습니다:
r1, r2, r3 = np.vsplit(r, 3)
r1
r2
r3
# `split` 함수는 주어진 축을 따라 배열을 분할합니다. `vsplit`는 `axis=0`으로 `split`를 호출하는 것과 같습니다. `hsplit` 함수는 `axis=1`로 `split`를 호출하는 것과 같습니다:
r4, r5 = np.hsplit(r, 2)
r4
r5
# # 배열 전치
#
# `transpose` 메서드는 주어진 순서대로 축을 뒤바꾸어 `ndarray` 데이터에 대한 새로운 뷰를 만듭니다.
#
# 예를 위해 3D 배열을 만들어 보죠:
t = np.arange(24).reshape(4,2,3)
t
# `0, 1, 2`(깊이, 높이, 너비) 축을 `1, 2, 0` (깊이→너비, 높이→깊이, 너비→높이) 순서로 바꾼 `ndarray`를 만들어 보겠습니다:
t1 = t.transpose((1,2,0))
t1
t1.shape
# `transpose` 기본값은 차원의 순서를 역전시킵니다:
t2 = t.transpose() # t.transpose((2, 1, 0))와 동일
t2
t2.shape
# 넘파이는 두 축을 바꾸는 `swapaxes` 함수를 제공합니다. 예를 들어 깊이와 높이를 뒤바꾸어 `t`의 새로운 뷰를 만들어 보죠:
t3 = t.swapaxes(0,1) # t.transpose((1, 0, 2))와 동일
t3
t3.shape
# # 선형 대수학
#
# 넘파이 2D 배열을 사용하면 파이썬에서 행렬을 효율적으로 표현할 수 있습니다. 주요 행렬 연산을 간단히 둘러 보겠습니다. 선형 대수학, 벡터와 행렬에 관한 자세한 내용은 [Linear Algebra tutorial](math_linear_algebra.ipynb)를 참고하세요.
#
# ## 행렬 전치
#
# `T` 속성은 랭크가 2보다 크거나 같을 때 `transpose()`를 호출하는 것과 같습니다:
m1 = np.arange(10).reshape(2,5)
m1
m1.T
# `T` 속성은 랭크가 0이거나 1인 배열에는 아무런 영향을 미치지 않습니다:
m2 = np.arange(5)
m2
m2.T
# 먼저 1D 배열을 하나의 행이 있는 행렬(2D)로 바꾼다음 전치를 수행할 수 있습니다:
m2r = m2.reshape(1,5)
m2r
m2r.T
# ## 행렬 곱셈
#
# 두 개의 행렬을 만들어 `dot` 메서드로 행렬 [곱셈](https://ko.wikipedia.org/wiki/%ED%96%89%EB%A0%AC_%EA%B3%B1%EC%85%88)을 실행해 보죠.
n1 = np.arange(10).reshape(2, 5)
n1
n2 = np.arange(15).reshape(5,3)
n2
n1.dot(n2)
# **주의**: 앞서 언급한 것처럼 `n1*n2`는 행렬 곱셈이 아니라 원소별 곱셈(또는 [아다마르 곱](https://ko.wikipedia.org/wiki/%EC%95%84%EB%8B%A4%EB%A7%88%EB%A5%B4_%EA%B3%B1)이라 부릅니다)입니다.
# ## 역행렬과 유사 역행렬
#
# `numpy.linalg` 모듈 안에 많은 선형 대수 함수들이 있습니다. 특히 `inv` 함수는 정방 행렬의 역행렬을 계산합니다:
# +
import numpy.linalg as linalg
m3 = np.array([[1,2,3],[5,7,11],[21,29,31]])
m3
# -
linalg.inv(m3)
# `pinv` 함수를 사용하여 [유사 역행렬](https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_pseudoinverse)을 계산할 수도 있습니다:
linalg.pinv(m3)
# ## 단위 행렬
#
# 행렬과 그 행렬의 역행렬을 곱하면 단위 행렬이 됩니다(작은 소숫점 오차가 있습니다):
m3.dot(linalg.inv(m3))
# `eye` 함수는 NxN 크기의 단위 행렬을 만듭니다:
np.eye(3)
# ## QR 분해
#
# `qr` 함수는 행렬을 [QR 분해](https://en.wikipedia.org/wiki/QR_decomposition)합니다:
q, r = linalg.qr(m3)
q
r
q.dot(r) # q.r는 m3와 같습니다
# ## 행렬식
#
# `det` 함수는 [행렬식](https://en.wikipedia.org/wiki/Determinant)을 계산합니다:
linalg.det(m3) # 행렬식 계산
# ## 고윳값과 고유벡터
#
# `eig` 함수는 정방 행렬의 [고윳값과 고유벡터](https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors)를 계산합니다:
eigenvalues, eigenvectors = linalg.eig(m3)
eigenvalues # λ
eigenvectors # v
m3.dot(eigenvectors) - eigenvalues * eigenvectors # m3.v - λ*v = 0
# ## 특잇값 분해
#
# `svd` 함수는 행렬을 입력으로 받아 그 행렬의 [특잇값 분해](https://en.wikipedia.org/wiki/Singular_value_decomposition)를 반환합니다:
m4 = np.array([[1,0,0,0,2], [0,0,3,0,0], [0,0,0,0,0], [0,2,0,0,0]])
m4
U, S_diag, V = linalg.svd(m4)
U
S_diag
# `svd` 함수는 Σ의 대각 원소 값만 반환합니다. 전체 Σ 행렬은 다음과 같이 만듭니다:
S = np.zeros((4, 5))
S[np.diag_indices(4)] = S_diag
S # Σ
V
U.dot(S).dot(V) # U.Σ.V == m4
# ## 대각원소와 대각합
np.diag(m3) # m3의 대각 원소입니다(왼쪽 위에서 오른쪽 아래)
np.trace(m3) # np.diag(m3).sum()와 같습니다
# ## 선형 방정식 풀기
# `solve` 함수는 다음과 같은 선형 방정식을 풉니다:
#
# * $2x + 6y = 6$
# * $5x + 3y = -9$
coeffs = np.array([[2, 6], [5, 3]])
depvars = np.array([6, -9])
solution = linalg.solve(coeffs, depvars)
solution
# solution을 확인해 보죠:
coeffs.dot(solution), depvars # 네 같네요
# 좋습니다! 다른 방식으로도 solution을 확인해 보죠:
np.allclose(coeffs.dot(solution), depvars)
# # 벡터화
#
# 한 번에 하나씩 개별 배열 원소에 대해 연산을 실행하는 대신 배열 연산을 사용하면 훨씬 효율적인 코드를 만들 수 있습니다. 이를 벡터화라고 합니다. 이를 사용하여 넘파이의 최적화된 성능을 활용할 수 있습니다.
#
# 예를 들어, $sin(xy/40.5)$ 식을 기반으로 768x1024 크기 배열을 생성하려고 합니다. 중첩 반복문 안에 파이썬의 math 함수를 사용하는 것은 **나쁜** 방법입니다:
import math
data = np.empty((768, 1024))
for y in range(768):
for x in range(1024):
data[y, x] = math.sin(x*y/40.5) # 매우 비효율적입니다!
# 작동은 하지만 순수한 파이썬 코드로 반복문이 진행되기 때문에 아주 비효율적입니다. 이 알고리즘을 벡터화해 보죠. 먼저 넘파이 `meshgrid` 함수로 좌표 벡터를 사용해 행렬을 만듭니다.
x_coords = np.arange(0, 1024) # [0, 1, 2, ..., 1023]
y_coords = np.arange(0, 768) # [0, 1, 2, ..., 767]
X, Y = np.meshgrid(x_coords, y_coords)
X
Y
# 여기서 볼 수 있듯이 `X`와 `Y` 모두 768x1024 배열입니다. `X`에 있는 모든 값은 수평 좌표에 해당합니다. `Y`에 있는 모든 값은 수직 좌표에 해당합니다.
#
# 이제 간단히 배열 연산을 사용해 계산할 수 있습니다:
data = np.sin(X*Y/40.5)
# 맷플롯립의 `imshow` 함수를 사용해 이 데이터를 그려보죠([matplotlib tutorial](tools_matplotlib.ipynb)을 참조하세요).
import matplotlib.pyplot as plt
import matplotlib.cm as cm
fig = plt.figure(1, figsize=(7, 6))
plt.imshow(data, cmap=cm.hot)
plt.show()
# # 저장과 로딩
#
# 넘파이는 `ndarray`를 바이너리 또는 텍스트 포맷으로 손쉽게 저장하고 로드할 수 있습니다.
#
# ## 바이너리 `.npy` 포맷
#
# 랜덤 배열을 만들고 저장해 보죠.
a = np.random.rand(2,3)
a
np.save("my_array", a)
# 끝입니다! 파일 이름의 확장자를 지정하지 않았기 때문에 넘파이는 자동으로 `.npy`를 붙입니다. 파일 내용을 확인해 보겠습니다:
# +
with open("my_array.npy", "rb") as f:
content = f.read()
content
# -
# 이 파일을 넘파이 배열로 로드하려면 `load` 함수를 사용합니다:
a_loaded = np.load("my_array.npy")
a_loaded
# ## 텍스트 포맷
#
# 배열을 텍스트 포맷으로 저장해 보죠:
np.savetxt("my_array.csv", a)
# 파일 내용을 확인해 보겠습니다:
with open("my_array.csv", "rt") as f:
print(f.read())
# 이 파일은 공백으로 구분된 CSV 파일입니다(`savetxt`의 기본 구분자는 공백입니다). 다른 구분자를 지정할 수도 있습니다:
np.savetxt("my_array.csv", a, delimiter=",")
# 이 파일을 로드하려면 `loadtxt` 함수를 사용합니다:
a_loaded = np.loadtxt("my_array.csv", delimiter=",")
a_loaded
# ## 압축된 `.npz` 포맷
#
# 여러 개의 배열을 압축된 한 파일로 저장하는 것도 가능합니다:
b = np.arange(24, dtype=np.uint8).reshape(2, 3, 4)
b
np.savez("my_arrays", my_a=a, my_b=b)
# 파일 내용을 확인해 보죠. `.npz` 파일 확장자가 자동으로 추가되었습니다.
# +
with open("my_arrays.npz", "rb") as f:
content = f.read()
repr(content)[:180] + "[...]"
# -
# 다음과 같이 이 파일을 로드할 수 있습니다:
my_arrays = np.load("my_arrays.npz")
my_arrays
# 게으른 로딩을 수행하는 딕셔너리와 유사한 객체입니다:
my_arrays.keys()
my_arrays["my_a"]
# # 그 다음은?
#
# 넘파이 기본 요소를 모두 배웠지만 훨씬 더 많은 기능이 있습니다. 이를 배우는 가장 좋은 방법은 넘파이를 직접 실습해 보고 훌륭한 [넘파이 문서](http://docs.scipy.org/doc/numpy/reference/index.html)에서 필요한 함수와 기능을 찾아 보세요.
| tools_numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=[]
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# -
# Plot parameters
sns.set()
# %pylab inline
pylab.rcParams['figure.figsize'] = (4, 4)
# + tags=[]
# Avoid inaccurate floating values (for inverse matrices in dot product for instance)
# See https://stackoverflow.com/questions/24537791/numpy-matrix-inversion-rounding-errors
np.set_printoptions(suppress=True)
# + language="html"
# <style>
# .pquote {
# text-align: left;
# margin: 40px 0 40px auto;
# width: 70%;
# font-size: 1.5em;
# font-style: italic;
# display: block;
# line-height: 1.3em;
# color: #5a75a7;
# font-weight: 600;
# border-left: 5px solid rgba(90, 117, 167, .1);
# padding-left: 6px;
# }
# .notes {
# font-style: italic;
# display: block;
# margin: 40px 10%;
# }
# img + em {
# text-align: center;
# display: block;
# color: gray;
# font-size: 0.9em;
# font-weight: 600;
# }
# </style>
# -
# # Introduction
#
# This chapter is light but contains some important definitions. The identity matrix or the inverse of a matrix are concepts that will be very useful in the next chapters. We will see at the end of this chapter that we can solve systems of linear equations by using the inverse matrix. So hang on!
# # 2.3 Identity and Inverse Matrices
#
#
# # Identity matrices
#
# The identity matrix $\bs{I}_n$ is a special matrix of shape ($n \times n$) that is filled with $0$ except the diagonal that is filled with 1.
#
# <img src="images/identity-matrix.png" width="150" alt="Example of an identity matrix" title="Identity matrix">
# <em>A 3 by 3 identity matrix</em>
# An identity matrix can be created with the Numpy function `eye()`:
np.eye(3)
np.identity(3)
# When we *apply* the identity matrix to a vector, the result is this same vector:
#
# $$\bs{I}_n\bs{x} = \bs{x}$$
#
# ### Example 1.
#
# $$
# \begin{bmatrix}
# 1 & 0 & 0 \\\\
# 0 & 1 & 0 \\\\
# 0 & 0 & 1
# \end{bmatrix}
# \times
# \begin{bmatrix}
# x_{1} \\\\
# x_{2} \\\\
# x_{3}
# \end{bmatrix}=
# \begin{bmatrix}
# 1 \times x_1 + 0 \times x_2 + 0\times x_3 \\\\
# 0 \times x_1 + 1 \times x_2 + 0\times x_3 \\\\
# 0 \times x_1 + 0 \times x_2 + 1\times x_3
# \end{bmatrix}=
# \begin{bmatrix}
# x_{1} \\\\
# x_{2} \\\\
# x_{3}
# \end{bmatrix}
# $$
x = np.array([[2], [6], [3]])
x
x.shape
xid = np.eye(x.shape[0]).dot(x)
xid
# ## Intuition
#
# You can think of a matrix as a way to transform objects in a $n$-dimensional space. It applies a linear transformation of the space. We can say that we *apply* a matrix to an element: this means that we do the dot product between this matrix and the element (more details about the dot product in [2.2](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-2.2-Multiplying-Matrices-and-Vectors/)). We will see this notion thoroughly in the next chapters but the identity matrix is a good first example. It is a particular example because the space doesn't change when we *apply* the identity matrix to it.
#
# <span class='pquote'>
# The space doesn't change when we *apply* the identity matrix to it
# </span>
#
# We saw that $\bs{x}$ was not altered after being multiplied by $\bs{I}$.
# # Inverse Matrices
#
# The matrix inverse of $\bs{A}$ is denoted $\bs{A}^{-1}$. It is the matrix that results in the identity matrix when it is multiplied by $\bs{A}$:
#
# $$\bs{A}^{-1}\bs{A}=\bs{I}_n$$
#
# This means that if we apply a linear transformation to the space with $\bs{A}$, it is possible to go back with $\bs{A}^{-1}$. It provides a way to cancel the transformation.
#
# ### Example 2.
#
# $$
# \bs{A}=\begin{bmatrix}
# 3 & 0 & 2 \\\\
# 2 & 0 & -2 \\\\
# 0 & 1 & 1
# \end{bmatrix}
# $$
#
# For this example, we will use the Numpy function `linalg.inv()` to calculate the inverse of $\bs{A}$. Let's start by creating $\bs{A}$:
A = np.array([[3, 0, 2], [2, 0, -2], [0, 1, 1]])
A
# Now we calculate its inverse:
A_inv = np.linalg.inv(A)
A_inv
# We can check that $\bs{A_{inv}}$ is well the inverse of $\bs{A}$ with Python:
A_bis = A_inv.dot(A)
A_bis
# We will see that inverses of matrices can be very useful, for instance to solve a set of linear equations. We must note however that non square matrices (matrices with more columns than rows or more rows than columns) don't have an inverse.
# # Solving a system of linear equations
#
# An introduction on system of linear equations.
#
# The inverse matrix can be used to solve the equation $\bs{Ax}=\bs{b}$ by adding it to each term:
#
# $$\bs{A}^{-1}\bs{Ax}=\bs{A}^{-1}\bs{b}$$
#
# Since we know by definition that $\bs{A}^{-1}\bs{A}=\bs{I}$, we have:
#
# $$\bs{I}_n\bs{x}=\bs{A}^{-1}\bs{b}$$
#
# We saw that a vector is not changed when multiplied by the identity matrix. So we can write:
#
# $$\bs{x}=\bs{A}^{-1}\bs{b}$$
#
# This is great! We can solve a set of linear equation just by computing the inverse of $\bs{A}$ and apply this matrix to the vector of results $\bs{b}$!
#
# Let's try that!
# +
## Write a Code and Shift+Enter
# -
# ### Example 3.
#
# We will take a simple solvable example:
#
# $$
# \begin{cases}
# y = 2x \\\\
# y = -x +3
# \end{cases}
# $$
#
# We will use the notation that :
#
# $$
# \begin{cases}
# A_{1,1}x_1 + A_{1,2}x_2 = b_1 \\\\
# A_{2,1}x_1 + A_{2,2}x_2= b_2
# \end{cases}
# $$
#
# Here, $x_1$ corresponds to $x$ and $x_2$ corresponds to $y$. So we have:
#
# $$
# \begin{cases}
# 2x_1 - x_2 = 0 \\\\
# x_1 + x_2= 3
# \end{cases}
# $$
#
# Our matrix $\bs{A}$ of weights is:
#
# $$
# \bs{A}=
# \begin{bmatrix}
# 2 & -1 \\\\
# 1 & 1
# \end{bmatrix}
# $$
#
# And the vector $\bs{b}$ containing the solutions of individual equations is:
#
# $$
# \bs{b}=
# \begin{bmatrix}
# 0 \\\\
# 3
# \end{bmatrix}
# $$
#
# Under the matrix form, our systems becomes:
#
# $$
# \begin{bmatrix}
# 2 & -1 \\\\
# 1 & 1
# \end{bmatrix}
# \begin{bmatrix}
# x_1 \\\\
# x_2
# \end{bmatrix}=
# \begin{bmatrix}
# 0 \\\\
# 3
# \end{bmatrix}
# $$
#
# Let's find the inverse of $\bs{A}$:
A = np.array([[2, -1], [1, 1]])
A
A_inv = np.linalg.inv(A)
A_inv
# We have also:
# + tags=[]
b = np.array([[0], [3]])
# -
# Since we saw that
#
# $$\bs{x}=\bs{A}^{-1}\bs{b}$$
#
# We have:
x = A_inv.dot(b)
x
# This is our solution!
#
# $$
# \bs{x}=
# \begin{bmatrix}
# 1 \\\\
# 2
# \end{bmatrix}
# $$
#
# This means that the point of coordinates (1, 2) is the solution and is at the intersection of the lines representing the equations. Let's plot them to check this solution:
# +
x = np.arange(-10, 10)
y = 2*x
y1 = -x + 3
plt.figure(figsize=(30,15)) # size of the output figure
plt.plot(x, y) # 2D line plot of the first equation
plt.plot(x, y1)
plt.xlim(0, 3) # limits of the x axis
plt.ylim(0, 3) # limits of the y axis
# draw axes
plt.axvline(x=0, color='grey') # vertical axis line
plt.axhline(y=0, color='grey') # horizontal axis line
plt.show() # display the image
plt.close() # close the plot
# -
# We can see that the solution (corresponding to the line crossing) is when $x=1$ and $y=2$. It confirms what we found with the matrix inversion!
# +
## Write a Code and Shift+Enter
# Plot all Q from above fig by change limites of x and y
# -
# ## BONUS: Coding tip - Draw an equation
#
# To draw the equation with Matplotlib, we first need to create a vector with all the $x$ values. Actually, since this is a line, only two points would have been sufficient. But with more complex functions, the length of the vector $x$ corresponds to the sampling rate. So here we used the Numpy function `arange()` (see the [doc](https://docs.scipy.org/doc/numpy/reference/generated/numpy.arange.html)) to create a vector from $-10$ to $10$ (not included).
np.arange(-10, 10)
# The first argument is the starting point and the second the ending point. You can add a third argument to specify the step:
np.arange(-10, 10, 2)
# Then we create a second vector $y$ that depends on the $x$ vector. Numpy will take each value of $x$ and apply the equation formula to it.
x = np.arange(-10, 10)
y = 2*x + 1
y
# Finally, you just need to plot these vectors.
## Write a Code and Shift+Enter
plt.figure(figsize=(30,15))
plt.plot(x,y)
# # Singular matrices
#
# Some matrices are not invertible. They are called **singular**.
# # Conclusion
#
# This introduces different cases according to the linear system, because $\bs{A}^{-1}$ exists only if the equation $\bs{Ax}=\bs{b}$ has one and only one solution. The next chapter is almost all about systems of linear equations and the number of solutions.
| 2.3 Identity and Inverse Matrices.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Experiments
# +
dataset = 'citeseer'
normalize_features = True
train_samples_per_class = 30
seed = 24
lr = 0.001
epochs = 1000
K = 6
dropout = 0.2
weight_decay = 5e-4
hidden = 50
embed_dim = 50
num_cluster_iter = 1
clustertemp = 70
# -
# ## Global arguments
# +
import torch
import numpy as np
import torch.optim as optim
import torch.nn as nn
from models import GCNLink, GCNClusterNet, GCNDeep, GCNDeepSigmoid, GCN, GCNLinear
from utils import make_normalized_adj, negative_sample, load_nofeatures, accuracy, calculate_accuracy
import matplotlib.pyplot as plt
# -
if(normalize_features):
from pygcn import load_data
else:
from utils import load_data
# +
no_cuda = True
train_pct = 0.40
cuda = not no_cuda and torch.cuda.is_available()
np.random.seed(seed)
torch.manual_seed(seed)
# -
# ## Preparations
# ### Load data
# +
adj_test, features_test, labels, idx_train, idx_val, idx_test = load_data('data/{}/'.format(dataset), '{}_test_{:.2f}'.format(dataset, train_pct))
adj_valid, features_valid, labels, idx_train, idx_val, idx_test = load_data('data/{}/'.format(dataset), '{}_valid_{:.2f}'.format(dataset, train_pct))
adj_train, features_train, labels, idx_train, idx_val, idx_test = load_data('data/{}/'.format(dataset), '{}_train_{:.2f}'.format(dataset, train_pct))
adj_test = adj_test.coalesce()
adj_valid = adj_valid.coalesce()
adj_train = adj_train.coalesce()
n = adj_train.shape[0]
bin_adj_test = (adj_test.to_dense() > 0).float()
bin_adj_train = (adj_train.to_dense() > 0).float()
bin_adj_valid = (adj_valid.to_dense() > 0).float()
bin_adj_all = (bin_adj_train + bin_adj_test + bin_adj_valid > 0).float()
adj_all = make_normalized_adj(bin_adj_all.nonzero(), n)
nfeat = features_test.shape[1]
adj_all, features_test, labels, idx_train, idx_val, idx_test = load_data('data/{}/'.format(dataset), '{}'.format(dataset))
adj_all = adj_all.coalesce()
adj_test = adj_all
nfeat = features_test.shape[1]
# -
# ### Select samples to train on
# +
train_label_indices = []
for i in range(K):
for _ in range(train_samples_per_class):
idx = np.random.randint(labels.shape[0])
while(idx in train_label_indices or labels[idx] != i):
idx = np.random.randint(labels.shape[0])
train_label_indices.append(idx)
test_label_indices = list(set(range(labels.shape[0])) - set(train_label_indices))
# -
def plot_if_possible(r=None, print_normalized=False):
    """Scatter-plot the 2-D feature space, colored by true or predicted labels.

    Does nothing unless the feature matrix has exactly two columns. When the
    features were normalized at load time and ``print_normalized`` is False,
    the raw coordinates are re-read from the ``.content`` file so that the plot
    shows the original (un-normalized) positions.

    Parameters
    ----------
    r : tensor or None
        Soft cluster assignments; when given, points are colored by
        ``r.argmax(dim=1)`` instead of the true labels.
    print_normalized : bool
        Plot the (possibly normalized) in-memory features and say so in the title.
    """
    if features_train.shape[1] != 2:
        return
    if normalize_features and not print_normalized:
        # recover the raw (un-normalized) coordinates from the dataset file
        x, y = [], []
        with open("data/%s/%s.content" % (dataset, dataset)) as ss:
            for line in ss:
                _, xx, yy, _ = line.split(' ')
                x.append(float(xx))
                y.append(float(yy))
    else:
        x = features_train[:, 0]
        y = features_train[:, 1]
    text = " normalized" if print_normalized else ""
    if r is None:
        plt.scatter(x, y, c=labels, s=5)
        plt.title("The%s dataset with true labels" % text)
    else:
        plt.scatter(x, y, c=r.argmax(dim=1), s=5)
        plt.title("The%s dataset with predicted labels" % text)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.show()
plot_if_possible()
if normalize_features:
plot_if_possible(print_normalized=True)
if cuda:
features = features.cuda()
adj_train = adj_train.cuda()
labels = labels.cuda()
idx_train = idx_train.cuda()
idx_val = idx_val.cuda()
idx_test = idx_test.cuda()
# ## Training
# ### Decision-Focused
# +
model_cluster = GCNClusterNet(nfeat=nfeat,
nhid=hidden,
nout=embed_dim,
dropout=dropout,
K=K,
cluster_temp = clustertemp)
if cuda:
model_cluster.cuda()
optimizer = optim.Adam(model_cluster.parameters(),
lr=lr, weight_decay=weight_decay)
accuracies = []
for t in range(epochs):
mu, r, embeds, dist = model_cluster(features_train, adj_train, 1)
loss = nn.functional.cross_entropy(r[train_label_indices], labels[train_label_indices])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (t==500):
num_cluster_iter = 5
accuracies.append(accuracy(r, labels).item())
model_cluster.training = False
mu, r, embeds, dist = model_cluster(features_train, adj_train, num_cluster_iter)
print("ClusterNet accuracy: ", accuracy(r[test_label_indices], labels[test_label_indices]).item())
plot_if_possible(r)
# -
plt.plot(accuracies)
plt.title('Accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.show()
# ### E2E GCN
# +
print('GCN from paper')
model_gcn = GCNDeep(nfeat=nfeat,
nhid=hidden,
nout=K,
dropout=dropout,
nlayers=2)
optimizer_gcn = optim.Adam(model_gcn.parameters(), lr = lr,
weight_decay = weight_decay)
accuracies = []
for t in range(epochs):
r = model_gcn(features_train, adj_train)
loss = nn.functional.nll_loss(r[train_label_indices], labels[train_label_indices])
optimizer_gcn.zero_grad()
loss.backward()
optimizer_gcn.step()
accuracies.append(accuracy(r, labels).item())
print("e2e gcn accuracy: ", accuracy(r[test_label_indices], labels[test_label_indices]).item())
plot_if_possible(r)
# -
plt.plot(accuracies)
plt.title('Accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.show()
# +
print('linear GCN')
model_gcn = GCNLinear(nfeat=nfeat,
nhid=hidden,
nout=embed_dim,
dropout=dropout,
nlayers=2,
K=K)
optimizer_gcn = optim.Adam(model_gcn.parameters(), lr = lr,
weight_decay = weight_decay)
accuracies = []
for t in range(epochs):
r = model_gcn(features_train, adj_train)
loss = nn.functional.nll_loss(r[train_label_indices], labels[train_label_indices])
optimizer_gcn.zero_grad()
loss.backward()
optimizer_gcn.step()
accuracies.append(accuracy(r, labels).item())
print("e2e gcn accuracy: ", accuracy(r[test_label_indices], labels[test_label_indices]).item())
plot_if_possible(r)
# -
plt.plot(accuracies)
plt.title('Accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.show()
| experiments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import librosa
from IPython.display import Audio
from microphone import record_audio
def mp3_path_to_samples(path: str, *, duration=None, sampling_rate=44100) -> np.ndarray:
    """
    Load an .mp3 file and return its audio as a 1-D array of mono samples.

    parameters
    ----------
    path : str
        the path to a .mp3 file
    duration : float or None
        maximum length (seconds) of audio to load; None loads the entire file
    sampling_rate : int
        the sampling rate for the digital sample [Hz] (initially 44100)

    returns
    ---------
    samples : np.ndarray
        an array of digital samples from the .mp3 signal
    """
    # librosa resamples to `sampling_rate` and downmixes to mono for us
    samples, sampling_rate = librosa.load(path, sr=sampling_rate, mono=True, duration=duration)
    return np.array(samples)
def microphone_audio_to_samples(listen_time=10):
    """
    Record audio from the microphone and return it as one flat array of samples.

    parameters
    ----------
    listen_time : int
        the length of time (seconds) that the microphone will listen for (initially 10)

    returns
    ---------
    samples : np.ndarray
        a 1-D array of 16-bit integer samples, all recorded frames joined end-to-end
    """
    frames, sample_rate = record_audio(listen_time)
    # Each frame is a raw byte buffer; decode every frame as int16 and
    # concatenate them. (A previous no-op decode of frames[0] was removed.)
    samples = np.hstack([np.frombuffer(frame, np.int16) for frame in frames])
    return samples
def create_sub_samples(samples: np.ndarray, sample_length: int, num_samples: int, sampling_rate=44100) -> np.ndarray:
    """
    Draw random fixed-length contiguous sub-samples from a sample array.

    parameters
    ----------
    samples : np.ndarray
        an array of digital samples for an audio signal
    sample_length : int
        the length (seconds) of each generated sub-sample
    num_samples : int
        the amount of sub-samples to be created
    sampling_rate : int
        the sampling rate for the digital sample [Hz] (initially 44100)

    returns
    ---------
    sub_samples : np.ndarray (num_samples, sample_length * sampling_rate)
        an array of sub-samples derived from the original sample
    """
    sample_n = sample_length * sampling_rate
    # Bug fix: the original called `random.randint` but the `random` module was
    # never imported here. Use numpy's RNG instead; np.random.randint's upper
    # bound is exclusive, hence the + 1 to keep the last start index reachable.
    starts = np.random.randint(0, len(samples) - sample_n + 1, size=num_samples)
    return np.array([samples[start:start + sample_n] for start in starts])
| audio_to_samples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Notes**: ANN is used as second layer, to ensemble all prediction from previous models. This is the final submission.
# +
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# -
data = pd.read_csv("input/train_indessa.csv")
en_train = pd.read_csv("ensemble_train.csv")
en_test = pd.read_csv("ensemble_test.csv")
data.shape
# ## Split Training Set
# +
# One Hot Y
from sklearn.preprocessing import OneHotEncoder
data = data.fillna("0")
data = data.dropna()
split = int(len(data)*0.75)
oht_target = OneHotEncoder()
Y = pd.DataFrame(oht_target.fit_transform(data[data['loan_status'] >= 0].iloc[:,-1:]).todense())
Y_train = Y[:split]
Y_test = Y[split:]
# -
print(Y.shape)
print(Y_train.shape)
print(Y_test.shape)
print(split)
# ## Ensemble with ANN
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers.advanced_activations import PReLU
model = Sequential()
# +
model.add(Dense(units=40, input_dim=9))
model.add(Activation('relu'))
model.add(Dense(units=40))
model.add(Activation('relu'))
model.add(Dense(units=40))
model.add(Activation('relu'))
model.add(Dense(units=40))
model.add(Activation('relu'))
model.add(Dense(units=40))
model.add(Activation('relu'))
model.add(Dense(units=2))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# -
model.fit(en_train.values, Y_train.values, epochs=10, batch_size=32)
loss_metrics = model.evaluate(en_test.values, Y_test.values, batch_size=128)
loss_metrics
# ## Prediction
# Get member id
data_test = pd.read_csv("input/test_indessa.csv")
rows = data_test['member_id'].copy()
# Predict Ensemble with ANN
en_new = pd.read_csv("ensemble_new.csv")
pred_test = model.predict_proba(en_new.values)
# Bagging Prob
pred_bc = pd.read_csv("submission_bagging.csv")
# Gradient Boosting Prob
pred_gbc = pd.read_csv("submission_gbc.csv")
pred_frame = pd.DataFrame({'loan_status': pred_test[:,1],
'gbc': pred_bc['loan_status'],
'bc': pred_gbc['loan_status']})
def rep(x):
    """Blend one row's probabilities from the three first-level models.

    When the ANN output is fully saturated (exactly 0 or 1) it is discarded
    and only the bagging/boosting probabilities are averaged; otherwise all
    three are averaged.
    """
    ann = x['loan_status']
    trees = x['gbc'] + x['bc']
    if ann in (0, 1):
        # saturated ANN prediction: trust only the two tree ensembles
        return trees / 2
    return (trees + ann) / 3
pred_frame_mean = pred_frame.apply(rep, axis=1)
pred_frame_mean = pred_frame_mean.round(2)
pd.DataFrame({
'member_id': rows,
'loan_status': pred_frame_mean
}).to_csv('submission_final.csv', index=False, columns=['member_id', 'loan_status'], float_format='%g')
| Bank Indessa/5. Ensemble with ANN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercises
#
# First, we must create NumPy arrays to storage each of the three columns of data in the table.
import numpy as np
p = np.array([5020, 60370, 20110, 46940, 362160])
V = np.array([0.8, 0.2, 1.0, 0.6, 0.1])
T = np.array([200, 600, 1000, 1400, 1800])
# Now we plot, $p$ against $T$.
import matplotlib.pyplot as plt
plt.plot(T, p, 'o')
plt.xlabel('$T$')
plt.ylabel('$p$')
plt.show()
# Then $V$ against $T$
plt.plot(T, V, 'o')
plt.xlabel('$T$')
plt.ylabel('$V$')
plt.show()
# Finally, $pV$ against $T$.
plt.plot(T, p * V, 'o')
plt.xlabel('$T$')
plt.ylabel('$pV$')
plt.show()
# There is a clear linear relationship between $pV$ and $T$, the ideal gas relation.
#
# We can now calculate $n$ for each data point by rearranging the ideal gas law to read,
#
# $$ n = \frac{pV}{RT} $$
#
# and we can use NumPy array to perform this mathematics.
from scipy.constants import R
n = p * V / (R * T)
print(n)
# We can then find the mean $n$ and standard error as follows,
mean = np.mean(n)
# Bug fix: the standard error of the mean is std / sqrt(N), not std / N.
std_err = np.std(n) / np.sqrt(len(n))
print(mean, std_err)
# Note that the `len()` function will return the number of items in a list as an `int`.
| CH40208/working_with_data/ideal_gas_law_exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# train
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# import tensorflow as tf
import argparse
# # %loadpy model.unet.py
# %run model.unet.py
from model.unet import UNet
def main(_):
    """Build a UNet model from the global `args` namespace and train it.

    NOTE(review): `args` is never defined in this notebook — `argparse` is
    imported above but no ArgumentParser is ever constructed, so calling this
    via tf.app.run() raises NameError. Presumably the original script's
    argument-parsing block was dropped when converting to a notebook — TODO
    confirm and restore it (or replace `args.*` with the notebook globals).
    """
    config = tf.ConfigProto()
    # grow GPU memory on demand instead of reserving it all up front
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        model = UNet(args.experiment_dir, batch_size=args.batch_size, experiment_id=args.experiment_id,
                     input_width=args.image_size, output_width=args.image_size, embedding_num=args.embedding_num,
                     embedding_dim=args.embedding_dim, L1_penalty=args.L1_penalty, Lconst_penalty=args.Lconst_penalty,
                     Ltv_penalty=args.Ltv_penalty, Lcategory_penalty=args.Lcategory_penalty)
        model.register_session(sess)
        if args.flip_labels:
            # no_target_source: train with flipped labels and no target-source pairs
            model.build_model(is_training=True, inst_norm=args.inst_norm, no_target_source=True)
        else:
            model.build_model(is_training=True, inst_norm=args.inst_norm)
        fine_tune_list = None
        if args.fine_tune:
            # --fine_tune is a comma-separated list of embedding ids
            ids = args.fine_tune.split(",")
            fine_tune_list = set([int(i) for i in ids])
        model.train(lr=args.lr, epoch=args.epoch, resume=args.resume,
                    schedule=args.schedule, freeze_encoder=args.freeze_encoder, fine_tune=fine_tune_list,
                    sample_steps=args.sample_steps, checkpoint_steps=args.checkpoint_steps,
                    flip_labels=args.flip_labels, no_val=args.no_val)
if __name__ == '__main__':
tf.app.run()
# -
tf.__version__
pip install absl-py
# +
from absl import app
from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_string("name", None, "Your name.")
flags.DEFINE_integer("num_times", 1,
                     "Number of times to print greeting.")

# Required flag. Bug fix: this previously marked "name1", a flag that was
# never defined — absl raises UnrecognizedFlagError for unknown flag names.
# The flag defined above is "name".
flags.mark_flag_as_required("name")


def main(argv):
    """Print the greeting FLAGS.num_times times."""
    del argv  # Unused.
    for i in range(0, FLAGS.num_times):
        print('Hello, %s!' % FLAGS.name)


if __name__ == '__main__':
    app.run(main)
| train_test_0428.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project 3 - Classification
# Welcome to the third project of Data 8! You will build a classifier that guesses whether a song is hip-hop or country, using only the numbers of times words appear in the song's lyrics. By the end of the project, you should know how to:
#
# 1. Build a k-nearest-neighbors classifier.
# 2. Test a classifier on data.
#
# #### Administrivia
# ##### Piazza
# While collaboration is encouraged on this and other assignments, sharing answers is never okay. In particular, posting code or other assignment answers publicly on Piazza (or elsewhere) is academic dishonesty. It will result in a reduced project grade at a minimum. If you wish to ask a question and include your code or an answer to a written question, you *must* make it a private post.
#
# ##### Partners
# You may complete the project with up to one partner. Partnerships are an exception to the rule against sharing answers. If you have a partner, one person in the partnership should submit your project on Gradescope and include the other partner in the submission. (Gradescope will prompt you to fill this in.)
#
# For this project, **you can partner with anyone in the class.**
#
# ##### Due Date and Checkpoint
# Part of the project will be due early. Parts 1 and 2 of the project (out of 4) are due **Tuesday, November 22nd at 7PM**. Unlike the final submission, this early checkpoint will be graded for completion. It will be worth approximately 10% of the total project grade. Simply submit your partially-completed notebook as a PDF, as you would submit any other notebook. (See the note above on submitting with a partner.)
#
# The entire project (parts 1, 2, 3, and 4) will be due **Tuesday, November 29th at 7PM**. (Again, see the note above on submitting with a partner.)
#
# #### On to the project!
#
# **Run the cell below** to prepare the automatic tests. **Passing the automatic tests does not guarantee full credit on any question.** The tests are provided to help catch some common errors, but it is your responsibility to answer the questions correctly.
# +
# Run this cell to set up the notebook, but please don't change it.
import numpy as np
import math
from datascience import *
# These lines set up the plotting functionality and formatting.
import matplotlib
matplotlib.use('Agg', warn=False)
# %matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
# These lines load the tests.
from client.api.assignment import load_assignment
tests = load_assignment('project3.ok')
# -
# # 1. The Dataset
#
# Our dataset is a table of songs, each with a name, an artist, and a genre. We'll be trying to predict each song's genre.
#
# To predict a song's genre, we have some attributes: the lyrics of the song, in a certain format. We have a list of approximately 5,000 words that might occur in a song. For each song, our dataset tells us how frequently each of these words occur in that song.
#
# Run the cell below to read the `lyrics` table. **It may take up to a minute to load.**
# +
# Just run this cell.
lyrics = Table.read_table('lyrics.csv')
# The first 5 rows and 8 columns of the table:
lyrics.where("Title", are.equal_to("In Your Eyes"))\
.select("Title", "Artist", "Genre", "i", "the", "like", "love")\
.show()
# -
# That cell prints a few columns of the row for the song "In Your Eyes". The song contains 168 words. The word "like" appears twice: $\frac{2}{168} \approx 0.0119$ of the words in the song. Similarly, the word "love" appears 10 times: $\frac{10}{168} \approx 0.0595$ of the words.
#
# Our dataset doesn't contain all information about a song. For example, it doesn't include the total number of words in each song, or information about the order of words in the song, let alone the melody, instruments, or rhythm. Nonetheless, you may find that word counts alone are sufficient to build an accurate genre classifier.
# All titles are unique. The `row_for_title` function provides fast access to the one row for each title.
# Every title is unique, so each key in the index maps to a one-row list.
title_index = lyrics.index_by('Title')

def row_for_title(title):
    """Return the single row of `lyrics` whose Title column equals `title`."""
    rows_with_title = title_index.get(title)
    return rows_with_title[0]
# <div class="hide">\pagebreak</div>
# #### Question 1.1
# Set `expected_row_sum` to the number that you expect will result from summing all proportions in each row, excluding the first three columns.
# Set row_sum to a number that's the (approximate) sum of each row of word proportions.
# TODO(student): fill in the expected per-row sum of the word-proportion columns.
expected_row_sum = ...
# <div class="hide">\pagebreak</div>
# You can draw the histogram below to check that the actual row sums are close to what you expect.
# Run this cell to display a histogram of the sums of proportions in each row.
# This computation might take up to a minute; you can skip it if it's too slow.
# drop([0, 1, 2]) removes the Title/Artist/Genre columns so only word
# proportions are summed.
Table().with_column('sums', lyrics.drop([0, 1, 2]).apply(sum)).hist(0)
# This dataset was extracted from the Million Song Dataset (http://labrosa.ee.columbia.edu/millionsong/). Specifically, we are using the complementary datasets from musiXmatch (http://labrosa.ee.columbia.edu/millionsong/musixmatch) and Last.fm (http://labrosa.ee.columbia.edu/millionsong/lastfm).
#
# The counts of common words in the lyrics for all of these songs are provided by the musiXmatch dataset (called a bag-of-words format). Only the top 5000 most common words are represented. For each song, we divided the number of occurrences of each word by the total number of word occurrences in the lyrics of that song.
#
# The Last.fm dataset contains multiple tags for each song in the Million Song Dataset. Some of the tags are genre-related, such as "pop", "rock", "classic", etc. To obtain our dataset, we first extracted songs with Last.fm tags that included the words "country", or "hip" and "hop". These songs were then cross-referenced with the musiXmatch dataset, and only songs with musixMatch lyrics were placed into our dataset. Finally, inappropriate words and songs with naughty titles were removed, leaving us with 4976 words in the vocabulary and 1726 songs.
# ## 1.1. Word Stemming
# The columns other than Title, Artist, and Genre in the `lyrics` table are all words that appear in some of the songs in our dataset. Some of those names have been *stemmed*, or abbreviated heuristically, in an attempt to make different [inflected](https://en.wikipedia.org/wiki/Inflection) forms of the same base word into the same string. For example, the column "manag" is the sum of proportions of the words "manage", "manager", "managed", and "managerial" (and perhaps others) in each song.
#
# Stemming makes it a little tricky to search for the words you want to use, so we have provided another table that will let you see examples of unstemmed versions of each stemmed word. Run the code below to load it.
# Just run this cell.
# Mapping from each stemmed vocabulary word to one example unstemmed word.
vocab_mapping = Table.read_table('mxm_reverse_mapping_safe.csv')
# Labels 3..end of `lyrics` are the stemmed vocabulary words (0-2 are metadata).
stemmed = np.take(lyrics.labels, np.arange(3, len(lyrics.labels)))
vocab_table = Table().with_column('Stem', stemmed).join('Stem', vocab_mapping)
vocab_table.take(np.arange(900, 910))
# <div class="hide">\pagebreak</div>
# #### Question 1.1.1
# Assign `unchanged` to the **percentage** of words in `vocab_table` that are the same as their stemmed form (such as "coup" above).
#
# *Hint:* Try to use `where`. Start by computing an array of boolean values, one for each row in `vocab_table`, indicating whether the word in that row is equal to its stemmed form.
# + for_assignment_type="student"
# The staff solution took 3 lines.
# TODO(student): percentage (0-100) of vocab_table rows whose word equals its stem.
unchanged = ...
print(str(round(unchanged)) + '%')
# -
# <div class="hide">\pagebreak</div>
# #### Question 1.1.2
# Assign `stemmed_message` to the stemmed version of the word "message".
# Set stemmed_message to the stemmed version of "message" (which
# should be a string). Use vocab_table.
stemmed_message = ...
stemmed_message
_ = tests.grade('q1_1_1_2')
# <div class="hide">\pagebreak</div>
# #### Question 1.1.3
# Assign `unstemmed_singl` to the word in `vocab_table` that has "singl" as its stemmed form. (*Note that multiple English words may stem to "singl", but only one example appears in `vocab_table`.*)
# Set unstemmed_singl to the unstemmed version of "singl" (which
# should be a string).
unstemmed_singl = ...
unstemmed_singl
_ = tests.grade('q1_1_1_3')
# <div class="hide">\pagebreak</div>
# #### Question 1.1.4
# What word in `vocab_table` was shortened the most by this stemming process? Assign `most_shortened` to the word. It's an example of how heuristic stemming can collapse two unrelated words into the same stem (which is bad, but happens a lot in practice anyway).
# + for_assignment_type="student"
# In our solution, we found it useful to first make an array
# called shortened containing the number of characters that was
# chopped off of each word in vocab_table, but you don't have
# to do that.
shortened = ...
most_shortened = ...
# This will display your answer and its shortened form.
vocab_table.where('Word', most_shortened)
# -
_ = tests.grade('q1_1_1_4')
# ## 1.2. Splitting the dataset
# We're going to use our `lyrics` dataset for two purposes.
#
# 1. First, we want to *train* song genre classifiers.
# 2. Second, we want to *test* the performance of our classifiers.
#
# Hence, we need two different datasets: *training* and *test*.
#
# The purpose of a classifier is to classify unseen data that is similar to the training data. Therefore, we must ensure that there are no songs that appear in both sets. We do so by splitting the dataset randomly. The dataset has already been permuted randomly, so it's easy to split. We just take the top for training and the rest for test.
#
# Run the code below (without changing it) to separate the datasets into two tables.
# +
# Here we have defined the proportion of our data
# that we want to designate for training as 11/16ths
# of our total dataset. 5/16ths of the data is
# reserved for testing.
training_proportion = 11/16

num_songs = lyrics.num_rows
num_train = int(num_songs * training_proportion)
# NOTE(review): despite the name, num_valid is the size of the *test* split;
# this project has no separate validation set.
num_valid = num_songs - num_train

# The dataset is already randomly permuted, so a head/tail split is random.
train_lyrics = lyrics.take(np.arange(num_train))
test_lyrics = lyrics.take(np.arange(num_train, num_songs))

print("Training: ", train_lyrics.num_rows, ";",
      "Test: ", test_lyrics.num_rows)
# -
# -
# <div class="hide">\pagebreak</div>
# #### Question 1.2.1
# Draw a horizontal bar chart with two bars that show the proportion of Country songs in each dataset. Complete the function `country_proportion` first; it should help you create the bar chart.
# + for_assignment_type="student"
def country_proportion(table):
    """Return the proportion of songs in a table that have the Country genre."""
    # TODO(student): count Country rows and divide by table.num_rows.
    return ...

# The staff solution took 4 lines. Start by creating a table.
...
# -
# -
# # 2. K-Nearest Neighbors - a Guided Example
#
# K-Nearest Neighbors (k-NN) is a classification algorithm. Given some *attributes* (also called *features*) of an unseen example, it decides whether that example belongs to one or the other of two categories based on its similarity to previously seen examples.
#
# A feature we have about each song is *the proportion of times a particular word appears in the lyrics*, and the categories are two music genres: hip-hop and country. The algorithm requires many previously seen examples for which both the features and categories are known: that's the `train_lyrics` table.
#
# We're going to visualize the algorithm, instead of just describing it. To get started, let's pick colors for the genres.
# +
# Just run this cell to define genre_colors.
def genre_color(genre):
    """Return the plotting color for a genre.

    'Country' -> 'gold', 'Hip-hop' -> 'blue', and any other value
    (including 'Unknown') -> 'green'.
    """
    color_by_genre = {'Country': 'gold', 'Hip-hop': 'blue'}
    return color_by_genre.get(genre, 'green')
# -
# ## 2.1. Classifying a song
#
# In k-NN, we classify a song by finding the `k` songs in the *training set* that are most similar according to the features we choose. We call those songs with similar features the "neighbors". The k-NN algorithm assigns the song to the most common category among its `k` neighbors.
#
# Let's limit ourselves to just 2 features for now, so we can plot each song. The features we will use are the proportions of the words "like" and "love" in the lyrics. Taking the song "In Your Eyes" (in the test set), 0.0119 of its words are "like" and 0.0595 are "love". This song appears in the test set, so let's imagine that we don't yet know its genre.
#
# First, we need to make our notion of similarity more precise. We will say that the *dissimilarity*, or *distance* between two songs is the straight-line distance between them when we plot their features in a scatter diagram. This distance is called the Euclidean ("yoo-KLID-ee-un") distance.
#
# For example, in the song *Insane in the Brain* (in the training set), 0.0203 of all the words in the song are "like" and 0 are "love". Its distance from *In Your Eyes* on this 2-word feature set is $\sqrt{(0.0119 - 0.0203)^2 + (0.0595 - 0)^2} \approx 0.06$. (If we included more or different features, the distance could be different.)
#
# A third song, *Sangria Wine* (in the training set), is 0.0044 "like" and 0.0925 "love".
#
# The function below creates a plot to display the "like" and "love" features of a test song and some training songs. As you can see in the result, *In Your Eyes* is more similar to *Sangria Wine* than to *Insane in the Brain*.
# +
# Just run this cell.
def plot_with_two_features(test_song, training_songs, x_feature, y_feature):
    """Plot a test song and training songs using two features.

    The test song is drawn with the 'Unknown' genre color (green); each
    training song is colored by its actual genre via genre_color.
    """
    test_row = row_for_title(test_song)
    # Start the plotting table with the test song's point.
    distances = Table().with_columns(
        x_feature, make_array(test_row.item(x_feature)),
        y_feature, make_array(test_row.item(y_feature)),
        'Color', make_array(genre_color('Unknown')),
        'Title', make_array(test_song)
    )
    # Append one point per training song, colored by its genre.
    for song in training_songs:
        row = row_for_title(song)
        color = genre_color(row.item('Genre'))
        distances.append([row.item(x_feature), row.item(y_feature), color, song])
    distances.scatter(x_feature, y_feature, colors='Color', labels='Title', s=200)

training = make_array("Sangria Wine", "Insane In The Brain")
plot_with_two_features("In Your Eyes", training, "like", "love")
# -
# <div class="hide">\pagebreak</div>
# #### Question 2.1.1
# Compute the distance between the two country songs, *In Your Eyes* and *Sangria Wine*, using the `like` and `love` features only. Assign it the name `country_distance`.
#
# **Note:** If you have a row object, you can use `item` to get an element from a column by its name. For example, if `r` is a row, then `r.item("foo")` is the element in column `"foo"` in row `r`.
in_your_eyes = row_for_title("In Your Eyes")
sangria_wine = row_for_title("Sangria Wine")
# TODO(student): Euclidean distance between the two rows using only the
# "like" and "love" features.
country_distance = ...
country_distance
_ = tests.grade('q1_2_1_1')
# The `plot_with_two_features` function can show the positions of several training songs. Below, we've added one that's even closer to *In Your Eyes*.
training = make_array("Sangria Wine", "Lookin' for Love", "Insane In The Brain")
plot_with_two_features("In Your Eyes", training, "like", "love")
# <div class="hide">\pagebreak</div>
# #### Question 2.1.2
# Complete the function `distance_two_features` that computes the Euclidean distance between any two songs, using two features. The last two lines call your function to show that *Lookin' for Love* is closer to *In Your Eyes* than *Insane In The Brain*.
# +
def distance_two_features(title0, title1, x_feature, y_feature):
    """Compute the distance between two songs, represented as rows.

    Only the features named x_feature and y_feature are used when
    computing the distance."""
    # TODO(student): look up both rows, then compute the Euclidean distance
    # over the two named features.
    row0 = ...
    row1 = ...
    ...

for song in make_array("Lookin' for Love", "Insane In The Brain"):
    song_distance = distance_two_features(song, "In Your Eyes", "like", "love")
    print(song, 'distance:\t', song_distance)
# -
_ = tests.grade('q1_2_1_2')
# <div class="hide">\pagebreak</div>
# #### Question 2.1.3
# Define the function `distance_from_in_your_eyes` so that it works as described in its documentation.
def distance_from_in_your_eyes(title):
    """The distance between the given song and "In Your Eyes", based on the features "like" and "love".

    This function takes a single argument:
    * title: A string, the name of a song.
    """
    # TODO(student): implement (distance_two_features can do the work).
    ...
# <div class="hide">\pagebreak</div>
# #### Question 2.1.4
# Using the features `"like" and "love"`, what are the names and genres of the 7 songs in the training set closest to "In Your Eyes"? To answer this question, make a table named `close_songs` containing those 7 songs with columns `"Title"`, `"Artist"`, `"Genre"`, `"like"`, and `"love"`, as well as a column called `"distance"` that contains the distance from "In Your Eyes". The table should be **sorted in ascending order by `distance`**.
# + for_assignment_type="student"
# The staff solution took 4 lines.
# TODO(student): 7 nearest training songs to "In Your Eyes", sorted by distance.
close_songs = ...
close_songs
# -
_ = tests.grade('q1_2_1_4')
# <div class="hide">\pagebreak</div>
# #### Question 2.1.5
# Define the function `most_common` so that it works as described in its documentation.
# +
def most_common(column_label, table):
    """The most common element in a column of a table.

    This function takes two arguments:
    * column_label: The name of a column, a string.
    * table: A table.

    It returns the most common value in that column of that table.
    In case of a tie, it returns one of the most common values, selected
    arbitrarily."""
    # TODO(student): implement (group/sort by count is one approach).
    ...

# Calling most_common on your table of 7 nearest neighbors classifies
# "In Your Eyes" as a country song, 4 votes to 3.
most_common('Genre', close_songs)
# -
_ = tests.grade('q1_2_1_5')
# Congratulations are in order -- you've classified your first song!
# # 3. Features
# Now, we're going to extend our classifier to consider more than two features at a time.
#
# Euclidean distance still makes sense with more than two features. For `n` different features, we compute the difference between corresponding feature values for two songs, square each of the `n` differences, sum up the resulting numbers, and take the square root of the sum.
# <div class="hide">\pagebreak</div>
# #### Question 3.1
# Write a function to compute the Euclidean distance between two *arrays* of features of *arbitrary* (but equal) length. Use it to compute the distance between the first song in the training set and the first song in the test set, *using all of the features*. (Remember that the title, artist, and genre of the songs are not features.)
# +
def distance(features1, features2):
    """The Euclidean distance between two arrays of feature values."""
    # TODO(student): sqrt of the sum of squared element-wise differences.
    ...

# Distance between the first training song and the first test song,
# using every word-proportion feature.
distance_first_to_first = ...
distance_first_to_first
# -
_ = tests.grade('q1_3_1')
# ## 3.1. Creating your own feature set
#
# Unfortunately, using all of the features has some downsides. One clear downside is *computational* -- computing Euclidean distances just takes a long time when we have lots of features. You might have noticed that in the last question!
#
# So we're going to select just 20. We'd like to choose features that are very *discriminative*, that is, which lead us to correctly classify as much of the test set as possible. This process of choosing features that will make a classifier work well is sometimes called *feature selection*, or more broadly *feature engineering*.
# <div class="hide">\pagebreak</div>
# #### Question 3.1.1
# Look through the list of features (the labels of the `lyrics` table after the first three). Choose 20 that you think will let you distinguish pretty well between country and hip-hop songs. You might want to come back to this question later to improve your list, once you've seen how to evaluate your classifier. The first time you do this question, spend some time looking through the features, but not more than 15 minutes.
# +
# Set my_20_features to an array of 20 features (strings that are column labels)
my_20_features = ...

train_20 = train_lyrics.select(my_20_features)
test_20 = test_lyrics.select(my_20_features)
# -
_ = tests.grade('q1_3_1_1')
# <div class="hide">\pagebreak</div>
# #### Question 3.1.2
# In a few sentences, describe how you selected your features.
# *Write your answer here, replacing this text.*
# Next, let's classify the first song from our test set using these features. You can examine the song by running the cells below. Do you think it will be classified correctly?
# Peek at the first test song's metadata and its 20 selected features.
test_lyrics.take(0).select('Title', 'Artist', 'Genre')
test_20.take(0)
# As before, we want to look for the songs in the training set that are most like our test song. We will calculate the Euclidean distances from the test song (using the 20 selected features) to all songs in the training set. You could do this with a `for` loop, but to make it computationally faster, we have provided a function, `fast_distances`, to do this for you. Read its documentation to make sure you understand what it does. (You don't need to read the code in its body unless you want to.)
# +
# Just run this cell to define fast_distances.
def fast_distances(test_row, train_rows):
    """An array of the distances between test_row and each row in train_rows.

    Takes 2 arguments:
    * test_row: A row of a table containing features of one
      test song (e.g., test_20.row(0)).
    * train_rows: A table of features (for example, the whole
      table train_20).

    Returns a 1-D NumPy array with one Euclidean distance per training row.
    """
    # Stack the table's columns into an (n_rows, n_features) ndarray.
    # np.asmatrix/np.matrix (used previously) is deprecated in NumPy, so we
    # build a plain ndarray instead.
    counts_matrix = np.column_stack([np.asarray(col) for col in train_rows.columns])
    # Broadcasting subtracts the test row from every training row at once,
    # replacing the old np.tile copy of the test row.
    diff = counts_matrix - np.array(test_row)
    distances = np.sqrt(np.square(diff).sum(axis=1))
    return distances
# -
# <div class="hide">\pagebreak</div>
# #### Question 3.1.3
# Use the `fast_distances` function provided above to compute the distance from the first song in the test set to all the songs in the training set, using your set of 20 features. Make a new table called `genre_and_distances` with one row for each song in the training set and two columns:
# * The `"Genre"` of the training song
# * The `"Distance"` from the first song in the test set
#
# Ensure that `genre_and_distances` is **sorted in increasing order by distance to the first test song**.
# + for_assignment_type="student"
# The staff solution took 4 lines of code.
# TODO(student): table of ("Genre", "Distance") for every training song,
# sorted by increasing distance to the first test song.
genre_and_distances = ...
genre_and_distances
# -
_ = tests.grade('q1_3_1_3')
# <div class="hide">\pagebreak</div>
# #### Question 3.1.4
# Now compute the 5-nearest neighbors classification of the first song in the test set. That is, decide on its genre by finding the most common genre among its 5 nearest neighbors, according to the distances you've calculated. Then check whether your classifier chose the right genre. (Depending on the features you chose, your classifier might not get this song right, and that's okay.)
# +
# Set my_assigned_genre to the most common genre among these.
my_assigned_genre = ...

# Set my_assigned_genre_was_correct to True if my_assigned_genre
# matches the actual genre of the first song in the test set.
my_assigned_genre_was_correct = ...

print("The assigned genre, {}, was{}correct.".format(my_assigned_genre, " " if my_assigned_genre_was_correct else " not "))
# -
_ = tests.grade('q1_3_1_4')
# ## 3.2. A classifier function
#
# Now we can write a single function that encapsulates the whole process of classification.
# <div class="hide">\pagebreak</div>
# #### Question 3.2.1
# Write a function called `classify`. It should take the following four arguments:
# * A row of features for a song to classify (e.g., `test_20.row(0)`).
# * A table with a column for each feature (for example, `train_20`).
# * An array of classes that has as many items as the previous table has rows, and in the same order.
# * `k`, the number of neighbors to use in classification.
#
# It should return the class a `k`-nearest neighbor classifier picks for the given row of features (the string `'Country'` or the string `'Hip-hop'`).
def classify(test_row, train_rows, train_classes, k):
    """Return the most common class among k nearest neighbors to test_row."""
    # TODO(student): use fast_distances, pair distances with train_classes,
    # sort, take k, and return the majority class.
    distances = ...
    genre_and_distances = ...
    ...

_ = tests.grade('q1_3_2_1')
# <div class="hide">\pagebreak</div>
# #### Question 3.2.2
# Assign `grandpa_genre` to the genre predicted by your classifier for the song "Grandpa Got Runned Over By A John Deere" in the test set, using 9 neighbors and using your 20 features.
# The staff solution first defined a row object called grandpa_features.
grandpa_features = ...
grandpa_genre = ...
grandpa_genre
_ = tests.grade('q1_3_2_2')
# Finally, when we evaluate our classifier, it will be useful to have a classification function that is specialized to use a fixed training set and a fixed value of `k`.
# <div class="hide">\pagebreak</div>
# #### Question 3.2.3
# Create a classification function that takes as its argument a row containing your 20 features and classifies that row using the 5-nearest neighbors algorithm with `train_20` as its training set.
# +
def classify_one_argument(row):
    """Classify `row` (a row of the 20 chosen features) with the 5-nearest
    neighbors algorithm, using train_20 as the training set."""
    # TODO(student): delegate to classify with fixed training data and k=5.
    ...

# When you're done, this should produce 'Hip-hop' or 'Country'.
classify_one_argument(test_20.row(0))
# -
_ = tests.grade('q1_3_2_3')
# ## 3.3. Evaluating your classifier
# Now that it's easy to use the classifier, let's see how accurate it is on the whole test set.
#
# **Question 3.3.1.** Use `classify_one_argument` and `apply` to classify every song in the test set. Name these guesses `test_guesses`. **Then**, compute the proportion of correct classifications.
# TODO(student): classify every test song with apply, then compute accuracy.
test_guesses = ...
proportion_correct = ...
proportion_correct
_ = tests.grade('q1_3_3_1')
# At this point, you've gone through one cycle of classifier design. Let's summarize the steps:
# 1. From available data, select test and training sets.
# 2. Choose an algorithm you're going to use for classification.
# 3. Identify some features.
# 4. Define a classifier function using your features and the training set.
# 5. Evaluate its performance (the proportion of correct classifications) on the test set.
# ## 4. Extra Explorations
# Now that you know how to evaluate a classifier, it's time to build a better one.
# <div class="hide">\pagebreak</div>
# #### Question 4.1
# Find a classifier with better test-set accuracy than `classify_one_argument`. (Your new function should have the same arguments as `classify_one_argument` and return a classification. Name it `another_classifier`.) You can use more or different features, or you can try different values of `k`. (Of course, you still have to use `train_lyrics` as your training set!)
# +
# To start you off, here's a list of possibly-useful features:
staff_features = make_array("come", "do", "have", "heart", "make", "never", "now", "wanna", "with", "yo")

train_staff = train_lyrics.select(staff_features)
test_staff = test_lyrics.select(staff_features)

def another_classifier(row):
    """A classifier intended to beat classify_one_argument's test accuracy
    (different features and/or a different k, same training data)."""
    return ...
# -
# #### Ungraded and optional
# Try to create an even better classifier. You're not restricted to using only word proportions as features. For example, given the data, you could compute various notions of vocabulary size or estimated song length. If you're feeling very adventurous, you could also try other classification methods, like logistic regression. If you think you built a classifier that works well, post on Piazza and let us know.
#####################
# Custom Classifier #
#####################
# Congratulations: you're done with the final project!
| notebooks/data8_notebooks/project3/project3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Install library
# +
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.layers import Input, Dense, Conv2D, Activation
from tensorflow.keras.layers import MaxPooling2D, UpSampling2D, BatchNormalization
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.optimizers import Adam
# -
# ## Dataset preprocessing and EDA
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data() # load data
x_train,x_test = x_train.astype('float32')/255.0,x_test.astype('float32')/255.0 # normalization
# Flatten each 32x32x3 image to a 3072-vector for the dense autoencoder.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print(x_train.shape)
print(x_test.shape)
# NOTE(review): the four prints below repeat the shapes already printed
# above — looks like a leftover duplicated cell.
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
y_train
# +
# No method on keras to get the CIFAR-10 category name from a label index?
# Class names indexed by integer label 0-9.
cifar10_labels = np.array([
    'airplane',
    'automobile',
    'bird',
    'cat',
    'deer',
    'dog',
    'frog',
    'horse',
    'ship',
    'truck'])

# np.where returns a tuple like (array([idx]),); these are compared against
# y_train entries below and rely on NumPy broadcasting.
bird_ind = np.where(cifar10_labels=='bird')
deer_ind = np.where(cifar10_labels=='deer')
truck_ind = np.where(cifar10_labels=='truck')
# +
remove_num = 2500
counter = 0
train_num = len(y_train) - remove_num * 3  # for 3 classes
x_train_removed = np.zeros(x_train.shape)[:train_num]
y_train_removed = np.zeros(y_train.shape)[:train_num]

# bird, deer, truck labeld data is removed by 2500
# Keep all samples of the other 7 classes, but at most 2500 each of
# bird/deer/truck (CIFAR-10 has 5000 per class, so 2500 of each are dropped).
# NOTE(review): `label` is a shape-(1,) array and *_ind are np.where tuples,
# so the ==/!= tests rely on NumPy broadcasting down to one boolean —
# fragile but functional. Verify before refactoring.
bird_limit, deer_limit, truck_limit = 0, 0, 0
for i, label in enumerate(y_train):
    if (label != bird_ind) & (label != deer_ind) & (label != truck_ind):
        # Not one of the capped classes: always keep.
        x_train_removed[counter] = x_train[i]
        y_train_removed[counter] = y_train[i]
        counter += 1
    else:
        if label == bird_ind:
            if bird_limit < 2500:
                bird_limit += 1
                x_train_removed[counter] = x_train[i]
                y_train_removed[counter] = y_train[i]
                counter += 1
                continue
            else: continue
        if label == deer_ind:
            if deer_limit < 2500:
                deer_limit += 1
                x_train_removed[counter] = x_train[i]
                y_train_removed[counter] = y_train[i]
                counter += 1
                continue
            else: continue
        if label == truck_ind:
            if truck_limit < 2500:
                truck_limit += 1
                x_train_removed[counter] = x_train[i]
                y_train_removed[counter] = y_train[i]
                counter += 1
                continue
            else: continue

# Cast labels back to the original uint8 dtype.
y_train_removed = np.array(y_train_removed, dtype='uint8')
# -
# Sanity-check the reduced training set sizes and per-class counts.
print(x_train_removed.shape)
print(y_train_removed.shape)
df = pd.DataFrame(y_train_removed.flatten())
df.value_counts()
# +
import matplotlib.pyplot as plt

# Plot the number of samples per class (bird/deer/truck should be ~2500).
plt.hist(y_train_removed.flatten())
# -
# ## AutoEncoder
# +
# For autoencode ver2
encoding_dim = 32  # bottleneck size: each image is compressed to 32 values

# Earlier single-hidden-layer version, kept for reference:
# input_img = Input(shape=(3072,), name="Input_AE") # 32 * 32 * 3
# encoded = Dense(encoding_dim, activation='relu', name="Dense_AE_0")(input_img)
# decoded = Dense(3072, activation='sigmoid', name="Dense_AE_1")(encoded) # 32 * 32 * 3
# autoencoder = Model(input_img, decoded)

# Network with the hidden layers increased to 4 (128-64-32-64-128).
autoencoder = models.Sequential()
autoencoder.add(layers.Dense(128, activation='relu',
                             input_shape=(3072,), name="Dense_AE_0"))
autoencoder.add(layers.Dense(64, activation='relu', name="Dense_AE_1"))
autoencoder.add(layers.Dense(encoding_dim, activation='relu', name="Dense_AE_2"))
autoencoder.add(layers.Dense(64, activation='relu', name="Dense_AE_3"))
autoencoder.add(layers.Dense(128, activation='relu', name="Dense_AE_4"))
# Sigmoid output matches the [0, 1]-normalized pixel targets.
autoencoder.add(layers.Dense(3072,
                             activation='sigmoid', name="Dense_AE_5"))
# -
autoencoder.summary()
# +
# hyperparameters
batch_size = 32
epochs = 50

# train
saveDir = "../models/AE/"
# Binary cross-entropy treats each normalized pixel as a Bernoulli target.
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
# Stop once validation loss stops improving for 2 consecutive epochs.
es_cb = EarlyStopping(monitor='val_loss', patience=2, verbose=1, mode='auto')
# chkpt = saveDir + 'AE_Cifar10.{epoch:02d}-{loss:.2f}-{val_loss:.2f}.hdf5'
chkpt = saveDir + 'AE_Cifar10_002_Best.hdf5'
# Keep only the best (lowest val_loss) weights on disk.
cp_cb = ModelCheckpoint(filepath = chkpt, \
                        monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
# Inputs equal targets: the autoencoder learns to reconstruct its input.
AE_history = autoencoder.fit(x_train_removed, x_train_removed,
                             batch_size=batch_size,
                             epochs=epochs,
                             verbose=1,
                             validation_data=(x_test, x_test),
                             callbacks=[es_cb, cp_cb],
                             shuffle=True)
# +
# plot training
AE_hist_df = pd.DataFrame(AE_history.history)
plt.figure()
AE_hist_df[['loss', 'val_loss']].plot()
plt.ylabel('loss')
plt.xlabel('epoch')
# -
# Restore the best checkpoint saved during training.
autoencoder.load_weights("../models/AE/AE_Cifar10_002_Best.hdf5")
# autoencoder.trainable = False

# For encode
# layer_name_0 = 'Dense_AE_0'
# Encoder sub-model: input image -> 32-D bottleneck (output of Dense_AE_2).
encoder = Model(inputs=autoencoder.input, outputs=autoencoder.get_layer('Dense_AE_2').output)
autoencoder.summary()
# Encode and decode some digits
decoded_imgs = autoencoder.predict(x_test)

n = 10  # How many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
    # Display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(32, 32, 3))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # Display reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(32, 32, 3))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
# ## Classification Model
autoencoder.trainable = False
encoder.summary()
# +
# Classifier: frozen encoder (32-D bottleneck) followed by a softmax head.
model = Sequential()
model.add(encoder)
model.add(Dense(10, activation='softmax',name="Dense_0",input_shape=(32,)))
# model.add(BatchNormalization())
# model.add(Activation('softmax'))

# The `lr` argument is deprecated (and removed in recent Keras releases);
# `learning_rate` is the supported name.
adam = Adam(learning_rate=1e-4)
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=["accuracy"])
# -
model.summary()
# +
from tensorflow.keras.utils import plot_model, to_categorical

# one hot encoding
nb_classes = 10
y_train_removed_onehot = to_categorical(y_train_removed, nb_classes)
y_test_onehot = to_categorical(y_test, nb_classes)

# train
saveDir = "../models/CNN/"
# Stop once validation loss stops improving for 2 consecutive epochs.
es_cb = EarlyStopping(monitor='val_loss', patience=2, verbose=1, mode='auto')
# chkpt = saveDir + 'Affine_Cifar10.{epoch:02d}-{loss:.2f}-{val_loss:.2f}.hdf5'
chkpt = saveDir + 'Affine_Cifar10_002_Best.hdf5'
# Keep only the best (lowest val_loss) weights on disk.
cp_cb = ModelCheckpoint(filepath = chkpt, \
                        monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
# EarlyStopping will end training well before the 400-epoch cap.
Affine_history = model.fit(x_train_removed, y_train_removed_onehot,
                           batch_size=32,
                           epochs=400,
                           verbose=1,
                           validation_data=(x_test, y_test_onehot),
                           callbacks=[es_cb, cp_cb],
                           shuffle=True)
# -
# ## Result
# +
# plot training
Affine_hist_df = pd.DataFrame(Affine_history.history)

# Loss curves.
plt.figure()
Affine_hist_df[['loss', 'val_loss']].plot()
plt.ylabel('loss')
plt.xlabel('epoch')

# Accuracy curves. (The y-label previously said 'loss' — copy-paste slip.)
plt.figure()
Affine_hist_df[['accuracy', 'val_accuracy']].plot()
plt.ylabel('accuracy')
plt.xlabel('epoch')
# -
# load best model
model.load_weights("../models/CNN/Affine_Cifar10_002_Best.hdf5")
model.evaluate(x_test , y_test_onehot)
# +
from sklearn.metrics import confusion_matrix
import itertools
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Draw `cm` (a square confusion matrix) as an annotated heat map.

    * cm: matrix of counts, e.g. from sklearn.metrics.confusion_matrix.
    * classes: tick labels for both axes.
    * normalize: if True, convert each row to proportions before plotting.
    * cmap: matplotlib colormap for the image.
    """
    if normalize:
        # Row-normalize so each true-label row sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    fmt = '.2f' if normalize else 'd'
    # Pick black or white text per cell for contrast against the colormap.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Sequential.predict_classes was removed in TensorFlow 2.6; take the argmax
# of the softmax outputs instead (equivalent for this multi-class model).
predict_classes = np.argmax(model.predict(x_test), axis=1)
true_classes = y_test
confusion_mtx = confusion_matrix(true_classes, predict_classes)
plot_confusion_matrix(confusion_mtx, classes = range(10))
# -
| notebooks/AE_2_Affine_002.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: geo
# language: python
# name: geo
# ---
# +
import geopandas as gpd
import rasterio
import rasterio.mask
import geopandas as gpd
import numpy as np
import pandas as pd
import glob as glob
import rioxarray
import os
import gc
from osgeo import gdal
from osgeo import ogr
from shapely import wkb
from shapely.geometry import Polygon, MultiPolygon
from sklearn.cluster import MiniBatchKMeans
from yellowbrick.cluster import KElbowVisualizer
from sklearn.model_selection import train_test_split
# -
# Specify paths
# Input raster: multi-band structural/spectral mosaic at 1 cm resolution — presumably 8 bands, matching the band loop below.
rasPath = "D:\\TEMP\\_output\\_tif\\_Manning_Invasives\\Struc_Spec_1cm_near_fill.tif"
# Working folder where per-transect clips and derived shapefiles are written.
rasQuadratPath = 'E:\\Sync\\_Documents\\_Letter_invasives\\_Data\\_quadrats\\'
# +
# Read Vector data
# Read Minority training data (training polygons for the two target species).
gdfMinority = gpd.read_file("E:\\Sync\\_Documents\\_Letter_invasives\\_Data\\Training_samples_both_species2.shp").set_crs('EPSG:26911')
# Read Transect outlines. Backslashes doubled: the original single-backslash,
# non-raw string relied on '\S', '\_' etc. not being escape sequences, which
# is a SyntaxWarning in current Python and slated to become an error.
# The resulting path string is byte-identical.
quadrats = gpd.read_file('E:\\Sync\\_Documents\\_Letter_invasives\\_Data\\Transect_grids.shp').set_crs('EPSG:26911')
# -
# ### Raster within quadrats to points (GeoDataFrame)
# Using GDAL Warp (clipping) and Rioxarray (conversion)
# +
# Split quadrats by transect and write as separate shapefiles.
# Dissolve the quadrat polygons of each transect (1, 2, 3) into a single
# (multi)polygon via unary_union so each transect can serve as one GDAL cutline.
quadratsSplit = [gpd.GeoSeries((quadrats[quadrats['Quadrat'].isin(['1_2', '1_3', '1_1'])])['geometry'].unary_union),
                gpd.GeoSeries((quadrats[quadrats['Quadrat'].isin(['2_2', '2_7', '2_5', '2_9', '2_4', '2_10', '2_1', '2_3', '2_6', '2_8'])])['geometry'].unary_union),
                gpd.GeoSeries((quadrats[quadrats['Quadrat'].isin(['3_3', '3_2', '3_1'])])['geometry'].unary_union)]
for i in range(3):
    j = str(i+1)  # transect number used in the output file name (1.shp, 2.shp, 3.shp)
    outPath = f'E:\\Sync\\_Documents\\_Letter_invasives\\_Data\\_quadrats\\{j}.shp'
    quadratsSplit[i].to_file(outPath)
# +
# Clip raster using GDAL by transect.
# https://gis.stackexchange.com/questions/45053/gdalwarp-cutline-along-with-shapefile
for i in range(1,4):
    # Cutline shapefile written in the previous cell (one per transect).
    poly = f'E:\\Sync\\_Documents\\_Letter_invasives\\_Data\\_quadrats\\{i}.shp'
    # cropToCutline also shrinks the output extent to the cutline's bounds.
    ds = gdal.Warp(f"E:\\Sync\\_Documents\\_Letter_invasives\\_Data\\_quadrats\\clippedQd{i}.tif", rasPath, cropToCutline=True, cutlineDSName=poly, format="GTiff")
    ds = None # Close object — dereferencing flushes and releases the GDAL dataset
# +
# Final conversion: clipped per-transect rasters -> one (x, y, band1..band8)
# pixel table per transect.
outDfs = []
for i in range(1, 4):
    path = f"E:\\Sync\\_Documents\\_Letter_invasives\\_Data\\_quadrats\\clippedQd{i}.tif"
    # Open each clipped raster once, instead of re-opening it for every band.
    rds = rioxarray.open_rasterio(path)
    tempDfs = []
    for j in range(1, 9):
        df = rds.sel(band=j).to_dataframe('band' + str(j))
        df.drop(columns=['spatial_ref'], axis=1, inplace=True)
        df.reset_index(level=['x', 'y'], inplace=True, drop=False)
        df.reset_index(inplace=True, drop=True)
        tempDfs.append(df)  # Conversion: one DataFrame per band
    # Assemble x, y and the eight band columns side by side; rows align
    # because every band shares the same pixel grid.
    dfConcat = pd.concat([tempDfs[0]['x'], tempDfs[0]['y']]
                         + [tempDfs[j - 1]['band' + str(j)] for j in range(1, 9)],
                         axis=1)
    # Keep only valid pixels — negative band1 values presumably mark
    # nodata fill outside the cutline (TODO confirm nodata convention).
    dfConcat = dfConcat.loc[dfConcat['band1'] >= 0, :]
    outDfs.append(dfConcat)
# +
# Stack the per-transect tables into one, then promote to a point
# GeoDataFrame (one point per pixel) and persist for later labelling.
outDfs = pd.concat(outDfs, axis=0, ignore_index=True)
gdf = gpd.GeoDataFrame(outDfs, crs='EPSG:26911', geometry=gpd.points_from_xy(outDfs.x, outDfs.y))
gdf.to_file("E:\\Sync\\_Documents\\_Letter_invasives\\_Data\\_quadrats\\rasters2points.shp")
# -
# ### Further removal of potential IAS points was done manually in arcGIS
| Preprocessing_clip_and_raster2points.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <img src="../../images/banners/python-advanced.png" width="600"/>
# # <img src="../../images/logos/python.png" width="23"/> Async IO in Python: A Complete Walkthrough
#
# ## <img src="../../images/logos/toc.png" width="20"/> Table of Contents
# * [Setting Up Your Environment](#setting_up_your_environment)
# * [The 10,000-Foot View of Async IO](#the_10,000-foot_view_of_async_io)
# * [Where Does Async IO Fit In?](#where_does_async_io_fit_in?)
# * [Async IO Explained](#async_io_explained)
# * [Async IO Is Not Easy](#async_io_is_not_easy)
# * [The `asyncio` Package and `async`/`await`](#the_`asyncio`_package_and_`async`/`await`)
# * [The `async`/`await` Syntax and Native Coroutines](#the_`async`/`await`_syntax_and_native_coroutines)
# * [The Rules of Async IO](#the_rules_of_async_io)
# * [Async IO Design Patterns](#async_io_design_patterns)
# * [Chaining Coroutines](#chaining_coroutines)
# * [Using a Queue](#using_a_queue)
# * [Async IO’s Roots in Generators](#async_io’s_roots_in_generators)
# * [Other Features: `async for` and Async Generators + Comprehensions](#other_features:_`async_for`_and_async_generators_+_comprehensions)
# * [The Event Loop and `asyncio.run()`](#the_event_loop_and_`asyncio.run()`)
# * [A Full Program: Asynchronous Requests](#a_full_program:_asynchronous_requests)
# * [Async IO in Context](#async_io_in_context)
# * [When and Why Is Async IO the Right Choice?](#when_and_why_is_async_io_the_right_choice?)
# * [Async IO It Is, but Which One?](#async_io_it_is,_but_which_one?)
# * [Odds and Ends](#odds_and_ends)
# * [Other Top-Level `asyncio` Functions](#other_top-level_`asyncio`_functions)
# * [The Precedence of `await`](#the_precedence_of_`await`)
# * [Conclusion](#conclusion)
# * [Resources](#resources)
# * [Python Version Specifics](#python_version_specifics)
# * [Articles](#articles)
# * [Related PEPs](#related_peps)
# * [Libraries That Work With `async`/`await`](#libraries_that_work_with_`async`/`await`)
#
# ---
# Async IO is a concurrent programming design that has received dedicated support in Python, evolving rapidly from Python 3.4 through 3.7, and [probably beyond](https://twitter.com/1st1/status/1041855365745455104).
# You may be thinking with dread, “Concurrency, parallelism, threading, multiprocessing. That’s a lot to grasp already. Where does async IO fit in?”
# This tutorial is built to help you answer that question, giving you a firmer grasp of Python’s approach to async IO.
# **Here’s what you’ll cover:**
# - **Asynchronous IO (async IO)**: a language-agnostic paradigm (model) that has implementations across a host of programming languages
# - **`async`/`await`**: two new [Python keywords](https://realpython.com/python-keywords/) that are used to define coroutines
# - **`asyncio`**: the Python package that provides a foundation and API for running and managing coroutines
# Coroutines (specialized generator functions) are the heart of async IO in Python, and we’ll dive into them later on.
# Before you get started, you’ll need to make sure you’re set up to use `asyncio` and other libraries found in this tutorial.
# <a class="anchor" id="setting_up_your_environment"></a>
#
# ## Setting Up Your Environment
# You’ll need Python 3.7 or above to follow this article in its entirety, as well as the `aiohttp` and `aiofiles` packages:
# ```sh
# $ python3.7 -m venv ./py37async
# $ source ./py37async/bin/activate # Windows: .\py37async\Scripts\activate.bat
# $ pip install --upgrade pip aiohttp aiofiles # Optional: aiodns
# ```
# For help with installing Python 3.7 and setting up a virtual environment, check out [Python 3 Installation & Setup Guide](https://realpython.com/installing-python/) or [Virtual Environments Primer](https://realpython.com/python-virtual-environments-a-primer/).
# With that, let’s jump in.
# <a class="anchor" id="the_10,000-foot_view_of_async_io"></a>
#
# ## The 10,000-Foot View of Async IO
# Async IO is a bit lesser known than its tried-and-true cousins, multiprocessing and [threading](https://realpython.com/intro-to-python-threading/). This section will give you a fuller picture of what async IO is and how it fits into its surrounding landscape.
# <a class="anchor" id="where_does_async_io_fit_in?"></a>
#
# ### Where Does Async IO Fit In?
# Concurrency and parallelism are expansive subjects that are not easy to wade into. While this article focuses on async IO and its implementation in Python, it’s worth taking a minute to compare async IO to its counterparts in order to have context about how async IO fits into the larger, sometimes dizzying puzzle.
# **Parallelism** consists of performing multiple operations at the same time. **Multiprocessing** is a means to effect parallelism, and it entails spreading tasks over a computer’s central processing units (CPUs, or cores). Multiprocessing is well-suited for CPU-bound tasks: tightly bound [`for` loops](https://realpython.com/python-for-loop/) and mathematical computations usually fall into this category.
# **Concurrency** is a slightly broader term than parallelism. It suggests that multiple tasks have the ability to run in an overlapping manner. (There’s a saying that concurrency does not imply parallelism.)
# **Threading** is a concurrent execution model whereby multiple [threads](https://en.wikipedia.org/wiki/Thread_(computing)) take turns executing tasks. One process can contain multiple threads. Python has a complicated relationship with threading thanks to its [GIL](https://realpython.com/python-gil/), but that’s beyond the scope of this article.
# What’s important to know about threading is that it’s better for IO-bound tasks. While a CPU-bound task is characterized by the computer’s cores continually working hard from start to finish, an IO-bound job is dominated by a lot of waiting on input/output to complete.
# To recap the above, concurrency encompasses both multiprocessing (ideal for CPU-bound tasks) and threading (suited for IO-bound tasks). Multiprocessing is a form of parallelism, with parallelism being a specific type (subset) of concurrency. The Python standard library has offered longstanding [support for both of these](https://docs.python.org/3/library/concurrency.html) through its `multiprocessing`, `threading`, and `concurrent.futures` packages.
# Now it’s time to bring a new member to the mix. Over the last few years, a separate design has been more comprehensively built into [CPython](https://realpython.com/cpython-source-code-guide/): asynchronous IO, enabled through the standard library’s `asyncio` package and the new `async` and `await` language keywords. To be clear, async IO is not a newly invented concept, and it has existed or is being built into other languages and runtime environments, such as [Go](https://gobyexample.com/goroutines), [C#](https://docs.microsoft.com/en-us/dotnet/csharp/async), or [Scala](https://docs.scala-lang.org/sips/async.html).
# The `asyncio` package is billed by the Python documentation as [a library to write concurrent code](https://docs.python.org/3/library/asyncio.html). However, async IO is not threading, nor is it multiprocessing. It is not built on top of either of these.
# In fact, async IO is a single-threaded, single-process design: it uses **cooperative multitasking**, a term that you’ll flesh out by the end of this tutorial. It has been said in other words that async IO gives a feeling of concurrency despite using a single thread in a single process. Coroutines (a central feature of async IO) can be scheduled concurrently, but they are not inherently concurrent.
# To reiterate, async IO is a style of concurrent programming, but it is not parallelism. It’s more closely aligned with threading than with multiprocessing but is very much distinct from both of these and is a standalone member in concurrency’s bag of tricks.
# That leaves one more term. What does it mean for something to be **asynchronous**? This isn’t a rigorous definition, but for our purposes here, I can think of two properties:
# - Asynchronous routines are able to “pause” while waiting on their ultimate result and let other routines run in the meantime.
# - [Asynchronous code](https://realpython.com/python-async-features/), through the mechanism above, facilitates concurrent execution. To put it differently, asynchronous code gives the look and feel of concurrency.
#
# Here’s a diagram to put it all together. The white terms represent concepts, and the green terms represent ways in which they are implemented or effected:
# <img src="images/async-io-in-python:-a-complete-walkthrough/Screen_Shot_2018-10-17_at_3.18.44_PM.c02792872031.jpg" width="600px">
# I’ll stop there on the comparisons between concurrent programming models. This tutorial is focused on the subcomponent that is async IO, how to use it, and the [APIs](https://realpython.com/python-api/) that have sprung up around it. For a thorough exploration of threading versus multiprocessing versus async IO, pause here and check out Jim Anderson’s [overview of concurrency in Python](https://realpython.com/python-concurrency/). Jim is way funnier than me and has sat in more meetings than me, to boot.
# <a class="anchor" id="async_io_explained"></a>
#
# ### Async IO Explained
# Async IO may at first seem counterintuitive and paradoxical. How does something that facilitates concurrent code use a single thread and a single CPU core? I’ve never been very good at conjuring up examples, so I’d like to paraphrase one from Miguel Grinberg’s 2017 PyCon talk, which explains everything quite beautifully:
# There is only one Judit Polgár, who has only two hands and makes only one move at a time by herself. But playing asynchronously cuts the exhibition time down from 12 hours to one. So, cooperative multitasking is a fancy way of saying that a program’s event loop (more on that later) communicates with multiple tasks to let each take turns running at the optimal time.
# Async IO takes long waiting periods in which functions would otherwise be blocking and allows other functions to run during that downtime. (A function that blocks effectively forbids others from running from the time that it starts until the time that it returns.)
# <a class="anchor" id="async_io_is_not_easy"></a>
#
# ### Async IO Is Not Easy
# I’ve heard it said, “Use async IO when you can; use threading when you must.” The truth is that building durable multithreaded code can be hard and error-prone. Async IO avoids some of the potential speedbumps that you might otherwise encounter with a threaded design.
# But that’s not to say that async IO in Python is easy. Be warned: when you venture a bit below the surface level, async programming can be difficult too! Python’s async model is built around concepts such as callbacks, events, transports, protocols, and futures—just the terminology can be intimidating. The fact that its API has been changing continually makes it no easier.
# Luckily, `asyncio` has matured to a point where most of its features are no longer provisional, while its documentation has received a huge overhaul and some quality resources on the subject are starting to emerge as well.
# <a class="anchor" id="the_`asyncio`_package_and_`async`/`await`"></a>
#
# ## The `asyncio` Package and `async`/`await`
# Now that you have some background on async IO as a design, let’s explore Python’s implementation. Python’s `asyncio` package (introduced in Python 3.4) and its two keywords, `async` and `await`, serve different purposes but come together to help you declare, build, execute, and manage asynchronous code.
# <a class="anchor" id="the_`async`/`await`_syntax_and_native_coroutines"></a>
#
# ### The `async`/`await` Syntax and Native Coroutines
# At the heart of async IO are coroutines. **A coroutine is a specialized version of a Python generator function**. Let’s start with a baseline definition and then build off of it as you progress here: a coroutine is a function that can suspend its execution before reaching `return`, and it can indirectly pass control to another coroutine for some time.
# Later, you’ll dive a lot deeper into how exactly the traditional generator is repurposed into a coroutine. For now, the easiest way to pick up how coroutines work is to start making some.
# Let’s take the immersive approach and write some async IO code. This short program is the `Hello World` of async IO but goes a long way towards illustrating its core functionality:
# +
import re
# ^^^ pyforest auto-imports - don't write above this line
# #!/usr/bin/env python3
# countasync.py
import asyncio
async def count():
    """Print "One", yield control to the event loop for one second, print "Two"."""
    print("One")
    await asyncio.sleep(1)
    print("Two")
async def main():
    """Run three count() coroutines concurrently; total wall time ~1 s, not 3 s."""
    await asyncio.gather(count(), count(), count())
if __name__ == "__main__":
    import time
    s = time.perf_counter()
    # Note: if you run this piece of code in jupyter lab/notebook you have to use await main()
    # as jupyter notebook itself runs an event loop
    # see here: https://stackoverflow.com/questions/55409641/asyncio-run-cannot-be-called-from-a-running-event-loop
    # however if you run it through a script, you have to run it with asyncio.run(main()) to create
    # an event loop first
    # asyncio.run(main()) # does not work in jupyter notebook
    await main() # does not work in a python script (top-level await is notebook-only)
    elapsed = time.perf_counter() - s
    print(f"{__name__} executed in {elapsed:0.2f} seconds.")
# -
# When you execute this file, take note of what looks different than if you were to define the functions with just `def` and `time.sleep()`:
# ```sh
# $ python3 countasync.py
# One
# One
# One
# Two
# Two
# Two
# countasync.py executed in 1.01 seconds.
# ```
# The order of this output is the heart of async IO. Talking to each of the calls to `count()` is a single event loop, or coordinator. When each task reaches `await asyncio.sleep(1)`, the function yells up to the event loop and gives control back to it, saying, “I’m going to be sleeping for 1 second. Go ahead and let something else meaningful be done in the meantime.”
# Contrast this to the synchronous version:
# +
# #!/usr/bin/env python3
# countsync.py
# Synchronous counterpart to countasync.py: same prints, but blocking sleeps,
# so the three calls run back to back (~3 s total instead of ~1 s).
import time
def count():
    """Print "One", block for one second, then print "Two"."""
    print("One")
    time.sleep(1)
    print("Two")
def main():
    """Call count() three times sequentially."""
    for _ in range(3):
        count()
if __name__ == "__main__":
    s = time.perf_counter()
    main()
    elapsed = time.perf_counter() - s
    # NOTE(review): __file__ is undefined when run inside a notebook; this
    # line is from the script form of the example.
    print(f"{__file__} executed in {elapsed:0.2f} seconds.")
# -
# When executed, there is a slight but critical change in order and execution time:
# ```sh
# $ python3 countsync.py
# One
# Two
# One
# Two
# One
# Two
# countsync.py executed in 3.01 seconds.
# ```
# While using `time.sleep()` and `asyncio.sleep()` may seem banal, they are used as stand-ins for any time-intensive processes that involve wait time. (The most mundane thing you can wait on is a `sleep()` call that does basically nothing.) That is, `time.sleep()` can represent any time-consuming blocking function call, while `asyncio.sleep()` is used to stand in for a non-blocking call (but one that also takes some time to complete).
# As you’ll see in the next section, the benefit of awaiting something, including `asyncio.sleep()`, is that the surrounding function can temporarily cede control to another function that’s more readily able to do something immediately. In contrast, `time.sleep()` or any other blocking call is incompatible with asynchronous Python code, because it will stop everything in its tracks for the duration of the sleep time.
# <a class="anchor" id="the_rules_of_async_io"></a>
#
# ### The Rules of Async IO
# At this point, a more formal definition of `async`, `await`, and the coroutine functions that they create are in order. This section is a little dense, but getting a hold of `async`/`await` is instrumental, so come back to this if you need to:
# -
# The syntax `async def` introduces either a **native coroutine** or an **asynchronous generator**. The expressions `async with` and `async for` are also valid, and you’ll see them later on.
#
#
#
# -
# The keyword `await` passes function control back to the event loop. (It suspends the execution of the surrounding coroutine.) If Python encounters an `await f()` expression in the scope of `g()`, this is how `await` tells the event loop, “Suspend execution of `g()` until whatever I’m waiting on—the result of `f()`—is returned. In the meantime, go let something else run.”
#
#
#
#
# In code, that second bullet point looks roughly like this:
# Illustrative snippet only: f() is not defined here — it stands for any
# awaitable used in the surrounding explanation.
async def g():
    # Pause here and come back to g() when f() is ready
    r = await f()
    return r
# There’s also a strict set of rules around when and how you can and cannot use `async`/`await`. These can be handy whether you are still picking up the syntax or already have exposure to using `async`/`await`:
# -
# A function that you introduce with `async def` is a coroutine. It may use `await`, `return`, or `yield`, but all of these are optional. Declaring `async def noop(): pass` is valid:
#
#
# * Using `await` and/or `return` creates a coroutine function. To call a coroutine function, you must `await` it to get its results.
# * It is less common (and only recently legal in Python) to use `yield` in an `async def` block. This creates an [asynchronous generator](https://www.python.org/dev/peps/pep-0525/), which you iterate over with `async for`. Forget about async generators for the time being and focus on getting down the syntax for coroutine functions, which use `await` and/or `return`.
# * Anything defined with `async def` may not use `yield from`, which will raise a [`SyntaxError`](https://realpython.com/invalid-syntax-python/).
#
#
#
# -
# Just like it’s a `SyntaxError` to use `yield` outside of a `def` function, it is a `SyntaxError` to use `await` outside of an `async def` coroutine. You can only use `await` in the body of coroutines.
#
#
#
#
# Here are some terse examples meant to summarize the above few rules:
# +
# NOTE: the last two definitions below are deliberately illegal — executing
# this cell raises SyntaxError. They exist only to illustrate the rules
# described in the surrounding text (z() and gen() are not defined here).
async def f(x):
    y = await z(x)  # OK - `await` and `return` allowed in coroutines
    return y
async def g(x):
    yield x  # OK - this is an async generator
async def m(x):
    yield from gen(x)  # No - SyntaxError
def m(x):
    y = await z(x)  # Still no - SyntaxError (no `async def` here)
    return y
# -
# Finally, when you use `await f()`, it’s required that `f()` be an object that is [awaitable](https://docs.python.org/3/reference/datamodel.html#awaitable-objects). Well, that’s not very helpful, is it? For now, just know that an awaitable object is either (1) another coroutine or (2) an object defining an `.__await__()` dunder method that returns an iterator. If you’re writing a program, for the large majority of purposes, you should only need to worry about case #1.
# That brings us to one more technical distinction that you may see pop up: an older way of marking a function as a coroutine is to decorate a normal `def` function with `@asyncio.coroutine`. The result is a **generator-based coroutine**. This construction has been outdated since the `async`/`await` syntax was put in place in Python 3.5.
# These two coroutines are essentially equivalent (both are awaitable), but the first is **generator-based**, while the second is a **native coroutine**:
# +
import asyncio
# Illustrative only: stuff() is undefined here; the point is the two syntaxes.
@asyncio.coroutine  # generator-based marker; removed from asyncio in Python 3.11
def py34_coro():
    """Generator-based coroutine, older syntax"""
    yield from stuff()
async def py35_coro():
    """Native coroutine, modern syntax"""
    await stuff()
# -
# If you’re writing any code yourself, prefer native coroutines for the sake of being explicit rather than implicit. Generator-based coroutines will be [removed](https://docs.python.org/3/library/asyncio-task.html#generator-based-coroutines) in Python 3.10.
# Towards the latter half of this tutorial, we’ll touch on generator-based coroutines for explanation’s sake only. The reason that `async`/`await` were introduced is to make coroutines a standalone feature of Python that can be easily differentiated from a normal generator function, thus reducing ambiguity.
# Don’t get bogged down in generator-based coroutines, which have been [deliberately outdated](https://www.python.org/dev/peps/pep-0492/#rationale-and-goals) by `async`/`await`. They have their own small set of rules (for instance, `await` cannot be used in a generator-based coroutine) that are largely irrelevant if you stick to the `async`/`await` syntax.
# Without further ado, let’s take on a few more involved examples.
# Here’s one example of how async IO cuts down on wait time: given a coroutine `makerandom()` that keeps producing random integers in the range [0, 10], until one of them exceeds a threshold, you want to let multiple calls of this coroutine not need to wait for each other to complete in succession. You can largely follow the patterns from the two scripts above, with slight changes:
# +
# #!/usr/bin/env python3
# rand.py
import asyncio
import random
# ANSI colors: index 0 resets, 1..3 color the three concurrent tasks.
c = (
    "\033[0m",  # End of color
    "\033[36m",  # Cyan
    "\033[91m",  # Red
    "\033[35m",  # Magenta
)
async def makerandom(idx: int, threshold: int = 6) -> int:
    """Keep drawing random ints in [0, 10] until one exceeds *threshold*.

    idx selects the output color and the per-retry sleep (idx + 1 seconds),
    so higher-indexed tasks retry more slowly.
    """
    print(c[idx + 1] + f"Initiated makerandom({idx}).")
    i = random.randint(0, 10)
    while i <= threshold:
        print(c[idx + 1] + f"makerandom({idx}) == {i} too low; retrying.")
        await asyncio.sleep(idx + 1)  # cede control to the other tasks while "waiting"
        i = random.randint(0, 10)
    print(c[idx + 1] + f"---> Finished: makerandom({idx}) == {i}" + c[0])
    return i
async def main():
    # Run three makerandom tasks concurrently with thresholds 9, 8, 7.
    res = await asyncio.gather(*(makerandom(i, 10 - i - 1) for i in range(3)))
    return res
if __name__ == "__main__":
    random.seed(444)  # deterministic demo output
    # r1, r2, r3 = asyncio.run(main()) # again not valid in jupyter notebook
    r1, r2, r3 = await main()
    print()
    print(f"r1: {r1}, r2: {r2}, r3: {r3}")
# -
# The colorized output says a lot more than I can and gives you a sense for how this script is carried out:
# <img src="images/async-io-in-python:-a-complete-walkthrough/asyncio-rand.dffdd83b4256.gif" width="600px">
# This program uses one main coroutine, `makerandom()`, and runs it concurrently across 3 different inputs. Most programs will contain small, modular coroutines and one wrapper function that serves to chain each of the smaller coroutines together. [`main()`](https://realpython.com/python-main-function/) is then used to gather tasks (futures) by mapping the central coroutine across some iterable or pool.
# In this miniature example, the pool is `range(3)`. In a fuller example presented later, it is a set of URLs that need to be requested, parsed, and processed concurrently, and `main()` encapsulates that entire routine for each URL.
# While “making random integers” (which is CPU-bound more than anything) is maybe not the greatest choice as a candidate for `asyncio`, it’s the presence of `asyncio.sleep()` in the example that is designed to mimic an IO-bound process where there is uncertain wait time involved. For example, the `asyncio.sleep()` call might represent sending and receiving not-so-random integers between two clients in a message application.
# > Better Colorized Print Example Code
# > ```python
# from colorama import Fore
# import asyncio
# import time
# import random
# >
# > async def print_random():
# for _ in range(5):
# color = random.choice([
# Fore.YELLOW, Fore.BLUE,
# Fore.GREEN, Fore.RED
# ])
# print(color + f'This is a text in color!')
# await asyncio.sleep(1)
# >
# async def main():
# await asyncio.gather(print_random(), print_random(), print_random())
# >
# await main() # in jupyter notebook or asyncio.run(main()) in script
# ```
# <a class="anchor" id="async_io_design_patterns"></a>
#
# ## Async IO Design Patterns
# Async IO comes with its own set of possible script designs, which you’ll get introduced to in this section.
# <a class="anchor" id="chaining_coroutines"></a>
#
# ### Chaining Coroutines
# A key feature of coroutines is that they can be chained together. (Remember, a coroutine object is awaitable, so another coroutine can `await` it.) This allows you to break programs into smaller, manageable, recyclable coroutines:
# +
# #!/usr/bin/env python3
# chained.py
import asyncio
import random
import time
async def part1(n: int) -> str:
    """First stage: sleep 0-10 s, then return an identifier string for n."""
    i = random.randint(0, 10)
    print(f"Part 1 ({n}) sleeping for {i} seconds.")
    await asyncio.sleep(i)
    result = f"result{n}-1"
    print(f"Returning part1({n}) == {result}.")
    return result
async def part2(n: int, arg: str) -> str:
    """Second stage: consume part1's output, sleep, return a derived string."""
    i = random.randint(0, 10)
    print(f"part 2 {n, arg} sleeping for {i} seconds.")
    await asyncio.sleep(i)
    result = f"result{n}-2 derived from {arg}"
    print(f"Returning part2{n, arg} == {result}.")
    return result
async def chain(n: int) -> None:
    """Run part1 then part2 for input n and report the chain's elapsed time."""
    start = time.perf_counter()
    p1 = await part1(n)
    # part2 for this n cannot start before part1(n) finishes, but other
    # chains keep running during each await.
    p2 = await part2(n, p1)
    end = time.perf_counter() - start
    print(f"--> Chained result {n} => {p2} (took {end:0.2f} seconds).")
async def main(*args):
    # Gather one independent chain per input; chains interleave freely.
    await asyncio.gather(*(chain(n) for n in args))
if __name__ == "__main__":
    import sys
    random.seed(444)
    # Default inputs 1, 2, 3 unless numbers were passed on the command line.
    args = [1, 2, 3] if len(sys.argv) == 1 else map(int, sys.argv[1:])
    start = time.perf_counter()
    # Script form; inside a notebook this would need `await main(*args)`.
    asyncio.run(main(*args))
    end = time.perf_counter() - start
    print(f"Program finished in {end:0.2f} seconds.")
# -
# Pay careful attention to the output, where `part1()` sleeps for a variable amount of time, and `part2()` begins working with the results as they become available:
# ```sh
# $ python3 chained.py 9 6 3
# part1(9) sleeping for 4 seconds.
# part1(6) sleeping for 4 seconds.
# part1(3) sleeping for 0 seconds.
# Returning part1(3) == result3-1.
# part2(3, 'result3-1') sleeping for 4 seconds.
# Returning part1(9) == result9-1.
# part2(9, 'result9-1') sleeping for 7 seconds.
# Returning part1(6) == result6-1.
# part2(6, 'result6-1') sleeping for 4 seconds.
# Returning part2(3, 'result3-1') == result3-2 derived from result3-1.
# -->Chained result3 => result3-2 derived from result3-1 (took 4.00 seconds).
# Returning part2(6, 'result6-1') == result6-2 derived from result6-1.
# -->Chained result6 => result6-2 derived from result6-1 (took 8.01 seconds).
# Returning part2(9, 'result9-1') == result9-2 derived from result9-1.
# -->Chained result9 => result9-2 derived from result9-1 (took 11.01 seconds).
# Program finished in 11.01 seconds.
# ```
# In this setup, the runtime of `main()` will be equal to the maximum runtime of the tasks that it gathers together and schedules.
# <a class="anchor" id="using_a_queue"></a>
#
# ### Using a Queue
# The `asyncio` package provides [queue classes](https://docs.python.org/3/library/asyncio-queue.html) that are designed to be similar to classes of the [`queue`](https://docs.python.org/3/library/queue.html#module-queue) module. In our examples so far, we haven’t really had a need for a queue structure. In `chained.py`, each task (future) is composed of a set of coroutines that explicitly await each other and pass through a single input per chain.
# There is an alternative structure that can also work with async IO: a number of producers, which are not associated with each other, add items to a queue. Each producer may add multiple items to the queue at staggered, random, unannounced times. A group of consumers pull items from the queue as they show up, greedily and without waiting for any other signal.
# In this design, there is no chaining of any individual consumer to a producer. The consumers don’t know the number of producers, or even the cumulative number of items that will be added to the queue, in advance.
# It takes an individual producer or consumer a variable amount of time to put and extract items from the queue, respectively. The queue serves as a throughput that can communicate with the producers and consumers without them talking to each other directly.
# The synchronous version of this program would look pretty dismal: a group of blocking producers serially add items to the queue, one producer at a time. Only after all producers are done can the queue be processed, by one consumer at a time processing item-by-item. There is a ton of latency in this design. Items may sit idly in the queue rather than be picked up and processed immediately.
# An asynchronous version, `asyncq.py`, is below. The challenging part of this workflow is that there needs to be a signal to the consumers that production is done. Otherwise, `await q.get()` will hang indefinitely, because the queue will have been fully processed, but consumers won’t have any idea that production is complete.
# (Big thanks for some help from a StackOverflow [user](https://stackoverflow.com/a/52615705/7954504) for helping to straighten out `main()`: the key is to `await q.join()`, which blocks until all items in the queue have been received and processed, and then to cancel the consumer tasks, which would otherwise hang up and wait endlessly for additional queue items to appear.)
# Here is the full script:
# +
# #!/usr/bin/env python3
# asyncq.py
import asyncio
import itertools as it
import os
import random
import time
async def makeitem(size: int = 5) -> str:
return os.urandom(size).hex()
async def randsleep(caller=None) -> None:
i = random.randint(0, 10)
if caller:
print(f"{caller} sleeping for {i} seconds.")
await asyncio.sleep(i)
async def produce(name: int, q: asyncio.Queue) -> None:
    """Put a random number (0-10) of timestamped items onto the queue.

    Each item is a `(hex_string, put_time)` tuple; `put_time` lets the
    consumer measure how long the item sat in the queue.
    """
    count = random.randint(0, 10)
    for _ in range(count):  # each single producer works sequentially
        await randsleep(caller=f"Producer {name}")
        item = await makeitem()
        stamp = time.perf_counter()
        await q.put((item, stamp))
        print(f"Producer {name} added <{item}> to queue.")
async def consume(name: int, q: asyncio.Queue) -> None:
    """Endlessly pull (item, timestamp) pairs off the queue and report latency.

    Runs until cancelled from outside; `q.task_done()` is what lets
    `q.join()` in main() know when every produced item was processed.
    """
    while True:
        await randsleep(caller=f"Consumer {name}")
        item, stamp = await q.get()
        elapsed = time.perf_counter()
        print(f"Consumer {name} got element <{item}> in {elapsed-stamp:0.5f} seconds.")
        q.task_done()
async def main(nprod: int, ncon: int):
    """Run `nprod` producers and `ncon` consumers over one shared queue.

    Waits for all producers, then for the queue to drain, then cancels
    the (otherwise endless) consumers.
    """
    queue = asyncio.Queue()
    prod_tasks = [asyncio.create_task(produce(idx, queue)) for idx in range(nprod)]
    cons_tasks = [asyncio.create_task(consume(idx, queue)) for idx in range(ncon)]
    await asyncio.gather(*prod_tasks)
    # join() implicitly waits on the consumers to process every item, too
    await queue.join()
    for task in cons_tasks:
        task.cancel()
if __name__ == "__main__":
    # The argparse lines are commented out for the notebook; in the real
    # asyncq.py script they let you choose producer/consumer counts.
    # import argparse
    # random.seed(444)
    # parser = argparse.ArgumentParser()
    # parser.add_argument("-p", "--nprod", type=int, default=5)
    # parser.add_argument("-c", "--ncon", type=int, default=10)
    # ns = parser.parse_args()
    start = time.perf_counter()
    # asyncio.run(main(**ns.__dict__))
    # NOTE(review): top-level `await` only works inside a notebook/IPython,
    # which already runs an event loop; a plain script must use asyncio.run().
    await main(nprod=3, ncon=3)
    elapsed = time.perf_counter() - start
    print(f"Program completed in {elapsed:0.5f} seconds.")
# -
# The first few coroutines are helper functions that return a random string, a fractional-second performance counter, and a random integer. A producer puts anywhere from 0 to 10 items into the queue. Each item is a tuple of `(i, t)` where `i` is a random string and `t` is the time at which the producer attempts to put the tuple into the queue.
# When a consumer pulls an item out, it simply calculates the elapsed time that the item sat in the queue using the timestamp that the item was put in with.
# Keep in mind that `asyncio.sleep()` is used to mimic some other, more complex coroutine that would eat up time and block all other execution if it were a regular blocking function.
# Here is a test run with two producers and five consumers:
# ```sh
# $ python3 asyncq.py -p 2 -c 5
# Producer 0 sleeping for 3 seconds.
# Producer 1 sleeping for 3 seconds.
# Consumer 0 sleeping for 4 seconds.
# Consumer 1 sleeping for 3 seconds.
# Consumer 2 sleeping for 3 seconds.
# Consumer 3 sleeping for 5 seconds.
# Consumer 4 sleeping for 4 seconds.
# Producer 0 added <377b1e8f82> to queue.
# Producer 0 sleeping for 5 seconds.
# Producer 1 added <413b8802f8> to queue.
# Consumer 1 got element <377b1e8f82> in 0.00013 seconds.
# Consumer 1 sleeping for 3 seconds.
# Consumer 2 got element <413b8802f8> in 0.00009 seconds.
# Consumer 2 sleeping for 4 seconds.
# Producer 0 added <06c055b3ab> to queue.
# Producer 0 sleeping for 1 seconds.
# Consumer 0 got element <06c055b3ab> in 0.00021 seconds.
# Consumer 0 sleeping for 4 seconds.
# Producer 0 added <17a8613276> to queue.
# Consumer 4 got element <17a8613276> in 0.00022 seconds.
# Consumer 4 sleeping for 5 seconds.
# Program completed in 9.00954 seconds.
# ```
# In this case, the items process in fractions of a second. A delay can be due to two reasons:
# - Standard, largely unavoidable overhead
# - Situations where all consumers are sleeping when an item appears in the queue
#
# With regards to the second reason, luckily, it is perfectly normal to scale to hundreds or thousands of consumers. You should have no problem with `python3 asyncq.py -p 5 -c 100`. The point here is that, theoretically, you could have different users on different systems controlling the management of producers and consumers, with the queue serving as the central throughput.
# So far, you’ve been thrown right into the fire and seen three related examples of `asyncio` calling coroutines defined with `async` and `await`. If you’re not completely following or just want to get deeper into the mechanics of how modern coroutines came to be in Python, you’ll start from square one with the next section.
# <a class="anchor" id="async_io’s_roots_in_generators"></a>
#
# ## Async IO’s Roots in Generators
# Earlier, you saw an example of the old-style generator-based coroutines, which have been outdated by more explicit native coroutines. The example is worth re-showing with a small tweak:
# +
import asyncio
@asyncio.coroutine
def py34_coro():
    """Generator-based coroutine"""
    # No need to build these yourself, but be aware of what they are
    # NOTE(review): `@asyncio.coroutine` was deprecated in Python 3.8 and
    # removed in 3.11 — it is shown here only to illustrate the legacy
    # pre-`async def` style; this cell will not run on modern interpreters.
    s = yield from stuff()
    return s
async def py35_coro():
    """Native coroutine using the modern async/await syntax."""
    return await stuff()
async def stuff():
    """Stand-in awaitable that resolves to a constant tuple (16, 32, 48)."""
    return (16, 32, 48)
# -
# As an experiment, what happens if you call `py34_coro()` or `py35_coro()` on its own, without `await`, or without any calls to `asyncio.run()` or other `asyncio` “porcelain” functions? Calling a coroutine in isolation returns a coroutine object:
>>> py35_coro()
<coroutine object py35_coro at 0x10126dcc8>
# This isn’t very interesting on its surface. The result of calling a coroutine on its own is an awaitable **coroutine object**.
# Time for a quiz: what other feature of Python looks like this? (What feature of Python doesn’t actually “do much” when it’s called on its own?)
# Hopefully you’re thinking of **generators** as an answer to this question, because coroutines are enhanced generators under the hood. The behavior is similar in this regard:
>>> def gen():
... yield 0x10, 0x20, 0x30
...
>>> g = gen()
>>> g # Nothing much happens - need to iterate with `.__next__()`
<generator object gen at 0x1012705e8>
>>> next(g)
(16, 32, 48)
# Generator functions are, as it so happens, the foundation of async IO (regardless of whether you declare coroutines with `async def` rather than the older `@asyncio.coroutine` wrapper). Technically, `await` is more closely analogous to `yield from` than it is to `yield`. (But remember that `yield from x()` is just syntactic sugar to replace `for i in x(): yield i`.)
# One critical feature of generators as it pertains to async IO is that they can effectively be stopped and restarted at will. For example, you can `break` out of iterating over a generator object and then resume iteration on the remaining values later. When a [generator function reaches `yield`](https://realpython.com/introduction-to-python-generators/), it yields that value, but then it sits idle until it is told to yield its subsequent value.
# This can be fleshed out through an example:
# +
>>> from itertools import cycle
>>> def endless():
... """Yields 9, 8, 7, 6, 9, 8, 7, 6, ... forever"""
... yield from cycle((9, 8, 7, 6))
>>> e = endless()
>>> total = 0
>>> for i in e:
... if total < 30:
... print(i, end=" ")
... total += i
... else:
... print()
... # Pause execution. We can resume later.
... break
9 8 7 6 9 8 7 6 9 8 7 6 9 8
>>> # Resume
>>> next(e), next(e), next(e)
(6, 9, 8)
# -
# The `await` keyword behaves similarly, marking a break point at which the coroutine suspends itself and lets other coroutines work. “Suspended,” in this case, means a coroutine that has temporarily ceded control but not totally exited or finished. Keep in mind that `yield`, and by extension `yield from` and `await`, mark a break point in a generator’s execution.
# This is the fundamental difference between functions and generators. A function is all-or-nothing. Once it starts, it won’t stop until it hits a `return`, then pushes that value to the caller (the function that calls it). A generator, on the other hand, pauses each time it hits a `yield` and goes no further. Not only can it push this value to calling stack, but it can keep a hold of its local variables when you resume it by calling `next()` on it.
# There’s a second and lesser-known feature of generators that also matters. You can send a value into a generator as well through its `.send()` method. This allows generators (and coroutines) to call (`await`) each other without blocking. I won’t get any further into the nuts and bolts of this feature, because it matters mainly for the implementation of coroutines behind the scenes, but you shouldn’t ever really need to use it directly yourself.
# If you’re interested in exploring more, you can start at [PEP 342](https://www.python.org/dev/peps/pep-0342/), where coroutines were formally introduced. <NAME>’s [How the Heck Does Async-Await Work in Python](https://snarky.ca/how-the-heck-does-async-await-work-in-python-3-5/) is also a good read, as is the [PYMOTW writeup on `asyncio`](https://pymotw.com/3/asyncio/coroutines.html). Lastly, there’s <NAME>’s [Curious Course on Coroutines and Concurrency](http://www.dabeaz.com/coroutines/), which dives deep into the mechanism by which coroutines run.
# Let’s try to condense all of the above articles into a few sentences: there is a particularly unconventional mechanism by which these coroutines actually get run. Their result is an attribute of the exception object that gets thrown when their `.send()` method is called. There’s some more wonky detail to all of this, but it probably won’t help you use this part of the language in practice, so let’s move on for now.
# To tie things together, here are some key points on the topic of coroutines as generators:
# -
# Coroutines are [repurposed generators](https://www.python.org/dev/peps/pep-0492/#differences-from-generators) that take advantage of the peculiarities of generator methods.
#
#
#
# -
# Old generator-based coroutines use `yield from` to wait for a coroutine result. Modern Python syntax in native coroutines simply replaces `yield from` with `await` as the means of waiting on a coroutine result. The `await` is analogous to `yield from`, and it often helps to think of it as such.
#
#
#
# -
# The use of `await` is a signal that marks a break point. It lets a coroutine temporarily suspend execution and permits the program to come back to it later.
#
#
#
#
# <a class="anchor" id="other_features:_`async_for`_and_async_generators_+_comprehensions"></a>
#
# ### Other Features: `async for` and Async Generators + Comprehensions
# Along with plain `async`/`await`, Python also enables `async for` to iterate over an **asynchronous iterator**. The purpose of an asynchronous iterator is for it to be able to call asynchronous code at each stage when it is iterated over.
# A natural extension of this concept is an **asynchronous generator**. Recall that you can use `await`, `return`, or `yield` in a native coroutine. Using `yield` within a coroutine became possible in Python 3.6 (via PEP 525), which introduced asynchronous generators with the purpose of allowing `await` and `yield` to be used in the same coroutine function body:
async def mygen(u: int = 10):
    """Asynchronously yield the powers of 2 from 2**0 through 2**(u-1).

    Sleeps briefly after each yield, ceding control to the event loop so
    other coroutines can run between items.
    """
    for exp in range(u):
        yield 2 ** exp
        await asyncio.sleep(0.1)
# Last but not least, Python enables **asynchronous comprehension** with `async for`. Like its synchronous cousin, this is largely syntactic sugar:
async def main():
    """Demonstrate async list comprehensions over `mygen` (syntax only).

    Note: `async for` does NOT make the iteration concurrent — it merely
    lets the loop yield control to the event loop between items.
    """
    powers = [i async for i in mygen()]
    filtered = [j async for j in mygen() if not (j // 3 % 5)]
    return powers, filtered
# +
# g, f = asyncio.run(main())
# Time the two full passes over mygen(); expect roughly 2 seconds,
# since each of the two comprehensions sleeps 0.1s per item for 10 items.
t1 = time.perf_counter()
g, f = await main()
t2 = time.perf_counter()
t2 - t1
# -
# Display the collected powers of two:
g
# Display the filtered subset:
f
# This is a crucial distinction: **neither asynchronous generators nor comprehensions make the iteration concurrent**. All that they do is provide the look-and-feel of their synchronous counterparts, but with the ability for the loop in question to give up control to the event loop for some other coroutine to run.
# In other words, asynchronous iterators and asynchronous generators are not designed to concurrently map some function over a sequence or iterator. They’re merely designed to let the enclosing coroutine allow other tasks to take their turn. The `async for` and `async with` statements are only needed to the extent that using plain `for` or `with` would “break” the nature of `await` in the coroutine. This distinction between asynchronicity and concurrency is a key one to grasp.
# <a class="anchor" id="the_event_loop_and_`asyncio.run()`"></a>
#
# ### The Event Loop and `asyncio.run()`
# You can think of an event loop as something like a `while True` loop that monitors coroutines, taking feedback on what’s idle, and looking around for things that can be executed in the meantime. It is able to wake up an idle coroutine when whatever that coroutine is waiting on becomes available.
# Thus far, the entire management of the event loop has been implicitly handled by one function call:
# One call owns the whole event-loop lifecycle: create, run to completion, close.
# NOTE(review): inside a notebook this raises (a loop is already running);
# it is shown as the canonical script-level entry point.
asyncio.run(main())  # Python 3.7+
# [`asyncio.run()`](https://github.com/python/cpython/blob/d4c76d960b8b286b75c933780416ace9cda682fd/Lib/asyncio/runners.py#L8), introduced in Python 3.7, is responsible for getting the event loop, running tasks until they are marked as complete, and then closing the event loop.
# There’s a more long-winded way of managing the `asyncio` event loop, with `get_event_loop()`. The typical pattern looks like this:
# Pre-3.7 long-hand equivalent of asyncio.run(): acquire the loop,
# run the coroutine to completion, and always close the loop afterwards.
loop = asyncio.get_event_loop()
try:
    loop.run_until_complete(main())
finally:
    loop.close()
# You’ll probably see `asyncio.get_event_loop()` floating around in older examples, but unless you have a specific need to fine-tune control over the event loop management, `asyncio.run()` should be sufficient for most programs.
# If you do need to interact with the event loop within a Python program, `loop` is a good-old-fashioned Python object that supports introspection with `loop.is_running()` and `loop.is_closed()`. You can manipulate it if you need to get more fine-tuned control, such as in [scheduling a callback](https://docs.python.org/3/library/asyncio-eventloop.html#asyncio-example-lowlevel-helloworld) by passing the loop as an argument.
# What is more crucial is understanding a bit beneath the surface about the mechanics of the event loop. Here are a few points worth stressing about the event loop.
# **#1:** Coroutines don’t do much on their own until they are tied to the event loop.
# You saw this point before in the explanation on generators, but it’s worth restating. If you have a main coroutine that awaits others, simply calling it in isolation has little effect:
# +
import asyncio
async def main():
    """Print a two-part greeting, suspending at an `await` in between."""
    for text, pause in (("Hello ...", 1), ("World!", 0)):
        print(text)
        if pause:
            await asyncio.sleep(pause)
# -
# Calling a coroutine function only creates a coroutine object;
# nothing inside main() executes until that object is awaited/scheduled.
routine = main()
routine
# Remember to use `asyncio.run()` to actually force execution by scheduling the `main()` coroutine (future object) for execution on the event loop:
# +
# asyncio.run(routine)
# A coroutine object can only be awaited once; awaiting `routine` again
# after this would raise RuntimeError.
await routine
# Again, in notebook you have to run await, but in a normal script,
# you must use asyncio.run(routine)
# -
# (Other coroutines can be executed with `await`. It is typical to wrap just `main()` in `asyncio.run()`, and chained coroutines with `await` will be called from there.)
# **#2:** By default, an async IO event loop runs in a single thread and on a single CPU core. Usually, running one single-threaded event loop in one CPU core is more than sufficient. It is also possible to run event loops across multiple cores. Check out this [talk by <NAME>](https://youtu.be/0kXaLh8Fz3k?t=10m30s) for more, and be warned that your laptop may spontaneously combust.
# **#3.** Event loops are pluggable. That is, you could, if you really wanted, write your own event loop implementation and have it run tasks just the same. This is wonderfully demonstrated in the [`uvloop`](https://github.com/MagicStack/uvloop) package, which is an implementation of the event loop in Cython.
# That is what is meant by the term “pluggable event loop”: you can use any working implementation of an event loop, unrelated to the structure of the coroutines themselves. The `asyncio` package itself ships with [two different event loop implementations](https://docs.python.org/3/library/asyncio-eventloop.html#event-loop-implementations), with the default being based on the [`selectors`](https://docs.python.org/3/library/selectors.html#module-selectors) module. (The second implementation is built for Windows only.)
# <a class="anchor" id="a_full_program:_asynchronous_requests"></a>
#
# ## A Full Program: Asynchronous Requests
# You’ve made it this far, and now it’s time for the fun and painless part. In this section, you’ll build a web-scraping URL collector, `areq.py`, using `aiohttp`, a blazingly fast async HTTP client/server framework. (We just need the client part.) Such a tool could be used to map connections between a cluster of sites, with the links forming a [directed graph](https://en.wikipedia.org/wiki/Directed_graph).
# The high-level program structure will look like this:
# 1.
# Read a sequence of URLs from a local file, `urls.txt`.
#
#
#
# 2.
# Send GET requests for the URLs and decode the resulting content. If this fails, stop there for a URL.
#
#
#
# 3.
# Search for the URLs within `href` tags in the HTML of the responses.
#
#
#
# 4.
# Write the results to `foundurls.txt`.
#
#
#
# 5.
# Do all of the above as asynchronously and concurrently as possible. (Use `aiohttp` for the requests, and `aiofiles` for the file-appends. These are two primary examples of IO that are well-suited for the async IO model.)
#
#
#
#
# Here are the contents of `urls.txt`. It’s not huge, and contains mostly highly trafficked sites:
# ```sh
# $ cat urls.txt
# https://regex101.com/
# https://docs.python.org/3/this-url-will-404.html
# https://www.nytimes.com/guides/
# https://www.mediamatters.org/
# https://1.1.1.1/
# https://www.politico.com/tipsheets/morning-money
# https://www.bloomberg.com/markets/economics
# https://www.ietf.org/rfc/rfc2616.txt
# ```
# The second URL in the list should return a 404 response, which you’ll need to handle gracefully. If you’re running an expanded version of this program, you’ll probably need to deal with much hairier problems than this, such as server disconnections and endless redirects.
# The requests themselves should be made using a single session, to take advantage of reusage of the session’s internal connection pool.
# Let’s take a look at the full program. We’ll walk through things step-by-step after:
# +
# #!/usr/bin/env python3
# areq.py
"""Asynchronously get links embedded in multiple pages' HMTL."""
import asyncio
import logging
import re
import sys
from typing import IO
import urllib.error
import urllib.parse
import aiofiles
import aiohttp
from aiohttp import ClientSession
logging.basicConfig(
format="%(asctime)s %(levelname)s:%(name)s: %(message)s",
level=logging.DEBUG,
datefmt="%H:%M:%S",
stream=sys.stderr,
)
logger = logging.getLogger("areq")
logging.getLogger("chardet.charsetprober").disabled = True
HREF_RE = re.compile(r'href="(.*?)"')
async def fetch_html(url: str, session: ClientSession, **kwargs) -> str:
    """Fetch and return the decoded page HTML for `url` via a GET request.

    Extra kwargs are forwarded to `session.request()`.  Deliberately does
    no exception handling: a non-2xx status raises immediately so the
    caller (`parse()`) can decide how to react.
    """
    response = await session.request(method="GET", url=url, **kwargs)
    response.raise_for_status()
    logger.info("Got response [%s] for URL: %s", response.status, url)
    return await response.text()
async def parse(url: str, session: ClientSession, **kwargs) -> set:
    """Find HREFs in the HTML of `url`.

    Returns a set of absolute link URLs. Returns an empty set when the
    fetch or decode fails, so one bad URL never aborts the whole crawl.
    kwargs are passed through to `fetch_html()` / `session.request()`.
    """
    found = set()
    try:
        html = await fetch_html(url=url, session=session, **kwargs)
    except (
        aiohttp.ClientError,
        aiohttp.http_exceptions.HttpProcessingError,
    ) as e:
        # Expected network/protocol failures: log details and skip this URL.
        logger.error(
            "aiohttp exception for %s [%s]: %s",
            url,
            getattr(e, "status", None),
            getattr(e, "message", None),
        )
        return found
    except Exception as e:
        # Anything else (e.g. decode errors) is unexpected: log with traceback.
        # Fixed typo in the log message: "occured" -> "occurred".
        logger.exception(
            "Non-aiohttp exception occurred: %s", getattr(e, "__dict__", {})
        )
        return found
    else:
        for link in HREF_RE.findall(html):
            try:
                abslink = urllib.parse.urljoin(url, link)
            except (urllib.error.URLError, ValueError):
                # Malformed href: log it and move on to the next link.
                # (Removed a redundant `pass` that followed this call.)
                logger.exception("Error parsing URL: %s", link)
            else:
                found.add(abslink)
        logger.info("Found %d links for %s", len(found), url)
        return found
async def write_one(file: IO, url: str, **kwargs) -> None:
    """Append each (source URL, found link) pair for `url` to `file`.

    Skips writing entirely when `parse()` found nothing (or failed).
    kwargs are forwarded to `parse()`.
    """
    links = await parse(url=url, **kwargs)
    if not links:
        return None
    async with aiofiles.open(file, "a") as outfile:
        for link in links:
            await outfile.write(f"{url}\t{link}\n")
        logger.info("Wrote results for source URL: %s", url)
async def bulk_crawl_and_write(file: IO, urls: set, **kwargs) -> None:
    """Crawl & write concurrently to `file` for multiple `urls`.

    A single ClientSession is shared by every request so the connection
    pool is reused; all per-URL pipelines run concurrently via gather().
    """
    async with ClientSession() as session:
        tasks = [
            write_one(file=file, url=url, session=session, **kwargs)
            for url in urls
        ]
        await asyncio.gather(*tasks)
if __name__ == "__main__":
    import pathlib
    import sys
    # Guard early: asyncio.run() below requires Python 3.7+.
    assert sys.version_info >= (3, 7), "Script requires Python 3.7+."
    # Resolve inputs/outputs relative to this script, not the CWD.
    here = pathlib.Path(__file__).parent
    with open(here.joinpath("urls.txt")) as infile:
        urls = set(map(str.strip, infile))
    outpath = here.joinpath("foundurls.txt")
    # Truncate the output file and write the TSV header synchronously;
    # the crawl then appends to it asynchronously via aiofiles.
    with open(outpath, "w") as outfile:
        outfile.write("source_url\tparsed_url\n")
    asyncio.run(bulk_crawl_and_write(file=outpath, urls=urls))
# -
# This script is longer than our initial toy programs, so let’s break it down.
# The constant `HREF_RE` is a [regular expression](https://realpython.com/regex-python/) to extract what we’re ultimately searching for, `href` tags within HTML:
>>> HREF_RE.search('Go to <a href="https://realpython.com/">Real Python</a>')
<re.Match object; span=(15, 45), match='href="https://realpython.com/"'>
# The coroutine `fetch_html()` is a wrapper around a GET request to make the request and decode the resulting page HTML. It makes the request, awaits the response, and raises right away in the case of a non-200 status:
resp = await session.request(method="GET", url=url, **kwargs)
resp.raise_for_status()
# If the status is okay, `fetch_html()` returns the page HTML (a `str`). Notably, there is no exception handling done in this function. The logic is to propagate that exception to the caller and let it be handled there:
html = await resp.text()
# We `await` `session.request()` and `resp.text()` because they’re awaitable coroutines. The request/response cycle would otherwise be the long-tailed, time-hogging portion of the application, but with async IO, `fetch_html()` lets the event loop work on other readily available jobs such as parsing and writing URLs that have already been fetched.
# Next in the chain of coroutines comes `parse()`, which waits on `fetch_html()` for a given URL, and then extracts all of the `href` tags from that page’s HTML, making sure that each is valid and formatting it as an absolute path.
# Admittedly, the second portion of `parse()` is blocking, but it consists of a quick regex match and ensuring that the links discovered are made into absolute paths.
# In this specific case, this synchronous code should be quick and inconspicuous. But just remember that any line within a given coroutine will block other coroutines unless that line uses `yield`, `await`, or `return`. If the parsing was a more intensive process, you might want to consider running this portion in its own process with [`loop.run_in_executor()`](https://docs.python.org/3/library/asyncio-eventloop.html#executing-code-in-thread-or-process-pools).
# Next, the coroutine `write_one()` takes a file object and a single URL, and waits on `parse()` to return a `set` of the parsed URLs, writing each to the file asynchronously along with its source URL through use of `aiofiles`, a package for async file IO.
# Lastly, `bulk_crawl_and_write()` serves as the main entry point into the script’s chain of coroutines. It uses a single session, and a task is created for each URL that is ultimately read from `urls.txt`.
# Here are a few additional points that deserve mention:
# -
# The default `ClientSession` has an [adapter](https://aiohttp.readthedocs.io/en/stable/client_reference.html#connectors) with a maximum of 100 open connections. To change that, pass an instance of `asyncio.connector.TCPConnector` to `ClientSession`. You can also specify limits on a per-host basis.
#
#
#
# -
# You can specify max [timeouts](https://aiohttp.readthedocs.io/en/stable/client_quickstart.html#timeouts) for both the session as a whole and for individual requests.
#
#
#
# -
# This script also uses `async with`, which works with an [asynchronous context manager](https://www.python.org/dev/peps/pep-0492/#asynchronous-context-managers-and-async-with). I haven’t devoted a whole section to this concept because the transition from synchronous to asynchronous context managers is fairly straightforward. The latter has to define `.__aenter__()` and `.__aexit__()` rather than `.__exit__()` and `.__enter__()`. As you might expect, `async with` can only be used inside a coroutine function declared with `async def`.
#
#
#
#
# If you’d like to explore a bit more, the [companion files](https://github.com/realpython/materials/tree/master/asyncio-walkthrough) for this tutorial up at GitHub have comments and docstrings attached as well.
# Here’s the execution in all of its glory, as `areq.py` gets, parses, and saves results for 9 URLs in under a second:
# ```sh
# $ python3 areq.py
# 21:33:22 DEBUG:asyncio: Using selector: KqueueSelector
# 21:33:22 INFO:areq: Got response [200] for URL: https://www.mediamatters.org/
# 21:33:22 INFO:areq: Found 115 links for https://www.mediamatters.org/
# 21:33:22 INFO:areq: Got response [200] for URL: https://www.nytimes.com/guides/
# 21:33:22 INFO:areq: Got response [200] for URL: https://www.politico.com/tipsheets/morning-money
# 21:33:22 INFO:areq: Got response [200] for URL: https://www.ietf.org/rfc/rfc2616.txt
# 21:33:22 ERROR:areq: aiohttp exception for https://docs.python.org/3/this-url-will-404.html [404]: Not Found
# 21:33:22 INFO:areq: Found 120 links for https://www.nytimes.com/guides/
# 21:33:22 INFO:areq: Found 143 links for https://www.politico.com/tipsheets/morning-money
# 21:33:22 INFO:areq: Wrote results for source URL: https://www.mediamatters.org/
# 21:33:22 INFO:areq: Found 0 links for https://www.ietf.org/rfc/rfc2616.txt
# 21:33:22 INFO:areq: Got response [200] for URL: https://1.1.1.1/
# 21:33:22 INFO:areq: Wrote results for source URL: https://www.nytimes.com/guides/
# 21:33:22 INFO:areq: Wrote results for source URL: https://www.politico.com/tipsheets/morning-money
# 21:33:22 INFO:areq: Got response [200] for URL: https://www.bloomberg.com/markets/economics
# 21:33:22 INFO:areq: Found 3 links for https://www.bloomberg.com/markets/economics
# 21:33:22 INFO:areq: Wrote results for source URL: https://www.bloomberg.com/markets/economics
# 21:33:23 INFO:areq: Found 36 links for https://1.1.1.1/
# 21:33:23 INFO:areq: Got response [200] for URL: https://regex101.com/
# 21:33:23 INFO:areq: Found 23 links for https://regex101.com/
# 21:33:23 INFO:areq: Wrote results for source URL: https://regex101.com/
# 21:33:23 INFO:areq: Wrote results for source URL: https://1.1.1.1/
# ```
# That’s not too shabby! As a sanity check, you can check the line-count on the output. In my case, it’s 626, though keep in mind this may fluctuate:
# ```sh
# $ wc -l foundurls.txt
# 626 foundurls.txt
#
# $ head -n 3 foundurls.txt
# source_url parsed_url
# https://www.bloomberg.com/markets/economics https://www.bloomberg.com/feedback
# https://www.bloomberg.com/markets/economics https://www.bloomberg.com/notices/tos
# ```
# <a class="anchor" id="async_io_in_context"></a>
#
# ## Async IO in Context
# Now that you’ve seen a healthy dose of code, let’s step back for a minute and consider when async IO is an ideal option and how you can make the comparison to arrive at that conclusion or otherwise choose a different model of concurrency.
# <a class="anchor" id="when_and_why_is_async_io_the_right_choice?"></a>
#
# ### When and Why Is Async IO the Right Choice?
# This tutorial is no place for an extended treatise on async IO versus threading versus multiprocessing. However, it’s useful to have an idea of when async IO is probably the best candidate of the three.
# The battle over async IO versus multiprocessing is not really a battle at all. In fact, they can be [used in concert](https://youtu.be/0kXaLh8Fz3k?t=10m30s). If you have multiple, fairly uniform CPU-bound tasks (a great example is a [grid search](http://scikit-learn.org/stable/modules/grid_search.html#parallelism) in libraries such as `scikit-learn` or `keras`), multiprocessing should be an obvious choice.
# Simply putting `async` before every function is a bad idea if all of the functions use blocking calls. (This can actually slow down your code.) But as mentioned previously, there are places where async IO and multiprocessing can [live in harmony](https://youtu.be/0kXaLh8Fz3k?t=10m30s).
# The contest between async IO and threading is a little bit more direct. I mentioned in the introduction that “threading is hard.” The full story is that, even in cases where threading seems easy to implement, it can still lead to infamous impossible-to-trace bugs due to race conditions and memory usage, among other things.
# Threading also tends to scale less elegantly than async IO, because threads are a system resource with a finite availability. Creating thousands of threads will fail on many machines, and I don’t recommend trying it in the first place. Creating thousands of async IO tasks is completely feasible.
# Async IO shines when you have multiple IO-bound tasks where the tasks would otherwise be dominated by blocking IO-bound wait time, such as:
# -
# Network IO, whether your program is the server or the client side
#
#
#
# -
# Serverless designs, such as a peer-to-peer, multi-user network like a group chatroom
#
#
#
# -
# Read/write operations where you want to mimic a “fire-and-forget” style but worry less about holding a lock on whatever you’re reading and writing to
#
#
#
#
# The biggest reason not to use it is that `await` only supports a specific set of objects that define a specific set of methods. If you want to do async read operations with a certain DBMS, you’ll need to find not just a Python wrapper for that DBMS, but one that supports the `async`/`await` syntax. Coroutines that contain synchronous calls block other coroutines and tasks from running.
# For a shortlist of libraries that work with `async`/`await`, see the [list](#libraries-that-work-with-asyncawait) at the end of this tutorial.
# <a class="anchor" id="async_io_it_is,_but_which_one?"></a>
#
# ### Async IO It Is, but Which One?
# This tutorial focuses on async IO, the `async`/`await` syntax, and using `asyncio` for event-loop management and specifying tasks. `asyncio` certainly isn’t the only async IO library out there. This observation from <NAME> says a lot:
# To that end, a few big-name alternatives that do what `asyncio` does, albeit with different APIs and different approaches, are [`curio`](https://github.com/dabeaz/curio) and [`trio`](https://github.com/python-trio/trio). Personally, I think that if you’re building a moderately sized, straightforward program, just using `asyncio` is plenty sufficient and understandable, and lets you avoid adding yet another large dependency outside of Python’s standard library.
# But by all means, check out `curio` and `trio`, and you might find that they get the same thing done in a way that’s more intuitive for you as the user. Many of the package-agnostic concepts presented here should permeate to alternative async IO packages as well.
# <a class="anchor" id="odds_and_ends"></a>
#
# ## Odds and Ends
# In these next few sections, you’ll cover some miscellaneous parts of `asyncio` and `async`/`await` that haven’t fit neatly into the tutorial thus far, but are still important for building and understanding a full program.
# <a class="anchor" id="other_top-level_`asyncio`_functions"></a>
#
# ### Other Top-Level `asyncio` Functions
# In addition to `asyncio.run()`, you’ve seen a few other package-level functions such as `asyncio.create_task()` and `asyncio.gather()`.
# You can use `create_task()` to schedule the execution of a coroutine object, followed by `asyncio.run()`:
# +
>>> import asyncio
>>> async def coro(seq) -> list:
... """'IO' wait time is proportional to the max element."""
... await asyncio.sleep(max(seq))
... return list(reversed(seq))
...
>>> async def main():
... # This is a bit redundant in the case of one task
... # We could use `await coro([3, 2, 1])` on its own
... t = asyncio.create_task(coro([3, 2, 1])) # Python 3.7+
... await t
... print(f't: type {type(t)}')
... print(f't done: {t.done()}')
...
>>> t = asyncio.run(main())
t: type <class '_asyncio.Task'>
t done: True
# -
# There’s a subtlety to this pattern: if you don’t `await t` within `main()`, it may finish before `main()` itself signals that it is complete. Because `asyncio.run(main())` [calls `loop.run_until_complete(main())`](https://github.com/python/cpython/blob/7e18deef652a9d413d5dbd19d61073ba7eb5460e/Lib/asyncio/runners.py#L43), the event loop is only concerned (without `await t` present) that `main()` is done, not that the tasks that get created within `main()` are done. Without `await t`, the loop’s other tasks [will be cancelled](https://github.com/python/cpython/blob/7e18deef652a9d413d5dbd19d61073ba7eb5460e/Lib/asyncio/runners.py#L46), possibly before they are completed. If you need to get a list of currently pending tasks, you can use `asyncio.Task.all_tasks()`.
# Separately, there’s `asyncio.gather()`. While it doesn’t do anything tremendously special, `gather()` is meant to neatly put a collection of coroutines (futures) into a single future. As a result, it returns a single future object, and, if you `await asyncio.gather()` and specify multiple tasks or coroutines, you’re waiting for all of them to be completed. (This somewhat parallels `queue.join()` from our earlier example.) The result of `gather()` will be a list of the results across the inputs:
>>> import time
>>> async def main():
... t = asyncio.create_task(coro([3, 2, 1]))
... t2 = asyncio.create_task(coro([10, 5, 0])) # Python 3.7+
... print('Start:', time.strftime('%X'))
... a = await asyncio.gather(t, t2)
... print('End:', time.strftime('%X')) # Should be 10 seconds
... print(f'Both tasks done: {all((t.done(), t2.done()))}')
... return a
...
>>> a = asyncio.run(main())
Start: 16:20:11
End: 16:20:21
Both tasks done: True
>>> a
[[1, 2, 3], [0, 5, 10]]
# You probably noticed that `gather()` waits on the entire result set of the Futures or coroutines that you pass it. Alternatively, you can loop over `asyncio.as_completed()` to get tasks as they are completed, in the order of completion. The function returns an iterator that yields tasks as they finish. Below, the result of `coro([3, 2, 1])` will be available before `coro([10, 5, 0])` is complete, which is not the case with `gather()`:
>>> async def main():
... t = asyncio.create_task(coro([3, 2, 1]))
... t2 = asyncio.create_task(coro([10, 5, 0]))
... print('Start:', time.strftime('%X'))
... for res in asyncio.as_completed((t, t2)):
... compl = await res
... print(f'res: {compl} completed at {time.strftime("%X")}')
... print('End:', time.strftime('%X'))
... print(f'Both tasks done: {all((t.done(), t2.done()))}')
...
>>> a = asyncio.run(main())
Start: 09:49:07
res: [1, 2, 3] completed at 09:49:10
res: [0, 5, 10] completed at 09:49:17
End: 09:49:17
Both tasks done: True
# Lastly, you may also see `asyncio.ensure_future()`. You should rarely need it, because it’s a lower-level plumbing API and largely replaced by `create_task()`, which was introduced later.
# <a class="anchor" id="the_precedence_of_`await`"></a>
#
# ### The Precedence of `await`
# While they behave somewhat similarly, the `await` keyword has significantly higher precedence than `yield`. This means that, because it is more tightly bound, there are a number of instances where you’d need parentheses in a `yield from` statement that are not required in an analogous `await` statement. For more information, see [examples of `await` expressions](https://www.python.org/dev/peps/pep-0492/#examples-of-await-expressions) from PEP 492.
# <a class="anchor" id="conclusion"></a>
#
# ## Conclusion
# You’re now equipped to use `async`/`await` and the libraries built off of it. Here’s a recap of what you’ve covered:
# -
# Asynchronous IO as a language-agnostic model and a way to effect concurrency by letting coroutines indirectly communicate with each other
#
#
#
# -
# The specifics of Python’s new `async` and `await` keywords, used to mark and define coroutines
#
#
#
# -
# `asyncio`, the Python package that provides the API to run and manage coroutines
#
#
#
#
# <a class="anchor" id="resources"></a>
#
# ## Resources
# <a class="anchor" id="python_version_specifics"></a>
#
# ### Python Version Specifics
# Async IO in Python has evolved swiftly, and it can be hard to keep track of what came when. Here’s a list of Python minor-version changes and introductions related to `asyncio`:
# - 3.3: The `yield from` expression allows for generator delegation.
# - 3.4: `asyncio` was introduced in the Python standard library with provisional API status.
# - 3.5: `async` and `await` became a part of the Python grammar, used to signify and wait on coroutines. They were not yet reserved keywords. (You could still define functions or variables named `async` and `await`.)
# - 3.6: Asynchronous generators and asynchronous comprehensions were introduced. The API of `asyncio` was declared stable rather than provisional.
# - 3.7: `async` and `await` became reserved keywords. (They cannot be used as identifiers.) They are intended to replace the `asyncio.coroutine()` decorator. `asyncio.run()` was introduced to the `asyncio` package, among [a bunch of other features](https://docs.python.org/3/whatsnew/3.7.html#whatsnew37-asyncio).
# If you want to be safe (and be able to use `asyncio.run()`), go with Python 3.7 or above to get the full set of features.
# <a class="anchor" id="articles"></a>
#
# ### Articles
# Here’s a curated list of additional resources:
# - Real Python: [Speed up your Python Program with Concurrency](https://realpython.com/python-concurrency/)
# - Real Python: [What is the Python Global Interpreter Lock?](https://realpython.com/python-gil/)
# - CPython: The `asyncio` package [source](https://github.com/python/cpython/tree/master/Lib/asyncio)
# - Python docs: [Data model > Coroutines](https://docs.python.org/3/reference/datamodel.html#coroutines)
# - TalkPython: [Async Techniques and Examples in Python](https://training.talkpython.fm/courses/details/async-in-python-with-threading-and-multiprocessing)
# - <NAME>: [How the Heck Does Async-Await Work in Python 3.5?](https://snarky.ca/how-the-heck-does-async-await-work-in-python-3-5/)
# - PYMOTW: [`asyncio`](https://pymotw.com/3/asyncio/)
# - <NAME> and <NAME>: [A Web Crawler With asyncio Coroutines](http://aosabook.org/en/500L/a-web-crawler-with-asyncio-coroutines.html)
# - <NAME>: [The State of Python Coroutines: `yield from`](http://www.andy-pearce.com/blog/posts/2016/Jun/the-state-of-python-coroutines-yield-from/)
# - <NAME>: [Some Thoughts on Asynchronous API Design in a Post-`async`/`await` World](https://vorpus.org/blog/some-thoughts-on-asynchronous-api-design-in-a-post-asyncawait-world/)
# - <NAME>: [I don’t understand Python’s Asyncio](http://lucumr.pocoo.org/2016/10/30/i-dont-understand-asyncio/)
# - <NAME>: [series on `asyncio`](http://www.artificialworlds.net/blog/2017/05/31/basic-ideas-of-python-3-asyncio-concurrency/) (4 posts)
# - Stack Overflow: [Python `asyncio.semaphore` in `async`-`await` function](https://stackoverflow.com/q/40836800/7954504)
# - <NAME>:
#     * [AsyncIO for the Working Python Developer](https://hackernoon.com/asyncio-for-the-working-python-developer-5c468e6e2e8e)
#     * [Asyncio Coroutine Patterns: Beyond `await`](https://medium.com/python-pandemonium/asyncio-coroutine-patterns-beyond-await-a6121486656f)
#
#
#
#
# A few Python *What’s New* sections explain the motivation behind language changes in more detail:
# - [What’s New in Python 3.3](https://docs.python.org/3/whatsnew/3.3.html#pep-380) (`yield from` and PEP 380)
# - [What’s New in Python 3.6](https://docs.python.org/3/whatsnew/3.6.html#whatsnew36-pep525) (PEP 525 & 530)
#
# From <NAME>:
# - [Generator: Tricks for Systems Programmers](http://www.dabeaz.com/generators/)
# - [A Curious Course on Coroutines and Concurrency](http://www.dabeaz.com/coroutines/)
# - [Generators: The Final Frontier](http://dabeaz.com/finalgenerator/index.html)
#
# YouTube talks:
# - [<NAME> - Thinking Outside the GIL with AsyncIO and Multiprocessing - PyCon 2018](https://youtu.be/0kXaLh8Fz3k)
# - [Keynote <NAME> - Topics of Interest (Python Asyncio)](https://youtu.be/ZzfHjytDceU)
# - [<NAME> - Python Concurrency From the Ground Up: LIVE! - PyCon 2015](https://youtu.be/MCs5OvhV9S4)
# - [<NAME>, Keynote on Concurrency, PyBay 2017](https://youtu.be/9zinZmE3Ogk)
# - [Thinking about Concurrency, <NAME>, Python core developer](https://youtu.be/Bv25Dwe84g0)
# - [<NAME> Asynchronous Python for the Complete Beginner PyCon 2017](https://youtu.be/iG6fr81xHKA)
# - [<NAME> asyncawait and asyncio in Python 3 6 and beyond PyCon 2017](https://youtu.be/2ZFFv-wZ8_g)
# - [Fear and Awaiting in Async: A Savage Journey to the Heart of the Coroutine Dream](https://youtu.be/E-1Y4kSsAFc)
# - [What Is Async, How Does It Work, and When Should I Use It? (PyCon APAC 2014)](https://youtu.be/kdzL3r-yJZY)
#
# <a class="anchor" id="related_peps"></a>
#
# ### Related PEPs
# |PEP|Date Created|
# |:--|:--|
# |[PEP 342 – Coroutines via Enhanced Generators](https://www.python.org/dev/peps/pep-0342/)|2005-05|
# |[PEP 380 – Syntax for Delegating to a Subgenerator](https://www.python.org/dev/peps/pep-0380/)|2009-02|
# |[PEP 3153 – Asynchronous IO support](https://www.python.org/dev/peps/pep-3153/)|2011-05|
# |[PEP 3156 – Asynchronous IO Support Rebooted: the “asyncio” Module](https://www.python.org/dev/peps/pep-3156/)|2012-12|
# |[PEP 492 – Coroutines with async and await syntax](https://www.python.org/dev/peps/pep-0492/)|2015-04|
# |[PEP 525 – Asynchronous Generators](https://www.python.org/dev/peps/pep-0525/)|2016-07|
# |[PEP 530 – Asynchronous Comprehensions](https://www.python.org/dev/peps/pep-0530/)|2016-09|
#
# <a class="anchor" id="libraries_that_work_with_`async`/`await`"></a>
#
# ### Libraries That Work With `async`/`await`
# From [aio-libs](https://github.com/aio-libs):
# - [`aiohttp`](https://github.com/aio-libs/aiohttp): Asynchronous HTTP client/server framework
# - [`aioredis`](https://github.com/aio-libs/aioredis): Async IO Redis support
# - [`aiopg`](https://github.com/aio-libs/aiopg): Async IO PostgreSQL support
# - [`aiomcache`](https://github.com/aio-libs/aiomcache): Async IO memcached client
# - [`aiokafka`](https://github.com/aio-libs/aiokafka): Async IO Kafka client
# - [`aiozmq`](https://github.com/aio-libs/aiozmq): Async IO ZeroMQ support
# - [`aiojobs`](https://github.com/aio-libs/aiojobs): Jobs scheduler for managing background tasks
# - [`async_lru`](https://github.com/aio-libs/async_lru): Simple [LRU cache](https://realpython.com/lru-cache-python/) for async IO
#
# From [magicstack](https://magic.io/):
# - [`uvloop`](https://github.com/MagicStack/uvloop): Ultra fast async IO event loop
# - [`asyncpg`](https://github.com/MagicStack/asyncpg): (Also very fast) async IO PostgreSQL support
#
# From other hosts:
# - [`trio`](https://github.com/python-trio/trio): Friendlier `asyncio` intended to showcase a radically simpler design
# - [`aiofiles`](https://github.com/Tinche/aiofiles): Async file IO
# - [`asks`](https://github.com/theelous3/asks): Async requests-like http library
# - [`asyncio-redis`](https://github.com/jonathanslenders/asyncio-redis): Async IO Redis support
# - [`aioprocessing`](https://github.com/dano/aioprocessing): Integrates `multiprocessing` module with `asyncio`
# - [`umongo`](https://github.com/Scille/umongo): Async IO MongoDB client
# - [`unsync`](https://github.com/alex-sherman/unsync): Unsynchronize `asyncio`
# - [`aiostream`](https://github.com/vxgmichel/aiostream): Like `itertools`, but async
#
| 01. Python/04. Advanced/07.4 Async IO in Python, A Complete Walkthrough.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import scipy.stats as s
import configparser
import pandas as pd
import statsmodels.api as sm
import matplotlib
matplotlib.rcParams['figure.figsize'] = (10, 6)
import matplotlib.pyplot as plt
import scipy.stats as ss
from datetime import datetime
from itertools import groupby
import pickle
from collections import namedtuple, defaultdict
from scipy import stats
import re
from nltk import word_tokenize
import nltk
nltk.download('punkt')
# -
# Load the prediction dump; literal "None" strings become NaN.
data = pd.read_csv('target/test_predict_243k_balanced_2911_0_20171129T162503.tsv', sep='\t', na_values='None')
data.groupby('result').count()

# Overlay the score distributions of the two discriminator strategies.
plt.hist(data['best_discriminator'], label='best', alpha=0.3)
plt.hist(data['random_discriminator'], label='random', alpha=0.3)
plt.legend()
plt.show()
# +
data['best_discriminator'].mean(), data['random_discriminator'].mean()
# +
from run_bot_choose_best import prepare_dataset, INPUT_FILE

dataset = prepare_dataset(INPUT_FILE)
# with open('downloads/test_predict_243k_balanced_2911_0.csv_pickbest.pickle', 'rb') as f:
#     dataset = pickle.load(f)
# -
# Mean discriminator score over every bot-authored row, across all contexts.
scores = [
    row.discriminator
    for rows in dataset.values()
    for row in rows
    if row.operator == 'bot'
]
np.mean(scores)
| anazyze_choosebest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
from collections import Counter
import csv
import dgl.data
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
from torch.distributions import Categorical
import matplotlib.pyplot as plt
from math import log2
# %run MoleculeGenerator2.ipynb
# +
# drugs = pandas.read_csv('./SmallDrug.csv',error_bad_lines=False,delimiter=';')
# smiles_values = drugs['Smiles'].values
# +
# len(smiles_values)
# +
class MolStat():
    '''
    Gathers per-atom statistics over a collection of molecules.

    Statistics are computed lazily from SMILES strings (via RDKit's `Chem`)
    and cached in ``self.cache`` as ``{stat_name: [raw_stats, histogram]}``,
    where ``raw_stats`` is a per-molecule list of {atom_symbol: [values]}
    dicts and ``histogram`` is the aggregation of those dicts per atom type.
    '''
    def __init__(self, smiles):
        # Iterable of SMILES strings used when GetStats is called without
        # an explicit `smiles` argument.
        self.smiles = smiles
        self.atom_list = ['N','C','O','S','F','Cl','Na','P','Br','Si','B','Se','K', 'Aro']
        self.action_keys = ['atom_per_mol','bonds_per_atom_type','bonds_per_atom',
                            'degree_per_atom_type','degree_per_atom']
        # Bug fix: dict.fromkeys(keys, [None, None]) shares ONE list object
        # across every key, so writing cache[a][0] also mutated cache[b][0].
        # Build an independent [raw, hist] slot per key instead.
        self.cache = {key: [None, None] for key in self.action_keys}
    def __mol_query_atom_type(self, mol, atom_func):
        # Higher-order helper: apply `atom_func` to every atom of `mol` and
        # bucket the results by atom symbol.
        # Bug fix: `atom_list` was an undefined bare name (NameError); it
        # must be the instance attribute `self.atom_list`.
        atom_type_dict = {atom: [] for atom in self.atom_list}
        for atom in mol.GetAtoms():
            stat = atom_func(atom)
            atom_type_dict[atom.GetSymbol()].append(stat)
        return atom_type_dict
    def __degree_per_atom(self, atom):
        # Number of explicit bonds (graph degree) of the atom.
        return atom.GetDegree()
    def __bonds_per_atom(self, atom):
        # Explicit valence (bond-order sum) of the atom.
        return atom.GetExplicitValence()
    def __in_aro_ring(self, atom):
        # Whether the atom is part of an aromatic ring.
        return atom.GetIsAromatic()
    def __map(self, atom_func_name, smiles):
        # Map the named per-atom statistic over each molecule and cache the
        # raw per-molecule results in cache[atom_func_name][0].
        stats = []
        atom_func = self.__func_name_parser(atom_func_name)
        for smile in smiles:
            try:
                mol = Chem.MolFromSmiles(smile)
                stat = self.__mol_query_atom_type(mol, atom_func)
                stats.append(stat)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer swallowed.
                print('bad smile')
        self.cache[atom_func_name][0] = stats
    def __sum_states_all(self, stat):
        # Aggregate cached per-molecule stats into one value-list per atom
        # type, stored in cache[stat][1].
        if self.cache[stat][0] is None:
            print('calculating '+ stat + 'on initial smiles now')
            # Bug fix: the original called
            # `self.GetStats(__func_name_parser(stat))`, which raised a
            # NameError and would have passed a function where GetStats
            # expects the statistic's name.
            self.GetStats(stat)
        else:
            print('using cached ' + stat)
        hist = {atom: [] for atom in self.atom_list}
        for mol_dict in self.cache[stat][0]:
            for atom in self.atom_list:
                hist[atom] += mol_dict[atom]
        self.cache[stat][1] = hist
    def __func_name_parser(self, func_name):
        # Translate a statistic name to the per-atom callable.
        # Returns None for unknown names (matching original behavior).
        if func_name == 'degree_per_atom_type':
            return self.__degree_per_atom
        if func_name == 'bonds_per_atom_type':
            return self.__bonds_per_atom
    def GetDeviation(self, stat):
        # Not implemented yet; kept for interface compatibility.
        return
    def GetStats(self, atom_func_name, smiles=None):
        # Compute raw stats for `atom_func_name` over `smiles` (or over the
        # constructor's collection when omitted).
        # Bug fix: `if any(smiles == None)` raised TypeError for both None
        # and plain lists; the intent was "default to self.smiles".
        if smiles is None:
            smiles = self.smiles
        self.__map(atom_func_name, smiles)
    def GetHist(self, stat):
        # Force (re)aggregation of the histogram for `stat`.
        self.__sum_states_all(stat)
    def GetHistPlot(self, stat, atom_type):
        # Plot a normalized histogram of `stat` for one atom type,
        # aggregating first if needed.
        if self.cache[stat][1] is None:
            self.__sum_states_all(stat)
        plt.hist(self.cache[stat][1][atom_type], density=True, bins=4)
# +
# molStat_container = MolStat('fra')
# +
# molStat_container.GetStats('degree_per_atom_type',smiles_values[0:200])
# +
# molStat_container.GetHist('degree_per_atom_type')
# +
# molStat_container.cache['degree_per_atom_type'][1]['C']
# +
# molStat_container.GetHistPlot('degree_per_atom_type','F')
# +
# mol = Chem.MolFromSmiles('CCCCC(=O)O.N')
# +
# for atom in mol.GetAtoms():
# atom.GetIsAromatic
# -
# mol_atoms_list = []
# for smile in smiles_values:
# atom_list = []
# try:
# ok = True
# mol = Chem.MolFromSmiles(smile)
# for atom in mol.GetAtoms():
# symbol = atom.GetSymbol()
# atom_list.append(symbol)
# if symbol not in ['N','C','O','S','F','Cl','Na','P','Br','Si','B','Se','K']:
# ok = False
# if ok:
# mol_atoms_list.append(Counter(atom_list))
# except:
# pass
# +
# for dic in mol_atoms_list:
# num_atoms = 0
# for key in dic:
# num_atoms += dic[key]
# for key in dic:
# dic[key] /= num_atoms
# +
class DiscriminatorWrapper():
    '''
    Wrapper around GraphDiscriminator that owns the optimizer, BCE loss,
    an endless generator of real molecule graph batches, and per-step
    loss/accuracy logs. Implements TrainOnBatch for one discriminator
    update against a caller-supplied fake batch.

    NOTE(review): the `csv` parameter is accepted but never used
    (`_grabCSV` is a stub), and `_gen` reads the module-level
    `smiles_values` rather than `self.smiles` — confirm intent.
    '''
    def __init__(self, input_dim: int, hidden_dim: int, lr: float, batch_size: int, csv: str, chck_pnt_dir: str):
        '''
        Model Variables
        '''
        self.model = GraphDiscriminator(input_dim,hidden_dim)
        self.optim = Adam(self.model.parameters(), lr)
        self.loss_fn = nn.BCELoss()
        '''
        Logging Variables
        '''
        self.loss_plot = []  # per-step training loss values
        self.accuracy_plot = []  # per-step combined real/fake accuracy
        self.chck_pnt_dir = chck_pnt_dir  # checkpoint directory (not used yet in this block)
        '''
        Dataset Variabbles
        '''
        self.batch_size = batch_size
        self.smiles = smiles_values  # module-level dataset of SMILES strings
        self.RealGraphGenerator = None  # lazily created by init_gen()
        self.init_gen()
    def _grabCSV(self) -> list:
        # Stub: intended to produce a list of SMILES strings from the `csv`
        # argument (length = batch_size); currently unimplemented.
        pass
        # produces list of smile strings, length = batch_size
    def GrabRealBatch(self):
        # Pull the next batched DGL graph of real molecules.
        return self.RealGraphGenerator.__next__()
    def init_gen(self):
        # (Re)create the infinite real-batch generator.
        self.RealGraphGenerator = self._gen()
    def _gen(self):
        # Infinite generator: yields dgl.batch'ed graphs of `batch_size`
        # valid molecules, reshuffling the dataset on wrap-around.
        counter = 0
        dataset_len = smiles_values.__len__()
        while True:
            if counter + self.batch_size > dataset_len:
                # Not enough items left for a full batch: restart and shuffle.
                counter = 0
                random.shuffle(smiles_values)
            graph_list = []
            i = 0
            tick = 0
            while i < self.batch_size:
                # smiles_to_graph returns a list; empty means conversion
                # failed, so skip that SMILES without counting it.
                graph = smiles_to_graph([smiles_values[tick+counter]])
                if len(graph) == 0:
                    pass
                elif len(graph) == 1:
                    graph_list.append(graph[0])
                    i += 1
                else:
                    print("error")
                tick +=1
            counter+= self.batch_size
            yield dgl.batch(graph_list)
    def TrainOnBatch(self,fake_batch):
        # One discriminator step: real batch labeled 1, fake batch labeled 0.
        real_batch = self.GrabRealBatch()
        real_out = self.model(real_batch)
        # Real samples are "correct" when scored above the 0.5 threshold.
        real_correct = (real_out>0.5).flatten().float()
        c_acc = real_correct.detach().sum()
        fake_out = self.model(fake_batch)
        # Fake samples are "correct" when scored below the threshold.
        fake_correct = (fake_out<0.5).flatten().float()
        f_acc = fake_correct.detach().sum()
        acc = (c_acc+f_acc)/(real_out.shape[0]+fake_out.shape[0])
        # BCE against ones for real outputs plus BCE against zeros for fakes.
        loss = self.loss_fn(real_out,torch.ones(real_out.shape)) + self.loss_fn(fake_out,torch.zeros(fake_out.shape))
        self.optim.zero_grad()
        # retain_graph=True: the fake batch's graph is presumably reused by
        # the generator's update — TODO confirm against the training loop.
        loss.backward(retain_graph=True)
        self.optim.step()
        print(acc)
        self.loss_plot.append(loss.detach().numpy())
        self.accuracy_plot.append(acc)
| horrid archive/Discrim.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: smbo-explanation
# language: python
# name: smbo-explanation
# ---
# # Credit Experiment
#
# This notebook contains the code to reproduce the Credit experiment. The datasets are stored in the _data_ folder in the repo.
#
# **Run the following cells in order to reproduce the experiment from the paper.**
from boexplain import fmax
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from imblearn.over_sampling import SMOTE
# ## The objective function
#
# The objective function takes as input the filtered source data, and then proceed through an ML pipeline that includes joining with the record table, one-hot encoding binning numerical variables using quantile and equi-range binning, and grouping categorical values.
# +
def obj(source_filtered):
    """Objective function for BOExplain: train on the filtered source data
    and return the resulting test accuracy.

    Runs the full credit-default ML pipeline — join with the credit record
    table to build labels, feature engineering (binning, grouping, one-hot
    encoding), SMOTE class balancing, decision-tree training — and scores
    the model on the held-out test set.

    Parameters
    ----------
    source_filtered : pandas.DataFrame
        Application records remaining after the candidate predicate has
        removed rows. Reads module-level globals ``record_orig``,
        ``features``, ``X_test_final`` and ``y_test_final``.

    Returns
    -------
    float
        Accuracy on the test set, or 0 when SMOTE cannot resample
        (e.g. only one class remains after filtering).
    """
    data = source_filtered.copy()
    record = record_orig.copy()
    # determine if the user defaults or not (create labels):
    # STATUS 2-5 means the loan payment is 60+ days overdue
    record['target'] = 0
    record.loc[record['STATUS'].isin({'2', '3', '4', '5'}), 'target'] = 1
    # determine all IDs associated with a loan default (any month overdue
    # marks the whole ID as a defaulter)
    cpunt = record.groupby('ID').sum()
    cpunt.loc[cpunt['target'] > 0, 'target'] = 1
    cpunt.loc[cpunt['target'] == 0, 'target'] = 0
    new_data = pd.merge(data, cpunt[['target']], how='inner', on='ID')
    new_data = new_data.dropna()
    # feature engineering
    new_data['CODE_GENDER'] = new_data['CODE_GENDER'].replace(['F', 'M'],
                                                              [0, 1])
    new_data['FLAG_OWN_CAR'] = new_data['FLAG_OWN_CAR'].replace(['N', 'Y'],
                                                                [0, 1])
    new_data['FLAG_OWN_REALTY'] = new_data['FLAG_OWN_REALTY'].replace(
        ['N', 'Y'], [0, 1])
    new_data.loc[new_data['CNT_CHILDREN'] >= 2, 'CNT_CHILDREN'] = 'over2'
    new_data = convert_dummy(new_data, 'CNT_CHILDREN')
    # income: quantile (equal-population) bins
    new_data = get_category(new_data,
                            'AMT_INCOME_TOTAL',
                            3, ["low", "medium", "high"],
                            qcut=True)
    new_data = convert_dummy(new_data, 'AMT_INCOME_TOTAL')
    # DAYS_BIRTH is negative days-since-birth; convert to age in years
    new_data['Age'] = -new_data['DAYS_BIRTH'] // 365
    new_data = get_category(new_data, 'Age', 5,
                            ["lowest", "low", "medium", "high", "highest"])
    new_data = convert_dummy(new_data, 'Age')
    new_data.loc[new_data['FLAG_PHONE'] >= 3, 'FLAG_PHONE'] = '3more'
    new_data = convert_dummy(new_data, 'FLAG_PHONE')
    # merge rare income types into an existing category
    new_data.loc[new_data['NAME_INCOME_TYPE'].isin({'Pensioner', 'Student'}),
                 'NAME_INCOME_TYPE'] = 'State servant'
    new_data = convert_dummy(new_data, 'NAME_INCOME_TYPE')
    # group occupations into labor / office / high-tech buckets
    new_data.loc[new_data['OCCUPATION_TYPE'].isin({
        'Cleaning staff', 'Cooking staff', 'Drivers', 'Laborers',
        'Low-skill Laborers', 'Security staff', 'Waiters/barmen staff'
    }), 'OCCUPATION_TYPE'] = 'Laborwk'
    new_data.loc[new_data['OCCUPATION_TYPE'].isin({
        'Accountants', 'Core staff', 'HR staff', 'Medicine staff',
        'Private service staff', 'Realty agents', 'Sales staff', 'Secretaries'
    }), 'OCCUPATION_TYPE'] = 'officewk'
    new_data.loc[new_data['OCCUPATION_TYPE'].
                 isin({'Managers', 'High skill tech staff', 'IT staff'}),
                 'OCCUPATION_TYPE'] = 'hightecwk'
    new_data = convert_dummy(new_data, 'OCCUPATION_TYPE')
    new_data = convert_dummy(new_data, 'NAME_HOUSING_TYPE')
    new_data.loc[new_data['NAME_EDUCATION_TYPE'] == 'Academic degree',
                 'NAME_EDUCATION_TYPE'] = 'Higher education'
    new_data = convert_dummy(new_data, 'NAME_EDUCATION_TYPE')
    new_data = convert_dummy(new_data, 'NAME_FAMILY_STATUS')
    # DAYS_EMPLOYED is negative days-employed; negative work_time marks
    # the dataset's "unemployed/pensioner" sentinel, imputed with the mean
    new_data['work_time'] = -new_data['DAYS_EMPLOYED'] // 365
    new_data.loc[new_data['work_time'] < 0, "work_time"] = np.nan
    new_data['work_time'] = new_data['work_time'].fillna(
        new_data['work_time'].mean())
    new_data = get_category(new_data, 'work_time', 5,
                            ["lowest", "low", "medium", "high", "highest"])
    new_data = convert_dummy(new_data, 'work_time')
    for col in new_data.select_dtypes(include=np.number):
        new_data[col] = new_data[col].astype(float)
    Y = new_data['target'].astype('int')
    # align columns with the fixed feature list: dummy columns whose
    # category vanished after filtering are filled with 0
    for col in features:
        if col not in new_data.columns:
            new_data[col] = 0.0
    X = new_data[features]
    try:
        # NOTE(review): `fit_sample` was renamed `fit_resample` in
        # imbalanced-learn >= 0.6 — confirm the pinned imblearn version.
        X_balance, Y_balance = SMOTE(random_state=0).fit_sample(X, Y)
    except ValueError:
        # SMOTE fails when a class is empty/too small after filtering
        return 0
    X_balance = pd.DataFrame(X_balance, columns=X.columns)
    # train the model
    dt = DecisionTreeClassifier(random_state=0)
    dtfit = dt.fit(X_balance, Y_balance)
    y_predict = dtfit.predict(X_test_final)
    # return the predicted accuracy score
    return accuracy_score(y_test_final, y_predict)
def convert_dummy(df, feature):
    """Return *df* with column *feature* replaced by its one-hot
    indicator columns (named ``{feature}_{value}``)."""
    indicators = pd.get_dummies(df[feature], prefix=feature)
    return df.drop(columns=[feature]).join(indicators)
def get_category(df, col, nbins, labels, qcut=False):
    """Discretize numeric column *col* of *df* into *nbins* labelled bins.

    With ``qcut=True`` the bins are quantile-based (roughly equal
    population); otherwise they span equal value ranges. The column is
    replaced in place and the mutated frame is returned for chaining.
    """
    # pd.qcut takes the bin count as `q`, pd.cut as `bins`
    binner, count_kw = (pd.qcut, "q") if qcut else (pd.cut, "bins")
    df[col] = binner(df[col], labels=labels, **{count_kw: nbins})
    return df
features = [
'CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY', 'DAYS_BIRTH',
'DAYS_EMPLOYED', 'FLAG_MOBIL', 'FLAG_WORK_PHONE', 'FLAG_EMAIL',
'CNT_FAM_MEMBERS', 'CNT_CHILDREN_0', 'CNT_CHILDREN_1',
'CNT_CHILDREN_over2', 'AMT_INCOME_TOTAL_low', 'AMT_INCOME_TOTAL_medium',
'AMT_INCOME_TOTAL_high', 'Age_lowest', 'Age_low', 'Age_medium', 'Age_high',
'Age_highest', 'FLAG_PHONE_0', 'FLAG_PHONE_1',
'NAME_INCOME_TYPE_Commercial associate', 'NAME_INCOME_TYPE_State servant',
'NAME_INCOME_TYPE_Working', 'OCCUPATION_TYPE_Laborwk',
'OCCUPATION_TYPE_hightecwk', 'OCCUPATION_TYPE_officewk',
'NAME_HOUSING_TYPE_Co-op apartment', 'NAME_HOUSING_TYPE_House / apartment',
'NAME_HOUSING_TYPE_Municipal apartment',
'NAME_HOUSING_TYPE_Office apartment', 'NAME_HOUSING_TYPE_Rented apartment',
'NAME_HOUSING_TYPE_With parents', 'NAME_EDUCATION_TYPE_Higher education',
'NAME_EDUCATION_TYPE_Incomplete higher',
'NAME_EDUCATION_TYPE_Lower secondary',
'NAME_EDUCATION_TYPE_Secondary / secondary special',
'NAME_FAMILY_STATUS_Civil marriage', 'NAME_FAMILY_STATUS_Married',
'NAME_FAMILY_STATUS_Separated', 'NAME_FAMILY_STATUS_Single / not married',
'NAME_FAMILY_STATUS_Widow', 'work_time_lowest', 'work_time_low',
'work_time_medium', 'work_time_high', 'work_time_highest'
]
# -
# ## Load and corrupt the source data
# +
data = pd.read_csv("data/application_record_train.csv", encoding='utf-8')
record_orig = pd.read_csv("data/credit_record_train.csv", encoding='utf-8')
X_test_final = pd.read_csv("data/credit_test.csv")
y_test_final = pd.read_csv("data/credit_labels.csv")
data["CNT_FAM_MEMBERS"] = data["CNT_FAM_MEMBERS"].astype(int)
# corrupt the source data
record_orig.loc[record_orig["ID"].isin(data.loc[(data["DAYS_BIRTH"] >= -23e3) &
(data["DAYS_BIRTH"] <= -17e3) &
(data["CNT_FAM_MEMBERS"] >= 2)
&
(data["CNT_FAM_MEMBERS"] <= 3),
"ID"].unique()),
"STATUS"] = '5'
# -
# ## BOExplain API call
#
# The function *fmax* is used to maximize the objective function. The columns DAYS_BIRTH and CNT_FAM_MEMBERS are searched for an explanation. The runtime is set by the `runtime` argument in the call below, and the results are averaged over 10 runs. The correct predicate is provided so that F-score, precision and recall can be calculated. Statistics about the run are saved to the file credit.json.
df_rem = fmax(
data=data,
f=obj,
num_cols=["DAYS_BIRTH", "CNT_FAM_MEMBERS"],
runtime=7,
runs=10,
random=True, # perform a random iteration
correct_pred={
"DAYS_BIRTH_min": -23e3,
"DAYS_BIRTH_len": 6e3,
"CNT_FAM_MEMBERS_min": 2,
"CNT_FAM_MEMBERS_len": 1
},
name="credit",
file="credit.json",
use_seeds_from_paper=True,
)
# # Recreate Figure 8
#
# From the output of the above call to the BOExplain API, the following two cells can be used to recreate Figure 8 from the paper.
# +
import pandas as pd
import altair as alt
alt.data_transformers.disable_max_rows()
import numpy as np
from json import loads
experiments = {}
fo = open("results/credit.json", "r")
for i, line in enumerate(fo.readlines()):
experiments[i] = loads(line)
fo.close()
df = pd.DataFrame({}, columns=["Algorithm", "Time (seconds)", "Value"])
for i in range(len(experiments)):
df_new = pd.DataFrame.from_dict({"Algorithm": experiments[i]["cat_enc"],
"Time (seconds)": list(range(5, experiments[i]["runtime"]+5, 5)),
"Value": experiments[i]["time_array"]},
orient='index').T
df = df.append(df_new)
df = df.explode("Value")
df = df.set_index(['Algorithm']).apply(pd.Series.explode).reset_index()
df["Algorithm"] = df["Algorithm"].replace({"individual_contribution_warm_start_top1": "BOExplain", np.nan: "Random"})
line = alt.Chart(df).mark_line().encode(
x='Time (seconds)',
y=alt.Y('mean(Value)', title=['Mean Objective', 'Function Value'], scale=alt.Scale(domain=[0.7, 0.9])),
color="Algorithm"
).properties(
width=225,
height=90
)
band = alt.Chart(df).mark_errorband(extent='stdev').encode(
x='Time (seconds)',
y=alt.Y('Value', title='Mean Objective Function Value', scale=alt.Scale(domain=[0.7, 0.9])),
color=alt.Color("Algorithm")
)
chart = line# + band
chart = chart.configure_title(
anchor='start',
)
chart.configure_legend(
title=None,
orient='none',
legendX=30,
legendY=163,
columns=4,
labelFontSize=15,
symbolSize=700,
labelLimit=275,
).configure_axis(
labelFontSize=15,
titleFontSize=15,
titlePadding=2
).configure_title(
fontSize=15
)
# +
from json import loads
import altair as alt
experiments = {}
fo = open("results/credit.json", "r")
for i, line in enumerate(fo.readlines()):
experiments[i] = loads(line)
fo.close()
import re
df = pd.DataFrame({}, columns=["Algorithm", "Time (seconds)", "Precision", "Recall", "F-score", "Jaccard"])
for i in range(len(experiments)):
df_new = pd.DataFrame.from_dict({"Algorithm": experiments[i]["cat_enc"],
# "Iteration": tuple(range(experiments[i]["n_trials"])),
"Time (seconds)": tuple(range(5, experiments[i]["runtime"]+5, 5)),
"Precision": experiments[i]["precision_time_array"],
"Recall": experiments[i]["recall_time_array"],
"F-score": experiments[i]["f_score_time_array"],
"Jaccard": experiments[i]["jaccard_time_array"]
}, orient='index').T
df = df.append(df_new)
df = df.set_index(['Algorithm', "Time (seconds)"]).apply(pd.Series.explode).reset_index()
df = df.set_index(['Algorithm']).apply(pd.Series.explode).reset_index()
df["Algorithm"] = df["Algorithm"].replace({"individual_contribution_warm_start_top1": "BOExplain", np.nan: "Random"})
num_cols = f"{len(experiments[0]['num_cols'])} numerical columns: "
for i, col in enumerate(experiments[0]["num_cols"]):
num_cols += f"{col} (range {experiments[0]['num_cols_range'][i][0]} to {experiments[0]['num_cols_range'][i][1]}), "
cat_cols = f"{experiments[0]['cat_cols']} categorical columns: "
for i, col in enumerate(experiments[0]["cat_cols"]):
cat_cols += f"{col} ({experiments[0]['cat_cols_n_uniq'][i]} unique values), "
out_str = f"Experiment: {experiments[0]['name']}. Completed {experiments[0]['n_trials']} iterations for {experiments[0]['runs']} runs. Search space includes "
if len(experiments[0]['num_cols']) > 0:
out_str += num_cols
if len(experiments[0]['cat_cols']) > 0:
out_str += "and "
if len(experiments[0]['cat_cols']) > 0:
out_str += cat_cols
out_str = f"{out_str[:-2]}."
out_lst = [line.strip() for line in re.findall(r'.{1,140}(?:\s+|$)', out_str)]
# df = pd.melt(df, id_vars=["Algorithm", "Iteration"], value_vars=["Precision", "Recall", "F1_score", "Jaccard"], value_name="Metric")
# altair
metric = "Metric"
f1_score = alt.Chart(df).mark_line().encode(
x=alt.X('Time (seconds)', axis=alt.Axis()),
y=alt.Y(f'mean(F-score)', scale=alt.Scale(domain=[0, 1]), axis=alt.Axis(values=[0.2, 0.5, 0.8]), title=None),
color=alt.Color("Algorithm")
).properties(
title="F-score",
width=225,
height=90
)
jaccard = alt.Chart(df).mark_line().encode(
x=alt.X('Time (seconds)', axis=alt.Axis(labels=False, title=None, tickSize=0)),
y=alt.Y(f'mean(Jaccard)', scale=alt.Scale(domain=[0, 1]), axis=alt.Axis(values=[0.2, 0.5, 0.8], labels=False, title=None, tickSize=0), title=None),
color=alt.Color("Algorithm")
).properties(
title="Jaccard Similarity",
width=225,
height=90
)
prec = alt.Chart(df).mark_line().encode(
x='Time (seconds)',
y=alt.Y(f'mean(Precision)', scale=alt.Scale(domain=[0, 1]), axis=alt.Axis(values=[0.2, 0.5, 0.8], labels=False, title=None, tickSize=0), title=None),
color=alt.Color("Algorithm")
).properties(
title="Precision",
width=225,
height=90
)
recall = alt.Chart(df).mark_line().encode(
x='Time (seconds)',
y=alt.Y(f'mean(Recall)', scale=alt.Scale(domain=[0, 1]), axis=alt.Axis(values=[0.2, 0.5, 0.8], labels=False, title=None, tickSize=0), title=None),
color=alt.Color("Algorithm")
).properties(
title="Recall",
width=225,
height=90
)
# first = alt.hconcat(f1_score, jaccard, spacing=0)
# second = alt.hconcat(prec, recall, spacing=0)
alt.hconcat(f1_score, prec,recall, spacing=0).resolve_scale(x='shared', y='shared').configure_legend(
title=None,
orient='none',
legendX=200,
legendY=135,
labelFontSize=15,
symbolSize=700,
columns=2,
labelLimit=275,
).configure_axis(
labelFontSize=15,
titleFontSize=15
).configure_title(
fontSize=15
)
| credit.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.3
# language: julia
# name: julia-1.0
# ---
# +
using PyPlot
using Distributed
using SharedArrays
addprocs(4)
@everywhere include("../src/num.jl")
@everywhere include("../src/phy.jl")
@everywhere using .num
@everywhere using .physics
@everywhere using DSP
# -
const t = 0.5
U = 3.2
T = 0.0
const nωn = 2^12
const nω = 2^12
ωrange = [-16.0,16.0]
const zeroplus = 0.01
const itermax = 200
const tol = 0.01
const mix = 0.80;
ω = range(ωrange[1],length=nω,stop=ωrange[2])
ω = convert(Array{Float64},ω);
@time D0ω = baredos.("cubic",t,ω);
gloc = zeros(ComplexF64,nω,2);
# +
# Second-order IPT self-energy (imaginary part) from the spin-resolved
# spectral functions. `Aw` is an nω×2 matrix (columns = two spin species),
# `nf` the Fermi occupation factors on the same grid, `U` the on-site
# interaction. The reversed copy `Appp[end:-1:1]` adds the
# particle-hole-conjugate contribution.
@everywhere function ipt_solver(Aw, nf, U)
    Ap = Aw[:,1] .* nf
    Am = Aw[:,2] .* nf
    App = conv_same(Ap,Ap)
    Appp = conv_same(Am, App)
    # NOTE(review): an unreachable second `return` referencing the
    # undefined names AAB/BBA was removed as dead leftover code.
    return -π .* U^2 .* (Appp + Appp[end:-1:1])
end
# Self-consistent IPT loop for the spin-resolved local Green's function.
#   ω       : frequency grid; dos : bare density of states on that grid
#   t       : hopping; U : interaction; T : temperature
#   itermax, tol, mix : convergence controls; zeroplus : broadening η
# Returns (gloc, Σ2, magnet): local Green's function (nω×2, one column per
# spin), second-order self-energy, and the resulting magnetization.
@everywhere function ipt_selfcons(ω,dos,t,U,T,itermax,nω,zeroplus,mix,tol)
    gloc = zeros(ComplexF64,nω,2)
    g0 = zeros(ComplexF64,nω,2)
    isi = zeros(Float64,nω,2)
    hsi = zeros(Float64,nω,2)
    A0 = zeros(Float64,nω,2)
    Σ2 = zeros(ComplexF64,nω,2)
    magnet = 0.0
    dω = ω[2] - ω[1]
    nf = fermi.(ω,T)
    η = zeroplus
    α = mix  # NOTE(review): unused — `mixing(...)` below takes `mix` directly
    # coarse grid (every 4th point) used for the lattice integrals
    ρe = dos[1:4:nω]
    w = ω[1:4:nω]
    # symmetry-broken Hartree seed (±0.9·U) so an AF solution can develop
    Σ1 = U .* [0.9 -0.9]
    for i = 1:nω
        ζ_up = zeta(ω[i] - Σ1[1],η)
        ζ_down = zeta(ω[i] - Σ1[2],η)
        intg = ρe ./ (ζ_up*ζ_down .- w.^2.)
        # NOTE(review): `sum` shadows Base.sum inside this loop scope only
        sum = trapz(w,intg)
        gloc[i,1] = sum * ζ_down
        gloc[i,2] = sum * ζ_up
    end
    for iter = 1:itermax
        gloc_old = deepcopy(gloc)
        ncalc = zeros(Float64,length(gloc[1,:]))
        @fastmath @inbounds for i in 1:2
            # spin-resolved occupation from the spectral weight below E_F
            ncalc[i] = -1/π .* trapz(ω,imag(gloc[:,i]) .* nf)
        end
        # Hartree shift from the opposite-spin occupation (half-filled ref.)
        Σ1[1] = U .* (ncalc[2] - sum(ncalc)/2)
        Σ1[2] = U .* (ncalc[1] - sum(ncalc)/2)
        magnet = (ncalc[2] - ncalc[1]) / sum(ncalc)
        # Weiss fields: each spin couples to the opposite-spin bath
        g0[:,1] = 1. ./ (ω .+ im*η .- t^2 .* gloc_old[:,2])
        g0[:,2] = 1. ./ (ω .+ im*η .- t^2 .* gloc_old[:,1])
        for i = 1:2 A0[:,i] = -imag(g0[:,i]) ./ π end
        for i = 1:2
            # imaginary part from second-order perturbation theory,
            # symmetrized; real part recovered via the Hilbert transform
            isi[:,i] = ipt_solver(A0,nf,U) * dω * dω
            isi[:,i] = 0.5 .* (isi[:,i] + isi[end:-1:1,i])
            hsi[:,i] = -imag.(Util.hilbert(isi[:,i]))
        end
        Σ2 = hsi .+ im .* isi
        for i = 1:nω
            ζ_up = zeta(ω[i] - Σ1[1] .- Σ2[i,1],η)
            ζ_down = zeta(ω[i] - Σ1[2] .- Σ2[i,2],η)
            intg = ρe ./ (ζ_up*ζ_down .- w.^2.)
            sum = trapz(w,intg)
            gloc[i,1] = sum * ζ_down
            gloc[i,2] = sum * ζ_up
        end
        convg, error = convergent(gloc_old,gloc,ω,nω,tol)
        # NOTE(review): when convergence is first reached exactly at
        # iter == itermax, the `iter == itermax` branch fires before the
        # `convg == true` branch and prints "not achieved" — verify intent.
        if convg == false
            gloc = mixing(gloc_old,gloc,mix)
        elseif iter == itermax
            println("Convergent is not achieved. Try Lower Mixings or Higher Iterations")
            break
        elseif convg == true
            println("Convergent is achieved for U = $U, and T = $T K")
            break
        end
    end
    return gloc,Σ2,magnet
end;
# +
nU = 50
U = range(0.0, length=nU,stop=7.5)
U = convert(Array{Float64},U)
nT = 50
T = range(0.0, length=nT,stop=1000)
T = convert(Array{Float64},T)
magnet = SharedArray{Float64}(nU,nT)
@inbounds @sync @distributed for iU in 1:nU
for iT in 1:nT
_,_,magnet[iU,iT] = ipt_selfcons(ω,D0ω,t,U[iU],T[iT],itermax,nω,zeroplus,mix,tol)
end
end
# -
using JLD2
@save "ipt_ph_diagram_antifero.jld2" magnet U T
# +
plt.figure(figsize=(12,6))
plt.subplot(1,2,1)
plt.contourf(U,T,transpose(magnet[:,:]),cmap="viridis")
plt.ylim(0,500)
plt.xlim(0,7)
plt.ylabel("T (K)")
plt.xlabel("U (eV)")
plt.subplot(1,2,2)
y = [0, 50, 100, 150, 200, 250, 300, 305, 270, 220, 180, 130, 100, 80, 50, 30, 20, 10, 0]
x = [1.38, 1.4, 1.42, 1.45, 1.54, 1.69, 1.9, 2.3, 2.6, 3.0, 3.4, 3.9, 4.3, 4.7, 5.0, 5.1, 5.2, 5.3, 5.35]
plt.ylim(0,500)
plt.xlim(0,7)
plt.plot(x,y,"-o")
plt.xlabel("U (eV)")
plt.text(2.5,70,"AF",fontsize=18)
plt.text(4.3,330,"PM",fontsize=18)
plt.show()
plt.savefig("ipt_ph_diagram_antiferro.pdf",format="pdf")
# -
| nb/IPT antiferromagnetic phase diagram.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="dvssVBDSIZ2k" colab_type="code" colab={}
# %tensorflow_version 1.x
# + id="VrkJBszJIMGH" colab_type="code" colab={}
import warnings
warnings.filterwarnings("ignore")
# !pip install -U git+https://github.com/qubvel/efficientnet
# + id="_HU5arc2IPoV" colab_type="code" colab={}
import efficientnet.keras as enet
# + id="MtSzy8rAIUHH" colab_type="code" outputId="dc1167c2-3d58-43c1-a08b-512cd718fe08" executionInfo={"status": "ok", "timestamp": 1586291741055, "user_tz": 180, "elapsed": 1459822, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhO9QmdKl5p0Ix9OpvQel9h111iVlvMD6nla1juuw=s64", "userId": "04694447128035841155"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
import h5py
import numpy as np
import tensorflow as tf
filename = '/<path to hierarchical data part 1>/h5/dataset_Hierarch_part_1.h5'
with h5py.File(filename, "r") as f:
x_train = np.array(list(f['train_X']))
y_train = np.array(list(f['train_y']))
x_test = np.array(list(f['test_X']))
y_test = np.array(list(f['test_y']))
import tensorflow as tf
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# classes Normal and Pneumonia
num_classes = 2
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# + id="Aku9t4enIss9" colab_type="code" outputId="d9b6ea8a-5ffb-4985-bff7-34411c8b93a0" executionInfo={"status": "ok", "timestamp": 1586291741060, "user_tz": 180, "elapsed": 1459794, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhO9QmdKl5p0Ix9OpvQel9h111iVlvMD6nla1juuw=s64", "userId": "04694447128035841155"}} colab={"base_uri": "https://localhost:8080/", "height": 71}
from keras.backend import sigmoid
from keras.utils import get_custom_objects
from keras.layers import Activation
class SwishActivation(Activation):
    """Keras ``Activation`` wrapper that exposes the swish function under
    the fixed name ``'swish_act'`` so it can be registered as a custom
    object and used in serialized models."""
    def __init__(self, activation, **kwargs):
        super(SwishActivation, self).__init__(activation, **kwargs)
        # fixed __name__ so Keras reports/serializes this as 'swish_act'
        self.__name__ = 'swish_act'
def swish_act(x, beta = 1):
    """Swish activation: ``x * sigmoid(beta * x)`` (beta=1 is SiLU)."""
    return (x * sigmoid(beta * x))
get_custom_objects().update({'swish_act': SwishActivation(swish_act)})
# + id="QiR4jm1hIwj5" colab_type="code" colab={}
from keras import layers
from keras.layers import AveragePooling2D, BatchNormalization
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
model = enet.EfficientNetB0(include_top=False, input_shape=(224,224,3), pooling='avg', weights='imagenet')
x = model.output
x = BatchNormalization()(x)
x = Dropout(0.7)(x)
x = Dense(512)(x)
x = BatchNormalization()(x)
x = Activation(swish_act)(x)
x = Dropout(0.5)(x)
x = Dense(128)(x)
x = BatchNormalization()(x)
x = Activation(swish_act)(x)
# Output layer
predictions = Dense(num_classes, activation="softmax")(x)
model_final_1 = Model(inputs = model.input, outputs = predictions)
model_final_1.summary()
# + id="Sl_l30N1I0w5" colab_type="code" colab={}
from keras.callbacks import ModelCheckpoint
model_final_1.compile(loss='categorical_crossentropy',
optimizer=Adam(0.0001),
metrics=['accuracy'])
mcp_save = ModelCheckpoint('/<path to save checkpoint models>/models/EnetB0_hierarq_BALANCED_part1.h5', save_best_only=True, monitor='val_acc')
reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=2, verbose=1,)
#print("Training....")
model_final_1.fit(x_train, y_train,
batch_size=32,
epochs=5,
validation_split=0.1,
callbacks=[mcp_save, reduce_lr],
shuffle=True,
verbose=1)
# + id="0QCHGiTJI2yk" colab_type="code" colab={}
_, acc = model_final_1.evaluate(x_test, y_test)
print("Test Accuracy: {}%".format(acc*100))
import seaborn as sns
from sklearn.metrics import confusion_matrix
test_pred = model_final_1.predict(x_test)
import numpy as np
ax = sns.heatmap(confusion_matrix(np.argmax(y_test, axis=1),np.argmax(test_pred, axis=1)), cmap="binary",annot=True,fmt="d")
# + [markdown] id="9pecTBimnBtl" colab_type="text"
# ## PARTE **2**
# + id="LdAFm01-m8Sr" colab_type="code" outputId="3c87e982-4fd4-4e11-fd9a-7812faf82e3e" executionInfo={"status": "ok", "timestamp": 1586293545293, "user_tz": 180, "elapsed": 3263948, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhO9QmdKl5p0Ix9OpvQel9h111iVlvMD6nla1juuw=s64", "userId": "04694447128035841155"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
import h5py
import numpy as np
import tensorflow as tf
filename = '/<path to hierarch data part 2>/h5/dataset_Hierarch_part_2.h5'
with h5py.File(filename, "r") as f:
# List all groups
#print("Keys: %s" % f.keys())
x_train = np.array(list(f['train_X']))
y_train = np.array(list(f['train_y']))
x_test = np.array(list(f['test_X']))
y_test = np.array(list(f['test_y']))
import tensorflow as tf
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Converting class vectors to binary class matrices
num_classes = 2
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# + id="oiIJTtbpnK4P" colab_type="code" colab={}
from keras import layers
from keras.layers import AveragePooling2D, BatchNormalization
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
model = enet.EfficientNetB0(include_top=False, input_shape=(224,224,3), pooling='avg', weights='imagenet')
x = model.output
x = BatchNormalization()(x)
x = Dropout(0.7)(x)
x = Dense(512)(x)
x = BatchNormalization()(x)
x = Activation(swish_act)(x)
x = Dropout(0.5)(x)
x = Dense(128)(x)
x = BatchNormalization()(x)
x = Activation(swish_act)(x)
# Output layer
predictions = Dense(2, activation="softmax")(x)
model_final_2 = Model(inputs = model.input, outputs = predictions)
model_final_2.summary()
# + id="07lnF9pPnfQQ" colab_type="code" colab={}
from keras.callbacks import ModelCheckpoint
model_final_2.compile(loss='categorical_crossentropy',
optimizer=Adam(0.0001),
metrics=['accuracy'])
mcp_save = ModelCheckpoint('/<path to save checkpoint model>/models/EnetB0_hierarq_BALANCED_part2.h5', save_best_only=True, monitor='val_acc')
reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=2, verbose=1,)
#print("Training....")
model_final_2.fit(x_train, y_train,
batch_size=32,
epochs=5,
validation_split=0.1,
callbacks=[mcp_save, reduce_lr],
shuffle=True,
verbose=1)
# + id="eYBHFjEInltJ" colab_type="code" outputId="fa7fefe3-8d13-4dd8-a415-27b86b6017c4" executionInfo={"status": "ok", "timestamp": 1586294512712, "user_tz": 180, "elapsed": 4231299, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhO9QmdKl5p0Ix9OpvQel9h111iVlvMD6nla1juuw=s64", "userId": "04694447128035841155"}} colab={"base_uri": "https://localhost:8080/", "height": 299}
_, acc = model_final_2.evaluate(x_test, y_test)
print("Test Accuracy: {}%".format(acc*100))
import seaborn as sns
from sklearn.metrics import confusion_matrix
test_pred = model_final_2.predict(x_test)
import numpy as np
ax = sns.heatmap(confusion_matrix(np.argmax(y_test, axis=1),np.argmax(test_pred, axis=1)), cmap="binary",annot=True,fmt="d")
# + [markdown] id="pwFNz8k4StI9" colab_type="text"
# Perform test on the 3 classes (full hierarchical)
# + id="9oGQrMl6SsPd" colab_type="code" colab={}
# load test data with 3 classes
filename = '/content/drive/My Drive/Colab Notebooks/COVID-19/proposal/h5/dataset_covidNet_TEST_DATA_ONLY.h5'
with h5py.File(filename, "r") as f:
x_test = np.array(list(f['test_X']))
y_test = np.array(list(f['test_y']))
# + id="SZoYAoefTm3Z" colab_type="code" colab={}
num_classes = 3
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
x_test = x_test.astype('float32')
# + id="_eSlBN0FiISS" colab_type="code" colab={}
a = x_test[i,:,:,:].reshape( [1,x_test.shape[1],x_test.shape[2],x_test.shape[3]])
# + id="3tA7pbjfTxT-" colab_type="code" colab={}
test_pred_1 = model_final_1.predict(x_test)
test_pred_2 = model_final_2.predict(x_test)
# + id="81D8p0pcmfWD" colab_type="code" colab={}
fase_1 = np.argmax(test_pred_1, axis=1)
fase_2 = np.argmax(test_pred_2, axis=1)
# + id="WX60aBoBmyDD" colab_type="code" colab={}
# Combine the two binary classifiers into a single 3-class hierarchical
# prediction: stage 1 (fase_1) separates class 0 from classes {1, 2};
# whenever stage 1 predicts the positive branch (1), stage 2's 0/1 output
# (fase_2) is shifted by +1 to pick between classes 1 and 2.
preditos = np.zeros(fase_1.shape)
for i in range(len(fase_1)):
    if fase_1[i] == 1:
        preditos[i] = fase_2[i]+1
    else:
        preditos[i] = fase_1[i]
# one-hot encode the combined predictions for the confusion matrix below
y_preditos = tf.keras.utils.to_categorical(preditos, 3)
# + id="ODmiSUM0nZHZ" colab_type="code" outputId="c7432f27-830c-4390-94c9-e7e6fbfaff02" executionInfo={"status": "ok", "timestamp": 1586296280207, "user_tz": 180, "elapsed": 908, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhO9QmdKl5p0Ix9OpvQel9h111iVlvMD6nla1juuw=s64", "userId": "04694447128035841155"}} colab={"base_uri": "https://localhost:8080/", "height": 265}
# final confusion matrix
ax = sns.heatmap(confusion_matrix(np.argmax(y_test, axis=1),np.argmax(y_preditos, axis=1)), cmap="binary",annot=True,fmt="d")
| src/ia/covid19/EfficientNet_C19_hierarchical.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Prepare GSEA
#
# ## Content
# - GSEA needs two files as input, one is expression dataset in TXT format, the other is phenotype dataset in CLS format
# - See the explaination of TXT and CLS format in GSEA doc: http://software.broadinstitute.org/cancer/software/gsea/wiki/index.php/Data_formats
# - These are just specific file formats required by GSEA, so we prepare the files according to their requirements...
#
# **NOTE: GSEA is mainly designed for human data, so I will not perform this with mouse data... But once the files are prepared, it's pretty straightforward to run it...**
import pandas as pd
import pathlib
# make a sub dir called GSEA for new files
pathlib.Path('GSEA').mkdir(exist_ok=True)
# ## Load data
gene_meta = pd.read_csv('gene_metadata.csv.gz', index_col='gene_id')
# ## Prepare GSEA files
deg_result_paths = list(pathlib.Path().glob('*vs*.deg_results.csv.gz'))
for path in deg_result_paths:
pair_name = '.'.join(path.name.split('.')[:-3])
"""
Prepare expression data
"""
# load the original DESeq2 output to get the normalized counts
deg_with_norm_count = pd.read_csv(path, index_col=0)
# take the last four columns, which are nrom counts
nrom_count_df = deg_with_norm_count.iloc[:, -4:].copy()
# change the index into gene names
nrom_count_df.index = nrom_count_df.index.map(gene_meta['gene_name'])
# Add modifications to match the TXT format as requied by GSEA
# See here: http://software.broadinstitute.org/cancer/software/gsea/wiki/index.php/Data_formats#CLS:_Categorical_.28e.g_tumor_vs_normal.29_class_file_format_.28.2A.cls.29
nrom_count_df.index.name = 'NAME'
nrom_count_df['DESCRIPTION'] = 'na' # add a DESCRIPTION col
# move DESCRIPTION col into the first
col_names = list(nrom_count_df.columns)
reordered_col_names = ['DESCRIPTION'] + col_names[:-1]
nrom_count_df = nrom_count_df[reordered_col_names]
nrom_count_df.to_csv(f'GSEA/{pair_name}.expression_data.txt', sep='\t')
"""
Prepare phenotype data
"""
# prepare the CLS format as required by GSEA
# see here: http://software.broadinstitute.org/cancer/software/gsea/wiki/index.php/Data_formats#CLS:_Categorical_.28e.g_tumor_vs_normal.29_class_file_format_.28.2A.cls.29
# get sample names and their class number (0 for time 1, 1 for time 2)
time1, time2 = pair_name.split('_vs_')
sample_names = nrom_count_df.columns[1:]
sample_dev_times = sample_names.str.split('_').str[1]
cls_format_str = f"""
4 2 1
# {time1} {time2}
{' '.join(sample_dev_times)}
"""
# the above cls_format_str create a string like this:
"""
4 2 1
# E10.5 E14.5
E10.5 E14.5 E14.5 E10.5
"""
with open(f'GSEA/{pair_name}.phenotype_data.cls', 'w') as f:
f.write(cls_format_str)
print(cls_format_str)
nrom_count_df
| analysis/DEGDemo/analysis/5.Prepare GSEA input files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="4e9b5332c0a25889c3c1180fde43610de0163153"
# <a id="0"></a> <br>
# ## Kernel Headlines
# 1. [Introduction and Crisp Methodology](#1)
# 2. [Data Analysis](#2)
# 1. [imports](#3)
# 1. [Reading Data](#4)
# 1. [Features Descriptions](#5)
# 1. [ChannelGrouping_barchart](#6)
# 1. [date and visitStartTime_describe](#7)
# 1. [device_barchart](#8)
# 1. [geoNetwork_barchart](#9)
# 1. [socialEngagement_describe](#10)
# 1. [totals_line_violin](#11)
# 1. [visitNumber_line_violin_hist](#12)
# 1. [trafficSource_barchart](#13)
# 1. [fullVisitorId_qpercentile](#14)
#
# 3. [Compound Features](#15)
# 1. [Churn Rate and Conversion Rate](#16)
# 1. [revenue_datetime](#17)
# 1. [device_revenue](#18)
# 4. [Basic Regression](#19)
# 5. [Preparing for More Evaluations and Tests](#20)
# 1. [Investigation of Feature Importance](#21)
# + [markdown] _uuid="aea8d808ddedd900aff77abfd66bdd7f466c3d61"
# <a id="1"></a> <br>
# # 1-INTRODUCTION AND CRISP METHODOLOGY
#
# 
# CRISP methodology is one of the accepted approaches for data mining tasks. As shown in the following figure, it contains three main parts that should be completed to deliver a product to the business:
# * Data cleaning
# 1. Understanding the business and data.
# 2. Trying to comprehend the business and extract the data which is needed.
# 3. Understanding the dependencies between attributes. Analyzing the target variables. Handling missing values. Transforming data formats to a standard data format.
# * Data Modeling
# 1. Understanding the business and data.
# 2. Selecting the more accurate classifier or regression engine based on the characteristics each of them has.
# 3. Training a model.
# * Evaluation and Deployment.
# 1. Evaluating the created model using evaluation methods (test data, cross-validation, etc.).
# 2. Carefully evaluating the model with real data (i.e. A/B testing). (As shown in the CRISP diagram, there is a link between the business understanding and evaluation parts.)
# 3. Migrating to the new model and replacing the old one with the new version.
#
# + [markdown] _uuid="679d84d693888c93c63762754e36a58749be5b4d"
# <a id="1"></a> <br>
# # 2-DATA ANALYSIS
#
#
# <a id="3"></a> <br>
# * **A. IMPORTS**
#
# Importing packages and libraries.
# + _uuid="a6430eaf18e19cc83ff3bd1a322668fde9f43f2a"
import os
import pandas as pd
import numpy as np
import json
import matplotlib.pyplot as plt
import datetime as datetime
from datetime import timedelta, date
import seaborn as sns
import matplotlib.cm as CM
import lightgbm as lgb
from sklearn import preprocessing
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV, train_test_split
# %matplotlib inline
# + [markdown] _uuid="86ef2c2d21767682cf351db547c3119a2339bd5e"
# <a id="4"></a> <br>
# * **B. READING DATA**
#
# Reading data and catching a glimpse of what the data looks like.
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# Load a 700k-row sample of the v2 training CSV and take a first look at it.
train_data = pd.read_csv("../input/train_v2.csv",nrows=700000)
train_data.head()
# + _uuid="7d6396d4c5365e58dc6a10ec748cd70fce4c7c2c"
# Summary statistics of the numeric columns.
train_data.describe()
# + _uuid="15bd454caf0148863c83b5762658f4ee2c6e7220"
# All column names, as a plain Python list.
list(train_data.columns.values)
# + [markdown] _uuid="2e880ed5017f078fda5067e0b17c53c980d4fea1"
# <a id="5"></a> <br>
# * **C. FEATURES DESCRIPTION**
#
# Returning back to Data description for understanding features.
#
# * channelGrouping - The channel via which the user came to the Store.
# * date - The date on which the user visited the Store.
# * device - The specifications for the device used to access the Store.
# * fullVisitorId- A unique identifier for each user of the Google Merchandise Store.
# * geoNetwork - This section contains information about the geography of the user.
# * sessionId - A unique identifier for this visit to the store.
# * socialEngagementType - Engagement type, either "Socially Engaged" or "Not Socially Engaged".
# * totals - This section contains aggregate values across the session.
# * trafficSource - This section contains information about the Traffic Source from which the session originated.
# * visitId - An identifier for this session. This is part of the value usually stored as the _utmb cookie. This is only unique to the user. For a completely unique ID, you should use a combination of fullVisitorId and visitId.
# * visitNumber - The session number for this user. If this is the first session, then this is set to 1.
# * visitStartTime - The timestamp (expressed as POSIX time).
#
# + [markdown] _uuid="dcb51011a920d5741ce007c15fe6f9cc122ab234"
# <a id="6"></a> <br>
# * **D. CHANNEL_GROUPING**
# + _uuid="24d6d732b0e816e2a890c817dd5543ab3e5e561c"
train_data.channelGrouping.value_counts().plot(kind="bar",title="channelGrouping distro",figsize=(8,8),rot=25,colormap='Paired')
# + [markdown] _uuid="98d005134d4677798e3c1431a6d95e8ac55ebe3a"
# <a id="7"></a> <br>
# * **E. DATE&VISIT_START_TIME**
#
# There are two variables related to time that can be used in time-dependent analyses, especially time series.
# + _uuid="4a21ac9e2c038995911eb25f643e157878ae9c12"
"date :{}, visitStartTime:{}".format(train_data.head(1).date[0],train_data.head(1).visitStartTime[0])
# + [markdown] _uuid="9426c06830d5872f44195d2f1c4acbafea4f6649"
# date is stored in String and should be converted to pandas datetime format.
# visitStartTime is stored in epoch unix format and should be converted to pandas datetime format.
# doing the correspondence transforms and storing on the same attribute.
# + _uuid="3488ada0cedb4a6df1b205ecbc0ebe38ee2e3d40"
# Parse the YYYYMMDD integer column into pandas datetimes.
train_data["date"] = pd.to_datetime(train_data["date"],format="%Y%m%d")
# Convert the unix-epoch seconds column into pandas datetimes.
train_data["visitStartTime"] = pd.to_datetime(train_data["visitStartTime"],unit='s')
# + [markdown] _uuid="02792ece205e46a3e291572c72635b6d65c75136"
# Checking the transformed features.
# + _uuid="fa57bd9f58d7b27873d7bee7913cc18ebc0e94d3"
train_data.head(1)[["date","visitStartTime"]]
# + [markdown] _uuid="d2f7bc22259657a159827c45ca65972c27c55ca2"
# <a id="8"></a> <br>
# * **F. DEVICE**
#
# device is stored in json format. There is a need to extract its fields and analyze them. Using json library to deserializing json values.
# + _uuid="f0b75ca11dca15238bc3e6e6ec36e79d3d8ae90a"
# Deserialize the JSON-encoded `device` column and collect the union of all
# field names appearing across rows, preserving first-seen order.
list_of_devices = train_data.device.apply(json.loads).tolist()
keys = []
_seen_keys = set()  # O(1) membership test instead of an O(len(keys)) list scan per key
for devices_iter in list_of_devices:
    for list_element in devices_iter.keys():
        if list_element not in _seen_keys:
            _seen_keys.add(list_element)
            keys.append(list_element)
# + [markdown] _uuid="39d240415ddc55a1ba7744fb48662f1411828c46"
# keys existed in device attribute are listed below.
# Now we should ignore the features which are not usefull in rest of the process. If feature is misrelated, or it contains lot of "NaN" values it should be discarded.
# We select the ["browser","operatingSystem","deviceCategory","isMobile"] for doing the analyzing. The rest of the device features are ignored and will be removed.
# + _uuid="5cd55978b512a3e1bc986a998d80a3b033c315ed"
"keys existed in device attribute are:{}".format(keys)
# + _uuid="cadd6e2ecea749307f6c52056406ef9b6acbc1e2"
# Keep only the four device fields selected for the analysis.
tmp_device_df = pd.DataFrame(train_data.device.apply(json.loads).tolist())[["browser","operatingSystem","deviceCategory","isMobile"]]
# + _uuid="499f0847262e10fc07829916236b456364c1c472"
tmp_device_df.head()
# + _uuid="b12bb719c32f9a7e23ab0440514149832a33a805"
tmp_device_df.describe()
# + _uuid="1a9f91b20b7d28fd8f7efbdd0ea3ae3e2d29aea7"
# 2x2 grid of bar charts: one per selected device field (top-10 categories).
fig, axes = plt.subplots(2,2,figsize=(15,15))
tmp_device_df["isMobile"].value_counts().plot(kind="bar",ax=axes[0][0],rot=25,legend="isMobile",color='tan')
tmp_device_df["browser"].value_counts().head(10).plot(kind="bar",ax=axes[0][1],rot=40,legend="browser",color='teal')
tmp_device_df["deviceCategory"].value_counts().head(10).plot(kind="bar",ax=axes[1][0],rot=25,legend="deviceCategory",color='lime')
tmp_device_df["operatingSystem"].value_counts().head(10).plot(kind="bar",ax=axes[1][1],rot=80,legend="operatingSystem",color='c')
# + [markdown] _uuid="8fcdcdddde4e9b6c4818ea9cb46331a8f1890a70"
# <a id="9"></a> <br>
# * **G. GEO_NETWORK**
#
# It is json and the similar manner to previous feature (device) should be done.
#
# + _uuid="9d2c8de2826719ba58ca3785471aab39af56b371"
# Deserialize the geoNetwork JSON and keep the four location fields.
tmp_geo_df = pd.DataFrame(train_data.geoNetwork.apply(json.loads).tolist())[["continent","subContinent","country","city"]]
# + _uuid="3e3e0826f8a489bb9642968a12f0ac7875a8c7b7"
tmp_geo_df.head()
# + _uuid="1a750933b4507d43e92a7ce5491e5216caabfe85"
tmp_geo_df.describe()
# + [markdown] _uuid="dba43137c32caff107e861abb03427dd5417b5d3"
# analysing the distribution of users in 5 continents.
# + _uuid="c8b86fc6149849a1f78b7102edfcbb9e16cb341b"
# Global continent distribution plus per-continent sub-continent breakdowns.
fig, axes = plt.subplots(3,2, figsize=(15,15))
tmp_geo_df["continent"].value_counts().plot(kind="bar",ax=axes[0][0],title="Global Distributions",rot=0,color="c")
tmp_geo_df[tmp_geo_df["continent"] == "Americas"]["subContinent"].value_counts().plot(kind="bar",ax=axes[1][0], title="America Distro",rot=0,color="tan")
tmp_geo_df[tmp_geo_df["continent"] == "Asia"]["subContinent"].value_counts().plot(kind="bar",ax=axes[0][1], title="Asia Distro",rot=0,color="r")
tmp_geo_df[tmp_geo_df["continent"] == "Europe"]["subContinent"].value_counts().plot(kind="bar",ax=axes[1][1], title="Europe Distro",rot=0,color="lime")
tmp_geo_df[tmp_geo_df["continent"] == "Oceania"]["subContinent"].value_counts().plot(kind="bar",ax = axes[2][0], title="Oceania Distro",rot=0,color="teal")
tmp_geo_df[tmp_geo_df["continent"] == "Africa"]["subContinent"].value_counts().plot(kind="bar" , ax=axes[2][1], title="Africa Distro",rot=0,color="silver")
# + [markdown] _uuid="337926fa6261abcbd7fa6fd0804f300c81316a46"
# <a id="10"></a> <br>
# * **H.SOCIAL_ENGANEMENT_TYPE **
#
# Describing this feature confirms its uniqueness. It should be dropped. Because its entropy is 0.
# + _uuid="7b3911b4fee714c5f1d2c5dc2b033f7c7c7d1e01"
train_data["socialEngagementType"].describe()
# + [markdown] _uuid="24c3161564ec95f36881869b0e2f4fbf6d834b86"
# <a id="11"></a> <br>
# * **I. TOTALS**
#
# + _uuid="ed05f7a083c3863e88b9d820bc10940765de245d"
train_data.head()
# Deserialize the totals JSON and pull out transactionRevenue as a new column.
# NOTE(review): the right-hand side is a single-column DataFrame, so pandas
# aligns it on the (default RangeIndex) index when assigning -- confirm both
# sides share that index.
train_data["revenue"] = pd.DataFrame(train_data.totals.apply(json.loads).tolist())[["transactionRevenue"]]
# + [markdown] _uuid="0d6a1b3a04a6af113c0bd6ae5f75ed8ce0f6b0b3"
# Extracting all the revenues can bring us an overview about the total revenue.
# + _uuid="11f9072ff0e17577979bbfee4dea7a6139bef5c8"
# Keep only sessions that actually produced revenue (NaN = no transaction).
revenue_datetime_df = train_data[["revenue" , "date"]].dropna()
# Revenue arrives as strings from the JSON; cast to integers for aggregation.
revenue_datetime_df["revenue"] = revenue_datetime_df.revenue.astype(np.int64)
revenue_datetime_df.head()
# + [markdown] _uuid="95155bf9d934bf271a424c8a98255fc9a9194363"
# Aggregation on days and plotting daily revenue.
# + _uuid="3ddbf6c05926782adbc4b6d2424e88df2c0a728a"
# Sum revenue per calendar day and plot it as a time series.
daily_revenue_df = revenue_datetime_df.groupby(by=["date"],axis = 0 ).sum()
import matplotlib.pyplot as plt  # NOTE: redundant -- plt is already imported at the top of the file
fig, axes = plt.subplots(figsize=(20,10))
axes.set_title("Daily Revenue")
axes.set_ylabel("Revenue")
axes.set_xlabel("date")
axes.plot(daily_revenue_df["revenue"])
# + _uuid="a8e89d239eb5b6d510ecc8e9f17215c5bc527b16"
# Violin plot of the daily-revenue distribution (median marked, mean omitted).
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(9, 9))
axes.set_title("Daily revenue Violin")
axes.set_ylabel("revenue")
axes.violinplot(list(daily_revenue_df["revenue"].values),showmeans=False,showmedians=True)
# + [markdown] _uuid="7a1a7ed010b0ae312a18612e20fc3a4a0e78f3a8"
# <a id="12"></a> <br>
# * **J. VISIT_NUMBER**
#
# Number of visits have profound potential to be an important factor in regression progress.
# + _uuid="280d50807c7221484f954f4baef407d583d55795"
# Take an explicit copy so the astype assignment below writes into this frame
# rather than into a view of train_data (the original slice triggered pandas'
# SettingWithCopyWarning and could silently fail to update).
visit_datetime_df = train_data[["date","visitNumber"]].copy()
visit_datetime_df["visitNumber"] = visit_datetime_df.visitNumber.astype(np.int64)
# + _uuid="94394db7363f82dc3e8d4b63ce04b451cfd858fa"
# Sum visitNumber per calendar day and plot it as a time series.
daily_visit_df = visit_datetime_df.groupby(by=["date"], axis = 0).sum()
fig, axes = plt.subplots(1,1,figsize=(20,10))
axes.set_ylabel("# of visits")
axes.set_xlabel("date")
axes.set_title("Daily Visits")
axes.plot(daily_visit_df["visitNumber"])
# + _uuid="a19b0c54518e1b4cf2a9d40d721567c1764c6f90"
# Violin plot of the daily-visits distribution (median marked, mean omitted).
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(9, 9))
axes.set_title("Daily visits Violin")
axes.set_ylabel("# of visitors")
axes.violinplot(list(daily_visit_df["visitNumber"].values),showmeans=False,showmedians=True)
# + [markdown] _uuid="bb23c63389e45b1a1aaba846ee8b892425d37db7"
# Now, lets check another side of 'visitNumber' feature. As it is mentioned in data description, visitNumber is the number of sessions for each user. It can also be the factor of users interest. lets 'describe' and 'visualize' them.
# using 'collections' package, we can count repetition of each element.
# + _uuid="0181ed5f1fee9854dee68331646ac2d159bfb9f4"
train_data.visitNumber.describe()
# + [markdown] _uuid="789a4341cacf0338f8d68d9b847e94d7a8c8887b"
# 75% of sessions have a visitNumber of 1 (no repeat visit). You can get more information about percentiles by calling the np.percentile method.
# + _uuid="88e2b52a6055587655b99a59f97b24897015f59c"
"90 percent of sessions have visitNumber lower than {} times.".format(np.percentile(list(train_data.visitNumber),90))
# + [markdown] _uuid="2aba10a10c0670314a0c4d5286f66b3afdc6a08d"
# Lets find most_common and least_common visitNumbers for being familiar with collections module and its powrefull tools ;-)
# + _uuid="53cbb935fed4dd082b7de40d0a6b93029dd82dc3"
import collections
# Frequency table of visitNumber values.  most_common() sorts descending, so
# a reversed tail slice ([:-11:-1]) yields the ten rarest entries, while
# most_common(10) gives the ten most frequent.
tmp_least_10_visitNumbers_list = collections.Counter(list(train_data.visitNumber)).most_common()[:-10-1:-1]
tmp_most_10_visitNumbers_list = collections.Counter(list(train_data.visitNumber)).most_common(10)
# Drop the counts and keep just the visitNumber values themselves.
least_visitNumbers = [value for value, _count in tmp_least_10_visitNumbers_list]
most_visitNumbers = [value for value, _count in tmp_most_10_visitNumbers_list]
"10 most_common visitNumbers are {} times and 10 least_common visitNumbers are {} times".format(most_visitNumbers,least_visitNumbers)
# + [markdown] _uuid="a69c4b314f21f0913fdf9640704802652a6f59e9"
# It is clear that the dispersion of the 'visitNumber' per session is huge. for this sort of features, we can use Log and map the feature space to
# new lower space. As a result of this mapping, visualization the data will be easier.
# + _uuid="9ff582a30ec1ec665fde671aed193095b6daf8e3"
# Histogram of log(visitNumber) -- the raw values are heavily right-skewed,
# so the log transform makes the distribution readable.
fig,ax = plt.subplots(1,1,figsize=(9,5))
ax.set_title("Histogram of log(visitNumbers) \n don't forget it is per session")
ax.set_ylabel("Repetition")
ax.set_xlabel("Log(visitNumber)")
ax.grid(color='b', linestyle='-', linewidth=0.1)
ax.hist(np.log(train_data.visitNumber))
# + [markdown] _uuid="a7aa14f5d0c7bcf8c602144bbe29821a228e6268"
# <a id="13"></a> <br>
# * **K. TRAFFIC_SOURCE**
#
# What is the most conventional manner for visitor who visit to the website and do their shopping ? trafficSource attribute can resolve this qurestion.
# Like a previous Json elements existed in the dataset, this attribute is also Json file. so, we use the similar way to deserialize it. We have select keyword, source and the medium as a features which can bring more useful infromation.
#
# + _uuid="027a738e3c4f5a3d4066762bc66d287302470039"
# Deserialize the trafficSource JSON and keep keyword/medium/source.
traffic_source_df = pd.DataFrame(train_data.trafficSource.apply(json.loads).tolist())[["keyword","medium" , "source"]]
# + _uuid="48e7f72f09c8ae71c3682fd8e7b47e53d3775b7a"
# Bar charts: all mediums, and the ten most common referral sources.
fig,axes = plt.subplots(1,2,figsize=(15,10))
traffic_source_df["medium"].value_counts().plot(kind="bar",ax = axes[0],title="Medium",rot=0,color="tan")
traffic_source_df["source"].value_counts().head(10).plot(kind="bar",ax=axes[1],title="source",rot=75,color="teal")
# + [markdown] _uuid="df8a4d663158bcc3a00568a41c4ec3fc5fefb7c7"
# As it is completely obvious in source diagram, google is the most repetitive source. It would be interesting if we replace all google subdomains with exact 'google' and do the same analyze again. let's do it.
# + _uuid="c1e14eb231912f233cadb2bb0ba48e0ce64b4366"
# Collapse every google sub-domain into a single "google" bucket and re-plot.
# na=False keeps missing sources out of the boolean mask: str.contains returns
# NaN for NaN rows, and a mask containing NaN breaks .loc boolean indexing.
traffic_source_df.loc[traffic_source_df["source"].str.contains("google", na=False) ,"source"] = "google"
fig,axes = plt.subplots(1,1,figsize=(8,8))
traffic_source_df["source"].value_counts().head(15).plot(kind="bar",ax=axes,title="source",rot=75,color="teal")
# + [markdown] _uuid="32f46b9faea7670aafecf55b922bb2df0ef8d0c0"
# Google dependent redirects are more than twice the youtube sources. Combination of this feature with revenue and visits may have important result. We will do it in next step (when we are analyzing feature correlations).
# Now let's move on keywords feature.
# A glance at the keyword feature reveals a lot of missing values '(not provided)'. Drawing a bar chart for both cases...
#
# + _uuid="4f0703fa5288ee3e0570f30aefb2739761ce66bd"
# Keyword frequency: left including the '(not provided)' placeholder, right without it.
fig,axes = plt.subplots(1,2,figsize=(15,10))
traffic_source_df["keyword"].value_counts().head(10).plot(kind="bar",ax=axes[0], title="keywords (total)",color="orange")
traffic_source_df[traffic_source_df["keyword"] != "(not provided)"]["keyword"].value_counts().head(15).plot(kind="bar",ax=axes[1],title="keywords (dropping NA)",color="c")
# + [markdown] _uuid="d47e98109052c593a71437ab8c28d4cfcd674569"
# <a id="14"></a> <br>
# * **L. FULL_VISITOR_ID**
#
# Now, lets see how many of users are repetitive ?! This feature can represent important information answering this question ? (Is more repeation proportional to more buy ?! )
# The response will be discussed in next section (Where we are analyzing compound features) but now, lets move on calculation of repetitive visits percentiles.
# + _uuid="25a3dc6ee29147c2af2037e3b167f84bcf724183"
# Count how many sessions each distinct visitor has (ascending), then report
# several percentiles of that per-user visit count.
repetitive_users = list(np.sort(list(collections.Counter(list(train_data["fullVisitorId"])).values())))
# Fixed label typo: the last value is the 89th percentile, not a second "88%".
"25% percentile: {}, 50% percentile: {}, 75% percentile: {}, 88% percentile: {}, 89% percentile: {}".format(
    np.percentile(repetitive_users,q=25),np.percentile(repetitive_users,q=50),
    np.percentile(repetitive_users,q=75),np.percentile(repetitive_users,q=88), np.percentile(repetitive_users,q=89))
# + [markdown] _uuid="338ab31e3cc9cf9301225f999020888394025edb"
# As it is shown, only 12 percent of users are repetitive and visited the website more than once.
# (Search about churn rate and conversion rate if you want to know why we have analyzed this feature ;-) )
#
# + [markdown] _uuid="89a7f318e36ea214cb77777bb09e3ceaf3d6a591"
# # 3-COMPOUND FEATURES
#
# <a id="16"></a> <br>
# * **A. CHURN&CONVERSION_VISUALIZATION**
#
# The main definition of ChurnRate is The percentage rate at which customers stop subscribing to a service or employees leave a job. Churn rate period can be various from day to a year correspoding to business type. In this section, we will compute and visualize the monthly churn rate.
# Lets do more investigation on features date and fullVisitorId for more detail mining.
#
# + _uuid="3aecd41e62d35ee288e6f8194b8a15b4fee7e145"
# Distinct dates in the sample, sorted, to establish the covered time span.
date_list = np.sort(list(set(list(train_data["date"]))))
"first_day:'{}' and last_day:'{}' and toal number of data we have is: '{}' days.".format(date_list[0], date_list[-1],len(set(list(train_data["date"]))))
# + [markdown] _uuid="504213d4e82a108eafe96340236c61929e27836d"
# So, we have 366 days (12 months = 1 year) of data, from August 2016 to August 2017, for churn-rate calculations. The best period for churn is probably the monthly churn rate.
# Now, lets list all the months existed in library for checking having no missing months.
# + _uuid="d0b87c3fb522149f972888671047b1d9608d033e"
# Enumerate the distinct "YYYY-MM" labels covered by the one-year window
# 2016-08-01 (inclusive) .. 2017-08-01 (exclusive).
month = 8
start_date = datetime.date(2016, month, 1)
end_date = datetime.date(2017, month, 1)
def daterange(start_date, end_date):
    """Yield every date from start_date (inclusive) up to end_date (exclusive)."""
    total_days = int((end_date - start_date).days)
    for offset in range(total_days):
        yield start_date + timedelta(offset)
# Deduplicate the month labels (set order is arbitrary, as in the original).
dates_month = list({day.strftime("%Y-%m") for day in daterange(start_date, end_date)})
dates_month
# + [markdown] _uuid="cddac95d8be2bd4a1eb951a260f4aefa4a32896a"
# The whole period exists in the data. So, let's define a new empty dataframe, perform these calculations on it, and copy our required columns to it.
# For churn-rate calculations, we need to check which users have visited the website each month. This information is located in fullVisitorId; we will copy it to the new dataframe.
# + _uuid="dc25de8cdbf95d0d7e6bf3d5f7f212035b9b6ee4"
# Minimal frame for churn analysis: date, its year/month parts, and visitor id.
# NOTE(review): the column names "yaer" and "fullVisitoId" are misspelled, but
# later cells reference them by these exact names, so they are kept as-is.
tmp_churn_df = pd.DataFrame()
tmp_churn_df["date"] = train_data["date"]
tmp_churn_df["yaer"] = pd.DatetimeIndex(tmp_churn_df["date"]).year
tmp_churn_df["month"] =pd.DatetimeIndex(tmp_churn_df["date"]).month
tmp_churn_df["fullVisitoId"] = train_data["fullVisitorId"]
tmp_churn_df.head()
# + [markdown] _uuid="6510f3a5700041d334b60731565e2757e03a52d0"
# For calculation of churn rate we need to count the users appeared in two, three, four, etc. continus months.
# We will using the following format for collocation of users.
# For example assume we want to extract the number of distinct users who visited the website on 2016-08.
# + _uuid="b265e974d9adef4f2eba1e829a345e76c66aef0b"
"distinct users who visited the website on 2016-08 are:'{}'persons".format(len(set(tmp_churn_df[(tmp_churn_df.yaer == 2016) & (tmp_churn_df.month == 8) ]["fullVisitoId"])))
# + [markdown] _uuid="d5e0e7dac39dd4b15b1c84c6461a129976ee90ed"
# By generalizing the above solution we have:
# + _uuid="1eb62cdcfbe01828f8ee4b9e8c1e34c3b1ba73a5"
# One (year, month) tuple per month in the window; collect the visitor-id
# series for each month into intervals_visitors (same order as the tuples).
target_intervals_list = [(2016,8),(2016,9),(2016,10),(2016,11),(2016,12),(2017,1),(2017,2),(2017,3),(2017,4),(2017,5),(2017,6),(2017,7)]
intervals_visitors = []
for tmp_tuple in target_intervals_list:
    intervals_visitors.append(tmp_churn_df[(tmp_churn_df.yaer == tmp_tuple[0]) & (tmp_churn_df.month == tmp_tuple[1]) ]["fullVisitoId"])
"Size of intervals_visitors:{} ".format(len(intervals_visitors))
# + [markdown] _uuid="aeecd93d97a70f54885e4841c1d8866ad065b520"
# So, we have 12 list and each elemets contains the users who visited the website on the correspondence period.
# Now its time to do some matrix calculation for filling the churn-rate matrix.
# It is very probable that you calculate the matrix with more efficient ways. I used this manner for more simplicity.
# + _uuid="b844e618c9beb2bf76be68ac13c6c7fb20b6263e"
# Cumulative-retention matrix: cell (i, j) holds the number of distinct
# visitors seen in *every* month from interval i through interval j (j >= i).
# NOTE(review): only the first 11 of the 12 intervals are used -- confirm
# whether the final month was excluded on purpose.
tmp_matrix = np.zeros((11, 11))
for i in range(11):
    # Running intersection of visitor sets from interval i up to interval j.
    retained = set(intervals_visitors[i])
    for j in range(i, 11):
        retained &= set(intervals_visitors[j])
        tmp_matrix[i][j] = len(retained)
# + [markdown] _uuid="64f1ae0db9049c9cda12afb17386eb9e380ecf6c"
# Now we have 2D matrix containig the continus visited users.
# + _uuid="bb0ec2429438226f82b5bd7dd08e0e610c00f6fb"
# Annotated heatmap of the retention matrix.
# NOTE(review): yticklabels lists 12 (year, month) tuples but the matrix has
# only 11 rows -- confirm whether the last label should be dropped.
xticklabels = ["interval 1","interval 2","interval 3","interval 4","interval 5","interval 6","interval 7","interval 8",
"interval 9","interval 10","interval 11"]
yticklabels = [(2016,8),(2016,9),(2016,10),(2016,11),(2016,12),(2017,1),(2017,2),(2017,3),(2017,4),(2017,5),(2017,6),(2017,7)]
fig, ax = plt.subplots(figsize=(11,11))
ax = sns.heatmap(np.array(tmp_matrix,dtype=int), annot=True, cmap="RdBu_r",xticklabels=xticklabels,fmt="d",yticklabels=yticklabels)
ax.set_title("Churn-rate heatmap")
ax.set_xlabel("intervals")
ax.set_ylabel("months")
# + [markdown] _uuid="5acd00596bd46ac286484a1bac0c9a3b3c9de9b1"
# A churn-rate heat map is the one the important keys of business. The more repetitive users in continues time periods, the more success in user loyalty.
#
# Generaly it is better to drop the zeors below the main diagonal for better visualization and more clearer representaion.
# (I couldn't find the sns pleasant visualization for half churn rate matrix. If you find it, it will be appreciated to ping me. Ill replace the below diagram with your recommendation as soon as possible).
# + _uuid="7786164b7ad678c1bb7de2e699ade662d6920763"
# Upper-triangle-only view of the retention matrix: mask the (all-zero) lower
# triangle and render masked cells in white.
A = tmp_matrix
mask = np.tri(A.shape[0], k=-1)
A = np.ma.array(A, mask=mask) # mask out the lower triangle
fig = plt.figure(figsize=(9,9))
ax = fig.add_subplot(111)
ax.set_xlabel("interval")
ax.set_ylabel("period")
# NOTE(review): CM.get_cmap is deprecated in recent matplotlib releases --
# consider matplotlib.colormaps / plt.get_cmap when upgrading.
cmap = CM.get_cmap('RdBu_r', 50000)
cmap.set_bad('w') # default value is 'k'
ax.imshow(A, interpolation="nearest", cmap=cmap)
# + [markdown] _uuid="ba8655158e0f81976b6691dbe3377602c07e4c0a"
#
# <a id="17"></a> <br>
# * **B. REVENUE&DATETIME**
#
#
# Now, it is time to move on to analysing compound features. The main target of this section is undestanding the features correlation.
# At the first point, lets analyze this probable assumption :
#
# "Is more visitNumber proportional to more Revenue ?!"
#
# + _uuid="d5c53baecad877d02711d142b8803a9eb1348332"
# Rebuild the revenue/date frame (same as the earlier cell) so this section
# can be run independently.
revenue_datetime_df = train_data[["revenue" , "date"]].dropna()
revenue_datetime_df["revenue"] = revenue_datetime_df.revenue.astype(np.int64)
revenue_datetime_df.head()
# + [markdown] _kg_hide-input=true _uuid="1a048dc239959ec71029c2d1ecd4e9b97d05d293"
# Doing groupby on date and getting the total revenue per day:
# + _uuid="5fe90fc6b104f1b0586cd196ee3bbe43febc6931"
# Total revenue per day.
total_revenue_daily_df = revenue_datetime_df.groupby(by=["date"],axis=0).sum()
total_revenue_daily_df.head()
# + [markdown] _uuid="ca1d6f05bf4befad1e755950c89b7c2b21540926"
# Doing similar process on visitNumber and getting total visitNumber per day.
# + _uuid="2c37cd5e4b5feb8bc3b99865c1e1a78c309ab0d9"
# Total visitNumber per day.
total_visitNumber_daily_df = train_data[["date","visitNumber"]].groupby(by=["date"],axis=0).sum()
total_visitNumber_daily_df.head()
# + [markdown] _uuid="44698a25f7eebdeb67da3ca2c1ba2ddded9c06c3"
# Concatenate these two dataframe and compound visualization.
# + _uuid="e1eddb7ab69446da5566340ecd4a319cbffbc55e"
# Overlay daily visits (left axis, blue) and daily revenue (right axis, red
# dashed) on one figure to eyeball their correlation.
datetime_revenue_visits_df = pd.concat([total_revenue_daily_df,total_visitNumber_daily_df],axis=1)
fig, ax1 = plt.subplots(figsize=(20,10))
t = datetime_revenue_visits_df.index
s1 = datetime_revenue_visits_df["visitNumber"]
ax1.plot(t, s1, 'b-')
ax1.set_xlabel('day')
# Make the y-axis label, ticks and tick labels match the line color.
ax1.set_ylabel('visitNumber', color='b')
ax1.tick_params('y', colors='b')
# Second y-axis sharing the same x-axis for the revenue series.
ax2 = ax1.twinx()
s2 = datetime_revenue_visits_df["revenue"]
ax2.plot(t, s2, 'r--')
ax2.set_ylabel('revenue', color='r')
ax2.tick_params('y', colors='r')
fig.tight_layout()
# + [markdown] _uuid="df6a04b932bd865c1eec26b0b7edf621fbca6652"
# By comparing the revenue and visitNumbers we can confirm our assumption: where there are more visits, the revenue is also higher than on neighbouring days.
# The behaviour of line charts is completely similar.
#
# Another point to touch on is the rate of visitNumber and revenue which have a peak on December. Before christmas people visit and buy more than other days. The same behaviour is represented in the days after christmas where people have bought their requirements and the level of visit and buy goes down (They are in vacation and have less time to check the website ;-) )
#
# The above diagram can be represent more detail if you visualize period of it. Do it yourself with by quering on daterange and check our confirmed assumtion :-D.
# + [markdown] _uuid="792e4f71aad4e7f5d86457cf62364c4dd29653d2"
# Lets focus on user who have addressed our challenge (users who have revenue) and checking some assumptions
# + _uuid="f09aa84f3fcbc907ed13145ac232449808a5088f"
# Restrict to sessions that produced revenue and examine the device profile
# of those buyers: pie of mobile vs. non-mobile purchases.
revenue_df = train_data.dropna(subset=["revenue"])
revenue_os_df = pd.DataFrame(revenue_df.device.apply(json.loads).tolist())[["browser","operatingSystem","deviceCategory","isMobile"]]
buys_is_mobile_dict = dict(collections.Counter(list(revenue_os_df.isMobile)))
# Normalize the raw counts to fractions of the total.
_total_buys = sum(buys_is_mobile_dict.values())
percent_buys_is_mobile_dict = {flag: count / _total_buys for flag, count in buys_is_mobile_dict.items()}
sizes = list(percent_buys_is_mobile_dict.values())
explode = (0, 0.1)
# NOTE(review): label order assumes Counter sees non-mobile first -- verify.
labels = 'isNotMobile', 'isMobile'
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
ax.set_title("buys mobile distro")
ax.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',shadow=True, startangle=90)
# + [markdown] _uuid="d87db6c6826d5070c13dfec70744c65c17480c2b"
# Going deeper to subclasses ...
# + _uuid="94d501a18ab2cba0b084054563891ea10ce496c1"
# Browser breakdown of buyers, split by mobile vs. non-mobile.
mobiles_browsers = dict(collections.Counter(revenue_os_df[revenue_os_df["isMobile"] == True]["browser"]))
not_mobiles_browsers = dict(collections.Counter(revenue_os_df[revenue_os_df["isMobile"] == False]["browser"]))
print("for mobile users:")
for i,v in mobiles_browsers.items():
    print("{}:{}".format(i,v))
print("\nfor not mobile users:")
for i,v in not_mobiles_browsers.items():
    print("{}:{}".format(i,v))
# NOTE(review): `vals` is hard-coded from a previous run of the counts above --
# it will not track the data if the sample size changes; confirm the numbers.
vals = np.array([[552.,6.,2.,12.,16.,431.], [9801.,189.,58.,93.,5.,349.]])
# Nested "donut" built from polar bar charts: outer ring = mobile/non-mobile,
# inner ring = browsers within each group.
fig, ax = plt.subplots(subplot_kw=dict(polar=True),figsize=(9,9))
size = 0.3
# Scale counts so all wedges together span the full 2*pi circle.
valsnorm = vals / np.sum(vals) * 2 * np.pi
# obtain the ordinates of the bar edges
valsleft = np.cumsum(np.append(0, valsnorm.flatten()[:-1])).reshape(vals.shape)
cmap = plt.get_cmap("tab20c")
outer_colors = cmap(np.arange(3) * 4)
inner_colors = cmap(np.array([1, 2, 5, 6, 9, 10]))
ax.bar(x=valsleft[:, 0],
       width=valsnorm.sum(axis=1), bottom=1 - size, height=size,
       color=outer_colors, edgecolor='w', linewidth=1, align="edge")
ax.bar(x=valsleft.flatten(),
       width=valsnorm.flatten(), bottom=1 - 2 * size, height=size,
       color=inner_colors, edgecolor='w', linewidth=1, align="edge")
# ax.set_axis_off()
ax.set(title="Nested pi-plot for buyers devices.")
# + [markdown] _uuid="d6c8ce4ae2af1e892a2e5a3d3be74ccba12618fb"
# For buyers who use mobile for purchasing, Safari and chrome approximately have equal number of users. But for users who do not use mobile for purchasing, Safari have a few percent of total users.
# + [markdown] _uuid="0880a45beba53b8f8d4a1af54ef6ab52ed89a548"
# <a id="18"></a> <br>
# # 4-BASIC REGRESSION
#
# Now, based on the analysis we have done, it's time to start the regression progress.
# Lets do a simple regression for testing our regression method.
# Calling our main_df and dropping the features that may have not any positive effect on the regression results ( HINT: consider it is starting point. We want to learn how we can use the regression for combination of categorical features and continus features. We will change our features in next steps for getting better results).
# We use the tricks we used during this tutorial for dealing with features.
# + _uuid="b3adcc9a0d69e50089384929b359f7c3ffbf3ed6"
# Build the regression frame: drop identifier/time columns, flatten the four
# JSON columns into their selected fields, then drop the raw JSON columns.
df_train = train_data.drop(["date", "socialEngagementType", "visitStartTime", "visitId", "fullVisitorId" , "revenue","customDimensions"], axis=1)
devices_df = pd.DataFrame(df_train.device.apply(json.loads).tolist())[["browser", "operatingSystem", "deviceCategory", "isMobile"]]
geo_df = pd.DataFrame(df_train.geoNetwork.apply(json.loads).tolist())[["continent", "subContinent", "country", "city"]]
traffic_source_df = pd.DataFrame(df_train.trafficSource.apply(json.loads).tolist())[["keyword", "medium", "source"]]
totals_df = pd.DataFrame(df_train.totals.apply(json.loads).tolist())[["transactionRevenue", "newVisits", "bounces", "pageviews", "hits"]]
# The raw "hits" column is dropped before concat so the flattened totals
# column of the same name does not collide with it.
df_train = pd.concat([df_train.drop(["hits"],axis=1), devices_df, geo_df, traffic_source_df, totals_df], axis=1)
df_train = df_train.drop(["device", "geoNetwork", "trafficSource", "totals"], axis=1)
# + _uuid="4ac3da5624dc46c18fc5ae39c8365411d859cdbd"
df_train.head(1)
# + [markdown] _uuid="c0467b40c1cca0df9cc2f58baacd4bce770efc72"
# Replacing NaN variables with 0 (It may have positive/negative effect. We will check it later).
# Another point we must touch on is we need to convert
# + _uuid="3f50dd46169c630328557f814c0e38476fd15a73"
# Missing totals mean the event never happened in the session -- encode as 0.
_zero_filled_columns = ["transactionRevenue", "bounces", "pageviews", "hits", "newVisits"]
for _col in _zero_filled_columns:
    df_train[_col] = df_train[_col].fillna(0)
# + [markdown] _uuid="09d288e14bc0e9d9d98fa31b500f050be68e2ce1"
# Using train_test_split for splitting data to train and evaluate sets. Converting revenue (our target variable) to float for performing regression.
# + _uuid="ff52b66f6ba4d2874ecaf7aab14028dfbe01d9df"
# 80/20 train/eval split, then cast the regression target to float.
df_train, df_test = train_test_split(df_train, test_size=0.2, random_state=42)
# Fixed: the np.float alias was removed in NumPy 1.24; the builtin float is
# equivalent (pandas maps it to float64).
df_train["transactionRevenue"] = df_train["transactionRevenue"].astype(float)
df_test["transactionRevenue"] = df_test["transactionRevenue"].astype(float)
"Finaly, we have these columns for our regression problems: {}".format(df_train.columns)
# + _uuid="a7715a788b5ee41e377d82078f40b0378ee9e844"
df_train.head(1)
# + [markdown] _uuid="29ea798cce627e29cac6a9c1d458a45a8a7ef00d"
# Another point to touch on is we need to convert our categorical data to (int) values. As a result regression algorithm and classifiers can deal with these sort of features ( String features are not supported).
# (Search about LabelEncoder and its use case. This tool helps us to convert the categorical features to Integer ones).
# + _uuid="dbd00b141e9796c80379034f41324a0ff8879b5d"
# Integer-encode categorical columns and cast numeric columns to float so the
# GBM can consume them.
categorical_features = ['channelGrouping', 'browser', 'operatingSystem', 'deviceCategory', 'isMobile',
                        'continent', 'subContinent', 'country', 'city', 'keyword', 'medium', 'source']
numerical_features = ['visitNumber', 'newVisits', 'bounces', 'pageviews', 'hits']
for column_iter in categorical_features:
    lbl = preprocessing.LabelEncoder()
    # Fit on train+test together so both splits share a single label space.
    lbl.fit(list(df_train[column_iter].values.astype('str')) + list(df_test[column_iter].values.astype('str')))
    df_train[column_iter] = lbl.transform(list(df_train[column_iter].values.astype('str')))
    df_test[column_iter] = lbl.transform(list(df_test[column_iter].values.astype('str')))
for column_iter in numerical_features:
    # Fixed: np.float was removed in NumPy 1.24 -- builtin float is equivalent.
    df_train[column_iter] = df_train[column_iter].astype(float)
    df_test[column_iter] = df_test[column_iter].astype(float)
# + [markdown] _uuid="96deaf50b6a81e0eee438146b01e44c126a5f738"
# OK. Now we have all requirements which are needed for first regression test ;-).
#
# Check [this link](http://github.com/Microsoft/LightGBM/tree/master/examples/python-guide) out for get more information about Regression algorithm we are using.
#
# It is mentioned in competition description that we need to use ln(x+1) for evaluation of results. So, check [this links](https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.log1p.html) and [this link](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.expm1.html) about these scipy built-in methods.
# We can control prints with 'verbose_eval' parameter.
# + _uuid="a2a2fc640d4da78291252d6e079147a176506d77"
# LightGBM regression on log1p(revenue) with RMSE, early stopping on the
# held-out eval set.
params = {
    "objective": "regression",
    "metric": "rmse",
    "num_leaves": 30,
    "min_child_samples": 100,
    "learning_rate": 0.1,
    "bagging_fraction": 0.7,
    "feature_fraction": 0.5,
    # Fixed: LightGBM's parameter is "bagging_freq"; the misspelled
    # "bagging_frequency" was silently ignored, so bagging never triggered.
    "bagging_freq": 5,
    "bagging_seed": 2018,
    "verbosity": -1
}
# Target is log1p-transformed, matching the competition's evaluation metric.
lgb_train = lgb.Dataset(df_train.loc[:,df_train.columns != "transactionRevenue"], np.log1p(df_train.loc[:,"transactionRevenue"]))
lgb_eval = lgb.Dataset(df_test.loc[:,df_test.columns != "transactionRevenue"], np.log1p(df_test.loc[:,"transactionRevenue"]), reference=lgb_train)
gbm = lgb.train(params, lgb_train, num_boost_round=2000, valid_sets=[lgb_eval], early_stopping_rounds=100,verbose_eval=100)
# + [markdown] _uuid="ebd4453a8261f116b641671793264ab3d6e9640b"
# Congrate !
# You get reasonable rmse in first step. Now, lets predict the revenues for our evaluation dataset to be familar with transformation needed.
#
# (Point that the revenue of user cant be negative ;-) so, remove them).
# + _uuid="4ae51a08b3f9dc61f18083fde4c9abd04ccac70b"
# Predict log1p(revenue) on the eval split at the best iteration, clip
# negative predictions to zero (revenue cannot be negative), and invert the
# log1p transform with expm1.
predicted_revenue = gbm.predict(df_test.loc[:,df_test.columns != "transactionRevenue"], num_iteration=gbm.best_iteration)
predicted_revenue[predicted_revenue < 0] = 0
df_test["predicted"] = np.expm1(predicted_revenue)
df_test[["transactionRevenue","predicted"]].head(10)
# + [markdown] _uuid="a66388ab5b448072538e4a9f3929d22b75147f15"
# Now, you can generalize this procedure and use do your submission.
#
# We will try to do a more supervised regression in next steps. We will utilize our preprocess facts for getting regression results with less error.
# + [markdown] _uuid="8f614d7c9535213661d1ec54ae35f7eb19551299"
# <a id="19"></a> <br>
# # 5-PREPARING FOR MORE EVALUATIONS AND TESTS
#
# Now, we have all requirements for doing more tests. Lets summarize all the prcess we have done earlier in one script. You can change parameters regarding your understanding about feature engineering process. If you cant understand any section of it, please read the details which are described below.
# + _uuid="9b20b5fbccaf2a71c72db5c11db19b1a7ccf775c"
import gc; gc.collect()
import time; time.sleep(5)
# Reload the raw data: a 50k-row training sample and a 25k-row test sample.
df_train = pd.read_csv(filepath_or_buffer="../input/train_v2.csv",nrows=50000)
df_actual_test = pd.read_csv(filepath_or_buffer="../input/test_v2.csv",nrows=25000)
# Drop columns not used for modelling. fullVisitorId is KEPT in the test
# frame because the final submission is indexed by it.
df_train = df_train.drop(["date", "socialEngagementType", "visitStartTime", "visitId", "fullVisitorId"], axis=1)
df_actual_test = df_actual_test.drop(["date", "socialEngagementType", "visitStartTime", "visitId"], axis=1)

def _expand_json_columns(df, totals_fields):
    """Parse the JSON-string columns (device/geoNetwork/trafficSource/totals)
    of *df* and return a flat frame with the selected fields appended and the
    raw JSON columns (plus the unused "hits" column) removed.

    totals_fields: which keys to keep from the "totals" JSON (the test set has
    no transactionRevenue, so the caller picks the field list).
    """
    devices = pd.DataFrame(df.device.apply(json.loads).tolist())[["browser", "operatingSystem", "deviceCategory", "isMobile"]]
    geo = pd.DataFrame(df.geoNetwork.apply(json.loads).tolist())[["continent", "subContinent", "country", "city"]]
    traffic = pd.DataFrame(df.trafficSource.apply(json.loads).tolist())[["keyword", "medium", "source"]]
    totals = pd.DataFrame(df.totals.apply(json.loads).tolist())[totals_fields]
    df = pd.concat([df.drop(["hits"], axis=1), devices, geo, traffic, totals], axis=1)
    return df.drop(["device", "geoNetwork", "trafficSource", "totals"], axis=1)

# Preprocessing for the training set (includes the target column).
df_train = _expand_json_columns(df_train, ["transactionRevenue", "newVisits", "bounces", "pageviews", "hits"])
for _col in ["transactionRevenue", "bounces", "pageviews", "hits", "newVisits"]:
    df_train[_col] = df_train[_col].fillna(0)
# Preprocessing for the test set (no transactionRevenue available).
df_actual_test = _expand_json_columns(df_actual_test, ["newVisits", "bounces", "pageviews", "hits"])
# BUG FIX: the original assigned df_train["..."].fillna(0) into the test
# frame, which both overwrote the test values with training values and
# misaligned row indices. Fill the test frame's OWN columns instead.
for _col in ["bounces", "pageviews", "hits", "newVisits"]:
    df_actual_test[_col] = df_actual_test[_col].fillna(0)
# Hold out 20% of the training rows for evaluation.
df_train, df_eval = train_test_split(df_train, test_size=0.2, random_state=42)
# np.float was removed in NumPy 1.24; the builtin float is the equivalent.
df_train["transactionRevenue"] = df_train["transactionRevenue"].astype(float)
df_eval["transactionRevenue"] = df_eval["transactionRevenue"].astype(float)
print(df_train.columns)
# + _uuid="7422339fe51434aa3f9a35e866fa4f12b3a01eda"
# LightGBM hyper-parameters for the revenue regression.
params = {
    "objective": "regression",   # squared-error regression on log1p(revenue)
    "metric": "rmse",            # evaluation metric for the validation set
    "num_leaves": 30,
    "min_child_samples": 100,
    "learning_rate": 0.1,
    "bagging_fraction": 0.7,     # row subsample ratio
    "feature_fraction": 0.5,     # column subsample ratio
    # BUG FIX: LightGBM's parameter is "bagging_freq"; the misspelled
    # "bagging_frequency" was silently ignored, which disabled bagging
    # entirely (bagging_fraction only takes effect when bagging_freq > 0).
    "bagging_freq": 5,
    "bagging_seed": 2018,
    "verbosity": -1
}
print('Start training...')
# + _uuid="a48f07cf051a84064969a1a3cb563aec42bf6738"
# customDimensions holds a raw JSON-ish string that was never parsed above;
# drop it from all three frames so the model sees only encoded/numeric data.
df_actual_test = df_actual_test.drop(["customDimensions"],axis=1)
df_train = df_train.drop(["customDimensions"],axis=1)
df_eval = df_eval.drop(["customDimensions"],axis=1)
# + _uuid="765541d7b167588704e56d13efd31d323e0c77b6"
# Columns to integer-encode vs. columns to cast to numeric.
categorical_features = ['channelGrouping', 'browser', 'operatingSystem', 'deviceCategory', 'isMobile',
                        'continent', 'subContinent', 'country', 'city', 'keyword', 'medium', 'source']
numerical_features = ['visitNumber', 'newVisits', 'bounces', 'pageviews', 'hits']
for column_iter in categorical_features:
    # Fit one encoder per column on the union of train/eval/test values so
    # every category seen anywhere maps to a consistent integer code.
    lbl = preprocessing.LabelEncoder()
    lbl.fit(list(df_train[column_iter].values.astype('str')) + list(df_eval[column_iter].values.astype('str')) + list(df_actual_test[column_iter].values.astype('str')))
    df_train[column_iter] = lbl.transform(list(df_train[column_iter].values.astype('str')))
    df_eval[column_iter] = lbl.transform(list(df_eval[column_iter].values.astype('str')))
    df_actual_test[column_iter] = lbl.transform(list(df_actual_test[column_iter].values.astype('str')))
for column_iter in numerical_features:
    # FIX: np.float was removed in NumPy 1.24; the builtin float is equivalent.
    df_train[column_iter] = df_train[column_iter].astype(float)
    df_eval[column_iter] = df_eval[column_iter].astype(float)
    df_actual_test[column_iter] = df_actual_test[column_iter].astype(float)
# + _uuid="c6962025557a22ea96b7297fd7f1fe260f219192"
# Build the LightGBM datasets; the target is log1p(revenue) so that RMSE is
# computed on log-revenue, matching the competition metric.
lgb_train = lgb.Dataset(df_train.loc[:,df_train.columns != "transactionRevenue"], np.log1p(df_train.loc[:,"transactionRevenue"]))
lgb_eval = lgb.Dataset(df_eval.loc[:,df_eval.columns != "transactionRevenue"], np.log1p(df_eval.loc[:,"transactionRevenue"]), reference=lgb_train)
# + _uuid="9fb8a62b7dd7d8597201ef894a56eeb60c3fcb82"
# Train with early stopping on the eval split.
gbm = lgb.train(params, lgb_train, num_boost_round=2000, valid_sets=[lgb_eval], early_stopping_rounds=100,verbose_eval=100)
# + _uuid="b89a7709181c01f83a91f46a50cff5578f755a47"
# Sanity-check predictions on the eval split: clamp negatives to zero and
# invert the log1p transform back to revenue units.
eval_predicted_revenue = gbm.predict(df_eval.loc[:,df_eval.columns != "transactionRevenue"], num_iteration=gbm.best_iteration)
eval_predicted_revenue[eval_predicted_revenue < 0] = 0
df_eval["predicted"] = np.expm1(eval_predicted_revenue)
df_eval[["transactionRevenue","predicted"]].head()
# + _uuid="aae37660a0f72dc4647688cb86914aa985bea634"
# Predict on the actual test set. NOTE(review): here the prediction stays in
# log space (expm1 is deliberately commented out) — presumably because the
# submission expects PredictedLogRevenue; confirm against the required format.
actual_predicted_revenue = gbm.predict(df_actual_test.loc[:,df_actual_test.columns != "fullVisitorId"], num_iteration=gbm.best_iteration)
actual_predicted_revenue[actual_predicted_revenue < 0] = 0
# df_actual_test["predicted"] = np.expm1(actual_predicted_revenue)
df_actual_test["predicted"] = actual_predicted_revenue
df_actual_test.head()
# Keep only the id/prediction pair and index by visitor id for alignment
# with the sample submission.
df_actual_test = df_actual_test[["fullVisitorId" , "predicted"]]
df_actual_test["fullVisitorId"] = df_actual_test.fullVisitorId.astype('str')
# FIX: np.float was removed in NumPy 1.24; use the builtin float.
df_actual_test["predicted"] = df_actual_test.predicted.astype(float)
df_actual_test.index = df_actual_test.fullVisitorId
df_actual_test = df_actual_test.drop("fullVisitorId",axis=1)
# + _uuid="68c429e59b66978a64d07e7da0b639d94d49a596"
df_actual_test.head()
# + _uuid="8214a198eb20694306aaf95749a96dfe7aef841a"
df_submission_test = pd.read_csv(filepath_or_buffer="../input/sample_submission_v2.csv",index_col="fullVisitorId")
df_submission_test.shape
# + _uuid="6487a05fa79a0dbb78aa0dde419f455839481b75"
"test shape is :{} and submission shape is : {}".format(df_actual_test.shape , df_submission_test.shape)
# Reindex predictions to the submission's visitor ids, drop duplicate ids,
# and rename the prediction column to the name the submission requires.
final_df = df_actual_test.loc[df_submission_test.index,:]
# + _uuid="152185fb4323c05c347ac830150d1618a614fa8b"
final_df = final_df[~final_df.index.duplicated(keep='first')]
final_df = final_df.rename(index=str, columns={"predicted": "PredictedLogRevenue"})
final_df.PredictedLogRevenue.fillna(0).head()
# final_df.head()
# final_df.head()
# + [markdown] _uuid="8055e5f64d79d2aa7b42455c5d6ce7bc6f7884e6"
# We will try to do more supervised regressions in the next steps...
# + [markdown] _uuid="df7bf744e2fba6f5d60c33619cc776e7198273b1"
# <a id="20"></a> <br>
# * **A. INVESTIGATION OF FEATURE IMPORTANCE**
#
# LightGBM have a method for representation of feature importance.
#
# + _uuid="d4b0c8538cc99e1ae391921a2d0c52323cf3c612"
# Plot the trained booster's feature importances (top 30 features).
fig, ax = plt.subplots(figsize=(10,16))
lgb.plot_importance(gbm, max_num_features=30, height=0.8, ax=ax)
plt.title("Feature Importance", fontsize=15)
plt.show()
# + [markdown] _uuid="e6511dfd34878c525831dc1115dfcd4ba782699e"
# Now you have information about which features are more important than others.
| 9 google customer revenue prediction/tutorial-preprocessing-processing-evaluation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from glob import glob
# Count the label files in the IDD train/valid splits to verify the dataset
# is present before the copy steps below.
len(glob("../DATASETS/idd20k_final/train/labels/*")),len(glob("../DATASETS/idd20k_final/valid/labels/*"))
# # !ls ../DATASETS/Cityspaces/images/train/*/*
# -
# !rm -r Dataset/IDD
# + id="kWxmemQJt8kT"
# !mkdir Dataset/IDD
# !mkdir Dataset/IDD/images
# !mkdir Dataset/IDD/masks_org
# !mkdir Dataset/IDD/masks
# !mkdir Dataset/IDD/Test
# !mkdir Dataset/IDD/Test/images
# !mkdir Dataset/IDD/Test/masks_org
# !mkdir Dataset/IDD/Test/masks
# !cp ../DATASETS/idd20k_final/train/images/* Dataset/IDD/images/
# !cp ../DATASETS/idd20k_final/train/labels/* Dataset/IDD/masks_org/
# !cp ../DATASETS/idd20k_final/valid/images/* Dataset/IDD/Test/images/
# !cp ../DATASETS/idd20k_final/valid/labels/* Dataset/IDD/Test/masks_org/
# +
# i = !ls Dataset/IDD/images/
# l = !ls Dataset/IDD/masks_org/
# test_i = !ls Dataset/IDD/Test/images/
# test_l = !ls Dataset/IDD/Test/masks_org/
len(i), len(l),len(test_i), len(test_l)
# -
l[-5:], i[-5:]
# +
import cv2
from glob import glob
import os
import numpy as np
import matplotlib.pyplot as plt
# Inspect one original mask: a single-channel image whose pixel values are
# the raw label ids (before the 3-class remapping below).
l = glob('Dataset/IDD/masks_org/*')
print(l[9])
m = cv2.imread(l[9],0)  # flag 0 -> load as grayscale
print(m.shape)
print(np.unique(m))
plt.imshow(m*75,cmap="gray")  # scale the small ids so classes are visible
plt.show()
# -
# !pip install tqdm
# + id="iNP1SNZ5t8bB"
from tqdm import tqdm


def mask_mapping(path=''):
    """Collapse the raw IDD label ids into 3 classes and save the result.

    Output pixel values: 0 = background, 1 = road, 2 = obstacle.
    Converted masks are written as PNG to the sibling 'masks/' directory
    of the 'masks_org/' directory matched by *path*.

    path: glob pattern matching the original mask images,
          e.g. 'Dataset/IDD/masks_org/*'.
    """
    # Source-id groups. The masks are computed on the ORIGINAL image (via
    # np.isin) so the three assignments cannot clobber each other — this
    # replaces the original's 26 sequential np.where passes plus the %100
    # trick with one boolean mask per class.
    background_ids = [2, 3, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 255]
    obstacle_ids = [4, 5, 6, 7, 8, 9, 10, 11, 12]
    road_ids = [0, 1]
    for i, filepath in tqdm(enumerate(glob(path))):
        img = cv2.imread(filepath, 0)
        out = img.copy()
        out[np.isin(img, background_ids)] = 0
        out[np.isin(img, road_ids)] = 1
        out[np.isin(img, obstacle_ids)] = 2
        # .../masks_org/<name>.<ext>  ->  .../masks/<name>.png
        splits_ = filepath.rsplit("/", 2)
        filepath_ = splits_[0] + "/masks/" + f"{splits_[2].split('.')[0]}.png"
        cv2.imwrite(filepath_, out)
        lst = np.unique(out)
        # Print the first mask as a sample, plus any mask that still
        # contains an id outside {0, 1, 2} (i.e. an unmapped label).
        if i < 1 or not all(ii < 3 for ii in lst):
            print(np.unique(out))
            print(filepath_)


mask_mapping(path='Dataset/IDD/masks_org/*')
mask_mapping(path='Dataset/IDD/Test/masks_org/*')
# + colab={"base_uri": "https://localhost:8080/"} id="JLjUhiwat8Wu" outputId="8d64a866-f250-4843-961f-1eb71f81fe43"
len(os.listdir('Dataset/IDD/images/')), len(os.listdir('Dataset/IDD/Test/images/'))
# + colab={"base_uri": "https://localhost:8080/"} id="pLMYf9cFt8Sd" outputId="230ebade-79b8-4a52-f13c-54da9afda2e9"
len(os.listdir('Dataset/IDD/masks/')), len(os.listdir('Dataset/IDD/Test/masks/'))
# -
import matplotlib.pyplot as plt
import numpy as np
# Verify the converted masks: after mask_mapping, every pixel should be
# 0 (background), 1 (road) or 2 (obstacle) in both train and test outputs.
for i in glob('Dataset/IDD/masks/*')[:2]:
    t = cv2.imread(i,0)
    t = t.astype(np.float32)
    print(np.unique(t))
    plt.imshow(t)
    plt.show()
for i in glob('Dataset/IDD/Test/masks/*')[:2]:
    t = cv2.imread(i,0)
    t = t.astype(np.float32)
    print(np.unique(t))
    plt.imshow(t)
    plt.show()
| Scripts/DatasetPreparation/prepare_IDD_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This example plots NEXRAD Level 3 algorithm, precipitation, and derived products (not base data).
# +
import warnings
from awips.dataaccess import DataAccessLayer
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import numpy as np
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# %matplotlib inline
# Connect to Unidata's public EDEX demo server and start a "radar" request.
DataAccessLayer.changeEDEXHost("edex-cloud.unidata.ucar.edu")
request = DataAccessLayer.newDataRequest("radar")
# List every radar site the server offers (sorted alphabetically).
available_locs = DataAccessLayer.getAvailableLocationNames(request)
available_locs.sort()
list(available_locs)
# Restrict the request to the "kmhx" radar site.
request.setLocationNames("kmhx")
availableParms = DataAccessLayer.getAvailableParameters(request)
availableParms.sort()
#list(availableParms)
# Translate the raw parameter list into NEXRAD3 product ids and names.
productIDs = DataAccessLayer.getRadarProductIDs(availableParms)
productNames = DataAccessLayer.getRadarProductNames(availableParms)
print(productIDs)
print(productNames)
# +
# Suppress RuntimeWarnings (e.g. from the NaN-aware min/max calls below).
warnings.filterwarnings("ignore",category =RuntimeWarning)
def make_map(bbox, projection=ccrs.PlateCarree()):
    """Create a 16x16-inch map figure covering *bbox*.

    Draws 50m-resolution coastlines and a lat/lon gridline overlay with
    labels on the left and bottom edges only. Returns (figure, axes).
    """
    fig, ax = plt.subplots(
        figsize=(16, 16), subplot_kw=dict(projection=projection)
    )
    ax.set_extent(bbox)
    ax.coastlines(resolution='50m')
    gridliner = ax.gridlines(draw_labels=True)
    gridliner.top_labels = False
    gridliner.right_labels = False
    gridliner.xformatter = LONGITUDE_FORMATTER
    gridliner.yformatter = LATITUDE_FORMATTER
    return fig, ax
nexrad_data = {}
# For every available NEXRAD3 product: fetch the most recent grid and plot
# it on its own map, keeping the raw array in nexrad_data keyed by product.
for prod in productNames:
    request.setParameters(prod)
    availableLevels = DataAccessLayer.getAvailableLevels(request)
    if availableLevels:
        # Use the first level offered for this product.
        request.setLevels(availableLevels[0])
    else:
        print("No levels found for " + prod)
        continue
    cycles = DataAccessLayer.getAvailableTimes(request, True)
    times = DataAccessLayer.getAvailableTimes(request)
    if times:
        print()
        # Fetch only the latest available time step.
        response = DataAccessLayer.getGridData(request, [times[-1]])
        print("Recs : ", len(response))
        if response:
            grid = response[0]
        else:
            continue
        data = grid.getRawData()
        lons, lats = grid.getLatLonCoords()
        nexrad_data[prod] = data
        print('Time :', str(grid.getDataTime()))
        # Flatten to report the value range, ignoring NaN fill values.
        flat = np.ndarray.flatten(data)
        print('Name :', str(grid.getLocationName()))
        print('Prod :', str(grid.getParameter()))
        print('Range:' , np.nanmin(flat), " to ", np.nanmax(flat), " (Unit :", grid.getUnit(), ")")
        print('Size :', str(data.shape))
        print()
        cmap = plt.get_cmap('rainbow')
        # Pad the data extent by half a degree on each side.
        bbox = [lons.min()-0.5, lons.max()+0.5, lats.min()-0.5, lats.max()+0.5]
        fig, ax = make_map(bbox=bbox)
        cs = ax.pcolormesh(lons, lats, data, cmap=cmap)
        cbar = fig.colorbar(cs, extend='both', shrink=0.5, orientation='horizontal')
        cbar.set_label(grid.getParameter() +" " + grid.getLevel() + " " \
              + grid.getLocationName() + " (" + prod + "), (" + grid.getUnit() + ") " \
              + "valid " + str(grid.getDataTime().getRefTime()))
        plt.show()
| pages/workshop/AWIPS/NEXRAD_Level3_Radar.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from os import listdir
import pandas as pd
from bs4 import BeautifulSoup
dataset_path = '../../data/150k/'
refined_dataset_path = '../../data/refined_dataset.csv/'
html_blacklist = ['script', 'noscript', 'img', 'audio', 'video']
def remove_html(text):
    """Return the visible text content of *text* (an HTML string).

    Text nodes whose parent tag is listed in ``html_blacklist`` (scripts,
    media tags, etc.) are skipped. Commas, newlines and tabs are replaced
    with spaces so the result is safe to embed in a CSV cell, and the
    result is stripped of surrounding whitespace.
    """
    soup = BeautifulSoup(text, 'html.parser')
    # PERF: collect the kept nodes and join once — the original built the
    # string with repeated +=, which is quadratic in the number of nodes.
    kept = [str(t) for t in soup.find_all(text=True)
            if t.parent.name not in html_blacklist]
    output = ' '.join(kept)
    output = output.replace(',', ' ')
    output = output.replace('\n', ' ')
    output = output.replace('\t', ' ')
    return output.strip()
# Merge every CSV in the dataset directory into one frame.
# FIX: DataFrame.append was removed in pandas 2.0 — collect the sheets and
# concatenate once (also avoids the quadratic copy of append-in-a-loop).
sheets = []
for csv_file in listdir(dataset_path):
    sheet = pd.read_csv(f'{dataset_path}{csv_file}')
    sheets.append(sheet)
refined_sheet = pd.concat(sheets)
# Drop columns that are not needed downstream (one pass instead of three).
refined_sheet = refined_sheet.drop(['source_url', 'subcategory', 'category'], axis=1)
# Strip HTML from article bodies and tidy the remaining text columns.
refined_sheet['content'] = refined_sheet['content'].astype(str).apply(lambda x: remove_html(x))
# meta_tags like '[a, b]' -> 'a, b' without quotes; empty list -> ''.
refined_sheet['meta_tags'] = refined_sheet['meta_tags'].apply(lambda x: x[1:-1].replace('\"', '') if x != '[]' else '')
# Trim the last 7 characters of the publish_date string.
refined_sheet['publish_date'] = refined_sheet['publish_date'].apply(lambda x: x[:-7])
print("refining csv completed")
refined_sheet.to_csv(refined_dataset_path, index=False)
print("successfully saved the refined csv")
# refined_sheet.head()
# refined_sheet.head()
# +
# Fill remaining NaNs in the refined frame with benign defaults.
values = {'content': ' ', 'thumbnail':'/', 'meta_tags':' ', 'title':' ', 'summary':' '}
refined_sheet = refined_sheet.fillna(value=values)
# NOTE(review): the two lines below inspect `sheet` (the last raw CSV read
# in the previous cell), not the filled `refined_sheet` — confirm whether
# that is intentional.
print(sheet.columns[sheet.isna().any()].tolist())
sheet.head()
| engine/analysis/corpus_refine.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
import numpy as np
# The same 2x2 matrix in three container types.
m1 = [[1.0, 2.0], [3.0, 4.0]]  # plain Python list of lists
m2 = np.array([[1.0,2.0],
               [3.0,4.0]], dtype=np.float32)  # NumPy ndarray
m3 = tf.constant([[1.0, 2.0],
                  [3.0, 4.0]])  # TensorFlow constant tensor
# The three objects have different types...
print(type(m1))
print(type(m2))
print(type(m3))
# ...but convert_to_tensor accepts all three and yields float32 tensors.
t1 = tf.convert_to_tensor(m1, dtype=tf.float32)
t2 = tf.convert_to_tensor(m2, dtype=tf.float32)
t3 = tf.convert_to_tensor(m3, dtype=tf.float32)
print(type(t1))
print(type(t2))
print(type(t3))
| ch02/Listing 2.03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Load per-county prediction residuals and print the 30 most under-predicted
# counties (most negative xgb_residual) as a markdown table.
dfresid = pd.read_csv('public_data/both_residuals.csv', index_col=0)
print(dfresid.sort_values(by='xgb_residual')[:30][['county_state',
                'xgb_residual']].set_index('county_state').to_markdown())
# | Most under-predicted counties | Prediction Error |
# |:-----------------------------|---------------:|
# | Forest County, Pennsylvania | -3950.66 |
# | Greensville County, Virginia | -2412.21 |
# | Most under-predicted counties | Prediction Error |
# |:---------------------------------|---------------:|
# | Forest County, Pennsylvania | -3950.66 |
# | Greensville County, Virginia | -2412.21 |
# | North Slope Borough, Alaska | -2378.68 |
# | Wheeler County, Georgia | -1994.88 |
# | Manassas city, Virginia | -1881.26 |
# | Issaquena County, Mississippi | -1878.01 |
# | Powell County, Montana | -1850.93 |
# | Stewart County, Georgia | -1838.78 |
# | Jones County, Texas | -1570.68 |
# | Buena Vista County, Iowa | -1511.32 |
# | DeKalb County, Missouri | -1286.27 |
# | Lee County, Kentucky | -1265.57 |
# | Emmet County, Iowa | -1248.64 |
# | Union County, Florida | -1194.15 |
# | Duplin County, North Carolina | -1173.87 |
# | New York County, New York | -1106.6 |
# | Ector County, Texas | -1047.72 |
# | Petersburg city, Virginia | -1008.12 |
# | Sevier County, Arkansas | -963.681 |
# | Tunica County, Mississippi | -952.414 |
# | San Francisco County, California | -934.171 |
# | Lincoln County, Colorado | -930.412 |
# | Arlington County, Virginia | -898.588 |
# | Greene County, North Carolina | -867.263 |
# | Alexandria city, Virginia | -861.727 |
# | Scott County, Iowa | -856.461 |
# | Bailey County, Texas | -841.174 |
# | New Castle County, Delaware | -838.501 |
# | Dallam County, Texas | -830.803 |
# | Johnson County, Nebraska | -818.163 |
| .ipynb_checkpoints/Untitled-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# [](https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb)
#
# + [markdown] colab_type="text" id="-pVhOfzLx9us"
# # Using Google Colab with GitHub
#
#
# + [markdown] colab_type="text" id="wKJ4bd5rt1wy"
#
# [Google Colaboratory](http://colab.research.google.com) is designed to integrate cleanly with GitHub, allowing both loading notebooks from github and saving notebooks to github.
# + [markdown] colab_type="text" id="K-NVg7RjyeTk"
# ## Loading Public Notebooks Directly from GitHub
#
# Colab can load public github notebooks directly, with no required authorization step.
#
# For example, consider the notebook at this address: https://github.com/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb.
#
# The direct colab link to this notebook is: https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb.
# + [markdown] colab_type="text" id="WzIRIt9d2huC"
# ## Browsing GitHub Repositories from Colab
#
# Colab also supports special URLs that link directly to a GitHub browser for any user/organization, repository, or branch. For example:
#
# - http://colab.research.google.com/github will give you a general github browser, where you can search for any github organization or username.
# - http://colab.research.google.com/github/googlecolab/ will open the repository browser for the ``googlecolab`` organization. Replace ``googlecolab`` with any other github org or user to see their repositories.
# - http://colab.research.google.com/github/googlecolab/colabtools/ will let you browse the main branch of the ``colabtools`` repository within the ``googlecolab`` organization. Substitute any user/org and repository to see its contents.
# - http://colab.research.google.com/github/googlecolab/colabtools/blob/master will let you browse ``master`` branch of the ``colabtools`` repository within the ``googlecolab`` organization. (don't forget the ``blob`` here!) You can specify any valid branch for any valid repository.
# + [markdown] colab_type="text" id="Rmai0dD30XzL"
# ## Loading Private Notebooks
#
# Loading a notebook from a private GitHub repository is possible, but requires an additional step to allow Colab to access your files.
# Do the following:
#
# 1. Navigate to http://colab.research.google.com/github.
# 2. Click the "Include Private Repos" checkbox.
# 3. In the popup window, sign-in to your Github account and authorize Colab to read the private files.
# 4. Your private repositories and notebooks will now be available via the github navigation pane.
# + [markdown] colab_type="text" id="8J3NBxtZpPcK"
# ## Saving Notebooks To GitHub or Drive
#
# Any time you open a GitHub hosted notebook in Colab, it opens a new editable view of the notebook. You can run and modify the notebook without worrying about overwriting the source.
#
# If you would like to save your changes from within Colab, you can use the File menu to save the modified notebook either to Google Drive or back to GitHub. Choose **File→Save a copy in Drive** or **File→Save a copy to GitHub** and follow the resulting prompts. To save a Colab notebook to GitHub requires giving Colab permission to push the commit to your repository.
# + [markdown] colab_type="text" id="8QAWNjizy_3O"
# ## Open In Colab Badge
#
# Anybody can open a copy of any github-hosted notebook within Colab. To make it easier to give people access to live views of GitHub-hosted notebooks,
# colab provides a [shields.io](http://shields.io/)-style badge, which appears as follows:
#
# [](https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb)
#
# The markdown for the above badge is the following:
#
# ```markdown
# [](https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb)
# ```
#
# The HTML equivalent is:
#
# ```HTML
# <a href="https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb">
# <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
# </a>
# ```
#
# Remember to replace the notebook URL in this template with the notebook you want to link to.
# + colab={} colab_type="code" id="3VQqVi-3ScBC"
| notebooks/colab-github-demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise 2: Collect text content of the book David Copperfield available at www.gutenberg.org
# # Using Requests
import requests
# Fetch the plain-text edition of "David Copperfield" from Project Gutenberg.
r = requests.get('https://www.gutenberg.org/files/766/766-0.txt')
r.status_code
r.text[:1000]
from pathlib import Path
# FIX: Path.write_text opens, writes and CLOSES the file — the original used
# open(...).write(...), which leaked the file handle. It also returns the
# number of characters written, matching the original cell output.
Path("../data/David_Copperfield.txt").write_text(r.text, encoding='utf-8')
# # Using Urllib3
import urllib3
http = urllib3.PoolManager()
rr = http.request('GET', 'https://www.gutenberg.org/files/766/766-0.txt')
rr.status
rr.data[:1000]
# Same fix for the binary write: write_bytes closes the handle for us.
Path("../data/David_Copperfield_new.txt").write_bytes(rr.data)
| Chapter04/Exercise 4.02/Exercise 4.02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Boltzmann Wealth Model
# # Basic Overview
#
# The agent based model comes with 2 parts. The model itself and the individual agents within the model.
# First, we import the relevant classes from the Mesa module and create an Agent and Model.
from mesa import Model, Agent
class MoneyAgent(Agent):
    """ An agent with fixed initial wealth."""
    def __init__(self, unique_id):
        # Each agent should have a unique identifier, stored in the unique_id field.
        self.unique_id = unique_id
        # Every agent starts with one unit of wealth.
        self.wealth = 1
        # NOTE(review): super().__init__() is not called; whether the Mesa
        # Agent base class requires it depends on the Mesa version — confirm.
class MoneyModel(Model):
    """A model with some number of agents."""
    def __init__(self, N):
        # Total number of agents to create.
        self.num_agents = N
        # The scheduler will be added here
        self.create_agents()
    def create_agents(self):
        """Method to create all the agents."""
        # In this first version the agents are created but not stored
        # anywhere; later versions add them to a scheduler.
        for i in range(self.num_agents):
            a = MoneyAgent(i)
            # Now what? See below.
# With the classes defined, we can now initialize and create the model and populate it with agents.
# Instantiate the model with 10 agents. MoneyModel.__init__ already calls
# create_agents(), so the original's second explicit call was redundant and
# just created a duplicate batch of agents.
money_model = MoneyModel(10)
# # Scheduler
# A Schedule defines what each agent will do during each time tick of the simulation.
# In this example we are using the `RandomActivation` schedule from `mesa.time`.
#
# In the `MoneyModel` we need to
#
# 1. Create the schedule we would like each agent to use.
# 2. Add agents to the schedule when they are created
# 3. Define a model step function that determines what the model will do at each step
#
# In the `MoneyAgent` we need to
#
# 1. Define a Step function that determines what each agent that is selected during a model step will do.
# +
import random
from mesa import Model, Agent
from mesa.time import RandomActivation
class MoneyAgent(Agent):
    """ An agent with fixed initial wealth."""
    def __init__(self, unique_id):
        self.unique_id = unique_id
        # Every agent starts with one unit of wealth.
        self.wealth = 1
    def step(self, model):
        """Give money to another agent."""
        if self.wealth > 0:
            # Pick a random agent (possibly this agent itself, in which
            # case the transfer is a no-op).
            other = random.choice(model.schedule.agents)
            # Give them 1 unit money
            other.wealth += 1
            self.wealth -= 1
class MoneyModel(Model):
    """A model with some number of agents."""
    def __init__(self, N):
        self.num_agents = N
        # Adding the scheduler:
        # Scheduler needs to be created before agents do
        # Scheduler objects are instantiated with their model object,
        # which they then pass to the agents at each step.
        self.schedule = RandomActivation(self)
        self.create_agents()
    def create_agents(self):
        """Method to create all the agents."""
        for i in range(self.num_agents):
            a = MoneyAgent(i)
            self.schedule.add(a)
    def step(self):
        """Advance the model by one tick."""
        # The scheduler's step method activates the step methods of all the
        # agents that have been added to it, in this case in random order.
        self.schedule.step()
    def run_model(self, steps):
        """Run the model for *steps* ticks."""
        # Because the model has no inherent end conditions,
        # the user must specify how many steps to run it for.
        for i in range(steps):
            self.step()
# -
# With the newly updated code, we can create the model and agents (just like above).
# Since we defined a schedule, we can now tell the model to run for `n` number of steps, in this example, we picked `5`.
# MoneyModel.__init__ already calls create_agents(); the original's second
# explicit call added 10 MORE agents to the scheduler, doubling the
# population to 20.
money_model = MoneyModel(10)
money_model.run_model(5)
# # Space
# In order to assign a location to an agent we need to provide a grid or space to assign coordinates to the agents.
# We get this from the `mesa.space` module.
#
# Since agents have a coordinate, we can also define a `move` method.
# +
import random
from mesa import Model, Agent
from mesa.time import RandomActivation
from mesa.space import MultiGrid
class MoneyAgent(Agent):
    """ An agent with fixed initial wealth."""
    def __init__(self, unique_id):
        self.unique_id = unique_id
        # Every agent starts with one unit of wealth.
        self.wealth = 1
    def step(self, model):
        """Give money to another agent."""
        if self.wealth > 0:
            # Pick a random agent (possibly this agent itself, in which
            # case the transfer is a no-op).
            other = random.choice(model.schedule.agents)
            # Give them 1 unit money
            other.wealth += 1
            self.wealth -= 1
    def move(self, model):
        """Take a random step."""
        grid = model.grid
        # The get_neighborhood method returns a list of coordinate tuples for
        # the appropriate neighbors of the given coordinates. In this case,
        # it's getting the Moore neighborhood (including diagonals) and
        # includes the center cell. The agent decides where to move by choosing
        # one of those tuples at random. This is a good way of handling random
        # moves, since it still works for agents on an edge of a non-toroidal
        # grid, or if the grid itself is hexagonal.
        possible_steps = grid.get_neighborhood(
            self.pos, moore=True, include_center=True)
        choice = random.choice(possible_steps)
        # the move_agent method works like place_agent, but removes the agent
        # from its current location before placing it in its new one.
        grid.move_agent(self, choice)
    def give_money(self, model):
        """Give 1 unit of wealth to a random agent sharing this cell."""
        grid = model.grid
        pos = [self.pos]
        # This is a helper method which returns the contents of the entire list
        # of cell tuples provided. It's not strictly necessary here; the
        # alternative would be: x, y = self.pos; others = grid[y][x]
        # (note that grids are indexed y-first).
        others = grid.get_cell_list_contents(pos)
        # Only transfer when at least one OTHER agent occupies the cell;
        # the random pick may still select self (a no-op transfer).
        if len(others) > 1:
            other = random.choice(others)
            other.wealth += 1
            self.wealth -= 1
class MoneyModel(Model):
    """A model with some number of agents placed randomly on a MultiGrid.

    N: number of agents; width/height: grid dimensions; torus: whether the
    grid wraps around at the edges.
    """
    def __init__(self, N, width, height, torus):
        # The arguments needed to create a new grid are its
        # width, height, and a boolean for whether it is a torus or not.
        self.grid = MultiGrid(height, width, torus)
        self.num_agents = N
        self.schedule = RandomActivation(self)
        self.create_agents()
    def create_agents(self):
        """Create the agents and place each at a random grid cell."""
        for i in range(self.num_agents):
            a = MoneyAgent(i)
            self.schedule.add(a)
            x = random.randrange(self.grid.width)
            # BUG FIX: the original drew y from self.grid.width; on a
            # non-square grid that either skipped rows or went out of bounds.
            y = random.randrange(self.grid.height)
            # The place_agent method places the given object in the grid cell
            # specified by the (x, y) tuple, and assigns that tuple to the
            # agent's pos property.
            self.grid.place_agent(a, (x, y))
    def step(self):
        """Advance the model by one tick."""
        # The scheduler's step method activates the step methods of all the
        # agents that have been added to it, in this case in random order.
        self.schedule.step()
    def run_model(self, steps):
        """Run the model for *steps* ticks."""
        # Because the model has no inherent end conditions,
        # the user must specify how many steps to run it for.
        for i in range(steps):
            self.step()
# -
# Here, we create a model with `N` agents on a grid with `width` and `height` of 50.
# MoneyModel.__init__ already calls create_agents(); the original's second
# explicit call placed 100 MORE agents on the grid, doubling the population.
money_model = MoneyModel(N=100, width=50, height=50, torus=True)
money_model.run_model(50)
# Since our agents are on a coordinate plane, and each agent has a wealth value, we can plot our data!
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Sum the wealth of every agent standing in each grid cell and render the
# totals as a heat map.
wealth_grid = np.zeros((money_model.grid.width, money_model.grid.height))
for cell in money_model.grid.coord_iter():
    # coord_iter yields (cell contents, x, y) for every cell.
    cell_content, x, y = cell
    cell_wealth = sum(a.wealth for a in cell_content)
    wealth_grid[y][x] = cell_wealth
plt.imshow(wealth_grid, interpolation='nearest')
plt.show()
# -
# # Data Collection
#
# Generic DataCollector class, which can store and export data from most models without needing to be subclassed.
#
# The data collector stores three categories of data: model-level variables, agent-level variables, and tables which are a catch-all for everything else.
#
# Internally, the data collector stores all variables and tables in Python's standard dictionaries and lists. This reduces the need for external dependencies, and allows the data to be easily exported to JSON or CSV. However, one of the goals of Mesa is facilitating integration with Python's larger scientific and data-analysis ecosystems, and thus the data collector also includes methods for exporting the collected data to pandas data frames. This allows rapid, interactive processing of the data, easy charting, and access to the full range of statistical and machine-learning tools that are compatible with pandas.
#
# **Since data is not written out to a file, large simulations cannot be run yet. A way to append values to a external CSV is being developed so data does not need to be persisted in memory**
# ```python
#
# from mesa.datacollector import DataCollector
#
# class MoneyModel(Model):
#
# def __init__(self, N):
# # ... everything above
# ar = {"Wealth": lambda a: a.wealth}
# self.dc = DataCollector(agent_reporters=ar)
#
# def step(self):
# self.dc.collect(self)
# self.schedule.step()
# ```
# +
import random
from mesa import Model, Agent
from mesa.time import RandomActivation
from mesa.space import MultiGrid
from mesa.datacollection import DataCollector
class MoneyAgent(Agent):
    """An agent that starts with one unit of wealth and gives it away at random."""

    def __init__(self, unique_id):
        # Agents are identified by a unique id; everyone starts equally wealthy.
        self.unique_id = unique_id
        self.wealth = 1

    def step(self, model):
        """Hand one unit of money to a randomly chosen agent (if we have any)."""
        if self.wealth <= 0:
            return
        recipient = random.choice(model.schedule.agents)
        recipient.wealth += 1
        self.wealth -= 1

    def move(self, model):
        """Relocate to a random cell in our Moore neighborhood (center included)."""
        # get_neighborhood returns the coordinate tuples around self.pos.
        # Asking for the Moore neighborhood (diagonals too) with the center
        # included means "stay put" is a legal move, and the call remains safe
        # for agents on the edge of a non-toroidal grid.
        neighborhood = model.grid.get_neighborhood(
            self.pos, moore=True, include_center=True)
        destination = random.choice(neighborhood)
        # move_agent removes us from the current cell before placing us anew.
        model.grid.move_agent(self, destination)

    def give_money(self, model):
        """Give one unit of wealth to a random cell-mate, if any."""
        # get_cell_list_contents collects every agent in the listed cells;
        # equivalently: x, y = self.pos; others = grid[y][x] (grids are
        # indexed y-first).
        cellmates = model.grid.get_cell_list_contents([self.pos])
        if len(cellmates) > 1:
            recipient = random.choice(cellmates)
            recipient.wealth += 1
            self.wealth -= 1
class MoneyModel(Model):
    """A model with some number of agents placed randomly on a multi-grid."""

    def __init__(self, N, width, height, torus):
        """
        N      -- number of agents to create
        width  -- grid width (number of columns)
        height -- grid height (number of rows)
        torus  -- whether the grid edges wrap around
        """
        # NOTE(review): this mesa version's MultiGrid takes (height, width,
        # torus) -- confirm against the installed release before reordering.
        self.grid = MultiGrid(height, width, torus)
        self.num_agents = N
        self.schedule = RandomActivation(self)
        self.create_agents()
        ar = {"Wealth": lambda a: a.wealth}
        self.dc = DataCollector(agent_reporters=ar)

    def create_agents(self):
        """Create the agents and drop each on a uniformly random grid cell."""
        for i in range(self.num_agents):
            a = MoneyAgent(i)
            self.schedule.add(a)
            x = random.randrange(self.grid.width)
            # BUG FIX: the row coordinate must be drawn from the grid height;
            # the original sampled both coordinates from the width, which is
            # wrong whenever the grid is not square.
            y = random.randrange(self.grid.height)
            # place_agent puts the agent in cell (x, y) and sets agent.pos.
            self.grid.place_agent(a, (x, y))

    def step(self):
        """Advance one tick: activate all agents (random order), then collect."""
        self.schedule.step()
        self.dc.collect(self)

    def run_model(self, steps):
        """Run for a fixed number of steps (the model has no end condition)."""
        for _ in range(steps):
            self.step()
# -
# Create a model with 100 agents
model = MoneyModel(100, 10, 10, True)
# Run it for 1,000 steps:
model.run_model(1000)
# Get the data as a DataFrame
wealth_history = model.dc.get_agent_vars_dataframe()
# wealth_history indexed on Step and AgentID, and...
# ...has Wealth as one data column
wealth_history.reset_index(inplace=True)
# Plot a histogram of final wealth.
# Steps are 0-based, so Step == 999 is the last of the 1,000 recorded steps.
wealth_history[wealth_history.Step==999].\
    Wealth.hist(bins=range(10))
# # Batch Runner
#
# Since most ABMs are stochastic, a single model run gives us only one particular realization of the process the model describes. Furthermore, the questions we want to use ABMs to answer are often about how a particular parameter drives the behavior of the entire system -- requiring multiple model runs with different parameter values. In order to facilitate this, Mesa provides the BatchRunner class. Like the DataCollector, it does not need to be subclassed in order to conduct parameter sweeps on most models.
# +
from mesa.batchrunner import BatchRunner
class MoneyAgent(Agent):
    """An agent whose initial wealth is configurable."""

    def __init__(self, unique_id, starting_wealth):
        # Every agent needs a unique id; wealth starts at the supplied amount.
        self.unique_id = unique_id
        self.wealth = starting_wealth

    def step(self, model):
        """Pass one unit of money to a random agent, if we can afford it."""
        if self.wealth <= 0:
            return
        recipient = random.choice(model.schedule.agents)
        recipient.wealth += 1
        self.wealth -= 1
class MoneyModel(Model):
    """A model holding N agents, each starting with the same wealth."""

    def __init__(self, N, starting_wealth):
        # BatchRunner checks `running` to decide whether to keep stepping.
        self.running = True
        self.num_agents = N
        self.starting_wealth = starting_wealth
        self.schedule = RandomActivation(self)
        self.create_agents()
        self.dc = DataCollector(agent_reporters={"Wealth": lambda a: a.wealth})

    def create_agents(self):
        """Instantiate the agents and register them with the scheduler."""
        for agent_id in range(self.num_agents):
            self.schedule.add(MoneyAgent(agent_id, self.starting_wealth))

    def step(self):
        # Collect first, then activate: the data frame records the state at
        # the *start* of each step.
        self.dc.collect(self)
        self.schedule.step()

    def run_model(self, steps):
        """Run for a fixed number of steps; the model has no end condition."""
        for _ in range(steps):
            self.step()
def compute_gini(model):
    """Return the Gini coefficient of the agents' wealth distribution."""
    wealths = sorted(agent.wealth for agent in model.schedule.agents)
    n = model.num_agents
    # Weighted-sum formulation: each wealth (ascending) is weighted by the
    # number of agents at least as rich, normalised by n * total wealth.
    weighted = sum(w * (n - rank) for rank, w in enumerate(wealths))
    B = weighted / (n * sum(wealths))
    return 1 + (1 / n) - 2 * B
# Sweep starting_wealth over 1..9 with N fixed at 100.
param_values = {"N": 100, "starting_wealth": range(1,10)}
# Model-level reporter: record the Gini coefficient of each finished run.
model_reporter={"Gini": compute_gini}
# NOTE(review): positional args are presumably (iterations=10, max_steps=1000)
# for this mesa version -- confirm against the installed BatchRunner signature.
batch = BatchRunner(MoneyModel, param_values,
                    10, 1000, model_reporter)
batch.run_all()
# One row per run: swept parameters plus the reported Gini value.
out = batch.get_model_vars_dataframe()
plt.scatter(out.starting_wealth, out.Gini)
plt.grid(True)
plt.xlabel("Starting wealth")
plt.ylabel("Gini Coefficient")
| examples/Tutorial-Boltzmann_Wealth_Model/Tutorial-Boltzmann_Wealth_Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Week 1
#
# ## Overview
#
# As explained in the [*Before week 1* notebook](https://nbviewer.jupyter.org/github/lalessan/comsocsci2021/blob/master/lectures/Before_week_1.ipynb), each week of this class is a Jupyter notebook like this one. **_In order to follow the class, you simply start reading from the top_**, following the instructions.
#
# **Hint**: And you can ask me for help at any point if you get stuck!
# ## Today
#
# This first lecture will go over a few different topics to get you started
#
# * First, we will learn about Computational Social Science.
# * Second, we talk a bit about APIs and how they work.
# * Third, we'll use an API to download Reddit data from the _r/wallstreetbet_ subreddit
#
#
# ## Part 1: Computational Social Science
#
#
# But _What is Computational Social Science_? Watch the video below, where I will give a short introduction to the topic.
#
# > **_Video lecture_**: Watch the video below about Computational Social Science
from IPython.display import YouTubeVideo
YouTubeVideo("qoPk_C3buD8",width=600, height=337.5)
# Now that you have learnt what Computational Social Science is, read about the advantages and challenges of using _"Big Data"_ for Social Science Research in Sections 2.1 to 2.3 of the book Bit by Bit.
#
# > _Reading_: [Bit by Bit, sections 2.1 to 2.3](https://www.bitbybitbook.com/en/1st-ed/observing-behavior/observing-intro/) Read sections 2.1 and 2.2, then skim through section 2.3. The idea is for you to understand, in general terms, advantages and challenges of large observational datasets (a.k.a. Big Data) for social studies.
# > *Exercise 1*: This year, lockdowns have helped governments contain the pandemic. But they also negatively impacted our wellbeing. Imagine you had to study the following question: "_What are some of the strategies people adopt to preserve their mental and physical wellbeing during lockdown?_"
#
# > * Write in a couple of lines:
#
# >> * Which data would you collect to study this topic?
# >>> * Ideally observational data with a camera spying on people in their homes, but this cannot be done due to ethical and lawful problems
# >>> * Big data such as internet traces could be used, to see whether people look into strategies on the internet and which strategies the might prefer
# >>> * Data from social networks, where people post how they preserve their wellbeing
# >>> * Data about the participant, ie. job, age, who they live with (child, spouse etc.)
# >>> * What type of tv does people watch?
# >>> * How much does people communicate with others? Sentiment analysis on their texts
# >>> * Data about peoples level of excercising, ie. google fit data
# >>> * How has their diet changed, look at receipts
#
#
# >> * How would you collect it?
# >>> * Could observe people on the internet through their clicks, social media and such
# >>> * Combine above with a survey
# >>> * Request access to differnet medias
#
# > * Describe the data you would need more in details (also by writing down a couple of lines):
#
# >> * How big is the data (number of users/number of data points)?
# >>> * Very big, social media is one of the largest
# >>> * Walks are tracked on phones, so large data
# >>> * Media data is big
#
# >> * Which variables it contains?
# >>> * Social media contains information
#
#
# ### Final study approach
#
# >* 1: Look at their changed lives through digital data such as fitness apps, receipts and social media activity
# >* 2: Look at new subscriptions for various services, ie. online yoga classes, streaming services
#
# >* Collect data via requesting access from companies who own the data, but beware of potentially sensitive and inaccessible data
#
# Advantages:
# >* 2 is more qualitative, as we cannot measure the effect on mental health
# >* 1 is more individualised, as 2 can be bought for multiple people and doesn't reflect the direct use
# ## Part 2: Using APIs to download Reddit data
#
# But what is an API? Find the answer in the short video below, where we get familiar with APIs to access Reddit data.
#
# > **_Video lecture_**: Watch the video below about the Reddit API
from IPython.display import YouTubeVideo
YouTubeVideo("eqBIFua00O4",width=600, height=337.5)
# It's time for you to get to work. Take a look at the two texts below - just to get a sense of a more technical description of how the Pushshift API works.
#
#
# > _Reading_ (just skim): [New to Pushshift? Read this! FAQ](https://www.reddit.com/r/pushshift/comments/bcxguf/new_to_pushshift_read_this_faq/)
# > _Reading_ (just skim): [Pushshift Github Repository](https://github.com/pushshift/api)
# >
# ## Prelude to part 3: Pandas Dataframes
#
# Before starting, we will also learn a bit about [pandas dataframes](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html), a very user-friendly data structure that you can use to manipulate tabular data. Pandas dataframes are implemented within the [pandas package](https://pandas.pydata.org/).
#
# Pandas dataframes should be intuitive to use. **I suggest you to go through the [10 minutes to Pandas tutorial](https://pandas.pydata.org/pandas-docs/stable/user_guide/10min.html) to learn what you need to solve the next exercise.**
# ## Part 3: Getting data from the _r/wallstreetbet_ subreddit
# There has been a lot of interest in the social platform Reddit this week, after investors from the [_r/wallstreetbet_](https://www.reddit.com/r/wallstreetbets/) subreddit managed to [give a huge boost](https://www.google.com/search?q=GME+price&oq=GME+price&aqs=chrome..69i57.1261j0j4&sourceid=chrome&ie=UTF-8) to the shares of the video game retailer's GameStop (traded as "_GME_"), causing massive losses to professional investors and established hedge funds.
#
# There is so much buzz about _Gamestop_ because it is really something unprecedented! Online discussions about stocks on social media have fuelled massive price moves that cannot be explained by traditional valuation metrics and can seriously destabilize the established market. Many ordinary investors on Reddit have coordinated to buy shares of a stock that had been losing value for a long time. __But how did this all happen?__
#
# Today and in the following classes, we will try to answer precisely this question, by studying the social network of Redditors of _r/wallstreetbet_ throughout last year.
# The starting point will be to understand how to download data from Reddit using APIs. But before we start getting our hands dirty, if you feel like you don't know much about Gamestop, I suggest watching this short video summarizing the latest events. If you already know everything about it, feel free to skip it.
#
# >
# > **_Video_**: [Stocks explained: What's going on with GameStop?](https://www.bbc.com/news/av/technology-55864312)
# >
# > *Exercise 2*: __Download submissions of the [_r/wallstreetbets_](https://www.reddit.com/r/wallstreetbets/) subreddit using the [Pushshift API](https://github.com/pushshift/api)__
# > 1. Use the [psaw Python library](https://pypi.org/project/psaw/) (a wrapper for the Pushshift API) to find all the submissions in subreddit _r/wallstreetbet_', related to either "_GME_" or "_Gamestop_" (**Hint**: Use the [``q``](https://github.com/pushshift/api) parameter to search text. To search multiple words you can separate them with character "|"). Focus on the period included __between Jan,1st 2020 and Jan 25th, 2021__, where time must be provided in [Unix Timestamp](https://www.unixtimestamp.com/). _Note: The Pushift API returns at most 100 results per query, so you may need to divide your entire time period in small enough sub-periods._
# > 2. For each submission, find the following information: __title, id, score, date of creation, author, and number of comments__ (**Hint**: access the dictionary with all attributes by typing ``my_submission.d_``). Store this data in a pandas DataFrame and save it into a file. (Downloading required me 30 minutes using two cores. While you wait for the results, you can start thinking about _Exercise 3_).
# > 3. Create a figure using [``matplotlib``](https://matplotlib.org/) and plot the total number of submissions per day (**Hint**: You can use the function [``datetime.datetime.utcfromtimestamp``](https://docs.python.org/3/library/datetime.html) to convert a timestamp into a date, and you can use the function [``pd.resample``](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html) to aggregate by day). What do you observe?
# > 4. _Optional_: How many unique authors are there each week in the period under study?
#
# > *Exercise 3*: __Download comments from the [_r/wallstreetbets_](https://www.reddit.com/r/wallstreetbets/) subreddit.__ The second task for today is to download the comments associated to each submission, which we will use to build the social network of Redditers.
# > 1. For each submission you found in _Exercise 2_, download all the comments (*Hint*: Use the [``search_comments``](https://github.com/pushshift/api) function to search comments. You can specify the parameter ``link_id``, which corresponds to the _id_ of the submission for which you require comments).
# > 2. For each comment, store the following information: __title, id, submission, score, date of creation, author, and number of comments__. Store this in a pandas DataFrame and save it into a file. We will use it in the next classes.
#
# > __Note__: It took me about a night to get the data for _Exercise 3_. I guess Pushshift servers are going through increasing stress due to the raising interest in the Gamestop saga. If you experience extremely slow downloading time, reach out to me! If you are brave, you can also check out the Reddit API, which is wrapped by [praw](https://praw.readthedocs.io/en/latest/tutorials/comments.html). It functions very much like psaw, but it requires you to first get credentials [here](https://www.reddit.com/prefs/apps) (click on _Create another app_)
# # Setup
# +
# # !pip install psaw pandas numpy datetime matplotlib
# psaw is a thin Python wrapper around the Pushshift API for Reddit data.
from psaw import PushshiftAPI
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
# Single client instance reused by every query cell below.
api = PushshiftAPI()
# -
# # Get submissions
#
# Either extract data from new or use the saved csv file
# +
my_subreddit = "wallstreetbets"
# Search window as Unix timestamps (note: datetime.timestamp() interprets the
# naive datetimes in the machine's local timezone -- presumably acceptable for
# day-level granularity, but verify if exact boundaries matter).
date1 = int(datetime.datetime(2020, 1, 1).timestamp())
date2 = int(datetime.datetime(2021, 1, 25).timestamp())
# "|" acts as OR in the Pushshift `q` text-search parameter.
query = "GME|Gamestop"
gen = api.search_submissions(subreddit = my_subreddit,
                             after = date1,
                             before = date2,
                             q = query)
# consider adding filter
# ,
# filter=['author', 'title', 'id', 'score', 'created_utc', 'num_comments']
# Materialising the generator triggers the actual (slow) download.
results = list(gen)
# +
# Flatten each submission's attribute dict (p.d_) into one DataFrame row;
# creation_date is the UTC calendar day derived from the created_utc timestamp.
df_res = pd.DataFrame([(p.d_["title"],
                        p.d_["id"],
                        p.d_["score"],
                        p.d_["created_utc"],
                        datetime.datetime.utcfromtimestamp(p.d_["created_utc"]).strftime("%Y-%m-%d"),
                        p.d_["author"],
                        p.d_["num_comments"]) for p in results], columns = ["title", "id", "score", "created_utc", "creation_date", "author", "num_comments"])
# Persist so later sessions can skip the (very slow) download step.
df_res.to_csv("Data/gme_reddit_submissions.csv", index = False)
# -
# Reload from disk -- run this cell alone to work from the cached copy.
df_res = pd.read_csv("Data/gme_reddit_submissions.csv")
df_res
# ## Submissions per day
#
# Plot of the submissions per day
# Parse the date strings so pandas can resample on them.
df_res["creation_date"] = pd.to_datetime(df_res["creation_date"])
# Count submissions per calendar day ('1D' buckets keyed on creation_date).
df_plot = df_res.resample('1D', on = 'creation_date').count()
# Drop the self-count column; any remaining column (e.g. `id`) carries
# the per-day counts.
del df_plot["creation_date"]
df_plot = df_plot.reset_index()
display(df_plot)
df_plot.plot.line(x='creation_date', y='id')
plt.show()
# ## Unique authors
# How many unique authors are there each week in the period under study?
# # Get comments
# Either extract data from new or use the saved csv file
# +
# NOTE(review): despite the "Get comments" heading, this cell is a verbatim
# copy of the submissions query above (api.search_submissions). Exercise 3
# asks for api.search_comments with link_id set to each submission id --
# presumably this cell is unfinished; confirm before relying on its output.
my_subreddit = "wallstreetbets"
date1 = int(datetime.datetime(2020, 1, 1).timestamp())
date2 = int(datetime.datetime(2021, 1, 25).timestamp())
query = "GME|Gamestop"
gen = api.search_submissions(subreddit = my_subreddit,
                             after = date1,
                             before = date2,
                             q = query)
# consider adding filter
# ,
# filter=['author', 'title', 'id', 'score', 'created_utc', 'num_comments']
results = list(gen)
| lectures/Week1 - Copy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from keras.preprocessing.text import Tokenizer
import nltk
from nltk.tokenize import word_tokenize
import numpy as np
import re
from keras.utils import to_categorical
from doc3 import training_doc3
# Strip punctuation (every non-word run becomes a space) and lowercase.
cleaned = re.sub(r'\W+', ' ', training_doc3).lower()
tokens = word_tokenize(cleaned)
# Sliding window of 4 tokens: 3 context words plus 1 target word.
train_len = 3+1
text_sequences = []
for i in range(train_len,len(tokens)):
    seq = tokens[i-train_len:i]
    text_sequences.append(seq)
# NOTE(review): this manually built word->index dict is dead code -- the
# `sequences` name is overwritten by texts_to_sequences below and the dict
# is never used.
sequences = {}
count = 1
for i in range(len(tokens)):
    if tokens[i] not in sequences:
        sequences[tokens[i]] = count
        count += 1
# Let Keras build its own vocabulary and encode each window as integer ids.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(text_sequences)
sequences = tokenizer.texts_to_sequences(text_sequences)
#Collecting some information
# +1 because Keras word indices start at 1 (0 is reserved for padding).
vocabulary_size = len(tokenizer.word_counts)+1
# Pack the variable-length lists into a fixed (num_windows, 4) int array.
n_sequences = np.empty([len(sequences),train_len], dtype='int32')
for i in range(len(sequences)):
    n_sequences[i] = sequences[i]
# -
# First 3 columns are the context words, the last column is the word to predict.
train_inputs = n_sequences[:,:-1]
train_targets = n_sequences[:,-1]
# One-hot encode targets for the categorical cross-entropy loss.
train_targets = to_categorical(train_targets, num_classes=vocabulary_size)
seq_len = train_inputs.shape[1]
train_inputs.shape
#print(train_targets[0])
train_targets[0]
# +
from keras.models import Sequential, load_model
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
#model = load_model("mymodel.h5")
# Embedding -> two stacked LSTMs -> dense relu -> softmax over the vocabulary.
model = Sequential()
model.add(Embedding(vocabulary_size, seq_len, input_length=seq_len))
# return_sequences=True feeds the full hidden sequence to the second LSTM.
model.add(LSTM(50,return_sequences=True))
model.add(LSTM(50))
model.add(Dense(50,activation='relu'))
model.add(Dense(vocabulary_size, activation='softmax'))
print(model.summary())
# compile network
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(train_inputs,train_targets,epochs=500,verbose=1)
# Persist weights so the commented load_model line above can resume later.
model.save("mymodel.h5")
# -
from keras.preprocessing.sequence import pad_sequences
# Read a seed phrase from the user and encode it with the training tokenizer.
input_text = input().strip().lower()
encoded_text = tokenizer.texts_to_sequences([input_text])[0]
# Left-truncate/pad to the model's expected context length.
pad_encoded = pad_sequences([encoded_text], maxlen=seq_len, truncating='pre')
print(encoded_text, pad_encoded)
# argsort()[-3:][::-1] picks the three highest-probability word ids.
for i in (model.predict(pad_encoded)[0]).argsort()[-3:][::-1]:
    pred_word = tokenizer.index_word[i]
    print("Next word suggestion:",pred_word)
| MVP_version_1/Next Word Predictor/.ipynb_checkpoints/Next_Word_Predictor-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MAP ADT
#
# **Overall Structures**
# $$
# \text{MutableMapping (collections module)}\\
# \overbrace{\text{MapBase}}^{\Uparrow}\\
# \overbrace{
# \text{UnsortedListMap}\quad
# \text{HashMapBase} \quad
# \text{SortedListMap} \quad
# \text{TreeMap}}^{\Uparrow}\\
# \overbrace{\text{ChainHashMap}\quad\text{ProbeHashMap}}^{\Uparrow}
# $$
# BUG FIX: MutableMapping lives in collections.abc; the alias in the plain
# `collections` namespace was deprecated and removed in Python 3.10, so the
# original `from collections import MutableMapping` fails on modern Pythons.
from collections.abc import MutableMapping
class MapBase(MutableMapping):
    """Abstract map base providing the _Item record that concrete maps store."""

    class _Item:
        # Lightweight key/value pair; __slots__ avoids a per-instance dict.
        __slots__ = '_key', '_value'

        def __init__(self, key, value):
            self._key = key
            self._value = value

        # Items compare by key only, so maps can locate and order entries.
        def __eq__(self, other):
            return self._key == other._key

        def __ne__(self, other):
            return self._key != other._key

        def __lt__(self, other):
            return self._key < other._key
# ## UnsortedListMap
# 1. List based Implementation
# 2. Worst-cases scenarios
#
# |operation|complexity|
# |:-|:-|
# |`find`|$\mathcal{O}(n)$|
# |`insert`|$\mathcal{O}(n)$|
# |`update`|$\mathcal{O}(n)$|
# |`delete`|$\mathcal{O}(n)$|
class UnsortedListMap(MapBase):
    """Map backed by an unsorted list of _Item records (O(n) operations)."""

    def __init__(self):
        # Items live in a plain built-in list, in arrival order.
        self._list = []

    def __getitem__(self, key):
        """Return the value stored under `key`, or raise KeyError."""
        for entry in self._list:
            if key == entry._key:
                return entry._value
        raise KeyError('Key Error:{}'.format(repr(key)))

    def __setitem__(self, key, value):
        """Overwrite the value for an existing key, else append a new item."""
        for entry in self._list:
            if key == entry._key:
                entry._value = value
                return
        self._list.append(self._Item(key, value))

    def __delitem__(self, key):
        """Remove (and return) the item stored under `key`, or raise KeyError."""
        for position in range(len(self._list)):
            if key == self._list[position]._key:
                return self._list.pop(position)
        raise KeyError('Key Error:{}'.format(repr(key)))

    def __len__(self):
        return len(self._list)

    def __iter__(self):
        """Yield the keys in storage (arrival) order."""
        for entry in self._list:
            yield entry._key
if __name__ == '__main__':
    # Working example: a map of maps -- each show title maps to its own
    # UnsortedListMap of attributes.
    Movies = UnsortedListMap()
    Cherbobly = UnsortedListMap()
    UDI = UnsortedListMap()
    Movies['chernobly'] = Cherbobly
    Movies['udi'] = UDI
    Cherbobly['Episods'] = 5
    Cherbobly['Producer'] = 'HBO'
    UDI['Episods'] = 10
    UDI['Producer'] = 'JaH'
    # Iterating a map yields its keys; nested lookup reaches the inner map.
    for movie_key in Movies:
        print(Movies[movie_key]['Producer'])
# ## Hash Table
# ### Hash-then-compression
#
# step 1. Hash function: `hash([immutable object])`
#
# step 2. Compression function: $[a\times integer + b \text{ mod }p] \text{ mod } N$, where $N$ is the size of a key-holding-list, $p$ is a prime number that is greater than $N$, and $a \in (0, N-1], b \in [0, N-1]$. This scheme is called MAD compression.
#
# The reason using the particular choice of a compression function is to avoid 'cyclic' pattern.
# ### Collision-handling Scheme
#
# 1. separate chain: the `index list` is implemented as a built-in list while each `bucket` is implemented as `UnsortedListMap`.
# <img src="./figs/seperateChain.png" width="200" heigh='80'/>
# * Suppose we have a key-holding-list of size $N$, and we want to index $n$ items. Then the expected length of a `UnsortedListMap` is $[\frac{n}{N}]$, which is as a result of a really good hash function. Define $\lambda= [\frac{n}{N}]$. Then as long as $\lambda$ is $\mathcal{O}(1)$. Then the complexity for `find`, `insert`, `delete`, `update` all take $\mathcal{O}(1)$ time.
#
# 2. linear probing: no additional data structure required.
# <img src="./figs/linearProbe.png" width="400" heigh='300'/>
# * If we try to insert an item (key, value) into a list `L[j]` that is already occupied, where `j = hash(key)`, then we next try `L[(j+1) mod N]`. If `L[(j+1) mod N]` is also occupied, then we try `L[( j + 2) mod N ]`, and so on, until we find an empty bucket that can accept the new item. Once this bucket is located, we simply insert the item there.
# * Easy to form clusters, which is bad.
# * Deletion should be taken care of as you can not simply remove it. You need some place holder.
#
# 3. Double hashing: use secondary function to generate indices if collision happens.
#
#
#
#
# |operation|complexity||
# |:-|:-|:-|
# ||Expected|Worst Case|
# |`find`|$\mathcal{O}(1)$|$\mathcal{O}(n)$|
# |`insert`|$\mathcal{O}(1)$|$\mathcal{O}(n)$|
# |`update`|$\mathcal{O}(1)$|$\mathcal{O}(n)$|
# |`delete`|$\mathcal{O}(1)$|$\mathcal{O}(n)$|
import random as rnd
class HashMapBase(MapBase):
    """
    Abstract hash table base class using MAD (multiply-add-divide) compression.

    Concrete subclasses supply the collision-handling scheme by implementing
    _getitem, _setitem and _delitem.
    """

    def __init__(self, capacity, prime=109345121):
        """Create an empty table with `capacity` slots; `prime` > capacity."""
        self._list = [None] * capacity
        self._size = 0                      # number of items stored
        self._prime = prime
        # Random a in [1, p-1], b in [0, p-1] randomise the compression so an
        # adversarial key set cannot easily force collisions.
        self._a = 1 + rnd.randrange(prime-1)
        self._b = rnd.randrange(prime-1)

    def __len__(self):
        return self._size

    def _hash_compression(self, key):
        """Map hash(key) onto a slot index via [(a*h + b) mod p] mod N."""
        return ((self._a * hash(key) + self._b) % self._prime ) % len(self._list)

    def __getitem__(self, key):
        idx = self._hash_compression(key)
        return self._getitem(idx, key)      # depends on concrete implementation

    def __setitem__(self, key, value):
        idx = self._hash_compression(key)
        self._setitem(idx, key, value)      # depends on concrete implementation
        if self._size > len(self._list) // 2:   # keep load factor <= 0.5
            # BUG FIX: the original referenced the non-existent attribute
            # `self._table`, so every resize attempt raised AttributeError.
            self._resize(2 * len(self._list) - 1)

    def __delitem__(self, key):
        idx = self._hash_compression(key)
        # BUG FIX: decrement the size only after a successful removal; the
        # original decremented first, corrupting the count whenever _delitem
        # raised KeyError for a missing key.
        self._delitem(idx, key)             # depends on concrete implementation
        self._size -= 1

    def _resize(self, capacity):
        """Rehash every stored item into a fresh slot list of `capacity` slots."""
        # BUG FIX: materialise the pairs before rebuilding -- items() is a lazy
        # view over this map, so iterating it after reassigning _list would
        # silently lose every entry.
        old = list(self.items())            # items() inherited from MutableMapping
        # BUG FIX: the original assigned to a local `self_list` (typo), so the
        # table was never actually replaced.
        self._list = capacity * [None]
        self._size = 0
        for (key, value) in old:
            self[key] = value
# +
class ChainHashMap(HashMapBase):
    '''
    Concrete implementation of HashMapBase using separate chaining: each
    occupied slot holds an UnsortedListMap bucket of all colliding keys.
    '''

    def _getitem(self, idx, key):
        bucket = self._list[idx]        # O(1) slot access
        if bucket is None:
            raise KeyError('Key Error' + repr(key))
        return bucket[key]              # expected O(1); worst case O(n)

    def _setitem(self, idx, key, value):
        # (Removed a stray debug print of the bucket index from the original.)
        # Lazily create the bucket on first use of this slot.
        if self._list[idx] is None:
            self._list[idx] = UnsortedListMap()
        bucket_size = len(self._list[idx])
        self._list[idx][key] = value    # insert or overwrite within the bucket
        # The bucket grew only if the key was new; track the map size then.
        if len(self._list[idx]) > bucket_size:
            self._size += 1

    def _delitem(self, idx, key):
        bucket = self._list[idx]        # O(1) slot access
        if bucket is None:
            raise KeyError('Key Error' + repr(key))
        else:
            # The bucket itself raises KeyError if the key is absent.
            del bucket[key]

    def __iter__(self):
        """Yield every key, bucket by bucket."""
        for bucket in self._list:
            if bucket is not None:
                for key in bucket:
                    yield key
if __name__ == "__main__":
    IntFloat = ChainHashMap(20)
    # 5 == 5.0 and hash(5) == hash(5.0), so these two keys share one entry.
    IntFloat[5.0] = 'float'
    IntFloat[5] = 'int'
    IntFloat[6] = 'int'
    print(len(IntFloat)) # since key 5 == key 5.0, then value float is overwritten by int.
    print(IntFloat[5.0])
    print(IntFloat[5])
    # Deleting via either equivalent key removes the shared entry.
    del IntFloat[5]
    print(len(IntFloat))
# -
class ProbeHashMap(HashMapBase):
    """Concrete HashMapBase using open addressing with linear probing."""

    _AVLOBJ = object()  # sentinel marking a slot whose item was deleted

    def _is_availabe(self, idx):
        """A slot can accept an insert if it is empty or holds the sentinel."""
        condition1 = self._list[idx] is None
        condition2 = self._list[idx] is self._AVLOBJ
        return condition1 or condition2

    def _find_slot(self, idx, key):
        """Probe linearly from `idx` looking for `key`.

        Returns (True, position) when the key is found, otherwise
        (False, first_available_position) where a new item may be placed.
        Relies on the base class keeping the load factor below 1 so that an
        empty slot always terminates the probe.
        (Removed the original's verbose debug prints.)
        """
        first_available_position = None
        while True:
            if self._is_availabe(idx):
                if first_available_position is None:
                    first_available_position = idx  # earliest reusable slot
                if self._list[idx] is None:
                    # A truly empty slot ends the probe: the key is absent.
                    return (False, first_available_position)
            elif key == self._list[idx]._key:
                return (True, idx)      # key already stored at this position
            idx = (idx+1) % len(self._list)     # wrap around to the next slot

    def _getitem(self, idx, key):
        found, position = self._find_slot(idx, key)
        if not found:
            raise KeyError('Key Error: ' + repr(key))
        return self._list[position]._value

    def _setitem(self, idx, key, value):
        found, position = self._find_slot(idx, key)
        if not found:
            self._list[position] = self._Item(key, value)
            self._size += 1
        else:
            self._list[position]._value = value     # overwrite existing key

    def _delitem(self, idx, key):
        found, position = self._find_slot(idx, key)
        if not found:
            raise KeyError('Key Error: ' + repr(key))
        # Leave the sentinel behind so probe chains through this slot survive.
        self._list[position] = self._AVLOBJ
        # BUG FIX: do not decrement self._size here -- HashMapBase.__delitem__
        # already decrements it, so the original double-counted every delete
        # and drove len() negative.

    def __iter__(self):
        """Yield the keys of every occupied (non-empty, non-sentinel) slot."""
        for idx in range(len(self._list)):
            if not self._is_availabe(idx):
                yield self._list[idx]._key
# Exercise insert, delete, and re-insert: the delete leaves a sentinel in the
# slot, and the re-insert reuses that first available position.
test = ProbeHashMap(capacity=10)
test[5] = 'this is 5'
print("===")
del test[5]
print("===")
test[5] = 'this is 5 again'
# ## SortedMap
#
# This kind of data structure is useful for range-based search. So in additional to all features provided in the `UnsortedListMap`, we will have following features
#
# * find_min, find_max: find the key-value pair with minimal/maximal key.
# * find_lt, find_le, find_gt, find_ge: find the key-value pair with key less than/ less or equal to/ greater than / greater or equal to the give key.
# * find_range: iterate all (key,value) pairs with st`art <= key < stop`. If start is None, iteration begins with minimum key; if stop is None, iteration concludes with maximum key.
# * reversed: iterate all keys of the map in reverse order; in Python
# # LeetCode
#
# ## TwoSum
# Given an array of integers, return indices of the two numbers such that they add up to a specific target.
#
# You may assume that each input would have exactly one solution, and you may not use the same element twice.
#
# Example:
# ```
# Given nums = [2, 7, 11, 15], target = 9,
#
# Because nums[0] + nums[1] = 2 + 7 = 9,
# return [0, 1].
# ```
# +
class Solution:
    def twosum(self, nums, target):
        """Return indices [i, j] (i < j) with nums[i] + nums[j] == target.

        Single pass with a hash map from value -> index: for each number we
        check whether its complement was already seen. O(n) time, O(n) extra
        space. Returns None when no pair sums to target.
        """
        seen = {}   # value -> index of its first occurrence
        for idx in range(len(nums)):
            leftover = target - nums[idx]
            # Membership test on the dict itself is O(1); the original's
            # `in mydict.keys()` built an unnecessary view object.
            if leftover in seen:
                return [seen[leftover], idx]
            seen[nums[idx]] = idx
if __name__ == "__main__":
    nums = [2, 7, 11, 15]
    mine = Solution()
    print(mine.twosum(nums, target=9))      # [0, 1]: 2 + 7 == 9
    print(mine.twosum(nums, target=14))     # None: no pair of distinct elements sums to 14
# -
# ## Longest sub-String without repeating
#
# Given a string, find the length of the longest substring without repeating characters.
#
# ```
# Example 1:
#
# Input: "abcabcbb"
# Output: 3
# Explanation: The answer is "abc", with the length of 3.
# Example 2:
#
# Input: "bbbbb"
# Output: 1
# Explanation: The answer is "b", with the length of 1.
# Example 3:
#
# Input: "pwwkew"
# Output: 3
# Explanation: The answer is "wke", with the length of 3.
# Note that the answer must be a substring, "pwke" is a subsequence and not a substring.
# ```
# +
class Solution:
    def lengthOfLongestSubstring(self, s):
        """Length of the longest substring of `s` with no repeated characters.

        Sliding-window scan: `window_start` is the left edge of the current
        duplicate-free window and `last_seen` maps each character to the index
        of its most recent occurrence. O(len(s)) time, O(alphabet) space.
        """
        last_seen = {}
        best = 0
        window_start = 0
        for idx, ch in enumerate(s):
            prev = last_seen.get(ch)
            if prev is not None and prev >= window_start:
                # ch repeats inside the window: slide past its old position.
                window_start = prev + 1
            else:
                best = max(best, idx - window_start + 1)
            last_seen[ch] = idx
        return best
if __name__ == "__main__":
    mine = Solution()
    print(mine.lengthOfLongestSubstring("pwwkew"))      # 3 ("wke")
    print(mine.lengthOfLongestSubstring("abcabcbb"))    # 3 ("abc")
    print(mine.lengthOfLongestSubstring("bbbb"))        # 1 ("b")
    print(mine.lengthOfLongestSubstring(""))            # 0 (empty string)
    print(mine.lengthOfLongestSubstring(' '))           # 1 (single space)
    print(mine.lengthOfLongestSubstring('dvdf'))        # 3 ("vdf")
# -
# Scratch cells exploring the dict-membership idiom used above.
mydict = {}
' ' in mydict.keys()
# Manual de-duplication: append each character the first time it is seen.
longest_new =''
mydict = {}
for char in ' ':
    if char not in mydict.keys():
        mydict[char] = 0
        longest_new+=char
len(longest_new)
| Python_DataStructures/Maps, Hash Tables, and Skip Lists.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Import librarys
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# # Importing data set as pandas DataFrame
dataset = pd.read_csv('iris_dataset.csv')
dataset.head()
# # Converting DataFrame to numpy arrays
# Column order in the CSV: sepal length, sepal width, petal length, petal width.
ds = dataset.to_numpy()
sepal_length_field = ds[:, 0]
sepal_width_field = ds[:, 1]
median_sl = np.median(sepal_length_field)
# Bug fix: median_sl is computed from the sepal *length* column, so the
# message must not say "sepal width".
print('median of the sepal length field is: ', median_sl)
median_sw = np.median(sepal_width_field)
std_sw = np.std(sepal_width_field)
print('median of the sepal width field is: ', median_sw)
print('standard deviation of the sepal width field is: ', std_sw)
# # Percentile of the field
# A percentile is the value below which a given percentage of the observations fall.
petal_length_field = ds[:, 2]
petal_width_field = ds[:, 3]
per_pl = np.percentile(petal_length_field, 50)
print(f'50% of petal lengths are lower than {per_pl}')
# # Plotting histogram
plt.hist(petal_length_field)
| ML_Basic_iris_dataset_02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Energy Packet Initialization
#
# While it is instructive to think about tracking the propagation history of
# individual photons when illustrating the basic idea behind Monte Carlo radiative transfer
# techniques, there are important numerical reasons for using a different
# discretization scheme. Instead of thinking in the photon picture, it brings
# significant advantages to follow the idea of <strong data-cite="Abbott1985">[]</strong> and
# <strong data-cite="Lucy1999">[]</strong> and consider parcels of radiant energy as the fundamental
# building blocks of the Monte Carlo calculation. These basic Monte Carlo quanta
# are commonly referred to as "energy packets" or simply "packets", and are composed of many photons with the same frequency.
#
# During a Monte Carlo calculation, $N$ (a large number) packets, all with a certain
# energy $\varepsilon$, are created at the inner boundary of the computational domain (which is discussed in [Model of Supernova Domain](../setup/model.rst)) known as the photosphere. Currently, the photosphere is modeled as a spherical blackbody with a radius $R_\mathrm{phot}$ and temperature $T_\mathrm{phot}$. In TARDIS, all packets are assigned identical energies, and the total energy of the packets is 1 erg (and thus each packet has an energy of $\frac{1}{N}$ ergs).
#
# <div class="alert alert-info">
#
# Note
#
# The indivisible energy packet scheme does not require that all packets have the same energy. This is just a convenient and simple choice adopted in TARDIS.
#
# </div>
#
# Since the photosphere is modeled as a blackbody, its total luminosity $L$ (recall that luminosity is energy emitted divided by the time in which it is emitted) is
# $$L=\frac{N\varepsilon}{\Delta t}=4 \pi R_{\mathrm{phot}}^2 \sigma_{\mathrm{R}} T_{\mathrm{phot}}^4$$
# where $\sigma_\mathrm{R}$ is the Stefan-Boltzmann constant and $\Delta t$ is the physical duration of the simulation. In order to make this relationship hold (remembering that $N\varepsilon = 1$ erg), we use
# $$\Delta t = \frac{1}{L}=\frac{1}{4 \pi R_{\mathrm{phot}}^2 \sigma_{\mathrm{R}} T_{\mathrm{phot}}^4}.$$
#
# During packet initialization, each packet is assigned an initial propagation direction $\mu$ which is the cosine of the angle $\theta$ which the packet's path makes with the radial direction. Using a pseudo-random number generator which generates numbers $z$ uniformly distributed on the interval $[0,1]$, the propagation direction is determined according to
# $$\mu = \sqrt{z}.$$
# This sampling is demonstrated in the code below.
#
# Finally, each packet is assigned an initial frequency (or more precisely, the initial frequency of its constituent photons). Note that since each packet has the same energy, each packet will represent a different number of real photons. The sampling on packet frequencies is more involved than that of the propagation direction, as it involves sampling the Planck distribution (see below). TARDIS uses the technique described in <strong data-cite="Carter1975">[]</strong> and summarized in <strong data-cite="Bjorkman2001">[]</strong> for this purpose.
#
# During the simulation, the energy of the packet remains constant in the local
# co-moving frame (see [Reference Frames](propagation.rst#reference-frames)). This naturally ensures energy
# conservation and constitutes the main advantage of this discretization scheme. **However, while the energy of the packets is conserved in the co-moving frame, the frequency of the constituent photons (in the local co-moving frame) may vary over the course of the simulation. Thus, a packet may represent several different numbers of real photons throughout its lifetime.**
#
# We now demonstrate the TARDIS packet initialization framework:
# +
import numpy as np
from tardis.montecarlo.packet_source import BlackBodySimpleSource
from astropy import units as u
from tardis import constants as const
import matplotlib.pyplot as plt
# The pseudo-random number generator used for all packet sampling below.
rng = np.random.default_rng()
# -
# The following cell contains values that you can change to see how it affects the spectrum:
# +
# Seed for the pseudo-random number generator (fixed for reproducible draws)
seed = 1
# Radius of the supernova's photosphere in cm
r_phot = 1e15 * u.cm
# Number of packets generated; more packets give a smoother spectrum histogram
n_packets = 40000
# -
# You can either set a temperature of the photosphere, which will determine its luminosity; or you can set the luminosity of the photosphere, which will determine its temperature.
# +
# Temperature in K
temperature = 10000 * u.K
# Stefan-Boltzmann law for a spherical blackbody: L = 4*pi*R^2 * sigma_R * T^4
luminosity = 4 * np.pi * (r_phot**2) * const.sigma_sb * (temperature**4)
# Makes sure the luminosity is given in erg/s
luminosity = luminosity.to('erg/s')
print('Luminosity:', luminosity)
# +
# Luminosity in erg/s
luminosity = 7e42 * u.erg / u.s
# Inverted Stefan-Boltzmann law: T = (L / (4 pi sigma_R))^(1/4) / sqrt(R)
temperature = (luminosity / (4 * np.pi * const.sigma_sb))**0.25 / np.sqrt(r_phot)
# Makes sure the temperature is given in K
temperature = temperature.to('K')
print('Temperature:', temperature)
# -
# We now generate the ensemble of packets. The array of packet energies and radii are also shown.
# +
# We define our packet source (blackbody photosphere, seeded for reproducibility)
packet_source = BlackBodySimpleSource(seed)
# One entry per packet: initial radius, frequency, direction cosine mu,
# and energy (units attached just below).
radii, nus, mus, energies = packet_source.create_packets(
    temperature.value,
    n_packets,
    rng,
    r_phot)
# Sets the energies in units of ergs
energies *= u.erg
# Sets the frequencies in units of Hz
nus *= u.Hz
print('Energies:', energies)
print('Radii:', radii)
# -
# We set the timespan of the simulation so that each packet contributes the appropriate luminosity to the spectrum.
# +
# Time of simulation: Delta t = 1 erg / L, chosen so the ensemble's total
# energy of 1 erg emitted over Delta t reproduces the luminosity L.
t_simulation = 1 * u.erg / luminosity
print('Time of simulation:', t_simulation)
# Array of luminosity contribution by each packet (energy / Delta t)
lumin_per_packet = energies / t_simulation
print('Luminosity per packet:', lumin_per_packet)
# -
# We define important constants, and for comparison's sake, we code the Planck distribution function
# $$L_\nu (\nu)=\frac{8\pi^2 R_\mathrm{phot}^2 h\nu^3}{c^2}\frac{1}{\exp\left(\frac{h\nu}{k_BT_\mathrm{phot}}\right)-1}$$
# where $L_\nu$ is the luminosity density (see [Basic Spectrum Generation](../spectrum/basic.ipynb)) with respect to frequency, $\nu$ is frequency, $h$ is Planck's constant, $c$ is the speed of light, and $k_B$ is Boltzmann's constant:
# +
h = const.h.cgs       # Planck constant (CGS)
c2 = const.c.cgs**2   # speed of light squared (CGS)
kB = const.k_B.cgs    # Boltzmann constant (CGS)
def planck_function(nu):
    """Luminosity density L_nu of the spherical blackbody photosphere.

    L_nu = 4*pi*R^2 * (pi * B_nu) = 8*pi^2*R^2*h*nu^3 / (c^2 * (exp(h nu / k T) - 1)),
    i.e. surface area times the flux pi*B_nu — hence the pi**2 factor here.
    Uses the module-level r_phot and temperature.
    """
    return 8 * np.pi**2 * r_phot**2 * h * nu**3 / (c2 * (np.exp(h * nu / (kB * temperature)) - 1))
# -
# We plot the Planck distribution and a histogram of the generated packet distribution:
# +
# We set important quantities for making our histogram
bins = 200
nus_planck = np.linspace(min(nus), max(nus), bins)
bin_width = nus_planck[1] - nus_planck[0]
# In the histogram plot below, the weights argument is used
# to make sure our plotted spectrum has the correct y-axis scale:
# each packet contributes its luminosity divided by the bin width.
plt.hist(nus.value,
         bins=bins,
         weights=lumin_per_packet/bin_width)
# We plot the planck function for comparison
plt.plot(nus_planck, planck_function(nus_planck))
plt.xlabel('Frequency (Hz)')
plt.ylabel('Luminosity density w.r.t. frequency (erg/s/Hz)')
plt.show()
# -
# We finally plot the generated $\mu$ density distribution, followed by the generated $\theta=\arccos (\mu)$ density distribution, compared with the respective curves $\rho = 2\mu$ and $\rho = \sin(2\theta)$:
# +
# Histogram of the sampled propagation directions mu, overlaid with the
# expected probability density rho(mu) = 2*mu.
mu_grid = np.linspace(0, 1, 1000)
expected_density = 2 * mu_grid
plt.hist(mus, bins=bins, density=True)
plt.plot(mu_grid, expected_density)
plt.xlabel('Propagation direction')
plt.ylabel('Probability density')
plt.show()
# +
# Histogram of theta = arccos(mu), overlaid with the expected probability
# density rho(theta) = sin(2*theta).
theta_grid = np.linspace(0, np.pi / 2, 1000)
expected_density = np.sin(2 * theta_grid)
plt.hist(np.arccos(mus), bins=bins, density=True)
plt.plot(theta_grid, expected_density)
plt.xlabel('Angle with normal (rad)')
plt.ylabel('Probability density')
plt.show()
# -
# ## Custom Packet Source
#
# TARDIS allows for the user to input a custom function that generates energy packets instead of the basic blackbody source described here. See [Running TARDIS with a Custom Packet Source](../../io/optional/custom_source.ipynb) for more information.
| docs/physics/montecarlo/initialization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from lifelines import *
import pandas as pd
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt
# Get the list of all regular files (the per-pump CSVs) under ./rod_pump
DIR_NAME = "rod_pump"
file_names = [f for f in listdir(DIR_NAME) if isfile(join(DIR_NAME, f))]
# +
# Build one DataFrame per CSV file in ./rod_pump and collect them in a list;
# each frame is tagged with a 'Name' column holding its file name minus the
# .csv extension so the source pump stays identifiable.
df_list = []
for csv_name in file_names:
    frame = pd.read_csv(f'rod_pump/{csv_name}')
    frame['Name'] = csv_name.replace('.csv', '')
    df_list.append(frame)
len(df_list)
# +
# Fit and plot a Kaplan-Meier survival curve for every pump's DataFrame
# on one shared axis, then switch that axis to a log time scale.
ax = plt.subplot(111)
kmf = KaplanMeierFitter()
for df in df_list:
    # Duration column T and event indicator E (1 = casing pressure > 0 observed).
    df['T'] = df['time (hours)']
    df['E'] = 0
    # Bug fix: DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0;
    # .loc is the supported label/boolean-mask indexer for this assignment.
    df.loc[df["Casing Pressure (psi)"] > 0, ['E']] = 1
    T = df['T']
    E = df['E']
    kmf.fit(T, E)  # fit returns the fitter itself; no need to keep a binding
    kmf.plot(ax=ax)
ax.set_xscale('log')
ax.set_xlim()
# -
| misc/rod_pump_individual_plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Let's read the "nuclear" data.
nuclear_data = pd.read_csv("https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/boot/nuclear.csv")
nuclear_data.head()
# Let's see what is the mean cost of a power plant, in millions of USD.
print(round(nuclear_data.cost.mean(), 2))
# Let's see how many of all stations were constructed in the north-east region of the US.
nuclear_data[nuclear_data["ne"] == 1].shape[0]
# What is the general behaviour of the cost of a power plant over time?
# We need to choose between these answers: "increasing", "decreasing" or "constant".
#
# Let's make a scatter plot "cost over time" and answer this question.
plt.title("Cost over time")
plt.scatter(nuclear_data.date, nuclear_data.cost)
plt.xlabel("Time")
plt.ylabel("Cost")
plt.show()
# It seems that the general behaviour of the cost of a power plant over time is increasing.
# Let's see the correlation between these columns (pandas defaults to the
# Pearson correlation coefficient).
nuclear_data.cost.corr(nuclear_data.date)
# We can see that the correlation number is positive, so we can confirm our answer. There is an increasing behaviour between the cost and time columns.
# Some stations are built in places where there were existing stations in the past.
# Our task here is to find the power of the most powerful one from all of these.
#
# Let's see that power: the maximum capacity among plants at sites with a
# prior existing station (pr == 1).
nuclear_data[nuclear_data.pr == 1].cap.max()
| 03. Data-Visualization-Exploratory-Data-Analysis/Quiz/Data Visualization EDA Quiz.ipynb |