code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="Za8-Nr5k11fh"
# ##### Copyright 2018 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="Eq10uEbw0E4l"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="UysiGN3tGQHY"
# # Running TFLite models
# + [markdown] colab_type="text" id="2hOrvdmswy5O"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_lite/tflite_c01_linear_regression.ipynb">
# <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
# Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_lite/tflite_c01_linear_regression.ipynb">
# <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
# View source on GitHub</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="W-VhTkyTGcaQ"
# ## Setup
# + colab={} colab_type="code" id="dy4BcTjBFTWx"
import tensorflow as tf
import pathlib
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
# + [markdown] colab_type="text" id="ceibQLDeGhI4"
# ## Create a basic model of the form y = mx + c
# + colab={} colab_type="code" id="YIBCsjQNF46Z"
# Create a simple Keras model.
x = [-1, 0, 1, 2, 3, 4]
y = [-3, -1, 1, 3, 5, 7]
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(units=1, input_shape=[1])
])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=200, verbose=1)
# + [markdown] colab_type="text" id="EjsB-QICGt6L"
# ## Generate a SavedModel
# + colab={} colab_type="code" id="a9xcbK7QHOfm"
export_dir = 'saved_model/1'
tf.saved_model.save(model, export_dir)
# + [markdown] colab_type="text" id="RRtsNwkiGxcO"
# ## Convert the SavedModel to TFLite
# + colab={} colab_type="code" id="TtM8yKTVTpD3"
# Convert the model.
converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)
tflite_model = converter.convert()
# + colab={} colab_type="code" id="4idYulcNHTdO"
tflite_model_file = pathlib.Path('model.tflite')
tflite_model_file.write_bytes(tflite_model)
# + [markdown] colab_type="text" id="HgGvp2yBG25Q"
# ## Initialize the TFLite interpreter to try it out
# + colab={} colab_type="code" id="DOt94wIWF8m7"
# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# + colab={} colab_type="code" id="JGYkEK08F8qK"
# Test the TensorFlow Lite model on random input data.
input_shape = input_details[0]['shape']
inputs, outputs = [], []
for _ in range(100):
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_results = interpreter.get_tensor(output_details[0]['index'])
# Test the TensorFlow model on random input data.
tf_results = model(tf.constant(input_data))
output_data = np.array(tf_results)
inputs.append(input_data[0][0])
outputs.append(output_data[0][0])
# + [markdown] colab_type="text" id="t1gQGH1KWAgW"
# ## Visualize the model
# + colab={} colab_type="code" id="ccvQ1mEJVrqo"
plt.plot(inputs, outputs, 'r')
plt.show()
# + [markdown] colab_type="text" id="WbugMH6yKvtd"
# ## Download the TFLite model file
# + colab={} colab_type="code" id="FOAIMETeJmkc"
try:
from google.colab import files
files.download(tflite_model_file)
except:
pass
| courses/udacity_intro_to_tensorflow_lite/tflite_c01_linear_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Successive halving
# ---------------------------------
#
# This example shows how to compare multiple tree-based models using successive halving.
#
# Import the california housing dataset from [sklearn.datasets](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_california_housing.html).
# This is a small and easy to train dataset whose goal is to predict house prices.
# ## Load the data
from sklearn.datasets import fetch_california_housing
from atom import ATOMRegressor
# Load the data
X, y = fetch_california_housing(return_X_y=True)
# ## Run the pipeline
atom = ATOMRegressor(X, y, verbose=2, random_state=1)
# Compare tree-based models via successive halving
atom.successive_halving(
models=["Tree", "Bag", "ET", "RF", "LGB", "CatB"],
metric="mae",
n_bootstrap=5,
)
# ## Analyze results
# The results is now multi-index, where frac is the fraction
# of the training set used to fit the model. The model names
# end with the number of models fitted during that run
atom.results
# Plot the successive halving's results
atom.plot_successive_halving()
# Use an acronym to call all the models with the same estimator
atom.plot_errors(models=["CatB"])
# Use the number to call the models from the same run
atom.plot_errors(models="3")
| examples/successive_halving.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os,re, random,cv2
# -
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense,BatchNormalization,Dropout
from keras import backend as K
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
# # Prepare Traning Data
TRAIN_DIR_CAT = '../input/dogs-vs-cats/dataset/dataset/training_set/cats/'
train_img_cats = [TRAIN_DIR_CAT+i for i in os.listdir(TRAIN_DIR_CAT)] # use this for full dataset
TRAIN_DIR_DOG = '../input/dogs-vs-cats/dataset/dataset/training_set/dogs/'
train_img_dogs = [TRAIN_DIR_DOG+i for i in os.listdir(TRAIN_DIR_DOG)] # use this for full dataset
def make_data(list_img,enc):
X=[]
y=[]
count = 0
random.shuffle(list_img)
for img in list_img:
#X.append(Image.open(img).resize((inp_wid,inp_ht), Image.ANTIALIAS))
X.append(cv2.resize(cv2.imread(img), (inp_wid,inp_ht), interpolation=cv2.INTER_CUBIC))
y.append(enc)
return X,y
inp_wid = 128
inp_ht = 128
batch_size = 16
X_cat,y_cat = make_data(train_img_cats,0)
X_dog,y_dog = make_data(train_img_dogs,1)
c = list(zip(X_cat+X_dog,y_cat+y_dog))
random.shuffle(c)
X,Y = list(zip(*c))
print(len(X))
print(len(Y))
X_train, X_val, Y_train, Y_val = train_test_split(X[0:4000],Y[0:4000], test_size=0.125, random_state=1)
n_train = len(X_train)
n_val = len(X_val)
# # Neural net
# +
model = Sequential()
# layer num 1
model.add(Conv2D(32,(3,3),input_shape=(inp_wid,inp_ht,3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# layer num 2
model.add(Conv2D(64,(3,3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# layer num 3
model.add(Conv2D(128,(3,3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# layer num 4
model.add(Conv2D(64,(3,3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
model.summary()
# -
# # Training Generator
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
val_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
train_generator = train_datagen.flow(np.array(X_train), Y_train, batch_size=batch_size)
val_generator = val_datagen.flow(np.array(X_val), Y_val, batch_size=batch_size)
# ## Callbacks
earlystop = EarlyStopping(patience=10)
lrr = ReduceLROnPlateau(monitor='val_acc',
patience=2,
verbose=1,
factor=0.5,
min_lr=0.00001)
callbacks = [earlystop,lrr]
history = model.fit_generator(
train_generator,
steps_per_epoch=n_train // batch_size,
epochs=32,
validation_data=val_generator,
validation_steps=n_val // batch_size,
callbacks = callbacks
)
model.save_weights("model_weights.h5")
model.save('model_keras.h5')
| cat-n-dog.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Load the MNIST data.
from keras.datasets import mnist
(train_images, train_labels), (val_images, val_labels) = mnist.load_data()
# Show a sample image
from matplotlib import pyplot as plt
plt.imshow(train_images[0,:,:])
plt.colorbar()
train_images = train_images.reshape((60000, 28, 28, 1))
train_images = train_images.astype("float32") / 255
val_images = val_images.reshape((10000, 28, 28, 1))
val_images = val_images.astype("float32") / 255
from keras.utils import to_categorical
train_labels = to_categorical(train_labels)
val_labels = to_categorical(val_labels)
from keras import layers
from keras import models
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation="relu", input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation="relu"))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation="relu"))
model.summary()
model.add(layers.Flatten())
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dense(10, activation="softmax"))
model.summary()
model.compile(optimizer="adam",
loss="categorical_crossentropy",
metrics=["accuracy"])
hist_MNIST = model.fit(train_images, train_labels, epochs=5, batch_size=64,
validation_data=(val_images, val_labels))
history_dict = hist_MNIST.history
train_loss = history_dict["loss"]
val_loss = history_dict["val_loss"]
epochs = range(1, len(train_loss) + 1)
plt.plot(epochs, train_loss, "b-", label="train")
plt.plot(epochs, val_loss, "r-", label="validation")
plt.legend()
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.savefig("MNIST_loss.pdf")
plt.show()
train_acc = history_dict["acc"]
val_acc = history_dict["val_acc"]
plt.plot(epochs, train_acc, "bo", label="train")
plt.plot(epochs, val_acc, "ro", label="validation")
plt.legend()
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.savefig("MNIST_accuracy.pdf")
plt.show()
| notes/10.1 MNIST revisited.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.3 64-bit (''base'': conda)'
# language: python
# name: python37364bitbaseconda65b5f305a1974c36abb2297a98801d43
# ---
from Ardi import UnivariateLinearRegression
dataset_path = 'dataset_test/student_scores.csv'
linreg = UnivariateLinearRegression(random_state=99999)
linreg.take_data_csv(dataset_path)
linreg.train()
linreg.plot_before(grid=True)
linreg.plot_after(line_color='red', xlabel='Hours', ylabel='Socre', grid=True)
linreg.plot_errors(grid=True)
| Ardi/run_test2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data persistence and data caching
# This notebook presents data persistence and data caching features in steps.
# * Persistence helps to avoid re-running early steps of a pipeline when subsequent steps are changed
# * Caching makes it possible to run complex, multi-path pipelines without re-computing the results of early steps
#
# Note that the features presented here are different from *model persistence*, which saves the transformers as the steps are trained.
# +
# %load_ext autoreload
# %autoreload 2
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from sklearn.metrics import log_loss
import matplotlib.pyplot as plt
# %matplotlib inline
import os
# -
from steppy.base import Step, BaseTransformer
from steppy.adapter import Adapter, E
EXPERIMENT_DIR_A = './ex4a'
EXPERIMENT_DIR_B = './ex4b'
# +
import shutil
# By default pipelines will try to load previously trained models so we delete the cache to ba sure we're starting from scratch
shutil.rmtree(EXPERIMENT_DIR_A, ignore_errors=True)
shutil.rmtree(EXPERIMENT_DIR_B, ignore_errors=True)
# -
# ## Data
# This time we'll have a look at text classification. We'll use the classic 20newsgroups dataset, but without the headers, footers or quotes which would make the task too easy.
# +
from sklearn.datasets import fetch_20newsgroups
newsgroups_train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'))
newsgroups_test = fetch_20newsgroups(subset='test', remove=('headers', 'footers', 'quotes'))
# +
from sklearn.model_selection import train_test_split
X_train, y_train = newsgroups_train.data, newsgroups_train.target
X_fit, X_val, y_fit, y_val = train_test_split(X_train, y_train, test_size=0.1, stratify=y_train, random_state=42)
# -
# Let's use a label encoder to ensure our labels are well-behaved
from sklearn.preprocessing import LabelEncoder
input_label_enc = LabelEncoder().fit(newsgroups_train.target)
# This time we have pre-defined training and test sets but we would like to have a hold-out set of training data available for ensembling
# +
data_fit = {'input':
{
'text': X_fit,
'label': input_label_enc.transform(y_fit),
}
}
data_val = {'input':
{
'text': X_val,
'label': input_label_enc.transform(y_val),
}
}
data_test = {'input':
{
'text': newsgroups_test.data,
'label': input_label_enc.transform(newsgroups_test.target),
}
}
def print_data_summary(data, title):
print(title)
print(' Num. documents: {}'.format(len(data['input']['text'])))
print(' Num. categories: {}'.format(len(np.unique(data['input']['label']))))
for data, title in [(data_fit, 'Model fitting data'), (data_val, 'Validation data'), (data_test, 'Testing data')]:
print_data_summary(data, title)
# -
# ## Text processing transformers
# We define a transformer that does count vectorization on our documents - again, we can just wrap the one from sklearn:
# +
from sklearn.feature_extraction.text import CountVectorizer
class CountVecTransformer(BaseTransformer):
def __init__(self, max_features):
self.estimator = CountVectorizer(max_features=max_features)
def fit(self, X):
self.estimator.fit(X)
return self
def transform(self, X, **kwargs):
X_tfm = self.estimator.transform(X)
return {'X': X_tfm}
def persist(self, filepath):
joblib.dump(self.estimator, filepath)
def load(self, filepath):
self.estimator = joblib.load(filepath)
return self
# -
# Similarly for the IDFs in our TF-IDF model:
# +
from sklearn.feature_extraction.text import TfidfTransformer
class StepsTfidfTransformer(BaseTransformer):
def __init__(self):
self.estimator = TfidfTransformer()
def fit(self, X):
self.estimator.fit(X)
return self
def transform(self, X, **kwargs):
X_tfm = self.estimator.transform(X)
return {'X': X_tfm}
def persist(self, filepath):
joblib.dump(self.estimator, filepath)
def load(self, filepath):
self.estimator = joblib.load(filepath)
return self
# -
# This will give us a bunch of features to train on.
# ## Linear model
# As a first attempt, we'll try to do our predictions with (sparse) logistic regression
# +
from sklearn.linear_model import LogisticRegression
class SparseLogRegProbaTransformer(BaseTransformer):
def __init__(self):
self.estimator = LogisticRegression(penalty='l1')
def fit(self, X, y):
self.estimator.fit(X, y)
return self
def transform(self, X, **kwargs):
y_proba = self.estimator.predict_proba(X)
return {'y_proba': y_proba}
def persist(self, filepath):
joblib.dump(self.estimator, filepath)
def load(self, filepath):
self.estimator = joblib.load(filepath)
return self
# +
count_vec_step = Step(name='CountVec',
transformer=CountVecTransformer(max_features=1000),
input_data=['input'],
adapter=Adapter({'X': E('input', 'text')}),
experiment_directory=EXPERIMENT_DIR_A)
tfidf_step = Step(name='TF-IDF',
transformer=StepsTfidfTransformer(),
input_steps=[count_vec_step],
experiment_directory=EXPERIMENT_DIR_A,
persist_output=True,
load_persisted_output=True # This breaks when switching from training data to val data or test data!
)
logreg_step = Step(name='SparseLogReg',
transformer=SparseLogRegProbaTransformer(),
input_steps=[tfidf_step],
input_data=['input'],
adapter=Adapter({'X': E('TF-IDF', 'X'),
'y': E('input', 'label')
}),
experiment_directory=EXPERIMENT_DIR_A)
# -
# Note that we have passed `persist_output=True` to the `tfidf_step` constructor. This will make this step save its output so that once it's been computed once, it can later just be loaded from disk. Therefore, we will be able to work on the logistic regression classifier without having to re-compute the outputs of its ancestor nodes. Additionally, we have also set `load_persisted_output=True`, which tells this step to load the previously computed and saved outputs instead of processing the data.
logreg_step
preds_linear_fit = logreg_step.fit_transform(data_fit)
# +
from sklearn.metrics import accuracy_score
acc_linear_fit = accuracy_score(y_true=data_fit['input']['label'], y_pred=np.argmax(preds_linear_fit['y_proba'], axis=1))
print('Model fitting accuracy: {:.4f}'.format(acc_linear_fit))
# -
# Bug workaround: manually delete saved output when switching datasets
os.remove(os.path.join(EXPERIMENT_DIR_A, 'outputs', 'TF-IDF'))
preds_linear_val = logreg_step.transform(data_val)
acc_linear_val = accuracy_score(y_true=data_val['input']['label'], y_pred=np.argmax(preds_linear_val['y_proba'], axis=1))
print('Validation accuracy: {:.4f}'.format(acc_linear_val))
# ## Random forest model
# As an alternative, we'll also build a neural net model on top of the same TF-IDF features. We'll use the multi-layer perceptron (MLP) which is available in Scikit-learn
# +
from sklearn.ensemble import RandomForestClassifier
class RfClfTransformer(BaseTransformer):
def __init__(self, n_estimators, max_depth):
self.estimator = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth)
def fit(self, X, y):
self.estimator.fit(X, y)
return self
def transform(self, X, **kwargs):
y_proba = self.estimator.predict_proba(X)
return {'y_proba': y_proba}
def persist(self, filepath):
joblib.dump(self.estimator, filepath)
def load(self, filepath):
self.estimator = joblib.load(filepath)
return self
# -
rf_step = Step(name='RF',
transformer=RfClfTransformer(n_estimators=200, max_depth=8),
input_steps=[tfidf_step],
input_data=['input'],
adapter=Adapter({'X': E('TF-IDF', 'X'),
'y': E('input', 'label')
}),
experiment_directory=EXPERIMENT_DIR_A)
rf_step
# OK, so it was easy to add a different model on top of TF-IDF features. Indeed, this time we will be able to use the **saved** TF-IDF output, so we can get straight to fitting the random forest.
# +
# Bug workaround: manually delete saved output when switching datasets
os.remove(os.path.join(EXPERIMENT_DIR_A, 'outputs', 'TF-IDF'))
preds_rf_fit = rf_step.fit_transform(data_fit)
# -
acc_rf_fit = accuracy_score(y_true=data_fit['input']['label'], y_pred=np.argmax(preds_rf_fit['y_proba'], axis=1))
print('Model fitting accuracy: {:.4f}'.format(acc_rf_fit))
# +
# Bug workaround: manually delete saved output when switching datasets
os.remove(os.path.join(EXPERIMENT_DIR_A, 'outputs', 'TF-IDF'))
preds_rf_val = rf_step.transform(data_val)
# -
acc_rf_val = accuracy_score(y_true=data_val['input']['label'],
y_pred=np.argmax(preds_rf_val['y_proba'], axis=1))
print('Validation accuracy: {:.4f}'.format(acc_rf_val))
# ## Ensembling
# We'll do simple ensembling by averaging predictions:
class AvgTransformer(BaseTransformer):
def __init__(self):
pass
def fit(self, y_proba_1, y_proba_2):
return self
def transform(self, y_proba_1, y_proba_2, **kwargs):
y_proba = (y_proba_1 + y_proba_2) / 2
return {'y_proba': y_proba}
def persist(self, filepath):
joblib.dump({}, filepath)
def load(self, filepath):
self.estimator = joblib.load(filepath)
return self
ens_step = Step(name='Ensembler',
transformer=AvgTransformer(),
input_steps=[logreg_step, rf_step],
adapter=Adapter({'y_proba_1': E('SparseLogReg', 'y_proba'),
'y_proba_2': E('RF', 'y_proba'),
}),
experiment_directory=EXPERIMENT_DIR_A)
ens_step
# Note that for the TF-IDF step we set `cache_output` to `True`. What does this do? Note that the output of the TF-IDF step is used both by RF and SparseLogReg. This means that when we run the Ensemble node on some data, it will in turn call MLP and SparseLogReg, which will both call TF-IDF. Without caching, this would mean we're computing the output of the TF-IDF step twice, which is definitely a waste of precious compute time and could possibly lead to some inconsistencies in the data (e.g. if the TF-IDF step was randomized in some way). Caching solves both problems without keeping anything in memory - the caching is done on disk, not in RAM.
# +
os.remove(os.path.join(EXPERIMENT_DIR_A, 'outputs', 'TF-IDF')) # Bug workaround: manually delete saved output when switching datasets
preds_ens_val = ens_step.fit_transform(data_val)
os.remove(os.path.join(EXPERIMENT_DIR_A, 'outputs', 'TF-IDF')) # Bug workaround: manually delete saved output when switching datasets
preds_ens_test = ens_step.transform(data_test)
# +
acc_ens_val = accuracy_score(y_true=data_val['input']['label'], y_pred=np.argmax(preds_ens_val['y_proba'], axis=1))
print('Validation accuracy: {:.4f}'.format(acc_ens_val))
acc_ens_test = accuracy_score(y_true=data_test['input']['label'], y_pred=np.argmax(preds_ens_test['y_proba'], axis=1))
print('Test accuracy: {:.4f}'.format(acc_ens_test))
# -
# ## Caching: saving output within one run only
# Sometimes you want to keep your output within one run of your pipeline but discard it at the end. This use case is handled by **caching**. Let's build a new pipeline that uses caching instead of saving to avoid re-computing results:
# +
new_count_vec_step = Step(name='CountVec',
transformer=CountVecTransformer(max_features=1000),
input_data=['input'],
adapter=Adapter({'X': E('input', 'text')}),
experiment_directory=EXPERIMENT_DIR_B)
new_tfidf_step = Step(name='TF-IDF',
transformer=StepsTfidfTransformer(),
input_steps=[new_count_vec_step],
experiment_directory=EXPERIMENT_DIR_B,
cache_output=True)
new_logreg_step = Step(name='SparseLogReg',
transformer=SparseLogRegProbaTransformer(),
input_steps=[new_tfidf_step],
input_data=['input'],
adapter=Adapter({'X': E('TF-IDF', 'X'),
'y': E('input', 'label')
}),
experiment_directory=EXPERIMENT_DIR_B)
new_rf_step = Step(name='RF',
transformer=RfClfTransformer(n_estimators=200, max_depth=8),
input_steps=[new_tfidf_step],
input_data=['input'],
adapter=Adapter({'X': E('TF-IDF', 'X'),
'y': E('input', 'label')
}),
experiment_directory=EXPERIMENT_DIR_B)
new_ens_step = Step(name='Ensembler',
transformer=AvgTransformer(),
input_steps=[new_logreg_step, new_rf_step],
adapter=Adapter({'y_proba_1': E('SparseLogReg', 'y_proba'),
'y_proba_2': E('RF', 'y_proba')
}),
experiment_directory=EXPERIMENT_DIR_B)
# -
new_ens_step
new_ens_step.clean_cache()
new_preds_ens_fit = new_ens_step.fit_transform(data_fit)
new_ens_step.clean_cache()
# If you look carefully at the training log above, you should see that when training the second branch, TF-IDF just loaded outputs instead of re-computing them.
new_ens_step.clean_cache()
new_preds_ens_val = new_ens_step.transform(data_val)
new_ens_step.clean_cache()
new_ens_step.clean_cache()
new_preds_ens_test = new_ens_step.transform(data_test)
new_ens_step.clean_cache()
# +
new_acc_ens_fit = accuracy_score(y_true=data_fit['input']['label'], y_pred=np.argmax(new_preds_ens_fit['y_proba'], axis=1))
print('New fitting accuracy: {:.4f}'.format(new_acc_ens_fit))
new_acc_ens_val = accuracy_score(y_true=data_val['input']['label'], y_pred=np.argmax(new_preds_ens_val['y_proba'], axis=1))
print('New validation accuracy: {:.4f}'.format(new_acc_ens_val))
new_acc_ens_test = accuracy_score(y_true=data_test['input']['label'], y_pred=np.argmax(new_preds_ens_test['y_proba'], axis=1))
print('New test accuracy: {:.4f}'.format(new_acc_ens_test))
# -
# Now you should be familiar with data persistence features. The next few notebooks will focus on building deep learning pipelines with steps.
| tutorials/4-caching-persistence.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (teaching-3.8.3)
# language: python
# name: teaching-3.8.3
# ---
# # Assignment 4b MA : Build your own corpus exploration tool
#
# **Deadline for Assignment 4a+b: Friday October 15, 2021, before 14:30 via Canvas (Assignment 4)**
#
#
# In this assignment, you're going to build your own tool for exploring a the **Parallel Meaning Bank** (PMB). This resource is a **parallel corpus**, which means that it contains the **same documents translated into multiple languages**. Such resources are very valuable for many aspects of linguistics and Natural Language Processing (NLP), but most importantly for Machine Translation (ML).
#
# For this part of assignment 4, you will submit two python scripts called:
#
# * `explore_pmb.py`
# * `utils.py`
#
# The corpus contains a lot of data, but not every document is translated into every language. Therefore, we will build a tool which explores different aspects of coverage. Your tool will be able to:
#
# * explore the **overall coverage per language**
# * explore the the **parallel coverage of a given language pair** (i.e. how many documents and tokens exist in a language pair?)
# * **browse parallel text** in given language pairs
#
# Before diving into building the tool, we're going to guide you through a couple of warm-up examples. You can use them to explore the data structure and write your code. It is permitted to copy-paste bits of code (you will have to modify them to solve all exercises).
#
# The assignment is structured as follows:
#
# 1. Understanding the data structure (code snippets to guide you through the corpus)
# 2. Writing functions (writing the actual code)
# 3. Putting the tool together (combining the code)
# 4. Testing and submission (a final check of whether your code does what it is supposed to do)
#
#
# You can learn more about the PMB [here](https://pmb.let.rug.nl/).
#
# If you have **questions** about this chapter, please contact us at <EMAIL>. Questions and answers will be collected in [this Q&A document](https://docs.google.com/document/d/1551Db87zckRPbKDosZ4105htEUxVWZu9ejDj3MM8qck/edit?usp=sharing), so please check it before you email.
#
# **Tip**: Read the entire assignment before you start writing code. Try to understand the tool we're building before you start. Making notes with pen and paper can be very helpful.
# ## 1. Understanding the data structure
#
# In this part, we guide you through the data structure. You can use the code below for the rest of your assignment. You can play with the code and add things to it, but you will not receive points in this part. Its purpose is to make you familiar with the data structure.
#
# ### 1.a Download the data
#
# 1.) Please go to this website: [here](https://pmb.let.rug.nl/data.php)
#
# 2.) Select version 2.1.0 (the latest version is too big for our purposes) and store the zip file as `PMB/pmb-2.1.0.zip` on your computer (remember where).
#
# 3.) Unpack the data. You can do this from the terminal by navigating to the directory using `cd`. You should be able to run `unzip pmb-2.1.0.zip` to unzip the file. Alternatively, you can simply unzip by clicking on it. Attention: Unpacking the file may take a while.
#
# Use the cell below to assign the path to the data to a variable. We will only consider the gold data for this assignment, therefore you can add the gold directory to the path.
#
# Path: `'PMB/pmb-2.1.0/data/gold/'`
#
# **Please run the following cell to check if your data are in the right place. If they are, it will not print anything.**
# +
import os
#my_path = #insert path to the directory containing the PMB on your computer
# e.g.:
# my_path = '/Users/piasommerauer/Data/'
path_pmb = f'{my_path}PMB/pmb-2.1.0/data/gold'
assert os.path.isdir(path_pmb), 'corpus data not found'
# -
# ### 1.b Parallel documents
# Before we can build anything, we have to understand how the data are strucutred. We start by looking at a single document.
#
# Parallel documents are stored in the same document directory (d+number). The filenames indicate the language (e.g. en = English). The data we're interested in are stored in .xml format. Run the cell below to inspect the filepaths of a single document. Feel free to modify the path to inspect other documents.
# +
import glob
test_part = 'p27'
test_document = 'd0852'
test_doc_path = f'{path_pmb}/{test_part}/{test_document}/'
test_doc_files = glob.glob(f'{test_doc_path}*.xml')
for f in test_doc_files:
print(f)
# -
# ### 1.c XML structure of a single document
#
# Below, we access a single document and load the xml structure using lxml.etree. Run the cell to print the xml tree.
#
# Explore the document structure and try to answer these questions:
#
# * Where can you find the full text of the document?
# * Where can you find information about each token in the text?
# +
from lxml import etree
test_doc_path_en = test_doc_path+'en.drs.xml'
doc_tree = etree.parse(test_doc_path_en)
doc_root = doc_tree.getroot()
etree.dump(doc_root, pretty_print=True)
# -
# ## 2. Writing functions
#
# In this part of the assigment, we guide you through writing the functions for your tool. Feel free to use the notebook for exploration, but your final functions should be stored in `utils.py`.
# ### 2.a Get all token elements of a document in a given language
#
# Write a function which fulfills the following requirements:
#
# * Positional parameter: path to the document in a particular language
# * Output: list of token elements (the token elements are called 'tagtoken')
# +
def get_tokens(path_to_doc):
# your code here
pass
# Test you function
test_part = 'p27'
test_document = 'd0852'
language = 'en'
test_doc_path = f'{path_pmb}/{test_part}/{test_document}/{language}.drs.xml'
# Function call
tokens = get_tokens(test_doc_path)
assert len(tokens) == 6 and type(tokens[1]) == etree._Element, 'token list not correct'
# -
# ### 2.b Get token and pos from a token element
#
# Write a function which fulfills the following requirements:
#
# * Positional parameter: token element
# * Output: token (string) and part of speech tag (string) of the token element
#
# An example token element is shown below. (You can use it for testing.)
# +
test_token_str = """
<tagtoken xml:id="i1002">
<tags>
<tag type="verbnet" n="0">[]</tag>
<tag type="tok">'m</tag>
<tag type="sym">be</tag>
<tag type="lemma">be</tag>
<tag type="from">1</tag>
<tag type="to">3</tag>
<tag type="pos">VBP</tag>
<tag type="sem">NOW</tag>
<tag type="wordnet">O</tag>
</tags>
</tagtoken>
"""
test_token = etree.fromstring(test_token_str)
print(test_token)
# +
def get_token_pos(token_element):
pass
# Test your function using the first token
token, pos = get_token_pos(test_token)
assert token == "'m" and pos == 'VBP', 'token and pos not correct'
# -
# ### 2.c Get document text
#
# Define a function with the following requirements:
#
# * Positional parameter: filepath of a document in a particular language (i.e. full, relativ filepath)
# * Output: the text of the document as a string
#
# **Hint**:
#
# There are two options to get the document text of a file:
#
# * Option 1: Access the comment indicated by `<!-- -->`. Look at the file above to find the comment. You will see that it contains the entire text represented in the xml file. You can access it by iterating over the child-elements of the root. Try this out in the notebook before defining your function. You can get started using the code below.
#
#
# * Option 2: Use the tokens. You can collect all the tokens in a document using the functions we have defined above. Once you have all tokens, you can use a string method to join them with whitespaces between them.
#
# Only implement **one** of these options.
# +
# Code snippet for option 1
# use the test document
test_doc_path_en = test_doc_path
# load
doc_tree = etree.parse(test_doc_path_en)
# get root
root = doc_tree.getroot()
# iterate over child-elements
for ch in root.getchildren():
print('tag', ch.tag)
print('text', ch.text)
# +
def get_doc_text(path_to_doc):
#Your code here
pass
# Test you function
test_part = 'p27'
test_document = 'd0852'
language = 'en'
test_doc_path = f'{path_pmb}/{test_part}/{test_document}/{language}.drs.xml'
text = get_doc_text(test_doc_path)
assert text == "I 'm not tired at~all .", 'doc text not correct'
# -
# ### 2.d Sort documents on languages
#
# To explore the coverage of the corpus, it is convenient to store the documents as sets mapped to their language. We can then use set methods (i.e. intersection) to check which documents exist in a given language pair.
#
# Write a function which fulfills the following criteria:
#
# * mandatory positional argument: path to the corpus (e.g. '../../../Data/PMB/pmb-2.1.0/data/gold')
# * output: a dictionary of the following format:
# `{
# 'language1': {'path_to_doc1', 'path_to_doc2', ...},
# 'language2': {'path_to_doc1', 'path_to_doc4', ...},
# 'language3': {'path_to_doc2', 'path_to_doc3', ...},
# }`
#
#
# Hints:
#
# * filepaths are strings; you can manipulate them using string methods (e.g. split on '/' or '.').
# * The os mudule has a convenient way of extracting the filename from a long path (i.e. the last bit of the path): os.path.basename(your_path)
# * Feel free to use [defaultdict](https://docs.python.org/3/library/collections.html#collections.defaultdict) (with a set as the default value) (`from collections import defaultdict`)
# +
# Example for filepath manipulation:
import os
my_path = '../../some/dir/containing/a/file/with/an/interesting/name.txt'
f_name = os.path.basename(my_path)
print(f_name)
remaining_path = my_path.rstrip(f_name)
print(remaining_path)
name, extension = f_name.split('.')
print(name, extension)
# +
def sort_documents(path_pmb):
pass
# Test you function:
language_doc_dict = sort_documents(path_pmb)
n_en = len(language_doc_dict['en'])
n_it = len(language_doc_dict['it'])
n_de = len(language_doc_dict['de'])
n_nl = len(language_doc_dict['nl'])
assert n_en == 4555 and n_it == 635 and n_de == 1175 and n_nl == 586, 'sorting not correct'
# -
# ## 3. Putting the tool together
#
# Congratulations! You've written most of the code already!
#
# From now on, we will mostly use the functions defined above and combine them in the file called `explore_pmb.py`.
#
# Some code snippets are provided here to help you along the way.
# ### 3.a Printing statistics for all languages
#
# Let's start by exploring the coverage of all languages individually. In `explore_pmb.py`, write the following code:
#
# * Import the function`sort_docs`, call it and assign the output dictionary to a variable called `language_doc_dict`. Don't forget to define the path to the corpus, which you use as function input.
# * Create a list of all languages (hint: you can simply use the keys of `language_doc_dict`).
# * For each language, print the following:
# `[Language]: num docs: [number of documents], num tokens: [number of tokens]
#
# Hints:
#
# * Each document is unique - you can simply count the elements in the sets to get the number of documents.
# * Use the function `get_tokens` to access the tokens. Then count them.
# * You will most likly use two nested loops: An outer loop for languages and an inner loop to access the tokens in the documents.
# * Use f-strings to print the results.
#
# +
# some code to help you along the way (you can also start from scratch)
languages = # your code here
for languagage in languages:
n_docs = # your code here
n_tokens = # your code here
docs = # your code here
# your code here
for doc in docs:
path_to_doc = f'{doc}/{language}.drs.xml'
tokens = get_tokens(path_to_doc)
# your code here
print(f'{language}: num docs: {n_docs}, num tokens: {n_tokens}')
# -
# ### 3.b Printing statistics for language pairs
#
# We also want to explore the coverage of **parallel data** for the language pairs in the corpus. To do this, use an additional loop to iterate over all possible language pairs in the parallal meaning bank and print the number of documents which exist for both languages.
#
# Use the function below to generate the language pairs. Simply copy-paste it into the script called `utils.py` and import it into `explore_pmb.py`. Use the cell below to explore how it works.
#
# The list of language pair should look similar to this (and contain all possible pairs):
#
# `pairs = [(‘nl’, ‘en’), (‘it’, ‘de’), (‘en’, ‘it’)]`
#
# Print the following for each language pair (use f-strings):
#
# `Coverage for parallel data in [language_1] and [language_2]: [number of documents]`
#
#
# Hints:
#
# * You can unpack tuples in a loop.
# * Use a set method to get the document paths covered by both languages. Then simply count the paths.
# * You do not have to match the file-contents. Instead, use the information provided in the filepaths (in a previous step, you have sorted your corpus files according to language). The file paths in the sets (representing the documents) are supposed to consist of the base names only (i.e. no directory paths). You can use set operations to get the overlap between two languages.
#
# +
def get_pairs(language_list):
"""Given a list, return a list of tuples of all element pairs."""
pairs = []
for l1 in language_list:
for l2 in language_list:
if l1 != l2:
if (l1, l2) not in pairs and (l2, l1) not in pairs:
pairs.append((l1, l2))
return pairs
language_list = ['a', 'b', 'c']
pairs = get_pairs(language_list)
print(pairs)
# +
# Here's a start (feel free to modify this)
for lang1, lang2 in pairs:
docs_lang1 = language_doc_dict[lang1]
docs_lang2 = language_doc_dict[lang2]
# you code here
# -
# ### 3.c Explore parallel documents
#
# As a final step, we want let the user browse the parallel documents in a chosen language pair. Write the following code (in `explore_pmb.py`):
#
# * use input() to define two variables: language_1 and language_2
# * get the document paths for all documents covered by both languages
# * Loop over the documents and print the documents in both languages. After each document, ask the user whether they want to continue. If the answer is 'no', stop. Else, show the next.
#
# ### Bonus: Come up with your own comparison
#
# Got insterested in parallel data? Extract a comparison you find interesting!
#
# **This is an additional exercise - it is not required to complete this to get a full score.**
#
# If you complete this exercise, you can earn up to 3 additional points which can be used to make up for other points you missed. Note that you cannot get more than a full score.
# ## 4. Testing & submission
#
# Congratulations! You've built a corpus exploration tool! Before you submit, please make sure to test your code:
#
# * Can you run the script `explore_pmb.py` from the command line?
# * Do you get a general corpus overview (see 3.a)?
# * Do you get an overview of language pairs (see 3.b)?
# * Are you asked to provide a language pair and do you see examples of parallel documents once you entered a pair (see 3.c?)
#
# If you did not manage to complete all of the exercises, submit what you have and, if possible, explain how you were going to solve them. You get points for intermediate steps.
#
# **Please submit python scripts only. You can use this notebook for exploration and development, but we will not consider the code written here.**
#
| Assignments/ASSIGNMENT-4b-MA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.3
# language: julia
# name: julia-1.5
# ---
# - By [lazarusA](https://lazarusa.github.io/Webpage/index.html)
# +
using CairoMakie, Random
include("makieTheme1.jl") # don't forget to include the theme.
Random.seed!(123)
n = 15
x, y, color = rand(n), rand(n), rand(n)
cmaps = [:cool, :viridis, :plasma, :inferno, :thermal,
:leonardo, :winter, :spring, :ice]
markers = [:+, :diamond, :star4, :rtriangle, :rect,
:circle, :pentagon, :cross, :star5]
function FigGridHeatSharedCbar()
fig = Figure(resolution = (500, 400))
c = 1
for i in 1:2, j in 1:2:3
ax = fig[i, j] = Axis(fig, aspect = 1, xgridvisible = false, ygridvisible = false)
pnts = heatmap!(rand(10,10), colormap= cmaps[j], colorrange=(0, 1))
ax.xticks = [0,10]
ax.yticks = [0,10]
ax.xticklabelsize = 14
ax.yticklabelsize = 14
cbar = Colorbar(fig, pnts, ticklabelsize = 12, height = Relative(3/4), tickwidth = 2)
cbar.ticks = [0, 0.5, 1]
fig[1:2, j+1] = cbar
c+=1
end
fig
end
fig = FigGridHeatSharedCbar()
#save("/results/FigGridHeatSharedCbar.svg", fig, pt_per_unit = 0.7);
save("./results/FigGridHeatSharedCbar.png", fig, px_per_unit = 2)
fig
| FigGridHeatSharedCbar.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
from sklearn.linear_model import LinearRegression
# %matplotlib inline
df = pd.read_csv('data_galton.csv', header='infer')
df.head()
# cross table will show us how many time values are there in the dataset.
myTable = pd.crosstab(df.parent,df.child)
myTable
nrows = myTable.shape[0]
ncols = myTable.shape[1]
# the calues on the x-axies is called headers or columns
childHeights = list(myTable.columns)
# the values on the y axes are called the indexs
parentHeights = list(myTable.index)
# This will tell about the location of x and y on each index
combo = [ (x,y,myTable.loc[x,y]) for x in parentHeights for y in childHeights]
combo = pd.DataFrame(combo, columns = ['x','y','freq'])
# +
# Show the Scatter plot.
plt.scatter(combo.x, combo.y, s=combo.freq*6, c='green',alpha=0.5)
plt.xlabel('Parent height')
plt.ylabel('Child height')
plt.title("Galton's Data")
plt.show()
# By visualization what we have learn:
#the middle point on the graph will tell us that the frequence of the ages is much higher
# +
# Linera regression will tell us continus predication, not one result
# If you give the values of the age of the parent then we will get the height of the child
lm = LinearRegression(fit_intercept= True)
# -
lm.fit(df[['parent']],df[['child']])
# this will find the vales of the formula which is y = a+bx
lm.intercept_
lm.coef_
predY= lm.predict(df[['parent']])
# Show the Scatter plot + Regression line.
plt.plot(df.parent.values,predY,c = 'red',linestyle='-',linewidth=0.5)
plt.scatter(combo.x, combo.y, s=combo.freq*2, c='blue',alpha=0.5)
plt.xlabel('Parent height')
plt.ylabel('Child height')
plt.title("Galton's Regression")
plt.show()
# ## Exercise 06
#
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston
from sklearn import metrics
# %matplotlib inline
# -
data= load_boston()
data.keys()
# +
# Display the description on the data.
print(data['DESCR'])
# +
# The explanatory variables.
X = data['data']
header = data['feature_names']
# -
# The response variable.
Y = data['target']
# the data will be in the form of into column
Y = Y.reshape(-1, 1)
#if the axes is 0 then there will be error , dementation will not be the same
df = pd.DataFrame(np.append(X,Y,axis = 1))
df.columns = list(header)+['PRICE']
df.shape
df.head(5)
df.tail(5)
df.describe()
np.round(df.corr(),2)
# Visualize the correlation matrix.
sns.heatmap(df.corr(),cmap='coolwarm')
plt.show()
plt.scatter(X[:,5],Y[:,0],c = 'g',s=15,alpha=0.5)
plt.xlabel('RM')
plt.ylabel('PRICE')
plt.show()
lm = LinearRegression(fit_intercept=True)
lm.fit(X,Y)
lm.intercept_
lm.coef_
# Display the parameters as a DataFrame.
parametersDF = pd.DataFrame(lm.coef_,index=['Parameter Value'],columns=header)
parametersDF['Intercept'] = lm.intercept_[0]
parametersDF
predY = lm.predict(X)
# +
# Display real Y vs predicted Y.
plt.scatter(Y,predY,c = 'blue', s=15, alpha=0.5)
plt.xlabel('REAL PRICE')
plt.ylabel('PREDICTED PRICE')
plt.show()
# fro linear regression you must haive equal number of x axis and y axies
# -
# Calculate the correlation between the real Y and predicted Y.
pd.Series(Y[:,0]).corr(pd.Series(predY[:,0]))
# +
# Coefficient of determination (R^2):
lm.score(X,Y)
# +
# Split the dataset.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=123)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape)
# +
# Split the dataset.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=123)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape)
# -
# predY_in = in-sample prediction of Y.
# predY_out = out-of-sample prediction of Y.
lm = LinearRegression()
lm.fit(X_train,Y_train)
predY_in = lm.predict(X_train)
predY_out = lm.predict(X_test)
lm = LinearRegression()
lm.fit(X_train,Y_train)
predY_in = lm.predict(X_train)
predY_out = lm.predict(X_test)
print('In-sample MSE is : ' + str(metrics.mean_squared_error(Y_train, predY_in)))
print('Out-of-sample MSE is : ' + str(metrics.mean_squared_error(Y_test, predY_out)))
print('-'*50)
print('In-sample RMSE is : ' + str(np.sqrt(metrics.mean_squared_error(Y_train, predY_in))))
print('Out-of-sample RMSE is : ' + str(np.sqrt(metrics.mean_squared_error(Y_test, predY_out))))
# +
# Calculate residual.
residual = Y_train - predY_in
# Q: Can you check "visually" that the mean = 0 and variance = constant?
plt.scatter(Y_train,residual,c = 'red', s=15, alpha=0.5)
plt.xlabel('Y')
plt.ylabel('Residual')
plt.title('Residual')
plt.show()
# +
# Q: Are the residuals normally distributed centered around 0?
sns.distplot(residual, bins=50, color='green').set_title("Residual Histogram")
plt.show()
# -
X_new = np.array([0.03, 0.0, 13.0, 0.0, 0.4, 4.3, 23.5, 1.9, 1.0, 273.0, 18.0, 380.0, 7.5]).reshape(1,-1) # Reshaped as a row.
Y_pred_new = lm.predict(X_new)
print(np.round(Y_pred_new[0,0],3))
| Rafay notes/Samsung Course/Chapter 5/Class Work/Lecture 03-23-oct-2021/.ipynb_checkpoints/Class Work from 23 October 2021-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import os, sys, time, copy
import numpy as np
import matplotlib.pyplot as plt
import myokit
sys.path.append('../')
sys.path.append('../Protocols')
sys.path.append('../Models')
sys.path.append('../Lib')
import protocol_lib
import mod_trace
import simulator_myokit
import simulator_scipy
import vc_protocols
# +
# VC_protocol = vc_protocols.hERG_CiPA()
# VC_protocol = vc_protocols.cav12_CiPA()
# VC_protocol = vc_protocols.lateNav15_CiPA()
# +
'''
SongV1
'''
VC_protocol = protocol_lib.VoltageClampProtocol() # steps=steps
VC_protocol.add( protocol_lib.VoltageClampStep(voltage=-80, duration=100) )
VC_protocol.add( protocol_lib.VoltageClampStep(voltage=-120, duration=20) )
VC_protocol.add( protocol_lib.VoltageClampStep(voltage=-40, duration=200) )
VC_protocol.add( protocol_lib.VoltageClampStep(voltage=60, duration=200) )
VC_protocol.add( protocol_lib.VoltageClampStep(voltage=0, duration=200) ) # <- why?? vo
VC_protocol.add( protocol_lib.VoltageClampStep(voltage=50, duration=200) )
VC_protocol.add( protocol_lib.VoltageClampStep(voltage=-10, duration=200) )
VC_protocol.add( protocol_lib.VoltageClampStep(voltage=-80, duration=50) )
VC_protocol.add( protocol_lib.VoltageClampRamp(voltage_start=30, voltage_end=-50, duration=100)) # ramp step
vhold = -80
print(f'The protocol is {VC_protocol.get_voltage_change_endpoints()[-1]} ms')
# +
'''
leemV1
'''
VC_protocol = protocol_lib.VoltageClampProtocol() # steps=steps
VC_protocol.add( protocol_lib.VoltageClampStep(voltage=-80, duration=100) )
VC_protocol.add( protocol_lib.VoltageClampStep(voltage=-90, duration=100) )
VC_protocol.add( protocol_lib.VoltageClampStep(voltage=-80, duration=100) )
VC_protocol.add( protocol_lib.VoltageClampStep(voltage=-35, duration=40) )
VC_protocol.add( protocol_lib.VoltageClampStep(voltage=-80, duration=200) )
VC_protocol.add( protocol_lib.VoltageClampStep(voltage=-40, duration=40) )
VC_protocol.add( protocol_lib.VoltageClampStep(voltage=0, duration=40) )
VC_protocol.add( protocol_lib.VoltageClampStep(voltage=40, duration=500) )
VC_protocol.add( protocol_lib.VoltageClampRamp(voltage_start=40, voltage_end=-120, duration=200)) # ramp step
# settings
# -
times = np.arange(0, VC_protocol.get_voltage_change_endpoints()[-1], 0.5)
VC_protocol.plot_voltage_clamp_protocol(times)
# +
end_time = VC_protocol.get_voltage_change_endpoints()[-1]
bcl = 1000
duration = 0.5
offset = 20
window = 10
step_size = 5
cell_types = {
'Endocardial' : 0,
'Epicardial' : 1,
'Mid-myocardial' : 2,
}
extra_log=['ina.INa', 'inal.INaL', 'ito.Ito', 'ical.ICaL', 'ical.ICaNa', 'ical.ICaK', 'ikr.IKr', 'iks.IKs', 'ik1.IK1', 'inaca.INaCa', 'inacass.INaCa_ss', 'inak.INaK', 'ikb.IKb', 'inab.INab', 'icab.ICab', 'ipca.IpCa']
t_span = (0, end_time)
t_eval = np.linspace(0, t_span[1], 10000)
# +
start_time = time.time()
model, p, s = myokit.load("../mmt-model-files/ohara-cipa-v1-2017_JK-v2.mmt")
sim = simulator_myokit.Simulator(model, VC_protocol, max_step=1.0, abs_tol=1e-06, rel_tol=1e-6, vhold=vhold) # 1e-12, 1e-14 # 1e-08, 1e-10
sim.name = "ohara2017"
f = 1.5
params = {
'cell.mode': cell_types['Mid-myocardial'],
'setting.simType': 1, # 0: AP | 1: VC
'ina.gNa' : 75.0 * f,
'inal.gNaL' : 0.0075 * 2.661 * f,
'ito.gto' : 0.02 * 4 * f,
'ical.PCa' : 0.0001 * 1.007 * 2.5 * f,
'ikr.gKr' : 4.65854545454545618e-2 * 1.3 * f, # [mS/uF]
'iks.gKs' : 0.0034 * 1.87 * 1.4 * f,
'ik1.gK1' : 0.1908 * 1.698 * 1.3 * f,
'inaca.gNaCa' : 0.0008 * 1.4,
'inak.PNaK' : 30 * 0.7,
'ikb.gKb' : 0.003,
'inab.PNab' : 3.75e-10,
'icab.PCab' : 2.5e-8,
'ipca.GpCa' : 0.0005,
'ina.g_adj' : 1,
'inal.g_adj' : 1,
'ito.g_adj' : 1,
'ical.g_adj' : 1,
'ikr.g_adj' : 1,
'iks.g_adj' : 1,
'ik1.g_adj' : 1,
'inaca.g_adj' : 1,
'inak.g_adj' : 1,
'ikb.g_adj' : 1,
'inab.g_adj' : 1,
'icab.g_adj' : 1,
'ipca.g_adj' : 1,
}
sim.set_simulation_params(params)
y0 = sim.pre_simulate(5000, sim_type=1)
d = sim.simulate(end_time, log_times=None, extra_log=['membrane.i_ion'] + extra_log)
sol1 = {}
times1 = d['engine.time']
sol1["Voltage"] = VC_protocol.get_voltage_clamp_protocol(d['engine.time'])
sol1["I_total"] = d['membrane.i_ion']
sol1["INa"] = d['ina.INa']
sol1["INaL"] = d['inal.INaL']
sol1["Ito"] = d['ito.Ito']
sol1["ICaL"] = d['ical.ICaL']
sol1["IKr"] = d['ikr.IKr']
sol1["IKs"] = d['iks.IKs']
sol1["IK1"] = d['ik1.IK1']
tr = mod_trace.Trace(VC_protocol,
cell_params=None,
t=times1,
y=sol1["Voltage"], # simulator.model.V,
command_voltages=sol1["Voltage"],
current_response_info=sim.current_response_info,
default_unit=None)
max_contributions1 = tr.current_response_info.get_max_current_contributions(time=times1,
window=window,
step_size=step_size)
print("--- %s seconds ---"%(time.time()-start_time))
# -
max_contributions1
# +
start_time = time.time()
params = {
'cell.mode': cell_types['Mid-myocardial'],
'setting.simType': 1, # 0: AP | 1: VC
'ina.g_adj' : 0.8389102,
'inal.g_adj' : 0.35914838,
'ito.g_adj' : 0.8480872,
'ical.g_adj' : 0.1362117,
'ikr.g_adj' : 0.08961055,
'iks.g_adj' : 0.86768395,
'ik1.g_adj' : 0.01095095,
# 'inaca.g_adj' : 1,
# 'inak.g_adj' : 1,
# 'ikb.g_adj' : 1,
# 'inab.g_adj' : 1,
# 'icab.g_adj' : 1,
# 'ipca.g_adj' : 1,
}
sim.set_simulation_params(params)
# sim.set_initial_values([-88] + y0)
sim.pre_simulate(5000, sim_type=1)
d = sim.simulate(end_time, log_times=None , extra_log=['membrane.VC', 'membrane.i_ion'] + extra_log)
sol2 = {}
times2 = d['engine.time']
sol2["Voltage"] = d['membrane.VC']
sol2["I_total"] = d['membrane.i_ion']
sol2["INa"] = sim.current_response_info.get_current(['INa'])
sol2["INaL"] = sim.current_response_info.get_current(['INaL'])
sol2["Ito"] = sim.current_response_info.get_current(['Ito'])
sol2["ICaL"] = sim.current_response_info.get_current(['ICaL'])
sol2["IKr"] = sim.current_response_info.get_current(['IKr'])
sol2["IKs"] = sim.current_response_info.get_current(['IKs'])
sol2["IK1"] = sim.current_response_info.get_current(['IK1'])
tr = mod_trace.Trace(VC_protocol,
cell_params=None,
t=times2,
y=sol2["Voltage"], # simulator.model.V,
command_voltages=sol2["Voltage"],
current_response_info=sim.current_response_info,
default_unit=None)
max_contributions2 = tr.current_response_info.get_max_current_contributions(time=times2,
window=window,
step_size=step_size)
print("--- %s seconds ---"%(time.time()-start_time))
# -
max_contributions2
# +
'''
Plot
'''
# current_name = 'IKr'
fig, ax = plt.subplots(len(sol1),1, figsize=(10,30))
# fig.suptitle(sim.name, fontsize=14)
for i, (name, values) in enumerate(sol1.items()):
# ax.set_title('Simulation %d'%(simulationNo))
# axes[i].set_xlim(model_scipy.times.min(), model_scipy.times.max())
# ax.set_ylim(ylim[0], ylim[1])
ax[i].set_xlabel('Time (ms)')
ax[i].set_ylabel(f'{name}')
ax[i].plot( times1, values, label='control', color='k', linewidth=5)
ax[i].plot( times2, sol2[name], label='treatment', color='r', linewidth=2)
ax[i].legend()
ax[i].grid()
if i!=0 and i!=1:
contribution = max_contributions1[max_contributions1["Current"]==name]['Contribution'].values[0]
start = max_contributions1[max_contributions1["Current"]==name]['Time Start'].values[0]
end = max_contributions1[max_contributions1["Current"]==name]['Time End'].values[0]
ax[i].axvspan(start, end, color='g', alpha=0.3)
# ax[-1].set_ylim(-5, 5)
plt.subplots_adjust(left=0.07, bottom=0.05, right=0.95, top=0.95, wspace=0.5, hspace=0.15)
plt.show()
# fig.savefig(os.path.join('Results', "C.jpg"), dpi=100)
# -
| Generate_dataset_ohara2017/Change_Conductances.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Carlosrnes/group_work_ml/blob/main/ML_Group_work_Algorithms.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="4lb_2mvnkRIR"
# 1. Prepare Problem
# a) Load libraries
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
from numpy import mean
from numpy import std
from pandas import read_csv
from pandas import set_option
from pandas.plotting import scatter_matrix
from sklearn.pipeline import Pipeline
from sklearn.datasets import make_classification
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.datasets import load_digits
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import ExtraTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import FeatureUnion
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.feature_selection import RFECV
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
# + id="d0T8xbTEkRIk" colab={"base_uri": "https://localhost:8080/"} outputId="52ef54e8-6280-4ba6-94d8-e2f9b4d2c098"
# b) Load dataset
df = pd.read_csv('https://raw.githubusercontent.com/Carlosrnes/group_work_ml/main/techscape-ecommerce/train.csv')
# Drop Access_ID
df = df.drop(['Access_ID'], axis=1)
# Converting Date type from object to datetime
df['Date'] = pd.to_datetime(df['Date'], format='%d-%b-%y')
filters1 = (
(df['AccountMng_Duration']<=2000)
&
(df['FAQ_Duration']<=1500)
&
(df['Product_Pages']<=500)
&
(df['Product_Duration']<=25000)
&
(df['GoogleAnalytics_PageValue']<=300)
)
df_1 = df[filters1]
print('Percentage of data kept after removing outliers:', np.round(df_1.shape[0] / df.shape[0], 4))
df = df[filters1]
# Creating new features
df['month'] = df['Date'].dt.month
# Dropping columns
df = df.drop(['Date'], axis=1).reset_index(drop=True)
# One-hot encoding
df = pd.concat([df,pd.get_dummies(df['month'], prefix='month_',dummy_na=True)],axis=1).drop(['month'],axis=1)
df = pd.concat([df,pd.get_dummies(df['Type_of_Traffic'], prefix='Type_of_Traffic_',dummy_na=True)],axis=1).drop(['Type_of_Traffic'],axis=1)
df = pd.concat([df,pd.get_dummies(df['Browser'], prefix='Browser_',dummy_na=True)],axis=1).drop(['Browser'],axis=1)
df = pd.concat([df,pd.get_dummies(df['OS'], prefix='OS_',dummy_na=True)],axis=1).drop(['OS'],axis=1)
df = pd.concat([df,pd.get_dummies(df['Country'], prefix='Country_',dummy_na=True)],axis=1).drop(['Country'],axis=1)
df = pd.concat([df,pd.get_dummies(df['Type_of_Visitor'], prefix='Type_of_Visitor_',dummy_na=True)],axis=1).drop(['Type_of_Visitor'],axis=1)
# Sampling
dataset = df.sample(frac=0.90, random_state=786).reset_index(drop=True)
data_unseen = df.drop(dataset.index).reset_index(drop=True)
print('Data for Modeling: ' + str(dataset.shape))
print('Unseen Data For Predictions: ' + str(data_unseen.shape))
# + id="m1je2K-ekRJH"
# 4. Evaluate Algorithms
# a) Split-out validation dataset
df = dataset.dropna()
df1 = df.drop(['Buy'], axis=1)
array = df1.values
X = array[:,0:df1.shape[1]-1].astype(float)
y = np.array(df['Buy'])
validation_size=0.2
seed = 7
X_train, X_validation, Y_train, Y_validation = train_test_split(X, y,
test_size=validation_size, random_state=seed)
# + id="mJlzII4vkRJJ"
# b) Test options and evaluation metric
num_folds = 10
scoring = 'f1'
# + id="l8JHRMx7kRJK" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="d65972d0-9164-498c-aa31-37326e8aa76c"
# c) Spot Check Algorithms
# create a dict of standard models to evaluate {name:object}
def define_models(models=dict()):
# linear models
models['logistic'] = LogisticRegression()
alpha = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
for a in alpha:
models['ridge-'+str(a)] = RidgeClassifier(alpha=a)
models['sgd'] = SGDClassifier(max_iter=1000, tol=1e-3)
models['pa'] = PassiveAggressiveClassifier(max_iter=1000, tol=1e-3)
# non-linear models
n_neighbors = range(1, 12)
for k in n_neighbors:
models['knn-'+str(k)] = KNeighborsClassifier(n_neighbors=k)
models['cart'] = DecisionTreeClassifier()
models['extra'] = ExtraTreeClassifier()
#models['svml'] = SVC(kernel='linear')
#models['svmp'] = SVC(kernel='poly')
#c_values = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
#for c in c_values:
# models['svmr'+str(c)] = SVC(C=c)
models['bayes'] = GaussianNB()
# ensemble models
n_trees = 100
models['ada'] = AdaBoostClassifier(n_estimators=n_trees)
models['bag'] = BaggingClassifier(n_estimators=n_trees)
models['rf'] = RandomForestClassifier(n_estimators=n_trees)
models['et'] = ExtraTreesClassifier(n_estimators=n_trees)
models['gbm'] = GradientBoostingClassifier(n_estimators=n_trees)
print('Defined %d models' % len(models))
return models
# no transforms pipeline
def pipeline_none(model):
return model
# standardize transform pipeline
def pipeline_standardize(model):
steps = list()
# standardization
steps.append(('standardize', StandardScaler()))
# the model
steps.append(('model', model))
# create pipeline
pipeline = Pipeline(steps=steps)
return pipeline
# normalize transform pipeline
def pipeline_normalize(model):
steps = list()
# normalization
steps.append(('normalize', MinMaxScaler()))
# the model
steps.append(('model', model))
# create pipeline
pipeline = Pipeline(steps=steps)
return pipeline
# standardize and normalize pipeline
def pipeline_std_norm(model):
steps = list()
# standardization
steps.append(('standardize', StandardScaler()))
# normalization
steps.append(('normalize', MinMaxScaler()))
# the model
steps.append(('model', model))
# create pipeline
pipeline = Pipeline(steps=steps)
return pipeline
# evaluate a single model
def evaluate_model(X, y, model, folds, metric, pipe_func):
# create the pipeline
pipeline = pipe_func(model)
# evaluate model
scores = cross_val_score(pipeline, X, y, scoring=metric, cv=folds, n_jobs=-1)
return scores
# evaluate a model and try to trap errors and and hide warnings
def robust_evaluate_model(X, y, model, folds, metric, pipe_func):
scores = None
try:
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
scores = evaluate_model(X, y, model, folds, metric, pipe_func)
except:
scores = None
return scores
# evaluate a dict of models {name:object}, returns {name:score}
def evaluate_models(X, y, models, pipe_funcs, folds=num_folds, metric=scoring):
results = dict()
for name, model in models.items():
# evaluate model under each preparation function
for i in range(len(pipe_funcs)):
# evaluate the model
scores = robust_evaluate_model(X, y, model, folds, metric, pipe_funcs[i])
# update name
run_name = str(i) + name
# show process
if scores is not None:
# store a result
results[run_name] = scores
mean_score, std_score = mean(scores), std(scores)
print('>%s: %.3f (+/-%.3f)' % (run_name, mean_score, std_score))
else:
print('>%s: error' % run_name)
return results
# print and plot the top n results
def summarize_results(results, maximize=True, top_n=10):
# check for no results
if len(results) == 0:
print('no results')
return
# determine how many results to summarize
n = min(top_n, len(results))
# create a list of (name, mean(scores)) tuples
mean_scores = [(k,mean(v)) for k,v in results.items()]
# sort tuples by mean score
mean_scores = sorted(mean_scores, key=lambda x: x[1])
# reverse for descending order (e.g. for accuracy)
if maximize:
mean_scores = list(reversed(mean_scores))
# retrieve the top n for summarization
names = [x[0] for x in mean_scores[:n]]
scores = [results[x[0]] for x in mean_scores[:n]]
# print the top n
print()
for i in range(n):
name = names[i]
mean_score, std_score = mean(results[name]), std(results[name])
print('Rank=%d, Name=%s, Score=%.3f (+/- %.3f)' % (i+1, name, mean_score, std_score))
# boxplot for the top n
plt.boxplot(scores, labels=names)
_, labels = plt.xticks()
plt.setp(labels, rotation=90)
plt.savefig('spotcheck.png')
# Run Spot Check Algorithms
# get model list
models = define_models()
# define transform pipelines
pipelines = [pipeline_none, pipeline_standardize, pipeline_normalize, pipeline_std_norm]
# evaluate models
results = evaluate_models(X, y, models, pipelines)
# summarize results
summarize_results(results)
# + id="L9cUoKB2kRJg" colab={"base_uri": "https://localhost:8080/"} outputId="3dc2d223-aea2-4123-a0a8-05d1c8407a05"
# 5. Improve Accuracy
# a) Algorithm Tuning (Hyperparameters)
# Tune Gradient Boosting Classifier
n_trees = [10,50,100, 200]
learning = [0.001, 0.01, 0.1]
subsample = [0.7, 0.85, 1.0]
max_depth = [3, 7]
param_grid = dict(n_estimators=n_trees, learning_rate=learning, subsample=subsample, max_depth=max_depth)
model = GradientBoostingClassifier()
kfold = StratifiedKFold(n_splits=num_folds, random_state=seed, shuffle=True)
grid = GridSearchCV(estimator=model, param_grid=param_grid, scoring=scoring, cv=kfold)
grid_result = grid.fit(X, y)
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
# + id="3q-4Mdy9kRJh" colab={"base_uri": "https://localhost:8080/", "height": 625} outputId="faa205ec-eaac-47b7-e23f-595590a95019"
# 6. Feature Selection
# Define model
model_selected = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1, max_depth=3, subsample=0.85)
features_list = list(df1.columns.values)
# a) Recursive Feature Elimination.
rfecv = RFECV(estimator=model_selected, step=1, cv=kfold, scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure(figsize=(16,10))
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
# + id="m6Av6wCrkRJi" colab={"base_uri": "https://localhost:8080/"} outputId="193f9393-14fc-45d7-ad86-00266c6d434b"
# a) Recursive Feature Elimination.
model = model_selected
rfe = RFE(model, 16)
fit = rfe.fit(X, y)
print("Num Features: %d" % fit.n_features_)
print("Selected Features: %s" % fit.support_)
print("Feature Ranking: %s" % fit.ranking_)
print("Feature Ranking: %s" % fit.estimator_)
# + id="hh3jKL3EkRJk"
# c) Feature Importance.
model = model_selected
model.fit(X, y)
plt.figure(figsize=(16,10))
importances = pd.DataFrame({'feature': features_list, 'importance': np.round(model.feature_importances_,3)})
importances = importances.sort_values('importance', ascending=True).set_index('feature')
importances.plot.barh().set_title('Importance of features')
# + id="aZf9nFJzkRJl" colab={"base_uri": "https://localhost:8080/"} outputId="39d6bfe8-53b2-41ef-d2ba-4e1d2863239d"
# 7. Finalize Model
# a) Predictions on validation dataset
# prepare the model
model = model_selected
model.fit(X, y)
# estimate accuracy on validation dataset
predictions = model.predict(X_validation)
print('Accuracy:')
print(accuracy_score(Y_validation, predictions))
print('f1-score:')
print(f1_score(Y_validation, predictions))
print('Confusion Matrix:')
print(confusion_matrix(Y_validation, predictions))
print('Classification Report:')
print(classification_report(Y_validation, predictions))
| ML_Group_work_Algorithms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# http://actionrecognition.net/files/dsetdetail.php?did=15;
# https://github.com/epic-kitchens/annotations
# %load_ext autoreload
# %autoreload 2
from childes_mi.utils.paths import DATA_DIR, ensure_dir, EPIC_KITCHENS_DIR
import urllib.request
import pandas as pd
actions_data = pd.read_csv(EPIC_KITCHENS_DIR / 'EPIC_train_action_labels.csv')
from tqdm.autonotebook import tqdm
actions = [actions_data[actions_data.video_id == vid].verb_class.values for vid in tqdm(actions_data.video_id.unique())]
actions_name = [[actions_data[actions_data.video_id == vid].verb.values, actions_data[actions_data.video_id == vid].noun.values] for vid in tqdm(actions_data.video_id.unique())]
behav_lens = [len(i) for i in actions]
import matplotlib.pyplot as plt
import numpy as np
np.sum(behav_lens)
len(np.unique(np.concatenate(actions)))
np.median(behav_lens)
fig, ax = plt.subplots()
ax.hist(behav_lens, bins = 50);
ax.axvline(np.median(behav_lens), color='red')
# ### MI
from childes_mi.information_theory import mutual_information as mi
distances = np.arange(1,250).astype('int')
actions_indv = [["{}_{}".format(bi, element) for element in bout] for bi, bout in enumerate(tqdm(actions))]
actions_indv[0][:10]
(MI, MI_var), (shuff_MI, shuff_MI_var) = mi.sequential_mutual_information(
np.array([np.random.permutation(i) for i in actions_indv]), distances=distances, n_jobs=-1, estimate=True
)
MI_DF = pd.DataFrame(
[[MI, MI_var, shuff_MI, shuff_MI_var, distances]],
columns=["MI", "MI_var", "shuff_MI", "shuff_MI_var", "distances"],
)
MI_DF.to_pickle(DATA_DIR / "mi" / "epic_kitchens_shuffled.pickle")
row = MI_DF.iloc[0]
fig, ax = plt.subplots(figsize=(10,5))
MI = row.MI-row.shuff_MI
MI_var = row.MI_var
ax.axvline(np.median(behav_lens), color='red')
ax.scatter(distances, MI)
ax.fill_between(distances, MI-MI_var, MI+MI_var, alpha = 0.25, color= 'k')
#ax.set_ylim([1e-3, 1])
ax.set_yscale('log')
ax.set_xscale('log')
from matplotlib import gridspec
median_len = 45
from childes_mi.utils.paths import DATA_DIR, FIGURE_DIR
from childes_mi.utils.general import flatten,save_fig
# +
yoff=-.20
ncols = 4
zoom = 5
hr = [1, 0.5, 0.5, 0.5]
nrows = np.ceil(len(MI_DF)/ncols).astype(int)
fig = plt.figure(figsize=(len(MI_DF)*zoom*1.0,np.sum(hr)*zoom))
gs = gridspec.GridSpec(ncols=len(MI_DF), nrows=4, height_ratios=hr)
axi = 0
row = MI_DF.iloc[0]
color = 'k'#LCOL_DICT[row.language]
ax0 = plt.subplot(gs[0,axi])
ax = ax0
sig = np.array(row.MI-row.shuff_MI)
distances = row.distances
sig = sig
# get signal limits
sig_lims = np.log([np.min(sig[sig>0]), np.nanmax(sig)])
sig_lims = [sig_lims[0] - (sig_lims[1]-sig_lims[0])/10,
sig_lims[1] + (sig_lims[1]-sig_lims[0])/10]
if axi==0:
ax.set_ylabel('MI (bits)', labelpad=5, fontsize=24)
ax.yaxis.set_label_coords(yoff,0.5)
# plot real data
ax.scatter(distances, sig, alpha = 1, s=60, color=color)
ax.set_xlabel('Distance (actions)', labelpad=5, fontsize=24)
#print(row.language, distances[peak_of_interest])
for ax in [ax0]:
ax.set_xlim([distances[0], distances[-1]])
sig_lims[0] = np.log(10e-6)
ax.set_ylim([1e-6, 0.5])
ax.tick_params(which='both', direction='in', labelsize=14, pad=10)
ax.tick_params(which='major', length=10, width =3)
ax.tick_params(which='minor', length=5, width =2)
ax.set_xscale( "log" , basex=10)
ax.set_yscale( "log" , basey=10)
ax.set_xticks([])
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(3)
ax.spines[axis].set_color('k')
ax.set_xticks([1,10])
ax.set_xticklabels(['1','10'])
ax.set_xlim([0.9, 45.5])
if axi !=0:
for ax in [ax0,ax1,ax2]:
ax.yaxis.set_ticklabels([])
gs.update(wspace=0.075, hspace=0.1)
ax0.set_title("Epic Kitchens", fontsize=24)
save_fig(FIGURE_DIR/'epic_kitchens-1000-shuffled')
# -
| notebooks/epic-kitchens2/2.1-epic-kitchens-shuffled-sequences.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Learn Latex using Jupyter notebook
# ```
# $$
# \begin{align}
# \sum_{k=1}^{\infty} \frac{1}{k^2} = \frac{\pi^2}{6}
# \end{align}
# $$
# ```
# $$
# \begin{align}
# \sum_{k=1}^{\infty} \frac{1}{k^2} = \frac{\pi^2}{6}
# \end{align}
# $$
#
# `$$`で囲むと行替えしてセンタリングされるみたい。
# `$`で囲むと行替えされずに行の中に書かれる。
#
# ```
# $$
# \newcommand{\rot}[1]{\nabla\times #1}
# \newcommand{\pdfrac}[2]{\frac{\partial #1}{\partial #2}}
# \begin{align}
# \mathbf{D} &= 0 \\\
# \mathbf{B} &= 0 \\\
# \rot{\mathbf{E}} &= - \pdfrac{\mathbf{B}}{t} \\\
# \rot{\mathbf{H}} &= \pdfrac{\mathbf{D}}{t}
# \end{align}
# $$
# ```
# $$
# \newcommand{\rot}[1]{\nabla\times #1}
# \newcommand{\pdfrac}[2]{\frac{\partial #1}{\partial #2}}
# \begin{align}
# \mathbf{D} &= 0 \\\
# \mathbf{B} &= 0 \\\
# \rot{\mathbf{E}} &= - \pdfrac{\mathbf{B}}{t} \\\
# \rot{\mathbf{H}} &= \pdfrac{\mathbf{D}}{t}
# \end{align}
# $$
#
# `$$`で囲んだ中の改行は`\\\`みたい。
#
# ```
# \begin{align}
# \dot{x} & = \sigma(y-x) \\
# \dot{y} & = \rho x - y - xz \\
# \dot{z} & = -\beta z + xy
# \end{align}
# ```
# \begin{align}
# \dot{x} & = \sigma(y-x) \\
# \dot{y} & = \rho x - y - xz \\
# \dot{z} & = -\beta z + xy
# \end{align}
#
# `& =`というのは縦に`=`を揃える、ってことかな。
# ここでは改行は`\\`になっている。
#
# おっとそれより、`$$`で囲まなくてもいいのか!
# ```
# \begin{equation*}
# \left( \sum_{k=1}^n a_k b_k \right)^2 \leq \left( \sum_{k=1}^n a_k^2 \right) \left( \sum_{k=1}^n b_k^2 \right)
# \end{equation*}
# ```
# \begin{equation*}
# \left( \sum_{k=1}^n a_k b_k \right)^2 \leq \left( \sum_{k=1}^n a_k^2 \right) \left( \sum_{k=1}^n b_k^2 \right)
# \end{equation*}
#
# `\displaystyle`とか書かなくてもシグマの添字が上下についているがこれを横につけるにはどうしたらよいのか。
#
# %matplotlib inline
from sympy import *
init_printing()
x = Symbol('x')
y = Symbol('y')
expr = (x + y)**5
expr
expand (expr)
solve(expr, x, 0)
| learnlatex.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Nanle21/BackEndFrontTest/blob/master/firstScript.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="NBZS8Fhs-q1Y" colab_type="code" colab={}
# + [markdown] id="kD18jmoU_plV" colab_type="text"
# #This is my title
# this is a regular text
#
#
#
# ```
# # This is nanle formatted text
# ```
#
#
# + id="QaJ3K85JAXUT" colab_type="code" colab={}
import numpy as np
import matplotlib.pyplot as plt
# + id="OUHnmTi2A1Ic" colab_type="code" colab={}
x = np.linspace(0, 10*np.pi, 1000)
y = np.sin(x)
# + id="y7tsz8EdBgnJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="bc583ec8-0db5-4240-8db1-bbdf1a0b762c"
plt.plot(x,y)
| firstScript.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### EKF slam and driving through poles
#
# This jupyter notebook will use EKF slam to steer an imaginary rover through some poles.
#
# Again leaning heavily on the libraries from the PythonRobotics with some augmentations.
from slam.ekf_slam import EkfSlam
from utils.rover import Rover
from control import pose_control
import numpy as np
import matplotlib.pyplot as plt
import math
# The idea will be to identify a "lineup" pose that we should drive to in order to prepare for the rover to drive through the gates, then drive through the gates. Then stop.
def targetPose(pole1,pole2):
mid = ((pole1[0]+pole2[0])/2.0,(pole1[1]+pole2[1])/2.0)
dy = pole2[1]-pole1[1]
dx = pole2[0]-pole1[0]
theta = np.arctan2(dx,dy)
return Rover(1*np.cos(theta)+mid[0],1*np.sin(theta)+mid[1],theta + np.pi)
# +
pole = np.array([[5.0, 5.0],
[7.0, 5.0]])
# State Vector [x y yaw v]'
xEst = np.zeros((EkfSlam.STATE_SIZE, 1))
xTrue = np.zeros((EkfSlam.STATE_SIZE, 1))
PEst = np.eye(EkfSlam.STATE_SIZE)
xDR = np.zeros((EkfSlam.STATE_SIZE, 1)) # Dead reckoning
# history
hxEst = xEst
hxTrue = xTrue
hxDR = xTrue
## Setting up simulation.
T = 0.0
rov = Rover(0.0,0.0,0.0) # start it at the origin.
# setting up goals and simulation parameters
pose_control.dt = EkfSlam.DT = 0.01
poseGoal = [targetPose(pole[1],pole[0]),targetPose(pole[0],pole[1]), Rover(0,0,0)]
poseGoal[1].theta = poseGoal[0].theta
for i in range(len(poseGoal)):
v = 10
yawrate = 10
while(rov.check_error(poseGoal[i])>0.1):
T += EkfSlam.DT
v,yawrate = pose_control.move_to_pose_step(rov,poseGoal[i])
if abs(v)>10:
v = math.copysign(10,v)
if abs(yawrate)>100:
v = math.copysign(100,yawrate)
u = np.array([[v, yawrate]]).T
xTrue, z, xDR, ud = EkfSlam.observation(xTrue, xDR, u, pole)
xEst, PEst = EkfSlam.ekf_slam(xEst, PEst, ud, z)
x_state = xEst[0:EkfSlam.STATE_SIZE]
rov.x = xEst[0][0]
rov.y = xEst[1][0]
rov.theta = xEst[2][0]
# store data history
hxEst = np.hstack((hxEst, x_state))
hxDR = np.hstack((hxDR, xDR))
hxTrue = np.hstack((hxTrue, xTrue))
print("Time take to reach objective " ,i+1," was ", T, " seconds")
# -
x,y = pole.T
plt.scatter(x,y)
plt.plot(hxTrue[0, :],
hxTrue[1, :], "-b")
plt.plot(hxDR[0, :],
hxDR[1, :], "-k")
plt.plot(hxEst[0, :],
hxEst[1, :], "-r")
plt.axis("equal")
plt.grid(True)
plt.pause(0.001)
plt.show()
| nova_rover_demos/EKF Slam steering through poles.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:hpi]
# language: python
# name: conda-env-hpi-py
# ---
# # TODO
#
# - Filter low GO depth pairs before mining? This will affect the cell counts...
# - Create plots and calculate number of interaction pairs for species, pathogens, the distribution of IPR/GO across host/pathogen, the depth distribution for host/pathogen, etc.
# - Create filter density plot showing the depth, significance, cell count, etc. p-value distribution can be on sides. Scatterplot-like, since it's for pairs.
# # Results without GO propagation|
from pathlib import Path
import pandas as pd
import numpy as np
results_file = Path('/media/pieter/DATA/Wetenschap/Doctoraat/projects/host-pathogen-ppi-analysis/data/processed/10292/results-index-pmi.tsv')
results_df = pd.read_csv(results_file,sep='\t',header=0, index_col=0)
results_df.index
results_df.columns
sorted_df = results_df.sort_values(by='fisher_p-value_fdr_bh')[['fisher_oddsration', 'fisher_p-value_fdr_bh',
'G_G', 'G_p-value_fdr_bh',
'chi2_chi2', 'chi2_p-value_fdr_bh',
'pmi_0']]
sorted_df
sorted_df[sorted_df['fisher_p-value_fdr_bh'] < 0.01]
# - collapse parent-child
# - filter op diepte
# - kleine netwerken weglaten en relaties checken (leave 1 virus out)
# of
# - pas relaties toe op elk netwerk: check of eiwitten aanwezig zijn.
# - consistentie GO termen overheen virussen/hosts (e.g. orthologen)
sorted_df[(sorted_df['fisher_p-value_fdr_bh'] < 0.01) & (sorted_df['pmi_0'] > 0)]
# +
import argparse
import numpy as np
import pandas as pd
from pathlib import Path
from goscripts import obo_tools
from goscripts import gaf_parser
from phppipy.ppi_tools import label_go
from phppipy.ppi_tools import label_interpro
obo_path = Path('/media/pieter/DATA/Wetenschap/Doctoraat/projects/host-pathogen-ppi-analysis/data/raw/go_data/go.obo')
go_dict = obo_tools.importOBO(obo_path, ignore_part_of=False)
# -
sorted_df[(sorted_df['fisher_p-value_fdr_bh'] < 0.01) & (sorted_df['fisher_oddsration'] > 1)]
def monotonic(x):
return np.all(np.diff(x) >= 0)
monotonic(results_df.sort_values(by='fisher_p-value')['fisher_p-value'].values)
monotonic(results_df.sort_values(by='fisher_p-value')['pmi_0'].values)
monotonic(results_df.sort_values(by='fisher_p-value')['G_p-value'].values)
monotonic(results_df.sort_values(by='fisher_p-value')['chi2_p-value'].values)
monotonic(results_df.sort_values(by='pmi_0')['chi2_p-value'].values)
monotonic(results_df.sort_values(by='pmi_0')['G_p-value'].values)
monotonic(results_df.sort_values(by='chi2_p-value')['G_p-value'].values)
1938 1311 1159 10.0349081703
import math
math.log(1938/(1311*1159))
import math
math.log((1159/50000952) / ( (1311/50000952) * (1938/50000952) ) )
# todo:
# timit: steps
# joblib
# propagate
# # Results with GO propagation
# +
from pathlib import Path
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from ast import literal_eval
import math
pd.set_option('display.max_colwidth', -1)
results_file = Path('/media/pieter/DATA/Wetenschap/Doctoraat/projects/host-pathogen-ppi-analysis/data/processed/10292/results-propagated-mul.tsv')
results_df = pd.read_csv(results_file,sep='\t',header=0, index_col=0)
from goscripts import obo_tools
obo_path = Path('/media/pieter/DATA/Wetenschap/Doctoraat/projects/host-pathogen-ppi-analysis/data/raw/go_data/go.obo')
go_dict = obo_tools.importOBO(obo_path, ignore_part_of=False)
obo_tools.buildGOtree(go_dict, root_nodes=['GO:0008150', 'GO:0005575', 'GO:0003674'])
# -
# Convert Gene Ontology IDs to names
# +
def convert_GO_names(pair, go_dict):
l = []
for i in pair:
if 'GO' in i:
l.append(go_dict[i[2:]].name)
else:
l.append(i)
return tuple(l)
results_df['GO_names'] = results_df.apply(lambda x: convert_GO_names(literal_eval(x.name), go_dict), axis=1)
# -
# Convert IPR identifiers to descriptions
ipr_path = Path('/media/pieter/DATA/Wetenschap/Doctoraat/projects/host-pathogen-ppi-analysis/data/interim/10292/interpro/protein2ipr.dat')
ipr_dict = {}
with ipr_path.open('r') as f:
for line in f:
ipr_dict[line.split('\t')[1]] = line.split('\t')[2]
def convert_IPR(pair, ipr_dict):
l = []
for i in pair:
if 'IPR' in i:
l.append(ipr_dict[i[2:]])
else:
l.append(i)
return tuple(l)
results_df['IPR_description'] = results_df.apply(lambda x: convert_IPR(literal_eval(x.name), ipr_dict), axis=1)
def convert_annotations_to_descriptions(pair, go_dict, ipr_dict):
l = []
for i in pair:
if 'IPR' in i:
l.append(ipr_dict[i[2:]])
elif 'GO' in i:
l.append(go_dict[i[2:]].name)
else:
l.append(i)
return tuple(l)
results_df['pair_description'] = results_df.apply(lambda x: convert_annotations_to_descriptions(literal_eval(x.name), go_dict, ipr_dict), axis=1)
# Add column describing the type of the annotation pair: 'GO-GO', 'GO-IPR', 'IPR-GO' and 'IPR-IPR'.
# +
def pair_type(pair):
# if all(['GO' in i for i in pair]):
# return 'GO-GO'
# elif any(['GO' in i for i in pair]):
# return('GO-IPR')
# else:
# return ('IPR')
s = ''
for i in pair:
if 'GO' in i:
s += 'GO'
else:
s += 'IPR'
s += ';'
return s.strip(';')
results_df['pair_type'] = results_df.apply(lambda x: pair_type(literal_eval(x.name)), axis=1)
# -
# ## Multiple correction filter
# Sort the results on FDR-corrected p-values and filter out those larger than 0.1.
sorted_df = results_df.sort_values(by='fisher_p-value_fdr_bh')[['pair_description', 'GO_names', 'IPR_description', 'pair_type', 'pmi_0', 'G_G', 'G_df', 'G_exp', 'chi2_chi2',
'chi2_df', 'chi2_exp', 'fisher_oddsration',
'min_count_0', 'counts_pair_count',
'counts_label_one_count_exclusive', 'counts_label_two_count_exclusive',
'counts_label_one_count', 'counts_label_two_count',
'counts_absent_count', 'counts_total_count', 'depth_protein_A',
'depth_protein_B', 'chi2_p-value_fdr_bh', 'G_p-value_fdr_bh',
'fisher_p-value_fdr_bh']]
fdr_positive_df = sorted_df.loc[(sorted_df['fisher_p-value_fdr_bh'] < 0.01) & (sorted_df['pmi_0'] > 0), ['pair_description', 'GO_names', 'IPR_description',
'pair_type', 'pmi_0', 'chi2_p-value_fdr_bh', 'G_p-value_fdr_bh',
'fisher_p-value_fdr_bh', 'min_count_0', 'counts_pair_count',
'counts_label_one_count_exclusive', 'counts_label_two_count_exclusive',
'counts_absent_count', 'counts_total_count', 'depth_protein_A',
'depth_protein_B']]
fdr_positive_df
fdr_positive_df.shape
# ## Visualisation of depth of GO terms
display(pd.concat([fdr_positive_df.groupby(['depth_protein_A']).size(), fdr_positive_df.groupby(['depth_protein_B']).size()], axis=1))
fdr_positive_df.apply(lambda x: min(x['depth_protein_A'], x['depth_protein_B']), axis=1).value_counts()
plt.figure();
fdr_positive_df.apply(lambda x: min(x['depth_protein_A'], x['depth_protein_B']), axis=1).plot.hist(alpha=0.5, title='Minimum depth of pair')
# One obsolete GO term snuck into the analysis.
fdr_positive_df.loc[(fdr_positive_df[['depth_protein_A', 'depth_protein_B']].isna().sum(axis=1) ==2) & (~mask_IPR)]
# Checking depth filter calculations...
mask_IPR = fdr_positive_df['pair_type'] == 'IPR;IPR'
mask_depth = fdr_positive_df[['depth_protein_A', 'depth_protein_B']].min(axis=1) > -1
mask_depth2 = (fdr_positive_df[['depth_protein_A', 'depth_protein_B']].isna().sum(axis=1) < 2) & (fdr_positive_df[['depth_protein_A', 'depth_protein_B']].min(axis=1) > -1)
fdr_positive_df[mask_IPR | mask_depth].shape
sum(mask_depth) == sum(mask_depth2)
sum(fdr_positive_df[['depth_protein_A', 'depth_protein_B']].min(axis=1) > -1)
len(fdr_positive_df.loc[mask_IPR, ['depth_protein_A', 'depth_protein_B']].min(axis=1))
# One obsolete GO term snuck in the analysis.
fdr_positive_df.loc[(fdr_positive_df[['depth_protein_A', 'depth_protein_B']].isna().sum(axis=1) ==2) & (~mask_IPR)].shape
# ## Filter out pairs with depth < 4
# Filter out depths below chosen threshold, while retaining IPR-IPR pairs. IPR-GO pairs are also filtered.
min_depth = 4
mask_IPR = fdr_positive_df['pair_type'] == 'IPR;IPR'
mask_depth = fdr_positive_df[['depth_protein_A', 'depth_protein_B']].min(axis=1) > min_depth
fdr_positive_depth_df = fdr_positive_df[mask_IPR | mask_depth]
fdr_positive_df.groupby('pair_type').size()
fdr_positive_depth_df.groupby('pair_type').size()
# Todo: distribution of IPR/GO over host vs pathogen partner.
# ## Small cell counts
fdr_positive_depth_df[fdr_positive_depth_df['min_count_0'] < 25].groupby('min_count_0').size()
fdr_positive_depth_df[fdr_positive_depth_df['min_count_0'] >= 25].shape[0]
plt.figure();
fdr_positive_depth_df.groupby('min_count_0').size().plot(kind='bar');
plt.figure();
fdr_positive_depth_df.groupby('min_count_0').size().plot.hist(bins=100,alpha=0.5);
plt.figure();
fdr_positive_depth_df.groupby('min_count_0').size()[:500].plot.hist(bins=100,alpha=0.5);
plt.figure();
fdr_positive_depth_df.groupby('min_count_0').size().map(math.log).plot.hist(alpha=0.5);
# Filter out all pairs that were significantly associated, but had cell counts lower than 5.
fdr_positive_depth_cell_df = fdr_positive_depth_df[fdr_positive_depth_df['min_count_0'] > 5]
fdr_positive_depth_cell_df.shape
# ## Final results
display(fdr_positive_depth_cell_df)
fdr_positive_depth_cell_df.groupby('pair_type').size()
pair_id_df = fdr_positive_depth_cell_df.apply(lambda x: pd.Series(literal_eval(x.name)), axis=1).rename(columns={0: 'annotation_A', 1: 'annotation_B'})
pair_description_df = fdr_positive_depth_cell_df.apply(lambda x: pd.Series(x['pair_description']), axis=1).rename(columns={0: 'description_A', 1: 'description_B'})
fdr_positive_depth_cell_df = pd.concat([fdr_positive_depth_cell_df, pair_id_df, pair_description_df], axis=1)
fdr_positive_depth_cell_df.to_csv('/media/pieter/DATA/Wetenschap/Doctoraat/projects/host-pathogen-ppi-analysis/data/processed/10292/results-filtered.tsv', sep='\t', index=False, header=True)
len(set(fdr_positive_depth_cell_df[['description_A', 'description_B']].values.ravel()))
d1 = {'teams': [['SF', 'NYG'],['SF', 'NYG'],['SF', 'NYG'],
['SF', 'NYG'],['SF', 'NYG'],['SF', 'NYG'],['SF', 'NYG']]}
df2 = pd.DataFrame(d1)
df2.index
pd.concat([pd.DataFrame(df2['teams'].values.tolist()), df2[1]], axis=1)
df2[0]
ids = list(range(14))
[ids[j:j+6] for j in range(0, len(ids), 6)]
[j for j in range(0, len(ids), 6)]
| notebooks/pairwise mining - results.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import sqlite3
con = sqlite3.connect('../data/survey.db')
cursor = con.cursor()
cursor.execute("SELECT * FROM Site;")
results = cursor.fetchall()
results
pd.DataFrame(results)
cursor.close()
con.close()
con = sqlite3.connect('../data/survey.db')
df = pd.read_sql_query("SELECT * FROM Site;", con)
con.close()
a = 3
f"hey, a = {a}"
# ../data/table4b.csv
# make that dataset "tidy"
df = pd.read_csv('../data/table4b.csv')
df.melt(id_vars='country', value_name='pop', var_name='year')
# +
# ../data/table3.csv
# splite the rate column into case_count and population
df = pd.read_csv('../data/table3.csv')
df
# -
df[['count', 'pop']] = df.rate.str.split('/', expand=True)
df
def parse(x):
return x.split('/')
df['numerator'] = df.rate.apply(parse).str.get(0)
df['demoninator'] = df.rate.apply(parse).str.get(1)
df
| notebooks/10-sql.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # python-aco
# __Author__:<NAME>
#
# __Name__: PSO Implementation and P-ACO Implementation
#
# __Purpose__: Fun + Prep. for exam.
#
#
# ## Ant Colony Optimizer (ACO)
# Implementation of the ACO with the possibility to visualise error, shortest path and the pheromone-matrix. This is done in the following way:
#
# Each ant guesses a paths through all points/cities according to a heuristic and the current pheromone value on the connection between the corresponding points. Meaning: The more pheromone and the closer, the higher the probability of choosing the city as next position in the path. After an iteration, the lenghts of the paths are compared and the connections between the cities of the winning ant get all of
# the pheromone (could be done otherwise). Also, each connection is
# decreased by (1-gamma). This is done until i==num_runs.
#
#
# ## Population based Ant Colony Optimizer (P-ACO)
#
# Simple implementation of the P-ACO algorithm. This is similar to ACO, but there is no evaporation step. In this case a population of solution influences the choice of the ants. After 'population_size' steps, the solution looses it's impact and the corresponding pheromone value is removed from the pheromone matrix.
#
#
# ## Relevant literature
#
# - Application to Shortest Path Problems: [Dorigo et al.,91]
# - Population based ACO (P-ACO): [Guntsch,Middendorf,2002]
#
| aco/aco.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# #### In this project, I will pre-process the text and train a text classifier using different feature representation techniques.
import pandas as pd
import re
data = pd.read_csv('FPB.csv',header = None,names = ['sentiments','headlines'],encoding = 'ISO-8859-1')
data.head()
# ### Part 1: Text Pre-Processing
import nltk
nltk.download('punkt') # downloads a model
#tokenization
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
word_token = []
for i in range(len(data)):
sentence = re.sub(r'[^\w\s]', ' ', data.iloc[i,1]) #remove alphanumeric characters
token = word_tokenize(sentence)
word_token.append(token)
# %pprint
#results of tokenization for the first 5 sentences
word_token[:5]
# I used below function to process the text. I first removed alphanumeric characters, then I performed stemming of words based on PorterStemmer. I didn't remove stopwords as doing so will decrease the size of the corpus and the model performance.
# +
#results of stemming for the first 5 sentences
nltk.download('stopwords') # <--- this is new
from nltk.corpus import stopwords
stop = set(stopwords.words('english'))
from nltk.stem import PorterStemmer
ps = PorterStemmer()
# return a list of tokens
def pre_processing_by_nltk(doc, stemming = True, need_sent = False):
# step 1: get sentences
sentences = re.sub(r'[^\w\s]', ' ', doc)
sentences = sent_tokenize(sentences)
# step 2: get tokens
tokens = []
for sent in sentences:
words = word_tokenize(sent)
# step 3 (optional): stemming
if stemming:
words = [ps.stem(word) for word in words]
if need_sent:
tokens.append(words)
else:
tokens += words
return [w.lower() for w in tokens]
# -
stem_list = []
for i in range(len(data)):
token = pre_processing_by_nltk(data.iloc[i,1])
stem_list.append(token)
#results of tokenization&stemming for the first 5 sentences
stem_list[:5]
# ### Part 2: Bag Of Words (20 points)
# +
#split test and train data
from sklearn.model_selection import train_test_split
# splitting the train-test sets
X_train, X_test, y_train, y_test = train_test_split(data.headlines, data.sentiments, random_state=42, test_size=0.2, shuffle=True)
# -
# In the following part, I created a word frequency dictionary based on the train dataset. Keys of the freq are words that contained in the train dataset, and values associated with each key are times that word appears in the document.
# +
from collections import defaultdict
freq = defaultdict(int)
corpus = ' '.join(list(X_train))
new_corpus = re.sub(r'[^\w\s]', ' ', corpus)
raw_tokens = new_corpus.lower().split()
raw_tokens = [ps.stem(word) for word in raw_tokens]
raw_tokens_stop = [w.lower() for w in raw_tokens if w.lower() not in stop]
# -
print(len(raw_tokens))
print(len(raw_tokens_stop))
# a dictionary that contains frequency of a certain word
for token in raw_tokens:
freq[token] += 1
from math import log
IDF, vocab = dict(), dict()
for token in freq:
vocab[token] = len(vocab) #create a fix index of all words
IDF[token] = log(1 + len(X_train) / freq[token]) #
IDF['<UNK>'] = 1
vocab['<UNK>'] = len(vocab)
index_list = vocab.keys()
# ### Train the classifier using method 1
# In this method, the feature is represented as a binary-valued vector of dimension equal to the size of the vocabulary. The value at an index is 1 if the word corresponding to that index is present in the document, else 0.
# When I first train the model, I received a warning indicating the model doesn't converge. Therefore I increased the number of iteration to solve the problem.
def vocabEXIST(doc,index,freqdic):
tokens = pre_processing_by_nltk(doc)
x= []
for vob in index:
if vob not in tokens:
x.append(0)
else:
x.append(1)
return x
X_train_1 = []
X_test_1 = []
for doc in X_train: #create a feature vector
X_train_1.append(vocabEXIST(doc, index_list, freq))
for doc in X_test:
X_test_1.append(vocabEXIST(doc, index_list, freq))
from sklearn.linear_model import LogisticRegression
M1 = LogisticRegression(random_state=0,max_iter=1000).fit(X_train_1,y_train)
predict_y1 = M1.predict(X_test_1)
# +
import sklearn
macro_f1_1 = sklearn.metrics.f1_score(y_test, predict_y1,average='macro')
micro_f1_1 = sklearn.metrics.f1_score(y_test, predict_y1,average='micro')
y_predict_prob = M1.predict_proba(X_test_1)
auc_1 = sklearn.metrics.roc_auc_score(y_test,y_predict_prob,multi_class = 'ovr')
print('AUROC is',round(auc_1,2),', macro-f1 score is', round(macro_f1_1,2),', micro-f1 score',round(micro_f1_1,2))
# -
# ### Train the classifier using method 2
# In this method, the feature is represented by a vector of dimension equal to the size of the vocabulary where the value corresponding to each word is its frequency in the document.
#
def vocabfreq(doc,index,freqdic):
tokens = pre_processing_by_nltk(doc)
x= []
for vob in index:
if vob not in tokens:
x.append(0)
else:
x.append(freqdic[vob])
return x
X_train_2 = []
X_test_2 = []
for doc in X_train:
X_train_2.append(vocabfreq(doc, index_list, freq))
for doc in X_test:
X_test_2.append(vocabfreq(doc, index_list, freq))
M2 = LogisticRegression(random_state=0,max_iter=2000).fit(X_train_2,y_train)
predict_y2 = M2.predict(X_test_2)
# +
macro_f1_2 = sklearn.metrics.f1_score(y_test, predict_y2,average='macro')
micro_f1_2 = sklearn.metrics.f1_score(y_test, predict_y2,average='micro')
y_predict_prob = M2.predict_proba(X_test_2)
auc_2 = sklearn.metrics.roc_auc_score(y_test,y_predict_prob,multi_class = 'ovr')
print('AUROC is',round(auc_2,2),' macro-f1 score is', round(macro_f1_2,2),'micro-f1 score',round(micro_f1_2,2))
# -
# ### Train the classifier using method 3
# In this method, the feature is represented by a vector of dimension equal to the size of the vocabulary where the value corresponding to each word is its tf-idf value.
# method 3
def tfidf_feature_extractor(doc, vocab, IDF):
tokens = pre_processing_by_nltk(doc)
for i, token in enumerate(tokens):
if token not in vocab:
tokens[i] = '<UNK>'
TF = defaultdict(int)
for token in tokens:
TF[token] += 1
x = [0] * len(vocab)
for token in set(tokens):
tfidf = log(TF[token] + 1) * IDF[token]
token_id = vocab[token]
# print(token, TF[token], IDF[token])
x[token_id] = tfidf # this will be a dense matrix
return x
# +
X_train_3 = []
X_test_3 = []
for doc in X_train:
X_train_3.append(tfidf_feature_extractor(doc, vocab, IDF))
for doc in X_test:
X_test_3.append(tfidf_feature_extractor(doc, vocab, IDF))
# -
M3 = LogisticRegression(random_state=0).fit(X_train_3,y_train)
predict_y3 = M3.predict(X_test_3)
# +
macro_f1_3 = sklearn.metrics.f1_score(y_test, predict_y3,average='macro')
micro_f1_3 = sklearn.metrics.f1_score(y_test, predict_y3,average='micro')
y_predict_prob = M3.predict_proba(X_test_3)
auc_3 = sklearn.metrics.roc_auc_score(y_test,y_predict_prob,multi_class = 'ovr')
print('AUROC is',round(auc_3,2),' macro-f1 score is', round(macro_f1_3,2),'micro-f1 score',round(micro_f1_3,2))
# -
auc = [auc_1,auc_2,auc_3]
mi_f1 = [micro_f1_1,micro_f1_2,micro_f1_3]
ma_f1 = [macro_f1_1,macro_f1_2,macro_f1_3]
sum_table = pd.DataFrame().assign(auc = auc,macro_f1 =ma_f1 ,micro_f1 =mi_f1 )
sum_table.index = ['Binary','Frequency','TF-IDF']
sum_table
# For this specific text classifier, I will choose the method 1 since it has the highest AUC, macro-f1 score, and micro-f1 score. The first model dominates the other 2 based on the three metric we choose.
| text_process-gitlab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img style="float: left;;" src='Imagenes/iteso.jpg' width="50" height="100"/></a>
#
# # <center> <font color= #000047> Módulo 2: Fundamentos de Numpy
#
#
#
# <img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/1/1a/NumPy_logo.svg" width="400px" height="400px" />
#
# > Hasta ahora sólo hemos hablado acerca de tipos (clases) de variables y funciones que vienen por defecto en Python.
#
# > Sin embargo, una de las mejores cosas de Python (especialmente si eres o te preparas para ser un científico de datos) es la gran cantidad de librerías de alto nivel que se encuentran disponibles.
#
# > Algunas de estas librerías se encuentran en la librería estándar, es decir, se pueden encontrar donde sea que esté Python. Otras librerías se pueden añadir fácilmente.
#
# > La primer librería externa que cubriremos en este curso es NumPy (Numerical Python).
#
#
# Referencias:
# - https://www.numpy.org/
# - https://towardsdatascience.com/first-step-in-data-science-with-python-numpy-5e99d6821953
# ___
# # 0. Motivación
#
# ¿Recuerdan algo de álgebra lineal? Por ejemplo:
# - vectores;
# - suma de vectores;
# - producto por un escalar ...
#
# ¿Cómo se les ocurre que podríamos manejar lo anterior en Python?
# Crear dos vectores
# Suma de vectores
# ¿con ciclos quizá?
# Producto por escalar
# ¿con ciclos quizá?
# ### Solución: NumPy
#
# NumPy es la librería fundamental para computación científica con Python. Contiene, entre otros:
# - una clase de objetos tipo arreglo N-dimensional muy poderso;
# - funciones matemáticas sofisticadas;
# - herramientas matemáticas útiles de álgebra lineal, transformada de Fourier y números aleatorios.
# Aparte de sus usos científicos, NumPy puede ser usada como un contenedor eficiente de datos multidimensional, lo que le otorga a NumPy una capacidad impresionante de integración con bases de datos.
#
# Por otra parte, casi todas las librerías de Python relacionadas con ciencia de datos y machine learning tales como SciPy (Scientific Python), Mat-plotlib (librería de gráficos), Scikit-learn, dependen de NumPy razonablemente.
# Para nuestra fortuna, NumPy ya viene instalado por defecto en la instalación de Anaconda.
#
# Así que si queremos empezar a utilizarlo, lo único que debemos hacer es importarlo:
# Importar numpy
# Lo que acabamos de hacer es un procedimiento genérico para importar librerías:
# - se comienza con la palabra clave `import`;
# - a continuación el nombre de la librería, en este caso `numpy`;
# - opcionalmente se puede incluir una cláusula `as` y una abreviación del nombre de la librería. Para el caso de NumPy, la comunidad comúmente usa la abreviación `np`.
# Ahora, intentemos hacer lo mismo que que antes, pero con el arreglo n-dimensional que provee NumPy como vector:
# Ayuda sobre arreglo N-dimensional
# Crear dos vectores
# Suma de vectores
# Producto interno
# ### Diferencias fundamentales entre Listas de Python y Arreglos de NumPy
#
# Mientras que las listas y los arreglos tienen algunas similaridades (ambos son colecciones ordenadas de valores), existen ciertas diferencias abismales entre este tipo de estructuras de datos:
#
# - A diferencia de las listas, todos los elementos en un arreglo de NumPy deben ser del mismo tipo de datos (esto es, todos enteros, o flotantes, o strings, etc).
#
# - Por lo anterior, los arreglos de NumPy soportan operaciones aritméticas y otras funciones matemáticas que se ejecutan en cada elemento del arreglo. Las listas no soportan estos cálculos.
#
# - Los arreglos de NumPy tienen dimensionalidad.
# # 1. ¿Qué podemos hacer en NumPy?
#
# Ya vimos como crear arreglos básicos en NumPy, con el comando `np.array()`
# ¿Cuál es el tipo de estos arreglos?
# También podemos crear arreglos multidimensionales:
# Matriz 4x5
# Tipo
# Atributos
# ## 1.1 Funciones de NumPy
# Seguiremos nuestra introducción a NumPy mediante la resolución del siguiente problema:
# ### Problema 1
#
# > Dados cinco (5) contenedores cilíndricos con diferentes radios y alturas que pueden variar entre 5 y 25 cm, encontrar:
# > 1. El volumen del agua que puede almacenar cada contenedor;
# > 2. El volumen total del agua que pueden almacenar todos los contenedores juntos;
# > 3. Cual contenedor puede almacenar más volumen, y cuanto;
# > 4. Cual contenedor puede almacenar menos volumen, y cuanto;
# > 5. Obtener la media, la mediana y la desviación estándar de los volúmenes de agua que pueden ser almacenados en los contenedores.
# Antes que nada, definamos las variables que nos dan:
# Definir numero de contenedores, medida minima y medida maxima
# A continuación, generaremos un arreglo de números enteros aleatorios entre 5 y 25 cm que representarán los radios y las alturas de los cilindros:
# Ayuda de np.random.randint()
# Números aleatorios que representan radios y alturas.
# Inicializar la semilla
# Ver valores
# array.reshape
# De los números generados, separemos los que corresponden a los radios, y los que corresponden a las alturas:
# Radios
# Alturas
# 1. Con lo anterior, calculemos cada uno los volúmenes:
# Volúmenes de los contenedores
# <img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/b/b3/Symbol_great.svg" width="400px" height="400px" />
#
# ### ¡Excelente!
#
# Con esta línea de código tan sencilla, pudimos obtener de un solo jalón todos los volúmenes de nuestros contenedores.
#
# Esta es la potencia que nos ofrece NumPy. Podemos operar los arreglos de forma rápida, sencilla, y muy eficiente.
# 2. Ahora, el volumen total
# Volumen total
# 3. ¿Cuál contenedor puede almacenar más volumen? ¿Cuánto?
# Contenedor que puede almacenar más volumen
# Volumen máximo
# También se puede, pero no es recomendable. Ver comparación de tiempos
# 4. ¿Cuál contenedor puede almacenar menos volumen? ¿Cuánto?
# Contenedor que puede almacenar menos volumen
# Volumen mínimo
# 5. Media, mediana y desviación estándar de los volúmenes
# Media, mediana y desviación estándar
# Atributos shape y dtype
# ## 1.2 Trabajando con matrices
# ### Problema 2
#
# > 25 cartas numeradas de la 1 a la 25 se distribuyen aleatoriamente y en partes iguales a 5 personas. Encuentre la suma de cartas para cada persona tal que:
# > - para la primera persona, la suma es el valor de la primera carta menos la suma del resto de las cartas;
# > - para la segunda persona, la suma es el valor de la segunda carta menos la suma del resto de las cartas;
# > - y así sucesivamente ...
#
# > La persona para la cual la suma sea mayor, será el ganador. Encontrar el ganador.
# Lo primero será generar los números del 1 al 25. ¿Cómo podemos hacer esto?
# np.arange = np.array(range)
# Ayuda en la función np.arange()
# Números del 1 al 25
# Luego, tal y como en un juego de cartas, deberíamos barajarlos, antes de repartirlos:
# Ayuda en la función np.random.shuffle()
# Barajar
# Ver valores
# Bien. Ahora, deberíamos distribuir las cartas. Podemos imaginarnos la distribución como una matriz 5x5:
# Repartir cartas
# Ver valores
# Entonces, tenemos 5 cartas para cada una de las 5 personas, visualizadas como una matriz 5x5.
#
# Lo único que nos falta es encontrar la suma para cada uno, es decir, sumar el elemento de la diagonal principal y restar las demás entradas de la fila (o columna).
#
# ¿Cómo hacemos esto?
# Ayuda en la función np.eye()
# Matriz con la diagonal principal
# Ayuda en la función np.ones()
# Matriz con los elementos fuera de la diagonal negativos
# Matriz completa
# Sumar por filas
# ¿Quién es el ganador?
# # 2. Algo de álgebra lineal con NumPy
#
# Bueno, ya hemos utilizado NumPy para resolver algunos problemas de juguete. A través de estos problemas, hemos introducido el tipo de objetos que podemos manipular con NumPy, además de varias funcionalidades que podemos utilizar.
#
# Pues bien, este tipo de objetos nos sirven perfectamente para representar vectores y matrices con entradas reales o complejas... si, de las que estudiamos en algún momento en álgebra lineal.
#
# Mejor aún, NumPy nos ofrece un módulo de álgebra lineal para efectuar las operaciones básicas que podríamos necesitar.
# Consideremos la siguiente matriz:
# Podemos obtener varios cálculos útiles alrededor de la matriz A:
# Rango de la matriz A
# Determinante de la matriz A
# Inversa de la matriz A
# Potencia de la matriz A
# A.dot(A).dot(A).dot(A).dot(A)
# Eigenvalores y eigenvectores de la matriz A
# Por otra parte, si tenemos dos vectores:
# podemos calcular su producto interno (producto punto)
# De la misma manera, podemos calcular la multiplicación de la matriz A por un vector
# $$
# A x = z
# $$
# **Recomendado el siguiente [tutorial](https://www.numpy.org/devdocs/user/quickstart.html) para que profundicen más en todo lo que pueden hacer con NumPy**
| Modulo2/Clase4_NumpyFundamentos.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# this code takes the NFIRS Dataset and outputs a count of fires by zipcode
# requires NFIRS_2009_2016_geocoded_with_tract.csv to be in the main path
# +
import os
import pandas as pd
import matplotlib.pyplot as plt
import conda
# +
# Change to your filepath to data and uncomment if NFIRS_2009_2016_geocoded_with_tract.csv not in the main path
#os.chdir('../Data/Raw')
# -
conda install -c conda-forge geopandas
NFIR = pd.read_csv('NFIRS_2009_2016_geocoded_with_tract.csv',
encoding='latin_1',
index_col=0,
low_memory=False)
NFIR.head()
NFIR.columns
OUT = NFIR.groupby(['STATE','COUNTYFP'] )['TRACTCE'].value_counts()
OUT.name = 'COUNT'
# Output
OUT.to_csv('Fires_by_Census_Tract.csv',header= True)
OUT
| Code/.ipynb_checkpoints/Number_of_Fires_by_Census_Tract_VIZ-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TEXT data cleaner and tokenizer
#
# we use NLTK, Regex, BeautifulSoup, inflect to clean the parsed text data (json file), tokenize them into sentences and restore them as JSON file.
# Resources used to create this code are as following:
# * <a href="https://www.kdnuggets.com/2018/03/text-data-preprocessing-walkthrough-python.html" target="_black" > Text processing </a>
# * <a href="https://www.geeksforgeeks.org/nlp-how-tokenizing-text-sentence-words-works/" target ="_black" >Tokenization </a>
# ---------------------------
# <code>Step 1:</code> Importing all the neccesary libraries
import pandas as pd
import re, string, unicodedata
import inflect
from collections import defaultdict
from bs4 import BeautifulSoup as BS
import spacy
from nltk.corpus import stopwords
STOPWORDS = set(stopwords.words('english'))
import json
import nltk.data
from nltk.tokenize import sent_tokenize
from nltk.stem import LancasterStemmer, WordNetLemmatizer
import numpy as np
# <code>Step 2:</code> Openning, premilinary cleaning, and tokenizing the text data into sentences
def open_clean_tokenize(filepath):
""" filepath is the path to the json file to be openned. This function opens the file, do some basic cleaning (makes text lowercase, remoeves and removes unwanted symboles and tokenize the data in to sentences
"""
with open(filepath) as json_file:
corpus = json.load(json_file)
#corpus = pd.read_json(json_file, orient ='index')
corpus = str(corpus)
corpus = BS(corpus, 'lxml').text #HTML decoding. BeautifulSoup's text attribute will return a string stripped of any HTML tags and metadata.
corpus = corpus.lower()
corpus = re.sub(r'\w*\d\w*', '', corpus)
corpus = re.sub(r'http\S+', '', corpus) # removes url
corpus = re.compile('[/(){}\[\]\|@;#:]').sub('', corpus) #replace REPLACE_BY_SPACE_RE symbols by space in text. substitute the matched string in REPLACE_BY_SPACE_RE with space.
corpus = re.compile('(\$[a-zA-Z_][0-9a-zA-Z_]*)').sub('', corpus) #remove symbols from text.
tokenizer = nltk.data.load('tokenizers/punkt/PY3/english.pickle') # tokenizes data into sentences
corpus = tokenizer.tokenize(corpus)
return corpus
# <code>Step 3:</code> Further cleaning (remove punctuations, numbers, and stop words) the tokenized data in setp 2 for to use in Word2vec
# +
def remove_punctuation(corpus):
"""Remove punctuation from list of tokenized corpus"""
new_corpus = []
for token in corpus:
new_token = re.sub(r'[^\w\s]', '', token)
if new_token != '':
new_corpus.append(new_token)
return new_corpus
def replace_numbers(corpus):
"""Replace all interger occurrences in list of tokenized words with textual representation"""
p = inflect.engine()
new_corpus = []
for token in corpus:
if token.isdigit():
new_token = p.number_to_words(token)
new_corpus.append(new_token)
else:
new_corpus.append(token)
return new_corpus
def remove_stopwords(corpus):
"""Remove stop words from list of tokenized words"""
new_corpus = []
for token in corpus:
if token not in stopwords.words('english'):
new_corpus.append(token)
return new_corpus
def normalize(corpus):
corpus = remove_punctuation(corpus)
corpus = replace_numbers(corpus)
corpus = remove_stopwords(corpus)
return corpus
# -
# <code> Step 4:</code> Let's run the above two functions as below
filepath = 'Basic Principles of Organic Chemistry_Roberts and Caserio'
corpus = open_clean_tokenize(filepath)
tokens = normalize(corpus)
tokens = normalize(corpus)
# ## Stemming and lemmatization
#
# We can further process the cleanned and tokenized data as follows:
# +
def stem_words(tokens):
"""Stem words in list of tokenized words"""
tokens = normalize(corpus)
stemmer = LancasterStemmer()
stems = []
for token in tokens:
stem = stemmer.stem(token)
stems.append(stem)
return stems
def lemmatize_verbs(tokens):
"""Lemmatize verbs in list of tokenized words"""
lemmatizer = WordNetLemmatizer()
lemmas = []
for token in tokens:
lemma = lemmatizer.lemmatize(token, pos='v')
lemmas.append(lemma)
return lemmas
# -
def stem_and_lemmatize(tokens):
stems = stem_words(tokens)
lemmas = lemmatize_verbs(tokens)
return stems, lemmas
final_data = stem_and_lemmatize(tokens)
| Rukiya/.ipynb_checkpoints/text_data_cleaner-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jzzy-jeff/github-slideshow/blob/master/audioPrep.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="e7nBakVvHKok" outputId="32664040-6beb-4d18-d1c2-1d6caf909f44"
pip install -q tensorflow-io
# + colab={"base_uri": "https://localhost:8080/"} id="vyC0TSmLHeXL" outputId="b4b88ace-2d46-4e40-d431-8d8dd11ae762"
import tensorflow as tf
import tensorflow_io as tfio
audio = tfio.audio.AudioIOTensor('gs://cloud-samples-tests/speech/brooklyn.flac')
print(audio)
# + colab={"base_uri": "https://localhost:8080/"} id="C8_TZJJtH-vF" outputId="d0c53a57-3413-4b71-f3c8-d6695a4f188c"
audio_slice = audio[100:]
#remove the last dimension
audio_tensor = tf.squeeze(audio_slice, axis=[-1])
print(audio_tensor)
# + colab={"base_uri": "https://localhost:8080/", "height": 75} id="k2qhu4rNJ9-N" outputId="412ebcc9-e492-4274-90e3-bf5b70fa22a1"
from IPython.display import Audio
Audio(audio_tensor.numpy(), rate=audio.rate.numpy())
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="b8eQJmZqKT9c" outputId="5a8e6a0d-60cd-462b-c0b0-9e2133b10558"
import matplotlib.pyplot as plt
tensor = tf.cast(audio_tensor, tf.float32) / 32768.0
plt.figure()
plt.plot(tensor.numpy())
# + colab={"base_uri": "https://localhost:8080/", "height": 299} id="wV6fFGIyMqpe" outputId="791c8a3e-4096-450c-b80b-ae7838a6d31e"
position = tfio.experimental.audio.trim(tensor, axis=0, epsilon=0.1)
start = position[0]
stop = position[1]
print(start, stop)
processed = tensor[start:stop]
plt.figure()
plt.plot(processed.numpy())
# + colab={"base_uri": "https://localhost:8080/", "height": 75} id="UfiD8x30Nkqb" outputId="cf7c5272-d001-4186-971d-4aa135a8a381"
Audio(processed.numpy(), rate=audio.rate.numpy())
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="F1lXzxAsN2tL" outputId="a7542e90-3156-4d80-ad38-ab51dcdeb7ca"
fade = tfio.experimental.audio.fade(
processed, fade_in=1000, fade_out=2000, mode="logarithmic")
plt.figure()
plt.plot(fade.numpy())
# + colab={"base_uri": "https://localhost:8080/", "height": 75} id="rIcuPl4sOcYc" outputId="ebec4996-67cc-441e-8b3a-de1312d3d47a"
Audio(fade.numpy(), rate=audio.rate.numpy())
# + colab={"base_uri": "https://localhost:8080/", "height": 176} id="-PA5oCN6Otg7" outputId="ab69e651-1395-40dd-b0e5-8408804d48fd"
#the spectre
# Convert to spectrogram
spectrogram = tfio.experimental.audio.spectrogram(
fade, nfft=512, window=512, stride=256)
plt.figure()
plt.imshow(tf.math.log(spectrogram).numpy())
# + colab={"base_uri": "https://localhost:8080/", "height": 536} id="3OWCWkRMQF-X" outputId="cc9a8e34-3c5f-4e41-93fd-72a1468fb7b4"
# Convert to mel-spectrogram
mel_spectrogram = tfio.experimental.audio.melscale(
spectrogram, rate=16000, mels=128, fmin=0, fmax=8000)
plt.figure()
plt.imshow(tf.math.log(mel_spectrogram).numpy())
# Convert to db scale mel-spectrogram
dbscale_mel_spectrogram = tfio.experimental.audio.dbscale(
mel_spectrogram, top_db=80)
plt.figure()
plt.imshow(dbscale_mel_spectrogram.numpy())
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="m4QL8GiOQQ4M" outputId="1185fb0c-d595-4779-ea43-1b415f81d59e"
#Frequency masking
freq_mask = tfio.experimental.audio.freq_mask(dbscale_mel_spectrogram, param=10)
plt.figure()
plt.imshow(freq_mask.numpy())
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="iGwj32JvRNeP" outputId="9351caf0-0bc0-459d-b744-0f2a08397426"
#Time mask
time_mask = tfio.experimental.audio.time_mask(dbscale_mel_spectrogram, param=10)
plt.figure()
plt.imshow(time_mask.numpy())
| audioPrep.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] deletable=true editable=true
# Machine Learning Challenge #2 from HackerEarth
#
# Data: Kickstarter Project Details
#
# Target: Project will successfully get funded or not
#
# Hardware Configuration: 8-core CPU, 16gb RAM
#
# https://www.hackerearth.com/challenge/competitive/machine-learning-challenge-2/problems/
# + deletable=true editable=true
import numpy as np
import pandas as pd
from matplotlib import pyplot
# + [markdown] deletable=true editable=true
# ## 1. Data Exploration
# + deletable=true editable=true
train = pd.read_csv('../../input/train.csv')
test = pd.read_csv('../../input/test.csv')
# + deletable=true editable=true
print(train.shape, test.shape)
# + deletable=true editable=true
# + deletable=true editable=true
train[:2]
# + [markdown] deletable=true editable=true
# ## 2. Data Preprocessing
# LabelEncoder and OneHotEncoder
# + deletable=true editable=true
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# + deletable=true editable=true
#merge train and test data for preprocessing
size_train = train.shape[0]
target = train.final_status.values
train.drop(['backers_count', 'final_status'], axis=1, inplace=True)
data = train.append(test, ignore_index=True)
# + deletable=true editable=true
#project_id - to int
data['project_id'] = data['project_id'].apply(lambda x: x[4:]).apply(int)
# + deletable=true editable=true
#disable_communication - label encode
le = {}
le['disable_communication'] = LabelEncoder()
data['disable_communication'] = le['disable_communication'].fit_transform(data['disable_communication'])
# + deletable=true editable=true
#country, currency - label & one hot encode
ohe = {}
for col in ['country', 'currency']:
#fit encoder
le[col] = LabelEncoder()
ohe[col] = OneHotEncoder(sparse=False)
#process train data
data[col] = le[col].fit_transform(data[col])
features = ohe[col].fit_transform(data[col].values.reshape(-1,1))
print(data.shape, ' + ', features.shape)
data = pd.concat([data, pd.DataFrame(features, columns=[col+'_'+str(n) for n in range(features.shape[1])])], axis=1)
print(data.shape)
# + [markdown] deletable=true editable=true
# ## 3. Feature Extraction
# + [markdown] deletable=true editable=true
# ### 3.1 Date Features
# + deletable=true editable=true
from datetime import datetime
for col in ['deadline', 'state_changed_at', 'created_at', 'launched_at']:
dt = data[col].apply(datetime.fromtimestamp)
data[col+'_year'] = dt.apply(lambda x: x.year)
data[col+'_month'] = dt.apply(lambda x: x.month)
data[col+'_day'] = dt.apply(lambda x: x.day)
data[col+'_hour'] = dt.apply(lambda x: x.hour)
data[col+'_minute'] = dt.apply(lambda x: x.minute)
data[col+'_second'] = dt.apply(lambda x: x.second)
# + deletable=true editable=true
data['ready_duration'] = data['launched_at'] - data['created_at']
# + deletable=true editable=true
data['run_duration'] = data['state_changed_at'] - data['launched_at']
# + deletable=true editable=true
data['goal_duration'] = data['deadline'] - data['launched_at']
# + deletable=true editable=true
data['goal_daily'] = np.round(data['goal'] / np.round((data['run_duration']/(60*60*24)), decimals=1), decimals=0).astype(int)
# + [markdown] deletable=true editable=true
# ### 3.2 Text Features
# + deletable=true editable=true
from sklearn.feature_extraction.text import CountVectorizer
# + deletable=true editable=true
period = data['launched_at_year'].apply(str) + data['launched_at_month'].apply(lambda x: str(x).zfill(2))
period_vectorizer = CountVectorizer()
period_result = period_vectorizer.fit_transform(period)
period_value = {}
for value in period.unique():
period_value[value] = (period == value).sum()
data['launched_at_ym_same'] = period.apply(lambda x: period_value[x])
# + deletable=true editable=true
period = data['deadline_year'].apply(str) + data['deadline_month'].apply(lambda x: str(x).zfill(2))
period_vectorizer = CountVectorizer()
period_result = period_vectorizer.fit_transform(period)
period_value = {}
for value in period.unique():
period_value[value] = (period == value).sum()
data['deadline_ym_same'] = period.apply(lambda x: period_value[x])
# + deletable=true editable=true
#text features
data['keywords'] = data['keywords'].apply(str).apply(lambda x: x.split('-'))
# + deletable=true editable=true
for col in ['name', 'desc', 'keywords']:
data[col+"_len"] = data[col].apply(str).apply(len)
data[col+"_count"] = data[col].apply(str).apply(lambda x: len(x.split(' ')))
# + [markdown] deletable=true editable=true
# ### 3.3 Dimentionality Reduction Features
# + deletable=true editable=true
from sklearn.decomposition import PCA, FastICA, TruncatedSVD
from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection
# + deletable=true editable=true
n_comp = 30
# tSVD
tsvd = TruncatedSVD(n_components=n_comp, random_state=420)
tsvd_results = tsvd.fit_transform(data.drop(['name', 'desc', 'keywords'], axis=1))
# PCA
pca = PCA(n_components=n_comp, random_state=420)
pca_results = pca.fit_transform(data.drop(['name', 'desc', 'keywords'], axis=1))
# ICA
ica = FastICA(n_components=n_comp, random_state=420)
ica_results = ica.fit_transform(data.drop(['name', 'desc', 'keywords'], axis=1))
# GRP
grp = GaussianRandomProjection(n_components=n_comp, eps=0.1, random_state=420)
grp_results = grp.fit_transform(data.drop(['name', 'desc', 'keywords'], axis=1))
# SRP
srp = SparseRandomProjection(n_components=n_comp, dense_output=True, random_state=420)
srp_results = srp.fit_transform(data.drop(["name", 'desc', 'keywords'], axis=1))
# Append decomposition components to datasets
for i in range(n_comp):
data['pca_' + str(i)] = pca_results[:, i]
data['ica_' + str(i)] = ica_results[:, i]
data['tsvd_' + str(i)] = tsvd_results[:, i]
data['grp_' + str(i)] = grp_results[:, i]
data['srp_' + str(i)] = srp_results[:, i]
# + [markdown] deletable=true editable=true
# ### 3.4 Bag of words
# + deletable=true editable=true
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation, SparsePCA
from sklearn.preprocessing import Normalizer
from sklearn.cluster import KMeans
# + [markdown] deletable=true editable=true
# #### 3.4.1 Term Vectorizer
# + deletable=true editable=true
vectorizer = {}
# + deletable=true editable=true
keywords = data['keywords'].apply(lambda x: ' '.join(x))
vectorizer['keywords'] = TfidfVectorizer(stop_words='english')#, ngram_range=(1,3))
keywords_result = vectorizer['keywords'].fit_transform(keywords)
# + deletable=true editable=true
vectorizer['desc'] = TfidfVectorizer(stop_words='english')#, ngram_range=(1,3))
desc_result = vectorizer['desc'].fit_transform(data['desc'].fillna(''))
# + deletable=true editable=true
print(keywords_result.shape, desc_result.shape)
# + [markdown] deletable=true editable=true
# #### 3.4.2 Topic Extraction
# + deletable=true editable=true
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# + [markdown] deletable=true editable=true
# ##### 3.4.2.1 KMeans Clustering
# + deletable=true editable=true
# Fit KMeans Cluster model - keywords, desc
cluster_svd = {
'keywords': TruncatedSVD(n_components=200),
'desc': TruncatedSVD(n_components=200) #2gb ram
}
cluster_svd_result = {
'keywords': cluster_svd['keywords'].fit_transform(keywords_result),
'desc': cluster_svd['desc'].fit_transform(desc_result)
}
# + deletable=true editable=true
cluster_norm = {
'keywords': Normalizer(copy=False),
'desc': Normalizer(copy=False)
}
cluster_norm_result = {
'keywords': cluster_norm['keywords'].fit_transform(cluster_svd_result['keywords']),
'desc': cluster_norm['desc'].fit_transform(cluster_svd_result['desc'])
}
cluster = {
'keywords': KMeans(n_clusters=40, init='k-means++', max_iter=300, n_init=10,
verbose=1, n_jobs=-1),
'desc': KMeans(n_clusters=40, init='k-means++', max_iter=300, n_init=10,
verbose=1, n_jobs=-1)
}
# + deletable=true editable=true
cluster_result = {}
cluster_result['keywords'] = cluster['keywords'].fit_transform(cluster_norm_result['keywords'])
# + deletable=true editable=true
cluster_result['desc'] = cluster['desc'].fit_transform(cluster_norm_result['desc'])
# + deletable=true editable=true
print("Top terms per cluster:")
c = 'desc' #'keywords
original_space_centroids = cluster_svd[c].inverse_transform(cluster[c].cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
terms = vectorizer[c].get_feature_names()
for i in range(40):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
# + deletable=true editable=true
#append to data
data = pd.concat([data, pd.DataFrame(cluster_result['keywords'],
columns=['cluster_k_'+str(n) for n in range(cluster_result['keywords'].shape[1])])], axis=1)
data = pd.concat([data, pd.DataFrame(cluster_result['desc'],
columns=['cluster_d_'+str(n) for n in range(cluster_result['desc'].shape[1])])], axis=1)
# + [markdown] deletable=true editable=true
# ##### 3.4.2.2 NMF Decomposition
# + deletable=true editable=true
# Fit the NMF model
nmf = {}
nmf_result = {}
nmf['keywords'] = NMF(n_components=40, random_state=420,
alpha=.1, l1_ratio=.5, verbose=1)
nmf_result['keywords'] = nmf['keywords'].fit_transform(keywords_result)
nmf['desc'] = NMF(n_components=40, random_state=420,
alpha=.1, l1_ratio=.5, verbose=1)
nmf_result['desc'] = nmf['desc'].fit_transform(desc_result)
# + deletable=true editable=true
#print_top_words(nmf['keywords'], vectorizer['keywords'].get_feature_names(), 100)
# + deletable=true editable=true
#append to data
data = pd.concat([data, pd.DataFrame(nmf_result['keywords'],
columns=['nmf_k_'+str(n) for n in range(nmf_result['keywords'].shape[1])])], axis=1)
data = pd.concat([data, pd.DataFrame(nmf_result['desc'],
columns=['nmf_d_'+str(n) for n in range(nmf_result['desc'].shape[1])])], axis=1)
# + [markdown] deletable=true editable=true
# ##### 3.4.2.3 LDA Decomposition
# + deletable=true editable=true
#discarded because not well performing
# Fit the LDA model (batch_size affects speed, use more data can allocate cpu efficiently)
#lda = {}
#lda_result = {}
'''lda['keywords'] = LatentDirichletAllocation(n_topics=40, max_iter=10, max_doc_update_iter=100,
learning_method='online', batch_size=keywords_result.shape[0],
random_state=420, n_jobs=-1, verbose=1)
lda_result['keywords'] = lda['keywords'].fit_transform(keywords_result)'''
'''lda['desc'] = LatentDirichletAllocation(n_topics=40, max_iter=10, max_doc_update_iter=100,
learning_method='online', batch_size=desc_result.shape[0],
learning_offset=50.,
random_state=420, n_jobs=-1, verbose=1)
lda_result['desc'] = lda['desc'].fit_transform(desc_result)'''
# + deletable=true editable=true
#print_top_words(lda['desc'], vectorizer['desc'].get_feature_names(), 100)
# + [markdown] deletable=true editable=true
# ## 4. Data Preparation - Memory Enhanced Concat
# + deletable=true editable=true
data.info()
# + deletable=true editable=true
#simple columns list
print(data.columns.values, data.columns.shape)
# + [markdown] deletable=true editable=true
# ### 4.1 Without Bag-of-Words
# + deletable=true editable=true
#split train & testdata (if skip bag of words, for feature exploration)
data_train = data[:size_train].drop(['name', 'desc', 'keywords'], axis=1).values
data_test = data[size_train:].drop(['name', 'desc', 'keywords'], axis=1).values
# + [markdown] deletable=true editable=true
# ### 4.2 With Bag-of-Words (Sparse)
# + deletable=true editable=true
#sparse matrix is GOD! use only 2gb vs > 30gb of dense array
from scipy import sparse
# + deletable=true editable=true
original = sparse.csr_matrix(data.drop(['name', 'desc', 'keywords'], axis=1).values)
# + deletable=true editable=true
concat = sparse.hstack([original, keywords_result, desc_result], format='csr')
# + deletable=true editable=true
#xgboost bug fix
data_final = sparse.hstack((concat, sparse.csr_matrix(np.ones((concat.shape[0], 1)))), format='csr')
# + deletable=true editable=true
#split train & testdata
data_train = data_final[:size_train]
data_test = data_final[size_train:]
# + deletable=true editable=true
print(data_train.shape, data_test.shape)
# + [markdown] deletable=true editable=true
# ### 4.3 Data Checkpoint
# + [markdown] deletable=true editable=true
# #### 4.3.1 To/From Without BoW
# + deletable=true editable=true
np.save('data_train_small', data_train)
np.save('data_test_small', data_test)
# + deletable=true editable=true
data_train = np.load('data_train_small')
data_test = np.load('data_test_small')
# + [markdown] deletable=true editable=true
# #### 4.3.2 To/From with Bow
# + deletable=true editable=true
#only in scipy 0.19.1
sparse.save_npz('data_train', data_train)
sparse.save_npz('data_test', data_test)
# + deletable=true editable=true
data_train = sparse.load_npz('data_train')
data_test = sparse.load_npz('data_test')
# + [markdown] deletable=true editable=true
# ## 5. Model Building
# + [markdown] deletable=true editable=true
# ### 5.1 XGBoost
# + deletable=true editable=true
import xgboost as xgb
# + [markdown] deletable=true editable=true
# #### 5.1.1 Model K-Fold Validation for initial exploration and performance checking
# + deletable=true editable=true
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
# + deletable=true editable=true
kf = StratifiedKFold(n_splits=2, shuffle=True, random_state=420)
models_xgb = {}
i = 0
for train_index, test_index in kf.split(data_train, target):
X_train = data_train[train_index]
X_val = data_train[test_index]
Y_train = target[train_index]
Y_val = target[test_index]
models_xgb[i] = xgb.XGBClassifier(max_depth=10, learning_rate=0.03, n_estimators=300,
subsample=0.8, colsample_bytree=0.8,
seed=420)
models_xgb[i].fit(X_train, Y_train, eval_metric='auc',
eval_set=[(X_train, Y_train), (X_val, Y_val)])
i += 1
# + deletable=true editable=true
sorted(zip(data.columns.values, models_xgb[0].feature_importances_), key=lambda x: x[1], reverse=True)
# + [markdown] deletable=true editable=true
# #### 5.1.2 Find Stopping Round with more data
# + deletable=true editable=true
#2. find stopping round
split_index = int(data_train.shape[0]*0.8)
X_train = data_train[:split_index]
X_val = data_train[split_index:]
Y_train = target[:split_index]
Y_val = target[split_index:]
# + deletable=true editable=true
model_xgb1 = xgb.XGBClassifier(max_depth=6, learning_rate=0.05, n_estimators=20000,
subsample=0.8, colsample_bytree=0.9, reg_alpha=65,
seed=420)
# + deletable=true editable=true
model_xgb1.fit(X_train, Y_train, eval_metric='auc',
eval_set=[(X_train, Y_train), (X_val, Y_val)],
early_stopping_rounds=100)
# + [markdown] deletable=true editable=true
# #### 5.1.3 Final Xgboost Model on All Data
# + deletable=true editable=true
model_xgb2 = xgb.XGBClassifier(max_depth=10, learning_rate=0.1, n_estimators=1150,
subsample=0.8, colsample_bytree=0.9,
seed=420)
# + deletable=true editable=true
model_xgb2.fit(data_train, target, eval_metric='auc',
eval_set=[(data_train, target)])
# + [markdown] deletable=true editable=true
# ### 5.2 LightGBM Model
# + deletable=true editable=true
import lightgbm as lgb
# + deletable=true editable=true
from sklearn.grid_search import GridSearchCV
grid_model = lgb.LGBMClassifier(reg_alpha=65, max_depth=10, learning_rate=0.1,
num_leaves=60, colsample_bytree=0.9, min_child_weight=3,
boosting_type='dart', max_bin=255, n_estimators=600,
subsample_for_bin=50000, objective=None, min_split_gain=0,
min_child_samples=10, subsample=0.8,
subsample_freq=1, reg_lambda=0,
seed=420)
grid_params = {
'max_depth':[4,6,8,10],
'learning_rate':[0.1,0.06,0.03,0.01,0.005,0.001],
}
grid_cv = list(StratifiedKFold(n_splits=2, shuffle=True, random_state=420).split(data_train, target))
grid = GridSearchCV(grid_model, grid_params, scoring='roc_auc',
cv=grid_cv, verbose=50)
grid.fit(data_train, target)
# + deletable=true editable=true
grid.best_params_
# + deletable=true editable=true
model_lgb = lgb.LGBMClassifier(reg_alpha=65, max_depth=10, learning_rate=0.1,
num_leaves=60, colsample_bytree=0.9, min_child_weight=3,
boosting_type='dart', max_bin=255, n_estimators=600,
subsample_for_bin=50000, objective=None, min_split_gain=0,
min_child_samples=10, subsample=0.8,
subsample_freq=1, reg_lambda=0,
seed=420)
# + deletable=true editable=true
model_lgb.fit(data_train, target, eval_metric='auc',
eval_set=[(data_train, target)],
early_stopping_rounds=100)
#[(X_train, Y_train), (X_val, Y_val)], [(data_train, target)],
# + [markdown] deletable=true editable=true
# ## 6. Make Prediction
# + [markdown] deletable=true editable=true
# ### 6.1 Single Model
# + deletable=true editable=true
Y_pred = model_lgb.predict(data_test)
# + [markdown] deletable=true editable=true
# ### 6.2 Ensemble Models
# + deletable=true editable=true
Y_pred1 = model_xgb2.predict_proba(data_test)
#Y_pred1 = pd.read_csv('pred_xgb.csv').values
# -
output1 = pd.DataFrame(Y_pred1)
output1.to_csv('pred_xgb.csv', index=False)
# + deletable=true editable=true
#Y_pred2 = model_lgb.predict_proba(data_test)
Y_pred2 = pd.read_csv('pred_lgb.csv').values
# + deletable=true editable=true
output2 = pd.DataFrame(Y_pred2)
output2.to_csv('pred_lgb.csv', index=False)
# + deletable=true editable=true
Y_pred = np.apply_along_axis(lambda x: 0 if x[0]>0.5 else 1, 1,
((Y_pred1 + Y_pred2)/2))
# + [markdown] deletable=true editable=true
# ### 6.3 Save Predictions
# + deletable=true editable=true
output = pd.DataFrame({'project_id': test['project_id'], 'final_status': Y_pred})
output.to_csv('submission26.csv', index=False, columns=['project_id', 'final_status'])
# + [markdown] deletable=true editable=true
# ## ANN
# + deletable=true editable=true
#a = data_train[10000:11000].todense()
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Embedding, LSTM
from keras.layers.advanced_activations import PReLU
# -
data_ann = np.random.random((1, 10000))#data_train[:100].todense()
target_ann = np.random.randint(2, size=(1, 1))
# + deletable=true editable=true
model = Sequential()
# + deletable=true editable=true
model.add(Embedding(10000, output_dim=256))
model.add(LSTM(128))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# -
model.fit(data_ann, target_ann, epochs=1, verbose=2)
# +
data_dim = 150000 #192327
timesteps = 8
num_classes = 2
# expected input data shape: (batch_size, timesteps, data_dim)
model = Sequential()
model.add(LSTM(32, return_sequences=True,
input_shape=(timesteps, data_dim))) # returns a sequence of vectors of dimension 32
model.add(LSTM(32, return_sequences=True)) # returns a sequence of vectors of dimension 32
model.add(LSTM(32)) # return a single vector of dimension 32
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# Generate dummy training data
x_train = np.random.random((1000, timesteps, data_dim))
y_train = np.random.randint(num_classes, size=(1000, 1))
# Generate dummy validation data
x_val = np.random.random((100, timesteps, data_dim))
y_val = np.random.randint(num_classes, size=(100, 1))
model.fit(x_train, y_train,
batch_size=64, epochs=5,
validation_data=(x_val, y_val))
# -
| Kickstarter-Funding-Successful-Projects/Model+Exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MACHINE LEARNING LAB - 5 ( naïve Bayesian Classifier )
# **5. Write a program to implement the naïve Bayesian classifier for a sample training data set stored as a .CSV file. Compute the accuracy of the classifier, considering few test data sets.**
# +
# import necessary libarities
import pandas as pd
from sklearn import tree
from sklearn.preprocessing import LabelEncoder
from sklearn.naive_bayes import GaussianNB
# load data from CSV
data = pd.read_csv('tennisdata.csv')
print("THe first 5 values of data is :\n",data.head())
# -
# obtain Train data and Train output
X = data.iloc[:,:-1]
print("\nThe First 5 values of train data is\n",X.head())
y = data.iloc[:,-1]
print("\nThe first 5 values of Train output is\n",y.head())
# +
# Convert then in numbers
le_outlook = LabelEncoder()
X.Outlook = le_outlook.fit_transform(X.Outlook)
le_Temperature = LabelEncoder()
X.Temperature = le_Temperature.fit_transform(X.Temperature)
le_Humidity = LabelEncoder()
X.Humidity = le_Humidity.fit_transform(X.Humidity)
le_Windy = LabelEncoder()
X.Windy = le_Windy.fit_transform(X.Windy)
print("\nNow the Train data is :\n",X.head())
# -
le_PlayTennis = LabelEncoder()
y = le_PlayTennis.fit_transform(y)
print("\nNow the Train output is\n",y)
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.20)
classifier = GaussianNB()
classifier.fit(X_train,y_train)
from sklearn.metrics import accuracy_score
print("Accuracy is:",accuracy_score(classifier.predict(X_test),y_test))
| 7th SEM/MACHINE LEARNING LABORATORY/5-Naive Bayesian Classifier/LAB 5.ipynb |
-- ---
-- jupyter:
-- jupytext:
-- text_representation:
-- extension: .hs
-- format_name: light
-- format_version: '1.5'
-- jupytext_version: 1.14.4
-- kernelspec:
-- display_name: Haskell
-- language: haskell
-- name: haskell
-- ---
{-# LANGUAGE DerivingVia #-}
import Control.Applicative (Alternative(..))
import Control.Monad.Trans.State.Strict
import Control.Monad (guard)
import Data.Char (isSpace, isDigit, ord)
newtype Parser a = Parser { runParser :: String -> [(a, String)] }
deriving (Functor, Applicative, Alternative, Monad) via (StateT String [])
-- +
anyChar :: Parser Char
anyChar = Parser $ \s -> case s of
[] -> empty
(c:cs) -> pure (c, cs)
satisfy :: (Char -> Bool) -> Parser Char
satisfy pred = do
c <- anyChar
guard $ pred c
pure c
char :: Char -> Parser Char
char = satisfy . (==)
string :: String -> Parser String
string [] = pure []
string (c:cs) = (:) <$> char c <*> string cs
-- +
sepBy :: Parser a -> Parser b -> Parser [a]
sepBy p sep = (p `sepBy1` sep) <|> pure []
sepBy1 :: Parser a -> Parser b -> Parser [a]
sepBy1 p sep = (:) <$> p <*> many (sep *> p)
-- +
chainl :: Parser a -> Parser (a -> a -> a) -> a -> Parser a
chainl p op a = (p `chainl1` op) <|> pure a
chainl1 :: Parser a -> Parser (a -> a -> a) -> Parser a
chainl1 p op = p >>= rest
where
rest a = (do
f <- op
b <- p
rest (f a b)) <|> pure a
chainr :: Parser a -> Parser (a -> a -> a) -> a -> Parser a
chainr p op a = (p `chainr1` op) <|> pure a
chainr1 :: Parser a -> Parser (a -> a -> a) -> Parser a
chainr1 p op = scan
where
scan = p >>= rest
rest a = (do
f <- op
b <- scan
rest (f a b)) <|> pure a
-- +
space :: Parser String
space = many (satisfy isSpace)
token :: Parser a -> Parser a
token p = p <* space
symbol :: String -> Parser String
symbol = token . string
apply :: Parser a -> String -> [(a, String)]
apply p = runParser (space *> p)
-- +
expr, term, factor, digit :: Parser Int
expr = term `chainl1` addop
term = factor `chainl1` mulop
factor = digit <|> (symbol "(" *> expr <* symbol ")")
digit = subtract (ord '0') . ord <$> token (satisfy isDigit)
addop, mulop :: Parser (Int -> Int -> Int)
addop = (symbol "+" *> pure (+)) <|> (symbol "-" *> pure (-))
mulop = (symbol "*" *> pure (*)) <|> (symbol "/" *> pure (div))
-- -
runParser expr "(1 + 2 * 4) / 3 + 5"
| deriving-via/Parser.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/srijan-singh/machine-learning/blob/main/Regression/Simple%20Regression/Model/Simple_Regression_M1_1ipynb.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="pLFqMInfACzW" outputId="3fff249e-21cf-4283-9bf7-9e8e66a38b3e"
#@title Install Libraries
# !pip install -U scikit-learn
# !wget -O FuelConsumption.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/FuelConsumptionCo2.csv
import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
import numpy as np
# %matplotlib inline
# + colab={"base_uri": "https://localhost:8080/", "height": 595} id="8H0SjkWM-FBZ" outputId="98f3c21c-5d81-4068-c6e9-5664a92b2ce1"
#@title Model M1.1
df = pd.read_csv("FuelConsumption.csv")
# selecting features and exploring the data
cdf = df[['ENGINESIZE', 'CYLINDERS', 'FUELCONSUMPTION_COMB', 'CO2EMISSIONS']]
# Distributing the Data
msk = np.random.rand(len(df)) < 0.8
train = cdf[msk]
test = cdf[~msk]
plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color='blue')
plt.xlabel("Engine Size")
plt.ylabel("Co2 Emission")
plt.show()
from sklearn import linear_model
regr = linear_model.LinearRegression()
train_x = np.asanyarray(train[['ENGINESIZE']])
train_y = np.asanyarray(train[['CO2EMISSIONS']])
regr.fit(train_x, train_y)
# Plotting the Graph
plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color="blue")
plt.plot(train_x, regr.coef_[0][0]*train_x + regr.intercept_[0], '-r')
plt.xlabel("Engine size")
plt.ylabel("Emission")
from sklearn.metrics import r2_score
test_x = np.asanyarray(test[['ENGINESIZE']])
test_y = np.asanyarray(test[["CO2EMISSIONS"]])
test_y_ = regr.predict(test_x)
print("Mean absolute error: %.2f"% np.mean(np.absolute(test_y_ - test_y)))
print("Residual sum of squares (MSE): %.2f" % np.mean((test_y_ - test_y)**2))
print("R2-score: %.2f" % r2_score(test_y_, test_y))
# + colab={"base_uri": "https://localhost:8080/"} id="MVg31nQrAfES" outputId="3aa591d6-3341-4944-dcd1-ee1e04194aa5"
#@title Predict
users_engn_siz = np.asanyarray([[float(input("Engine Size: "))]])
prediction = regr.predict(users_engn_siz)
print("Co2 Emission:",prediction[0][0])
| Regression/Simple Regression/Model/Simple_Regression_M1_1ipynb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.12 64-bit (''realworld-stylegan2-encoder-sLdVorT1'':
# pipenv)'
# language: python
# name: python3
# ---
# +
from e4e.encoder import Encoder4EditingMobileNet
from stylegan2.model import Generator
import torch
from collections import OrderedDict
import time
from argparse import Namespace
import time
import os
import sys
import numpy as np
from PIL import Image
import torch
import torchvision.transforms as transforms
# +
_ckpt = "./weights/e4e_best_model.pt"
img_tensor = torch.randn(1, 3, 256, 256)
latents_tensor = torch.randn(1, 18, 512)
encoder = Encoder4EditingMobileNet()
decoder = Generator(1024, 512, 8, channel_multiplier=2)
ckpt = torch.load(_ckpt, map_location="cpu")
opts = ckpt['opts']
# -
opts['checkpoint_path'] = _ckpt
opts
# +
# state_dict_decoder = OrderedDict()
state_dict_encoder = OrderedDict()
latent_avg = ckpt["latent_avg"]
state_dict_encoder["latent_avg"] = latent_avg
for k, v in ckpt["state_dict"].items():
if "encoder" in k:
state_dict_encoder[k[8:]] = v
# if "decoder" in k:
# state_dict_decoder[k[8:]] = v
# -
encoder.load_state_dict(state_dict_encoder)
# decoder.load_state_dict(state_dict_decoder)
encoder.eval()
# decoder.eval()
# +
trans = transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
# +
# Load input image and resize to the right format
original_image1 = Image.open('./test_images/custom/dongyun.jpg')
original_image1 = original_image1.convert("RGB")
original_image1.resize((256, 256))
input_image1 = original_image1
# Transformers input image into format needed for model from the pre-process pipeline
transformed_image1 = trans(input_image1)
input_image1
# +
with torch.no_grad():
tic = time.time()
result = encoder(transformed_image1.unsqueeze(0))
# randomize_noise=False,
# return_latents=True)
toc = time.time()
print('Inference took {:.4f} seconds.'.format(toc - tic))
# images, latents = encoder(inputs.to("cuda").float(),
# randomize_noise=False, return_latents=True)
# -
def normalization(data):
_range = np.max(data) - np.min(data)
return (data - np.min(data)) / _range
# +
output = result
output = (output.squeeze().transpose((1, 2, 0)) + 1) / 2
output[output < 0] = 0
output[output > 1] = 1
output = normalization(output) * 255
output = Image.fromarray(output.astype('uint8'))
output
# -
| torchonnx.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
from fetch import get_onedrive_directlink, fetch_beijing_AQ_data, fetch_flue_gas_data
# ## Beijing Air Quality Dataset
#
# https://archive.ics.uci.edu/ml/datasets/Beijing+Multi-Site+Air-Quality+Data
#
# This data set includes hourly air pollutants data from 12 nationally-controlled air-quality monitoring sites. The air-quality data are from the Beijing Municipal Environmental Monitoring Center. The meteorological data in each air-quality site are matched with the nearest weather station from the China Meteorological Administration. The time period is from March 1st, 2013 to February 28th, 2017. Missing data are denoted as NA.
df = fetch_beijing_AQ_data() # https://archive.ics.uci.edu/ml/machine-learning-databases/00501/PRSA2017_Data_20130301-20170228.zip
df.head()
# ## Flue Gas Data
#
# https://archive.ics.uci.edu/ml/datasets/Gas+Turbine+CO+and+NOx+Emission+Data+Set
#
# The dataset contains 36733 instances of 11 sensor measures aggregated over one hour (by means of average or sum) from a gas turbine located in Turkey's north western region for the purpose of studying flue gas emissions, namely CO and NOx (NO + NO2). The data comes from the same power plant as the dataset used for predicting hourly net energy yield. By contrast, this data is collected in another data range (01.01.2011 - 31.12.2015), includes gas turbine parameters (such as Turbine Inlet Temperature and Compressor Discharge pressure) in addition to the ambient variables. Note that the dates are not given in the instances but the data are sorted in chronological order. See the attribute information and relevant paper for details. Kindly follow the protocol mentioned in the paper (using the first three years' data for training/ cross-validation and the last two for testing) for reproducibility and comparability of works. The dataset can be well used for predicting turbine energy yield (TEY) using ambient variables as features.
df = fetch_flue_gas_data() # https://archive.ics.uci.edu/ml/machine-learning-databases/00551/pp_gas_emission.zip
df.head()
# ## [London Air Quality Dataset](../data/AQ/readme.md)
df = pd.read_csv(get_onedrive_directlink('https://1drv.ms/u/s!As2ibEui13xmlv592PW5gzCX4Xvywg?e=56ADuF'), compression='zip') # london air quality dataset
df
# ## Morgan's Geochem Dataset
# here I've added a direct link to a compressed dataset stored in Onedrive
df = pd.read_csv(get_onedrive_directlink('https://1drv.ms/u/s!As2ibEui13xmlv567184QOhGfNmQDQ?e=l1ghpZ'), compression='zip') # geochem dataset
df.drop(columns=df.columns[0], inplace=True) # an index snuck in here - we don't need it
df.head()
| notebooks/pm0-DatasetOptions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ColdCoffee21/Foundations-of-Data-Science/blob/master/Kaggle_MNIST.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="CC9SHo8-BnJq"
# First of all, you run the following code and upload your kaggle.json.
# + id="Ctfdr_26-CAz" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY> "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": "OK"}}, "base_uri": "https://localhost:8080/", "height": 97} outputId="b9458e94-0158-4abd-a59e-1859e5431f06"
from google.colab import files
files.upload()
# + [markdown] id="1V1Y-4-rBwAp"
# Next, run the following code to set the path.
# + id="FuWwsjuQ-xEk"
# !mkdir -p ~/.kaggle
# !cp kaggle.json ~/.kaggle/
# + [markdown] id="lI-4BtK-B2DX"
# Then, run the following to install kaggle!
# + id="5rMK91IdWumr" colab={"base_uri": "https://localhost:8080/", "height": 228} outputId="c9b56d62-2077-4de6-9232-5b532a2b70a1"
# !pip install kaggle
# + [markdown] id="bnnVUYUGB-Wy"
# Run the following to add access permission to yourself to download data.
#
# You can also see the list of datasets you can donwload!
# + id="3vLYGsdT_UkW" colab={"base_uri": "https://localhost:8080/", "height": 419} outputId="2c7d8a7f-a7fe-4ae5-cf23-18c4579824f0"
# !chmod 600 /root/.kaggle/kaggle.json
# !kaggle datasets list
# + [markdown] id="wCrjGDbiCS3s"
# As you see, you can download a lot of data here. This time we just download the data in MNIST competition. Run the following to do it.
# + id="Xq6axMUIClTl" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="f9936caf-2f96-40a4-e935-c2b62069859e"
# !kaggle competitions download -c digit-recognizer
# + [markdown] id="KOCfJVw2CqBx"
# You can find the code to install data in many competitions in data description section (shown as API ...).
#
# The following is basically the same as what you would do in kaggle kernel, except that we need to install keras as it is not available in default in google colab.
#
# + id="NvR3N51XDCe7" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3b21e3ce-ec7f-4764-d535-d76204005acd"
# !pip install -q keras
import keras
# + [markdown] id="lHtyGHdlDmM7"
# Note that you are in /content.
# + id="LvgHW8hkLM-e" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3636e499-3d72-41bc-9b08-5e64b9fc0e08"
import os
print(os.getcwd())
# + id="qNIr85ukHVCz"
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# import further libraries
import matplotlib.pyplot as plt
import seaborn as sns
# keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers.normalization import BatchNormalization
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, GlobalAveragePooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from keras.optimizers import SGD, RMSprop
from keras.callbacks import ReduceLROnPlateau
from sklearn.model_selection import train_test_split
# + [markdown] id="zNNDcrJrDrGJ"
# As you are in /content, loading data can be done in the following.
# + id="4ZVWuGGYMLkG"
# load training & test datasets
train = pd.read_csv("/content/train.csv")
test = pd.read_csv("/content/test.csv")
# + [markdown] id="qteBqO4yD0Lg"
# The rest is just modeling:D
# + id="jQQOPBUWRozT"
# pandas to numpy
y_train = train["label"]
X_train = train.drop(labels=["label"], axis=1)
del train
# normalize
X_train = X_train/255.0
test = test/255.0
# reshape the data so that the data
# represents (label, img_rows, img_cols, grayscale)
X_train = X_train.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1)
# one-hot vector as a label (binarize the label)
y_train = to_categorical(y_train, num_classes=10)
# + id="2BpbJqeUUcct" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="7d6866c1-b685-4105-daf8-e12afa74d7ff"
# Three steps to create a CNN
# 1. Convolution
# 2. Activation
# 3. Pooling
# Repeat Steps 1,2,3 for adding more hidden layers
# 4. After that make a fully connected network
# This fully connected network gives ability to the CNN
# to classify the samples
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(28,28,1)))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(64,(3, 3)))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
# Fully connected layer
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
# + id="TXAqTuCVU3Jx" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="91190ce2-5333-41bb-c17c-2a7251445416"
# compile model
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# + id="3mHxJo_0fEdn"
# cross validation
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.10, random_state=1220)
# + id="pa3URx-NVFiP"
# data argumentation
gen = ImageDataGenerator(rotation_range=8, width_shift_range=0.08, shear_range=0.3,
height_shift_range=0.08, zoom_range=0.08)
train_generator = gen.flow(X_train, y_train, batch_size=64)
# + id="PGhE1XvPfctn"
# learning rate
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001)
# + id="VFpqnS71Vw6F" colab={"base_uri": "https://localhost:8080/", "height": 1392} outputId="079c9125-ee23-4199-c367-7906a465a52f"
# model training
model.fit_generator(train_generator, epochs=30, validation_data = (X_val, y_val), verbose=2, steps_per_epoch=X_train.shape[0]/36,
callbacks=[learning_rate_reduction])
# + id="PfbXZzwAZf6r"
# model prediction on test data
predictions = model.predict_classes(test, verbose=0)
# + id="hNLSLschbtd8"
# make a submission file
submissions = pd.DataFrame({"ImageId": list(range(1,len(predictions)+1)),
"Label": predictions})
submissions.to_csv("my_submission.csv", index=False, header=True)
# + [markdown] id="hNXDKbOgGytz"
# Finally by running the following command, you can submit your file to kaggle from google colab!
# + id="PcPaRH1BcfJr"
# submit the file to kaggle
# !kaggle competitions submit digit-recognizer -f my_submission.csv -m "Yeah! I submit my file through the Google Colab!"
# + [markdown] id="u70KYSWiG89m"
# Now you can go back to Kaggle to see where you are on the Leaderboard:D Enjoy kaggle more with google colab!
| Kaggle_MNIST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Recommending Movies
#
# The [MovieLens 20M](http://files.grouplens.org/datasets/movielens/ml-20m-README.html) dataset contains 20 million user ratings from 1 to 5 of thousands of movies. In this demo we'll build a simple recommendation system which will use this data to suggest 25 movies based on a seed movie you provide.
# The notebook cells below use `pymldb`'s `Connection` class to make [REST API](../../../../doc/#builtin/WorkingWithRest.md.html) calls. You can check out the [Using `pymldb` Tutorial](../../../../doc/nblink.html#_tutorials/Using pymldb Tutorial) for more details.
from pymldb import Connection
mldb = Connection()
# ## Download the MovieLens 20M data
#
# We'll start by using some command-line tools to download and decompress the data.
# + language="bash"
# mkdir -p /mldb_data/data
# curl "file://mldb/mldb_test_data/ml-20m.zip" 2>/dev/null > /mldb_data/data/ml-20m.zip
# unzip /mldb_data/data/ml-20m.zip -d /mldb_data/data
# + language="bash"
# head /mldb_data/data/ml-20m/README.txt
# + language="bash"
# head /mldb_data/data/ml-20m/ratings.csv
# -
# ## Load the data into MLDB
#
# See the [Loading Data Tutorial](../../../../doc/nblink.html#_tutorials/Loading Data Tutorial) guide for more details on how to get data into MLDB.
#
# Here we load a text file and use the `pivot` aggregator to create a sparse matrix representation of the ratings.
# +
# %%time
print mldb.put('/v1/procedures/import_mvlns', {
"type": "import.text",
"params": {
"dataFileUrl":"file:///mldb_data/data/ml-20m/ratings.csv",
"outputDataset": "mvlns_ratings_csv",
"runOnCreation": True
}
})
print mldb.put('/v1/procedures/process_mvlns', {
"type": "transform",
"params": {
"inputData": """
select pivot(movieId, rating) as *
named userId
from mvlns_ratings_csv
group by userId
""",
"outputDataset": "mvlns_ratings",
"runOnCreation": True
}
})
# -
# ## Take a peek at the dataset
#
# We'll use the [Query API](../../../../doc/#builtin/sql/QueryAPI.md.html). Each row is a user, each column is a movie, and the cell value is the rating the user gave the movie.
mldb.query("select * from mvlns_ratings limit 3")
# ## Singular Value Decomposition (SVD)
#
# We will create and run a [Procedure](../../../../doc/#builtin/procedures/Procedures.md.html) of type [`svd.train`](../../../../doc/#builtin/procedures/Svd.md.html). This creates an `embedding` dataset where each row is a movie and the columns represent coordinates in a 100-dimensional space. Similar movies end up closer to each other than dissimilar movies.
print mldb.put('/v1/procedures/mvlns_svd', {
"type" : "svd.train",
"params" : {
"trainingData" : "select COLUMN EXPR (where rowCount() > 3) from mvlns_ratings",
"columnOutputDataset" : "mvlns_svd_embedding",
"modelFileUrl": "file://models/mvlns.svd",
"functionName": "mvlns_svd_embedder",
"runOnCreation": True
}
})
# ## Explore the results!
#
# Our dataset has `movieId`s but humans think about movie names so we'll load up the movie names in a dataset.
# +
from ipywidgets import interact, interact_manual
from uuid import uuid4
print mldb.put('/v1/procedures/import_movies', {
"type": "import.text",
"params": {
"dataFileUrl":"file:///mldb_data/data/ml-20m/movies.csv",
"outputDataset": "movies",
"select": "title, movieId",
"named": "movieId",
"runOnCreation": True
}
})
# -
# A simple search function to find all movies (and corresponding `movieId`s) whose names contain a string.
@interact
def movie_search(x = "toy story"):
return mldb.query("select title from movies where regex_match(lower(title), '.*%s.*')" % x.strip().lower())
# Now let's create a dataset to hold user preferences, and a simple function to simulate a user rating movies they like and movies they dislike, based on the `movie_search` function above.
# +
print mldb.put("/v1/datasets/mvlns_user_prefs", {"type": "sparse.mutable"})
print mldb.put("/v1/functions/preferences", {
"type": "sql.query",
"params": {
"query": "select {*} as p from mvlns_user_prefs where rowName()=$user"
}
})
def save_prefs(user_id, likes, dislikes):
for rating, search_terms in zip([5,1],[likes, dislikes]):
for x in search_terms.split(","):
if len(x) > 3:
mldb.post("/v1/datasets/mvlns_user_prefs/rows", {
"rowName":user_id,
"columns": [[str(m), rating, 0] for m in movie_search(x).index]
})
mldb.post("/v1/datasets/mvlns_user_prefs/commit", {})
save_prefs("janedoe", "Toy Story", "Terminator")
mldb.query("select preferences({ user: 'janedoe' })[p] as *")
# -
# With all that done, we can now build a recommendation engine out of a simple SQL query by mapping a user's preferences into the same space as the movie embeddings (i.e. embedding the user's preferences) and looking for the nearest movies.
# +
print mldb.put("/v1/functions/nearest_movies", {
"type": "embedding.neighbors",
"params": {
"dataset": "mvlns_svd_embedding",
"defaultNumNeighbors": 25,
"columnName": "embedding"
}
})
print mldb.put("/v1/functions/recommendations", {
"type": "sql.query",
"params": {
"query": """
select nearest_movies({
coords: mvlns_svd_embedder({
row: preferences({ user: $user })[p]
})[embedding]
})[distances] as r
"""
}
})
# -
# Here's a simple function which lets you simulate the results of liking and disliking certain movies and getting back the resulting recommendations.
# +
def recommend(likes="Toy Story, Terminator", dislikes="Star Trek"):
# here we simulate a new user saving these preferences
user_id = str(uuid4())
save_prefs(user_id, likes, dislikes)
# we can then run an SQL query to:
# - retrieve recommendations
# - transpose and join them to movies to get titles
# - exclude the already-rated movies from the result
return mldb.query("""
select m.title
named m.movieId
from
transpose(( select recommendations({ user: '%(user)s' }) )) as r
join movies as m on r.rowPathElement(2) = m.rowPathElement(0)
where m.movieId not in (keys of preferences({ user: '%(user)s' })[p])
order by r.result
""" % dict(user=user_id))
recommend(likes="Toy Story, Terminator", dislikes="Star Trek")
# -
# Here's an interactive form that lets you play with this function to see if you agree with the recommendations!
#
# NOTE: the interactive part of this demo only works if you're running this Notebook live, not if you're looking at a static copy on http://docs.mldb.ai. See the documentation for [Running MLDB](../../../../doc/#builtin/Running.md.html).
interact_manual(recommend)
# ## Where to next?
#
# Check out the other [Tutorials and Demos](../../../../doc/#builtin/Demos.md.html).
| container_files/demos/Recommending Movies.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math
# %matplotlib inline
# +
#input file fields as they are saved into the UKF output file
fig, ax = plt.subplots(2,1, figsize=(16,10))
ax1 = ax[0]
ax2 = ax[1]
time_from = 0
time_to = 1500
my_cols=['timestep','Kp','Ki', 'Kd', 'cte',
'total_error','steering','throttle','speed','distance']
for one_try, name in zip(['1-0-0', '05-0-0', '025-0-0', '0125-0-0', '00625-0-0'],
['Kp = 1.0', 'Kp = 0.5', 'Kp = 0.25', 'Kp = 0.125', 'Kp = 0.0625']):
filename = 'data_record'
file = filename + one_try + '.txt'
with open('build/'+file) as f:
table_pid_output = pd.read_table(f, sep='\t', header=0,
names=my_cols, lineterminator='\n')
ax1.plot(table_pid_output['timestep'][time_from:time_to],
table_pid_output['cte'][time_from:time_to], label=name)
square_values = table_pid_output['cte']**2
ax2.plot(table_pid_output['timestep'][time_from:time_to],
square_values[time_from:time_to].cumsum(), label=name)
ax1.set_title('CTE evolution for different Kp params', fontsize =14)
ax1.legend(fontsize =12)
ax2.set_title('Accumulative square CTE evolutions for different Kp params', fontsize =14)
ax2.legend(fontsize =12)
plt.show()
fig.savefig('report_images/'+'Kp-params')
# +
fig, ax = plt.subplots(2,1, figsize=(16,10))
ax1 = ax[0]
ax2 = ax[1]
time_from = 0
time_to = 1500
my_cols=['timestep','Kp','Ki', 'Kd', 'cte',
'total_error','steering','throttle','speed','distance']
for one_try, name in zip(['0125-0-1', '0125-0-05', '0125-0-2', '0125-0-4'],
['Kp / Kd = 0.125 / 1.0', 'Kp / Kd = 0.125 / 0.5',
'Kp / Kd = 0.125 / 2.0', 'Kp / Kd = 0.125 / 4.0']):
filename = 'data_record'
file = filename + one_try + '.txt'
with open('build/'+file) as f:
table_pid_output = pd.read_table(f, sep='\t', header=0,
names=my_cols, lineterminator='\n')
ax1.plot(table_pid_output['timestep'][time_from:time_to],
table_pid_output['cte'][time_from:time_to], label=name)
square_values = table_pid_output['cte']**2
ax2.plot(table_pid_output['timestep'][time_from:time_to],
square_values[time_from:time_to].cumsum(), label=name)
ax1.set_title('CTE evolution for different Kd params', fontsize =14)
ax1.legend(fontsize =12)
ax2.set_title('Accumulative square CTE evolutions for different Kd params', fontsize =14)
ax2.legend(fontsize =12)
plt.show()
fig.savefig('report_images/'+'Kd-params')
# +
fig, ax = plt.subplots(2,1, figsize=(16,10))
ax1 = ax[0]
ax2 = ax[1]
time_from = 0
time_to = 1500
my_cols=['timestep','Kp','Ki', 'Kd', 'cte',
'total_error','steering','throttle','speed','distance']
for one_try, name in zip(['0125-1-1', '0125-01-1', '0125-001-1', '0125-0001-1'],
['Kp / Ki / Kd = 0.125 / 1 / 1', 'Kp / Ki / Kd = 0.125 / 0.1 / 1',
'Kp / Ki / Kd = 0.125 / 0.01 / 1', 'Kp / Ki / Kd = 0.125 / 0.001 / 1']):
filename = 'data_record'
file = filename + one_try + '.txt'
with open('build/'+file) as f:
table_pid_output = pd.read_table(f, sep='\t', header=0,
names=my_cols, lineterminator='\n')
ax1.plot(table_pid_output['timestep'][time_from:time_to],
table_pid_output['cte'][time_from:time_to],
label=name)
square_values = table_pid_output['cte']**2
ax2.plot(table_pid_output['timestep'][time_from:time_to],
square_values[time_from:time_to].cumsum(), label=name)
ax1.set_title('CTE evolution for different Ki params', fontsize =14)
ax1.legend(fontsize =12)
ax2.set_title('Accumulative square CTE evolutions for different Ki params', fontsize =14)
ax2.legend(fontsize =12)
plt.show()
fig.savefig('report_images/'+'Ki-params')
# +
fig, ax = plt.subplots(2,1, figsize=(16,10))
ax1 = ax[0]
ax2 = ax[1]
time_from = 0
time_to = 1500
my_cols=['timestep','Kp','Ki', 'Kd', 'cte',
'total_error','steering','throttle','speed','distance']
for one_try, name in zip(['0125-0-1', '0125-0001-1', 'optimal2'],
['Kp / Ki / Kd = 0.125 / 0 / 1',
'Kp / Ki / Kd = 0.125 / 0.001 / 1',
'Kp / Ki / Kd = 0.249 / 0.000303 / 2.45']):
filename = 'data_record'
file = filename + one_try + '.txt'
with open('build/'+file) as f:
table_pid_output = pd.read_table(f, sep='\t', header=0,
names=my_cols, lineterminator='\n')
ax1.plot(table_pid_output['timestep'][time_from:time_to],
table_pid_output['cte'][time_from:time_to], label=name)
square_values = table_pid_output['cte']**2
ax2.plot(table_pid_output['timestep'][time_from:time_to],
square_values[time_from:time_to].cumsum(), label=name)
ax1.set_title('CTE evolution for optimal K params', fontsize =14)
ax1.legend(fontsize =12)
ax2.set_title('Accumulative square CTE evolutions for optimal K params', fontsize =14)
ax2.legend(fontsize =12)
plt.show()
fig.savefig('report_images/'+'optimal-params')
# +
filename = 'data_record'
file = filename + '0125-0001-1' + '.txt'
my_cols=['timestep','Kp','Ki', 'Kd', 'cte',
'total_error','steering','throttle','speed','distance']
with open('build/'+file) as f:
table_pid_output = pd.read_table(f, sep='\t', header=0, names=my_cols, lineterminator='\n')
fig, ax = plt.subplots(2,1, figsize=(16,10))
ax1 = ax[0]
ax2 = ax[1]
ax1.plot(table_pid_output['timestep'], table_pid_output['cte'], alpha=0.9, c = 'black', label='Proportional')
ax1.plot(table_pid_output['timestep'], table_pid_output['cte'].cumsum(), alpha=0.9, c = 'red', label='Integral')
ax1.plot(table_pid_output['timestep'], table_pid_output['cte'].diff(), alpha=0.9, c = 'green', label='Derivative')
ax1.set_title('P / I / D Error - Magnitude comparison', fontsize =14)
ax1.legend(fontsize =12) #, loc = 'lower right')
ax2.plot(table_pid_output['timestep'], table_pid_output['cte'], alpha=0.9, c = 'black', label='Proportional')
ax2.plot(table_pid_output['timestep'], table_pid_output['cte'].diff(), alpha=0.9, c = 'green', label='Derivative')
ax2.set_title('P / D Error - Magnitude comparison', fontsize =14)
ax2.legend(fontsize =12) #, loc = 'lower right')
plt.show()
fig.savefig('report_images/'+'PID-magnitude-comparison')
| PID-project-visualizations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import genemunge
# -
# # Searching the gene ontology for relevant genes
# +
# set up an object to search the gene ontology
searcher = genemunge.search.Searcher()
# get all of the GO identifiers associated with the word 'immune'
# set exact = False to walk through the ontology and grab all child terms
immune_identifiers = searcher.keyword_search(['immune'], exact=False)
# get all of the genes assigned to the immune_identifiers
immune_genes = searcher.get_genes(immune_identifiers)
# get a list of housekeeping genes
housekeeping = searcher.get_housekeeping_genes()
# keep all of the immune related genes that are not housekeeping genes
variable_immune_genes = list(set(immune_genes) - set(housekeeping))
print('Identified {} variable immune related genes'.format(len(variable_immune_genes)))
# -
# # Obtaining statistics about gene expression
# set up an object to describe genes
describer = genemunge.describe.Describer('symbol')
# +
# find the absolute and relative expression levels of each gene of interest
expression_data = pd.DataFrame(index=variable_immune_genes,
columns=['expression', 'log ratio'])
# get the expression levels in healthy tissue (in TPM units)
stats = describer.tissue_stats['median'].reindex(variable_immune_genes)
expression_data['expression'] = stats['Small Intestine']
# control the log with a small pseudocount
pseudocount = 1.0
expression_data['log ratio'] = np.log10(
(pseudocount + stats['Small Intestine']) / (pseudocount + stats['Stomach']))
# +
# plot the gene expression fraction
fig, ax = plt.subplots(figsize=(12, 8))
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
ax.scatter(expression_data['expression'], expression_data['log ratio'])
ax.set_xlabel('Small Intestine expression [TPM]', fontsize=20)
ax.set_ylabel('log10 ratio (Small Intestine / Stomach)', fontsize=20)
ax.set_xscale('log')
ax.set_xlim([0.001, 1e5])
ax.set_ylim([-3, 4])
plt.savefig('small_intestine_example.png', bbox_inches='tight', dpi=300)
plt.show()
# -
# # Converting between gene identifier types
# +
# set up an object to convert from ensembl to symbol
ensembl_to_symbol = genemunge.convert.IDConverter('ensembl_gene_id', 'symbol')
# convert the immune identifiers to gene symbols
variable_immune_symbols = ensembl_to_symbol.convert_list(variable_immune_genes)
# reset the index of the dataframe
expression_data.index = variable_immune_symbols
# -
target_genes = expression_data[expression_data['log ratio'] > 1]
target_genes = target_genes.sort_values(by=['expression'], ascending=False)
target_genes
# # Getting information about a specific gene
# +
# set up an object to describe genes
describer = genemunge.describe.Describer('symbol')
# get some basic information about one of the immune genes
print(json.dumps(describer.get_gene_info(target_genes.index[0]), indent=2))
# make a plot of the expression of one of the immune genes across tissues from GTEx
describer.plot_tissue_expression(target_genes.index[0], sortby='median',
filename='gene_expr_example.png')
# -
# # Normalization
# set up an object to normalize genes
normalizer = genemunge.normalize.Normalizer('ensembl_gene_id')
trans_factor_ids = searcher.get_transcription_factors()
# +
# make some fake brain expression data
# get the expression levels in healthy tissue (in TPM units)
mean_clr = describer.tissue_stats['mean_clr'].reindex(trans_factor_ids)
std_clr = describer.tissue_stats['std_clr'].reindex(trans_factor_ids)
brain_mean = mean_clr['Brain'].values
brain_std = std_clr['Brain'].values
num_samples = 10000
# draw samples from a gaussian approximation
draws = pd.DataFrame(np.multiply(brain_std, np.random.randn(num_samples, len(brain_mean))) + brain_mean,
index=np.arange(num_samples), columns=trans_factor_ids)
# make a tissues series with all 'Brain'
tissues = pd.Series(np.full((num_samples,),'Brain'), index=np.arange(num_samples))
# compute z-scores
z_scores = normalizer.z_score_from_clr(draws, tissues, trans_factor_ids)
# compute ternarized data
ternary = normalizer.ternary_from_clr(draws, tissues, 2.0, trans_factor_ids)
# -
| examples/example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib notebook
import matplotlib.pyplot as plt
x = [1, 2, 3]
y = [2, 4, 3]
x2 = [1, 2, 3]
y2 = [7, 7, 14]
plt.plot(x, y, label='First Line')
plt.plot(x2, y2, label='Second Line')
plt.xlabel('X Label (Plot Number)')
plt.ylabel('Y Label (The Data)')
plt.title('My Cool Graph')
plt.legend()
plt.show()
# +
# %matplotlib notebook
x = [1, 2, 3, 4, 5]
y = [2, 4, 3, 1, 7]
plt.bar(x, y, label='First Bars')
plt.xlabel('X Label (Plot Number)')
plt.ylabel('Y Label (The Data)')
plt.title('My Cool Graph')
plt.legend()
plt.show()
# +
x = [1, 3, 5, 7, 9]
y = [2, 4, 3, 1, 7]
x2 = [2, 4, 6, 8, 10]
y2 = [2, 4, 4, 2, 6]
plt.bar(x, y, label='First Bars')
plt.bar(x2, y2, label='Second Bars')
plt.xlabel('X Label (Plot Number)')
plt.ylabel('Y Label (The Data)')
plt.title('My Cool Graph')
plt.legend()
plt.show()
# +
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y = [9, 7, 3, 5, 2, 2, 1, 1, 6, 10]
plt.scatter(x, y)
plt.xlabel('This is X')
plt.ylabel('This is Y')
plt.title('My Cool Scatter Plot')
plt.show()
# +
days = [1, 2, 3, 4, 5]
emails = [1, 1, 2, 3, 1]
codereviews = [2, 1, 1, 2, 3]
bugreports = [0, 0, 1, 0, 2]
internet = [3, 4, 2, 2, 5]
plt.stackplot(days, emails, codereviews, bugreports, internet,
labels=['emails', 'codereviews', 'bugreports', 'internet'])
plt.xlabel('This is X')
plt.ylabel('This is Y')
plt.title('My Cool Stackplot')
plt.legend()
plt.show()
# +
days = [1, 2, 3, 4, 5]
emails = [1, 1, 2, 3, 1]
codereviews = [2, 1, 1, 2, 3]
bugreports = [0, 0, 1, 0, 2]
internet = [3, 4, 2, 2, 5]
slices = [sum(emails), sum(codereviews), sum(bugreports), sum(internet)]
tasks = ['emails', 'codereviews', 'bugreports', 'internet']
plt.pie(slices, labels=tasks)
plt.title('My Cool Pie Chart')
plt.legend()
plt.show()
# +
days = [1, 2, 3, 4, 5]
emails = [1, 1, 2, 3, 1]
codereviews = [2, 1, 1, 2, 3]
bugreports = [0, 0, 1, 0, 2]
internet = [3, 4, 2, 2, 5]
slices = [sum(emails), sum(codereviews), sum(bugreports), sum(internet)]
tasks = ['emails', 'codereviews', 'bugreports', 'internet']
plt.pie(slices, labels=tasks, startangle=90, autopct='%1.1f%%')
plt.title('My Cool Pie Chart')
plt.legend()
plt.show()
# +
days = [1, 2, 3, 4, 5]
emails = [1, 1, 2, 3, 1]
codereviews = [2, 1, 1, 2, 3]
bugreports = [0, 0, 1, 0, 2]
internet = [3, 4, 2, 2, 5]
slices = [sum(emails), sum(codereviews), sum(bugreports), sum(internet)]
tasks = ['emails', 'codereviews', 'bugreports', 'internet']
plt.pie(slices, labels=tasks, startangle=90,
autopct='%1.1f%%', explode=(0, 0, 0.2, 0))
plt.title('My Cool Pie Chart')
plt.legend()
plt.show()
# -
import matplotlib.pyplot as plt
import numpy as np
x=list(range(10))
def fun(k):
return np.sin(k)
y=list(map(fun,x))
plt.plot(x,y,'-.')
plt.show()
# +
import matplotlib.pyplot as plt
import numpy as np
Fs = 8000
f = 5
sample = 8000
x = np.arange(sample)
y = np.sin(2 * np.pi * f * x / Fs)
plt.plot(x, y)
plt.xlabel('sample(n)')
plt.ylabel('voltage(V)')
plt.show()
# -
| Doc/Jupyter Notebook/Test/Plot_Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sanikamal/time-series-analysis-and-forecasting-atoz/blob/master/time_windows.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="Ou0PGp_4icRo"
# # Time windows
# + [markdown] colab_type="text" id="vidayERjaO5q"
# ## Setup
# + colab_type="code" id="gqWabzlJ63nL" colab={}
import tensorflow as tf
# + [markdown] colab_type="text" id="ViWVB9qd8OIR"
# ## Time Windows
#
# First, we will train a model to forecast the next step given the previous 20 steps, therefore, we need to create a dataset of 20-step windows for training.
# + colab_type="code" id="bgJkwtq88OIS" colab={}
dataset = tf.data.Dataset.range(10)
for val in dataset:
print(val.numpy())
# + colab_type="code" id="ad8C65JV8OIT" colab={}
dataset = tf.data.Dataset.range(10)
dataset = dataset.window(5, shift=1)
for window_dataset in dataset:
for val in window_dataset:
print(val.numpy(), end=" ")
print()
# + colab_type="code" id="AQtmODsi8OIU" colab={}
dataset = tf.data.Dataset.range(10)
dataset = dataset.window(5, shift=1, drop_remainder=True)
for window_dataset in dataset:
for val in window_dataset:
print(val.numpy(), end=" ")
print()
# + colab_type="code" id="kTRHiWxi8OIW" colab={}
dataset = tf.data.Dataset.range(10)
dataset = dataset.window(5, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(5))
for window in dataset:
print(window.numpy())
# + colab_type="code" id="iPsQbWHb8OIX" colab={}
dataset = tf.data.Dataset.range(10)
dataset = dataset.window(5, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(5))
dataset = dataset.map(lambda window: (window[:-1], window[-1:]))
for x, y in dataset:
print(x.numpy(), y.numpy())
# + colab_type="code" id="hzp7RD6_8OIY" colab={}
dataset = tf.data.Dataset.range(10)
dataset = dataset.window(5, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(5))
dataset = dataset.map(lambda window: (window[:-1], window[-1:]))
dataset = dataset.shuffle(buffer_size=10)
for x, y in dataset:
print(x.numpy(), y.numpy())
# + colab_type="code" id="y70nV0EI8OIZ" colab={}
dataset = tf.data.Dataset.range(10)
dataset = dataset.window(5, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(5))
dataset = dataset.map(lambda window: (window[:-1], window[-1:]))
dataset = dataset.shuffle(buffer_size=10)
dataset = dataset.batch(2).prefetch(1)
for x, y in dataset:
print("x =", x.numpy())
print("y =", y.numpy())
# + colab_type="code" id="1tl-0BOKkEtk" colab={}
def window_dataset(series, window_size, batch_size=32,
shuffle_buffer=1000):
dataset = tf.data.Dataset.from_tensor_slices(series)
dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))
dataset = dataset.shuffle(shuffle_buffer)
dataset = dataset.map(lambda window: (window[:-1], window[-1]))
dataset = dataset.batch(batch_size).prefetch(1)
return dataset
| time_windows.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,md
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Welcome to Computer Vision! #
#
# Have you ever wanted to teach a computer to see? In this course, that's exactly what you'll do!
#
# <!-- <center> -->
# <!-- <\!-- <img src="./images/1-header.png" width="1600" alt="Header illustration: a line of cars."> -\-> -->
# <!-- <img src="" width="1600" alt="Header illustration"> -->
# <!-- </center> -->
#
# In this course, you'll:
# - Use modern deep-learning networks to build an **image classifier** with Keras
# - Design your own **custom convnet** with reusable blocks
# - Learn the fundamental ideas behind visual **feature extraction**
# - Master the art of **transfer learning** to boost your models
# - Utilize **data augmentation** to extend your dataset
#
# If you've taken the *Introduction to Deep Learning* course, you'll know everything you need to be successful.
#
# Now let's get started!
# # Introduction #
#
# This course will introduce you to the fundamental ideas of computer vision. Our goal is to learn how a neural network can "understand" a natural image well-enough to solve the same kinds of problems the human visual system can solve.
#
# The neural networks that are best at this task are called **convolutional neural networks** (Sometimes we say **convnet** or **CNN** instead.) Convolution is the mathematical operation that gives the layers of a convnet their unique structure. In future lessons, you'll learn why this structure is so effective at solving computer vision problems.
#
# We will apply these ideas to the problem of **image classification**: given a picture, can we train a computer to tell us what it's a picture *of*? You may have seen [apps](https://identify.plantnet.org/) that can identify a species of plant from a photograph. That's an image classifier! In this course, you'll learn how to build image classifiers just as powerful as those used in professional applications.
#
# While our focus will be on image classification, what you'll learn in this course is relevant to every kind of computer vision problem. At the end, you'll be ready to move on to more advanced applications like [generative adversarial networks](https://www.kaggle.com/tags/gan) and [image segmentation](https://www.kaggle.com/tags/object-segmentation).
# # The Convolutional Classifier #
#
# A convnet used for image classification consists of two parts: a **convolutional base** and a **dense head**.
#
# <center>
# <!-- <img src="./images/1-parts-of-a-convnet.png" width="600" alt="The parts of a convnet: image, base, head, class; input, extract, classify, output.">-->
# <img src="https://i.imgur.com/U0n5xjU.png" width="600" alt="The parts of a convnet: image, base, head, class; input, extract, classify, output.">
# </center>
#
# The base is used to **extract the features** from an image. It is formed primarily of layers performing the convolution operation, but often includes other kinds of layers as well. (You'll learn about these in the next lesson.)
#
# The head is used to **determine the class** of the image. It is formed primarily of dense layers, but might include other layers like dropout.
#
# What do we mean by visual feature? A feature could be a line, a color, a texture, a shape, a pattern -- or some complicated combination.
#
# The whole process goes something like this:
#
# <center>
# <!-- <img src="./images/1-extract-classify.png" width="600" alt="The idea of feature extraction."> -->
# <img src="https://i.imgur.com/UUAafkn.png" width="600" alt="The idea of feature extraction.">
# </center>
#
# The features actually extracted look a bit different, but it gives the idea.
# # Training the Classifier #
#
# The goal of the network during training is to learn two things:
# 1. which features to extract from an image (base),
# 2. which class goes with what features (head).
#
# These days, convnets are rarely trained from scratch. More often, we **reuse the base of a pretrained model**. To the pretrained base we then **attach an untrained head**. In other words, we reuse the part of a network that has already learned to do *1. Extract features*, and attach to it some fresh layers to learn *2. Classify*.
#
# <center>
# <!-- <img src="./images/1-attach-head-to-base.png" width="400" alt="Attaching a new head to a trained base."> -->
# <img src="https://imgur.com/E49fsmV.png" width="400" alt="Attaching a new head to a trained base.">
# </center>
#
# Because the head usually consists of only a few dense layers, very accurate classifiers can be created from relatively little data.
#
# Reusing a pretrained model is a technique known as **transfer learning**. It is so effective, that almost every image classifier these days will make use of it.
# # Example - Train a Convnet Classifier #
#
# Throughout this course, we're going to be creating classifiers that attempt to solve the following problem: is this a picture of a *Car* or of a *Truck*? Our dataset is about 10,000 pictures of various automobiles, around half cars and half trucks.
# ## Step 1 - Load Data ##
#
# This next hidden cell will import some libraries and set up our data pipeline. We have a training split called `ds_train` and a validation split called `ds_valid`.
# +
#$HIDE_INPUT$
# Imports
import os, warnings
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing import image_dataset_from_directory
# Reproducability
def set_seed(seed=31415):
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
set_seed(31415)
# Set Matplotlib defaults
plt.rc('figure', autolayout=True)
plt.rc('axes', labelweight='bold', labelsize='large',
titleweight='bold', titlesize=18, titlepad=10)
plt.rc('image', cmap='magma')
warnings.filterwarnings("ignore") # to clean up output cells
# Load training and validation sets
ds_train_ = image_dataset_from_directory(
'../input/car-or-truck/train',
labels='inferred',
label_mode='binary',
image_size=[128, 128],
interpolation='nearest',
batch_size=64,
shuffle=True,
)
ds_valid_ = image_dataset_from_directory(
'../input/car-or-truck/valid',
labels='inferred',
label_mode='binary',
image_size=[128, 128],
interpolation='nearest',
batch_size=64,
shuffle=False,
)
# Data Pipeline
def convert_to_float(image, label):
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
return image, label
AUTOTUNE = tf.data.experimental.AUTOTUNE
ds_train = (
ds_train_
.map(convert_to_float)
.cache()
.prefetch(buffer_size=AUTOTUNE)
)
ds_valid = (
ds_valid_
.map(convert_to_float)
.cache()
.prefetch(buffer_size=AUTOTUNE)
)
# -
# Let's take a look at a few examples from the training set.
#$HIDE_INPUT$
import matplotlib.pyplot as plt
# ## Step 2 - Define Pretrained Base ##
#
# The most commonly used dataset for pretraining is [*ImageNet*](http://image-net.org/about-overview), a large dataset of many kind of natural images. Keras includes a variety models pretrained on ImageNet in its [`applications` module](https://www.tensorflow.org/api_docs/python/tf/keras/applications). The pretrained model we'll use is called **VGG16**.
pretrained_base = tf.keras.models.load_model(
'../input/cv-course-models/cv-course-models/vgg16-pretrained-base',
)
pretrained_base.trainable = False
# ## Step 3 - Attach Head ##
#
# Next, we attach the classifier head. For this example, we'll use a layer of hidden units (the first `Dense` layer) followed by a layer to transform the outputs to a probability score for class 1, `Truck`. The `Flatten` layer transforms the two dimensional outputs of the base into the one dimensional inputs needed by the head.
# +
from tensorflow import keras
from tensorflow.keras import layers
model = keras.Sequential([
pretrained_base,
layers.Flatten(),
layers.Dense(6, activation='relu'),
layers.Dense(1, activation='sigmoid'),
])
# -
# ## Step 4 - Train ##
#
# Finally, let's train the model. Since this is a two-class problem, we'll use the binary versions of `crossentropy` and `accuracy`. The `adam` optimizer generally performs well, so we'll choose it as well.
# +
model.compile(
optimizer='adam',
loss='binary_crossentropy',
metrics=['binary_accuracy'],
)
history = model.fit(
ds_train,
validation_data=ds_valid,
epochs=30,
)
# -
# When training a neural network, it's always a good idea to examine the loss and metric plots. The `history` object contains this information in a dictionary `history.history`. We can use Pandas to convert this dictionary to a dataframe and plot it with a built-in method.
# +
import pandas as pd
history_frame = pd.DataFrame(history.history)
history_frame.loc[:, ['loss', 'val_loss']].plot()
history_frame.loc[:, ['binary_accuracy', 'val_binary_accuracy']].plot();
# -
# # Conclusion #
#
# In this lesson, we learned about the structure of a convnet classifier: a **head** to act as a classifier atop of a **base** which performs the feature extraction.
#
# The head, essentially, is an ordinary classifier like you learned about in the introductory course. For features, it uses those features extracted by the base. This is the basic idea behind convolutional classifiers: that we can attach a unit that performs feature engineering to the classifier itself.
#
# This is one of the big advantages deep neural networks have over traditional machine learning models: given the right network structure, the deep neural net can learn how to engineer the features it needs to solve its problem.
#
# For the next few lessons, we'll take a look at how the convolutional base accomplishes the feature extraction. Then, you'll learn how to apply these ideas and design some classifiers of your own.
# # Your Turn #
#
# For now, move on to the [**Exercise**](#$NEXT_NOTEBOOK_URL$) and build your own image classifier!
| notebooks/computer_vision/raw/tut1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
params = {
'num_clusters': 50,
'num_components': 256,
'image_size': (384, 380),
'random_state': 123,
'lr': 1e-4,
'max_lr': 1e-4,
'epochs': 100,
'batch_size': 16,
'print_interval': 0.3,
'save_path': 'CNN-k50.pt',
'use_pretrained': True,
'wandb_key': '6f927fe3835ebcc7bb05946984340ac2c810388e',
'wandb_run_name': 'Run 1 (k = 50, b4, pretrained)'
}
# !pip install -q --upgrade torchvision
# !pip install -q --upgrade datasets
import os
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
import yaml
import torch
import torchvision
from torch import optim, nn
from torchvision import models, transforms
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
import cv2
from PIL import Image
import os
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
import seaborn as sns
import plotly.express as px
from datasets import Dataset
import datasets
from sklearn.decomposition import PCA
import wandb
import os
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics import average_precision_score
from tqdm.notebook import tqdm
from torch.utils.data import DataLoader,Dataset
from sklearn.metrics import f1_score
# datasets.disable_progress_bar()
# +
def read_config(path = 'config.yml'):
with open('../input/thesis-chatbot/' + path) as f:
config = yaml.safe_load(f)
return config
def get_train_imgs(img_ls):
"""
Lấy ảnh ở tập train ra, kiểm tra xác thực đảm bảo
nó có trong folder chứa ảnh
Params:
img_ls (pandas series): danh sách ảnh train/val
Returns:
temp (list): danh sách ảnh output
"""
img_train = img_ls.dropna().values
img_train = list(set(img_train))
img_folder = os.listdir('../input/thesis-chatbot/' + config['path']['image_path'])
temp = []
for img in img_train:
if img in img_folder:
temp.append(img)
return temp
def feed_img(model, img_file, image_size):
"""
Đưa 1 ảnh qua mô hình
Params:
model (pytorch module): mô hình
img_file (str): tên file ảnh
image_size (int, int): kích thước ảnh sau khi resize cho input mô hình
"""
device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
model = model.to(device)
filename = '../input/thesis-chatbot/' + str(config['path']['image_path']) + str(img_file)
# try:
img_file = Image.open(str(filename))
# Gif
img_file = np.array(img_file.convert('RGB'))
# Return None nếu file ảnh không đọc được
if img_file is None:
#Dùng None thay vì continue, continue thì xuống dưới hong bỏ được tên ảnh mà feature nó bị None
#check if feature not None, mới cho lấy tên ảnh, feature --> làm luôn 2 việc.
return None
# Transform the image
transform = transforms.Compose([
transforms.ToPILImage(),
transforms.CenterCrop(512),
transforms.Resize(image_size),
transforms.ToTensor()
])
img_file = transform(img_file)
# Reshape the image. PyTorch model reads 4-dimensional tensor
# [batch_size, channels, width, height]
img_file = img_file.reshape(1, 3, image_size[0], image_size[1])
img_file = img_file.to(device)
# We only extract features, so we don't need gradient
with torch.no_grad():
model.eval()
# Extract the feature from the image
feature = model(img_file)
# Convert to NumPy Array, Reshape it, and save it to features variable
return feature.cpu().detach().numpy().reshape(-1)
def feed_single_img(model, example):
"""
Mỗi step sẽ làm gì - feed ảnh qua mô hình
Feature được lưu ở cột features
Params:
model (Pytorch module): mô hình CNN
example: 1 dòng trong dataframe
Returns:
example: 1 dòng trong dataframe (đã thêm cột features)
"""
example['features'] = feed_img(model, example['img'], params['image_size'])
return example
def get_img_features_faster(img_train):
"""
Đưa toàn bộ ảnh qua mô hình - xử lí song song
Tên ảnh được lưu ở cột 'img'
Feature được lưu ở cột 'features'
Params:
model (pytorch module): mô hình CNN ở biến global
img_train (list): danh sách tên ảnh train
Returns:
temp (huggingface dataset): class dataset của huggingface
Giống dictionary
"""
global model
temp = pd.DataFrame({'img': img_train})
temp = datasets.Dataset.from_pandas(temp)
temp = temp.map(lambda x: feed_single_img(model, x))
return temp
def kmean_fit(features, n_clusters, random_state = 42):
"""
Run kmean clustering
Params:
features (list): list các features ảnh trong tập train
n_clusters: số cluster
random_state: fixed random state
Returns:
labels (list): list nhãn giả tương ứng với features
"""
# Initialize the model
kmean_model = KMeans(n_clusters=n_clusters, random_state=random_state)
# Fit the data into the model
kmean_model.fit(features)
# Extract the labels
labels = kmean_model.labels_
return labels
def make_pseudo_labels(img_train, num_components, num_clusters):
# Feed lấy image features
img_dataset = get_img_features_faster(img_train)
# PCA rút gọn features
pca = PCA(n_components= num_components, random_state= params['random_state'])
pca_data = pca.fit_transform(img_dataset['features'])
# Kmean clustering
labels = kmean_fit(pca_data, n_clusters = num_clusters, random_state = params['random_state'])
return labels, img_dataset
class MyData(Dataset):
def __init__(self, img_dataset, labels, transform):
self.img_dataset = img_dataset
self.labels = labels
self.transform = transform
def __len__(self):
return len(img_dataset)
def __getitem__(self, index):
image = Image.open('../input/thesis-chatbot/' + config['path']['image_path'] + img_dataset['img'][index])
image = np.array(image.convert('RGB'))
if self.transform is not None:
image = self.transform(image)
labels = self.labels[index]
return {'images': image, 'labels': labels}
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
def remove_classifier(model):
"""
Tháo lớp classifier ra khỏi mô hình
Params:
model (pytorch_module): mô hình
pass by reference, thay đổi sẽ ảnh hưởng thẳng tới biến mô hình
!!! classifier sẽ là attribute model.classifier
--> mình đè lên bằng class Identity - class fake lấy output là ouput của lớp trước nó
Returns:
None
"""
# Class Identity: class fake không có gì ở trong,
# lấy output của lớp trước đó output ra luôn
model.classifier[1] = Identity()
def add_classifier(model, last_layer_shape: int, num_classes: int):
"""
Gắn lớp classifier vào mô hình
Params:
model (pytorch module): mô hình
pass by reference, thay đổi sẽ ảnh hưởng thẳng tới biến
!!! module mô hình có attribute model.classifier,
last_layer_shape (int): kích thước đầu ra của layer trước đó
(image representation layer)
num_classes (int): số class đầu ra cho classification
"""
model.classifier[1] = nn.Linear(last_layer_shape, num_classes)
# METRIC----------------------
def accuracy(predictions, labels):
classes = torch.argmax(predictions, dim=1)
return torch.mean((classes == labels).float())
def nmi(predictions, labels, is_prob = True):
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.normalized_mutual_info_score.html
if is_prob:
classes = torch.argmax(predictions, dim=1)
else:
classes = predictions
return normalized_mutual_info_score(classes, labels)
def f1(predictions, labels):
classes = torch.argmax(predictions, dim=1)
return f1_score(labels, classes, average='weighted')
# +
# Load config
config = read_config()
# Load df
df = pd.read_pickle('../input/thesis-chatbot/' + config['path']['train_preprocessed_path'])
# Get list of train imgs
img_train = get_train_imgs(df['img_id'])
# Load model
device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
# Update model
if params['use_pretrained']:
model = models.efficientnet_b4(pretrained=True)
else:
model = models.efficientnet_b4()
# Kích thước của lớp biểu diễn đặc trưng ảnh
# b4: 1408, b0: 1280 tuỳ version của model. Mình set bằng biến cho linh hoạt
projection_shape = model.classifier[1].in_features
model.classifier = nn.Sequential(
nn.Linear(projection_shape, 768),
nn.Linear(768, params['num_clusters'])
)
#Transform tăng cường ảnh
transform_aug = transforms.Compose([
transforms.ToPILImage(),
transforms.CenterCrop(512),
transforms.Resize(params['image_size']),
transforms.RandomChoice([ # random 1 phép để dùng
transforms.RandomHorizontalFlip(p=0.5), #lật ngang
transforms.RandomRotation(degrees=30), #xoay góc 30
# thay đổi độ sáng, tương phản, bão hoà, hue
transforms.ColorJitter(brightness= (0.1, 1), contrast= (0.1, 1), saturation= (0.1, 1), hue= (-0.1, 0.1)),
transforms.RandomPerspective(), #quay góc nhìn
]),
transforms.ToTensor()])
criterion = nn.CrossEntropyLoss()
optimizer = optim.AdamW(model.parameters(), lr= params['lr'])
# scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer,
# max_lr= params['max_lr'],
# steps_per_epoch=len(train_loader),
# epochs= params['epochs'])
os.environ["WANDB_API_KEY"] = params['wandb_key']
wandb.init(config = params, project="CNN brrr", entity="thesis-chatbot", name = params['wandb_run_name'])
# -
for epoch in tqdm(range(1, params['epochs'] + 1)):
print(f"\n\nEpoch {epoch}")
print("=" * 10)
# 1. Remove classifier
img_train = get_train_imgs(df['img_id'])
remove_classifier(model)
# 2. Clustering
print("Clustering...")
model.eval() #bật chế độ inference (tắt dropout)
# Nếu epoch > 1, Tính NMI giữa pseudo labels của epoch hiện tại vs pseudo labels của epoch trước đó
if epoch > 1:
previous_labels = labels
labels, img_dataset = make_pseudo_labels(img_train,
num_components = params['num_components'],
num_clusters = params['num_clusters'])
reassign_nmi = nmi(labels, previous_labels, is_prob = False)
print(f"Reassigned NMI: {reassign_nmi}")
wandb.log({'Reassigned NMI': reassign_nmi})
else:
labels, img_dataset = make_pseudo_labels(img_train,
num_components = params['num_components'],
num_clusters = params['num_clusters'])
# 3. Dataset + Dataloader
train_set = MyData(img_dataset, labels, transform_aug)
train_loader = torch.utils.data.DataLoader(train_set,
batch_size=params['batch_size'],
shuffle=True)
# 4. Add classifier
add_classifier(model, 768, params['num_clusters'])
model = model.to(device) # đưa dô gpu (nếu có)
# 5. Classification
print("Training...")
running_loss = []
running_accuracy = []
running_nmi = []
running_f1 = []
for i, data in enumerate(tqdm(train_loader)):
model.train()
inputs, labels_int = data['images'].to(device), data['labels'].to(device)
# reset gradients đã tính mỗi batch
optimizer.zero_grad()
# forward
outputs = model(inputs)
long_labels = labels_int.type(torch.LongTensor).to(device)
# Loss
loss = criterion(outputs, long_labels)
# backward
loss.backward()
# Gradient clipping - prevent exploding gradient
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm= 1.0)
# Update weights
optimizer.step()
# Đưa loss, accuracy
running_loss.append(loss.item())
running_accuracy.append(accuracy(outputs, long_labels))
# Hàm nmi cần nhận vào các biến ở cpu
running_nmi.append(nmi(outputs.detach().cpu(), long_labels.detach().cpu(), is_prob = True))
running_f1.append(f1(outputs.detach().cpu(), long_labels.detach().cpu()))
# Do train 1 epoch quá lẹ nên chỉ cần lấy metric cuối epoch
# Loss
last_loss = torch.mean(torch.Tensor(running_loss))
# Accuracy
last_accuracy = torch.mean(torch.Tensor(running_accuracy))
# Pred NMI
last_nmi = torch.mean(torch.Tensor(running_nmi))
# f1
last_f1 = torch.mean(torch.Tensor(running_f1))
# In ra
print(f"Epoch {epoch} ({i + 1}/{len(train_loader)}), Loss: {last_loss:.3f}, Accuracy: {last_accuracy:.3f}, Pred NMI: {last_nmi:.3f}, F1: {last_f1:.3f}")
# Log qua wandb, 4 plots
wandb.log({"train/loss": last_loss,
'train/accuracy': last_accuracy,
'train/Pred NMI': last_nmi,
'train/f1': last_f1})
# Luu
torch.save(model.state_dict(), params['save_path'])
| notebooks/deepcluster_kaggle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Heart Rate vs Time Figure
#
# #### Analysis by <NAME>
#
# Just a simple time vs heart rate plot.
#
# It seems like when the sensor loses contact with the heartbeat it registers it as zero. I should have seen this during my wrangle. Guess no one is perfect?
#
# I need to modify the wrangler to set zero values (or many less than 30?) to NANs.
#
#
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
sns.set()
# +
class hr_vs_time_figure:
'''
Makes a heart rate vs time plot
'''
def __init__(self, data_filename,):
self.data_filename = data_filename
self.data_filepath = os.path.join('..', 'data', 'wrangled_data', data_filename)
self.df = self.load_data()
self.fig = self.create_figure()
def load_data(self):
df = pd.read_csv(self.data_filepath)
df[df['HR (bpm)'] == 0] = np.NAN
df['Time'] = pd.to_datetime(df['Time'], infer_datetime_format=True)
return df
def create_figure(self, figsize = (20, 10), title_size = 20, window_radius = 5):
fig = plt.figure( figsize = figsize)
t = self.create_time_axis()[window_radius:-window_radius]
mins, avgs, maxes, stds = self.get_min_avg_max_hr(window_radius)
plt.plot(t, avgs, color = 'blue')
plt.plot(t, self.create_hr_axis()[window_radius:-window_radius],
color = 'blue', alpha = .4)
self.plot_missing_values()
plt.xlabel('Time (min)')
plt.ylabel('Heart Rate (bpm)')
plt.title(self.make_title(), fontsize = title_size)
return fig
def plot_missing_values(self):
null_mask = fig.df["HR (bpm)"].isnull()
null_times = fig.df['HR (bpm)'][null_mask].index
for t_null in null_times:
plt.axvline(t_null/60, color = 'red')
def create_time_axis(self):
start_datetime = self.df.loc[0, 'Time']
time_deltas = self.df['Time'] - self.df.loc[0, 'Time']
time_seconds = time_deltas.dt.seconds.values/60
return time_seconds
def create_hr_axis(self):
hr = self.df['HR (bpm)'].values
return hr
def get_min_avg_max_hr(self, window_radius):
hr = self.create_hr_axis()
windows_list = [hr[i-window_radius:i + window_radius] for i in range(window_radius, hr.shape[0] - window_radius)]
windows = np.vstack(windows_list)
mins = windows.min(axis = 1)
avgs = windows.mean(axis = 1)
stds = windows.std(axis = 1)
maxes = windows.max(axis = 1)
return mins, avgs, maxes, stds
def make_title(self):
filename_terms = self.data_filename.replace('.', '_').split('_')
activity = filename_terms[2].title()
name = '{} {}'.format(filename_terms[-3], filename_terms[-2]).title()
datetime_format = '%Y-%m-%d %H:%M:%S'
start_datetime = self.df.loc[0, 'Time']
start_time = start_datetime.strftime('%H:%M')
last_entry = self.df.shape[0]-1
end_datetime = self.df.loc[last_entry, 'Time']
end_time = end_datetime.strftime('%H:%M')
date = start_datetime.strftime('%A, %d %B %Y')
title = '''
Heart Rate vs. Time
{}, {}
{}
'''.format( name, activity, date)
return title
data_filepath = '2020-06-14_15:25_running_jeremy_mann.csv'
fig = hr_vs_time_figure(data_filepath)
# -
| notebooks/2020-06-15-jpm-hr-vs-time-figure.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to SQLite in Python
#
# In this tutorial, learn about SQLite (an extremely light-weighted RDBMS) in Python.
#
# Relational database management systems (RDBMS) are extremely popular and are quite an inseparable part of application development. There exists a number different of RDBMS for example MySQL, PostgreSQL, IBM DB2, Oracle 11g and so on. Such an RDBMS is [SQLite](https://www.sqlite.org/index.html). **SQLite** is widely-used and is favorite among the developers for many reasons -
# * Extremely light-weighted (not more than 500 KBs)
# * It is serverless which means you do not need any separate server for availing its services
# * No complex setup
# * Fully transactional and concurrency-compliant
# and many more.
#
# However, there are some limitations of SQLite as well. For example it does not support joins like `RIGHT OUTER JOIN` and `FULL OUTER JOIN`. But the advantages are way more than the limitations. In this tutorial, you will be introduced to using SQLite in Python and following is the overview of the contents this tutorial covers -
# * Installation and setup of SQLite
# * Creating databases and tables in SQLite
# * Importing a .csv file into a SQLite database
# * SQLite in Python
#
# **Note**: This tutorial assumes that you are already familiar with basics of SQL (using any RDBMS) and Python (3). If you want to refresh these skills then following resources might come in handy -
# * [Learn Python 3 by CodeAcademy](https://www.codecademy.com/learn/learn-python-3)
# * [Intro to SQL for Data Science by DataCamp](https://www.datacamp.com/courses/intro-to-sql-for-data-science?tap_a=5644-dce66f&tap_s=357540-5b28dd)
#
# Once you feel comfortable working with SQL and Python you can come back and resume from where you left off.
# ## Installation and setup
#
# Installing and setting up SQLite is a matter of few minutes. You can use SQLite using the command line tools but there is a GUI-based utility which lets you use SQLite through a decent graphical interface. For this tutorial, you will be using [DB Browser for SQLite](https://sqlitebrowser.org). To start off, you will first download this tool from [here](https://sqlitebrowser.org/dl/) with respect to your OS platform.
#
# On a Windows 7 platform, the interface for DB Browser for SQLite looks like this -
#
# 
# ## Creating databases and tables
#
# Once you are ready with the DB Browser tool, you can create a new SQLite database to proceed. To do this, you can click on the **New Database** tab -
#
# 
# After clicking on the **New Database** tab, you will be prompted to enter a name for the database. Give a name of your choice and then proceed. An empty database of the name that you entered will be created instantaneously and you will be prompted to create a table under that database. You can skip the table creation part for now, you will get to it shortly.
#
# To create a table, you need to fix upon the schema of the table. For this tutorial and for the sake of understanding let's first create a simple table named **consumers** with the following field and data-types -
# * <u>consumer_id</u> (integer)
# * consumer_full_name (string) (cannot be null)
# * consumer_email (string) (cannot be null)
# * consumer_grade (character) (cannot be null)
#
# **Note**: If you are familiar with database schema designing, you might recollect that the field **consumer_id** is the primary key of the table (that is why it is underlined).
#
# In order to create a table, just click on the **Create Table** tab and you will prompted to enter the details of the table that you wish to create -
#
# 
# If you look closely at the above figure, you will see that it contains the exact details that you wanted to be incorporated in the table **consumers**. You can also see the respective SQL to create the table. The DB Browser tool lets you do this very efficiently. Once you followed this, just click on the **OK** button and the table **consumers** should appear under the database that you created sometimes back -
#
# 
# The table **sqlite_sequence** is there because if you specified the `consumer_id` field to be auto-incremented, SQLite creates a separate table to maintain the sequences. You can execute other SQL queries also by going to the **Execute SQL** section. Feel free to execute some of your favorite SQL queries.
#
# Now before interacting with SQLite databases using Python, let's see how you can import a .csv file into a SQLite database and use it for analysis.
# ## Importing a .csv file into a SQLite database
#
# To import a .csv file into the database you created, just follow this navigation: **File -> Import -> Table from CSV file**. You can use [this .csv file](https://bit.ly/2GMz84D) for the purpose. It contains details about different countries around the globe. Navigate to the file and you will get a dialog box like the following after that -
#
# 
# DB Browser lets you specify many things here including the names of table. Make sure you check the **Column names in the first line** option so that SQLite can extract the column names automatically. Click on **OK** after you are done with the specifications.
#
# You should be able to see an entry for the table -
#
# 
#
# Feel free to execute some `select` queries to see if the table was properly imported or not.
# ## SQLite in Python
#
# You now have a database and a table ready to work with. To be able to interact with a SQLite database using Python, you would need the [sqlite3](https://docs.python.org/3/library/sqlite3.html) module which comes with the [Anaconda](https://anaconda.org/) distribution.
#
# Now, you will connect to the database that you created using the `connect()` method provided by `sqlite3`. This returns a `Connection` object. Supply the path of the database to the `connect` method. Databases are generally saved in `.db` extension.
# +
import sqlite3
conn = sqlite3.connect('tutorial.db')
# -
# Once you have a `Connection` to the database, you can create a `Cursor` object and call its `execute()` method to perform SQL commands.
cur = conn.cursor()
cur.execute('SELECT * from countries')
# After executing the `SELECT` statement, you can -
# * treat the cursor object `cur` as an iterator call the `fetchone()` method to display a single row or
# * call the `fetchall()` method to display a list of rows
#
# Let's try both one by one.
print(cur.fetchone())
print(cur.fetchall())
# You can make the output of the `fetchall()` method slightly prettier by iterating over each rows -
for row in cur.execute('SELECT * FROM countries'):
print(row)
# Let's now see how you add a `where` clause to the query and execute it. Let's fetch the details of the country where the `code = 'AFG'`.
code = ('AFG',)
cur.execute('SELECT * FROM countries WHERE code = ?', code)
print(cur.fetchone())
# You could have done the following also in order to get the records but the above one is more secure.
code = 'AFG'
cur.execute("SELECT * FROM countries WHERE code = '%s'" % code)
print(cur.fetchone())
# You can insert records into a table either one by one or many records at one go. For this, let's use the `consumers` table. It does not contain any record as of now. Let's populate it from here.
# One by one
cur.execute("INSERT INTO consumers VALUES (1,'<NAME>','<EMAIL>','A')")
for row in cur.execute('SELECT * FROM consumers'):
print(row)
# +
# Prepare a list of records to be inserted
purchases = [(2,'<NAME>','<EMAIL>','B'),
(3,'<NAME>','<EMAIL>','A'),
]
# Use executemany() to insert multiple records at a time
cur.executemany('INSERT INTO consumers VALUES (?,?,?,?)', purchases)
for row in cur.execute('SELECT * FROM consumers'):
print(row)
# -
# You can cross-check this from the DB Browser tool. The records should reflect there also. But this will not happen until and unless you are committing these transactions. You can commit/save this by simply calling the `commit()` method of the `Connection` object you created.
conn.commit()
# You should be able to see the entries now -
#
# 
# It is a good programming practice to close the DB connection once the works are done. But before that the changes need to be made permanent and it is achieved using the `commit()` method as shown above.
# Closing the DB connection
conn.close()
# ## Congrats!
#
# Thank you for reading through the entire tutorial. This tutorial introduced you to SQLite, a powerful but light-weighted RDBMS and you learned to interact with SQLite using Python. Let me know if you have any questions in the comments section.
| DataCamp blog on SQLite in Python/Introduction to SQLite in Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import csv
import json
import os
import pandas as pd
import pickle
import requests
# ### Data Cleaning
# ##### Read the data into Pandas Dataframes.
page_data_file_path = "./page_data.csv"
wpds_data_file_path = "./WPDS_2018_data.csv"
page_data_df = pd.read_csv(page_data_file_path)
wpds_df = pd.read_csv(wpds_data_file_path)
page_data_df.head()
wpds_df.head()
# ##### Clean page_data by removing the pages which represent templates.
is_template = page_data_df['page'].str.match('Template:')
page_data_cleaned_df = page_data_df[~is_template]
# ##### Clean wpds data by removing the rows representing cumulative regions or continents.
wpds_df["is_continent"] = wpds_df.Geography.str.isupper()
wpds_countries_df = wpds_df[~wpds_df["is_continent"]]
wpds_continents_df = wpds_df[wpds_df["is_continent"]]
# ##### Showing the wpds rows corresponding to Cumulative regions (continents).
wpds_continents_df
# ##### Map each country to its region.
# +
country_region_dict = {}
cur_region = None
for row in wpds_df.iterrows():
geography = row[1]["Geography"]
if geography.isupper():
cur_region = geography
else:
country_region_dict[geography] = cur_region
country_region_df = pd.DataFrame(list(country_region_dict.items()), columns=['country', 'region'])
# -
country_region_df.head()
# ### Getting article quality predictions from ORES.
# ##### Making ORES requests using REST API. Alternatively, the ORES python package can be used, but it has additional dependencies which may cause trouble while installing.
# +
# Copied from Demo: "https://github.com/Ironholds/data-512-a2/blob/master/hcds-a2-bias_demo.ipynb".
headers = {'User-Agent' : 'https://github.com/bhuvi3', 'From' : '<EMAIL>'}
def get_ores_data(revision_ids, headers, batch_size=100):
def chunker(seq, size):
"""
Taken from Stack Overflow answer by 'nosklo': https://stackoverflow.com/questions/434287/what-is-the-most-pythonic-way-to-iterate-over-a-list-in-chunks.
"""
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
# Define the endpoint
endpoint = 'https://ores.wikimedia.org/v3/scores/{project}/?models={model}&revids={revids}'
aggregated_response = {}
for rev_ids_group in chunker(revision_ids, batch_size):
# Specify the parameters - smushing all the revision IDs together separated by | marks.
# Yes, 'smush' is a technical term, trust me I'm a scientist.
# What do you mean "but people trusting scientists regularly goes horribly wrong" who taught you tha- oh.
params = {'project' : 'enwiki',
'model' : 'wp10',
'revids' : '|'.join(str(x) for x in rev_ids_group)
}
uri = endpoint.format(**params)
api_call = requests.get(uri)
cur_response = api_call.json()
aggregated_response.update(cur_response['enwiki']['scores'])
return aggregated_response
# -
# ##### The API call over all the revision ids might take few minutes. The ORES REST API was throwing errors when queried for more than approx. 200 revision ids in a single call. Hence, I am querying the revision ids in batches. Also, I am storing the queries results in a local pickle file, so that we can avoid making API calls if running this multiple times.
# Note: This cell may take few minutes to run (~5 min)
# For each revision_id in our data, we get ORES quality class predictions.
ores_res_cache_file = "cached_ores_api_call_res.pickle"
if os.path.exists(ores_res_cache_file):
with open(ores_res_cache_file, "rb") as fp:
ores_call_res = pickle.load(fp)
else:
revision_ids = []
for row in page_data_cleaned_df.iterrows():
row_series = row[1]
revision_ids.append(int(row_series["rev_id"]))
ores_call_res = get_ores_data(revision_ids, headers)
# ##### Parse the API call result and add the article_quality to the page_data. Ignore the article for which the ORES quality could not be retrieved, and store these article revision ids in a file locally.
# +
quality_categories_dict = {}
missed_rev_ids = []
for key, value in ores_call_res.items():
try:
quality_categories_dict[key] = value["wp10"]["score"]['prediction']
except:
quality_categories_dict[key] = "missed"
missed_rev_ids.append(key)
missed_rev_ids_file = "ores_missed_rev_ids.txt"
with open(missed_rev_ids_file, "w") as fp:
for rev_id in missed_rev_ids:
fp.write("%s\n" % rev_id)
print("Total number of articles for which ORES quality could not be retrieved: %s. "
"The revision_ids of these articles have been written to %s"
% (len(missed_rev_ids), missed_rev_ids_file))
page_quality_df = pd.DataFrame(list(quality_categories_dict.items()), columns=['rev_id', 'article_quality']).astype({'rev_id': 'int64'})
page_data_joined_df = page_data_cleaned_df.merge(page_quality_df, on="rev_id", how="inner")
page_data_joined_filtered_df = page_data_joined_df[page_data_joined_df["article_quality"] != "missed"]
# -
page_data_joined_filtered_df = page_data_joined_filtered_df.rename(columns={"rev_id": "revision_id", "page": "article_name"})
page_data_joined_filtered_df.head()
wpds_countries_df["Population mid-2018 (millions)"] = wpds_countries_df["Population mid-2018 (millions)"].str.replace(',', '')
wpds_countries_df = wpds_countries_df.astype({"Population mid-2018 (millions)": "float32"})
wpds_countries_df["population"] = wpds_countries_df["Population mid-2018 (millions)"] * 1000000
wpds_countries_df = wpds_countries_df.drop(columns=["is_continent", "Population mid-2018 (millions)"])
wpds_countries_df = wpds_countries_df.rename(columns={"Geography": "country"})
wpds_countries_df.head()
# ##### Combine the Wikipedia and Population data (from WPDS).
# +
page_wpds_merged_df = page_data_joined_filtered_df.merge(wpds_countries_df, on="country", how="left")
is_no_match = page_wpds_merged_df["population"].isnull()
no_match_rows_file = "wp_wpds_countries-no_match.csv"
page_wpds_merged_df_no_match = page_wpds_merged_df[is_no_match]
page_wpds_merged_df_no_match.to_csv(no_match_rows_file, index=False)
print("Rows which did not match have been saved at %s" % no_match_rows_file)
page_wpds_merged_df_matched = page_wpds_merged_df[~is_no_match]
matched_rows_file = "wp_wpds_politicians_by_country.csv"
page_wpds_merged_df_matched.to_csv(matched_rows_file, index=False)
print("Rows matched have been saved at %s" % matched_rows_file)
# -
# Rows where the countries did not match.
page_wpds_merged_df_no_match.head()
# Rows where countries matched.
page_wpds_merged_df_matched.head()
# ### Analysis
# ##### Create an analysis df with the following metrics for analying the bias.
# - coverage: The percentage of articles by population. If a country has a population of 10,000 people, and you found 10 articles about politicians from that country, then the percentage of articles-per-population would be .1%.
# - relative_quality: The percentage of high-quality articles. If a country has 10 articles about politicians, and 2 of them are FA or GA class articles, then the percentage of high-quality articles would be 20%.
# +
# Find number of articles per country.
country_article_counts_df = page_wpds_merged_df_matched.groupby("country").size().reset_index(name='article_count')
# Find number of high quality articles per country.
is_high_quality = (page_wpds_merged_df_matched["article_quality"] == "FA") | (page_wpds_merged_df_matched["article_quality"] == "GA")
country_high_quality_article_count_df = page_wpds_merged_df_matched[is_high_quality].groupby("country").size().reset_index(name='high_quality_article_count')
# Make an analysis dataframe with computed metrics.
analysis_df = country_article_counts_df.merge(wpds_countries_df, on="country", how="inner")
analysis_df = analysis_df.merge(country_high_quality_article_count_df, on="country", how="left")
analysis_df['high_quality_article_count'] = analysis_df['high_quality_article_count'].fillna(value=0).astype("int64")
# Add the percentage metrics.
analysis_df["coverage_perc"] = (analysis_df["article_count"] / analysis_df["population"]) * 100
analysis_df["relative_quality"] = (analysis_df["high_quality_article_count"] / analysis_df["article_count"]) * 100
# -
analysis_df.head()
# ##### Add region-wise metrics.
# +
region_analysis_df = analysis_df.drop(columns=["coverage_perc", "relative_quality"]).merge(country_region_df, on="country", how="inner")
region_analysis_df = region_analysis_df.groupby("region").sum()
region_analysis_df["coverage_perc"] = (region_analysis_df["article_count"] / region_analysis_df["population"]) * 100
region_analysis_df["relative_quality"] = (region_analysis_df["high_quality_article_count"] / region_analysis_df["article_count"]) * 100
# -
region_analysis_df
# ### Analysis Results
# ##### Top 10 countries by coverage: 10 highest-ranked countries in terms of number of politician articles as a proportion of country population.
# Additional columns have been retained to allow for observation.
analysis_df.sort_values("coverage_perc", ascending=False).head(10)
# ##### Bottom 10 countries by coverage: 10 lowest-ranked countries in terms of number of politician articles as a proportion of country population.
analysis_df.sort_values("coverage_perc", ascending=True).head(10)
# ##### Top 10 countries by relative quality: 10 highest-ranked countries in terms of the relative proportion of politician articles that are of GA and FA-quality.
analysis_df.sort_values("relative_quality", ascending=False).head(10)
# ##### Bottom 10 countries by relative quality: 10 lowest-ranked countries in terms of the relative proportion of politician articles that are of GA and FA-quality.
analysis_df.sort_values("relative_quality", ascending=True).head(10)
# ##### Geographic regions by coverage: Ranking of geographic regions (in descending order) in terms of the total count of politician articles from countries in each region as a proportion of total regional population.
region_analysis_df.sort_values("coverage_perc", ascending=False)
# ##### Geographic regions by relative quality: Ranking of geographic regions (in descending order) in terms of the relative proportion of politician articles from countries in each region that are of GA and FA-quality.
region_analysis_df.sort_values("relative_quality", ascending=False)
# ### Reflections and implications: Please refer to the README file in the repository root for reflections and implications of the analyses provided in this project.
| hcds-a2-bias.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ### Normalized species
# Example model which calculates functions depending on the normalized values of a species which can be either in active state `SA` or inactive state `SI`.
#
# The normalized values are `SA_f` and `SI_f`, respectively, with the total concentration of `S` given as
# ```
# ST = SA + SI
# ```
# #### Model definition
# The model is defined using `Tellurium` and `Antimony`. The identical equations could be typed directly in `COPASI`.
#
# The created model is exported as `SBML` which than can be used in `COPASI`.
# +
# %matplotlib inline
from __future__ import print_function, division
import tellurium as te
r = te.loada("""
model normalized_species()
# conversion between active (SA) and inactive (SI)
J1: SA -> SI; k1*SA - k2*SI;
k1 = 0.1; k2 = 0.02;
# species
species SA, SI, ST;
SA = 10.0; SI = 0.0;
const ST := SA + SI;
SA is "active state S";
SI is "inactive state S";
ST is "total state S";
# normalized species calculated via assignment rules
species SA_f, SI_f;
SA_f := SA/ST;
SI_f := SI/ST;
SA_f is "normalized active state S";
SI_f is "normalized inactive state S";
# parameters for your function
P = 0.1;
tau = 10.0;
nA = 1.0;
nI = 2.0;
kA = 0.1;
kI = 0.2;
# now just use the normalized species in some math
F := ( (1-(SI_f^nI)/(kI^nI+SI_f^nI)*(kI^nI+1) ) * ( (SA_f^nA)/(kA^nA+SA_f^nA)*(kA^nA+1) ) -P)*tau;
end
""")
# print(r.getAntimony())
# Store the SBML for COPASI
import os
import tempfile
temp_dir = tempfile.mkdtemp()
file_path = os.path.join(temp_dir, 'normalizedSpecies.xml')
r.exportToSBML(file_path)
# -
# #### Model simulation
# We perform a simple model simulation to demonstrate the main features using `roadrunner`:
# - normalized values `SA_f` and `SI_f` are normalized in `[0,1]`
# - the normalized values have same dynamics like `SA` and `SF`
# - the normalized values can be used to calculates some dependent function, here `F`
# %matplotlib inline
r.reset()
# select the variables of interest in output
r.selections = ['time', 'F'] + r.getBoundarySpeciesIds() \
+ r.getFloatingSpeciesIds()
# simulate from 0 to 50 with 1001 points
s = r.simulate(0,50,1001)
# plot the results
r.plot(s);
| examples/notebooks/core/model_normalizedSpecies.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JPA-BERT/jpa-bert.github.io/blob/master/notebooks/05PyTorchTEXT_text_sentiment_ngrams_tutorial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="qI0kA8S2Vpjf" colab_type="text"
# ---
#
# このファイルは PyTorch のチュートリアルにあるファイル <https://pytorch.org/tutorials/beginner/text_sentiment_ngrams_tutorial.html> を翻訳して,加筆修正したもの
# です。
#
# すぐれたチュートリアルの内容,コードを公開された Sean Robertson と PyTorch 開発陣に敬意を表します。
#
# - Original:
# - Date: 2020-0811
# - Translated and modified: <NAME> <<EMAIL>>
#
# ---
# + id="YAcSQ_GPV93E" colab_type="code" colab={}
# 2020年8月11日現在,以下のコマンドを実行してランタイムを再起動しなければ,このノートブックは動作しません。
# 理由は torchtext.datasets のバージョンが 0.3.0 であれば text_classification が定義されていないからです
# 以下のコマンドを用いて upgrade すると 0.7.0 にバージョンが更新され,動作するようになります。
# !pip install --upgrade torchtext
# + id="nD5EXvvuVpjh" colab_type="code" colab={}
# from https://github.com/dmlc/xgboost/issues/1715
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# + colab_type="code" id="3N34FPtujcuo" colab={}
# %matplotlib inline
# + [markdown] colab_type="text" id="BBH7YQ3ijcu0"
# ## TorchText によるテキスト分類
# <!--
# ## Text Classification with TorchText
# -->
#
# <!--This tutorial shows how to use the text classification datasets in ``torchtext``, including-->
#
# このチュートリアルでは、 ``torchtext`` のテキスト分類データセットの使い方を説明します。
#
# - AG_NEWS
# - SogouNews
# - DBpedia
# - YelpReviewPolarity
# - YelpReviewFull
# - YahooAnswers
# - AmazonReviewPolarity
# - AmazonReviewFull
#
# <!--
# This example shows how to train a supervised learning algorithm for classification using one of these ``TextClassification`` datasets.
# -->
# ここでは 上記 ``TextClassification`` データセットを用いて,教師付き学習アルゴリズムによるテキスト分類でどのように訓練するかを示しています。
#
# ## ngrams によるデータの読み込み
# <!--
# ## Load data with ngrams
# -->
#
# <!--A bag of ngrams feature is applied to capture some partial information about the local word order.
# In practice, bi-gram or tri-gram are applied to provide more benefits as word groups than only one word. An example:-->
#
# 局所的な語順についての部分的な情報を捕捉するため,ngrams 袋の特徴が適用されます。
# (訳注: Bag of Words 単語袋とは,単語を語順に関わらず詰め込む袋に喩えて Bag of Words と言う)
# 実際には 1 つの単語だけよりも多くの利点を単語群として提供するため,バイグラム bigram (2-gram) または トライグラム (3-gram) が適用されます。
# 例としては
#
# <!--
# ```
# "load data with ngrams"
# Bi-grams results: "load data", "data with", "with ngrams"
# Tri-grams results: "load data with", "data with ngrams"
# ```
# -->
#
# ```
# "load data with ngrams"
# Bi-grams 結果: "load data", "data with", "with ngrams"
# Tri-grams 結果: "load data with", "data with ngrams"
# ```
# <!--
# ``TextClassification`` Dataset supports the ngrams method.
# By setting ngrams to 2, the example text in the dataset will be a list of single words plus bi-grams string.
# -->
#
# ``TextClassification`` データセットは ngrams メソッドをサポートしています。
# ngrams を 2 に設定すると,データセットの例文は,単語のリスト と 2-grams の文字列になります。
#
# + colab_type="code" id="RARIsp3yjcu1" colab={}
import torch
import torchtext
from torchtext.datasets import text_classification
NGRAMS = 2
import os
if not os.path.isdir('./.data'):
os.mkdir('./.data')
train_dataset, test_dataset = text_classification.DATASETS['AG_NEWS'](
root='./.data', ngrams=NGRAMS, vocab=None)
BATCH_SIZE = 16
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# + id="J5ikNOK9Vpj0" colab_type="code" colab={}
torchtext.__version__
# + [markdown] colab_type="text" id="uc5b2WwCjcu4"
# ## モデルの定義
# <!--
# ## Define the model
# -->
#
# <!--
# The model is composed of the
# [EmbeddingBag](https://pytorch.org/docs/stable/nn.html?highlight=embeddingbag#torch.nn.EmbeddingBag) layer and the linear layer (see the figure below).
# -->
#
# モデルは,[埋め込み袋 EmbeddingBag](https://pytorch.org/docs/stable/nn.html?highlight=embeddingbag#torch.nn.EmbeddingBag) 層と線形層から構成されています。
# 下図を参照してください。
#
# <!--
# ``nn.EmbeddingBag`` computes the mean value of a “bag” of embeddings. The text entries here have different lengths.
# ``nn.EmbeddingBag`` requires no padding here since the text lengths are saved in offsets.
# -->
#
# ``nn.EmbeddingBag`` は埋め込み「袋」の平均値を計算します。ここでのテキスト項目の長さはそれぞれ異なります。
# ``nn.EmbeddingBag`` は埋め込み (padding) を必要としません。テキストの長さはオフセット(ズレ)で保存されからです。
#
# <!--
# Additionally, since ``nn.EmbeddingBag`` accumulates the average across the embeddings on the fly, ``nn.EmbeddingBag`` can enhance the performance and memory efficiency to process a sequence of tensors.
# -->
#
# さらに ``nn.EmbeddingBag`` は,その場でその都度埋め込み全体の平均値を蓄積します。
# 従って ``nn.EmbeddingsBag`` はテンソル系列を処理する性能とメモリ効率が向上します。
#
# <!--
# 
# -->
#
# <img src="https://pytorch.org/tutorials/_images/text_sentiment_ngrams_model.png">
#
#
#
# + colab_type="code" id="bqDb2PMrjcu5" colab={}
import torch.nn as nn
import torch.nn.functional as F
class TextSentiment(nn.Module):
def __init__(self, vocab_size, embed_dim, num_class):
super().__init__()
self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=True)
self.fc = nn.Linear(embed_dim, num_class)
self.init_weights()
def init_weights(self):
initrange = 0.5
self.embedding.weight.data.uniform_(-initrange, initrange)
self.fc.weight.data.uniform_(-initrange, initrange)
self.fc.bias.data.zero_()
def forward(self, text, offsets):
embedded = self.embedding(text, offsets)
return self.fc(embedded)
# + [markdown] colab_type="text" id="gurBAngujcvA"
# ## 事例の初期化
# <!--
# ## Initiate an instance
# -->
#
# <!--The AG_NEWS dataset has four labels and therefore the number of classes is four.-->
# AG_NEWS データセット は 4 つのラベルを持っており,クラス数は 4つです。
#
# ```
# 1 : World 世界
# 2 : Sports スポーツ
# 3 : Business ビジネス
# 4 : Sci/Tec 科学工学
# ```
#
# <!--
# The vocab size is equal to the length of vocab (including single word and ngrams).
# The number of classes is equal to the number of labels, which is four in AG_NEWS case.
# -->
# 語彙サイズは,語彙の長さ (語彙長)に等しくなります(単語と ngramsを含みます)
# クラス数はラベル数に等しく,AG_NEWS の場合は 4 です。
#
# + colab_type="code" id="wLICqX2FjcvB" colab={}
VOCAB_SIZE = len(train_dataset.get_vocab())
EMBED_DIM = 32
NUN_CLASS = len(train_dataset.get_labels())
model = TextSentiment(VOCAB_SIZE, EMBED_DIM, NUN_CLASS).to(device)
# + [markdown] colab_type="text" id="z-LXKADMjcvL"
# ## バッチ生成に用いる関数
# <!--
# ## Functions used to generate batch
# -->
# + [markdown] colab_type="text" id="MuknKQGtjcvL"
# <!--
# Since the text entries have different lengths, a custom function generate_batch() is used to generate data batches and offsets.
# The function is passed to ``collate_fn`` in ``torch.utils.data.DataLoader``.
# The input to ``collate_fn`` is a list of tensors with the size of batch_size, and the ``collate_fn`` function packs them into a mini-batch.
# Pay attention here and make sure that ``collate_fn`` is declared as a top level def.
# This ensures that the function is available in each worker.
# -->
#
# テキストエントリの長さはそれぞれ異なるので,データのバッチとオフセットを生成するためにカスタム関数 `generate_batch()` を用います。
# この関数は ``torch.utils.data.DataLoader`` の ``collate_fn`` に渡されます。
# ``collate_fn`` への入力は `batch_size` サイズのテンソルリストです。 ``collate_fn``関数はそれらをミニバッチにまとめます。
# ここで注意してほしいのは,``collate_fn`` がトップレベルの関数 (def) として宣言されていることです。
# これにより,各ワーカーでこの関数が利用できるようになります。
#
# <!--
# The text entries in the original data batch input are packed into a list and concatenated as a single tensor as the input of ``nn.EmbeddingBag``.
# The offsets is a tensor of delimiters to represent the beginning index of the individual sequence in the text tensor.
# Label is a tensor saving the labels of individual text entries.
# -->
#
# 元のデータ一括入力のテキストエントリはリストに詰められ ``nn.EmbeddingBag`` の入力として 1 つのテンソルとして連結されます。
# オフセット(ずれ) は,テキストテンソル内の個々の系列の開始インデックスを表す分割子(デリミタ) のテンソルです。
# ラベル (Label) は,個々のテキスト項目のラベルを保存したテンソルです。
#
# + colab_type="code" id="N2GduZNujcvM" colab={}
def generate_batch(batch):
label = torch.tensor([entry[0] for entry in batch])
text = [entry[1] for entry in batch]
offsets = [0] + [len(entry) for entry in text]
# torch.Tensor.cumsum returns the cumulative sum
# of elements in the dimension dim.
# torch.Tensor([1.0, 2.0, 3.0]).cumsum(dim=0)
offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
text = torch.cat(text)
return text, offsets, label
# + [markdown] colab_type="text" id="_e_fDPp2jcvQ"
# ## モデルの訓練と結果の評価のための関数の定義
# <!--
# ## Define functions to train the model and evaluate results.
# -->
# + [markdown] colab_type="text" id="GLNzY6QMjcvR"
# <!--
# [torch.utils.data.DataLoader](https://pytorch.org/docs/stable/data.html?highlight=dataloader#torch.utils.data.DataLoader) is recommended for PyTorch users, and it makes data loading in parallel easily (a tutorial is [here](https://pytorch.org/tutorials/beginner/data_loading_tutorial.html).
# We use ``DataLoader`` here to load AG_NEWS datasets and send it to the model for training/validation.
# -->
#
# [torch.utils.data.DataLoader](https://pytorch.org/docs/stable/data.html?highlight=dataloader#torch.utils.data.DataLoader) は PyTorch ユーザにおすすめのツールです。
# これにより,データの読み込みを簡単に並列して行うことができます。チュートリアルは[こちら](https://pytorch.org/tutorials/beginner/data_loading_tutorial.html)。
# ここでは AG_NEWS データセットをロードしてモデルに送り,訓練や検証を行うために ``DataLoader`` を使用します。
#
# + colab_type="code" id="BQhNMMpNjcvR" colab={}
from torch.utils.data import DataLoader
def train_func(sub_train_):
# Train the model
train_loss = 0
train_acc = 0
data = DataLoader(sub_train_, batch_size=BATCH_SIZE, shuffle=True,
collate_fn=generate_batch)
for i, (text, offsets, cls) in enumerate(data):
optimizer.zero_grad()
text, offsets, cls = text.to(device), offsets.to(device), cls.to(device)
output = model(text, offsets)
loss = criterion(output, cls)
train_loss += loss.item()
loss.backward()
optimizer.step()
train_acc += (output.argmax(1) == cls).sum().item()
# Adjust the learning rate
scheduler.step()
return train_loss / len(sub_train_), train_acc / len(sub_train_)
def test(data_):
loss = 0
acc = 0
data = DataLoader(data_, batch_size=BATCH_SIZE, collate_fn=generate_batch)
for text, offsets, cls in data:
text, offsets, cls = text.to(device), offsets.to(device), cls.to(device)
with torch.no_grad():
output = model(text, offsets)
loss = criterion(output, cls)
loss += loss.item()
acc += (output.argmax(1) == cls).sum().item()
return loss / len(data_), acc / len(data_)
# + [markdown] colab_type="text" id="7o7QmeTJjcvY"
# ## データセットの分割とモデルの実行
# <!--
# ## Split the dataset and run the model
# -->
#
# <!--
# Since the original AG_NEWS has no valid dataset, we split the training dataset into train/valid sets with a split ratio of 0.95 (train) and
# 0.05 (valid).
# Here we use [torch.utils.data.dataset.random_split](https://pytorch.org/docs/stable/data.html?highlight=random_split#torch.utils.data.random_split) function in PyTorch core library.
# -->
#
# 元の AG_NEWS には検証データセットがないため,学習データセットを 分割率 0.95 (学習データ) で学習/有効 データセットに分割します 0.05 は検証データセット。
# ここでは PyTorch コアライブラリ [torch.utils.data.dataset.random_split](https://pytorch.org/docs/stable/data.html?highlight=random_split#torch.utils.data.random_split) 関数を使用しています。
#
# <!--
# [CrossEntropyLoss](https://pytorch.org/docs/stable/nn.html?highlight=crossentropyloss#torch.nn.CrossEntropyLoss) criterion combines nn.LogSoftmax() and nn.NLLLoss() in a single class.
# It is useful when training a classification problem with C classes.
# [SGD](https://pytorch.org/docs/stable/_modules/torch/optim/sgd.html) implements stochastic gradient descent method as optimizer.
# The initial learning rate is set to 4.0.
# [StepLR](https://pytorch.org/docs/master/_modules/torch/optim/lr_scheduler.html#StepLR) is used here to adjust the learning rate through epochs.
# -->
#
# [交差エントロピー損失 CrossEntropyLoss](https://pytorch.org/docs/stable/nn.html?highlight=crossentropyloss#torch.nn.CrossEntropyLoss) 基準は `nn.LogSoftmax()` (訳注: 対数ソフトマックス) と `nn.NLLLoss()` (訳注: 負の対数尤度) を一つのクラスにまとめたものです。
# C クラスを用いた分類問題を学習する際に便利です。
# [確率的勾配降下法 SGD](https://pytorch.org/docs/stable/_modules/torch/optim/sgd.html) はオプティマイザとして確率的勾配降下法を実装しています。
# 初期学習率は 4.0 に設定されています。
# ここでは [StepLR](https://pytorch.org/docs/master/_modules/torch/optim/lr_scheduler.html#StepLR) を用いてエポック単位で学習率を調整しています。
#
# + colab_type="code" id="5CRTSvonjcva" colab={}
import time
from torch.utils.data.dataset import random_split
N_EPOCHS = 5
min_valid_loss = float('inf')
criterion = torch.nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=4.0)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.9)
train_len = int(len(train_dataset) * 0.95)
sub_train_, sub_valid_ = \
random_split(train_dataset, [train_len, len(train_dataset) - train_len])
for epoch in range(N_EPOCHS):
start_time = time.time()
train_loss, train_acc = train_func(sub_train_)
valid_loss, valid_acc = test(sub_valid_)
secs = int(time.time() - start_time)
mins = secs / 60
secs = secs % 60
print('Epoch: %d' %(epoch + 1), " | time in %d minutes, %d seconds" %(mins, secs))
print(f'\tLoss: {train_loss:.4f}(train)\t|\tAcc: {train_acc * 100:.1f}%(train)')
print(f'\tLoss: {valid_loss:.4f}(valid)\t|\tAcc: {valid_acc * 100:.1f}%(valid)')
# + [markdown] colab_type="text" id="62-Lz0S1jcvf"
# <!--
# Running the model on GPU with the following information:
# -->
# GPU を用いてモデルを実行すると以下のよう情報を得ます
#
# ```
# Epoch: 1 | time in 0 minutes, 11 seconds
# Loss: 0.0263(train) | Acc: 84.5%(train)
# Loss: 0.0001(valid) | Acc: 89.0%(valid)
#
# Epoch: 2 | time in 0 minutes, 10 seconds
# Loss: 0.0119(train) | Acc: 93.6%(train)
# Loss: 0.0000(valid) | Acc: 89.6%(valid)
#
# Epoch: 3 | time in 0 minutes, 9 seconds
# Loss: 0.0069(train) | Acc: 96.4%(train)
# Loss: 0.0000(valid) | Acc: 90.5%(valid)
#
# Epoch: 4 | time in 0 minutes, 11 seconds
# Loss: 0.0038(train) | Acc: 98.2%(train)
# Loss: 0.0000(valid) | Acc: 90.4%(valid)
#
#
# Epoch: 5 | time in 0 minutes, 11 seconds
# Loss: 0.0022(train) | Acc: 99.0%(train)
# Loss: 0.0000(valid) | Acc: 91.0%(valid)
# ```
#
# + [markdown] colab_type="text" id="1nNGPxHCjcvg"
# ## テストデータを用いたモデルの評価
# <!--
# ## Evaluate the model with test dataset
# -->
# + colab_type="code" id="WGBzwperjcvg" colab={}
print('Checking the results of test dataset...')
test_loss, test_acc = test(test_dataset)
print(f'\tLoss: {test_loss:.4f}(test)\t|\tAcc: {test_acc * 100:.1f}%(test)')
# + [markdown] colab_type="text" id="M4wD9vq8jcvk"
# テストデータセットの結果をチェック:
#
# <!--
# Checking the results of test dataset…
# -->
#
# ```
# Loss: 0.0237(test) | Acc: 90.5%(test)
# ```
#
# + [markdown] colab_type="text" id="Vp1bTWv_jcvk"
# ## ランダムニューズでのテスト
# <!--
# ## Test on a random news
# -->
#
# <!--
# Use the best model so far and test a golf news. The label information is available [here](https://pytorch.org/text/datasets.html?highlight=ag_news#torchtext.datasets.AG_NEWS).
# -->
#
# 今までの最良モデルを使ってゴルフニュースを試してみてください。
# ラベル情報は[こちら](https://pytorch.org/text/datasets.html?highlight=ag_news#torchtext.datasets.AG_NEWS)。
#
#
# + colab_type="code" id="7XAfishYjcvl" colab={}
import re
from torchtext.data.utils import ngrams_iterator
from torchtext.data.utils import get_tokenizer
ag_news_label = {1 : "World",
2 : "Sports",
3 : "Business",
4 : "Sci/Tec"}
def predict(text, model, vocab, ngrams):
tokenizer = get_tokenizer("basic_english")
with torch.no_grad():
text = torch.tensor([vocab[token]
for token in ngrams_iterator(tokenizer(text), ngrams)])
output = model(text, torch.tensor([0]))
return output.argmax(1).item() + 1
ex_text_str = "<NAME>. – Four days ago, <NAME> was \
enduring the season’s worst weather conditions on Sunday at The \
Open on his way to a closing 75 at Royal Portrush, which \
considering the wind and the rain was a respectable showing. \
Thursday’s first round at the WGC-FedEx St. Jude Invitational \
was another story. With temperatures in the mid-80s and hardly any \
wind, the Spaniard was 13 strokes better in a flawless round. \
Thanks to his best putting performance on the PGA Tour, Rahm \
finished with an 8-under 62 for a three-stroke lead, which \
was even more impressive considering he’d never played the \
front nine at TPC Southwind."
vocab = train_dataset.get_vocab()
model = model.to("cpu")
print("This is a %s news" %ag_news_label[predict(ex_text_str, model, vocab, 2)])
# + [markdown] colab_type="text" id="kyzCwYhHjcvq"
# これはスポーツニュースです
# <!--This is a Sports news-->
#
#
# + [markdown] colab_type="text" id="oBj4aVZLjcvr"
# <!--
# You can find the code examples displayed in this note [here](https://github.com/pytorch/text/tree/master/examples/text_classification)
# -->
#
# このノートのコードは [こちら](https://github.com/pytorch/text/tree/master/examples/text_classification) です。
# + id="67F5dbFOVpkp" colab_type="code" colab={}
| notebooks/05PyTorchTEXT_text_sentiment_ngrams_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
val = -770.123329847239847234987
t = 21.023232304923049234
print('the {0} value is: {1}'.format(t, val))
'efe'.format()
vals = [True, False, True]
import numpy as np
vv = np.asarray(vals).sum()
vals
vv.dtype
| notebooks/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Carga de datos a travez de la funcion read_csv
import pandas as pd
data = pd.read_csv("../datasets/titanic/titanic3.csv")
data.head()
read_csv(filepath="/Users/joseph/Developer/AnacondaProjects/Machine-learning-con-python/datasets/titanic/titanic3.csv",
sep=",",dtype={"ingresos":np.float64,"edad":np.int32},header=0,names={"ingresos","edad"},
skiprows=12,index_col=None,skip_blank_lines=False,na_filter=False)
| notebooks/T1-1-Titanic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="YTyz3geQHRuS" executionInfo={"status": "ok", "timestamp": 1603601917071, "user_tz": -660, "elapsed": 2140, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxpvArds7xLNZB29BI3aCcSZkWbMPAZWZLOm3o=s64", "userId": "03940255243131127536"}}
import tensorflow as tf
from tensorflow import keras
from functools import partial
import matplotlib.pyplot as plt
import os
import numpy as np
# + id="Ztq6BgmLHf4Q" executionInfo={"status": "ok", "timestamp": 1603601918770, "user_tz": -660, "elapsed": 2850, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxpvArds7xLNZB29BI3aCcSZkWbMPAZWZLOm3o=s64", "userId": "03940255243131127536"}}
(train_data, train_label), (test_data, test_label) = keras.datasets.cifar10.load_data()
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100, activation="elu", kernel_initializer="he_normal"))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.Nadam(lr=5e-5)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)
model_checkpoint_cb = keras.callbacks.ModelCheckpoint("my_cifar10_model.h5", save_best_only=True)
run_index = 1 # increment every time you train the model
run_logdir = os.path.join(os.curdir, "my_cifar10_logs", "run_{:03d}".format(run_index))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
callbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]
# + id="0uxBOHl0K2A8" executionInfo={"status": "ok", "timestamp": 1603594517756, "user_tz": -660, "elapsed": 1040306, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxpvArds7xLNZB29BI3aCcSZkWbMPAZWZLOm3o=s64", "userId": "03940255243131127536"}} outputId="121f1426-fbe6-4384-c98d-b66124304364" colab={"base_uri": "https://localhost:8080/", "height": 1000}
model.fit(train_data, train_label, epochs=100, validation_data=(test_data, test_label), callbacks=callbacks)
# + id="Jw_QSOdrnyRm" executionInfo={"status": "ok", "timestamp": 1603606476675, "user_tz": -660, "elapsed": 1421904, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxpvArds7xLNZB29BI3aCcSZkWbMPAZWZLOm3o=s64", "userId": "03940255243131127536"}} outputId="e6837ac8-65e5-4421-825d-be8fff2c0a78" colab={"base_uri": "https://localhost:8080/", "height": 1000}
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100, activation="elu", kernel_initializer="he_normal"))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.Nadam(lr=5e-5)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
model.fit(train_data, train_label, epochs=30, validation_data=(test_data, test_label))
# + id="TFMPs62JTkNX" executionInfo={"status": "ok", "timestamp": 1603607246045, "user_tz": -660, "elapsed": 612153, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxpvArds7xLNZB29BI3aCcSZkWbMPAZWZLOm3o=s64", "userId": "03940255243131127536"}} outputId="9dfce909-e0f8-4345-c83c-4a79353577b7" colab={"base_uri": "https://localhost:8080/", "height": 1000}
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"))
model.add(keras.layers.AlphaDropout(rate=0.1))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.Nadam(lr=5e-5)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
model.fit(train_data, train_label, epochs=100, validation_data=(test_data, test_label), callbacks=callbacks)
# + id="ImyuG9y3sxni" executionInfo={"status": "ok", "timestamp": 1603611852970, "user_tz": -660, "elapsed": 1931, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxpvArds7xLNZB29BI3aCcSZkWbMPAZWZLOm3o=s64", "userId": "03940255243131127536"}} outputId="a9882920-1b38-4092-9e69-8cc49b333d88" colab={"base_uri": "https://localhost:8080/", "height": 51}
model.evaluate(test_data, test_label)
# + id="I8DUaqgqtyk7" executionInfo={"status": "ok", "timestamp": 1603612008168, "user_tz": -660, "elapsed": 6353, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxpvArds7xLNZB29BI3aCcSZkWbMPAZWZLOm3o=s64", "userId": "03940255243131127536"}} outputId="8a24c3dc-95d5-44aa-e23e-0919cbecd0f4" colab={"base_uri": "https://localhost:8080/", "height": 34}
class MCAlphaDropout(keras.layers.AlphaDropout):
def call(self, inputs):
return super().call(inputs, training=True)
mc_model = keras.models.Sequential([
MCAlphaDropout(layer.rate) if isinstance(layer, keras.layers.AlphaDropout) else layer
for layer in model.layers
])
def mc_dropout_predict_probas(mc_model, X, n_samples=10):
Y_probas = [mc_model.predict(X) for sample in range(n_samples)]
return np.mean(Y_probas, axis=0)
def mc_dropout_predict_classes(mc_model, X, n_samples=10):
Y_probas = mc_dropout_predict_probas(mc_model, X, n_samples)
return np.argmax(Y_probas, axis=1)
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
y_pred = mc_dropout_predict_classes(mc_model, test_data)
accuracy = np.mean(y_pred == test_label[:, 0])
accuracy
# + id="R5bmuAcHu2YB" executionInfo={"status": "ok", "timestamp": 1603612507020, "user_tz": -660, "elapsed": 102548, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxpvArds7xLNZB29BI3aCcSZkWbMPAZWZLOm3o=s64", "userId": "03940255243131127536"}} outputId="7f8d4f5b-5b56-4b30-f442-29e392f9d435" colab={"base_uri": "https://localhost:8080/", "height": 1000}
K = keras.backend
class OneCycleScheduler(keras.callbacks.Callback):
def __init__(self, iterations, max_rate, start_rate=None,
last_iterations=None, last_rate=None):
self.iterations = iterations
self.max_rate = max_rate
self.start_rate = start_rate or max_rate / 10
self.last_iterations = last_iterations or iterations // 10 + 1
self.half_iteration = (iterations - self.last_iterations) // 2
self.last_rate = last_rate or self.start_rate / 1000
self.iteration = 0
def _interpolate(self, iter1, iter2, rate1, rate2):
return ((rate2 - rate1) * (self.iteration - iter1)
/ (iter2 - iter1) + rate1)
def on_batch_begin(self, batch, logs):
if self.iteration < self.half_iteration:
rate = self._interpolate(0, self.half_iteration, self.start_rate, self.max_rate)
elif self.iteration < 2 * self.half_iteration:
rate = self._interpolate(self.half_iteration, 2 * self.half_iteration,
self.max_rate, self.start_rate)
else:
rate = self._interpolate(2 * self.half_iteration, self.iterations,
self.start_rate, self.last_rate)
rate = max(rate, self.last_rate)
self.iteration += 1
K.set_value(self.model.optimizer.lr, rate)
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100,
kernel_initializer="lecun_normal",
activation="selu"))
model.add(keras.layers.AlphaDropout(rate=0.1))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.SGD(lr=1e-2)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
n_epochs = 50
batch_size = 128
onecycle = OneCycleScheduler(len(train_data) // batch_size * n_epochs, max_rate=0.05)
history = model.fit(train_data, train_label, epochs=n_epochs, batch_size=batch_size,
validation_data=(test_data, test_label),
callbacks=[onecycle])
| Hands-on-ML/Code/Chapter 11/cifar10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.11 64-bit (''default'': conda)'
# name: python3
# ---
# # Creating a AWS redshift cluster
# In order to run this code, you will need AWS Credentials stored in a configuration file. In this project it's placed in the hidden folder: './credentials/dwh.cfg' to not upload the secret keys publicly online.
# * We start by loading the packages we will need:
import pandas as pd
import boto3
import json
import configparser
# ## Start by setting up the configuration variables.
# We are pulling the variables out of the configuration files, and storing them as notebook variables to be used later
# +
config = configparser.ConfigParser()
config.read_file(open('credentials/dwh.cfg'))
KEY = config.get('AWS', 'KEY')
SECRET = config.get('AWS', 'SECRET')
DWH_CLUSTER_TYPE = config.get("DWH", "DWH_CLUSTER_TYPE")
DWH_NUM_NODES = config.get("DWH", "DWH_NUM_NODES")
DWH_NODE_TYPE = config.get("DWH", "DWH_NODE_TYPE")
DWH_CLUSTER_IDENTIFIER = config.get("DWH", "DWH_CLUSTER_IDENTIFIER")
DWH_DB = config.get("DWH", "DWH_DB")
DWH_DB_USER = config.get("DWH", "DWH_DB_USER")
DWH_DB_PASSWORD = config.get("DWH", "DWH_DB_PASSWORD")
DWH_PORT = config.get("DWH", "DWH_PORT")
DWH_IAM_ROLE_NAME = config.get("DWH", "DWH_IAM_ROLE_NAME")
# -
# ## Then create the clients for EC2, S3, IAM and Redshift
# in order to connect to AWS services
# +
ec2 = boto3.resource('ec2',
region_name="us-west-2",
aws_access_key_id=KEY,
aws_secret_access_key=SECRET)
s3 = boto3.resource('s3',
region_name="us-west-2",
aws_access_key_id=KEY,
aws_secret_access_key=SECRET)
iam = boto3.client('iam',
region_name="us-west-2",
aws_access_key_id=KEY,
aws_secret_access_key=SECRET)
redshift = boto3.client('redshift',
region_name="us-west-2",
aws_access_key_id=KEY,
aws_secret_access_key=SECRET)
# -
# ## We create an IAM role that allow redshift to read from an S3 bucket
# +
try:
print('1.1 Creating a new IAM Role')
dwhRole = iam.create_role(
Path='/',
RoleName=DWH_IAM_ROLE_NAME,
Description="Allows Redshift clusters to call AWS services for you",
AssumeRolePolicyDocument=json.dumps(
{'Statement': [{'Action': 'sts:AssumeRole',
'Effect': 'Allow',
'Principal': {'Service': 'redshift.amazonaws.com'}}],
'Version': '2012-10-17'})
)
except Exception as e:
print(e)
# -
print('1.2 Attaching Policy')
iam.attach_role_policy(RoleName=DWH_IAM_ROLE_NAME,
PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
)['ResponseMetadata']['HTTPStatusCode']
print('1.3 Get the IAM role ARN')
roleArn = iam.get_role(RoleName=DWH_IAM_ROLE_NAME)['Role']['Arn']
# ## This following cell will create a redshift cluster, Careful as this will incur in some charges in your AWS account
# * Run only the first cell to create the cluster, then the second and third will allow you to check the status.
# ONLY proceed when you see that it becomes available, either running the third cell or in your redshift dasboard in AWS. Make sure you are looking at the right region. For this project we are creating it in 'us-west-2' (Oregon)
try:
response = redshift.create_cluster(
# TODO: add parameters for hardware
ClusterType=DWH_CLUSTER_TYPE,
NodeType=DWH_NODE_TYPE,
NumberOfNodes=int(DWH_NUM_NODES),
# TODO: add parameters for identifiers & credentials
DBName=DWH_DB,
ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,
MasterUsername=DWH_DB_USER,
MasterUserPassword=<PASSWORD>,
# TODO: add parameter for role (to allow s3 access)
IamRoles=[roleArn]
)
except Exception as e:
print(e)
# This function will display the status of our cluster
def prettyRedshiftProps(props):
pd.set_option('display.max_colwidth', None)
keysToShow = ["ClusterIdentifier",
"NodeType",
"ClusterStatus",
"MasterUsername",
"DBName",
"Endpoint",
"NumberOfNodes",
'VpcId']
x = [(k, v) for k, v in props.items() if k in keysToShow]
return pd.DataFrame(data=x, columns=["Key", "Value"])
myClusterProps = redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]
prettyRedshiftProps(myClusterProps)
# ** IMPORTANT: RUN THIS ONLY AFTER THE STATUS BECOMES AVAILABLE BY RUNNING THE PREVIOUS CELL **
DWH_ENDPOINT = myClusterProps['Endpoint']['Address']
DWH_ROLE_ARN = myClusterProps['IamRoles'][0]['IamRoleArn']
print("DWH_ENDPOINT :: ", DWH_ENDPOINT)
print("DWH_ROLE_ARN :: ", DWH_ROLE_ARN)
# ## Opening a TCP port to access the cluster endpoint
# The port might already exist, and it would throw back an error if it's the case
try:
vpc = ec2.Vpc(id=myClusterProps['VpcId'])
defaultSg = list(vpc.security_groups.all())[0]
print(defaultSg)
defaultSg.authorize_ingress(
GroupName=defaultSg.group_name,
CidrIp='0.0.0.0/0',
IpProtocol='TCP',
FromPort=int(DWH_PORT),
ToPort=int(DWH_PORT)
)
except Exception as e:
print(e)
# ## Checking if we can connect to the cluster
# %reload_ext sql
# %%capture
# %load_ext sql
# %sql sqlite:///factbook.db
conn_string="postgresql://{}:{}@{}:{}/{}".format(DWH_DB_USER, DWH_DB_PASSWORD, DWH_ENDPOINT, DWH_PORT,DWH_DB)
print(conn_string)
# %sql $conn_string
# ## Test that we can create,populate and query against a table '
#
# -> If this works, we can proceed to run create_tables.py to build the backbone of our schema in the cluster.
# -> Be careful to not go ahead with cleaning resources (end of this notebook) before the pipelines finish to run.
# %sql CREATE TABLE IF NOT EXISTS test (col1 TEXT, col2 TEXT, col3 INT);
# %sql INSERT INTO test (col1,col2,col3) VALUES ('hello','world',55),('another','world',34),('bananas','pyjamas',69);
# %sql SELECT * FROM test ORDER BY col1 desc ;
# And cleanup the table
# %sql DROP TABLE test
# ## To check if there were any errors creating/inserting data in the cluster, run this query:
# %sql SELECT * FROM stl_load_errors ORDER BY starttime desc ;
# ## Here we can run these queries to check that our pipelines were created successfully:
# + language="sql"
# SELECT * FROM restaurant_table LIMIT 10;
# SELECT * FROM time_table LIMIT 10;
# SELECT * FROM pickup_table LIMIT 10;
# SELECT * FROM quadrant_table LIMIT 10;
# SELECT * FROM address_table LIMIT 10;
#
# -- Find the pickup-rest-ratio per zip code of pickups happening in the end of the week.
# SELECT at.zip_code AS zip_code
# , COUNT(DISTINCT rt.restaurant_id) AS num_restaurants
# , COUNT(pt.datetime) AS num_pickups
# , num_pickups::DECIMAL/NULLIF(num_restaurants, 0) AS pickups_restaurant_ratio
# FROM restaurant_table AS rt
# JOIN address_table AS at ON at.address_id=rt.address_id
# LEFT JOIN pickup_table AS pt ON pt.quadrant_id=rt.quadrant_id
# LEFT JOIN time_table AS tt ON tt.datetime=pt.datetime
# WHERE tt.weekday IN (4,5,6)
# GROUP BY at.zip_code
# ORDER BY pickups_restaurant_ratio;
#
# -
# ## Clean resources: This will delete the cluster and the IAM role.
# 1. Run only the first cell to delete the cluster. Very important if you don't want to incur in more charges
# 2. Wait until it's deleted checking the status by running the second cell
# 3. Run the last cell to delete the user role
#### CAREFUL!!
#-- Uncomment & run to delete the created resources
redshift.delete_cluster( ClusterIdentifier=DWH_CLUSTER_IDENTIFIER, SkipFinalClusterSnapshot=True)
#### CAREFUL!!
# +
# RUN THIS TO CHECK THE STATUS OF THE CLUSTER UNTIL IT'S NOT FOUND ANYMORE
myClusterProps = redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]
prettyRedshiftProps(myClusterProps)
# -
#### CAREFUL!!
#-- Uncomment & run to delete the created resources
iam.detach_role_policy(RoleName=DWH_IAM_ROLE_NAME, PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")
iam.delete_role(RoleName=DWH_IAM_ROLE_NAME)
#### CAREFUL!!
| create_cluster.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# default_exp foundations
# -
# # Foundations
#
# > Foundational classes for well-versed typings and descriptors
#hide
from nbdev.showdoc import *
#export
from enum import Enum, EnumMeta
#export
class DirectValueMeta(EnumMeta):
"Metaclass that allows for directly getting an enum attribute"
def __getattribute__(cls, name):
value = super().__getattribute__(name)
if isinstance(value, cls):
value = value.value
return value
#export
class DocumentedEnum(Enum, metaclass=DirectValueMeta):
"""
An `Enum` that can have its members have custom docstrings
Based on https://stackoverflow.com/questions/19330460/how-do-i-put-docstrings-on-enums
"""
def __new__(cls, *args):
obj = object.__new__(cls)
if len(args) > 1:
obj._value_ = args[0]
else:
obj._value_ = None
return obj
def __repr__(self):
r = f'<{self.__class__.__name__}.{self._name_}: {self._value_}>'
if hasattr(self, '__doc__'):
r += f'\n {self.__doc__}'
return r
def __init__(self, *args):
"""
Creates a generic enumeration with potential assigning of a member docstring
Should be passed in the form of:
value, docstring
Or:
docstring
"""
if len(args) == 1:
self._value_ = self._name_.lower()
if isinstance(args[-1], str):
self.__doc__ = args[-1]
# The `DocumentedEnum` let's us define custom `enum` classes that can have their members be documented. This let's us improve the readability of namespace classes, especially if those enum members have very specific meanings.
#
# We can also use the member name as the value, with the following string dictating the docstring.
class Weekday(DocumentedEnum):
"Days of the week"
MONDAY = 1, "The first day of the week"
TUESDAY = 2, "The second day of the week"
WEDNESDAY = 3, "The third day of the week"
THURSDAY = 4, "The fourth day of the week"
FRIDAY = 5, "The fifth day of the week"
SATURDAY = 6, "The sixth day of the week"
SUNDAY = "The seventh day of the week"
show_doc(Weekday.MONDAY)
# These docstrings will also show in the general `__repr__`:
Weekday.MONDAY
# These are optional, so if a docstring is not provided it will fallback to the enum's docstring:
show_doc(Weekday.SUNDAY)
Weekday.SUNDAY
# ## Utility Functions
#export
def noop (x=None, *args, **kwargs):
"Do nothing"
return x
| 00_core.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from biokit import viz
# %pylab inline
from biokit.viz import anova
# +
import pandas as pd
X = random.random(100)
df = pd.DataFrame({'A':X, 'B': X + X/10,
'C': X + random.random(100),
'D': X + random.random(100)})
# -
an = anova.ANOVA(df)
an.imshow_anova_pairs()
| notebooks/viz/anova.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github"
# <a href="https://colab.research.google.com/github/vvthakral/data-science-bootcamp/blob/main/amazon_recommendation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="afgJxK-Om4d2"
# # Problem Statement
#
# You are given subset of Amazon Transaction data for year 2019. Recommend a second product to the user based on users input (1st product in cart). The recommendation does not need to involve Machine Learning but simply be based on the count of most commonly purchased products (together) from the purchase history in the dataset.To limit the scope, you can target only San Francisco instead of entire USA.
#
#
# Learnings:<br>
# How to handle huge dataset<br>
# How to deal with Textual data<br>
# How to build prototype recommender system <br>
# How to make recommendations more efficient<br>
# + id="5LZjZa19nWpw"
import pandas as pd
import numpy as np
# + [markdown] id="FyWESdcfm4eA"
# # Reading Data
# + colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 73} id="xAYXnTqSHOaW" outputId="e40efef8-8e71-48c3-ab15-e9ac3aca66e8"
from google.colab import files
uploaded = files.upload()
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="UOOblFxTsKfH" outputId="b8bd037c-3e4b-42e3-bcf9-35ec9917e10e"
df = pd.read_csv("amazon_transaction.csv")
df.head(10)
# + [markdown] id="UAwrsnz00iKI"
# #Data Understanding
# + colab={"base_uri": "https://localhost:8080/"} id="mlvL-ROpum0C" outputId="d70bde13-4a20-416e-cccf-ebe04fb35a65"
#Check for nulls and remove the rows
df.isnull().sum()
# + id="RR-M2yjjv2Dy"
df.dropna(inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 166} id="nxyvYhOQtP5p" outputId="5cd2372f-6373-41cd-b2da-a694ba5b2299"
#Get basic stats
df.describe()
# + id="z61X0uhNz3wY"
df = df[df['Purchase Address'] != 'Purchase Address']
# + colab={"base_uri": "https://localhost:8080/"} id="Mf-z4d6Dwp5X" outputId="b287f2eb-3bf9-471a-ad64-0546b64268dc"
df['Order ID'].count() - len(df['Order ID'].unique())
# + [markdown] id="wyGL_YKXwODt"
# We have 7513 duplicate order id i.e purchase of more than 1 product was made on these order id.
#
# This is the data we will be using for making recommendations.
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="LZG_LDp2vIQM" outputId="07cb3bd2-7230-4526-d24c-06031d0c5903"
df = df[df['Order ID'].duplicated(keep=False)]
df
# + [markdown] id="fciYLbC6O0w0"
# #Feature Engineering
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="mAfb6Zyc02bJ" outputId="ec6e67e8-1e33-4458-d7f7-74bc7e251709"
#extract city from address
def get_city(address):
return address.split(",")[1].strip(" ")
df['city'] = df['Purchase Address'].apply(get_city)
df.head(10)
# + colab={"base_uri": "https://localhost:8080/"} id="ewl6anu_1ki8" outputId="cc5ae32c-6e3e-4581-956b-b1a64196bfe6"
#select data for San Francisco only
df = df[df['city']=='San Francisco']
print(f"Number of orders with more than 1 product purchase in San Francisco are {df['city'].count()}")
# + id="egzFVA9hJ-H9"
#Use the apply function to merge products in same order id.
#refer session notebook or the below link
#https://stackoverflow.com/questions/27298178/concatenate-strings-from-several-rows-using-pandas-groupby
# + id="_C8spn6kmRvl" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="278fc7f6-6f82-4cd8-9dc0-4a25e92e4c17"
#Task 1
#Merge products with same order id
'''
After merger, products in same order id should be merged into 1 string
eg: Google Phone, Wired Headphones
'''
df = df.groupby(['Order ID'])['Product'].apply(lambda x: ','.join(x)).reset_index()
df.head()
# + [markdown] id="GJ5vHkldO441"
# #Recommendation
# + colab={"base_uri": "https://localhost:8080/"} id="ciGCizP-3TOa" outputId="20e1d5bc-5ad9-4282-f9eb-6ef12f7c57d0"
#Task 2
#Make recommendation
'''
Make simple recommendation, just based on count of combinations from previous purchase history.
eg:
google phone, wired headphones 5 times
google phone, usb type c 10 times
so recommend usb type c cable if customer has google phone in their cart
'''
combinations = {}
for _ in df['Product']:
p = _.split(',')
p.sort()
for i in range(len(p)):
for j in range(i+1,len(p)):
if (p[i],p[j]) in combinations:
combinations[(p[i],p[j])] +=1
else:
combinations[(p[i],p[j])]=1
print(combinations)
# + colab={"base_uri": "https://localhost:8080/"} id="gYaEjSIt-AIL" outputId="c3215030-c3bf-433d-eb40-53433537fdd4"
#Make list out of dictionary
comb_list = list(combinations.items())
print(comb_list)
# + colab={"base_uri": "https://localhost:8080/"} id="unvxVtmjAd86" outputId="3b0a48d3-5216-455a-c5d1-986538c4a344"
#sort list based on the number of occurences/combination count
comb_list = sorted(comb_list,key=lambda x:x[1],reverse=True)
print(comb_list)
# + id="VpRndCLMnUIY" colab={"base_uri": "https://localhost:8080/"} outputId="94019aed-69d9-44ec-801c-ded01ed13107"
def recommender(product):
'''
code for recommendation
Simple method: return any product that was purchased with the product in cart
better approach: return product with highest match count
'''
global comb_list
product = product.lower().strip() # clean,format the user input
for i in comb_list:
if i[0][0].lower() == product:
return i[0][1]
elif i[0][1].lower() == product:
return i[0][0]
return 'We are still upgrading our recommendation system!'
product = 'google phone'
print(recommender(product))
# + id="sLvZoMhvNIQA"
| week-3-pandas/amazon_recommendation_solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# ## Collect Data
df = pd.read_excel('Raw_data.xlsx')
tiers = pd.read_excel('Tiers.xlsx')
# ## Assess Data
Tier1 = tiers[tiers['DSC_TIER'] == 'TIER1'].index
Tier2 = tiers[tiers['DSC_TIER'] == 'TIER2'].index
Tier3 = tiers[tiers['DSC_TIER'] == 'TIER3'].index
Tier4 = tiers[tiers['DSC_TIER'] == 'TIER4'].index
T1 = []
T2 = []
T3 = []
T4 = []
for i in Tier1:
T1.append(tiers['DSC_CITY'][i])
for i in Tier2:
T2.append(tiers['DSC_CITY'][i])
for i in Tier3:
T3.append(tiers['DSC_CITY'][i])
for i in Tier4:
T4.append(tiers['DSC_CITY'][i])
def to_tier(row):
if row['Region'] in T1:
return 'Tier1'
elif row['Region'] in T2:
return 'Tier2'
elif row['Region'] in T3:
return 'Tier3'
elif row['Region'] in T4:
return 'Tier4'
region = tiers['DSC_CITY'].unique()
city = tiers['DSC_REGION'].unique()
df.info()
# ## Clean
# <a name='clean'></a>
# keep only reasons with first 4 letters and ignore nulls
reason = list(df[df['Last failure reason'].notnull()].index)
for i in reason:
df['Last failure reason'][i] = df['Last failure reason'][i][:4]
df['Last failure reason'].tail()
for i in range(len(df)):
if df['Region'][i] not in region:
df['Region'] = df['Region'].drop(i)
df['Region'].nunique()
df['Tier'] = df.apply(lambda row: to_tier(row), axis = 1)
# ## Answers
# #### 1- Find how many orders have been Delivered or Completed by each services provider at each Tier.
df[df['Package Status'] == 'COMPLETED'].groupby(['Hub', 'Tier'])['Tier'].count()
# #### 2- Find how many Jumia Express orders have been delivered
# #### within and out of Delivery Timeline at each Tier
# I couldn't resolve the `Delivery date #` column values
# #### 3- Find how many orders Pending at each hub more than 5 days without attempt
# #### and what is the total number of packages for 3PL & CF Teams
# #### (any hub contains fleet word consider as CF)
df[df['Package Status'] == 'PENDING'].groupby(['Tier', '# of attempts'])['Tier'].count()
# #### 4 Find how many orders not delivered yet and passed Our Delivery Timeline at each hub
# I couldn't resolve the `Delivery date #` column values
# #### 5- Find the highest hub canceled packages by hubs with CA35 & CA52 reasons
df[df['Last failure reason'] == 'CA35'].groupby(['Hub', 'Package Status'])['Hub'].max()
df[df['Last failure reason'] == 'CA52'].groupby(['Hub', 'Package Status'])['Hub'].count()
# #### 6- Suppose the order was Jumia express Tier1 and created before 2:00 PM
# #### we promised the customer to delivery the packages Next Day,
# #### if not one day after based on the previous scenario,
# #### find how many orders placed before 2:00 PM and delivered within our promise.
# I couldn't resolve the `Delivery date #` column values
# #### 7.1 - use the raw data to share with us a new interesting insights, after applying Data analysis process.
# #### 7.2 - list data analysis process should be used by any data analyst to perform his work.
# 1. Pose questions
# 2. Collect data
# 3. Clean data
# 4. Explore data
# 5. Draw conclusions
# 6. Communicate results
# +
#cols = ['Tracking #', 'Hub', 'Location', '# of attempts', 'Client name', 'Client last name', 'Amount to Collect',
# 'Collected amount', 'Service Code', 'Service Code Description',
# 'Payment Method', 'Package Status', 'Last failure reason',
# 'Order number', 'Shipment service', 'Airway bill number', 'Refunded', 'Delayed']
#dates = ['Schedule date', 'Operation Date',
# 'Collected date #', 'Delivered date #',
# 'Package date #', 'Service date #', 'Order date'
# ]
#botcol = ['Locality', 'Notes', 'Phone', 'Post-code', 'Street', 'Aging',
# 'Movable unit #', 'Runsheet name', 'Runsheet driver', 'Attempt user name', 'Company']
#tier = ['Region', 'City']
#packcol = ['Weight', 'Length', 'Width', 'Height']
| jumia analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # Translation of Numeric Phrases with Seq2Seq
#
# In the following we will try to build a **translation model from french phrases describing numbers** to the corresponding **numeric representation** (base 10).
#
# This is a toy machine translation task with a **restricted vocabulary** and a **single valid translation for each source phrase** which makes it more tractable to train on a laptop computer and easier to evaluate. Despite those limitations we expect that this task will highlight interesting properties of Seq2Seq models including:
#
# - the ability to **deal with different length** of the source and target sequences,
# - handling token with a **meaning that changes depending on the context** (e.g "quatre" vs "quatre vingts" in "quatre cents"),
# - basic counting and "reasoning" capabilities of LSTM and GRU models.
#
# The parallel text data is generated from a "ground-truth" Python function named `to_french_phrase` that captures common rules. Hyphenation was intentionally omitted to make the phrases more ambiguous and therefore make the translation problem slightly harder to solve (and also because Olivier had no particular interest hyphenation in properly implementing rules :).
# + deletable=true editable=true
from french_numbers import to_french_phrase
for x in [21, 80, 81, 300, 213, 1100, 1201, 301000, 80080]:
print(str(x).rjust(6), to_french_phrase(x))
# + [markdown] deletable=true editable=true
# ## Generating a Training Set
#
# The following will **generate phrases 20000 example phrases for numbers between 1 and 1,000,000** (excluded). We chose to over-represent small numbers by generating all the possible short sequences between `1` and `exhaustive=5000`.
#
# We then split the generated set into non-overlapping train, validation and test splits.
# + deletable=true editable=true
from french_numbers import generate_translations
from sklearn.model_selection import train_test_split
numbers, french_numbers = generate_translations(
low=1, high=int(1e6) - 1, exhaustive=5000, random_seed=0)
num_train, num_dev, fr_train, fr_dev = train_test_split(
numbers, french_numbers, test_size=0.5, random_state=0)
num_val, num_test, fr_val, fr_test = train_test_split(
num_dev, fr_dev, test_size=0.5, random_state=0)
# + deletable=true editable=true
len(fr_train), len(fr_val), len(fr_test)
# + deletable=true editable=true
for i, fr_phrase, num_phrase in zip(range(5), fr_train, num_train):
print(num_phrase.rjust(6), fr_phrase)
# + deletable=true editable=true
for i, fr_phrase, num_phrase in zip(range(5), fr_val, num_val):
print(num_phrase.rjust(6), fr_phrase)
# + [markdown] deletable=true editable=true
# ## Vocabularies
#
# Build the vocabularies from the training set only to get a chance to have some out-of-vocabulary words in the validation and test sets.
#
# First we need to introduce specific symbols that will be used to:
# - pad sequences
# - mark the beginning of translation
# - mark the end of translation
# - be used as a placehold for out-of-vocabulary symbols (not seen in the training set).
#
# Here we use the same convention as the [tensorflow seq2seq tutorial](https://www.tensorflow.org/tutorials/seq2seq):
# + deletable=true editable=true
PAD, GO, EOS, UNK = START_VOCAB = ['_PAD', '_GO', '_EOS', '_UNK']
# + [markdown] deletable=true editable=true
# To build the vocabulary we need to tokenize the sequences of symbols. For the digital number representation we use character level tokenization while whitespace-based word level tokenization will do for the French phrases:
# + deletable=true editable=true
def tokenize(sentence, word_level=True):
if word_level:
return sentence.split()
else:
return [sentence[i:i + 1] for i in range(len(sentence))]
# + deletable=true editable=true
tokenize('1234', word_level=False)
# + deletable=true editable=true
tokenize('mille deux cent trente quatre', word_level=True)
# + [markdown] deletable=true editable=true
# Let's now use this tokenization strategy to assign a unique integer token id to each possible token string found the traing set in each language ('French' and 'numeric'):
# + deletable=true editable=true
def build_vocabulary(tokenized_sequences):
rev_vocabulary = START_VOCAB[:]
unique_tokens = set()
for tokens in tokenized_sequences:
unique_tokens.update(tokens)
rev_vocabulary += sorted(unique_tokens)
vocabulary = {}
for i, token in enumerate(rev_vocabulary):
vocabulary[token] = i
return vocabulary, rev_vocabulary
# + deletable=true editable=true
tokenized_fr_train = [tokenize(s, word_level=True) for s in fr_train]
tokenized_num_train = [tokenize(s, word_level=False) for s in num_train]
fr_vocab, rev_fr_vocab = build_vocabulary(tokenized_fr_train)
num_vocab, rev_num_vocab = build_vocabulary(tokenized_num_train)
# + [markdown] deletable=true editable=true
# The two languages do not have the same vocabulary sizes:
# + deletable=true editable=true
len(fr_vocab)
# + deletable=true editable=true
len(num_vocab)
# + deletable=true editable=true
for k, v in sorted(fr_vocab.items())[:10]:
print(k.rjust(10), v)
print('...')
# + deletable=true editable=true
for k, v in sorted(num_vocab.items()):
print(k.rjust(10), v)
# + [markdown] deletable=true editable=true
# We also built the reverse mappings from token ids to token string representations:
# + deletable=true editable=true
print(rev_fr_vocab)
# + deletable=true editable=true
print(rev_num_vocab)
# + [markdown] deletable=true editable=true
# ## Seq2Seq with a single GRU architecture
#
# <img src="images/basic_seq2seq.png" width="80%" />
#
# From: [Sutskever, Ilya, <NAME>, and <NAME>. "Sequence to sequence learning with neural networks." NIPS 2014](https://arxiv.org/abs/1409.3215)
#
#
#
# For a given source sequence - target sequence pair, we will:
# - tokenize the source and target sequences;
# - reverse the order of the source sequence;
# - build the input sequence by concatenating the reversed source sequence and the target sequence in original order using the `_GO` token as a delimiter,
# - build the output sequence by appending the `_EOS` token to the source sequence.
#
#
# Let's do this as a function using the original string representations for the tokens so as to make it easier to debug:
# -
# **Exercise**
# - Write a function that turns a pair of tokenized (source, target) sequences into a pair of (input, output) sequences as described above.
# - The function should have a `reverse_source=True` as an option.
#
# Notes:
# - The function should output two sequences of string tokens: one to be fed as the input and the other as expected output for the seq2seq network.
# - Do not pad the sequences: we will handle the padding later.
# - Don't forget to insert the `_GO` and `_EOS` special symbols at the right locations.
# + deletable=true editable=true
def make_input_output(source_tokens, target_tokens, reverse_source=True):
# TOTO
return input_tokens, output_tokens
# +
# # %load solutions/make_input_output.py
# + deletable=true editable=true
input_tokens, output_tokens = make_input_output(
['cent', 'vingt', 'et', 'un'],
['1', '2', '1'],
)
# Expected outputs:
# ['un', 'et', 'vingt', 'cent', '_GO', '1', '2', '1']
# ['1', '2', '1', '_EOS']
# + deletable=true editable=true
input_tokens
# + deletable=true editable=true
output_tokens
# + [markdown] deletable=true editable=true
# ### Vectorization of the parallel corpus
#
# Let's apply the previous transformation to each pair of (source, target) sequene and use a shared vocabulary to store the results in numpy arrays of integer token ids, with padding on the left so that all input / output sequences have the same length:
# + deletable=true editable=true
all_tokenized_sequences = tokenized_fr_train + tokenized_num_train
shared_vocab, rev_shared_vocab = build_vocabulary(all_tokenized_sequences)
# -
max(len(s) for s in tokenized_fr_train)
max(len(s) for s in tokenized_num_train)
# + deletable=true editable=true
import numpy as np
max_length = 20 # found by introspection of our training set
def vectorize_corpus(source_sequences, target_sequences, shared_vocab,
word_level_source=True, word_level_target=True,
max_length=max_length):
assert len(source_sequences) == len(target_sequences)
n_sequences = len(source_sequences)
source_ids = np.empty(shape=(n_sequences, max_length), dtype=np.int32)
source_ids.fill(shared_vocab[PAD])
target_ids = np.empty(shape=(n_sequences, max_length), dtype=np.int32)
target_ids.fill(shared_vocab[PAD])
numbered_pairs = zip(range(n_sequences), source_sequences, target_sequences)
for i, source_seq, target_seq in numbered_pairs:
source_tokens = tokenize(source_seq, word_level=word_level_source)
target_tokens = tokenize(target_seq, word_level=word_level_target)
in_tokens, out_tokens = make_input_output(source_tokens, target_tokens)
in_token_ids = [shared_vocab.get(t, UNK) for t in in_tokens]
source_ids[i, -len(in_token_ids):] = in_token_ids
out_token_ids = [shared_vocab.get(t, UNK) for t in out_tokens]
target_ids[i, -len(out_token_ids):] = out_token_ids
return source_ids, target_ids
# + deletable=true editable=true
X_train, Y_train = vectorize_corpus(fr_train, num_train, shared_vocab,
word_level_target=False)
# + deletable=true editable=true
X_train.shape
# -
X_train[0]
# + deletable=true editable=true
Y_train.shape
# + deletable=true editable=true
fr_train[0]
# + deletable=true editable=true
num_train[0]
# + deletable=true editable=true
X_train[0]
# + deletable=true editable=true
Y_train[0]
# + [markdown] deletable=true editable=true
# This looks good. In particular we can note:
#
# - the PAD=0 symbol at the beginning of the two sequences,
# - the input sequence has the GO=1 symbol to separate the source from the target,
# - the output sequence is a shifted version of the target and ends with EOS=2.
#
# Let's vectorize the validation and test set to be able to evaluate our models:
# + deletable=true editable=true
X_val, Y_val = vectorize_corpus(fr_val, num_val, shared_vocab,
word_level_target=False)
X_test, Y_test = vectorize_corpus(fr_test, num_test, shared_vocab,
word_level_target=False)
# + deletable=true editable=true
X_val.shape, Y_val.shape
# + deletable=true editable=true
X_test.shape, Y_test.shape
# + [markdown] deletable=true editable=true
# ### A simple homogeneous Seq2Seq architecture
#
# To keep the architecture simple we will use the **same RNN model and weights for both the encoder part** (before the `_GO` token) **and the decoder part** (after the `_GO` token).
#
# We may GRU recurrent cell instead of LSTM because it is slightly faster to compute and should give comparable results.
#
# **Exercise:**
# - Build a Seq2Seq model:
# - Start with an Embedding layer;
# - Add a single GRU layer: the GRU layer should yield a sequence of output vectors, one at each timestep;
# - Add a Dense layer to adapt the ouput dimension of the GRU layer to the dimension of the output vocabulary;
# - Don't forget to insert some Dropout layer(s), especially after the Embedding layer.
#
# Note:
# - The output dimension of the Embedding layer should be smaller than usual be cause we have small vocabulary size;
# - The dimension of the GRU should be larger to give the Seq2Seq model enough "working memory" to memorize the full input sequence before decoding it;
# - Your model should output a shape `[batch, sequence_length, vocab_size]`.
# + deletable=true editable=true
from keras.models import Sequential
from keras.layers import Embedding, Dropout, GRU, Dense
vocab_size = len(shared_vocab)
simple_seq2seq = Sequential()
# TODO
# Here we use the sparse_categorical_crossentropy loss to be able to pass
# integer-coded output for the token ids without having to convert to one-hot
# codes
simple_seq2seq.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
# +
# # %load solutions/simple_seq2seq.py
# -
# **Questions**:
#
# - What is the expected shape of the output of the model when fed with input of length 20 tokens? What is the meaning of the values in the output of the model?
# - What is the shape of the output of each layer in the model?
# +
# simple_seq2seq.predict(X_train[0:1]).shape
# +
# simple_seq2seq.summary()
# + [markdown] deletable=true editable=true
# Let's register a callback mechanism to automatically snapshot the best model by measure the performance of the model on the validation set at the end of each epoch during training:
# + deletable=true editable=true
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
best_model_fname = "simple_seq2seq_checkpoint.h5"
best_model_cb = ModelCheckpoint(best_model_fname, monitor='val_loss',
save_best_only=True, verbose=1)
# + [markdown] deletable=true editable=true
# We need to use np.expand_dims trick on Y: this is required by Keras because of we use a sparse (integer-based) representation for the output:
# + deletable=true editable=true
# %matplotlib inline
import matplotlib.pyplot as plt
history = simple_seq2seq.fit(X_train, np.expand_dims(Y_train, -1),
validation_data=(X_val, np.expand_dims(Y_val, -1)),
epochs=15, verbose=2, batch_size=32,
callbacks=[best_model_cb])
plt.figure(figsize=(12, 6))
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], '--', label='validation')
plt.ylabel('negative log likelihood')
plt.xlabel('epoch')
plt.legend()
plt.title('Convergence plot for Simple Seq2Seq')
# + [markdown] deletable=true editable=true
# Let's load the best model found on the validation set at the end of training:
# + deletable=true editable=true
simple_seq2seq = load_model(best_model_fname)
# + [markdown] deletable=true editable=true
# If you don't have access to a GPU and cannot wait 10 minutes to for the model to converge to a reasonably good state, feel to use the pretrained model. This model has been obtained by training the above model for ~150 epochs. The validation loss is significantly lower than 1e-5. In practice it should hardly ever make any prediction error on this easy translation problem.
#
# Alternatively we will load this imperfect model (trained only to 50 epochs) with a validation loss of ~7e-4. This model makes funny translation errors so I would suggest to try it first:
# + deletable=true editable=true
from keras.utils.data_utils import get_file
filename = get_file(
"simple_seq2seq_partially_pretrained.h5",
"https://github.com/m2dsupsdlclass/lectures-labs/releases/"
"download/0.4/simple_seq2seq_partially_pretrained.h5"
)
# Uncomment the following to replace for the fully trained network:
# filename= get_file(
# "simple_seq2seq_pretrained.h5",
# "https://github.com/m2dsupsdlclass/lectures-labs/releases/"
# "download/0.4/simple_seq2seq_pretrained.h5")
simple_seq2seq.load_weights(filename)
# + [markdown] deletable=true editable=true
# Let's have a look at a raw prediction on the first sample of the test set:
# + deletable=true editable=true
fr_test[0]
# + [markdown] deletable=true editable=true
# In numeric array this is provided (along with the expected target sequence) as the following padded input sequence:
# + deletable=true editable=true
first_test_sequence = X_test[0:1]
first_test_sequence
# + [markdown] deletable=true editable=true
# Remember that the `_GO` (symbol indexed at `1`) separates the reversed source from the expected target sequence:
# + deletable=true editable=true
rev_shared_vocab[1]
# + [markdown] deletable=true editable=true
# ### Interpreting the model prediction
#
# **Exercise**:
# - Feed this test sequence into the model. What is the shape of the output?
# - Get the argmax of each output prediction to get the most likely symbols
# - Dismiss the padding / end of sentence
# - Convert to readable vocabulary using `rev_shared_vocab`
#
# *Interpretation*
# - Compare the output with the first example in numerical format `num_test[0]`
# - What do you think of this way of decoding? Is it correct to use it at inference time?
# +
# # %load solutions/interpret_output.py
# + [markdown] deletable=true editable=true
# In the previous exercise we cheated because we gave the complete sequence along with the solution in the input sequence.
#
# To be more realistic we need to use the model in a setting where we do not provide any token of expected translation as part of the input sequence: the model shall predict one token at a time starting only from the source sequence along with the `<GO>` special symbol. At each step, we append the new predicted output token in the input sequence to predict the next token:
# + deletable=true editable=true
def greedy_translate(model, source_sequence, shared_vocab, rev_shared_vocab,
word_level_source=True, word_level_target=True):
"""Greedy decoder recursively predicting one token at a time"""
# Initialize the list of input token ids with the source sequence
source_tokens = tokenize(source_sequence, word_level=word_level_source)
input_ids = [shared_vocab.get(t, UNK) for t in reversed(source_tokens)]
input_ids += [shared_vocab[GO]]
# Prepare a fixed size numpy array that matches the expected input
# shape for the model
input_array = np.empty(shape=(1, model.input_shape[1]),
dtype=np.int32)
decoded_tokens = []
while len(input_ids) <= max_length:
# Vectorize a the list of input tokens as
# and use zeros padding.
input_array.fill(shared_vocab[PAD])
input_array[0, -len(input_ids):] = input_ids
# Predict the next output: greedy decoding with argmax
next_token_id = model.predict(input_array)[0, -1].argmax()
# Stop decoding if the network predicts end of sentence:
if next_token_id == shared_vocab[EOS]:
break
# Otherwise use the reverse vocabulary to map the prediction
# back to the string space
decoded_tokens.append(rev_shared_vocab[next_token_id])
# Append prediction to input sequence to predict the next
input_ids.append(next_token_id)
separator = " " if word_level_target else ""
return separator.join(decoded_tokens)
# + deletable=true editable=true
phrases = [
"un",
"deux",
"trois",
"onze",
"quinze",
"cent trente deux",
"cent mille douze",
"sept mille huit cent cinquante neuf",
"vingt et un",
"vingt quatre",
"quatre vingts",
"quatre vingt onze mille",
"quatre vingt onze mille deux cent deux",
]
for phrase in phrases:
translation = greedy_translate(simple_seq2seq, phrase,
shared_vocab, rev_shared_vocab,
word_level_target=False)
print(phrase.ljust(40), translation)
# -
# The results are far from perfect but we can see that the network has already picked up some translation skills.
#
# Why does the partially trained network is able to give the correct translation for:
#
# `"sept mille huit cent cinquante neuf"`
#
# but not for:
#
# `"cent mille douze"` ?
#
# The answer is as following:
# - it is rather easy for the network to learn a mapping between symbols (first case), by dismissing `"cent"` and `"mille"`;
# - outputing the right amount of symbols, especially `0s` for `"cent mille douze"` requires more reasoning and ability to count.
# Let's have a look at generalization out of correct French and see if the network would generalize like a French speaker would do:
# + deletable=true editable=true
phrases = [
"quatre vingt et un",
"quarante douze",
"onze mille soixante vingt sept",
"deux mille soixante vingt quatorze",
]
for phrase in phrases:
translation = greedy_translate(simple_seq2seq, phrase,
shared_vocab, rev_shared_vocab,
word_level_target=False)
print(phrase.ljust(40), translation)
# + [markdown] deletable=true editable=true
# ## Model evaluation
#
# Because **we expect only one correct translation** for a given source sequence, we can use **phrase-level accuracy** as a metric to quantify our model quality.
#
# Note that **this is not the case for real translation models** (e.g. from French to English on arbitrary sentences). Evaluation of a machine translation model is tricky in general. Automated evaluation can somehow be done at the corpus level with the [BLEU score](https://en.wikipedia.org/wiki/BLEU) (bilingual evaluation understudy) given a large enough sample of correct translations provided by certified translators but its only a noisy proxy.
#
# The only good evaluation is to give a large enough sample of the model predictions on some test sentences to certified translators and ask them to give an evaluation (e.g. a score between 0 and 6, 0 for non-sensical and 6 for the hypothetical perfect translation). However in practice this is very costly to do.
#
# Fortunately we can just use phrase-level accuracy on a our very domain specific toy problem:
# + deletable=true editable=true
def phrase_accuracy(model, num_sequences, fr_sequences, n_samples=300,
decoder_func=greedy_translate):
correct = []
n_samples = len(num_sequences) if n_samples is None else n_samples
for i, num_seq, fr_seq in zip(range(n_samples), num_sequences, fr_sequences):
if i % 100 == 0:
print("Decoding %d/%d" % (i, n_samples))
predicted_seq = decoder_func(simple_seq2seq, fr_seq,
shared_vocab, rev_shared_vocab,
word_level_target=False)
correct.append(num_seq == predicted_seq)
return np.mean(correct)
# + deletable=true editable=true
print("Phrase-level test accuracy: %0.3f"
% phrase_accuracy(simple_seq2seq, num_test, fr_test))
# + deletable=true editable=true
print("Phrase-level train accuracy: %0.3f"
% phrase_accuracy(simple_seq2seq, num_train, fr_train))
# + [markdown] deletable=true editable=true
# ## Bonus: Decoding with a Beam Search
#
# Instead of decoding with greedy strategy that only considers the most-likely next token at each prediction, we can hold a priority queue of the most promising top-n sequences ordered by loglikelihoods.
#
# This could potentially improve the final accuracy of an imperfect model: indeed it can be the case that the most likely sequence (based on the conditional proability estimated by the model) starts with a character that is not the most likely alone.
#
# **Bonus Exercise:**
# - build a beam_translate function which decodes candidate translations with a beam search strategy
# - use a list of candidates, tracking `beam_size` candidates and their corresponding likelihood
# - compute predictions for the next outputs by using predict with a batch of the size of the beam
# - be careful to stop appending results if EOS symbols have been found for each candidate!
# -
def beam_translate(model, source_sequence, shared_vocab, rev_shared_vocab,
word_level_source=True, word_level_target=True,
beam_size=10, return_ll=False):
"""Decode candidate translations with a beam search strategy
If return_ll is False, only the best candidate string is returned.
If return_ll is True, all the candidate strings and their loglikelihoods
are returned.
"""
# + deletable=true editable=true
# # %load solutions/beam_search.py
# + deletable=true editable=true
candidates = beam_translate(simple_seq2seq, "cent mille un",
shared_vocab, rev_shared_vocab,
word_level_target=False,
return_ll=True, beam_size=10)
candidates
# + deletable=true editable=true
candidates = beam_translate(simple_seq2seq, "quatre vingts",
shared_vocab, rev_shared_vocab,
word_level_target=False,
return_ll=True, beam_size=10)
candidates
# + [markdown] deletable=true editable=true
# ## Model Accuracy with Beam Search Decoding
# + deletable=true editable=true
print("Phrase-level test accuracy: %0.3f"
% phrase_accuracy(simple_seq2seq, num_test, fr_test,
decoder_func=beam_translate))
# + deletable=true editable=true
print("Phrase-level train accuracy: %0.3f"
% phrase_accuracy(simple_seq2seq, num_train, fr_train,
decoder_func=beam_translate))
# + [markdown] deletable=true editable=true
# When using the partially trained model the test phrase-level is slightly better (0.38 vs 0.37) with the beam decoder than with the greedy decoder. However the improvement is not that important on our toy task. Training the model to convergence would yield a perfect score on the test set anyway.
#
# Properly tuned beam search decoding can be critical to improve the quality of Machine Translation systems trained on natural language pairs though.
# + [markdown] deletable=true editable=true
# # Going Further
#
# We only scratched the surface of sequence-to-sequence systems. To go further, we recommend reading the initial [Sequence to Sequence paper](https://arxiv.org/abs/1409.3215) as well as the following developments, citing this work. Furthermore, here are a few pointers on how to go further if you're interested.
#
# ### Improved model
#
# - Add multiple, larger GRU layers and more dropout regularization.
# - This should make it possible train a perfect translation model with a smaller amount of labeled samples. Try to train a seq2seq model with only 4000 training sequences or even fewer without overfitting.
# - You will need a GPU and more training time for that.
#
# ### Reverse translation: Numeric to French
#
# - Build a model, with the same data from Numeric to French
# - The model should fine work with the same kind of architecture
#
#
# ### Separated Encoder-Decoder
#
# We may want to build a model with a separated encoder and decoder, to improve performance and be more flexible with the architecture.
#
# With Keras, you can get access to the activation states of the LSTM cell by using:
#
# ```python
# encoder = LSTM(latent_dim, return_state=True)
# encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# encoder_states = [state_h, state_c]
# ```
#
# then you can reuse those states to initialize the state of the decoder cell:
#
# ```python
# decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
# decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)
# ```
#
# A full example of this architecture can be found at:
#
# - https://github.com/keras-team/keras/blob/master/examples/lstm_seq2seq.py
#
# **Exercise**:
#
# - Implement the Encoder-Decoder Seq2Seq architecture an apply it to the French numbers dataset.
#
# ### Attention models
#
# Instead of initializing the decoder with the encoder states, it is possible to use an attention-model.
#
# At the time of writing Keras does not yet have an turn-key attentional mechanism layer but it's possible to build one by assembling lower level building blocks. For instance see:
#
# - https://github.com/keras-team/keras/issues/1472#issuecomment-172095544
#
# Attention models are efficient to model longer sequences, to find alignment between input and output sequences, and to model different parts of sequences with seperated meanings
#
# Other frameworks also have working examples of attention mechanisms:
#
# - A good implementation is available for translation here: https://github.com/OpenNMT/OpenNMT-py
# - TensorFlow also has working examples: https://google.github.io/seq2seq/
#
#
# ### Mastering Neural Machine Translation
#
# In complement to studying the TensorFlow seq2seq and OpenNMT code base, you might also want to read the following 55 pages tutorial:
#
# [Neural Machine Translation and Sequence-to-sequence Models: A Tutorial](https://arxiv.org/abs/1703.01619) by <NAME>.
# -
| labs/07_seq2seq/Translation_of_Numeric_Phrases_with_Seq2Seq.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="VBlhf4LxvsH8"
# In this project, we will see how we can add a watermark to an image. Adding a watermark works as a copyright for your image, so that no one can illegally use your image or document. We will use OpenCV for this project to add logo and text as a watermark.
# + id="xgM3uKsrvp6L"
# Important library imports
import cv2
import numpy as np
import requests
from PIL import Image
# + [markdown] id="4cvjLYhvq8WA"
# We will start by importing required libraries like OpenCV for image processing, numpy for mathematical computation, etc.
# + id="VFr_tb6Ryw9R" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="b6f48e4d-a831-47f1-efa2-5b979cf49c40"
# Reading image form url
image = Image.open(requests.get('https://media.sproutsocial.com/uploads/2017/02/10x-featured-social-media-image-size.png', stream=True).raw)
image_logow = image.resize((500,300))
image_textw = image.resize((500,300))
image_logow
# + [markdown] id="xtISsTOwrPsK"
# For this project we need an image url on which we will apply watermark. We will load the image using the url of the image from google images. You can use any image that you desire and replace the url of the image with current url. Next we will read and resize the image and finally store it into two variables for logo and text watermarking.
# + id="1Gap6ecd1SJO" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="d811357e-e37e-46cd-89d1-ae6608009a29"
# Reading logo form url
logo = Image.open(requests.get('https://pianalytix.com/wp-content/uploads/2020/05/cropped-pianalytiX-full-logo-square-2-209x52.png', stream=True).raw)
logo
# + [markdown] id="SJYlbCCpspSs"
# For logo we are using Pianalytix logo which is downloaded using the above url.
# + id="01o6pwi43quX"
image_logow = np.array(image_logow.convert('RGB'))
h_image, w_image, _ = image_logow.shape
logo = np.array(logo.convert('RGB'))
h_logo, w_logo, _ = logo.shape
# + [markdown] id="ADldE9gFuQRR"
# In order to use these images we will first convert them into rbg format and store it as a numpy array. Similarly, we will also extract the shape i.e. it's height and width.
# + id="xkd3LdtG49xP"
# Get the center of the original. It's the location where we will place the watermark
center_y = int(h_image / 2)
center_x = int(w_image / 2)
top_y = center_y - int(h_logo / 2)
left_x = center_x - int(w_logo / 2)
bottom_y = top_y + h_logo
right_x = left_x + w_logo
# + [markdown] id="k5Mra59pvJJ3"
# We will place our watermark in the center of the image so for that we need to calculate the center of the image. Here, we will find out the center coordinates of the image using height and width of the image.
# + id="qAR3LGie2nTY"
# Get ROI
roi = image_logow[top_y: bottom_y, left_x: right_x]
# Add the Logo to the Roi
result = cv2.addWeighted(roi, 1, logo, 1, 0)
# Drawing
cv2.line(image_logow, (0, center_y), (left_x, center_y), (0, 0, 255), 1)
cv2.line(image_logow, (right_x, center_y), (w_image, center_y), (0, 0, 255), 1)
# Replace the ROI on the image
image_logow[top_y: bottom_y, left_x: right_x] = result
# + [markdown] id="UEKmKAg-wLea"
# A Region of Interest defined as the area we need to place our watermark on. Here, we will find out ROI using the coordinates we found above. Next we will use OpenCV to merge our logo on ROI. Similarly we will also add a line or a pattern on our image.
# + id="fgDosRDX3n6h" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="9e50fbe7-7446-4b90-a1da-1e7978c90925"
# Ploting logo watermark image
img = Image.fromarray(image_logow, 'RGB')
img
# + id="w-ubvxFTWeCZ"
# + [markdown] id="ZpM7pocuyI7u"
# Now we will convert the image from array to RGB and visualize it.
# + id="xcc51X3leKM2"
# Text Watermark
image_text = np.array(image_textw.convert('RGB'))
cv2.putText(image_text, text='Pianalytix', org=(w_image - 95, h_image - 10), fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5,
color=(0,0,255), thickness=2, lineType=cv2.LINE_4);
# + [markdown] id="a3joTfitywSl"
# Next we shall see how we can use a text as a watermark. We will convert our image into RGB and save it as a numpy array. To put a text as a watermark we will be using putText() function of OpenCV. Here we can use different features like size of the text, thickness of the text, font family, etc.
# + id="7m9cXRZlgn4N" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="ca150c9e-c702-43f2-c71c-650e3b80a4c6"
# Plotting text watermark image
timg = Image.fromarray(image_text, 'RGB')
timg
# + [markdown] id="sTrPkkcnzzpf"
# To visualize the image we need to convert it back to RGB format.
# + [markdown] id="4Jkxj9DIz_4q"
# ## Conclusion:
# + [markdown] id="eVepNN93z__6"
# Adding a watermark to an image works as a copyright. Different organizations can use it to make their content secure so that their content cannot be misused without thier license or a paid version. In this project we saw how easily we can use OpenCV to add watermarks to our images.
# + id="twLHbP_wkeXS"
| Image Watermarking Flask App/Image Watermarking Flask App/Image_Watermarking.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.2 64-bit (''base'': conda)'
# name: python392jvsc74a57bd098b0a9b7b4eaaa670588a142fd0a9b87eaafe866f1db4228be72b4211d12040f
# ---
# ---
# author: <NAME> (<EMAIL>)
# ---
#
# This answer assumes you have imported SymPy as follows.
from sympy import * # load all math functions
init_printing( use_latex='mathjax' ) # use pretty math output
# If your equation has just one variable, simply call `solve` on it.
# Note that you may get a list of more than one solution.
var( 'x' )
equation = Eq( x**2 + 3*x, -x + 9 )
solve( equation )
# Sometimes you get no solutions, which is shown as a Python empty list.
solve( Eq( x+1, x+2 ) )
# Sometimes the answers include complex numbers.
solve( Eq( x**3, -1 ) )
# To restrict the solution to the real numbers, use `solveset` instead,
# and specify the real numbers as the domain.
solveset( Eq( x**3, -1 ), domain=S.Reals )
# You can solve systems of equations by calling `solve` on them.
var( 'x y' )
system = [
Eq( x + 2*y, 1 ),
Eq( x - 9*y, 5 )
]
solve( system )
| database/tasks/How to solve symbolic equations/Python, using SymPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python3 - python
# language: python
# name: ipython_python
# ---
# # Example 1 Data
#
# We start by loading some useful libraries in and some functionality to provide a more functional approach to programming.
import pandas as pd
from scipy import stats
import altair as alt
from typing import List, Any, Tuple
from functools import reduce
import math as math
# We start by defining a function which generates a data frame of the results from a single individual when they have been asked to flip a coin a fixed number of time.
def random_flips(num_flips: int,
prob_heads: float,
person_id: int) -> pd.DataFrame:
coin_result = stats.bernoulli.rvs(p = prob_heads,
size = num_flips)
flip_number = range(1, num_flips + 1)
flipper_id = num_flips * [person_id]
return pd.DataFrame({"name": flipper_id,
"flip_number": flip_number,
"outcome": coin_result})
# Then we can wrap this in a function that does this for a group of people and puts all of the results into a single data frame.
def random_experiment(num_flips: int,
person_ids: List[int],
prob_heads_list: List[float]) -> pd.DataFrame:
rand_dfs = (random_flips(num_flips, prob, pid)
for (prob,pid) in zip(prob_heads_list,person_ids))
op = lambda df, x: df.append(x)
return reduce(op, rand_dfs, pd.DataFrame())
# Given the number of trials and the number of successes among those trials we can get an MLE for the probability of success and we can generate a Wald style confidence interval on the estimate. Note that we define a new type to make it clear what the result of this is.
# +
EstimateAndCI = Tuple[float,Tuple[float,float]]
def wald_estimate_and_ci(num_trials: int, num_success: int) -> EstimateAndCI:
p_hat = num_success / num_trials
z = 1.96
delta = z * math.sqrt(p_hat * (1 - p_hat) / num_trials)
return (p_hat,(p_hat - delta, p_hat + delta))
# -
# The data set that we want will have a couple of outliers in it so that the audience has something interesting to find. We will also generate another false data set which leads to the correct point estimate but that has a structure which means that the binomial model is not appropriate. We will use two maps, `exp1` and `exp2`, to hold the specifics of each data set.
# +
num_flips = 30
exp1 = {
"experiment": 1,
"num_people": 15,
"person_ids": range(15),
"num_outliers": 2,
"prob_heads": 0.4,
"output_csv": "experiment1.csv"
}
exp2 = {
"experiment": 2,
"num_people": 50,
"person_ids": range(50),
"prob_lower": 0.2,
"prob_upper": 0.6,
"output_csv": "experiment2.csv"
}
# -
# ## Experiment 1
#
# The last two people do not actually flip the coin, they just write heads for all trials.
# +
prob_heads_1 = ((exp1["num_people"] - exp1["num_outliers"]) * [exp1["prob_heads"]] +
exp1["num_outliers"] * [1.0])
results_1 = random_experiment(
num_flips,
exp1["person_ids"],
prob_heads_1
)
results_1.to_csv(exp1["output_csv"], index=False)
# -
# ## Experiment 2
#
# Everyone flips they coin that they are given, but the coins all have different probabilities of heads.
# +
prob_inc = (exp2["prob_upper"] - exp2["prob_lower"]) / (exp2["num_people"] - 1)
prob_heads_2 = [exp2["prob_lower"] + prob_inc * n
for n in range(exp2["num_people"])]
results_2 = random_experiment(
num_flips,
exp2["person_ids"],
prob_heads_2
)
results_2.to_csv(exp2["output_csv"], index=False)
| example-1/example-1-data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# %matplotlib inline
from vnpy.trader.app.ctaStrategy.ctaBacktesting import BacktestingEngine, OptimizationSetting, MINUTE_DB_NAME
from vnpy.trader.app.ctaStrategy.strategy.strategyAtrRsi import AtrRsiStrategy
# -
# 创建回测引擎对象
engine = BacktestingEngine()
# 设置回测使用的数据
engine.setBacktestingMode(engine.BAR_MODE) # 设置引擎的回测模式为K线
engine.setDatabase(MINUTE_DB_NAME, 'IF0000') # 设置使用的历史数据库
engine.setStartDate('20120101') # 设置回测用的数据起始日期
# 配置回测引擎参数
engine.setSlippage(0.2) # 设置滑点为股指1跳
engine.setRate(0.3/10000) # 设置手续费万0.3
engine.setSize(300) # 设置股指合约大小
engine.setPriceTick(0.2) # 设置股指最小价格变动
# 在引擎中创建策略对象
d = {'atrLength': 11} # 策略参数配置
engine.initStrategy(AtrRsiStrategy, d) # 创建策略对象
# 运行回测
engine.runBacktesting() # 运行回测
# 显示回测结果
engine.showBacktestingResult()
# 显示前10条成交记录
for i in range(10):
print engine.tradeDict[str(i+1)].__dict__
# +
# 优化配置
setting = OptimizationSetting() # 新建一个优化任务设置对象
setting.setOptimizeTarget('capital') # 设置优化排序的目标是策略净盈利
setting.addParameter('atrLength', 12, 20, 2) # 增加第一个优化参数atrLength,起始12,结束20,步进2
setting.addParameter('atrMa', 20, 30, 5) # 增加第二个优化参数atrMa,起始20,结束30,步进5
setting.addParameter('rsiLength', 5) # 增加一个固定数值的参数
# 执行多进程优化
import time
engine.runParallelOptimization(AtrRsiStrategy, setting)
print u'耗时:%s' %(time.time()-start)
# -
| examples/CtaBacktesting/.ipynb_checkpoints/backtesting-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# +
import skimage.data
import matplotlib.pyplot as plt
# %matplotlib inline
from matplotlib import animation, rc
from IPython.display import HTML
from timeit import default_timer as timer
from torchvision.transforms import ToPILImage
from elaugment import generator
img = skimage.data.astronaut()
def benchmark_elaugment(ts_train):
tg_train = generator.TransformationsGenerator(ts_train)
start = timer()
for i in range(1000):
transform = next(tg_train)
transform(img)
end = timer()
print('elaugment', end - start)
def benchmark_torchvision(transform):
pil_image = ToPILImage()(img)
start = timer()
for i in range(100):
transform(pil_image)
end = timer()
print('torchvision', end - start)
# +
from elaugment.image import random
from torchvision.transforms import RandomResizedCrop
benchmark_elaugment([random.RandomResizedCrop(crop_size=448, min_area=0.08)])
benchmark_torchvision(RandomResizedCrop(448))
# +
from elaugment.image import random
from torchvision.transforms import RandomCrop
benchmark_elaugment([random.RandomCrop(crop_size=448)])
benchmark_torchvision(RandomCrop(448))
# +
from elaugment.image import transformations
from torchvision.transforms import Resize
benchmark_elaugment([transformations.Resize((100, 100))])
benchmark_torchvision(Resize((100, 100)))
# +
from elaugment.image import random
tf = [
random.RandomDistortion(5, 5, 0.1, 0.1, order=1)
]
tg = generator.TransformationsGenerator(tf)
img = transformations.Resize((128, 128))(img)
start = timer()
iterations = 500
for i in range(iterations):
t = next(tg)
t(img / 255)
end = timer()
print('iterations/s', iterations / (end - start))
# -
| examples/benchmarks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### This problem is from "An Introduction to Computational Fluid Dynamics" by Veersteg and Malalasekera
#
# #### Chapter 4: Finite Volume Methods for Diffusion Problems, Example 4.3
# We discuss the cooling of a circular fin by means of convective heat transfer along its length. Convection gives rise to a temperature-dependent heat loss or sink term in the governing equation. There is a cylindrical fin with uniform cross-sectional area A. The base is at a temperature of 100°C ($T_B$) and the end is insulated. The fin is exposed to an ambient temperature of 20°C. One dimensional heat transfer in this situation is governed by:
#
# $$ \frac{d}{dx} \left(kA\frac{dT}{dx}\right) - hP(T - T_\infty) = 0$$
#
# where h is the convective heat transfer coefficient, P the perimeter, k the thermal conductivity of the material and $T_\infty$ the ambient temperature. Calculate the temperature distribution along the fin and compare the results with the analytical solution given by:
#
# $$ \frac{T - T_\infty}{T_B - T_\infty} = \frac{cosh(n(L-x))}{cosh(nL)} $$
#
# where $n^2 = hP/(kA)$, L is the length of the fin and x the distance along the fin.
#
# Data: L = 1 m, $hP/(kA) = 25/m^2$ (note that kA is constant).
import numpy as np
import matplotlib.pyplot as plt
# +
# Initializing the variables
| FVM Practice/FVM_1D-DiffusionWithSource_Practice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ___
# # Capítulo 1 - Manipulação de dados: Básico
# ## Seção 5 - Encontrando e preenchendo valores em branco
# ___
# +
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
# -
# ### Encontrando os valores em branco
#
# O NumPy tem embutido nele um objeto chamado `np.nan` que serve para identificar valores em branco
# +
em_branco = np.nan
serie = Series(['linha 1', 'linha 2', em_branco, 'linha 4', 'linha 5', 'linha 6', em_branco, 'linha 8'])
serie
# -
# #### nome_do_objeto.isnull()
#
# ♔┈♔┈♔┈( O QUE ISSO FAZ )┈♔┈♔┈♔
#
# O método `.isnull()` retornar uma valores booleanos (`True` ou `Falso`) se um item dentro de um objeto Pandas é nulo/vazio.
serie.isnull()
# ### Preenchendo valores em branco
np.random.seed(25)
df = DataFrame(np.random.randn(36).reshape(6,6))
df.loc[3:5, 0] = em_branco
df.loc[1:4, 5] = em_branco
df
# #### nome_do_objeto.fillna(valor)
#
# ♔┈♔┈♔┈( O QUE ISSO FAZ ) ┈♔┈♔┈♔
#
# O método `.fillna()` encontra os valores nulos do nosso objeto e preenche esses valores com o novo valor que você passar.
df_preenchido = df.fillna(0) # onde antes tinha NaN foi preenchiudo com zero
df_preenchido
# #### nome_do_objeto.fillna(dicionário)
#
# ♔┈♔┈♔┈( O QUE ISSO FAZ ) ┈♔┈♔┈♔
#
# Você também pode passar um dicionário para o método `.fillna()`. Usando um dicionário o método entende que cada chave do dicionário corresponde a uma coluna e o seu par o valor a ser substituido na respectiva coluna.
dicio = {0: 0.1, 5: 1.25}
df_preenchido = df.fillna(dicio)
df_preenchido
| Cap01/05_Encontrando_e_preechendo_valores_em_branco.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exploratory Analysis of Spatial Data: Spatial Autocorrelation #
#
#
# In this notebook we introduce methods of _exploratory spatial data analysis_
# that are intended to complement geovizualization through formal univariate and
# multivariate statistical tests for spatial clustering.
#
#
# ## Imports
import sys
import os
sys.path.append(os.path.abspath('..'))
from pysal.explore import esda
import pandas as pd
import geopandas as gpd
import pysal.lib as lps
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Our data set comes from the Berlin airbnb scrape taken in April 2018. This dataframe was constructed as part of the [GeoPython 2018 workshop](https://github.com/ljwolf/geopython) by [<NAME>](https://ljwolf.org) and [<NAME>](https://sergerey.org). As part of the workshop a geopandas data frame was constructed with one of the columns reporting the median listing price of units in each neighborhood in Berlin:
df = gpd.read_file('data/neighborhoods.shp')
# was created in previous notebook with df.to_file('data/neighborhoods.shp')
df.head()
# We have an `nan` to first deal with:
pd.isnull(df['median_pri']).sum()
df = df
df['median_pri'].fillna((df['median_pri'].mean()), inplace=True)
df.plot(column='median_pri')
fig, ax = plt.subplots(figsize=(12,10), subplot_kw={'aspect':'equal'})
df.plot(column='median_pri', scheme='Quantiles', k=5, cmap='GnBu', legend=True, ax=ax)
#ax.set_xlim(150000, 160000)
#ax.set_ylim(208000, 215000)
# ## Spatial Autocorrelation ##
#
# Visual inspection of the map pattern for the prices allows us to search for
# spatial structure. If the spatial distribution of the prices was random, then we
# should not see any clustering of similar values on the map. However, our visual
# system is drawn to the darker clusters in the south west as well as the center,
# and a concentration of the lighter hues (lower prices) in the north central and
# south east.
#
# Our brains are very powerful pattern recognition machines. However, sometimes
# they can be too powerful and lead us to detect false positives, or patterns
# where there are no statistical patterns. This is a particular concern when
# dealing with visualization of irregular polygons of differning sizes and shapes.
#
# The concept of *spatial
# autocorrelation* relates to the combination of two types of similarity: spatial
# similarity and attribute similarity. Although there are many different measures
# of spatial autocorrelation, they all combine these two types of simmilarity into
# a summary measure.
#
# Let's use PySAL to generate these two types of similarity
# measures.
#
# ### Spatial Similarity ###
#
# We have already encountered spatial weights
# in a previous notebook. In spatial autocorrelation analysis, the spatial weights
# are used to formalize the notion of spatial similarity. As we have seen there
# are many ways to define spatial weights, here we will use queen contiguity:
wq = lps.weights.Queen.from_dataframe(df)
wq.transform = 'r'
# ### Attribute Similarity ###
#
# So the spatial weight between neighborhoods $i$ and $j$ indicates if the two
# are neighbors (i.e., geographically similar). What we also need is a measure of
# attribute similarity to pair up with this concept of spatial similarity. The
# **spatial lag** is a derived variable that accomplishes this for us. For neighborhood
# $i$ the spatial lag is defined as: $$ylag_i = \sum_j w_{i,j} y_j$$
y = df['median_pri']
ylag = lps.weights.lag_spatial(wq, y)
ylag
from pysal.viz import mapclassify as mc
ylagq5 = mc.Quantiles(ylag, k=5)
# +
f, ax = plt.subplots(1, figsize=(9, 9))
df.assign(cl=ylagq5.yb).plot(column='cl', categorical=True, \
k=5, cmap='GnBu', linewidth=0.1, ax=ax, \
edgecolor='white', legend=True)
ax.set_axis_off()
plt.title("Spatial Lag Median Price (Quintiles)")
plt.show()
# -
# The quintile map for the spatial lag tends to enhance the impression of value
# similarity in space. It is, in effect, a local smoother.
df['lag_median_pri'] = ylag
f,ax = plt.subplots(1,2,figsize=(2.16*4,4))
df.plot(column='median_pri', ax=ax[0], edgecolor='k',
scheme="quantiles", k=5, cmap='GnBu')
ax[0].axis(df.total_bounds[np.asarray([0,2,1,3])])
ax[0].set_title("Price")
df.plot(column='lag_median_pri', ax=ax[1], edgecolor='k',
scheme='quantiles', cmap='GnBu', k=5)
ax[1].axis(df.total_bounds[np.asarray([0,2,1,3])])
ax[1].set_title("Spatial Lag Price")
ax[0].axis('off')
ax[1].axis('off')
plt.show()
# However, we still have
# the challenge of visually associating the value of the prices in a neighborhod
# with the value of the spatial lag of values for the focal unit. The latter is a
# weighted average of homicide rates in the focal county's neighborhood.
#
# To complement the geovisualization of these associations we can turn to formal
# statistical measures of spatial autocorrelation.
#
#
# ## Global Spatial Autocorrelation
#
# We begin with a simple case where the variable under consideration is binary.
# This is useful to unpack the logic of spatial autocorrelation tests. So even though
# our attribute is a continuously valued one, we will convert it to a binary case
# to illustrate the key concepts:
#
# ### Binary Case
y.median()
yb = y > y.median()
sum(yb)
# We have 68 neighborhoods with list prices above the median and 70 below the
# median (recall the issue with ties).
yb = y > y.median()
labels = ["0 Low", "1 High"]
yb = [labels[i] for i in 1*yb]
df['yb'] = yb
# The spatial distribution of the binary variable immediately raises questions
# about the juxtaposition of the "black" and "white" areas.
fig, ax = plt.subplots(figsize=(12,10), subplot_kw={'aspect':'equal'})
df.plot(column='yb', cmap='binary', edgecolor='grey', legend=True, ax=ax)
# ### Join counts ###
#
# One way to formalize a test for spatial autocorrelation in a binary attribute is
# to consider the so-called _joins_. A join exists for each neighbor pair of
# observations, and the joins are reflected in our binary spatial weights object
# `wq`.
#
# Each unit can take on one of two values "Black" or "White", and so for a given
# pair of neighboring locations there are three different types of joins that can
# arise:
#
# - Black Black (BB)
# - White White (WW)
# - Black White (or White Black) (BW)
#
# Given that we have 68 Black polygons on our map, what is the number of Black
# Black (BB) joins we could expect if the process were such that the Black
# polygons were randomly assigned on the map? This is the logic of join count statistics.
#
# We can use the `esda` package from PySAL to carry out join count analysis:
from pysal.explore import esda
yb = 1 * (y > y.median()) # convert back to binary
wq = lps.weights.Queen.from_dataframe(df)
wq.transform = 'b'
np.random.seed(12345)
jc = esda.join_counts.Join_Counts(yb, wq)
# The resulting object stores the observed counts for the different types of joins:
jc.bb
jc.ww
jc.bw
# Note that the three cases exhaust all possibilities:
jc.bb + jc.ww + jc.bw
# and
wq.s0 / 2
# which is the unique number of joins in the spatial weights object.
#
# Our object tells us we have observed 121 BB joins:
jc.bb
# The critical question for us, is whether this is a departure from what we would
# expect if the process generating the spatial distribution of the Black polygons
# were a completely random one? To answer this, PySAL uses random spatial
# permutations of the observed attribute values to generate a realization under
# the null of _complete spatial randomness_ (CSR). This is repeated a large number
# of times (999 default) to construct a reference distribution to evaluate the
# statistical significance of our observed counts.
#
# The average number of BB joins from the synthetic realizations is:
jc.mean_bb
# which is less than our observed count. The question is whether our observed
# value is so different from the expectation that we would reject the null of CSR?
import seaborn as sbn
sbn.kdeplot(jc.sim_bb, shade=True)
plt.vlines(jc.bb, 0, 0.075, color='r')
plt.vlines(jc.mean_bb, 0,0.075)
plt.xlabel('BB Counts')
# The density portrays the distribution of the BB counts, with the black vertical
# line indicating the mean BB count from the synthetic realizations and the red
# line the observed BB count for our prices. Clearly our observed value is
# extremely high. A pseudo p-value summarizes this:
# + attributes={"classes": ["ptyhon"], "id": ""}
jc.p_sim_bb
# -
# Since this is below conventional significance levels, we would reject the null
# of complete spatial randomness in favor of spatial autocorrelation in market prices.
#
#
# ### Continuous Case
#
# The join count analysis is based on a binary attribute, which can cover many
# interesting empirical applications where one is interested in presence and
# absence type phenomena. In our case, we artificially created the binary variable,
# and in the process we throw away a lot of information in our originally
# continuous attribute. Turning back to the original variable, we can explore
# other tests for spatial autocorrelation for the continuous case.
#
# First, we transform our weights to be row-standardized, from the current binary state:
wq.transform = 'r'
y = df['median_pri']
# Moran's I is a test for global autocorrelation for a continuous attribute:
np.random.seed(12345)
mi = esda.moran.Moran(y, wq)
mi.I
# Again, our value for the statistic needs to be interpreted against a reference
# distribution under the null of CSR. PySAL uses a similar approach as we saw in
# the join count analysis: random spatial permutations.
import seaborn as sbn
sbn.kdeplot(mi.sim, shade=True)
plt.vlines(mi.I, 0, 1, color='r')
plt.vlines(mi.EI, 0,1)
plt.xlabel("Moran's I")
# Here our observed value is again in the upper tail, although visually it does
# not look as extreme relative to the binary case. Yet, it is still statistically significant:
mi.p_sim
# ## Local Autocorrelation: Hot Spots, Cold Spots, and Spatial Outliers ##
#
# In addition to the Global autocorrelation statistics, PySAL has many local
# autocorrelation statistics. Let's compute a local Moran statistic for the same
# d
np.random.seed(12345)
from pysal.explore import esda
wq.transform = 'r'
lag_price = lps.weights.lag_spatial(wq, df['median_pri'])
# +
price = df['median_pri']
b, a = np.polyfit(price, lag_price, 1)
f, ax = plt.subplots(1, figsize=(9, 9))
plt.plot(price, lag_price, '.', color='firebrick')
# dashed vert at mean of the price
plt.vlines(price.mean(), lag_price.min(), lag_price.max(), linestyle='--')
# dashed horizontal at mean of lagged price
plt.hlines(lag_price.mean(), price.min(), price.max(), linestyle='--')
# red line of best fit using global I as slope
plt.plot(price, a + b*price, 'r')
plt.title('Moran Scatterplot')
plt.ylabel('Spatial Lag of Price')
plt.xlabel('Price')
plt.show()
# -
# Now, instead of a single $I$ statistic, we have an *array* of local $I_i$
# statistics, stored in the `.Is` attribute, and p-values from the simulation are
# in `p_sim`.
li = esda.moran.Moran_Local(y, wq)
li.q
# We can again test for local clustering using permutations, but here we use
# conditional random permutations (different distributions for each focal location)
(li.p_sim < 0.05).sum()
# We can distinguish the specific type of local spatial association reflected in
# the four quadrants of the Moran Scatterplot above:
sig = li.p_sim < 0.05
hotspot = sig * li.q==1
coldspot = sig * li.q==3
doughnut = sig * li.q==2
diamond = sig * li.q==4
spots = ['n.sig.', 'hot spot']
labels = [spots[i] for i in hotspot*1]
df = df
from matplotlib import colors
hmap = colors.ListedColormap(['red', 'lightgrey'])
f, ax = plt.subplots(1, figsize=(9, 9))
df.assign(cl=labels).plot(column='cl', categorical=True, \
k=2, cmap=hmap, linewidth=0.1, ax=ax, \
edgecolor='white', legend=True)
ax.set_axis_off()
plt.show()
spots = ['n.sig.', 'cold spot']
labels = [spots[i] for i in coldspot*1]
df = df
from matplotlib import colors
hmap = colors.ListedColormap(['blue', 'lightgrey'])
f, ax = plt.subplots(1, figsize=(9, 9))
df.assign(cl=labels).plot(column='cl', categorical=True, \
k=2, cmap=hmap, linewidth=0.1, ax=ax, \
edgecolor='white', legend=True)
ax.set_axis_off()
plt.show()
spots = ['n.sig.', 'doughnut']
labels = [spots[i] for i in doughnut*1]
df = df
from matplotlib import colors
hmap = colors.ListedColormap(['lightblue', 'lightgrey'])
f, ax = plt.subplots(1, figsize=(9, 9))
df.assign(cl=labels).plot(column='cl', categorical=True, \
k=2, cmap=hmap, linewidth=0.1, ax=ax, \
edgecolor='white', legend=True)
ax.set_axis_off()
plt.show()
spots = ['n.sig.', 'diamond']
labels = [spots[i] for i in diamond*1]
df = df
from matplotlib import colors
hmap = colors.ListedColormap(['pink', 'lightgrey'])
f, ax = plt.subplots(1, figsize=(9, 9))
df.assign(cl=labels).plot(column='cl', categorical=True, \
k=2, cmap=hmap, linewidth=0.1, ax=ax, \
edgecolor='white', legend=True)
ax.set_axis_off()
plt.show()
sig = 1 * (li.p_sim < 0.05)
hotspot = 1 * (sig * li.q==1)
coldspot = 3 * (sig * li.q==3)
doughnut = 2 * (sig * li.q==2)
diamond = 4 * (sig * li.q==4)
spots = hotspot + coldspot + doughnut + diamond
spots
spot_labels = [ '0 ns', '1 hot spot', '2 doughnut', '3 cold spot', '4 diamond']
labels = [spot_labels[i] for i in spots]
# +
from matplotlib import colors
hmap = colors.ListedColormap([ 'lightgrey', 'red', 'lightblue', 'blue', 'pink'])
f, ax = plt.subplots(1, figsize=(9, 9))
df.assign(cl=labels).plot(column='cl', categorical=True, \
k=2, cmap=hmap, linewidth=0.1, ax=ax, \
edgecolor='white', legend=True)
ax.set_axis_off()
plt.show()
# -
# ## S-maup: Statistical test to measure the sensitivity to the modifiable areal unit problem ##
#
# This is a nonparametric statistical test to measure the sensitivity of a spatially intensive variable to the effects of the Modifiable Areal Unit Problem (MAUP). The S-maup statistic can be used to find the maximum level of spatial aggregation (k) that preserves the distributional characteristics of the spatial variable before being aggregated.
#
# **H0**: The variable y **is not** significantly affected by the MAUP.
#
# **H1**: The variable y **is** significantly affected by the MAUP.
#
# **Citation**: <NAME>, <NAME>, <NAME> (2018) S-maup: Statistical test to measure the sensitivity to the modifiable areal unit problem. PLoS ONE 13(11): e0207377. https://doi.org/10.1371/journal.pone.0207377
#
# https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0207377
# Let's find the maximum number of regions (k) in which we can aggregate (n) areas before having the negative consequences of the MAUP
# ### 1. load the map
# +
#import pysal.lib
#from pysal.explore.esda.moran import Moran
#from pysal.explore.esda.smaup import Smaup
# -
# ### 2. select the variable to analyze
# +
#f = pysal.lib.io.open(pysal.lib.examples.get_path("stl_hom.txt"))
#y = np.array(f.by_col['HR8893'])
# -
# ### 3. calculate the level of spatial autocorrelation
# +
#w = pysal.lib.io.open(pysal.lib.examples.get_path("stl.gal")).read()
#rho = Moran(y, w).I
# -
# ### 4. calculate the number of areas (n)
# +
#n = len(y)
# -
# ### 5. let's begin with the minimum level of aggregation which is the number of areas minus one
'''
k = len(y)-1
crit_val0_05 = np.inf
smaup = -np.inf
#the reduce k until H0 is rejected
while smaup < crit_val0_05:
s = Smaup(n,k,rho)
smaup = s.smaup
crit_val0_05 = s.critical_05
k -= 1
'''
# ### 6. maximum number of regions (k) is...
# +
#print(k+1)
| notebooks/explore/esda/Spatial Autocorrelation for Areal Unit Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] deletable=true editable=true
# # Validation 10 - Timeseries Minimum Up Time
# + deletable=true editable=true
# %matplotlib inline
# + deletable=true editable=true
import psst
# + deletable=true editable=true
from psst.case import read_matpower
from psst.network import create_network
import pandas as pd
# + [markdown] deletable=true editable=true
# ### Validation of case 1
# + deletable=true editable=true
case = read_matpower('./cases/case7.m')
# + deletable=true editable=true
case.load = pd.read_csv('./cases/case7.csv', index_col=0)
# + deletable=true editable=true
network = create_network(case, prog='neato')
network.draw()
# + deletable=true editable=true
case
# + deletable=true editable=true
case.bus
# + deletable=true editable=true
case.branch
# + deletable=true editable=true
case.gen.loc['GenCo1', 'MINIMUM_UP_TIME'] = 5
# + deletable=true editable=true
case.gen
# + deletable=true editable=true
case.gencost
# + deletable=true editable=true
import matplotlib.pyplot as plt
# + deletable=true editable=true
fig, axs = plt.subplots(1, 1, figsize=(8, 5))
ax = axs
case.load['Bus2'].plot.bar(ax=ax)
ax.set_ylim(0, 500);
# + deletable=true editable=true
from psst.model import build_model
# + deletable=true editable=true
model = build_model(case)
# + deletable=true editable=true
model
# + deletable=true editable=true
model.solve(solver='cbc', verbose=True)
# + [markdown] deletable=true editable=true
# ### Input data
# + deletable=true editable=true
import pandas as pd
# + deletable=true editable=true
pd.DataFrame(case.gen['PMAX'])
# + deletable=true editable=true
case.load
# + [markdown] deletable=true editable=true
# ### Model Results
# + deletable=true editable=true
model.results.unit_commitment
# + deletable=true editable=true
model.results.power_generated
# + deletable=true editable=true
model.results.commitment_cost
# + deletable=true editable=true
model.results.production_cost
# + deletable=true editable=true
model.results.noload_cost
# + deletable=true editable=true
model.results.line_power
# + deletable=true editable=true
from psst.plot import line_power, stacked_power_generation
# + deletable=true editable=true
stacked_power_generation(model.results, legend=True)
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
| docs/notebooks/validation/Validation10-TimeseriesMinimumUpTime.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Scraping the same data from multiple pages
#
# In this exercise, we'll look in multiple Exxon annual reports to find how their proven crude oil reserves have changed over time.
#
# To use this script, you need to download and save the list of report summary URLs acquired by doing a search of the SEC database for the Exxon company code. The file is on GitHub in the same directory as this script and is called `docs.txt`.
#
# To see a more complicated version of this script that carries out the search of the SEC database for annual reports of a bunch of companies, then captures the names of their boards of directors, see https://github.com/HeardLibrary/digital-scholarship/blob/master/code/scrape/python/scrape_sec.py
#
# ## Setup
#
# As before, import the two necessary modules:
import requests # best library to manage HTTP transactions
from bs4 import BeautifulSoup # web-scraping library
# Define the function to read in the list of URLs for pages to scrape and hard code some variables.
# +
def getUrlList(path):
with open(path, 'rt') as fileObject:
lineList = fileObject.read().split('\n')
# remove any extra item caused by trailing newline
if lineList[len(lineList)-1] == '':
lineList = lineList[0:len(lineList)-1]
return lineList
baseUrl = 'https://www.sec.gov'
acceptMediaType = 'text/html'
userAgentHeader = 'BaskaufScraper/0.1 (<EMAIL>)'
requestHeaderDictionary = {
'Accept' : acceptMediaType,
'User-Agent': userAgentHeader
}
# -
# ## First round of scraping to find the URLs of the annual reports
#
# This is a hack of the script from last week that extracts only the URL of only the 10-K report.
# +
urlList = getUrlList('docs.txt')
lookupUrls = []
for url in urlList:
response = requests.get(url, headers = requestHeaderDictionary)
soupObject = BeautifulSoup(response.text,features="html5lib")
rowObjects = soupObject.find_all('tr')
for row in rowObjects:
cellObjects = row.find_all('td')
for cell in cellObjects:
if cell.text == '10-K':
lookupUrl = baseUrl + cellObjects[2].a.get('href')
print(lookupUrl)
lookupUrls.append(lookupUrl)
# -
# ## Test scrape of the first annual report
#
# The following three cells work out the scrape process using only the first annual report. Here's its URL: https://www.sec.gov/Archives/edgar/data/34088/000003408818000015/xom10k2017.htm
#
# By separating the steps of the scrape, we avoid repeated hits on the server and also the delays caused by the slowness of the steps.
#
# Load the report HTML as a Beautiful Soup object:
response = requests.get(lookupUrls[0], headers = requestHeaderDictionary)
soupObject = BeautifulSoup(response.text,features="html5lib")
# The report year is at the very top of the report, so we just need to go through enough elements to find it. The `break` command stops the loop after the first bold text that is found.
paragraphs = soupObject.find_all('p')
for p in paragraphs:
bold = p.find_all('b')
if len(bold) != 0:
year = bold[0].text
print(year)
break
# The Proved Reserves table is somewhere in the middle of the report. Rather than stepping through the tables, we do a quick and dirty method of just pulling in every row in every table, then look through the cells in each row until we find the text for the row description "Total Proved". (We only use "Total Proved" because there is a line break after "Proved".) We can see from the table that the number we want for crude oil is in the second column (i.e. cell with index 1).
rowObjects = soupObject.find_all('tr')
for row in rowObjects:
cellObjects = row.find_all('td')
for cellObject in cellObjects:
if "Total Proved" in cellObject.get_text():
provedBarrels = cellObjects[1].text
print(provedBarrels)
# ## Actual scrape of all of the annual reports
#
# Now that we know the scrape is getting us what we want, put the three cells above into a loop that iterates through all of the annual reports.
#
# The process of retrieving the documents, and parsing and searching the HTML is slow for each document, so print the URL of the document as each one starts to get an indication of progress.
table = []
for reportUrl in lookupUrls:
print(reportUrl)
response = requests.get(reportUrl, headers = requestHeaderDictionary)
soupObject = BeautifulSoup(response.text,features="html5lib")
paragraphs = soupObject.find_all('p')
for p in paragraphs:
bold = p.find_all('b')
if len(bold) != 0:
year = bold[0].text.strip()
print(year)
break
rowObjects = soupObject.find_all('tr')
for row in rowObjects:
cellObjects = row.find_all('td')
for cellObject in cellObjects:
if "Total Proved" in cellObject.get_text():
provedBarrels = cellObjects[1].text.strip()
print(provedBarrels)
table.append([year, provedBarrels])
# For our purposes here, we've just printed the list of lists. If you wanted to output it to a file, you could use the `writeCsv()` function from last week to save the data in a CSV spreadsheet.
print(table)
| code/scrape/pylesson/lesson4-scrape.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from os import (makedirs, path, listdir)
import random
from shutil import copy
import pandas as pd
DATA_NAME = 'bird_song'
DATA_ROOT = '../../__data__'
MINI_DATA_NAME = 'mini_bird_song'
NUM_FILES = 9
MAX_FILES = 100
# -
def gen_mini_arr():
data_path = path.join(DATA_ROOT, DATA_NAME)
# Check if data path exists
if not path.exists(data_path):
raise Exception("{} not found, please check path...".format(data_path))
# Check if file limit exceeded
if MAX_FILES < NUM_FILES:
raise Exception(" NUM_FILES: {} cannot be greated that MAX_FILES : {}".format(NUM_FILES, MAX_FILES))
# contains tuple of filename and path
filename_arr = []
for parent_dir in listdir(path.join(data_path,'train_audio')):
sub_dir = path.join(data_path,'train_audio',parent_dir)
try:
tmp_arr = random.sample(listdir(sub_dir), NUM_FILES)
for file in tmp_arr:
filename_arr.append((file, path.join(data_path,'train_audio',parent_dir, file)))
except:
pass
return filename_arr
def move_files(mini_arr):
makedirs(path.join(DATA_ROOT, MINI_DATA_NAME,'train_audio'))
for file, src_path in mini_arr:
dst_path = path.join(DATA_ROOT, MINI_DATA_NAME,'train_audio',file)
copy(src_path, dst_path)
def gen_mini_csv(mini_arr):
csv_path = path.join(DATA_ROOT, DATA_NAME)
file_tup = list(zip(*mini_arr))[0]
data_frame = pd.read_csv(path.join(csv_path, "train.csv"))
filtered_df = data_frame[data_frame.filename.isin(file_tup)]
clean_df = filtered_df.drop(columns=[
"rating","playback_used","channels",
"pitch","speed", "description", "file_type", "volume",
"xc_id", "author", "url","length", "recordist",
"title", "bird_seen", "sci_name", "location", "license"
])
clean_df.to_csv(path.join(DATA_ROOT, MINI_DATA_NAME,'train.csv'), index=False)
mini_arr = gen_mini_arr()
move_files(mini_arr)
gen_mini_csv(mini_arr)
| docs/utility/data_splitter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import meshposition as mp
import utils as utl
import numpy as np
import matplotlib.pyplot as plt
import analyse_utils as atl
mp.start()
# -
# databases available from https://www.homesmartmesh.com/docs/networks/ultrawideband/#database
node_ids = mp.rf_get_active_short_ids()
utl.save_json_timestamp("config",node_ids)
diag_list = mp.get_list_uwb_ping_diag([("Tester","Green"),("Tester","Simple")], 100)
# + tags=[]
utl.save_json_timestamp("ping_diag_3_nodes",diag_list)
# -
atl.set_data_list(diag_list)
print(f"lodaded {len(diag_list)} entries")
# +
fig, axs = plt.subplots(2, 3, sharex=True, sharey=True)
atl.plot_axis_param("fpAmp1","Tester","Green",axs[0,0])
atl.plot_axis_param("fpAmp2","Tester","Green",axs[0,1])
atl.plot_axis_param("fpAmp3","Tester","Green",axs[0,2])
atl.plot_axis_param("fpAmp1","Tester","Simple",axs[1,0])
atl.plot_axis_param("fpAmp2","Tester","Simple",axs[1,1])
atl.plot_axis_param("fpAmp3","Tester","Simple",axs[1,2])
fig.set_size_inches(18,5)
# +
fig, axs = plt.subplots(2, 1, sharex=True, sharey=True)
atl.plot_axis_param("stdNoise","Tester","Green",axs[0])
atl.plot_axis_param("stdNoise","Tester","Simple",axs[1])
fig.set_size_inches(9,5)
# +
fig, axs = plt.subplots(2, 1, sharex=True, sharey=True)
atl.plot_axis_param("maxNoise","Tester","Green",axs[0])
atl.plot_axis_param("maxNoise","Tester","Simple",axs[1])
fig.set_size_inches(9,5)
# +
fig, axs = plt.subplots(2, 1, sharex=True, sharey=True)
atl.plot_axis_param("maxGrowthCIR","Tester","Green",axs[0])
atl.plot_axis_param("maxGrowthCIR","Tester","Simple",axs[1])
fig.set_size_inches(9,5)
# +
fig, axs = plt.subplots(2, 1, sharex=True, sharey=True)
atl.plot_axis_param("rxPreamCount","Tester","Green",axs[0])
atl.plot_axis_param("rxPreamCount","Tester","Simple",axs[1])
fig.set_size_inches(9,5)
# -
mp.stop()
| py_serial/uwb_diag_capture.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="aLsfIWSoU5x_"
# # Computing with Python: an Introduction
# + [markdown] id="AXTRG_T1U5yD"
# Welcome to programming!
#
# What is the difference between *Python* and a calculator? We begin this first lesson by showing how Python can be used **as** a calculator, and we move into one of the most important programming structures -- the **loop**. Loops allow computers to carry out repetetive computations, with just a few commands.
#
# To navigate this notebook, you might wish to click on the Table of Contents icon (near top-left of the Google Colab window). Then you can expand the sections to work on each one.
#
# Remember to save your work frequently on your personal GitHub repository.
# + [markdown] id="-y8Iz5LaU5yD"
# # Python as a calculator
# + [markdown] id="6rbqov7pU5yE"
# Different kinds of data are stored as different *types* in Python. For example, if you wish to work with integers, your data is typically stored as an *int*. A real number might be stored as a *float*. There are types for booleans (True/False data), strings (like "Hello World!"), and many more we will see.
#
# A more complete reference for Python's numerical types and arithmetic operations can be found in the [official Python documentation](https://docs.python.org/3/library/stdtypes.html). The [official Python tutorial](https://docs.python.org/3/tutorial/introduction.html) is also a great place to start.
#
# Python allows you to perform arithmetic operations: addition, subtraction, multiplication, and division, on numerical types. The operation symbols are `+`, `-`, `*`, and `/`. Evaluate each of the following cells to see how Python performs operations on *integers*.
#
# To evaluate the cell, click anywhere within the cell to select it and use the keyboard shortcut *Shift-Enter* to evaluate. But as you go through this and later lessons, try to *predict* what will happen when you evaluate the cell before you hit Shift-Enter. Don't just click click click! Think for a few seconds or a minute, and then Shift-Enter.
# + id="yqyWbMp-U5yE"
2 + 3
# + id="ITtBZC2kU5yE"
2 * 3
# + id="mS2JIy1oU5yE"
5 - 11
# + id="WRjeVm8rU5yF"
5.0 - 11
# + id="Q9pdWY_hU5yF"
5 / 11
# + id="TPzva1UXU5yF"
6 / 3
# + id="GUmGnpHVU5yF"
5 // 11
# + id="JEXb5uPQU5yF"
6 // 3
# + [markdown] id="o4t4Yw_XU5yF"
# The results are probably not too surprising, though the last two require a bit of explanation. Python *interprets* the input number 5 as an *int* (integer) and 5.0 as a *float*. "Float" stands for "floating point number," which are decimal approximations to real numbers. The word "float" refers to the fact that the decimal (or binary, for computers) point can float around (as in 1.2345 or 12.345 or 123.45 or 1234.5 or 0.00012345). There are deep computational issues related to how computers handle decimal approximations, and you can [read about the IEEE standards](https://en.wikipedia.org/wiki/IEEE_754) if you're interested.
#
# Python enables different kinds of division. The single-slash division in Python 3.x gives a floating point approximation of the quotient. That's why `5 / 11` and `6 / 3` both output floats. On the other hand, `5 // 11` and `6 // 3` yield integer outputs (rounding down) -- this is useful, but one has to be careful!
#
# In fact the designers of Python changed their mind. **This tutorial assumes that you are using Python 3.x.** If you are using Python 2.x, the command `5 / 11` would output zero.
# + id="msOx7ga_U5yF"
-12 // 5 # What will this output? Guess before evaluating!
# + [markdown] id="6m4OcRYfU5yG"
# Why use integer division `//` and why use floating point division? In practice, integer division is typically a faster operation. So if you only need the rounded result (and that will often be the case), use integer division. It will run much faster than carrying out floating point division then manually rounding down.
#
# Observe that floating point operations involve approximation. The result of `5.0/11.0` might not be what you expect in the last digit. Over time, especially with repeated operations, *floating point approximation* errors can add up!
# + [markdown] id="uAyV-QWrU5yG"
# You might be wondering about the little [XX] and [XX] prompts in the code cells above. What is their purpose? Guess what the following line will do.
# + id="vgnp3B3JU5yG"
Out[4] + Out[5]
# + [markdown] id="GgS8uA14U5yG"
# Cool, huh? It's nice to have a record of previous computations, especially if you don't want to type something again.
#
# Python allows you to group expressions with parentheses, and follows the order of operations that you learn in school.
# + id="kE_p7LcGU5yG"
(3 + 4) * 5
# + id="n-hUvTxHU5yG"
3 + (4 * 5)
# + id="3E_z7E81U5yG"
3 + 4 * 5 # What do you think will be the result? Remember PEMDAS?
# + [markdown] id="lIFwbvWsU5yG"
# Now is a good time to try a few computations of your own, in the empty cell below. You can type any Python commands you want in the empty cell. If you want to insert a new cell into this notebook, just hover your cursor in the space between cells in Colab.
# + id="Rc5Lv9wLU5yH"
# An empty cell. Have fun!
# + [markdown] id="OkWlZNSSU5yH"
# For number theory, *division with remainder* is an operation of central importance. Integer division provides the quotient, and the operation `%` provides the remainder. It's a bit strange that the percent symbol is used for the remainder, but this [dates at least to the early 1970s](https://softwareengineering.stackexchange.com/questions/294297/in-what-programming-language-did-the-use-of-the-percent-sign-to-mean-modulo) and has become standard across computer languages.
# + id="xTWOi_XxU5yH"
23 // 5 # Integer division
# + id="UNjARp_xU5yH"
23 % 5 # The remainder after division
# + [markdown] id="WFrRvFgkU5yH"
# Note in the code above, there are little "comments". To place a short comment on a line of code, just put a hashtag `#` at the end of the line of code, followed by your comment.
#
# Python gives a single command for division with remainder. Its output is a *tuple*.
# + id="F2zJS-tRU5yH"
divmod(23,5)
# + id="y3dckm2dU5yH"
type(divmod(23,5))
# + [markdown] id="0qVUgNJ2U5yH"
# All data in Python has a type, but a common complaint about Python is that types are a bit concealed "under the hood". But they are not far under the hood! Anyone can find out the type of some data with a single command.
# + id="TU1Cho4tU5yI"
type(3)
# + id="OYALDWO5U5yI"
type(3.0)
# + id="SHQ-_qcHU5yI"
type('Hello')
# + id="nhf-YmD3U5yI"
type([1,2,3])
# + [markdown] id="tYWDszLmU5yI"
# The key to careful computation in Python is always being *aware of the type* of your data, and *knowing* how Python operates differently on data of different types.
# + id="Uc7eYJLWU5yI"
3 + 3
# + id="jv6N6XLQU5yI"
3.0 + 3.0
# + id="KKF4Sv-jU5yI"
'Hello' + 'World!'
# + id="q9H__Z99U5yI"
[1,2,3] + [4,5,6]
# + id="NDDAxeQJU5yJ"
3 + 3.0
# + id="xuBj_w9_U5yJ"
3 + 'Hello!' # Uh oh!
# + id="9P7roKZgU5yJ"
# An empty cell. Have fun!
# Try operating on ints, floats, and strings, with different operations. Which ones work? How?
# + [markdown] id="1_2D8-NFU5yJ"
# As you can see, addition (the `+` operator) is interpreted differently in the contexts of numbers, strings, and lists. The designers of Python allowed us to add *numbers* of different types: if you try to operate on an *int* and a *float*, the *int* will typically be *coerced* into a float in order to perform the operation. But the designers of Python did not give meaning to the addition of a number with a string, for example. That's why you probably received a *TypeError* after trying to add a number to a string.
#
# On the other hand, Python does interpret *multiplication* of a natural number with a string or a list.
# + id="MmRfXepMU5yJ"
3 * 'Hello!'
# + id="gVo2bjWzU5yJ"
0 * 'Hello!'
# + id="OqWWK6VWU5yJ"
2 * [1,2,3]
# + [markdown] id="Z28hpoAjU5yJ"
# Can you create a string with 100 A's (like `AAA...`)? Use an appropriate operation in the cell below.
# + id="fAxwL5O3U5yJ"
# Practice cell
# + [markdown] id="nHp8b6_8U5yJ"
# Exponents in Python are given by the `**` operator. The following lines compute 2 to the 1000th power, in two different ways.
# + id="VNDmF-hcU5yK"
2**1000
# + id="DEHGNQ3RU5yK"
2.0**1000
# + [markdown] id="O1bSyFFJU5yK"
# As before, Python interprets an operation (`**`) differently in different contexts. When given integer input, Python evaluates `2**1000` **exactly**. The result is a large integer. A nice fact about Python, for mathematicians, is that it handles exact integers of arbitrary length! Many other programming languages (like C++) will give an error message if integers get too large in the midst of a computation.
#
# New in version 3.x, Python implements long integers without giving signals to the programmer or changing types. In Python 2.x, there were two types: *int* for somewhat small integers (e.g., up to $2^{31}$) and *long* type for all larger integers. Python 2.x would signal which type of integer was being used, by placing the letter "L" at the end of a long integer. Now, in Python 3.x, the programmer doesn't really see the difference. There is only the *int* type. But Python still optimizes computations, using hardware functionality for arithmetic of small integers and custom routines for large integers. The programmer doesn't have to worry about it most of the time.
#
# For scientific applications, one often wants to keep track of only a certain number of significant digits (sig figs). If one computes the floating point exponent `2.0**1000`, the result is a decimal approximation. It is still a float. The expression "e+301" stands for "multiplied by 10 to the 301st power", i.e., Python uses *scientific notation* for large floats.
# + id="gbiP5lpxU5yK"
type(2**1000)
# + id="oYFQ7ZxFU5yL"
type(2.0**1000)
# + id="MtmwQzLgU5yL"
# An empty cell. Have fun!
# + [markdown] id="hu0LPXNLU5yL"
# Now is a good time for reflection. Double-click in the cell below to answer the given questions. Cells like this one are used for text rather than Python code. Text is entered using *markdown*, but you can typically just enter text as you would in any text editor without problems. Press *shift-Enter* after editing a text cell to complete the editing process.
# + [markdown] id="QLyCVcXoU5yL"
# ### Exercises
#
# 1. What data types have you seen, and what kinds of data are they used for? Can you remember them without looking back?
#
# 2. How is division `/` interpreted differently for different types of data?
#
# 3. How is multiplication `*` interpreted differently for different types of data?
#
# 4. What is the difference between 100 and 100.0, for Python?
# + [markdown] id="KDzM72ZXU5yL"
# Double-click this text cell to edit it, and answer the exercises. This may be graded, so please complete all questions! Write in clear, complete, and concise sentences.
#
# 1.
#
# 2.
#
# 3.
#
# 4.
# + [markdown] id="0jhkZ5zhU5yL"
# # Calculating with booleans
# + [markdown] id="IAb0HST5U5yM"
# A *boolean* (type *bool*) is the smallest possible piece of data. While an *int* can be any integer, positive or negative, a *boolean* can only be one of two things: *True* or *False*. In this way, booleans are useful for storing the answers to yes/no questions.
#
# Questions about (in)equality of numbers are answered in Python by *operations* with numerical input and boolean output. Here are some examples. A more complete reference is [in the official Python documentation](https://docs.python.org/3/library/stdtypes.html#boolean-operations-and-or-not).
# + id="AqNG6PZBU5yM"
3 > 2
# + id="nOgi4gqUU5yM"
type(3 > 2)
# + id="WabkpXPbU5yM"
10 < 3
# + id="Mohc3o5oU5yM"
2.4 < 2.4000001
# + id="jnrzq17fU5yM"
32 >= 32
# + id="D55td8J7U5yM"
32 >= 31
# + id="L0RlreGLU5yM"
2 + 2 == 4
# + [markdown] id="_f2QoYJ-U5yM"
# Which number is bigger: $23^{32}$ or $32^{23}$? Use the cell below to answer the question!
# + id="PGlksxncU5yN"
# Write your code here.
# + [markdown] id="KTiAVQ0qU5yN"
# The expressions `<`, `>`, `<=`, `>=` are interpreted here as **operations** with numerical input and boolean output. The symbol `==` (two equal symbols!) gives a True result if the numbers are equal, and False if the numbers are not equal. An extremely common typo is to confuse `=` with `==`. But the single equality symbol `=` has an entirely different meaning, as we shall see.
# + [markdown] id="3atYTyAnU5yN"
# Using the remainder operator `%` and equality, we obtain a divisibility test.
# + id="t1BCq6GFU5yN"
63 % 7 == 0 # Is 63 divisible by 7?
# + id="kYz8XfraU5yN"
101 % 2 == 0 # Is 101 even?
# + [markdown] id="0Iikx3JZU5yN"
# Use the cell below to determine whether 1234567890 is divisible by 3.
# + id="LPqBN_VlU5yN"
# Your code goes here.
# + [markdown] id="Tvle2bwdU5yN"
# Booleans can be operated on by the standard logical operations: and, or, not. In ordinary English usage, "and" and "or" are conjunctions, while here in *Boolean algebra*, "and" and "or" are operations with Boolean inputs and Boolean output. The precise meanings of "and" and "or" are given by the following **truth tables**.
#
#
# | and | True | False |
# |-----|------|-------|
# | **True** | True | False |
# | **False** | False | False|
#
# <br>
#
# | or | True | False |
# |-----|------|-------|
# | **True** | True | True |
# | **False** | True | False|
# + id="eRo1A6IzU5yN"
True and False
# + id="sFMWK-0HU5yN"
True or False
# + id="5bREtxzOU5yN"
True or True
# + id="DP0Rw6jDU5yO"
not True
# + [markdown] id="DRKe3Jv8U5yO"
# Use the truth tables to predict the result (True or False) of each of the following, before evaluating the code.
# + id="NVKb_6njU5yO"
(2 > 3) and (3 > 2)
# + id="JrprEal6U5yO"
(1 + 1 == 2) or (1 + 1 == 3)
# + id="Ef-ryEGjU5yO"
not (-1 + 1 >= 0)
# + id="FUSY_QuLU5yO"
2 + 2 == 4
# + id="hKTk4LjaU5yO"
2 + 2 != 4 # For "not equal", Python uses the operation `!=`.
# + id="UsJtoToLU5yO"
2 + 2 != 5 # Is 2+2 *not* equal to 5?
# + id="tVfKlWT_U5yO"
not (2 + 2 == 5) # The same as above, but a bit longer to write.
# + [markdown] id="uwxFqT9uU5yO"
# Experiment below to see how Python handles a double or triple negative, i.e., something with a `not` `not`.
# + id="nD-RgY0CU5yP"
# Experiment here.
# + [markdown] id="5xxO21m1U5yP"
# Python does give an interpretation to arithmetic operations with booleans and numbers. Try to guess this interpretation with the following examples. Change the examples to experiment!
# + id="Y2lLPpdWU5yP"
False * 100
# + id="5y3rtwrdU5yP"
True + 13
# + [markdown] id="QOkU3A_fU5yP"
# This ability of Python to interpret operations based on context is a mixed blessing. On one hand, it leads to handy shortcuts -- quick ways of writing complicated programs. On the other hand, it can lead to code that is harder to read, especially for a Python novice. Good programmers aim for code that is easy to read, not just short!
#
# The [Zen of Python](https://www.python.org/dev/peps/pep-0020/) is a series of 20 aphorisms for Python programmers. The first seven are below.
#
# > Beautiful is better than ugly.
#
# > Explicit is better than implicit.
#
# > Simple is better than complex.
#
# > Complex is better than complicated.
#
# > Flat is better than nested.
#
# > Sparse is better than dense.
#
# > Readability counts.
# + [markdown] id="_5mUQ1tCU5yP"
# ### Exercises
#
# 1. Did you look at the truth tables closely? Can you remember, from memory, what `True or False` equals, or what `True and False` equals?
#
# 2. How might you easily remember the truth tables? How do they resemble the standard English usage of the words "and" and "or"?
#
# 3. If you wanted to know whether a number, like 2349872348723, is a multiple of 7 but **not** a multiple of 11, how might you write this in one line of Python code?
#
# 4. You can chain together `and` commands, e.g., with an expression like `True and True and True` (which would evaluate to `True`). You can also group booleans, e.g., with `True and (True or False)`. Experiment to figure out the order of operations (`and`, `or`, `not`) for booleans.
#
# 6. The operation `xor` means "exclusive or". Its truth table is: `True xor True = False` and `False xor False = False` and `True xor False = True` and `False xor True = True`. How might you implement `xor` in terms of the usual `and`, `or`, and `not`?
#
#
# + [markdown] id="owWkn3ieU5yP"
# ### Solutions
#
# (Edit here to give solutions to the exercises)
#
# 1.
#
# 2.
#
# 3.
#
# 4.
#
# 5.
# + [markdown] id="k69tL17JU5yP"
# # Declaring variables
# + [markdown] id="UjI-5d00U5yP"
# A central feature of programming is the declaration of variables. When you declare a variable, you are *storing* data in the computer's *memory* and you are assigning a *name* to that data. Both storage and name-assignment are carried out with the *single* equality symbol =.
# + id="hkwJHNVpU5yQ"
e = 2.71828
# + [markdown] id="gY-LejB4U5yQ"
# With this command, the float 2.71828 is stored somewhere inside your computer, and Python can access this stored number by the name "e" thereafter. So if you want to compute "e squared", a single command will do.
# + id="2ZJ7VtIwU5yQ"
e * e
# + id="G46Ma5fCU5yQ"
type(e)
# + [markdown] id="cWVyUzZxU5yQ"
# You can use just about any name you want for a variable, but your name *must* start with a letter, *must* not contain spaces, and your name *must* not be an existing Python word. Characters in a variable name can include letters (uppercase and lowercase) and numbers and underscores `_`.
#
# So `e` is a valid name for a variable, but `type` is a bad name. It is very tempting for beginners to use very short abbreviation-style names for variables (like `dx` or `vbn`). But resist that temptation and use more descriptive names for variables, like `difference_x` or `very_big_number`. This will make your code readable by you and others!
#
# There are different style conventions for variable names. We use lowercase names, with underscores separating words, roughly following [Google's style conventions](https://google.github.io/styleguide/pyguide.html#Python_Style_Rules) for Python code.
# + id="yQN7N7ZPU5yQ"
my_number = 17
# + id="FitPq8UNU5yQ"
my_number < 23
# + [markdown] id="ne_mUsdnU5yQ"
# After you declare a variable, its value remains the same until it is changed. You can change the value of a variable with a simple assignment. After the above lines, the value of my_number is 17.
# + id="Eu-rHwt0U5yQ"
my_number = 3.14
# + [markdown] id="cqgxOyyEU5yQ"
# This command reassigns the value of my_number to 3.14. Note that it changes the type too! It effectively overrides the previous value and replaces it with the new value.
#
# Often it is useful to change the value of a variable *incrementally* or *recursively*. Python, like many programming languages, allows one to assign variables in a self-referential way. What do you think the value of S will be after the following four lines?
# + id="zW6_fRoxU5yR"
S = 0
S = S + 1
S = S + 2
S = S + 3
print(S)
# + [markdown] id="cX1eo3jZU5yR"
# The first line `S = 0` is the initial declaration: the value 0 is stored in memory, and the name S is assigned to this value.
#
# The next line `S = S + 1` looks like nonsense, as an algebraic sentence. But reading = as **assignment** rather than **equality**, you should read the line `S = S + 1` as assigning the *value* `S + 1` to the *name* `S`. When Python interprets `S = S + 1`, it carries out the following steps.
#
# 1. Compute the value of the right side, `S+1`. (The value is 1, since `S` was assigned the value 0 in the previous line.)
# 2. Assign this value to the left side, `S`. (Now `S` has the value 1.)
#
# Well, this is a slight lie. Python probably does something more efficient, when given the command `S = S + 1`, since such operations are hard-wired in the computer and the Python interpreter is smart enough to take the most efficient route. But at this level, it is most useful to think of a self-referential assignment of the form `X = expression(X)` as a two step process as above.
#
# 1. Compute the value of `expression(X)`.
# 2. Assign this value to `X`.
# + [markdown] id="82NT9vo2U5yR"
# Now consider the following three commands.
# + id="CbT40NiuU5yR"
my_number = 17
new_number = my_number + 1
my_number = 3.14
# + [markdown] id="yQz42J2dU5yR"
# What are the values of the variables my_number and new_number, after the execution of these three lines?
#
# To access these values, you can use the *print* function.
# + id="5J4rfa08U5yR"
print(my_number)
print(new_number)
# + [markdown] id="DbEoFnjnU5yR"
# Python is an *interpreted* language, which carries out commands line-by-line from top to bottom. So consider the three lines
#
# ``` python
# my_number = 17
# new_number = my_number + 1
# my_number = 3.14
# ```
#
# Line 1 sets the value of my_number to 17. Line 2 sets the value of new_number to 18. Line 3 sets the value of my_number to 3.14. But Line 3 does *not* change the value of new_number at all.
#
# (This will become confusing and complicated later, as we study mutable and immutable types.)
# + [markdown] id="PFA4Q7SiU5yR"
# ### Exercises
#
# 1. What is the difference between `=` and `==` in the Python language?
#
# 2. If the variable `x` has value `3`, and you then evaluate the Python command `x = x * x`, what will be the value of `x` after evaluation?
#
# 3. Imagine you have two variables `a` and `b`, and you want to switch their values. How could you do this in Python?
#
# 4. Kepler's third law states that for a planet in circular orbit around an object of mass $M$, one has $4 \pi^2 r^3 = G M t^2$. We can use this to estimate the mass of the sun, from other astronomically observable quantities. Look up $G$ (the gravitational constant, estimated by experiment on Earth) and $r$ (the distance from the Earth to the sun, in meters). Compute $t$, the number of seconds it takes for the Earth to go around the sun (365.25 days). Finally use all of this to estimate $M$, the mass of the sun. Your solution should use 5-10 lines of Python code.
# + [markdown] id="sIeJz7hUU5yR"
# ### Solutions
#
# (Use this space to work on the exercises. Place more code and text cells above as needed.)
#
# 1.
#
# 2.
#
# 3.
#
# 4.
# + [markdown] id="b81C2aIPU5yS"
# # Lists and ranges
# + [markdown] id="iZoKvbn0U5yS"
# Python stands out for the central role played by *lists*. A *list* is what it sounds like -- a list of data. Data within a list can be of any type. Multiple types are possible within the same list! The basic syntax for a list is to use brackets to enclose the list items and commas to separate the list items.
# + id="MQRMJWrgU5yS"
type([1,2,3])
# + id="BkTCBWhsU5yS"
type(['Hello',17])
# + [markdown] id="DH51tlMvU5yS"
# There is another type called a *tuple* that we will use less often. Tuples use parentheses for enclosure instead of brackets.
# + id="8m1KBuRNU5yS"
type((1,2,3))
# + [markdown] id="sFZr5TtpU5yS"
# There's another list-like type in Python 3, called the `range` type. Ranges are kind of like lists, but instead of plunking every item into a slot of memory, ranges just have to remember three integers: their *start*, their *stop*, and their *step*.
#
# The `range` command creates a range with a given start, stop, and step. If you only input one number, the range will ***start at zero*** and use ***steps of one*** and will stop ***just before*** the given stop-number.
#
# One can create a list from a range (plunking every term in the range into a slot of memory), by using the `list` command. Here are a few examples.
# + id="0N_HejMiU5yS"
type(range(10)) # Ranges are their own type, in Python 3.x. Not in Python 2.x!
# + id="XzlQhL4eU5yS"
list(range(10)) # Let's see what's in the range. Note it starts at zero! Where does it stop?
# + [markdown] id="7-TeavRKU5yS"
# A more complicated two-input form of the range command produces a range of integers **starting at** a given number, and **terminating before** another given number.
# + id="fpHHkb82U5yT"
list(range(3,10))
# + id="O5A8XZFxU5yT"
list(range(-4,5))
# + [markdown] id="dRzyphm5U5yT"
# This is a common source of difficulty for Python beginners. While the first parameter (-4) is the starting point of the list, the list ends just before the second parameter (5). This takes some getting used to, but experienced Python programmers grow to like this convention.
# + [markdown] id="XderpNFAU5yT"
# The *length* of a list can be accessed by the len command.
# + id="-xjzFOxWU5yT"
len([2,4,6])
# + id="1IG2dOJeU5yT"
len(range(10)) # The len command can deal with lists and ranges. No need to convert.
# + id="N9lCN8-bU5yT"
len(range(10,100)) # Can you figure out the length, before evaluating?
# + [markdown] id="zW4aiYaAU5yT"
# The final variant of the range command (for now) is the *three-parameter* command of the form `range(a,b,s)`. This produces a list like `range(a,b)`, but with a "step size" of `s`. In other words, it produces a list of integers, beginning at `a`, increasing by `s` from one entry to the next, and going up to (but not including) `b`. It is best to experiment a bit to get the feel for it!
# + id="RjmZQRmtU5yT"
list(range(1,10,2))
# + id="UX91kNpoU5yT"
list(range(11,30,2))
# + id="cU6TYerPU5yU"
list(range(-4,5,3))
# + id="_RueWhdHU5yU"
list(range(10,100,17))
# + [markdown] id="I3otAH2AU5yU"
# This can be used for descending ranges too, and observe that the final number b in range(a,b,s) is not included.
# + id="LVm1djsTU5yU"
list(range(10,0,-1))
# + [markdown] id="Jp1V-X59U5yU"
# How many multiples of 7 are between 10 and 100? We can find out pretty quickly with the range command and the len command (to count).
# + id="roA4gLoXU5yU"
list(range(10,100,7)) # What list will this create? It won't answer the question...
# + id="jybN-evPU5yU"
list(range(14,100,7)) # Starting at 14 gives the multiples of 7.
# + id="BdvLmGljU5yU"
len(range(14,100,7)) # Gives the length of the list, and answers the question!
# + [markdown] id="dMbcJV5UU5yU"
# ### Exercises
#
# 1. If `a` and `b` are integers, what is the length of `range(a,b)`? Express your answer as a formula involving `a` and `b`.
#
# 2. Use a list and range command to produce the list `[1,2,3,4,5,6,7,8,9,10]`.
#
# 3. Create the list `[1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5]` with a single list and range command and another operation.
#
# 4. How many multiples of 3 are there between 300 and 3000 (including 300 and 3000)?
# + id="4HKDVcFVU5yU"
# Use this space to work on the exercises.
# + [markdown] id="ccSW2BGyU5yV"
# # Iterating over a range
# + [markdown] id="qoOhvB1rU5yV"
# Computers are excellent at repetitive reliable tasks. If we wish to perform a similar computation, many times over, a computer a great tool. Here we look at a common and simple way to carry out a repetetive computation: the "for loop". The "for loop" *iterates* through items in a list or range, carrying out some action for each item. Two examples will illustrate.
# + id="mwdKBi3kU5yV"
for n in [1,2,3,4,5]:
print(n*n)
# + id="HC031A4LU5yV"
for s in ['I','Am','Python']:
print(s + "!")
# + [markdown] id="yuctH9OZU5yV"
# The first loop, **unraveled**, carries out the following sequence of commands.
# + id="UY4izCfvU5yV"
n = 1
print(n*n)
n = 2
print(n*n)
n = 3
print(n*n)
n = 4
print(n*n)
n = 5
print(n*n)
# + [markdown] id="IgnUBVI0U5yV"
# But the "for loop" is more efficient *and* more readable to programmers. Indeed, it saves the repetition of writing the same command `print(n*n)` over and over again. It also makes transparent, from the beginning, the range of values that `n` is assigned to.
#
# When you read and write "for loops", you should consider how they look unravelled -- that is how Python will carry out the loop. And when you find yourself faced with a repetetive task, you might consider whether it may be wrapped up in a for loop. Most of the time, you should try to write code that *never repeats* the same task. If there is repetition, wrap it in a loop.
# + [markdown] id="M16fubaDU5yV"
# Try to unravel the loop below, and predict the result, before evaluating the code.
# + id="vOyvFfJcU5yW"
P = 1
for n in range(1,6):
P = P * n
print(P)
# + [markdown] id="I79mIz44U5yW"
# This might have been difficult! So what if you want to trace through the loop, as it goes? Sometimes, especially when debugging, it's useful to inspect every step of the loop to see what Python is doing. We can inspect the loop above, by inserting a print command within the *scope* of the loop.
# + id="C6bVCkZdU5yW"
P = 1
for n in range(1,6):
P = P * n
print("n is",n,"and P is",P)
print(P)
# + [markdown] id="y8kjJUJAU5yW"
# Here we have used the *print* command with strings and numbers together. In Python 3.x, you can print multiple things on the same line by separating them by commas. The "things" can be strings (enclosed by single or double-quotes) and numbers (int, float, etc.).
# + id="wUu0BH2lU5yW"
print("My favorite number is",17)
# + [markdown] id="TPCXSm1LU5yW"
# If we unravel the loop above, the linear sequence of commands interpreted by Python is the following.
# + id="KzUl3ac7U5yW"
P = 1
n = 1
P = P * n
print("n is",n,"and P is",P)
n = 2
P = P * n
print("n is",n,"and P is",P)
n = 3
P = P * n
print("n is",n,"and P is",P)
n = 4
P = P * n
print("n is",n,"and P is",P)
n = 5
P = P * n
print("n is",n,"and P is",P)
print (P)
# + [markdown] id="44_u1MSrU5yW"
# Let's analyze the loop syntax in more detail.
# ```python
# P = 1
# for n in range(1,6):
# P = P * n # this command is in the scope of the loop.
# print("n is",n,"and P is",P) # this command is in the scope of the loop too!
# print(P)
# ```
# The "for" command ends with a colon `:`, and the **next two** lines are indented. The colon and indentation are indicators of **scope**. The *scope* of the for loop begins after the colon, and includes all indented lines. The *scope* of the for loop is what is repeated in every step of the loop (in addition to the reassignment of `n`).
# + id="jE003_YTU5yW"
P = 1
for n in range(1,6):
P = P * n # this command is in the scope of the loop.
print("n is",n,"and P is",P) # this command is in the scope of the loop too!
print(P)
# + [markdown] id="8wXaf4hxU5yW"
# If we change the indentation, it changes the scope of the for loop. Predict what the following loop will do, by unraveling, before evaluating it.
# + id="F_FcpC5QU5yX"
P = 1
for n in range(1,6):
P = P * n
print("n is",n,"and P is",P)
print(P)
# + [markdown] id="__tEmpKPU5yX"
# Scopes can be nested by nesting indentation. What do you think the following loop will do? Can you unravel it?
# + id="9fboPJdAU5yX"
for x in [1,2,3]:
for y in ['a', 'b']:
print(x,y)
# + [markdown] id="HmXwPykdU5yX"
# How might you create a nested loop which prints `1 a` then `2 a` then `3 a` then `1 b` then `2 b` then `3 b`? Try it below.
# + id="b97leVdkU5yX"
# Insert your loop here.
# + [markdown] id="ofCBY24EU5yX"
# Among popular programming languages, Python is particular about indentation. Other languages indicate scope with open/close braces, for example, and indentation is just a matter of style. By requiring indentation to indicate scope, Python effectively removes the need for open/close braces, and enforces a readable style.
#
# We have now encountered data types, operations, variables, and loops. Taken together, these are powerful tools for computation! Now complete the following exercises for more practice.
# + [markdown] id="JBr_HgrSU5yX"
# ## Exercises
# + [markdown] id="FEQTm_WZU5yX"
# 1. Describe how Python interprets division with remainder when the divisor and/or dividend is negative.
# 2. What is the remainder when $2^{90}$ is divided by $91$?
# 3. How many multiples of 13 are there between 1 and 1000?
# 4. How many *odd* multiples of 13 are there between 1 and 1000?
# 5. What is the sum of the numbers from 1 to 1000?
# 6. What is the sum of the squares, from $1 \cdot 1$ to $1000 \cdot 1000$?
# 7. Euler proved that
# $$\frac{1}{1^4} + \frac{1}{2^4} + \frac{1}{3^4} + \cdots = \frac{\pi^4}{C},$$
# for some positive integer $C$. Use Python to guess what $C$ is.
# + id="F870MLhKU5yX"
# Insert your solutions here.
# + [markdown] id="nS9xYcpCU5yY"
# # Explorations
#
# Now that you have learned the basics of computation in Python and loops, we can start exploring some interesting mathematics! We are going to look at approximation here -- some ancient questions made easier with programming.
# + [markdown] id="IfPCPAu5U5yY"
# ## Exploration 1: Approximating square roots.
#
# We have seen how Python can do basic arithmetic -- addition, subtraction, multiplication, and division. But what about other functions, like the square root? In fact, Python offers a few functions for the square root, but that's not the point. How can we compute the square root using only basic arithmetic?
#
# Why might we care?
#
# 1. We might want to know the square root of a number with more precision than the Python function offers.
# 2. We might want to understand how the square root is computed... under the hood.
# 3. Understanding approximations of square roots and other functions is important, because we might want to approximate other functions in the future (that aren't pre-programmed for us).
#
# Here is a method for approximating the square root of a number $X$.
#
# 1. Begin with a guess $g$.
# 2. Observe that $g \cdot (X / g) = X$. Therefore, among the two numbers $g$ and $(X/g)$, one will be less than or equal to the square root of X, and the other will be greater than or equal to the square root.
# 3. Take the average of $g$ and $(X/g)$. This will be closer to the square root than $g$ or $X/g$ (unless your guess is exactly right!)
# 4. Use this average as a new guess... and go back to the beginning.
#
# Now implement this in Python to approximate the square root of 2. Use a loop, so that you can go through the approximation process 10 times or 100 times or however many you wish. Explore the effect of different starting guesses. Would a change in the averaging function improve the approximation? How quickly does this converge? How does this change if you try square roots of different positive numbers?
#
# Write your code (Python) and findings (in Markdown cells) in a readable form. Answer the questions in complete sentences.
#
#
# + id="PoGqUlqoU5yY"
# Start your explorations here!
# + [markdown] id="gHxmSYaoc8v0"
# Describe your findings here.
# + [markdown] id="fOUbCcfxU5yY"
# ## Exploration 2: Approximating e and pi.
#
# Now we approximate two of the most important constants in mathematics: e and pi. There are multiple approaches, but e is pretty easy with the series expansion of e^x. First, approximate e by evaluating the Taylor series expansion of e^x at x=1. I.e., remember from calculus that
# $$e^x = 1 + x + \frac{1}{2} x^2 + \frac{1}{3!} x^3 + \frac{1}{4!} x^4 + \cdots.$$
# How many terms are necessary before the float stabilizes? Use a loop, with a running product for the factorials and running sums for the series.
# + id="cH0ApCftU5yY"
# Approximate e here.
# + [markdown] id="YYhT-tDRU5yY"
# Next we will approximate pi, which is much more interesting. We can try a few approaches. For a series-approach (like e), we need a series that converges to pi. A simple example is the arctangent atan(x). Recall (precalculus!) that $atan(1) = \pi/4$. Moreover, the derivative of $atan(x)$ is $1 / (1+x^2)$.
#
# 1. Figure out the Taylor series of $1 / (1+x^2)$ near $x=0$. Note that this is a geometric series!
#
# 2. Figure out the Taylor series of $atan(x)$ near $x=0$ by taking the antiderivative, term by term, of the above.
#
# 3. Try to estimate $\pi$ with this series, using many terms of the series.
# + id="wYlwMQxBU5yY"
# Approximate pi here!
# + [markdown] id="77BvRTGFU5yY"
# Now we'll accelerate things a bit. There's a famous formula of Machin (1706) who computed the first hundred digits of $\pi$. We'll use his identity:
#
# $\pi/4 = 4 \cdot atan(1/5) - atan(1 / 239)$.
#
# This isn't obvious, but there's a tedious proof using sum/difference identities in trig.
#
# Try using this formula now to approximate $\pi$, using your Taylor series for $atan(x)$. It should require fewer terms.
# + id="71EwcYnMU5yY"
# Approximate pi more quickly here!
# + [markdown] id="NazcANnNU5yY"
# Now let's compare this to **Archimedes' method**. Archimedes approximated pi by looking at the perimeters $p(n)$ and $P(n)$ of ($2^n$)-gons inscribed in and circumscribed around a unit circle. So $p(2)$ is the perimeter of a square inscribed in the unit circle. $P(2)$ is the perimeter of a square circumscribed around a unit circle.
#
# Archimedes proved the following (not in the formulaic language of algebra): For all $n \geq 2$,
#
# (P-formula) $P(n+1) = \frac{2 p(n) P(n)}{p(n) + P(n)}$.
#
# (p-formula) $p(n+1) = \sqrt{ p(n) P(n+1) }$.
#
# 1. Compute $p(2)$ and $P(2)$.
#
# 2. Use these formulas to compute $p(10)$ and $P(10)$. Use this to get a good approximation for $\pi$!
#
# We could use our previous sqrt function if you want, we'll take a fancier high-precision approach. "mpmath" is a Python package for high-precision calculation. It should be accessible through Google Colab. You can read the full documentation at http://mpmath.org/doc/current/
#
# First we load the package and print its status.
# + id="uJZZ9M_PU5yZ"
from mpmath import *
print(mp)
# + [markdown] id="17mWwbgyU5yZ"
# The number mp.dps is (roughly) the number of decimal digits that mpmath will keep track of in its computations. mp.prec is the binary precision, a bit more than 3 times the decimal precision. We can change this to whatever we want.
# + id="f2aNKEWeU5yZ"
mp.dps = 50 # Let's try 50 digits precision to start.
print(mp)
# + [markdown] id="AVFYKq9hU5yZ"
# mpmath has a nice function for square roots. Compare this to your approximation from before!
# + id="IynhKMaLU5yZ"
sqrt(2) # mpf(...) stands for an mp-float.
# + id="quMwWJJ3U5yZ"
type(sqrt(2)) # mpmath stores numbers in its own types!
# + id="3WEkkeNNU5yZ"
4*mp.atan(1) # mpmath has the arctan built in. This should be pretty close to pi!
# + [markdown] id="8oMFGUkiU5yZ"
# Now try Archimedes' approximation of pi. Use the mpmath sqrt function along the way. How many iterations do you need to get pi correct to 100 digits? Compare this to the arctan-series (not the mpmath atan function) via Machin's formula.
# + id="ulLOs22VU5yZ"
# Explore and experiment.
| P4M_Notebook1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 6/16/2021
#
# all normal tissue finder - make a Bar chart of # vocabularies by tissue type
#
# +
# basic packages
import os, glob
import pandas as pd
import numpy as np; np.random.seed(0)
import itertools
from collections import Counter, defaultdict
import time
# Import tools needed for visualization
from upsetplot import plot, from_memberships
import seaborn as sns;
import matplotlib.pyplot as plt
plt.style.use('seaborn-paper')
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Arial']})
# -
save_dir = '../data/processed/fig4_modelling/vocab_sum/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# +
# TISSUE = 'GDSD0'
normal_tissues = ['Airway','Astrocytes','Bladder','Colon','Esophageal','GDSD6','GM12878','HMEC','Melanocytes','Ovarian',
'Pancreas','Prostate','Renal','Thyroid','Uterine']
normal_tissues_dict = dict(zip(normal_tissues,range(len(normal_tissues))))
# -
# # 1. Tissue specific transcription factors
#
# - manual annotation
# - cell type specific expressed tfs (HOCOMOCO)
# - modelling tfs
# ## A. cell type specific expressed tfs (HOCOMOCO)
# tfs
tf_annon_df = pd.read_csv('../data/external/HOCOMOCOv11_annotation.csv',index_col=0)
tf_annon_df['id_trim'] = tf_annon_df['id'] + '.pwm.trim'
tf_name_to_id_dict = pd.Series(tf_annon_df.id_trim.values, index=tf_annon_df.tf.values).to_dict()
tf_id_to_name_dict = pd.Series(tf_annon_df.tf.values, index=tf_annon_df.id_trim.values).to_dict()
print(len(tf_name_to_id_dict))
THRES=1
rna_tpm_file = '../data/interim/rna/tissue_tpm_sym.csv'
rna_df = pd.read_csv(rna_tpm_file,index_col=0)
rna_df_tf = rna_df.loc[tf_annon_df.tf.values,normal_tissues]
# rna_df_log = np.log2(rna_df+1e-2)
# rna_df_norm = as.data.frame(scale(rna_df_log, center = TRUE, scale = TRUE))
# head(rna_df_norm)
num_tissues_per_tf = pd.DataFrame(rna_df_tf>THRES).sum(axis=1)
# number of unique tfs
unique_tfs = num_tissues_per_tf.index.values[num_tissues_per_tf==1]
print(unique_tfs.shape)
unique_tf_to_tissue = pd.DataFrame(rna_df_tf>THRES).reset_index().melt('index')
unique_tf_to_tissue = unique_tf_to_tissue[unique_tf_to_tissue['value']]
unique_tf_to_tissue = unique_tf_to_tissue[unique_tf_to_tissue['index'].isin(unique_tfs)]
unique_tf_to_tissue = unique_tf_to_tissue[['index','variable']]
unique_tf_to_tissue.columns = ['tf','cell_type']
unique_tf_to_tissue.cell_type.value_counts()
# ### manual annotation - from literature
#get tfs
tf_df_manual = pd.read_csv('../data/external/transcription_factor_info_061521.csv').drop_duplicates()
# TFS = sorted(set(tf_df[tf_df['cell_type']=='Keratinocytes']["tf"]))##### DIFFERENT FOR EACH TISSUE
# print(len(TFS))
# print(TFS)
tf_df_manual.cell_type.value_counts()
tf_df = pd.concat([unique_tf_to_tissue,tf_df_manual],sort=True).drop_duplicates()
tf_df.cell_type.value_counts()
# # 2 helper functions
pd.read_csv(os.path.join(vocab_dir,'expr_'+tissue+'_pro_pro_vocab_info.csv' ))
# +
def get_other_vocab_word(row, next_row):
if row['vocab']!=next_row['vocab']:
return False
vocab_word = set(row['tf'])
vocab_set = set(row['vocab'].split('::'))
other_vocab = list(vocab_set - vocab_word)[0]
return other_vocab == next_row['tf']
def check_distance(row,next_row,max_dist=MAX_DIST):
if row['chr_m']==next_row['chr_m']:
if row['stop_m']<next_row['start_m']:
tot_dist = next_row['stop_m'] - row['start_m']
btn_dist = next_row['start_m'] - row['stop_m']
return (tot_dist < max_dist), tot_dist, btn_dist
return False,-1,-1
def check_tissue(row,next_row,tfs=TFS):
if (row['tf'] in tfs) & (next_row['tf'] in tfs):
return 'both'
elif (row['tf'] in tfs) | (next_row['tf'] in tfs):
return 'one'
else:
return 'none'
def get_hits(vocab_file,tfs=TFS):
print('**** reading', vocab_file)
vocab_df = pd.read_csv(vocab_file)
print(vocab_df.shape)
idx = 0
idx_hits = 0
results_dict = {}
while idx < (vocab_df.shape[0]-1):
# look at next
row = vocab_df.iloc[idx,:]
next_row = vocab_df.iloc[idx+1,:]
check_vocab_pair = get_other_vocab_word(row,next_row)
check_dist,tot_dist, btn_dist = check_distance(row, next_row)
check_tissue_tf = check_tissue(row,next_row,tfs)
if (check_dist and check_vocab_pair):
# print('hi',idx)
# print(row)
# print(next_row)
results_dict[idx_hits] = {'vocab_pair':row['vocab'],'tot_dist':tot_dist,'btn_dist':btn_dist,
'chr':row['chr'],'start':row['start_m'],'stop':next_row['stop_m'],
'vocab1':row['tf'],'vocab1_start':row['start_m'], 'vocab1_stop': row['stop_m'],
'vocab2':next_row['tf'],'vocab2_start':next_row['start_m'], 'vocab2_stop': next_row['stop_m'],
'genes':row['genes'],'num_genes':len(row['genes'].split('|')), 'tissue':row['tissue'],
'check_tissuetf':check_tissue_tf}
idx_hits+=1
idx+=1
print('num_hits',idx_hits)
results_df = pd.DataFrame.from_dict(results_dict, orient='index')
return results_df
def filter_results(results_df,min_hits_per_vocab=10):
print('shape', results_df.shape)
vocab_counts = results_df.vocab_pair.value_counts()
print('original num vocab',vocab_counts.shape[0])
vocab_to_include = vocab_counts[vocab_counts>min_hits_per_vocab].index.values
print('filt num vocab',vocab_to_include.shape[0])
results_df_filt = results_df[results_df.vocab_pair.isin(vocab_to_include)]
return results_df_filt
def get_counts(results_df, label):
counts_df = pd.DataFrame(results_df.vocab_pair.value_counts())
counts_df.columns = ['num_instance']
counts_df['label']=label
return counts_df
def get_vocabs(tissue, tfs, save=True,filter_thres = ['none']):
"""
pipeline to get vocabs
filter thres is a list of 'none' or #'both' # 'one'#'none','one',
"""
tfs = sorted(set(tfs))
print(tissue, 'num tfs', len(tfs))
print(tfs)
pro_pro_file = os.path.join(vocab_dir,'expr_'+tissue+'_pro_pro_vocab_info.csv' )
loop_loop_file = os.path.join(vocab_dir,'expr_'+tissue+'_loop_loop_vocab_info.csv' )
#Takes awhile
# Step 1. get expressiod and stability for specific config regions
results_expr_pro_pro = get_hits(pro_pro_file, tfs=tfs)
results_expr_loop_loop = get_hits(loop_loop_file, tfs=tfs)
# Step 2: raw stats
print('pre genomic instance filter')
motifs_pro_pro = sorted(set(list(results_expr_pro_pro.vocab1.unique())+list(results_expr_pro_pro.vocab2.unique())))
print('num motifs in pro_pro', len(motifs_pro_pro))
print(motifs_pro_pro)
motifs_loop_loop = sorted(set(list(results_expr_loop_loop.vocab1.unique())+list(results_expr_loop_loop.vocab2.unique())))
print('num motifs in loop_loop', len(motifs_loop_loop))
print(motifs_loop_loop)
print('num vocab in expression enrichment (pro-pro region): ', results_expr_pro_pro.vocab_pair.unique().shape[0])
# print('num vocab in expression enrichment intersected with stability (pro-pro region): ', results_stability_pro_pro.vocab_pair.unique().shape[0])
# print(results_stability_pro_pro.vocab_pair.unique())
print('num vocab in expression enrichment (loop-loop region): ', results_expr_loop_loop.vocab_pair.unique().shape[0])
# print('num vocab in expression enrichment intersected with stability (loop-loop region): ', results_stability_loop_loop.vocab_pair.unique().shape[0])
# print(results_stability_loop_loop.vocab_pair.unique())
# step 3: filter expression enriched vocab words if then have at least 10 genomic instances then get stats
results_expr_pro_pro = filter_results(results_expr_pro_pro,min_hits_per_vocab=10)
results_expr_loop_loop = filter_results(results_expr_loop_loop,min_hits_per_vocab=10)
print('post genomic instance filter')
print('num vocab in expression enrichment (pro-pro region): ', results_expr_pro_pro.vocab_pair.unique().shape[0])
# print('num vocab in expression enrichment intersected with stability (pro-pro region): ', results_stability_pro_pro.vocab_pair.unique().shape[0])
print(results_expr_pro_pro.vocab_pair.unique())
print('num vocab in expression enrichment (loop-loop region): ', results_expr_loop_loop.vocab_pair.unique().shape[0])
# print('num vocab in expression enrichment intersected with stability (loop-loop region): ', results_stability_loop_loop.vocab_pair.unique().shape[0])
print(results_expr_loop_loop.vocab_pair.unique())
# step 4: filter expr vocab words based on whether there is they are annotated for skin
print(results_expr_pro_pro[['vocab_pair','check_tissuetf']].drop_duplicates().check_tissuetf.value_counts())
print(results_expr_loop_loop[['vocab_pair','check_tissuetf']].drop_duplicates().check_tissuetf.value_counts())
results_expr_pro_pro_tissue = results_expr_pro_pro[results_expr_pro_pro.check_tissuetf.isin(filter_thres)]
print('pro-pro region')
print('total vocab:',results_expr_pro_pro.vocab_pair.unique().shape[0],'tissue annon vocab:', results_expr_pro_pro_tissue.vocab_pair.unique().shape[0])
print(results_expr_pro_pro_tissue.vocab_pair.unique())
results_expr_loop_loop_tissue = results_expr_loop_loop[results_expr_loop_loop.check_tissuetf.isin(filter_thres)]
print('loop-loop region')
print('total vocab:',results_expr_loop_loop.vocab_pair.unique().shape[0],'tissue annon vocab:', results_expr_loop_loop_tissue.vocab_pair.unique().shape[0])
print(results_expr_loop_loop_tissue.vocab_pair.unique())
# step 5: add in stability scores vocab pairs that pass the genomic instance filter and get genomic instance counts
vocab_summary_df = pd.concat([# get_counts(results_stability_pro_pro, 'stability_pro'),
get_counts(results_expr_pro_pro_tissue, 'expr_pro_tissue'),
# get_counts(results_stability_loop_loop, 'stability_loop'),
get_counts(results_expr_loop_loop_tissue, 'expr_loop_tissue')],axis=0)
vocab_summary_df.index.set_names('vocab',inplace=True)
vocab_summary_df.reset_index(inplace=True)
vocab_summary_df = vocab_summary_df.groupby('vocab').agg({'num_instance':sum, 'label':'|'.join}).reset_index()
vocab_summary_df['tissue']=tissue
print(vocab_summary_df.label.value_counts())
print('*****, number of vocab words', len(vocab_summary_df))
# results_stability_loop_loop.vocab_pair.unique()
# Saving..
if save:
vocab_summary_df.to_csv(os.path.join(save_dir, tissue+'_vocab_summary.csv'))
return vocab_summary_df
# -
# # 3. Vocabulary genomic instances - running
#
#
# global variables
vocab_dir = '../data/processed/fig4_modelling/tf_tf_pairs/'
MAX_DIST=135
# + jupyter={"outputs_hidden": true}
# %%time
vocab_tissue_all = pd.DataFrame()
for tissue in normal_tissues:
print('==============================================================')
tfs = tf_df[tf_df.cell_type==tissue].tf.values
# if os.path.exists(os.path.join(save_dir, tissue+'_vocab_summary.csv')):
# print('skipped',tissue, 'ar')
vocab_summary_df = get_vocabs(tissue, tfs, save=True,filter_thres = ['none'])
vocab_tissue_all = pd.concat([vocab_tissue_all, vocab_summary_df])
# -
vocab_tissue_all.tissue.value_counts()
# no filtering
# ### redo some of them
# + jupyter={"outputs_hidden": true}
tissue= 'HMEC'
tfs = tf_df[tf_df.cell_type==tissue].tf.values
tfs = list(tfs)+['ARID5B', 'ATF2', 'BACH1', 'BACH2', 'CEBPG', 'DDIT3', 'DLX1', 'FOS', 'FOSB', 'FOSL1', 'FOSL2', 'FOXA1', 'GLI3', 'HES7', 'HEY1', 'HLTF', 'HMGA1', 'HOXA10', 'HOXA5', 'HOXA9', 'HOXB2', 'HOXC13', 'HOXC6', 'HOXC8', 'IRF5', 'IRF7', 'IRX2', 'IRX3', 'JUN', 'JUNB', 'JUND', 'KLF9', 'LHX6', 'MAF', 'MAFF', 'MAFG', 'MAFK', 'MEIS3', 'MESP1', 'MNX1', 'MSX1', 'MSX2', 'NFATC1', 'NFE2L1', 'NFE2L2', 'NFIA', 'OSR2', 'PBX1', 'POU2F2', 'PPARG', 'RREB1', 'RUNX2', 'RUNX3', 'SIX1', 'SMAD4', 'SOX13', 'SP2', 'TCF7', 'TEAD3', 'TP53', 'TP63', 'TWIST1', 'UBP1', 'ZBTB18', 'ZBTB49', 'ZBTB6', 'ZFP28', 'ZNF134', 'ZNF18', 'ZNF331', 'ZNF41', 'ZNF467', 'ZNF490', 'ZNF502', 'ZNF554', 'ZNF563', 'ZNF667', 'ZNF816', 'ZNF85']+['ARID5B', 'ATF2', 'BACH1', 'BACH2', 'CEBPG', 'DDIT3', 'DLX1', 'FOS', 'FOSB', 'FOSL1', 'FOSL2', 'GLI3', 'HES7', 'HEY1', 'HLTF', 'HMGA1', 'HOXA10', 'HOXA5', 'HOXB2', 'HOXC6', 'HOXC8', 'IRF5', 'IRX2', 'IRX3', 'JUN', 'JUNB', 'JUND', 'KLF9', 'LHX6', 'MAF', 'MAFG', 'MAFK', 'MECOM', 'MEIS3', 'MESP1', 'MSX2', 'NFATC1', 'NFE2L1', 'NFE2L2', 'NFIA', 'OSR2', 'PBX1', 'POU2F2', 'RUNX2', 'RUNX3', 'SIX1', 'SMAD4', 'SOX13', 'SP2', 'TCF7', 'TP63', 'TWIST1', 'UBP1', 'ZBTB18', 'ZBTB49', 'ZBTB6', 'ZFP28', 'ZNF134', 'ZNF18', 'ZNF331', 'ZNF41', 'ZNF467', 'ZNF490', 'ZNF502', 'ZNF554', 'ZNF667', 'ZNF816', 'ZNF85']
# if os.path.exists(os.path.join(save_dir, tissue+'_vocab_summary.csv')):
# print('skipped',tissue, 'ar')
vocab_summary_df = get_vocabs(tissue, tfs, save=True,filter_thres = ['none', 'one','both'])
# +
# %%time
tissue= 'Uterine'
tfs = tf_df[tf_df.cell_type==tissue].tf.values
# tfs = list(tfs)+['ARID5B', 'ATF2', 'BACH1', 'BACH2', 'CEBPG', 'DDIT3', 'DLX1', 'FOS', 'FOSB', 'FOSL1', 'FOSL2', 'FOXA1', 'GLI3', 'HES7', 'HEY1', 'HLTF', 'HMGA1', 'HOXA10', 'HOXA5', 'HOXA9', 'HOXB2', 'HOXC13', 'HOXC6', 'HOXC8', 'IRF5', 'IRF7', 'IRX2', 'IRX3', 'JUN', 'JUNB', 'JUND', 'KLF9', 'LHX6', 'MAF', 'MAFF', 'MAFG', 'MAFK', 'MEIS3', 'MESP1', 'MNX1', 'MSX1', 'MSX2', 'NFATC1', 'NFE2L1', 'NFE2L2', 'NFIA', 'OSR2', 'PBX1', 'POU2F2', 'PPARG', 'RREB1', 'RUNX2', 'RUNX3', 'SIX1', 'SMAD4', 'SOX13', 'SP2', 'TCF7', 'TEAD3', 'TP53', 'TP63', 'TWIST1', 'UBP1', 'ZBTB18', 'ZBTB49', 'ZBTB6', 'ZFP28', 'ZNF134', 'ZNF18', 'ZNF331', 'ZNF41', 'ZNF467', 'ZNF490', 'ZNF502', 'ZNF554', 'ZNF563', 'ZNF667', 'ZNF816', 'ZNF85']+['ARID5B', 'ATF2', 'BACH1', 'BACH2', 'CEBPG', 'DDIT3', 'DLX1', 'FOS', 'FOSB', 'FOSL1', 'FOSL2', 'GLI3', 'HES7', 'HEY1', 'HLTF', 'HMGA1', 'HOXA10', 'HOXA5', 'HOXB2', 'HOXC6', 'HOXC8', 'IRF5', 'IRX2', 'IRX3', 'JUN', 'JUNB', 'JUND', 'KLF9', 'LHX6', 'MAF', 'MAFG', 'MAFK', 'MECOM', 'MEIS3', 'MESP1', 'MSX2', 'NFATC1', 'NFE2L1', 'NFE2L2', 'NFIA', 'OSR2', 'PBX1', 'POU2F2', 'RUNX2', 'RUNX3', 'SIX1', 'SMAD4', 'SOX13', 'SP2', 'TCF7', 'TP63', 'TWIST1', 'UBP1', 'ZBTB18', 'ZBTB49', 'ZBTB6', 'ZFP28', 'ZNF134', 'ZNF18', 'ZNF331', 'ZNF41', 'ZNF467', 'ZNF490', 'ZNF502', 'ZNF554', 'ZNF667', 'ZNF816', 'ZNF85']
# if os.path.exists(os.path.join(save_dir, tissue+'_vocab_summary.csv')):
# print('skipped',tissue, 'ar')
vocab_summary_df = get_vocabs(tissue, tfs, save=True,filter_thres = ['none', 'one','both'])
# -
# ### filtering
tissue_to_tfs_dict = {}
for tissue in normal_tissues:
tfs = tf_df[tf_df.cell_type==tissue].tf.values
tissue_to_tfs_dict[tissue] = tfs
vocab_tissue_all_filt = pd.DataFrame()
for tissue in normal_tissues:
tfs = tf_df[tf_df.cell_type==tissue].tf.values
vocab_summary_df = pd.read_csv(os.path.join(save_dir, tissue+'_vocab_summary.csv'), index_col=0)
print(tissue, vocab_summary_df.shape)
if vocab_summary_df.shape[0]>100:
vocab_summary_df1 = vocab_summary_df.copy()
vocab_summary_df1[['tf1','tf2']] = vocab_summary_df1.vocab.str.split('::',expand=True)
vocab_summary_df1 = vocab_summary_df1[(vocab_summary_df1.tf1.isin(tfs))|(vocab_summary_df1.tf2.isin(tfs))]
print(tissue, 'filtered')
print(vocab_summary_df.shape, vocab_summary_df1.shape)
vocab_tissue_all_filt = pd.concat([vocab_tissue_all_filt,vocab_summary_df1])
else:
vocab_tissue_all_filt = pd.concat([vocab_tissue_all_filt,vocab_summary_df])
print(vocab_tissue_all_filt.tissue.value_counts())
vocab_tissue_all_filt.to_csv(os.path.join(save_dir, 'all_normal_tissues_vocab_summary.csv'))
vocab_tissue_all_filt.vocab.unique().shape, vocab_tissue_all_filt.shape
vocab_tissue_all_filt.tissue.value_counts().describe()
# vocab_tissue_all_filt.tissue.value_counts().plot.bar()
vocab_counts = pd.DataFrame(vocab_tissue_all_filt.tissue.value_counts()).reset_index()
vocab_counts.columns = ['tissue','count']
vocab_counts
sns.set(style="whitegrid")
ax = sns.barplot(data=vocab_counts, x='tissue', y='count',color='black')
plt.xticks(rotation=90)
plt.subplots_adjust(top=1, bottom=.3)
plt.savefig(os.path.join(save_dir,'normal_tissue_vocab_counts.pdf'))
# ### upset plot
# +
plt.style.use('seaborn-paper')
vocab_tissue_all_filt = pd.read_csv(os.path.join(save_dir, 'all_normal_tissues_vocab_summary.csv'),index_col=0)
all_normal = vocab_tissue_all_filt.groupby('vocab').agg({'tissue': '|'.join,'num_instance':sum, 'label': lambda x: '|'.join(list(set(x)))})
display(all_normal)
tissue_counts = all_normal.tissue.value_counts()
print(tissue_counts)
names = [x.split('|') for x in tissue_counts.index]
values = list(tissue_counts.values)
data_upset = from_memberships(names, data=values)
plot(data_upset)
# plt.savefig(os.path.join(save_dir, 'vocabs_cancer_upset.pdf'))
# -
# # numbers
# ```
# ### of all regulatory TF vocabulary motifs occur pairwise within the same promoter region (Intra-promoter), #### occur pairwise within the same enhancer region (Intra-Enhancer), and #### occur with one motif residing in a distal enhancer region and the paired motif residing in the looped target gene promoter (Inter-Enhancer-Promoter)
#
# ```
vocab_tissue_all_filt = pd.read_csv(os.path.join(save_dir, 'all_normal_tissues_vocab_summary.csv'),index_col=0)
vocab_tissue_all_filt[:5]
save_dir
# +
tf_tf_loop_type_files = glob.glob('../data/processed/fig4_modelling/tf_tf_pairs/expr*loop_type.csv')
tissue_loop_df_dict={}
for file in tf_tf_loop_type_files:
tissue = os.path.basename(file).split('_')[1]
print(tissue, file)
tissue_loop_df_dict[tissue] = pd.read_csv(file,index_col=0).fillna('')
# pd.read_csv('../data/processed/fig4_modelling/tf_tf_pairs/expr_Astrocytes_loop_type.csv',index_col=0)
# +
# -
vocab_tissue_all_filt['num_genes_pro_pro'] = 0
vocab_tissue_all_filt['num_genes_pro_loop'] = 0
vocab_tissue_all_filt['num_genes_loop_loop'] = 0
vocab_tissue_all_filt['num_genes_all_config'] = 0
for idx, row in vocab_tissue_all_filt.iterrows():
df = tissue_loop_df_dict[row['tissue']]
info = df.loc[row['vocab']]
genes_all = set(info.pro_pro_genes.split('|')).union(set(info.pro_loop_genes.split('|'))).union(set(info.loop_loop_genes.split('|')))
vocab_tissue_all_filt.at[idx,'num_genes_pro_pro'] = info['pro_pro_count']
vocab_tissue_all_filt.at[idx,'num_genes_pro_loop'] = info['pro_loop_count']
vocab_tissue_all_filt.at[idx,'num_genes_loop_loop'] = info['loop_loop_count']
vocab_tissue_all_filt.at[idx,'num_genes_all_config'] = len(genes_all)
vocab_tissue_all_filt.sum()
# +
config_df = vocab_tissue_all_filt[['num_genes_pro_pro','num_genes_pro_loop','num_genes_loop_loop' ]]
config_df = config_df>0
config_df.sum()/config_df.shape[0]
# +
# vocab_tissue_all_filt['frac_genes_pro_pro'] = vocab_tissue_all_filt['num_genes_pro_pro']/vocab_tissue_all_filt['num_genes_all_config']
# vocab_tissue_all_filt['frac_genes_loop_loop'] = vocab_tissue_all_filt['num_genes_loop_loop']/vocab_tissue_all_filt['num_genes_all_config']
# vocab_tissue_all_filt['frac_genes_pro_loop'] = vocab_tissue_all_filt['num_genes_pro_loop']/vocab_tissue_all_filt['num_genes_all_config']
# vocab_tissue_all_filt['frac_genes_pro_pro_wt'] = vocab_tissue_all_filt['num_genes_pro_pro']*2*vocab_tissue_all_filt['weighting_factor']
# vocab_tissue_all_filt['frac_genes_loop_loop_wt'] = vocab_tissue_all_filt['num_genes_loop_loop']
# vocab_tissue_all_filt['frac_genes_pro_loop_wt'] = vocab_tissue_all_filt['num_genes_pro_loop']*vocab_tissue_all_filt['weighting_factor']
# vocab_tissue_all_filt['num_genes_all_config_wt'] = vocab_tissue_all_filt['frac_genes_pro_pro_wt'] + vocab_tissue_all_filt['frac_genes_loop_loop_wt'] + vocab_tissue_all_filt['frac_genes_pro_loop_wt']
# vocab_tissue_all_filt['frac_genes_pro_pro_wt'] = vocab_tissue_all_filt['frac_genes_pro_pro_wt']/vocab_tissue_all_filt['num_genes_all_config_wt']
# vocab_tissue_all_filt['frac_genes_loop_loop_wt'] = vocab_tissue_all_filt['frac_genes_loop_loop_wt']/vocab_tissue_all_filt['num_genes_all_config_wt']
# vocab_tissue_all_filt['frac_genes_pro_loop_wt'] = vocab_tissue_all_filt['frac_genes_pro_loop_wt']/vocab_tissue_all_filt['num_genes_all_config_wt']
# -
| notebooks/8D8_normal_vocab_enrich.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MNIST Classification
#
# The goal of this notebook is to implement a model consisting of just a softmax layer for classifying MNIST images, hence to perform a multiclass classification task. The notation follows the convention introduced for PW02:
#
# <code>m</code>: Number of samples <br>
# <code>n</code>: Number of features
#
# Please follow the instructions in the cells below to conduct the following tasks:
#
# 1. Implement the functions to prepare the data (very similar to PW02).
# 2. Implement softmax, the cross entropy cost for multiclass and its gradient.
# 3. Implement the `optimize` function (with given function signature) by using the classes `Metrics` and `MiniBatches`.
# 4. Run several trainings with different hyper-parameter settings and determine your favorite setting (1b).
# 5. Compute the Error Rates for the individual Digits (1c)
# 6. Analyze misclassified images with worst score (1d)
# 8. Plot the weights as images (1e)
# 7. Analyze different weights initialisation strategies (1f)
# ### Loading and Preparing the Data
#
# Some preparatory steps to be applied before training:
# * Imports: numpy and matplotlib
# * Loading the data (same as for PW of previous week)
# * Some plot utilities
# * Splitting the dataset into train and test
# * Data Standarisation
import numpy as np
import matplotlib.pyplot as plt
### START YOUR CODE ###
data_home = "./data"
### END YOUR CODE ###
# #### Load Data
# +
from sklearn.datasets import fetch_openml
def load_mnist(data_home):
"""
Loads the mnist dataset, prints the shape of the dataset and
returns the array with the images, the array with associated labels
and the shape of the images.
Parameters:
data_home -- Absolute path to the DATA_HOME
Returns:
x -- array with images of shape (784,m) where m is the number of images
y -- array with associated labels with shape (1,m) where m is the number of images
shape -- (28,28)
"""
mnist = fetch_openml(name='mnist_784', version=1, cache=True, data_home=data_home)
x, y = mnist['data'].T, np.array(mnist['target'], dtype='int').T
m = x.shape[1]
y = y.reshape(1,m)
print("Loaded MNIST original:")
print("Image Data Shape" , x.shape)
print("Label Data Shape", y.shape)
return x,y,(28,28)
# -
# #### Plotting Utility
# +
def plot_img(img, label, shape):
"""
Plot the x array by reshaping it into a square array of given shape
and print the label.
Parameters:
img -- array with the intensities to be plotted of shape (shape[0]*shape[1])
label -- label
shape -- 2d tuple with the dimensions of the image to be plotted.
"""
plt.imshow(np.reshape(img, shape), cmap=plt.cm.gray)
plt.title("Label %i"%label)
def plot_digits(x,y,selection,shape, cols=5):
"""
Plots the digits in a mosaic with given number of columns.
Arguments:
x -- array of images of size (n,m)
y -- array of labels of size (1,m)
selection -- list of selection of samples to be plotted
shape -- shape of the images (a 2d tuple)
selected_digits -- tuple with the two selected digits (the first associated with label 1, the second with label 0)
"""
if len(selection)==0:
print("No images in the selection!")
return
cols = min(cols, len(selection))
rows = len(selection)/cols+1
plt.figure(figsize=(20,4*rows))
for index, (image, label) in enumerate(zip(x.T[selection,:], y.T[selection,:])):
plt.subplot(rows, cols, index+1)
plt.imshow(np.reshape(image, shape), cmap=plt.cm.gray)
plt.title('Sample %i\n Label %i\n' % (selection[index],label), fontsize = 12)
plt.tight_layout()
# -
# #### Split Data and reshape as specified
#
# Split the data into training set and test set.
# We use the scikit-learn function 'train_test_split' with 20\% test data.
#
# Furthermore, we reshape input data x to (n,m).
# +
from sklearn.model_selection import train_test_split
def prepare_train_test(x, y, test_size=0.20):
"""
Split the dataset consisting of an array of images (shape (n, m)) and an array of labels (shape (1, m))
into train and test set.
Parameters:
x -- Array of images of shape (n,m) where m is the number of samples
y -- Array of labels of shape (1, m) where m is the number of samples
test_size -- fraction of samples to reserve as test sample
Returns:
x_train -- np.ndarray of images of shape (n,m1) used for training
y_train -- np.ndarray of labels of shape (1,m1) used for training
x_test -- np.ndarray of images of shape (n,m2) used for testing
y_test -- np.ndarray of labels of shape (1,m2) used for testing
"""
# split
# train_test_split() expects x, y in shapes (m, *), (m, *)
out = train_test_split(x.T, y.T, test_size=0.20, random_state=1)
# transpose back the output obtained from the train_test_split-function
x_train, x_test, y_train, y_test = (x.T for x in out)
print("Shape training set: ", x_train.shape, y_train.shape)
print("Shape test set: ", x_test.shape, y_test.shape)
return x_train, x_test, y_train, y_test
# -
# #### Data Normalisation
#
# Normalize the data - apply min/max normalization.
#
def normalize(x_train, x_test):
"""
Normalizes pixel values using min-max normalization, min and max are calculated globally over all features n
Parameters:
x_train -- Array of training samples of shape (n,m1) where n,m1 are the number of features and samples, respectively.
x_test -- Array of test samples of shape (n,m2) where n,m2 are the number of features and samples, respectively.
Returns:
The arrays with the normalized train and test samples.
"""
### START YOUR CODE ###
x_train_min = np.min(x_train)
x_train_max = np.max(x_train)
x_train = 2*(x_train-x_train_min)/(x_train_max-x_train_min)-1
x_test = 2*(x_test-x_train_min)/(x_train_max-x_train_min)-1
### END YOUR CODE ###
return x_train, x_test
# #### Test normalization
x_train_unittest = np.array([[10, 5], [-5, -10]])
x_test_unittest = np.array([[10, 1], [0, -10]])
actual_train, actual_test = normalize(x_train_unittest, x_test_unittest)
expected_train, expected_test = np.array([[1, 0.5], [-0.5, -1]]), np.array([[1, 0.1], [0, -1]])
np.testing.assert_almost_equal(expected_train, actual_train)
np.testing.assert_almost_equal(expected_test, actual_test)
# ### Softmax
#
# Implement the softmax function - actually, the softmax layer with given weights-matrix $W$ and bias-vector $b$.
# +
def softmax(z):
return np.exp(z) / np.sum(np.exp(z),axis=0) # any column must sum up to 1
def predict(W, b, X):
'''
Compute the per class probabilities for all the m samples by using a softmax layer with parameters (W, b).
Arguments:
W -- weights, a numpy array with shape (ny, nx) (with ny=10 for MNIST).
b -- biases, a numpy array with shape (ny,1)
X -- input data of size (nx,m)
Returns:
A -- a numpy array of shape (ny,m) with the prediction probabilities for the digits.
'''
### START YOUR CODE ###
# page 29
return softmax(np.dot(W,X)+b)
### END YOUR CODE ###
# -
# #### TEST Softmax
# +
W = np.array([[1,-1],[0,1],[-1,1]]).reshape(3,2)
b = np.array([0,0,0]).reshape(3,1)
X = np.array([2, 3]).reshape(2,1)
A = predict(W,b,X)
Aexp = np.array([0.01587624,0.86681333,0.11731043]).reshape(A.shape)
np.testing.assert_array_almost_equal(A,Aexp,decimal=8)
np.testing.assert_array_almost_equal(np.sum(A, axis=0), 1.0, decimal=8)
X = np.array([[2,-1,1,-1],[1,1,1,1]]).reshape(2,4)
A = predict(W,b,X)
Aexp = np.array([[0.46831053, 0.01321289, 0.21194156, 0.01321289],
[0.46831053, 0.26538793, 0.57611688, 0.26538793],
[0.06337894, 0.72139918, 0.21194156, 0.72139918]]
)
np.testing.assert_array_almost_equal(A,Aexp,decimal=8)
np.testing.assert_array_almost_equal(np.sum(A, axis=0), np.ones(4,dtype='float'), decimal=8)
# -
# ### Cost Function (Cross Entropy)
#
# Implement the cross entropy cost function for the multi-class setting.
#
# For later use implement a function to create one-hot vectors.
def cost(Ypred, Y):
"""
Computes the cross entropy cost function for given predicted values (Ypred) and labels (Y).
Parameters:
Ypred -- prediction from softmax, a numpy array of shape (ny,m)
Y -- ground truth labels, a numpy array with shape (1,m) containing digits 0,1,...,9.
Returns:
Cross Entropy Cost
"""
### START YOUR CODE ###
m,n = Ypred.shape
J = -1/n*np.sum(np.log(Ypred[Y,range(n)]))
return J
### END YOUR CODE ###
# #### TEST Cross Entropy Cost
# +
Y = np.array([1])
Ypred = np.array([0.04742587,0.95257413]).reshape(2,1)
J = cost(Ypred,Y)
Jexp = 0.04858735
np.testing.assert_almost_equal(J,Jexp,decimal=8)
Y = np.array([1,1,1,0])
Ypred = np.array([[1.79862100e-02, 6.69285092e-03, 4.74258732e-02, 9.99088949e-01],
[9.82013790e-01, 9.93307149e-01, 9.52574127e-01, 9.11051194e-04]])
Jexp = 0.01859102
J = cost(Ypred,Y)
np.testing.assert_almost_equal(J,Jexp,decimal=8)
# -
def onehot(y,n):
"""
Constructs a one-hot-vector from a given array of labels (shape (1,m), containing numbers 0,1,...,n-1)
and the number of classes n.
The resulting array has shape (n,m) and in row j and column i a '1' if the i-th sample has label 'j'.
Parameters:
y -- labels, numpy array of shape (1,m)
n -- number of labels
Returns:
On-hot-encoded vector of shape (n,m)
"""
### START YOUR CODE ###
'''
m = y.shape[1]
result = np.zeros((n,m))
print("y: ",y)
for j in range(m):
result[y[0,j],j] = 1
'''
m = y.shape[1]
result = np.zeros((n,m),dtype=float)
result[y[0,:],np.arange(m)] = 1
### START YOUR CODE ###
return result
## Test one-hot vector implementation ##
Y = np.array([1,3,0]).reshape(1,3)
onehot_comp = onehot(Y,4)
onehot_exp = np.array([[0,0,1],[1,0,0],[0,0,0],[0,1,0]]).reshape(4,3)
np.testing.assert_almost_equal(onehot_exp,onehot_comp,decimal=8)
# ### Update Rules for the Parameters
#
# Implement the (estimate of) gradient of the cost function (cross entropy implemented above) with respect to the parameters of the softmax layer.
# The contributions from the different samples given in $X$ and $Y$ should be averaged.
def gradient(X, Y, A):
"""
Computes the components of the gradient w.r.t. weights and bias - by using the cross entropy cost.
Arguments:
X -- input data of size (nx,m)
Y -- output labels - a numpy array with shape (1,m).
A -- predicted scores (as output of softmax) - a numpy array with shape (ny,m)
Returns:
gradJ -- dictionary with the gradient w.r.t. W (key "dW" with shape (ny,nx)) and w.r.t. b (key "db" with shape (ny,1))
"""
### START YOUR CODE ###
#Ypred = predict(W,b,X)
m,n = A.shape
mask = onehot(Y,m)
gradJ = {}
gradJ["dW"] = -1/n * np.dot(mask-A,X.T)
gradJ["db"] = -1/n * np.sum(mask-A,axis=1).reshape(m,1)
return gradJ
### END YOUR CODE ###
# #### Test the Calculation of the Gradient
# +
W = np.array([[1,-1],[0,1],[-1,1]]).reshape(3,2)
b = np.array([0,0,0]).reshape(3,1)
X = np.array([[2,-1,1,-1],[1,1,1,1]]).reshape(2,4)
A = predict(W,b,X)
Y = np.array([1,1,1,1]).reshape(1,4)
gradJ = gradient(X,Y,A)
dW = gradJ['dW']
db = gradJ['db']
dWexp = np.array([[ 0.28053421,0.17666947],
[-0.00450948,-0.60619918],
[-0.27602473,0.42952972]]).reshape(3,2)
dbexp = np.array([0.17666947,-0.60619918,0.42952972]).reshape(3,1)
np.testing.assert_array_almost_equal(dW,dWexp,decimal=8)
np.testing.assert_array_almost_equal(db,dbexp, decimal=8)
# -
# ### Metrics for measuring the performance of the algorithm
#
# As metrics we compute the error rate as number of wrong predictions divided by the number of samples.
def error_rate(Ypred, Y):
"""
Compute the error rate defined as the fraction of misclassified samples.
Arguments:
Ypred -- Predicted label, a numpy array of size (1,m)
Y -- ground truth labels, a numpy array with shape (1,m)
Returns:
error_rate
"""
Ypredargmax = np.argmax(Ypred, axis=0)
return np.mean(Y != Ypredargmax)
# ### Optimize (Learn)
#
# As in PW02, we first provide the metrics class that is used for tracking progress during the training (by collecting suitable quantities).
class Metrics():
"""
Allows to collect statistics (such as classification error or cost) that are of interest over the course of training
and for creating learning curves that are a useful tool for analyzing the quality of the learning.
"""
def __init__(self, cost, smooth=False):
"""
Constructor for a metrics object.
Initializes all the statistics to track in form of python lists.
Parameters:
cost -- cost function to use (a python function)
smooth -- if set to true updates learning curve after each training step and also provides learning curves
smoothed over the epoch
"""
self.epochs = []
self.smooth = smooth
self.train_costs_last = []
self.test_costs_last = []
self.train_errors_last = []
self.test_errors_last = []
self.stepsize_w_last = []
self.stepsize_b_last = []
if self.smooth:
self.train_costs_smoothed = []
self.test_costs_smoothed = []
self.train_errors_smoothed = []
self.test_errors_smoothed = []
self.stepsize_w_smoothed = []
self.stepsize_b_smoothed = []
self.cost_function = cost
self.init_epoch()
def init_epoch(self):
self.train_costs_epoch = []
self.test_costs_epoch = []
self.train_errors_epoch = []
self.test_errors_epoch = []
self.stepsize_w_epoch = []
self.stepsize_b_epoch = []
def update_epoch(self, epoch):
"""
Computes the average of the metrics over the epoch and adds the result to the per epoch history
Parameters:
epoch -- the epoch to add to the per epoch cache
"""
self.epochs.append(epoch)
if self.smooth:
self.train_costs_smoothed.append(np.mean(self.train_costs_epoch))
self.test_costs_smoothed.append(np.mean(self.test_costs_epoch))
self.train_errors_smoothed.append(np.mean(self.train_errors_epoch))
self.test_errors_smoothed.append(np.mean(self.test_errors_epoch))
self.stepsize_w_smoothed.append(np.mean(self.stepsize_w_epoch))
self.stepsize_b_smoothed.append(np.mean(self.stepsize_b_epoch))
self.train_costs_last.append(self.train_costs_epoch[-1])
self.test_costs_last.append(self.test_costs_epoch[-1])
self.train_errors_last.append(self.train_errors_epoch[-1])
self.test_errors_last.append(self.test_errors_epoch[-1])
self.stepsize_w_last.append(self.stepsize_w_epoch[-1])
self.stepsize_b_last.append(self.stepsize_b_epoch[-1])
self.init_epoch()
def update_iteration(self, ypred_train, y_train, ypred_test, y_test, dw, db):
"""
Allows to update the statistics to be tracked for a new epoch.
The cost is computed by using the function object passed to the constructor.
Parameters:
epoch -- Epoch
ypred_train -- predicted values on the training samples, a numpy array of shape (1,m1)
y_train -- ground truth labels associated with the training samples, a numpy array of shape (1,m1)
ypred_test -- predicted values on the test samples, a numpy array of shape (1,m2)
y_test -- ground truth labels associated with the test samples, a numpy array of shape (1,m2)
dw -- some lenght measure for the gradient w.r.t. the weights, a numpy array of shape (1,n)
db -- gradient w.r.t. the bias, a scalar
"""
Jtrain = self.cost_function(ypred_train, y_train)
Jtest = self.cost_function(ypred_test, y_test)
train_error = error_rate(ypred_train, y_train)
test_error = error_rate(ypred_test, y_test)
self.train_costs_epoch.append(Jtrain)
self.test_costs_epoch.append(Jtest)
self.train_errors_epoch.append(train_error)
self.test_errors_epoch.append(test_error)
self.stepsize_w_epoch.append(dw)
self.stepsize_b_epoch.append(db)
def print_latest_errors(self):
print ("Train/test error after epoch %i: %f, %f" %(self.epochs[-1], self.train_errors_last[-1], self.test_errors_last[-1]))
def print_latest_costs(self):
print ("Train/test cost after epoch %i: %f, %f" %(self.epochs[-1], self.train_costs_last[-1], self.test_costs_last[-1]))
def plot_cost_curves(self, ymin=None, ymax=None, smooth=True, logy=True):
minvalue = 1e-5
if logy:
plt.semilogy(self.epochs, self.train_costs_last, "b-", label="train")
plt.semilogy(self.epochs, self.test_costs_last, "r-", label="test")
if self.smooth:
plt.semilogy(self.epochs, self.train_costs_smoothed, "b--", label="train_smoothed")
plt.semilogy(self.epochs, self.test_costs_smoothed, "r--", label="test_smoothed")
else:
plt.plot(self.epochs, self.train_costs_last, "b-", label="train")
plt.plot(self.epochs, self.test_costs_last, "r-", label="test")
minvalue = 0.0
if self.smooth:
plt.plot(self.epochs, self.train_costs_smoothed, "b--", label="train_smoothed")
plt.plot(self.epochs, self.test_costs_smoothed, "r--", label="test_smoothed")
plt.ylabel('Cost')
plt.xlabel('Epochs')
xmax = self.epochs[-1]
if not ymin:
ymin = min(max(1e-5,np.min(self.train_costs_last)),max(1e-5,np.min(self.test_costs_last))) * 0.8
if not ymax:
ymax = max(np.max(self.train_costs_last),np.max(self.test_costs_last)) * 1.2
plt.axis([0,xmax,ymin,ymax])
plt.legend()
plt.show()
def plot_error_curves(self, ymin=None, ymax=None, smooth=True, logy=True):
minvalue = 1e-5
if logy:
plt.semilogy(self.epochs, self.train_errors_last, "b-", label="train")
plt.semilogy(self.epochs, self.test_errors_last, "r-", label="test")
if self.smooth:
plt.semilogy(self.epochs, self.train_errors_smoothed, "b--", label="train_smoothed")
plt.semilogy(self.epochs, self.test_errors_smoothed, "r--", label="test_smoothed")
else:
plt.plot(self.epochs, self.train_errors_last, "b-", label="train")
plt.plot(self.epochs, self.test_errors_last, "r-", label="test")
minvalue = 0.0
if self.smooth:
plt.plot(self.epochs, self.train_errors_smoothed, "b--", label="train_smoothed")
plt.plot(self.epochs, self.test_errors_smoothed, "r--", label="test_smoothed")
plt.ylabel('Errors')
plt.xlabel('Epochs')
xmax = self.epochs[-1]
if not ymin:
ymin = min(max(1e-5,np.min(self.train_errors_last)),max(1e-5,np.min(self.test_errors_last))) * 0.8
if not ymax:
ymax = max(np.max(self.train_errors_last),np.max(self.test_errors_last)) * 1.2
plt.axis([0,xmax,ymin,ymax])
plt.legend()
plt.show()
def plot_stepsize_curves(self, ymin=None, ymax=None, smooth=True):
plt.semilogy(self.epochs, self.stepsize_w_last, label="dw")
plt.semilogy(self.epochs, self.stepsize_b_last, label="db")
if self.smooth and smooth:
plt.semilogy(self.epochs, self.stepsize_w_smoothed, label="dw--")
plt.semilogy(self.epochs, self.stepsize_b_smoothed, label="db--")
plt.ylabel('Step Sizes (dw,db)')
plt.xlabel('Epochs')
xmax = self.epochs[-1]
if not ymin:
ymin = min(max(1e-5,np.min(self.stepsize_w_last)),max(1e-5,np.min(self.stepsize_b_last))) * 0.8
if not ymax:
ymax = max(np.max(self.stepsize_w_last),np.max(self.stepsize_b_last)) * 1.2
plt.axis([0,xmax,ymin,ymax])
plt.legend()
plt.show()
# In addition, we provide a utility class that allows to create mini-batches from the given dataset $(X,Y)$ with given batch size and initially shuffled.
class MiniBatches():
"""
Is initialized (constructed) with features x of shape (nx,m) and the labels y of shape (1,m).
Is reshuffled at construction time.
Then, a next minibatch (MBX,MBY) with MBX of shape (nx,batchsize) and MBY of shape (1,batchsize) is provided by calling next() on the object.
"""
def __init__(self, x, y, batchsize):
self.x = x
self.y = y
m = x.shape[1]
if not batchsize:
self.batchsize = m
else:
self.batchsize = batchsize
self.n = x.shape[0]
self.mb = int(m/batchsize)
self.indices = np.arange(m)
np.random.shuffle(self.indices)
self.ib = 0
def number_of_batches(self):
return self.mb
def next(self):
it = self.indices[self.ib*self.batchsize:(self.ib+1)*self.batchsize]
xbatch = self.x[:,it].reshape(self.n,self.batchsize)
ybatch = self.y[:,it].reshape(1,self.batchsize)
self.ib += 1
return xbatch, ybatch
# #### Optimisation
#
# This function should implement the training loop - adopting mini-batch gradient descent with arbitrary batch size. Implement the given function signature.
def optimize(W, b, x_train, y_train, x_test, y_test, nepochs, alpha, batchsize=32, debug=False):
"""
This function optimizes W and b by running (mini-batch) gradient descent. It starts with the given
weights as initial values and then iteratively updates the parameters for nepochs number of times.
It returns the trained parameters as dictionary (keys "W" and "b") and various quantities
collected during learning in form of a Metrics object. The data (x_train, etc.) is assumed to contain
m1 training and m2 test samples.
Arguments:
W -- weights, a numpy array of size (ny,nx)
b -- biases, a numpy array with shape (ny,1) (with ny=10 for MNIST).
x_train -- input data for training of shape (nx,m1)
y_train -- ground-truth labels - a numpy array with shape (1,m1)
x_test -- input data for training of shape (nx,m2)
y_test -- ground-truth labels - a numpy array with shape (1,m2)
nepochs -- number of iterations of the optimization loop
alpha -- learning rate of the gradient descent update rule
batchsize -- batch size, defaults to 32
debug -- if true prints training and test error values after each epoch. Defaults to True.
Returns:
params -- dictionary containing the (final) weights w and bias b
metrics -- contain the information about the learning curves
"""
metrics = Metrics(cost = cost)
m = x_train.shape[1] # number of samples
nx = x_train.shape[0] # number of input features
mb = int(m/batchsize) # number of mini-batches
print("Optimisation with batchsize %i and %i number of batches per epoch."%(batchsize,mb))
# compute and set the initial values for the metrics curves
ypred_train = predict(W,b,x_train)
ypred_test = predict(W,b,x_test)
metrics.update_iteration(ypred_train, y_train, ypred_test, y_test, 0, 0)
metrics.update_epoch(0)
# Loop over the epochs
for i in range(nepochs):
# prepare shuffled mini-batches for this epoch
batches = MiniBatches(x_train, y_train, batchsize)
### START YOUR CODE ###
'''
If you run batch update, every parameter update requires
your algorithm see each of the n training instances
exactly once, i.e., every epoch your parameters are
updated once.
If you run mini-batch update with batch size = b, every
parameter update requires your algorithm see b of n
training instances, i.e., every epoch your parameters
are updated about n/b times.
If you run SGD update, every parameter update requires
your algorithm see 1 of n training instances, i.e.,
every epoch your parameters are updated about n times.
Link:
https://stats.stackexchange.com/questions/117919/what-are-the-differences-between-epoch-batch-and-minibatch
'''
# navigate towards local minima with gradient descent on behalf of mini-batches
for mini_batch in range(batches.number_of_batches()):
x_train_mini, y_train_mini = batches.next()
ypred_train_mini = predict(W,b,x_train_mini)
gradJ = gradient(x_train_mini,y_train_mini,ypred_train_mini)
dW = alpha * gradJ["dW"]
db = alpha * gradJ["db"]
W = W - dW
b = b - db
ypred_train = predict(W,b,x_train)
ypred_test = predict(W,b,x_test)
metrics.update_iteration(ypred_train, y_train, ypred_test, y_test, dW, db)
### END YOUR CODE ###
metrics.update_epoch(i+1)
if debug:
metrics.print_latest_errors()
metrics.print_latest_costs()
metrics.print_latest_errors()
return {"W": W, "b": b}, metrics
# #### Initialize Parameters
#
# Implement a utility to generate intialized parameters.
#
# As part of exercise 1f (below), different strategies should be considered. In a first round (exercise 1b and following) just use the first setting below (with weights and bias equals 0).
#
# 1. All weights and biases set to zero: $b=0, W=0$
# 2. Biases set to zero, weights generated as independent standard normal random numbers (mean zero, standard deviation 1)
# 3. Biases set to zero, weights generated as independent normal random numbers with mean zero and standard deviation properly scaled (divided by $\sqrt{n}$)
def initialize_params(nx, ny):
"""
This function provides initialized parameters: a weights matrix and a bias vector.
Argument:
nx -- number of input features
ny -- number of output dimensions (number of different labels)
Returns:
w -- initialized weights matrix of shape (ny,nx)
b -- initialized bias vector of shape (ny,1)
"""
### START YOUR CODE ###
# initialize weights (enable one line only!)
w = np.zeros((ny,nx))
#w = np.random.randn(ny,nx)
#w = np.random.normal(0,1/np.sqrt(nx),(ny,nx))
# initialize biases to zero
b = np.zeros((ny,1))
### END YOUR CODE ###
return w, b
x,y, shape = load_mnist(data_home)
x_train1, x_test1, y_train, y_test = prepare_train_test(x, y, test_size=0.20)
x_train,x_test = normalize(x_train1,x_test1)
# #### Run the Training for Specific Setting and Plot Learning Curves
#
# Run the training of the model with a first setting (e.g. alpha=0.2, nepochs=20, batchsize=64).
# +
W,b = initialize_params(28*28, 10)
### START YOUR CODE ###
alpha = 0.2
nepochs = 20
batchsize = 64
params, metrics = optimize(W, b, x_train, y_train, x_test, y_test, nepochs=nepochs, alpha=alpha, batchsize=batchsize, debug=False)
### END YOUR CODE ###
metrics.plot_cost_curves(ymin=0.0, ymax=1.0,logy=False)
metrics.plot_error_curves(ymin=0.0, ymax=0.1,logy=False)
metrics.print_latest_errors()
# -
# ### 1b Explore Hyper-Parameter Settings and Describe your Findings
#
# Now run the training with different settings:
# * Different learning rate
# * Different number of epochs
# * Different batch size
#
# Explore which combination is best suited to obtain good test performance. Keep an eye on random estimates for the error rates due to random parameter initialisation and randomly shuffled mini-batches.
#
# Specify your choice of these hyper-parameters and justify why you consider your choice best suited.
# + tags=[]
import itertools
param_learning_rate = [0.01, 0.02, 0.05, 0.1, 0.2, 0.4, 0.8]
param_nepochs = [20, 40, 80]
param_batchsize = [16, 32, 64]
hyper_param_results = []
for learning_rate, nepochs, batchsize in itertools.product(param_learning_rate, param_nepochs, param_batchsize):
print("Learning Rate:", learning_rate, ", Num of Epochs:", nepochs, ", BatchSize:", batchsize)
params, metrics = optimize(W, b, x_train, y_train, x_test, y_test, nepochs=nepochs, alpha=alpha, batchsize=batchsize, debug=False)
hyper_param_results.append(((learning_rate, nepochs, batchsize), params, metrics))
print()
# +
hyper_param_results = np.array(hyper_param_results)
best_run = hyper_param_results[np.argmin([m.test_errors_last[-1] for m in hyper_param_results[:,2]])]
best_learning_rate, best_nepochs, best_batchsize = best_run[0]
print("Best Learning Rate:", best_learning_rate, ", Best Num of Epochs:", best_nepochs, ", Best BatchSize:", best_batchsize)
best_metrics = best_run[2]
best_metrics.plot_cost_curves(ymin=0.0, ymax=1.0,logy=False)
best_metrics.plot_error_curves(ymin=0.0, ymax=0.1,logy=False)
best_metrics.print_latest_errors()
# -
# #### YOUR FINDINGS ...
# - A small learning rate takes a long time to converge to the optimum, but results in a less wiggly learning curve. A high learning rate reduces the time to reach convergence, increases the wiggly-ness of the learning curve, but could fail to find the optimum, since it is jumping around it and in the worst case possibly escape from it. The "best" learning rate can be defined as a tradeoff of speed for convergence and reaching the local/global optima.
#
# - The number of epochs influences the performance directly. If the number of epochs is too low, the training phase has not reached it's optimum. This problem can be solved by increasing the number of epochs. On the other hand, if the number of epochs is too high, from one moment in time, the accurracy does not increase anymore, but the network is still trying to learn. This problem can be solved on behalf of earling stopping. Convergence is defined as the accurracy level, where the accurracy does not improve much further with training over some more epochs. At this point, the accurracy is vibrating slightly above the optima.
# Important note: If early stopping is used, one should know, that the network could possibly improve it's accurracy even further, if the number of epochs is extremly high, so that the double descend effect could occur.
#
# - If the mini-batch size is too small, e.g. 1, then the training could look like a random walk. If the mini-batch size is too large, the mini-batch approaches the size of the dataset. This is not desired, since the mini-batch approach allows to split the dataset into smaller chunks, which fit into the memory at once. Once again, the problem is to find the "sweet spot". According to Yann LeCun: Friends don't let friends use minibatches larger than 32. This batch-size ensures some randomness involved to escape local minima and is not too large to not fit into the memory anymore.
# ### 1c Compute the Error Rates for the individual Digits
#
# Now compute and print (or plot) the rate of misclassified images per digit (i.e. How many digits with label k are not been classified as label k).
#
# Which one seems most difficult to classify?
#
# Plot a few images of the wrongly classified images that have the label of the class that is most difficult to classify.
# +
## Reset using best hyper params
W,b = initialize_params(28*28, 10)
### START YOUR CODE ###
alpha = best_learning_rate
nepochs = best_nepochs
batchsize = best_batchsize
params, metrics = optimize(W, b, x_train, y_train, x_test, y_test, nepochs=nepochs, alpha=alpha, batchsize=batchsize, debug=False)
### END YOUR CODE ###
metrics.plot_cost_curves(ymin=0.0, ymax=1.0,logy=False)
metrics.plot_error_curves(ymin=0.0, ymax=0.1,logy=False)
metrics.print_latest_errors()
# +
W = params['W']
b = params['b']
y_pred = predict(W,b,x_test)
### START YOUR CODE ###
Ypredargmax = np.argmax(y_pred, axis=0)
Ypredargmax = Ypredargmax[np.newaxis, :]
#print(Ypredargmax)
#print("y-test: ", y_test)
digits_error_rate = []
for i in range(10):
n_cor = np.count_nonzero((Ypredargmax == y_test) & (y_test == i))
n_fal = np.count_nonzero((Ypredargmax != y_test) & (y_test == i))
er = n_fal/(n_cor+n_fal)
digits_error_rate.append((i, n_cor, n_fal, er))
digits_error_rate = sorted(digits_error_rate, key=lambda x: x[3])
print("Digit Correct Wrong Error Rate")
for el1, el2, el3, el4 in digits_error_rate:
print("{:<6}{:<10}{:<10}{}".format(el1, el2, el3, el4))
most_difficult = digits_error_rate[-1]
print("The most difficult digit to classify is: ", most_difficult[0]," with error rate: ", most_difficult[3])
### END YOUR CODE ###
# +
from sklearn.metrics import confusion_matrix, classification_report
labels = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
cm = confusion_matrix(y_test[0], Ypredargmax[0], labels)
print(cm)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion matrix of the classifier')
fig.colorbar(cax)
plt.xticks(np.arange(0, len(labels), 1))
plt.xlabel('Predicted')
plt.yticks(np.arange(0, len(labels), 1))
plt.ylabel('True')
plt.show()
report = classification_report(y_test[0], Ypredargmax[0])
print(report)
# -
# __Plot some misclassified of the most difficult class:__
# +
### START YOUR CODE ###
most_idx = np.where((Ypredargmax != y_test) & (y_test == most_difficult[0]))[1]
print("Most difficult wrongly classified: ", len(most_idx))
plot_digits(x_test, y_test, most_idx[0:20], shape)
### END YOUR CODE ###
# -
# ### 1d Analyse Wrongly Classified Images
#
# For the given best choice of hyper-parameters explore the mis-classified images.
# Select the images the model was wrong and most uncertain with and characterize which digits were most often confused and why.
# For plotting and inspecting, you can use the `plot_digits`-function defined above.
# +
### START YOUR CODE ###
wrong_idx = np.where(Ypredargmax != y_test)[1]
print("Mis-classified: ", len(wrong_idx))
most_uncertain_idx = np.argsort(y_pred[:,wrong_idx].max(axis=0))[:10]
plot_digits(x_test[:,wrong_idx], Ypredargmax[:,wrong_idx], most_uncertain_idx, shape)
### END YOUR CODE ###
# -
# ### Plot the Trained Weights as Image
#
# The following cell allows you to plot the trained weights as images and the trained bias for the 10 digits. This helps to understand what the given model actually is doing. For larger (deeper) models, this won't be that easy any more.
#
# __QUESTION:__
# * What could you tell about the predictions made by the model if one of the bias terms would be much larger than all the others (e.g. $b_5=10$ while $b_k\in[-0.1,0.1]$ for $k\ne5$)?
#
# * What could you tell about the predictions made by the model if one of the bias terms would be much smaller than all the others (e.g. $b_5=-10$ while $b_k\in[-0.1,0.1]$ for $k\ne5$)?
# __ANSWER:__
#
# What could you tell about the predictions made by the model if one of the bias terms would be much larger than all the others (e.g. 𝑏5=10 while 𝑏𝑘∈[−0.1,0.1] for 𝑘≠5 )?
# - The feature/pixel, which is predicted by predict(w5*x+b5), is considered as a much more important feature/pixel than all others in the beginning. If the training is too short, this mismatch cannot be equalized. If the training is running for a very long time, the initially very high bias for b5 should decrease significantly. Nevertheless, it is not in our interest to mark one specific pixel as much more important than all other pixels in the MNIST image recognition application.
#
#
#
# What could you tell about the predictions made by the model if one of the bias terms would be much smaller than all the others (e.g. 𝑏5=−10 while 𝑏𝑘∈[−0.1,0.1] for 𝑘≠5 )?
# - The feature/pixel, which is predicted by predict(w5*x+b5), is considered as a much less important feature/pixel than all others in the beginning. If the training is too short, this mismatch cannot be equalized. If the training is running for a very long time, the initially very low bias for b5 should increase significantly. Nevertheless, it is not in our interest to mark one specific pixel as much less important than all other pixels in the MNIST image recognition application, if it's not a dead pixel.
# +
weights = params['W']
biases = params['b']
cols = 5
rows = 2
plt.figure(figsize=(20,4*rows))
for i in range(10):
plt.subplot(rows, cols, i+1)
plt.imshow(np.reshape(weights[i], (28,28)), cmap=plt.cm.gray)
plt.title('Digit %i'%i, fontsize = 12)
plt.figure(figsize=(20,4))
plt.plot(range(10), [biases[i] for i in range(10)], '+')
# -
# ### 1f Analyse Weights Initialisation
#
# Implement and compare the weights initialisation strategies 1.-3.
#
# __QUESTION:__ Are there significant differences in the learning, the hyper parameter settings needed, the resulting error rates (and misclassified digits) for the different initialisation strategies?
# Answer:
# - Random initialization of the weights increases convergence speed of the training.
# - In general, differently initialized hyperparameters result in different solutions (Local optimas of tuned hyperparameters) -> One cannot ensure, that we have found the global optima.
# - The softmax function provides a nice gradient at around 0. If the value is much higher or much lower, the gradient approaches 0. This slows the learning progress down significantly. (Vanishing gradient)
# +
def initialize_params2(nx, ny):
"""
This function provides initialized parameters: a weights matrix and a bias vector.
Argument:
nx -- number of input features
ny -- number of output dimensions (number of different labels)
Returns:
w -- initialized weights matrix of shape (ny,nx)
b -- initialized bias vector of shape (ny,1)
"""
### START YOUR CODE ###
# initialize weights (enable one line only!)
# weights generated as independent standard normal random numbers (mean zero, standard deviation 1)
w = np.random.randn(ny,nx)
# initialize biases to zero
b = np.zeros((ny,1))
### END YOUR CODE ###
return w, b
## Reset using best hyper params
W,b = initialize_params2(28*28, 10)
### START YOUR CODE ###
alpha = best_learning_rate
nepochs = best_nepochs
batchsize = best_batchsize
params, metrics = optimize(W, b, x_train, y_train, x_test, y_test, nepochs=nepochs, alpha=alpha, batchsize=batchsize, debug=False)
### END YOUR CODE ###
metrics.plot_cost_curves(ymin=0.0, ymax=1.0,logy=False)
metrics.plot_error_curves(ymin=0.0, ymax=0.1,logy=False)
metrics.print_latest_errors()
W = params['W']
b = params['b']
y_pred = predict(W,b,x_test)
Ypredargmax = np.argmax(y_pred, axis=0)
Ypredargmax = Ypredargmax[np.newaxis, :]
#print(Ypredargmax)
#print("y-test: ", y_test)
digits_error_rate = []
for i in range(10):
n_cor = np.count_nonzero((Ypredargmax == y_test) & (y_test == i))
n_fal = np.count_nonzero((Ypredargmax != y_test) & (y_test == i))
er = n_fal/(n_cor+n_fal)
digits_error_rate.append((i, n_cor, n_fal, er))
digits_error_rate = sorted(digits_error_rate, key=lambda x: x[3])
print("Digit Correct Wrong Error Rate")
for el1, el2, el3, el4 in digits_error_rate:
print("{:<6}{:<10}{:<10}{}".format(el1, el2, el3, el4))
most_difficult = digits_error_rate[-1]
print("The most difficult digit to classify is: ", most_difficult[0]," with error rate: ", most_difficult[3])
labels = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
cm = confusion_matrix(y_test[0], Ypredargmax[0], labels)
print(cm)
# +
def initialize_params3(nx, ny):
"""
This function provides initialized parameters: a weights matrix and a bias vector.
Argument:
nx -- number of input features
ny -- number of output dimensions (number of different labels)
Returns:
w -- initialized weights matrix of shape (ny,nx)
b -- initialized bias vector of shape (ny,1)
"""
### START YOUR CODE ###
# initialize weights (enable one line only!)
# weights generated as independent normal random numbers with mean zero and standard deviation properly scaled (divided by 𝑛⎯⎯√)
w = np.random.normal(0,1/np.sqrt(nx),(ny,nx))
# initialize biases to zero
b = np.zeros((ny,1))
### END YOUR CODE ###
return w, b
## Reset using best hyper params
W,b = initialize_params3(28*28, 10)
### START YOUR CODE ###
alpha = best_learning_rate
nepochs = best_nepochs
batchsize = best_batchsize
params, metrics = optimize(W, b, x_train, y_train, x_test, y_test, nepochs=nepochs, alpha=alpha, batchsize=batchsize, debug=False)
### END YOUR CODE ###
metrics.plot_cost_curves(ymin=0.0, ymax=1.0,logy=False)
metrics.plot_error_curves(ymin=0.0, ymax=0.1,logy=False)
metrics.print_latest_errors()
W = params['W']
b = params['b']
y_pred = predict(W,b,x_test)
Ypredargmax = np.argmax(y_pred, axis=0)
Ypredargmax = Ypredargmax[np.newaxis, :]
#print(Ypredargmax)
#print("y-test: ", y_test)
digits_error_rate = []
for i in range(10):
n_cor = np.count_nonzero((Ypredargmax == y_test) & (y_test == i))
n_fal = np.count_nonzero((Ypredargmax != y_test) & (y_test == i))
er = n_fal/(n_cor+n_fal)
digits_error_rate.append((i, n_cor, n_fal, er))
digits_error_rate = sorted(digits_error_rate, key=lambda x: x[3])
print("Digit Correct Wrong Error Rate")
for el1, el2, el3, el4 in digits_error_rate:
print("{:<6}{:<10}{:<10}{}".format(el1, el2, el3, el4))
most_difficult = digits_error_rate[-1]
print("The most difficult digit to classify is: ", most_difficult[0]," with error rate: ", most_difficult[3])
labels = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
cm = confusion_matrix(y_test[0], Ypredargmax[0], labels)
print(cm)
# -
# Weights initialization using independent normal random numbers with mean zero and standard deviation properly scaled also improves the model. Digit 8 is in this case the most difficult to classify and can be confused with digits 3, 5, and 9.
| exercises/exercise3/MNIST_classifier_softmax_stud.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pylab as plt
from astropy.io import fits
import numpy as np
import pandas as pd
import os
import scipy.interpolate
from scipy.interpolate import splev, splrep
# +
star = 'GJ251'
Wave_Path = '/mnt_home/malikb/code/keck_rwav.fits'
Flux_Path = '/mnt_home/malikb/code/rj237.279.fits'
wl_file = fits.open(Wave_Path)
fl_file = fits.open(Flux_Path)
wl_image = wl_file[0].data
fl_image = fl_file[0].data
# -
wl_image
wl_image[11,36]
wl_image[11,252]
# Magnesium
wl_image[2,2702]
wl_image[2,2211]
wl_image[2,1974]
# Mercury
wl_image[6,976]
wl_image[11:12]
print(4976.671387267135-4976.64794644686)
print(wl_image[0,1]-wl_image[0,0])
wl_image[-1,-1]
wl_image[-1,-2]
print(wl_image[11,252]-wl_image[11,37])
np.min(wl_image)
print(wl_image[-1,-1]/(wl_image[-1,-1]-wl_image[-1,-2]))
print(wl_image[0,0]/(wl_image[0,1]-wl_image[0,0]))
# +
plt.rcParams["figure.figsize"] = (7,5)
order = 11
wave = (wl_image[order])
flux = (fl_image[order])
plt.plot(wave,flux, "#01DF01")
plt.title('Raw Keck Spectrum of ' + star + ': Order ' + str(order))
plt.xlabel('Wavelength')
plt.ylabel('Flux')
plt.show(np.median(fl_image, axis=0, keepdims=True))
# -
bias = np.median(wl_image[-30:])
print(bias)
plt.figure(figsize=(10,5))
wave_adjusted = wave - (0.1*bias)
flux_adjusted = flux - (0.05*bias)
plt.plot(wave_adjusted, flux_adjusted)
ymax = np.median(flux_adjusted)*1.3
plt.ylim(0,ymax)
plt.xlabel('Wavelength')
plt.ylabel('Flux')
# +
bin = 100
# this list will contain the indices corresponding to each of the 95th percentile flux values in each bin
indices = []
for i in np.arange((len(wave) - (bin)), step = bin):
flux_values = []
for j in np.arange(i, i + bin, step = 1):
value = flux[j]
flux_values = np.append(flux_values, value)
# find the 95th percentile flux value: we use 95 to get the maximum flux value in general
# but avoid issues with cosmic rays and other emission lines
flux_in_bin = np.percentile(flux_values, 95)
# find the closest value in the flux array to the 95th percentile value
absolute_difference_function = lambda list_value : abs(list_value - flux_in_bin)
flux_in_bin = min(flux_values.tolist(), key=absolute_difference_function)
index_in_bin = flux_values.tolist().index(flux_in_bin)
index = i + index_in_bin
indices = np.append(indices, index)
# these lists will contain the wavlength and flux values at each index in 'indices'
wave_values = []
fl_values = []
for index in indices:
ind = int(index)
wave_values = np.append(wave_values, wave[ind])
fl_values = np.append(fl_values, flux[ind])
plt.plot(wave, flux, label = 'Data')
plt.scatter(wave_values, fl_values, color = 'black', label = 'Flux Values in the 95th Percentile')
plt.title('Mapping out the Echelle Blaze Function Fit')
plt.xlabel('Wavelength [A]')
plt.ylabel('Flux')
plt.legend()
plt.show()
# -
spl = splrep(wave_values, fl_values, s = 500000)
flux_fit = splev(wave, spl)
plt.plot(wave, flux_fit)
plt.xlabel('Wavelength [A]')
plt.ylabel('Flux')
plt.title('Echelle Blaze Function Fit')
plt.show()
first_normalized_flux = flux / flux_fit
plt.plot(wave, first_normalized_flux, "#48D1CC")
plt.xlabel('Wavelength [A]')
plt.ylabel('Flux')
plt.title('Normalized Flux')
plt.show()
# +
flux98 = np.percentile(first_normalized_flux, 98)
normalized_flux = first_normalized_flux / flux98
plt.plot(wave, first_normalized_flux, label = 'Normalized Once')
plt.plot(wave, normalized_flux, label = 'Normalized Twice')
plt.legend()
plt.xlabel('Wavelength [A]')
plt.ylabel('Flux')
plt.title('Double Normalized Data')
plt.show()
| Malik/Keckwave.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] cell_id="00000-964c276a-8215-46a6-bc02-0334ff6767e5" deepnote_cell_type="markdown"
# #### A
# Te megoldásod: $\exists (F(x) \land N(x)) \land \exists (P(f, y) \Leftrightarrow P(n, y)) \land N(p)$
# + [markdown] cell_id="00001-982487b8-bba1-411b-80d6-6063c301ae79" deepnote_cell_type="markdown"
# Visszajelzés:
# - A kvantorok után közvetlenül szükség van egy kötött paraméterre, majd egy zárójelre, amelyen belülre kerül az összes arra a paraméterre vonatkozó állítás, tehát $\exists x (\textit{ide jönnek az állítások, amikben x szerepel})$, és ezt a paramétert konzisztensen kell használni.
# - Te a kvantorok után lehagytad a paramétert, a továbbiakban odaképzeljük.
# - A kifejezés első tagja (helyesen $\exists x (F(x) \land N(x))$) azt jelenti, hogy van olyan $x$, aki nő is, és férfi is. Ez a rész nincs benne az állításban és felesleges.
# - A második tagjával (helyesen $\exists y(P(f, y) \Leftrightarrow P(n, y))$) két tartalmi probléma van:
# - $f$ és $n$ helyett $k$-t és $d$-t kell írni, mert az állítás úgy szólt, hogy nekik van közös lányuk (ezek konstansok, mint az algebrában pl. a 2 és a 3, azokat sem lehetne kicserélni mondjuk 4-re és 5-re)
# - az ekvivalencia itt nem jó, mert az akkor is igaz, ha egyik állítás sem igaz külön-külön, helyette $\land$-t kell használni
# - Az utolsó tagnak ($N(p)$) az előző zárójelen belül, és $p$ helyett $y$ paraméterrel kell szerepelnie, hiszen a ugyanúgy az $\exists y$ hatáskörében van (*van olyan személy, aki $k$ és $d$ közös gyereke **és** nő.*)
# + [markdown] cell_id="00002-6c2d6f21-c13e-4018-bce1-fb81168bc86b" deepnote_cell_type="markdown"
# #### B
# Te megoldásod: $\nexists (F(x) \land P(f, y)) \land \exists (N(x) \land P(n, z))$
# + [markdown] cell_id="00003-4c69ccd5-7a72-4838-b987-7c90784d3088" deepnote_cell_type="markdown"
# Visszajelzés:
# - Megint lehagytad a kvantorok után a paramétert.
# - Az $y$-hoz és $z$-hez is kell egy $\exists$ kvantor, mert különben nyitott marad a formula, tehát az igazságértéke attól függ, mit helyettesítünk $y$ és $z$ helyére.
# - Ezeknek a kijavítása után a kifejezés azt jelentené, hogy "*egy férfinak sincs gyereke, és van olyan nő, akinek van gyereke*", ami nem felel meg a B állításnak.
# + [markdown] cell_id="00004-86d7a52e-d115-4f56-8ac5-73a7309741e0" deepnote_cell_type="markdown"
# #### C
# Te megoldásod: $\exists (F(x) \land N(x)) \land \exists (P(f, y) \Leftrightarrow P(n, y)) \land \exists (F(x) \land N(x)) \land \exists (P(f, z) \Leftrightarrow P(n, z)) \land N(y) \land N(z)$
# + [markdown] cell_id="00005-efedb23f-5886-465d-92f7-ea8c074a0cc8" deepnote_cell_type="markdown"
# Visszajelzés:
# - A $\exists (F(x) \land N(x))$ továbbra is felesleges.
# - A kvantorok után itt is lehagytad a paramétereket ($\exists y (...)$ és $\exists z (...)$).
# - Az ekvivalencia helyett itt is $\land$-t kell használni az A-ban leírtak miatt.
# - $f$-hez és $n$-hez is kell a $\exists$ kvantor a B-ben leírtak miatt.
# - A végén $N(y)$-nek és $N(z)$-nek a $\exists y$, illetve $\exists z$-hez tartozó zárójelen belül kell szerepelniük, mert ezekre a paraméterekre vonatkozik az állítás.
# - A C állítás csak azt mondja ki, hogy *egynemű* testvérekről van szó, akik nem feltétlenül nők. Tehát itt az $\land$ helyett $\Leftrightarrow$-t kell használni, vagy kibontani, hogy *vagy mindkettő nő, vagy mindkettő férfi*.
# + [markdown] cell_id="00006-e4094447-8e7a-4d8a-a9d9-630a5cb39656" deepnote_cell_type="markdown"
# #### D
# Te megoldásod: $\forall (N(x) \land P(n, y))$
# + [markdown] cell_id="00007-10d0b116-e293-48b1-a16f-10b36597f972" deepnote_cell_type="markdown"
# Visszajelzés:
# - Továbbra is kell a paraméter a kvantor után ($\forall x (...)$).
# - A $P()$ predikátumban $x$-et felcserélted $n$-re.
# - $y$-hoz kell a $\exists$ kvantor a korábban leírtak miatt.
# - Ezeknek a kijavítása után pedig azt jelenti a kifejezés, hogy "*mindenki nő, és van gyereke*", ami nem felel meg a D állításnak.
# + [markdown] cell_id="00008-cb72879b-52c0-4005-a55f-7cba86fd6734" deepnote_cell_type="markdown"
# #### E
# Te megoldásod: $\exists (F(x) \land P(x, y) \land P(y, m) \land P(y, n)) F(m) \land N(n)$
# + [markdown] cell_id="00009-7f301c77-d7de-48a3-bd9b-92a7a8db4a69" deepnote_cell_type="markdown"
# Visszajelzés:
# - Az E állítás szerint $k$-nak vannak unokái. Itt $k$ egy konstans, mint az A-ban, tehát ezt a betűt kell használni, és nem kell hozzá kvantor (egy konkrét személyről mondunk valamit, lehetne *Kati* is).
# - Az $F(x)$ fölösleges és téves is, nem állítottuk, hogy $k$ férfi.
# - $y$-hoz, $m$-hez és $n$-hez is kell a $\exists$ kvantor.
# - A végén az $F(m) \land N(n)$ állítást a zárójelen belül, $\land$-sel össze kell kötni a kifejezés többi részével.
# + [markdown] tags=[] created_in_deepnote_cell=true deepnote_cell_type="markdown"
# <a style='text-decoration:none;line-height:16px;display:flex;color:#5B5B62;padding:10px;justify-content:end;' href='https://deepnote.com?utm_source=created-in-deepnote-cell&projectId=978e47b7-a961-4dca-a945-499e8b781a34' target="_blank">
# <img alt='Created in deepnote.com' style='display:inline;max-height:16px;margin:0px;margin-right:7.5px;' src='data:image/svg+xml;base64,<KEY> > </img>
# Created in <span style='font-weight:600;margin-left:4px;'>Deepnote</span></a>
| members/mbence/hanga_jav.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] colab_type="text" id="fhg6JOUipvkU"
# # Introduction to Numpy
#
# RECAP: You have previously learned how to manipulate data with the `pandas` library.
#
# Today, we will be learning how to perform calculations with another library known as __`numpy`.__ `numpy` allows you to do math on entire lists of data, all at the same time!
# + [markdown] colab_type="text" id="YoGzk6H1pvkV"
# Let's __create a list containing the numbers 0 through 5__ and assign it to a variable:
# + colab={} colab_type="code" id="s2EQD0wEpvkW"
# Create a list containing the numbers 0 through 4
# or convert the range to a list
# + colab={} colab_type="code" id="N1uGkMLj2us0"
# let's see what it looks like
# + [markdown] colab_type="text" id="OvSKPwFspvka"
# Using lists can be really useful, because you can store any set of data in the list.
#
# Now, lets __add 1 to each of the items in the list.__ Is there a simple way to do this?
# + colab={} colab_type="code" id="wHp0jtLr-eET"
data + 1
# + [markdown] colab_type="text" id="2OtvNh0F-eEX"
# hmm ... that didn't work! That is because __the `+` operator acts as a concatenation operator__ on lists, and we learned previously that we can only concatenate lists with lists, not with integers.
#
# Another way we could update our list is to add 1 to each of the items in the list individually. We can do this by **indexing to isolate each value** in the list one by one.
# + colab={} colab_type="code" id="qFchEEvkpvkb"
# Add 1 to each item in the array you created above
# print the list to see how it changed
# + [markdown] colab_type="text" id="9vRgbEEhpvkk"
# __This is very inconvenient with a list.__ It is more useful with a single number, where you can do something like this to add a number:
# + colab={} colab_type="code" id="jc5cRe99pvkl"
# create a single variable and add a number to it
# + [markdown] colab_type="text" id="ZPdA2YAmpvkp"
# Today we will be using __`numpy`__, which allows us to quickly and efficiently perform mathematical operations on entire lists (or, as they're called in __`numpy`__, *arrays*)!
# + [markdown] colab_type="text" id="kxiwgby4pvkq"
# First we will __import `numpy`.__ Remember when we imported `pandas`, we gave it the special nickname `pd`? We're also going to give `numpy` a nickname: __`np`__:
# + colab={} colab_type="code" id="uN4ydfz8pvkr"
# Load numpy
# + [markdown] colab_type="text" id="Mpvw3-NApvku"
# Now whenever we type __`np`, python will know we really mean `numpy`.__
#
# There is a ton of useful stuff we can do with `numpy`, so let's __redo the example above using `numpy` `arrays`__ instead of `lists`.
# -
# + colab={} colab_type="code" id="nenQbNPypvkv"
# create a numpy array containing the numbers 0 through 4
# you can print arrays just like lists
# + [markdown] colab_type="text" id="alY8TwmVpvk0"
# Numpy `arrays` act very similarly to regular python `lists`, __we can grab individual items using the same syntax:__
# + colab={} colab_type="code" id="kyP00fcRpvk2"
# Print any number from the numpy array you just created:
# + [markdown] colab_type="text" id="D5u5-neLpvk6"
# Numpy `arrays` also add a lot of useful features such as the ability to perform commands __*on all items in a list at the same time*.__
#
# We can demonstrate this by __adding a number to all of the items in the Numpy `array`__:
# + colab={} colab_type="code" id="J2RmzwCrpvk7"
# Add any number to the array we created above
# print the array
# + [markdown] colab_type="text" id="REWquUE2pvlN"
# See how much easier that was than manually changing each element of a list? We will be using `numpy` a lot to perform calculations on arrays of data. In the above example we used addition, but you can also perform any mathematical operation we've talked about with `numpy` arrays.
#
# In this lesson you learned how to:
# * Load `numpy` into Python.
# * Create an `array` with `numpy`.
# * Perform math with `numpy` arrays.
#
| Lessons/Lesson20_Numpy_Intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: deepTFM
# language: python
# name: deeptfm
# ---
# +
import os, glob
exp_set_dir = 'figs/mnistv17'
exp_times= []
count= 0
for exp_dir in glob.glob(f'{exp_set_dir}/*'):
if not os.path.isdir(exp_dir):continue
details_file_dir= f"{exp_dir}/details.txt"
if not os.path.isfile(details_file_dir):continue
with open(details_file_dir, 'r') as f:
data = f.read()
try:
train_loop_time = data.split('\n')[1]
val_loop_time = data.split('\n')[2]
train_time = float(train_loop_time.split(' ')[-2])
val_time = float(val_loop_time.split(' ')[-2])
exp_times += [[exp_dir.split('/')[-1], [train_time, val_time]]]
count+=1
except:
print(exp_dir, data)
print(f'total number of exps : {count}')
# -
1152//4
to_file= ''
for exp, times in sorted(exp_times, key= lambda x:x[1][0], reverse= True):
to_file+=exp+f'{"-"*(256- len(exp))} train time: {times[0]}\n'
# +
exp_set_name= exp_set_dir.replace('/', '_')
with open(f'./timing_{exp_set_name}.txt', 'w') as f:
f.write(to_file)
# -
| aim2/support_notebooks/analyze_timing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + code_folding=[0]
# RUN Main import block and TODO list
# TODO: see how uri calculated the ridges
# TODO: Perform Histogram equalization - start with it
# TODO:
# take integral from the Highest peak+-0.005 divide by integral of the entire graph
# This will be the peakness measure for the PSD ==> The desired ridge index
# TODO:
# take integral from the Highest peak+-0.005 divide by integral of the entire graph - it's the peakness measure for the PSD
# must select a peak above a min threshold in order to ignore noisy frequency
# must ignore peaks above a certain threshold in order to detect meaningful frequency
# run the PSD in moving windows every 200 px (deduced from the below PSD pointing to a freq of 1/0.02=50-> times 4= 200px)
# and medianf the result of the windows
# TODO:
# Another alternative: (with Yariv)
# Run PSD column by column - get the phase, freq, peakness and reconstruct an artificial ridge slice
# from this - reconstruct a "clean" artificial ridge image
# %matplotlib inline
import matplotlib.image as img
import matplotlib.pyplot as plt
import numpy as np
import urllib.request
import os
import shutil
import glob
from scipy import ndimage
from scipy import signal
#import cv2
from PIL import Image, ImageDraw, ImageFont
import mahotas as mh
from mahotas import polygon
# import pymorph as pm
import networkx as nx
from scipy import ndimage as nd
import skimage.transform as transform
import skimage.morphology as mp
import skimage.io as sio
import scipy.misc as sm
from skimage.filters import threshold_otsu, threshold_adaptive
from skimage.feature import hessian_matrix, hessian_matrix_eigvals
from skimage import exposure
from skimage import data, img_as_float
from sklearn.metrics import precision_recall_fscore_support
from bisect import bisect_left
import math
import warnings
import csv
import tensorflow as tf
from time import gmtime, strftime
# + code_folding=[0, 76, 82, 96, 105, 120, 138, 155, 389]
# RUN Utility functions
# One time init
# with open('results.csv', 'w') as csvfile:
# csvout = csv.writer(csvfile)
# csvout.writerow(["File", "Model", "Gap", "Slice_size", "Count", "Precision", "Recall", "F-score", "True Count", "Error Rate"])
#BASIC CROP FRAME
X_START = 1000
X_END = 6000
Y_START = 800
Y_END = 4300
BG_2_OBJ_RATIO = 0.91
CUBE_SIZE = 250
EDGE_GAP = 50
ROOT_FOLDER = "/home/il239838/files/"
# ROOT_FOLDER = "/Users/il239838/Downloads/private/Thesis/Papyrus/PX303/files/"
LEARNING_RATE = 0.001
BATCHES = 1000
BATCH_SIZE = 50
BREAK_VAL = 1000
# Simple crop by x/y ranges
def crop(image, ymin, ymax, xmin, xmax):
return image[ymin:ymax, xmin:xmax]
# returns a logical matrix of values beyond a threshld
def thresholded(image, val):
return np.logical_and(*[image[...] > val for t in enumerate([0, 0])])
def find_min_max_without_orphand_pixels(nonzero_dimension, crop_filter=20):
sorted = np.sort(nonzero_dimension)
prev=-1
min_val = sorted[0]
for i, x in enumerate(sorted[:100]):
if prev >= 0 and x - prev > crop_filter:
min_val = x
prev = x
prev=-1
max_val = sorted[-1]
for i, x in enumerate(sorted[-100:]):
if prev >= 0 and x - prev > crop_filter:
max_val = prev
break
prev = x
return min_val, max_val
def calc_min_max_coordinates(image, crop_val=50):
temp = thresholded(image, crop_val)
temp = temp * 1
temp = np.nonzero(temp)
ymin, ymax = find_min_max_without_orphand_pixels(temp[0])
xmin,xmax = find_min_max_without_orphand_pixels(temp[1])
return ymin, ymax, xmin, xmax
def calc_min_max_coordinates_dynamic(image, cutoff=1):
temp = exposure.equalize_adapthist(image, clip_limit=0.03)
flat = np.sort(np.matrix.getA1(temp))
sum_all = np.sum(flat)
index = np.argmin(flat.cumsum() < (sum_all * cutoff))
temp = thresholded(temp, flat[index])
temp = temp * 1
temp = np.nonzero(temp)
ymin, ymax = find_min_max_without_orphand_pixels(temp[0])
xmin,xmax = find_min_max_without_orphand_pixels(temp[1])
return ymin, ymax, xmin, xmax
# initial static crop and a seondary dynamic crop based on signal2noise ratio
def crop_full_scan(image, x_start, x_end, y_start, y_end):
temp = crop(image, y_start, y_end, x_start, x_end)
ymin, ymax, xmin, xmax = calc_min_max_coordinates_dynamic(temp, cutoff=BG_2_OBJ_RATIO)
temp = crop(image, y_start+ymin, y_start+ymax, x_start+xmin, x_start+xmax)
return temp
def crop_thresholded(image):
temp = crop(image, 0, image.shape[0]-1, 0, image.shape[1]-1)
ymin, ymax, xmin, xmax = calc_min_max_coordinates(temp)
temp = crop(image, ymin, ymax, xmin, xmax)
return temp
def read_and_crop(image_name, x_start=X_START, x_end=X_END, y_start=Y_START, y_end=Y_END):
if "il239838" in os.getcwd():
image = img.imread(ROOT_FOLDER + image_name)
else:
f = urllib.request.urlopen("https://dl.dropboxusercontent.com/s/31b96942qdcn73k/" + image_name)
image = img.imread(f, format='jpeg')
# Smart-crop the image to get rid of all the noise and redundant area
# return crop_full_scan(image)
cropped = crop_full_scan(image, x_start, x_end, y_start, y_end)
return exposure.equalize_adapthist(cropped, clip_limit=0.03)
# TODO: fix performance!!! http://scikit-image.org/docs/dev/user_guide/tutorial_parallelization.html
def combine_3_images_to_RGB(red, green, blue):
new_image = np.empty((blue.shape[0],blue.shape[1],3))
for x in range(0, blue.shape[0]):
for y in range(0, blue.shape[1]):
new_image[x,y,0] = red[x,y]
new_image[x,y,1] = green[x,y]
new_image[x,y,2] = blue[x,y]
return new_image
def slice_image_left_edge(original, width=200, rotate=0):
rot = ndimage.rotate(original, rotate)
# Slice the left slice of the so-called "blue" image
left_edge_orig = crop(rot, 1, 1400, 1, width)
left_edge_orig = crop_thresholded(left_edge_orig)
# Copy to a new array so we don't thrash the origin
left_edge = np.empty_like (left_edge_orig)
np.copyto(left_edge, left_edge_orig)
# Zero down low level "noise" values
low_values_indices = left_edge < 30 # Where values are low
left_edge[low_values_indices] = 0 # All low values set to 0
return left_edge
def get_best_angle_rotation(original, crop=True, width=200):
min_var = 99999999999
best_angle = -10
for x in range(-5,5):
if crop:
rot_edge = slice_image_left_edge(original, width, x)
else:
rot_edge = ndimage.rotate(original, x)
left_var = np.var(rot_edge, axis=1)
# left_var = np.apply_along_axis(lambda v: np.var(v[np.nonzero(v)]), 1, rot_edge)
var_sum = np.sum(left_var)
if (var_sum < min_var):
min_var = var_sum
best_angle = x
print ("best_angle="+str(best_angle))
return best_angle
# import pdb; pdb.set_trace()
def calc_neighbors(slice_map, col, row):
# import pdb; pdb.set_trace()
if ((col-1, row) in slice_map and slice_map[(col-1, row)] != None):
slice_map[(col, row)]["left"] = slice_map[(col-1, row)]
slice_map[(col-1, row)]["right"] = slice_map[(col, row)]
if ((col+1, row) in slice_map and slice_map[(col+1, row)] != None):
slice_map[(col, row)]["right"] = slice_map[(col+1, row)]
slice_map[(col+1, row)]["left"] = slice_map[(col, row)]
if ((col, row-1) in slice_map and slice_map[(col, row-1)] != None):
slice_map[(col, row)]["top"] = slice_map[(col, row-1)]
slice_map[(col, row-1)]["bottom"] = slice_map[(col, row)]
if ((col, row+1) in slice_map and slice_map[(col, row+1)] != None):
slice_map[(col, row)]["bottom"] = slice_map[(col, row+1)]
slice_map[(col, row+1)]["top"] = slice_map[(col, row)]
def VAL_create_cube(name, raw, x, y):
cube = {}
cube["cube"] = raw
cube["file"] = name
# if name.find('P') == 0:
# cube["index"] = int(name[name.find('P')+1:name.find('P')+4]) * 1000 + int(name[name.find('Fg')+2:name.find('Fg')+5])
# else:
# print("Found a ZERO index cube with the name:"+name)
cube["index"] = 0
cube["top_row"] = x
cube["left_col"] = y
cube["right_col"] = y + CUBE_SIZE
return cube
ZERO_CUBE = VAL_create_cube("ZERO", np.zeros((CUBE_SIZE, CUBE_SIZE), dtype=np.int), -1, -2)
# slice an image to cubes with 250X250 pixel size
def VAL_slice_TEAR_to_static_slices(name, cropped_original):
structure = {}
# cropped_original = cropped_original / 256 # divide by 256 to "normalize" between 0 and 1
# import pdb; pdb.set_trace()
x, y = cropped_original["cut"].shape
# print (x,y)
n = 0
# see n offset to see offset in pixels on the x axis == rows. every n equals CUBE_SIZE
while ((n + 1) * CUBE_SIZE < x):
# mark the piece as narrow so the first would be counted as lastt too
narrow = True if ((CUBE_SIZE + (4 * EDGE_GAP)) > y) else False
# cut a cube of 250X250 at the FIRST column
start_row_px = int(np.round(n * CUBE_SIZE, -1))
end_row_px = int(np.round((n + 1) * CUBE_SIZE, -1))
cube = (crop(cropped_original["cut"], start_row_px, end_row_px, EDGE_GAP, CUBE_SIZE + EDGE_GAP))
# keep only cubes for which half of the pixels have some "color"
if np.median(cube) > 0.2: # aligned with the normalization 0.2 correlates to 50
# keep the cube
new_cube = VAL_create_cube(name, cube, start_row_px, EDGE_GAP)
new_cube["col"] = 0 # marks that the cube is on the first col of the piece
new_cube["row"] = n
new_cube["last"] = narrow # marks that the cube is on the last col of the piece
new_cube["orig"] = cropped_original
new_cube["col_px_left"] = cropped_original["col_px"] + EDGE_GAP
new_cube["col_px_right"] = cropped_original["col_px"] + CUBE_SIZE + EDGE_GAP
new_cube["row_px_top"] = cropped_original["row_px"] + start_row_px
new_cube["row_px_bottom"] = cropped_original["row_px"] + end_row_px
structure[(0, n)] = new_cube
# cut a cube of 250X250 at the LAST column
cube = (crop(cropped_original["cut"], start_row_px, end_row_px, y - CUBE_SIZE - EDGE_GAP, y - EDGE_GAP))
# keep only cubes for which half of the pixels have some "color"
# aligned with the normalization 0.2 correlates to 50
if np.median(cube) > 0.2:
# keep the cube
new_cube = VAL_create_cube(name, cube, start_row_px, y - CUBE_SIZE - EDGE_GAP)
new_cube["col"] = 1 # marks that the cube is on the last col of the piece
new_cube["row"] = n
new_cube["last"] = not narrow # like col - marks that the cube is on the last col of the piece
new_cube["orig"] = cropped_original
new_cube["col_px_left"] = cropped_original["col_px"] + y - CUBE_SIZE - EDGE_GAP
new_cube["col_px_right"] = cropped_original["col_px"] + y - EDGE_GAP
new_cube["row_px_top"] = cropped_original["row_px"] + start_row_px
new_cube["row_px_bottom"] = cropped_original["row_px"] + end_row_px
structure[(1, n)] = new_cube
# m = 0
# # every 250 pixels on the y axis == cols
# while ((m + 1) * CUBE_SIZE < y):
# if ((m == 0) or ((m + 2) * CUBE_SIZE >= y)): # Only keep the left and right edges of the piece for matching!!
# # cut a cube of 250X250
# cube = crop(cropped_original["cut"], n * CUBE_SIZE, (n + 1) * CUBE_SIZE, m * CUBE_SIZE, (m + 1) * CUBE_SIZE)
# # keep only cubes for which half of the pixels have some "color"
# # print(np.median(cube))
# if np.median(cube) > 0.2: # aligned with the normalization 0.2 correlates to 50
# # keep the cube
# new_cube = VAL_create_cube(name, cube, n * CUBE_SIZE, m * CUBE_SIZE)
# new_cube["col"] = m
# new_cube["row"] = n
# new_cube["orig"] = cropped_original
# new_cube["col_px_left"] = cropped_original["col_px"] + m * CUBE_SIZE
# new_cube["col_px_right"] = cropped_original["col_px"] + (m + 1) * CUBE_SIZE
# new_cube["row_px_top"] = cropped_original["row_px"] + n * CUBE_SIZE
# new_cube["row_px_bottom"] = cropped_original["row_px"] + (n + 1) * CUBE_SIZE
# if ((m + 2) * CUBE_SIZE >= y):
# new_cube["last"] = True
# else:
# new_cube["last"] = False
# structure[(m, n)] = new_cube
# m += 1
n += 0.2 # currently set to jump in 50 px offset
# this loop has to be performed only after we've established all the None cubes
for cube in structure.values():
# set the reference to neighbor cubes
if cube != None:
calc_neighbors(structure, cube["col"], cube["row"])
# return the data structure with all the cubes and the counters of the rows and columns
return structure.values()
def pad_above(original, above, amount):
res = np.insert(original["cube"], np.zeros(amount), above["cube"][-amount:], axis=0)
res = np.delete(res, np.arange(CUBE_SIZE,CUBE_SIZE+amount), axis=0)
cube = VAL_create_cube(original["file"], res, original["top_row"] - amount, original["left_col"])
cube["col_px_left"] = original["col_px_left"]
cube["col_px_right"] = original["col_px_right"]
cube["row_px_top"] = original["row_px_top"] - amount
cube["row_px_bottom"] = original["row_px_bottom"] - amount
return cube
def pad_below(original, below, amount):
res = np.insert(original["cube"], np.full(amount, CUBE_SIZE), below["cube"][:amount], axis=0)
res = np.delete(res, np.arange(0, amount), axis=0)
cube = VAL_create_cube(original["file"], res, original["top_row"] + amount, original["left_col"])
cube["col_px_left"] = original["col_px_left"]
cube["col_px_right"] = original["col_px_right"]
cube["row_px_top"] = original["row_px_top"] + amount
cube["row_px_bottom"] = original["row_px_bottom"] + amount
return cube
def pad_left(original, left, amount):
res = np.insert(original["cube"], np.zeros(amount, dtype=int), left["cube"][:,-amount:], axis=1)
res = np.delete(res, np.arange(CUBE_SIZE, CUBE_SIZE+amount), axis=1)
cube = VAL_create_cube(original["file"], res, original["top_row"], original["left_col"] - amount)
cube["col_px_left"] = original["col_px_left"] - amount
cube["col_px_right"] = original["col_px_right"] - amount
cube["row_px_top"] = original["row_px_top"]
cube["row_px_bottom"] = original["row_px_bottom"]
return cube
def pad_right(original, right, amount):
res = np.insert(original["cube"], [CUBE_SIZE], right["cube"][:,:amount], axis=1)
res = np.delete(res, np.arange(0, amount), axis=1)
cube = VAL_create_cube(original["file"], res, original["top_row"], original["left_col"] + amount)
cube["col_px_left"] = original["col_px_left"] + amount
cube["col_px_right"] = original["col_px_right"] + amount
cube["row_px_top"] = original["row_px_top"]
cube["row_px_bottom"] = original["row_px_bottom"]
return cube
# "Shave" the right edge of the cube with <gap> pixels and pad with zeros on the left
def shave_right(original, amount):
return pad_left(original, ZERO_CUBE, amount)
# "Shave" the left edge of the cube with <gap> pixels and pad with zeros on the right
def shave_left(original, amount):
return pad_right(original, ZERO_CUBE, amount)
# concatenate cubes
def concatenate_cubes(left, right, slice_size):
con = np.concatenate((left["cube"][:,-slice_size:], right["cube"][:,:slice_size]), axis=1)
x_delta = right["top_row"] - left["top_row"]
y_delta = right["left_col"] - left["right_col"]
return con, x_delta, y_delta
# concatenate cubes
def VAL_concatenate_cubes(left, right, slice_size):
right_img = right["cube"]
# next block is not relevant for training ...
# # if the left cube is matched to another left cube (or right cube to another right cube) then rotate the right
# # cube by 180 so we try to match it upside down, covering the option that the cube was pictured rotated
# if ((left["col"] == 0 and right["col"] == 0) or (left["col"] != 0 and right["col"] != 0)):
# right_img = np.rot90(right["cube"], 2);
con = np.concatenate((left["cube"][:,-slice_size:], right_img[:,:slice_size]), axis=1)
# next block calculates distance based on the distance between left's right-top corner and right's left-top corner
# x_delta = right["top_row"] - left["top_row"]
# y_delta = right["left_col"] - left["right_col"]
# next block calculates the distance between the centers of cubes, accounting for test set's possibility of reverse slices (left instead of right and vice versa)
x_delta = right["row_px_top"] - left["row_px_top"] # equivalent to distance between vertical centers
y_delta = (right["col_px_left"] + (slice_size / 2)) - (left["col_px_right"] - (slice_size / 2)) # measuring the distance between horizontal centers of the slices
return con, x_delta, y_delta, left["file"], right["file"]
# concatenate cubes while artificially creating a gap between them. Pad the other end of the cube with zeros
def concatenate_cubes_zero_pad_gaps(left_orig, right_orig, gap):
left = left_orig if gap == 0 else shave_right(left_orig, gap)
right = right_orig if gap == 0 else shave_left(right_orig, gap)
return concatenate_cubes(left, right)
# concatenate cubes while artificially creating a gap between them. Pad the other end of the cobe with the nearby
# continuation of the cubes
def concatenate_cubes_with_gap(left_orig, right_orig, gap, left_pad, right_pad, slice_size):
# import pdb; pdb.set_trace()
left = left_orig if gap == 0 else pad_left(left_orig, left_pad, gap)
right = right_orig if gap == 0 else pad_right(right_orig, right_pad, gap)
return concatenate_cubes(left, right, slice_size)
# convert the data structure of cubes into a train set of 2 arrays of images and labels
# each image is a concatanation of 2 images from the original cubes set, covering all combinations of images
# effectively creating Nx(N-1) images
def VAL_build_train_set_for_euclidean_distance(cubes, slice_size, folder):
# clean folder before starting
for root, dirs, files in os.walk(folder):
for f in files:
os.unlink(os.path.join(root, f))
#import pdb; pdb.set_trace()
warnings.filterwarnings("ignore")
train_imgs = []
train_x_delta = []
train_y_delta = []
train_left_obj = []
train_right_obj = []
# iterate over all cubes
for curr in cubes:
# iterate over the others (effectively n^2)
for adj in cubes:
if (adj["file"] != curr["file"]): # no need to test against self CURRENTLY checking from directions!!!
#import pdb; pdb.set_trace()
# append the adjacent image to the current image
conc, x_delta, y_delta, x_file, y_file = VAL_concatenate_cubes(curr, adj, slice_size)
output = folder+x_file+"_"+str(curr["top_row"])+"_"+str(curr["left_col"])+"---"+y_file+"_"+str(adj["top_row"])+"_"+str(adj["left_col"])
np.save(output, conc)
train_imgs.append(output)
train_x_delta.append(x_delta)
train_y_delta.append(y_delta)
train_left_obj.append(curr)
train_right_obj.append(adj)
warnings.filterwarnings("default")
return train_imgs, train_x_delta, train_y_delta, train_left_obj, train_right_obj
# convert the data structure of cubes into a train set of 2 arrays of images and labels
# each image is a concatanation of 2 images from the original cubes set, covering all combinations of images
# effectively creating Nx(N-1) images
def ORIG_build_train_set(cubes, gap):
# import pdb; pdb.set_trace()
warnings.filterwarnings("ignore")
train_imgs = []
train_lbls = []
train_x_delta = []
train_y_delta = []
# iterate over the rows and cols, essentially going over the grid of sliced cubes
for row in range(0, rows):
for col in range(0, cols):
# if this cube exists (could have been removed previously due to lack of data)
if (cubes[(col, row)] != None):
# for each "current" image in the iteration
curr = cubes[(col, row)]
# iterate over all the cubes to find all the "other" (adjacent) cubes
for adj_row in range(0, rows):
for adj_col in range(0, cols):
if (adj_row != row or adj_col != col):
if (cubes[(adj_col, adj_row)] != None):
adj = cubes[(adj_col, adj_row)]
# append the adjacent image to the current image
# pass the filling cubes on the right and left to pad against the gap
if (gap == 0 or ("left" in curr.keys() and "right" in adj.keys())):
if (gap == 0):
conc, x_delta, y_delta = concatenate_cubes(curr, adj, slice_size)
else:
conc, x_delta, y_delta = concatenate_cubes_with_gap(curr, adj, gap, curr["left"], adj["right"], slice_size)
train_imgs.append(conc)
train_x_delta.append(x_delta)
train_y_delta.append(y_delta)
# if the adj image is on the same row and on the right of the curr image - it will be marked as match
if (adj_row == row and adj_col == (col + 1)):
# mark the image as matched
train_lbls.append([0,1])
# need to enrich the set with a few more tru positive samples - so we offset
# the matched images up ad down a few times and create more matches
if ("top" in curr.keys() and "top"in adj.keys()):
for i in range(5, 101, 5):
curr1 = pad_above(curr, curr["top"],i)
adj1 = pad_above(adj, adj["top"],i)
if (gap == 0 or ("left" in curr.keys() and "right" in adj.keys() and "top" in curr["left"].keys() and "top"in curr["right"].keys())):
if (gap == 0):
conc, x_delta, y_delta = concatenate_cubes(curr1, adj1, slice_size)
else:
curr1Left = pad_above(curr["left"], curr["left"]["top"], i) # FIXIT?
adj1Right = pad_above(adj["right"], curr["right"]["top"], i) # FIXIT?
conc, x_delta, y_delta = concatenate_cubes_with_gap(curr1, adj1, gap, curr1Left, adj1Right, slice_size)
train_imgs.append(conc)
train_x_delta.append(x_delta)
train_y_delta.append(y_delta)
# mark the image as matched
train_lbls.append([0,1])
if ("bottom" in curr.keys() and "bottom"in adj.keys()):
for i in range(5, 101, 5):
curr1 = pad_below(curr, curr["bottom"],i)
adj1 = pad_below(adj, adj["bottom"],i)
if (gap == 0 or ("left" in curr.keys() and "right" in adj.keys() and "bottom" in curr["left"].keys() and "bottom"in curr["right"].keys())):
if (gap == 0):
conc, x_delta, y_delta = concatenate_cubes(curr1, adj1, slice_size)
else:
curr1Left = pad_below(curr["left"], curr["left"]["bottom"], i) # FIXIT?
adj1Right = pad_below(adj["right"], curr["right"]["bottom"], i) # FIXIT?
conc, x_delta, y_delta = concatenate_cubes_with_gap(curr1, adj1, gap, curr1Left, adj1Right, slice_size)
train_imgs.append(conc)
train_x_delta.append(x_delta)
train_y_delta.append(y_delta)
# mark the image as matched
train_lbls.append([0,1])
if ("left" in curr.keys()): # enough to check only the curr as the left of the adj is the curr
for i in range(5, 101, 5):
curr1 = pad_left(curr, curr["left"],i)
adj1 = pad_left(adj, adj["left"],i) # essentially the curr
if (gap == 0 or ("left" in curr.keys() and "right" in adj.keys())):
if (gap == 0):
conc, x_delta, y_delta = concatenate_cubes(curr1, adj1, slice_size)
else:
curr1Left = pad_left(curr["left"], ZERO_CUBE, i) # FIXIT? + assuming the gap will not be more than 150
adj1Right = pad_left(adj["right"], ZERO_CUBE, i) # FIXIT? + assuming the gap will not be more than 150
conc, x_delta, y_delta = concatenate_cubes_with_gap(curr1, adj1, gap, curr1Left, adj1Right, slice_size)
train_imgs.append(conc)
train_x_delta.append(x_delta)
train_y_delta.append(y_delta)
# mark the image as matched
train_lbls.append([0,1])
if ("right" in adj.keys()): # enough to check only the adj as the right of the curr is the adj
for i in range(5, 101, 5):
curr1 = pad_right(curr, curr["right"],i) # essentially the adj
adj1 = pad_right(adj, adj["right"],i)
if (gap == 0 or ("left" in curr.keys() and "right" in adj.keys())):
if (gap == 0):
conc, x_delta, y_delta = concatenate_cubes(curr1, adj1, slice_size)
else:
curr1Left = pad_right(curr["left"], ZERO_CUBE, i) # FIXIT? + assuming the gap will not be more than 150
adj1Right = pad_right(adj["right"], ZERO_CUBE, i) # FIXIT? + assuming the gap will not be more than 150
conc, x_delta, y_delta = concatenate_cubes_with_gap(curr1, adj1, gap, curr1Left, adj1Right, slice_size)
train_imgs.append(conc)
train_x_delta.append(x_delta)
train_y_delta.append(y_delta)
# mark the image as matched
train_lbls.append([0,1])
else:
# mark the image as not matched
train_lbls.append([1,0])
warnings.filterwarnings("default")
return train_imgs, train_lbls, train_x_delta, train_y_delta
# + code_folding=[0, 2, 7]
# RUN Utility functions 2
SAVE_PNG=False
def save_img(path, img):
np.save(path, img)
if SAVE_PNG:
plt.imsave(path+".png", img, cmap=plt.cm.gray)
def VAL_add_tolerance_matches(slice_size, folder, train_imgs, train_lbls, train_x_delta,
train_y_delta, is_enriched, curr, adj, tolerance_factor=0):
# need to enhance the set with a few more true positive samples
# allowing some up and down tolerance
if ("top" in curr.keys()):
for i in range(0, tolerance_factor * 10, 10):
if i == 0:
continue
curr1 = pad_above(curr, curr["top"],i)
adj1 = adj
conc, x_delta, y_delta, x_file, y_file = VAL_concatenate_cubes(curr1, adj1, slice_size)
output = folder+"1_1="+x_file+"_"+str(curr1["top_row"])+"_"+str(curr1["left_col"])+"---"+y_file+"_"+str(adj1["top_row"])+"_"+str(adj1["left_col"])
# print(">>> MATCH >>>"+output)
save_img(output, conc)
# print(">>> >>> >>> SAVED")
train_imgs.append(output)
train_x_delta.append(x_delta)
train_y_delta.append(y_delta)
# mark the image as matched
train_lbls.append([0,1])
is_enriched.append(True)
if ("top" in adj.keys()):
for i in range(0, tolerance_factor * 10, 10):
if i == 0:
continue
curr1 = curr
adj1 = pad_above(adj, adj["top"],i)
conc, x_delta, y_delta, x_file, y_file = VAL_concatenate_cubes(curr1, adj1, slice_size)
output = folder+"1_1="+x_file+"_"+str(curr1["top_row"])+"_"+str(curr1["left_col"])+"---"+y_file+"_"+str(adj1["top_row"])+"_"+str(adj1["left_col"])
# print(">>> MATCH >>>"+output)
save_img(output, conc)
# print(">>> >>> >>> SAVED")
train_imgs.append(output)
train_x_delta.append(x_delta)
train_y_delta.append(y_delta)
# mark the image as matched
train_lbls.append([0,1])
is_enriched.append(True)
if ("bottom" in curr.keys()):
for i in range(0, tolerance_factor * 10, 10):
if i == 0:
continue
curr1 = pad_below(curr, curr["bottom"],i)
adj1 = adj
conc, x_delta, y_delta, x_file, y_file = VAL_concatenate_cubes(curr1, adj1, slice_size)
output = folder+"1_1="+x_file+"_"+str(curr1["top_row"])+"_"+str(curr1["left_col"])+"---"+y_file+"_"+str(adj1["top_row"])+"_"+str(adj1["left_col"])
# print(">>> MATCH >>>"+output)
save_img(output, conc)
# print(">>> >>> >>> SAVED")
train_imgs.append(output)
train_x_delta.append(x_delta)
train_y_delta.append(y_delta)
# mark the image as matched
train_lbls.append([0,1])
is_enriched.append(True)
if ("bottom"in adj.keys()):
for i in range(0, tolerance_factor * 10, 10):
if i == 0:
continue
curr1 = curr
adj1 = pad_below(adj, adj["bottom"],i)
conc, x_delta, y_delta, x_file, y_file = VAL_concatenate_cubes(curr1, adj1, slice_size)
output = folder+"1_1="+x_file+"_"+str(curr1["top_row"])+"_"+str(curr1["left_col"])+"---"+y_file+"_"+str(adj1["top_row"])+"_"+str(adj1["left_col"])
# print(">>> MATCH >>>"+output)
save_img(output, conc)
# print(">>> >>> >>> SAVED")
train_imgs.append(output)
train_x_delta.append(x_delta)
train_y_delta.append(y_delta)
# mark the image as matched
train_lbls.append([0,1])
is_enriched.append(True)
# IMPORTANT: enrich_factor determines how many "duplications" of TRUE values will we have in the train set
# This allows for a more balanced train set however, it reduces the strictness of the matches
# i.e. (not sure why) when we have multiple nearby "duplicates" matches we get much more matches in the validation
# PARAMS: enrich_factor=1 means no enrich/duplicate, 20 means duplicate by 20, every 10 pixels
# PARAMS: tolerance_factor=0 means only match against exact horizon, each notch equals additional 10 pixels tolerance
def NEW_build_train_set_for_binary_labeling(cubes, slice_size, folder, enrich_factor=1, tolerance_factor=0):
# enrich_factor is split by 2 because it is dual-sided and 1 means actually no enrichment - i.e. 0.5
enrich_factor = enrich_factor / 2
# clean folder before starting
for root, dirs, files in os.walk(folder):
for f in files:
os.unlink(os.path.join(root, f))
warnings.filterwarnings("ignore")
train_imgs = []
train_lbls = []
train_x_delta = []
train_y_delta = []
is_enriched = []
discard_c = 0
# import pdb; pdb.set_trace()
# iterate over the cubes
for curr in cubes:
# iterate over the others (effectively n^2)
for adj in cubes:
# Initial filter: what CAN be matched against what?
# 1 - not of the same fragment (file==fragment)
# 2 - they ARE of the same tear - don't want to confuse the learning with false data coming from different tears
# 3 - no need to test against self and avoid checking from both directions
if adj["file"] != curr["file"] and \
adj["tear"] == curr["tear"] and \
curr["piece_col"] < adj["piece_col"]:
# last condition above - actually ignores pieces of the same col but different rows
# the assumption is that they are either "not-match" and then will tilt the balance further to not-match
# or they are "somewhat-matching" but in a way that might confuse the algorithm
# print(">>> >>>"+str(curr["cube"].shape)+" <<< <<<"+str(adj["cube"].shape))
# append the adjacent image to the current image
conc, x_delta, y_delta, x_file, y_file = VAL_concatenate_cubes(curr, adj, slice_size)
train_x_delta.append(x_delta)
train_y_delta.append(y_delta)
# Condition for marking as match:
# 1 - the adj piece is on the same row as the curr
# 2 - the adj piece is just to the right of the curr
# 3 - the curr cube is on the right edge of the piece
# 4 - the adj cube is on the left edge of the piece
# 5 - the cubes are in the same horizon
if curr["piece_row"] == adj["piece_row"] and \
curr["piece_col"] + 1 == adj["piece_col"] and \
(curr["col"] != 0 or curr["last"]) and \
(adj["col"] == 0 or not adj["last"]) and \
np.abs(x_delta) < 50:
# print(x_delta, y_delta)
# mark the image as matched
output = folder+"0_1="+x_file+"_"+str(curr["top_row"])+"_"+str(curr["left_col"])+"---"+y_file+"_"+str(adj["top_row"])+"_"+str(adj["left_col"])
# print(">>> MATCH >>>"+output)
save_img(output, conc)
# print(">>> >>> >>> SAVED")
train_imgs.append(output)
train_lbls.append([0,1])
is_enriched.append(False)
#import pdb; pdb.set_trace()
# TOLERANCE
VAL_add_tolerance_matches(slice_size, folder, train_imgs, train_lbls, train_x_delta,
train_y_delta, is_enriched, curr, adj, tolerance_factor)
# ENRICH/DUPLICATE
# need to enrich the set with a few more true positive samples - so we offset
# the matched images up and down a few times and create more matches
if ("top" in curr.keys() and "top" in adj.keys()):
for i in range(0, 121, int(120/enrich_factor)):
if i == 0:
continue
curr1 = pad_above(curr, curr["top"],i)
adj1 = pad_above(adj, adj["top"],i)
conc, x_delta, y_delta, x_file, y_file = VAL_concatenate_cubes(curr1, adj1, slice_size)
output = folder+"1_1="+x_file+"_"+str(curr1["top_row"])+"_"+str(curr1["left_col"])+"---"+y_file+"_"+str(adj1["top_row"])+"_"+str(adj1["left_col"])
# print(">>> MATCH >>>"+output)
save_img(output, conc)
# print(">>> >>> >>> SAVED")
train_imgs.append(output)
train_x_delta.append(x_delta)
train_y_delta.append(y_delta)
# mark the image as matched
train_lbls.append([0,1])
is_enriched.append(True)
# TOLERANCE
VAL_add_tolerance_matches(slice_size, folder, train_imgs, train_lbls, train_x_delta,
train_y_delta, is_enriched, curr1, adj1, tolerance_factor)
if ("bottom" in curr.keys() and "bottom"in adj.keys()):
for i in range(0, 121, int(120/enrich_factor)):
if i == 0:
continue
curr1 = pad_below(curr, curr["bottom"],i)
adj1 = pad_below(adj, adj["bottom"],i)
conc, x_delta, y_delta, x_file, y_file = VAL_concatenate_cubes(curr1, adj1, slice_size)
output = folder+"1_1="+x_file+"_"+str(curr1["top_row"])+"_"+str(curr1["left_col"])+"---"+y_file+"_"+str(adj1["top_row"])+"_"+str(adj1["left_col"])
# print(">>> MATCH >>>"+output)
save_img(output, conc)
# print(">>> >>> >>> SAVED")
train_imgs.append(output)
train_x_delta.append(x_delta)
train_y_delta.append(y_delta)
# mark the image as matched
train_lbls.append([0,1])
is_enriched.append(True)
# TOLERANCE
VAL_add_tolerance_matches(slice_size, folder, train_imgs, train_lbls, train_x_delta,
train_y_delta, curr1, adj1, tolerance_factor)
# adding a condition for marking as not-matched - we mark only the "key" cubes which are every 250px
# and not overlap - hence we reduce the ratio in favour of not matched which is enormous
# altering between the next 2 lines allows to control the number/ratio of non-match:match
# elif int(curr["row"]) == curr["row"] and int(adj["row"]) == adj["row"]: # this condition will match curr key cubes with adj key cubes only
elif int(adj["row"]) == adj["row"]: # this condition will allow curr cubes which are not just key
# mark the image as not matched
output = folder+"0_0="+x_file+"_"+str(curr["top_row"])+"_"+str(curr["left_col"])+"---"+y_file+"_"+str(adj["top_row"])+"_"+str(adj["left_col"])
# print("<<< nonmatch <<<"+output)
save_img(output, conc)
# print("<<< <<< <<< SAVED")
train_imgs.append(output)
train_lbls.append([1,0]) # not matched
is_enriched.append(False)
# discard not matched which are not "key" cubes (every 250px)
else:
discard_c += 1
print("*** MATCHED="+str(sum(x[1] == 1 for x in train_lbls)))
print("*** NOT MATCHED="+str(sum(x[0] == 1 for x in train_lbls)))
print("*** DISCARDED="+str(discard_c))
warnings.filterwarnings("default")
return train_imgs, train_lbls, train_x_delta, train_y_delta, is_enriched
def frame_to_n_by_m(orig, start_vector, end_vector, is_col):
max_val = np.amax(end_vector)
min_val = np.amin(start_vector)
width = max_val - min_val
if width < CUBE_SIZE:
width = CUBE_SIZE
if (is_col):
result = np.zeros((start_vector.size, width))
else:
result = np.zeros((width, start_vector.size))
for i in range(0, start_vector.size):
if (is_col):
row_vec = orig[i, start_vector[i]:end_vector[i]]
else:
row_vec = orig[start_vector[i]:end_vector[i],i]
temp = np.lib.pad(row_vec, (start_vector[i]-min_val, max_val-end_vector[i]), 'constant', constant_values=(0.09, 0.09))
if (is_col):
if (result[i].size != width):
import pdb; pdb.set_trace()
result[i] = temp[0:width]
else:
result[:,i] = temp[0:width]
return min_val, result
def rough_tear_line(orig, start_vector, cut_mean, is_col, chew_factor):
end_vector = np.empty(start_vector.size).astype(int)
if (is_col and np.absolute(cut_mean-orig.shape[1]) < 10):
end_vector.fill(orig.shape[1])
elif (not is_col and np.absolute(cut_mean-orig.shape[0]) < 10):
end_vector.fill(orig.shape[0])
else:
deviation_vector = np.random.normal(0, chew_factor, start_vector.size).astype(int)
end_vector[0] = cut_mean + deviation_vector[0]
for i in range(1, end_vector.size):
end_vector[i] = end_vector[i - 1] + deviation_vector[i]
cut_max = start_vector + (CUBE_SIZE + EDGE_GAP)
max_and_end = np.append([end_vector], [cut_max], axis=0)
end_vector = np.amax(max_and_end, axis=0)
start_px, cut_piece = frame_to_n_by_m(orig, start_vector, end_vector, is_col)
return start_px, cut_piece, end_vector
def rough_tear_image(image, cols, rows):
pieces = []
col_width = int(image.shape[1] / cols)
row_height = int(image.shape[0] / rows)
# print(col_width, row_height)
next_col_start_vec = np.zeros((image.shape[0],), dtype=int)
for col_idx in range(0, cols):
# import pdb; pdb.set_trace()
start_col_px, cut_column, next_col_start_vec = rough_tear_line(image, next_col_start_vec, col_width * (col_idx + 1), True, 3)
next_row_start_vec = np.zeros((cut_column.shape[1],), dtype=int)
for row_idx in range(0, rows):
start_row_px, cut_piece, next_row_start_vec = rough_tear_line(cut_column, next_row_start_vec, row_height * (row_idx + 1), False, 1)
ymin, ymax, xmin, xmax = calc_min_max_coordinates_dynamic(cut_piece, cutoff=BG_2_OBJ_RATIO)
temp = crop(cut_piece, ymin, ymax, xmin, xmax)
#import pdb; pdb.set_trace()
piece = {}
piece["orig"] = cut_piece
piece["cut"] = temp
piece["col"] = col_idx
piece["row"] = row_idx
piece["col_px"] = start_col_px + xmin
piece["row_px"] = start_row_px + ymin
pieces.append(piece)
return pieces
# + code_folding=[0]
# RUN Define model util functions
# initialize a shaped matrix of weights with random values
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
# initialize a shaped matrix of bias with random values
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def max_pool_1x2(x):
return tf.nn.max_pool(x, ksize=[1, 1, 2, 1],
strides=[1, 1, 2, 1], padding='SAME')
def max_pool_2x1(x):
return tf.nn.max_pool(x, ksize=[1, 2, 1, 1],
strides=[1, 2, 1, 1], padding='SAME')
def max_pool_1x1(x):
return tf.nn.max_pool(x, ksize=[1, 1, 1, 1],
strides=[1, 1, 1, 1], padding='SAME')
def max_pool_5x5(x):
return tf.nn.max_pool(x, ksize=[1, 5, 5, 1],
strides=[1, 5, 5, 1], padding='SAME')
def max_pool_5x2(x):
return tf.nn.max_pool(x, ksize=[1, 5, 2, 1],
strides=[1, 5, 2, 1], padding='SAME')
# + code_folding=[0]
# RUN Image utility functions (external source)
def branchedPoints(skel):
branch1=np.array([[2, 1, 2], [1, 1, 1], [2, 2, 2]])
branch2=np.array([[1, 2, 1], [2, 1, 2], [1, 2, 1]])
branch3=np.array([[1, 2, 1], [2, 1, 2], [1, 2, 2]])
branch4=np.array([[2, 1, 2], [1, 1, 2], [2, 1, 2]])
branch5=np.array([[1, 2, 2], [2, 1, 2], [1, 2, 1]])
branch6=np.array([[2, 2, 2], [1, 1, 1], [2, 1, 2]])
branch7=np.array([[2, 2, 1], [2, 1, 2], [1, 2, 1]])
branch8=np.array([[2, 1, 2], [2, 1, 1], [2, 1, 2]])
branch9=np.array([[1, 2, 1], [2, 1, 2], [2, 2, 1]])
br1=mh.morph.hitmiss(skel,branch1)
br2=mh.morph.hitmiss(skel,branch2)
br3=mh.morph.hitmiss(skel,branch3)
br4=mh.morph.hitmiss(skel,branch4)
br5=mh.morph.hitmiss(skel,branch5)
br6=mh.morph.hitmiss(skel,branch6)
br7=mh.morph.hitmiss(skel,branch7)
br8=mh.morph.hitmiss(skel,branch8)
br9=mh.morph.hitmiss(skel,branch9)
return br1+br2+br3+br4+br5+br6+br7+br8+br9
def endPoints(skel):
endpoint1=np.array([[0, 0, 0],
[0, 1, 0],
[2, 1, 2]])
endpoint2=np.array([[0, 0, 0],
[0, 1, 2],
[0, 2, 1]])
endpoint3=np.array([[0, 0, 2],
[0, 1, 1],
[0, 0, 2]])
endpoint4=np.array([[0, 2, 1],
[0, 1, 2],
[0, 0, 0]])
endpoint5=np.array([[2, 1, 2],
[0, 1, 0],
[0, 0, 0]])
endpoint6=np.array([[1, 2, 0],
[2, 1, 0],
[0, 0, 0]])
endpoint7=np.array([[2, 0, 0],
[1, 1, 0],
[2, 0, 0]])
endpoint8=np.array([[0, 0, 0],
[2, 1, 0],
[1, 2, 0]])
ep1=mh.morph.hitmiss(skel,endpoint1)
ep2=mh.morph.hitmiss(skel,endpoint2)
ep3=mh.morph.hitmiss(skel,endpoint3)
ep4=mh.morph.hitmiss(skel,endpoint4)
ep5=mh.morph.hitmiss(skel,endpoint5)
ep6=mh.morph.hitmiss(skel,endpoint6)
ep7=mh.morph.hitmiss(skel,endpoint7)
ep8=mh.morph.hitmiss(skel,endpoint8)
ep = ep1+ep2+ep3+ep4+ep5+ep6+ep7+ep8
return ep
def pruning(skeleton, size):
'''remove iteratively end points "size"
times from the skeleton
'''
for i in range(0, size):
endpoints = endPoints(skeleton)
endpoints = np.logical_not(endpoints)
skeleton = np.logical_and(skeleton,endpoints)
return skeleton
def plot_comparison(original, filtered, filter_name):
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4), sharex=True, sharey=True)
ax1.imshow(original, cmap=plt.cm.gray)
ax1.set_title('original')
ax1.axis('off')
ax1.set_adjustable('box-forced')
ax2.imshow(filtered, cmap=plt.cm.gray)
ax2.set_title(filter_name)
ax2.axis('off')
ax2.set_adjustable('box-forced')
# + code_folding=[0]
# RUN model_tf_deep - Define the model - 250, 125, 62, 25
def model_tf_deep(input_width, forced_bias=0):
global accuracy, correct_prediction, train_step, x, y_, y_conv, keep_prob, probability, probabilities #, W_fc, b_fc, cost, y_conv_temp
# foundation of the model - the input layer of the image 250 x input_width*2
x = tf.placeholder(tf.float32, [None, 250, input_width*2], "001")
x_image = tf.reshape(x, [-1,250,input_width*2,1], "0011") # 1 is the number of color channels
# the target digits of the model
y_ = tf.placeholder(tf.float32, [None, 2], "002") # 1
# zero convolutional layer: one input image and 32 output filters of 5x5
W_conv0 = weight_variable([5, 5, 1, 32])
b_conv0 = bias_variable([32])
h_conv0 = tf.nn.relu(conv2d(x_image, W_conv0) + b_conv0, "0020")
h_pool0 = max_pool_1x1(h_conv0) # size is maintained
# first convolutional layer: one input image and 32 output filters of 5x5
W_conv1 = weight_variable([5, 5, 32, 32])
# W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(h_pool0, W_conv1) + b_conv1, "0021")
# h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1, "0021")
if (input_width == 250):
h_pool1 = max_pool_2x2(h_conv1) # size is reduced to 125x250
elif (input_width == 125):
h_pool1 = max_pool_2x1(h_conv1) # size is reduced to 125x250
elif (input_width == 62):
h_pool1 = max_pool_2x1(h_conv1) # size is reduced to 125x125
elif (input_width == 25):
h_pool1 = max_pool_2x1(h_conv1) # size is reduced to 125x50
else:
print("ERROR - unsupported slice width")
return
# second convolutional layer: 32 input (filtered) images and 32 output filters of 5x5
W_conv2 = weight_variable([5, 5, 32, 32])
b_conv2 = bias_variable([32])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2, "0022")
if (input_width == 62):
h_pool2 = max_pool_1x1(h_conv2) # size is reduced to 125x125
elif (input_width == 25):
h_pool2 = max_pool_1x1(h_conv2) # size is reduced to 125x50
else:
h_pool2 = max_pool_1x2(h_conv2) # size is reduced to 125x125
# third convolutional layer: 32 input (filtered) images and 32 output filters of 5x5
W_conv3 = weight_variable([5, 5, 32, 32])
b_conv3 = bias_variable([32])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3, "0023")
if (input_width == 25):
h_pool3 = max_pool_5x2(h_conv3) # size is reduced to 25x25
else:
h_pool3 = max_pool_5x5(h_conv3) # size is reduced to 25x25
h_pool3_flat = tf.reshape(h_pool3, [-1, 25*25*32]) # shape as an array
# fourth layer - fully connected with input 25*25*128 and output 1024
W_fc1 = weight_variable([25*25*32, 1024])
b_fc1 = bias_variable([1024])
h_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat, W_fc1) + b_fc1, "0024")
# a drop layer with probability
keep_prob = tf.placeholder(tf.float32, name="003")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob, name="0031")
# # final layer - reduce to one "class" for the linear regression
# W_fc = weight_variable([1024, 1])
# b_fc = bias_variable([1])
# y_conv_temp = tf.matmul(h_fc1_drop, W_fc, name="0032") + b_fc
# y_conv = tf.minimum(y_conv_temp, tf.constant(BREAK_VAL, tf.float32))
# # # minimize loss function
# # cross_entropy = tf.reduce_mean(
# # tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
# # cost = tf.reduce_sum(tf.pow(y_conv - y_, 2))/(2*BATCHES*BATCH_SIZE) # Mean squared error
# cost = tf.reduce_mean(tf.square(y_conv_temp - y_), name="0033") # Mean squared error
# # # define train step and rate
# # train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cost) # Gradient descent
# # evaluate the prediction and the accuracy on the train test - needed only for printing during the training
# correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# final layer - softmax reduction 2 outputs
W_fc2 = weight_variable([1024, 2])
b_fc2 = bias_variable([2])
c_fc2 = tf.constant([0, forced_bias], dtype=tf.float32)
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 + c_fc2
# minimize loss function
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
# tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y_conv))
probability = tf.nn.softmax(y_conv,1)
probabilities=tf.reduce_sum(probability,1)
# define train step and rate
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# evaluate the prediction and the accuracy on the train test - needed only for printing during the training
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# + code_folding=[0]
# RUN model_tf_orig - Define the model - 250, 125, 62, 25
def model_tf_orig(input_width):
global accuracy, correct_prediction, train_step, x, y_, y_conv, keep_prob #, W_fc, b_fc, cost, y_conv_temp
# foundation of the model - the input layer of the image 250 x input_width*2
x = tf.placeholder(tf.float32, [None, 250, input_width*2], "001")
x_image = tf.reshape(x, [-1,250,input_width*2,1], "0011") # 1 is the number of color channels
# the target digits of the model
y_ = tf.placeholder(tf.float32, [None, 2], "002") # 1
# zero convolutional layer: one input image and 32 output filters of 5x5
# W_conv0 = weight_variable([5, 5, 1, 32])
# b_conv0 = bias_variable([32])
# h_conv0 = tf.nn.relu(conv2d(x_image, W_conv0) + b_conv0, "0020")
# h_pool0 = max_pool_1x1(h_conv0) # size is maintained
# first convolutional layer: one input image and 32 output filters of 5x5
# W_conv1 = weight_variable([5, 5, 32, 32])
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
# h_conv1 = tf.nn.relu(conv2d(h_pool0, W_conv1) + b_conv1, "0021")
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1, "0021")
if (input_width == 250):
h_pool1 = max_pool_2x2(h_conv1) # size is reduced to 125x250
elif (input_width == 125):
h_pool1 = max_pool_2x1(h_conv1) # size is reduced to 125x250
elif (input_width == 62):
h_pool1 = max_pool_2x1(h_conv1) # size is reduced to 125x125
elif (input_width == 25):
h_pool1 = max_pool_2x1(h_conv1) # size is reduced to 125x50
else:
print("ERROR - unsupported slice width")
return
# second convolutional layer: 32 input (filtered) images and 32 output filters of 5x5
W_conv2 = weight_variable([5, 5, 32, 32])
b_conv2 = bias_variable([32])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2, "0022")
if (input_width == 62):
h_pool2 = max_pool_1x1(h_conv2) # size is reduced to 125x125
elif (input_width == 25):
h_pool2 = max_pool_1x1(h_conv2) # size is reduced to 125x50
else:
h_pool2 = max_pool_1x2(h_conv2) # size is reduced to 125x125
# third convolutional layer: 32 input (filtered) images and 32 output filters of 5x5
W_conv3 = weight_variable([5, 5, 32, 32])
b_conv3 = bias_variable([32])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3, "0023")
if (input_width == 25):
h_pool3 = max_pool_5x2(h_conv3) # size is reduced to 25x25
else:
h_pool3 = max_pool_5x5(h_conv3) # size is reduced to 25x25
h_pool3_flat = tf.reshape(h_pool3, [-1, 25*25*32]) # shape as an array
# fourth layer - fully connected with input 25*25*128 and output 1024
W_fc1 = weight_variable([25*25*32, 1024])
b_fc1 = bias_variable([1024])
h_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat, W_fc1) + b_fc1, "0024")
# a drop layer with probability
keep_prob = tf.placeholder(tf.float32, name="003")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob, name="0031")
# # final layer - reduce to one "class" for the linear regression
# W_fc = weight_variable([1024, 1])
# b_fc = bias_variable([1])
# y_conv_temp = tf.matmul(h_fc1_drop, W_fc, name="0032") + b_fc
# y_conv = tf.minimum(y_conv_temp, tf.constant(BREAK_VAL, tf.float32))
# # # minimize loss function
# # cross_entropy = tf.reduce_mean(
# # tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
# # cost = tf.reduce_sum(tf.pow(y_conv - y_, 2))/(2*BATCHES*BATCH_SIZE) # Mean squared error
# cost = tf.reduce_mean(tf.square(y_conv_temp - y_), name="0033") # Mean squared error
# # # define train step and rate
# # train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cost) # Gradient descent
# # evaluate the prediction and the accuracy on the train test - needed only for printing during the training
# correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# final layer - softmax reduction 2 outputs
W_fc2 = weight_variable([1024, 2])
b_fc2 = bias_variable([2])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# minimize loss function
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
# define train step and rate
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# evaluate the prediction and the accuracy on the train test - needed only for printing during the training
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# + code_folding=[0]
# RUN model_tf_wide - Define the model - 250, 125, 62, 25
def model_tf_wide(input_width):
global accuracy, correct_prediction, train_step, x, y_, y_conv, keep_prob #, W_fc, b_fc, cost, y_conv_temp
# foundation of the model - the input layer of the image 250 x input_width*2
x = tf.placeholder(tf.float32, [None, 250, input_width*2], "001")
x_image = tf.reshape(x, [-1,250,input_width*2,1], "0011") # 1 is the number of color channels
# the target digits of the model
y_ = tf.placeholder(tf.float32, [None, 2], "002") # 1
# first convolutional layer: one input image and 32 output filters of 5x5
W_conv1 = weight_variable([5, 5, 1, 64])
b_conv1 = bias_variable([64])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1, "0021")
if (input_width == 250):
h_pool1 = max_pool_2x2(h_conv1) # size is reduced to 125x250
elif (input_width == 125):
h_pool1 = max_pool_2x1(h_conv1) # size is reduced to 125x250
elif (input_width == 62):
h_pool1 = max_pool_2x1(h_conv1) # size is reduced to 125x125
elif (input_width == 25):
h_pool1 = max_pool_2x1(h_conv1) # size is reduced to 125x50
else:
print("ERROR - unsupported slice width")
return
# second convolutional layer: 32 input (filtered) images and 32 output filters of 5x5
W_conv2 = weight_variable([5, 5, 64, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2, "0022")
if (input_width == 62):
h_pool2 = max_pool_1x1(h_conv2) # size is reduced to 125x125
elif (input_width == 25):
h_pool2 = max_pool_1x1(h_conv2) # size is reduced to 125x50
else:
h_pool2 = max_pool_1x2(h_conv2) # size is reduced to 125x125
# third convolutional layer: 32 input (filtered) images and 32 output filters of 5x5
W_conv3 = weight_variable([5, 5, 64, 64])
b_conv3 = bias_variable([64])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3, "0023")
if (input_width == 25):
h_pool3 = max_pool_5x2(h_conv3) # size is reduced to 25x25
else:
h_pool3 = max_pool_5x5(h_conv3) # size is reduced to 25x25
h_pool3_flat = tf.reshape(h_pool3, [-1, 25*25*64]) # shape as an array
# fourth layer - fully connected with input 25*25*128 and output 1024
W_fc1 = weight_variable([25*25*64, 2048])
b_fc1 = bias_variable([2048])
h_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat, W_fc1) + b_fc1, "0024")
# a drop layer with probability
keep_prob = tf.placeholder(tf.float32, name="003")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob, name="0031")
# # final layer - reduce to one "class" for the linear regression
# W_fc = weight_variable([1024, 1])
# b_fc = bias_variable([1])
# y_conv_temp = tf.matmul(h_fc1_drop, W_fc, name="0032") + b_fc
# y_conv = tf.minimum(y_conv_temp, tf.constant(BREAK_VAL, tf.float32))
# # # minimize loss function
# # cross_entropy = tf.reduce_mean(
# # tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
# # cost = tf.reduce_sum(tf.pow(y_conv - y_, 2))/(2*BATCHES*BATCH_SIZE) # Mean squared error
# cost = tf.reduce_mean(tf.square(y_conv_temp - y_), name="0033") # Mean squared error
# # # define train step and rate
# # train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cost) # Gradient descent
# # evaluate the prediction and the accuracy on the train test - needed only for printing during the training
# correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# final layer - softmax reduction 2 outputs
W_fc2 = weight_variable([2048, 2])
b_fc2 = bias_variable([2])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# minimize loss function
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
# define train step and rate
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# evaluate the prediction and the accuracy on the train test - needed only for printing during the training
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# + code_folding=[0]
# RUN train
def train(train_imgs, train_lbls, output_model, input_model=""):
print("#####################################################################")
print("TRAINING:")
print("MODEL:"+output_model)
print("#####################################################################")
from random import randrange
# TRAIN Prepare the session
# create a saver object
saver = tf.train.Saver()
# start session and initialize variables
sess = tf.InteractiveSession()
if input_model != "":
# Restore variables from disk.
saver.restore(sess, input_model)
print("Model restored.")
else:
sess.run(tf.initialize_all_variables())
# TRAIN Train the model
x_batch = []
y_batch = []
# run the train batches
for i in range(BATCHES):
x_batch = []
y_batch = []
for _ in range(BATCH_SIZE):
random_index = randrange(0,len(train_imgs))
image = np.load(train_imgs[random_index]+".npy")
# print(train_imgs[random_index])
x_batch.append(image)
y_batch.append(train_lbls[random_index])
# train
# print("step %d"%(i))
train_step.run(feed_dict={x: x_batch, y_: y_batch, keep_prob: 0.5})
# print the accuracy thus far
if (i+1)%50 == 0:
train_accuracy = accuracy.eval(feed_dict={
x:x_batch, y_: y_batch, keep_prob: 1.0})
print("step %d, training accuracy %.2f"%(i, train_accuracy))
print("Optimization Finished!")
train_accuracy = accuracy.eval(feed_dict={
x:x_batch, y_: y_batch, keep_prob: 1.0})
print("step %d, training accuracy %g"%(i, train_accuracy))
# Save the variables to disk.
save_path = saver.save(sess, output_model)
print("Model saved in file: %s" % save_path)
# Close the Session when we're done. If un-commented - need to run next bock of restore...
sess.close()
print("#####################################################################")
print("TRAINING ENDED")
print("#####################################################################")
print(" ")
print(" ")
# + code_folding=[0]
# RUN pre_process - OLD?
def pre_process(folder):
print("#####################################################################")
print("PRE_PROCESS:"+folder)
print("#####################################################################")
result = []
for root, dirs, files in os.walk(folder):
for file_ in files:
# Read the image
# image = img.imread(os.path.join(root, file_))
image = np.load(os.path.join(root, file_))
# import pdb; pdb.set_trace()
cubes = VAL_slice_to_static_slices(file_, image)
print("File: %s >>> cubes: %d"%(file_, len(cubes)))
result.extend(cubes)
return result
print("#####################################################################")
print("PRE_PROCESS ENDED")
print("#####################################################################")
print(" ")
print(" ")
# + code_folding=[]
# RUN pre_process_training - crop image, then tear it randomly to various tears, then per tear create cubes out of the edges, return cube set
def pre_process_training(img_name, x_start=X_START, x_end=X_END, y_start=Y_START, y_end=Y_END, max_cols=8, max_rows=4):
print("#####################################################################")
print("PRE_PROCESS:"+img_name)
print("#####################################################################")
short_name = img_name[:img_name.rfind('-D')]
image = read_and_crop(img_name, x_start, x_end, y_start, y_end)
result = []
for root, dirs, files in os.walk(ROOT_FOLDER+"fragments/"):
for f in files:
os.unlink(os.path.join(root, f))
for col_cut in range(3, max_cols): # 9 3...10
for row_cut in range(2, max_rows): # 6 2...5
print("PRE_PROCESS:::"+"TEAR_"+str(col_cut)+"X"+str(row_cut))
pieces = rough_tear_image(image, col_cut, row_cut)
for piece in pieces:
# print("PRE_PROCESS:::"+"PIECE_"+str(piece["col"])+"X"+str(piece["row"]))
fragment_name = short_name + "_TEAR_"+str(col_cut)+"X"+str(row_cut)+"_PIECE_"+str(piece["col"])+"X"+str(piece["row"])
fragment_file_name = short_name + "_"+str(col_cut)+"X"+str(row_cut)+"_"+str(piece["col"])+"X"+str(piece["row"])
# import pdb; pdb.set_trace()
plt.imsave(os.path.join(ROOT_FOLDER+"fragments/",fragment_file_name+".jpg"), piece["cut"], cmap=plt.cm.gray)
cubes = VAL_slice_TEAR_to_static_slices(fragment_name, piece)
for cube in cubes:
cube["tear"] = str(col_cut)+"X"+str(row_cut)
cube["piece_col"] = piece["col"]
cube["piece_row"] = piece["row"]
# print("File: %s >>> cubes: %d"%(file_, len(cubes)))
result.extend(cubes)
return result
print("#####################################################################")
print("PRE_PROCESS ENDED")
print("#####################################################################")
print(" ")
print(" ")
# + code_folding=[0]
def validate1(cubes, model, slice_size, folder, curr_cube):
# VALIDATE prepare the data sets
test_imgs, test_x_delta, test_y_delta, test_x_file, test_y_file = VAL_build_train_set(cubes, slice_size, folder, curr_cube)
print("loaded %d images"%(len(test_imgs)))
# + code_folding=[0]
def validate2(folder, model, slice_size):
test_imgs = []
test_x_file = []
test_y_file = []
the_root = ""
for root, dirs, files in os.walk(folder):
the_root = root
for file_ in files:
test_imgs.append( os.path.join(root, file_) )
test_x_file.append(file_[:file_.rfind('---P')])
test_y_file.append(file_[file_.rfind('---P')+3:])
print(len(test_imgs))
# VALIDATE Prepare a test session
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# start session and initialize variables
sess = tf.InteractiveSession()
# Restore variables from disk.
saver.restore(sess, model)
print("Model restored.")
# VALIDATE Validate the model
# import pdb; pdb.set_trace()
v1t = []
count = 0
length = len(test_imgs)
batch = 100
x_batch = []
# change the ranges in the loop below - first number is the start point (multiplied by batch size)
# second number is the end point (multiplied by batch size)
# third number is the jump from batch to batch
# use the length about to set the batch length
for start in range(0, length, batch):
for i in range(start, start+batch):
if (i < length):
image = np.load(test_imgs[i])
x_batch.append(image)
count += 1
# print("Validating start at #%d end at %d"%(start*batch,(start+length)*batch))
my_prediction=tf.argmax(y_conv,1)
v1 = my_prediction.eval(feed_dict={x:x_batch, keep_prob: 1.0})
v1t = np.concatenate((v1t, v1), axis=0)
x_batch = []
print(">>> step %d"%(start+batch))
match_indexes = np.nonzero(v1t)[0]
A = np.array(test_x_file)
B = np.array(test_y_file)
C = np.array(test_imgs)
match_x_files = A[match_indexes]
match_y_files = B[match_indexes]
match_images = C[match_indexes]
for matched_img in match_images:
load_img = np.load(matched_img)
plt.imsave(os.path.join("/Volumes/250GB/matched/",matched_img[matched_img.rfind('/')+1:]+".png"), load_img, cmap=plt.cm.gray)
for root, dirs, files in os.walk(folder):
for file_ in files:
os.remove( os.path.join(root, file_) ) # delete it from the FS
with open('matches.csv', 'a') as csvfile:
csvout = csv.writer(csvfile)
for match_index in match_indexes:
print("MATCH %s === %s"%(test_x_file[match_index], test_y_file[match_index]))
# print("MATCH %s === %s"%(A[match_index], B[match_index]))
# csvout.writerow([A[match_index], B[match_index]])
csvout.writerow([test_x_file[match_index], test_y_file[match_index]])
# plt.imsave("match_"+match_index+".jpg", C[match_index])
# Close the Session when we're done.
sess.close()
# + code_folding=[0]
def validate2_for_cross_validation(test_imgs, test_lbls, is_enriched, model, max_samples=0):
print(len(test_imgs))
# VALIDATE Prepare a test session
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# start session and initialize variables
sess = tf.InteractiveSession()
# Restore variables from disk.
saver.restore(sess, model)
print("Model restored.")
# VALIDATE Validate the model
count = 0
se = 0
st = 0
v1t = []
v2t = []
v1tt = []
v2tt = []
length = len(test_imgs)
if max_samples != 0:
length = max_samples
batch = 100
x_batch = []
y_batch = []
# change the ranges in the loop below - first number is the start point (multiplied by batch size)
# second number is the end point (multiplied by batch size)
# third number is the jump from batch to batch
# use the length about to set the batch length
for start in range(0, length, batch):
for i in range(start, start+batch):
if (i < length):
image = np.load(test_imgs[i]+".npy")
x_batch.append(image)
y_batch.append(train_lbls[i])
# print the accuracy thus far
# train_accuracy = accuracy.eval(feed_dict={
# x:x_batch, y_: y_batch, keep_prob: 1.0})
# print("step %d, training accuracy %g"%(i, train_accuracy))
# print("Validating start at #%d end at %d"%(start*batch,(start+length)*batch))
# my_prediction=tf.argmax(y_conv,1)
# v1 = my_prediction.eval(feed_dict={x:x_batch, keep_prob: 1.0})
# v1t = np.concatenate((v1t, v1), axis=0)
######## printing the predictions and their normalized values
# print("y_conv="+str(y_conv.eval(feed_dict={x:x_batch, y_: y_batch, keep_prob: 1.0})))
# print("probability="+str(probability.eval(feed_dict={x:x_batch, y_: y_batch, keep_prob: 1.0})))
# print("probabilities="+str(probabilities.eval(feed_dict={x:x_batch, y_: y_batch, keep_prob: 1.0})))
my_prediction=tf.argmax(y_conv,1)
my_target=tf.argmax(y_,1)
v1 = my_prediction.eval(feed_dict={x:x_batch, y_: y_batch, keep_prob: 1.0})
v2 = my_target.eval(feed_dict={x:x_batch, y_: y_batch, keep_prob: 1.0})
v1t = np.concatenate((v1t, v1), axis=0)
v2t = np.concatenate((v2t, v2), axis=0)
c1 = np.sum(np.absolute(np.subtract(v2, v1)))
c2 = np.sum(np.absolute(v2))
se += c1
st += c2
x_batch = []
y_batch = []
print(">>> step %d"%(start+batch))
count += ((i+1) - start)
precision, recall, f_score, support = precision_recall_fscore_support(v2t, v1t, average='binary')
print("step %d-%d, precision %f, recall %f, f_score %f"%(start, i, precision, recall, f_score))
# print("Accumulated total true = %d"%(st));
# print("Accumulated total error rate = %f"%(se/count));
# v1tt = np.concatenate((v1tt, v1t), axis=0)
# v2tt = np.concatenate((v2tt, v2t), axis=0)
print("=== total %d match %d"%(count, len(np.nonzero(v1t)[0])))
precision, recall, f_score, support = precision_recall_fscore_support(v2t, v1t, average='binary')
print("TOTAL %d, precision %f, recall %f, f_score %f"%(count, precision, recall, f_score))
print("TOTAL true = %d"%(st));
print("TOTAL error rate = %f"%(se/count));
match_indexes = np.nonzero(v1t)[0]
C = np.array(test_imgs)
match_images = C[match_indexes]
# for matched_img in match_images:
# load_img = np.load(matched_img+".npy")
# plt.imsave(os.path.join(ROOT_FOLDER+"synt_matched/",matched_img[matched_img.rfind('/')+1:]+".png"), load_img, cmap=plt.cm.gray)
with open(strftime("%Y%m%d_%H%M%S", gmtime())+'_synt_all.csv', 'a') as csvfile:
csvout = csv.writer(csvfile)
for idx, test_img in enumerate(test_imgs):
# print("MATCH %s === %s"%(test_imgs[match_index], train_lbls[match_index]))
match_class = 0
if idx in match_indexes:
match_class = 1
csvout.writerow([test_img, train_lbls[idx], match_class, is_enriched[idx]])
# plt.imsave("match_"+match_index+".jpg", C[match_index])
with open(strftime("%Y%m%d_%H%M%S", gmtime())+'_synt_matches.csv', 'a') as csvfile:
csvout = csv.writer(csvfile)
for match_index in match_indexes:
# print("MATCH %s === %s"%(test_imgs[match_index], train_lbls[match_index]))
csvout.writerow([test_imgs[match_index], train_lbls[match_index], is_enriched[match_index]])
# plt.imsave("match_"+match_index+".jpg", C[match_index])
# Close the Session when we're done.
sess.close()
# + code_folding=[0]
def iter_validate(cubes, model, slice_size, folder):
print("#####################################################################")
print("VALIDATING")
print("#####################################################################")
cubes_len = len(cubes)
batch_size = 100
count = 0
# iterate over the cubes
for curr in cubes:
count += 1
if count < batch_size: ### TEMP LIMITATION
print("CUBE:%s"%(curr["file"]+"_"+str(curr["top_row"])+"_"+str(curr["left_col"])))
validate1(cubes, model, slice_size, folder, curr)
validate2(folder, model, slice_size)
print("#####################################################################")
print("VALIDATION ENDED")
print("#####################################################################")
print(" ")
print(" ")
# + code_folding=[0]
def run_all(folder, model, slice_size):
model_tf(slice_size)
cubes_set = pre_process(folder)
validate(cubes_set, model, slice_size)
# + code_folding=[0]
# HELPER block
# image = read_and_crop("PX303/FG001/PX303-Fg001-V-C01-R01-D05032015-T112602-ML924__012.jpg")
## image = read_and_crop("PX303/FG004/PX303-Fg004-V-C01-R01-D08032015-T110900-ML924__012.jpg", 100, -1, 400, -1)
# image = read_and_crop("PX303/FG004/PX303-Fg004-V-C01-R02-D08032015-T105147-ML924__012.jpg")
# image = read_and_crop("PX303/FG004/PX303-Fg004-V-C02-R01-D08032015-T110025-ML924__012.jpg")
# image = read_and_crop("PX303/FG004/PX303-Fg004-V-C02-R02-D08032015-T105553-ML924__012.jpg")
# image = read_and_crop("PX303/FG006/PX303-Fg006-V-C01-R01-D08032015-T120605-ML924__012.jpg")
# image = read_and_crop("PX303/FG006/PX303-Fg006-V-C01-R02-D08032015-T115230-ML924__012.jpg")
# image = read_and_crop("PX303/FG006/PX303-Fg006-V-C02-R01-D08032015-T120158-ML924__012.jpg")
##image = read_and_crop("PX303/FG006/PX303-Fg006-V-C02-R02-D08032015-T115704-ML924__012.jpg", 0, 6200, 0, 4400)
##plt.imshow(image)
# + code_folding=[0]
def load_train_from_disk(path):
train_imgs = []
train_lbls = []
is_enriched = []
for root, dirs, files in os.walk(path):
for file_ in files:
file_name = os.path.join(root, file_)
file_name = file_name[:file_name.rfind(".")]
train_imgs.append(file_name)
# train_lbls.append([1,0] if file_.startswith("0=") else [0,1])
# return train_imgs, train_lbls
enriched = file_[0] == '1'
is_enriched.append(enriched)
label = [0,1] if file_[2] == '1' else [1,0]
train_lbls.append(label)
return train_imgs, train_lbls, is_enriched
# + code_folding=[]
# RUN1 - take 1st large pieces and train on it
cubes_set = pre_process_training("PX303-Fg001-V-C01-R01-D05032015-T112520-ML638__006.jpg", max_cols=8, max_rows=6)
train_imgs, train_lbls, train_x_delta, train_y_delta, is_enriched = \
NEW_build_train_set_for_binary_labeling(cubes_set, CUBE_SIZE, ROOT_FOLDER + "train_concats/", 1, 5)
tf.reset_default_graph()
model_tf_deep(250)
train(train_imgs, train_lbls, ROOT_FOLDER + "model_binary/tear_model1.ckpt")
# + code_folding=[0]
# RE-RUN1 - take 1st large pieces and train on it
train_imgs, train_lbls, is_enriched = \
load_train_from_disk(ROOT_FOLDER + "train_concats/")
tf.reset_default_graph()
model_tf_deep(250)
train(train_imgs, train_lbls, ROOT_FOLDER + "model_binary/tear_model1.ckpt")
# + code_folding=[]
# RUN2 - take 2nd large pieces and train on it
cubes_set = pre_process_training("PX303-Fg004-V-C01-R01-D08032015-T110817-ML638__006.jpg", 100, -1, 400, -1, max_cols=9, max_rows=6)
train_imgs, train_lbls, train_x_delta, train_y_delta, is_enriched = \
NEW_build_train_set_for_binary_labeling(cubes_set, CUBE_SIZE,
ROOT_FOLDER + "train_concats2/", 1, 7)
tf.reset_default_graph()
model_tf_deep(250)
train(train_imgs, train_lbls, ROOT_FOLDER + "model_binary/tear_model2.ckpt", ROOT_FOLDER + "model_binary/tear_model1.ckpt")
# + code_folding=[]
# RE-RUN2 - take 2nd large pieces and train on it
train_imgs, train_lbls, is_enriched = \
load_train_from_disk(ROOT_FOLDER + "train_concats2/")
tf.reset_default_graph()
model_tf_deep(250)
train(train_imgs, train_lbls, ROOT_FOLDER + "model_binary/tear_model2.ckpt", ROOT_FOLDER + "model_binary/tear_model1.ckpt")
# + code_folding=[0]
# OPTIONAL RUN3 - take 3rd large pieces and train on it OR TEST in next block
# cubes_set = pre_process_training("PX303/FG006/PX303-Fg006-V-C02-R02-D08032015-T115622-ML638__006.jpg", 0, 6200, 0, 4400, max_cols=8, max_rows=4)
# train_imgs, train_lbls, train_x_delta, train_y_delta, is_enriched = \
# NEW_build_train_set_for_binary_labeling(cubes_set, CUBE_SIZE, ROOT_FOLDER + "train_concats/")
# tf.reset_default_graph()
# model_tf(250)
# train(train_imgs, train_lbls, ROOT_FOLDER + "model_binary/tear_model3.ckpt", ROOT_FOLDER + "model_binary/tear_model2.ckpt")
# + code_folding=[]
# TEST3 - take 1 piece and cross- validate on this (uncomment all for full test run)
cubes_set = pre_process_training("PX303-Fg006-V-C02-R02-D08032015-T115622-ML638__006.jpg", 0, 6200, 0, 4400, max_cols=8, max_rows=4)
train_imgs, train_lbls, train_x_delta, train_y_delta, is_enriched = \
NEW_build_train_set_for_binary_labeling(cubes_set, CUBE_SIZE, ROOT_FOLDER + "train_concats3/", 1, 7)
tf.reset_default_graph()
model_tf_deep(250, 1)
validate2_for_cross_validation(train_imgs, train_lbls, is_enriched, ROOT_FOLDER + "model_binary/tear_model2.ckpt")
# + code_folding=[]
# RE-TEST3 - take 1 piece and cross- validate on this (uncomment all for full test run)
train_imgs, train_lbls, is_enriched = \
load_train_from_disk(ROOT_FOLDER + "train_concats3/")
tf.reset_default_graph()
model_tf_deep(250, 1)
validate2_for_cross_validation(train_imgs, train_lbls, ROOT_FOLDER + "model_binary/tear_model2.ckpt")
# + code_folding=[]
#### STOP
# -
len(train_imgs)
sum(x[1] == 1 for x in train_lbls)
# + code_folding=[0]
#####################################################################
TRAINING:
MODEL:/Users/il239838/Downloads/private/Thesis/Papyrus/model_binary_new_X6/tear_model1.ckpt
#####################################################################
step 49, training accuracy 0.78
step 99, training accuracy 0.84
step 149, training accuracy 0.9
step 199, training accuracy 0.92
step 249, training accuracy 0.88
step 299, training accuracy 0.84
step 349, training accuracy 0.92
step 399, training accuracy 0.86
Optimization Finished!
step 399, training accuracy 0.86
Model saved in file: /Users/il239838/Downloads/private/Thesis/Papyrus/model_binary_new_X6/tear_model1.ckpt
#####################################################################
TRAINING ENDED
#####################################################################
deper network
#####################################################################
TRAINING:
MODEL:/Users/il239838/Downloads/private/Thesis/Papyrus/model_binary_new_X6/tear_model1.ckpt
#####################################################################
step 49, training accuracy 0.84
step 99, training accuracy 0.94
step 149, training accuracy 0.86
step 199, training accuracy 0.92
step 249, training accuracy 0.88
step 299, training accuracy 0.92
step 349, training accuracy 0.96
step 399, training accuracy 0.96
Optimization Finished!
step 399, training accuracy 0.96
Model saved in file: /Users/il239838/Downloads/private/Thesis/Papyrus/model_binary_new_X6/tear_model1.ckpt
#####################################################################
TRAINING ENDED
#####################################################################
deeper network on GCP
#####################################################################
TRAINING:
MODEL:/home/il239838/files/model_binary/tear_model1.ckpt
#####################################################################
WARNING:tensorflow:From /home/il239838/miniconda3/envs/carnd-term1/lib/python3.5/site-packages/tensorflow/python/util/tf_should_use.py:118: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
step 49, training accuracy 0.82
step 99, training accuracy 0.92
step 149, training accuracy 0.72
step 199, training accuracy 0.8
step 249, training accuracy 0.88
step 299, training accuracy 0.88
step 349, training accuracy 0.94
step 399, training accuracy 0.84
Optimization Finished!
step 399, training accuracy 0.84
Model saved in file: /home/il239838/files/model_binary/tear_model1.ckpt
#####################################################################
TRAINING ENDED
#####################################################################
# + code_folding=[0]
#####################################################################
TRAINING:
MODEL:/Users/il239838/Downloads/private/Thesis/Papyrus/model_binary_new_X6/tear_model2.ckpt
#####################################################################
INFO:tensorflow:Restoring parameters from /Users/il239838/Downloads/private/Thesis/Papyrus/model_binary_new_X6/tear_model1.ckpt
Model restored.
step 49, training accuracy 0.76
step 99, training accuracy 0.82
step 149, training accuracy 0.96
step 199, training accuracy 0.86
step 249, training accuracy 0.76
step 299, training accuracy 0.82
step 349, training accuracy 0.86
step 399, training accuracy 0.88
Optimization Finished!
step 399, training accuracy 0.88
Model saved in file: /Users/il239838/Downloads/private/Thesis/Papyrus/model_binary_new_X6/tear_model2.ckpt
#####################################################################
TRAINING ENDED
#####################################################################
deeper network
#####################################################################
TRAINING:
MODEL:/Users/il239838/Downloads/private/Thesis/Papyrus/model_binary_new_X6/tear_model2.ckpt
#####################################################################
INFO:tensorflow:Restoring parameters from /Users/il239838/Downloads/private/Thesis/Papyrus/model_binary_new_X6/tear_model1.ckpt
Model restored.
step 49, training accuracy 0.88
step 99, training accuracy 0.88
step 149, training accuracy 0.88
step 199, training accuracy 0.88
step 249, training accuracy 0.92
step 299, training accuracy 0.92
step 349, training accuracy 0.86
step 399, training accuracy 0.94
Optimization Finished!
step 399, training accuracy 0.94
Model saved in file: /Users/il239838/Downloads/private/Thesis/Papyrus/model_binary_new_X6/tear_model2.ckpt
#####################################################################
TRAINING ENDED
#####################################################################
deeper network on GCP
#####################################################################
TRAINING:
MODEL:/home/il239838/files/model_binary/tear_model2.ckpt
#####################################################################
INFO:tensorflow:Restoring parameters from /home/il239838/files/model_binary/tear_model1.ckpt
Model restored.
step 49, training accuracy 0.88
step 99, training accuracy 0.96
step 149, training accuracy 0.84
step 199, training accuracy 0.84
step 249, training accuracy 0.84
step 299, training accuracy 0.92
step 349, training accuracy 0.98
step 399, training accuracy 0.9
Optimization Finished!
step 399, training accuracy 0.9
Model saved in file: /home/il239838/files/model_binary/tear_model2.ckpt
#####################################################################
TRAINING ENDED
#####################################################################
# + code_folding=[0]
#####################################################################
>>> step 100
step 0-99, precision 0.695652, recall 0.640000, f_score 0.666667
>>> step 200
step 100-199, precision 0.512500, recall 0.732143, f_score 0.602941
>>> step 300
step 200-299, precision 0.350427, recall 0.732143, f_score 0.473988
>>> step 400
step 300-399, precision 0.408805, recall 0.677083, f_score 0.509804
>>> step 500
step 400-499, precision 0.366834, recall 0.651786, f_score 0.469453
>>> step 600
step 500-599, precision 0.349282, recall 0.651786, f_score 0.454829
>>> step 700
step 600-699, precision 0.376471, recall 0.662069, f_score 0.480000
>>> step 800
step 700-799, precision 0.383142, recall 0.595238, f_score 0.466200
>>> step 900
step 800-899, precision 0.394649, recall 0.617801, f_score 0.481633
>>> step 1000
step 900-999, precision 0.407821, recall 0.651786, f_score 0.501718
>>> step 1100
step 1000-1099, precision 0.404432, recall 0.651786, f_score 0.499145
>>> step 1200
step 1100-1199, precision 0.423559, recall 0.657588, f_score 0.515244
>>> step 1300
step 1200-1299, precision 0.408983, recall 0.662835, f_score 0.505848
>>> step 1400
step 1300-1399, precision 0.405640, recall 0.667857, f_score 0.504723
>>> step 1500
step 1400-1499, precision 0.415020, recall 0.670927, f_score 0.512821
>>> step 1600
step 1500-1599, precision 0.402852, recall 0.672619, f_score 0.503902
>>> step 1700
step 1600-1699, precision 0.401681, recall 0.647696, f_score 0.495851
>>> step 1800
step 1700-1799, precision 0.387987, recall 0.647696, f_score 0.485279
>>> step 1900
step 1800-1899, precision 0.385484, recall 0.647696, f_score 0.483316
>>> step 2000
step 1900-1999, precision 0.388802, recall 0.647668, f_score 0.485909
>>> step 2100
step 2000-2099, precision 0.379161, recall 0.650124, f_score 0.478976
>>> step 2200
step 2100-2199, precision 0.374286, recall 0.650124, f_score 0.475068
>>> step 2300
step 2200-2299, precision 0.381868, recall 0.651054, f_score 0.481385
>>> step 2400
step 2300-2399, precision 0.392622, recall 0.659292, f_score 0.492155
>>> step 2500
step 2400-2499, precision 0.393604, recall 0.665281, f_score 0.494590
>>> step 2600
step 2500-2599, precision 0.392601, recall 0.664646, f_score 0.493623
>>> step 2700
step 2600-2699, precision 0.389810, recall 0.664646, f_score 0.491412
>>> step 2800
step 2700-2799, precision 0.384884, recall 0.655446, f_score 0.484982
>>> step 2900
step 2800-2899, precision 0.390572, recall 0.664122, f_score 0.491873
>>> step 3000
step 2900-2999, precision 0.401064, recall 0.679279, f_score 0.504348
>>> step 3100
step 3000-3099, precision 0.418534, recall 0.695431, f_score 0.522568
>>> step 3200
step 3100-3199, precision 0.430129, recall 0.704545, f_score 0.534154
>>> step 3300
step 3200-3299, precision 0.446036, recall 0.716258, f_score 0.549735
>>> step 3400
step 3300-3399, precision 0.448563, recall 0.722388, f_score 0.553459
>>> step 3500
step 3400-3499, precision 0.455204, recall 0.719599, f_score 0.557650
>>> step 3600
step 3500-3599, precision 0.456560, recall 0.718271, f_score 0.558266
>>> step 3700
step 3600-3699, precision 0.457841, recall 0.724000, f_score 0.560950
>>> step 3800
step 3700-3799, precision 0.455285, recall 0.720721, f_score 0.558047
>>> step 3900
step 3800-3899, precision 0.459969, recall 0.724351, f_score 0.562650
>>> step 4000
step 3900-3999, precision 0.445289, recall 0.724351, f_score 0.551529
>>> step 4100
step 4000-4099, precision 0.442598, recall 0.724351, f_score 0.549461
>>> step 4200
step 4100-4199, precision 0.445596, recall 0.725301, f_score 0.552040
>>> step 4300
step 4200-4299, precision 0.429793, recall 0.724760, f_score 0.539597
>>> step 4400
step 4300-4399, precision 0.426450, recall 0.724760, f_score 0.536955
>>> step 4500
step 4400-4499, precision 0.427491, recall 0.727485, f_score 0.538528
>>> step 4600
step 4500-4599, precision 0.427310, recall 0.723820, f_score 0.537377
>>> step 4700
step 4600-4699, precision 0.422999, recall 0.723820, f_score 0.533956
>>> step 4800
step 4700-4799, precision 0.419893, recall 0.723820, f_score 0.531474
>>> step 4900
step 4800-4899, precision 0.423740, recall 0.723669, f_score 0.534504
>>> step 5000
step 4900-4999, precision 0.428387, recall 0.725683, f_score 0.538742
>>> step 5100
step 5000-5099, precision 0.417348, recall 0.725683, f_score 0.529928
>>> step 5200
step 5100-5199, precision 0.414482, recall 0.725683, f_score 0.527612
>>> step 5300
step 5200-5299, precision 0.412935, recall 0.725683, f_score 0.526358
>>> step 5400
step 5300-5399, precision 0.418093, recall 0.720759, f_score 0.529207
>>> step 5500
step 5400-5499, precision 0.409934, recall 0.720294, f_score 0.522502
>>> step 5600
step 5500-5599, precision 0.409280, recall 0.719665, f_score 0.521805
>>> step 5700
step 5600-5699, precision 0.411176, recall 0.716189, f_score 0.522422
>>> step 5800
step 5700-5799, precision 0.413056, recall 0.707921, f_score 0.521707
>>> step 5900
step 5800-5899, precision 0.408427, recall 0.701061, f_score 0.516152
>>> step 6000
step 5900-5999, precision 0.404113, recall 0.701061, f_score 0.512694
>>> step 6100
step 6000-6099, precision 0.399565, recall 0.700382, f_score 0.508839
>>> step 6200
step 6100-6199, precision 0.398913, recall 0.700382, f_score 0.508310
>>> step 6300
step 6200-6299, precision 0.402681, recall 0.704503, f_score 0.512453
>>> step 6400
step 6300-6399, precision 0.404178, recall 0.709441, f_score 0.514970
>>> step 6500
step 6400-6499, precision 0.402955, recall 0.713255, f_score 0.514974
>>> step 6600
step 6500-6599, precision 0.402532, recall 0.713645, f_score 0.514730
>>> step 6700
step 6600-6699, precision 0.413011, recall 0.712585, f_score 0.522933
>>> step 6800
step 6700-6799, precision 0.416506, recall 0.713813, f_score 0.526059
>>> step 6900
step 6800-6899, precision 0.421002, recall 0.712439, f_score 0.529253
>>> step 7000
step 6900-6999, precision 0.428773, recall 0.709176, f_score 0.534427
>>> step 7100
step 7000-7099, precision 0.430497, recall 0.704718, f_score 0.534488
>>> step 7200
step 7100-7199, precision 0.437929, recall 0.695273, f_score 0.537381
>>> step 7300
step 7200-7299, precision 0.440254, recall 0.693133, f_score 0.538483
>>> step 7400
step 7300-7399, precision 0.441348, recall 0.690577, f_score 0.538525
>>> step 7500
step 7400-7499, precision 0.439821, recall 0.688375, f_score 0.536719
>>> step 7600
step 7500-7599, precision 0.440693, recall 0.683196, f_score 0.535782
>>> step 7700
step 7600-7699, precision 0.440618, recall 0.676152, f_score 0.533547
>>> step 7800
step 7700-7799, precision 0.435618, recall 0.676152, f_score 0.529865
>>> step 7900
step 7800-7899, precision 0.431701, recall 0.674044, f_score 0.526316
>>> step 8000
step 7900-7999, precision 0.429482, recall 0.668651, f_score 0.523021
>>> step 8100
step 8000-8099, precision 0.428027, recall 0.667768, f_score 0.521672
>>> step 8200
step 8100-8199, precision 0.429717, recall 0.664057, f_score 0.521784
>>> step 8300
step 8200-8299, precision 0.428451, recall 0.664057, f_score 0.520849
>>> step 8400
step 8300-8399, precision 0.429589, recall 0.660438, f_score 0.520569
>>> step 8500
step 8400-8499, precision 0.428870, recall 0.660438, f_score 0.520041
>>> step 8600
step 8500-8599, precision 0.427618, recall 0.660438, f_score 0.519119
>>> step 8700
step 8600-8699, precision 0.424528, recall 0.655478, f_score 0.515310
>>> step 8800
step 8700-8799, precision 0.421074, recall 0.655478, f_score 0.512757
>>> step 8900
step 8800-8899, precision 0.420032, recall 0.650814, f_score 0.510555
>>> step 9000
step 8900-8999, precision 0.415667, recall 0.650814, f_score 0.507317
>>> step 9100
step 9000-9099, precision 0.414201, recall 0.646154, f_score 0.504808
>>> step 9200
step 9100-9199, precision 0.414510, recall 0.642944, f_score 0.504053
>>> step 9300
step 9200-9299, precision 0.413793, recall 0.639138, f_score 0.502352
>>> step 9400
step 9300-9399, precision 0.408101, recall 0.639138, f_score 0.498134
>>> step 9500
step 9400-9499, precision 0.407789, recall 0.639138, f_score 0.497902
>>> step 9600
step 9500-9599, precision 0.408248, recall 0.639218, f_score 0.498268
>>> step 9700
step 9600-9699, precision 0.406097, recall 0.639218, f_score 0.496663
>>> step 9800
step 9700-9799, precision 0.408482, recall 0.639487, f_score 0.498524
>>> step 9900
step 9800-9899, precision 0.407298, recall 0.639098, f_score 0.497524
>>> step 10000
step 9900-9999, precision 0.406250, recall 0.639098, f_score 0.496741
>>> step 10100
step 10000-10099, precision 0.407407, recall 0.636312, f_score 0.496758
>>> step 10200
step 10100-10199, precision 0.408892, recall 0.636415, f_score 0.497892
>>> step 10300
step 10200-10299, precision 0.407852, recall 0.636415, f_score 0.497120
>>> step 10400
step 10300-10399, precision 0.408796, recall 0.637079, f_score 0.498024
>>> step 10500
step 10400-10499, precision 0.406452, recall 0.637079, f_score 0.496280
>>> step 10600
step 10500-10599, precision 0.405540, recall 0.637989, f_score 0.495875
>>> step 10700
step 10600-10699, precision 0.403583, recall 0.636918, f_score 0.494087
>>> step 10800
step 10700-10799, precision 0.401748, recall 0.636918, f_score 0.492710
>>> step 10900
step 10800-10899, precision 0.402490, recall 0.636066, f_score 0.493011
>>> step 11000
step 10900-10999, precision 0.400619, recall 0.633496, f_score 0.490836
>>> step 11100
step 11000-11099, precision 0.396798, recall 0.633496, f_score 0.487958
>>> step 11200
step 11100-11199, precision 0.396944, recall 0.629510, f_score 0.486880
>>> step 11300
step 11200-11299, precision 0.397928, recall 0.628496, f_score 0.487316
>>> step 11400
step 11300-11399, precision 0.398474, recall 0.625521, f_score 0.486826
>>> step 11500
step 11400-11499, precision 0.397233, recall 0.620690, f_score 0.484435
>>> step 11600
step 11500-11599, precision 0.396189, recall 0.620690, f_score 0.483658
>>> step 11700
step 11600-11699, precision 0.397906, recall 0.620725, f_score 0.484945
>>> step 11800
step 11700-11799, precision 0.397856, recall 0.618999, f_score 0.484381
>>> step 11900
step 11800-11899, precision 0.396184, recall 0.618999, f_score 0.483139
>>> step 12000
step 11900-11999, precision 0.394779, recall 0.618999, f_score 0.482094
>>> step 12100
step 12000-12099, precision 0.397894, recall 0.619473, f_score 0.484554
>>> step 12200
step 12100-12199, precision 0.402704, recall 0.621242, f_score 0.488652
>>> step 12300
step 12200-12299, precision 0.408271, recall 0.618464, f_score 0.491852
>>> step 12400
step 12300-12399, precision 0.411149, recall 0.615207, f_score 0.492893
>>> step 12500
step 12400-12499, precision 0.414962, recall 0.612991, f_score 0.494902
>>> step 12600
step 12500-12599, precision 0.416262, recall 0.606275, f_score 0.493614
>>> step 12700
step 12600-12699, precision 0.419520, recall 0.603456, f_score 0.494951
>>> step 12800
step 12700-12799, precision 0.418013, recall 0.603456, f_score 0.493901
>>> step 12900
step 12800-12899, precision 0.417014, recall 0.600943, f_score 0.492362
>>> step 13000
step 12900-12999, precision 0.412596, recall 0.600943, f_score 0.489269
>>> step 13100
step 13000-13099, precision 0.410480, recall 0.599745, f_score 0.487383
>>> step 13200
step 13100-13199, precision 0.408577, recall 0.599745, f_score 0.486039
>>> step 13300
step 13200-13299, precision 0.407397, recall 0.599745, f_score 0.485203
>>> step 13400
step 13300-13399, precision 0.406178, recall 0.599409, f_score 0.484228
>>> step 13500
step 13400-13499, precision 0.401925, recall 0.599409, f_score 0.481193
>>> step 13600
step 13500-13599, precision 0.401584, recall 0.599409, f_score 0.480948
>>> step 13700
step 13600-13699, precision 0.400391, recall 0.600335, f_score 0.480389
>>> step 13800
step 13700-13799, precision 0.399224, recall 0.598753, f_score 0.479042
>>> step 13900
step 13800-13899, precision 0.395553, recall 0.598919, f_score 0.476442
>>> step 14000
step 13900-13999, precision 0.393915, recall 0.598432, f_score 0.475098
>>> step 14100
step 14000-14099, precision 0.393915, recall 0.598432, f_score 0.475098
>>> step 14200
step 14100-14199, precision 0.392741, recall 0.598432, f_score 0.474244
>>> step 14300
step 14200-14299, precision 0.393562, recall 0.597536, f_score 0.474560
>>> step 14400
step 14300-14399, precision 0.390675, recall 0.594374, f_score 0.471463
>>> step 14500
step 14400-14499, precision 0.389944, recall 0.592924, f_score 0.470474
>>> step 14600
step 14500-14599, precision 0.386943, recall 0.592924, f_score 0.468283
>>> step 14700
step 14600-14699, precision 0.386532, recall 0.592924, f_score 0.467983
>>> step 14800
step 14700-14799, precision 0.387326, recall 0.595152, f_score 0.469258
>>> step 14900
step 14800-14899, precision 0.386903, recall 0.595343, f_score 0.469007
>>> step 15000
step 14900-14999, precision 0.388658, recall 0.595694, f_score 0.470403
>>> step 15100
step 15000-15099, precision 0.387264, recall 0.595304, f_score 0.469260
>>> step 15200
step 15100-15199, precision 0.386923, recall 0.596679, f_score 0.469435
>>> step 15300
step 15200-15299, precision 0.387583, recall 0.597950, f_score 0.470315
>>> step 15400
step 15300-15399, precision 0.384228, recall 0.599214, f_score 0.468222
>>> step 15500
step 15400-15499, precision 0.383070, recall 0.599214, f_score 0.467361
>>> step 15600
step 15500-15599, precision 0.384500, recall 0.599143, f_score 0.468403
>>> step 15700
step 15600-15699, precision 0.386607, recall 0.601239, f_score 0.470606
>>> step 15800
step 15700-15799, precision 0.382607, recall 0.601239, f_score 0.467630
>>> step 15900
step 15800-15899, precision 0.382626, recall 0.603309, f_score 0.468269
>>> step 16000
step 15900-15999, precision 0.383248, recall 0.603296, f_score 0.468731
>>> step 16100
step 16000-16099, precision 0.382317, recall 0.603296, f_score 0.468034
>>> step 16200
step 16100-16199, precision 0.381946, recall 0.603296, f_score 0.467756
>>> step 16300
step 16200-16299, precision 0.379460, recall 0.603296, f_score 0.465887
>>> step 16400
step 16300-16399, precision 0.382472, recall 0.604669, f_score 0.468563
>>> step 16500
step 16400-16499, precision 0.385124, recall 0.605644, f_score 0.470843
>>> step 16600
step 16500-16599, precision 0.386226, recall 0.609174, f_score 0.472732
>>> step 16700
step 16600-16699, precision 0.385509, recall 0.609174, f_score 0.472195
>>> step 16800
step 16700-16799, precision 0.387558, recall 0.609641, f_score 0.473870
>>> step 16900
step 16800-16899, precision 0.389587, recall 0.611689, f_score 0.476004
>>> step 17000
step 16900-16999, precision 0.392534, recall 0.612858, f_score 0.478555
>>> step 17100
step 17000-17099, precision 0.393763, recall 0.611711, f_score 0.479115
>>> step 17200
step 17100-17199, precision 0.392442, recall 0.611711, f_score 0.478136
>>> step 17300
step 17200-17299, precision 0.394895, recall 0.614296, f_score 0.480746
>>> step 17400
step 17300-17399, precision 0.397101, recall 0.616644, f_score 0.483100
>>> step 17500
step 17400-17499, precision 0.401132, recall 0.617085, f_score 0.486208
>>> step 17600
step 17500-17599, precision 0.399220, recall 0.617085, f_score 0.484801
>>> step 17700
step 17600-17699, precision 0.400259, recall 0.617539, f_score 0.485707
>>> step 17800
step 17700-17799, precision 0.402402, recall 0.618734, f_score 0.487653
>>> step 17900
step 17800-17899, precision 0.403036, recall 0.617830, f_score 0.487836
>>> step 18000
step 17900-17999, precision 0.404838, recall 0.618076, f_score 0.489231
>>> step 18100
step 18000-18099, precision 0.407634, recall 0.618758, f_score 0.491482
>>> step 18200
step 18100-18199, precision 0.406473, recall 0.618682, f_score 0.490614
>>> step 18300
step 18200-18299, precision 0.407004, recall 0.619139, f_score 0.491144
>>> step 18400
step 18300-18399, precision 0.406982, recall 0.619472, f_score 0.491232
>>> step 18500
step 18400-18499, precision 0.405737, recall 0.619093, f_score 0.490206
>>> step 18600
step 18500-18599, precision 0.404076, recall 0.620025, f_score 0.489282
>>> step 18700
step 18600-18699, precision 0.401760, recall 0.620025, f_score 0.487581
>>> step 18800
step 18700-18799, precision 0.399308, recall 0.620025, f_score 0.485771
>>> step 18900
step 18800-18899, precision 0.398062, recall 0.620321, f_score 0.484938
>>> step 19000
step 18900-18999, precision 0.396462, recall 0.620321, f_score 0.483748
>>> step 19100
step 19000-19099, precision 0.396994, recall 0.620614, f_score 0.484234
>>> step 19200
step 19100-19199, precision 0.395224, recall 0.620431, f_score 0.482859
>>> step 19300
step 19200-19299, precision 0.393585, recall 0.620281, f_score 0.481589
>>> step 19400
step 19300-19399, precision 0.393508, recall 0.620281, f_score 0.481531
>>> step 19500
step 19400-19499, precision 0.393243, recall 0.621160, f_score 0.481597
>>> step 19600
step 19500-19599, precision 0.392549, recall 0.621160, f_score 0.481077
>>> step 19700
step 19600-19699, precision 0.392088, recall 0.621160, f_score 0.480730
>>> step 19800
step 19700-19799, precision 0.391704, recall 0.621160, f_score 0.480442
>>> step 19900
step 19800-19899, precision 0.391016, recall 0.621160, f_score 0.479923
>>> step 20000
step 19900-19999, precision 0.390358, recall 0.620519, f_score 0.479236
>>> step 20100
step 20000-20099, precision 0.390504, recall 0.620764, f_score 0.479419
>>> step 20200
step 20100-20199, precision 0.386887, recall 0.621114, f_score 0.476787
>>> step 20300
step 20200-20299, precision 0.384308, recall 0.621114, f_score 0.474824
>>> step 20400
step 20300-20399, precision 0.384161, recall 0.621114, f_score 0.474712
>>> step 20500
step 20400-20499, precision 0.383577, recall 0.621114, f_score 0.474266
>>> step 20600
step 20500-20599, precision 0.383660, recall 0.622861, f_score 0.474837
>>> step 20700
step 20600-20699, precision 0.383569, recall 0.624886, f_score 0.475355
>>> step 20800
step 20700-20799, precision 0.382785, recall 0.624886, f_score 0.474752
>>> step 20900
step 20800-20899, precision 0.384644, recall 0.627704, f_score 0.476995
>>> step 21000
step 20900-20999, precision 0.383092, recall 0.627704, f_score 0.475800
>>> step 21100
step 21000-21099, precision 0.383464, recall 0.626977, f_score 0.475878
>>> step 21200
step 21100-21199, precision 0.383045, recall 0.626977, f_score 0.475555
>>> step 21300
step 21200-21299, precision 0.383430, recall 0.626852, f_score 0.475816
>>> step 21400
step 21300-21399, precision 0.384685, recall 0.628119, f_score 0.477146
>>> step 21500
step 21400-21499, precision 0.383581, recall 0.628119, f_score 0.476296
>>> step 21600
step 21500-21599, precision 0.384506, recall 0.629446, f_score 0.477391
>>> step 21700
step 21600-21699, precision 0.382462, recall 0.629446, f_score 0.475813
>>> step 21800
step 21700-21799, precision 0.382716, recall 0.628439, f_score 0.475721
>>> step 21900
step 21800-21899, precision 0.380835, recall 0.628439, f_score 0.474265
>>> step 22000
step 21900-21999, precision 0.380235, recall 0.628439, f_score 0.473799
>>> step 22100
step 22000-22099, precision 0.380420, recall 0.629084, f_score 0.474126
>>> step 22200
step 22100-22199, precision 0.378838, recall 0.629758, f_score 0.473086
>>> step 22300
step 22200-22299, precision 0.378196, recall 0.628481, f_score 0.472225
>>> step 22400
step 22300-22399, precision 0.377388, recall 0.627647, f_score 0.471359
>>> step 22500
step 22400-22499, precision 0.377378, recall 0.626815, f_score 0.471117
>>> step 22600
step 22500-22599, precision 0.377937, recall 0.628183, f_score 0.471939
>>> step 22700
step 22600-22699, precision 0.376335, recall 0.628183, f_score 0.470688
>>> step 22800
step 22700-22799, precision 0.376016, recall 0.628183, f_score 0.470439
>>> step 22900
step 22800-22899, precision 0.375968, recall 0.629473, f_score 0.470762
>>> step 23000
step 22900-22999, precision 0.375210, recall 0.629473, f_score 0.470167
>>> step 23100
step 23000-23099, precision 0.374708, recall 0.630191, f_score 0.469973
>>> step 23200
step 23100-23199, precision 0.376144, recall 0.631564, f_score 0.471484
>>> step 23300
step 23200-23299, precision 0.376630, recall 0.632834, f_score 0.472219
>>> step 23400
step 23300-23399, precision 0.375021, recall 0.632834, f_score 0.470952
>>> step 23500
step 23400-23499, precision 0.374286, recall 0.633527, f_score 0.470564
>>> step 23600
step 23500-23599, precision 0.374068, recall 0.634763, f_score 0.470732
>>> step 23700
step 23600-23699, precision 0.373814, recall 0.635494, f_score 0.470731
>>> step 23800
step 23700-23799, precision 0.372496, recall 0.635494, f_score 0.469685
>>> step 23900
step 23800-23899, precision 0.372906, recall 0.635918, f_score 0.470127
>>> step 24000
step 23900-23999, precision 0.372976, recall 0.634865, f_score 0.469894
>>> step 24100
step 24000-24099, precision 0.373713, recall 0.634750, f_score 0.470448
>>> step 24200
step 24100-24199, precision 0.373123, recall 0.634750, f_score 0.469979
>>> step 24300
step 24200-24299, precision 0.374528, recall 0.635369, f_score 0.471263
>>> step 24400
step 24300-24399, precision 0.374765, recall 0.635397, f_score 0.471458
# -
print("test %.2f"%0.12)
for i in range(len(cubes_set)):
if (cubes_set[i]["file"]) == "PX303-Fg001-V-C01-R01_TEAR_8X5_PIECE_2X4":
plt.imshow(cubes_set[i]["cube"])
for i in range(len(cubes_set)):
if (cubes_set[i]["file"]) == "PX303-Fg001-V-C01-R01_TEAR_8X5_PIECE_4X4":
plt.imshow(cubes_set[i]["cube"])
# +
# When trained on a ratio of 1:2 non-match:match - and tested on the same the results are excellend
*** MATCHED=19065
*** NOT MATCHED=8183
*** DISCARDED=1979027
TOTAL 27300, precision 0.815258, recall 0.979806, f_score 0.889990
TOTAL true = 19065
TOTAL error rate = 0.169158
# When trained on a ratio of 1:2 non-match:match - and tested on 6:1 (much more non-match) the results are poor
# coverage is still excellent but precision is bad - means a lot of false-positive
# this usually implies that we'll get a lot of trash in the validation set
# hence next attempt will be to train on 6:1 and see it the test is better
*** MATCHED=19065
*** NOT MATCHED=125210
*** DISCARDED=1862000
precision 0.237849, recall 0.973684, f_score 0.382309
| src/others/17_Process_training_tears_using_cubes_match-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Decision Tree Classification
# Importing the libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# -
# Importing the dataset from sklearn
from sklearn.datasets import load_iris
dataset = load_iris()
# Getting the dependent varaibles
dataset.target_names
# Getting the independent varaibles
dataset.feature_names
dataset.data
# Assigning input and output
X = dataset.data
y = dataset.target
# Splitting the data into training set and test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)
# Fitting the classifier to the Training set
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(criterion = 'entropy', splitter = 'best', random_state = 0)
model.fit(X_train, y_train)
# Predicting the test set result
y_pred = model.predict(X_test)
# Making the confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
# Getting the score for our model
model.score(X_test, y_test)
| Day22_Decision_tree_and_Random_Forest_Classification/Decision Tree Classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Session III: Control Structures and Functions
# ---
# ### Session II Challenge
# +
str1 = ')()(())))'
str2 = '(()(()('
new_strings = [str1 + str2, str2 + str1]
balance = None
for new_string in new_strings:
# check for closing paranthesis at the beginning
if new_string.startswith(')'):
balance = False
# check for opening paranthesis at the end
elif new_string.endswith('('):
balance = False
else:
count = 0
for paran in new_string:
if paran == '(':
count += 1
else:
count -= 1
if count < 0:
# If there are more closing than opening at any point
# it is unbalanced
balance = False
break
if count == 0:
balance = True
else:
balance = False
if balance:
print('Balanced')
else:
print('Unbalanced')
# -
# ---
# We will be going over some of the 'anatomy' of a script. Again, keep the following in mind:
#
# | Coding Counterpart | Paper Component |
# | :-- | :-- |
# | ~~Variables~~ | ~~Nouns~~ |
# | ~~Operators~~ | ~~Verbs~~ |
# | ~~Lines~~ | ~~Sentences~~ |
# | Functions | Paragraphs |
# | Modules | Sections |
# ---
# ## Control Structures
# Remeber the paragraph picture earlier?
# 
# Let's add some context
# A **Control Structure** is simpler than it sounds, it *controls* things with its *structure*. Easy, right?
# In either of the examples above, how can you tell where one paragraph starts and stops?
#
# Control Structures control how the code it is associated with it works by wrapping it within its structure.
# ---
# ### Explore
# Look at the [Challenge](#Session-II-Challenge) again, see if you can identify the different control structures.
# line 9
# line 27
# line 31
# ---
# ### `if`: The simplest control structure
# `if` statements are meant to check conditions (or heuristics) through the processing of your program. The syntax is as follows:
#
# ```python
# if [not] <some condition that returns a boolean>:
# do_something()
# ```
# The setup
a_var = None
# +
# Checking logic
if a_var:
print('This is True')
# And the opposites
if not a_var:
print('This is False')
# -
# However, since the first check is just the opposite of the second check, we can make this into a morce concise control structure by using the `else` keyward.
if a_var:
print('This is True')
else:
print('This is False')
# But, are the above conditions ***always*** true?
# Comprehensive if-elif-else statement
if a_var:
print('This is True')
elif a_var is False:
print('This is False')
else:
print('This is not a boolean')
# +
# Multiple if statements
f_name = 'Sharkus'
if 'S' in f_name and len(f_name) > 5: # Only goes if BOTH are True
print(f_name)
l_name = 'Merma'
if 'S' in l_name or len(l_name) > 5: # Only goes if EITHER are True
print(l_name)
# -
# #### Exercise
# Write an `if-else` control structure that outputs the name of whomever person in your pair has a longer name
# +
d_name = 'Kari'
n_name = 'Laurie'
if len(d_name) == len(n_name):
print('Twins!!!')
elif len(d_name) > len(n_name):
print(d_name)
else:
print(n_name)
# -
# ---
# ### `for`: The most popular control structure
# The main keyword to remember here is '**iterate**'. If you want to go through something *one (or more) at a time*, you are going to be **iterating** through it. To do that, we use a `for`-loop
# Range
for item in range(9):
print(item)
# Iterate range
for i in range(len(d_name)):
if d_name[i] == n_name[i]:
print('Ah! The element of surprise')
# Iterate a list of names
names = ['kari', 'laurie', 'michael', 'josh']
for blah in names:
for letter in blah:
print(blah, letter)
# Iterate a string
for letter in 'Hello, World':
print(letter)
# #### Exercise
# Codons are sequences of three nucleotides (nt). The nts are 'A', 'C', 'G', 'T'.
#
# Write a nested `for-loop` that outputs all possible combinations of codons.
nts = []
for nucleotide in 'ACTG':
for nucleotide2 in 'ACTG':
for nucleotide3 in 'ACTG':
nts.append((nucleotide, nucleotide2, nucleotide3))
len(nts)
# What, do you think, is the potential downside to `for`-loops?
# ---
# ### `while`: The most improperly used control structure
# A `while` loop is just a `for`-loop that continues until a condition is met.
#
# **Careful**: `while` loops are one of the easiest ways to cause an 'infinite loop'
from random import choice
# while counting
counter = 0
while counter <= 5:
print('The counter is at '+ str(counter))
counter += 1
# while booelan
done_status = False
while not done_status:
flip = choice([0,1])
if flip:
print('Heads, I win')
done_status = flip
else:
print('Tails, you lose')
# Below is an example of a situation that would cause an infinite loop. I purposely made it a non-coding cell so that you don't accidentally run it. Think about why this would run on forever.
# ```python
# # Infinite loop
# while True:
# print('Hello, World')
# ```
# ---
# ### `with`: A context manager
# `with` statements aren't *really* control structures. They are called **context managers**. However, they work in much the same way as a control structure: everything indented underneath it, belongs to it.
#
# The special part about `with` statements is when they are paired with I/O.
with open('./datasets/pokemon.csv') as poke:
for i in range(5):
line = poke.readline().strip().split(',')
print(line)
# What happens is that the file is opened and processed. However, unlike normal, as soon as you exit the `with` statement, it **automatically** closes the file for you.
# ---
# ## Functions
# Think of functions like the paragraphs of a paper. They start with a purpose (definition). They often require some background (arguments). They need evidence & explanation (code). And, they link to the next idea (returned data).
# Additionally, functions are the easiest way to be efficiently lazy.
# #### Exercise
# Q. How do you eat an elephant?<br/>
# A. One bite at a time
# +
elephant = 100
bite = 1
# Eat the elephant
def eat_elephant(an_elephant):
while an_elephant:
print('NomNom')
an_elephant -= 1
print(str(an_elephant) + '% of the elephant is left.')
return None
eat_elephant(elephant)
# -
# #### Function Syntax
# ```python
# def function_name(func_arg1, func_arg2, func_kwarg=None):
# some_function_code = func_arg1 + func_arg2
# if some_function_code > 0:
# return func_kwarg
# else:
# return some_function_code
# ```
# * Each function should have a name. This is declared by using the `def` keyword
# * A function doesn't need to have arguments to work
# * The collection of arguments for a given function is called a **signature**
# * The function works within its own ***scope*** unless it is using something that was passed to it or is global
# * `return` statments exit the function while passing on the data
# * Defining a function does not run a function. It must be called using `([args])` after the function name
# +
# scope example
spam = 42
def spam_alot(spam):
spam = spam * spam
return spam
# -
# ---
# What is the original value of spam?
print(f'Staring value of spam: {spam}')
# ---
# What is the value of spam now?
print(f'spam_alot spam: {spam_alot(spam)}')
# ---
# Care to take a guess?
print(f'Final value of spam: {spam}')
# After seeing this example, can anyone describe scope?
# ---
# ## Challenge
# Use [this](./basic_template.py) template script, and modify it so that will take in 2 arguments (look for the part about `sys.argv`):
# 1. navi_age (int): the age of the navigator
# 2. driver_age (int): the age of the driver
# 3. compare the two and print out the difference of age in years between the two
#
# When you are done, save the script and click on the Launcher tab (or the '+' button above the file explorer) and select 'Terminal'. check to see if it runs by typing:
#
# ```bash
# python basic_template.py 9001 19
# ```
| session3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Jordan-Ireland/DS-Unit-2-Regression-Classification/blob/master/module1/assignment_regression_classification_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="7IXUfiQ2UKj6" colab_type="text"
# Lambda School Data Science, Unit 2: Predictive Modeling
#
# # Regression & Classification, Module 1
#
# ## Assignment
#
# You'll use another **New York City** real estate dataset.
#
# But now you'll **predict how much it costs to rent an apartment**, instead of how much it costs to buy a condo.
#
#
# The data comes from renthop.com, an apartment listing website.
#
# - [X] Look at the data. What's the distribution of the target, `price`, and features such as `longitude` and `latitude`? Remove outliers.
# - [X] After you remove outliers, what is the mean price in your subset of the data?
# - [X] Choose a feature, and plot its relationship with the target.
# - [X] Use scikit-learn for linear regression with one feature. You can follow the [5-step process from Jake VanderPlas](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.html#Basics-of-the-API).
# - [X] Define a function to make new predictions and explain the model coefficient.
# - [X] Organize and comment your code.
#
# > [Do Not Copy-Paste.](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit) You must type each of these exercises in, manually. If you copy and paste, you might as well not even do them. The point of these exercises is to train your hands, your brain, and your mind in how to read, write, and see code. If you copy-paste, you are cheating yourself out of the effectiveness of the lessons.
# + [markdown] id="VwchXNaPgc3j" colab_type="text"
# ##Assignment
# + id="o9eSnDYhUGD7" colab_type="code" outputId="51a2c150-e15f-4afc-edbe-376ec4bd0f4a" colab={"base_uri": "https://localhost:8080/", "height": 102}
# If you're in Colab...
import os, sys
in_colab = 'google.colab' in sys.modules
if in_colab:
# Install required python packages:
# pandas-profiling, version >= 2.0
# plotly, version >= 4.0
# # !pip install --upgrade pandas-profiling plotly
# Pull files from Github repo
os.chdir('/content')
# !git init .
# !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Regression-Classification.git
# !git pull origin master
# Change into directory for module
os.chdir('data')
# + id="ipBYS77PUwNR" colab_type="code" colab={}
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
# + id="4S2wXSrFV_g4" colab_type="code" colab={}
# Read New York City apartment rental listing data
import pandas as pd
pd.options.display.float_format = '{:,.0f}'.format
df = pd.read_csv('apartments/renthop-nyc.csv')
assert df.shape == (49352, 34)
# + id="vpx7D44mOB8Z" colab_type="code" outputId="01898eca-dfcf-4d5b-e6df-ff11f2290086" colab={"base_uri": "https://localhost:8080/", "height": 513}
df.head()
# df.describe()
# + id="XKU0rsJnMWGT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="954a55b9-aebc-4df2-9e1b-58501f428680"
df.bathrooms.value_counts()
df.query('bathrooms > 5')
# + id="0lPD3kAMNSMT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 448} outputId="95c5aa1a-946a-4fa8-b984-16511e754881"
df.bedrooms.value_counts()
df.query('bedrooms > 6')
# + id="dmpHqemNPJbD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 380} outputId="af4e07b1-c544-4a6e-ca11-1639c29574c3"
df.price.value_counts()
df.query('price > 1000000')
# + [markdown] id="FVhjflIGNEOq" colab_type="text"
# 10 bathrooms but only 2 rooms?? Probably a mistake and meant 1, but have to remove it.
#
# TODO: Remove bathrooms > 6, remove bedrooms > 6, remove prices > 50,000 since there are too few to get accurate representation with
# + id="FrogJ3HPPoRe" colab_type="code" colab={}
df = df[(df['bathrooms'] < 5) & (df['price'] < 1000000) & (df['bedrooms'] > 0) & (df['bedrooms'] <= 6) & (df['bathrooms'] > 0) & (df['price'] <= 50000)]
# + id="RgwystsnQNAz" colab_type="code" outputId="a0a47242-f5f8-44a3-9d08-44cd0cf996d1" colab={"base_uri": "https://localhost:8080/", "height": 34}
df.shape
# + id="ICgYyV38Qus0" colab_type="code" outputId="60a80a0a-8690-4ec3-f089-7f3edafd4821" colab={"base_uri": "https://localhost:8080/", "height": 279}
import plotly
import matplotlib.pyplot as plt
df.plot.scatter('bathrooms', 'price');
# + id="-hAn9RvzUv7N" colab_type="code" outputId="05c287cb-e13c-4bf0-9953-ed6d00d3f0a1" colab={"base_uri": "https://localhost:8080/", "height": 279}
df.plot.scatter('bedrooms', 'price');
# + [markdown] id="Bor6_QLNVcI6" colab_type="text"
# Most commonly, the steps in using the Scikit-Learn estimator API are as follows:
#
# 1. Choose a class of model by importing the appropriate estimator class from Scikit-Learn.
# 2. Choose model hyperparameters by instantiating this class with desired values.
# 3. Arrange data into a features matrix and target vector following the discussion above.
# 4. Fit the model to your data by calling the `fit()` method of the model instance.
# 5. Apply the Model to new data: For supervised learning, often we predict labels for unknown data using the `predict()` method.
# + id="VxCYAAb3VY9E" colab_type="code" colab={}
#import sklearn model
from sklearn.linear_model import LinearRegression
#choose model
model = LinearRegression()
# choose features and targets
features = ['bedrooms']
target = 'price'
X = df[features]
y = df[target]
#fit to model
model.fit(X,y)
#predict
y_pred = model.predict(X)
# + id="70jSUGZjWKkz" colab_type="code" outputId="f73056db-2223-4969-d3a9-01f0b805674b" colab={"base_uri": "https://localhost:8080/", "height": 265}
plt.scatter(X, y)
plt.scatter(X, y_pred);
# + id="8OAieUooWUp6" colab_type="code" outputId="24ef2e87-c6e8-4d58-a6a2-c4adb23784e4" colab={"base_uri": "https://localhost:8080/", "height": 34}
model.coef_[0]
##Price increase per bedroom??
# + id="of_a9oC4Wbk-" colab_type="code" outputId="3d076235-3942-4efb-a979-26961aaf1f8a" colab={"base_uri": "https://localhost:8080/", "height": 34}
model.intercept_
##price ??
# + id="rBogAcFNWmB3" colab_type="code" colab={}
def predict_bedrooms(bedrooms):
y_pred = model.predict([[bedrooms]])
estimate = y_pred[0]
coefficient = model.coef_[0]
result = f'${estimate:,.0f} estimated price for {bedrooms:,.0f} bedroom apt'
explanation = f' Each additional bedroom adds about ${coefficient:,.0f} increase in this model.'
return result + explanation
##good to know about f'' instead of .format
# + id="ed5iNXNPW-iw" colab_type="code" outputId="45bd7bed-3e75-45c4-9e0b-b39478bba7bb" colab={"base_uri": "https://localhost:8080/", "height": 34}
predict_bedrooms(7)
# + id="XvMBEvpRXb6x" colab_type="code" outputId="a36d52c4-e332-4157-8bc6-f05f211ea55f" colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["657e2c50407a43e6bf7130213b7657ba", "23385c1cfe1f4ded92a9249478b2e84e", "e627c52a64304849a0dc1ed1bc598425", "9b478f6c1af346f2a321ff60ccce8b50", "cf987bd81d1e47fe9c6498a150eb9e4e", "bde840bc704c453193ba36911bdb6afe"]}
from ipywidgets import interact
# # interact??
interact(predict_bedrooms, bedrooms=(1, 10), step=1);
# + [markdown] id="JyTEvlM8XUZ0" colab_type="text"
# ###Clean Code
# + id="M9UbvHVuXXRH" colab_type="code" outputId="71129923-0519-4659-db3b-30b92caa3678" colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["76f12bfced3b4212a98db001d36cf9d9", "cc59fb3ab2284c3693956ef316fbd21b", "c0def9588ecf4ecf88790417a3236ca9", "4fdf4552ad3a4efe8ede5c2bcc426273", "8547501a1233418d9db3a48237934cb4", "<KEY>"]}
##import what we need
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from ipywidgets import interact
#import data
pd.options.display.float_format = '{:,.0f}'.format
df = pd.read_csv('apartments/renthop-nyc.csv')
assert df.shape == (49352, 34)
#Get rid of any outliers or dirtiness for cleaner results
mask = ((df['bathrooms'] < 5) &
(df['price'] < 1000000) &
(df['bedrooms'] > 0) &
(df['bedrooms'] <= 6) &
(df['bathrooms'] > 0) &
(df['price'] <= 50000))
df = df[mask]
#choose model
model = LinearRegression()
# choose features and targets
features = ['bedrooms']
target = 'price'
X = df[features]
y = df[target]
#fit to model
model.fit(X,y)
#predict w/ function
def predict_bedrooms(bedrooms):
y_pred = model.predict([[bedrooms]])
estimate = y_pred[0]
coefficient = model.coef_[0]
result = f'${estimate:,.0f} estimated price for {bedrooms:,.0f} bedroom apt'
explanation = f' Each additional bedroom adds about ${coefficient:,.0f} increase in this model.'
return result + explanation
interact(predict_bedrooms, bedrooms=(1, 10), step=1);
# + [markdown] id="XSmVpibOgUSr" colab_type="text"
# ##Stretch Goals
# + [markdown] id="RQcKl-TJgjtz" colab_type="text"
# - [X] Do linear regression with two or more features.
# - [ ] Read [The Discovery of Statistical Regression](https://priceonomics.com/the-discovery-of-statistical-regression/)
# - [ ] Read [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapter 2.1: What Is Statistical Learning?
# - [ ] Do the [Plotly Dash](https://dash.plot.ly/) Tutorial, Parts 1 & 2.
# + id="lddd5pATg2os" colab_type="code" colab={}
df.columns
# + id="Iy965979iLGs" colab_type="code" colab={}
df.interest_level.isnull().sum()
# + id="D4NDJwYmgW6P" colab_type="code" outputId="fd6845e3-a215-47ef-9894-1093df2ce777" colab={"base_uri": "https://localhost:8080/", "height": 211, "referenced_widgets": ["d3988449c7014897b9fc8facd3e7db07", "ce1b943d97394d2fab1854469039c2fb", "f3e30f4c2fa245ca8bff5f099dd58786", "fea83dd8de934fb99566af867c237ec1", "1d9ba59e21eb479a81c025adf5c75b19", "0cc199e2443f4f61aa610804b443ba1d", "<KEY>", "<KEY>", "caad1b80e9344817b3beac9d5423c1f2", "<KEY>", "<KEY>", "b5cae518804c4a5384e6d0b4485e658a", "2ea7fb85ee174eb28e18166e4880dc3e", "dc18c5b08a164438be56da48252ffa9b", "01894a45231d4d778e076be701fd7931", "<KEY>", "32be17681ca54a4e91ae542f325ce6ce", "<KEY>"]}
from ipywidgets import interactive
#import data
pd.options.display.float_format = '{:,.0f}'.format
df = pd.read_csv('apartments/renthop-nyc.csv')
assert df.shape == (49352, 34)
#Get rid of any outliers or dirtiness for cleaner results
mask = ((df['bathrooms'] < 5) &
(df['price'] < 1000000) &
(df['bedrooms'] > 0) &
(df['bedrooms'] <= 6) &
(df['bathrooms'] > 0) &
(df['price'] <= 50000))
df = df[mask]
#change from low medium high to 1,2,3
levels = {'low':1,'medium':2,'high':3}
df.interest_level = df.interest_level.map(levels)
#choose model
stretch_model = LinearRegression()
# choose features and targets
features = ['bedrooms','bathrooms','interest_level','elevator','dishwasher']
target = 'price'
X = df[features]
y = df[target]
#fit to model
stretch_model.fit(X,y)
#predict w/ function
def predict_all(bedrooms, bathrooms, interest=1, elevator=0, dishwasher=0):
y_pred = stretch_model.predict([[bedrooms,bathrooms,interest,elevator,dishwasher]])
estimate = y_pred[0]
coefficient = stretch_model.coef_[0]
result = f'${estimate:,.0f} estimated price for {bedrooms:.0f} bed/ {bathrooms:.0f} bath apartment'
print('\n',result)
# print('Welcome to the Ireland NYC Apt rental estimator!\n')
inputs = interactive(predict_all, bedrooms=(1, 10), bathrooms=(1,10), interest=[1,2,3], elevator=[True,False], dishwasher=[True,False]);
display(inputs)
# + id="5PkJBJw6oCPL" colab_type="code" colab={}
| module1/assignment_regression_classification_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# BEGIN DESCR_KINDS
### auxiliary functions for display only ###
def cls_name(obj_or_cls):
cls = type(obj_or_cls)
if cls is type:
cls = obj_or_cls
return cls.__name__.split('.')[-1]
def display(obj):
cls = type(obj)
if cls is type:
return '<class {}>'.format(obj.__name__)
elif cls in [type(None), int]:
return repr(obj)
else:
return '<{} object>'.format(cls_name(obj))
def print_args(name, *args):
pseudo_args = ', '.join(display(x) for x in args)
print('-> {}.__{}__({})'.format(cls_name(args[0]), name, pseudo_args))
### essential classes for this example ###
class Overriding: # <1>
"""a.k.a. data descriptor or enforced descriptor"""
def __get__(self, instance, owner):
print_args('get', self, instance, owner) # <2>
def __set__(self, instance, value):
print_args('set', self, instance, value)
class OverridingNoGet: # <3>
"""an overriding descriptor without ``__get__``"""
def __set__(self, instance, value):
print_args('set', self, instance, value)
class NonOverriding: # <4>
"""a.k.a. non-data or shadowable descriptor"""
def __get__(self, instance, owner):
print_args('get', self, instance, owner)
class Managed: # <5>
over = Overriding()
over_no_get = OverridingNoGet()
non_over = NonOverriding()
def spam(self): # <6>
print('-> Managed.spam({})'.format(display(self)))
# END DESCR_KINDS
# -
# Overriding descriptor (a.k.a. data descriptor or enforced descriptor):
#
# # BEGIN DESCR_KINDS_DEMO1
#
#
obj = Managed() # <1>
obj.over # <2>
Managed.over # <3>
obj.over = 7 # <4>
obj.over # <5>
obj.__dict__['over'] = 8 # <6>
vars(obj) # <7>
obj.over # <8>
# # END DESCR_KINDS_DEMO1
#
# Overriding descriptor without ``__get__``:
#
# (these tests are reproduced below without +ELLIPSIS directives for inclusion in the book;
# look for DESCR_KINDS_DEMO2)
#
#
obj.over_no_get # doctest: +ELLIPSIS
Managed.over_no_get # doctest: +ELLIPSIS
obj.over_no_get = 7
obj.over_no_get # doctest: +ELLIPSIS
obj.__dict__['over_no_get'] = 9
obj.over_no_get
obj.over_no_get = 7
obj.over_no_get
# Non-overriding descriptor (a.k.a. non-data descriptor or shadowable descriptor):
#
# # BEGIN DESCR_KINDS_DEMO3
#
#
obj = Managed()
obj.non_over # <1>
obj.non_over = 7 # <2>
obj.non_over # <3>
Managed.non_over # <4>
del obj.non_over # <5>
obj.non_over # <6>
# # END DESCR_KINDS_DEMO3
#
# No descriptor type survives being overwritten on the class itself:
#
# # BEGIN DESCR_KINDS_DEMO4
#
#
obj = Managed() # <1>
Managed.over = 1 # <2>
Managed.over_no_get = 2
Managed.non_over = 3
obj.over, obj.over_no_get, obj.non_over # <3>
# # END DESCR_KINDS_DEMO4
#
# Methods are non-overriding descriptors:
#
#
obj.spam # doctest: +ELLIPSIS
Managed.spam # doctest: +ELLIPSIS
obj.spam()
Managed.spam()
# ...
# TypeError: spam() missing 1 required positional argument: 'self'
#
Managed.spam(obj)
Managed.spam.__get__(obj) # doctest: +ELLIPSIS
obj.spam.__func__ is Managed.spam
obj.spam = 7
obj.spam
# NOTE: These tests are here because I can't add callouts after +ELLIPSIS
# directives and if doctest runs them without +ELLIPSIS I get test failures.
#
# # BEGIN DESCR_KINDS_DEMO2
#
#
obj.over_no_get # <1>
Managed.over_no_get # <2>
obj.over_no_get = 7 # <3>
obj.over_no_get # <4>
obj.__dict__['over_no_get'] = 9 # <5>
obj.over_no_get # <6>
obj.over_no_get = 7 # <7>
obj.over_no_get # <8>
# # END DESCR_KINDS_DEMO2
#
# Methods are non-overriding descriptors:
#
# # BEGIN DESCR_KINDS_DEMO5
#
#
obj = Managed()
obj.spam # <1>
Managed.spam # <2>
obj.spam = 7 # <3>
obj.spam
# # END DESCR_KINDS_DEMO5
#
#
# +
# BEGIN DESCRIPTORKINDS
def print_args(name, *args): # <1>
cls_name = args[0].__class__.__name__
arg_names = ['self', 'instance', 'owner']
if name == 'set':
arg_names[-1] = 'value'
print('{}.__{}__() invoked with args:'.format(cls_name, name))
for arg_name, value in zip(arg_names, args):
print(' {:8} = {}'.format(arg_name, value))
class Overriding: # <2>
"""a.k.a. data descriptor or enforced descriptor"""
def __get__(self, instance, owner):
print_args('get', self, instance, owner) # <3>
def __set__(self, instance, value):
print_args('set', self, instance, value)
class OverridingNoGet: # <4>
"""an overriding descriptor without ``__get__``"""
def __set__(self, instance, value):
print_args('set', self, instance, value)
class NonOverriding: # <5>
"""a.k.a. non-data or shadowable descriptor"""
def __get__(self, instance, owner):
print_args('get', self, instance, owner)
class Model: # <6>
over = Overriding()
over_no_get = OverridingNoGet()
non_over = NonOverriding()
def spam(self): # <7>
print('Model.spam() invoked with arg:')
print(' self =', self)
#END DESCRIPTORKINDS
# -
# Overriding descriptor (a.k.a. data descriptor or enforced descriptor):
#
#
obj = Model()
obj.over # doctest: +ELLIPSIS
# self = <descriptorkinds.Overriding object at 0x...>
# instance = <descriptorkinds.Model object at 0x...>
# owner = <class 'descriptorkinds.Model'>
#
Model.over # doctest: +ELLIPSIS
# self = <descriptorkinds.Overriding object at 0x...>
# instance = None
# owner = <class 'descriptorkinds.Model'>
#
#
# An overriding descriptor cannot be shadowed by assigning to an instance:
#
#
obj = Model()
obj.over = 7 # doctest: +ELLIPSIS
# self = <descriptorkinds.Overriding object at 0x...>
# instance = <descriptorkinds.Model object at 0x...>
# value = 7
#
obj.over # doctest: +ELLIPSIS
# self = <descriptorkinds.Overriding object at 0x...>
# instance = <descriptorkinds.Model object at 0x...>
# owner = <class 'descriptorkinds.Model'>
#
#
# Not even by poking the attribute into the instance ``__dict__``:
#
#
obj.__dict__['over'] = 8
obj.over # doctest: +ELLIPSIS
# self = <descriptorkinds.Overriding object at 0x...>
# instance = <descriptorkinds.Model object at 0x...>
# owner = <class 'descriptorkinds.Model'>
#
vars(obj)
# Overriding descriptor without ``__get__``:
#
#
obj.over_no_get # doctest: +ELLIPSIS
Model.over_no_get # doctest: +ELLIPSIS
obj.over_no_get = 7 # doctest: +ELLIPSIS
# self = <descriptorkinds.OverridingNoGet object at 0x...>
# instance = <descriptorkinds.Model object at 0x...>
# value = 7
#
obj.over_no_get # doctest: +ELLIPSIS
# Poking the attribute into the instance ``__dict__`` means you can read the new
# value for the attribute, but setting it still triggers ``__set__``:
#
#
obj.__dict__['over_no_get'] = 9
obj.over_no_get
obj.over_no_get = 7 # doctest: +ELLIPSIS
# self = <descriptorkinds.OverridingNoGet object at 0x...>
# instance = <descriptorkinds.Model object at 0x...>
# value = 7
#
obj.over_no_get
# Non-overriding descriptor (a.k.a. non-data descriptor or shadowable descriptor):
#
#
obj = Model()
obj.non_over # doctest: +ELLIPSIS
# self = <descriptorkinds.NonOverriding object at 0x...>
# instance = <descriptorkinds.Model object at 0x...>
# owner = <class 'descriptorkinds.Model'>
#
Model.non_over # doctest: +ELLIPSIS
# self = <descriptorkinds.NonOverriding object at 0x...>
# instance = None
# owner = <class 'descriptorkinds.Model'>
#
#
# A non-overriding descriptor can be shadowed by assigning to an instance:
#
#
obj.non_over = 7
obj.non_over
# Methods are non-over descriptors:
#
#
obj.spam # doctest: +ELLIPSIS
Model.spam # doctest: +ELLIPSIS
obj.spam() # doctest: +ELLIPSIS
# self = <descriptorkinds.Model object at 0x...>
#
obj.spam = 7
obj.spam
# No descriptor type survives being overwritten on the class itself:
#
#
Model.over = 1
obj.over
Model.over_no_get = 2
obj.over_no_get
Model.non_over = 3
obj.non_over
# +
# BEGIN FUNC_DESCRIPTOR_EX
import collections
class Text(collections.UserString):
def __repr__(self):
return 'Text({!r})'.format(self.data)
def reverse(self):
return self[::-1]
# END FUNC_DESCRIPTOR_EX
# -
# # BEGIN FUNC_DESCRIPTOR_DEMO
#
#
word = Text('forward')
word # <1>
word.reverse() # <2>
Text.reverse(Text('backward')) # <3>
type(Text.reverse), type(word.reverse) # <4>
list(map(Text.reverse, ['repaid', (10, 20, 30), Text('stressed')])) # <5>
Text.reverse.__get__(word) # <6>
Text.reverse.__get__(None, Text) # <7>
word.reverse # <8>
word.reverse.__self__ # <9>
word.reverse.__func__ is Text.reverse # <10>
# # END FUNC_DESCRIPTOR_DEMO
#
# +
# # %load ./bulkfood/bulkfood_v3.py
# BEGIN LINEITEM_V3
class Quantity: # <1>
def __init__(self, storage_name):
self.storage_name = storage_name # <2>
def __set__(self, instance, value): # <3>
if value > 0:
instance.__dict__[self.storage_name] = value # <4>
else:
raise ValueError('value must be > 0')
class LineItem:
weight = Quantity('weight') # <5>
price = Quantity('price') # <6>
def __init__(self, description, weight, price): # <7>
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
# END LINEITEM_V3
# -
# A line item for a bulk food order has description, weight and price fields::
#
#
raisins = LineItem('Golden raisins', 10, 6.95)
raisins.weight, raisins.description, raisins.price
# A ``subtotal`` method gives the total price for that line item::
#
#
raisins.subtotal()
# The weight of a ``LineItem`` must be greater than 0::
#
#
raisins.weight = -20
# ...
# ValueError: value must be > 0
#
# Negative or 0 price is not acceptable either::
#
#
truffle = LineItem('White truffle', 100, 0)
# ...
# ValueError: value must be > 0
#
#
# No change was made::
#
#
raisins.weight
# +
# # %load ./bulkfood/bulkfood_v4.py
# BEGIN LINEITEM_V4
class Quantity:
__counter = 0 # <1>
def __init__(self):
cls = self.__class__ # <2>
prefix = cls.__name__
index = cls.__counter
self.storage_name = '_{}#{}'.format(prefix, index) # <3>
cls.__counter += 1 # <4>
def __get__(self, instance, owner): # <5>
return getattr(instance, self.storage_name) # <6>
def __set__(self, instance, value):
if value > 0:
setattr(instance, self.storage_name, value) # <7>
else:
raise ValueError('value must be > 0')
class LineItem:
weight = Quantity() # <8>
price = Quantity()
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
# END LINEITEM_V4
# -
# A line item for a bulk food order has description, weight and price fields::
#
#
raisins = LineItem('Golden raisins', 10, 6.95)
raisins.weight, raisins.description, raisins.price
# A ``subtotal`` method gives the total price for that line item::
#
#
raisins.subtotal()
# The weight of a ``LineItem`` must be greater than 0::
#
#
raisins.weight = -20
# ...
# ValueError: value must be > 0
#
# No change was made::
#
#
raisins.weight
# The value of the attributes managed by the descriptors are stored in
# alternate attributes, created by the descriptors in each ``LineItem``
# instance::
#
#
raisins = LineItem('Golden raisins', 10, 6.95)
dir(raisins) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
# 'description', 'price', 'subtotal', 'weight']
#
getattr(raisins, '_Quantity#0')
getattr(raisins, '_Quantity#1')
# +
# # %load ./bulkfood/bulkfood_v4b.py
# BEGIN LINEITEM_V4B
class Quantity:
__counter = 0
def __init__(self):
cls = self.__class__
prefix = cls.__name__
index = cls.__counter
self.storage_name = '_{}#{}'.format(prefix, index)
cls.__counter += 1
def __get__(self, instance, owner):
if instance is None:
return self # <1>
else:
return getattr(instance, self.storage_name) # <2>
def __set__(self, instance, value):
if value > 0:
setattr(instance, self.storage_name, value)
else:
raise ValueError('value must be > 0')
# END LINEITEM_V4B
class LineItem:
weight = Quantity()
price = Quantity()
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
# -
# A line item for a bulk food order has description, weight and price fields::
#
#
raisins = LineItem('Golden raisins', 10, 6.95)
raisins.weight, raisins.description, raisins.price
# A ``subtotal`` method gives the total price for that line item::
#
#
raisins.subtotal()
# The weight of a ``LineItem`` must be greater than 0::
#
#
raisins.weight = -20
# ...
# ValueError: value must be > 0
#
# No change was made::
#
#
raisins.weight
# The value of the attributes managed by the descriptors are stored in
# alternate attributes, created by the descriptors in each ``LineItem``
# instance::
#
#
raisins = LineItem('Golden raisins', 10, 6.95)
dir(raisins) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
# 'description', 'price', 'subtotal', 'weight']
#
getattr(raisins, '_Quantity#0')
getattr(raisins, '_Quantity#1')
# If the descriptor is accessed in the class, the descriptor object is
# returned:
#
#
LineItem.weight # doctest: +ELLIPSIS
LineItem.weight.storage_name
# +
# # %load ./bulkfood/bulkfood_v4c.py
# BEGIN LINEITEM_V4C
import model_v4c as model # <1>
class LineItem:
weight = model.Quantity() # <2>
price = model.Quantity()
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
# END LINEITEM_V4C
# -
# A line item for a bulk food order has description, weight and price fields::
#
#
raisins = LineItem('Golden raisins', 10, 6.95)
raisins.weight, raisins.description, raisins.price
# A ``subtotal`` method gives the total price for that line item::
#
#
raisins.subtotal()
# The weight of a ``LineItem`` must be greater than 0::
#
#
raisins.weight = -20
# ...
# ValueError: value must be > 0
#
# No change was made::
#
#
raisins.weight
# The value of the attributes managed by the descriptors are stored in
# alternate attributes, created by the descriptors in each ``LineItem``
# instance::
#
#
raisins = LineItem('Gold<NAME>', 10, 6.95)
dir(raisins) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
# 'description', 'price', 'subtotal', 'weight']
#
getattr(raisins, '_Quantity#0')
getattr(raisins, '_Quantity#1')
# If the descriptor is accessed in the class, the descriptor object is
# returned:
#
#
LineItem.weight # doctest: +ELLIPSIS
LineItem.weight.storage_name
# +
# # %load ./bulkfood/bulkfood_v4prop.py
# BEGIN LINEITEM_V4_PROP
def quantity(): # <1>
try:
quantity.counter += 1 # <2>
except AttributeError:
quantity.counter = 0 # <3>
storage_name = '_{}:{}'.format('quantity', quantity.counter) # <4>
def qty_getter(instance): # <5>
return getattr(instance, storage_name)
def qty_setter(instance, value):
if value > 0:
setattr(instance, storage_name, value)
else:
raise ValueError('value must be > 0')
return property(qty_getter, qty_setter)
# END LINEITEM_V4_PROP
class LineItem:
weight = quantity()
price = quantity()
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
# -
# A line item for a bulk food order has description, weight and price fields::
#
#
raisins = LineItem('Gold<NAME>', 10, 6.95)
raisins.weight, raisins.description, raisins.price
# A ``subtotal`` method gives the total price for that line item::
#
#
raisins.subtotal()
# The weight of a ``LineItem`` must be greater than 0::
#
#
raisins.weight = -20
# ...
# ValueError: value must be > 0
#
# No change was made::
#
#
raisins.weight
# The value of the attributes managed by the descriptors are stored in
# alternate attributes, created by the descriptors in each ``LineItem``
# instance::
#
#
raisins = LineItem('Golden raisins', 10, 6.95)
dir(raisins) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
# 'price', 'subtotal', 'weight']
#
getattr(raisins, '_quantity:0')
getattr(raisins, '_quantity:1')
# # %load ./bulkfood/model_v4c.py
# BEGIN MODEL_V4
class Quantity:
__counter = 0
def __init__(self):
cls = self.__class__
prefix = cls.__name__
index = cls.__counter
self.storage_name = '_{}#{}'.format(prefix, index)
cls.__counter += 1
def __get__(self, instance, owner):
if instance is None:
return self
else:
return getattr(instance, self.storage_name)
def __set__(self, instance, value):
if value > 0:
setattr(instance, self.storage_name, value)
else:
raise ValueError('value must be > 0')
# END MODEL_V4
# +
# # %load ./bulkfood/bulkfood_v5.py
# BEGIN LINEITEM_V5
import model_v5 as model # <1>
class LineItem:
description = model.NonBlank() # <2>
weight = model.Quantity()
price = model.Quantity()
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
# END LINEITEM_V5
# -
# A line item for a bulk food order has description, weight and price fields::
#
#
raisins = LineItem('Gold<NAME>', 10, 6.95)
raisins.weight, raisins.description, raisins.price
# A ``subtotal`` method gives the total price for that line item::
#
#
raisins.subtotal()
# The weight of a ``LineItem`` must be greater than 0::
#
#
raisins.weight = -20
# ...
# ValueError: value must be > 0
#
# No change was made::
#
#
raisins.weight
# The value of the attributes managed by the descriptors are stored in
# alternate attributes, created by the descriptors in each ``LineItem``
# instance::
#
#
raisins = LineItem('Gold<NAME>', 10, 6.95)
dir(raisins) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
# 'description', 'price', 'subtotal', 'weight']
#
getattr(raisins, '_Quantity#0')
getattr(raisins, '_NonBlank#0')
# If the descriptor is accessed in the class, the descriptor object is
# returned:
#
#
LineItem.weight # doctest: +ELLIPSIS
LineItem.weight.storage_name
# The `NonBlank` descriptor prevents empty or blank strings to be used
# for the description:
#
#
br_nuts = LineItem('Brazil Nuts', 10, 34.95)
br_nuts.description = ' '
# ...
# ValueError: value cannot be empty or blank
#
void = LineItem('', 1, 1)
# ...
# ValueError: value cannot be empty or blank
#
#
#
| 20-descriptor/bulkfood/auto_chapter20.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Figure 6 - Supervised Clustering R-squared
# +
import xgboost as xgb
import numpy as np
import shap
import matplotlib.pyplot as pl
import scipy.cluster
import pickle
import random
import xgboost
import sklearn.datasets
import shap
def plot_m(m, y, name="", color=""):
m = np.nan_to_num(m)
D = np.vstack([np.sum((m - m[i,:])**2, 1) for i in range(m.shape[0])])
clust = scipy.cluster.hierarchy.complete(D)
group_vals = [[y[i]] for i in range(m.shape[0])]
for i in range(len(clust)):
group_vals.append([])
#print(clust[i,0], clust[i,1])
group_vals[-1].extend(group_vals[int(clust[i,0])])
group_vals[-1].extend(group_vals[int(clust[i,1])])
count = m.shape[0]
counts = [count]
var = 1.0
variances = [var]
total_var = np.var(y)
for i in range(m.shape[0], len(group_vals)):
#print(np.var(group_vals[i]))
count = count - 1
counts.append(count)
clust_ind = i-m.shape[0]
ind1 = int(clust[clust_ind,0])
ind2 = int(clust[clust_ind,1])
var = var - np.var(group_vals[ind1])*len(group_vals[ind1])
var = var - np.var(group_vals[ind2])*len(group_vals[ind2])
var = var + np.var(group_vals[i])*(len(group_vals[ind1])+len(group_vals[ind2]))
variances.append(1-(var/total_var)/m.shape[0])
#print(variances)
#print(np.mean(variances), m.shape[0])
return pl.plot([x for x in counts], np.array(variances), color=color, linewidth=2, label=name+" (AUC = "+str(round(np.mean(variances),2))+")")
# +
module_expression = np.loadtxt("data/module_expression.txt")
cf = lambda x: -1000 if x == b'NA' else x
neuropath = np.loadtxt("data/neuropath.txt", converters={i:cf for i in range(8)})
target = neuropath[:,1]
dtrain = xgb.DMatrix(module_expression, label=target)
dtrain = xgb.DMatrix(module_expression, label=target)
param = { "max_depth": 6, "base_score": np.mean(target), "eta": 0.01}
bst = xgb.train(param, dtrain, 300)
out = bst.predict(xgb.DMatrix(module_expression), pred_contribs=True)
out_path = bst.predict(xgb.DMatrix(module_expression), pred_contribs=True, approx_contribs=True)
out_pred = bst.predict(xgb.DMatrix(module_expression))
pl.close()
pl.rcParams["figure.figsize"] = (4,3)
plot_m(out, out_pred, "SHAP", color="#008BE0")
plot_m(out_path, out_pred, "Path", color="#ff165a")
#plot_m(module_expression, target, "Unsupervised", color="#18C45D")
pl.legend(loc="lower left", frameon=False, prop={'size':10})
pl.ylabel("R^2 (% variance explained)")
pl.xlabel("# groups")
pl.ylim(0,1)
pl.xlim(0,len(target))
pl.gca().invert_xaxis()
#pl.figsize(5,4)
#pl.figure(num=0, figsize=(4, 3))
#pl.savefig("alz2.pdf")
pl.show()
# +
# load the data
raw_train_data = np.genfromtxt("data/adult.data", delimiter=",", dtype=None, autostrip=True, deletechars=["'"])
raw_test_data = np.genfromtxt("data/adult.test", delimiter=",", dtype=None, autostrip=True, deletechars=["'"], skip_header=1)
# extract the category options in the training data
col_names = [
"age", "workclass", "fnlwgt", "education", "education-num",
"marital-status", "occupation", "relationship", "race", "sex", "capital-gain",
"capital-loss", "hours-per-week", "native-country"
]
work_classes = list(set([v[col_names.index("workclass")] for v in raw_train_data]))
education_types = list(set([v[col_names.index("education")] for v in raw_train_data]))
marriage_statuses = list(set([v[col_names.index("marital-status")] for v in raw_train_data]))
occupations = list(set([v[col_names.index("occupation")] for v in raw_train_data]))
relationships = list(set([v[col_names.index("relationship")] for v in raw_train_data]))
races = list(set([v[col_names.index("race")] for v in raw_train_data]))
sexes = list(set([v[col_names.index("sex")] for v in raw_train_data]))
countries = list(set([v[col_names.index("native-country")] for v in raw_train_data]))
types = [work_classes, education_types, marriage_statuses, occupations, relationships, races, sexes, countries]
N = raw_train_data.shape[0]
P = sum(map(len, types)) + 5
def build_matrix(data, P):
N = data.shape[0]
X = np.zeros((N, P))
group_names = []
feature_groups = []
def assign_class(i, offset, name, classes, data_col):
if i == 0:
group_names.append(name)
feature_groups.append(list(range(offset, offset+len(classes))))
j = classes.index(data[i][data_col])
X[i,offset+j] = 1
offset += len(classes)
return offset
def assign_num(i, offset, name, data_col):
if i == 0:
group_names.append(name)
feature_groups.append([offset])
X[i,offset] = data[i][data_col]
offset += 1
return offset
for i in range(N):
offset = 0
offset = assign_num(i, offset, "Age", 0)
offset = assign_class(i, offset, "Work class", work_classes, 1)
offset = assign_class(i, offset, "Education", education_types, 3)
offset = assign_num(i, offset, "Years in school", 4)
offset = assign_class(i, offset, "Marital status", marriage_statuses, 5)
offset = assign_class(i, offset, "Occupation", occupations, 6)
offset = assign_class(i, offset, "Relationship", relationships, 7)
offset = assign_class(i, offset, "Race", races, 8)
offset = assign_class(i, offset, "Sex", sexes, 9)
offset = assign_num(i, offset, "Capital gain", 10)
offset = assign_num(i, offset, "Capital loss", 11)
offset = assign_num(i, offset, "Weekly working hours", 12)
offset = assign_class(i, offset, "Native country", countries, 13)
y = np.array(list(v[-1] == b'>50K' for v in data))
return X,y,group_names,feature_groups
def group_values(x):
out = []
offset = 0
def add_class(offset, class_members):
pos = -1
try:
pos = list(x[offset:offset+len(class_members)]).index(1)
except:
pass
out.append("" if pos == -1 else class_members[pos])
offset += len(class_members)
return offset
out.append(x[0])
offset += 1
offset = add_class(offset, work_classes)
offset = add_class(offset, education_types)
out.append(x[offset])
offset += 1
offset = add_class(offset, marriage_statuses)
offset = add_class(offset, occupations)
offset = add_class(offset, relationships)
offset = add_class(offset, races)
offset = add_class(offset, sexes)
out.append(x[offset])
offset += 1
out.append(x[offset])
offset += 1
out.append(x[offset])
offset += 1
offset = add_class(offset, countries)
return out
# build the training data
train_data,train_labels,group_names,feature_groups = build_matrix(raw_train_data, P)
data_median = shap.DenseData(np.reshape(np.median(train_data,0), (1,train_data.shape[1])), group_names, feature_groups)
# and test data
test_data,test_labels,group_names,feature_groups = build_matrix(raw_test_data, P)
# -
inds = list(range(train_data.shape[0]))
random.shuffle(inds)
# +
module_expression = train_data#np.loadtxt("data/module_expression.txt")
#cognitive_score = np.loadtxt("data/cognitive_score.txt")
#cf = lambda x: -1000 if x == b'NA' else x
#neuropath = np.loadtxt("data/neuropath.txt", converters={i:cf for i in range(8)})
cut_ind = 31000
target = train_labels#neuropath[:,label_ind]
module_expression_train = module_expression[inds[:cut_ind],:]
target_train = target[inds[:cut_ind]]
module_expression_test = module_expression[inds[cut_ind:],:]
target_test = target[inds[cut_ind:]]
dtrain = xgb.DMatrix(module_expression_train, label=target_train)
dtest = xgb.DMatrix(module_expression_test, label=target_test)
param = { "max_depth": 6, "base_score": np.mean(target_train), "eta": 0.1, "colsample_bytree": 0.1}
param = { "max_depth": 6, "base_score": np.mean(target_train), "eta": 0.1, "subsample": 0.5}
bst = xgb.train(param, dtrain, 200)
out = bst.predict(xgb.DMatrix(module_expression_test), pred_contribs=True)
out_path = bst.predict(xgb.DMatrix(module_expression_test), pred_contribs=True, approx_contribs=True)
pred = bst.predict(xgb.DMatrix(module_expression_test))
# -
pl.close()
pl.rcParams["figure.figsize"] = (4,3)
plot_m(out, pred, "SHAP", color="#008BE0")
plot_m(out_path, pred, "Path", color="#ff165a")
#plot_m(module_expression_test_std, pred, "Unsupervised", color="#18C45D")
pl.legend(loc="lower left", frameon=False, prop={'size':10})
pl.ylabel("R^2 (% variance explained)")
pl.xlabel("# groups")
pl.ylim(0,1)
pl.xlim(0,len(target_test))
pl.gca().invert_xaxis()
#pl.figsize(5,4)
#pl.figure(num=0, figsize=(4, 3))
#pl.savefig("census_data2.pdf")
pl.show()
| notebooks/tree_explainer/tree_shap_paper/Figure 6 - Supervised Clustering R-squared.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"></ul></div>
# -
# # Notebook for testing Dataiku plugin functionality
# !ls -l octave-workspace
# !rm -rf octave-workspace
# !rm -rf Detector.mat
# !rm -rf Log.txt
import oct2py as op
o = op.Oct2Py()
octave = o
o.version()
o.restart()
o.eval('clear all')
o.addpath(o.genpath('octave'))
o.eval("opts=acfTrain();")
o.eval("opts.posWinDir = 'images/simple/posWinDir/';")
o.eval("opts.negWinDir = 'images/simple/negWinDir/';")
o.eval("opts.modelDs = [19 19];")
o.eval("opts.modelDsPad = [20 20];")
o.eval("model = acfTrain(opts);")
opts = o.acfTrain()
opts
octave.feval('opts')
ptr.address
opts
octave.eval('x = struct("y", {1, 2}, "z", {3, 4});')
x = octave.pull('x')
t = o.eval('rand(1, 2)', verbose=True)
o.eval('foo = [1, 2];')
ptr = o.get_pointer('foo')
ptr.value
o.eval('opts=acfTrain()', verbose=True)
o.pull('opts')
opts
#
#
#
#
#
#
#
#
op.demo()
| 6_Dataiku_Plugin.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="RMLy2ykZ1Tyu" colab_type="text"
# # 💫 PySpark App
#
# This is my Apache Spark project. Here you will find some stuff that I've done while I was learning about working with Spark and Python.
#
# ---
#
# _You can find [@avcaliani](#) at [GitHub](https://github.com/avcaliani) or [GitLab](https://gitlab.com/avcaliani)._
# + id="mBLPZK6VhlwG" colab_type="code" outputId="033d1ab7-be8b-4dec-d7f3-663b9c83a6d6" colab={"base_uri": "https://localhost:8080/", "height": 34}
# ___ _
# / __|_ __ __ _ _ _| |__
# \__ \ '_ \/ _` | '_| / /
# |___/ .__/\__,_|_| |_\_\
# |_|
#
# !apt-get install openjdk-8-jdk-headless -qq > /dev/null
# !wget -q https://www-us.apache.org/dist/spark/spark-2.4.5/spark-2.4.5-bin-hadoop2.7.tgz
# !tar xf spark-2.4.5-bin-hadoop2.7.tgz
# !pip install -q findspark
# !python -V
import os
import findspark
os.environ['JAVA_HOME'] = '/usr/lib/jvm/java-8-openjdk-amd64'
os.environ['SPARK_HOME'] = '/content/spark-2.4.5-bin-hadoop2.7'
findspark.init()
from pyspark.sql import SparkSession
spark = SparkSession \
.builder\
.master('local[*]') \
.getOrCreate()
# + id="03TJY9szmosT" colab_type="code" colab={}
# _
# __ _ ___ ___ __ _| |___
# / _` / _ \/ _ \/ _` | / -_)
# \__, \___/\___/\__, |_\___|
# |___/ |___/
#
from google.colab import files
files.upload() # data/yob1997.csv
# + id="0jiwgVogpU1x" colab_type="code" colab={}
from pyspark.sql import DataFrame, Column
from pyspark.sql.functions import col, when, udf
# + id="DqKn-2vNpBQs" colab_type="code" colab={}
# __ __ _ _ _ _
# \ \ / /_ _| (_)__| |__ _| |_ ___ _ _
# \ V / _` | | / _` / _` | _/ _ \ '_|
# \_/\__,_|_|_\__,_\__,_|\__\___/_|
def validate(data: DataFrame) -> DataFrame:
return data \
.withColumn('gender_valid', check_gender(data.gender)) \
.withColumn('number_valid', check_number(data.number)) \
.withColumn('is_valid' , is_record_valid(col('gender_valid'), col('number_valid'))) \
.drop('gender_valid', 'number_valid')
def check_gender(gender: Column) -> Column:
return when((gender == "F") | (gender == "M"), True) \
.otherwise(False) \
.cast("boolean")
@udf('boolean')
def check_number(number: str) -> bool:
return number is not None and number.isdigit() and int(number) >= 20000
def is_record_valid(gender_valid: Column, number_valid: Column) -> Column:
return when(gender_valid & number_valid, True) \
.otherwise(False) \
.cast("boolean")
# + id="RzK0lmLWluBa" colab_type="code" outputId="ac740dae-856f-44a0-ad46-2b3a48bfb43c" colab={"base_uri": "https://localhost:8080/", "height": 425}
# __ __ _
# | \/ |__ _(_)_ _
# | |\/| / _` | | ' \
# |_| |_\__,_|_|_||_|
#
data: DataFrame = spark \
.read \
.option('header', 'true') \
.csv('yob1997.csv')
print(f"""
___
| _ \__ ___ __ __
| / _` \ V V /
|_|_\__,_|\_/\_/
Input Records: {data.count()}
""")
data.printSchema()
data.show(5)
# + id="aPu6bA5q6VYj" colab_type="code" outputId="3d38d123-8848-4bb3-85ab-2f3fa709a8e7" colab={"base_uri": "https://localhost:8080/", "height": 544}
# _ _ _
# (_) (_) (_)
#
valid_data = validate(data) \
.filter(col('is_valid') == True) \
.drop('is_valid') \
.orderBy(col('number').desc())
print(f"""
__ __ _ _ _
\ \ / /_ _| (_)__| |
\ V / _` | | / _` |
\_/\__,_|_|_\__,_|
Valid Records: {valid_data.count()}
""")
valid_data.printSchema()
valid_data.show(5)
valid_data.groupBy('gender').count().show()
# + id="m5oyXuj_2_30" colab_type="code" colab={}
# ______ ___
# | |__| | / __| __ ___ _____
# | () | \__ \/ _` \ V / -_)
# |______| |___/\__,_|\_/\___|
#
valid_data\
.write \
.mode('overwrite') \
.option("header", "true") \
.option("delimiter", ',') \
.option("nullValue", None) \
.option("emptyValue", None) \
.csv('yob1997.valid.csv')
# _
# __ _ ___ ___ __ _| |___
# / _` / _ \/ _ \/ _` | / -_)
# \__, \___/\___/\__, |_\___|
# |___/ |___/
#
# !rm output.zip
# !zip -r output.zip yob1997.valid.csv
files.download('output.zip')
| notebooks/pyspark-app.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Amazon SageMaker IP Insights Algorithm - Alternative Approach
#
# Here there! <br>
# Welcome to this notebook and repository. This is an code example for the [blog](https://data-centric-mind.medium.com/ip-insights-model-add-some-11d993c0d860) series. Hope you enjoy this notebook with your coffee (or tea)!
#
# -------
#
# ## Introduction
#
# In the previous [blogs](https://data-centric-mind.medium.com/ip-insights-model-de-simplify-part-i-6e8067227ceb), we explained how the IP Insights model works and how the data were simulated. However, it may also occur to you, hey, we are just trying to seperate two very different distributions.
# For normal traffics, it was drawn from beta distribution and the IPs, or to be more accurate, the ASNs are highly repetitive. However, the malicious logins are all randomly generated. Hmm, looks like we just need a model that can separate beta and random distributions.
#
# I know this is a much simplified problem abstracted by the AWS researchers. It doesn’t make sense to argue here, as it’s like a 🐓 and 🥚 question. However, the point is, whenever presented a problem, sometimes even with solutions or directions, we should still take a step back and ask yourself — have you over complicated the problem? Did we use the model for the sake of using it ?
# Is there an easier approach to take instead of the using embeddings ?
#
# In this notebook, we want to explore the possiblity of creating a simple benchmark model to detect malicious login events with a non-parametric approach.
#
#
# ## Contents
#
# -------
# 1.[ObtainASN](#ObtainASN)
#
# 2.[CreateTestingData](#CreateTestingData)
#
# 3.[RandomnessTest](#RandomnessTest)
#
# 4.[AddingSpice](#AddingSpice)
#
# 5.[Summary](#Summary)
#
#
# ______
#
# #### Tips for AWS free tier users:
#
# 1. check the doc [here](https://aws.amazon.com/free/?all-free-tier.sort-by=item.additionalFields.SortRank&all-free-tier.sort-order=asc&awsf.Free%20Tier%20Types=*all&awsf.Free%20Tier%20Categories=*all) to make sure you only used the services (especially instance) covered by AWS free tier
# 2. Don't repeat the data generation process, as S3 charged by the number of read/write.
# 3. You can start with a much smaller set of users by set NUM_USERS = 100
#
# # ObtainASN
#
# If you recall the [data](https://data-centric-mind.medium.com/ip-insights-model-de-simplify-part-ii-generate-simulated-data-9f3fa4dd3b5e) sampling approach, we first sample ASN for each user with certian patterns, and then an IP will be drawn from each ASN randomly. Therefore, the assumption here is, the ASN from normal users should follow a pattern of some kind.
# <br>
# <br>
# To obtain an accurate ASN informtion, we repeat the data simultion process and save the ASN into our logs. With real time logs and traffic, you can do a ASN lookup with the function [here](#ASNlookup).
from os import path
import generate_data_asn
from generate_data_asn import generate_dataset
import importlib
importlib.reload(generate_data_asn)
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
data_generation_file = "generate_data_asn.py" # Synthetic data generation module
log_file = "ipinsights_web_traffic_asn_added.log"
if not path.exists(data_generation_file):
print("file couldn't find")
# We simulate traffic for 10,000 users. This should yield about 3 million log lines (~700 MB).
NUM_USERS = 10000
generate_dataset(NUM_USERS, log_file)
# I have modified the generate_data.py script to include the asn into each log as the first field. Let's take a look at the log sample we created.
# !wc -l $log_file
# !head -3 $log_file
import pandas as pd
df_raw = pd.read_csv(
log_file,
sep=" ",
na_values="-",
header=None,
names=[
"asn",
"ip_address",
"rcf_id",
"user",
"timestamp",
"time_zone",
"request",
"status",
"size",
"referer",
"user_agent",
],
)
df_raw.head()
# Let's do a quick summary of the #ASN for each user. So as least half of the users have up to 3 asns. But there are some extreme values which are likely the travellers we have defined.
df_raw.groupby(['user']).agg({'asn':'nunique'}).value_counts().describe()
df_raw["timestamp"] = pd.to_datetime(df_raw["timestamp"], format="[%d/%b/%Y:%H:%M:%S")
df_raw["timestamp"].describe(datetime_is_numeric=True)
# Check if they are all in the same timezone
num_time_zones = len(df_raw["time_zone"].unique())
num_time_zones
from datetime import datetime
time_partition = (
datetime(2018, 11, 11, tzinfo=pytz.FixedOffset(0))
if num_time_zones > 1
else datetime(2018, 11, 11)
)
# +
## create model training and testing data
df = df_raw[["user", "ip_address", "timestamp", "asn"]]
train_df = df[df["timestamp"] <= time_partition]
test_df = df[df["timestamp"] > time_partition]
# Shuffle train data
train_df = train_df.sample(frac=1)
# -
train_df.shape
test_df.shape
# # CreateTestingData
#
# Next, let's create a sample with simulated bad traffic added. Therefore, we can use this data set to verify test if our approach would work.
def create_test_case(train_df, test_df, num_samples, attack_freq):
"""Creates a test case from provided train and test data frames.
This generates test case for accounts that are both in training and testing data sets.
:param train_df: (panda.DataFrame with columns ['user', 'ip_address']) training DataFrame
:param test_df: (panda.DataFrame with columns ['user', 'ip_address']) testing DataFrame
:param num_samples: (int) number of test samples to use
:param attack_freq: (float) the ratio of negative_samples:positive_samples to generate for test case
:return: DataFrame with both good and bad traffic, with labels
"""
# Get all possible accounts. The IP Insights model can only make predictions on users it has seen in training
# Therefore, filter the test dataset for unseen accounts, as their results will not mean anything.
valid_accounts = set(train_df["user"])
valid_test_df = test_df[test_df["user"].isin(valid_accounts)]
good_traffic = valid_test_df.sample(num_samples, replace=False)
good_traffic = good_traffic[["user", "ip_address", "asn"]]
good_traffic["label"] = 0
# Generate malicious traffic
num_bad_traffic = int(num_samples * attack_freq)
bad_traffic_accounts = np.random.choice(
list(valid_accounts), size=num_bad_traffic, replace=True
)
# bad_traffic_ips = [draw_ip() for i in range(num_bad_traffic)]
# bad_traffic = pd.DataFrame({"user": bad_traffic_accounts, "ip_address": bad_traffic_ips})
# bad_traffic["label"] = 1
bad_traffic_ips = [draw_ip() for i in range(num_bad_traffic)]
bad_traffic = pd.DataFrame({"user": bad_traffic_accounts, "ip_address": [t[1] for t in bad_traffic_ips], "asn": [t[0] for t in bad_traffic_ips]})
bad_traffic["label"] = 1
# All traffic labels are: 0 for good traffic; 1 for bad traffic.
all_traffic = good_traffic.append(bad_traffic)
return all_traffic
NUM_SAMPLES = 100000
test_case = create_test_case(train_df, test_df, num_samples=NUM_SAMPLES, attack_freq=1)
test_case.head()
test_case['label'].value_counts()
# Tada! <br>
# We have create a balanced data set with 200, 000 login entries. Lable 0 means it's a normal login and label 1 means it's a malicious login.
#
# Next, let's take a look up how and the key feature here, how many times each user login from the same ASN, aka, count of login group by user and ASN.
#
# --------
# Quick note: I am a big fan of this book - <data storytelling>, where some principlies of how to present and visualize your data is summarized in detail. Feel free to take a look at it. In my day to day, I also follow the same princeple to tell the stories about my data. The sample code of the visualization style can be found in this repo.
freq_count_good = test_case[test_case['label'] == 0].groupby('user').agg({'asn': 'count', 'label':'max'}).reset_index()
freq_count_bad = test_case[test_case['label'] == 1].groupby('user').agg({'asn': 'count', 'label':'max'}).reset_index()
good_freq = test_case[test_case['label'] == 0].groupby('user').agg({'asn': 'count'})['asn'].to_list()
bad_freq = test_case[test_case['label'] == 1].groupby('user').agg({'asn': 'count'})['asn'].to_list()
test_case[test_case['label'] == 1].groupby('user').agg({'asn': 'count'}).describe()
freq_count.head()
import numpy as np
import matplotlib
from matplotlib import transforms, pyplot as plt
import seaborn as sns
# %matplotlib inline
# plt.ioff()
fig, ax1 = plt.subplots(figsize=(8.2, 6.09), dpi=150);
_=fig.subplots_adjust(left=0.104, right=0.768, top=0.751, bottom=0.187);
_=sns.distplot(freq_count_good['asn'], hist = False, kde = True, label = 'good', ax = ax1);
_=sns.distplot(freq_count_bad['asn'], hist = False, kde = True,label = 'bad', ax = ax1);
_=ax1.set_xlim([0, 80]);
_=ax1.set_ylim([0, 0.15]);
# _=plt.setp(ax1);
_=plt.xticks(rotation=45);
_=ax1.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2),
fancybox=True, shadow=True, ncol=5);
_=ax1.spines['top'].set_visible(False);
_=ax1.spines['right'].set_visible(False);
# pass
# The distribution actually met our expectations that the good one follows beta and the bad one looks quite normal with a bell shape.
#
# # RandomnessTest
#
#
# First, let's try to run a rundomness test on the two groups before we further test on each user's events. Here we use the popular run test approach. To learn about it, please check out this [blog](https://data-centric-mind.medium.com/randomness-test-run-test-8333a8b956a1).
from statsmodels.sandbox.stats.runs import runstest_1samp
runstest_1samp(freq_count_good['asn'], cutoff = 'median')
runstest_1samp(freq_count_bad['asn'], cutoff = 'median')
# The first returned value from is the z-stats and the second value in the tuple is the p-value. We can determinte if the data is randomly distributed based on the p-value and an ideal siginificance level. The null hypothesis is that the data is randomly distributed. If the p-value is less than the selected power, we can reject the null and we have reason to believe the data is not randomly genereated.
#
# Well, it looks like the first good count can be considered as rejection to null hypothesis (random distribution) at a siginificance level of 0.105. While for second test on the bad samples, we do not have enough evidence to reject it as a random distribution.
#
# Alright, it seems we can seperate the good and bad traffic based on the number of logins each user has on each ASN.
#
# ________
# ## AddingSpice
#
# Making sense so far? OK, now let's think about how to make this more practical. I will skip most of the detailed demo for this session, as otherwise the notebook will go endless long. However, I will provide the key thoughts and functions, you are highly encouraged to try it out yourself.
#
# 1. data streams
# In reality, you recieve one login & IP instead of a sequence of logins. How can you test the randonmess in this case? A possible solution is to model n historical login events with the current new login and run the randomness test.
#
# You can use the two functions provided below to complete the test.
#
def check_random(df, asn_col = 'asn'):
""" Run randomness test on asn_col of the provided dataframe df.
"""
# when sample size is small, remember to use correction on the data.
s = [x for x in df[asn_col].tolist() if x is not None]
v = runstest_1samp(s, cutoff = 'median' ,correction=True)
return (v)
def get_user_p(df, label, group_col = 'user'):
""" Function return the z and p value for randomness test for each group of observations in the df
:para df: dataframe that contains all the observations
:para label: str, the column which used as labels we want to seperate
:return dataframe with randomtest zscore and pvalue added.
"""
dfg = df[df['label'] == label].groupby(group_col).apply(lambda x : check_random(x))
df_random0 = pd.DataFrame(dfg)
df_random0.columns = ['run_test']
df_random0['zscore'] = df_random0['run_test'].apply(lambda x: round(x[0], 3))
df_random0['pvalue'] = df_random0['run_test'].apply(lambda x: round(x[1],3))
return df_random0
# #### ASNlookup
#
# 2. ANSlookup
#
# When your data and IP are not sampled from a simulation, you need to a different approach to obtain the ASN info. You can use the code provided below to do a ASN lookup.
# install the package and needed data
# !apt-get install python-pip python-dev -y build-essential
# !apt-get update && apt-get install -y build-essential
# !python -m pip install pyasn
# !pyasn_util_download.py --latest
# !pyasn_util_convert.py --single rib.20220123.1200.bz2 20220123.dat
import pyasn
asndb = pyasn.pyasn('20220123.dat')
df['asn'] = ''
for idx, row in df.iterrows():
df.at[idx, 'asn'] = asndb.lookup(row['ip_address'])[0]
# 3. Add noise
#
# Remember what we did in the very first [notebook](https://github.com/avoca-dorable/aws_ipinsights/blob/main/ipinsights-v1-add-noise.ipynb)? We added noise into our data because you are not likely having a perfect dataset in reality. Therefore, we can use the similar approach adding some noisy logins into our good sample and see it our approach still work. The modified function can be used were provided below.
# +
import numpy as np
from generate_data_asn import draw_ip
def add_noise(train_df, user_perc, noise_per_account):
"""
This is a modified function compared to the original one.
Creates a test case from provided train and test data frames.
This generates test case for accounts that are both in training and testing data sets.
:param train_df: (panda.DataFrame with columns ['user', 'ip_address']) training DataFrame
:param user_perc: (float, [0,1]) percentage of users have noisy IPs
:param num_samples: (int) number of test samples to use
:param noise_pert_account: (int) number of random logins to added to each account
:return: DataFrame with both good and bad traffic, with labels
"""
# Get all possible accounts. The IP Insights model can only make predictions on users it has seen in training
# Therefore, filter the test dataset for unseen accounts, as their results will not mean anything.
valid_accounts = set(train_df["user"])
# Generate malicious traffic
num_bad_account = int(len(valid_accounts) * user_perc )
bad_traffic_accounts = np.random.choice(
list(valid_accounts), size=num_bad_account, replace=False
)
bad_traffic_ips = [draw_ip() for i in range(num_bad_account * noise_per_account)]
bad_traffic = pd.DataFrame({"user": list(bad_traffic_accounts) * noise_per_account, "ip_address": [t[1] for t in bad_traffic_ips], "asn": [t[0] for t in bad_traffic_ips]})
bad_traffic["label"] = 1
# All traffic labels are: 0 for good traffic; 1 for bad traffic.
return bad_traffic
# -
noise_df = add_noise(train_df, user_perc = 0.005, noise_per_account = 20)
noise_df.head()
noise_train = pd.concat([noise_df[['user', 'ip_address']], train_df], ignore_index = True)
# # Summary
# In this post, we covered an alternative approach of identifying abnormal login with unsupervised learning approach. <br>
#
# The conclusion is, yes, we can seperate the two groups of activities based on a randomness test.
#
# However, there are some ideas that I brought up but were not fully tested that would impact the real life success of this simple approach. It doesn't make much sense to further test these ideas on the simulated data, but if you have a production model using this IP Insights model, feel free to give it a try and let me know how it goes.1 I may come back to this test later when I find a good data set to demonstrate my point.
#
# Anyways, thank you for reading the story hope some of the contents are beneficial to you.
#
# See you in the next post!
| alter_approach.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import re
example_str = """|R1D33= |R1D33score= |R1D33flag= |R1D33win=
|R1D34= |R1D34score= |R1D34flag= |R1D34win=
|R1D35= |R1D35score= |R1D35flag= |R1D35win=
|R1D36= |R1D36score= |R1D36flag= |R1D36win=
|R1D37= |R1D37score= |R1D37flag= |R1D37win=
|R1D38= |R1D38score= |R1D38flag= |R1D38win=
|R1D39= |R1D39score= |R1D39flag= |R1D39win=
|R1D40= |R1D40score= |R1D40flag= |R1D40win=
|R1D41= |R1D41score= |R1D41flag= |R1D41win=
|R1D42= |R1D42score= |R1D42flag= |R1D42win=
|R1D43= |R1D43score= |R1D43flag= |R1D43win=
|R1D44= |R1D44score= |R1D44flag= |R1D44win=
|R1D45= |R1D45score= |R1D45flag= |R1D45win=
|R1D46= |R1D46score= |R1D46flag= |R1D46win=
|R1D47= |R1D47score= |R1D47flag= |R1D47win=
|R1D48= |R1D48score= |R1D48flag= |R1D48win=
|R2D1= |R2D1score= |R2D1flag= |R2D1win=
|R2W17= |R2W17score= |R2W17flag= |R2W17win=
|R2D2= |R2D2score= |R2D2flag= |R2D2win=
|R2W18= |R2W18score= |R2W18flag= |R2W18win=
|R2D3= |R2D3score= |R2D3flag= |R2D3win=
|R2W19= |R2W19score= |R2W19flag= |R2W19win=
|R2D4= |R2D4score= |R2D4flag= |R2D4win=
|R2W20= |R2W20score= |R2W20flag= |R2W20win=
|R2D5= |R2D5score= |R2D5flag= |R2D5win=
|R2W21= |R2W21score= |R2W21flag= |R2W21win=
|R2D6= |R2D6score= |R2D6flag= |R2D6win=
|R2W22= |R2W22score= |R2W22flag= |R2W22win=
|R2D7= |R2D7score= |R2D7flag= |R2D7win=
|R2W23= |R2W23score= |R2W23flag= |R2W23win=
|R2D8= |R2D8score= |R2D8flag= |R2D8win=
|R2W24= |R2W24score= |R2W24flag= |R2W24win=
|R3W9= |R3W9score= |R3W9flag= |R3W9win=
|R3W10= |R3W10score= |R3W10flag= |R3W10win=
|R3W11= |R3W11score= |R3W11flag= |R3W11win=
|R3W12= |R3W12score= |R3W12flag= |R3W12win=
|R3W13= |R3W13score= |R3W13flag= |R3W13win=
|R3W14= |R3W14score= |R3W14flag= |R3W14win=
|R3W15= |R3W15score= |R3W15flag= |R3W15win=
|R3W16= |R3W16score= |R3W16flag= |R3W16win=
|R4D1= |R4D1score= |R4D1flag= |R4D1win=
|R4W5= |R4W5score= |R4W5flag= |R4W5win=
|R4D2= |R4D2score= |R4D2flag= |R4D2win=
|R4W6= |R4W6score= |R4W6flag= |R4W6win=
|R4D3= |R4D3score= |R4D3flag= |R4D3win=
|R4W7= |R4W7score= |R4W7flag= |R4W7win=
|R4D4= |R4D4score= |R4D4flag= |R4D4win=
|R4W8= |R4W8score= |R4W8flag= |R4W8win=
|R5W3= |R5W3score= |R5W3flag= |R5W3win=
|R5W4= |R5W4score= |R5W4flag= |R5W4win=
|R5W5= |R5W5score= |R5W5flag= |R5W5win=
|R5W6= |R5W6score= |R5W6flag= |R5W6win=
|R6W1= |R6W1score= |R6W1flag= |R6W1win=
|R6D1= |R6D1score= |R6D1flag= |R6D1win=
|R6W2= |R6W2score= |R6W2flag= |R6W2win=
|R6D2= |R6D2score= |R6D2flag= |R6D2win="""
example_str = """|R1D1= |R1D1score= |R1D1flag= |R1D1win=
|R1D2= |R1D2score= |R1D2flag= |R1D2win=
|R1D3= |R1D3score= |R1D3flag= |R1D3win=
|R1D4= |R1D4score= |R1D4flag= |R1D4win=
|R1D5= |R1D5score= |R1D5flag= |R1D5win=
|R1D6= |R1D6score= |R1D6flag= |R1D6win=
|R1D7= |R1D7score= |R1D7flag= |R1D7win=
|R1D8= |R1D8score= |R1D8flag= |R1D8win=
|R1D9= |R1D9score= |R1D9flag= |R1D9win=
|R1D10= |R1D10score= |R1D10flag= |R1D10win=
|R1D11= |R1D11score= |R1D11flag= |R1D11win=
|R1D12= |R1D12score= |R1D12flag= |R1D12win=
|R1D13= |R1D13score= |R1D13flag= |R1D13win=
|R1D14= |R1D14score= |R1D14flag= |R1D14win=
|R1D15= |R1D15score= |R1D15flag= |R1D15win=
|R1D16= |R1D16score= |R1D16flag= |R1D16win=
|R1D17= |R1D17score= |R1D17flag= |R1D17win=
|R1D18= |R1D18score= |R1D18flag= |R1D18win=
|R1D19= |R1D19score= |R1D19flag= |R1D19win=
|R1D20= |R1D20score= |R1D20flag= |R1D20win=
|R1D21= |R1D21score= |R1D21flag= |R1D21win=
|R1D22= |R1D22score= |R1D22flag= |R1D22win=
|R1D23= |R1D23score= |R1D23flag= |R1D23win=
|R1D24= |R1D24score= |R1D24flag= |R1D24win=
|R1D25= |R1D25score= |R1D25flag= |R1D25win=
|R1D26= |R1D26score= |R1D26flag= |R1D26win=
|R1D27= |R1D27score= |R1D27flag= |R1D27win=
|R1D28= |R1D28score= |R1D28flag= |R1D28win=
|R1D29= |R1D29score= |R1D29flag= |R1D29win=
|R1D30= |R1D30score= |R1D30flag= |R1D30win=
|R1D31= |R1D31score= |R1D31flag= |R1D31win=
|R1D32= |R1D32score= |R1D32flag= |R1D32win=
|R2W1= |R2W1score= |R2W1flag= |R2W1win=
|R2W2= |R2W2score= |R2W2flag= |R2W2win=
|R2W3= |R2W3score= |R2W3flag= |R2W3win=
|R2W4= |R2W4score= |R2W4flag= |R2W4win=
|R2W5= |R2W5score= |R2W5flag= |R2W5win=
|R2W6= |R2W6score= |R2W6flag= |R2W6win=
|R2W7= |R2W7score= |R2W7flag= |R2W7win=
|R2W8= |R2W8score= |R2W8flag= |R2W8win=
|R2W9= |R2W9score= |R2W9flag= |R2W9win=
|R2W10= |R2W10score= |R2W10flag= |R2W10win=
|R2W11= |R2W11score= |R2W11flag= |R2W11win=
|R2W12= |R2W12score= |R2W12flag= |R2W12win=
|R2W13= |R2W13score= |R2W13flag= |R2W13win=
|R2W14= |R2W14score= |R2W14flag= |R2W14win=
|R2W15= |R2W15score= |R2W15flag= |R2W15win=
|R2W16= |R2W16score= |R2W16flag= |R2W16win=
|R3W1= |R3W1score= |R3W1flag= |R3W1win=
|R3W2= |R3W2score= |R3W2flag= |R3W2win=
|R3W3= |R3W3score= |R3W3flag= |R3W3win=
|R3W4= |R3W4score= |R3W4flag= |R3W4win=
|R3W5= |R3W5score= |R3W5flag= |R3W5win=
|R3W6= |R3W6score= |R3W6flag= |R3W6win=
|R3W7= |R3W7score= |R3W7flag= |R3W7win=
|R3W8= |R3W8score= |R3W8flag= |R3W8win=
|R4W1= |R4W1score= |R4W1flag= |R4W1win=
|R4W2= |R4W2score= |R4W2flag= |R4W2win=
|R4W3= |R4W3score= |R4W3flag= |R4W3win=
|R4W4= |R4W4score= |R4W4flag= |R4W4win="""
example_str = """|R1D1= |R1D1race= |R1D1flag= |R1D1score= |R1D1win=
|R1D2= |R1D2race= |R1D2flag= |R1D2score= |R1D2win=
|R1D3= |R1D3race= |R1D3flag= |R1D3score= |R1D3win=
|R1D4= |R1D4race= |R1D4flag= |R1D4score= |R1D4win=
|R1D5= |R1D5race= |R1D5flag= |R1D5score= |R1D5win=
|R1D6= |R1D6race= |R1D6flag= |R1D6score= |R1D6win=
|R1D7= |R1D7race= |R1D7flag= |R1D7score= |R1D7win=
|R1D8= |R1D8race= |R1D8flag= |R1D8score= |R1D8win=
|R1D9= |R1D9race= |R1D9flag= |R1D9score= |R1D9win=
|R1D10= |R1D10race= |R1D10flag= |R1D10score= |R1D10win=
|R1D11= |R1D11race= |R1D11flag= |R1D11score= |R1D11win=
|R1D12= |R1D12race= |R1D12flag= |R1D12score= |R1D12win=
|R1D13= |R1D13race= |R1D13flag= |R1D13score= |R1D13win=
|R1D14= |R1D14race= |R1D14flag= |R1D14score= |R1D14win=
|R1D15= |R1D15race= |R1D15flag= |R1D15score= |R1D15win=
|R1D16= |R1D16race= |R1D16flag= |R1D16score= |R1D16win=
|R1D17= |R1D17race= |R1D17flag= |R1D17score= |R1D17win=
|R1D18= |R1D18race= |R1D18flag= |R1D18score= |R1D18win=
|R1D19= |R1D19race= |R1D19flag= |R1D19score= |R1D19win=
|R1D20= |R1D20race= |R1D20flag= |R1D20score= |R1D20win=
|R1D21= |R1D21race= |R1D21flag= |R1D21score= |R1D21win=
|R1D22= |R1D22race= |R1D22flag= |R1D22score= |R1D22win=
|R1D23= |R1D23race= |R1D23flag= |R1D23score= |R1D23win=
|R1D24= |R1D24race= |R1D24flag= |R1D24score= |R1D24win=
|R1D25= |R1D25race= |R1D25flag= |R1D25score= |R1D25win=
|R1D26= |R1D26race= |R1D26flag= |R1D26score= |R1D26win=
|R1D27= |R1D27race= |R1D27flag= |R1D27score= |R1D27win=
|R1D28= |R1D28race= |R1D28flag= |R1D28score= |R1D28win=
|R1D29= |R1D29race= |R1D29flag= |R1D29score= |R1D29win=
|R1D30= |R1D30race= |R1D30flag= |R1D30score= |R1D30win=
|R1D31= |R1D31race= |R1D31flag= |R1D31score= |R1D31win=
|R1D32= |R1D32race= |R1D32flag= |R1D32score= |R1D32win=
|R1D33= |R1D33race= |R1D33flag= |R1D33score= |R1D33win=
|R1D34= |R1D34race= |R1D34flag= |R1D34score= |R1D34win=
|R1D35= |R1D35race= |R1D35flag= |R1D35score= |R1D35win=
|R1D36= |R1D36race= |R1D36flag= |R1D36score= |R1D36win=
|R1D37= |R1D37race= |R1D37flag= |R1D37score= |R1D37win=
|R1D38= |R1D38race= |R1D38flag= |R1D38score= |R1D38win=
|R1D39= |R1D39race= |R1D39flag= |R1D39score= |R1D39win=
|R1D40= |R1D40race= |R1D40flag= |R1D40score= |R1D40win=
|R1D41= |R1D41race= |R1D41flag= |R1D41score= |R1D41win=
|R1D42= |R1D42race= |R1D42flag= |R1D42score= |R1D42win=
|R1D43= |R1D43race= |R1D43flag= |R1D43score= |R1D43win=
|R1D44= |R1D44race= |R1D44flag= |R1D44score= |R1D44win=
|R1D45= |R1D45race= |R1D45flag= |R1D45score= |R1D45win=
|R1D46= |R1D46race= |R1D46flag= |R1D46score= |R1D46win=
|R1D47= |R1D47race= |R1D47flag= |R1D47score= |R1D47win=
|R1D48= |R1D48race= |R1D48flag= |R1D48score= |R1D48win=
|R1D49= |R1D49race= |R1D49flag= |R1D49score= |R1D49win=
|R1D50= |R1D50race= |R1D50flag= |R1D50score= |R1D50win=
|R1D51= |R1D51race= |R1D51flag= |R1D51score= |R1D51win=
|R1D52= |R1D52race= |R1D52flag= |R1D52score= |R1D52win=
|R1D53= |R1D53race= |R1D53flag= |R1D53score= |R1D53win=
|R1D54= |R1D54race= |R1D54flag= |R1D54score= |R1D54win=
|R1D55= |R1D55race= |R1D55flag= |R1D55score= |R1D55win=
|R1D56= |R1D56race= |R1D56flag= |R1D56score= |R1D56win=
|R1D57= |R1D57race= |R1D57flag= |R1D57score= |R1D57win=
|R1D58= |R1D58race= |R1D58flag= |R1D58score= |R1D58win=
|R1D59= |R1D59race= |R1D59flag= |R1D59score= |R1D59win=
|R1D60= |R1D60race= |R1D60flag= |R1D60score= |R1D60win=
|R1D61= |R1D61race= |R1D61flag= |R1D61score= |R1D61win=
|R1D62= |R1D62race= |R1D62flag= |R1D62score= |R1D62win=
|R1D63= |R1D63race= |R1D63flag= |R1D63score= |R1D63win=
|R1D64= |R1D64race= |R1D64flag= |R1D64score= |R1D64win=
|R2W1= |R2W1race= |R2W1flag= |R2W1score= |R2W1win=
|R2W2= |R2W2race= |R2W2flag= |R2W2score= |R2W2win=
|R2W3= |R2W3race= |R2W3flag= |R2W3score= |R2W3win=
|R2W4= |R2W4race= |R2W4flag= |R2W4score= |R2W4win=
|R2W5= |R2W5race= |R2W5flag= |R2W5score= |R2W5win=
|R2W6= |R2W6race= |R2W6flag= |R2W6score= |R2W6win=
|R2W7= |R2W7race= |R2W7flag= |R2W7score= |R2W7win=
|R2W8= |R2W8race= |R2W8flag= |R2W8score= |R2W8win=
|R2W9= |R2W9race= |R2W9flag= |R2W9score= |R2W9win=
|R2W10= |R2W10race= |R2W10flag= |R2W10score= |R2W10win=
|R2W11= |R2W11race= |R2W11flag= |R2W11score= |R2W11win=
|R2W12= |R2W12race= |R2W12flag= |R2W12score= |R2W12win=
|R2W13= |R2W13race= |R2W13flag= |R2W13score= |R2W13win=
|R2W14= |R2W14race= |R2W14flag= |R2W14score= |R2W14win=
|R2W15= |R2W15race= |R2W15flag= |R2W15score= |R2W15win=
|R2W16= |R2W16race= |R2W16flag= |R2W16score= |R2W16win=
|R2W17= |R2W17race= |R2W17flag= |R2W17score= |R2W17win=
|R2W18= |R2W18race= |R2W18flag= |R2W18score= |R2W18win=
|R2W19= |R2W19race= |R2W19flag= |R2W19score= |R2W19win=
|R2W20= |R2W20race= |R2W20flag= |R2W20score= |R2W20win=
|R2W21= |R2W21race= |R2W21flag= |R2W21score= |R2W21win=
|R2W22= |R2W22race= |R2W22flag= |R2W22score= |R2W22win=
|R2W23= |R2W23race= |R2W23flag= |R2W23score= |R2W23win=
|R2W24= |R2W24race= |R2W24flag= |R2W24score= |R2W24win=
|R2W25= |R2W25race= |R2W25flag= |R2W25score= |R2W25win=
|R2W26= |R2W26race= |R2W26flag= |R2W26score= |R2W26win=
|R2W27= |R2W27race= |R2W27flag= |R2W27score= |R2W27win=
|R2W28= |R2W28race= |R2W28flag= |R2W28score= |R2W28win=
|R2W29= |R2W29race= |R2W29flag= |R2W29score= |R2W29win=
|R2W30= |R2W30race= |R2W30flag= |R2W30score= |R2W30win=
|R2W31= |R2W31race= |R2W31flag= |R2W31score= |R2W31win=
|R2W32= |R2W32race= |R2W32flag= |R2W32score= |R2W32win=
|R4W1= |R4W1race= |R4W1flag= |R4W1score= |R4W1win=
|R4W2= |R4W2race= |R4W2flag= |R4W2score= |R4W2win=
|R4W3= |R4W3race= |R4W3flag= |R4W3score= |R4W3win=
|R4W4= |R4W4race= |R4W4flag= |R4W4score= |R4W4win=
|R4W5= |R4W5race= |R4W5flag= |R4W5score= |R4W5win=
|R4W6= |R4W6race= |R4W6flag= |R4W6score= |R4W6win=
|R4W7= |R4W7race= |R4W7flag= |R4W7score= |R4W7win=
|R4W8= |R4W8race= |R4W8flag= |R4W8score= |R4W8win=
|R4W6= |R4W9race= |R4W9flag= |R4W9score= |R4W9win=
|R4W10= |R4W10race= |R4W10flag= |R4W10score= |R4W10win=
|R4W11= |R4W11race= |R4W11flag= |R4W11score= |R4W11win=
|R4W12= |R4W12race= |R4W12flag= |R4W12score= |R4W12win=
|R4W13= |R4W13race= |R4W13flag= |R4W13score= |R4W13win=
|R4W14= |R4W14race= |R4W14flag= |R4W14score= |R4W14win=
|R4W15= |R4W15race= |R4W15flag= |R4W15score= |R4W15win=
|R4W16= |R4W16race= |R4W16flag= |R4W16score= |R4W16win=
|R6W1= |R6W1race= |R6W1flag= |R6W1score= |R6W1win=
|R6W2= |R6W2race= |R6W2flag= |R6W2score= |R6W2win=
|R6W3= |R6W3race= |R6W3flag= |R6W3score= |R6W3win=
|R6W4= |R6W4race= |R6W4flag= |R6W4score= |R6W4win=
|R6W5= |R6W5race= |R6W5flag= |R6W5score= |R6W5win=
|R6W6= |R6W6race= |R6W6flag= |R6W6score= |R6W6win=
|R6W7= |R6W7race= |R6W7flag= |R6W7score= |R6W7win=
|R6W8= |R6W8race= |R6W8flag= |R6W8score= |R6W8win="""
example_str = """|R1D65= |R1D65race= |R1D65flag= |R1D65score= |R1D65win=
|R1D66= |R1D66race= |R1D66flag= |R1D66score= |R1D66win=
|R1D67= |R1D67race= |R1D67flag= |R1D67score= |R1D67win=
|R1D68= |R1D68race= |R1D68flag= |R1D68score= |R1D68win=
|R1D69= |R1D69race= |R1D69flag= |R1D69score= |R1D69win=
|R1D70= |R1D70race= |R1D70flag= |R1D70score= |R1D70win=
|R1D71= |R1D71race= |R1D71flag= |R1D71score= |R1D71win=
|R1D72= |R1D72race= |R1D72flag= |R1D72score= |R1D72win=
|R1D73= |R1D73race= |R1D73flag= |R1D73score= |R1D73win=
|R1D74= |R1D74race= |R1D74flag= |R1D74score= |R1D74win=
|R1D75= |R1D75race= |R1D75flag= |R1D75score= |R1D75win=
|R1D76= |R1D76race= |R1D76flag= |R1D76score= |R1D76win=
|R1D77= |R1D77race= |R1D77flag= |R1D77score= |R1D77win=
|R1D78= |R1D78race= |R1D78flag= |R1D78score= |R1D78win=
|R1D79= |R1D79race= |R1D79flag= |R1D79score= |R1D79win=
|R1D80= |R1D80race= |R1D80flag= |R1D80score= |R1D80win=
|R1D81= |R1D81race= |R1D81flag= |R1D81score= |R1D81win=
|R1D82= |R1D82race= |R1D82flag= |R1D82score= |R1D82win=
|R1D83= |R1D83race= |R1D83flag= |R1D83score= |R1D83win=
|R1D84= |R1D84race= |R1D84flag= |R1D84score= |R1D84win=
|R1D85= |R1D85race= |R1D85flag= |R1D85score= |R1D85win=
|R1D86= |R1D86race= |R1D86flag= |R1D86score= |R1D86win=
|R1D87= |R1D87race= |R1D87flag= |R1D87score= |R1D87win=
|R1D88= |R1D88race= |R1D88flag= |R1D88score= |R1D88win=
|R1D89= |R1D89race= |R1D89flag= |R1D89score= |R1D89win=
|R1D90= |R1D90race= |R1D90flag= |R1D90score= |R1D90win=
|R1D91= |R1D91race= |R1D91flag= |R1D91score= |R1D91win=
|R1D92= |R1D92race= |R1D92flag= |R1D92score= |R1D92win=
|R1D93= |R1D93race= |R1D93flag= |R1D93score= |R1D93win=
|R1D94= |R1D94race= |R1D94flag= |R1D94score= |R1D94win=
|R1D95= |R1D95race= |R1D95flag= |R1D95score= |R1D95win=
|R1D96= |R1D96race= |R1D96flag= |R1D96score= |R1D96win=
|R2D1= |R2D1race= |R2D1flag= |R2D1score= |R2D1win=
|R2W33= |R2W33race= |R2W33flag= |R2W33score= |R2W33win=
|R2D2= |R2D2race= |R2D2flag= |R2D2score= |R2D2win=
|R2W34= |R2W34race= |R2W34flag= |R2W34score= |R2W34win=
|R2D3= |R2D3race= |R2D3flag= |R2D3score= |R2D3win=
|R2W35= |R2W35race= |R2W35flag= |R2W35score= |R2W35win=
|R2D4= |R2D4race= |R2D4flag= |R2D4score= |R2D4win=
|R2W36= |R2W36race= |R2W36flag=|R2W36score= |R2W36win=
|R2D5= |R2D5race= |R2D5flag=|R2D5score= |R2D5win=
|R2W37= |R2W37race= |R2W37flag= |R2W37score= |R2W37win=
|R2D6= |R2D6race= |R2D6flag= |R2D6score= |R2D6win=
|R2W38= |R2W38race= |R2W38flag= |R2W38score= |R2W38win=
|R2D7= |R2D7race= |R2D7flag= |R2D7score= |R2D7win=
|R2W39= |R2W39race= |R2W39flag= |R2W39score= |R2W39win=
|R2D8= |R2D8race= |R2D8flag= |R2D8score= |R2D8win=
|R2W40= |R2W40race= |R2W40flag= |R2W40score= |R2W40win=
|R2D9= |R2D9race= |R2D9flag= |R2D9score= |R2D9win=
|R2W41= |R2W41race= |R2W41flag= |R2W41score= |R2W41win=
|R2D10= |R2D10race= |R2D10flag= |R2D10score= |R2D10win=
|R2W42= |R2W42race= |R2W42flag= |R2W42score= |R2W42win=
|R2D11= |R2D11race= |R2D11flag= |R2D11score= |R2D11win=
|R2W43= |R2W43race= |R2W43flag= |R2W43score= |R2W43win=
|R2D12= |R2D12race= |R2D12flag= |R2D12score= |R2D12win=
|R2W44= |R2W44race= |R2W44flag=|R2W44score= |R2W44win=
|R2D13= |R2D13race= |R2D13flag=|R2D13score= |R2D13win=
|R2W45= |R2W45race= |R2W45flag= |R2W45score= |R2W45win=
|R2D14= |R2D14race= |R2D14flag= |R2D14score= |R2D14win=
|R2W46= |R2W46race= |R2W46flag= |R2W46score= |R2W46win=
|R2D15= |R2D15race= |R2D15flag= |R2D15score= |R2D15win=
|R2W47= |R2W47race= |R2W47flag= |R2W47score= |R2W47win=
|R2D16= |R2D16race= |R2D16flag= |R2D16score= |R2D16win=
|R2W48= |R2W48race= |R2W48flag= |R2W48score= |R2W48win=
|R3W1= |R3W1race= |R3W1flag= |R3W1score= |R3W1win=
|R3W2= |R3W2race= |R3W2flag= |R3W2score= |R3W2win=
|R3W3= |R3W3race= |R3W3flag= |R3W3score= |R3W3win=
|R3W4= |R3W4race= |R3W4flag= |R3W4score= |R3W4win=
|R3W5= |R3W5race= |R3W5flag= |R3W5score= |R3W5win=
|R3W6= |R3W6race= |R3W6flag= |R3W6score= |R3W6win=
|R3W7= |R3W7race= |R3W7flag= |R3W7score= |R3W7win=
|R3W8= |R3W8race= |R3W8flag= |R3W8score= |R3W8win=
|R3W9= |R3W9race= |R3W9flag= |R3W9score= |R3W9win=
|R3W10= |R3W10race= |R3W10flag= |R3W10score= |R3W10win=
|R3W11= |R3W11race= |R3W11flag= |R3W11score= |R3W11win=
|R3W12= |R3W12race= |R3W12flag= |R3W12score= |R3W12win=
|R3W13= |R3W13race= |R3W13flag= |R3W13score= |R3W13win=
|R3W14= |R3W14race= |R3W14flag= |R3W14score= |R3W14win=
|R3W15= |R3W15race= |R3W15flag= |R3W15score= |R3W15win=
|R3W16= |R3W16race= |R3W16flag= |R3W16score= |R3W16win=
|R4D1= |R4D1race= |R4D1flag= |R4D1score= |R4D1win=
|R4W17= |R4W17race= |R4W17flag= |R4W17score= |R4W17win=
|R4D2= |R4D2race= |R4D2flag= |R4D2score= |R4D2win=
|R4W18= |R4W18race= |R4W18flag= |R4W18score= |R4W18win=
|R4D3= |R4D3race= |R4D3flag= |R4D3score= |R4D3win=
|R4W19= |R4W19race= |R4W19flag= |R4W19score= |R4W19win=
|R4D4= |R4D4race= |R4D4flag= |R4D4score= |R4D4win=
|R4W20= |R4W20race= |R4W20flag= |R4W20score= |R4W20win=
|R4D5= |R4D5race= |R4D5flag= |R4D5score= |R4D5win=
|R4W21= |R4W21race= |R4W21flag= |R4W21score= |R4W21win=
|R4D6= |R4D6race= |R4D6flag= |R4D6score= |R4D6win=
|R4W22= |R4W22race= |R4W22flag= |R4W22score= |R4W22win=
|R4D7= |R4D7race= |R4D7flag= |R4D7score= |R4D7win=
|R4W23= |R4W23race= |R4W23flag= |R4W23score= |R4W23win=
|R4D8= |R4D8race= |R4D8flag= |R4D8score= |R4D8win=
|R4W24= |R4W24race= |R4W24flag= |R4W24score= |R4W24win=
|R5W1= |R5W1race= |R5W1flag= |R5W1score= |R5W1win=
|R5W2= |R5W2race= |R5W2flag= |R5W2score= |R5W2win=
|R5W3= |R5W3race= |R5W3flag= |R5W3score= |R5W3win=
|R5W4= |R5W4race= |R5W4flag= |R5W4score= |R5W4win=
|R5W5= |R5W5race= |R5W5flag= |R5W5score= |R5W5win=
|R5W6= |R5W6race= |R5W6flag= |R5W6score= |R5W6win=
|R5W7= |R5W7race= |R5W7flag= |R5W7score= |R5W7win=
|R5W8= |R5W8race= |R5W8flag= |R5W8score= |R5W8win=
|R6D1= |R6D1race= |R6D1flag= |R6D1score= |R6D1win=
|R6W9= |R6W9race= |R6W9flag= |R6W9score= |R6W9win=
|R6D2= |R6D2race= |R6D2flag= |R6D2score= |R6D2win=
|R6W10= |R6W10race= |R6W10flag= |R6W10score= |R6W10win=
|R6D3= |R6D3race= |R6D3flag= |R6D3score= |R6D3win=
|R6W11= |R6W11race= |R6W11flag= |R6W11score= |R6W11win=
|R6D4= |R6D4race= |R6D4flag= |R6D4score= |R6D4win=
|R6W12= |R6W12race= |R6W12flag= |R6W12score= |R6W12win=
|R7W1= |R7W1race= |R7W1flag= |R7W1score= |R7W1win=
|R7W2= |R7W2race= |R7W2flag= |R7W2score= |R7W2win=
|R7W3= |R7W3race= |R7W3flag= |R7W3score= |R7W3win=
|R7W4= |R7W4race= |R7W4flag= |R7W4score= |R7W4win="""
example_str = """|R1D1= |R1D1flag= |R1D1score= |R1D1win=
|R1D2= |R1D2flag= |R1D2score= |R1D2win=
|R1D3= |R1D3flag= |R1D3score= |R1D3win=
|R1D4= |R1D4flag= |R1D4score= |R1D4win=
|R1D5= |R1D5flag= |R1D5score= |R1D5win=
|R1D6= |R1D6flag= |R1D6score= |R1D6win=
|R1D7= |R1D7flag= |R1D7score= |R1D7win=
|R1D8= |R1D8flag= |R1D8score= |R1D8win=
|R1D9= |R1D9flag= |R1D9score= |R1D9win=
|R1D10= |R1D10flag= |R1D10score= |R1D10win=
|R1D11= |R1D11flag= |R1D11score= |R1D11win=
|R1D12= |R1D12flag= |R1D12score= |R1D12win=
|R1D13= |R1D13flag= |R1D13score= |R1D13win=
|R1D14= |R1D14flag= |R1D14score= |R1D14win=
|R1D15= |R1D15flag= |R1D15score= |R1D15win=
|R1D16= |R1D16flag= |R1D16score= |R1D16win=
|R1D17= |R1D17flag= |R1D17score= |R1D17win=
|R1D18= |R1D18flag= |R1D18score= |R1D18win=
|R1D19= |R1D19flag= |R1D19score= |R1D19win=
|R1D20= |R1D20flag= |R1D20score= |R1D20win=
|R1D21= |R1D21flag= |R1D21score= |R1D21win=
|R1D22= |R1D22flag= |R1D22score= |R1D22win=
|R1D23= |R1D23flag= |R1D23score= |R1D23win=
|R1D24= |R1D24flag= |R1D24score= |R1D24win=
|R1D25= |R1D25flag= |R1D25score= |R1D25win=
|R1D26= |R1D26flag= |R1D26score= |R1D26win=
|R1D27= |R1D27flag= |R1D27score= |R1D27win=
|R1D28= |R1D28flag= |R1D28score= |R1D28win=
|R1D29= |R1D29flag= |R1D29score= |R1D29win=
|R1D30= |R1D30flag= |R1D30score= |R1D30win=
|R1D31= |R1D31flag= |R1D31score= |R1D31win=
|R1D32= |R1D32flag= |R1D32score= |R1D32win=
|R2W1= |R2W1flag= |R2W1score= |R2W1win=
|R2W2= |R2W2flag= |R2W2score= |R2W2win=
|R2W3= |R2W3flag= |R2W3score= |R2W3win=
|R2W4= |R2W4flag= |R2W4score= |R2W4win=
|R2W5= |R2W5flag= |R2W5score= |R2W5win=
|R2W6= |R2W6flag= |R2W6score= |R2W6win=
|R2W7= |R2W7flag= |R2W7score= |R2W7win=
|R2W8= |R2W8flag= |R2W8score= |R2W8win=
|R2W9= |R2W9flag= |R2W9score= |R2W9win=
|R2W10= |R2W10flag= |R2W10score= |R2W10win=
|R2W11= |R2W11flag= |R2W11score= |R2W11win=
|R2W12= |R2W12flag= |R2W12score= |R2W12win=
|R2W13= |R2W13flag= |R2W13score= |R2W13win=
|R2W14= |R2W14flag= |R2W14score= |R2W14win=
|R2W15= |R2W15flag= |R2W15score= |R2W15win=
|R2W16= |R2W16flag= |R2W16score= |R2W16win=
|R3W1= |R3W1flag= |R3W1score= |R3W1win=
|R3W2= |R3W2flag= |R3W2score= |R3W2win=
|R3W3= |R3W3flag= |R3W3score= |R3W3win=
|R3W4= |R3W4flag= |R3W4score= |R3W4win=
|R3W5= |R3W5flag= |R3W5score= |R3W5win=
|R3W6= |R3W6flag= |R3W6score= |R3W6win=
|R3W7= |R3W7flag= |R3W7score= |R3W7win=
|R3W8= |R3W8flag= |R3W8score= |R3W8win=
|R4W1= |R4W1flag= |R4W1score= |R4W1win=
|R4W2= |R4W2flag= |R4W2score= |R4W2win=
|R4W3= |R4W3flag= |R4W3score= |R4W3win=
|R4W4= |R4W4flag= |R4W4score= |R4W4win=
|R5W1= |R5W1flag= |R5W1score= |R5W1win=
|R5W2= |R5W2flag= |R5W2score= |R5W2win=
|R5D1= |R5D1flag= |R5D1score= |R5D1win=
|R5D2= |R5D2flag= |R5D2score= |R5D2win="""
# +
codes = []
for line in example_str.split('\n'):
codes.append(line[1:].strip().rstrip().split(' |')[0][:-1])
codes = [_ for _ in codes if len(_) > 0]
# -
dfs = pd.read_html('4-year-64-lower-1.html')
dfs[0]
dft = dfs[0].iloc[3:,:]
dft
def clean2(x):
for sub in ['cK-','All*','{DigA}','c4']:
x = x.replace(sub,'')
return x
import pickle
with open('flags.pkl','rb') as fp:
flags = pickle.load(fp)
def clean3(x):
x= ' '.join(x.split(' ')[1:])
for s in ['cK-','()','G1-','G4-','n|','tm>','{187}','.',"''"]:
x = x.replace(s,'')
return x
stages = []
for i in range(0,dft.shape[1]):
try:
col1 = dft.iloc[:,i].dropna()
#print(col1)
players = [(col1.iloc[i],col1.iloc[i+2]) for i in range(0,len(col1),3)]
players = [(clean3(_[0]),clean3(_[1])) for _ in players]
scores = [(col1.iloc[i+1]) for i in range(0,len(col1),3)]
scores = [x if 'WBF' not in x else '(1-0) (1-0)' for x in scores]
scores = [[(int(_[0]),int(_[1])) for _ in re.findall('\(([\d-]+?)-([\d-]+)\)',x)] for x in scores]
s1s = [sum([1 for _ in x if _[0] > _[1]]) for x in scores]
s2s = [sum([1 for _ in x if _[1] > _[0]]) for x in scores]
stages.append([(p[0],p[1],s[0],s[1]) for p,s in zip(players,list(zip(s1s,s2s)))])
except:
raise
with open('cpl4yrL.txt','wt') as fp:
for s in stages:
for g in s:
print(*g,file=fp)
print(file=fp)
scores
dfs[1]
# +
w1 = """ czm > s1k - 44:-1
Sarge P Doom > sp1N - 14:7
FienD < n|soap - 15:30 @ t4
Bluehaze > Rommel - 12:8
daler > Carde - 34:2
stx-revenant > K9-rel - 19:13
Blokey > killaton - 26:3
carnage > ic-FOX
ic-TOXIC > KungFoo - 50:1
ASUS*c58-Jibo > BYE
esp-jack > BYE
g1|blood < socrates_
ic-GOPHER > esp-Chadwick - 24:8
redemzzz vs reflux - 37:4
2live > jello - 22:5
ss-sirdark < FFSmasher - 0:15
Ms.X < barnak - 10:32
tim > BYE
Palarity > mik3d - 28:-1
.BldJello. > radialG - 25:9
ve-elpajuo > chance
Junkie < GATOR519
]km[serm > Habib - 24:6
Appleseed > Missy - 34:4
etcz > RaLpAp - 18:6
PoRky > viju
exorcist > [B]CocaCola - 35:0
LeXeR < territory.ru>cooller - 8:20 @ ztn (demo)
ic-TECH < Forever` - 10:39
Maddog > ren3000 - 44:11
cha0ticz > snucks - 41:11
[KoG]^SoulXtrot < ZeRo4 - 0:70"""
w2 = """ czm > Sarge P Doom - 30:0
n|soap > Bluehaze - 21:7
daler > stx-revenant - 36:8
Blokey > carnage - 23:13
ic-TOXIC > ASUS*c58-Jibo - 11:8
esp-jack < socrates_ - 9:32
redemzzz > ic-GOPHER - 7:6
2live < FFSmasher
barnak > tim - 24:3
Palarity > .BldJello.
ve-elpajuo > GATOR519 - 19:13
]km[serm > Appleseed - 32:15
etcz > PoRky
exorcist < territory.ru>cooller - 3:17
Forever` < Maddog - 9:10
cha0ticz < ZeRo4 - 11:12 @ ztn (demo)"""
w3 = """ czm > n|soap - 38:5
daler > Blokey - 25:23
ic-TOXIC > socrates_ - 14:1
redemzzz < FFSmasher - 20:35
barnak < Palarity - 10:23
ve-elpajuo < ]km[serm - 8:16
etcz. < territory.ru`cooller - 3:13
Maddog < ZeRo4 - 4:16"""
w4 = """ czm > daler 23:5 @ dm6
ic-TOXIC > FFSmasher 34:5 @ ztn
Palarity < ]km[serm 7:23
territory.ru`cooller < ZeRo4 16:24 @ hub"""
w5 = """ czm > ic-TOXIC - 29:6 (!) @ t4
ZeRo4 > ]km[serm - 31:10 @ t4"""
# +
w1 = """sp1N < Sarge P Doom - 3:24
FienD > Bluehaze
K9-rel < stx-revenant
killaton < carnage
KungFoo < Jibo
g1|blood < esp-jack
reflux < ic-GOPHER
ss-sirdark < 2live
Ms. X < tim
radialG < .BldJello.
chance > GATOR519
Habib < Appleseed - 9:19
ReaLpAp < PoRky
LeXeR > exorcist
ic-TECH < Forever
BYE < cha0ticz
"""
w2 = """
Sarge P Doom > FienD
stx-revenant < carnage
Jibo > esp-jack
ic-GOPHER > 2live
tim < BldJello
chance > Appleseed
ReaLpAp < LeXeR
Forever < cha0ticz
"""
w3 = """ Maddog > Sarge P Doom
carnage > etc
ve-elpajuo < Jibo
barnak < ic-GOPHER
redemzzz > .BldJello. - 24:4
chance > socrates_ - 37:12
LeXeR > Blokey - 18:4
n|soap < cha0ticz
"""
w4 = """Maddog > carnage
Jibo > ic-GOPHER
redemzzz < chance
LeXeR > cha0ticz
"""
w5 = """ Maddog < Daler - 7:26
Jibo < FFSmasher - 10:49
chance > Polarity - 28:5
LeXeR < territory.ru`cooller - 7:11
"""
w6 =""" Daler > FFSmasher - 21:9
chance < territory.ru`cooller - 9:34 t4 (demo)"""
w7 = """ ]km[.serm < Daler - 1:46
ic-TOXIC < territory.ru`cooller - 10:18
"""
w8 = """
Daler < territory.ru`cooller - 12:18
"""
# +
with open('qlan.txt') as fp:
text = fp.read()
text_games = [_.split('\n') for _ in text.split('\n\n')]
all_matches = []
for sec in text.split('\n\n'):
sec = sec.replace('\n\n','\n')
matches = []
s1,s2 = -1,-1
match_cand = re.findall('(.+?)\s+([><]|vs)\s+(.+?)[-\s]+([\d-]+):([-\d]+)',sec)
for p1,t,p2,s1,s2 in match_cand:
p1 = clean(p1)
p2 = clean(p2)
s1 = int(s1)
s2 = int(s2)
matches.append((p1,p2,s1,s2))
match_cand = re.findall('(.+?)\s+([><]|vs)\s+([^:]+?)\n',sec)
for p1,c,p2 in match_cand:
p1 = clean(p1)
p2 = clean(p2)
s1 = int('>' in c)
s2 = int('<' in c)
matches.append((p1,p2,s1,s2))
all_matches.append(matches)
all_matches
# +
def clean(n):
n = ''.join(n.split())
n = n.replace('n|','').replace('ic-','').replace('stx-','').replace('[KoG]^','').replace(']km[','')
n = n.replace('[B]','').replace('territory.ru>','').replace('esp-','').replace('ss-','').replace('K9-','')
n = n.replace('socrates_','socrates').replace('ASUS*c58-','').replace('ve-','').replace('g1|','')
n = n.replace('TOXIC','toxjq').replace('.','').replace('territoryru`','')
return n
all_matches = []
gen_str = [w1,w2,w3,w4,w5]
for sec in gen_str:
sec = sec.replace('\n\n','\n')
matches = []
s1,s2 = -1,-1
match_cand = re.findall('(.+?)\s+([><]|vs)\s+(.+?)[-\s]+([\d-]+):([-\d]+)',sec)
for p1,t,p2,s1,s2 in match_cand:
p1 = clean(p1)
p2 = clean(p2)
s1 = int(s1)
s2 = int(s2)
matches.append((p1,p2,s1,s2))
match_cand = re.findall('(.+?)\s+([><]|vs)\s+([^:]+?)\n',sec)
for p1,c,p2 in match_cand:
p1 = clean(p1)
p2 = clean(p2)
s1 = int('>' in c)
s2 = int('<' in c)
if 'vs' in c:
raise
matches.append((p1,p2,s1,s2))
all_matches.append(matches)
# -
all_matches
# +
att2 = pd.read_table('temp.txt',header=None)
rnd1 = []
tmp2 = set(np.array(att2.iloc[:,1:]).flatten())
tmp = list([_ for _ in att2.iloc[:,0] if len(_.strip()) > 0])
for i in range(0,len(tmp),3):
p1 = clean(tmp[i])
p2 = clean(tmp[i+2])
if 'BYE' in p1:
s1 = 0
s2 = 1
elif 'BYE' in p2:
s1 = 1
s2 = 0
else:
s1 = int(tmp[i] in tmp2)
s2 = int(tmp[i+2] in tmp2)
rnd1.append((p1,p2,s1,s2))
# -
all_matches.insert(0,rnd1)
all_matches
[len(_) for _ in all_matches],sum([len(_) for _ in all_matches])
# +
queue = [list(_) + [0] for _ in all_matches[-1]]
all_match_us = sum(all_matches[:-1],[])[::-1]
res = []
temp_res = []
curr_level = 0
print(queue,res)
while len(queue) > 0:
print(len(queue),len(res),curr_level, len(temp_res),len(all_match_us))
match=queue.pop(0)
flip = False
if match[-1] != curr_level:
curr_level += 1
res.append(temp_res)
temp_res = []
del_idx = -1
for i in range(len(all_match_us)):
if all_match_us[i][0] == match[0] or all_match_us[i][1] == match[0]:
queue.append(list(all_match_us[i]) + [curr_level+1])
del_idx = i
break
#print(del_idx,len(all_match_us))
if del_idx != -1:
del all_match_us[del_idx]
flip =True
del_idx = -1
for i in range(len(all_match_us)):
if all_match_us[i][0] == match[1] or all_match_us[i][1] == match[1]:
queue.append(list(all_match_us[i]) + [curr_level+1])
del_idx = i
break
#print(del_idx,len(all_match_us))
if del_idx != -1:
del all_match_us[del_idx]
flip = False
if flip:
match = (match[1],match[0],match[3],match[2],match[4])
temp_res.append(match)
#temp_res.append(['schnucks', 'SoulXtort', 0, 0] + [curr_level])
res.append(temp_res)
# -
[len(_) for _ in res],sum([len(_) for _ in res])
#res[4]
res
new_res = sum(res[::-1],[])
#|R1D43=belmakor|R1D43score=15 |R1D43flag= |R1D43win=1
#|R1D44=dCypher|R1D44score=7 |R1D44flag= |R1D44win=
for i in range(0,len(new_res)):
p1,p2,s1,s2,lvl = new_res[i]
s1,s2 = int(s1),int(s2)
w1 = '1' if int(s1) > int(s2) else ''
w2 = '1' if int(s2) > int(s1) else ''
bn =codes[2*i]
if abs(s1) + abs(s2) ==1:
s1 = ''
s2 = ''
print('|{}={}|{}score={} |{}flag= |{}win={}'.format(bn,p1,bn,s1,bn,bn,w1))
bn = codes[2*i+1]
print('|{}={}|{}score={} |{}flag= |{}win={}'.format(bn,p2,bn,s2,bn,bn,w2))
all_matches
len(codes)/2,len(res),print(all_match_us)
dfs = pd.read_html('wcggroup.html')
dfs[5]
| parse_qcon.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Copyright (c) 2019 [<NAME>](https://www.linkedin.com/in/hasan-dayoub-853845108/)
#
# https://github.com/HassanDayoub/Python-for-Machine-Learning-Deep-Learning-and-Data-Science-
#
# [MIT License](https://github.com/HassanDayoub/Python-for-Machine-Learning-Deep-Learning-and-Data-Science-/blob/master/LICENSE.txt)
#
# <br>
#
#
#
# # List
# A list is a value that contains multiple values in an ordered sequence. The
# term list value refers to the list itself (which is a value that can be stored in a
# variable or passed to a function like any other value), not the values inside
# the list value. A list value looks like this: ['cat', 'bat', 'rat', 'elephant'].
# Just as string values are typed with quote characters to mark where the
# string begins and ends, a list begins with an opening square bracket and
# ends with a closing square bracket, []. Values inside the list are also called
# items. Items are separated with commas (that is, they are comma-delimited).
lis = [1,2,3]
lis = [1.,2.,3.]
lis = ['a','b','c']
lis = [1,'a',3.]
# ## Removing Values from Lists with del Statements
# The ***del*** statement will delete values at an index in a list. All of the values
# in the list after the deleted value will be moved up one index
spam = ['cat', 'bat', 'rat', 'elephant']
del spam[2]
spam
# ## Using for Loops with Lists
supplies = ['pens', 'staplers', 'flame-throwers', 'binders']
for i in range(len(supplies)):
print('Index ' + str(i) + ' in supplies is: ' + supplies[i])
# ## The in and not in Operators
# You can determine whether a value is or isn’t in a list with the in and not in
# operators. Like other operators, in and not in are used in expressions and
# connect two values: a value to look for in a list and the list where it may be
# found. These expressions will evaluate to a Boolean value.
'howdy' in ['hello', 'hi', 'howdy', 'heyas']
spam = ['hello', 'hi', 'howdy', 'heyas']
'cat' in spam
'howdy' not in spam
myPets = ['Zophie', 'Pooka', 'Fat-tail']
print('Enter a pet name:')
name = input()
if name not in myPets:
print('I do not have a pet named ' + name)
else:
print(name + ' is my pet.')
# ## The Multiple Assignment Trick
# The multiple assignment trick is a shortcut that lets you assign multiple variables
# with the values in a list in one line of code.
cat = ['fat', 'black', 'loud']
size, color, disposition = cat
# ## Augmented Assignment Operators
# ## Finding a Value in a List with the index() Method
# List values have an index() method that can be passed a value, and if that
# value exists in the list, the index of the value is returned. If the value isn’t
# in the list, then Python produces a ValueError error
spam = ['hello', 'hi', 'howdy', 'heyas']
spam.index('hello')
# ## Adding Values to Lists with the append() and insert() Methods
# To add new values to a list, use the append() and insert() methods. Enter the
# following into the interactive shell to call the append() method on a list value
# stored in the variable spam:
spam = ['cat', 'dog', 'bat']
spam.append('moose')
spam
spam = ['cat', 'dog', 'bat']
spam.insert(1, 'chicken')
spam
# ## Removing Values from Lists with remove()
# The remove() method is passed the value to be removed from the list it is
# called on.
spam = ['cat', 'bat', 'rat', 'elephant']
spam.remove('bat')
spam
# ## Sorting the Values in a List with the sort() Method
# Lists of number values or lists of strings can be sorted with the sort()
# method.
spam = [2, 5, 3.14, 1, -7]
spam.sort()
spam
# You can also pass True for the reverse keyword argument to have sort()
# sort the values in reverse order.
spam.sort(reverse=True)
spam
# Third, sort() uses ***“ASCIIbetical order”*** rather than actual alphabetical
# order for sorting strings. This means uppercase letters come before lowercase
# letters. Therefore, the lowercase a is sorted so that it comes after the
# uppercase Z.
spam = ['Alice', 'ants', 'Bob', 'badgers', 'Carol', 'cats']
spam.sort()
spam
# If you need to sort the values in regular alphabetical order, pass str.
# lower for the key keyword argument in the sort() method call.
spam = ['a', 'z', 'A', 'Z']
spam.sort(key=str.lower)
spam
# ## List-like Types: Strings and Tuples
# Lists aren’t the only data types that represent ordered sequences of values.
# For example, strings and lists are actually similar, if you consider a string to
# be a “list” of single text characters. Many of the things you can do with lists
# can also be done with strings: indexing; slicing; and using them with for
# loops, with len(), and with the in and not in operators
name = 'Zophie'
name[0]
name[-2]
for i in name:
print('* * * ' + i + ' * * *')
# ## Mutable and Immutable Data Types
# But lists and strings are different in an important way. A list value is a mutable
# data type: It can have values added, removed, or changed. However, a string
# is immutable: It cannot be changed. Trying to reassign a single character in
# a string results in a TypeError error
name = '<NAME>'
name[7] = 'the'
# The proper way to “mutate” a string is to use slicing and concatenation
# to build a new string by copying from parts of the old string.
name = '<NAME>'
newName = name[0:7] + 'the' + name[8:12]
name
newName
# ## The Tuple Data Type
# The tuple data type is almost identical to the list data type, except in two
# ways. First, tuples are typed with parentheses, ( and ), instead of square
# brackets, [ and ].
eggs = ('hello', 42, 0.5)
eggs[0]
# But the main way that tuples are different from lists is that tuples,
# like strings, are immutable. Tuples cannot have their values modified,
# appended, or removed.
eggs = ('hello', 42, 0.5)
eggs[1] = 99
# If you have only one value in your tuple, you can indicate this by placing
# a trailing comma after the value inside the parentheses. Otherwise, Python
# will think you’ve just typed a value inside regular parentheses.
type(('hello',))
type(('hello'))
# ## Converting Types with the list() and tuple() Functions
# Just like how str(42) will return '42', the string representation of the integer
# 42, the functions list() and tuple() will return list and tuple versions
# of the values passed to them.
tuple(['cat', 'dog', 5])
list(('cat', 'dog', 5))
# Converting a tuple to a list is handy if you need a mutable version of a
# tuple value.
# ## References
# As you’ve seen, variables store strings and integer values.
# ## The copy Module’s copy() and deepcopy() Functions
# Although passing around references is often the handiest way to deal with
# lists and dictionaries, if the function modifies the list or dictionary that is
# passed, you may not want these changes in the original list or dictionary
# value. For this, Python provides a module named copy that provides both
# the copy() and deepcopy() functions. The first of these, copy.copy(), can be used
# to make a duplicate copy of a mutable value like a list or dictionary, not just a
# # copy of a reference.
spam = [1,2,3]
cheese = spam
cheese[0] = 10
print(spam)
print(chees)
# +
import copy
spam = [1,2,3]
cheese = copy.copy(spam)
cheese[0] = 10
print(spam)
print(cheese)
# -
# # Dictionary
Dict = {"Digits":[0,1,2,3,4,5,6,7,8,9],
"Letters":['a','b','c','d','e','f'],
"letters":['A','B','C','D']
}
Dict["Digits"]
Dict["Digits"][3]
Dict["Letters"][:3]
countries_famous_food = {"Syria":{"Damascus":"Icecream","Aleppo":"Kibbeh","Lattakia":"Fatah"},
"Egypt":["Ta’meya","Ful Mudammas","Kushari","Gebna Makleyah","Sayadeya"],
"USA":"Hot dogs",
"Italy":"spaghetti",
"Japan":"Sushi"
}
countries_famous_food["Syria"]["Aleppo"]
| Chapter 4_List_Tuple_Dictionaries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -
# # Natural Language Processing (NLP) with machine learning (ML)
#
# **Preprocessing of textual data**
# ### Download NLTK data - we need to do this only one time
# The download process can last longer (with GUI) and all data packages are bigger size of 3.3 GB
#
# Uncomment the *nltk.download()* line to download all! It open a new download window, which requires to click !
import nltk
# nltk.download()
# nltk.download('punkt')
# nltk.download('stopwords')
# nltk.download('averaged_perceptron_tagger') # Part-of-Speech Tagging (POS)
# nltk.download('tagsets')
# nltk.download('maxent_ne_chunker') # Name Entity Recognition (NER)
# nltk.download('words')
# ### Tokenization
# +
import string
import re
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
# load data
filename = 'data/metamorphosis_clean.txt'
file = open(filename, 'rt')
text = file.read()
file.close()
# split into words
tokens = word_tokenize(text)
# convert to lower case
tokens = [w.lower() for w in tokens]
# prepare regex for char filtering
re_punc = re.compile('[%s]' % re.escape(string.punctuation))
# remove punctuation from each word
stripped = [re_punc.sub('', w) for w in tokens]
# remove remaining tokens that are not alphabetic
words = [word for word in stripped if word.isalpha()]
# filter out stop words
stop_words = set(stopwords.words('english'))
words = [w for w in words if not w in stop_words]
print(words[:100])
# -
# ### TF-IDF with TfidfVectorizer
# +
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
dataset = [
"I enjoy reading about Machine Learning and Machine Learning is my PhD subject",
"I would enjoy a walk in the park",
"I was reading in the library"
]
vectorizer = TfidfVectorizer(use_idf=True)
tfIdf = vectorizer.fit_transform(dataset)
df = pd.DataFrame(tfIdf[0].T.todense(), index=vectorizer.get_feature_names(), columns=["TF-IDF"])
df = df.sort_values('TF-IDF', ascending=False)
print (df.head(25))
# -
# ### TF-IDF with TfidfTransformer
# +
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
transformer = TfidfTransformer(use_idf=True)
countVectorizer = CountVectorizer()
wordCount = countVectorizer.fit_transform(dataset)
newTfIdf = transformer.fit_transform(wordCount)
df = pd.DataFrame(newTfIdf[0].T.todense(), index=countVectorizer.get_feature_names(), columns=["TF-IDF"])
df = df.sort_values('TF-IDF', ascending=False)
print (df.head(25))
# -
# ### Cosine similarity
# URL: https://stackoverflow.com/questions/12118720/python-tf-idf-cosine-to-find-document-similarity
# +
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
# twenty dataset
twenty = fetch_20newsgroups()
tfidf = TfidfVectorizer().fit_transform(twenty.data)
# cosine similarity
cosine_similarities = linear_kernel(tfidf[0:1], tfidf).flatten()
# top-5 related documents
related_docs_indices = cosine_similarities.argsort()[:-5:-1]
print(related_docs_indices)
print(cosine_similarities[related_docs_indices])
# print the first result to check
print(twenty.data[0])
print(twenty.data[958])
# -
# ### Text classification
#
# URL https://towardsdatascience.com/machine-learning-nlp-text-classification-using-scikit-learn-python-and-nltk-c52b92a7c73a
# +
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
# twenty dataset
twenty_train = fetch_20newsgroups(subset='train', shuffle=True)
twenty_test = fetch_20newsgroups(subset='test', shuffle=True)
print(twenty_train.target_names)
# print("\n".join(twenty_train.data[0].split("\n")[:3]))
# -
# ### Multinomial Naive Bayes
# +
from sklearn.naive_bayes import MultinomialNB
# Bag-of-words
count_vect = CountVectorizer()
X_train_counts = count_vect.fit_transform(twenty_train.data)
X_test_counts = count_vect.transform(twenty_test.data)
# TF-IDF
transformer = TfidfTransformer()
X_train_tfidf = transformer.fit_transform(X_train_counts)
X_test_tfidf = transformer.transform(X_test_counts)
# Naive Bayes (NB) for text classification
clf = MultinomialNB().fit(X_train_tfidf, twenty_train.target)
# Performance of the model
predicted = clf.predict(X_test_tfidf)
np.mean(predicted == twenty_test.target)
# -
# ### Pipeline
# The above code with Multinomial Naive Bayes can be written more ellegant with scikit-learn pipeline. The code will be shorter and more reliable.
# +
text_clf = Pipeline([('vect', CountVectorizer(stop_words='english')),
('tfidf', TfidfTransformer()),
('clf', MultinomialNB()),
])
text_clf = text_clf.fit(twenty_train.data, twenty_train.target)
# Performance of the model
predicted = text_clf.predict(twenty_test.data)
np.mean(predicted == twenty_test.target)
# -
# ### GridSearchCV with Naive Bayes
# We want and need to optimize the pipeline by hyper-parameter tunning. We may get some better classification results.
# +
from sklearn.model_selection import GridSearchCV
parameters = {'vect__ngram_range': [(1, 1), (1, 2)],
'tfidf__use_idf': (True, False),
'clf__alpha': (1e-2, 1e-3),
}
gs_clf = GridSearchCV(text_clf, parameters, n_jobs=-1)
gs_clf = gs_clf.fit(twenty_train.data, twenty_train.target)
print(gs_clf.best_score_)
print(gs_clf.best_params_)
# -
# ### SGDClassifier
# We are trying another classifier called SGDClassifier instead of the previous Multinomial Naive Bayes.
#
# Let see if this new classifier acts better incomparison with and without optimization.
# +
from sklearn.linear_model import SGDClassifier
text_clf_svm = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf-svm', SGDClassifier(loss='hinge',
penalty='l2',
alpha=1e-3,
random_state=42)),
])
text_clf_svm = text_clf_svm.fit(twenty_train.data, twenty_train.target)
# Performance of the model
predicted_svm = text_clf_svm.predict(twenty_test.data)
np.mean(predicted_svm == twenty_test.target)
# -
# ### GridSearchCV with SVM
#
# Here a more classifiers, e.g., SVM.
#
# We are going to try SVM with Grid Search optimization.
# +
from sklearn.model_selection import GridSearchCV
parameters_svm = {'vect__ngram_range': [(1, 1), (1, 2)],
'tfidf__use_idf': (True, False),
'clf-svm__alpha': (1e-2, 1e-3),
}
gs_clf_svm = GridSearchCV(text_clf_svm, parameters_svm, n_jobs=-1)
gs_clf_svm = gs_clf_svm.fit(twenty_train.data, twenty_train.target)
print(gs_clf_svm.best_score_)
print(gs_clf_svm.best_params_)
# -
# ### Stemming
#
# Stemming can improve classifier results too. Let see if it works in our case example with Multinomial Naive Bayes.
# +
from nltk.stem.snowball import SnowballStemmer
stemmer = SnowballStemmer("english", ignore_stopwords=True)
class StemmedCountVectorizer(CountVectorizer):
def build_analyzer(self):
analyzer = super(StemmedCountVectorizer, self).build_analyzer()
return lambda doc: ([stemmer.stem(w) for w in analyzer(doc)])
stemmed_count_vect = StemmedCountVectorizer(stop_words='english')
text_mnb_stemmed = Pipeline([('vect', stemmed_count_vect),
('tfidf', TfidfTransformer()),
('mnb', MultinomialNB(fit_prior=False))])
text_mnb_stemmed = text_mnb_stemmed.fit(twenty_train.data, twenty_train.target)
predicted_mnb_stemmed = text_mnb_stemmed.predict(twenty_test.data)
np.mean(predicted_mnb_stemmed == twenty_test.target)
# -
| cvicenia/tyzden-07/IAU_071_nlp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.1 64-bit (''base'': conda)'
# language: python
# name: python37164bitbaseconda0566f9e1637641a3bfb39389e76e5ab9
# ---
# ## Feature selection with categorical data
# https://machinelearningmastery.com/feature-selection-with-categorical-data/#
# ### Prepare data for Ordinal Encoder
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import mutual_info_classif
from matplotlib import pyplot
filename = "breast-cancer.csv"
# load the dataset as a pandas DataFrame
data = read_csv(filename, header=None)
# retrieve numpy array
dataset = data.values
# split into input (X) and output (y) variables
X = dataset[:, :-1]
y = dataset[:,-1]
# format all fields as string
X = X.astype(str)
# load the dataset
def load_dataset(filename):
# load the dataset as a pandas DataFrame
data = read_csv(filename, header=None)
# retrieve numpy array
dataset = data.values
# split into input (X) and output (y) variables
X = dataset[:, :-1]
y = dataset[:,-1]
# format all fields as string
X = X.astype(str)
return X, y
# lead the dataset
X, y = load_dataset(filename)
# split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)
# summarise
print('Train', X_train.shape, y_train.shape)
print('Test', X_test.shape, y_test.shape)
# prepare input data
def prepare_inputs(X_train, X_test):
oe = OrdinalEncoder()
oe.fit(X_train)
X_train_enc = oe.transform(X_train)
X_test_enc = oe.transform(X_test)
return X_train_enc, X_test_enc
# prepare target - could be done with OrdinalEncoder, but LabelEncoder is designed for a single variable
def prepare_targets(y_train, y_test):
le = LabelEncoder()
le.fit(y_train)
y_train_enc = le.transform(y_train)
y_test_enc = le.transform(y_test)
return y_train_enc, y_test_enc
# prepare input data
X_train_enc, X_test_enc = prepare_inputs(X_train, X_test)
# prepare output data
y_train_enc, y_test_enc = prepare_targets(y_train,y_test)
# ### Chi-Squared Feature Selection
# custom function for feature selection
def select_features(X_train, y_train, X_test):
fs = SelectKBest(score_func=chi2, k='all')
fs.fit(X_train, y_train)
X_train_fs = fs.transform(X_train)
X_test_fs = fs.transform(X_test)
return X_train_fs, X_test_fs, fs
# feature selection
X_train_fs, X_test_fs, fs=select_features(X_train_enc, y_train_enc, X_test_enc)
# +
# what are scores for the features
for i in range(len(fs.scores_)):
print('Feature %d: %f' % (i, fs.scores_[i]))
# plot the scores
pyplot.bar([i for i in range(len(fs.scores_))], fs.scores_)
pyplot.show()
# -
# The bar chart indicates that features 3, 4, 5, and 8 are most relevant.
# We could set k=4 When configuring the SelectKBest to select these top four features.
# In a new branch of this Github, update the Ordinal Encoding example to try specifying the order for those variables that have a natural ordering and see if it has an impact on model performance.
| .ipynb_checkpoints/20200624 JBL featureSel tutorial-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="TBJpdgjFRfsI" outputId="2aa60c2f-dea9-4af7-ef0f-c7f5ab2fc2a9"
from google.colab import drive
drive.mount('/content/drive')
# + id="mk_CWJg6RvSp"
import sys
import os
path = '/content/drive/My Drive'
sys.path.append(path)
os.chdir(path)
os.listdir(path)
# + colab={"base_uri": "https://localhost:8080/"} id="K64Wrd_RRzYr" outputId="38204a05-50d0-4bc6-ff29-abf745078e8e"
import scipy.cluster as cluster
import dask.dataframe as dd
import numpy as np
import time
dataset = dd.read_csv('kdd_pre_final.csv', sep=',')
category_real = dataset.loc[:, ["classification"]]
dataset = dataset.iloc[:, :-2]
dataset = np.array(dataset, dtype=np.float32)
k = 23
start = time.time()
for _ in range(10):
_, labels = cluster.vq.kmeans2(dataset, k, 50, minit='points')
end = time.time()
print("Running time is {} seconds.".format((end-start)/10))
# + colab={"base_uri": "https://localhost:8080/"} id="v6v2kpk5aMAY" outputId="60fb64c7-b8ae-4dd0-e22f-e0a37593a28e"
from sklearn import metrics
from collections import Counter
category_real = np.array(category_real)
category = []
for i in range(dataset.shape[0]):
category.append(category_real[i][0])
category = np.array(category)
category_pre = labels
real = Counter(category)
pre = Counter(category_pre)
print(real)
print(pre)
real = real.most_common()
pre = pre.most_common()
for j in range(dataset.shape[0]):
for nn in range(k):
if(category[j] == real[nn][0]):
category[j] = int(pre[nn][0])
ARI = metrics.adjusted_rand_score(category, category_pre)
AMI = metrics.adjusted_mutual_info_score(category, category_pre)
print("调整兰德指数为", ARI)
print("归一化互信息指数为", AMI)
# + colab={"base_uri": "https://localhost:8080/"} id="oFvMjuSLSfje" outputId="3402a31c-ba6a-40cb-de93-6d47671f3fb0"
import sklearn.cluster as skcluster
start = time.time()
for _ in range(10):
kmeans = skcluster.KMeans(n_clusters=k, init='random', n_init=1, max_iter=50).fit_predict(dataset)
end = time.time()
print("Running time is {} seconds.".format((end-start)/10))
# + colab={"base_uri": "https://localhost:8080/"} id="__oNszVeVIKv" outputId="56c90ccc-7fd4-4c48-f720-98afb9a1b055"
from sklearn import metrics
from collections import Counter
category_real = np.array(category_real)
category = []
for i in range(dataset.shape[0]):
category.append(category_real[i][0])
category = np.array(category)
category_pre = kmeans
real = Counter(category)
pre = Counter(category_pre)
print(real)
print(pre)
real = real.most_common()
pre = pre.most_common()
for j in range(dataset.shape[0]):
for nn in range(k):
if(category[j] == real[nn][0]):
category[j] = int(pre[nn][0])
ARI = metrics.adjusted_rand_score(category, category_pre)
AMI = metrics.adjusted_mutual_info_score(category, category_pre)
print("调整兰德指数为", ARI)
print("归一化互信息指数为", AMI)
| built_in_referenced/kmeans_scipy&sklearn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # __Requirements__
#
# ### __Data Source__
# - The data used in this project comes from the dataset provided by IABAC for a fictitious company named INC Future Inc. The data set contains data of employee's profile with various features that are categorical as well as numerical in nature and the target is to predict the employee's performance rating with respect to the various feature values provided.
# - No other third party data has been used as the dataset provided for the purpose is sufficient to create a generalized model for performance prediction. Although further optimization may require addition data that can be collected from various sources especially kaggle.com
#
# ### __Libraries used__
# ##### env loading
# - os
# - dotenv
#
# ##### data loading and wrangling libraries for EDA
# - pandas
# - numpy
#
# ##### data visualization libraries
# - matplotlib
# - seaborn
# - bokeh
#
# ##### data balancing
# - imblearn
#
# ##### data splitting, ml algorithms, cross validation and evaluation
# - sklearn
# - xgboost
#
# ##### warnings
# - warnings
| Project Summary/Requirements.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="72245b3a35aa5abfe2c70cc934b523f87a6b5c72"
# # Introduction
# Machine learning competitions are a great way to improve your data science skills and measure your progress.
#
# In this exercise, you will create and submit predictions for a Kaggle competition. You can then improve your model (e.g. by adding features) to improve and see how you stack up to others taking this course.
#
# The steps in this notebook are:
# 1. Build a Random Forest model with all of your data (**X** and **y**)
# 2. Read in the "test" data, which doesn't include values for the target. Predict home values in the test data with your Random Forest model.
# 3. Submit those predictions to the competition and see your score.
# 4. Optionally, come back to see if you can improve your model by adding features or changing your model. Then you can resubmit to see how that stacks up on the competition leaderboard.
# + [markdown] _uuid="cf513b1a7ef57f4d3b290e8aa8f2fe4f312259c9"
# ## Recap
# Here's the code you've written so far. Start by running it again.
# -
def str2cols(df, column, col_vals, prefix):
'''
df: pandas DataFrame
column: string (name of original column)
col_vals: list of string (unique value in original column)
prefix: string
return: None (modify df)
'''
for col_val in col_vals:
df[prefix + col_val] = (df[column] == col_val).astype('int64')
return
def add_feature(home_data):
#home_data['Price_per_SF'] = home_data.SalePrice / \
# (home_data['1stFlrSF'] + home_data['2ndFlrSF'] + home_data['TotalBsmtSF'])
str2cols(home_data, 'SaleType', ['WD', 'New', 'COD'], 'ST_')
sale_condition = ['Normal', 'Abnorml', 'Partial', 'AdjLand', 'Alloca', 'Family']
str2cols(home_data, 'SaleCondition', sale_condition, 'SC_')
bldg = ['1Fam', '2fmCon', 'Duplex', 'TwnhsE', 'Twnhs']
str2cols(home_data, 'BldgType', bldg, 'BT_')
house_style = ['2Story', '1Story', '1.5Fin', 'SFoyer', 'SLvl']
str2cols(home_data, 'HouseStyle', house_style, 'HS_')
return
# + _uuid="8daa0655d66f7dffe337bd7cc96bedcf1ab9330e"
# Code you have previously used to load data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from xgboost import XGBRegressor
# Path of the file to read. We changed the directory structure to simplify submitting to a competition
iowa_file_path = '../input/train.csv'
home_data = pd.read_csv(iowa_file_path)
# Create target object and call it y
y = home_data.SalePrice
# Create X
add_feature(home_data)
# home_data['YearBuilt'] = 2011 - home_data['YearBuilt'] # degrade in RF, no change in LR
features = ['OverallQual', 'OverallCond', 'LotArea',
'ST_WD', 'ST_New', 'ST_COD', 'SC_Abnorml', 'SC_Partial', # 'SC_Normal',
'MSSubClass',
'GarageCars', # 'GarageArea',
'YearBuilt', # 'YearRemodAdd', 'YrSold',
# 'BT_1Fam', 'BT_2fmCon', 'BT_Duplex', 'BT_TwnhsE', 'BT_Twnhs',
# 'HS_2Story', 'HS_1Story', 'HS_1.5Fin', 'HS_SFoyer', 'HS_SLvl',
'1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'KitchenAbvGr', 'TotRmsAbvGrd']
X = home_data[features]
# Split into validation and training data
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
"""
# Specify Model
iowa_model = DecisionTreeRegressor(random_state=1)
# Fit Model
iowa_model.fit(train_X, train_y)
# Make validation predictions and calculate mean absolute error
val_predictions = iowa_model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)
print("Validation MAE when not specifying max_leaf_nodes: {:,.0f}".format(val_mae))
# Using best value for max_leaf_nodes
iowa_model = DecisionTreeRegressor(max_leaf_nodes=100, random_state=1)
iowa_model.fit(train_X, train_y)
val_predictions = iowa_model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)
print("Validation MAE for best value of max_leaf_nodes: {:,.0f}".format(val_mae))
"""
# Define the model. Set random_state to 1
rf_model = RandomForestRegressor(random_state=1)
rf_model.fit(train_X, train_y)
rf_val_predictions = rf_model.predict(val_X)
rf_val_mae = mean_absolute_error(rf_val_predictions, val_y)
print("Validation MAE for Random Forest Model: {:,.0f}".format(rf_val_mae))
scaler = StandardScaler()
train_Xnorm = scaler.fit_transform(train_X)
val_Xnorm = scaler.transform(val_X)
"""
svm_model = SVR(kernel='linear')
svm_model.fit(train_Xnorm, train_y)
svm_val_predict = svm_model.predict(val_Xnorm)
svm_val_mae = mean_absolute_error(svm_val_predict, val_y)
print('Validation MAE for SVM: {}'.format(svm_val_mae))
"""
lr_model = LinearRegression()
lr_model.fit(train_X, train_y)
lr_val_predict = lr_model.predict(val_X)
lr_val_mae = mean_absolute_error(lr_val_predict, val_y)
print('Validation MAE for Linear Regression: {:,.0f}'.format(lr_val_mae))
xg_model = XGBRegressor(n_estimators=5000)
xg_model.fit(train_X, train_y, early_stopping_rounds=10, eval_set=[(val_X, val_y)], verbose=False)
xg_val_predict = xg_model.predict(val_X)
xg_val_mae = mean_absolute_error(xg_val_predict, val_y)
print('Validation MAE for XGboost Regression: {:,.0f}'.format(xg_val_mae))
print(rf_val_predictions[:5])
print(np.round(lr_val_predict[:5]))
print(val_y[:5])
#print(val_X[:5])
# -
xg_model
home_data.SaleType.value_counts()
# +
# xgboost regressor
from xgboost import XGBRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import randint
params = {"n_estimators":[250, 300, 350, 400, 500, 600, 1000], "max_depth": [4,5,6], 'learning_rate': [0.02, 0.03, 0.04]}
randsearch = RandomizedSearchCV(XGBRegressor(), params, cv=5, n_iter=100, scoring="neg_mean_absolute_error", return_train_score=True, n_jobs=-1, verbose=1)
randsearch.fit(train_X, train_y, early_stopping_rounds=10, eval_set=[(val_X, val_y)], verbose=False)
print("Best Parm =", randsearch.best_params_, "score =", randsearch.best_score_)
print()
# + [markdown] _uuid="7cb35c687dbfe283b3bebb3dfb4217acb507330a"
# # Creating a Model For the Competition
#
# Build a Random Forest model and train it on all of **X** and **y**.
# +
# Fit with XGBRegressor
#xgb = XGBRegressor(n_estimators=250, max_depth=5, learning_rate=0.03)
xgb = XGBRegressor(n_estimators=500, max_depth=5, learning_rate=0.02)
xgb.fit(X, y)
# -
# predict using cross validation data
p_cv = xgb.predict(val_X)
print(p_cv[:5], val_y[:5])
# + [markdown] _uuid="fbd740853c59245550529e5fd0fbd62e3b4f4ff8"
# # Make Predictions
# Read the file of "test" data. And apply your model to make predictions
# + _uuid="<KEY>"
# path to file you will use for predictions
test_data_path = '../input/test.csv'
# read test data file using pandas
test_data = pd.read_csv(test_data_path)
test_data['GarageCars'].fillna(0.0, inplace=True)
add_feature(test_data)
# create test_X which comes from test_data but includes only the columns you used for prediction.
# The list of columns is stored in a variable called features
test_X = test_data[features]
# make predictions which we will submit.
#test_preds = rf_model_on_full_data.predict(test_X)
test_preds = xgb.predict(test_X)
# The lines below shows you how to save your data in the format needed to score it in the competition
output = pd.DataFrame({'Id': test_data.Id,
'SalePrice': test_preds})
output.to_csv('submission.csv', index=False)
# + [markdown] _uuid="ac1a3da971c7884eef796d3be458a65dcd361b3d"
# # Test Your Work
# After filling in the code above:
# 1. Click the **Commit and Run** button.
# 2. After your code has finished running, click the small double brackets **<<** in the upper left of your screen. This brings you into view mode of the same page. You will need to scroll down to get back to these instructions.
# 3. Go to the output tab at top of your screen. Select the button to submit your file to the competition.
# 4. If you want to keep working to improve your model, select the edit button. Then you can change your model and repeat the process.
#
# Congratulations, you've started competing in Machine Learning competitions.
#
# # Continuing Your Progress
# There are many ways to improve your model, and **experimenting is a great way to learn at this point.**
#
# The best way to improve your model is to add features. Look at the list of columns and think about what might affect home prices. Some features will cause errors because of issues like missing values or non-numeric data types.
#
# Level 2 of this course will teach you how to handle these types of features. You will also learn to use **xgboost**, a technique giving even better accuracy than Random Forest.
#
#
# # Other Courses
# The **[Pandas course](https://kaggle.com/Learn/Pandas)** will give you the data manipulation skills to quickly go from conceptual idea to implementation in your data science projects.
#
# You are also ready for the **[Deep Learning](https://kaggle.com/Learn/Deep-Learning)** course, where you will build models with better-than-human level performance at computer vision tasks.
#
# ---
# **[Course Home Page](https://www.kaggle.com/learn/machine-learning)**
#
# **[Learn Discussion Forum](https://kaggle.com/learn-forum)**.
#
| kernel_houseprice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="aBgHVKN1wlJb"
# # Linear Regression with Ridge Regularization
# + [markdown] id="C3ooFZ6Iwouo"
# ## Useful imports
# + id="GQabhvF2wWiK"
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# + [markdown] id="KBdLGWuImjTM"
# ## Load the dataset
#
# + id="vP2-WsZal6QV"
# EDIT THE PATH OF THE CSV HERE
HOUSING_PATH = os.path.join(".", "drive", "My Drive", "Colab Notebooks", "AI2",
"Project1", "dataset", "HousingData.csv")
def load_housing_data(housing_path=HOUSING_PATH):
return pd.read_csv(housing_path)
df = load_housing_data(housing_path=HOUSING_PATH)
# + [markdown] id="QsJRbkO3n5LK"
# ## Take a look at the dataset
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="h55psH0YnTMO" outputId="cbbb05cf-99f0-4805-d3d8-84d1da3aa463"
df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="mb6WoWuAw8Jb" outputId="5e07d343-8474-48e7-eed7-7851958d20d2"
# get rid of the "Unnamed: 0" index column
df = df.drop("Unnamed: 0", axis=1)
df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="hrqGUcEMxGFX" outputId="453b97a3-a22d-40c4-b17e-6eb315e0c240"
df.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 301} id="5VxAan_UxH5s" outputId="32105797-9082-4380-cd21-725573c9e035"
df.describe()
# + [markdown] id="qzSqSF5uxYVq"
# Great, we can see that there are no missing values in the dataset
# + [markdown] id="1PpK4JlTwN8f"
# ### Plottings to gain insight
# + colab={"base_uri": "https://localhost:8080/", "height": 879} id="h7hNpnYowL5n" outputId="2f478f2c-36a5-42d3-8f06-05cd270924ea"
df.hist(bins=75, figsize=(20, 15))
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 626} id="XcxURPys8KoP" outputId="ece83332-03cf-4eba-9ce5-367a11f21cf3"
df.plot(kind="scatter", x="Longitude", y="Latitude", alpha=0.5,
s=df["AveOccup"], label="Average Occupation", figsize=(15,10),
c="Median House Value", cmap=plt.get_cmap("jet"), colorbar=True, sharex=False)
plt.legend()
# + [markdown] id="NKOxGeN8dOpu"
# ## Now we would like to create the sets for training, validation and testing.
# ### First let's do some analysis on the data.
# + colab={"base_uri": "https://localhost:8080/", "height": 191} id="tHVmcG635RTw" outputId="6e05e0e9-6c82-4c78-ed5b-42016e053210"
# get Pearsons r coefficient
corr_matrix = df.corr()
corr_matrix["Median House Value"].sort_values(ascending=False)
# + [markdown] id="0Rw1TUPb5ttM"
# ### We observe that the Median Income is highly correlated with the Median House Value. Let's take a better look.
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="SYGRBTVodNtA" outputId="9f5b28f3-66c8-4c43-fa4e-da59857558dc"
# we have seen that most median incomes appear in the range [1, 6]
df["income_cat"] = pd.cut(df["MedInc"],
bins=[0., 1.5, 3.0, 4.5, 6., np.inf],
labels=[1, 2, 3, 4, 5])
df["income_cat"].hist()
# + [markdown] id="HJFyRI0FezCr"
# We can clearly see that there are way more training examples where the Median Income is in the range [1.5, 3.5]. If we split the dataset in a random way and the training set ends up having few instances with Median Income **NOT** in the set [1.5, 3.5], then the sets will be skewed and we will not have good performance in validation and testing. So we must perform a Stratified Splitting of the dataset.
# + [markdown] id="t_x8l1cERSax"
# ### We can use sklearns StratifiedShuffleSplit() to perform the split.
# + id="lTQvN-72wLeb"
from sklearn.model_selection import StratifiedShuffleSplit
# + colab={"base_uri": "https://localhost:8080/", "height": 104} id="vvRX4z4xwLLA" outputId="009d59e6-da2a-49e6-8cb6-13dabaf43436"
# first make a split for training (70%) and validation-testing (30%)
split = StratifiedShuffleSplit(n_splits=1, test_size=0.3, random_state=42)
for train_index, test_validation_index in split.split(df, df["income_cat"]):
train_set = df.iloc[train_index]
test_validation_set = df.iloc[test_validation_index]
# make another split for validation (15%) and testing (15%)
split2 = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=42)
for test_index, validation_index in split2.split(test_validation_set, test_validation_set["income_cat"]):
test_set = test_validation_set.iloc[test_index]
validation_set = test_validation_set.iloc[validation_index]
# now get rid of the "income_cat" column
for set_ in (train_set, validation_set, test_set):
set_.drop("income_cat", axis=1, inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="FwZGlkuMwJ1e" outputId="818a22e1-14a6-4fd5-cadc-fe84cb8785be"
train_set.head()
# + [markdown] id="WAoYb5sJtSan"
# ## Now we have to prepare the data.
# + [markdown] id="Kkx42MlGurFI"
# First start by creating the appropriate sets
# + id="KkBrTwn_VgAO"
# prepare the training data
x_train = train_set.drop("Median House Value", axis=1)
y_train = train_set["Median House Value"].copy()
# prepare the validation data
x_val = validation_set.drop("Median House Value", axis=1)
y_val = validation_set["Median House Value"].copy()
# prepare the testing data
x_test = test_set.drop("Median House Value", axis=1)
y_test = test_set["Median House Value"].copy()
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="g3onwKtoBI1b" outputId="4d921cea-13ab-4184-a58e-f1066cb3d231"
x_test
# + [markdown] id="4T0hRtW1uwM8"
# Now scale the data to have mean 0 ($\mu = 0$) and standard deviation 1 ($\sigma = 1$)
# + id="PNtC1L30Vf2K"
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(x_train)
# scale the data according to the fitting of the training set
x_train = scaler.transform(x_train)
x_val = scaler.transform(x_val)
x_test = scaler.transform(x_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 243} id="70xSkR1KVfo0" outputId="b934e2f1-e6c3-4451-f964-c5f4a069986b"
x_train
# + [markdown] id="HzhnBvMD4M2L"
# ### Convert DataFrames labels to numpy arrays
# + id="WxKppMpjoVDZ"
y_train = np.reshape(y_train.to_numpy(), (-1, 1))
y_val = np.reshape(y_val.to_numpy(), (-1, 1))
y_test = np.reshape(y_test.to_numpy(), (-1, 1))
# + [markdown] id="9Bc1E8cxKf1N"
# ## Now that the data has been processed, build the Ridge Regression class.
# + id="7WzrHUGNVfcl"
class RidgeRegression:
def __init__(self, learning_rate=0.01, _lambda=1):
self.learning_rate = learning_rate
self._lambda = _lambda
self.w = None
def fit(self, X_train, Y_train, X_val, Y_val, gd_type="mini-batch", iterations=100, batch_size=32, t0=5, t1=50, tolerance=10e-6, verbose=0, plot_curves=False):
"""
@param X_train: Training set, which is a numpy array of shape m x n
@param Y_train: Training labels, which is a numpy array of shape m x 1
@param X_val: Validation set, which is a numpy array
@param Y_val: Validation labels, which is a numpy array
@param gd_type: Gradient Descent Type preferred (string).
Possible values are 1) "batch", 2) "sgd", 3) "mini-batch"
@param iterations: Int, denoting how many iterations of gradient
descent to perform
@param batch_size: The batch size in case the gd_type is "mini-batch"
@param t0: Hyperparameter used for sgd learning schedule
@param t1: Hyperparameter used for sgd learning schedule
@param tolerance: In case of batch gradient descent, it is how much we
tolerate that the loss diminished (basically it implements Early Stopping)
@param verbose: Set to 1 if you want the progress of the model to be printed
@param plot_curves: Flag used to denote whether to plot the Learning curves or not
"""
m = X_train.shape[0]
n = X_train.shape[1]
_X_train = np.concatenate((np.ones((m, 1)), X_train), axis=1)
m_val = X_val.shape[0]
_X_val = np.concatenate((np.ones((m_val, 1)), X_val), axis=1)
# initialize the weights
self.w = np.random.randn(n + 1, 1)
# determine which gradient descent method to use, and call it
if gd_type == "batch":
return self._batch_gradient_descent(_X_train, Y_train, _X_val, Y_val, iterations, tolerance, verbose, plot_curves)
elif gd_type == "sgd":
return self._stochastic_gradient_descent(_X_train, Y_train, _X_val, Y_val, t0, t1, iterations, verbose, plot_curves)
elif gd_type == "mini-batch":
return self._mini_batch_gradient_descent(_X_train, Y_train, _X_val, Y_val, batch_size, iterations, verbose, plot_curves)
else:
print("Wrong input for parameter gd_type (gradient descent type).\n"
"Possible values are 1) batch, 2) sgd, 3) mini-batch\n")
def _compute_mae_error(self, m_test, Y_hat, Y_test):
return (1. / m_test) * np.sum(np.abs(Y_hat - Y_test), axis=0)
def _compute_mse_loss(self, X, Y):
m = X.shape[0]
return (1. / m) * np.sum(np.square((X @ self.w - Y)), axis=0)
def _compute_ridge_loss(self, X, Y):
mse = self._compute_mse_loss(X, Y)
regularization_factor = self._lambda * (1. / 2) * np.sum(np.square(self.w), axis=0)
return mse + regularization_factor
def _compute_ridge_gradient(self, X, Y):
m = X.shape[0]
mse_grad = (2. / m) * (X.T @ (X @ self.w - Y))
regularization_grad = self._lambda * np.concatenate((np.zeros((1, 1)), self.w[1:]), axis=0)
return mse_grad + regularization_grad
def _batch_gradient_descent(self, X_train, Y_train, X_val, Y_val, iterations, tolerance, verbose, plot_curves=False):
"""
To see what parameters represent, check the comments in the .fit() method
Note that batch gradient descent has a fixed learning rate parameter,
might make a schedule later if I don't forget it
"""
# arrays to store loss functions values in order to plot learning curves later
train_loss = np.zeros((iterations, 1))
val_loss = np.zeros((iterations, 1))
# value to monitor early stopping
stopped_at_iteration = iterations
# start the loop for batch gradient descent
for iteration in range(iterations):
gradients = self._compute_ridge_gradient(X_train, Y_train)
self.w -= self.learning_rate * gradients
train_loss[iteration] = self._compute_ridge_loss(X_train, Y_train)
val_loss[iteration] = self._compute_mse_loss(X_val, Y_val)
if verbose:
print("At iteration {}, Training Loss: {}, Validation Loss: {}".format(iteration, train_loss[iteration], val_loss[iteration]))
if (tolerance is not None) and (iteration > 0) and (val_loss[iteration - 1] - val_loss[iteration] <= tolerance):
if verbose:
print("Stopped training at iteration {} because validation loss started decreasing very slowly\n".format(iteration))
print("Previous validation loss: {}, current validation loss: {}\n".format(val_loss[iteration - 1], val_loss[iteration]))
stopped_at_iteration = iteration
break
train_loss = train_loss[:stopped_at_iteration]
val_loss = val_loss[:stopped_at_iteration]
# plot the learning curves if specified
if plot_curves:
self._plot_curves(train_loss, val_loss, "Batch Gradient Descent")
return train_loss, val_loss
def _learning_rate_schedule(self, value, t0, t1=50):
return t0 / (value + t1)
def _stochastic_gradient_descent(self, X_train, Y_train, X_val, Y_val, t0, t1, iterations, verbose, plot_curves=False):
"""
To see what parameters represent, check the comments in the .fit() method
"""
# total training examples
m = X_train.shape[0]
# arrays to store loss functions values in order to plot learning curves later
train_loss = np.zeros((iterations, 1))
val_loss = np.zeros((iterations, 1))
# start the loop for stochastic gradient descent
for iteration in range(iterations):
for tr_example in range(m):
random_index = np.random.randint(m)
xi = np.reshape(X_train[random_index, :], (1, -1))
yi = np.reshape(Y_train[random_index], (-1, 1))
gradients = self._compute_ridge_gradient(xi, yi)
self.learning_rate = self._learning_rate_schedule(iteration * m + tr_example, t0=t0, t1=t1)
self.w -= self.learning_rate * gradients
train_loss[iteration] = self._compute_ridge_loss(X_train, Y_train)
val_loss[iteration] = self._compute_mse_loss(X_val, Y_val)
if verbose:
print("At iteration {}, Training Loss: {}, Validation Loss: {}".format(iteration, train_loss[iteration], val_loss[iteration]))
# plot the learning curves if specified
if plot_curves:
self._plot_curves(train_loss, val_loss, "Stochastic Gradient Descent")
return train_loss, val_loss
def _mini_batch_gradient_descent(self, X_train, Y_train, X_val, Y_val, batch_size, iterations, verbose, plot_curves=False):
"""
To see what parameters represent, check the comments in the .fit() method
"""
# total training examples
m = X_train.shape[0]
# arrays to store loss functions values in order to plot learning curves later
train_loss = np.zeros((iterations, 1))
val_loss = np.zeros((iterations, 1))
total_batches = m // batch_size
if m % batch_size != 0:
total_batches += 1
# start the loop for mini batch gradient descent
for iteration in range(iterations):
for batch in range(total_batches):
random_indices = np.random.choice(m, batch_size, replace=False)
x_batch = X_train[random_indices, :]
y_batch = Y_train[random_indices]
gradients = self._compute_ridge_gradient(x_batch, y_batch)
self.w -= self.learning_rate * gradients
train_loss[iteration] = self._compute_ridge_loss(X_train, Y_train)
val_loss[iteration] = self._compute_mse_loss(X_val, Y_val)
if verbose:
print("At iteration {}, Training Loss: {}, Validation Loss: {}".format(iteration, train_loss[iteration], val_loss[iteration]))
# plot the learning curves if specified
if plot_curves:
self._plot_curves(train_loss, val_loss, "Mini-Batch Gradient Descent")
return train_loss, val_loss
def _plot_curves(self, train_loss, val_loss, gd_type):
plt.rcParams['figure.figsize'] = [15, 10]
plt.plot(train_loss, color='r', label="Training Loss")
plt.plot(val_loss, color='b', linestyle="--", label="Validation Loss")
plt.legend()
plt.xlabel('Iterations')
plt.ylabel('Loss J(w)')
plt.title("Learning Curves for " + gd_type)
plt.show()
def predict(self, X_test, Y_test):
m_test = X_test.shape[0]
_X_test = np.concatenate((np.ones((m_test, 1)), X_test), axis=1)
Y_hat = _X_test @ self.w
mae_accuracy = self._compute_mae_error(m_test, Y_hat, Y_test)
mse_loss = self._compute_mse_loss(_X_test, Y_test)
return Y_hat, mae_accuracy, mse_loss
# + [markdown] id="A4b8fmpPKkSp"
# ## Now try all the 3 different methods and compare them!
# + [markdown] id="xqY8eh9dLBp7"
# ### Batch Gradient Descent
# + colab={"base_uri": "https://localhost:8080/", "height": 621} id="UvpM-Wu8K9yT" outputId="efd48600-485e-458e-963c-22f5b1c66d56"
ridge_batch_gd = RidgeRegression(learning_rate=0.01, _lambda=0.001)
train_loss_bgd, val_loss_bgd = ridge_batch_gd.fit(x_train, y_train, x_val, y_val, gd_type="batch", iterations=150, tolerance=10e-8, verbose=0, plot_curves=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 416} id="Q4b7cQAtMUDz" outputId="6db774ed-8757-45c4-bb91-992551cec723"
results_bgd, mae_test_accuracy_bgd, mse_test_loss_bgd = ridge_batch_gd.predict(x_test, y_test)
m_test = y_test.shape[0]
# change 20 with m_test if you want all the predictions to be printed
for test_example in range(20):
print("Prediction: {}, Actual Value: {}". format(results_bgd[test_example], y_test[test_example]))
print("\nBGD: For the Test set, Mean Absolute Error: {}, Mean Squared Error: {}\n".format(mae_test_accuracy_bgd, mse_test_loss_bgd))
# + [markdown] id="7Xlv9kfWNFfc"
# ### Stochastic Gradient Descent
# + colab={"base_uri": "https://localhost:8080/", "height": 621} id="wv8FASQHVfO7" outputId="de60b23a-0393-4c5e-b8ce-9109956b8f23"
ridge_sgd = RidgeRegression(learning_rate=0.01, _lambda=0.01)
train_loss_sgd, val_loss_sgd = ridge_sgd.fit(x_train, y_train, x_val, y_val, gd_type="sgd", iterations=100, verbose=0, plot_curves=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 416} id="5A5RYrXhNU4l" outputId="d00bfd87-1347-44cf-9860-e3a812813b49"
results_sgd, mae_test_accuracy_sgd, mse_test_loss_sgd = ridge_sgd.predict(x_test, y_test)
m_test = y_test.shape[0]
# change 20 with m_test if you want all the predictions to be printed
for test_example in range(20):
print("Prediction: {}, Actual Value: {}". format(results_sgd[test_example], y_test[test_example]))
print("\nSGD: For the Test set, Mean Absolute Error: {}, Mean Squared Error: {}\n".format(mae_test_accuracy_sgd, mse_test_loss_sgd))
# + [markdown] id="t5VMAP5iN8Be"
# ### Mini-Batch Gradient Descent
# + colab={"base_uri": "https://localhost:8080/", "height": 621} id="H-0i8FGpVerL" outputId="0f72a4c9-efa5-4f76-e6c3-76a0ffb03aed"
ridge_mbgd = RidgeRegression(learning_rate=0.001, _lambda=0.01)
train_loss_mbgd, val_loss_mbgd = ridge_mbgd.fit(x_train, y_train, x_val, y_val, gd_type="mini-batch", iterations=100, verbose=0, plot_curves=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 416} id="taWsFtBGxyRu" outputId="39eb9f20-2f86-496e-a204-23f116312e15"
results_mbgd, mae_test_accuracy_mbgd, mse_test_loss_mbgd = ridge_mbgd.predict(x_test, y_test)
m_test = y_test.shape[0]
# change 20 with m_test if you want all the predictions to be printed
for test_example in range(20):
print("Prediction: {}, Actual Value: {}". format(results_mbgd[test_example], y_test[test_example]))
print("\nMBGD: For the Test set, Mean Absolute Error: {}, Mean Squared Error: {}\n".format(mae_test_accuracy_mbgd, mse_test_loss_mbgd))
# + [markdown] id="BT2NKR8vGVVp"
# ## As a last touch we can implement some Grid Search functions to fine-tune our models
# + id="TFlRkiLuL5yh"
class GridSearchRidge:
def __init__(self, x_train, y_train, x_val, y_val):
self.x_train = x_train
self.y_train = y_train
self.x_val = x_val
self.y_val = y_val
def perform_grid_search(self, gd_type, hyperparameters):
"""
@param gd_type: the type of gradient descent
@param hyperparameters: dictionary with hyperprameters appropriate for each
gradient descent method
"""
# determine in which type of gradient descent to perform the grid search
if gd_type == "batch":
ridge_models, training_losses, validation_losses = self._bgd_grid_search(hyperparameters)
elif gd_type == "sgd":
ridge_models, training_losses, validation_losses = self._sgd_grid_search(hyperparameters)
elif gd_type == "mini-batch":
ridge_models, training_losses, validation_losses = self._mbgd_grid_search(hyperparameters)
else:
print("Wrong input for parameter gd_type (gradient descent type).\n"
"Possible values are 1) batch, 2) sgd, 3) mini-batch\n")
return None
# find the best configuration of hyperparameters
min_loss = float('inf')
best_configuration = None
for key in validation_losses.keys():
if validation_losses[key][-1] < min_loss:
min_loss = validation_losses[key][-1]
best_configuration = key
# print corresponding message depending on the gradient descent type
if gd_type == "batch":
learning_rate = best_configuration[0]
_lambda = best_configuration[1]
print("The best configuration of hyperparameters is learning rate = {}, lambda = {}. Validation Loss is {}\n\n".format(learning_rate, _lambda, min_loss))
elif gd_type == "sgd":
_lambda = best_configuration[0]
t0 = best_configuration[1]
t1 = best_configuration[2]
print("The best configuration of hyperparameters is lambda = {}, t0 = {}, t1 = {}. Validation Loss is {}\n\n".format(_lambda, t0, t1, min_loss))
else:
learning_rate = best_configuration[0]
_lambda = best_configuration[1]
batch_size = best_configuration[2]
print("\n\nThe best configuration of hyperparameters is learning rate = {}, lambda = {}, batch_size = {}. Validation Loss is {}\n\n".format(learning_rate, _lambda, batch_size, min_loss))
self._plot_validation_losses(gd_type, validation_losses, best_configuration)
return best_configuration, ridge_models[best_configuration], training_losses[best_configuration], validation_losses[best_configuration]
def _bgd_grid_search(self, hyperparameters):
learning_rates = hyperparameters["learning_rates"]
_lambdas = hyperparameters["_lambdas"]
iterations = hyperparameters.get("iterations", 150)
tolerance = hyperparameters.get("tolerance", 10e-9)
# use dictionaries to store information
ridge_models = {}
training_losses = {}
validation_losses = {}
for lr in learning_rates:
for _l in _lambdas:
ridge_models[(lr, _l)] = RidgeRegression(learning_rate=lr, _lambda=_l)
train_loss, val_loss = ridge_models[(lr, _l)].fit(self.x_train, self.y_train, self.x_val, self.y_val, gd_type="batch", iterations=iterations, tolerance=tolerance)
training_losses[(lr, _l)] = train_loss
validation_losses[(lr, _l)] = val_loss
return ridge_models, training_losses, validation_losses
def _sgd_grid_search(self, hyperparameters):
learning_rate = hyperparameters.get("learning_rate", 0.001)
_lambdas = hyperparameters["_lambdas"]
t0s = hyperparameters["t0"]
t1s = hyperparameters["t1"]
iterations = hyperparameters.get("iterations", 100)
# use dictionaries to store information
ridge_models = {}
training_losses = {}
validation_losses = {}
for _l in _lambdas:
for t0 in t0s:
for t1 in t1s:
ridge_models[(_l, t0, t1)] = RidgeRegression(learning_rate=learning_rate, _lambda=_l)
train_loss, val_loss = ridge_models[(_l, t0, t1)].fit(self.x_train, self.y_train, self.x_val, self.y_val, gd_type="sgd", t0=t0, t1=t1, iterations=iterations)
training_losses[(_l, t0, t1)] = train_loss
validation_losses[(_l, t0, t1)] = val_loss
return ridge_models, training_losses, validation_losses
def _mbgd_grid_search(self, hyperparameters):
learning_rates = hyperparameters["learning_rates"]
_lambdas = hyperparameters["_lambdas"]
batch_sizes = hyperparameters["batch_sizes"]
iterations = hyperparameters.get("iterations", 100)
# use dictionaries to store information
ridge_models = {}
training_losses = {}
validation_losses = {}
for lr in learning_rates:
for _l in _lambdas:
for batch_size in batch_sizes:
ridge_models[(lr, _l, batch_size)] = RidgeRegression(learning_rate=lr, _lambda=_l)
train_loss, val_loss = ridge_models[(lr, _l, batch_size)].fit(self.x_train, self.y_train, self.x_val, self.y_val, gd_type="mini-batch", batch_size=batch_size, iterations=iterations)
training_losses[(lr, _l, batch_size)] = train_loss
validation_losses[(lr, _l, batch_size)] = val_loss
return ridge_models, training_losses, validation_losses
def _plot_validation_losses(self, gd_type, validation_losses, best_configuration):
"""
Plotting method. It can be easily changed to plot preferred attributes.
"""
all_configurations = list(validation_losses)
# batch gradient descent
if gd_type == "batch":
plt.rcParams['figure.figsize'] = [28, 10]
learning_rate, _lambda = best_configuration
# keep the configurations with the optimal lambda just for the plot
target_configurations = [configuration for configuration in all_configurations if configuration[1] == _lambda]
total_conf = len(target_configurations)
# use matplotlib for the plots
f, axes = plt.subplots(nrows=1, ncols=total_conf)
f.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.xlabel("iterations", fontsize=18)
plt.ylabel("Loss J(w)", fontsize=18)
plt.suptitle("Validation Loss for different learning rates (alpha), with fixed regularization parameter lambda = {}".format(_lambda), fontsize=15)
for i in range(total_conf):
current_conf = target_configurations[i]
val_loss = validation_losses[current_conf]
label = "learning rate = {}".format(current_conf[0])
axes[i].plot(val_loss, color='b', label=label)
axes[i].legend()
# stochastic gradient descent
elif gd_type == "sgd":
plt.rcParams['figure.figsize'] = [28, 10]
_lambda, t0, t1 = best_configuration
# keep the configurations with optimal t0 and t1 just for the plot
target_configurations = [configuration for configuration in all_configurations if configuration[1] == t0 and configuration[2] == t1]
total_conf = len(target_configurations)
# use matplotlib for the plots
f, axes = plt.subplots(nrows=1, ncols=total_conf)
f.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.xlabel("iterations", fontsize=18)
plt.ylabel("Loss J(w)", fontsize=18)
plt.suptitle("Validation Loss for different regularization parameters (lambda), with fixed t0 = {} and t1 = {}".format(t0, t1), fontsize=15)
for i in range(total_conf):
current_conf = target_configurations[i]
val_loss = validation_losses[current_conf]
label = "lambda = {}".format(current_conf[0])
axes[i].plot(val_loss, color='r', label=label)
axes[i].legend()
# mini batch gradient descent
else:
plt.rcParams['figure.figsize'] = [28, 10]
learning_rate, _lambda, batch_size = best_configuration
# keep the configurations with optimal learning rate and lambda just for the plot
target_configurations = [configuration for configuration in all_configurations if configuration[0] == learning_rate and configuration[1] == _lambda]
total_conf = len(target_configurations)
# use matplotlib for the plots
f, axes = plt.subplots(nrows=1, ncols=total_conf)
f.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.xlabel("iterations", fontsize=18)
plt.ylabel("Loss J(w)", fontsize=18)
plt.suptitle("Validation Loss for different batch sizes, with fixed learning rate = {} and lambda = {}".format(learning_rate, _lambda), fontsize=15)
for i in range(total_conf):
current_conf = target_configurations[i]
val_loss = validation_losses[current_conf]
label = "batch size = {}".format(current_conf[2])
axes[i].plot(val_loss, color='g', label=label)
axes[i].legend()
# + [markdown] id="f8OGDmD1wF6w"
# ### Perform a grid search for every type of gradient descent
#
# + id="DNIJjcfdx4Gy"
# create a GridSearch Object
grid_search = GridSearchRidge(x_train, y_train, x_val, y_val)
# + [markdown] id="2NfzbveB4n3i"
# Perform Grid Search for Batch Gradient Descent
# + colab={"base_uri": "https://localhost:8080/", "height": 738} id="gbMeWArgcF5g" outputId="b2b9e716-70e9-4846-97f0-180b771921da"
hyperparameters_bgd = {
"learning_rates": [0.0001, 0.001, 0.01, 0.1],
"_lambdas": [0.0001, 0.001, 0.01, 0.1, 1]
}
# this will run instantly
result_bgd = grid_search.perform_grid_search(gd_type="batch", hyperparameters=hyperparameters_bgd)
# + [markdown] id="hai4d6kD4sak"
# Perform Grid Search for Stochastic Gradient Descent
# + colab={"base_uri": "https://localhost:8080/", "height": 738} id="jTzCZ5NY4yCz" outputId="537d77cd-9696-4cbe-d0a2-fbcd2875560f"
hyperparameters_sgd = {
"_lambdas": [0.001, 0.01, 0.1, 1],
"t0": [5, 10],
"t1": [50, 100],
"iterations": 50
}
# this will take 4-5 mins to run
result_sgd = grid_search.perform_grid_search(gd_type="sgd", hyperparameters=hyperparameters_sgd)
# + [markdown] id="e0MdMB3a4ygp"
# Perform Grid Search for Mini-Batch Gradient Descent
# + colab={"base_uri": "https://localhost:8080/", "height": 773} id="vFisRu4k40qU" outputId="330413a2-9600-4a2b-b225-73cd802306c4"
hyperparameters_mbgd = {
"learning_rates": [0.0001, 0.001, 0.01, 0.1],
"_lambdas": [0.001, 0.01, 0.1, 1],
"batch_sizes": [16, 32, 64, 128]
}
# this will take 8-10 mins to run
result_mbgd = grid_search.perform_grid_search(gd_type="mini-batch", hyperparameters=hyperparameters_mbgd)
# + id="yYsRJnIpALTn"
| Project1/Notebooks/Exercise_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="MI3XOKoea9iZ"
# # Tesseract and OCR doc
#
# This notebook was created by <NAME> for the Ancient World Citation Analysis project in Spring 2019.
#
# The work was created with Anaconda Environment manager
#
# **Python version:** 3.7.3 <br>
# -
# 1. You may want to familiarize yourself with the jupyter magic commands, specifically with the exclamation marks, which allows you to access your terminal. These commands are tested on a mac. Some examples:
#
# ```
# # shows the content of your directory
# # !ls
#
# # shows the content of sub-directories
# # !ls subdirectory/example/deeper
# ```
# # 1) Installation
# Using jupyter magic commands, we can install the package locally. We can also use the terminal too. <br>
# To install for the first time, uncomment the line two below and run it. For future uses, keep it as a comment.
#
# **Install Tesseract**
# ```
# pip3 install tesseract
# ```
# +
# # !pip3 install tesseract
# -
# # 2) File Organization
# Since we are working with files external to this notebook, we have to manage our files and folders carefully. Its always good practice to keep things organized.
#
# **Suppose you have a book called _YoffeeandCowgill-TheCollapseofAncientStatesandCivilizations_** as a pdf <and want to turn it into a text file for use with tesseract
# Our example is with a folder structure like the image below.<br>
#
# The **Tesseract.ipynb** is the notebook created currently. <br> We have our file of interest in a folder called **pdfs**.<br>
# We also have .tiffs in the **tiffs** folder. <br>Eventually output .txt file in the **txt** folder<br>
#
# <img src="pics/tesseract_directory.png">
#
# The rest of the instructions and code depends on this structure by the use of "paths".
# # 3) Using Tesseract
# **We will OCR one file from the pdfs and place it in the txt folder**
#
# 1. Prepare your pdf as a .tiff file (for Macs, Preview, export as .tiff)
# 2. Run the tesseract program with magic commands:
# ```
# tesseract tiffs/file.tiff txt/filename
# ```
# Notice that there are three seperate commands. <br>
# 1. **tesseract** invokes the program
# 1. **tiffs/file.tiff** tells tesseract where to look for the file to ocr. Its in the folder **tiffs** and the filename is **file.tiff** ("file is just a generic name for whatever file you have)
# 1. **txt/filename** is where we specifiy where to output our .txt file. We are saying, put it in the folder **txt** and lets name it _filename_
#
# Uncomment and run the code below to run.
# +
# # !tesseract tiffs/YoffeeandCowgill-TheCollapseofAncientStatesandCivilizations.tiff txt/civilizatoins
# -
# **Done!**, you should see the txt file in your folder. Try to access it with the magic commands!
# # 4) Miscellanous Instructions
# ### a) If you would like a different language
# run tesseract with the -l tag.
# ```
# tesseract tiffs/file.pdf txt/filename -l deu
# ```
# The -l tells tesseract that our file is in german. Tesseract uses 3-character ISO 639-2 language codes.
# ### b) Multiple langauges
# ```
# tesseract tiffs/file.pdf txt/filename -l deu+eng
# ```
# If something has two languages, add the languages like the example
# !for i in 1 2 3 4; do echo $i; done
# ls
# !for i in tiffs/*.tiff; do echo $i; done
# !for i in tiffs/*.tiff; do tesseract $i out; done
| Tesseract-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mean of Means
#
# Script to calculate the mean of means for motion data from MR-Linac
# This is then used for RT margins calculations
#
# $$RMS = \sqrt{\frac{1}{n}\sum_ix^2_i}$$
#
# ## Sections
# 1. Importing and defining functions
# 2. Declaring folder names and parameters
# 3. Running scripts
#
# ## Section 1:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import csv
import statistics
import math
import numpy as np
from pathlib import Path
import re
# +
## TODO: move this into another script to import for clean code
# similar to max_min but this being the cleaner one
# need to adapt slightly for max_min
def splitter_base_comparison(original_df, roi):
"""Splits the original csv into two dataframes - base scan and comparison scan for ROI.
Arguments:
original_df = full original csv in pandas dataFrame
roi = string of region of interest, ie. "Bladder"
Outputs:
comparison_scan = comparison week scan in pandas dataFrame for ROI
df_base = base week scan data in pandas dataFrame for ROI
"""
# pull out comparison week data from original df
# get "comparison week' value from unique exam values
exam_unique_values = original_df.exam.unique()
# perform regex on exam values and use max to return higher week value
comparison_week = (max(re.search(r'(Wk.)', exam_unique_values[0]).group(0),
re.search(r'(Wk.)', exam_unique_values[-1]).group(0)))
# if case of intrafraction, same week: then take 2nd scan
if comparison_week == re.search(r'(Wk.)', exam_unique_values[0]).group(0):
comparison_df = original_df.loc[original_df['exam'] == exam_unique_values[-1]]
else:
# check for exam columns that contain comparison week and keep those rows
comparison_df = original_df.loc[original_df['exam'].str.contains(comparison_week)]
# then filter df by roi
comparison_df = comparison_df[comparison_df['roi'].str.contains(roi)]
# get base week df based on ROI, and remove comparison week
df_base = original_df[original_df['roi'].str.match(roi)]
df_base = df_base[df_base['R.x'].eq(0)] if ('R.x' in df_base) else df_base[df_base['S.z'].eq(0)]
# nb. not the most failsafe method for exception handling!
return df_base, comparison_df
def calculate_means_sd(df, roi, direction):
# run splitter to separate out comparison week data and base week data into two dataframes
df_base, df_compare = splitter_base_comparison(df, roi)
# since only comparison week has motion values, calculate mean and sd for those
mean = df_compare[direction].mean()
sd = df_compare[direction].std()
return df_base, df_compare, mean, sd
# -
# main function
def output_means(dir_list, roi, direction):
output_file_paths = [] # init output_paths
output_boxmean_paths = []
output_boxsd_paths = []
print("Computing for ROI: {}".format(roi))
for folder in dir_list:
output = []
sigma = []
# this part collects means from each patient into a string for calculation of mean of means later
for filename in os.listdir(folder) :
if direction in ['R.x', 'L.x', 'A.y', 'P.y'] and filename.endswith('.csv') and filename.startswith('Z') and ("SUPINF" not in filename):
df = pd.read_csv(os.path.join(folder, filename))
df_base, df_compare, mean, sd = calculate_means_sd(df, roi, direction)
output.append(mean)
sigma.append(sd)
elif direction in ['S.z', 'I.z'] and filename.endswith('.csv') and filename.startswith('Z') and ("SUPINF" in filename):
df = pd.read_csv(os.path.join(folder, filename))
df_base, df_compare, mean, sd = calculate_means_sd(df, roi, direction)
output.append(mean)
sigma.append(sd)
# start of new folder indent
# calculation of mean of means and, rmse
mean_of_means = statistics.mean(output)
sd_means_per_patient = statistics.stdev(output)
root_mean_square = math.sqrt(statistics.mean([n**2 for n in sigma]))
mean_of_means = [direction, roi, mean_of_means, sd_means_per_patient, root_mean_square]
# get outputs for boxplots
# why insert again?
sigma.insert(0, roi)
sigma.insert(0, direction)
output.insert(0, roi)
output.insert(0, direction)
#output means of means to csv
os.makedirs(os.path.join(folder, 'output'), exist_ok=True)
with open(os.path.join(folder, 'output/meanofmeans_{}.csv'.format(os.path.basename(folder))), 'a') as file_:
output_file_paths.append(os.path.realpath(file_.name))
wr = csv.writer(file_, delimiter=',')
wr.writerow(mean_of_means)
os.makedirs(os.path.join(folder, 'output'), exist_ok=True)
with open(os.path.join(folder, 'output/boxplot_mean_data_{}.csv'.format(os.path.basename(folder))), 'a') as file_:
output_boxmean_paths.append(os.path.realpath(file_.name))
wr = csv.writer(file_, delimiter=',')
wr.writerow(output)
os.makedirs(os.path.join(folder, 'output'), exist_ok=True)
with open(os.path.join(folder, 'output/boxplot_sd_data_{}.csv'.format(os.path.basename(folder))), 'a') as file_:
output_boxsd_paths.append(os.path.realpath(file_.name))
wr = csv.writer(file_, delimiter=',')
wr.writerow(sigma)
return list(set(output_file_paths)), list(set(output_boxmean_paths)), list(set(output_boxsd_paths))
# +
## TODO: move this to another separate script too
def write_headers(headers, output_file_paths):
# get output file paths and to add headers to output files
for file_path in output_file_paths:
os.makedirs(os.path.dirname(file_path), exist_ok=True)
add_header = pd.read_csv(file_path, names=headers, index_col=None)
add_header.to_csv(file_path)
print('Done header:' + file_path)
# -
def save_boxplot(file, statistic):
print(file)
means_boxplot = pd.read_csv(file, header = None)
headers = ['direction', 'volume' ]
headers.extend(range(2, means_boxplot.shape[1]))
means_boxplot.columns = headers
means_boxplot = means_boxplot[~means_boxplot.volume.str.contains("GTV")]
means = means_boxplot.melt(id_vars=['direction', 'volume'], var_name='scan_num', value_name='distance')
plt.figure(figsize=(15,9.27))
sns.boxplot(x='direction', y='distance', hue='volume',
data= means,
whis=[5,95],
meanprops={"marker": None,"markerfacecolor":"black", "markeredgecolor":"black", "color":"black", "linestyle":"solid"},
medianprops={"linewidth":0},
meanline=True, showmeans=True,
showfliers=False).set_title('Patient {} displacement {}'.format(statistic, os.path.basename(Path(file).resolve().parents[1])))
sns.despine()
plt.tick_params(bottom=False)
plt.savefig(os.path.join(Path(file).resolve().parents[1], 'output/{}{}.png'.format(statistic, os.path.basename(Path(file).resolve().parents[1]))))
return plt, means
# ## Part 2 : Specify folders and params
# +
folder_name = [
'Dec20_data/Interfraction/Interfraction 3D 0.8',
'Dec20_data/Interfraction/Interfraction DIXON 2.0',
'Dec20_data/Intrafraction 3D vs DIXON HR IP 2.0'
]
dir_list = []
for i in range(len(folder_name)):
dir_list.append(
os.path.join(os.getcwd(), folder_name[i])
)
roi_list = ['CTV_Clin', 'CTV_SmallVol', 'GTV_T']
direction_list = ['R.x', 'L.x', 'A.y', 'P.y', 'S.z', 'I.z']
# -
# ## Part 3 : Run Scripts
# +
# execute all functions
# 1. do all calculations
for roi in roi_list:
for direction in direction_list:
output_file_paths, output_boxmean_paths, output_boxsd_paths = output_means(dir_list, roi, direction)
# 2. write all headers'
headers_meansofmeans = ['direction', 'volume', 'mean of means', 'E', 'sigma']
write_headers(headers_meansofmeans, output_file_paths)
# 3. draw all boxplots
for file in output_boxmean_paths:
save_boxplot(file, 'mean')
for file in output_boxsd_paths:
save_boxplot(file, 'std dev')
# -
#
| jupyter notebooks (to be deprecated)/meanofmeans.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CLUSTERING - KMEANS ALGO. (PYTHON).
# Submitted By : <NAME>
# * From the given ‘Iris’ dataset, predict the optimum number of clusters and represent it visually.
# importing the reqired libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import seaborn as sns
# Importing the Dataset.
url = "iris.csv"
data = pd.read_csv(url)
# # Analysing Dataset
# Checking the shape of data set
data.shape
# * Here we have 150 rows and 6 columns
# Displying first 10 records of data.
data.head(10)
# Describing the dataset i.e. finding basic mathmatical operation on data.
data.describe()
# Getting information about data
data.info()
# * Here we have 150 records with the respective data types and all values are not null.
# Checking unique species of iris and their count
data['Species'].value_counts()
# * So we have 50 records of each specie.
# checking the variation of species with given parameters
sns.pairplot(data, hue='Species')
# * we can see that the charactersics of iris setosa is different form virginica and varsicolor
# Dropping the not required columns from data
sp = data["Species"].values
data.drop("Id", inplace=True, axis=1)
data.drop("Species", inplace=True, axis=1)
data.head(5)
# checking correlation between variables
data.corr()
# * here we can see that petal length and sepallength are correlated, petal width and sepallength are correlated
# * petal length and petal width are correlated.
# +
# Finding best no of clusters for clustering by elbow method
x = data.iloc[:,[0,1,2]].values
R = range(1,10)
Sum_of_Squared_Distance = []
for k in R:
km = KMeans(n_clusters = k)
km = km.fit(x)
Sum_of_Squared_Distance.append(km.inertia_)
# -
# Visualizing the Optimum Clusters
plt.plot(R, Sum_of_Squared_Distance, 'go--', color="green")
plt.title("Optimum Clusters By Elbow Method")
plt.xlabel("No of Clusters")
plt.ylabel("Sum Of Squared Distance")
plt.grid()
plt.show()
# * Here we are getting the optimum clusters as 3 as drop after 3 is minimum
# # Training And Testing The Model
kmeans = KMeans(n_clusters = 3, init = 'k-means++',
max_iter = 300, n_init = 10, random_state = 0)
predictions = kmeans.fit_predict(x)
# Checking The Values
predictions
# +
# Visualising the clusters predicted by our model
plt.figure(figsize=(10,7))
plt.scatter(x[predictions == 0, 0], x[predictions == 0, 1],
s = 100, c = 'red', label = 'Iris-setosa')
plt.scatter(x[predictions == 1, 0], x[predictions == 1, 1],
s = 100, c = 'blue', label = 'Iris-versicolour')
plt.scatter(x[predictions == 2, 0], x[predictions == 2, 1],
s = 100, c = 'green', label = 'Iris-virginica')
# Plotting the centroids of the clusters
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:,1],
s = 100, c = 'yellow', label = 'Centroids')
plt.grid()
plt.legend()
plt.show()
# -
Species = ["Iris-setosa","Iris-versicolor" ,"Iris-virginica"]
Pred_Species = []
for i in predictions:
Pred_Species.append(Species[i])
sns.countplot(Pred_Species)
plt.xlabel("Species")
plt.ylabel("Predicted")
plt.show()
| Machine Learning/Kmeans_Clustering/Kmeans_Clustering_Algorithm.ipynb |
// -*- coding: utf-8 -*-
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .js
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: JavaScript (Node.js)
// language: javascript
// name: javascript
// ---
// # The Cryptopals Crypto Challenges (Javascript)
// ## Set 1
//
// ### Convert hex to base64
// +
const hexString = "49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d";
console.log(Buffer.from(hexString, 'hex').toString('base64'));
// -
// ### Fixed XOR
// +
const s1 = Buffer.from("1c0111001f010100061a024b53535009181c", 'hex')
const s2 = Buffer.from("686974207468652062756c6c277320657965", 'hex')
var s = ""
for(var i = 0; i < s1.length; i++) {
s += (s1[i] ^ s2[i]).toString(16)
}
console.log(s);
// -
// ### Single-byte XOR Cipher
// +
const encryptedString = Buffer.from("1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736", 'hex')
for(var i = 0; i < 256; i++){
var output = [];
for(var j = 0; j < encryptedString.length; j++) {
output[j] = i ^ encryptedString[j];
}
console.log(i, Buffer.from(output).toString("ascii"));
}
// Answer at character (88,120,216,248) which represent (X,x,Ï,°) in ASCII
// 88 Cooking MC's like a pound of bacon
// 120 cOOKINGmcSLIKEAPOUNDOFBACON
// 216 Cooking MC's like a pound of bacon
// 248 cOOKINGmcSLIKEAPOUNDOFBACON
// -
// ### Detect single-character XOR
// +
const fs = require('fs')
fs.readFile("set1-4.txt", "utf8" , (err, data) => {
if (err) {
console.error(err)
return
}
const encryptedArr = data.split("\n");
for(var i = 0; i < 256; i++){
var output = [];
for(var j = 0; j < encryptedArr.length; j++) {
var x = Buffer.from(encryptedArr[j], 'hex');
for(var z = 0; z < x.length; z++) {
output[z] = i ^ x[z];
}
console.log(j, i, Buffer.from(output).toString("ascii"));
}
}
})
// HEX string at position 170 in the file can be decrypted by XORing it
// with ASCII charachters (5,Á) represetned by (53,181).
// 170 53 Now that the party is jumping
// 170 181 Now that the party is jumping
// -
// ### Implement repeating-key XOR
// +
const txt = Buffer.from("Burning 'em, if you ain't quick and nimble\nI go crazy when I hear a cymbal");
const key = Buffer.from("ICE");
var output = [];
for(var i = 0; i < txt.length; i++) {
output[i] = txt[i] ^ key[i%3];
}
console.log(Buffer.from(output).toString("hex"));
// -
| Set1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Cobalt FCC
# For this calculation all input files can be found in [test/Co](https://github.com/K4ys4r/ProcarPy).<br>
# The Band Structure has bee calculated along the high symmetry $k-$points path: $X-\Gamma-\overline{X}$.
# ### LORBIT = 10
from ProcarPy import PROCARBandStructure as PB
from ProcarPy import p
Co_arg = dict(
filename = "./LORBIT=10/PROCAR",
ef = 11.12565589,
path = [r"$X$",r"$\Gamma$",r"$\overline{X}$"],
pathstyle = "continuous",
SO = True
)
Co_Bands = PB(**Co_arg)
# Plotting the Total Band Structure
p.rc("font",size=20)
Co_Bands.Init_Fig(height=10)
Co_Bands.getTotalBandsPlot(label="Co $fcc$")
Co_Bands.PlotShow(-10,5)
# +
# Plotting the. Projected Band of s and p orbitals of Co atom.
s=100
alpha=0.5
ymin,ymax = -10,7
width,height = 10,10
Co_Bands.Init_Fig(width=width,height=height)
Co_Bands.getTotalBandsPlot(color="k",alpha=alpha,lw=0.5)
Co_Bands.getOrbitalBandsPlot(magn="mz",sign="+",orbital="s",marker="o",color="b",scale=s,alpha=alpha)
Co_Bands.getOrbitalBandsPlot(magn="mz",sign="-",orbital="s",marker="o",color="r",scale=s,alpha=alpha)
Co_Bands.ax.set_title("Orbital $s$")
Co_Bands.PlotShow(ymin=ymin,ymax=ymax)
Co_Bands.Init_Fig(width=width,height=height)
Co_Bands.getTotalBandsPlot(color="k",alpha=alpha,lw=0.5)
Co_Bands.getOrbitalBandsPlot(magn="mz",sign="+",orbital="p",marker="o",color="b",scale=s,alpha=alpha)
Co_Bands.getOrbitalBandsPlot(magn="mz",sign="-",orbital="p",marker="o",color="r",scale=s,alpha=alpha)
Co_Bands.ax.set_title("Orbital $p$")
Co_Bands.PlotShow(ymin=ymin,ymax=ymax)
Co_Bands.Init_Fig(width=width,height=height)
Co_Bands.getTotalBandsPlot(color="k",alpha=alpha,lw=0.5)
Co_Bands.getOrbitalBandsPlot(magn="mz",sign="+",orbital="d",marker="o",color="b",scale=s,alpha=alpha)
Co_Bands.getOrbitalBandsPlot(magn="mz",sign="-",orbital="d",marker="o",color="r",scale=s,alpha=alpha)
Co_Bands.ax.set_title("Orbital $d$")
Co_Bands.PlotShow(ymin=ymin,ymax=ymax)
# -
# ### LORBIT = 11
# Plotting the. Projected Band of s, px, py ad pz orbitals of Si atom.
| test/Co/Co_Tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Section I: Building the Initial Configuration
#
# The first step to simulating a lipid bilayer is generating the initial configuration. Here the `mBuild` package is used to generate a minimal stratum corneum system containing an equimolar mixture of ceramide N-hydroxy sphingosine, cholesterol, and free fatty acids. The `mBuild` software allows for the easy and reproducible construction of a preassembled bilayer with tunable parameters. This script will use the following packages:
#
# - `numpy` : https://numpy.org
# - `mbuild` : https://mosdef.org/mbuild/
# - `mdtraj` : http://mdtraj.org/
# - `py3dmol` : https://3dmol.csb.pitt.edu
#
# These packages are installed using `conda` in the cell below. The Anaconda software can be installed from https://www.anaconda.com/distribution/
# !conda install -c mosdef -c conda-forge -c omnia -y numpy
# !conda install -c mosdef -c conda-forge -c omnia -y mbuild
# !conda install -c mosdef -c conda-forge -c omnia -y mdtraj
# !conda install -c mosdef -c conda-forge -c omnia -y py3dmol
# In addition, an `mBuild` recipe for constructing bilayers is required for this notebook. The `Bilayer` recipe can be found at https://github.com/uppittu11/mbuild_bilayer. This contains a `python` class used to construct a bilayer `mBuild` `Compound`.
# !git clone https://github.com/uppittu11/mbuild_bilayer.git
# !cd mbuild_bilayer && pip install -e . && cd ..
# **It is recommended that you restart the kernel before continuing to ensure that environnment variables are correctly set**
#
# You can do this by selecting "Kernel" > "Restart" from the menu at the top of the page.
# The required packages are imported below.
import mbuild as mb
import numpy as np
import mdtraj as md
Bilayer = mb.recipes.Bilayer
# The configurations of each molecule type are saved as `.mol2` files. Here these compounds are loaded from disk and saved to a dictionary.
# +
def load_molecule(filename):
"""Worker function to load the configuration of a single lipid."""
lipid = mb.load(filename)
lipid.translate_to([0, 0, 0])
return lipid
prototypes = dict()
for molecule_name in ["cer", "chol", "ffa", "tip3p"]:
filename = f"./molecules/{molecule_name}.mol2"
molecule = load_molecule(filename)
molecule.name = molecule_name
prototypes.update({molecule_name : molecule})
# -
# Below are the parameters chosen for the system being simulated. The parameter space (for example, the lipid composition and/or water content) can be easily explored further by adjusting these values.
# +
# Equimolar ratio of CER, CHOL, and FFA
lipids = [(prototypes["cer"], 0.33),
(prototypes["chol"], 0.33),
(prototypes["ffa"], 0.34)]
n_lipids_per_edge = 6
tilt_angle = 10 * np.pi / 180.0 # radians
area_per_lipid = .32 # nm^2
spacing_z = 2.8 #nm
waters_per_lipid = 40
water_density = 1.0 # g/cm^3
water_mass = 18.01 # amu
# -
# The bilayer system is set up using the parameters set above and saved as a GROMACS `.gro` file.
# +
system = Bilayer(lipids,
ref_atoms=[77, 2, 26],
n_lipids_x=n_lipids_per_edge,
n_lipids_y=n_lipids_per_edge,
area_per_lipid=area_per_lipid,
spacing_z=spacing_z,
solvent=prototypes["tip3p"],
solvent_per_lipid=waters_per_lipid,
solvent_density=water_density,
solvent_mass=water_mass,
tilt=tilt_angle,
random_seed=2019,
mirror=False)
# Create box with 0.1 nm boundary
box = mb.Box(mins=[0, 0, 0],
maxs=(system.solvent_components.boundingbox.lengths + np.array([0.05, 0.05, 0.05])))
# Translate to box center
system.translate_to(box.lengths * 0.5)
# Convert to mdTraj Trajectory and save to disk
configuration = system.to_trajectory(residues=["cer", "chol", "ffa", "tip3p"], box=box)
configuration.save("start.gro")
# -
# A render of the system can visually inspected using the `py3dmol` renderer built into `mBuild` in order to validate the configuration.
# visualize
system.visualize()
# When finished, exit this notebook window and return to the main notebook.
| workflow/building/building.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="2gzMGSGB5hHo"
# ##### Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# + id="_RX4_K8Z5msT"
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="kCQeUqKYe2dC"
# # Distributed Inference with JAX
#
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/probability/examples/Distributed_Inference_with_JAX"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/Distributed_Inference_with_JAX.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/Distributed_Inference_with_JAX.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/probability/tensorflow_probability/examples/jupyter_notebooks/Distributed_Inference_with_JAX.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] id="stPyOUvle5ZG"
# TensorFlow Probability (TFP) on JAX now has tools for distributed numerical computing. To scale to large numbers of accelerators, the tools are built around writing code using the "single-program multiple-data" paradigm, or SPMD for short.
#
# In this notebook, we'll go over how to "think in SPMD" and introduce the new TFP abstractions for scaling to configurations such as TPU pods, or clusters of GPUs. If you're running this code yourself, make sure to select a TPU runtime.
# + [markdown] id="FOV8NaLkgBvW"
# We'll first install the latest versions TFP, JAX and TF.
# + colab={"base_uri": "https://localhost:8080/"} id="O0AI1GXeNgiw" outputId="f5d707d6-6ace-4611-b75e-7d03f064f08f"
#@title Installs
# !pip install jaxlib --upgrade -q 2>&1 1> /dev/null
# !pip install tfp-nightly[jax] --upgrade -q 2>&1 1> /dev/null
# !pip install tf-nightly-cpu -q -I 2>&1 1> /dev/null
# !pip install jax -I -q --upgrade 2>&1 1>/dev/null
# + [markdown] id="iEZhgnkYgG-N"
# We'll import some general libraries, along with some JAX utilities.
# + colab={"base_uri": "https://localhost:8080/"} id="Z0wJAO4FNaMx" outputId="e2651b04-a302-4ac1-d390-c9d54a865f15"
#@title Setup and Imports
import functools
import collections
import contextlib
import jax
import jax.numpy as jnp
from jax import lax
from jax import random
import jax.numpy as jnp
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import tensorflow_datasets as tfds
from tensorflow_probability.substrates import jax as tfp
sns.set(style='white')
# + [markdown] id="5DHdAK2rgM5F"
# We'll also set up some handy TFP aliases. The new abstractions are currently provided in `tfp.experimental.distribute` and `tfp.experimental.mcmc`.
# + id="X4Pe3mZKgO6i"
tfd = tfp.distributions
tfb = tfp.bijectors
tfm = tfp.mcmc
tfed = tfp.experimental.distribute
tfde = tfp.experimental.distributions
tfem = tfp.experimental.mcmc
Root = tfed.JointDistributionCoroutine.Root
# + [markdown] id="b_Xkt7MtgThA"
# To connect the notebook to a TPU, we use the following helper from JAX. To confirm that we're connected, we print out the number of devices, which should be eight.
# + colab={"base_uri": "https://localhost:8080/"} id="HfgK8wpJgRdw" outputId="da2863e9-77c1-4da6-8822-675c59ae6854"
from jax.tools import colab_tpu
colab_tpu.setup_tpu()
print(f'Found {jax.device_count()} devices')
# + [markdown] id="2a__PBkDZBjm"
# # A quick introduction to `jax.pmap`
# + [markdown] id="tTNW_FyuZEiM"
# After connecting to a TPU, we have access to *eight* devices. However, when we run JAX code eagerly, JAX defaults to running computations on just one.
#
# The simplest way of executing a computation across many devices is to map a function, having each device execute one index of the map. JAX provides the `jax.pmap` ("parallel map") transformation which turns a function into one that maps the function across several devices.
#
# In the following example, we create an array of size 8 (to match the number of available devices) and map a function that adds 5 across it.
# + colab={"base_uri": "https://localhost:8080/"} id="EV8gboOzZ9pt" outputId="f3140e2b-c507-4dcb-ed06-d71fc64f49ed"
xs = jnp.arange(8.)
out = jax.pmap(lambda x: x + 5.)(xs)
print(type(out), out)
# + [markdown] id="1C5WNNsmaTiE"
# Note that we receive a `ShardedDeviceArray` type back, indicating that the output array is physically split across devices.
# + [markdown] id="xWy1APt11voc"
# `jax.pmap` acts semantically like a map, but has a few important options that modify its behavior. By default, `pmap` assumes all inputs to the function are being mapped over, but we can modify this behavior with the `in_axes` argument.
# + colab={"base_uri": "https://localhost:8080/"} id="4eQ1g-xe42kY" outputId="7a482508-baa7-4cfb-9019-87878d4fb728"
xs = jnp.arange(8.)
y = 5.
# Map over the 0-axis of `xs` and don't map over `y`
out = jax.pmap(lambda x, y: x + y, in_axes=(0, None))(xs, y)
print(out)
# + [markdown] id="5GEq6tVq5Ux4"
# Analogously, the `out_axes` argument to `pmap` determines whether or not to return the values on every device. Setting `out_axes` to `None` automatically returns the value on the 1st device and should only be used if we are confident the values are the same on every device.
# + colab={"base_uri": "https://localhost:8080/"} id="ex1jYXJ95jlG" outputId="c1c938c0-7b45-4727-be54-ad93269b9c1b"
xs = jnp.ones(8) # Value is the same on each device
out = jax.pmap(lambda x: x + 1, out_axes=None)(xs)
print(out)
# + [markdown] id="pEEXyAODauyi"
# What happens when what we'd like to do isn't easily expressible as a mapped pure function? For example, what if we'd like to do a sum across the axis we're mapping over? JAX offers "collectives", functions that communicate across devices, to enable writing more interesting and complex distributed programs. To understand how exactly they work, we'll introduce SPMD.
# + [markdown] id="e4MmQQamg7Wn"
# # What is SPMD?
# + [markdown] id="DJKyMsQNnFUI"
# Single-program multiple-data (SPMD) is a concurrent programming model in which a single program (i.e. the same code) is executed simultaneously across devices, but the inputs to each of the running programs can differ.
#
# If our program is a simple function of its inputs (i.e. something like `x + 5`), running a program in SPMD is just mapping it over different data, like we did with `jax.pmap` earlier. However, we can do more than just "map" a function. JAX offers "collectives", which are functions that communicate across devices.
#
# For example, maybe we'd like to take the sum of a quantity across all our devices. Before we do that, we need to assign a name to the axis we're mapping over in the `pmap`. We then use the `lax.psum` ("parallel sum") function to perform a sum across devices, ensuring we identify the named axis we're summing over.
# + colab={"base_uri": "https://localhost:8080/"} id="fsZfnmr3eMYj" outputId="ddc7f720-8158-4293-a78b-b3c38ffdbf47"
def f(x):
out = lax.psum(x, axis_name='i')
return out
xs = jnp.arange(8.) # Length of array matches number of devices
jax.pmap(f, axis_name='i')(xs)
# + [markdown] id="eF2Nv7KXeTrb"
# The `psum` collective aggregates the value of `x` on each device and synchronizes its value across the map i.e. `out` is `28.` on each device.
# We're no longer performing a simple "map", but we're executing an SPMD program where each device's computation can now interact with the same computation on other devices, albeit in a limited way using collectives. In this scenario, we can use `out_axes = None`, because `psum` will synchronize the value.
# + colab={"base_uri": "https://localhost:8080/"} id="n2dPYYQO5-vz" outputId="fd72f196-8d8a-423c-ec79-6e6067b16f59"
def f(x):
out = lax.psum(x, axis_name='i')
return out
jax.pmap(f, axis_name='i', out_axes=None)(jnp.arange(8.))
# + [markdown] id="JsoseMKEf-nA"
# SPMD enables us to write one program that is run on every device in any TPU configuration simultaneously. The same code that is used to do machine learning on 8 TPU cores can be used on a TPU pod that may have hundreds to thousands of cores! For a more detailed tutorial about `jax.pmap` and SPMD, you can refer to the the [JAX 101 tutorial](https://jax.readthedocs.io/en/latest/jax-101/06-parallelism.html).
# + [markdown] id="TpRG6flRfoed"
# # MCMC at scale
# + [markdown] id="ulERQF93geL1"
# In this notebook, we focus on using Markov Chain Monte Carlo (MCMC) methods for Bayesian inference. There are may ways we utilize many devices for MCMC, but in this notebook, we'll focus on two:
# 1. Running independent Markov chains on different devices. This case is fairly simple and is possible to do with vanilla TFP.
# 2. Sharding a dataset across devices. This case is a bit more complex and requires recently added TFP machinery.
# + [markdown] id="QfSa12Hqhy09"
# ## Independent Chains
# + [markdown] id="hqos2uVgh2Zw"
# Say we'd like to do Bayesian inference on a problem using MCMC and would like to run several chains in parallel across several devices (say 2 on each device). This turns out to be a program we can just "map" across devices, i.e. one that needs no collectives. To make sure each program executes a different Markov chain (as opposed to running the same one), we pass in a different value for the random seed to each device.
#
# Let's try it on a toy problem of sampling from a 2-D Gaussian distribution. We can use TFP's existing MCMC functionality out of the box.
# In general, we try to put most of the logic inside of our mapped function to more explicitly distinguish between what's running on all the devices versus just the first.
# + id="LQAqJ4O3h1oM"
def run(seed):
target_log_prob = tfd.Sample(tfd.Normal(0., 1.), 2).log_prob
initial_state = jnp.zeros([2, 2]) # 2 chains
kernel = tfm.HamiltonianMonteCarlo(target_log_prob, 1e-1, 10)
def trace_fn(state, pkr):
return target_log_prob(state)
states, log_prob = tfm.sample_chain(
num_results=1000,
num_burnin_steps=1000,
kernel=kernel,
current_state=initial_state,
trace_fn=trace_fn,
seed=seed
)
return states, log_prob
# + [markdown] id="sFPWI5GijOj4"
# By itself, the `run` function takes in a stateless random seed (to see how stateless randomness work, you can read the [TFP on JAX](https://www.tensorflow.org/probability/examples/TensorFlow_Probability_on_JAX) notebook or see the [JAX 101 tutorial](https://jax.readthedocs.io/en/latest/jax-101/05-random-numbers.html)). Mapping `run` over different seeds will result in running several independent Markov chains.
# + colab={"base_uri": "https://localhost:8080/"} id="N3mzWLKPjpsB" outputId="3e508a4b-d8b4-4047-afaf-65960d6a7eeb"
states, log_probs = jax.pmap(run)(random.split(random.PRNGKey(0), 8))
print(states.shape, log_probs.shape)
# states is (8 devices, 1000 samples, 2 chains, 2 dimensions)
# log_prob is (8 devices, 1000 samples, 2 chains)
# + [markdown] id="pt5aCj0Ej3pA"
# Note how we now have an extra axis corresponding to each device. We can rearrange the dimensions and flatten them to get an axis for the 16 chains.
# + id="uz1etedpjw_f"
states = states.transpose([0, 2, 1, 3]).reshape([-1, 1000, 2])
log_probs = log_probs.transpose([0, 2, 1]).reshape([-1, 1000])
# + colab={"base_uri": "https://localhost:8080/", "height": 322} id="rrw6e5rhkLYy" outputId="3f4ad6ed-0356-47da-fa1f-e09adb90242e"
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].plot(log_probs.T, alpha=0.4)
ax[1].scatter(*states.reshape([-1, 2]).T, alpha=0.1)
plt.show()
# + [markdown] id="q7hL_vVAkuaR"
# When running independent chains on many devices, it's as easy as `pmap`-ing over a function that uses `tfp.mcmc`, ensuring we pass different values for the random seed to each device.
# + [markdown] id="EfbcQmFzlYau"
# ## Sharding data
# + [markdown] id="iKWgLWSPlfOv"
# When we do MCMC, the target distribution is often a posterior distribution obtained by conditioning on a dataset, and computing an unnormalized log-density involves summing likelihoods for each observed data.
#
# With very large datasets, it can be prohibitively expensive to even run one chain on a single device. However, when we have access to multiple devices, we can split up the dataset across the devices to better leverage the compute we have available.
#
# If we'd like to do MCMC with a sharded dataset, we need to ensure the unnormalized log-density we compute on each device represents the *total*, i.e. the density over all data, otherwise each device will be doing MCMC with their own incorrect target distribution. To this end, TFP now has new tools (i.e. `tfp.experimental.distribute` and `tfp.experimental.mcmc`) that enable computing "sharded" log probabilities and doing MCMC with them.
# + [markdown] id="FTl2U7kCmhC_"
# ### Sharded distributions
# + [markdown] id="GgqqVZjlmj3w"
# The core abstraction TFP now provides for computing sharded log probabiliities is the `Sharded` meta-distribution, which takes a distribution as input and returns a new distribution that has specific properties when executed in an SPMD context. `Sharded` lives in `tfp.experimental.distribute`.
#
# Intuitively, a `Sharded` distribution corresponds to a set of random variables that have been "split" across devices. On each device, they will produce different samples, and can individually have different log-densities. Alternatively, a `Sharded` distribution corresponds to a "plate" in graphical model parlance, where the plate size is the number of devices.
#
# + [markdown] id="wNlR4OaWm1w8"
# #### Sampling a `Sharded` distribution
# + [markdown] id="MJ0oarZsnBHP"
# If we sample from a `Normal` distribution in a program being `pmap`-ed using the same seed on each device, we will get the same sample on each device. We can think of the following function as sampling a single random variable that is synchronized across devices.
# + colab={"base_uri": "https://localhost:8080/"} id="fyiT7iPQm3cK" outputId="325ca0cd-864f-4248-89fe-dea76645c167"
# `pmap` expects at least one value to be mapped over, so we provide a dummy one
def f(seed, _):
return tfd.Normal(0., 1.).sample(seed=seed)
jax.pmap(f, in_axes=(None, 0))(random.PRNGKey(0), jnp.arange(8.))
# + [markdown] id="TVJ48nEdnYmB"
# If we wrap `tfd.Normal(0., 1.)` with a `tfed.Sharded`, we logically now have eight different random variables (one on each device) and will therefore produce a different sample for each one, despite passing in the same seed.
# + colab={"base_uri": "https://localhost:8080/"} id="f-W8UhTxnmmy" outputId="c39ef11d-ec42-4dff-f876-64320240288c"
def f(seed, _):
return tfed.Sharded(tfd.Normal(0., 1.), shard_axis_name='i').sample(seed=seed)
jax.pmap(f, in_axes=(None, 0), axis_name='i')(random.PRNGKey(0), jnp.arange(8.))
# + [markdown] id="BqTKixQfARlQ"
# An equivalent representation of this distribution on a single device is just a 8 independent normal samples. Even though the value of the sample will be different (`tfed.Sharded` does pseudo-random number generation slightly differently), they both represent the same distribution.
# + colab={"base_uri": "https://localhost:8080/"} id="YwYeBAJHAYqm" outputId="132b416d-adb1-42af-9e9c-4dea25da155d"
dist = tfd.Sample(tfd.Normal(0., 1.), jax.device_count())
dist.sample(seed=random.PRNGKey(0))
# + [markdown] id="7vV2kIhxom_9"
# #### Taking the log-density of a `Sharded` distribution
# + [markdown] id="9NLOMW6Po3E6"
# Let's see what happens when we compute the log-density of a sample from a regular distribution in an SPMD context.
# + colab={"base_uri": "https://localhost:8080/"} id="Wo_1xYRForc_" outputId="239f8b9c-7a3d-4b4a-9534-6be1f1ae1bb2"
def f(seed, _):
dist = tfd.Normal(0., 1.)
x = dist.sample(seed=seed)
return x, dist.log_prob(x)
jax.pmap(f, in_axes=(None, 0))(random.PRNGKey(0), jnp.arange(8.))
# + [markdown] id="N1liUvISo8Lm"
# Each sample is the same on each device, so we compute the same density on each device too. Intuitively, here we only have a distribution over a single normally distributed variable.
#
# With a `Sharded` distribution, we have a distribution over 8 random variables, so when we compute the `log_prob` of a sample, we sum, across devices, over each of the individual log densities. (You might notice that this total log_prob value is larger than the singleton log_prob computed above.)
# + colab={"base_uri": "https://localhost:8080/"} id="i4__zjePpVul" outputId="228b3ed0-b1a3-4ff2-ec57-58351c5ee1ec"
def f(seed, _):
dist = tfed.Sharded(tfd.Normal(0., 1.), shard_axis_name='i')
x = dist.sample(seed=seed)
return x, dist.log_prob(x)
sample, log_prob = jax.pmap(f, in_axes=(None, 0), axis_name='i')(
random.PRNGKey(0), jnp.arange(8.))
print('Sample:', sample)
print('Log Prob:', log_prob)
# + [markdown] id="u8iNYSVCBSEX"
# The equivalent, "unsharded" distribution produces the same log density.
# + colab={"base_uri": "https://localhost:8080/"} id="UM0QvFGJA7BA" outputId="f8af42f8-bbbe-42c8-bfb4-20cfdbaaf660"
dist = tfd.Sample(tfd.Normal(0., 1.), jax.device_count())
dist.log_prob(sample)
# + [markdown] id="3d94dKwmpcFh"
# A `Sharded` distribution produces different values from `sample` on each device, but get the same value for `log_prob` on each device. What's happening here? A `Sharded` distribution does a `psum` internally to ensure the `log_prob` values are in sync across devices. Why would we want this behavior? If we're running the *same* MCMC chain on each device, we'd like the `target_log_prob` to be the same across each device, even if some random variables in the computation are sharded across devices.
# + [markdown] id="CJBg-y09jI4L"
# Additionally, a `Sharded` distribution ensures that gradients across devices are the correct, to ensure that algorithms like HMC, which take gradients of the log-density function as part of the transition function, produce proper samples.
# + [markdown] id="ospRyIUlqld2"
# ### Sharded `JointDistribution`s
# + [markdown] id="_dLU_CC9rime"
# We can create models with multiple `Sharded` random variables by using `JointDistribution`s (JDs). Unfortunately, `Sharded` distributions cannot be safely used with vanilla `tfd.JointDistribution`s, but `tfp.experimental.distribute` exports "patched" JDs that will behave like `Sharded` distributions.
# + colab={"base_uri": "https://localhost:8080/"} id="CZHp912esU1l" outputId="36ef8d60-fab8-4d15-9f65-936fc584c893"
def f(seed, _):
dist = tfed.JointDistributionSequential([
tfd.Normal(0., 1.),
tfed.Sharded(tfd.Normal(0., 1.), shard_axis_name='i'),
])
x = dist.sample(seed=seed)
return x, dist.log_prob(x)
jax.pmap(f, in_axes=(None, 0), axis_name='i')(random.PRNGKey(0), jnp.arange(8.))
# + [markdown] id="2Jem6vNLsjgm"
# These sharded JDs can have both `Sharded` and vanilla TFP distributions as components. For the unsharded distributions, we obtain the same sample on each device, and for the sharded distributions, we get different samples. The `log_prob` on each device is synchronized as well.
# + [markdown] id="yGa7snFos2de"
# ### MCMC with `Sharded` distributions
# + [markdown] id="voZoIikGs69-"
# How do we think about `Sharded` distributions in the context of MCMC? If we have a generative model that can be expressed as a `JointDistribution`, we can pick some axis of that model to "shard" across. Typically, one random variable in the model will correspond to observed data, and if we have a large dataset that we'd like to shard across devices, we want the variables that are associated to data points to be sharded as well. We also may have "local" random variables that are one-to-one with the observations we are sharding, so we will have to additionally shard those random variables.
#
# We'll go over examples of the usage of `Sharded` distributions with TFP MCMC in this section. We'll start with a simpler Bayesian logistic regression example, and conclude with a matrix factorization example, with the goal of demonstrating some use-cases for the `distribute` library.
#
# + [markdown] id="tTDEJ-20t-RU"
# ##### Example: Bayesian logistic regression for MNIST
# + [markdown] id="Btckhsht7TOd"
# We'd like to do Bayesian logistic regression on a large dataset; the model has a prior $p(\theta)$ over the regression weights, and a likelihood $p(y_i | \theta, x_i)$ that is summed over all data $\{x_i, y_i\}_{i = 1}^N$ to obtain the total joint log density. If we shard our data, we'd shard the observed random variables $x_i$ and $y_i$ in our model.
# + [markdown] id="wUeySkfVuqI-"
# We use the following Bayesian logistic regression model for MNIST classification:
# $$
# \begin{align*}
# w &\sim \mathcal{N}(0, 1) \\
# b &\sim \mathcal{N}(0, 1) \\
# y_i | w, b, x_i &\sim \textrm{Categorical}(w^T x_i + b)
# \end{align*}
# $$
# + [markdown] id="Q16-CWWi7ju5"
# Let's load MNIST using TensorFlow Datasets.
# + colab={"base_uri": "https://localhost:8080/", "height": 202, "referenced_widgets": ["a302adcb8d824d00be0125331fe7df1a", "4da2d87727e942c9956a2c507109c107", "eb1aba62376345729c10817275841ad1", "9a546ada258846cbad21774eea3af7cc", "<KEY>", "f48b175a5b8e469da891b76be737e8cd", "bbfbc2dbad584046835b3fcac9917d57", "b14fcb50f7644de7b40c89486ab999ba"]} id="Tzsu879puFmo" outputId="87f3bc0b-55f4-4d98-f24f-3993cef281a3"
mnist = tfds.as_numpy(tfds.load('mnist', batch_size=-1))
raw_train_images, train_labels = mnist['train']['image'], mnist['train']['label']
train_images = raw_train_images.reshape([raw_train_images.shape[0], -1]) / 255.
raw_test_images, test_labels = mnist['test']['image'], mnist['test']['label']
test_images = raw_test_images.reshape([raw_test_images.shape[0], -1]) / 255.
# + [markdown] id="dTXbO3FyuoqE"
# We have 60000 training images but let's take advantage of our 8 available cores and split it 8 ways. We'll use this handy `shard` utility function.
# + id="cRvMbzl8vO3h"
def shard_value(x):
x = x.reshape((jax.device_count(), -1, *x.shape[1:]))
return jax.pmap(lambda x: x)(x) # pmap will physically place values on devices
shard = functools.partial(jax.tree_map, shard_value)
# + colab={"base_uri": "https://localhost:8080/"} id="w7MneZdGuTvk" outputId="44abc612-258a-463b-ee14-bef65c1c23b6"
sharded_train_images, sharded_train_labels = shard((train_images, train_labels))
print(sharded_train_images.shape, sharded_train_labels.shape)
# + [markdown] id="h4cSdTI21z_W"
# Before we continue, let's quickly discuss precision on TPUs and its impact on HMC. TPUs execute matrix multiplications using low `bfloat16` precision for speed. `bfloat16` matrix multiplications are often sufficient for many deep learning applications, but when used with HMC, we have empirically found the lower precision can lead to diverging trajectories, causing rejections. We can use higher precision matrix multiplications, at the cost of some additional compute.
#
# To increase our matmul precision, we can use the `jax.default_matmul_precision` decorator with `"tensorfloat32"` precision (for even higher precision we could use `"float32"` precision).
# + [markdown] id="67WV7IcdvUtg"
# Let's now define our `run` function, which will take in a random seed (which will be the same on each device) and a shard of MNIST. The function will implement the aforementioned model and we will then use TFP's vanilla MCMC functionality to run a single chain. We'll make sure to decorate `run` with the `jax.default_matmul_precision` decorator to make sure the matrix multiplication is run with higher precision, though in the particular example below, we could just as well use `jnp.dot(images, w, precision=lax.Precision.HIGH)`.
# + id="VuJ9Um1xvSPt"
# We can use `out_axes=None` in the `pmap` because the results will be the same
# on every device.
@functools.partial(jax.pmap, axis_name='data', in_axes=(None, 0), out_axes=None)
@jax.default_matmul_precision('tensorfloat32')
def run(seed, data):
images, labels = data # a sharded dataset
num_examples, dim = images.shape
num_classes = 10
def model_fn():
w = yield Root(tfd.Sample(tfd.Normal(0., 1.), [dim, num_classes]))
b = yield Root(tfd.Sample(tfd.Normal(0., 1.), [num_classes]))
logits = jnp.dot(images, w) + b
yield tfed.Sharded(tfd.Independent(tfd.Categorical(logits=logits), 1),
shard_axis_name='data')
model = tfed.JointDistributionCoroutine(model_fn)
init_seed, sample_seed = random.split(seed)
initial_state = model.sample(seed=init_seed)[:-1] # throw away `y`
def target_log_prob(*state):
return model.log_prob((*state, labels))
def accuracy(w, b):
logits = images.dot(w) + b
preds = logits.argmax(axis=-1)
# We take the average accuracy across devices by using `lax.pmean`
return lax.pmean((preds == labels).mean(), 'data')
kernel = tfm.HamiltonianMonteCarlo(target_log_prob, 1e-2, 100)
kernel = tfm.DualAveragingStepSizeAdaptation(kernel, 500)
def trace_fn(state, pkr):
return (
target_log_prob(*state),
accuracy(*state),
pkr.new_step_size)
states, trace = tfm.sample_chain(
num_results=1000,
num_burnin_steps=1000,
current_state=initial_state,
kernel=kernel,
trace_fn=trace_fn,
seed=sample_seed
)
return states, trace
# + [markdown] id="pMq6alT_OmCC"
# `jax.pmap` includes a JIT compile but the compiled function is cached after the first call. We'll call `run` and ignore the output to cache the compilation.
# + colab={"base_uri": "https://localhost:8080/"} id="LYsRQkTUxCPD" outputId="06062688-9df6-4a98-d526-a1a639ba692b"
# %%time
output = run(random.PRNGKey(0), (sharded_train_images, sharded_train_labels))
jax.tree_map(lambda x: x.block_until_ready(), output)
# + [markdown] id="AZhCrhKqOxkc"
# We'll now call `run` again to see how long the actual execution takes.
# + colab={"base_uri": "https://localhost:8080/"} id="5SuYWRhUDGWB" outputId="88447cdc-6988-47d1-bd8c-1233bf09af51"
# %%time
states, trace = run(random.PRNGKey(0), (sharded_train_images, sharded_train_labels))
jax.tree_map(lambda x: x.block_until_ready(), trace)
# + [markdown] id="6njumqgPO1zG"
# We're executing 200,000 leapfrog steps, each of which computes a gradient over the entire dataset. Splitting the computation over 8 cores enables us to compute the equivalent of 200,000 epochs of training in about 95 seconds, about 2,100 epochs per second!
# + [markdown] id="vWciJ-eLPRkr"
# Let's plot the log-density of each sample and each sample's accuracy:
# + colab={"base_uri": "https://localhost:8080/", "height": 338} id="2PNzYZOXxd1R" outputId="6219df98-19ea-4c00-e82e-711cbc19e540"
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
ax[0].plot(trace[0])
ax[0].set_title('Log Prob')
ax[1].plot(trace[1])
ax[1].set_title('Accuracy')
ax[2].plot(trace[2])
ax[2].set_title('Step Size')
plt.show()
# + [markdown] id="dTz4X8_8PXZP"
# If we ensemble the samples, we can compute a Bayesian model average to improve our performance.
# + colab={"base_uri": "https://localhost:8080/"} id="mP5Vd7w4PbJ2" outputId="4b04c2e8-9436-41bc-9884-6b27f95986e4"
@functools.partial(jax.pmap, axis_name='data', in_axes=(0, None), out_axes=None)
def bayesian_model_average(data, states):
images, labels = data
logits = jax.vmap(lambda w, b: images.dot(w) + b)(*states)
probs = jax.nn.softmax(logits, axis=-1)
bma_accuracy = (probs.mean(axis=0).argmax(axis=-1) == labels).mean()
avg_accuracy = (probs.argmax(axis=-1) == labels).mean()
return lax.pmean(bma_accuracy, axis_name='data'), lax.pmean(avg_accuracy, axis_name='data')
sharded_test_images, sharded_test_labels = shard((test_images, test_labels))
bma_acc, avg_acc = bayesian_model_average((sharded_test_images, sharded_test_labels), states)
print(f'Average Accuracy: {avg_acc}')
print(f'BMA Accuracy: {bma_acc}')
print(f'Accuracy Improvement: {bma_acc - avg_acc}')
# + [markdown] id="c2CjiAMgQ9Vd"
# A Bayesian model average increases our accuracy by almost 1%!
# + [markdown] id="d9CYg3YC6XbK"
# ##### Example: MovieLens recommendation system
# + [markdown] id="kMd9c6SI7rm7"
# Let's now try doing inference with the MovieLens recommendations dataset, which is a collection of users and their ratings of various movies. Specifically, we can represent MovieLens as an $N \times M$ watch matrix $W$ where $N$ is the number of users and $M$ is the number of movies; we expect $N > M$. The entries of $W_{ij}$ are a boolean indicating whether or not user $i$ watched movie $j$. Note that MovieLens provides user ratings, but we're ignoring them to simplify the problem.
#
#
# First, we'll load the dataset. We'll use the version with 1 million ratings.
# + colab={"base_uri": "https://localhost:8080/", "height": 298, "referenced_widgets": ["0509d5f63e8447c5bf3047dec3c9b436", "62ff554eba004da3809a1dd2e21311fb", "74764971a48a4f79a495f1132d805db0", "30b99f5c7029409ebca66ffedb065879", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "24c536ef29c547ef965bb3270637bcc2", "33822c1d39ca42f5b5a44f330ba19df3", "8074ff27fdd24ba6844f3a3f4eda7aed", "a1966d13e72d4a9f9fcda67545e8fee3", "<KEY>", "<KEY>", "ee5454904d4649e1a23c7b29eee96b27", "<KEY>", "<KEY>", "<KEY>", "56b9e4a682864227a14f95274afea6c8", "<KEY>", "e177f0e205af4369ad4e16af5c5f5f6a", "<KEY>", "c400310f418546909ba8ffe2e10e3e12", "<KEY>", "445cf583fac44e3895e51e2cde2a350c", "<KEY>", "<KEY>", "3ee6fe19d17d4df99ffeb289775b1c2e", "<KEY>", "7154891fb6ac45c4a24e151af3fb6903", "<KEY>", "c2e9d94dd4cd459da8cd6c5d253b7ed5", "48ec20644a394848927cf72e8bea86b5", "<KEY>", "<KEY>", "<KEY>", "ef4b4c0fa6be4907a9e86c77eb4249f6", "5f993f93d9764230b0fd70f0945a3739", "bc2e9dd37de44b5192881851fc42099d", "e3f937fc3dfb4486bfd7211b52877a80"]} id="8Ra9LNDC-bF9" outputId="16054a5b-648f-49e5-b51d-dfaaf303b025"
movielens = tfds.as_numpy(tfds.load('movielens/1m-ratings', batch_size=-1))
GENRES = ['Action', 'Adventure', 'Animation', 'Children', 'Comedy',
'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir',
'Horror', 'IMAX', 'Musical', 'Mystery', 'Romance', 'Sci-Fi',
'Thriller', 'Unknown', 'War', 'Western', '(no genres listed)']
# + [markdown] id="U5opyYKv-ioM"
# We'll do some preprocessing of the dataset to obtain the watch matrix $W$.
# + colab={"base_uri": "https://localhost:8080/"} id="OGK5nn3u-ltG" outputId="9ebed1be-b197-40af-85eb-117e5b3bb89e"
raw_movie_ids = movielens['train']['movie_id']
raw_user_ids = movielens['train']['user_id']
genres = movielens['train']['movie_genres']
movie_ids, movie_labels = pd.factorize(movielens['train']['movie_id'])
user_ids, user_labels = pd.factorize(movielens['train']['user_id'])
num_movies = movie_ids.max() + 1
num_users = user_ids.max() + 1
movie_titles = dict(zip(movielens['train']['movie_id'],
movielens['train']['movie_title']))
movie_genres = dict(zip(movielens['train']['movie_id'],
genres))
movie_id_to_title = [movie_titles[movie_labels[id]].decode('utf-8')
for id in range(num_movies)]
movie_id_to_genre = [GENRES[movie_genres[movie_labels[id]][0]] for id in range(num_movies)]
watch_matrix = np.zeros((num_users, num_movies), bool)
watch_matrix[user_ids, movie_ids] = True
print(watch_matrix.shape)
# + [markdown] id="H8hKi0Aw-KVT"
# We can define a generative model for $W$, using a simple probabilistic matrix factorization model. We assume a latent $N \times D$ user matrix $U$ and a latent $M \times D$ movie matrix $V$, which when multiplied produce the logits of a Bernoulli for the watch matrix $W$. We'll also include a bias vectors for users and movies, $u$ and $v$.
# $$
# \begin{align*}
# U &\sim \mathcal{N}(0, 1) \quad
# u \sim \mathcal{N}(0, 1)\\
# V &\sim \mathcal{N}(0, 1) \quad
# v \sim \mathcal{N}(0, 1)\\
# W_{ij} &\sim \textrm{Bernoulli}\left(\sigma\left(\left(UV^T\right)_{ij} + u_i + v_j\right)\right)
# \end{align*}
# $$
# + [markdown] id="la6o8iam9nTN"
# This is a pretty big matrix; 6040 user and 3706 movies leads to a matrix with over 22 million entries in it.
# How do we approach sharding this model? Well, if we assume that $N > M$ (i.e. there are more users than movies), then it would make sense to shard the watch matrix across the user axis, so each device would have a chunk of watch matrix corresponding to a subset of users. Unlike the previous example, however, we'll also have to shard up the $U$ matrix, since it has an embedding for each user, so each device will be responsible for a shard of $U$ and a shard of $W$. On the other hand, $V$ will be unsharded and be synchronized across devices.
# + id="4SUlkEg__ElT"
sharded_watch_matrix = shard(watch_matrix)
# + [markdown] id="zdZgFkzH_BYI"
# Before we write our `run`, let's quickly discuss the additional challenges with sharding the local random variable $U$. When running HMC, the vanilla `tfp.mcmc.HamiltonianMonteCarlo` kernel will sample momenta for each element of the chain's state. Previously, only unsharded random variables were part of that state, and the momenta were the same on each device. When we now have a sharded $U$, we need to sample different momenta on each device for $U$, while sampling the same momenta for $V$. To accomplish this, we can use `tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo` with a `Sharded` momentum distribution. As we continue to make parallel computation first-class, we may simplify this, e.g. by taking a shardedness indicator to the HMC kernel.
# + id="XEPHnzdo-_G2"
def make_run(*,
axis_name,
dim=20,
num_chains=2,
prior_variance=1.,
step_size=1e-2,
num_leapfrog_steps=100,
num_burnin_steps=1000,
num_results=500,
):
@functools.partial(jax.pmap, in_axes=(None, 0), axis_name=axis_name)
@jax.default_matmul_precision('tensorfloat32')
def run(key, watch_matrix):
num_users, num_movies = watch_matrix.shape
Sharded = functools.partial(tfed.Sharded, shard_axis_name=axis_name)
def prior_fn():
user_embeddings = yield Root(Sharded(tfd.Sample(tfd.Normal(0., 1.), [num_users, dim]), name='user_embeddings'))
user_bias = yield Root(Sharded(tfd.Sample(tfd.Normal(0., 1.), [num_users]), name='user_bias'))
movie_embeddings = yield Root(tfd.Sample(tfd.Normal(0., 1.), [num_movies, dim], name='movie_embeddings'))
movie_bias = yield Root(tfd.Sample(tfd.Normal(0., 1.), [num_movies], name='movie_bias'))
return (user_embeddings, user_bias, movie_embeddings, movie_bias)
prior = tfed.JointDistributionCoroutine(prior_fn)
def model_fn():
user_embeddings, user_bias, movie_embeddings, movie_bias = yield from prior_fn()
logits = (jnp.einsum('...nd,...md->...nm', user_embeddings, movie_embeddings)
+ user_bias[..., :, None] + movie_bias[..., None, :])
yield Sharded(tfd.Independent(tfd.Bernoulli(logits=logits), 2), name='watch')
model = tfed.JointDistributionCoroutine(model_fn)
init_key, sample_key = random.split(key)
initial_state = prior.sample(seed=init_key, sample_shape=num_chains)
def target_log_prob(*state):
return model.log_prob((*state, watch_matrix))
momentum_distribution = tfed.JointDistributionSequential([
Sharded(tfd.Independent(tfd.Normal(jnp.zeros([num_chains, num_users, dim]), 1.), 2)),
Sharded(tfd.Independent(tfd.Normal(jnp.zeros([num_chains, num_users]), 1.), 1)),
tfd.Independent(tfd.Normal(jnp.zeros([num_chains, num_movies, dim]), 1.), 2),
tfd.Independent(tfd.Normal(jnp.zeros([num_chains, num_movies]), 1.), 1),
])
# We pass in momentum_distribution here to ensure that the momenta for
# user_embeddings and user_bias are also sharded
kernel = tfem.PreconditionedHamiltonianMonteCarlo(target_log_prob, step_size,
num_leapfrog_steps,
momentum_distribution=momentum_distribution)
num_adaptation_steps = int(0.8 * num_burnin_steps)
kernel = tfm.DualAveragingStepSizeAdaptation(kernel, num_adaptation_steps)
def trace_fn(state, pkr):
return {
'log_prob': target_log_prob(*state),
'log_accept_ratio': pkr.inner_results.log_accept_ratio,
}
return tfm.sample_chain(
num_results, initial_state,
kernel=kernel,
num_burnin_steps=num_burnin_steps,
trace_fn=trace_fn,
seed=sample_key)
return run
# + [markdown] id="3cUqCRXSWtOg"
# We'll again run it once to cache the compiled `run`.
# + colab={"base_uri": "https://localhost:8080/"} id="gNQFjD0lBA7N" outputId="ed5b6434-1129-4709-830c-f06807488018"
# %%time
run = make_run(axis_name='data')
output = run(random.PRNGKey(0), sharded_watch_matrix)
jax.tree_map(lambda x: x.block_until_ready(), output)
# + [markdown] id="hBLG3HwyWx3c"
# Now we'll run it again without the compilation overhead.
# + colab={"base_uri": "https://localhost:8080/"} id="tfwsOUhmBMNB" outputId="8a4f5fb3-5cf4-44e0-94bc-e7429f003fe2"
# %%time
states, trace = run(random.PRNGKey(0), sharded_watch_matrix)
jax.tree_map(lambda x: x.block_until_ready(), trace)
# + [markdown] id="fl1uGAVWW1sR"
# Looks like we completed about 150,000 leapfrog steps in about 3 minutes, so about 83 leapfrog steps per second! Let's plot the accept ratio and log density of our samples.
# + colab={"base_uri": "https://localhost:8080/", "height": 338} id="1wJrfbgYBS0t" outputId="232e5062-cddf-4779-a2ae-e4ac6ca22a67"
fig, axs = plt.subplots(1, len(trace), figsize=(5 * len(trace), 5))
for ax, (key, val) in zip(axs, trace.items()):
ax.plot(val[0]) # Indexing into a sharded array, each element is the same
ax.set_title(key);
# + [markdown] id="zGX10FOXYfvl"
# Now that we have some samples from our Markov chain, let's use them to make some predictions. First, let's extract each of the components. Remember that the `user_embeddings` and `user_bias` are split across device, so we need to concatenate our `ShardedArray` to obtain them all. On the other hand, `movie_embeddings` and `movie_bias` are the same on every device, so we can just pick the value from the first shard. We'll use regular `numpy` to copy the values from the TPUs back to CPU.
# + colab={"base_uri": "https://localhost:8080/"} id="OtN1xACUYw5O" outputId="71ea5fdf-390b-4ae1-e332-d7ab46e7536c"
user_embeddings = np.concatenate(np.array(states.user_embeddings, np.float32), axis=2)
user_bias = np.concatenate(np.array(states.user_bias, np.float32), axis=2)
movie_embeddings = np.array(states.movie_embeddings[0], dtype=np.float32)
movie_bias = np.array(states.movie_bias[0], dtype=np.float32)
samples = (user_embeddings, user_bias, movie_embeddings, movie_bias)
print(f'User embeddings: {user_embeddings.shape}')
print(f'User bias: {user_bias.shape}')
print(f'Movie embeddings: {movie_embeddings.shape}')
print(f'Movie bias: {movie_bias.shape}')
# + [markdown] id="Zn0qF_1uc17L"
# Let's try to build a simple recommender system that utilizes the uncertainty captured in these samples. Let's first write a function that ranks movies according to the watch probability.
# + id="q-cc2vAfZYyz"
@jax.jit
def recommend(sample, user_id):
user_embeddings, user_bias, movie_embeddings, movie_bias = sample
movie_logits = (
jnp.einsum('d,md->m', user_embeddings[user_id], movie_embeddings)
+ user_bias[user_id] + movie_bias)
return movie_logits.argsort()[::-1]
# + [markdown] id="2sRtm0NaeTFG"
# We can now write a function that loops over all the samples and for each one, picks the top ranked movie that the user hasn't watched already. We can then see the counts of all recommended movies across the samples.
# + id="hfAjsSmueSZH"
def get_recommendations(user_id):
movie_ids = []
already_watched = set(jnp.arange(num_movies)[watch_matrix[user_id] == 1])
for i in range(500):
for j in range(2):
sample = jax.tree_map(lambda x: x[i, j], samples)
ranking = recommend(sample, user_id)
for movie_id in ranking:
if int(movie_id) not in already_watched:
movie_ids.append(movie_id)
break
return movie_ids
def plot_recommendations(movie_ids, ax=None):
titles = collections.Counter([movie_id_to_title[i] for i in movie_ids])
ax = ax or plt.gca()
names, counts = zip(*sorted(titles.items(), key=lambda x: -x[1]))
ax.bar(names, counts)
ax.set_xticklabels(names, rotation=90)
# + [markdown] id="XLmgJwXYecl9"
# Let's take the user who has seen the most movies versus the one who has seen the least.
# + colab={"base_uri": "https://localhost:8080/"} id="ncwX6MGSfVqf" outputId="877fa292-0165-45c2-8075-be602baf75ec"
user_watch_counts = watch_matrix.sum(axis=1)
user_most = user_watch_counts.argmax()
user_least = user_watch_counts.argmin()
print(user_watch_counts[user_most], user_watch_counts[user_least])
# + [markdown] id="e2UfkAagel78"
# We hope our system has more certainty about `user_most` than `user_least`, given that we have more information about what sorts of movies `user_most` is more likely to watch.
# + colab={"base_uri": "https://localhost:8080/", "height": 791} id="CS0clOvEelD6" outputId="2109267c-9a9f-451a-cb97-8ccefba2ae5d"
fig, ax = plt.subplots(1, 2, figsize=(20, 10))
most_recommendations = get_recommendations(user_most)
plot_recommendations(most_recommendations, ax=ax[0])
ax[0].set_title('Recommendation for user_most')
least_recommendations = get_recommendations(user_least)
plot_recommendations(least_recommendations, ax=ax[1])
ax[1].set_title('Recommendation for user_least');
# + [markdown] id="fIReD8VthTUL"
# We see that there is more variance in our recommendations for `user_least` reflecting our additional uncertainty in their watch preferences.
#
# We can also see look at the genres of the recommended movies.
# + colab={"base_uri": "https://localhost:8080/", "height": 482} id="MEmslkFVgaOP" outputId="21bd9181-22be-44b4-b95e-cf5a076fe0e2"
most_genres = collections.Counter([movie_id_to_genre[i] for i in most_recommendations])
least_genres = collections.Counter([movie_id_to_genre[i] for i in least_recommendations])
fig, ax = plt.subplots(1, 2, figsize=(20, 10))
ax[0].bar(most_genres.keys(), most_genres.values())
ax[0].set_title('Genres recommended for user_most')
ax[1].bar(least_genres.keys(), least_genres.values())
ax[1].set_title('Genres recommended for user_least');
# + [markdown] id="ZwCsrFFgnALF"
# `user_most` has seen a lot of movies and has been recommended more niche genres like mystery and crime whereas `user_least` has not watched many movies and was recommended more mainstream movies, which skew comedy and action.
| tensorflow_probability/examples/jupyter_notebooks/Distributed_Inference_with_JAX.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: projectq
# language: python
# name: projectq
# ---
# # Bernstein-Varzirani Algorithm
# The Bernstein-Varzirani algorithm solves the following problem:
# 1. Given an n-bit secret number x
# 2. There is an oracle holding the secret key that one can query with a number y, and the oracle will return $x \bullet y$, the bit-wise dot product modulo 2 of the two numbers. For example, given a 4-bit secret number $x=13$ with binary representation 1101 and $y=15$ with binary representation 1111, then $x \bullet y =$ 3 mod 2, which is 1
# 3. The question is: how many oracle queries does one need to figure out x?
#
# With the classical approach, if we query the oracle with 1, 2(10), 4(100) and 8(1000), then we can figure out each bit of the secret key and thus we need four queries. For a general n-bit number, we need n queries. Surprisingly, in the quantum computing paradigm, the Bernstein-Varzirani algorithm needs only one measurement, regardless of the size of the secret number
# +
import projectq
from projectq.backends import CircuitDrawer
from projectq.ops import H, Z, All, Measure, Barrier
from pathlib import Path
# -
n = 3 #No. of qubits
# ## The classical approach
def query_oracle(y):
"""
The oracle returns the bit-wise dot product modulo 2
of the secret key and y
"""
x = 6 #secret key
s = bin(x & y).count('1') % 2
return s
b = ''.join([str(query_oracle(2**(k))) for k in reversed(range(n))])
print("Secret key is {} after {} tries".format(int(b, 2), n))
# ## The quantum computing approach
# There are five steps in the Bernstein-Varzirani circuit construction:
# 1. Initialize all qubits to 0 (This is the default value anyway)
# 2. Apply Hadamard transform to all qubits
# 3. Implement the oracle
# 4. Apply Hadamard transform to all qubits
# 5. Measure the circuit
#
# The workings of the Bernstein-Varzirani algorithm can only be understood by working through the mathematics. After step 2, the application of the Hadamard transform to all qubits results in
# $$\begin{align} H^{\otimes n} |0\rangle = \frac{1}{\sqrt{N}} \sum_{y=0}^{N-1} | y \rangle \end{align}$$
# where $N = 2^n$
#
# The oracle seeks to implement the following operation in step 3:
# $$\begin{align} \frac{1}{\sqrt{N}} \sum_{y=0}^{N-1} | y \rangle \rightarrow \frac{1}{\sqrt{N}} \sum_{y=0}^{N-1} (-1)^{x \bullet y} | y \rangle \end{align}$$
# The oracle effectively multiplies each superposition state $|y \rangle$ with -1 raised to the power of $x \bullet y$, the bit-wise inner product modulo 2. But why does the oracle implement this operation?
#
# Using the identity (proof given below)
# $$\begin{align} H^{\otimes n} |x\rangle = \frac{1}{\sqrt{N}} \sum_{y=0}^{N-1} (-1)^{x \bullet y} | y \rangle \end{align}$$
# If we further apply another Hadamard transform to all qubits in step 4, then $$ H^{\otimes n} H^{\otimes n} |x\rangle = H^{\otimes n} \frac{1}{\sqrt{N}} \sum_{y=0}^{N-1} (-1)^{x \bullet y} | y \rangle $$
# i.e. $$ |x\rangle = H^{\otimes n} \frac{1}{\sqrt{N}} \sum_{y=0}^{N-1} (-1)^{x \bullet y} | y \rangle $$
# since the Hadamard transform is symmetric and unitary (meaning that the transform is its own inverse). Thus measuring the circuit in step 5 will give the secret key.
#
# Summarizing,
# $$ H^{\otimes n} |0\rangle ^{\otimes n} \rightarrow \frac{1}{\sqrt{N}} \sum_{y=0}^{N-1} | y \rangle \rightarrow \frac{1}{\sqrt{N}} \sum_{y=0}^{N-1} (-1)^{x \bullet y} | y \rangle \rightarrow H^{\otimes n} \frac{1}{\sqrt{N}} \sum_{y=0}^{N-1} (-1)^{x \bullet y} | y \rangle = |x\rangle$$
# ### The oracle implementation
# First note that in implementing
# $$ \frac{1}{\sqrt{N}} \sum_{y=0}^{N-1} (-1)^{x \bullet y} | y \rangle = \frac{1}{\sqrt{N}} \sum_{y=0}^{N-1} (-1)^{(x_{n-1} y_{n-1} \oplus \ldots \oplus x_0 y_0)} | y_{n-1}\ldots y_0\rangle $$
# $$ = \frac{1}{\sqrt{N}} \sum_{y=0}^{N-1} (-1)^{x_{n-1} y_{n-1}} (-1)^{x_{n-2} y_{n-2}} \ldots (-1)^{x_0 y_0}) | y_{n-1}\ldots y_0\rangle $$
# the individual product ${x_{n-1} y_{n-1}, \ldots, x_0 y_0}$ only needs to be evaluated for bits in x that are 1.
#
# For the bits in x that are 1, we only introduce a minus sign for the bits in y that are 1 i.e.
# $ |0 \rangle \rightarrow |0 \rangle$ and $ |1 \rangle \rightarrow -|1 \rangle$. This is precisely what the Z gate does. Thus the oracle can be implemented as: if a bit in x is 1, apply the Z gate to the corresponding bit in y.
def make_bv_circuit(engine, n):
circuit = engine.allocate_qureg(n)
All(H) | circuit
Barrier | circuit
# Oracle
x = 6 # secret key
for i in range(n):
if x & 1: # Only apply Z if the current bit of the secret key x is 1
Z | circuit[i]
x >>= 1 # Move the next bit to the 1 position
Barrier | circuit
All(H) | circuit
All(Measure) | circuit
engine.flush()
return circuit
# ## Draw circuit diagram
drawing_engine = CircuitDrawer()
main_engine = projectq.MainEngine(drawing_engine)
circuit = make_bv_circuit(main_engine, n)
# +
p = Path('diagram')
if not p.exists(): #if the diagram directory doesn't exist, create it
p.mkdir()
with open('diagram/bv.tex', 'w') as f:
latex = drawing_engine.get_latex() #get circuit diagram as latex
f.write(latex)
#Change the pdf scale to 1.8 from 0.8 to have better visual effect
# !sed -i 's@tikzpicture\}\[scale=0.8@tikzpicture\}\[scale=1.8@g' diagram/bv.tex
# !cd diagram; pdflatex bv.tex > /dev/null #convert tex to latex, piping to /dev/null to silent output
#Wand package needed to convert pdf to image
from wand.image import Image as WImage
img = WImage(filename='diagram/bv.pdf')
img
# -
# ## Run circuit
main_engine = projectq.MainEngine()
circuit = make_bv_circuit(main_engine, n)
measurement = ''.join(str(int(c)) for c in reversed(circuit))
print('Measurement outcome:', measurement)
print('Secret key:', int(measurement, 2))
# ## Proof of the key identity
# $$\begin{align} H^{\otimes n} |x\rangle = \frac{1}{\sqrt{N}} \sum_{y=0}^{N-1} (-1)^{x \bullet y} | y \rangle \end{align}$$ where $ N = 2^n $ and the $ \bullet$ operator denotes bitwise dot product modulo 2 i.e. $ x \bullet y = x_0y_0 \oplus x_1y_1 \oplus \ldots x_{N-1}y_{N-1}$ where $\oplus$ denotes the sum modulo 2 operation or the XOR operation. We prove this identity by induction.
#
# Base case, n = 1:
# $$ H |x_0\rangle = \frac{1}{\sqrt{2}} (|0\rangle + (-1)^{x_0}|1\rangle) = \frac{1}{\sqrt{2}} \sum_{y=0}^{1} (-1)^{x_0y} | y \rangle$$
#
# Assuming n-1 (n > 1) is true i.e. $$ H^{\otimes (n-1)} |x_{n-2} \ldots x_0\rangle = \frac{1}{\sqrt{2^{n-1}}} \sum_{y=0}^{2^{n-1} - 1} (-1)^{(x_{n-2} \ldots x_0) \bullet (y_{n-2} \ldots y_0)} | y \rangle$$
#
# $$ = \frac{1}{\sqrt{2^{n-1}}} \sum_{y=0}^{2^{n-1} - 1} (-1)^{(x_{n-2} y_{n-2} \oplus \ldots \oplus x_0 y_0)} | y \rangle$$
#
# where $y = y_{n-2} \ldots y_0$ is the binary representation
#
#
# Then $$ H^{\otimes n} |x_{n-1} \ldots x_0 \rangle = \frac{1}{\sqrt{2}} (|0\rangle + (-1)^{x_{n-1}}|1\rangle) \times \frac{1}{\sqrt{2^{n-1}}} \sum_{y=0}^{2^{n-1} - 1} (-1)^{(x_{n-2} y_{n-2} \oplus \ldots \oplus x_0 y_0)} | y \rangle $$
#
# $$ \begin{align} = \frac{1}{\sqrt{2^n}} \sum_{y=0}^{2^{n-1} - 1} (-1)^{(x_{n-2} y_{n-2} \oplus \ldots \oplus x_0 y_0)} |0\rangle| y \rangle + \frac{1}{\sqrt{2^n}} \sum_{y=0}^{2^{n-1} - 1} (-1)^{x_{n-1}} (-1)^{(x_{n-2} y_{n-2} \oplus \ldots \oplus x_0 y_0)} |1\rangle| y \rangle \end{align} $$
#
# $$ \begin{align} = \frac{1}{\sqrt{2^n}} \sum_{y=0}^{2^{n-1} - 1} (-1)^{(x_{n-1}0 \oplus x_{n-2}y_{n-2} \oplus \ldots \oplus x_0y_0)} | y \rangle + \frac{1}{\sqrt{2^n}} \sum_{y=2^{n-1}}^{2^{n} - 1} (-1)^{(x_{n-1}1 \oplus x_{n-2}y_{n-2}\oplus \ldots \oplus x_0y_0)} | y \rangle \end{align}$$
#
# $$ = \frac{1}{\sqrt{2^n}} \sum_{y=0}^{2^{n-1} - 1} (-1)^{(x_{n-1}x_{n-2} \ldots x_0) \bullet (0y_{n-2} \ldots y_0)} | y \rangle + \frac{1}{\sqrt{2^n}} \sum_{y=2^{n-1}}^{2^{n} - 1} (-1)^{(x_{n-1} \ldots x_0) \bullet (1y_{{n}-2} \ldots y_0)} | y \rangle $$
#
# $$ = \frac{1}{\sqrt{2^{n}}} \sum_{y=0}^{2^{n} - 1} (-1)^{(x_{n-1} \ldots x_0) \bullet (y_{n-1} \ldots y_0)} | y \rangle $$
| bernstein_varzirani_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import frame as fr
import algo_result as alg_res
import os
from scipy.fft import fft, fftn, fftfreq, fftshift
import xgboost as xgb
from xgboost import plot_importance, plot_tree
import graphviz
# %run algo_process.ipynb
# -
def peak_search_dopp(spectrum,Lo_thresh,Hi_thresh,peak_relevance):
counter = 0
peak_strength = np.max(spectrum)
peak_bin = np.argmax(spectrum)
doppler_details = {"peak_count":0,"total_harmonic_relative_strength":0,"peak_bin":[],"harmonic_relative_strength":[],"max_peak_strength":peak_strength}
harmonic_strength = 0
fbc = 2
fbl2 = fbc - 2
fbl1 = fbc - 1
fbr1 = fbc + 1
fbr2 = fbc + 2
for idx in range(len(spectrum)):
if fbr2 >= len(spectrum):
continue
pvl2 = spectrum[fbl2]
pvl1 = spectrum[fbl1]
pvc = spectrum[fbc]
pvr1 = spectrum[fbr1]
pvr2 = spectrum[fbr2]
if pvl2+peak_relevance < pvl1 and pvr1 > pvr2+peak_relevance and pvc > Lo_thresh and pvc <= Hi_thresh and pvc >= pvl1 and pvc >= pvr1 and fbc != peak_bin:
harmonic_strength += pvc[0]
counter += 1
doppler_details["peak_bin"].append(fbc)
doppler_details["harmonic_relative_strength"].append(pvc[0]/peak_strength)
fbc += 1
fbl2 = fbc - 2
fbl1 = fbc - 1
fbr1 = fbc + 1
fbr2 = fbc + 2
doppler_details["peak_count"] = counter
doppler_details["total_harmonic_relative_strength"] = harmonic_strength/peak_strength
return doppler_details
def doppler_details_extraction(spectrum,T_Lo_thr,T_Mi_thr,T_Hi_thr):
doppler_details = peak_search_dopp(spectrum,0,np.Inf,0)
Hi_details={"total_harmonic_relative_strength":0,"peak_count":0,"peak_bin":[],"harmonic_relative_strength":[]}
Mi_details={"total_harmonic_relative_strength":0,"peak_count":0,"peak_bin":[],"harmonic_relative_strength":[]}
Lo_details={"total_harmonic_relative_strength":0,"peak_count":0,"peak_bin":[],"harmonic_relative_strength":[]}
for peak_idx in range(doppler_details["peak_count"]):
if doppler_details["harmonic_relative_strength"][peak_idx] > 1/T_Lo_thr and doppler_details["harmonic_relative_strength"][peak_idx] <= 1/T_Mi_thr:
Lo_details["peak_count"] += 1
Lo_details["peak_bin"].append(doppler_details["peak_bin"][peak_idx])
Lo_details["harmonic_relative_strength"].append(doppler_details["harmonic_relative_strength"][peak_idx])
elif doppler_details["harmonic_relative_strength"][peak_idx] > 1/T_Mi_thr and doppler_details["harmonic_relative_strength"][peak_idx] <= 1/T_Hi_thr:
Mi_details["peak_count"] += 1
Mi_details["peak_bin"].append(doppler_details["peak_bin"][peak_idx])
Mi_details["harmonic_relative_strength"].append(doppler_details["harmonic_relative_strength"][peak_idx])
elif doppler_details["harmonic_relative_strength"][peak_idx] > 1/T_Hi_thr:
Hi_details["peak_count"] += 1
Hi_details["peak_bin"].append(doppler_details["peak_bin"][peak_idx])
Hi_details["harmonic_relative_strength"].append(doppler_details["harmonic_relative_strength"][peak_idx])
Lo_details["total_harmonic_relative_strength"] = sum(Lo_details["harmonic_relative_strength"])
Mi_details["total_harmonic_relative_strength"] = sum(Mi_details["harmonic_relative_strength"])
Hi_details["total_harmonic_relative_strength"] = sum(Hi_details["harmonic_relative_strength"])
return Hi_details,Mi_details,Lo_details,doppler_details
# +
SAMPLES_PER_CHIRP = 64
CHIRPS_PER_FRAME = 128
T = 300e-6
SPEED_OF_LIGHT = 3e8
START_FREQUENCY = 24.025e9
B = 200e6
PULSE_REPETITION_INTERVAL = 500e-6
SAMPLE_PERIOD = T/SAMPLES_PER_CHIRP
SAMPLE_FREQUENCY = 1/SAMPLE_PERIOD
LAMBDA = SPEED_OF_LIGHT/START_FREQUENCY
RANGE_PAD = 256
DOPPLER_PAD = 512
ANTENNA_SPACING = 6.22e-3
PEAK_THRESHOLD = 0.005 # normalized FFT absolute minimum strength
PEAK_SLICE = 2 #meters around target
PEAK_WIDTH = 1 #integer
PEAK_RELEVANCE = 0 #minimum distance between pvl1,pvr1 and pvc
SEARCH_ZONE = 25 #split spectrum in slices of SEARCH_ZONE meters to find a single peak
MIN_DIST = 5 #minimum distance for detection
ANGLE_CALIBRATION = -150
ANGLE_PRECISION = 1
ANTENNA_NUMBER = 1
FRAME_REP_INTERVAL = 0.2
# -
# +
classifier = xgb.XGBClassifier()
classifier.load_model('../statistics_data_processing/code/Trained_stuff/boresight_diagonal_azimuth_model.model')
asd = pd.read_csv("../statistics_data_processing/code/Trained_stuff/Boresight_Diagonal_azimuth.csv",delimiter='\t')
d = asd.to_dict("split")
###Get dictionary with performance for each number of feature
my_dictionary_list = []
for row_idx in range(len(d['data'])):
for col_idx in range(len(d['columns'])):
if d['columns'][col_idx] == 'support':
split_bools = d['data'][row_idx][col_idx] = d['data'][row_idx][col_idx].replace("\n", "").replace(" "," ").replace("[","").replace("]","").split(" ")
d['data'][row_idx][col_idx] = []
for elem in split_bools:
if elem == 'True':
d['data'][row_idx][col_idx].append(True)
elif elem == 'False':
d['data'][row_idx][col_idx].append(False)
best_params= dict(zip(d["columns"],d["data"][row_idx]))
best_params[d['columns'][col_idx]] = d['data']
my_dictionary_list.append(best_params)
###Get best performance
max_score = 0
support = []
feature_names = []
for elem in my_dictionary_list:
if elem['score'] > max_score:
max_score = elem['score']
support = elem['support']
feature_names = elem['features'].replace("'","").replace('[','').replace(']','').replace('\n','').split(" ")
###Get feature importance
importance_type='weight'
fscores = classifier.get_booster().get_score(importance_type=importance_type)
feat_importances = []
for ft, score in fscores.items():
feat_importances.append({'Feature': ft, 'Importance': score, 'Name': feature_names[int(ft[1:])]})
feat_importances = pd.DataFrame(feat_importances)
feat_importances = feat_importances.sort_values(
by='Importance', ascending=False).reset_index(drop=True)
print(feat_importances)
###Sort labels with feature importance
feat_labels = []
for elem in feat_importances.values:
feat_labels.append(elem[2])
feat_labels = np.flip(feat_labels)
fig,ax = plt.subplots(1,1,figsize=(10,10))
###Plot importance
importance_plot=plot_importance(classifier,ax=ax,importance_type=importance_type,show_values=False)
importance_plot.set_title(f"Feature importance (by {importance_type})")
importance_plot.set_yticklabels(feat_labels)
normalization_factors = pd.read_csv("../statistics_data_processing/code/Trained_stuff/boresight_diagonal_azimuth_norm_factors.csv",delimiter='\t').values
norm_mean = normalization_factors[0]
norm_scale = normalization_factors[1]
# +
#fig,ax = plt.subplots(1,1,figsize=(10,20))
#plot_tree(classifier,ax=ax)
#plt.savefig('Tree',format='png')
# +
directory = '../../data/'
folders = []
#folders.append('Dataset_2')
folders.append('Tire_data')
for folder in folders:
###Calibration Data
calibration_data = pd.read_csv(directory + folder+ '/environment_1.txt', sep='\t', header=None)
calibration = calibration_data.select_dtypes(include = ['float']).values
CALIBRATION_FRAME_NUMBER = len(calibration)//(SAMPLES_PER_CHIRP*CHIRPS_PER_FRAME)
calibration_frames = []
###Create dataset
for frame in range(CALIBRATION_FRAME_NUMBER):
calibration_frames.append(fr.Frame(calibration[frame*CHIRPS_PER_FRAME*SAMPLES_PER_CHIRP:(frame+1)*CHIRPS_PER_FRAME*SAMPLES_PER_CHIRP,:],\
SAMPLES_PER_CHIRP, CHIRPS_PER_FRAME, ANTENNA_NUMBER, T))
average_calib_chirp = np.zeros((SAMPLES_PER_CHIRP,ANTENNA_NUMBER),dtype=complex)
for frame in range(1):
for chirp in range(CHIRPS_PER_FRAME):
average_calib_chirp += calibration_frames[frame].get_chirp(chirp)
average_calib_chirp /= CHIRPS_PER_FRAME
###Target Data
data_directory = os.fsencode(directory + folder + '/')
for file in os.listdir(data_directory):
filename = os.fsdecode(file)
if filename.find('environment') == -1 and filename.endswith('.txt') and filename.find('No_PCB_backwards_1') != -1:
actual_filename = filename
path = os.path.join(os.fsdecode(data_directory), filename)
print(path)
data = pd.read_csv(path, sep='\t', header=None)
data.columns = ["idx","I_RX1","Q_RX1"]
recording = data.select_dtypes(include = ['float']).values
FRAME_NUMBER = len(data)//(SAMPLES_PER_CHIRP*CHIRPS_PER_FRAME)
Hu_bi_frame = []
###Create dataset
for frame in range(FRAME_NUMBER):
Hu_bi_frame.append(fr.Frame(recording[frame*CHIRPS_PER_FRAME*SAMPLES_PER_CHIRP:(frame+1)*CHIRPS_PER_FRAME*SAMPLES_PER_CHIRP,:],\
SAMPLES_PER_CHIRP, CHIRPS_PER_FRAME, ANTENNA_NUMBER, T))
###Calibrate frames
calibrate = True
if calibrate:
for frame in range(FRAME_NUMBER):
Hu_bi_frame[frame].calibrate(average_calib_chirp)
xf = np.arange(0,RANGE_PAD)
range_bin = xf*T*SPEED_OF_LIGHT/(2*B)/(T/SAMPLES_PER_CHIRP)/RANGE_PAD
range2bin = 1/(T/SAMPLES_PER_CHIRP)/RANGE_PAD*T*SPEED_OF_LIGHT/(2*B)
vel_bin = fftshift(fftfreq(DOPPLER_PAD,PULSE_REPETITION_INTERVAL))*SPEED_OF_LIGHT/(2*START_FREQUENCY)
Target_observations = []
Range_spectrum_history = []
Unfiltered_spectrum_history = []
Target_info_list = []
Target_presence_list = []
###Process frames
for frame in range(FRAME_NUMBER):
data_out,target_info, MTI_out = algo_process(Hu_bi_frame[frame],RANGE_PAD,CHIRPS_PER_FRAME,DOPPLER_PAD,PEAK_THRESHOLD,PEAK_SLICE,PEAK_WIDTH,PEAK_RELEVANCE,SEARCH_ZONE,ANGLE_CALIBRATION,ANGLE_PRECISION,round(LAMBDA/ANTENNA_SPACING),range2bin)
Range_spectrum_history.append(MTI_out)
Unfiltered_spectrum_history.append(abs(data_out[:,0,0]))
if(target_info.num_targets > 0 and target_info.location[0] < SEARCH_ZONE and target_info.location[0] > MIN_DIST):
Target_info_list.append(target_info)
Target_presence_list.append(1)
else:
Target_presence_list.append(0)
###Feature extraction
target_doppler_spec = np.zeros((DOPPLER_PAD,1))
idx = 0
weighted_avg = []
weighted_std = []
location = []
strength = []
dB_Hi = 15
T_Hi_thr = np.power(10,dB_Hi/20)
dB_Mi = 30
T_Mi_thr = np.power(10,dB_Mi/20)
dB_Lo = 44
T_Lo_thr = np.power(10,dB_Lo/20)
SPC_Hi = []
SPD = []
SPC_Lo = []
SPC_Mi = []
MDR = []
maximum_deviation = []
Hi_peak_count = []
Lo_peak_count = []
std_deviation = []
Hi_harmonic_power_content = []
Lo_harmonic_power_content = []
frame_counter = []
correlation = []
max_to_mean_distance = []
peak_strength_std = []
Lo_skewness = []
Lo_skewness_variation = []
Hi_deviation = []
Hi_deviation_difference = []
Mi_peak_count = []
Mi_harmonic_power_content = []
Mi_skewness = []
Mi_skewness_difference = []
Hi_skewness = []
Hi_skewness_difference = []
Lo_skewness_mean = []
Side_lobe_strength = []
Side_lobe_max_spread = []
SPC_Lo_skewness = []
SPC_Mi_Hi_skewness = []
SPC_Mi_variance = []
counter = 0
all_features = [location,weighted_avg,weighted_std,SPC_Hi,SPD,MDR,strength,maximum_deviation,SPC_Lo,Hi_peak_count,\
std_deviation,Hi_harmonic_power_content,frame_counter,correlation,max_to_mean_distance,peak_strength_std,\
Lo_peak_count,Lo_harmonic_power_content,SPC_Mi,Lo_skewness,Lo_skewness_variation,Hi_deviation,\
Hi_deviation_difference, Mi_peak_count, Mi_harmonic_power_content, Mi_skewness, Mi_skewness_difference,\
Hi_skewness, Hi_skewness_difference,Lo_skewness_mean,Side_lobe_strength,Side_lobe_max_spread,SPC_Lo_skewness,\
SPC_Mi_Hi_skewness, SPC_Mi_variance]
mask = []
kk = 0
for u in range(len(all_features)):
if u == 1:
mask.append(False)
elif u == 12:
mask.append(False)
elif u == 15:
mask.append(False)
else:
mask.append(support[kk])
kk+=1
predictions = []
bi_quadratic = []
for i in range(DOPPLER_PAD):
if i > 3*DOPPLER_PAD//8 and i < DOPPLER_PAD//2:
bi_quadratic.append(1-(i-7*DOPPLER_PAD//16)**2/(DOPPLER_PAD//16)**2)
elif i > DOPPLER_PAD//2 and i < 5*DOPPLER_PAD//8:
bi_quadratic.append(1-(i-9*DOPPLER_PAD//16)**2/(DOPPLER_PAD//16)**2)
else:
bi_quadratic.append(0)
hor_quadratic = []
for i in range(DOPPLER_PAD):
if i > DOPPLER_PAD//4 and i < DOPPLER_PAD//2:
hor_quadratic.append(np.sqrt((DOPPLER_PAD//2-i)/(DOPPLER_PAD//4)))
elif i > DOPPLER_PAD//2 and i < 3*DOPPLER_PAD//4:
hor_quadratic.append(np.sqrt((i-DOPPLER_PAD//2)/(DOPPLER_PAD//4)))
else:
hor_quadratic.append(0)
previous_target_presence = 1 #to check for targets present in 2 subsequent frames
no_interruption = 0
subsequent_frames = 0
for target_presence in Target_presence_list:
counter +=1
###Use to cut pieces of recording
if folder == 'Dataset_1':
if filename.find('driving_diagonal_1')!=-1:
if counter > 145 and counter < 275:
target_doppler_spec = np.append(target_doppler_spec, np.zeros((DOPPLER_PAD,1)), axis=1)
continue
elif filename.find('driving_diagonal_2')!=-1:
if counter > 135 and counter < 195:
target_doppler_spec = np.append(target_doppler_spec, np.zeros((DOPPLER_PAD,1)), axis=1)
continue
elif filename.find('driving_diagonal_3')!=-1:
if counter > 135 and counter < 260:
target_doppler_spec = np.append(target_doppler_spec, np.zeros((DOPPLER_PAD,1)), axis=1)
continue
if target_presence:
next_dopp_spectrum = Target_info_list[idx].doppler_spectrum[:,0].reshape((DOPPLER_PAD,1))*Target_info_list[idx].location[0]**2
target_doppler_spec = np.append(target_doppler_spec,next_dopp_spectrum , axis=1)
if previous_target_presence:
###
location.append(Target_info_list[idx].location[0]/SEARCH_ZONE)
strength.append(sum(next_dopp_spectrum))
###
length = len(next_dopp_spectrum)
max_peak = max(next_dopp_spectrum)
SPC_Hi_thresh = max_peak/T_Hi_thr
SPC_Mi_thresh = max_peak/T_Mi_thr
SPC_Lo_thresh = max_peak/T_Lo_thr
weighted_avg.append(sum([i*next_dopp_spectrum[i] for i in range(length)])/sum(next_dopp_spectrum[:]))
tmp_roll = np.roll(next_dopp_spectrum,DOPPLER_PAD//2-round(weighted_avg[subsequent_frames][0]))
weighted_std.append(np.sqrt(sum([(i-DOPPLER_PAD//2)**2*tmp_roll[i] for i in np.arange(DOPPLER_PAD//4,3*DOPPLER_PAD//4)])/sum(tmp_roll[DOPPLER_PAD//4:3*DOPPLER_PAD//4])/(DOPPLER_PAD//4)**2))
###
max_peak_bin = np.argmax(next_dopp_spectrum)
tmp_roll = np.roll(next_dopp_spectrum,DOPPLER_PAD//2-max_peak_bin)
SPC_Hi.append(sum([tmp_roll[i] > SPC_Hi_thresh for i in range(length)])/DOPPLER_PAD)
if(not no_interruption):
SPC_prime = SPC_Hi[subsequent_frames-1]
else:
previous_doppler_spectrum = Target_info_list[idx-1].doppler_spectrum[:,0].reshape((DOPPLER_PAD,1))*Target_info_list[idx-1].location[0]**2
SPC_prime = sum([previous_doppler_spectrum[i] > SPC_Hi_thresh for i in range(length)])/DOPPLER_PAD
SPD.append(np.abs(SPC_Hi[subsequent_frames] - SPC_Hi[subsequent_frames-1]))
SPC_Lo.append(sum([(tmp_roll[i] > SPC_Lo_thresh and tmp_roll[i] <= SPC_Mi_thresh) for i in range(length)])/DOPPLER_PAD)
SPC_Lo_skewness.append(sum([int(tmp_roll[i] > SPC_Lo_thresh and tmp_roll[i] <= SPC_Mi_thresh)*tmp_roll[i]*(i-DOPPLER_PAD//2)**3 for i in np.arange(DOPPLER_PAD//4,3*DOPPLER_PAD//4)])/(DOPPLER_PAD//4)**3/np.max(tmp_roll))
SPC_Mi.append(sum([(tmp_roll[i] > SPC_Mi_thresh and tmp_roll[i] <= SPC_Hi_thresh) for i in range(length)])/DOPPLER_PAD)
SPC_Mi_Hi_skewness.append(sum([int(tmp_roll[i] > SPC_Mi_thresh)*tmp_roll[i]*(i-DOPPLER_PAD//2)**3 for i in np.arange(DOPPLER_PAD//4,3*DOPPLER_PAD//4)])/(DOPPLER_PAD//4)**3/np.max(tmp_roll))
SPC_Mi_variance.append(sum([int(tmp_roll[i] > SPC_Mi_thresh and tmp_roll[i] <= SPC_Hi_thresh)*tmp_roll[i]*(i-DOPPLER_PAD//2)**2 for i in np.arange(DOPPLER_PAD//4,3*DOPPLER_PAD//4)])/(DOPPLER_PAD//4)**2/np.max(tmp_roll))
Gt = np.max(Target_info_list[idx].doppler_spectrum[:,0])*Target_info_list[idx].location[0]**2
Gt_prime = np.max(Target_info_list[idx-1].doppler_spectrum[:,0])*Target_info_list[idx-1].location[0]**2
MDR.append(np.abs(Gt - Gt_prime)/Gt )
### Details extraction
Hi_details,Mi_details,Lo_details,all_details = doppler_details_extraction(tmp_roll,T_Lo_thr,T_Mi_thr,T_Hi_thr)
if(not no_interruption):
previous_doppler_spectrum = Target_info_list[idx-1].doppler_spectrum[:,0].reshape((DOPPLER_PAD,1))*Target_info_list[idx-1].location[0]**2
max_peak_bin_prime = np.argmax(previous_doppler_spectrum)
tmp_roll_prime = np.roll(previous_doppler_spectrum,DOPPLER_PAD//2-max_peak_bin_prime)
Hi_details_prime,Mi_details_prime,Lo_details_prime,all_details_prime = doppler_details_extraction(tmp_roll_prime,T_Lo_thr,T_Mi_thr,T_Hi_thr)
maximum_deviation.append(np.sqrt(sum([(i-DOPPLER_PAD//2)**2*tmp_roll[i] for i in range(length)])/sum(next_dopp_spectrum[:])/DOPPLER_PAD**2))
Side_lobe_strength.append(np.sqrt(sum([bi_quadratic[i]*tmp_roll[i] for i in range(length)])/sum(next_dopp_spectrum[:])))
Side_lobe_max_spread.append(np.sqrt(sum([hor_quadratic[i]*tmp_roll[i] for i in range(length)])/sum(next_dopp_spectrum[:])))
###High
Hi_peak_count.append(Hi_details["peak_count"])
Hi_harmonic_power_content.append(Hi_details["total_harmonic_relative_strength"])
hi_std = np.sqrt(sum([(Hi_details["peak_bin"][i]-DOPPLER_PAD//2)**2*Hi_details["harmonic_relative_strength"][i] for i in range(Hi_details["peak_count"])])/(DOPPLER_PAD//2)**2)
Hi_deviation.append(hi_std)
Hi_deviation_difference.append(abs(Hi_deviation[subsequent_frames] - Hi_deviation[subsequent_frames-1]))
third_moment = (sum([(Hi_details["peak_bin"][i]-DOPPLER_PAD//2)**3*Hi_details["harmonic_relative_strength"][i] for i in range(Hi_details["peak_count"])])/(DOPPLER_PAD//2)**3)
Hi_skewness.append(third_moment)
if(no_interruption):
Hi_skewness_prime = Hi_skewness[subsequent_frames-1]
else:
Hi_skewness_prime = (sum([(Hi_details_prime["peak_bin"][i]-DOPPLER_PAD//2)**3*Hi_details_prime["harmonic_relative_strength"][i] for i in range(Hi_details_prime["peak_count"])])/(DOPPLER_PAD//2)**3)
Hi_skewness_difference.append(abs(Hi_skewness[subsequent_frames] - Hi_skewness_prime))
###Mid
Mi_peak_count.append(Mi_details["peak_count"])
Mi_harmonic_power_content.append(Mi_details["total_harmonic_relative_strength"])
third_moment = (sum([(Mi_details["peak_bin"][i]-DOPPLER_PAD//2)**3*Mi_details["harmonic_relative_strength"][i] for i in range(Mi_details["peak_count"])])/(DOPPLER_PAD//2)**3)
Mi_skewness.append(third_moment)
if(no_interruption):
Mi_skewness_prime = Mi_skewness[subsequent_frames-1]
else:
Mi_skewness_prime = (sum([(Mi_details_prime["peak_bin"][i]-DOPPLER_PAD//2)**3*Mi_details_prime["harmonic_relative_strength"][i] for i in range(Mi_details_prime["peak_count"])])/(DOPPLER_PAD//2)**3)
Mi_skewness_difference.append(abs(Mi_skewness[subsequent_frames] - Mi_skewness_prime))
###Low
Lo_peak_count.append(Lo_details["peak_count"])
Lo_harmonic_power_content.append(Lo_details["total_harmonic_relative_strength"])
skewness = (sum([(Lo_details["peak_bin"][i]-DOPPLER_PAD//2)**3*Lo_details["harmonic_relative_strength"][i] for i in range(Lo_details["peak_count"])])/(DOPPLER_PAD//2)**3)
Lo_skewness.append(skewness)
if(no_interruption):
Lo_skewness_prime = Lo_skewness[subsequent_frames-1]
else:
Lo_skewness_prime = (sum([(Lo_details_prime["peak_bin"][i]-DOPPLER_PAD//2)**3*Lo_details_prime["harmonic_relative_strength"][i] for i in range(Lo_details_prime["peak_count"])])/(DOPPLER_PAD//2)**3)
Lo_skewness_variation.append(abs(Lo_skewness[subsequent_frames] - Lo_skewness_prime))
Lo_skewness_mean.append((Lo_skewness[subsequent_frames] + Lo_skewness_prime)/2)
###
if(no_interruption):
weighted_std_prime = weighted_std[subsequent_frames-1]
else:
previous_doppler_spectrum = Target_info_list[idx-1].doppler_spectrum[:,0].reshape((DOPPLER_PAD,1))*Target_info_list[idx-1].location[0]**2
weighted_avg_prime = (sum([i*previous_doppler_spectrum[i] for i in range(length)])/sum(previous_doppler_spectrum[:]))
tmp_roll = np.roll(previous_doppler_spectrum,DOPPLER_PAD//2-round(weighted_avg_prime[0]))
weighted_std_prime = (np.sqrt(sum([(i-DOPPLER_PAD//2)**2*tmp_roll[i] for i in np.arange(DOPPLER_PAD//4,3*DOPPLER_PAD//4)])/sum(tmp_roll[DOPPLER_PAD//4:3*DOPPLER_PAD//4])/(DOPPLER_PAD//4)**2))
std_deviation.append(np.abs(weighted_std[subsequent_frames] - weighted_std_prime))
frame_counter.append(counter)
###
correlation.append(sum(Target_info_list[idx].doppler_spectrum[:,0]*Target_info_list[idx-1].doppler_spectrum[:,0])/sum(Target_info_list[idx].doppler_spectrum[:,0])**2)
###
max_to_mean_distance.append(np.abs(max_peak_bin-weighted_avg[subsequent_frames])/DOPPLER_PAD)
###
peak_strength = np.max(Target_info_list[idx].print[:,:,0],axis=0)*Target_info_list[idx].location[0]**2
peak_strength_mean = np.sum(peak_strength)/CHIRPS_PER_FRAME
peak_strength_std.append(np.sqrt(np.sum((peak_strength-peak_strength_mean)**2)))
### Prediction
X_test = []
for v in range(len(mask)):
if mask[v]:
try:
X_test.append(all_features[v][subsequent_frames][0])
except:
X_test.append(all_features[v][subsequent_frames])
X_test = (np.array(X_test) - np.array(norm_mean[support]))/np.array(norm_scale[support]).reshape(1,-1)
predictions.append(classifier.predict(X_test)[0])
subsequent_frames += 1
### Append -1 in case 2 subsequent frames were not available
else:
predictions.append(-1)
idx += 1
else:
predictions.append(-1)
target_doppler_spec = np.append(target_doppler_spec, np.zeros((DOPPLER_PAD,1)), axis=1)
no_interruption = previous_target_presence
previous_target_presence = target_presence
no_interruption = no_interruption and previous_target_presence and target_presence
target_doppler_spec = target_doppler_spec[:,1:]
print("Doppler x total_frames: " + str(target_doppler_spec.shape))
### SAVE STATS
'''
Target_statistics = np.empty((1,subsequent_frames))
for feature in all_features:
Target_statistics = np.append(Target_statistics,np.array(feature).reshape(1,-1),axis=0)
Target_statistics = Target_statistics[1:,:]
print("Extracted_features x data_points: " + str(Target_statistics.shape))
df = pd.DataFrame(Target_statistics)
output_path = "../statistics_data_processing/data/"
output_filename = actual_filename.split('.')[0]
extension = '_statistics'
df.T.to_csv(output_path+output_filename+'_'+folder+extension+'.txt', sep='\t',index=False, header=False)
'''
# -
predictions_converted = []
for pred in predictions:
if pred == 0 or pred == 3 or pred == 6:
predictions_converted.append(0)
elif pred == 1 or pred == 4 or pred == 7:
predictions_converted.append(1)
elif pred == 2 or pred == 5 or pred == 8:
predictions_converted.append(2)
elif pred == -1:
predictions_converted.append(-1)
predictions = predictions_converted
print(predictions)
import matplotlib as mpl
from matplotlib.ticker import FormatStrFormatter
# %matplotlib inline
### PLOT DOPPLER VS TIME
fig,[ax,cax] = plt.subplots(1,2, gridspec_kw={"width_ratios":[10,1],'wspace':0.01}, figsize=[20,10])
ax.clear()
cmap = mpl.cm.get_cmap('turbo')
norm = mpl.colors.Normalize(vmin=0, vmax=np.max(target_doppler_spec))
cb1 = mpl.colorbar.ColorbarBase(cax, cmap=cmap,
norm=norm,
orientation='vertical')
levels = np.linspace(0.0, np.max(target_doppler_spec), 100)
ax.pcolormesh(np.arange(0,counter)*FRAME_REP_INTERVAL,vel_bin,target_doppler_spec, cmap=cmap, norm=norm, shading='nearest')
ax.set_xlabel('seconds',fontsize=30)
ax.set_ylabel('velocity', fontsize=30)
_ = ax.set_xticks(np.arange(0,FRAME_NUMBER,20)*FRAME_REP_INTERVAL)
_ = ax.set_xticklabels((np.arange(0,FRAME_NUMBER,20)*FRAME_REP_INTERVAL).astype('int'),fontsize=20)
#_ = ax.set_yticks(vel_bin[np.arange(0,DOPPLER_PAD,64)])
_ = ax.set_yticklabels(ax.get_yticks(),fontsize=20)
plt.savefig('Thesis_figures/'+'No_PCB.png')
# +
# %matplotlib inline
rolled_doppler = np.zeros(target_doppler_spec.shape)
for frame in range(FRAME_NUMBER):
if max(target_doppler_spec[:,frame]) > 0:
max_idx = np.argmax(target_doppler_spec[:,frame])
#round(sum([(i)*target_doppler_spec[i,frame] for i in range(DOPPLER_PAD)])/sum(target_doppler_spec[:,frame]))
rolled_doppler[:,frame] = np.roll(target_doppler_spec[:,frame],(DOPPLER_PAD//2 - max_idx))
fig,[ax,cax] = plt.subplots(1,2, gridspec_kw={"width_ratios":[10,1],'wspace':0.01}, figsize=[20,10])
ax.clear()
cmap = mpl.cm.get_cmap('turbo')
norm = mpl.colors.Normalize(vmin=0, vmax=np.max(rolled_doppler))
cb1 = mpl.colorbar.ColorbarBase(cax, cmap=cmap,
norm=norm,
orientation='vertical')
levels = np.linspace(0.0, np.max(rolled_doppler), 100)
ax.pcolormesh(np.arange(0,FRAME_NUMBER)*FRAME_REP_INTERVAL,vel_bin,rolled_doppler, cmap=cmap, norm=norm, shading='nearest')
#ax.contourf(np.arange(0,FRAME_NUMBER),vel_bin,rolled_doppler, levels, cmap=cmap, norm=norm)
ax.set_xlabel('seconds',fontsize=30)
ax.set_ylabel('velocity', fontsize=30)
_ = ax.set_xticks(np.arange(0,FRAME_NUMBER,20)*FRAME_REP_INTERVAL)
_ = ax.set_xticklabels((np.arange(0,FRAME_NUMBER,20)*FRAME_REP_INTERVAL).astype('int'),fontsize=20)
#_ = ax.set_yticks(vel_bin[np.arange(0,DOPPLER_PAD,64)])
_ = ax.set_yticklabels(ax.get_yticks(),fontsize=20)
#plt.savefig('Thesis_figures/'+'centered_all_doppler_biking_boresight_1.png')
# -
column_plots = 1
row_plots = 8
fig,ax = plt.subplots(row_plots,column_plots,figsize=[12,5])
from_second_number = 100#round(32/FRAME_REP_INTERVAL)
for col in range(column_plots):
for row in range(row_plots):
data = target_doppler_spec[:,row + row_plots*col + from_second_number].reshape((1,-1))[0,:]
ax[row].plot(vel_bin,data)
plt.show()
# %matplotlib widget
fig,ax = plt.subplots(3,1, figsize=[12,5])
from_second_number = 105#round(32/FRAME_REP_INTERVAL)+5
#for frame in np.arange(from_second_number,from_second_number+1):
ax[0].plot(np.sum(target_doppler_spec[:,from_second_number:from_second_number+1],axis=1))
if(sum(target_doppler_spec[:,from_second_number:from_second_number+1][:] > 0)):
actual_dopp = target_doppler_spec[:,from_second_number:from_second_number+1]
weighted_avg_1 = sum([i*actual_dopp[i] for i in range(length)])/sum(actual_dopp[:])
ax[0].plot(weighted_avg_1,np.max(actual_dopp),'ro')
low_tresh = np.max(actual_dopp)/T_Lo_thr*np.ones(length)
mid_tresh = np.max(actual_dopp)/T_Mi_thr*np.ones(length)
high_tresh = np.max(actual_dopp)/T_Hi_thr*np.ones(length)
ax[0].plot(low_tresh)
ax[0].plot(mid_tresh)
ax[0].plot(high_tresh)
ax[0].set_ylim((0,high_tresh[0]*11/10))
actual_dopp = np.roll(actual_dopp,DOPPLER_PAD//2 - round(weighted_avg_1[0]))
ax[1].plot(actual_dopp)
weighted_avg_1 = sum([i*actual_dopp[i] for i in range(length)])/sum(actual_dopp[:])
ax[1].plot(DOPPLER_PAD//2,np.max(actual_dopp),'ro')
ax[1].plot(low_tresh)
ax[1].plot(mid_tresh)
ax[1].plot(high_tresh)
weighted_std_1 = ([(i-DOPPLER_PAD//2)**2*actual_dopp[i] for i in np.arange(DOPPLER_PAD//4,3*DOPPLER_PAD//4)])/sum(tmp_roll[DOPPLER_PAD//4:3*DOPPLER_PAD//4])/(DOPPLER_PAD//4)**2
ax[2].plot(np.arange(0,len(weighted_std_1))+DOPPLER_PAD//4,weighted_std_1,'bo')
print(np.sqrt(sum(weighted_std_1)))
print(round(weighted_avg_1[0]))
# %matplotlib inline
fig,ax = plt.subplots(1,1,figsize=(20,10))
ax.plot(np.sum(target_doppler_spec[:,from_second_number:from_second_number+1],axis=1))
if(sum(target_doppler_spec[:,from_second_number:from_second_number+1][:] > 0)):
actual_dopp = target_doppler_spec[:,from_second_number:from_second_number+1]
weighted_avg_1 = sum([i*actual_dopp[i] for i in range(length)])/sum(actual_dopp[:])
ax.plot(weighted_avg_1,np.max(actual_dopp),'ro')
low_tresh = np.max(actual_dopp)/T_Lo_thr*np.ones(length)
mid_tresh = np.max(actual_dopp)/T_Mi_thr*np.ones(length)
high_tresh = np.max(actual_dopp)/T_Hi_thr*np.ones(length)
ax.plot(low_tresh)
ax.plot(mid_tresh)
ax.plot(high_tresh)
#ax.set_ylim((0,high_tresh[0]*11/10))
def peak_search_details(spectrum,Lo_thresh,Hi_thresh,peak_relevance):
counter = 0
peak_power = np.max(spectrum)
peak_bin = np.argmax(spectrum)
harmonic_power = 0
fbc = 2
fbl2 = fbc - 2
fbl1 = fbc - 1
fbr1 = fbc + 1
fbr2 = fbc + 2
peak_info = {"peak_bin":[],"peak_strength":[],"max_peak_strength":peak_power}
for idx in range(len(spectrum)):
if fbr2 >= len(spectrum):
continue
pvl2 = spectrum[fbl2]
pvl1 = spectrum[fbl1]
pvc = spectrum[fbc]
pvr1 = spectrum[fbr1]
pvr2 = spectrum[fbr2]
if pvl2+peak_relevance < pvl1 and pvr1 > pvr2+peak_relevance and pvc > Lo_thresh and pvc < Hi_thresh and pvc >= pvl1 and pvc >= pvr1 and fbc != peak_bin:
peak_info["peak_bin"].append(fbc)
peak_info["peak_strength"].append(pvc/peak_power)
fbc += 1
fbl2 = fbc - 2
fbl1 = fbc - 1
fbr1 = fbc + 1
fbr2 = fbc + 2
return peak_info
frame_doppler_peaks_dict = []
for target in Target_info_list:
max_bin = np.argmax(target.doppler_spectrum[:,0])
frame_doppler_peaks_dict.append(peak_search_details(np.roll(target.doppler_spectrum[:,0]*target.location[0]**2,DOPPLER_PAD//2 - max_bin),0,np.Inf,0))
# %matplotlib widget
fig,ax = plt.subplots(1,1, figsize=[12,5])
all_doppler_peaks = np.array([])
for frame in frame_doppler_peaks_dict:
all_doppler_peaks = np.append(all_doppler_peaks,np.array(frame["peak_strength"]))
n,bins=np.histogram(all_doppler_peaks,5000)
cumulative_n = [0]
for idx in range(len(n)):
cumulative_n.append(n[idx] + cumulative_n[idx])
ax.plot(bins,cumulative_n/cumulative_n[-1])
ax.set_xlim((0,0.8))
# +
peak_presence_frequency = np.zeros(len(bins)-1)
for frame_peaks in frame_doppler_peaks_dict:
for bin_idx in range(len(bins)-1):
for peak in frame_peaks['peak_strength']:
if bins[bin_idx] <= peak and bins[bin_idx+1] >= peak:
peak_presence_frequency[bin_idx] += 1
break
fig,ax = plt.subplots(1,1, figsize=[12,5])
ax.plot(bins[:-1],peak_presence_frequency/sum(Target_presence_list))
# -
fig,ax = plt.subplots(1,1, figsize=[12,5])
ax.plot(bins[:-1],peak_presence_frequency/sum(Target_presence_list)**2*n)
ax.set_xlim((0,0.04))
fig,[ax,cax] = plt.subplots(1,2, gridspec_kw={"width_ratios":[10,1],'wspace':0.01}, figsize=[20,10])
ax.clear()
cmap = mpl.cm.get_cmap('turbo')
norm = mpl.colors.Normalize(vmin=0, vmax=np.max(Range_spectrum_history))
cb1 = mpl.colorbar.ColorbarBase(cax, cmap=cmap,
norm=norm,
orientation='vertical')
levels = np.linspace(0.0, np.max(Range_spectrum_history), 100)
ax.pcolormesh(np.arange(0,FRAME_NUMBER)*FRAME_REP_INTERVAL,range_bin,np.array(Range_spectrum_history).T, cmap=cmap, norm=norm, shading='nearest')
ax.set_xlabel('seconds',fontsize=30)
ax.set_ylabel('range', fontsize=30)
_ = ax.set_xticks(np.arange(0,FRAME_NUMBER,20)*FRAME_REP_INTERVAL)
_ = ax.set_xticklabels((np.arange(0,FRAME_NUMBER,20)*FRAME_REP_INTERVAL).astype('int'),fontsize=20)
#_ = ax.set_yticks(vel_bin[np.arange(0,DOPPLER_PAD,64)])
_ = ax.set_yticklabels(ax.get_yticks(),fontsize=20)
plt.savefig('Thesis_figures/'+'Presentation_MTI_range_driving_boresight_1.png')
fig,[ax,cax] = plt.subplots(1,2, gridspec_kw={"width_ratios":[10,1],'wspace':0.01}, figsize=[20,10])
ax.clear()
cmap = mpl.cm.get_cmap('turbo')
norm = mpl.colors.Normalize(vmin=0, vmax=np.max(Unfiltered_spectrum_history))
cb1 = mpl.colorbar.ColorbarBase(cax, cmap=cmap,
norm=norm,
orientation='vertical')
levels = np.linspace(0.0, np.max(Unfiltered_spectrum_history), 100)
ax.pcolormesh(np.arange(0,FRAME_NUMBER)*FRAME_REP_INTERVAL,range_bin,np.array(Unfiltered_spectrum_history).T, cmap=cmap, norm=norm, shading='nearest')
ax.set_xlabel('seconds',fontsize=30)
ax.set_ylabel('range', fontsize=30)
_ = ax.set_xticks(np.arange(0,FRAME_NUMBER,20)*FRAME_REP_INTERVAL)
_ = ax.set_xticklabels((np.arange(0,FRAME_NUMBER,20)*FRAME_REP_INTERVAL).astype('int'),fontsize=20)
#_ = ax.set_yticks(vel_bin[np.arange(0,DOPPLER_PAD,64)])
_ = ax.set_yticklabels(ax.get_yticks(),fontsize=20)
#plt.savefig('Thesis_figures/'+'Unfiltered_range_biking_boresight_1.png')
# +
# %matplotlib widget
column_plots = 1
row_plots = 2
fig,ax = plt.subplots(row_plots,column_plots,figsize=[12,5])
from_second_number = 0
separator = np.zeros(len(Target_info_list[0].print[:,0,0]))
separator[-1] = 0.05
separator_list = []
for i in range(len(Target_info_list[0].print[0,:,0])):
separator_list.append(separator)
separator_list = np.array(separator_list).reshape((1,-1))[0,:]
for col in range(column_plots):
for row in range(row_plots):
data = Target_info_list[row + row_plots*col + from_second_number].print[:,:,0].T.reshape((1,-1))[0,:]
ax[row].set_ylim(0,0.2)
ax[row].plot(data)
ax[row].plot(separator_list[:],'ro',markersize=0.5)
plt.show()
peak_avg = sum(np.max(Target_info_list[from_second_number].print[:,:,0],axis=0))/CHIRPS_PER_FRAME
# +
column_plots = 1
row_plots = 8
fig,ax = plt.subplots(row_plots,column_plots,figsize=[20,10])
from_second_number = 9
separator = np.zeros(len(Target_info_list[0].print[:,0,0]))
separator[-1] = 0.05
separator_list = []
for i in range(len(Target_info_list[0].print[0,:,0])):
separator_list.append(separator)
separator_list = np.array(separator_list).reshape((1,-1))[0,:]
for col in range(column_plots):
for row in range(row_plots):
data = np.max(Target_info_list[row + row_plots*col + from_second_number].print[:,:,0],axis=0).T.reshape((1,-1))[0,:]
ax[row].hist(data,bins=100,range=(0,0.2),density=False)
plt.show()
print(Target_info_list[from_second_number].location[0])
# -
peak_collection = []
for target in Target_info_list:
peak_strength = np.max(target.print[:,:,0],axis=0)*target.location[0]**2
peak_strength_mean = np.sum(peak_strength)/CHIRPS_PER_FRAME
peak_collection.append(peak_strength-peak_strength_mean)
peak_collection = np.array(peak_collection).reshape((1,-1))
_=plt.hist(peak_collection[0,:],bins=100)
'''
from matplotlib.animation import FuncAnimation, writers
fig,[ax1,ax2] = plt.subplots(2,1,figsize=(25, 25))
classes = ['Pedestrian','Cyclist','Car']
ax1.title.set_text('Range')
ax1.title.set_fontsize(40)
ax2.title.set_fontsize(40)
ax2.title.set_text('Doppler')
ax1.set_xlim(range_bin[0], range_bin[-1])
ax1.set_ylim(0,np.max(Range_spectrum_history)*8/10)
ax2.set_xlim(vel_bin[0], vel_bin[-1])
ax1.tick_params(labelsize=30)
ax2.tick_params(labelsize=30)
#ax2.set_ylim(0,np.max(target_doppler_spec)*0.5)
ax1.axvline(MIN_DIST, lw=3, linestyle='--', color='black')
ax1.axvline(SEARCH_ZONE, lw=3, linestyle='--', color='black', label='Search Region')
ax1.plot(range_bin,np.ones(len(range_bin))*PEAK_THRESHOLD, lw=3, linestyle='dotted', color='gray', label='Detection Threshold')
# intialize two line objects (one in each axes)
line1, = ax1.plot([], [], lw=4, color='r', label='Filtered Range FFT Spectrum')
line2, = ax2.plot([], [], lw=4, color='r', label='Doppler FFT Spectrum')
line11, = ax1.plot([], [], 'D', color='black', markersize=15,label='Target location')
line21, = ax2.plot([],[], lw=3, linestyle='dashdot',color='limegreen', label='Feature Extractor')
line22, = ax2.plot([],[], lw=3, linestyle='dashdot',color='limegreen')
line23, = ax2.plot([],[], lw=3, linestyle='dashdot',color='limegreen')
line24, = ax2.plot([],[], lw=3, linestyle='dashdot',color='royalblue', label='Feature Extractor')
line25, = ax2.plot([],[], lw=3, linestyle='dashdot',color='royalblue')
line = [line1, line2, line21,line22,line23,line24,line25, line11]
ax1.legend(fontsize=20,loc=1)
ax2.legend(fontsize=20,loc=1)
#plt.xlabel(r'meters')
#plt.ylabel(r'fft magnitude')
text_axis = ax2.text(.2, -.2, 'Class:'+'No prediction available', style='italic',fontsize=60,
bbox={'facecolor': 'bisque', 'alpha': 0.5, 'pad': 10},visible=True,transform=ax2.transAxes)
#
# animation function
def animate(i):
line[0].set_data(range_bin, Range_spectrum_history[i])
if predictions[i] == -1:
text_axis.set_text('Class:'+'No prediction available')
elif predictions[i] == 0:
text_axis.set_text('Class:'+classes[predictions[i]] + ' at ' + str(round(range_bin[np.argmax(Range_spectrum_history[i])],2))+ ' m')
elif predictions[i] == 1:
text_axis.set_text('Class:'+classes[predictions[i]] + ' at ' + str(round(range_bin[np.argmax(Range_spectrum_history[i])],2))+ ' m')
elif predictions[i] == 2:
text_axis.set_text('Class:'+classes[predictions[i]] + ' at ' + str(round(range_bin[np.argmax(Range_spectrum_history[i])],2))+ ' m')
if(Target_presence_list[i]>0):
#ax1.set_ylim(0,np.max(Range_spectrum_history[i]*11/10))
ax2.set_ylim(0,np.max(target_doppler_spec[:,i])*11/10)
line[1].set_data(vel_bin,target_doppler_spec[:,i])
line[2].set_data(vel_bin,np.ones(DOPPLER_PAD)*np.max(target_doppler_spec[:,i])/T_Lo_thr)
line[3].set_data(vel_bin,np.ones(DOPPLER_PAD)*np.max(target_doppler_spec[:,i])/T_Mi_thr)
line[4].set_data(vel_bin,np.ones(DOPPLER_PAD)*np.max(target_doppler_spec[:,i])/T_Hi_thr)
line[5].set_data(vel_bin,np.roll(np.array(bi_quadratic)*np.max(target_doppler_spec[:,i]),np.argmax(target_doppler_spec[:,i])-DOPPLER_PAD//2))
line[6].set_data(vel_bin,np.roll(np.array(hor_quadratic)*np.max(target_doppler_spec[:,i]),np.argmax(target_doppler_spec[:,i])-DOPPLER_PAD//2))
line[7].set_data(range_bin[np.argmax(Range_spectrum_history[i])],np.max(Range_spectrum_history[i]))
else:
for i in np.arange(1,8):
line[i].set_data([],[])
return line
anim = FuncAnimation(fig, animate, frames=FRAME_NUMBER)
Writer = writers['ffmpeg']
writer = Writer(fps=1/(FRAME_REP_INTERVAL), metadata={'artist':'Me'}, bitrate=3600)
anim.save('../../videos/'+folder+'_'+actual_filename + '_complete_model' +'.mp4',writer)
'''
print(len(Range_spectrum_history[0]))
print(Target_presence_list)
| src/New_target_classification/simple_features_driving_1RX_only.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Ke4g6-1yvShC" colab_type="text"
# **IMPORTING LIBRARIES**
# + id="dzJNy8IWvOUs" colab_type="code" colab={}
import numpy as np # linear algebra
import pandas as pd
import os
import matplotlib.pyplot as plt
import numpy as np
import PIL
import tensorflow as tf
from tensorflow.python.keras.applications import ResNet50
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Flatten, GlobalAveragePooling2D
from tensorflow.python.keras.layers import Dense, Flatten, Dropout
from tensorflow.python.keras.preprocessing import image
# + [markdown] id="g4qDXXr0qxXT" colab_type="text"
# **IMAGE PLOTS**
# + id="Wvpjg2vEq2PZ" colab_type="code" outputId="3f5e5134-4911-41c8-8743-2680f2386e3d" colab={"base_uri": "https://localhost:8080/", "height": 241}
img_path = 'training/n9/n9014.jpg'
img = image.load_img(img_path, target_size=(224, 224))
img
# + id="92EknhrbrGoU" colab_type="code" outputId="80b62c3b-405b-4509-fb9b-7c1074cfe3f6" colab={"base_uri": "https://localhost:8080/", "height": 241}
img_path = 'training/n5/n5018.jpg'
img = image.load_img(img_path, target_size=(224, 224))
img
# + [markdown] id="L52ZGpOIvYzU" colab_type="text"
# **IMPORTING DATASET**
# + id="9OYPExPVuVPn" colab_type="code" outputId="ffe99fb1-dc97-43c7-d3f5-907fe81d0dea" colab={"base_uri": "https://localhost:8080/", "height": 33}
from zipfile import ZipFile
file_name = "10-monkey-species.zip"
with ZipFile(file_name,"r") as zip:
zip.extractall()
print('Done')
# + [markdown] id="9-6MZ3kevfuM" colab_type="text"
# **Training and Validation files uploaded**
# + [markdown] id="cb83L9bfvmAh" colab_type="text"
# **Defining PATH**
# + id="cVG6R33jqNsh" colab_type="code" outputId="9dd47df9-931f-48ab-a8db-b7378621b241" colab={"base_uri": "https://localhost:8080/", "height": 33}
from zipfile import ZipFile
file_name = "training.zip"
with ZipFile(file_name,"r") as zip:
zip.extractall()
print('Done')
# + [markdown] id="hm94Mk5Ck6fF" colab_type="text"
# **TRAIN AND TEST PATH**
# + id="3rhwCikmqN8G" colab_type="code" outputId="db2e41aa-651b-4353-8f84-11b4598c9e0f" colab={"base_uri": "https://localhost:8080/", "height": 33}
from zipfile import ZipFile
file_name = "validation.zip"
with ZipFile(file_name,"r") as zip:
zip.extractall()
print('Done')
# + id="gD0ZWe07uX1p" colab_type="code" colab={}
train_dir = 'training'
val_dir = 'validation'
# + [markdown] id="BsTWrCuRk-zg" colab_type="text"
# **CHECKING NUMBER OF CLASSES IN THE DATASET**
# + id="Xmj9O4AhueNG" colab_type="code" outputId="2fb0262a-a24a-43df-fc6d-8c7629b6df3c" colab={"base_uri": "https://localhost:8080/", "height": 335}
labels = pd.read_csv("monkey_labels.txt")
num_classes = labels['Label'].size
labels
# + [markdown] id="qDT2hbNjlGkU" colab_type="text"
# **INPUT PARAMETERS**
# + id="iuHKeIlQvF-j" colab_type="code" colab={}
IMAGE_WIDTH = 300
IMAGE_HEIGHT = 300
# + [markdown] id="1kQF5eTOlPkI" colab_type="text"
# ****DATA GENERATOR**
#
# Keras uses a so-called data-generator for inputting data into the neural network, which will loop over the data for eternity.
# **
# + [markdown] id="BFyHUEBTlVuL" colab_type="text"
# ****Data Generator for Train and Test with NO TRANSFORMATIONS****
# + id="7nGJV3bKvHWX" colab_type="code" colab={}
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.applications.inception_v3 import preprocess_input
## use inception's own preprocess function
train_data_gen_aug=ImageDataGenerator(
preprocessing_function=preprocess_input,
rotation_range=35,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
horizontal_flip=True,
fill_mode='nearest' #default
)
validation_data_gen=ImageDataGenerator(
preprocessing_function=preprocess_input
)
# + [markdown] id="nQTHYgrulef9" colab_type="text"
# ****Batch_Size ****
# + [markdown] id="Zf1I88KilmfJ" colab_type="text"
# Data Generators processes and returns images in batches.
#
# Batch sizes should be small for faster processing
# + id="wb9ZSf42vJab" colab_type="code" outputId="b84bdeb5-98ff-4bda-ff53-183836a6af84" colab={"base_uri": "https://localhost:8080/", "height": 50}
train_gen=train_data_gen_aug.flow_from_directory(train_dir,
target_size=(IMAGE_WIDTH,
IMAGE_HEIGHT),
batch_size=BATCH_SIZE,
shuffle=True,
class_mode="categorical")
val_gen = validation_data_gen.flow_from_directory(val_dir,
target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
batch_size = BATCH_SIZE,
class_mode="categorical")
# + id="1HKyrA_1iYyB" colab_type="code" colab={}
train_count=1097#train_size
val_count=272#test_size
# + [markdown] id="MY7l-zP3ibrs" colab_type="text"
# **IMPORTING INCEPTION V3 PRETRAINED WEIGHTS **
# + id="uFbUsCNiqgkK" colab_type="code" colab={}
from google.colab import files
# + id="9l-MBeRTPz93" colab_type="code" outputId="3e3f035f-e4d2-4247-dbee-382a2fef92ff" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY> "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 70}
uploaded = files.upload()
# + [markdown] id="JMeyIYYWipQX" colab_type="text"
# **MODEL SETUP**
# + id="JnRGkvDeirvc" colab_type="code" colab={}
InceptionV3_model=InceptionV3(include_top=False,
pooling='avg',
weights='inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
)
# + [markdown] id="e2yEARwbi65R" colab_type="text"
# We can see that the last layer in the convolutional layer is is GlobalAveragePooling2D
#
# We wiil be building our Transfer Learning model on top of this layer
#
#
# Adding 1 layer of FULLY ONNECTED LAYER with Drop-out and
#
# a classification layer with softmax with 10 output classses
# + id="C6wmOQBevUs3" colab_type="code" colab={}
from keras.applications import InceptionV3
from keras.models import Sequential
from keras.layers import Dense, GlobalAveragePooling2D, Dropout
# set up the model
model=Sequential()
# add inception pretrained model, the wieghts 80Mb
model.add(InceptionV3(include_top=False,
pooling='avg',
weights='inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
))
# use relu as activation function "vanishing gradiends" :)
model.add(Dense(512, activation="relu"))
# add drop out to avoid overfitting
model.add(Dropout(0.5))#Dropping a neuron with 0.5 probability gets the highest variance for this distribution.
model.add(Dense(num_classes, activation="softmax"))
# + [markdown] id="lUwTrc9SjE77" colab_type="text"
# Using dropout layer to minimize overfitting
#
#
# Dropout(0.5) --------Dropping a neuron with 0.5 probability gets the highest variance for this distribution
# + [markdown] id="n773FxETjIKP" colab_type="text"
# **FREEZING THE PRETRAINED LAYER**
# + id="9y5t9K6WvYuq" colab_type="code" colab={}
# do not need to train the pre train layer
model.layers[0].trainable=False
# + [markdown] id="38PTD4rUjRim" colab_type="text"
# ****SUMMARY OF TENSORS FLOWING b/w the VGG_16 Layers****
# + id="S9YQDWjwwEMJ" colab_type="code" outputId="4cead238-fa09-4db0-e96c-66aea253ea82" colab={"base_uri": "https://localhost:8080/", "height": 261}
model.summary()
# + [markdown] id="4mTB6hCyjcHa" colab_type="text"
# ****COMPILING MODEL****
# + id="E5PK3_O8wJIw" colab_type="code" colab={}
# from keras import optimizers
# adam = optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.00001)
# use adam to avoid overfitting
model.compile(loss="categorical_crossentropy",
optimizer='adam',
metrics=["accuracy"])
# + [markdown] id="BUJStynZjiu-" colab_type="text"
# **TRAIN MODEL**
# + id="ltFj1Xi5wOBf" colab_type="code" outputId="e2f9d3f3-5485-4ef1-9fe8-543901a9a099" colab={"base_uri": "https://localhost:8080/", "height": 180}
model_history = model.fit_generator(train_gen,
steps_per_epoch=5,
epochs=5,
validation_data=val_gen,
validation_steps=val_count // BATCH_SIZE
)
# + [markdown] id="UaihD5kfkwV3" colab_type="text"
# ****ACCURACY and ERROR PLOT****
# + id="Nb2ZEfFHzjK0" colab_type="code" outputId="6164540f-98d5-4d55-828d-1e4e9357edb0" colab={"base_uri": "https://localhost:8080/", "height": 707}
acc = model_history.history['acc']
val_acc = model_history.history['val_acc']
loss = model_history.history['loss']
val_loss = model_history.history['val_loss']
epochs = range(1, len(acc) + 1)
import matplotlib.pyplot as plt
plt.title('Training and validation accuracy')
plt.plot(epochs, acc, 'red', label='Training acc')
plt.plot(epochs, val_acc, 'blue', label='Validation acc')
plt.legend()
plt.figure()
plt.title('Training and validation loss')
plt.plot(epochs, loss, 'red', label='Training loss')
plt.plot(epochs, val_loss, 'blue', label='Validation loss')
plt.legend()
plt.show()
# + [markdown] id="XRoL-ooDkKsU" colab_type="text"
# ****DATA AUGMENTATION****
# + [markdown] id="j07E_SZakOjX" colab_type="text"
# **We have a small training-set so it helps to artificially inflate its size by making various transformations to the images. We use a built-in data-generator that can make these random transformations.**
# + [markdown] id="iGLAXzPPkWzq" colab_type="text"
# **** DATA GENERATOR FOR TRAIN****
# + id="qNdiXd3hkaTT" colab_type="code" colab={}
train_data_gen_aug=ImageDataGenerator(
preprocessing_function=preprocess_input,
rotation_range=180,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.1,
horizontal_flip=True,
fill_mode='nearest'
)
# + [markdown] id="2HBZad2JkdNx" colab_type="text"
# ****DATA GENERATOR FOR TEST with no Transformations**
#
# No transformations should be performed on the test set since we to predict the exact classification accuarcy on these images
# **
# + id="W2tPCbBZkaRs" colab_type="code" colab={}
validation_data_gen=ImageDataGenerator(
preprocessing_function=preprocess_input
)
# + id="LB4VW9KQkaOR" colab_type="code" outputId="2e2a8642-7655-4bc2-f64a-c7c824055576" colab={"base_uri": "https://localhost:8080/", "height": 50}
train_gen_aug=train_data_gen_aug.flow_from_directory(train_dir,
target_size=(IMAGE_WIDTH,
IMAGE_HEIGHT),
batch_size=BATCH_SIZE,
shuffle=True,
class_mode="categorical")
val_gen_aug = validation_data_gen.flow_from_directory(val_dir,
target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
batch_size = BATCH_SIZE,
class_mode="categorical")
# + id="mzwJLflYkaL9" colab_type="code" colab={}
model_aug=Sequential()
# add inception pretrained model, the wieghts 80Mb
model_aug.add(InceptionV3(include_top=False,
pooling='avg',
weights='inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
))
# use relu as activation function "vanishing gradiends" :)
model_aug.add(Dense(512, activation="relu"))
# add drop out to avoid overfitting
model_aug.add(Dropout(0.5))
model_aug.add(Dense(num_classes, activation="softmax"))
# + [markdown] id="Je7-DZ21k3T5" colab_type="text"
# ****FREEZING Pretrained Layers****
# + id="8X_3OeDLkaHW" colab_type="code" colab={}
model_aug.layers[0].trainable=False
# + id="5eptGUMSlGTw" colab_type="code" outputId="fdaaf76d-8ab8-40c5-9901-bf6a08b2541b" colab={"base_uri": "https://localhost:8080/", "height": 261}
model_aug.summary()
# + [markdown] id="8mPCo7iklJpg" colab_type="text"
# **COMPILE MODEL**
# + id="MIX7ubNClI7P" colab_type="code" colab={}
model_aug.compile(loss="categorical_crossentropy",
optimizer='adam',
metrics=["accuracy"])
# + [markdown] id="dFBHj9L0lTRn" colab_type="text"
# ****TRANING MODEL****
# + id="qQDwV0URlWMi" colab_type="code" outputId="18ca0458-6982-4734-ef9b-94d79f7cd245" colab={"base_uri": "https://localhost:8080/", "height": 180}
model_aug_history = model_aug.fit_generator(train_gen_aug,
steps_per_epoch=5,
epochs=5,
validation_data=val_gen_aug,
validation_steps=val_count // BATCH_SIZE
)
# + [markdown] id="5PI2u9zwlZGh" colab_type="text"
# ****ACCURACY and ERROR PLOT****
# + id="pt5WHK324Nso" colab_type="code" outputId="3ecc30a3-a036-410e-d545-1bbfa55098a6" colab={"base_uri": "https://localhost:8080/", "height": 707}
acc = model_aug_history.history['acc']
val_acc = model_aug_history.history['val_acc']
loss = model_aug_history.history['loss']
val_loss = model_aug_history.history['val_loss']
epochs = range(1, len(acc) + 1)
import matplotlib.pyplot as plt
plt.title('Training and validation accuracy')
plt.plot(epochs, acc, 'red', label='Training acc')
plt.plot(epochs, val_acc, 'blue', label='Validation acc')
plt.legend()
plt.figure()
plt.title('Training and validation loss for Augmented model')
plt.plot(epochs, loss, 'red', label='Training loss')
plt.plot(epochs, val_loss, 'blue', label='Validation loss')
plt.legend()
plt.show()
# + [markdown] id="HihJb5EmlfOb" colab_type="text"
# ****Removing Drop-out Layer****
# + [markdown] id="varDiiCwl0vN" colab_type="text"
# We will accessing performance by removing the dropout layer on the basis of validation accuracy
# + [markdown] id="sPCaXTEelkwy" colab_type="text"
# **We will accessing performance by removing the dropout layer on the basis of validation accuracy**
# + id="zaVuTyqc4td8" colab_type="code" colab={}
from keras.applications import InceptionV3
from keras.models import Sequential
from keras.layers import Dense, GlobalAveragePooling2D, Dropout
# set up the model
model_NoDropOut=Sequential()
# add inception pretrained model, the wieghts 80Mb
model_NoDropOut.add(InceptionV3(include_top=False,
pooling='avg',
weights='inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
))
# use relu as activation function "vanishing gradiends" :)
model_NoDropOut.add(Dense(512, activation="relu"))
# add drop out to avoid overfitting
#model_NoDropOut.add(Dropout(0.5))
model_NoDropOut.add(Dense(num_classes, activation="softmax"))
# + [markdown] id="c0Wc0ySjl_7T" colab_type="text"
# **FREEZING PRETRAINED LAYERS**
# + id="eQno4M1gmF4i" colab_type="code" colab={}
# do not need to train the pre train layer
model_NoDropOut.layers[0].trainable=False
# + id="uPMejRR98hS4" colab_type="code" outputId="c2dfbca3-5a15-446b-ab6a-dc81cd6a518b" colab={"base_uri": "https://localhost:8080/", "height": 228}
model_NoDropOut.summary()
# + [markdown] id="QMVRlTWRmY7I" colab_type="text"
# ****COMPILE MODEL****
# + id="jmDf7-u98z5k" colab_type="code" colab={}
# from keras import optimizers
# adam = optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.00001)
# use adam to avoid overfitting
model_NoDropOut.compile(loss="categorical_crossentropy",
optimizer='adam',
metrics=["accuracy"])
# + [markdown] id="LRodtay0mfnb" colab_type="text"
# **TRAIN MODEL**
# + id="EoLrWaaz81nt" colab_type="code" outputId="211278cb-8233-4e15-e8ec-613c8f79204c" colab={"base_uri": "https://localhost:8080/", "height": 180}
model_NoDropOut_history = model_NoDropOut.fit_generator(train_gen,
steps_per_epoch=5,
epochs=5,
validation_data=val_gen,
validation_steps=val_count // BATCH_SIZE
)
# + [markdown] id="KzpNJHg6kn0l" colab_type="text"
# ****ACCURACY and ERROR PLOT****
# + id="P_gkRWuRFVOp" colab_type="code" outputId="330cbc02-91eb-4fc6-8b31-e590fc9db8ae" colab={"base_uri": "https://localhost:8080/", "height": 707}
acc = model_NoDropOut_history.history['acc']
val_acc = model_NoDropOut_history.history['val_acc']
loss = model_NoDropOut_history.history['loss']
val_loss = model_NoDropOut_history.history['val_loss']
epochs = range(1, len(acc) + 1)
import matplotlib.pyplot as plt
plt.title('Training and validation accuracy')
plt.plot(epochs, acc, 'red', label='Training acc')
plt.plot(epochs, val_acc, 'blue', label='Validation acc')
plt.legend()
plt.figure()
plt.title('Training and validation loss')
plt.plot(epochs, loss, 'red', label='Training loss')
plt.plot(epochs, val_loss, 'blue', label='Validation loss')
plt.legend()
plt.show()
# + [markdown] id="XJBCLMp8Fckx" colab_type="text"
# **Add 1 more Fully Connected Layer**
# + id="6pzDooM6Wvtr" colab_type="code" colab={}
from keras.applications import InceptionV3
from keras.models import Sequential
from keras.layers import Dense, GlobalAveragePooling2D, Dropout
# set up the model
model_FC=Sequential()
# add inception pretrained model, the wieghts 80Mb
model_FC.add(InceptionV3(include_top=False,
pooling='avg',
weights='inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
))
# use relu as activation function "vanishing gradiends" :)
model_FC.add(Dense(512, activation="relu"))
# add drop out to avoid overfitting
model_FC.add(Dropout(0.5))#Dropping a neuron with 0.5 probability gets the highest variance for this distribution.
model_FC.add(Dense(512, activation="relu"))
# add drop out to avoid overfitting
model_FC.add(Dropout(0.5))
model_FC.add(Dense(num_classes, activation="softmax"))
# + [markdown] id="TJEK2b8lnB0P" colab_type="text"
# **FREEZING PRETRAINED LAYERS**
# + id="G2pr4gNAnLko" colab_type="code" colab={}
# do not need to train the pre train layer
model_FC.layers[0].trainable=False
# + id="tvGlRldWnPoR" colab_type="code" outputId="783b1227-433b-440b-d477-65d3086ead8a" colab={"base_uri": "https://localhost:8080/", "height": 326}
model_FC.summary()
# + [markdown] id="Ku_H9dfOl032" colab_type="text"
# **COMPILE MODEEL**
# + id="uDiQdqmhVJSv" colab_type="code" colab={}
# from keras import optimizers
# adam = optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.00001)
# use adam to avoid overfitting
model_FC.compile(loss="categorical_crossentropy",
optimizer='adam',
metrics=["accuracy"])
# + [markdown] id="qSlGjUEvl5j2" colab_type="text"
# **TRAIN MODEL**
# + id="CYhsgdaPVzT8" colab_type="code" outputId="ac57a29e-3463-4c71-f534-f54458d45338" colab={"base_uri": "https://localhost:8080/", "height": 180}
model_FC_history = model_FC.fit_generator(train_gen,
steps_per_epoch=5,
epochs=5,
validation_data=val_gen,
validation_steps=val_count // BATCH_SIZE
)
# + [markdown] id="9aGx43mWklua" colab_type="text"
# ****ACCURACY and ERROR PLOT****
# + id="nQqS37c6V22U" colab_type="code" outputId="b4e2c6f8-b3b1-49e8-f1b8-58a669f8bf99" colab={"base_uri": "https://localhost:8080/", "height": 707}
acc = model_FC_history.history['acc']
val_acc = model_FC_history.history['val_acc']
loss = model_FC_history.history['loss']
val_loss = model_FC_history.history['val_loss']
epochs = range(1, len(acc) + 1)
import matplotlib.pyplot as plt
plt.title('Training and validation accuracy')
plt.plot(epochs, acc, 'red', label='Training acc')
plt.plot(epochs, val_acc, 'blue', label='Validation acc')
plt.legend()
plt.figure()
plt.title('Training and validation loss')
plt.plot(epochs, loss, 'red', label='Training loss')
plt.plot(epochs, val_loss, 'blue', label='Validation loss')
plt.legend()
plt.show()
# + [markdown] id="QqjU6PBUhUxb" colab_type="text"
# **Varying Nodes in FC layers**
# + id="53AwapPWhYZ2" colab_type="code" colab={}
from keras.applications import InceptionV3
from keras.models import Sequential
from keras.layers import Dense, GlobalAveragePooling2D, Dropout
# set up the model
model4=Sequential()
# add inception pretrained model, the wieghts 80Mb
model4.add(InceptionV3(include_top=False,
pooling='avg',
weights='inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
))
# use relu as activation function "vanishing gradiends" :)
model4.add(Dense(512, activation="relu"))
# add drop out to avoid overfitting
model4.add(Dropout(0.5))
model4.add(Dense(256, activation="relu"))
# add drop out to avoid overfitting
model4.add(Dropout(0.5))#Dropping a neuron with 0.5 probability gets the highest variance for this distribution.
model4.add(Dense(num_classes, activation="softmax"))
# + id="zJ4SmB2fiSbq" colab_type="code" colab={}
# do not need to train the pre train layer
model4.layers[0].trainable=False
# + id="6ev7FG2ziShv" colab_type="code" outputId="7b840ed0-9c83-4784-d915-3ff7303f4f9a" colab={"base_uri": "https://localhost:8080/", "height": 10840}
model4.layers[0].summary()
# + id="pMKKWUb_pRSy" colab_type="code" colab={}
# + id="5maCnVBCiSgI" colab_type="code" outputId="a70d5be3-11b7-4572-ec97-9b4126ab989f" colab={"base_uri": "https://localhost:8080/", "height": 326}
model4.summary()
# + id="ceejTN-RpTIy" colab_type="code" colab={}
# from keras import optimizers
# adam = optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.00001)
# use adam to avoid overfitting
model4.compile(loss="categorical_crossentropy",
optimizer='adam',
metrics=["accuracy"])
# + id="18F_V9IDpJes" colab_type="code" outputId="97c1b2f1-c30e-43eb-d1f3-5f4aaf6d819d" colab={"base_uri": "https://localhost:8080/", "height": 180}
model4_history = model4.fit_generator(train_gen,
steps_per_epoch=5,
epochs=5,
validation_data=val_gen,
validation_steps=val_count // BATCH_SIZE
)
# + [markdown] id="DAKQhfXNkdP7" colab_type="text"
# **ACCURACY and ERROR PLOT**
# + id="6Z-hfPrfi2sJ" colab_type="code" outputId="99559211-47ab-4066-b612-3cdfe7fb63dc" colab={"base_uri": "https://localhost:8080/", "height": 707}
acc = model_FC_history.history['acc']
val_acc = model_FC_history.history['val_acc']
loss = model_FC_history.history['loss']
val_loss = model_FC_history.history['val_loss']
epochs = range(1, len(acc) + 1)
import matplotlib.pyplot as plt
plt.title('Training and validation accuracy')
plt.plot(epochs, acc, 'red', label='Training acc')
plt.plot(epochs, val_acc, 'blue', label='Validation acc')
plt.legend()
plt.figure()
plt.title('Training and validation loss')
plt.plot(epochs, loss, 'red', label='Training loss')
plt.plot(epochs, val_loss, 'blue', label='Validation loss')
plt.legend()
plt.show()
| Project/Models/Inception_V3_ADS_Reseach_Project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:env_multilingual_class]
# language: python
# name: conda-env-env_multilingual_class-py
# ---
# + [markdown] Collapsed="false"
# ## Experiments with Embeddings
# + [markdown] Collapsed="false"
# This notebook is used to experiment with different embeddings we retrieve from language models like BERT and GPT-2.
# Ultimately, words translated in different languages are plotted against each other to show the capabilities of cross-lingual embeddings.
# + Collapsed="false"
import matplotlib.pyplot as plt
import numpy as np
from sklearn.manifold import TSNE
# + [markdown] Collapsed="false"
# ### 1. Single Sentence
# + [markdown] Collapsed="false"
# #### Extract Embeddings from BERT
# + Collapsed="false"
# from https://github.com/huggingface/transformers/issues/1950
import tensorflow as tf
from transformers import BertTokenizer, TFBertModel
MODEL_TYPE = 'bert-base-multilingual-uncased'
sequence = ["Today, the sun is shining."]
tokenizer = BertTokenizer.from_pretrained(MODEL_TYPE)
model = TFBertModel.from_pretrained(MODEL_TYPE)
# + Collapsed="false"
def get_embeddings(sequence):
input_ids = []
outputs = []
last_hidden_states_words = []
last_hidden_states_sentence = []
token_ids = []
for index,sequ in enumerate(sequence):
input_ids.append(tf.constant(tokenizer.encode(sequ[index]))[None, :])
outputs.append(model(input_ids[index]))
last_hidden_states_words.append(outputs[index][0])
last_hidden_states_sentence.append(outputs[index][1])
token_ids.append(tokenizer.encode_plus(sequ, max_length=50, pad_to_max_length=False)['input_ids'])
return last_hidden_states_words, last_hidden_states_sentence, token_ids, labels
# + Collapsed="false"
def get_embeddings_one_sentence(sequence):
input_ids = tf.constant(tokenizer.encode(sequence[0]))[None, :] # Batch size 1
outputs = model(input_ids)
last_hidden_states_words = outputs[0]
last_hidden_states_sentence = outputs[1]
token_ids = tokenizer.encode_plus(sequence[0], max_length=50, pad_to_max_length=False)['input_ids']
labels = [tokenizer.decode(int(token_id)) for token_id in token_ids]
return last_hidden_states_words, last_hidden_states_sentence, token_ids, labels
# + Collapsed="false"
hidden_words, hidden_sentence, tokens, labels = get_embeddings_one_sentence(sequence)
tokens
# + [markdown] Collapsed="false"
# #### Dimensionality Reduction
# + [markdown] Collapsed="false"
# ##### t-SNE
# + Collapsed="false"
# adapted from https://github.com/ioannispartalas/CrossLingual-NLP-AMLD2020/blob/master/notebooks/AMLD%20Intro.ipynb
# For more information of TSNE: https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html
# + Collapsed="false"
# error for three dimensions
def plot_embeddings(tokens,labels,sequences,dimensions=2,force_simple=False):
'''
Plots the embeddings together with the token names in a 2-dimensional space.
Args:
tokens: list (of lists) of the tokens
labels: concatenated list of all the labels of the tokens
sequences: list of input strings to be tokenized
dimensions: defines the dimensions of the plot, at the moment only 2 works
force_simple: forces the function to handle the input sequence as if it were a single sequence
Outputs: embedding plot
'''
tsne_model = TSNE(perplexity=1.5, n_components=dimensions, init='pca', n_iter=2500, random_state=23)
print(len(sequences))
if len(sequences)>1 and force_simple==False:
tsne_inputs = []
for index,sequence in enumerate(sequences):
tsne_inputs.append(np.array(tokens[index][0]))
tsne_outputs = []
for index,sequence in enumerate(sequences):
tsne_outputs.append(tsne_model.fit_transform(np.array(tsne_inputs[index])))
x = [list(zip(*tsne_outputs[index]))[0] for index,sequence in enumerate(sequences)]
y = [list(zip(*tsne_outputs[index]))[1] for index,sequence in enumerate(sequences)]
if dimensions == 3:
z = [list(zip(*tsne_outputs[index]))[2] for index,sequence in enumerate(sequences)]
plt.figure(figsize=(7, 6))
if dimensions == 2:
for index in range(len(sequences)):
plt.scatter(x[index],y[index])
print(len(x[index]))
for i in range(len(x[index])):
if index != 0:
#i+index*len(x[index-1]) for the second loop is (0-10)+1*9 (which is the length of the previous iteration)
#print(i+index*len(x[index-1]))
#print(index)
plt.annotate(labels[i+(index-1)*len(x[index-1])],
xy=(x[index][i], y[index][i]),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
else:
plt.annotate(labels[i],
xy=(x[index][i], y[index][i]),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
#elif dimensions == 3:
#for index in range(len(sequences)):
# plt.scatter(x[index],y[index],z[index])
# print(len(x[index]))
# for i in range(len(x[index])):
# if index != 0:
# #i+index*len(x[index-1]) for the second loop is (0-10)+1*9 (which is the length of the previous iteration)
# plt.annotate(labels[i+index*len(x[index-1])],
# xyz=(x[index][i], y[index][i], z[index][i]),
# xyztext=(5, 2, 2),
# textcoords='offset points',
# ha='right',
# va='bottom')
# else:
# plt.annotate(labels[i],
# xyz=(x[index][i], y[index][i], z[index][i]),
# xytext=(5, 2, 2),
# textcoords='offset points',
# ha='right',
# va='bottom')
#
else:
tsne_outputs = tsne_model.fit_transform(tokens)
print(tsne_outputs)
x = tsne_outputs[:,0]
y = tsne_outputs[:,1]
if dimensions == 3:
z = tsne_outputs[:,2]
plt.figure(figsize=(7, 6))
if dimensions == 2:
plt.scatter(x,y)
print(len(x))
print(len(labels))
for i in range(len(x)):
plt.annotate(labels[i],
xy=(x[i], y[i]),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
#elif dimensions == 3:
#plt.scatter(x,y,z)
#print(len(x))
#print(len(labels))
#for i in range(len(x)):
# plt.annotate(labels[i],
# xyz=(x[i], y[i], z[i]),
# xyztext=(5, 2, 2),
# textcoords='offset points',
# ha='right',
# va='bottom')
# + Collapsed="false"
def plot_embeddings_old(tokens,labels,sequences):
tsne_model = TSNE(perplexity=1.5, n_components=2, init='pca', n_iter=2500, random_state=23)
new_values = tsne_model.fit_transform(tokens)
x = new_values[:,0]
y = new_values[:,1]
plt.figure(figsize=(7, 6))
if len(sequences)>1:
flatten_tokens = [item for sublist in token_ids_multiple for item in sublist]
start_next_sequence = int(len(flatten_tokens) / len(sequences))
plt.scatter(x[:start_next_sequence],y[:start_next_sequence])
for index,sequence in enumerate(sequences[1:]):
print(start_next_sequence*index)
print(type(start_next_sequence*index))
plt.scatter(x[start_next_sequence*index:],y[start_next_sequence*index:])
else:
plt.scatter(x,y)
print(len(x))
print(len(labels))
for i in range(len(x)):
plt.annotate(labels[i],
xy=(x[i], y[i]),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
# + Collapsed="false"
hidden_words
# + Collapsed="false"
tokens = hidden_words.numpy()[0].tolist()
# + Collapsed="false"
plot_embeddings(tokens,labels,sequence,2)
# + Collapsed="false"
plot_embeddings_old(tokens,labels,sequence)
# + [markdown] Collapsed="false"
# ### 2. Multiple Sentences
# + [markdown] Collapsed="false"
# #### Extract Embeddings from BERT
# + Collapsed="false"
sequences = ["Ich bin heute mit dem Auto zur Arbeit gefahren.","I went to work by car today.","Je suis allé travailler en voiture aujourd'hui.",
"Das Mädchen meiner Nachbarn spielt gerne mit Teddybären.","My neighbour's girl likes to play with teddy bears.","La fille de mon voisin aime jouer avec les ours en peluche.",
"Letzten Sommer sind wir ein einem Korallenriff tauchen gegangen.","Last summer we went diving on a coral reef.","L'été dernier, nous sommes allés plonger sur un récif de corail.",
"Im Frühling beginnt die Spargelsaison.","The asparagus season begins in spring.","La saison des asperges commence au printemps.",
"Das Fussballstadion ist einmal mehr total ausverkauft.","The football stadium is once again totally sold out.","Le stade de football est à nouveau totalement épuisé."]
sequences
# + Collapsed="false"
hidden_words_mult, hidden_sentence_mult, tokens_mult, labels_mult = get_embeddings(sequences)
len(hidden_sentence_mult)
# + [markdown] Collapsed="false"
# #### Dimensionality Reduction
# + [markdown] Collapsed="false"
# ##### T-SNE
# + Collapsed="false"
for i in hidden_sentence_mult:
print(i.numpy().shape)
break
# + Collapsed="false"
hidden_words_mult
# + Collapsed="false"
sentence_repr_mult = []
sentence_repr_mult = [hidden_sentence_mult[0].numpy()[0].tolist() for sentence in sequences]
#sentence_repr_mult
# + Collapsed="false"
# change init to 'pca'
#new_values = []
tsne_model = TSNE(perplexity=5, n_components=2, init='random', n_iter=2500, random_state=23, learning_rate=100)
new_values = tsne_model.fit_transform(sentence_repr_mult)
#new_values = [tsne_model.fit_transform(sentence_repr_mult) for sequence in sequences]
new_values
# + Collapsed="false"
x = new_values[:,0]
y = new_values[:,1]
print(len(x))
plt.figure(figsize=(7, 6))
for index in range(len(x)):
plt.scatter(x[index],y[index])
#for i in range(len(x)):
plt.annotate(sequences[index],
# plt.annotate(labels_multiple[i],
xy=(x[index], y[index]),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
# + Collapsed="false"
#hidden_words_mult, hidden_sentence_mult, tokens_mult, labels_mult
#plot_embeddings(hidden_words_mult,labels_mult,sequences,2)
# + Collapsed="false"
#plot_embeddings(last_hidden_states_multiple,labels_multiple,sequences,3)
# dimension 3 requires a z axis and a new plot...
# + [markdown] Collapsed="false"
# ### Word translations
# + Collapsed="false"
words_en = ["sun","dog","queen","king","street","wonderful","car"]
words_de = ["Sonne","Hund","Königin","König","Strasse","wunderbar","Auto"]
words_fr = ["soleil","chien","reine","roi","rue","magnifique","voiture"]
words = [words_en,words_de,words_fr]
words
# + Collapsed="false"
# just tokenising these does not help! I need to put it through the model
words_en_tokenized = [tokenizer.tokenize(words) for words in words_en]
words_de_tokenized = [tokenizer.tokenize(words) for words in words_de]
words_fr_tokenized = [tokenizer.tokenize(words) for words in words_fr]
# + Collapsed="false"
hidden_words, hidden_sentence_mult, tokens_mult, labels_mult = get_embeddings(words)
# + Collapsed="false"
#words = [words_en, words_de, words_fr]
words = ["sun","dog","queen","king","street","wonderful","car","soleil","chien","reine","roi","rue","magnifique","voiture"]
#"Sonne","Hund","Königin","König","Strasse","wunderbar","Auto",
words
# + Collapsed="false"
len(words)
# + Collapsed="false"
#get_embeddings(words)
# + Collapsed="false"
# embeddings
input_ids_words = tf.constant(tokenizer.encode(words))[None, :]
outputs_words = model(input_ids_words)
last_hidden_states_words = outputs_words[0] # The last hidden-state is the first element of the output tuple
token_ids_words = tokenizer.encode_plus(words, max_length=50, pad_to_max_length=False)['input_ids']
# + Collapsed="false"
input_ids_words
# + Collapsed="false"
tokenizer.encode(["schon"])
# + Collapsed="false"
outputs_words[0]
# + Collapsed="false"
# for some reason, it does not tokenize the german words correctly!
token_ids_words
# + Collapsed="false"
labels_words = [tokenizer.decode(int(token_id)) for token_id in token_ids_words]
labels_words
# + Collapsed="false"
plot_input_words = last_hidden_states_words.numpy()[0].tolist()
# + Collapsed="false"
plot_embeddings(plot_input_words,labels_words,words,2,force_simple=True)
# + [markdown] Collapsed="false"
# ### Experiment with Embeddings for the Same Word
# + Collapsed="false"
similar_sequences = ["I am working from home today.","Has Fred come home from work yet?","You live at home but you need to work at the office."]
# + Collapsed="false"
input_ids_similar = []
outputs_similar = []
last_hidden_states_similar = []
token_ids_similar = []
for index,sequence in enumerate(similar_sequences):
print(index)
#input_ids_multiple = [tf.constant(tokenizer.encode(sequences[index]))[None, :] for index,sequence in enumerate(sequences)] # Batch size 1
input_ids_similar.append(tf.constant(tokenizer.encode(similar_sequences[index]))[None, :])
#for index,sequence in enumerate(sequences):
outputs_similar.append(model(input_ids_similar[index]))
last_hidden_states_similar.append(outputs_similar[index][0])
token_ids_similar.append(tokenizer.encode_plus(similar_sequences[index], max_length=50, pad_to_max_length=False)['input_ids'])
# + Collapsed="false"
flatten_tokens_similar = [item for sublist in token_ids_similar for item in sublist]
flatten_tokens_similar
# + Collapsed="false"
labels_similar = [tokenizer.decode(int(token_id)) for token_id in flatten_tokens_similar]
labels_similar
# + Collapsed="false"
plot_embeddings(last_hidden_states_similar,labels_similar,similar_sequences,2)
# + [markdown] Collapsed="false"
# ### Alternative Approach
# + Collapsed="false"
# following https://colab.research.google.com/drive/1TCgnpIwsr6uK4cP0Gk1RCCQrFy6s3Xc1
#pip install bert-embedding
from bert_embedding import BertEmbedding
# + Collapsed="false"
bert_embedding = BertEmbedding()
# + Collapsed="false"
# from: https://towardsdatascience.com/visualisation-of-embedding-relations-word2vec-bert-64d695b7f36
def get_visual_embs(sentence):
"""Get BERT embedding for the sentence,
project it to a 2D subspace where [CLS] is (1,0) and [SEP] is (0,1)."""
embs = bert_embedding([sentence])#, filter_spec_tokens=False)
tokens = embs[0][0]
embV = embs[0][1]
W = np.array(embV)
B = np.array([embV[0], embV[-1]])
Bi = np.linalg.pinv(B.T)
Wp = np.matmul(Bi,W.T)
return Wp, tokens
# + Collapsed="false"
embs = get_visual_embs("The sun is shining bright.")
embs
# + Collapsed="false"
#sentences = ['The sky is blue today.', 'The sea is blue today.']
sentences = similar_sequences
colors = ['blue', 'red', 'green']
plt.figure(figsize=(12,7))
plt.axhline(color='black')
plt.axvline(color='black')
for n,s in enumerate(sentences):
Wp, tokens = get_visual_embs(s)
print(Wp)
print(tokens)
plt.scatter(Wp[0,:], Wp[1,:], color=colors[n], marker='x', label=s)
rX = max(Wp[0,:])-min(Wp[0,:])
rY = max(Wp[1,:])-min(Wp[1,:])
rM = max(rX, rY)
eps = 0.005
eps2 = 0.005
for i, txt in enumerate(tokens):
plt.annotate(txt, (Wp[0,i]+rX*eps, Wp[1,i]+rX*eps))
if txt in ['[CLS]', '[SEP]']:
plt.annotate(txt, (Wp[0,i]+rX*eps, Wp[1,i]+rX*eps))
if txt == 'sky':
plt.annotate(txt, (Wp[0,i]+rX*eps*-6, Wp[1,i]+rX*eps*3))
if txt == 'sea':
plt.annotate(txt, (Wp[0,i]+rX*eps*2, Wp[1,i]+rX*eps*3))
if i>0:
plt.arrow(Wp[0,i-1],Wp[1,i-1], Wp[0,i]-Wp[0,i-1], Wp[1,i]-Wp[1,i-1], color=colors[n], head_length=rM*eps2*4, head_width=rM*eps2*2, length_includes_head=True)
plt.legend()
plt.show()
# + [markdown] Collapsed="false"
# ##### Tenses
# + Collapsed="false"
sentences = ['This is a horrible idea!', 'This was a horrible idea!', 'This will be a horrible idea!']
colors = ['blue', 'red', 'green']
plt.figure(figsize=(12,7))
plt.axhline(color='black')
plt.axvline(color='black')
for n,s in enumerate(sentences):
Wp, tokens = get_visual_embs(s)
print(tokens)
plt.scatter(Wp[0,:], Wp[1,:], color=colors[n], marker='x', label=s)
rX = max(Wp[0,:])-min(Wp[0,:])
rY = max(Wp[1,:])-min(Wp[1,:])
rM = max(rX, rY)
eps = 0.005
eps2 = 0.005
for i, txt in enumerate(tokens):
if txt in ['[CLS]', '[SEP]']:
plt.annotate(txt, (Wp[0,i]+rX*eps, Wp[1,i]+rX*eps))
#plt.annotate(txt, (Wp[0,i]+rX*eps, Wp[1,i]+rX*eps))
if i>0:
plt.arrow(Wp[0,i-1],Wp[1,i-1], Wp[0,i]-Wp[0,i-1], Wp[1,i]-Wp[1,i-1], color=colors[n], head_length=rM*eps2*4, head_width=rM*eps2*2, length_includes_head=True)
plt.legend()
plt.show()
# + [markdown] Collapsed="false"
# ##### Punctuation
# + Collapsed="false"
sentences = ['This is a horrible idea.', 'This is a horrible idea!', 'This is a horrible idea?']
colors = ['blue', 'red', 'green', 'yellow']
plt.figure(figsize=(12,7))
plt.axhline(color='black')
plt.axvline(color='black')
for n,s in enumerate(sentences):
Wp, tokens = get_visual_embs(s)
print(tokens)
plt.scatter(Wp[0,:], Wp[1,:], color=colors[n], marker='x', label=s)
rX = max(Wp[0,:])-min(Wp[0,:])
rY = max(Wp[1,:])-min(Wp[1,:])
rM = max(rX, rY)
eps = 0.005
eps2 = 0.005
for i, txt in enumerate(tokens):
if txt in ['[CLS]', '[SEP]']:
plt.annotate(txt, (Wp[0,i]+rX*eps, Wp[1,i]+rX*eps))
#plt.annotate(txt, (Wp[0,i]+rX*eps, Wp[1,i]+rX*eps))
if i>0:
plt.arrow(Wp[0,i-1],Wp[1,i-1], Wp[0,i]-Wp[0,i-1], Wp[1,i]-Wp[1,i-1], color=colors[n], head_length=rM*eps2*4, head_width=rM*eps2*2, length_includes_head=True)
plt.legend()
plt.show()
# + [markdown] Collapsed="false"
# ##### Complex Sentences
# + Collapsed="false"
# Some sentences from here: https://sentence.yourdictionary.com/mexico
sentences = ["In Mexico the national government is carrying out a consistent policy of developing its railway lines.",
"Borlaug also promoted the process (which proved wildly successful) of having two wheat-growing seasons in Mexico, one in the highlands, then another in the valley regions.",
"That's in Mexico, too.",
"In 1904 he went to Japan as war correspondent and in 1914 to Mexico in the same capacity."]
colors = ['blue', 'red', 'green', 'grey', 'purple', 'orange', 'lightblue', 'yellow']
plt.figure(figsize=(24,9))
plt.axhline(color='black')
plt.axvline(color='black')
for n,s in enumerate(sentences):
Wp, tokens = get_visual_embs(s)
print(tokens)
plt.scatter(Wp[0,:], Wp[1,:], color=colors[n], marker='x', label=s)
rX = max(Wp[0,:])-min(Wp[0,:])
rY = max(Wp[1,:])-min(Wp[1,:])
rM = max(rX, rY)
eps = 0.005
eps2 = 0.005
for i, txt in enumerate(tokens):
if txt in ['[CLS]', '[SEP]']:
plt.annotate(txt, (Wp[0,i]+rX*eps, Wp[1,i]+rX*eps))
if txt=='mexico':
plt.annotate(txt, (Wp[0,i]+rX*eps, Wp[1,i]+rX*eps))
if i>0:
plt.arrow(Wp[0,i-1],Wp[1,i-1], Wp[0,i]-Wp[0,i-1], Wp[1,i]-Wp[1,i-1], color=colors[n], head_length=rM*eps2*4, head_width=rM*eps2*2, length_includes_head=True)
plt.legend()
plt.show()
# + [markdown] Collapsed="false"
# ##### Non-real Sentences
# + Collapsed="false"
# Some sentences from here: https://sentence.yourdictionary.com/mexico
sentences = ["The the the the the the the the the",
". . . . . . . . . . .",
"gre htrha hthateh heta ger er",
"The sky is blue today.",
"<NAME> was a very good man and a loving husband."]
colors = ['blue', 'red', 'green', 'grey', 'purple', 'orange', 'lightblue', 'yellow']
plt.figure(figsize=(12,7))
plt.axhline(color='black')
plt.axvline(color='black')
for n,s in enumerate(sentences):
Wp, tokens = get_visual_embs(s)
print(tokens)
plt.scatter(Wp[0,:], Wp[1,:], color=colors[n], marker='x', label=s)
rX = max(Wp[0,:])-min(Wp[0,:])
rY = max(Wp[1,:])-min(Wp[1,:])
rM = max(rX, rY)
eps = 0.005
eps2 = 0.005
for i, txt in enumerate(tokens):
if txt in ['[CLS]', '[SEP]']:
plt.annotate(txt, (Wp[0,i]+rX*eps, Wp[1,i]+rX*eps))
if i>0:
plt.arrow(Wp[0,i-1],Wp[1,i-1], Wp[0,i]-Wp[0,i-1], Wp[1,i]-Wp[1,i-1], color=colors[n], head_length=rM*eps2*4, head_width=rM*eps2*2, length_includes_head=True)
plt.legend()
plt.show()
# + Collapsed="false"
# + Collapsed="false"
| notebook/00-Test/10_Embeddings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Visualization
# **Import**
import csv
import os
import numpy as np
from scipy import optimize
from itertools import product
import matplotlib.pyplot as plt
from tqdm import tqdm_notebook
import scipy as sp
from utils.data import load_data, save_results
from utils.models import SVM, SPR, PCA
from utils.kernels import GaussianKernel
# **Paths and Globals**
# +
CWD = os.getcwd()
DATA_DIR = os.path.join(CWD, "data")
RESULT_DIR = os.path.join(CWD, "results")
FILES = {0: {"train_mat": "Xtr0_mat100.csv",
"train": "Xtr0.csv",
"test_mat": "Xte0_mat100.csv",
"test": "Xte0.csv",
"label": "Ytr0.csv"},
1: {"train_mat": "Xtr1_mat100.csv",
"train": "Xtr1.csv",
"test_mat": "Xte1_mat100.csv",
"test": "Xte1.csv",
"label": "Ytr1.csv"},
2: {"train_mat": "Xtr2_mat100.csv",
"train": "Xtr2.csv",
"test_mat": "Xte2_mat100.csv",
"test": "Xte2.csv",
"label": "Ytr2.csv"}}
# -
# ## Test Kernel PCA
# **Linear Kernel**
[...]
# **Gaussian Kernel**
# +
# choose kernel and kernel's parameter
kernel = GaussianKernel(200)
# load data
i = 0
X_train, Y_train, X_test = load_data(i, data_dir=DATA_DIR, files_dict=FILES)
# compute PCA
pca = PCA(kernel)
pca.fit(X_train)
# print eigenvalues
plt.plot(pca._lambda)
plt.title("Eigenvalues of PCA")
# plot 2D graph of the data projected on the eigenvectors associated to the 2 highest eigenvalues
n = 2
X_proj = pca.proj(X_train,n)
# plot PCA
plt.figure(figsize=(7,7))
plt.scatter(X_proj[:,0], X_proj[:,1], s= 20, c = Y_train)
plt.title('PCA projection in feature space')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.show()
# -
| Data_Visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
import glob
import matplotlib.pyplot as plt
import matplotlib.patches as patch
import numpy as np
import pandas as pd
# %matplotlib inline
# %precision 4
plt.style.use('ggplot')
from scipy import linalg
np.set_printoptions(suppress=True)
# Students may (probably should) ignore this code. It is just here to make pretty arrows.
def plot_vectors(vs):
"""Plot vectors in vs assuming origin at (0,0)."""
n = len(vs)
X, Y = np.zeros((n, 2))
U, V = np.vstack(vs).T
plt.quiver(X, Y, U, V, range(n), angles='xy', scale_units='xy', scale=1)
xmin, xmax = np.min([U, X]), np.max([U, X])
ymin, ymax = np.min([V, Y]), np.max([V, Y])
xrng = xmax - xmin
yrng = ymax - ymin
xmin -= 0.05*xrng
xmax += 0.05*xrng
ymin -= 0.05*yrng
ymax += 0.05*yrng
plt.axis([xmin, xmax, ymin, ymax])
# -
# ### define matrix
A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(A)
# ### calculate eigendecomposition
values, vectors = np.linalg.eig(A)
print(values)
print(vectors)
# + active=""
# More specifically, the eigenvectors are the right-hand side eigenvectors and are normalized to unit length.
# -
# ## Gaussian Elimination
def gausselim(themat):
for i in range(len(themat[0])):
for j in range(i+1,len(themat)):
m = themat[j][i]/themat[i][i] # Ratio of (i,j) elt by (i,i) (diagonal) elt
themat[j] = [themat[j][k]-m*themat[i][k] for k in range(len(themat[0]))]
return themat
gausselim(A)
# ### Another way to create Matrix http://web.stanford.edu/class/cs231a/section/section1.pdf
v1 = np.array([3, 0, -2])
v2 = np.array([2, 0, -2])
v3 = np.array([0, 1, 1])
M = np.vstack([v1, v2, v3])
print(M)
v_orig = np.array([1, 2, 3])
v = np.expand_dims(v_orig, 1)
v
M.dot(v)
M@v
e1 = np.array([1,0])
e2 = np.array([0,1])
A = np.array([[2,3],[3,1]])
v1=A.dot(e1)
v2=A.dot(e2)
plt.figure(figsize=(8,4))
plt.subplot(1,2,1)
plot_vectors([e1, e2])
plt.subplot(1,2,2)
plot_vectors([v1,v2])
plt.tight_layout()
#help(plt.Circle)
plt.Circle(np.array([0,0]),radius=1)
plt.Circle.draw
# Note that the inner product is just matrix multiplication of a 1×n vector with an n×1 vector. In fact, we may write:
#
# <v,w>=v^{t} w
# The outer product of two vectors is just the opposite. It is given by:
#
# v⊗w=vw^{t}
v2
np.multiply(M, v)
v1 = v
v2 = np.expand_dims(M[:, 0], 1)
print(v1)
print(v2)
np.cross(v1, v2, axisa = 0, axisb = 0)
v1.dot(v2.T)
np.trace(M)
# ## Determinant and Inverse
np.linalg.norm(M)
np.linalg.det(M)
np.linalg.inv(M)
eigvals, eigvecs = np.linalg.eig(M)
print(eigvals)
U, S, Vtranspose = np.linalg.svd(M)
print(U)
print(S)
print(Vtranspose.T)
# ## Source: https://cs231n.github.io/python-numpy-tutorial/
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import interp2d
from scipy.linalg import inv, solve, det, eig
# +
fig, ax = plt.subplots(figsize=(10, 8))
# Set the axes through the origin
for spine in ['left', 'bottom']:
ax.spines[spine].set_position('zero')
for spine in ['right', 'top']:
ax.spines[spine].set_color('none')
ax.set(xlim=(-5, 5), ylim=(-5, 5))
ax.grid()
vecs = ((2, 4), (-3, 3), (-4, -3.5))
for v in vecs:
ax.annotate('', xy=v, xytext=(0, 0),
arrowprops=dict(facecolor='blue',
shrink=0,
alpha=0.7,
width=0.5))
ax.text(1.1 * v[0], 1.1 * v[1], str(v))
plt.show()
# -
# # Scalar Multiplication
# +
fig, ax = plt.subplots(figsize=(10, 8))
# Set the axes through the origin
for spine in ['left', 'bottom']:
ax.spines[spine].set_position('zero')
for spine in ['right', 'top']:
ax.spines[spine].set_color('none')
ax.set(xlim=(-5, 5), ylim=(-5, 5))
x = (2, 2)
ax.annotate('', xy=x, xytext=(0, 0),
arrowprops=dict(facecolor='blue',
shrink=0,
alpha=1,
width=0.5))
ax.text(x[0] + 0.4, x[1] - 0.2, '$x$', fontsize='16')
scalars = (-2, 2)
x = np.array(x)
for s in scalars:
v = s * x
ax.annotate('', xy=v, xytext=(0, 0),
arrowprops=dict(facecolor='red',
shrink=0,
alpha=0.5,
width=0.5))
ax.text(v[0] + 0.4, v[1] - 0.2, f'${s} x$', fontsize='16')
plt.show()
# +
fig = plt.figure(figsize=(10, 8))
ax = fig.gca(projection='3d')
x_min, x_max = -5, 5
y_min, y_max = -5, 5
α, β = 0.2, 0.1
ax.set(xlim=(x_min, x_max), ylim=(x_min, x_max), zlim=(x_min, x_max),
xticks=(0,), yticks=(0,), zticks=(0,))
gs = 3
z = np.linspace(x_min, x_max, gs)
x = np.zeros(gs)
y = np.zeros(gs)
ax.plot(x, y, z, 'k-', lw=2, alpha=0.5)
ax.plot(z, x, y, 'k-', lw=2, alpha=0.5)
ax.plot(y, z, x, 'k-', lw=2, alpha=0.5)
# Fixed linear function, to generate a plane
def f(x, y):
return α * x + β * y
# Vector locations, by coordinate
x_coords = np.array((3, 3))
y_coords = np.array((4, -4))
z = f(x_coords, y_coords)
for i in (0, 1):
ax.text(x_coords[i], y_coords[i], z[i], f'$a_{i+1}$', fontsize=14)
# Lines to vectors
for i in (0, 1):
x = (0, x_coords[i])
y = (0, y_coords[i])
z = (0, f(x_coords[i], y_coords[i]))
ax.plot(x, y, z, 'b-', lw=1.5, alpha=0.6)
# Draw the plane
grid_size = 20
xr2 = np.linspace(x_min, x_max, grid_size)
yr2 = np.linspace(y_min, y_max, grid_size)
x2, y2 = np.meshgrid(xr2, yr2)
z2 = f(x2, y2)
ax.plot_surface(x2, y2, z2, rstride=1, cstride=1, cmap=cm.jet,
linewidth=0, antialiased=True, alpha=0.2)
plt.show()
# -
# ## Eigen Values vectors plot
# +
A = ((1, 2),
(2, 1))
A = np.array(A)
evals, evecs = eig(A)
evecs = evecs[:, 0], evecs[:, 1]
fig, ax = plt.subplots(figsize=(10, 8))
# Set the axes through the origin
for spine in ['left', 'bottom']:
ax.spines[spine].set_position('zero')
for spine in ['right', 'top']:
ax.spines[spine].set_color('none')
ax.grid(alpha=0.4)
xmin, xmax = -3, 3
ymin, ymax = -3, 3
ax.set(xlim=(xmin, xmax), ylim=(ymin, ymax))
# Plot each eigenvector
for v in evecs:
ax.annotate('', xy=v, xytext=(0, 0),
arrowprops=dict(facecolor='blue',
shrink=0,
alpha=0.6,
width=0.5))
# Plot the image of each eigenvector
for v in evecs:
v = A @ v
ax.annotate('', xy=v, xytext=(0, 0),
arrowprops=dict(facecolor='red',
shrink=0,
alpha=0.6,
width=0.5))
# Plot the lines they run through
x = np.linspace(xmin, xmax, 3)
for v in evecs:
a = v[1] / v[0]
ax.plot(x, a * x, 'b-', lw=0.4)
plt.show()
# -
M = np.array([[-3, np.sqrt(2)], [np.sqrt(2), -2]])
M = np.array([[-0.5, 1], [-1,-0.5]])
val, vec = np.linalg.eig(M)
print(val)
print(vec)
M = np.array([[2, 5], [-2,-4]])
val, vec = np.linalg.eig(M)
print(val)
print(vec)
0.8452-0.169
vec = vec.T
vec
vec[0]*(1/vec[0][0])
vec[0]*(1/vec[0][1])
vec[1]*(1/vec[1][0])
vec[1]*(1/vec[1][1])
val, vec = np.linalg.eig(M)
np.linalg.norm(vec[1])
vec
np.sqrt(2)/2
| LinearAlgebra/Matrix_LinAlgebra.ipynb |