code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # imports
# +
import tensorflow as tf
from tensorflow import keras
import sklearn
from sklearn.metrics import roc_curve, auc, log_loss, precision_score, f1_score, recall_score, confusion_matrix
from sklearn.model_selection import KFold, StratifiedKFold
import matplotlib as mplb
import matplotlib.pyplot as plt
#plt.style.use('ggplot')
import numpy as np
import pandas as pd
import seaborn as sns
import os
import zipfile
import shutil
import getpass
import requests
from IPython.display import clear_output
from tqdm.notebook import tqdm
# -
# # Config
# +
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "2"  # silence TF info/warning log spam
physical_devices = tf.config.list_physical_devices('GPU')
# Guard the GPU setup: on a CPU-only machine list_physical_devices returns an
# empty list, and indexing [0] unconditionally raised IndexError.
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
seed_val = 2020
# set seed for reproducibility across numpy and tensorflow
np.random.seed(seed=seed_val)
tf.random.set_seed(seed=seed_val)
# -
# # Params
# +
# Global image / training parameters.
IMG_SIZE = 300                          # square input edge in pixels
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)     # RGB input shape for the network
BATCH_SIZE = 32
class_names = ['NEG', 'POS']            # index 0 = negative, 1 = positive (TB)
base_dir = '../'
# Expected on-disk layout: ../Datasets/Images/{train,val,test} and ../Datasets/Csv/*.csv
train_images_dir = os.path.join(base_dir, 'Datasets/Images', 'train')
val_images_dir = os.path.join(base_dir, 'Datasets/Images', 'val')
test_images_dir = os.path.join(base_dir, 'Datasets/Images', 'test')
train_csv_path = os.path.join(base_dir, 'Datasets/Csv', 'Train.csv')
test_csv_path = os.path.join(base_dir, 'Datasets/Csv', 'Test.csv')
# NOTE(review): sample_csv_path points at Train.csv, not a SampleSubmission
# file — looks like a copy-paste slip; confirm the intended file.
sample_csv_path = os.path.join(base_dir, 'Datasets/Csv', 'Train.csv')
# -
train_df = pd.read_csv(train_csv_path)
test_df = pd.read_csv(test_csv_path)
sample_sub_df = pd.read_csv(sample_csv_path)
train_df.head()
test_df.head()
sample_sub_df.tail()
# # Datasets & Dataloaders
# Augmentation pipeline shared by the training and validation splits.
# NOTE(review): rescale=None keeps pixel values in [0, 255]; the display code
# below divides by 255 manually before imshow.
image_generator = keras.preprocessing.image.ImageDataGenerator(featurewise_center=False,
                                                               rotation_range=25,
                                                               width_shift_range=0.1,
                                                               zoom_range=0.1,
                                                               fill_mode='nearest',
                                                               horizontal_flip=True,
                                                               #vertical_flip=True,
                                                               rescale=None,
                                                               validation_split=0.15)
# +
# NOTE(review): the 'train' path segment appears twice (…/Images/train/train)
# — the same nesting is assumed by get_train_images_and_labels below; verify
# the directory layout matches.
train_generator = image_generator.flow_from_directory(directory=train_images_dir+'/train',
                                                      target_size=(IMG_SIZE, IMG_SIZE),
                                                      batch_size=BATCH_SIZE,
                                                      seed=seed_val,
                                                      subset='training')
validation_generator = image_generator.flow_from_directory(directory=train_images_dir+'/train',
                                                           target_size=(IMG_SIZE, IMG_SIZE),
                                                           batch_size=BATCH_SIZE,
                                                           seed=seed_val,
                                                           subset='validation')
# -
# Sanity check: inspect one batch's shape and the first one-hot label.
for imgs, labels in train_generator:
    print(imgs.shape, labels[0])
    break
# +
# traditional image loading
def get_train_images_and_labels():
    """Load every training image into memory as arrays with one-hot labels.

    Walks <train_images_dir>/train/<class_folder>/ and loads each image at
    (IMG_SIZE, IMG_SIZE).  Folder "TB-0" is the negative class (one-hot
    [1, 0]); every other folder is positive ([0, 1]).

    Returns:
        (images, labels): arrays of shape (N, IMG_SIZE, IMG_SIZE, 3) and (N, 2).
    """
    train_images_list = []
    train_labels_list = []
    train_root = os.path.join(train_images_dir, 'train')
    for folder in os.listdir(train_root):
        # One label per class folder — hoisted out of the per-image loop.
        label = np.eye(2)[0] if folder == "TB-0" else np.eye(2)[1]
        try:
            for img in tqdm(os.listdir(os.path.join(train_root, folder)), desc=f"Now on folder {folder}"):
                x = keras.preprocessing.image.load_img(path=os.path.join(train_root, folder, img),
                                                       target_size=(IMG_SIZE, IMG_SIZE))
                x = keras.preprocessing.image.img_to_array(x)
                train_images_list.append(x)
                train_labels_list.append(label)
        except Exception as e:
            # Best effort: skip non-directories / unreadable images, but report
            # them instead of the original silent `except: pass`, which could
            # hide genuine bugs.
            print(f"[WARN] skipping {folder}: {e}")
    return np.array(train_images_list), np.array(train_labels_list)
train_images_list, train_labels_list = get_train_images_and_labels()
# -
len(train_images_list), len(train_labels_list)
# # Visualization
# +
def show_training_sample(batch_size=BATCH_SIZE):
    """Display up to 25 augmented training images in a 5x5 grid with labels.

    Args:
        batch_size: upper bound on the number of images shown (capped at 25).
    """
    imgs, labs = next(iter(train_generator))
    plt.figure(figsize=(22, 10))
    rows, cols = 5, 5  # fixed grid; hoisted out of the loop (was recomputed per image)
    for i in range(min(rows * cols, batch_size)):
        img = imgs[i] / 255.0  # generator yields [0, 255]; imshow wants [0, 1]
        label = class_names[tf.argmax(labs[i])]
        plt.subplot(rows, cols, i + 1)  # unused `ax =` binding removed
        plt.imshow(img)
        plt.title(label)
        plt.axis("off")
show_training_sample()
# -
arch_name = "ResNet101"
base_arch = getattr(tf.keras.applications, arch_name)
base_model = base_arch(include_top=False, input_shape=IMG_SHAPE)
# +
# freeze trained layers so only the new classification head is trained
for layer in base_model.layers:
    layer.trainable = False
def build_model(fc_size=2, n_dense_units=512):
    """Build a classification head on top of the frozen base model.

    Args:
        fc_size: number of output units; >1 uses a softmax (one-hot labels),
            1 uses a single sigmoid unit.
        n_dense_units: width of the hidden Dense layer.

    Returns:
        An un-compiled keras.Model mapping IMG_SHAPE images to predictions.
    """
    inputs = keras.Input(shape=IMG_SHAPE)  # fixed duplicated `inputs = inputs =`
    # training=False keeps BatchNorm layers in inference mode while frozen
    x = base_model(inputs, training=False)
    x = keras.layers.GlobalAveragePooling2D()(x)
    x = keras.layers.Dense(units=n_dense_units, activation='relu')(x)
    x = keras.layers.Dropout(0.1)(x)
    if fc_size > 1:
        predictions = keras.layers.Dense(units=fc_size, activation="softmax")(x)
    else:
        predictions = keras.layers.Dense(units=1, activation="sigmoid")(x)
    model = keras.Model(inputs=inputs, outputs=predictions)
    return model
# +
model = build_model(fc_size=2, n_dense_units=512)
model.summary()
# -
# # Training phase
# # Report bot config
# access_token = getpass.getpass(prompt="Enter the API token from your Telegram bot here : ")
# class botCallback(keras.callbacks.Callback):
# def __init__(self,access_token):
# self.access_token = access_token
# self.ping_url = 'https://api.telegram.org/bot'+str(self.access_token)+'/getUpdates'
# self.response = requests.get(self.ping_url).json()
# self.chat_id = self.response['result'][0]['message']['chat']['id']
#
# def send_message(self,message):
# self.ping_url = 'https://api.telegram.org/bot'+str(self.access_token)+'/sendMessage?'+\
# 'chat_id='+str(self.chat_id)+\
# '&parse_mode=Markdown'+\
# '&text='+message
# self.response = requests.get(self.ping_url)
#
# def send_photo(self,filepath):
# file_ = open(filepath,'rb')
# file_dict = {'photo':file_}
# self.ping_url = 'https://api.telegram.org/bot'+str(self.access_token)+'/sendPhoto?'+\
# 'chat_id='+str(self.chat_id)
# self.response = requests.post(self.ping_url,files = file_dict)
# file_.close()
#
# def on_train_batch_begin(self, batch, logs=None):
# #keys = list(logs.keys())
# #print("...Training: start of batch {}; got log keys: {}".format(batch, keys))
# pass
#
# def on_train_batch_end(self, batch, logs=None):
# #keys = list(logs.keys())
# #print("...Training: end of batch {}; got log keys: {}".format(batch, keys))
# #message = ' Iteration/Batch {}\n Training Accuracy : {:7.2f}\n Training Loss : {:7.2f}\n'.format(batch,logs['accuracy'],logs['loss'])
# #message += ' Validation Accuracy : {:7.2f}\n Validation Loss : {:7.2f}\n'.format(logs['val_accuracy'],logs['val_loss'])
# #self.send_message(message)
# pass
#
# def on_test_batch_begin(self, batch, logs=None):
# #keys = list(logs.keys())
# #print("...Evaluating: start of batch {}; got log keys: {}".format(batch, keys))
# pass
#
# def on_test_batch_end(self, batch, logs=None):
# #message = ' Iteration/Batch {}\n Training Accuracy : {:7.2f}\n Training Loss : {:7.2f}\n'.format(batch,logs['accuracy'],logs['loss'])
# #message += ' Validation Accuracy : {:7.2f}\n Validation Loss : {:7.2f}\n'.format(logs['val_accuracy'],logs['val_loss'])
# #self.send_message(message)
# pass
#
# def on_epoch_begin(self, epoch, logs=None):
# message = f'Epoch {epoch+1} starts...\n'
# try:
# self.send_message(message)
# except:
# pass
#
# def on_epoch_end(self, epoch, logs=None):
# try:
# message = ' Epoch {} report\n Training Accuracy : {:7.2f}\n Training Loss : {:7.2f}\n'.format(epoch+1,logs['accuracy'],logs['loss'])
# message += ' Validation Accuracy : {:7.2f}\n Validation Loss : {:7.2f}\n'.format(logs['val_accuracy'],logs['val_loss'])
# self.send_message(message)
# except:
# pass
#
# class Plotter(botCallback):
# def __init__(self,access_token):
# super().__init__(access_token)
#
# def on_train_begin(self,logs=None):
# self.batch = 0
# self.epoch = []
# self.train_loss = []
# self.val_loss = []
# self.train_acc = []
# self.val_acc = []
# self.fig = plt.figure(figsize=(200,100))
# self.logs = []
#
# def on_epoch_end(self, epoch, logs=None):
#
# self.logs.append(logs)
# self.epoch.append(epoch)
# self.train_loss.append(logs['loss'])
# self.val_loss.append(logs['val_loss'])
# self.train_acc.append(logs['accuracy'])
# self.val_acc.append(logs['val_accuracy'])
#
# f,(ax1,ax2) = plt.subplots(1,2,sharex=True)
#
# clear_output(wait=True)
#
# ax1.plot(self.epoch, self.train_loss, label='Training Loss')
# ax1.plot(self.epoch, self.val_loss, label='Validation Loss')
# ax1.legend()
#
# ax2.plot(self.epoch, self.train_acc, label='Training Accuracy')
# ax2.plot(self.epoch, self.val_acc, label='Validation Accuracy')
# ax2.legend()
# img_path = os.path.join(base_dir, "Imgs")
# plt.savefig(os.path.join(img_path, 'Accuracy_and_Loss_plot.jpg'))
# plt.close(fig=f)
# try:
# self.send_photo(os.path.join(img_path, 'Accuracy_and_Loss_plot.jpg'))
# except:
# pass
# +
# training params
# optimizer
lr = 9.9e-4
optimizer = keras.optimizers.Adam(learning_rate=lr)
# loss
loss_fn = keras.losses.BinaryCrossentropy()
model.compile(optimizer=optimizer, loss=loss_fn, metrics=['accuracy'])
num_epochs = 30
optim_name = optimizer.get_config()['name']
model_name = f'tf_model_x_rays_based_on_{arch_name}_and_{optim_name}.h5'
model_path = os.path.join(base_dir, 'Models', model_name)
# Use distinct files for the two checkpoints: previously both monitors wrote
# to the same path, so a val_loss improvement could overwrite the best
# val_accuracy weights (and vice versa).
acc_ckpt = keras.callbacks.ModelCheckpoint(filepath=model_path, verbose=1, monitor='val_accuracy', save_best_only=True)
loss_ckpt = keras.callbacks.ModelCheckpoint(filepath=model_path.replace('.h5', '_best_loss.h5'), verbose=1, monitor='val_loss', save_best_only=True)
es = keras.callbacks.EarlyStopping(monitor='val_loss', patience=20, verbose=1, restore_best_weights=True)
# min_lr must be below the starting lr, otherwise ReduceLROnPlateau can never
# actually reduce the rate (the original passed min_lr=lr, a no-op).
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy', factor=0.1, patience=10,
                                                 verbose=1, mode='max', min_lr=lr * 1e-3)
# bot config
#bot_callback = botCallback(access_token=access_token)
#plotter = Plotter(access_token)
CALLBACKS = [acc_ckpt, loss_ckpt, es, reduce_lr] #bot_callback, plotter]
# -
# Train the head; best weights are saved by the checkpoint callbacks and
# restored in memory by EarlyStopping(restore_best_weights=True).
h = model.fit(train_generator,
              validation_data=validation_generator,
              epochs=num_epochs,
              steps_per_epoch=train_generator.n // BATCH_SIZE,
              validation_steps=validation_generator.n // BATCH_SIZE,
              callbacks=CALLBACKS)
# # cross validation
#
#
# def run_cv(num_epochs=10, num_folds=10, batch_size=BATCH_SIZE):
#
# kf = KFold(n_splits=num_folds)
#
# cv_models_path = os.path.join(base_dir, 'Models', 'CV_models')
# histories = []
#
# for fold_num, (train_idx, val_idx) in enumerate(kf.split(X=train_images_list, y=train_labels_list)):
#
# train_ds = image_generator.flow(x = train_images_list[train_idx],
# y=train_labels_list[train_idx],
# batch_size=batch_size,
# seed=seed_val,
# shuffle=True,
# subset='training')
#
# val_ds = image_generator.flow(x = train_images_list[val_idx],
# y=train_labels_list[val_idx],
# batch_size=batch_size,
# seed=seed_val,
# shuffle=False,
# subset='validation')
#
#
# # training params
# # optimizer
# lr = 9.9e-5
# optimizer = keras.optimizers.Nadam(learning_rate=lr)
# optim_name = optimizer.get_config()['name']
# # loss
# loss_fn = keras.losses.BinaryCrossentropy()
#
# # model
# model = build_model(fc_size=2, n_dense_units=512)
# model.compile(optimizer=optimizer, loss=loss_fn, metrics=['accuracy'])
# model_path = os.path.join(cv_models_path, f"tf_xrays_model_based_on_{arch_name}_and_{optim_name}_fold_{fold_num+1}.h5")
#
# acc_ckpt = keras.callbacks.ModelCheckpoint(filepath=model_path, verbose=1, monitor='val_accuracy', save_best_only=True)
# loss_ckpt = keras.callbacks.ModelCheckpoint(filepath=model_path, verbose=1, monitor='val_loss', save_best_only=True)
# es = keras.callbacks.EarlyStopping(monitor='val_loss', patience=20, verbose=1, restore_best_weights=True)
# def scheduler(epoch):
# if epoch % 10 == 0:
# return lr*0.01
# else:
# return lr
#
# lr_sch = keras.callbacks.LearningRateScheduler(scheduler)
# CALLBACKS = [acc_ckpt, loss_ckpt, es, lr_sch]
#
# print(f"\n[INFO] =========== Training on fold {fold_num + 1}/{num_folds} ===========\n")
# h = model.fit(train_ds,
# validation_data=val_ds,
# epochs=num_epochs,
# steps_per_epoch=train_ds.n // batch_size,
# validation_steps = val_ds.n // batch_size,
# callbacks=CALLBACKS)
#
# histories.append(h)
# del train_ds
# del val_ds
# del model
#
# return histories, cv_models_path, optim_name, lr
# num_epochs=40
# num_folds=5
#
# histories, cv_models_path, optim_name, lr = run_cv(num_epochs=num_epochs, num_folds=num_folds, batch_size=min(8, BATCH_SIZE))
# # Results
# try:
# for h in histories:
# plt.figure(figsize=(20,5))
# loss = h.history['loss']
# acc = h.history['accuracy']
# val_loss = h.history['val_loss']
# val_acc = h.history['val_accuracy']
#
# plt.subplot(1, 2, 1)
# plt.plot(loss, label='training loss')
# plt.plot(val_loss, label='validation loss')
# plt.title("Losses results", size=16)
# plt.xlabel("epoch")
# plt.ylabel('Loss')
# plt.legend()
#
# plt.subplot(1, 2, 2)
# plt.plot(acc, label='training acc')
# plt.plot(val_acc, label='validation acc')
# plt.title("Accuracies results", size=16)
# plt.xlabel("epoch")
# plt.ylabel('Accuracy')
# plt.legend()
#
# plt.show()
#
# except:
# plt.figure(figsize=(20,6))
# loss = h.history['loss']
# acc = h.history['accuracy']
# val_loss = h.history['val_loss']
# val_acc = h.history['val_accuracy']
#
# plt.subplot(1, 2, 1)
# plt.plot(loss, label='training loss')
# plt.plot(val_loss, label='validation loss')
# plt.title("Losses results", size=16)
# plt.xlabel("epoch")
# plt.ylabel('Loss')
# plt.legend()
#
# plt.subplot(1, 2, 2)
# plt.plot(acc, label='training acc')
# plt.plot(val_acc, label='validation acc')
# plt.title("Accuracies results", size=16)
# plt.xlabel("epoch")
# plt.ylabel('Accuracy')
# plt.legend()
#
# plt.show()
# # save/load model
# model_name = f'tf_model_x_rays_based_on_{arch_name}.h5'
# model_path = os.path.join(base_dir, 'Models', model_name)
# model.save(model_path)
#
# print(f'[INFO] Model {model_name} saved to {model_path}')
# loaded_model = keras.models.load_model(model_path)
# loaded_model.summary()
# # Predictions
# +
def load_models(cv_models_path = os.path.join(base_dir, 'Models', 'CV_models'), optim_name="Adam"):
    """Load the per-fold cross-validation models from disk, falling back to
    the in-memory `model` if they are missing.

    The default cv_models_path is evaluated once at definition time, which is
    fine here because base_dir is a module-level constant.

    Returns:
        list of frozen (trainable=False) keras models.
    """
    models = []
    n_folds = 5
    try:
        for fold_num in range(1, n_folds + 1):
            m = keras.models.load_model(os.path.join(
                cv_models_path,
                f"tf_xrays_model_based_on_{arch_name}_and_{optim_name}_fold_{fold_num}.h5"))
            m.trainable = False
            models.append(m)
    except Exception as e:
        # CV models absent (e.g. the commented-out CV cell was never run):
        # report why instead of the original bare `except`, then fall back.
        print(f"[INFO] CV models not loaded ({e}); using the in-memory model")
        model.trainable = False
        models.append(model)
    return models
models = load_models(optim_name=optim_name)
len(models)
# +
def test_step(models):
    """Predict the positive-class probability for every test image.

    Each image is scored by every model in `models` and the scores are
    averaged (simple ensemble over CV folds).

    Returns:
        (ids, preds): filename stems and the mean P(POS) per image.
    """
    images_test = []
    predictions = []
    test_root = os.path.join(test_images_dir, 'test')
    for im in tqdm(os.listdir(test_root), desc="Predicting on test images "):
        images_test.append(im.split('.')[0])  # ID = text before the first dot
        x = keras.preprocessing.image.load_img(os.path.join(test_root, im), target_size=(IMG_SIZE, IMG_SIZE))
        x = keras.preprocessing.image.img_to_array(x)
        batch = x.reshape(-1, IMG_SIZE, IMG_SIZE, 3)  # reshape once, not once per model
        # column 1 of the softmax output = P(POS)
        fold_preds = [m.predict(batch)[0][1] for m in models]
        predictions.append(float(np.mean(fold_preds)))
    return images_test, predictions
images_test, predictions = test_step(models = models)
assert len(predictions) == len(images_test)
# +
my_file = pd.DataFrame({
    'ID': images_test,
    'LABEL': predictions
})
my_file
# -
num_epochs = 36  # overrides the training value purely for the submission file name
# +
# `num_folds` only exists when the (commented-out) cross-validation cell was
# run; fall back to the number of loaded models so a straight top-to-bottom
# execution no longer raises NameError here.
try:
    n_folds_for_name = num_folds
except NameError:
    n_folds_for_name = len(models)
if len(models) > 1:
    file_name = f'tf_xrays_based_on_{arch_name}_optimizer_{optim_name}_bs_{BATCH_SIZE}_ep_{num_epochs}_lr_{lr}_n_folds_{n_folds_for_name}.csv'
else:
    file_name = f'tf_xrays_based_on_{arch_name}_optimizer_{optim_name}_bs_{BATCH_SIZE}_ep_{num_epochs}_lr_{lr}.csv'
my_file.to_csv(os.path.join(base_dir, 'Submissions', file_name), index=False)
print(f"[INFO] Saved file as {file_name}")
# -
#
# preds = my_file.LABEL.values
# base_file = pd.read_csv(os.path.join(base_dir, 'Submissions', "tf_xrays_based_on_ResNet50_optimizer_Adam_bs_16_ep_150_lr_9.9e-05_best_one.csv"))
#
# y_true = base_file.LABEL.values
# for idx, val in enumerate(y_true):
# if val >.5:
# y_true[idx] = 1
# else:
# y_true[idx] = 0
#
| Notebooks/TB_tf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''final_project'': conda)'
# name: python3
# ---
# # 1.0 Full Exploratory Data Analysis
#
# ## Libraries
#
# %load_ext autoreload
# %autoreload 2
import final_project.utils.paths as path
import janitor
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
path.hello()
# ## Download data
#
# ### Specify input and output files
#
# +
# Johns Hopkins CSSE global confirmed-cases time series, and the local raw
# path it is downloaded to.
covid_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
covid_file = path.data_raw_dir("time_series_covid19_confirmed_global.csv")
# -
# ### Run download
#
# !curl {covid_url} -o {covid_file}
# ## Process data
#
# ### Read data
#
# Re-read the raw CSV (kept separate from the download cell so this can be
# re-run independently).
input_covid_file = path.data_raw_dir(
    "time_series_covid19_confirmed_global.csv")
covid_df = pd.read_csv(input_covid_file)
covid_df.info()
covid_df.head()
# ### Process data
#
# +
# Tidy the wide date columns with pyjanitor: keep the country column plus
# every date-like ("*/*/*") column, melt to (country, date, value), parse the
# dates, and snake_case the headers.
processed_df = (
    covid_df
    .select_columns(["Country/Region", "*/*/*"])
    .pivot_longer(
        index="Country/Region",
        names_to="date"
    )
    .transform_column("date", pd.to_datetime)
    .clean_names()
)
processed_df.head()
# -
# ### Save output data
#
# Persist the tidy data, then read it back (simulating a fresh session).
output_covid_file = path.data_processed_dir(
    "time_series_covid19_confirmed_global_processed.csv")
processed_df.to_csv(output_covid_file, index=False)
# ## Explore data
#
# ### Read data
#
processed_covid_file = path.data_processed_dir(
    "time_series_covid19_confirmed_global_processed.csv")
processed_covid_df = pd.read_csv(processed_covid_file)
processed_covid_df.info()
processed_covid_df.head()
# ### Explore data
#
sns.set_style("whitegrid")
# Dates come back from the CSV as strings; re-parse to datetime for plotting.
processed_covid_df = (
    processed_covid_df
    .transform_column(
        "date",
        pd.to_datetime
    )
)
# #### Appearance of new covid-19 cases in Latam
#
# Subset data.
#
# Restrict to a handful of Latin-American countries for the time-series plot.
countries = ['Argentina', 'Brazil', 'Chile', 'Colombia', 'Mexico', 'Peru']
some_latam_countries_df = processed_covid_df.filter_on(
    f"country_region in {countries}")
some_latam_countries_df.head(3)
# Plot time series.
#
import final_project.visualization.visualize as visual
visual.covid_time_series(some_latam_countries_df)
# +
# Same chart built inline: one line per country over time.
sns.lineplot(
    data=some_latam_countries_df,
    x="date",
    y="value",
    hue="country_region"
)
plt.xticks(rotation=15)
plt.xlabel("Date")
plt.ylabel("Value")
plt.title("Latam covid time series")
# -
# #### Latam in global context
#
# Top `n` countries.
#
# +
# Top-20 countries by cumulative reported value; a `color` column flags the
# Latam countries in red for the highlighted bar chart below.
top_countries_df = (
    processed_covid_df
    .select_columns(["country_region", "value"])
    .groupby(["country_region"])
    .aggregate("sum")
    .sort_values("value", ascending=False)
    .reset_index()
    .head(20)
    .transform_column(
        column_name="country_region",
        function=lambda x: "red" if x in countries else "lightblue",
        dest_column_name="color"
    )
)
top_countries_df.head()
# -
# Plot to Latam in highlighted bars.
#
def latam_countries(df):
    """Draw a horizontal bar chart highlighting Latam countries.

    Expects `df` to carry `value`, `country_region` and `color` columns;
    the `color` column drives the per-bar palette.
    """
    sns.barplot(x="value", y="country_region", data=df, palette=df.color)
    plt.xlabel("Value")
    plt.ylabel("Country Region")
    plt.title("Latam countries in a global context")
latam_countries(top_countries_df)
visual.latam_countries(top_countries_df)
# +
# Same highlighted bar chart without the helper function.
sns.barplot(
    data=top_countries_df,
    x="value",
    y="country_region",
    palette=top_countries_df.color
)
plt.xlabel("Value")
plt.ylabel("Country Region")
plt.title("Latam countries in a global context")
| notebooks/1.0-daric-full.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/hvarS/CS60075-Team28-Task-1/blob/main/corpus_features_extraction_single.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="Ebzf0kDHqTzy" colab={"base_uri": "https://localhost:8080/"} outputId="77d4e6a8-1bf2-4b75-c16c-3030429904c8"
# Mount Google Drive so the preprocessed CSVs and outputs are reachable.
from google.colab import drive
drive.mount('/content/drive')
# + colab={"base_uri": "https://localhost:8080/"} id="GTkgqyyHyB9q" outputId="9bd00b50-150f-4103-ed13-7eadab66a4bc"
import pandas as pd
import numpy
# !pip install python-datamuse
# !pip install stanza
import stanza
# Download Stanford CoreNLP locally and point stanza's client at it.
corenlp_dir = './corenlp'
stanza.install_corenlp(dir=corenlp_dir)
# Set the CORENLP_HOME environment variable to point to the installation location
import os
os.environ["CORENLP_HOME"] = corenlp_dir
# !ls $CORENLP_HOME
# !pip install nltk
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="cE7VEMZT0qGC" outputId="9ab050ff-6d60-4f91-ac6f-3583d0d870d9"
import pandas as pd
# Alternative dataset paths — exactly one should be uncommented at a time.
#basepath = '/content/CS60075-Team28-Task-1/data/preprocessed/lcp_single_train_preprocessed.csv'
basepath = '/content/drive/MyDrive/CS60075-Team28-Task-1/data/preprocessed/lcp_single_test_preprocessed.csv'
# basepath = '/content/drive/MyDrive/CS60075-Team28-Task-1/data/preprocessed/lcp_multi_train_preprocessed.csv'
# basepath = '/content/drive/MyDrive/CS60075-Team28-Task-1/data/preprocessed/lcp_multi_test_preprocessed.csv'
df = pd.read_csv(basepath)
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="LRR5eKZmLph0" outputId="91e49fd4-bed3-44e8-e5be-d5b2c8b36d1f"
df.dtypes
# + colab={"base_uri": "https://localhost:8080/", "height": 194} id="VIpnRuT01uMo" outputId="d7f12d46-0cc8-40d1-fb3f-702bb55dd9e8"
# Simple surface features: whitespace-split pieces and character length.
df['split'] = df['token'].apply(lambda x: x.split())
df['token_length'] = df['token'].str.len()
df.head()
# + id="NF4vH87cKYRe" colab={"base_uri": "https://localhost:8080/", "height": 194} outputId="e7164c64-dbe0-4e04-d0c8-d239165974e7"
#function to obtain syllables for words
from datamuse import datamuse
api = datamuse.Datamuse()
def get_syllables(word):
    """Return the syllable count for `word` via the Datamuse API (0 if unknown).

    md='psf' requests parts-of-speech / syllable / frequency metadata; only
    numSyllables is used here (a dead assignment of the matched word itself
    was removed).
    """
    syllables = 0
    word_results = api.words(sp=word, max=1, md='psf')
    if len(word_results) > 0:
        syllables = int(word_results[0]["numSyllables"])
    return syllables
#Apply function to get syllables in the token
df['syllables'] = df['token'].apply(lambda x: get_syllables(x) )
df.head()
# + id="R-dYFUXPNhGT" colab={"base_uri": "https://localhost:8080/", "height": 194} outputId="00935433-e1f6-4653-d7b3-c56812bccba4"
# Keep only the columns needed for parsing (one row per sentence/id).
temp = df[['sentence','id']].copy()
temp.head()
# + colab={"base_uri": "https://localhost:8080/"} id="TKD915atT45g" outputId="c435c5c7-8d6d-442f-a025-f5af8ae5c88d"
from stanza.server import CoreNLPClient
############## uncomment to start the client
# Construct a CoreNLPClient with some basic annotators, a memory allocation of 4GB, and port number 9001
# Start the background server and wait for some time
# Note that in practice this is totally optional, as by default the server will be started when the first annotation is performed
client = CoreNLPClient(
    annotators=['pos', 'depparse'],
    memory='4G',
    endpoint='http://localhost:9005',
    output_format="json",
    be_quiet=True)
print(client)
client.start()
import time; time.sleep(10)  # give the background server time to come up
def parse(string):
    # Annotate one sentence and return the JSON parse.
    # NOTE(review): the parameter name shadows the stdlib `string` module
    # (imported further down); harmless because the shadowing is local.
    output = client.annotate(string)
    return output
temp['parse'] = temp['sentence'].apply(lambda x: parse(x))
temp['parse'].head()
# + id="BWZnfOkjm1M7" colab={"base_uri": "https://localhost:8080/"} outputId="a4ec3b83-05f8-4d26-fe03-e6e067d0f2c2"
# get pos of the token (here phrase) from the parse constructed by the corenlp client. same for the dependencies
import string
# Characters stripped from CoreNLP tokens before comparing with the already
# cleaned dataset token: all punctuation except '-' and apostrophes, plus
# curly quotes.
remove = string.punctuation
remove = remove.replace("-", "")
remove = remove.replace("'", "")  # don't remove apostrophes
remove = remove + '“'
remove = remove + '”'
# Build the str.translate deletion table once instead of rebuilding the dict
# for every token comparison in every row (the original recomputed it per call).
_DELETE_TABLE = {ord(char): None for char in remove}
def _clean(token_text):
    """Lowercase a CoreNLP token and strip punctuation for comparison."""
    return token_text.lower().translate(_DELETE_TABLE)
def get_pos(row):
    """Return the POS tag of row['token'] from the CoreNLP parse.

    Only the first sentence of the parse is inspected (one sentence per row).
    Returns None when the token is not found.
    """
    word = row['token']
    parse = row['parse']
    for tok in parse['sentences'][0]['tokens']:
        if _clean(tok['word']) == word:
            return tok['pos']
def get_dep(row):
    """Count basic dependencies in which row['token'] is the governor."""
    number = 0
    word = row['token']
    parse = row['parse']
    for dep in parse['sentences'][0]['basicDependencies']:
        if _clean(dep['governorGloss']) == word:
            number += 1
    return number
# Join the parses back onto the feature frame (merge on the shared
# 'sentence'/'id' columns), then derive POS and governor-count features.
word_parse_features = pd.merge(temp, df)
word_parse_features['pos'] = word_parse_features.apply(get_pos, axis=1)
word_parse_features['dep num'] = word_parse_features.apply(get_dep, axis=1)
word_parse_features.head()
# + id="iWRW-KjYOrcV"
word_parse_features.to_csv('/content/drive/MyDrive/cwi2018_traindevset/1.csv')  # checkpoint intermediate features
# + colab={"base_uri": "https://localhost:8080/"} id="5pihojX3Vp_2" outputId="d97ac7f6-42c2-4cc2-d314-86783ae0ee1c"
import nltk
nltk.download('wordnet')
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
# + colab={"base_uri": "https://localhost:8080/"} id="xQ5ghtcgXSiT" outputId="74858d65-2991-4b21-c03c-2b9bbcb44910"
# Smoke test that WordNet is available.
print(wordnet.synsets('room'))
# + id="-KwcGTVMKNHb" colab={"base_uri": "https://localhost:8080/"} outputId="b71bf01d-06de-4fd7-fcc5-9a3c249055a0"
def get_wordnet_pos(treebank_tag):
    """Translate a Penn Treebank tag prefix into a WordNet POS constant.

    Returns None for tags with no WordNet counterpart (e.g. DT, IN, CC).
    """
    first = treebank_tag[:1]
    if first == 'J':
        return wordnet.ADJ
    if first == 'V':
        return wordnet.VERB
    if first == 'N':
        return wordnet.NOUN
    if first == 'R':
        return wordnet.ADV
    return None
def lemmatiser(row):
    """Lemmatise row['token'] using its POS tag.

    Falls back to the default (noun) lemmatiser when the tagged call fails
    (get_wordnet_pos may return None for unmappable tags, which makes
    lemmatize raise); prints the token and returns None if both attempts fail.
    Bare `except:` narrowed to `except Exception` so Ctrl-C still works.
    """
    word = row['token']
    pos = row['pos']
    try:
        return wordnet_lemmatizer.lemmatize(word, pos = get_wordnet_pos(pos))
    except Exception:
        try:
            return wordnet_lemmatizer.lemmatize(word)
        except Exception:
            # Both attempts failed (e.g. non-string token): log and return None.
            print(word)
word_parse_features['lemma'] = word_parse_features.apply(lemmatiser, axis=1)
word_parse_features.head()
# + id="hUqhtMpLaLI5"
def synonyms(word):
    """Number of WordNet synsets for `word` (0 on any lookup failure)."""
    try:
        return len(wordnet.synsets(word))
    except Exception:
        # narrowed from a bare `except:` (which also trapped SystemExit /
        # KeyboardInterrupt); any failure still yields a count of 0
        return 0
def hypernyms(word):
    """Hypernym count of the first synset of `word` (0 if none / on failure)."""
    try:
        return len(wordnet.synsets(word)[0].hypernyms())
    except Exception:
        # includes IndexError when the word has no synsets, as before
        return 0
def hyponyms(word):
    """Hyponym count of the first synset of `word` (0 if none / on failure)."""
    try:
        return len(wordnet.synsets(word)[0].hyponyms())
    except Exception:
        return 0
# + colab={"base_uri": "https://localhost:8080/"} id="jwUQTKuUYBQ7" outputId="7ff90cb7-447f-464e-8499-68df60e38cf7"
# WordNet-derived lexical features, computed on the lemma.
word_parse_features['synonyms'] = word_parse_features['lemma'].apply(lambda x: synonyms(x))
word_parse_features['hypernyms'] = word_parse_features['lemma'].apply(lambda x: hypernyms(x))
word_parse_features['hyponyms'] = word_parse_features['lemma'].apply(lambda x: hyponyms(x))
word_parse_features.head()
# + id="vYtJroxeaBfw"
word_parse_features.to_csv('/content/drive/MyDrive/cwi2018_traindevset/2.csv')  # checkpoint
# + id="2-rpZnrTYE-a"
def is_noun(tag):
    """True for Penn Treebank noun tags."""
    return tag in ('NN', 'NNS', 'NNP', 'NNPS')
def is_verb(tag):
    """True for Penn Treebank verb tags."""
    return tag in ('VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ')
def is_adverb(tag):
    """True for Penn Treebank adverb tags."""
    return tag in ('RB', 'RBR', 'RBS')
def is_adjective(tag):
    """True for Penn Treebank adjective tags."""
    return tag in ('JJ', 'JJR', 'JJS')
def penn_to_google(tag):
    """Map a Penn Treebank tag onto a Datamuse/Google POS code, or None."""
    for predicate, code in ((is_adjective, 'adj'),
                            (is_noun, 'n'),
                            (is_adverb, 'adv'),
                            (is_verb, 'v')):
        if predicate(tag):
            return code
    return None
def get_frequency(row):
    """Look up the Google-corpus word frequency for row['token'] via Datamuse.

    md='pf' returns part-of-speech tags plus a frequency entry ("f:<num>") as
    the last element of 'tags'.  The frequency is accepted only when the
    token's converted Penn tag appears among the Datamuse tags; otherwise the
    lemma is tried, and 0.0 is returned when neither matches.
    NOTE(review): the bare excepts also swallow network/API errors — any
    Datamuse failure silently becomes frequency 0.0.
    """
    nofreq = float(0.000000)
    word = row["token"]
    word = str(word)
    tag = row["pos"]
    tag = penn_to_google(tag)
    try:
        word_results = api.words(sp=word, max=1, md='pf')
        tag_list = (word_results[0]['tags'][:-1])
        # last tag looks like "f:<frequency>"; [2:] strips the "f:" prefix
        frequency = word_results[0]['tags'][-1][2:]
        frequency = float(frequency)
        if tag in tag_list :
            return frequency
        else:
            # POS mismatch on the surface form: retry with the lemma
            lemma = row['lemma']
            try:
                word_results = api.words(sp=lemma, max=1, md='pf')
                tag_list = (word_results[0]['tags'][:-1])
                frequency = word_results[0]['tags'][-1][2:]
                frequency = float(frequency)
                if tag in tag_list:
                    return frequency
                else:
                    return nofreq
            except:
                return nofreq
    except:
        return nofreq
# + id="CdVR9DTz1vbf"
word_parse_features['google frequency'] = word_parse_features.apply(get_frequency ,axis=1)
# + colab={"base_uri": "https://localhost:8080/"} id="XamVDO_J27oZ" outputId="45abfb84-91b7-412f-bd36-9c4a07c1a831"
word_parse_features.head()
# + id="ZT6jx5Kb16Rs"
# Final feature export.
word_parse_features.to_csv('/content/drive/MyDrive/CS60075-Team28-Task-1/data/extracted_features/lcp_multi_test_features.csv')
# + id="N-ATkWY83LBm"
| corpus_features_extraction_single.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How to create a simple People Counter Application using Panorama SDK
#
# **Goal of this Notebook** :
#
# * Aid an Panorama developer prototype their application before creating the AWS Lambda for Panorama
# * Using the built in wrapper application that **mimics** the Panorama sdk to get inference from the model
# * Create and Deploy the AWS Lambda for Panorama from this notebook
#
# **What this Notebook accomplishes?** :
# * Detect People in a selected video
# * Draw bounding boxes around the people
# * Count the number of people detected
# * Display the count on the video frames
#
#
# **Useful Resources to aid your development**:
# * [AWS Panorama Documentation](https://docs.aws.amazon.com/panorama/)
#
#
# **CAUTION PLEASE READ BEFORE PROCEEDING** :
#
# * The panoramasdk wrapper class used in this demo is not the original Panorama sdk that is on the device image
# * The wrapper class does not reflect the capabilities of the original Panorama SDK on the device
# * Its sole purpose is to give a developer a realistic idea of the structure and signature of the sdk on the device
# **Pre -Requisites**:
# * Sagemaker Instance created with the right role (Policies needed IOT, Lambda and S3, IAM Full Access)
#
#
#
# **Frames to Process**:
#
# * By default, we only process 10 frames from the video. If you want to increase this, please change this value in /panorama_sdk/panoramasdk.py and change frames_to_process = 10 to a value of your choice
# #### Video to use
# Input video the wrapper SDK will feed frame-by-frame to the application.
video_to_use = "TownCentreXVID.avi"
# #### Step 1: Import Non Panorama Libraries
# !pip3 install -r requirements.txt
# +
from __future__ import division
from __future__ import print_function
from IPython.display import clear_output, Markdown, display
import json
from gluoncv import model_zoo, data, utils
import mxnet as mx
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (20,20)  # large default figure for frame previews
# -
# #### Step 2: Modelling Approach
#
# This step walks through using the Panorama SDK (wrapper) model to get inference
# * **Model** : ssd_512_resnet50_v1_voc
# * **Dataset** : These models are trained on [PascalVOC](http://host.robots.ox.ac.uk/pascal/VOC/) datasets with 20 classes of objects
# * **arXiv** :[Application of Convolutional Neural Network for Image
# Classification on Pascal VOC Challenge 2012 dataset](https://arxiv.org/pdf/1607.03785.pdf)
# * **Model Input Size** : 512 x 512
# * **Model Output** : (1, 100, 1), (1,100,1), (1,100,4)
# ##### **A. Loading the model**
# +
import os
import sys
# Make the local panorama_sdk wrapper importable — it *mimics* the on-device
# SDK (see the caution at the top of the notebook).
path = os.path.abspath(os.path.join(os.path.dirname("panorama_sdk"), '../..'))
sys.path.insert(1, path + '/panorama_sdk')
import jupyter_utils
jupyter_utils.declare_globals({'mxnet_modelzoo_example': True,
                               'custom_model': False, 'task':'object_detection', 'framework':'MXNET'})
import panoramasdk
print('Loading Model')
# Load the SSD person detector through the wrapper SDK (batch size 1).
model = panoramasdk.model()
model.open('ssd_512_resnet50_v1_voc', 1)
print('Model Loaded')
# -
# ##### **B. Pre Processing**
def preprocess(img, size):
    """Resize and normalize a BGR/RGB frame for the SSD model.

    Args:
        img: HxWx3 uint8 image (as returned by cv2.imread).
        size: target square edge, e.g. 512.

    Returns:
        float32 array of shape (1, 3, size, size): scaled to [0, 1],
        normalized per channel with ImageNet mean/std, and converted from
        HWC to NCHW layout.
    """
    resized = cv2.resize(img, (size, size))
    mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)  # per-channel, RGB order
    std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
    scaled = resized.astype(np.float32) / 255.  # ints -> floats in [0, 1]
    # Vectorized per-channel normalization (broadcast over H and W) replaces
    # the original manual channel splitting and nested-list reassembly.
    normalized = (scaled - mean) / std
    # HWC -> CHW, then add the batch dimension: (1, 3, size, size)
    x1 = np.expand_dims(normalized.transpose(2, 0, 1), axis=0)
    return x1
# ##### **C. Inference**
# +
import cv2
import numpy as np
## Panorama has a unique signature where we have to create empty arrays with the output dimensions before hand
# Create input and output arrays sized from the model's declared outputs:
# index 0 = class ids, 1 = confidences, 2 = bounding boxes.
class_info = model.get_output(0)
prob_info = model.get_output(1)
rect_info = model.get_output(2)
class_array = np.empty(class_info.get_dims(), dtype=class_info.get_type())
prob_array = np.empty(prob_info.get_dims(), dtype=prob_info.get_type())
rect_array = np.empty(rect_info.get_dims(), dtype=rect_info.get_type())
person_image = cv2.imread('street_empty.jpg')
# Pre Process Frame
x1 = preprocess(person_image, 512)
# Do inference on the new frame.
model.batch(0, x1)
model.flush()
# Get the results: each batch is copied into its pre-allocated array.
resultBatchSet = model.get_result()
class_batch = resultBatchSet.get(0)
prob_batch = resultBatchSet.get(1)
rect_batch = resultBatchSet.get(2)
class_batch.get(0, class_array)
prob_batch.get(1, prob_array)
rect_batch.get(2, rect_array)
class_data = class_array
prob_data = prob_array
rect_data = rect_array
print('Class data shape is ', class_data.shape)
print('Confidence data shape is ', prob_data.shape)
print('Bounding Boxes data shape is ',rect_data.shape)
# -
# #### Step 3 : Understanding and creating the Structure of the Application
#
# The Panorama Lambda function has the following structure
# +
### Lambda skeleton
class people_counter(object):
def interface(self):
# defines the parameters that interface with other services from Panorama
return
def init(self, parameters, inputs, outputs):
# defines the attributes such as arrays and model objects that will be used in the application
return
def entry(self, inputs, outputs):
# defines the application logic responsible for predicting using the inputs and handles what to do
# with the outputs
return
# -
# #### Step 4 : Let us use the panoramasdk wrapper function to simulate a real Panorama device lambda
jupyter_utils.change_video_source(video_to_use)
# +
import panoramasdk
import cv2
import numpy as np
import time
import boto3
# [AWS Panorama Documentation](https://docs.aws.amazon.com/panorama/)
HEIGHT = 512
WIDTH = 512
class people_counter(panoramasdk.base):
    """Panorama application that detects people in each camera frame with an
    SSD model, draws their bounding boxes, and overlays a running count."""

    def interface(self):
        """Declare the app's parameters, input streams and output streams."""
        return {
            "parameters":
            (
                ("float", "threshold", "Detection threshold", 0.10),
                ("model", "person_detection", "Model for detecting persons", "ssd_512_resnet50_v1_voc"),
                ("int", "batch_size", "Model batch size", 1),
                ("float", "person_index", "person index based on dataset used", 14),
            ),
            "inputs":
            (
                ("media[]", "video_in", "Camera input stream"),
            ),
            "outputs":
            (
                ("media[video_in]", "video_out", "Camera output stream"),
            )
        }

    def init(self, parameters, inputs, outputs):
        """Called once by the Lambda runtime before entering the main loop.

        Loads the model, reads parameters, and pre-allocates the output
        arrays required by the panoramasdk inference API.

        Returns:
            True on success, False if initialization failed.
        """
        try:
            print('Loading Model')
            # panoramasdk.model() creates a Panorama model object.
            self.model = panoramasdk.model()
            self.model.open(parameters.person_detection, 1)
            print('Model Loaded')
            # Detection probability threshold from parameters.
            # (The original assigned this twice; the duplicate was removed.)
            self.threshold = parameters.threshold
            # Frame Number Initialization
            self.frame_num = 0
            # Number of People
            self.number_people = 0
            # Bounding Box Colors
            self.colours = np.random.rand(32, 3)
            # Person Index for Model from parameters (14 for PascalVOC).
            self.person_index = parameters.person_index
            # model.get_output returns descriptors for each model output:
            # 0 = class ids, 1 = confidences, 2 = bounding boxes.
            class_info = self.model.get_output(0)
            prob_info = self.model.get_output(1)
            rect_info = self.model.get_output(2)
            self.class_array = np.empty(class_info.get_dims(), dtype=class_info.get_type())
            self.prob_array = np.empty(prob_info.get_dims(), dtype=prob_info.get_type())
            self.rect_array = np.empty(rect_info.get_dims(), dtype=rect_info.get_type())
            return True
        except Exception as e:
            print("Exception: {}".format(e))
            return False

    def preprocess(self, img, size):
        """Resize a frame and ImageNet-normalize it into NCHW layout."""
        resized = cv2.resize(img, (size, size))
        mean = [0.485, 0.456, 0.406]  # RGB
        std = [0.229, 0.224, 0.225]  # RGB
        img = resized.astype(np.float32) / 255.  # scale to [0, 1]
        # Extract the three channels, normalize each with its own mean/std.
        img_a = (img[:, :, 0] - mean[0]) / std[0]
        img_b = (img[:, :, 1] - mean[1]) / std[1]
        img_c = (img[:, :, 2] - mean[2]) / std[2]
        # Re-stack channels as (1, 3, size, size).
        x1 = [[[], [], []]]
        x1[0][0] = img_a
        x1[0][1] = img_b
        x1[0][2] = img_c
        x1 = np.asarray(x1)
        return x1

    def get_number_persons(self, class_data, prob_data):
        """Return indices of detections classified as 'person' whose
        confidence is at or above self.threshold."""
        person_indices = [i for i in range(len(class_data)) if int(class_data[i]) == self.person_index]
        # Keep only detections at or above the configured threshold
        # (fixed comment: the original said "95%"; the default is 0.10).
        prob_person_indices = [i for i in person_indices if prob_data[i] >= self.threshold]
        return prob_person_indices

    def entry(self, inputs, outputs):
        """Called by the runtime whenever input frames are available.

        Runs detection on each input stream's latest frame, annotates it,
        and forwards it to the corresponding output stream.
        """
        self.frame_num += 1
        for i in range(len(inputs.video_in)):
            stream = inputs.video_in[i]
            # numpy.array holding the latest frame for this stream.
            person_image = stream.image
            # Pre Process Frame
            x1 = self.preprocess(person_image, 512)
            # model.batch(input_idx, input_data_array) queues the data;
            # flush() sends it for inference (non-blocking).
            self.model.batch(0, x1)
            self.model.flush()
            # Collect the inference results for this frame.
            resultBatchSet = self.model.get_result()
            class_batch = resultBatchSet.get(0)
            prob_batch = resultBatchSet.get(1)
            rect_batch = resultBatchSet.get(2)
            # NOTE(review): .get(batch_index, out) -- batch size is 1, so the
            # indices 1 and 2 below look like they should be 0; confirm
            # against the panoramasdk API.
            class_batch.get(0, self.class_array)
            prob_batch.get(1, self.prob_array)
            rect_batch.get(2, self.rect_array)
            class_data = self.class_array[0]
            prob_data = self.prob_array[0]
            rect_data = self.rect_array[0]
            # Get Indices of classes that correspond to People
            person_indices = self.get_number_persons(class_data, prob_data)
            try:
                self.number_people = len(person_indices)
            except Exception:  # narrowed from a bare except
                self.number_people = 0
            # Draw each detection on the output stream.
            if self.number_people > 0:
                for index in person_indices:
                    # Normalize pixel box corners to [0, 1].
                    # (np.float was removed from NumPy; use builtin float.)
                    left = np.clip(rect_data[index][0] / float(HEIGHT), 0, 1)
                    top = np.clip(rect_data[index][1] / float(WIDTH), 0, 1)
                    right = np.clip(rect_data[index][2] / float(HEIGHT), 0, 1)
                    bottom = np.clip(rect_data[index][3] / float(WIDTH), 0, 1)
                    # add_rect / add_label add annotations to the data stream.
                    stream.add_rect(left, top, right, bottom)
                    stream.add_label(str(prob_data[index][0]), right, bottom)
            stream.add_label('Number of People : {}'.format(self.number_people), 0.1, 0.1)
            # Release the inference result reference so its data slot can be
            # reused by the model inference loop.
            self.model.release_result(resultBatchSet)
            outputs.video_out[i] = stream
        return True
# -
def main():
    """Entry point: run(), implemented in the panoramasdk.base class, performs
    Lambda initialization and enters the frame-processing main loop."""
    people_counter().run()
main()
# #### Step 5 : Upload Lambda and Create Lambda Function
#
# * A lambda is already provided and ready for use in the lambda folder (zip file)
# * Use this code snippet to upload and publish it to Lambda Service
#
# #### Import Roles
import sagemaker
sagemaker_session = sagemaker.Session()
role = sagemaker.get_execution_role()
# This Python snippet uses boto3 to create an IAM role named LambdaBasicExecution with basic
# lambda execution permissions.
# +
# Trust policy: allow Lambda and EventBridge to assume the execution role.
role_policy_document = {
    "Version": "2012-10-17",
    "Statement":[
        {
            "Effect": "Allow",
            "Principal": {"Service": ["lambda.amazonaws.com", "events.amazonaws.com"]},
            "Action": "sts:AssumeRole",
        }
    ]
}
iam_client = boto3.client("iam")
# Create the execution role the Lambda function will run under.
iam_client.create_role(
    RoleName="PeopleCounterExecutionRole",
    AssumeRolePolicyDocument=json.dumps(role_policy_document),
)
# -
# The following Python snippet will use the resources above to create a new AWS Lambda function called PeopleCounterLambda
# !zip -o ../Lambda/people_counter.zip ../Lambda/people_counter.py
# +
lambda_client = boto3.client("lambda")
# Read the zipped deployment package produced by the zip command above.
with open(
    "../Lambda/people_counter.zip", "rb"
) as f:
    zipped_code = f.read()
role = iam_client.get_role(RoleName="PeopleCounterExecutionRole")
# Create the function; the handler is main() inside people_counter.py.
response_create_function = lambda_client.create_function(
    FunctionName="PeopleCounterLambda",
    Runtime="python3.7",
    Role=role["Role"]["Arn"],
    Handler="people_counter.main",
    Code=dict(ZipFile=zipped_code),
    Timeout=120,
    MemorySize=2048,
)
# -
# **What is an ARN?** : Amazon Resource Names (ARNs) uniquely identify AWS resources.
#
# The following Python snippet will publish the Lambda Function we created above, and return an ARN with a version.
#
# This version arn can be used to go directly to the Panorama console and deploy this application.
def printmd(string, color=None):
    """Render *string* as Markdown in the notebook, optionally wrapped in a
    colored HTML span (helper for formatting output)."""
    styled = f"<span style='color:{color}'>{string}</span>"
    display(Markdown(styled))
# Publish a new immutable version of the Lambda function.
response = lambda_client.publish_version(FunctionName="PeopleCounterLambda")
# Printing the details of the lambda function that was just published
function_arn = response["FunctionArn"]
# The version number is the last ':'-separated component of the versioned ARN.
function_arn_version = list(response["FunctionArn"].split(":"))[-1]
# Console deep link to this exact version (region hard-coded to us-east-1).
lambda_url = (
    "https://console.aws.amazon.com/lambda/home?region=us-east-1#/functions/"
    + response["FunctionName"]
    + "/versions/"
    + response["Version"]
    + "?tab=configuration"
)
# #### Step 6 : Upload Model to S3 Bucket
def send_model_to_s3(model, bucket = 'aws-panorama-models-bucket'):
    """Upload a model archive from ../../Models/<model> to an S3 bucket.

    Args:
        model: file name of the model archive (e.g. 'x.tar.gz'); also used
            as the S3 object key.
        bucket: destination bucket name (created if it does not exist).
    """
    s3 = boto3.resource('s3')
    s3.create_bucket(Bucket=bucket)
    key = '../../Models/' + model
    # Context manager closes the local file handle (the original leaked it).
    with open(key, 'rb') as body:
        s3.Object(bucket, model).put(Body=body)
    # Bug fix: query the location of the *requested* bucket; the original
    # hard-coded 'aws-panorama-models-bucket' here, ignoring the parameter.
    location = boto3.client('s3').get_bucket_location(Bucket=bucket)['LocationConstraint']
    url = "s3://{}/{}".format(bucket, model)
    printmd("**S3 Path** : **{}**".format(url), color="black")
    return
send_model_to_s3(model = 'ssd_512_resnet50_v1_voc.tar.gz')
# #### Step 7 : Deploy the Application
#
# The Lambda is now created and published. You are now ready to deploy your model and the published lambda function, to the Panorama device
#
# The instructions to deploy are linked below
#
# [Creating Application Instructions Here](https://docs.aws.amazon.com/panorama/)
# ##### Some helpful information about the Lambda and Model
# Print the model-input details needed when deploying in the Panorama console.
printmd("**Inputs**", color="black")
print(' ')
printmd("**Input Name** : **{}**".format('data'), color="black")
printmd("**Shape** : **{}**".format([1,3,512,512]), color="black")
printmd("**Order** : **{}**".format('NCHW'), color="black")
printmd("**FourCC** : **{}**".format('BGR3'), color="black")
printmd("**Normalize** : **{}**".format('minmax'), color="black")
printmd("**Minmax range** : **{}**".format('[0,255]'), color="black")
# NOTE(review): the block below repeats the block above verbatim --
# possibly an accidental copy/paste; confirm whether both are needed.
printmd("**Inputs**", color="black")
print(' ')
printmd("**Input Name** : **{}**".format('data'), color="black")
printmd("**Shape** : **{}**".format([1,3,512,512]), color="black")
printmd("**Order** : **{}**".format('NCHW'), color="black")
printmd("**FourCC** : **{}**".format('BGR3'), color="black")
printmd("**Normalize** : **{}**".format('minmax'), color="black")
printmd("**Minmax range** : **{}**".format('[0,255]'), color="black")
| PeopleCounter/Notebook/People_Counter_Panorama_Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.5 64-bit (''ai_tutorial'': venv)'
# name: python37564bitaitutorialvenvbfa9976514ab457184b1b6f4ee41b3e6
# ---
# +
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
import glob
char_code_max = 65535
def count_char_code(string):
    """Return the normalized character-code frequency histogram of *string*.

    Args:
        string: text to profile.

    Returns:
        numpy.ndarray of length ``char_code_max`` where entry c is the
        fraction of characters in *string* with ``ord(ch) == c``. Codes at or
        above ``char_code_max`` (e.g. astral-plane characters) are ignored;
        an empty string yields an all-zero histogram.
    """
    count = np.zeros(char_code_max)
    str_len = len(string)
    if str_len == 0:
        return count  # robustness fix: avoid 0/0 on empty input
    for ch in string:
        code = ord(ch)
        # Bug fix: the original used '>' which let code == char_code_max
        # through and raised IndexError (valid indices are 0..char_code_max-1).
        if code >= char_code_max:
            continue
        count[code] += 1
    # Normalize frequencies by the text length.
    return count / str_len
# -
# Training data: one text file per language sample.
x_train = []
y_train = []
for train_path in glob.glob('./lang_data/train/*.txt'):
    print(train_path)
    # Label = first two characters of the file name (language code).
    y_train.append(train_path.split('/')[-1][0:2])
    # Read the whole file; 'with' closes the handle (the original leaked it)
    # and the rename avoids shadowing the builtin 'all'.
    with open(train_path, 'r') as fh:
        text = fh.read()
    x_train.append(count_char_code(text))
# Fit a Gaussian naive-Bayes classifier on the character-code histograms.
clf = GaussianNB()
clf.fit(x_train, y_train)
# Test data, prepared the same way.
x_test = []
y_test = []
for test_path in glob.glob('./lang_data/test/*.txt'):
    print(test_path)
    y_test.append(test_path.split('/')[-1][0:2])
    with open(test_path, 'r') as fh:
        text = fh.read()
    x_test.append(count_char_code(text))
# Evaluate on the held-out files.
y_pred = clf.predict(x_test)
print(y_pred)
print(accuracy_score(y_test, y_pred))
| lang_long.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Tutorial 3: Inversions
# ======================
#
# We've covered `Mapper`s, which, if I haven't emphasised it enough yet, map things. Now, we're going to look at how we
# can use these `Mapper`s (which map things) to reconstruct the source galaxy - I hope you're excited!
# +
# %matplotlib inline
from pyprojroot import here
# Resolve the project root so relative dataset paths work, then cd into it.
workspace_path = str(here())
# %cd $workspace_path
print(f"Working Directory has been set to `{workspace_path}`")
from os import path
import autolens as al
import autolens.plot as aplt
# -
# we'll use the same strong lensing data as the previous tutorial, where:
#
# - The lens `Galaxy`'s light is omitted.
# - The lens `Galaxy`'s total mass distribution is an `EllipticalIsothermal`.
# - The source `Galaxy`'s `LightProfile` is an `EllipticalSersic`.
# +
dataset_name = "mass_sie__source_sersic"
dataset_path = path.join("dataset", "howtolens", "chapter_4", dataset_name)
# Load the simulated imaging data (image, noise map, PSF) at 0.1"/pixel.
imaging = al.Imaging.from_fits(
    image_path=path.join(dataset_path, "image.fits"),
    noise_map_path=path.join(dataset_path, "noise_map.fits"),
    psf_path=path.join(dataset_path, "psf.fits"),
    pixel_scales=0.1,
)
# -
# Lets create an annular `Mask2D` which traces the strongly lensed source ring.
# +
mask = al.Mask2D.circular_annular(
    shape_2d=imaging.shape_2d,
    pixel_scales=imaging.pixel_scales,
    sub_size=1,
    inner_radius=0.5,  # arcsec; inner edge of the annulus
    outer_radius=2.8,  # arcsec; outer edge of the annulus
)
aplt.Imaging.image(imaging=imaging, mask=mask)
# -
# Next, lets set the `Imaging` and `Mask2D` up as a `MaskedImaging` object and setup a `Tracer` using the input lens
# galaxy model (we don't need to provide the source's `LightProfile`, as we're using a `Mapper` to reconstruct it).
# +
masked_imaging = al.MaskedImaging(
    imaging=imaging, mask=mask, settings=al.SettingsMaskedImaging(sub_size=2)
)
lens_galaxy = al.Galaxy(
    redshift=0.5,
    mass=al.mp.EllipticalIsothermal(
        centre=(0.0, 0.0), elliptical_comps=(0.1, 0.0), einstein_radius=1.6
    ),
)
# Index [1] selects the source plane's traced grid (plane 0 is the image plane).
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, al.Galaxy(redshift=1.0)])
source_plane_grid = tracer.traced_grids_of_planes_from_grid(grid=masked_imaging.grid)[1]
# -
# we'll use another rectangular `Pixelization` and `Mapper` to perform the reconstruction.
# +
# 25x25 rectangular source-plane pixelization and its image<->source mapper.
rectangular = al.pix.Rectangular(shape=(25, 25))
mapper = rectangular.mapper_from_grid_and_sparse_grid(grid=source_plane_grid)
aplt.Mapper.subplot_image_and_mapper(
    image=imaging.image,
    mapper=mapper,
    include=aplt.Include(mask=True, inversion_grid=True),
)
# -
# And now, finally, we're going to use our `Mapper` to invert the image using an `Inversion`. I'll explain how this
# works in a second - but lets just go ahead and use the `Inversion` first. (Ignore the `regularization` input below for
# now, we'll cover this in the next tutorial).
inversion = al.Inversion(
    masked_dataset=masked_imaging,
    mapper=mapper,
    regularization=al.reg.Constant(coefficient=1.0),
)
# Our `Inversion` has a reconstructed image and `Pixelization`, which we can plot using an `Inversion` plotter
# +
aplt.Inversion.reconstructed_image(inversion=inversion, include=aplt.Include(mask=True))
aplt.Inversion.reconstruction(
    inversion=inversion, include=aplt.Include(inversion_grid=True)
)
# -
# And there we have it, we've successfully reconstructed, or, *inverted*, our source using the mapper`s rectangular
# grid. Whilst this source was simple (a blob of light in the centre of the source-plane), `Inversion`'s come into their
# own when fitting sources with complex morphologies. Infact, given we're having so much fun inverting things, lets
# invert a really complex source!
# +
# A harder dataset: five Sersic sources behind the same SIE lens.
dataset_name = "mass_sie__source_sersic_x5"
dataset_path = path.join("dataset", "howtolens", "chapter_4", dataset_name)
imaging = al.Imaging.from_fits(
    image_path=path.join(dataset_path, "image.fits"),
    noise_map_path=path.join(dataset_path, "noise_map.fits"),
    psf_path=path.join(dataset_path, "psf.fits"),
    pixel_scales=0.1,
)
aplt.Imaging.image(imaging=imaging)
# -
# This code is doing all the same as above (setup the `Mask2D`, `Galaxy`'s, `Tracer`, `Mapper`, etc.).
# +
mask = al.Mask2D.circular_annular(
    shape_2d=imaging.shape_2d,
    pixel_scales=imaging.pixel_scales,
    sub_size=1,
    inner_radius=0.1,
    outer_radius=3.2,
)
aplt.Imaging.image(imaging=imaging, mask=mask)
# NOTE(review): no sub_size=2 settings here, unlike the first example -- confirm intended.
masked_imaging = al.MaskedImaging(imaging=imaging, mask=mask)
# Reuse the lens galaxy and 25x25 rectangular pixelization defined earlier.
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, al.Galaxy(redshift=1.0)])
source_plane_grid = tracer.traced_grids_of_planes_from_grid(grid=masked_imaging.grid)[1]
mapper = rectangular.mapper_from_grid_and_sparse_grid(grid=source_plane_grid)
inversion = al.Inversion(
    masked_dataset=masked_imaging,
    mapper=mapper,
    regularization=al.reg.Constant(coefficient=1.0),
)
# -
# Lets inspect the complex source reconstruction.
# +
aplt.Inversion.reconstructed_image(inversion=inversion, include=aplt.Include(mask=True))
aplt.Inversion.reconstruction(
    inversion=inversion, include=aplt.Include(inversion_grid=True)
)
# -
# Pretty great, huh? If you ran the complex source pipeline, you'll remember that getting a model image that looked that
# good simply *was not possible*. With an `Inversion`, we can do it with ease and without fitting 30+ parameters!
#
# Lets discuss how an `Inversion` actually works. The explanation I give below is overly-simplified. I'm avoiding the
# technical details of how an `Inversion` *actually* works. To be good at lens modeling you don't need to understand the
# nitty-gritty details of linear inversions, you just need an instinct for how to use them as a tool to model lenses.
#
# Nevertheless, I know a lot of you hate `black-boxes`, or have an interest in linear algebra. If you're that way
# inclined, then checkout the documentation of the autolens source code for more information. In particular, you should
# look at the following functions in the project PyAutoArray:
#
# autoarray.inversions.mappers.mapping_matrix
# autoarray.opterators.convolution.convolve_mapping_matrix
# autoarray.opterators.inversions.regularization.Regularization
# autoarray.opterators.inversions.inversions.Inversion
#
# To begin, lets consider some random mappings between our mapper`s source-pixels and the image.
# Highlight a handful of source pixels and the image pixels they map to.
aplt.Mapper.subplot_image_and_mapper(
    image=masked_imaging.image,
    mapper=mapper,
    include=aplt.Include(mask=True, inversion_grid=True),
    source_pixel_indexes=[[445], [285], [313], [132], [11]],
)
# These mappings are known before the `Inversion`, which means pre-inversion we know two key pieces of information:
#
# 1) The mappings between every source-pixel and sets of image-pixels.
# 2) The flux values in every observed image-pixel, which are the values we want to fit successfully.
#
# It turns out that with these two pieces of information we can linearly solve for the set of source-pixel fluxes that
# best-fit (e.g. maximize the log likelihood of) our observed image. Essentially, we set up the mapping between source and
# image pixels as a large matrix and solve for the source-pixel fluxes in an analogous fashion to how you would solve a
# set of simultaneous linear equations. This process is called a `linear inversion`.
#
# There are three more things about a linear `Inversion` that are worth knowing:
#
# 1) We've discussed the image sub-grid before, which splits each image-pixel into a sub-pixel. If a sub-grid is
# used, it is the mapping between every sub-pixel and source-pixel that is computed and used to perform the
# `Inversion`. This prevents aliasing effects degrading the image reconstruction, and, as a rule of thumb, I
# would suggest you use sub-gridding of degree 2x2.
#
# 2) When fitting using `LightProfile`'s we discussed how a `model_image` was generated by blurring them with the
# data's PSF. A similar blurring operation is incorporated into the `Inversion`, such that the reconstructed
# image and source fully account for the telescope optics and effect of the PSF.
#
# 3) The inversion`s solution is regularized. But wait, that`s what we'll cover in the next tutorial!
#
# Finally, let me show you how easy it is to fit an image with an `Inversion` using a `FitImaging` oboject. Instead of
# giving the source galaxy a `LightProfile`, we give it a `Pixelization` and `Regularization`, and pass it to a `Tracer`.
# +
source_galaxy = al.Galaxy(
    redshift=1.0,
    # Giving the galaxy a pixelization + regularization (instead of a
    # LightProfile) makes the fit reconstruct the source via an inversion.
    pixelization=al.pix.Rectangular(shape=(40, 40)),
    regularization=al.reg.Constant(coefficient=1.0),
)
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])
# -
# Then, like before, we pass the `MaskedImaging` and `Tracer` to a `FitImaging` object. Indeed, we see some
# pretty good looking residuals - we're certainly fitting the lensed source accurately!
# +
fit = al.FitImaging(masked_imaging=masked_imaging, tracer=tracer)
aplt.FitImaging.subplot_fit_imaging(fit=fit, include=aplt.Include(mask=True))
# -
# And, we're done, here are a few questions to get you thinking about `Inversion`s:
#
# 1) The `Inversion` provides the maximum log likelihood solution to the observed image. Is there a problem with seeking
# the `best-fit`? Is there a risk that we're going to fit other things in the image than just the lensed source
# galaxy? What happens if you reduce the `regularization_coefficient` above to zero?
#
#  2) The exterior pixels in the `Rectangular` `Grid` have no image-pixels in them. However, they are still given a
#  reconstructed flux. If this value isn't coming from a mapping to an image-pixel, where could it be coming from?
| howtolens/chapter_4_inversions/tutorial_3_inversions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import findspark
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from pyspark.sql.functions import when,mean,round,rank,sum,col
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.classification import LogisticRegression,GBTClassifier, GBTClassificationModel ,LogisticRegressionModel
from pyspark.ml.feature import OneHotEncoderEstimator, StringIndexer, VectorAssembler
findspark.init('/opt/cloudera/parcels/SPARK2/lib/spark2')
# Local SparkSession using all available cores.
spark = SparkSession.builder.master('local[*]').appName('binary_classification').getOrCreate()
new_df = spark.read.csv('file:///04_spark_ml/data/classification/application_train.csv', header=True, inferSchema=True)
new_df.printSchema()
# Drop the row id; it carries no predictive signal.
drop_col = ['SK_ID_CURR']
new_df = new_df.select([column for column in new_df.columns if column not in drop_col])
new_df = new_df.withColumnRenamed('TARGET', 'label')
new_df.groupby('label').count().toPandas()
# Pull to pandas only for plotting the class balance.
df_pd = new_df.toPandas()
print(len(df_pd))
plt.figure(figsize=(12,10))
sns.countplot(x='label', data=df_pd, order=df_pd['label'].value_counts().index)
pd.DataFrame(new_df.take(10), columns= new_df.columns)
for i in new_df.dtypes:
    print(i)
# +
# Partition feature names by Spark dtype: string -> categorical, int/double -> numerical.
cat_cols = [item[0] for item in new_df.dtypes if item[1].startswith('string')]
print(str(len(cat_cols)) + ' categorical features')
# NOTE(review): the [1:] presumably drops the first numeric column ('label') -- verify ordering.
num_cols = [item[0] for item in new_df.dtypes if item[1].startswith('int') | item[1].startswith('double')][1:]
print(str(len(num_cols)) + ' numerical features')
# -
def info_missing_table(df_pd):
    """Summarize missing values in a pandas DataFrame.

    (The notebook defined this function twice with identical logic; the
    duplicate definition has been removed.)

    Args:
        df_pd: pandas DataFrame to inspect.

    Returns:
        DataFrame indexed by column name with 'Missing Values' (count) and
        '% of Total Values' columns, restricted to columns that have at least
        one missing value and sorted by percentage descending (rounded to 1dp).
    """
    mis_val = df_pd.isnull().sum()  # total nulls per column
    mis_val_percent = 100 * df_pd.isnull().sum() / len(df_pd)  # nulls as % of rows
    mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)  # join count and percent side by side
    mis_val_table_ren_columns = mis_val_table.rename(
        columns = {0 : 'Missing Values', 1 : '% of Total Values'})
    # Keep only columns with missing data, most-missing first.
    mis_val_table_ren_columns = mis_val_table_ren_columns[
        mis_val_table_ren_columns.iloc[:,1] != 0].sort_values('% of Total Values', ascending=False).round(1)
    print ("Your selected dataframe has " + str(df_pd.shape[1]) + " columns.\n"
           "There are " + str(mis_val_table_ren_columns.shape[0]) +
           " columns that have missing values.")
    return mis_val_table_ren_columns
missings = info_missing_table(df_pd)
missings
def count_missings(spark_df):
    """Count null values per column of a Spark DataFrame.

    Args:
        spark_df: Spark DataFrame (anything exposing .dtypes, __getitem__,
            .where and .count works).

    Returns:
        List of (column_name, null_count) tuples, only for columns that
        contain at least one null, in .dtypes order.
    """
    null_counts = []
    # Loop variable renamed from 'col', which shadowed the imported
    # pyspark.sql.functions.col inside this function.
    for cname, _ctype in spark_df.dtypes:
        nulls = spark_df.where(spark_df[cname].isNull()).count()  # nulls in this column
        if nulls != 0:  # keep only columns with missing values
            null_counts.append((cname, nulls))
    return null_counts
miss_counts = count_missings(new_df)
miss_counts
# +
list_cols_miss=[x[0] for x in miss_counts]
df_miss= new_df.select(*list_cols_miss)
#categorical columns
catcolums_miss=[item[0] for item in df_miss.dtypes if item[1].startswith('string')] #will select name of column with string data type
print("cateogrical columns_miss:", catcolums_miss)
### numerical columns
numcolumns_miss = [item[0] for item in df_miss.dtypes if item[1].startswith('int') | item[1].startswith('double')] #will select name of column with integer or double data type
# print("numerical columns_miss:", numcolumns_miss)
# -
# Impute categorical columns with the mode computed on fully-observed rows.
df_Nomiss=new_df.na.drop()
for x in catcolums_miss:
    mode=df_Nomiss.groupBy(x).count().sort(col("count").desc()).collect()[0][0]
    print(x, mode) #print column name and its most frequent category
    new_df = new_df.na.fill({x:mode})
# Impute numerical columns with the (rounded) column mean.
for i in numcolumns_miss:
    meanvalue = new_df.select(round(mean(i))).collect()[0][0]
    print(i, meanvalue)
    new_df=new_df.na.fill({i:meanvalue})
# +
# Class weighting for the imbalanced label: positives get 0.91, negatives 0.09.
ratio = 0.91
def weight_balance(labels):
    # Returns a Column expression: ratio where label == 1, else 1*(1-ratio).
    return when(labels == 1, ratio).otherwise(1*(1-ratio))
new_df = new_df.withColumn('weights', weight_balance(col('label')))
# -
new_df.limit(10).toPandas()
# Re-check: no columns should report missing values after imputation.
miss_counts2 = count_missings(new_df)
miss_counts2
pd.DataFrame(new_df.take(10), columns= new_df.columns)
# +
stages = []
# For each categorical column: string -> integer index -> one-hot vector.
for categoricalCol in cat_cols:
    stringIndexer = StringIndexer(inputCol = categoricalCol, outputCol = categoricalCol + 'Index')
    # NOTE(review): OneHotEncoderEstimator was renamed OneHotEncoder in Spark 3.x.
    encoder = OneHotEncoderEstimator(inputCols=[stringIndexer.getOutputCol()], outputCols=[categoricalCol + "classVec"])
    stages += [stringIndexer, encoder]
# Assemble one-hot vectors + numeric columns into a single 'features' vector.
assemblerInputs = [c + "classVec" for c in cat_cols] + num_cols
assembler = VectorAssembler(inputCols=assemblerInputs, outputCol="features")
stages += [assembler]
# -
cols = new_df.columns
pipeline = Pipeline(stages = stages)
pipelineModel = pipeline.fit(new_df)
new_df = pipelineModel.transform(new_df)
# Keep the engineered 'features' column alongside the original columns.
selectedCols = ['features']+cols
new_df = new_df.select(selectedCols)
pd.DataFrame(new_df.take(5), columns=new_df.columns)
# 80/20 train/test split with a fixed seed for reproducibility.
train, test = new_df.randomSplit([0.80, 0.20], seed = 42)
print(train.count())
print(test.count())
# ## Training models
LR = LogisticRegression(featuresCol = 'features', labelCol = 'label', maxIter=15)
LR_model = LR.fit(train)
# NOTE(review): the 'weights' column is computed but never passed to the
# estimators (no weightCol=...) -- confirm whether weighting was intended.
new_df.select('label','weights').toPandas()
# +
# Training ROC curve from the fitted model's summary.
trainingSummary = LR_model.summary
roc = trainingSummary.roc.toPandas()
plt.plot(roc['FPR'],roc['TPR'])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.title('ROC Curve')
plt.show()
print('Training set ROC: ' + str(trainingSummary.areaUnderROC))
# -
# Held-out AUC for logistic regression.
predictions_LR = LR_model.transform(test)
evaluator = BinaryClassificationEvaluator()
print("Test_SET Area Under ROC: " + str(evaluator.evaluate(predictions_LR, {evaluator.metricName: "areaUnderROC"})))
# Gradient-boosted trees baseline.
gbt = GBTClassifier(maxIter=15)
GBT_Model = gbt.fit(train)
predictions = GBT_Model.transform(test)
evaluator = BinaryClassificationEvaluator()
print("Test_SET Area Under ROC: " + str(evaluator.evaluate(predictions, {evaluator.metricName: "areaUnderROC"})))
# +
# 5-fold cross-validated grid search over GBT hyper-parameters
# (3 * 2 * 2 = 12 candidates; this step is slow on large data).
paramGrid = (ParamGridBuilder()
             .addGrid(gbt.maxDepth, [2, 4, 6])
             .addGrid(gbt.maxBins, [20, 30])
             .addGrid(gbt.maxIter, [10, 15])
             .build())
cv = CrossValidator(estimator=gbt, estimatorParamMaps=paramGrid, evaluator=evaluator, numFolds=5)
cvModel = cv.fit(train)
predictions = cvModel.transform(test)
evaluator.evaluate(predictions)
# -
# Persist both models, then reload them to verify round-tripping.
cvModel.bestModel.write().overwrite().save('gbt_model')
LR_model.write().overwrite().save('lr_model')
LR_model2 = LogisticRegressionModel.load('lr_model/')
predictions_LR2 = LR_model2.transform(test)
evaluator = BinaryClassificationEvaluator()
print("Test_SET Area Under ROC: " + str(evaluator.evaluate(predictions_LR2, {evaluator.metricName: "areaUnderROC"})))
# NOTE(review): duplicate save -- gbt_model was already saved above.
cvModel.bestModel.write().overwrite().save('gbt_model')
gbt_model2 = GBTClassificationModel.load('gbt_model/')
gbt_model2.explainParams()
predictions_gbt2 = gbt_model2.transform(test)
evaluator.evaluate(predictions_gbt2)
| 04_spark_ml/Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="BRAp37uklN9X"
# # Class \#8 activities
# + [markdown] id="0dWbgIX_lSyQ"
# ## Code from Video Lesson \#8
# + id="1J7Woi6olF45" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1604079107012, "user_tz": 420, "elapsed": 17058, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjCBYTiuomqOsCakND1k_5wj0kYvFY53Jt7kunt=s64", "userId": "11255944928409084259"}} outputId="11fdd894-1cb8-4e5b-80d0-ea15477dd2e4"
# Upload file to Google Drive, then save filepath
# NOTE: you'll need to change this variable to match your own filepath
filepath = 'drive/My Drive/OCEAN 215 - data/nino34.long.data.txt'
# Give Colab access to Google Drive (this prompts for authorization).
from google.colab import drive
drive.mount('/content/drive')
# + id="lk6GV73pKTxc" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1604079107658, "user_tz": 420, "elapsed": 17694, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjCBYTiuomqOsCakND1k_5wj0kYvFY53Jt7kunt=s64", "userId": "11255944928409084259"}} outputId="1a11a159-0c33-4bea-9731-28aa44c4dd7e"
# Load file into Colab
import numpy as np
# Column 0 holds the year; columns 1-12 hold that year's monthly values.
years = np.genfromtxt(filepath,skip_header=1,skip_footer=5,usecols=0,dtype=int,delimiter=None)
print(years.shape) # Check dimensions of the years NumPy array
data = np.genfromtxt(filepath,skip_header=1,skip_footer=5,usecols=range(1,13),dtype=float,delimiter=None)
print(data.shape) # Check dimensions of the data NumPy array
print(data) # Get a preview of the data by printing
# + id="abI47aZCKWol" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1604079107659, "user_tz": 420, "elapsed": 17687, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjCBYTiuomqOsCakND1k_5wj0kYvFY53Jt7kunt=s64", "userId": "11255944928409084259"}} outputId="0ed8196c-2273-4860-cbd1-a28c40d781db"
# Mask out the missing data using np.NaN (a placeholder)
# -99.99 is the file's missing-data sentinel value.
data[data == -99.99] = np.NaN
# Check updated array
print(data)
# + id="NgI7PyRAKoAl" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1604079107660, "user_tz": 420, "elapsed": 17679, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjCBYTiuomqOsCakND1k_5wj0kYvFY53Jt7kunt=s64", "userId": "11255944928409084259"}} outputId="766ce3ed-3ce2-4e7d-c121-7d73c0045629"
# Reshape from 2D (years x months) to 1D, because the data is a time series.
data_1d = np.reshape(data,(data.size,))
print(data_1d)
# Alternate way of reshaping a 2D array to a 1D array
data_1d = data.flatten()
print(data_1d)
# + id="B6zuOzQ1KYxB" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1604079107660, "user_tz": 420, "elapsed": 17670, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjCBYTiuomqOsCakND1k_5wj0kYvFY53Jt7kunt=s64", "userId": "11255944928409084259"}} outputId="b06ebda4-d0a4-4348-8480-511d4f53f55e"
# Construct 1-D time array (for x-values, because we plot x-values vs. y-values)
# We want it to look like this:
# [ January 15, 1870,
#   February 15, 1870,
#   March 15, 1870,
#   ...
#   November 15, 2020,
#   December 15, 2020 ]
from datetime import datetime
# Month numbers 1-12, repeated once per year of data.
all_months = np.tile(range(1,13),len(years))
# print(all_months)
# FIX: derive the year labels from the `years` array loaded from the file
# rather than hard-coding range(1870,2021); this stays correct if the data
# file is ever extended with more years.
all_years = np.repeat(years,12)
# print(all_years)
# Use mid-month (the 15th) as a representative date for each monthly value.
datetimes = [datetime(all_years[idx],all_months[idx],15) for idx in range(data.size)]
datetimes = np.array(datetimes) # Because we prefer arrays, not lists
print(datetimes)
# + id="B2JLTQFyKaC6" colab={"base_uri": "https://localhost:8080/", "height": 295} executionInfo={"status": "ok", "timestamp": 1604080424191, "user_tz": 420, "elapsed": 624, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjCBYTiuomqOsCakND1k_5wj0kYvFY53Jt7kunt=s64", "userId": "11255944928409084259"}} outputId="3176fc60-806b-431c-c4c9-e81517ce74c5"
# Plot the El Niño index: a thin black line with orange point markers on top.
import matplotlib.pyplot as plt
plt.subplots(figsize=(16,4))
plt.plot(datetimes,data_1d,color='k',lw=1) # color options: https://matplotlib.org/3.3.2/gallery/color/named_colors.html
plt.scatter(datetimes,data_1d,s=4,c='darkorange')
plt.title('Niño 3.4 index')
plt.xlabel('Time')
plt.ylabel('Average sea surface temperature (°C)')
plt.grid()
# + [markdown] id="HOdzj6Iir_mi"
# ## In-class live coding
#
# Note that we care about the state of El Niño in the Pacific Northwest because it affects our weather. For instance, we're currently in a La Niña state, which tends to bring colder, rainy weather to Seattle:
#
# https://www.cpc.ncep.noaa.gov/products/analysis_monitoring/enso_advisory/ensodisc.shtml
# + id="dIOAmigrKsj8" colab={"base_uri": "https://localhost:8080/", "height": 312} executionInfo={"status": "ok", "timestamp": 1604079108231, "user_tz": 420, "elapsed": 18221, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjCBYTiuomqOsCakND1k_5wj0kYvFY53Jt7kunt=s64", "userId": "11255944928409084259"}} outputId="f7d74e8f-8ecc-4c2c-ec56-45fbee8afe4a"
# Goal: zoom into just a portion of the x-axis (years 1980-2020)
fig, ax = plt.subplots(figsize=(16,4))
plt.plot(datetimes,data_1d,c='k',lw=1) # color options: https://matplotlib.org/3.3.2/gallery/color/named_colors.html
plt.scatter(datetimes,data_1d,s=4,c='darkorange')
plt.title('Niño 3.4 index')
plt.xlabel('Time')
plt.ylabel('Average sea surface temperature (°C)')
plt.grid()
# Three equivalent ways to restrict the x-axis range:
# plt.xlim(datetime(1980,1,1),datetime(2020,12,31)) # Option 1 (call xlim() on the plt module)
# ax.set_xlim(datetime(1980,1,1),datetime(2020,12,31)) # Option 2 (call set_xlim() on the axes object saved from plt.subplots())
plt.gca().set_xlim(datetime(1980,1,1),datetime(2020,12,31)) # Option 3 (call set_xlim() on the current axes)
# + id="onCLq6ioMTrH" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1604079108232, "user_tz": 420, "elapsed": 18215, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjCBYTiuomqOsCakND1k_5wj0kYvFY53Jt7kunt=s64", "userId": "11255944928409084259"}} outputId="ff5b021d-0a30-4bb3-9928-c11fe892bd2f"
# This is how we test for NaNs
# np.isnan() returns a boolean (True or False)
np.isnan(50) # Returns False
np.isnan(np.nan) # Returns True
# Calculate average value of El Niño index, ignoring NaN values.
# Accumulate into `total` rather than `sum` so we don't shadow the
# built-in sum() function.
total = 0.0
nan_counter = 0
for value in data_1d:
    if np.isnan(value): # Notice the if-statement is inside the for-loop
        print('We found a NaN')
        nan_counter += 1
    else: # The else statement will be entered when "value" is not a NaN
        total += value
print(total)
average = total / (len(data_1d) - nan_counter) # Exclude NaN values from average (nan_counter is 4 here)
print(average)
# Round to one decimal place
print(round(average,1))
# + id="tDWMKK-2c232"
# Center the El Niño index at y = 0 by subtracting the average temperature
# from every element (working on a copy so data_1d is left untouched).
data_1d_shifted = data_1d.copy()
for idx, _ in enumerate(data_1d_shifted):
    data_1d_shifted[idx] -= average
# (Equivalent one-liner: data_1d_shifted = data_1d - average)
# + id="wkqlfooMdAqV" colab={"base_uri": "https://localhost:8080/", "height": 295} executionInfo={"status": "ok", "timestamp": 1604079108592, "user_tz": 420, "elapsed": 18564, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjCBYTiuomqOsCakND1k_5wj0kYvFY53Jt7kunt=s64", "userId": "11255944928409084259"}} outputId="4e9ab326-7e7d-40ca-faa6-9790ccf15e23"
# Add a horizontal line at y = 0 so deviations above/below average are easy to see.
fig, ax = plt.subplots(figsize=(16,4))
plt.plot(datetimes,data_1d_shifted,c='k',lw=1)
plt.plot([datetime(1865,1,1),datetime(2025,1,1)],[0,0],ls='--',c='k') # This is a line between two points at (1865,0) and (2025,0)
plt.scatter(datetimes,data_1d_shifted,s=4,c='darkorange')
plt.title('Niño 3.4 index')
plt.xlabel('Time')
plt.ylabel('Average sea surface temperature (°C)')
plt.grid()
# + id="R_oWsir_fWhJ" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1604079108594, "user_tz": 420, "elapsed": 18558, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjCBYTiuomqOsCakND1k_5wj0kYvFY53Jt7kunt=s64", "userId": "11255944928409084259"}} outputId="5f24562f-49b9-4b84-b980-1f403b7fea0a"
# Calculate average value of El Niño index using NumPy
# This won't work, because it will give the answer "NaN"...
average = np.mean(data_1d)
print('Option 1:',average)
# ... so we have to slice out the NaNs
# (assumes the 4 NaN values sit at the very end of the series — TODO confirm)
average = np.mean(data_1d[:-4])
print('Option 2:',average)
# Or we can get rid of the NaNs using conditional indexing
average = np.mean(data_1d[~np.isnan(data_1d)]) # Here, np.isnan(data_1d) returns a Boolean array,
# which we reverse using the tilde (~) to turn True to False, and False to True
print('Option 3:',average)
# Or we can ignore NaNs using the alternate NaN-excluding version of np.mean()
average = np.nanmean(data_1d)
print('Option 4:',average)
| materials/class/class_8_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# PyNE Tutorial
# =============
#
# Welcome to the PyNE tutorial!
#
# 
#
# # Before you begin
# * Hopefully you've installed the virtual machine if necessary.
# - If not, please go to http://pyne.io/install/vb.html#vb to install a virtual machine.
# - If so, open it now and open Accessories -> LXTerminal, navigate to ~/opt/pyne
# # Is PyNE Installed?
#
# You can verify that PyNE is successfully installed by running the following:
#
# Importing pyne and looking up a nuclide's atomic mass verifies the install works.
from pyne import data
print(data.atomic_mass('U235'))
# # Open the tutorial
#
# You can start the tutorial by navigating to `~/opt/pyne/tutorial` and entering
#
# ```bash
# $ jupyter notebook --matplotlib=inline
# ```
#
# This should open the tutorial in a web browser. We'll start with `00-intro`.
# # What is PyNE?
#
# As our tagline says, PyNE is the open source nuclear engineering toolkit.
#
# * PyNE is intended as a library of composable tools that is used to build nuclear science and engineering applications.
# * It is permissively licensed (2-clause BSD).
# * It supports both a C++ and a Python API.
# * The name 'PyNE' is a bit of a misnomer since most of the code base is in C++ but most daily usage happens in Python.
# * The v0.5 code is the current stable release.
# * As an organization, PyNE was born in April 2011. However, core parts of PyNE have existed since 2007.
# # What are the goals of PyNE?
#
# To help nuclear engineers:
#
# * be more productive,
# * have the *best* solvers,
# * have a beautiful API,
# * write really great code,
# * and teach the next generation.
#
# And of course, to perform neat tricks like this:
import numpy as np
from pyne import mesh, material
from pyne.xs.channels import sigma_t
# +
# import a few things necessary for plotting in the notebook
from yt.config import ytcfg; ytcfg["yt","suppressStreamLogging"] = "True"
from yt.frontends.moab.api import PyneMoabHex8Dataset
from yt.mods import SlicePlot
# create reactor mesh: a 100x100 structured grid over the unit square,
# one cell thick in z
m = mesh.Mesh(structured_coords=[np.linspace(0.0, 1.0, 101), np.linspace(0.0, 1.0, 101), [0.0, 1.0]], structured=True)
# UO2 fuel (4.5% enriched) and light-water coolant materials
fuel = material.from_atom_frac({'U235': 0.045, 'U238': 0.955, 'O16': 2.0}, mass=1.0, density=10.7)
cool = material.from_atom_frac({'H1': 2.0, 'O16': 1.0}, mass=1.0, density=1.0)
# Assign fuel to cells within radius 0.5 of the origin (a quarter-disc of
# this quadrant), coolant everywhere else.
for i, mat, ve in m:
    coord = m.mesh.get_coords(ve)
    m.mats[i] = fuel if (coord[0]**2 + coord[1]**2) <= 0.5**2 else cool
# create a total cross section tag on the mesh (computed lazily per cell)
m.sigma_t = mesh.ComputedTag(lambda mesh, i: sigma_t(mesh.mats[i], group_struct=[10.0, 1e-6], phi_g=[1.0])[0])
# create a uranium mass tag on the mesh ('U':'Np' slices out the uranium isotopes)
m.u_mass = mesh.ComputedTag(lambda mesh, i: max(mesh.mats[i]['U':'Np'].mass, 0.0))
# plot the total cross section
SlicePlot(PyneMoabHex8Dataset(m), 'z', 'sigma_t', origin='native').display()
# print the total mass of uranium in the reactor mesh
print("U Content of Mesh:", sum(m.u_mass[:]), "grams")
# -
# # What could you do with PyNE?
#
# As a **user** you could do your work or research with PyNE. Even if you have your own software that looks and behaves similarly to some aspects of PyNE, using PyNE will mean that you no longer have to develop AND maintain that functionality.
#
# As a **developer** you should be selfish. Contribute to PyNE in ways that support the work that you are doing. If a feature you want is not in PyNE right now, chances are that other people want to see that feature too! This will help your future self as much as future other people.
# # What goes into PyNE?
#
# Anything that is not export controllable, proprietary, or under HIPAA restrictions! (If you have questions, ask.)
# # Contact PyNE
#
# **Website:** http://pyne.io/
#
# **User's Mailing List:** <EMAIL>
#
# **Developer's List:** <EMAIL>
# # Questions?
| tutorial/00-intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from keras.models import Sequential
model = Sequential()
from keras.layers import Dense
# Single tanh unit over 2-D input: a minimal linear-separator network.
model.add(Dense(1, activation='tanh', input_dim=2, kernel_initializer='zeros'))
model.compile(optimizer='sgd', loss='mean_squared_error', metrics=['accuracy'])
# FIX: `sklearn.datasets.samples_generator` was deprecated and then removed
# (scikit-learn >= 0.24); make_blobs is importable directly from sklearn.datasets.
from sklearn.datasets import make_blobs
# Two Gaussian blobs, fixed seed for reproducibility.
X, y = make_blobs(n_samples=100, centers=2, cluster_std=2.2, random_state=42)
model.fit(X, y, epochs=400, batch_size=100, shuffle=False, verbose=0)
# Returns [loss, accuracy] evaluated on the training data itself.
model.evaluate(X, y)
| Section03/.ipynb_checkpoints/Getting Acquainted with Deep Learning-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Introduction to Bayesian modeling with PyMC3
#
# This post is devoted to give an introduction to Bayesian modeling using [PyMC3](https://pymc-devs.github.io/pymc3/notebooks/getting_started.html), an open source probabilistic programming framework written in Python. Part of this material was presented in the Python Users Berlin (PUB) meet up.
#
# <img src="images/PyMC3_banner.svg" alt="html" style="width: 400px;"/>
#
# Why PyMC3? As described in the documentation:
#
# - PyMC3’s user-facing features are written in pure Python, it leverages [Theano](http://deeplearning.net/software/theano/) to transparently transcode models to C and compile them to machine code, thereby boosting performance.
#
# - Theano is a library that allows expressions to be defined using generalized vector data structures called tensors, which are tightly integrated with the popular [NumPy](http://www.numpy.org/) ndarray data structure.
#
# In addition, from a practical point of view, PyMC3's syntax is very transparent: models read almost exactly like their mathematical formulation.
#
# This post is not aimed to give a full treatment of the ~~mathematical details~~, as there are many good (complete and detailed) references around these topics. Also, we are not going to dive deep into PyMC3 as all the details can be found in the documentation. Instead, we are interested in giving an overview of the basic mathematical concepts combined with examples (written in Python code) which should make clear why [Monte Carlo](https://en.wikipedia.org/wiki/Monte_Carlo_method) simulations are useful in Bayesian modeling.
# + [markdown] slideshow={"slide_type": "slide"}
# # 1. Mathematical Background
#
# # 1.1 Bayes Theorem
#
# ## Frequentist vs Bayesian
#
# *The essential difference between frequentist inference and Bayesian inference is the same as the difference between the two interpretations of what a "probability" means*.
#
# **Frequentist inference** is a method of statistical inference in which conclusions from data is obtained by emphasizing the frequency or proportion of the data.
#
# **Bayesian inference** is a method of statistical inference in which Bayes' theorem is used to update the probability for a hypothesis as more evidence or information becomes available.
# -
# ## Conditional Probability
#
# Let \\(A\\) and \\(B\\) be two events, then the *conditional probability* of \\(A\\) given \\(B\\) is defined as the ratio
#
# \begin{equation}
# P(A|B):=\frac{P(A\cap B)}{P(B)}
# \end{equation}
#
# *Remark:* Formally we have a [probability space](https://en.wikipedia.org/wiki/Probability_space) \\((\Omega, \mathcal{F}, P)\\), where \\(\Omega\\) is the sample space, \\(\mathcal{F}\\) is a \\(\sigma\\)-algebra on \\(\Omega\\) and \\(P\\) is a probability measure. The events \\(A\\), and \\(B\\) are elements of \\(\mathcal{F}\\) and we assume that \\(P(B)\neq 0\\).
#
# Observe in particular
#
# \begin{equation}
# P(A|B)P(B)=P(A\cap B)=P(B\cap A) = P(B|A)P(A)
# \end{equation}
# + [markdown] slideshow={"slide_type": "slide"}
# ## Bayes Theorem
#
# From the last formula we obtain the relation
#
# \begin{equation}
# P(A|B)=\frac{P(B|A)P(A)}{P(B)}
# \end{equation}
#
# which is known as [Bayes Theorem](https://en.wikipedia.org/wiki/Bayes%27_theorem).
#
# **Example:** Suppose you are in the U-Bahn and you see a person with long hair. You want to know the probablity that this person is a woman. Consider the events \\(A=\\) woman \\(B=\\) long hair. You want to compute \\(P(A|B)\\). Suppose that you estimate \\(P(A)=0.5\\), \\(P(B)=0.4\\) and \\(P(B|A)=0.7\\) (the probability that a woman has long hair). Then, given these prior estimated probabilities, Bayes theorem gives
#
# \begin{equation}
# P(A|B)=\frac{P(B|A)P(A)}{P(B)} = \frac{0.7\times 0.5}{0.4} = 0.875.
# \end{equation}
# + [markdown] slideshow={"slide_type": "slide"}
# ## Bayesian Approach to Data Analysis
#
# Assume that you have a sample of observations \\(y_1,..., y_n\\) of a random variable \\(Y\sim f(y|\theta)\\), where \\(\theta\\) is a parameter for the distribution. Here we consider \\(\theta\\) as a random variable as well. Following Bayes Theorem (its continuous version) we can write.
#
# \begin{equation}
# f(\theta|y)=\frac{f(y|\theta)f(\theta)}{f(y)} =
# \displaystyle{\frac{f(y|\theta)f(\theta)}{\int f(y|\theta)f(\theta)d\theta}}
# \end{equation}
#
# - The function \\(f(y|\theta)\\) is called the *likelihood*.
#
# - \\(f(\theta)\\) is the *prior* distribution of \\(\theta\\).
#
#
# Note that \\(f(y)\\) *does not* depend on \\(\theta\\) (just on the data), thus it can be considered as a "normalizing constant". In addition, it is often the case that the integral above is not easy to compute. Nevertheless, it is enough to consider the relation:
#
#
#
# \begin{equation}
# f(\theta|y) \propto \text{likelihood} \times \text{prior}.
# \end{equation}
#
# (Here \\(\propto\\) denotes the proportionality relation)
# -
# ## Example: Poisson Data
#
# In order to give a better sense of the relation above we are going to study a concrete example. Consider a \\(n\\) samples of \\(Y\sim Poiss(\lambda)\\). Recall that the [Poisson distribution](https://en.wikipedia.org/wiki/Poisson_distribution) is given by:
#
# $$
# \displaystyle{
# f(y_i|\lambda)=\frac{e^{-\lambda}\lambda^{y_i}}{y_i!}
# }
# $$
#
# where \\(\lambda>0\\). It is easy to verify that \\(E(Y)=\lambda\\) and \\(Var(Y)=\lambda\\). Parallel to the formal discussion, we are going to implement a numerical simulation:
# +
import numpy as np
import scipy.stats as ss
# We set a seed so that the results are reproducible.
np.random.seed(5)
# number of samples.
n = 100
# true parameter.
lam_true = 2
# sample array: n i.i.d. draws from Poisson(lam_true).
y = np.random.poisson(lam=lam_true, size=n)
y
# -
# mean of the sample (should be close to lam_true, the Poisson mean).
y.mean()
# +
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
# %matplotlib inline
# Histogram of the sample.
plt.figure(figsize=(8, 6))
plt.hist(y, bins=15)
plt.title('Histogram of Simulated Data');
# -
# ### Prior: Gamma Distribution
#
# Let us consider a [gamma](https://en.wikipedia.org/wiki/Gamma_distribution) prior distribution for the parameter \\(\lambda \sim \Gamma(a,b)\\). Recall that the density function for the gamma distribution is
#
# \begin{equation}
# f(\lambda)=\frac{b^a}{\Gamma(a)}\lambda^{a-1} e^{-b\lambda}
# \end{equation}
#
# where \\(a>0\\) is the *shape* parameter and \\(b>0\\) is the *rate parameter*.
#
# The *expected value* and *variance* of the gamma distribution is
#
# $$
# E(\lambda)=\frac{a}{b}
# \quad
# \text{and}
# \quad
# Var(\lambda)=\frac{a}{b^2}
# $$
# +
# Parameters of the prior gamma distribution.
a = 3.5 # shape
b = 2 # rate = 1/scale
x = np.linspace(start=0,stop=10, num=100)
plt.figure(figsize=(8, 6))
# scipy parametrizes the gamma by `scale`, which is 1/rate.
plt.plot(x, ss.gamma.pdf(x,a=a,scale=1/b), 'r-')
plt.title('Gamma Density Function for a={} and b={}'.format(a,b))
# Define the prior distribution as a named function rather than a
# lambda assigned to a name (PEP 8); behavior is identical for callers.
def prior(lam):
    """Gamma(a, b) prior density evaluated at `lam` (scalar or array)."""
    return ss.gamma.pdf(lam, a=a, scale=1/b)
# -
# ### Likelihood
#
# As the observations are independent the [likelihood](https://en.wikipedia.org/wiki/Likelihood_function) function is
#
# \begin{align}
# f(y|\lambda)=&\prod_{i=1}^{n} \frac{e^{-\lambda}\lambda^{y_i}}{y_i!}
# =\frac{e^{-n\lambda}\lambda^{\sum_{i=1}^n y_i}}{\prod_{i=1}^{n}y_i!}
# \end{align}
# +
import scipy.special as sp
# Define the likelihood function.
def likelihood(lam, y):
    """Poisson likelihood f(y | lam) of the i.i.d. sample `y`.

    Computes exp(-n*lam) * lam**sum(y) / prod(y_i!), where the factorials
    are evaluated via the gamma function (y! = Gamma(y + 1)).
    """
    # sp.gamma is a vectorized ufunc, so it applies elementwise to `y`
    # directly; the original np.apply_along_axis wrapper was unnecessary.
    factorials = sp.gamma(y + 1)
    numerator = np.exp(-lam * y.size) * (lam ** y.sum())
    denominator = np.multiply.reduce(factorials)
    return numerator / denominator
# -
# ### Posterior distribution for \\(\lambda\\) up to a constant
#
# As we are just interested in the structure of the posterior distribution, up to a constant, we see
#
# \begin{align}
# f(\lambda|y)\propto & \text{likelihood} \times \text{prior}\\
# \propto & \quad f(y|\lambda)f(\lambda)\\
# \propto & \quad e^{-n\lambda}\lambda^{\sum_{i=1}^n y_i} \lambda^{a-1} e^{-b\lambda}\\
# \propto & \quad \lambda^{\left(\sum_{i=1}^n y_i+a\right)-1} e^{-(n+b)\lambda}\\
# \end{align}
# +
# Define the posterior distribution.
# (up to a constant: posterior ∝ likelihood × prior)
def posterior_up_to_constant(lam,y):
    """Unnormalized posterior density of lam given the sample y."""
    return likelihood(lam,y)*prior(lam)
# Plot of the prior and (scaled) posterior distribution
# for the parameter lambda.
#
# We multiply the posterior distribution function
# by the amplitude factor 2.0e74 to make it comparable
# with the prior gamma distribution.
plt.figure(figsize=(8, 6))
plt.plot(x, 2.0e74*posterior_up_to_constant(x,y), label='posterior')
plt.plot(x, ss.gamma.pdf(x,a=a,scale=1/b), 'r-', label='prior')
plt.legend();
# + [markdown] slideshow={"slide_type": "slide"}
# ### True posterior distribution for \\(\lambda\\)
#
# In fact, as \\(f(\lambda|y) \propto\: \lambda^{\left(\sum_{i=1}^n y_i+a\right)-1} e^{-(n+b)\lambda}\\), one verifies that the posterior distribution is again a gamma
#
# \begin{align}
# f(\lambda|y) = \Gamma\left(\sum_{i=1}^n y_i+a, n+b\right)
# \end{align}
#
# This means that the gamma and Poisson distribution form a [conjugate pair](https://en.wikipedia.org/wiki/Conjugate_prior).
#
# +
def posterior(lam, y):
    """Exact posterior density: lam | y ~ Gamma(a + sum(y), b + n)."""
    post_shape = a + y.sum()
    post_rate = b + y.size
    return ss.gamma.pdf(lam, post_shape, scale=1.0 / post_rate)
plt.figure(figsize=(8, 6))
plt.plot(x, posterior(x,y))  # exact posterior (blue)
plt.plot(x, ss.gamma.pdf(x,a=a,scale=1/b), 'r-')  # prior (red)
plt.title('Prior and Posterior Distributions');
# -
# We indeed see how the posterior distribution is concentrated around the true parameter \\(\lambda=2\\).
#
# Note that the posterior mean is
#
# \begin{align}
# \frac{\sum_{i=1}^n y_i+a}{n+b} = \frac{b}{n+b}\frac{a}{b}+\frac{n}{n+b}\frac{\sum_{i=1}^n y_i}{n}
# \end{align}
#
# That is, it is a weighted average of the prior mean \\(a/b\\) and the sample average \\(\bar{y}\\). As \\(n\\) increases,
#
# \begin{align}
# \lim_{n\rightarrow +\infty}\frac{b}{n+b}\frac{a}{b}+\frac{n}{n+b}\frac{\sum_{i=1}^n y_i}{n} = \bar{y}.
# \end{align}
#
# +
# Posterior gamma parameters (conjugate update of the Gamma(a, b) prior).
shape = a + y.sum()
rate = b + y.size
# Posterior mean = shape/rate, a weighted average of prior mean and sample mean.
shape/rate
# -
# # 1.2 Markov Chain Monte Carlo (MCMC) Approach
#
# In the last example the posterior distribution was easy to identify. However, in practice this is not usually the case and therefore, via Bayes Theorem, we would only know the posterior distribution up to a constant. This motivates the idea of using Monte Carlo simulation methods. How can we sample from a distribution that we do not know? The Metropolis–Hastings algorithm, explained next, is one approach to tackle this problem.
# ## Metropolis–Hastings algorithm
#
# Let \\(\phi\\) be a function that is proportional to the desired probability distribution \\(f\\).
#
# **Initialization:**
#
# Pick \\(x_{0}\\) to be the first sample, and choose an arbitrary probability density
#
# \begin{equation}
# g(x_{n+1}| x_{n})
# \end{equation}
#
# that suggests a candidate for the next sample value \\(x_{n+1}\\). Assume \\(g\\) is symmetric.
#
# **For each iteration:**
#
# Generate a candidate \\(x\\) for the next sample by picking from the distribution \\(g(x|x_n)\\). Calculate the *acceptance ratio*
#
# \begin{equation}
# \alpha := \frac{f(x)}{f(x_n)} = \frac{\phi(x)}{\phi(x_n)}
# \end{equation}
#
# If \\(\alpha \geq 1\\), automatically accept the candidate by setting
#
# \begin{equation}
# x_{n+1} = x.
# \end{equation}
#
# Otherwise, accept the candidate with probability \\(\alpha \\). If the candidate is rejected, set
#
# \begin{equation}
# x_{n+1} = x_{n}.
# \end{equation}
# Why does this algorithm solve the initial problem? The full explanation is beyond the scope of this post (some references are provided at the end). It relies on the following result.
# ## Ergodic Theorem for Markov Chains
#
# **Theorem (Ergodic Theorem for Markov Chains)** If \\(\{x^{(1)} , x^{(2)} , . . .\}\\) is an *irreducible*, *aperiodic* and *recurrent* [Markov chain](https://en.wikipedia.org/wiki/Markov_chain), then there is a unique probability distribution \\(\pi\\) such that as \\(N\longrightarrow\infty\\),
#
# - \\(P(x^{(N)} ∈ A) \longrightarrow \pi(A)\\).
# - \\(\displaystyle{\frac{1}{N}\sum_{n=1}^{N} g(x^{(n)})) \longrightarrow \int g(x)\pi(x) dx }\\).
#
# *Recall:*
#
# - A Markov chain is said to be **irreducible** if it is possible to get to any state from any state.
#
# - A state \\(n\\) has **period** \\(k\\) if any return to state \\(n\\) must occur in multiples of \\(k\\) time steps.
#
# - If \\(k=1\\), then the state is said to be **aperiodic**.
#
# - A state \\(n\\) is said to be **transient** if, given that we start in state \\(n\\), there is a non-zero probability that we will never return to \\(i\\).
#
# - A state \\(n\\) is **recurrent** if it is not transient.
#
# # 2. PyMC3 Syntax
#
# Now we perform a MCMC simulation for the data described above. Note how easy is to write the model from the mathematical description.
# +
import pymc3 as pm
import arviz as az
model = pm.Model()
with model:
    # Define the prior of the parameter lambda (same Gamma(a, b) as above).
    lam = pm.Gamma('lambda', alpha=a, beta=b)
    # Define the likelihood function, conditioned on the observed sample y.
    y_obs = pm.Poisson('y_obs', mu=lam, observed=y)
    # Consider 2000 draws and 3 chains.
    trace = pm.sample(draws=2000, chains=3)
# -
# If we do a trace plot we can see two results:
#
# - We see the simulated posterior distribution for 3 independent Markov Chains (so that, when combined, avoid the dependence on the initial point). The 3 different chains correspond to the color blue, green and orange.
#
# - The sample value of lambda for each iteration.
# Left: posterior density per chain; right: sampled value per iteration.
pm.traceplot(trace);
# We can also see the mean and quantile information for the posterior distribution.
pm.plot_posterior(trace);
# # 3. Bayesian Hierarchical Modeling: A Chocolate Cookies Example.
#
# <img src="images/monster.jpg" alt="html" style="width: 400px;"/>
#
# Now we are going to treat a more complicated example which illustrates a hierarchical model.
# This case study is taken from the (strongly recommended!) online course:
#
# **Bayesian Statistics: Techniques and Models:**
#
# https://www.coursera.org/learn/mcmc-bayesian-statistics
#
# There, the MCMC simulations are done with [JAGS](http://mcmc-jags.sourceforge.net/) in [R](https://www.r-project.org/). As a matter of fact, this course motivated me to explore an analogous tool for Python.
# ## 3.1 The data
#
# Assume there is a big factory producing chocolate cookies around the world. The cookies follow a unique recipe, but you want to study the chocolate chips distribution for cookies produced in 5 different locations.
#
# - On the one hand side you would assume that the distribution across the locations is similar, as they all come from a unique recipe. This is why you may not want to model each location separately.
#
# - On the other hand, in reality, as the locations are not exactly the same you might expect some differences between each location. This is why you may not want to model all locations at once.
#
# To overcome these restrictions, a hierarchical model can be a feasible approach.
# + slideshow={"slide_type": "slide"}
import pandas as pd
# We begin reading the data into a pandas dataframe.
# The file is space-separated with (at least) 'chips' and 'location' columns.
cookies = pd.read_csv('data/cookies.dat', sep = ' ')
cookies.head()
# + slideshow={"slide_type": "subslide"}
# Let us verify the number of locations.
cookies.location.unique()
# -
# Let us start with some visualization of the data.
# + slideshow={"slide_type": "slide"}
# Histogram distribution of chocolate chips
# for all cookies (all locations pooled).
fig, ax = plt.subplots(figsize=(8, 6))
sns.distplot(cookies['chips'], bins=15, ax=ax);
ax.set(title='Chips Distribution (All Locations)');
# -
# Histogram distribution of chocolate chips
# for cookies in each location (one facet per location).
g = sns.FacetGrid(data=cookies, col='location', col_wrap=2, height=3, aspect=2)
g = g.map(sns.distplot, 'chips')
# + slideshow={"slide_type": "slide"}
# Box plot for different locations.
fig, ax = plt.subplots(figsize=(10,6))
cookies.boxplot(column='chips', by='location', ax=ax);
# -
# ## 3.2 The model: Hierarchical Approach
#
#
# - Hierarchical Model:
#
# We model the chocolate chip counts by a Poisson distribution with parameter \\(\lambda\\). Motivated by the example above, we choose a gamma prior.
#
# \begin{align}
# chips \sim Poiss(\lambda)
# \quad\quad\quad
# \lambda \sim \Gamma(a,b)
# \end{align}
#
# - Parametrization:
#
# We parametrize the shape and scale of the gamma prior with the mean \\(\mu\\) and variance \\(\sigma^2\\).
#
# \begin{align}
# a=\frac{\mu^2}{\sigma^2}
# \quad\quad\quad
# b=\frac{\mu}{\sigma^2}
# \end{align}
#
# - Prior Distributions:
#
# We further impose prior for these parameters
#
# \begin{align}
# \mu \sim \Gamma(2,1/5)
# \quad\quad\quad
# \sigma \sim Exp(1)
# \end{align}
#
#
#
#
# +
x = np.linspace(start=0, stop=50, num=100)
fig = plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
# Gamma(2, rate=1/5) prior for mu; scipy parametrizes by scale = 1/rate = 5.
plt.plot(x, ss.gamma.pdf(x,a=2,scale=5), 'r-')
plt.title('Prior Distribution for mu \n Gamma Density Function for a={} and b={}'.format(2,1/5))
plt.subplot(1, 2, 2)
x = np.linspace(0,10)
# FIX: ss.expon.pdf(x, 1) set loc=1 (the second positional argument of scipy
# pdfs is `loc`), plotting a *shifted* exponential instead of the intended
# Exp(1) prior. Exp(1) corresponds to expon's defaults (loc=0, scale=1).
plt.plot(x, ss.expon.pdf(x, scale=1), 'r-')
plt.title('Prior Distribution for sigma2 \n Exponential Density Function')
# Show the full support from 0 now that the density is no longer shifted.
plt.xlim(0,10)
# -
# Let us write the model in PyMC3. Note how the syntax mimics the mathematical formulation.
# + slideshow={"slide_type": "slide"}
model = pm.Model()
with model:
    # Prior distribution for mu (mean of the per-location lambda's).
    mu = pm.Gamma('mu', alpha=2.0, beta=1.0/5)
    # Prior distribution for sigma2 (spread of the per-location lambda's).
    sigma = pm.Exponential('sigma', 1.0)
    # Parametrization for the shape parameter: a = mu^2 / sigma^2.
    alpha = mu**2/sigma**2
    # Parametrization for the scale parameter: b = mu / sigma^2.
    beta = mu/sigma**2
    # Prior distribution for lambda: one Gamma-distributed rate per location.
    lam = pm.Gamma(
        'lam',
        alpha=alpha,
        beta=beta,
        shape=cookies.location.values.max()
    )
    # Likelihood function for the data: each location i+1 gets its own
    # Poisson likelihood driven by lam[i].
    chips = [
        pm.Poisson('chips_{}'.format(i),lam[i],
        observed=cookies[cookies.location==i+1].chips.values)
        for i in range(cookies.location.values.max())
    ]
# Parameters of the simulation:
# Number of iterations and independent chains.
n_draws, n_chains = 1000, 3
n_sim = n_draws*n_chains
trace = pm.sample(draws=n_draws, chains=n_chains)
# -
# ## 3.3 Diagnostics
#
# Many [diagnostic](https://pymc-devs.github.io/pymc3/api/diagnostics.html) options are described in the PyMC3 documentation.
# + slideshow={"slide_type": "slide"}
pm.traceplot(trace);
# -
# From the traceplot we see that the chains have converged. We can also have a detailed summary of the posterior distribution for each parameter:
# + slideshow={"slide_type": "fragment"}
pm.summary(trace)
# -
# We can also see this visually.
# + slideshow={"slide_type": "subslide"}
pm.plot_posterior(trace);
# -
# We can verify the convergence of the chains formally using the Gelman Rubin test. Values close to 1.0 mean convergence.
pm.gelman_rubin(trace)
# We can also test for correlation between samples in the chains. We are aiming for zero auto-correlation to get "random" samples from the posterior distribution.
# Auto-correlation of the parameter sigma for the 3 chains.
pm.autocorrplot(trace, var_names=['sigma'], max_lag=20);
# We can also consider all the variables simultaneously.
pm.autocorrplot(trace, max_lag=20);
# From these plots we see that the auto-correlation is not problematic. Indeed, we can test this through the *effective sample size*, which should be close to the total number of samples `n_sim`.
pm.diagnostics.effective_n(trace)
# Finally, we can compute the [Watanabe–Akaike information criterion](https://en.wikipedia.org/wiki/Watanabe–Akaike_information_criterion).
pm.waic(trace, model)
# ## 3.4 Residual analysis
#
# In order to evaluate the model results we analyze the behaviour of the residuals.
# +
# Compute the mean of the simulation.
lambda_mean = np.apply_along_axis(np.mean, 0, trace['lam'])
# Compute for each sample the posterior mean.
cookies['yhat'] = cookies.location.apply(lambda x: lambda_mean[x-1])
# Compute the residuals.
cookies['resid'] = cookies.apply(lambda x: x.chips - x.yhat, axis=1)
cookies.head()
# -
# Cookies Residuals
fig, ax = plt.subplots(figsize=(8, 6))
cookies.reset_index().plot.scatter(x='index', y='resid', ax=ax)
ax.axhline(y=0.0, color='r', linestyle='--')
ax.set(title='Cookies Residuals', xlabel='Observation');
# We do not see any particular pattern in the scatter plot of the residuals against the observation index.
fig, ax = plt.subplots(figsize=(8, 6))
cookies.plot.scatter(x='yhat', y='resid', ax=ax)
ax.axhline(y=0.0, color='red', linestyle='--')
ax.set(title='Cookies Residuals');
# ## 3.5 Predictions
#
# Finally, we are going to illustrate how to use the simulation results to derive predictions.
#
# ### 3.5.1 For a known location
#
# Let us consider Location 1. We want, for example, to compute the posterior probability the next cookie in this location has less than 7 chips.
# +
# We generate n_sim samples of a Poisson distribution for
# each value of the lam_0 (location 1) simulation.
y_pred_location_1 = np.random.poisson(lam=trace['lam'][:,0] , size=n_sim)
fig, ax = plt.subplots(figsize=(8, 6))
sns.distplot(y_pred_location_1, bins=30, ax=ax)
ax.set(title='Chocolate Chips Distribution for Location 1');
# -
# Probability the next cookie in location has less than 7 chips.
(y_pred_location_1 < 7).astype(int).mean()
# ### 3.5.2 For a new location
#
# Now assume we want to open a new location. First, we want to compute the posterior probability that this new location has \\(\lambda > 15\\).
# +
# Posterior distributions of a and b
# from the simulated values of mu and sigma2.
post_a = trace['mu']**2/trace['sigma']**2
post_b = trace['mu']/trace['sigma']**2
# We now generate samples of a gamma distribution
# with these generated parameters of a and b.
lambda_pred_dist = np.random.gamma(post_a, 1/post_b, n_sim)
fig, ax = plt.subplots(figsize=(8, 6))
sns.distplot(lambda_pred_dist, bins=30, ax=ax)
ax.set(title='Lambda Predicted Distribution');
# -
# Posterior probability a new location has lambda > 15.
(lambda_pred_dist > 15).astype(int).mean()
# Now we answer a question at the next level of the hierarchical model. We want to calculate the posterior probability for a cookie produced in a new location to have more than 15 chocolate chips.
# +
# Posterior distribution of the chips.
# Here we use the values of lambda obtained above.
cookies_pred_dist = np.random.poisson(lam=lambda_pred_dist, size=n_sim)
fig, ax = plt.subplots(figsize=(8, 6))
sns.distplot(cookies_pred_dist, bins=30, ax=ax)
ax.set(title='Chocolate Chips Distribution New Location');
# -
# Posterior probability that a cookie produced
# in a new location has more than 15 chocolate chips.
(cookies_pred_dist>15).astype(int).mean()
# # 4 References and Further Reading
#
# Here we provide some suggested references used in this post and also to go deeper in the subject.
#
# ## 4.1 Bayesian Probability
#
#
# - [Coursera: Bayesian Statistics: From Concept to Data Analysis](https://www.coursera.org/learn/bayesian-statistics)
#
# - [Coursera: Bayesian Statistics: Techniques and Models](https://www.coursera.org/learn/mcmc-bayesian-statistics)
#
# - [A First Course in Bayesian Statistical Methods, <NAME>](http://www.springer.com/us/book/9780387922997)
#
# - [An Introduction to Bayesian Analysis: Theory and Methods, <NAME>., <NAME>, <NAME>](http://www.springer.com/la/book/9780387400846)
#
# ## 4.2 PyMC3
#
# - [Documentation](https://pymc-devs.github.io/pymc3/index.html)
#
# - [Probabilistic Programming in Python using PyMC, <NAME>, <NAME>, <NAME>](https://arxiv.org/abs/1507.08050)
| Python/intro_PyMC3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.svm import SVC
from sklearn import metrics
from mlxtend.plotting import plot_decision_regions
from sklearn import preprocessing, metrics
from sklearn.linear_model import LogisticRegression
import warnings
import numpy as np
from collections import OrderedDict
from lob_data_utils import lob, db_result, model
from lob_data_utils.svm_calculation import lob_svm
sns.set_style('whitegrid')
warnings.filterwarnings('ignore')
# -
data_length = 24000
stocks = ['3459', '2748', '9268']
should_save_fig = True
d_stocks = {}
d_cv_stocks = {}
d_test_stocks = {}
for s in stocks:
d, d_test = lob.load_prepared_data(
s, data_dir='../data/prepared', length=data_length)
d.index = pd.to_datetime(d['Unnamed: 0'].values)
d_test.index = pd.to_datetime(d_test['Unnamed: 0'].values)
d_stocks[s] = d
d_test_stocks[s] = d_test
print('Dates of datasets')
for s in stocks:
print(s)
print('training', min(d_stocks[s].index), '-', max(d_stocks[s].index))
print('testing', min(d_test_stocks[s].index), '-', max(d_test_stocks[s].index))
# ## Logistic regression on queue imbalance feature
#
from sklearn import utils
def get_classes_weights(df):
    """Compute balanced class weights for the binary mid-price-indicator target.

    Parameters
    ----------
    df : pandas.DataFrame with a 'mid_price_indicator' column (two classes).

    Returns
    -------
    dict mapping each class label to its 'balanced' weight
    (n_samples / (n_classes * class_count)).
    """
    y_train = df['mid_price_indicator'].values
    classes = np.unique(y_train)
    # Keyword arguments: positional use of compute_class_weight was removed
    # in scikit-learn 1.0 (keywords work on older versions too).
    class_weight_list = utils.class_weight.compute_class_weight(
        class_weight='balanced', classes=classes, y=y_train)
    class_weights = {classes[0]: class_weight_list[0], classes[1]: class_weight_list[1]}
    return class_weights
# +
def get_scores_dict_for_data(functions_to_run, dfs, clf, stock):
    """Score clf's queue-imbalance predictions on each dataframe with each metric.

    Parameters
    ----------
    functions_to_run : dict of metric name -> callable(y_true, y_pred).
    dfs : dict of dataframe name -> DataFrame with 'queue_imbalance' and
        'mid_price_indicator' columns.
    clf : fitted classifier with a predict() method.
    stock : stock identifier stored under the 'stock' key.

    Returns
    -------
    dict like {'stock': stock, '<df_name>_<metric_name>': score, ...}.
    """
    scores = {'stock': stock}
    for df_name, df in dfs.items():
        # Predict once per dataframe; previously this was recomputed
        # for every metric even though it never changes.
        pred = clf.predict(df['queue_imbalance'].values.reshape(-1, 1))
        for func_name, func in functions_to_run.items():
            scores['{}_{}'.format(df_name, func_name)] = func(df['mid_price_indicator'], pred)
    return scores
functions_to_run = {'precision': metrics.precision_score, 'roc_auc': metrics.roc_auc_score,
'f1_score': metrics.f1_score, 'recall': metrics.recall_score,
'matthews': metrics.matthews_corrcoef, 'kappa': metrics.cohen_kappa_score}
scores = []
for stock in stocks:
log_clf = LogisticRegression(random_state=0, class_weight=get_classes_weights(d_stocks[stock])) #21312)
res_train = model.validate_model(log_clf, d_stocks[stock][['queue_imbalance']],
d_stocks[stock]['mid_price_indicator'])
dfs = {'test': d_test_stocks[stock]}
res = get_scores_dict_for_data(functions_to_run, dfs, log_clf, stock)
d_stocks[stock]['pred_log'] = log_clf.predict(d_stocks[stock][['queue_imbalance']])
res = {**res, **res_train}
scores.append(res)
df_scores = pd.DataFrame(scores, index=stocks)
# +
def convert_scores(df, column):
    """Collapse a column of per-fold score lists into one mean score per row."""
    return [np.mean(fold_scores) for fold_scores in df[column]]
scores_columns = ['f1', 'kappa', 'matthews', 'precision', 'recall', 'roc_auc', 'train_f1', 'train_kappa',
'train_matthews', 'train_precision', 'train_recall', 'train_roc_auc']
for col in scores_columns:
df_scores[col] = convert_scores(df_scores, col)
df_scores
# -
df_scores[['train_matthews', 'matthews', 'test_matthews']]
print('log')
df_scores[['train_roc_auc', 'roc_auc', 'test_roc_auc']]
print(df_scores[['train_matthews', 'matthews', 'test_matthews', 'train_roc_auc', 'roc_auc', 'test_roc_auc']].to_latex())
df_scores.to_csv('res_overview_all_three_logistic_regression.csv')
print(df_scores[['f1', 'test_f1_score', 'precision', 'test_precision', 'recall', 'test_recall']].to_latex())
f, ax = plt.subplots(1, 3, figsize=(27,6))
for i in range(len(stocks)):
stock = stocks[i]
d_stocks[stock]['Predicition of Mid Price Indicator'] = d_stocks[stock]['pred_log']
d_stocks[stock]['Mid Price Indicator'] = d_stocks[stock]['mid_price_indicator']
d_stocks[stock][['Predicition of Mid Price Indicator', 'Mid Price Indicator']].plot(
kind='kde', ax=ax[i])
ax[i].set_title('Density of Mid Price Indicator and its prediction {} on training data'.format(stock))
ax[i].legend(loc='lower right')
if should_save_fig:
print('Saving figure')
plt.savefig('density_of_mid_price_and_prediction_training_data.png')
f, ax = plt.subplots(1, 3, figsize=(16, 4))
for i in range(len(stocks)):
stock = stocks[i]
d_stocks[stock]['Predicition of Mid Price Indicator'] = d_stocks[stock]['pred_log']
d_stocks[stock]['Mid Price Indicator'] = d_stocks[stock]['mid_price_indicator']
tn, fp, fn, tp = metrics.confusion_matrix(d_stocks[stock]['mid_price_indicator'],
d_stocks[stock]['pred_log']).ravel()
sns.heatmap([[tp, fp], [fn, tn]],
ax=ax[i], annot=True, fmt='d', xticklabels=['Positive', 'Negative'],
yticklabels=['Positive', 'Negative'])
ax[i].set_title('{}'.format(stock))
ax[i].set_ylabel('True Mid Price Indicator')
ax[i].set_xlabel('Predicted Mid Price Indicator')
plt.tight_layout()
if should_save_fig:
print('Saving figure')
plt.savefig('logistic_confusion_matrix.png')
pivots = []
print('Pivot values')
for i in df_scores.index:
stock = i
df = d_stocks[stock]
pivot = np.mean([np.min(df[df['pred_log'] == 1]['queue_imbalance']),
np.max(df[df['pred_log'] == 0]['queue_imbalance'])])
pivots.append(pivot)
df_scores['pivot'] = pivots
df_scores[['pivot', 'stock']]
for k, d in d_stocks.items():
above_pivot = []
pivot = df_scores[df_scores['stock'] == k]['pivot'].values[0]
for i, row in d.iterrows():
if row['queue_imbalance'] < pivot:
above_pivot.append(0)
else:
above_pivot.append(1)
d['above_pivot'] = above_pivot
# +
f, ax = plt.subplots(1, 3, sharey=True, sharex=True, figsize=(16, 4))
i = 0
for k, d in d_stocks.items():
pivot = df_scores[df_scores['stock'] == k]['pivot'].values[0]
df = d[d['queue_imbalance'] < pivot]
confusion_matrix1 = metrics.confusion_matrix(df['mid_price_indicator'], df['pred_log'])
ax[i].bar(height = confusion_matrix1.ravel(), x=['Negatives below pivot','Positives below pivot',
'Negatives above pivot','Postives above pivot'])
df = d[d['queue_imbalance'] > pivot]
confusion_matrix2 = metrics.confusion_matrix(df['mid_price_indicator'], df['pred_log'])
ax[i].bar(height = confusion_matrix2.ravel(), x=['Negatives below pivot','Positives below pivot',
'Negatives above pivot','Postives above pivot'])
i += 1
plt.title('Positive and ')
# +
f, ax = plt.subplots(1, 3, sharey=True, sharex=True, figsize=(16, 12))
plt.title('Violin Plots of Queue Imbalance vs Mid Price Indicator')
i = 0
for k, d in d_stocks.items():
pivot = df_scores[df_scores['stock'] == k]['pivot'].values[0]
df = d[d['queue_imbalance'] < pivot]
sns.boxenplot(y=df['queue_imbalance'], x=df['mid_price_indicator'], ax=ax[i], palette='Set2',)
df = d[d['queue_imbalance'] > pivot]
sns.boxenplot(y=df['queue_imbalance'], x=df['mid_price_indicator'], ax=ax[i], palette='Set1', )
ax[i].set_title(k)
ax[i].set_ylabel('Queue Imbalance')
ax[i].set_xlabel('Mid Price Indicator')
i += 1
plt.ylabel('Queue Imbalance')
plt.xlabel('Mid Price Indicator')
# +
f, ax = plt.subplots(3, 1, figsize=(35, 15), sharex=True)
i = 0
for i in range(len(stocks)):
stock = stocks[i]
df = d_stocks[stock]
X = df[['queue_imbalance']].values
y = df['mid_price_indicator'].values.astype(np.integer)
clf = LogisticRegression(class_weight=get_classes_weights(df))
clf.fit(X, y)
plot_decision_regions(X[0:1000], y[0:1000], clf=clf,ax=ax[i], colors=','.join(['orange', 'blue']))
ax[i].set_xlabel('Queue Imbalance')
ax[i].set_title('Logistic Regression Decision Regions for {} on training data'.format(stock))
ax[i].set_xlim(-1.01, 1.01)
plt.tight_layout()
if should_save_fig:
print('Saving figure')
plt.savefig('logistic_regression_decision_region.png'.format(s))
| overview/overview_all_three_logistic_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import os
import pandas
import tarfile
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.handlers[0].setFormatter(logging.Formatter('%(asctime)s: %(message)s'))
# Known storage roots for the Telecom Big Data Challenge 2015 data.
locations = {
    'gfs': '/l/cnets/datasets/Telecom_BDC_2015',
    'diskstation': '/media/diskstation/Datasets/Telecom Big Data Challenge 2015',
    'data': os.path.expanduser('~/data/tbdc15'),
    'hdd': '/media/giovanni/Multimedia/Datasets/Telecom Big Data Challenge 2015',
    'repo': os.path.expanduser('~/repos/tbdc15')
}
def getpaths(city, loc='gfs', boxesloc=None, storeloc=None):
    """Build the paths to the trips archive, accidents CSV, boxes file and HDF store.

    `loc` selects the storage root from `locations`; `boxesloc`/`storeloc`
    optionally override the root for the boxes CSV / HDF store.
    """
    root = locations[loc]
    # The Unipol claim files use 'RM' for Rome, unlike the city name prefix.
    code = {'RO': 'RM'}.get(city[:2].upper(), city[:2].upper())
    boxes_root = root if boxesloc is None else locations[boxesloc]
    store_root = root if storeloc is None else locations[storeloc]
    return {
        'trips': '{root}/infoblu/{city}.tar.gz'.format(root=root, city=city),
        'accidents': '{root}/unipol/BDC2015_UnipolsaiClaims2014_{city}.csv'.format(root=root, city=code),
        'boxes': '{root}/city_boxes.csv'.format(root=boxes_root),
        'store': '{root}/trip_accidents_store.hdf'.format(root=store_root)
    }
def getbox(path, city):
    """Read the bounding box for *city* from the city-boxes CSV.

    The CSV is indexed by single-letter city code; returns a dict of the
    box columns (e.g. lat_min, lon_min, ...).
    """
    city_code = city[0].lower()
    df_box = pandas.read_csv(path, index_col='city')
    # .loc replaces .ix, which was removed in pandas 1.0; the old code also
    # had a redundant bare `df_box.ix[city_code]` statement, dropped here.
    box = df_box.loc[city_code].to_dict()
    return box
def read_trips(path, box, scale=1000.0, break_at=None):
    """Count trips per (i, j, weekday, hour) cell from a TAR of daily trip CSVs.

    Parameters
    ----------
    path : path to a .tar.gz archive; each member is one day of ';'-separated
        records (trip, timestamp, lat, lon, type, speed), no header.
    box : dict with 'lat_min' and 'lon_min' of the city bounding box.
    scale : cell granularity; i/j are round((coord - min) * scale).
    break_at : if set to an integer, stop after that many archive members
        (useful for quick tests).

    Returns
    -------
    DataFrame indexed by (i, j, weekday, hour) with columns 'trips'
    (all GPS frames) and 'trips_start' (first frame of each trip).
    """
    index_columns = ['i', 'j', 'weekday', 'hour']
    daily_frames = []
    i = 0
    with tarfile.open(path, mode='r:gz') as tf:
        # open tar file in random access mode with on-the-fly gzip decompression
        for member in tf:
            if break_at is not None and i == break_at:
                break
            i += 1
            # read contents of TAR archive. Each file in the archive contains
            # the data of a different day.
            logger.info(member.name)
            f = tf.extractfile(member)
            # do not use the "type" and "speed" columns, since we don't need them. This saves memory.
            df = pandas.read_csv(f,
                                 names=['trip', 'timestamp', 'lat', 'lon', 'type', 'speed'],
                                 usecols=['trip', 'timestamp', 'lat', 'lon'],
                                 sep=';',
                                 parse_dates=['timestamp'])
            # compute the cell, weekday, and hour
            df['i'] = ((df['lat'] - box['lat_min']) * scale).round()
            df['j'] = ((df['lon'] - box['lon_min']) * scale).round()
            df['weekday'] = df['timestamp'].map(pandas.Timestamp.weekday)
            df['hour'] = df['timestamp'].map(lambda k: k.hour)
            # count how many trips in each cell, weekday, hour.
            s1 = df.filter(index_columns).groupby(index_columns).apply(len)
            # do the same but only considering the first frame of each trip.
            df_ff = df.groupby('trip', as_index=False).head(1)
            s2 = df_ff.filter(index_columns).groupby(index_columns).apply(len)
            daily_frames.append(pandas.DataFrame({'trips': s1, 'trips_start': s2}))
    # Accumulate-and-concat replaces the per-day DataFrame.append, which was
    # removed in pandas 2.0 (and was quadratic in the number of days).
    if not daily_frames:
        return pandas.DataFrame(columns=index_columns + ['trips', 'trips_start']).set_index(index_columns)
    return pandas.concat(daily_frames)
def read_accidents(path, box, scale=1000.0):
    """Count accident claims per (i, j, weekday, hour) cell.

    The claims CSV has 'latitude'/'longitude' coordinates and
    'day_type'/'time_range' columns, which map onto weekday/hour.
    Returns a DataFrame indexed by (i, j, weekday, hour) with an
    'accidents' count column.
    """
    index_columns = ['i', 'j', 'weekday', 'hour']
    claims = pandas.read_csv(path)
    claims = claims.rename(columns={'day_type': 'weekday', 'time_range': 'hour'})
    # Same cell discretization as read_trips.
    claims['i'] = ((claims['latitude'] - box['lat_min']) * scale).round()
    claims['j'] = ((claims['longitude'] - box['lon_min']) * scale).round()
    counts = claims.groupby(index_columns).apply(len)
    return pandas.DataFrame({'accidents': counts})
def make_city_frame(city,
                    loc='frosty',
                    boxesloc='frosty',
                    storeloc='frosty',
                    scale=1000.0,
                    break_at=None):
    """
    Read trips and accidents data for *city*, join them on the
    (i, j, weekday, hour) cell index, and store the frame in HDF format.

    `loc`, `boxesloc`, `storeloc` select storage roots (see `getpaths`);
    `scale` is the cell granularity; `break_at` limits how many daily
    archive members are read (for testing).
    """
    # BUG FIX: previously this passed the module-level global `location`
    # instead of the `loc` parameter, silently ignoring the argument.
    paths = getpaths(city, loc=loc, boxesloc=boxesloc, storeloc=storeloc)
    box = getbox(paths['boxes'], city)
    logger.info("Reading trips...")
    trips = read_trips(paths['trips'], box, scale=scale, break_at=break_at)
    logger.info("Reading accidents...")
    accidents = read_accidents(paths['accidents'], box, scale=scale)
    logger.info("Storing data...")
    # Cells with trips but no claims get 0 accidents.
    joined_df = trips.join(accidents).fillna(0).reset_index()
    joined_df.to_hdf(paths['store'], city, complib='blosc', complevel=6)
    # BUG FIX: the old message had no {} placeholder, so the path was dropped.
    logger.info("Data saved to HDF: %s", paths['store'])
# -
# # Create dataset
# ## Select city
cities = ['bari', 'milano', 'napoli', 'palermo', 'roma', 'torino', 'venezia']
location = 'gfs'
boxes_location = 'data'
store_location = 'data'
scale = 1000.0
# ## Read data
# Use the following to remove an existing store file, if needed. (Use `C-M y` to make the cell runnable).
# # rm -f /u/gciampag/data/tbdc15/trip_accidents_store.hdf
ll -h ~/data/tbdc15/
for city in cities:
logging.info("City: {}".format(city))
make_city_frame(city,
loc=location,
scale=scale,
boxesloc=boxes_location,
storeloc=store_location,
break_at=None)
# <hr style="height: .1em"/>
# # Plot the data
# +
# %matplotlib inline
import numpy
from pylab import *
# scatter plot
def scatter_trips_accidents(df, city, use_trips_starts=False):
    """Scatter accidents vs traffic per cell, with a log-binned trend line.

    `use_trips_starts` switches the x axis from all GPS frames ('trips')
    to first-frame-per-trip counts ('trips_start'). Saves the figure as
    both PDF and PNG. Uses pylab state (figure/gca/...), so it must run
    after `from pylab import *`.
    """
    fig = figure()
    if use_trips_starts:
        xcol = 'trips_start'
    else:
        xcol = 'trips'
    df.plot(x=xcol, y='accidents', kind='scatter', marker='x', alpha=.2, color='k', fig=fig)
    # trend line: mean accidents within logarithmically spaced traffic bins
    emin = numpy.log10(df[xcol].min())
    emax = numpy.log10(df[xcol].max())
    bins = numpy.logspace(max(emin, 0), emax, 20)
    # Parenthesized so it also works on Python 3 (`print bins` was Py2-only).
    print(bins)
    df.groupby(numpy.digitize(df[xcol], bins=bins)).mean().plot(x=xcol, y='accidents',
                                                                color='r', linestyle='solid',
                                                                marker='o', ax=gca(), alpha=.5,
                                                                linewidth=2, fig=fig)
    grid('off')
    title(city)
    if use_trips_starts:
        xlabel('Traffic (start of trip)')
    else:
        xlabel('Traffic')
    ylabel('Accidents')
    xscale('log')
    xlim(1, xlim()[1])
    tight_layout()
    legend()
    savefig('trips_accidents_scatter_{}.pdf'.format(city))
    savefig('trips_accidents_scatter_{}.png'.format(city))
    show()
def hist_accidents(df, city):
    """Plot and save a log-scale histogram of accident counts per cell.

    Saves both PDF and PNG versions; relies on pylab state functions
    (figure/gca/savefig/...) from `from pylab import *`.
    """
    fig = figure()
    ax = gca()
    # log=True because cell counts span several orders of magnitude.
    ax.hist(df['accidents'].values, log=True, bins=60, color='white')
    # Keep the log y-axis from starting at 0.
    ylim(.1, ylim()[1])
    xlabel('Accidents')
    ylabel('Frequency')
    title(city)
    tight_layout()
    legend()
    savefig('accidents_histogram_{}.pdf'.format(city))
    savefig('accidents_histogram_{}.png'.format(city))
    show()
def plot_all(city):
    """Load the stored frame for *city*, aggregate over cells, and draw all plots.

    NOTE(review): reads the module-level globals `location`, `boxes_location`
    and `store_location` rather than taking them as parameters — they must be
    set before calling.
    """
    paths = getpaths(city,
                     loc=location,
                     boxesloc=boxes_location,
                     storeloc=store_location)
    df = pandas.read_hdf(paths['store'], city)
    # Sum over weekday/hour so each (i, j) cell contributes one row.
    df = df.groupby(['i', 'j']).sum().filter(['trips', 'trips_start', 'accidents'])
    scatter_trips_accidents(df, city)
    scatter_trips_accidents(df, city, use_trips_starts=True)
    hist_accidents(df, city)
# -
plot_all('bari')
plot_all('milano')
plot_all('napoli')
plot_all('palermo')
plot_all('roma')
plot_all('torino')
plot_all('venezia')
# ## Scatter plot of trips vs trip starts
# + active=""
# df.plot(x='trips_start', y='trips', kind='scatter', alpha=.2, marker='.')
# xscale('log')
# yscale('log')
# xlim(5e-1, 1e5)
# xlabel('Trip starts')
# ylabel('Trips')
# title(city)
# savefig("trips_trips_starts_scatter_{}_{}.pdf".format(city, scale))
# savefig("trips_trips_starts_scatter_{}_{}.png".format(city, scale))
# -
# ## Distributions of accidents
# ### Load the data
city = 'palermo'
df = pandas.read_hdf('/u/gciampag/data/tbdc15/trip_accidents_store.hdf', city)
df = df.groupby(['i', 'j']).sum().filter(['trips', 'trips_start', 'accidents'])
# ### Histogram grouping data $>0$ in bins of size $9$
bin_size = 9 # lower bound on bin size
max_accidents = df['accidents'].max()
start = 1
stop = 1 + ceil((max_accidents - 1) / bin_size) * bin_size
num = (stop - start) / bin_size + 1
bins = numpy.linspace(start, stop, num, endpoint=True)
bins = numpy.hstack([[0,], bins])
nh, __, ___ = hist(df['accidents'].values, bins=bins, color='lightgray', log=True, normed=True, histtype='bar')
xlim(-5, stop)
ylim(1e-7, 1)
xlabel('Accidents')
ylabel('Frequency')
title(city.title())
tick_params(axis='both', direction='out', which='both')
tick_params(axis='x', which='minor', bottom='on', top='off')
tick_params(axis='y', which='both', right='off')
tick_params(axis='x', which='major', top='off')
tick_params(axis='x', which='minor', bottom='on')
# ### Fit the $>0$ data with an exponential law (with binning), and geometric distribution
# +
from scipy.stats import expon, geom, poisson
group_size = 9
df_nza = df.query('accidents > 0')
a = df_nza.groupby(ceil(df_nza['accidents'] / group_size)).count()['accidents']
x = a.index.values
p = a / a.sum()
vlines(x, 0, p, color='LightGray')
plot(x, p, 'wo ', label='Data', ms=8)
# expected number of accidents (computed as a weighted average of the frequencies)
exp_accidents = np.sum(p.values * a.index.values)
#x = np.hstack([[0]])
rv = expon(loc=0, scale=exp_accidents ** -1)
plot(x, rv.cdf(x + 1) - rv.cdf(x), 'x ', color='k', mew=2, label='Exponential')
rv = geom(exp_accidents ** -1, loc=0)
plot(x, rv.pmf(x), '+ ', color='gray', mew=1.5, ms=10, label='Geometric')
rv = poisson(exp_accidents ** -1, loc=0)
plot(x, rv.pmf(x), marker=(6, 2, 0), ls=' ', color='LightGray', mew=1, ms=10, label='Poisson')
xlim(0, xlim()[1] + 1)
xlabel(r'$\left\lceil\rm{Accidents} \,/\, %d\right\rceil$' % group_size, fontsize='large')
ylabel('Probability')
yscale('log')
ylim(ylim()[0], 2)
title(city.title())
legend(loc='best', frameon=False)
tick_params(axis='both', direction='out', which='both')
tick_params(axis='x', which='minor', bottom='on', top='off')
tick_params(axis='y', which='both', right='off')
tick_params(axis='x', which='major', top='off')
tick_params(axis='x', which='minor', bottom='on')
savefig("accidents_grouped_fit_{}.png".format(city))
# -
# ## Zero-inflated geometric distribution
# +
import numpy
from scipy.stats import rv_discrete
class zigeom_gen(rv_discrete):
    """Zero-inflated geometric distribution.

    pmf(0) = pi + (1 - pi) * p
    pmf(k) = (1 - pi) * (1 - p)**k * p   for k >= 1

    where pi is the extra zero-inflation mass and p the geometric parameter.
    """
    def _pmf(self, k, pi, p):
        k = numpy.asarray(k)
        zero_mass = pi + (1.0 - pi) * p
        tail_mass = (1.0 - pi) * (1.0 - p) ** k * p
        return numpy.where(k == 0, zero_mass, tail_mass)
zigeom = zigeom_gen()
# -
# ### Simulate from a zero-inflated geometric
# +
from scipy.optimize import minimize
def fit(data):
    """Maximum-likelihood fit of the zero-inflated geometric to *data*.

    Returns the fitted (pi, p) pair; raises RuntimeError if the
    Nelder-Mead optimization does not converge.
    """
    def negative_log_likelihood(params):
        return - zigeom(*params).logpmf(data).sum()
    n_obs = float(len(data))
    # Start pi at the observed fraction of zeros and p at one half.
    zero_fraction = (data == 0).sum() / n_obs
    result = minimize(negative_log_likelihood, (zero_fraction, .5), method='Nelder-Mead')
    if not result['success']:
        raise RuntimeError(result['message'])
    return result['x']
pi = .2
p = .3
data = zigeom(pi, p).rvs(size=1000)
print fit(data)
# -
p = 0.1
pi = 0.5
data = zigeom(pi, p).rvs(size=2000)
data_max = data.max()
hist(data, bins=data_max, color='white', log=True, normed=True)
pih, ph = fit(data)
x = np.arange(data_max)
px = zigeom(pih, ph).pmf(x)
plot(x + .5, px, 'r-')
title('$\pi = {}, p = {}$'.format(pi, p))
# ## Compare the trip distribution for cells with $0$ accidents with a normal cell (with $\ge 0$ accidents)
# +
import numpy
from truthy_measure.plotting import plot_pdf_log2, plot_cdf
trips_all = df['trips'].values
trips_zac = df.query('accidents == 0')['trips'].values
num_points = 20
bins = numpy.logspace(0, numpy.log2(trips_all.max()), num=num_points, base=2)
hist_all, __ = numpy.histogram(trips_all, bins=bins, normed=True)
hist_zac, __ = numpy.histogram(trips_zac, bins=bins, normed=True)
plot(bins[1:], numpy.log(hist_zac) - numpy.log(hist_all), 'ko:', mfc='LightGray')
axhline(0, ls='--', color='gray', lw=2)
xscale('log')
xlabel('Trips $t$')
ylabel('$\log\Pr\{T = t | A = 0\} - \log\Pr\{T\}$')
yb = max(numpy.abs(ylim()))
ylim(-yb, yb)
title(city.title())
tight_layout()
savefig("logratio_{}.png".format(city))
# -
| Trips_accidents.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Practice NumPy
#
# Make sure you look at [`Intro_to_NumPy.ipynb`](Intro_to_NumPy.ipynb) first!
# +
import numpy as np
from utils import vp_from_dt, impedance, rc_series
# -
test = np.random.random(10000000)
# %timeit rc_series(test)
# Note that the log has to be fairly big for the benchmarking to work properly, because otherwise the CPU caches the computation and this skews the results.
# Now we can re-write our function using arrays instead of lists. Let's just remind ourselves of what it looked like before:
import inspect
print(inspect.getsource(rc_series))
# + [markdown] tags=["exercise"]
# <div class="alert alert-success">
# <b>Exercise</b>:
# <ul>
# <li>- Rewrite the `rc_series()` function to get rid of the loop. Remember that the math operations work on the entire array at once.</li>
# <li>- Time your new version on the `test` data and compare to the loop version.</li>
# </ul>
# </div>
# -
def rc_vector(layers):
    # Exercise stub: compute the reflection-coefficient series without a loop,
    # using whole-array arithmetic on `layers` (compare with the looped
    # rc_series). As written it returns the undefined name `rc` on purpose —
    # the student is expected to fill in the body.
    # Your code here.
    return rc
z = np.arange(10)
rc_vector(z)
# You should get the same output as you did before:
z = np.arange(10)
rc_series(z)
# + tags=["hide"]
def rc_vector(z):
    """Vectorized reflection-coefficient series.

    rc[k] = (z[k+1] - z[k]) / (z[k+1] + z[k]), computed with whole-array
    arithmetic instead of a Python loop; output has len(z) - 1 samples.
    """
    shallow = z[:-1]
    deep = z[1:]
    return (deep - shallow) / (deep + shallow)
# + tags=["hide"]
# %timeit rc_vector(test)
# + [markdown] tags=["hide"]
# 60+ times faster on my machine!
# + [markdown] tags=["exercise"]
# <div class="alert alert-success">
# <b>Exercise</b>:
# <ul>
# <li>- Run the `rc_series` function on the log data to make an RC series array.</li>
# <li>- Add the vectorized version to the file `utils.py`.</li>
# </ul>
# </div>
# +
from welly import Well
w = Well.from_las('../data/R-39.las')
dt = np.array(w.data['DT4P'])
rhob = np.array(w.data['RHOB'])
# -
vp = # Your code here
z = # Your code here
rc = # Your code here
# + tags=["hide"]
vp = vp_from_dt(dt)
# -
vs = vp_from_dt(w.data['DT4S'])
# + tags=["hide"]
z = impedance(vp, rhob)
# + tags=["hide"]
rc = rc_series(z)
# + tags=["hide"]
rc[:10]
# +
# %matplotlib inline
import matplotlib.pyplot as plt
depth = w.data['DT4P'].basis
plt.figure(figsize=(2, 10))
plt.plot(vp, depth)
# -
# ## Convolution
#
# Now we'll use the RC to compute a synthetic... sort of: we're doing this in depth.
# +
from bruges.filters import ricker
wavelet = ricker(100, 1, 0.03)
plt.plot(wavelet)
# -
syn = np.convolve(rc, wavelet, mode='same')
# +
depth_, syn_ = depth[:500], syn[:500]
plt.figure(figsize=(15, 2))
plt.plot(depth_, syn_)
plt.fill_between(depth_, 0, syn_, where=syn_>0)
# -
# ## Offset synthetic
# + tags=["hide"]
from bruges.reflection import akirichards
theta = np.linspace(0, 60, 100)
r = []
for vp1, vs1, rho1, vp2, vs2, rho2 in zip(vp, vs, rhob, vp[1:], vs[1:], rhob[1:]):
r_ = akirichards(vp1, vs1, rho1, vp2, vs2, rho2, theta)
r.append(r_)
r = np.array(r)
# + tags=["hide"]
np.apply_along_axis(log)
# + tags=["hide"]
r.shape
# + tags=["hide"]
from bruges.filters import ricker
w = ricker(100, 1, 0.03)
# + tags=["hide"]
plt.imshow(r, aspect='auto')
# + tags=["hide"]
r[np.isnan(r)] = 0
# + tags=["hide"]
def convolve(tr, w):
    """Convolve one trace with the wavelet, keeping the input length.

    Thin wrapper so np.apply_along_axis can convolve each column of the
    reflectivity panel with the same wavelet.
    """
    return np.convolve(tr, w, mode='same')
s = np.apply_along_axis(convolve, 0, r, w)
# + tags=["hide"]
plt.figure(figsize=(4, 40))
plt.imshow(s, cmap="gray", aspect='auto')
# -
# ## OPTIONAL STOPPING POINT
# ## A very brief introduction to plotting
# +
# %matplotlib inline
import matplotlib.pyplot as plt
plt.plot(rc)
# -
plt.plot(rc[:200])
plt.stem(rc[75:125])
# +
theta = np.arange(0, np.pi, 0.1)
y = np.sin(theta)**2
s = np.convolve(y, rc)
plt.plot(s[:200])
# -
# ## Vsh
#
# _V_<sub>sh</sub> or _V_<sub>shale</sub> is the volume of shale in a given volume of rock. Often synonymous with _V_<sub>clay</sub>, though strictly speaking this should be measured at a different scale: _V_<sub>clay</sub> pertains to a rock, whereas _V_<sub>sh</sub> pertains to an interval of strata.
#
# It is possible to calculate _V_<sub>sh</sub> from spectral gamma-ray CGR curve data (usually where GR comes from):
#
# $$x = \frac{\mathsf{CGR}_\mathrm{zone} - \mathsf{CGR}_\mathrm{clean}}{\mathsf{CGR}_\mathrm{shale} - \mathsf{CGR}_\mathrm{clean}}$$
#
# In many circumstances, _x_ can be used as _V_<sub>sh</sub>. Alternatively, one of the following corrections can be optionally applied:
#
# $V_\mathrm{sh} = \frac{0.5x}{1.5-x}$
#
# $V_\mathrm{sh} = 1.7 - \sqrt{3.38 - (x + 0.7)^2}$
# + [markdown] tags=["exercise"]
# <div class="alert alert-success">
# <b>Exercise</b>:
# <ul>
# <li>Implement the Vsh equation.</li>
# <li>- Your function should work on scalars and on arrays or other sequences.</li>
# <li>- The function should never return a number outside the closed interaval [0, 1].</li>
# <li>- Write a docstring and tests for your function.</li>
# <li>- Apply your function to the GR log from the well `w`</li>
# </ul>
# </div>
# -
def vshale(cgr, clean, shale):
    # Exercise stub: implement the Vsh equation from the markdown above.
    # Should work on scalars and sequences, clip results to [0, 1], and
    # propagate NaNs. As written it returns the undefined name `vsh` on
    # purpose — the student is expected to fill in the body.
    # Your code here!
    return vsh
cgr = [40, 50, 80, 100, 120, 80, np.nan, 10]
vshale(cgr, clean=40, shale=100)
# This should yield:
#
# array([ 0. , 0.0625, 0.4 , 1. , 1. , 0.4 , nan, 0. ])
# + tags=["hide"]
def vshale(cgr, clean, shale):
    """
    Compute VSH from CGR for arrays or single values.

    x = (cgr - clean) / (shale - clean), then the Larionov-style correction
    vsh = 0.5 x / (1.5 - x), clipped to [0, 1]. NaNs in the input propagate
    to the output.
    """
    # BUG FIX: np.atleast_1d returns a *view* when given an ndarray, so the
    # old code zeroed the NaNs in the caller's array. Copy to float instead.
    cgr = np.array(cgr, dtype=float, ndmin=1)
    # Remember where the NaNs were so we can restore them (and avoid
    # NaN-comparison warnings below).
    nans = np.isnan(cgr)
    cgr[nans] = 0
    x = (cgr - clean) / (shale - clean)
    vsh = 0.5 * x / (1.5 - x)
    # Make sure we're in the interval [0, 1]
    vsh = np.clip(vsh, 0, 1)
    # Replace the nans.
    vsh[nans] = np.nan
    return np.squeeze(vsh)
# + tags=["hide"]
vshale(cgr, clean=40, shale=100)
# + tags=["hide"]
vshale(45, 40, 100)
# + tags=["hide"]
vsh = vshale(w.data['GR'], 40, 100)
depth = w.data['GR'].basis
plt.figure(figsize=(2, 8))
plt.plot(vsh[:200], depth[:200])
# -
# <hr />
#
# <div>
# <img src="https://avatars1.githubusercontent.com/u/1692321?s=50"><p style="text-align:center">© Agile Geoscience 2016</p>
# </div>
| instructor/Practice_NumPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="ldGNHcYZNkhH"
import math
from pandas_datareader import data as web
import pandas_datareader as web
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
import matplotlib.pyplot as plt
import urllib
from urllib.request import Request, urlopen
import json
# + colab={"base_uri": "https://localhost:8080/"} id="IwZk_TV4qivU" outputId="aac7503a-ff91-4859-8960-1bd3b0699f1f"
from google.colab import drive
drive.mount('/content/drive')
# + colab={"base_uri": "https://localhost:8080/", "height": 143} id="0BW0_lLUQ9hn" outputId="eebb3b16-1a7f-4ba9-b1cc-8cb43a740b00"
data = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/paperAnder/bmri_close_3.xlsx')
data.head(3)
# + colab={"base_uri": "https://localhost:8080/", "height": 331} id="K9LSQHuYkugL" outputId="5bd6c9cd-0c60-4fe9-d5a2-5f3187fda6b5"
data.corr()
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="WNcxduT_dEsH" outputId="c426925b-1822-40e5-cc82-53f41c7d83a8"
data = data[['open','high','low','frequency','volume','close']]
#data = data[['close']]
data_label = data[['open','high','low','frequency','volume']]
data_close = data[['close']]
data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 303} id="29yTZRr9RZYR" outputId="9dd769f0-4f76-4213-9032-a73fd4644a63"
# Visualize the closing price history
plt.figure(figsize=(8,4))
plt.title('Price History')
plt.plot(data['close'])
#plt.plot(data['open'])
# df['Close']
plt.xlabel('Date',fontsize=18)
plt.ylabel('Close Price IDR',fontsize=18)
plt.show()
# + [markdown] id="RcJkkAvPFP8k"
# Min-max scaling: rescales the values to lie between 0 and 1.
# + colab={"base_uri": "https://localhost:8080/"} id="l6nsFRZmTJ9l" outputId="1180187d-2066-405f-f407-7681974faf95"
import numpy as np
from sklearn.model_selection import train_test_split
# DI SPLIT
X_train, X_test, y_train, y_test = train_test_split(data_label, data_close, test_size=0.2, random_state=42)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
# + id="pixg20cATORt" colab={"base_uri": "https://localhost:8080/"} outputId="2f8371ff-2b80-4ad2-92bd-564eb05702b4"
# Scale the all of the data to be values between 0 and 1
scaler = MinMaxScaler(feature_range=(0, 1))
# DI SCALER X_TRAIN DAN Y_TRAIN
scaled_data = scaler.fit_transform(X_train)
scaled_label = scaler.fit_transform(y_train)
scaled_test_data = scaler.fit_transform(X_test)
scaled_test_label = scaler.fit_transform(y_test)
print(scaled_data.shape)
print(scaled_label.shape)
print(scaled_test_data.shape)
print(scaled_test_label.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="3J4jS3mj89Yr" outputId="590ec87c-5613-418e-9f4c-c35675243a57"
# X_trainscaled_data
# X_train_scaled = np.reshape(scaled_data, (scaled_data.shape[0],scaled_data.shape[1]))
# print(X_train_scaled.shape)
# Y_train_scaled = np.reshape(scaled_label, (scaled_label.shape[0],scaled_label.shape[1]))
# print(Y_train_scaled.shape)
X_train_scaled_1 = np.reshape(scaled_data, (scaled_data.shape[0],scaled_data.shape[1],1))
print(X_train_scaled_1.shape)
Y_train_scaled_1 = np.reshape(scaled_label, (scaled_label.shape[0],scaled_label.shape[1],1))
print(Y_train_scaled_1.shape)
X_test_scaled_1 = np.reshape(scaled_test_data, (scaled_test_data.shape[0],scaled_test_data.shape[1],1))
print(X_test_scaled_1.shape)
Y_test_scaled_1 = np.reshape(scaled_test_label, (scaled_test_label.shape[0],scaled_test_label.shape[1],1))
print(Y_test_scaled_1.shape)
# + id="dGuGcLRD3NBN"
from keras.layers import Bidirectional,TimeDistributed,Conv1D,MaxPooling1D,Flatten,RepeatVector,ConvLSTM2D
from keras.losses import mean_squared_error
import tensorflow as tf
import tensorflow.keras.backend as K
def root_mean_squared_error(y_true, y_pred):
    """Keras-compatible RMSE metric: square root of the mean squared error."""
    mse = mean_squared_error(y_true, y_pred)
    return K.sqrt(mse)
# + id="YP-tMvDXnbEf" colab={"base_uri": "https://localhost:8080/"} outputId="d398bbd8-b477-4254-c1b9-0374e28996e6"
# Build the LSTM network model
# Model 1: one LSTM layer with return_sequences=True followed by Dense layers.
# NOTE(review): because the LSTM emits the full sequence, the Dense layers are
# applied per timestep, so this model outputs shape (batch, 5, 1) — confirm
# that matches the (samples, timesteps, 1) targets it is trained on.
model1 = Sequential()
model1.add(LSTM(units=50, return_sequences=True, input_shape=((5, 1))))
model1.add(Dense(units=25))
model1.add(Dense(units=25))
model1.add(Dense(units=1))
model1.compile(optimizer='adam', loss='mse')
#Bi Directional LSTM
# Model 2: bidirectional LSTM reads the 5-step window in both directions,
# reduced to a single output value.
model2 = Sequential()
model2.add(Bidirectional(LSTM(50, activation='relu'), input_shape=((5, 1))))
model2.add(Dense(1))
model2.compile(optimizer='adam', loss='mse')
#Vanilla LSTM
# Model 3: plain single-layer LSTM; the input shape is inferred on first fit.
model3 = Sequential()
model3.add(LSTM(50, activation='relu'))
model3.add(Dense(1))
model3.compile(optimizer='adam', loss='mse')
#Stacked LSTM
# Model 4: two stacked LSTM layers (the first returns sequences to feed the second).
model4 = Sequential()
model4.add(LSTM(50, activation='relu', return_sequences=True, input_shape=((5, 1))))
model4.add(LSTM(50, activation='relu'))
model4.add(Dense(1))
model4.compile(optimizer='adam', loss='mse')
# + colab={"base_uri": "https://localhost:8080/"} id="qI_9Kryznl1H" outputId="6e619739-d1e1-42b2-fe75-caf0798e97b6"
# Train model1 (the three sibling cells below do the same for models 2-4).
# batch_size=1 makes each epoch very slow but matches the original experiment;
# `history1` keeps the per-epoch loss curve.
history1 = model1.fit(X_train_scaled_1, Y_train_scaled_1, batch_size=1, epochs=50)
# + colab={"base_uri": "https://localhost:8080/"} id="U3BwXVjL2oYj" outputId="6c88980e-1931-4b7e-ad77-86009d5a7906"
history2 = model2.fit(X_train_scaled_1, Y_train_scaled_1, batch_size=1, epochs=50)
# + colab={"base_uri": "https://localhost:8080/"} id="GMseMFMD4Tzd" outputId="7588548e-8f1f-49e2-a0a9-15d6fb85a54c"
history3 = model3.fit(X_train_scaled_1, Y_train_scaled_1, batch_size=1, epochs=50)
# + colab={"base_uri": "https://localhost:8080/"} id="zP-M9l1qAPH-" outputId="8003bc44-9693-4ffc-dfcd-7500c28e8a3f"
history4 = model4.fit(X_train_scaled_1, Y_train_scaled_1, batch_size=1, epochs=50)
# + colab={"base_uri": "https://localhost:8080/", "height": 253} id="xqrXb1czAUoK" outputId="e10e339f-f655-4953-e6de-92f4e3893601"
# print(prediction1)
print("============")
# NOTE(review): this loop indexes each (timesteps, 1) element with the OUTER
# loop counter i, so it reads idx[0], idx[1], ... and will raise IndexError
# once i exceeds the timestep count. `b` is never used afterwards — this
# whole cell looks like leftover scratch work.
b = []
for i,idx in enumerate(Y_test_scaled_1):
  # print(i)
  b.append(idx[i])
# print(Y_test_scaled_1.shape)
from sklearn.metrics import mean_absolute_error
# a = mean_absolute_error(prediction1, Y_test_scaled_1)
# a
# + [markdown] id="Amwz6ivYAx5O"
# TESTING 4 MODEL
# + colab={"base_uri": "https://localhost:8080/"} id="y2S5Gkq9BX0X" outputId="a620655b-767b-41b6-a5cf-07166c377af8"
from sklearn.preprocessing import MinMaxScaler

# Scale factor used to map [0, 1]-normalised prices back to Rupiah.
# BUG FIX: `n` was previously used one line BEFORE it was assigned,
# raising a NameError on the very first print of this cell.
n = 10000
total_data_test = 98  # size of the test set (kept for later cells)


def _report(label, prediction, actual, scale):
    """Print mean price difference, mean predicted price, MSE and RMSE.

    BUG FIX: the original cell mixed several inconsistent formulas —
    sqrt(mean(err**2) * n), sqrt(mean(err**2) * 98) and mean(err**2) / 98.
    MSE is simply mean(err**2) and RMSE is sqrt(MSE); both are now
    reported uniformly for every model.
    """
    print(label)
    print("RATA-RATA PERBEDAAN HARGA", np.mean((prediction - actual) * scale))
    print("RATA-RATA HARGA PREDIKSI", np.mean(prediction * scale))
    mse = np.mean((prediction - actual) ** 2)
    print('MSE PREDIKSI - TEST', mse)
    print('RMSE PREDIKSI - TEST', np.sqrt(mse))


print("RATA-RATA HARGA TEST", np.mean(Y_test_scaled_1 * n))

prediction1 = model1.predict(X_test_scaled_1)
# Kept for compatibility with later cells; now a true RMSE.
rmse1 = np.sqrt(np.mean((prediction1 - Y_test_scaled_1) ** 2))
_report("MODEL 1 - VANILA LSTM + DENSE LAYER", prediction1, Y_test_scaled_1, n)

prediction2 = model2.predict(X_test_scaled_1)
_report("MODEL 2 - BI LSTM + DENSE LAYER", prediction2, Y_test_scaled_1, n)

prediction3 = model3.predict(X_test_scaled_1)
_report("MODEL 3 - Vanila LSTM", prediction3, Y_test_scaled_1, n)

prediction4 = model4.predict(X_test_scaled_1)
_report("MODEL 4 - Stacked LSTM", prediction4, Y_test_scaled_1, n)
# + [markdown] id="hEYingKzYDjR"
# plot result
# + id="usApXPglDnG1" colab={"base_uri": "https://localhost:8080/", "height": 625} outputId="e02acb60-13ce-48cf-c1ab-0bd33a1c4b80"
# Plot/Create the data for the graph
# NOTE(review): `training_data_len` is not defined in this section of the
# notebook — presumably set in an earlier cell; verify before running.
train = data[:training_data_len]
valid = data[training_data_len:]
# NOTE(review): `predictions4` is not defined here either — the test-set
# predictions computed above are named `prediction4`; confirm which variable
# is intended (likely a typo).
valid['Predictions'] = predictions4 #######pakai model terbaik  (use the best model)
# Visualize the actual closing prices vs the model predictions.
plt.figure(figsize=(12,8))
plt.title('LSTM Model')
plt.xlabel('Days', fontsize=18)
plt.ylabel('Close Price USD ($)', fontsize=18)
plt.plot(train['close'])
plt.plot(valid[['close', 'Predictions']])
plt.legend(['Train', 'Val', 'Predictions'], loc='lower right')
plt.show()
# + id="HF0p4iFSGlQC"
# Get the quote
bmri_quote = data
# BMRI_quote
# Create a new dataframe holding only the closing-price column.
new_df = bmri_quote.filter(['close'])
# Take a window of closing prices.
# NOTE(review): the original comment said "last 60 day" but this slices the
# FIRST 30 rows — confirm the intended window length and direction.
last_60_days = new_df[0:30].values
# Scale the data to be values between 0 and 1.
# This refits the scaler on the 30-row window; its min/max are what
# `inverse_transform` uses a few cells below.
last_60_days_scaled = scaler.fit_transform(last_60_days)
# Create an empty list
X_test2 = []
# Append the scaled window as a single sample.
X_test2.append(last_60_days_scaled)
# Convert the X_test data set to a numpy array
X_test2 = np.array(X_test2)
# Reshape to (samples, timesteps, features=1) for the LSTM.
X_test2 = np.reshape(X_test2, (X_test2.shape[0], X_test2.shape[1], 1))
# + colab={"base_uri": "https://localhost:8080/"} id="6A_jPFcq0YUJ" outputId="4e6773af-4f85-4481-a069-bf8b6599a9b1"
last_60_days.shape
# + id="75KkdKIKyOwE"
# Get the predicted scaled price from the best-performing model (model4).
pred_price = model4.predict(X_test2) #############pake model terbaik  (use the best model)
# Undo the scaling back to price units, using the scaler as fitted on the
# 30-row window in the previous cell.
pred_price = scaler.inverse_transform(pred_price)
#print(pred_price)
# + colab={"base_uri": "https://localhost:8080/"} id="RmfK1MoHHb8Z" outputId="eb74d6f1-af35-4e22-f41b-0dce9188b2a8"
# Today's and yesterday's actual closing prices from the quote.
bmri_uji_harga_hari_ini = data['close'][0]
bmri_uji_harga_kemarin = data['close'][1]
print("harga hari ini kenyantaan", bmri_uji_harga_hari_ini)
# Round the model's predicted price for tomorrow up to a whole number.
tambah2persen = math.ceil(pred_price[0][0])
print("harga besok", tambah2persen)
# Buy signal when the prediction exceeds today's OR yesterday's price;
# LSTM_hasil records the signal (1 = buy, 0 = don't buy) for the scoring cell.
if bmri_uji_harga_hari_ini < tambah2persen or bmri_uji_harga_kemarin < tambah2persen:
    print("Beli Karena Prediksi akan terjadi kenaikan")
    LSTM_hasil = 1
else:
    print("Jangan beli karena akan terjadi penurunan")
    LSTM_hasil = 0
# + id="2vKSxI6PDrfI"
# Hard-coded fundamental-analysis inputs for the scored stock:
PEINT_R = 18.98  # price/earnings ratio
DYINT_R = 3.5    # dividend yield, in percent
NGINT_R = -37.7  # net-income growth, in percent
MCINT_R = 287    # market capitalisation (trillions, per the scoring comment below)
# + colab={"base_uri": "https://localhost:8080/"} id="TaHAxwkqGMBb" outputId="9c53e72a-9977-471c-9709-e5bb5df71002"
# EXPERT SCORING WITH DECISION TREE KHUSUS UNTUK SEKTOR BANK
# Combines fundamental criteria with the LSTM direction signal into a
# weighted score (max 100) and prints a buy / don't-buy verdict.
print("Hasil dari LSTM", LSTM_hasil)
scoring = 0
params = []
# SYARAT PE TIDAK LEBIH BESAR DIBANDING 15 , DIVIDEN YIELD > 4% , Pertumbuhan Net Icome by Year 1.5% ,
# Market Cap > 200Triliun dan Hasil LSTM Positif Naik Besok
if PEINT_R < 15.5:
    scoring += 22
    params.append("Price Earning Ratio")
if DYINT_R > 4:
    scoring += 28
    params.append("Pembagian Jumlah Dividen")
if NGINT_R > 1.5:
    scoring += 20
    params.append("Pendapatan Bersih Perusahaan")
if MCINT_R > 200:
    scoring += 10
    params.append("Market Capital Antimology2")
# BUG FIX: this previously tested `LSTM == 1`, comparing the imported keras
# LSTM *class* to 1 (always False), so the prediction signal never counted.
# The flag produced by the decision cell above is `LSTM_hasil`.
if LSTM_hasil == 1:
    scoring += 20
    params.append("Close Price Prediction LSTM")
print(f"Saham ini disupport oleh Parameter baik dengan score {scoring}\nseperti", params)
if scoring < 60:
    print("Saham jelek jan dibeli")
else:
    # BUG FIX: both branches previously printed the identical "don't buy"
    # message; a score >= 60 is the positive verdict.
    print("Saham bagus dan layak dibeli")
# + [markdown] id="ete24JrianeQ"
#
| rnn_model_stock_10juni.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="nipkJ26yOaeo" colab_type="code" colab={}
# #!pip install datadotworld
# #!pip install datadotworld(pandas)
# + id="_FUkJxbuO1gd" colab_type="code" colab={}
# #!dw configure
# + id="Iah5ZVq_NX5J" colab_type="code" colab={}
from google.colab import drive
import pandas as pd
import numpy as np
import datadotworld as dw
# + id="nDNhS8S_PBFF" colab_type="code" colab={}
#drive.mount("/content/drive")
# + id="Neo3T_UdPHIM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9840b80e-ba65-4be9-bb23-505d3194fe99" executionInfo={"status": "ok", "timestamp": 1581525034626, "user_tz": -60, "elapsed": 1880, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}}
# ls
# + id="E_VNlM3hPN6w" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="42e80e98-1dc9-4668-dc7c-cdb96154fe88" executionInfo={"status": "ok", "timestamp": 1581525059996, "user_tz": -60, "elapsed": 533, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}}
# cd "drive/My Drive/Colab Notebooks/dw_matrix"
# + id="G_lBuKfAPUcw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b9f129d2-4968-454d-95c8-9d1ebe303a1b" executionInfo={"status": "ok", "timestamp": 1581525095301, "user_tz": -60, "elapsed": 1569, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}}
# ls matrix_one
# + id="6aM1NNA3PVdY" colab_type="code" colab={}
# !mkdir data
# + id="ncwTNdQiPgXo" colab_type="code" colab={}
# !echo 'data' > .gitignore
# + id="rCxccfOoPve4" colab_type="code" colab={}
# !git add .gitignore
# + id="vv7IDIXiP1Fk" colab_type="code" colab={}
# Download the men's-shoe-prices dataset from data.world (requires a
# configured data.world API token; see `dw configure` above).
data = dw.load_dataset('datafiniti/mens-shoe-prices')
# + id="T0hWsSHuP_fY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="820809f1-b765-46c7-9861-6cbc05a87ced" executionInfo={"status": "ok", "timestamp": 1581525323583, "user_tz": -60, "elapsed": 523, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}}
# Grab the main table from the dataset bundle and check its dimensions.
df = data.dataframes['7004_1']
df.shape
# + id="P-UqCcL5QH6Y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 479} outputId="4337ebd8-2aa6-4b8b-8d1d-6eff7039a571" executionInfo={"status": "ok", "timestamp": 1581525336164, "user_tz": -60, "elapsed": 490, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}}
df.sample(5)
# + id="xHPwJxxIQX4k" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="051d2b12-6220-4a93-98d8-b5915fdeb595" executionInfo={"status": "ok", "timestamp": 1581525357958, "user_tz": -60, "elapsed": 471, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}}
df.columns
# + id="fOABxZmVQdNc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="0886f0fe-3de0-41f2-e2ea-cc26f6757a4b" executionInfo={"status": "ok", "timestamp": 1581525377120, "user_tz": -60, "elapsed": 457, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}}
df.prices_currency.unique()
# + colab_type="code" outputId="3f0af2d2-97d0-46e0-a176-64b933907863" executionInfo={"status": "ok", "timestamp": 1581525444604, "user_tz": -60, "elapsed": 477, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}} id="C4x18UIxQrN8" colab={"base_uri": "https://localhost:8080/", "height": 255}
df.prices_currency.value_counts(normalize=True)
# + id="70GEZJV4QzM4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a190e4a5-7bca-4beb-b6c2-0143a1ee2b6a" executionInfo={"status": "ok", "timestamp": 1581525516592, "user_tz": -60, "elapsed": 529, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}}
# Keep only the rows priced in US dollars; .copy() avoids chained-assignment
# warnings when columns are modified below.
df_usd = df[ df.prices_currency == 'USD' ].copy()
df_usd.shape
# + id="AB9Osy4cRBoH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="786d7ad4-5c1b-437a-8895-0555bdd8cb86" executionInfo={"status": "ok", "timestamp": 1581525648912, "user_tz": -60, "elapsed": 754, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}}
# Convert the minimum-price column from string to float and plot a histogram.
# BUG FIX: the `np.float` alias was deprecated in NumPy 1.20 and removed in
# 1.24; the builtin `float` (== np.float64 here) is the supported spelling.
df_usd['prices_amountmin'] = df_usd.prices_amountmin.astype(float)
df_usd['prices_amountmin'].hist()
# + id="YviYjj50RkKB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b0934e70-36bf-4903-b7e6-de951205c072" executionInfo={"status": "ok", "timestamp": 1581525751995, "user_tz": -60, "elapsed": 592, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}}
# 99th percentile of the minimum prices — the cutoff used to drop the top 1%
# of outliers before plotting.
filter_max = np.percentile(df_usd['prices_amountmin'], 99)
filter_max
# + id="WbzTSpmgRuik" colab_type="code" colab={}
# Drop rows at or above the 99th-percentile price cutoff computed above.
df_usd_filter = df_usd[df_usd['prices_amountmin'] < filter_max]
# + id="44Mvp3B4SNV7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="28c77a04-4ed6-4499-d3d8-33802c35b023" executionInfo={"status": "ok", "timestamp": 1581525888359, "user_tz": -60, "elapsed": 764, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}}
df_usd_filter.prices_amountmin.hist(bins=100)
# + id="zY-5ajgBSUAT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9cc84c2e-907a-48b8-d778-25df2e596cd4" executionInfo={"status": "ok", "timestamp": 1581525955001, "user_tz": -60, "elapsed": 1945, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08073130212506059846"}}
# ls matrix_one
# + id="pZKSklUoSq_E" colab_type="code" colab={}
# !gi
| matrix_one/day3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.1 64-bit (''pyUdemy'': conda)'
# name: python38164bitpyudemyconda8c705f49a8e643418ce4b1ca64c8ab63
# ---
# +
import os
# Absolute paths to the course project folder and the sample text file
# (machine-specific Windows path).
project_path = os.path.abspath("C:/Users/Jan/Dropbox/_Programmieren/UdemyPythonKurs/")
file_path = os.path.join(project_path, "Chapter4_Iterables", "Strings", "user_data.txt")
print(file_path)
# +
# Read every line of the user-data file; the context manager closes the
# handle automatically when the block exits.
with open(file_path, "r") as f:
    content = f.readlines()
print(f, type(f))
print(content)
# Strip the trailing newline characters via a list comprehension.
content = [line.replace("\n", "") for line in content]
print(content)
# -
# Prompt the user for their age; input() always returns a string.
user_input = input("Please enter your age: ")
print(user_input, type(user_input))
# Convert to an integer; raises ValueError if the text is not a number.
user_age = int(user_input)
print(user_age, type(user_age))
| Chapter4_Iterables/Strings/files.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .scala
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: spylon-kernel
// language: scala
// name: spylon-kernel
// ---
// %%init_spark
launcher.packages = ["com.acervera.osm4scala:osm4scala-spark3-shaded_2.12:1.0.10"]
// Load the OpenStreetMap PBF extract of Monaco and list the coordinates of
// every node whose `highway` tag marks it as a traffic signal.
val osmDF = spark.read.format("osm.pbf").load("/home/jovyan/work/monaco-anonymized.osm.pbf")
osmDF.select("latitude", "longitude")
  .where("element_at(tags, 'highway') == 'traffic_signals'")
  .show
// + language="python"
// osm_df = spark.read.format("osm.pbf").load("/home/jovyan/work/monaco-anonymized.osm.pbf")
// osm_df.select("latitude", "longitude").where("element_at(tags, 'highway') == 'traffic_signals'").show()
| website/static/notebooks/spylon_notebook_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sesiones prácticas
# ## 0
# Instalación de Python + ecosistema científico + opencv + opengl
#
# - aula virtual -> página web -> install
# - git o unzip master
# - anaconda completo o miniconda
# - windows: opencv y probar los ejemplos
# - linux: primer método más seguro, con paquetes seleccionados
# - probar webcam.py stream.py, surface.py, image_gl.py, hog/facelandmarks.py (en ../data get.sh)
# - manejo básico de jupyter
#
# Opcional:
#
# - compilación opencv
# - probar docker
# ## 1
# Dispositivos de captura
#
# - webcam.py con opencv crudo
# - spyder
# - umucv (install con --upgrade) (update_umucv.sh)
# - PYTHONPATH
# - stream.py, opciones de autostream, efecto de teclas, --help, --dev=help
#
# - webcams
# - videos
# - carpeta de imágenes
# - teléfono
# - youtube
# - urls de tv
#
# - ejemplo de recorte invertido
# - grabar video de demo (save_video.py)
#
# ## 2
# Más utilidades
#
# - control de webcam v4l2-ctl, vlc, gucview
# - help_window
# - mouse.py
# - trackbar.py, paso a monocromo, binarización manual, tipos de dato de mask real*1, byte*255
# - roi.py, putText, moveWindow
# - zoomwindow.py
# - deque.py, demo historygrid
# - demo activity, videosave2.py
# - calibración precisa con plantilla
# - medidor.py, ejercicio: ampliarlo para dado f convertir en grados
# ## 3
# Color
#
# - efecto del suavizado sobre la superficie de niveles de gris (surface2.py)
#
# - Ejercicio: gaussian blur en el roi con sigma en un trackbar. Opcional: Implementarlo en python y C
#
# - Diferencia entre los histogramas de color por separado y conjunto.
#
# - histogram.py / histogram2.py (matplotlib interactivo), histocolor.
#
# - Efecto de la iluminación en los histogramas de niveles de gris, de RGB y de HSV/Lab, etc.
#
# - Segmentación por similitud de histograma
#
# - Segmentación por umbralización (inrange0.py, inrange.py)
#
# - efecto chroma (notebook) y backsub0.py / backsub.py. cv.absdiff
# ## 4
# HOG
#
# - (captura asíncrona)
#
# - (teoría de HOG, implementación sencilla)
#
# - hog0.py en detalle
#
# - pedestrian.py, detección multiescala
#
# - DLIB facelandmarks.py: HOG face detector con landmarks
#
# - (opcional) DLIB herramienta de etiquetado imglab. Entrenamiento de detector HOG SVM con herramientas de DLIB:
#
# - descargar y descomprimir dlib source
# - ir a los ejemplos/faces
# - meter dentro imglab (que hay que compilar pero tenemos versión precompilada en robot/material/va)
# - mostrar los training.xml y testing.xml (se pueden crear otros)
# - meter dentro train_detector.py y run_detector.py de code/hog
# - ./train_detector training.xml testing.xml (crea detector.svm)
# - ./run_detector detector.svm --dev=dir:\*.jpg (o también --dev=dir:/path/to/umucv/images/monty\*)
#
# ## 5
# Flujo óptico de Lucas-Kanade y SIFT keypoints
#
# - crosscorr.py
# - LK/*.py
# - SIFT/*.py
# Antes de empezar la sesión propiamente dicha comentamos el método de detección de objetos por correlación cruzada, que es el mismo criterio que se usa para buscar la posición de *corners* en imágenes sucesivas.
# En la primera parte de la sesión vamos a construir un "tracker" de puntos de interés basado en el método de Lucas-Kanade.
# El primer paso es construir un detector de corners partiendo de cero, calculando una imagen con el menor valor propio de la matriz de covarianza de la distribución local del gradiente en cada pixel (corners0.py). En realidad esta operación está directamente disponible en opencv mediante cv.goodFeaturesToTrack (corners1.py).
# El siguiente ejemplo muestra cómo encontrar directamente con cv.calcOpticalFlowPyrLK la posición de los puntos detectados en el fotograma siguiente, sin necesidad de recalcular puntos nuevos y asociarlos con los anteriores (corners2.py).
# A continuación ampliamos el código para generar puntos nuevos regularmente y crear una lista de trayectorias "tracks" que se mantiene actualizada en cada fotograma (corners3.py).
# Finalmente, ampliamos el código anterior para que solo se generen puntos nuevos en zonas de la imagen donde no los haya, y mejoramos la detección de las posiciones siguientes con un criterio de calidad muy robusto que exige que la predicción hacia el pasado de los puntos nuevos coincida con el punto inicial. Si no hay una asociación mutua el punto y su trayectoria se descartan (lk_tracks.py).
# En la segunda parte experimentamos con el detector de puntos de interés SIFT.
#
# Nuestro objetivo es obtener un conjunto de "keypoints", cada uno con su descriptor (vector de características que describe el entorno del punto), que permita encontrarlo en imágenes futuras. Esto tiene una aplicación inmediata para reconocer objetos y más adelante en geometría visual.
#
# Empezamos con el ejemplo de código code/SIFT/sift0.py, que simplemente calcula y muestra los puntos de interés. Es interesante observar el efecto de los parámetros del método y el tiempo de cómputo en función del tamaño de la imagen (que puedes cambiar con --size o --resize).
#
# (Nota: La implementación de SIFT en opencv está en un repositorio aparte con las contribuciones "non free" (aunque la patente ha expirado el año pasado). La versión de opencv que estamos usando lo incluye. Si no fuera así, podemos utilizar el detector AKAZE que funciona de forma equivalente. La forma de hacerlo está comentada en el código).
#
# El siguiente ejemplo code/SIFT/sift1.py muestra un primer ataque para establecer correspondencias. Los resultados son bastante pobres porque se aceptan todas las posibles coincidencias.
#
# Finalmente, en code/SIFT/sift.py aplicamos un criterio de selección para eliminar muchas correspondencias erróneas (aunque no todas). Esto es en principio suficiente para el reconocimiento de objetos. (Más adelante veremos una forma mucho mejor de eliminar correspondencias erróneas, necesaria para aplicaciones de geometría.)
#
# El ejercicio obligatorio **SIFT** es una ampliación sencilla de este código. Se trata de almacenar un conjunto de modelos (¡con textura! para que tengan suficientes keypoints) como portadas de libros, discos, videojuegos, etc. y reconocerlos en base a la proporción de coincidencias detectadas.
# ## 7
# (**PROVISIONAL**)
# ## 8
# En esta sesión vamos a explorar el reconocimiento de formas mediante descriptores frecuenciales.
#
# Nuestro objetivo es hacer un programa que reconozca la forma de trébol, como se muestra [en este pantallazo](../../images/demos/shapedetect.png). Si no tenéis a mano un juego de cartas podéis usar --dev=dir:../images/card*.png para hacer las pruebas, aunque lo ideal es hacerlo funcionar con una cámara en vivo.
#
# Trabajaremos con los ejemplos de la carpeta `code/shapes` y, como es habitual, iremos añadiendo poco a poco funcionalidad. En cada nuevo paso los comentarios explican los cambios respecto al paso anterior.
#
# Empezamos con el ejemplo shapes/trebol1.py, que simplemente prepara un bucle de captura básico, binariza la imagen y muestra los contornos encontrados. Se muestran varias formas de realizar la binarización y se puede experimentar con ellas, pero en principio el método automático propuesto suele funcionar bien en muchos casos.
#
# El segundo paso en shapes/trebol2.py junta la visualización en una ventana y selecciona los contornos oscuros de tamaño razonable. Esto no es imprescincible para nuestra aplicación, pero es interesante trabajar con el concepto de orientación de un contorno.
#
# En shapes/trebol3.py leemos un modelo de la silueta trébol de una imagen que tenemos en el repositorio y la mostramos en una ventana.
#
# En shapes/trebol3b.py hacemos una utilidad para ver gráficamente las componentes frecuenciales como elipses que componen la figura. Podemos ver las componentes en su tamaño natural, incluyendo la frecuencia principal, [como aquí](../images/demos/full-components.png), o quitando la frecuencia principal y ampliando el tamaño de las siguientes, que son la base del descriptor de forma, [como se ve aquí](../images/demos/shape-components.png). Observa que las configuraciones de elipses son parecidas cuando corresponden a la misma silueta.
#
# En shapes/trebol4.py definimos la función que calcula el descriptor invariante. Se basa esencialmente en calcular los tamaños relativos de estas elipses. En el código se explica cómo se consigue la invarianza a las transformaciones deseadas: posición, tamaño, giros, punto de partida del contorno y ruido de medida.
#
# Finalmente, en shapes/trebol5.py calculamos el descriptor del modelo y en el bucle de captura calculamos los descriptores de los contornos oscuros detectados para marcar las siluetas que tienen un descriptor muy parecido al del trébol.
#
# El ejercicio opcional SILU consiste en ampliar este código para reconocer un conjunto más amplio de siluetas en alguna aplicación que se os parezca interesante. Por ejemplo, en images/shapes tenéis los modelos de caracteres de las placas de matrícula.
# ## 9
# Otra actividad en esta sesión consiste en comentar el ejemplo de código code/server.py. Utiliza el paquete [flask][flask] para crear un sencillo servidor web que devuelve la imagen de la cámara modificada como deseemos.
#
# [flask]: https://en.wikipedia.org/wiki/Flask_(web_framework)
# En esta sesión vamos a hacer varias actividades. Necesitamos algunos paquetes. En Linux son:
#
# sudo apt install tesseract-ocr tesseract-ocr-spa libtesseract-dev
# pip install tesserocr
#
# sudo apt install libzbar-dev
# pip install pyzbar
#
# Usuarios de Mac y Windows: investigad la forma de instalarlo.
#
# 1) En primer lugar nos fijamos en el script `code/ocr.py`, cuya misión es poner en marcha el OCR con la cámara en vivo. Usamos el paquete de python `tesserocr`. Vamos a verificar el funcionamiento con una imagen estática, pero lo ideal es probarlo con la cámara en vivo.
#
# python ocr.py python ocr.py --dev=dir:../images/texto/bo0.png
#
# Está pensado para marcar una sola línea de texto, [como se muestra aquí](../images/demos/ocr.png). Este pantallazo se ha hecho con la imagen bo1.png disponible en la misma carpeta, que está desenfocada, pero aún así el OCR funciona bien.
#
#
# 2) El segundo ejemplo es `code/zbardemo.py` que muestra el uso del paquete pyzbar para leer códigos de barras ([ejemplo](../images/demos/barcode.png)) y códigos QR ([ejemplo](../images/demos/qr.png)) con la cámara. En los códigos de barras se detectan puntos de referencia, y en los QR se detectan las 4 esquinas del cuadrado, que pueden ser útiles como referencia en algunas aplicaciones de geometría.
#
#
# 3) A continuación vamos a jugar con un bot de telegram que nos permite comunicarnos cómodamente con nuestro ordenador desde el teléfono móvil, sin necesidad de tener una dirección pública de internet.
#
# Voy a dejar durante esta mañana un bot funcionando para que hagáis pruebas. El bot se llama "BichoBot" y su foto de perfil es una pequeña plataforma con ruedas con un raspberry pi encima. Responde al comando /hello y si le enviáis una foto os la devolverá en blanco y negro e invertida. (Está basado en bot3.py).
#
# Simplemente necesitamos:
#
# pip install python-telegram-bot
#
# El ejemplo `bot/bot0.py` nos envía al teléfono la IP del ordenador (es útil si necesitamos conectarnos por ssh con una máquina que tiene IP dinámica).
#
# El ejemplo `bot/bot1.py` explica la forma de enviar una imagen nuestro teléfono cuando ocurre algo. En este caso se envía cuando se pulsa una tecla, pero lo normal es detectar automáticamente algún evento con las técnicas de visión artificial que estamos estudiando.
#
# El ejemplo `bot/bot2.py` explica la forma de hacer que el bot responda a comandos. El comando /hello nos devuelve el saludo, el comando /stop detiene el programa y el comando /image nos devuelve una captura de nuestra webcam. (Se ha usado la captura en un hilo).
#
# El ejemplo `bot/bot3.py` explica la forma de capturar comandos con argumentos y el procesamiento de una imagen enviada por el usuario.
#
# Esta práctica es completamente opcional, pero es muy útil para enviar cómodamente a nuestros programas de visión artificial una imagen tomada con la cámara sin necesidad de escribir una aplicación específica para el móvil. Algunos ejercicios que estamos haciendo se pueden adaptar fácilmente para probarlos a través de un bot de este tipo.
#
# Para crearos vuestro propio bot tenéis que contactar con el bot de telegram "BotFather", que os guiará paso a paso y os dará el token de acceso. Y luego el "IDBot" os dirá el id numérico de vuestro usuario.
#
# En la carpeta hay otros ejemplos más avanzados.
#
#
# 4) En la dirección
#
# https://github.com/ruvelro/TV-Online-TDT-Spain
#
# se pueden encontrar las url de muchos canales de televisión que están haciendo streaming en directo. Abriendo los ficheros m3u8 encontramos las url que podemos poner en --dev en nuestras aplicaciones (hay distintas resoluciones de imagen). Por ejemplo, la TVE1 está aquí:
#
# http://hlsliveamdgl7-lh.akamaihd.net/i/hlsdvrlive_1@583042/index_0400_av-p.m3u8?sd=10&rebase=on
#
# (Funciona a saltos porque autoStream lee los frames lo más rápido posible. Se puede poner un time.sleep para que vaya a ritmo normal).
#
# Próximamente propondré un ejercicio opcional relacionado con esto.
# ## 10
# Esta sesión está dedicada a poner en marcha una red convolucional sencilla. La tarea que vamos a resolver es el reconocimiento de dígitos manuscritos. Por eso, en primer lugar es conveniente escribir unos cuantos números en una hoja de papel, con un bolígrafo que tenga un trazo no demasiado fino, y sin preocuparnos mucho de que estén bien escritos. Pueden tener distintos tamaños, pero no deben estar muy girados. Para desarrollar el programa y hacer pruebas cómodamente se puede trabajar con una imagen fija, pero la idea es que nuestro programa funcione con la cámara en vivo.
#
#
# Trabajaremos en la carpeta [code/DL/CNN](../code/DL/CNN), donde tenemos las diferentes etapas de ejercicio y una imagen de prueba.
#
# El primer paso es `digitslive-1.py` que simplemente encuentra las manchas de tinta que pueden ser posibles números.
#
# En `digitslive-2.py` normalizamos el tamaño de las detecciones para poder utilizar la base de datos MNIST.
#
# En `digitslive-3.py` implementamos un clasificador gaussiano con reducción de dimensión mediante PCA y lo ponemos en marcha con la imagen en vivo. (Funciona bastante bien pero, p.ej., en la imagen de prueba comete un error).
#
# Finalmente, en `digitslive-4.py` implementamos la clasificación mediante una red convolucional mediante el paquete **keras**. Usamos unos pesos precalculados. (Esta máquina ya no comete el error anterior.)
#
# Como siempre, en cada fase del ejercicio los comentarios explican el código que se va añadiendo.
#
# Una vez conseguido esto, la sesión práctica tiene una segunda actividad que consiste en **entrenar los pesos** de (por ejemplo) esta misma red convolucional. Para hacerlo en nuestro ordenador sin perder la paciencia necesitamos una GPU con CUDA y libCUDNN. La instalación de todo lo necesario puede no ser trivial.
#
# Una alternativa muy práctica es usar [google colab](https://colab.research.google.com/), que proporciona gratuitamente máquinas virtuales con GPU y un entorno de notebooks jupyter (un poco modificados pero compatibles). Para probarlo, entrad con vuestra cuenta de google y abrid un nuevo notebook. En la opción de menú **Runtime** hay que seleccionar **Change runtime type** y en hardware accelerator ponéis GPU. En una celda del notebook copiáis directamente el contenido del archivo `cnntest.py` que hay en este mismo directorio donde estamos trabajando hoy. Al evaluar la celda se descargará la base de datos y se lanzará un proceso de entrenamiento. Cada epoch tarda unos 4s. Podéis comparar con lo que se consigue con la CPU en vuestro propio ordenador. Se puede lanzar un entrenamiento más completo, guardar los pesos y descargarlos a vuestra máquina.
#
# Como curiosidad, podéis comparar con lo que conseguiría el OCR tesseract, y guardar algunos casos de dígitos que estén bien dibujados pero que la red clasifique mal.
# ## 11
# En esta sesión vamos a poner en marcha los modelos avanzados de deep learning que presentamos ayer.
#
# Los ejemplos de código se han probado sobre LINUX. En Windows o Mac puede ser necesario hacer modificaciones; para no perder mucho tiempo mi recomendación es probarlo primero en una máquina virtual.
#
# Si tenéis una GPU nvidia reciente lo ideal es instalar CUDA y libCUDNN para conseguir una mayor velocidad de proceso. Si no tenéis GPU no hay ningún problema, todos los modelos funcionan con CPU. (Los ejercicios de deep learning que requieren entrenamiento son opcionales.)
#
# Para ejecutar las máquinas inception, YOLO y el reconocimiento de caras necesitamos los siguientes paquetes:
#
# pip install face_recognition tensorflow==1.15.0 keras easydict
#
# La detección de marcadores corporales *openpose* requiere unos pasos de instalación adicionales que explicaremos más adelante.
#
# (La versión 1.15.0 de tensorflow es necesaria para YOLO y openpose. Producirá algunos warnings sin mucha importancia. Si tenemos una versión más reciente de tensorflow podemos hacer `pip install --upgrade tensorflow=1.15.0` o crear un entorno de conda especial para este tema).
# 1) Para probar el **reconocimiento de caras** nos vamos a la carpeta code/DL/facerec. Debe estar correctamente instalado DLIB.
#
# En el directorio `gente` se guardan los modelos. Como ejemplo tenemos a los miembros de Monty Python:
#
# ./facerec.py --dev=dir:../../../images/monty-python*
#
# (Recuerda que las imágenes seleccionadas con --dev=dir: se avanzan pinchando con el ratón en la ventana pequeña de muestra).
#
# Puedes meter fotos tuyas y de tu familia en la carpeta `gente` para probar con la webcam o con otras fotos.
#
# Con pequeñas modificaciones de este programa se puede resolver el ejercicio ANON: selecciona una cara en la imagen en vivo pinchando con el ratón para ocultarla (emborronándola o pixelizándola) cuando se reconozca en las imágenes siguientes.
#
# Esta versión del reconocimiento de caras no tiene aceleración con GPU (tal vez se puede configurar). Si reducimos un poco el tamaño de la imagen funciona con bastante fluidez.
# 2) Para probar la máquina **inception** nos movemos a la carpeta code/DL/inception.
#
# ./inception0.py
#
# (Se descargará el modelo de la red). Se puede probar con las fotos incluidas en la carpeta con `--dev=dir:*.png`. La versión `inception1.py` captura en hilo aparte y muestra en consola las 5 categorías más probables.
#
# Aunque se supone que consigue buenos resultados en las competiciones, sobre imágenes naturales comete bastantes errores.
# 3) El funcionamiento de **YOLO** es mucho mejor. Nos vamos a la carpeta code/DL y ejecutamos lo siguiente para descargar el código y los datos de esta máquina (y de openpose).
#
# bash get.sh
#
# Nos metemos en code/DL/yolo y ejecutamos:
#
#     ./yolo-v3.py
#
# Se puede probar también con las imágenes de prueba incluidas añadiendo `--dev=dir:*.png`.
#
# El artículo de [YOLO V3](https://pjreddie.com/media/files/papers/YOLOv3.pdf) es interesante. En la sección 5 el autor explica que abandonó esta línea de investigación por razones éticas. Os recomiendo que la leáis. Como curiosidad, hace unos días apareció [YOLO V4](https://arxiv.org/abs/2004.10934).
# 4) Para probar **openpose** nos vamos a code/DL/openpose. Los archivos necesarios ya se han descargado en el paso anterior, pero necesitamos instalar algunos paquetes. El proceso se explica en el README.
# En la carpeta `docker` hay un script para ejecutar una imagen docker que tiene instalados todos los paquetes que hemos estado usando en la asignatura. Es experimental. No perdáis ahora tiempo con esto si no estáis familiarizados con docker.
#
# El tema de deep learning en visión artificial es amplísimo. Para estudiarlo en detalle hace falta (como mínimo) una asignatura avanzada (master). Nuestro objetivo es familiarizarnos un poco con algunas de las máquinas preentrenadas disponibles para hacernos una idea de sus ventajas y limitaciones.
#
# Si estáis interesados en estos temas el paso siguiente es adaptar alguno de estos modelos a un problema propio mediante "transfer learning", que consiste en utilizar las primeras etapas de una red preentrenada para transformar nuestros datos y ajustar un clasificador sencillo. Alternativamente, se puede reajustar los pesos de un modelo preentrenado, fijando las capas iniciales al principio. Para remediar la posible falta de ejemplos se utilizan técnicas de "data augmentation", que generan variantes de los ejemplos de entrenamiento con múltiples transformaciones.
# ## 12
# Hoy vamos a rectificar el plano de la mesa apoyándonos en marcadores artificiales.
#
# En primer lugar trabajaremos con marcadores poligonales. Nuestro objetivo es detectar un marcador como el que aparece en el vídeo `images/rot4.mjpg`. Nos vamos a la carpeta `code/polygon`.
#
# El primer paso (`polygon0.py`) es detectar figuras poligonales con el número de lados correcto a partir de los contornos detectados.
#
# A continuación (`polygon1.py`) nos quedamos con los polígonos que realmente pueden corresponder al marcador. Esto se hace viendo si existe una homografía que relaciona con precisión suficiente el marcador real y su posible imagen.
#
# Finalmente (`polygon2.py`) obtiene el plano rectificado
#
# También se puede añadir información "virtual" a la imagen original, como por ejemplo los ejes de coordenadas definidos por el marcador (`polygon3.py`).
#
#
# Como segunda actividad, en la carpeta `code/elipses` se muestra la forma de detectar un marcador basado en 4 círculos.
# ## 13
# En esta sesión vamos a extraer la matriz de cámara a partir del marcador utilizado en la sesión anterior, lo que nos permitirá añadir objetos virtuales tridimensionales a la escena y determinar la posición de la cámara en el espacio.
#
# Nos vamos a la carpeta `code/pose`, donde encontraremos los siguientes ejemplos de código:
#
# `pose0.py` incluye el código completo para extraer contornos, detectar el marcador poligonal, extraer la matriz de cámara y dibujar un cubo encima del marcador.
#
# `pose1.py` hace lo mismo con funciones de umucv.
#
# `pose2.py` trata de ocultar el marcador y dibuja un objeto que cambia de tamaño.
#
# `pose3.py` explica la forma de proyectar una imagen en la escena escapando del plano del marcador.
#
# `pose3D.py` es un ejemplo un poco más avanzado que utiliza el paquete pyqtgraph para mostrar en 3D la posición de la cámara en el espacio.
#
# En el ejercicio **RA** puedes intentar que el comportamiento del objeto virtual dependa de acciones del usuario (p. ej. señalando con el ratón un punto del plano) o de objetos que se encuentran en la escena.
| notebooks/guionpracticas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_conda)
# language: python
# name: conda_conda
# ---
# +
# Standard-library imports.
import time
import csv
import os
from pathlib import Path
import sys

# Make the repository root importable so the local `pycor` package resolves.
sys.path.insert(0, '../')

import pycor
from pycor import utils, korlang

stopwatch = utils.StopWatch()  # simple wall-clock timer (pycor utility)

# Load a previously trained model; the output directory name encodes the
# model size used during training.
model_size = 7000
outputpath = "../../output/" + str(model_size) + "/"
model_path = outputpath + "model/"
pycor.loadmodel(model_path)
# +
def listfiles(path):
result_arr = []
filenames = os.listdir(path)
for filename in filenames:
full_filename = os.path.join(path, filename)
result_arr.append(full_filename)
return result_arr
# Collect every file path from the corpus directories (Wikipedia dumps,
# several news sources, and the bundled sample news articles).
files = []
files.extend( listfiles('../../data/wiki2') )
files.extend( listfiles('../../data/wiki') )
files.extend( listfiles('../samples/news') )
files.extend( listfiles('../../data/news') )
files.extend( listfiles('../../data/blotter') )
files.extend( listfiles('../../data/zdnet') )
files.extend( listfiles('../../data/ciobiz') )
files.extend( listfiles('../../data/NP') )
print (len(files), "files")
# +
# Tag set selecting the analyses of interest in analyzeYongEon below.
# NOTE(review): 'EFN' looks like a sentence-final-ending POS tag — confirm
# against pycor's tag inventory.
yeTags = set(['EFN'])
def gettail(word):
    """Return the tail text of the word's best (head, tail) analysis, or 'X'."""
    pair = word.bestpair
    return pair.tail.text if pair else 'X'
def gettag(word):
    """Return the tags of the word's best analysis, or 'X' when unanalyzed."""
    pair = word.bestpair
    return pair.tags if pair else 'X'
def gethead(word):
    """Return the head text of the word's best analysis, or 'X' when unanalyzed."""
    pair = word.bestpair
    return pair.head.text if pair else 'X'
# Accumulators keyed by a word's best head object; each value is a list of
# context entries (head texts / tails / tags of the one or two preceding
# words), filled by the handle* functions below.
yheadsMap = {}
ytailsMap = {}
ytagsMap = {}
def handleHaed(index, word, words):
    """Record the head texts of the one or two preceding words under this word's head.

    NOTE(review): name looks like a typo for handleHead; kept for callers.
    """
    entries = yheadsMap.setdefault(word.bestpair.head, [])
    if index > 1:
        entries.append([gethead(words[index-1]), gethead(words[index-2])])
    else:
        entries.append([gethead(words[index-1])])
def handleTag(index, word, words):
    """Record the tags of the one or two preceding words under this word's head."""
    entries = ytagsMap.setdefault(word.bestpair.head, [])
    if index > 1:
        entries.append([gettag(words[index-1]), gettag(words[index-2])])
    else:
        entries.append([gettag(words[index-1])])
def handleTail(index, word, words):
    """Record the tail texts of the one or two preceding words under this word's head."""
    entries = ytailsMap.setdefault(word.bestpair.head, [])
    if index > 1:
        entries.append([gettail(words[index-1]), gettail(words[index-2])])
    else:
        entries.append([gettail(words[index-1])])
def analyzeYongEon(file):
    """Scan one corpus file and record context for words whose best analysis
    carries a tag in yeTags and whose surface form ends in '다'."""
    sentence_array, words_array = pycor.readfile(file)
    for words in words_array:
        for index, word in enumerate(words):
            pair = word.bestpair
            if not (pair and word.text.endswith('다')):
                continue
            if yeTags & set(pair.tags):
                handleHaed(index, word, words)
                handleTail(index, word, words)
                handleTag(index, word, words)
def writeFile(path, yMap):
with open(path, 'w', encoding='utf-8') as file :
writer = csv.writer(file)
for head, tags in yMap.items():
headText = head.text
for tag in tags:
row = [headText]
row.extend(tag)
writer.writerow(row)
file.close()
# Analyze the first `docsize` corpus files (skipping non-.txt entries)
# and report the elapsed time.
stopwatch.start()
docsize = 70
for file in files[:docsize]:
    if file.endswith(".txt"):
        analyzeYongEon(file)
print("Loading ",docsize," Docs: " , stopwatch.secmilli() , "(", stopwatch.millisecstr(), "ms.)")
# +
# yheadsMap
# ytailsMap
# ytagsMap
def fill(ylist, ymap):
for head, tags in ymap.items():
headText = head.text
for tag in tags:
row = [headText]
row.extend(tag)
ylist.append(row)
# Flatten the tail and tag context maps into two parallel row lists.
# NOTE(review): this assumes ytailsMap and ytagsMap iterate their keys in
# the same order with the same entry counts — they were filled in lockstep
# by analyzeYongEon, but confirm before relying on index alignment.
tailsList = []
tagsList = []
fill(tailsList,ytailsMap)
fill(tagsList,ytagsMap)

# josaYMap: for each "josa key" (the J* particle tags seen in the context),
# count how often each head text occurs. matrix: one combined row per entry
# (head text, tail contexts, joined tag strings).
josaYMap = {}
matrix = []
for index in range(len(tailsList)):
    tail = tailsList[index]
    tag = tagsList[index]
    text = tail[0]          # head text (first column produced by fill)
    row = [text]
    row.extend(tail[1:])    # tail texts of the preceding words
    josaKey = ''
    for tags in tag[1:]:
        tagsStr = "+".join(tags)
        row.append(tagsStr)
        josaKey += "::"
        for t in tags:
            # Only particle (josa) tags — conventionally prefixed 'J' —
            # contribute to the grouping key.
            if t.startswith("J"):
                josaKey += "+" + t
    ymap = josaYMap.get(josaKey)
    if ymap is None:
        ymap = {}
        josaYMap[josaKey] = ymap
    count = ymap.get(text)
    if count is None:
        count = 0
    count += 1
    ymap[text] = count
    matrix.append(row)
def writeTailTagsFile(path, tailTagsList):
    """Write each row of tailTagsList as one CSV line at `path`.

    Args:
        path: destination CSV file path.
        tailTagsList: iterable of row sequences.
    """
    # newline='' prevents csv.writer from emitting blank rows on Windows;
    # the redundant file.close() inside the `with` block has been removed,
    # and writerows replaces the manual loop.
    with open(path, 'w', encoding='utf-8', newline='') as file:
        writer = csv.writer(file)
        writer.writerows(tailTagsList)
# NOTE(review): `outputpath` is rebound here from the model-size-based path
# to a docsize-based one; the doubled '/' in the joined paths is harmless on
# POSIX but worth normalizing.
outputpath = "../../output/" + str(docsize) + "/"
writeFile(outputpath+'/yongeon-heads.csv',yheadsMap)
# writeFile(outputpath+'/yongeon-tails.csv',ytailsMap)
# writeFile(outputpath+'/yongeon-tags.csv',ytagsMap)
writeTailTagsFile(outputpath+'/yongeon-tails-tags.csv',matrix)
print(len(matrix))
print(len(josaYMap))
# +
# Register a collocation resolver so subsequent pycor.readfile calls also
# accumulate collocation statistics in the shared model.
from pycor.res import CollocationResolver
import pycor.speechmodel as sm
resolver = CollocationResolver()
pycor.addresolver(resolver)
def gettext(word, islast=False):
    """Return the display text for a word or Sentence.

    For a Sentence, returns its text(); the original branched on senttype
    here, but both branches returned word.text() identically, so the dead
    branching has been collapsed. For the last word of an n-gram
    (islast=True), prefers the analyzed head text when a best pair exists.

    Args:
        word: a pycor word object or sm.Sentence.
        islast: whether this word is the final token of the n-gram.
    """
    if type(word) is sm.Sentence:
        return word.text()
    if islast and word.bestpair:
        return word.bestpair.head.text
    return word.text
# Count word bigrams and trigrams over every sentence of the sample document.
sentence_array, words_array = pycor.readfile('../samples/docs/economics1.txt')
ngrams = {}
for sentence in sentence_array:
    for index in range(1, len(sentence.words)-1):
        first = gettext(sentence.words[index-1])
        second = gettext(sentence.words[index])
        # Analyzed head form of the middle word, used as the bigram's last token.
        second2 = gettext(sentence.words[index], True)
        third = gettext(sentence.words[index+1], True)
        bigram = ' '.join([first,second2])
        trigram = ' '.join([first,second,third])
        # dict.get with a default collapses the original None-check branches.
        ngrams[bigram] = ngrams.get(bigram, 0) + 1
        # BUG FIX: the original ran `trigramCnt = bigramCnt + 1`, overwriting
        # the trigram count with the bigram count; increment the trigram's
        # own count instead.
        ngrams[trigram] = ngrams.get(trigram, 0) + 1
# ngrams[bigram] = [sentence.words[index-1], sentence.words[index]]
# ngrams[trigram] = [sentence.words[index-1], sentence.words[index],sentence.words[index+1]]
# print(first, second , third )
# for key, cnt in ngrams.items():
# if cnt > 1:
# print(key,cnt)
# for colloc in pycor.getmodel().collocations.values():
# print(colloc, colloc.frequency)
# +
# for file in files[:400]:
# if file.endswith(".txt"):
# pycor.readfile(file)
# print( len(pycor.getmodel().collocations))
# for colloc in pycor.getmodel().collocations.values():
# if colloc.frequency > 3:
# print(colloc, colloc.frequency)
# +
# pycor.readfile('../samples/news/new1_hani.txt')
# for colloc in pycor.getmodel().collocations.values():
# print(colloc, colloc.frequency)
# +
# Register the yongeon (predicate) resolver, feed it the first 100 corpus
# files, then dump its map and index as CSV into `outputpath`.
from pycor.res import yongeonresolver as yr
yongeonResolver = yr.YongeonResolver()
pycor.addresolver(yongeonResolver)
for file in files[:100]:
    if file.endswith(".txt"):
        pycor.readfile(file)
yongeonResolver.writemap(outputpath + "/yongeon.csv")
yongeonResolver.writeindex(outputpath + "/yongeon_index.csv")
| notebooks/collocations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="0R2j0wT2NxUG" colab_type="code" outputId="9390799e-e5a6-4a58-d0a0-909ac16a1bc7" colab={"base_uri": "https://localhost:8080/", "height": 51}
from Models import Classification_Module3 as Classification_Module
from Models import Focus_Module3 as Focus_Module
from Mosaic import mosaic_data, MosaicDataset,split_foreground_background
from torch.utils.data import Dataset,DataLoader
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as nn
from matplotlib import pyplot as plt
# %matplotlib inline
# Select the GPU when available; all tensors/modules below are moved here.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# + id="SblUamycPE7O" colab_type="code" outputId="ef6229b6-cf4d-45b5-f715-d96676263d77" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Normalize CIFAR-10 images to roughly [-1, 1] per channel.
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)
# + id="Tsd9DNgUPMBx" colab_type="code" colab={}
# Split CIFAR-10 into foreground/background pools and compose 30k mosaic
# training examples from them.
data = split_foreground_background(trainloader,total = 50000)
mosaic_list_of_images,mosaic_label,fore_idx = mosaic_data(data,desired_num=30000,total=50000)
# + id="GC1dUctdPgmW" colab_type="code" colab={}
batch = 250
train_dataset = MosaicDataset(mosaic_list_of_images, mosaic_label , fore_idx)
mosaic_loader = DataLoader( train_dataset,batch_size= batch ,shuffle=True)
# + id="TCm_yFatRxib" colab_type="code" colab={}
# Build a 10k-mosaic validation split from the same pool.
mimages_val,mlabel_val,fidx_val = mosaic_data(data,desired_num=10000,total=50000)
# + id="RccSHk3DU3jU" colab_type="code" colab={}
batch = 250
test_dataset = MosaicDataset(mimages_val,mlabel_val,fidx_val)
test_loader = DataLoader( test_dataset,batch_size= batch ,shuffle=True)
# + id="Di1vx00TVFgx" colab_type="code" colab={}
# Focus network followed by a classification network.
# NOTE(review): the constructor arguments (3,1) and (12,3) presumably are
# channel/width settings — confirm against Focus_Module3 and
# Classification_Module3 signatures.
focus_net = Focus_Module(3,1).double()
focus_net = focus_net.to(device)
# + id="R0wsFjYFVPfo" colab_type="code" colab={}
classification_net = Classification_Module(12,3).double()
# + id="jz6llbjujOm8" colab_type="code" outputId="0fb5f223-a67d-4e0b-f54c-d676a5cdb084" colab={"base_uri": "https://localhost:8080/", "height": 136}
classification_net = classification_net.to(device)
classification_net
# + id="emZvvTp0VbIu" colab_type="code" colab={}
# Separate SGD optimizers so the two networks are stepped independently.
optimizer_focus = optim.SGD(focus_net.parameters(),lr = 0.01,momentum=0.9)
optimizer_classification = optim.SGD(classification_net.parameters(),lr =0.01, momentum=0.9)
# + id="cClaMnSRVfS6" colab_type="code" colab={}
criterion = nn.CrossEntropyLoss()
# + id="Y8iOl6JEVjJJ" colab_type="code" outputId="afb1cab8-217b-4270-9464-8f728915b766" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Jointly train the focus and classification networks on the mosaic data.
tr_loss = []  # mean reported minibatch loss per epoch
mini = 40     # report / record the running loss every `mini` minibatches
for epoch in range(110):  # loop over the dataset multiple times
    running_loss = 0.0
    cnt = 0
    ep_loss = []
    for i, data in enumerate(mosaic_loader):
        inputs, labels, fgrnd_idx = data
        # Use the detected `device` instead of the original hard-coded
        # "cuda" so the loop also runs on CPU-only machines.
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer_focus.zero_grad()
        optimizer_classification.zero_grad()
        avg_data, alphas = focus_net(inputs)
        outputs = classification_net(avg_data)
        # (The original also computed torch.max(outputs.data, 1) here, but
        # never used the result during training — removed.)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer_focus.step()
        optimizer_classification.step()
        running_loss += loss.item()
        if cnt % mini == mini-1:  # print every `mini` mini-batches
            print('[%d, %5d] loss: %.3f' %(epoch + 1, cnt + 1, running_loss / mini))
            ep_loss.append(running_loss/mini)
            running_loss = 0.0
        cnt = cnt + 1
    tr_loss.append(np.mean(ep_loss))
print('Finished Training')
# + id="3lB7KlwGVpmd" colab_type="code" outputId="f24a5344-8c7e-48ae-d11c-f9572b6cfbf3" colab={"base_uri": "https://localhost:8080/", "height": 88}
# Evaluate accuracy on the training mosaics and checkpoint both networks.
# NOTE(review): consider wrapping the loop in torch.no_grad() — gradients
# are not needed here.
train_acc = 0
for i, data in enumerate(mosaic_loader):
    inputs,labels,_ = data
    inputs,labels = inputs.to(device), labels.to(device)
    avg_data,alphas = focus_net(inputs)
    outputs = classification_net(avg_data)
    _,predicted = torch.max(outputs.data,1)
    # print(predicted.detach().cpu().numpy())
    train_acc += sum(predicted.cpu().numpy() == labels.cpu().numpy())
# 30000 training examples: dividing the correct count by 300 yields percent.
print("percentage train accuracy: ",train_acc/300)
torch.save(focus_net.state_dict(),"focus_net_at_two.pt")
torch.save(classification_net.state_dict(),"classification_net_at_two.pt")
# + id="QEdCv9Lfd6Pf" colab_type="code" outputId="2a090d2f-8602-4e69-fb60-4d52809ec538" colab={"base_uri": "https://localhost:8080/", "height": 88}
# Evaluate accuracy on the validation mosaics.
val_acc = 0
for i, data in enumerate(test_loader):
    inputs,labels,_ = data
    inputs,labels = inputs.to(device), labels.to(device)
    avg_data,alphas = focus_net(inputs)
    outputs = classification_net(avg_data)
    _,predicted = torch.max(outputs.data,1)
    val_acc +=sum(predicted.cpu().numpy() == labels.cpu().numpy())
# 10000 validation examples: dividing the correct count by 100 yields percent.
print("percentage validation accuracy: ",val_acc/100)
# + id="Q5MIz98beE2L" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="4d57528e-e293-4f32-b023-3356db10ec5d"
# Plot the per-epoch training-loss curve and save it as PNG and PDF.
plt.figure(figsize = (5,4))
plt.plot(tr_loss,label= "training loss")
plt.xlabel("epochs")
plt.ylabel("cross entropy loss")
plt.savefig("training_loss_at_two.png")
plt.savefig("training_loss_at_two.pdf")
# + id="kDF5iDVwYCZb" colab_type="code" colab={}
| 1_mosaic_data_attention_experiments/4_averaging_at_different_layers/layer_two/training_at_layer_two.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="3rjW5U5EWvFi"
# # Environment Loading Examples
#
# In this notebook, we walk through a few examples of how to load and interact with the Construction environments, both using discrete relative actions with graph observations, and using continuous absolute actions with image observations.
#
# For further details, see the [Documentation](https://github.com/deepmind/dm_construction/blob/master/docs/index.md).
# + cellView="both" colab={} colab_type="code" id="ZnFfEX6jkwXb"
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="Qq5modlibPFw"
# **Installation**
#
# 1. From the root of this repository, run `pip install .[demos]` to install both `dm_construction` and extra dependencies needed to run this notebook.
# 2. Install [ffmpeg](https://ffmpeg.org/):
# * Cross-platform with [Anaconda](https://docs.anaconda.com/anaconda/install/): `conda install ffmpeg`
# * Ubuntu: `apt-get install ffmpeg`
# * Mac with [Homebrew](https://brew.sh/): `brew install ffmpeg`
# + cellView="both" colab={} colab_type="code" id="IqN5Xi6Dwg8A"
import base64
import tempfile
import textwrap
import dm_construction
from IPython.display import HTML
from matplotlib import animation
import matplotlib.pyplot as plt
import numpy as np
# + cellView="both" colab={} colab_type="code" id="Rgjq1hYsCQTF"
## Helper Functions
def show_rgb_observation(rgb_observation, size=5):
    """Display an RGB observation from a Unity environment as a square image.

    Args:
        rgb_observation: numpy array of pixels
        size: width and height of the figure, in inches
    """
    _, axis = plt.subplots(figsize=(size, size))
    axis.imshow(rgb_observation)
    axis.set_aspect("equal")
    axis.set_axis_off()
def print_status(env_, time_step_):
    """Print the step's reward and discount, plus the termination reason
    when the episode has ended (discount == 0)."""
    parts = ["r={}, p={}".format(time_step_.reward, time_step_.discount)]
    if time_step_.discount == 0:
        parts.append(" (reason: {})".format(env_.termination_reason))
    print("".join(parts))
# + [markdown] colab_type="text" id="KT-f-mqNHQuY"
# ## Supported tasks and wrappers
# + [markdown] colab_type="text" id="kqZG9Eh6hwT2"
# These are the tasks that can be loaded:
# + colab={"height": 34} colab_type="code" executionInfo={"elapsed": 31, "status": "ok", "timestamp": 1583516904602, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 0} id="hQrnulou5hy_" outputId="ccd09317-734d-4276-d518-bf0b0a2dd37c"
# Display the task names accepted by dm_construction.get_environment.
dm_construction.ALL_TASKS
# + [markdown] colab_type="text" id="pCSth8vehyb0"
# These are the wrappers that can be applied to the tasks:
# + colab={"height": 34} colab_type="code" executionInfo={"elapsed": 34, "status": "ok", "timestamp": 1583516904649, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 0} id="p1dmfSnkBs5X" outputId="a88202a3-4f5d-4598-95ef-e676c20dc8f9"
# Display the wrapper names that can be applied to the tasks.
dm_construction.ALL_WRAPPERS
# + [markdown] colab_type="text" id="zI_cnwMoB2ZX"
# ## Discrete Relative Actions and Graph Observations
# + [markdown] colab_type="text" id="xg356bZDiKNd"
# The `discrete_relative` wrapper exposes graph-based discrete relative actions and graph observations. Here is an example of loading the Covering task with this wrapper and taking some actions in the environment.
#
# Because the observations are graphs, they are not easy to visualize. Instead, we will grab image observations from the underyling task environment and display those instead.
# + colab={} colab_type="code" id="FX3BH6NrB5cf"
# Create the Covering task with graph observations and discrete relative
# actions, at the easiest difficulty.
env = dm_construction.get_environment(
    "covering", wrapper_type="discrete_relative", difficulty=0)
# + colab={"height": 68} colab_type="code" executionInfo={"elapsed": 43, "status": "ok", "timestamp": 1583516915181, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 0} id="pYA3B_ZMB613" outputId="4118a9da-57a4-4f9d-b1d2-157f489b103c"
env.action_spec()  # inspect the structure of the accepted actions
# + colab={"height": 136} colab_type="code" executionInfo={"elapsed": 33, "status": "ok", "timestamp": 1583516915223, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 0} id="jX7dbkOJB7vK" outputId="5ea7f2a2-bcbf-4967-c1ed-f6bfd5454253"
env.observation_spec()  # inspect the graph observation structure
# + colab={"height": 323} colab_type="code" executionInfo={"elapsed": 126, "status": "ok", "timestamp": 1583516915362, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 0} id="HJnIA6CxEYm8" outputId="a6b76f63-381f-4be7-dab3-473c19915fb9"
# Seed NumPy so the sampled episode is reproducible.
np.random.seed(1234)
time_step = env.reset()
# Get the image observation from the task environment (the wrapped
# observation itself is a graph, which is hard to visualize).
show_rgb_observation(env.core_env.last_time_step.observation["RGB"])
# -
time_step.observation
# + colab={"height": 340} colab_type="code" executionInfo={"elapsed": 112, "status": "ok", "timestamp": 1583516915486, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 0} id="GoCtIeXmE1f2" outputId="0c8cfd5c-257c-4806-97e1-4dab9ee7d838"
# Pick the edge connecting the block to move to the block to place it on.
obs = time_step.observation
moved_block = 0
base_block = 7
edge_index = list(
    zip(obs["senders"], obs["receivers"])).index((moved_block, base_block))
# Construct the action.
action = {
    "Index": edge_index,
    "sticky": 1,  # make it sticky
    "x_action": 0,  # place it to the left
}
time_step = env.step(action)
print_status(env, time_step)
# Get the image observation from the task environment.
show_rgb_observation(env.core_env.last_time_step.observation["RGB"])
# -
time_step.observation
# + colab={"height": 340} colab_type="code" executionInfo={"elapsed": 105, "status": "ok", "timestamp": 1583516915604, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 0} id="DkF-Yne2FJPJ" outputId="573ea263-cce9-4325-c1c8-7ee30efb9e16"
# Pick another edge for the second placement.
obs = time_step.observation
moved_block = 3
base_block = len(obs["nodes"]) - 1  # this is the last placed block
edge_index = list(
    zip(obs["senders"], obs["receivers"])).index((moved_block, base_block))
# Construct the action.
action = {
    "Index": edge_index,
    "sticky": 0,  # make it not sticky
    "x_action": 12,  # place it to the right
}
time_step = env.step(action)
print_status(env, time_step)
# Get the image observation from the task environment.
show_rgb_observation(env.core_env.last_time_step.observation["RGB"])
# + colab={} colab_type="code" id="96b9D67ck6QR"
# Stop the environment and release its resources.
env.close()
# + [markdown] colab_type="text" id="4F66-J8rBxr9"
# ## Continuous Absolute Actions and Image Observations
# + [markdown] colab_type="text" id="MvTGYr0Yh3yb"
# The `continuous_absolute` wrapper exposes continuous absolute actions and image observations. Here is an example of loading the Covering task with this wrapper, taking some actions in the environment, and displaying the resulting observations.
# + colab={} colab_type="code" id="NBZdSXXC5eik"
# Create the Covering task with image observations and continuous absolute
# actions, at the easiest difficulty.
env = dm_construction.get_environment(
    "covering", wrapper_type="continuous_absolute", difficulty=0)
# + colab={"height": 85} colab_type="code" executionInfo={"elapsed": 41, "status": "ok", "timestamp": 1583516922655, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 0} id="F7RrPiXNBwX-" outputId="480961d4-49fc-4dc0-8296-65f1643dbd2d"
env.action_spec()  # continuous action fields: Horizontal/Vertical/Sticky/Selector
# + colab={"height": 34} colab_type="code" executionInfo={"elapsed": 43, "status": "ok", "timestamp": 1583516922707, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 0} id="x308Fr2KB0Sy" outputId="110b4f1d-21ac-497d-d54b-d40105b9b4fe"
env.observation_spec()  # image observation spec
# + colab={"height": 322} colab_type="code" executionInfo={"elapsed": 119, "status": "ok", "timestamp": 1583516922837, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 0} id="Jrku9vLjB-aO" outputId="e22ea2d0-708e-46a1-9e08-e860d2dc6d8c"
# Start a new episode (seeded for reproducibility).
np.random.seed(1234)
time_step = env.reset()
# This is the same observation that agents will see.
show_rgb_observation(time_step.observation)
# + colab={"height": 339} colab_type="code" executionInfo={"elapsed": 226, "status": "ok", "timestamp": 1583516923073, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 0} id="Hoz8LwSdCpZ4" outputId="a5f3826a-e5cf-47db-ffa3-085fd58139d4"
# Place a block a bit to the right.
action = {
    "Horizontal": 1,
    "Vertical": 1,
    "Sticky": -1,
    "Selector": 0
}
time_step = env.step(action)
show_rgb_observation(time_step.observation)
print_status(env, time_step)
# + colab={"height": 339} colab_type="code" executionInfo={"elapsed": 107, "status": "ok", "timestamp": 1583516923192, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 0} id="CKlTQgqUDM1t" outputId="eda34e10-92d3-4d0e-96e9-3e2baf70d2b5"
# Place another block in the center, sticky this time.
action = {
    "Horizontal": 0,
    "Vertical": 2,
    "Sticky": 1,
    "Selector": 0
}
time_step = env.step(action)
show_rgb_observation(time_step.observation)
print_status(env, time_step)
# + colab={} colab_type="code" id="oiDk5dFkk895"
# Stop the environment and release its resources.
env.close()
# + [markdown] colab_type="text" id="DJEbWBS9ExDS"
# ## Creating Videos
# + [markdown] colab_type="text" id="zHisw-Lriq13"
# Because physics is simulated for many timesteps in between each action, it can be nice to grab all of those intermediate frames (the observations exposed to the agent are only the final frame of the simulation). To do this, we will enable a special observer camera in the underlying Unity environment and then pull frames from this to create a video.
# + cellView="both" colab={} colab_type="code" id="eNt9gB9jw0am"
def get_environment(problem_type, wrapper_type="discrete_relative",
                    difficulty=0, curriculum_sample=False):
    """Gets the environment.

    This function separately creates the unity environment and then passes it
    to the environment factory. We do this so that we can add an observer to
    the unity environment to get all frames from which we will create a video.

    Args:
      problem_type: the name of the task (one of dm_construction.ALL_TASKS)
      wrapper_type: the name of the wrapper (one of dm_construction.ALL_WRAPPERS)
      difficulty: the difficulty level
      curriculum_sample: whether to sample difficulty from [0, difficulty]

    Returns:
      env_: the environment
    """
    # Separately construct the Unity env, so we can enable the observer camera
    # and set a higher resolution on it.
    unity_env = dm_construction.get_unity_environment(
        observer_width=600,
        observer_height=600,
        include_observer_camera=True,
        max_simulation_substeps=50)
    # Create the main environment by passing in the already-created Unity env.
    env_ = dm_construction.get_environment(
        problem_type, unity_env, wrapper_type=wrapper_type,
        curriculum_sample=curriculum_sample, difficulty=difficulty)
    # Create an observer to grab the frames from the observer camera.
    env_.core_env.enable_frame_observer()
    return env_
def make_video(frames_):
    """Create an inline HTML video element from a sequence of frames.

    Args:
        frames_: non-empty sequence of RGB frames (numpy pixel arrays).

    Returns:
        An IPython HTML object embedding the base64-encoded mp4 video.
    """
    # Create the Matplotlib animation and save it to a temporary file.
    with tempfile.NamedTemporaryFile(suffix=".mp4") as fh:
        writer = animation.FFMpegWriter(fps=20)
        fig = plt.figure(frameon=False, figsize=(10, 10))
        ax = fig.add_axes([0, 0, 1, 1])
        ax.axis("off")
        ax.set_aspect("equal")
        im = ax.imshow(np.zeros_like(frames_[0]), interpolation="none")
        with writer.saving(fig, fh.name, 50):
            for frame in frames_:
                im.set_data(frame)
                writer.grab_frame()
        plt.close(fig)
        # Read and encode the video to base64. Use a context manager so the
        # file handle is closed (the original leaked it via open().read()).
        with open(fh.name, "rb") as mp4_file:
            mp4 = mp4_file.read()
    data_url = "data:video/mp4;base64," + base64.b64encode(mp4).decode()
    # Display the video in the notebook.
    return HTML(textwrap.dedent(
        """
        <video controls>
          <source src="{}" type="video/mp4">
        </video>
        """.format(data_url).strip()))
# + cellView="both" colab={"height": 538} colab_type="code" executionInfo={"elapsed": 8178, "status": "ok", "timestamp": 1583516934455, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 0} id="pCwwFrtDQc8v" outputId="7a164d34-18ae-4119-c11c-90c2293451a2"
# Create the environment (with the observer camera enabled).
env = get_environment("covering", wrapper_type="continuous_absolute")
# Reset the episode.
np.random.seed(1234)
time_step = env.reset()
# NOTE(review): pop_observer_frames() presumably drains the accumulated
# frame buffer — confirm in dm_construction docs.
frames = env.core_env.pop_observer_frames()
# Take an action.
action = {
    "Horizontal": 0,
    "Vertical": 5,
    "Sticky": 0,
    "Selector": 0
}
time_step = env.step(action)
print_status(env, time_step)
# Get all the intermediate frames.
frames.extend(env.core_env.pop_observer_frames())
# Stop the environment.
env.close()
# Display the results as a video. Here you can see the block falling from a
# large height and eventually colliding with an obstacle.
make_video(frames)
# + colab={"height": 538} colab_type="code" executionInfo={"elapsed": 8163, "status": "ok", "timestamp": 1583516942630, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 0} id="vZU7Etk3HxPL" outputId="2dca674e-72ed-4b74-a146-9c7dceae8376"
# Same demo on the marble_run task: capture all simulation frames around
# one sticky block placement and render them as a video.
env = get_environment("marble_run", wrapper_type="continuous_absolute")
# Reset the episode.
np.random.seed(1234)
time_step = env.reset()
frames = env.core_env.pop_observer_frames()
# Take an action.
action = {
    "Horizontal": 0,
    "Vertical": 5,
    "Sticky": 1,
    "Selector": 0
}
time_step = env.step(action)
print_status(env, time_step)
# Get all the intermediate frames.
frames.extend(env.core_env.pop_observer_frames())
# Stop the environment.
env.close()
# Display the results as a video
make_video(frames)
# -
| demos/environment_loading.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Collect cat-image URLs, 1000 at a time, via the Naver Open API.
# +
import requests
# NOTE(review): API credentials are hard-coded in the notebook; move them to
# environment variables or a secrets store before sharing.
client_id = "udzt3jcCA8xTKi3UjGH_"
client_secret = "<KEY>"
header = {'X-Naver-Client-id' : client_id, 'X-Naver-Client-Secret' : client_secret }
# -
# 네이버 OPEN API에서 고양이와 강아지 이미지를 검색
# keyword : 검색어 (고양이, 강아지)
# display : 검색 결과의 개수
# start : 시작 인덱스
def get_api_result(keyword, display, start):
    """Call the Naver image-search API and return the parsed JSON response.

    Args:
        keyword: search term (e.g. "고양이"), sent as the `query` parameter.
        display: number of results per page (Naver allows at most 100).
        start: 1-based index of the first result to return.

    Returns:
        dict: the decoded JSON body of the API response.
    """
    url = 'https://openapi.naver.com/v1/search/image'
    # Let requests build the query string: this percent-encodes `keyword`
    # explicitly, where the original string concatenation relied on
    # requests' implicit encoding of non-ASCII characters in the URL.
    params = {'query': keyword, 'display': display, 'start': start}
    # `header` is the module-level dict carrying the Naver client credentials.
    result = requests.get(url, params=params, headers=header)
    return result.json()
# keyword : 검색어 (고양이, 강아지)
# total_page : 검색할 전체 페이지 (100개씩 10페이지 전체 1000개의 이미지를 검색)
def call_and_print(keyword, total_page=10):
    """Fetch image-search results for `keyword` and return the image URLs.

    Pages through the Naver image-search API `total_page` times, 100
    results per page (so 1000 URLs by default), collecting each item's
    download link in order.
    """
    per_page = 100
    links = []
    for page_index in range(total_page):
        # Naver uses a 1-based start index: page 0 -> 1, page 1 -> 101, ...
        first_result = page_index * per_page + 1
        # Call the API for this page and pull out the download URLs.
        response = get_api_result(keyword, per_page, first_result)
        links.extend(item['link'] for item in response['items'])
    return links
# +
# Fetch 1000 image URLs for the keyword ("고양이" = "cat") and display them.
keyword = "고양이"
link1= call_and_print(keyword)
link1
# -
| MongoDB/Final_Check_MONGODB.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Tutorial: datetime basics — construction, comparison, subtraction, and
# converting a float-ish date string like '20120101.0' back into a datetime.
import datetime
datetime.datetime.now()
# import time
now = datetime.datetime.now()
now.year, now.month, now.day
now.hour, now.minute, now.second
# import special day time
pre = datetime.datetime(2018, 4, 1)
pre.year, pre.month, pre.day
# compare time
now > pre
# minus test — subtracting datetimes yields a timedelta.
pre1 = datetime.datetime(2018, 11, 30)
pre2 = datetime.datetime(2018, 10, 10)
diff_pre = pre1 - pre2
print(diff_pre)
str_diff = str(diff_pre)
print(str_diff)
# +
# 1) str split
# NOTE(review): `diff_pre.days` gives the day count directly; the string
# parsing below is done only for demonstration.
# -
diff_result = str_diff.split()[0]
print(diff_result)
# +
# 2) str2list
# -
list_diff = list(str_diff)
print(list_diff)
# Drop the trailing " days, H:MM:SS" characters to keep only the day count.
int_diff = list_diff[:-14]
print(int_diff)
join_diff = "".join(int_diff)
print(join_diff)
# +
# pre str2date: peel year/month/day out of the string '20120101.0'
# (float -> int -> str -> list of characters -> slices).
today = '20120101.0'
today = float(today)
print(today)
today = int(today)
print(today)
today = str(today)
print(today)
today = list(today)
print(today)
year = today[:4]
print(year)
year = "".join(year)
print(year)
year = int(year)
print(year)
month = today[4:6]
print(month)
month = "".join(month)
print(month)
month = int(month)
print(month)
day = today[6:8]
print(day)
day = "".join(day)
print(day)
day = int(day)
print(day)
# -
# NOTE(review): `day+1` raises ValueError when `day` is the last day of the
# month; `yesterday + datetime.timedelta(days=1)` would be safe.
today = datetime.datetime(year, month, day+1)
yesterday = datetime.datetime(year, month, day)
diff = today - yesterday
print(diff)
| python/datetime_control.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SSD300 MS COCO Evaluation Tutorial
#
# This is a brief tutorial that goes over how to evaluate a trained SSD300 on one of the MS COCO datasets using the official MS COCO Python tools available here:
#
# https://github.com/cocodataset/cocoapi
#
# Follow the instructions in the GitHub repository above to install the `pycocotools`. Note that you will need to set the path to your local copy of the PythonAPI directory in the subsequent code cell.
#
# Of course the evaluation procedure described here is identical for SSD512, you just need to build a different model.
# +
from keras import backend as K
from keras.models import load_model
from keras.optimizers import Adam
from scipy.misc import imread
import numpy as np
from matplotlib import pyplot as plt
import sys
# TODO: Specify the directory that contains the `pycocotools` here.
pycocotools_dir = '../cocoapi/PythonAPI/'
if pycocotools_dir not in sys.path:
sys.path.insert(0, pycocotools_dir)
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from models.keras_ssd300 import ssd_300
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
from keras_layers.keras_layer_L2Normalization import L2Normalization
from data_generator.object_detection_2d_data_generator import DataGenerator
from eval_utils.coco_utils import get_coco_category_maps, predict_all_to_json
# %matplotlib inline
# -
# Set the input image size for the model (SSD300 expects 300x300 inputs).
img_height = 300
img_width = 300
# ## 1. Load a trained SSD
#
# Either load a trained model or build a model and load trained weights into it. Since the HDF5 files I'm providing contain only the weights for the various SSD versions, not the complete models, you'll have to go with the latter option when using this implementation for the first time. You can then of course save the model and next time load the full model directly, without having to build it.
#
# You can find the download links to all the trained model weights in the README.
# ### 1.1. Build the model and load trained weights into it
# +
# 1: Build the Keras model
# Architecture hyperparameters below reproduce the original Caffe
# SSD300-COCO configuration (80 classes, COCO anchor scales).
K.clear_session() # Clear previous models from memory.
model = ssd_300(image_size=(img_height, img_width, 3),
                n_classes=80,
                mode='inference',
                l2_regularization=0.0005,
                scales=[0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05], # The scales for Pascal VOC are [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05]
                aspect_ratios_per_layer=[[1.0, 2.0, 0.5],
                                         [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                         [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                         [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                         [1.0, 2.0, 0.5],
                                         [1.0, 2.0, 0.5]],
                two_boxes_for_ar1=True,
                steps=[8, 16, 32, 64, 100, 300],
                offsets=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
                clip_boxes=False,
                variances=[0.1, 0.1, 0.2, 0.2],
                normalize_coords=True,
                subtract_mean=[123, 117, 104],
                swap_channels=[2, 1, 0],
                confidence_thresh=0.01,
                iou_threshold=0.45,
                top_k=200,
                nms_max_output_size=400)
# 2: Load the trained weights into the model.
# TODO: Set the path of the trained weights.
weights_path = 'path/to/trained/weights/VGG_coco_SSD_300x300_iter_400000.h5'
model.load_weights(weights_path, by_name=True)
# 3: Compile the model so that Keras won't complain the next time you load it.
# (No training happens in this notebook; the optimizer settings are nominal.)
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
# -
# Or
# ### 1.2. Load a trained model
# +
# Alternative to section 1.1: load a previously saved full model from disk.
# TODO: Set the path to the `.h5` file of the model to be loaded.
model_path = 'path/to/trained/model.h5'
# We need to create an SSDLoss object in order to pass that to the model loader.
ssd_loss = SSDLoss(neg_pos_ratio=3, n_neg_min=0, alpha=1.0)
K.clear_session() # Clear previous models from memory.
# Custom layers/losses are not known to Keras, so they must be registered
# via `custom_objects` for deserialization to succeed.
model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,
                                               'L2Normalization': L2Normalization,
                                               'DecodeDetections': DecodeDetections,
                                               'compute_loss': ssd_loss.compute_loss})
# -
# ## 2. Create a data generator for the evaluation dataset
#
# Instantiate a `DataGenerator` that will serve the evaluation dataset during the prediction phase.
# +
# Build the generator that will serve COCO val2017 images for prediction.
dataset = DataGenerator()
# TODO: Set the paths to the dataset here.
MS_COCO_dataset_images_dir = '../../datasets/MicrosoftCOCO/val2017/'
MS_COCO_dataset_annotations_filename = '../../datasets/MicrosoftCOCO/annotations/instances_val2017.json'
dataset.parse_json(images_dirs=[MS_COCO_dataset_images_dir],
                   annotations_filenames=[MS_COCO_dataset_annotations_filename],
                   ground_truth_available=False, # It doesn't matter whether you set this `True` or `False` because the ground truth won't be used anyway, but the parsing goes faster if you don't load the ground truth.
                   include_classes='all',
                   ret=False)
# We need the `classes_to_cats` dictionary (model class index -> COCO
# category id). Read the documentation of this function to understand why.
cats_to_classes, classes_to_cats, cats_to_names, classes_to_names = get_coco_category_maps(MS_COCO_dataset_annotations_filename)
# -
# ## 3. Run the predictions over the evaluation dataset
#
# Now that we have instantiated a model and a data generator to serve the dataset, we can make predictions on the entire dataset and save those predictions in a JSON file in the format in which COCOeval needs them for the evaluation.
#
# Read the documentation to learn what the arguments mean, but the arguments as preset below are the parameters used in the evaluation of the original Caffe models.
# TODO: Set the desired output file name and the batch size.
results_file = 'detections_val2017_ssd300_results.json'
batch_size = 20 # Ideally, choose a batch size that divides the number of images in the dataset.
# Run the model over the whole dataset and dump detections in the JSON
# format COCOeval expects. Thresholds mirror the original Caffe evaluation.
predict_all_to_json(out_file=results_file,
                    model=model,
                    img_height=img_height,
                    img_width=img_width,
                    classes_to_cats=classes_to_cats,
                    data_generator=dataset,
                    batch_size=batch_size,
                    data_generator_mode='resize',
                    model_mode='inference',
                    confidence_thresh=0.01,
                    iou_threshold=0.45,
                    top_k=200,
                    normalize_coords=True)
# ## 4. Run the evaluation
#
# Now we'll load the JSON file containing all the predictions that we produced in the last step and feed it to `COCOeval`. Note that the evaluation may take a while.
# Feed ground truth + our detections JSON to the official COCO evaluator.
coco_gt = COCO(MS_COCO_dataset_annotations_filename)
coco_dt = coco_gt.loadRes(results_file)
image_ids = sorted(coco_gt.getImgIds())
cocoEval = COCOeval(cocoGt=coco_gt,
                    cocoDt=coco_dt,
                    iouType='bbox')
# Restrict evaluation to the images we actually predicted on.
cocoEval.params.imgIds = image_ids
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()  # prints the standard AP/AR table
| ssd300_evaluation_COCO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # MaxCounters
# https://app.codility.com/programmers/lessons/4-counting_elements/max_counters/
# +
# brute force solution, fails the performance test
def solutionnotmap(N, A):
    """Codility MaxCounters, brute force; fails the performance test.

    N counters start at 0.  For each command c in A:
      * 1 <= c <= N -> increase counter c by 1
      * c == N + 1  -> set every counter to the current maximum ("max counters")

    Args:
        N: number of counters.
        A: list of commands, each in 1..N+1.

    Returns:
        The final list of N counter values.  O(N*M) because every
        "max counters" command rewrites all N counters.
    """
    counters = [0] * N
    current_max = 0
    for command in A:
        if command == N + 1:
            # Reset every counter to the running maximum.
            for i in range(N):
                counters[i] = current_max
        else:
            counters[command - 1] += 1
            if counters[command - 1] > current_max:
                current_max = counters[command - 1]
    return counters
# optimal solution, avoid reseting the counter table after reaching the max command
def solution(N, A):
    """Codility MaxCounters, O(N + M): lazy reset instead of rewriting counters.

    Instead of resetting all N counters on every "max counters" command,
    remember the maximum at the time of the last reset (`floor`) and lift a
    counter to that floor the first time it is touched afterwards.  Counters
    never touched after the last reset are filled in at the end.

    Args:
        N: number of counters.
        A: list of commands, each in 1..N+1 (N+1 means "max counters").

    Returns:
        The final list of N counter values.
    """
    counters = [0] * N
    floor = 0        # value every untouched counter implicitly holds
    running_max = 0  # maximum over all counters so far
    touched = set()  # counter indices incremented since the last reset
    for command in A:
        if command == N + 1:
            touched.clear()
            floor = running_max
        else:
            idx = command - 1
            if idx in touched:
                counters[idx] += 1
            else:
                # First touch since the last reset: start from the floor.
                touched.add(idx)
                counters[idx] = floor + 1
            if counters[idx] > running_max:
                running_max = counters[idx]
    # Counters untouched since the last reset still hold the floor value.
    for i in range(N):
        if i not in touched:
            counters[i] = floor
    return counters
# -
# Smoke test from the task statement; expected output: [3, 2, 2, 4, 2].
A = [3,4,4,6,1,4,4]
print(solution(5,A))
# # FrogRiverOne
# https://app.codility.com/programmers/lessons/4-counting_elements/frog_river_one/
# +
from typing import Set, List
def solution(X: int, A: List[int]) -> int:
    """Codility FrogRiverOne: earliest second at which leaves cover 1..X.

    Returns the first index i such that {A[0], ..., A[i]} contains every
    value 1..X, or -1 if that never happens.  Uses the distinct-value sum:
    distinct values drawn from 1..X add up to X*(X+1)/2 exactly when all
    of 1..X are present.

    Args:
        X: the far bank position (must be > 0).
        A: non-empty list of leaf positions per second.
    """
    assert len(A) > 0
    assert X > 0
    seen: Set[int] = set()
    # Floor division keeps this exact for large X; the original
    # float-based int((X*(X+1)) / 2) could lose precision.
    target: int = X * (X + 1) // 2
    total: int = 0
    for second, leaf in enumerate(A):
        if leaf not in seen:
            total += leaf
            seen.add(leaf)
        if total == target:
            return second
    return -1
# -
# Examples from the task statement plus boundary cases.
assert (solution(5,[1,3,1,4,2,3,5,4]) == 6)
assert (solution(1,[1]) == 0)
assert (solution(3,[2,1]) == -1)
assert (solution(3,[3,2,1]) == 2)
# # PermCheck
# https://app.codility.com/programmers/lessons/4-counting_elements/perm_check/
# +
from typing import List
def solution(A: List[int]) -> int:
    """Codility PermCheck: return 1 if A is a permutation of 1..len(A), else 0.

    Single pass with a seen-table.  Values outside 1..N are rejected
    explicitly: the original only checked the upper bound, so values <= 0
    silently wrapped around via Python's negative indexing (e.g. [0, 1]
    was wrongly accepted).

    Args:
        A: list of integers to check.
    """
    n = len(A)
    seen: List[bool] = [False] * n
    for value in A:
        # A permutation of 1..n cannot contain out-of-range values.
        if value < 1 or value > n:
            return 0
        if seen[value - 1]:
            return 0  # duplicate
        seen[value - 1] = True
    return 1
# -
# Valid permutation, missing value, duplicate, and all-equal cases.
assert(solution([4,1,3,2]) == 1)
assert(solution([4,1,3]) == 0)
assert(solution([1,4,1]) == 0)
assert(solution([2,2,2]) == 0)
# # MissingInteger
# https://app.codility.com/programmers/lessons/4-counting_elements/missing_integer/
# +
from typing import List
def solution(A: List[int]) -> int:
    """Codility MissingInteger: smallest positive integer absent from A.

    O(n) time / O(n) space: collect the positive values into a set and
    scan upward from 1.  Replaces the original sort-based O(n log n)
    implementation; the returned values are identical for every input
    (including an empty list and all-negative lists, both -> 1).

    Args:
        A: list of integers (may be empty, may contain non-positives).
    """
    positives = {value for value in A if value > 0}
    candidate = 1
    while candidate in positives:
        candidate += 1
    return candidate
# -
# Mixed, all-negative, and contiguous-run cases.
assert(solution([1, 3, 6, 4, 1, 2]) == 5)
assert(solution([-1,-3]) == 1)
assert(solution([1,2,3]) == 4)
| codility-lessons/4 Counting Elements.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # work on the tree
# ## ast.NodeVisitor
# ast.NodeVisitor is the primary tool for ‘scanning’ the tree.
# +
import ast
import inspect
# Print the stdlib implementation of NodeVisitor to see how its
# visit()/generic_visit() dispatch works.
print(inspect.getsource(ast.NodeVisitor))
# -
# To use it, subclass it and override methods visit_Foo, corresponding to the node classes. (see [Meet the Nodes](https://greentreesnakes.readthedocs.io/en/latest/nodes.html)).
#
# For example, this visitor will print the names of any functions defined in the given code, including methods and functions defined within other functions:
import ast
class FuncLister(ast.NodeVisitor):
    """Print the name of every function definition in a tree."""
    def visit_FunctionDef(self, node):
        print('func_name: ', node.name)
        # Keep walking so functions nested inside this one are visited too.
        self.generic_visit(node)
source_code = """
def a():
print('i am a')
def b():
print('call a')
a()
def c():
print('i am c function')
b()
""".strip()
FuncLister().visit(ast.parse(source_code))
# If you want child nodes to be visited, remember to call self.generic_visit(node) in the methods you override.
# Alternatively, you can run through a list of all the nodes in the tree using ast.walk(). There are no guarantees about the order in which nodes will appear. The following example again prints the names of any functions defined within the given code:
# +
import ast
# Same sample program as above, but scanned with ast.walk() instead of a
# NodeVisitor subclass.  ast.walk() yields nodes in no guaranteed order.
source_code = """
def a():
    print('i am a')
def b():
    print('call a')
    a()
def c():
    print('i am c function')
    b()
""".strip()
tree = ast.parse(source_code)
for node in ast.walk(tree):
    if isinstance(node, ast.FunctionDef):
        print('func_name: ', node.name)
# -
#
#
#
#
#
#
| doc/steps_to_make/2001_0401_work_on_tree.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense, Dropout
# -
# Instantiate VGG16 with ImageNet weights (downloads them on first run).
vgg16 = VGG16()
vgg16.summary()
# ## Generative Adversarial Networks
# Demonstration: what does a pretrained classifier predict on degenerate
# (all-zero, all-white, random-noise) inputs?
# Zeroes image
input_image = np.zeros((224, 224, 3))
input_image_preprocessed = preprocess_input(input_image)
# NOTE(review): the nested list wraps the image into a batch of size 1.
predictions = vgg16.predict([[input_image_preprocessed]])
decode_predictions(predictions)
# matchstick - Model has no idea what to predict...
# Ones image
input_image = np.ones((224, 224, 3)) * 255
input_image_preprocessed = preprocess_input(input_image)
predictions = vgg16.predict([[input_image_preprocessed]])
decode_predictions(predictions)
# Random image
input_image = np.random.uniform(0, 255, (224, 224, 3)).astype(np.uint8)
input_image_preprocessed = preprocess_input(input_image)
predictions = vgg16.predict([[input_image_preprocessed]])
decode_predictions(predictions)
| 6_Generative_Models/6_GN_GAN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Collection
#
# These are the most common used functions
#
# 1. namedtuple
# 2. Counter
# 3. deque
# 4. OrderedDict
# 5. defaultDict
#
#
########################
# 1. namedtuple
########################
from collections import namedtuple
# Fields become attributes accessible by name as well as by index.
Point = namedtuple('point',['x','y']) # return new subclass of tuple
pt1 = Point(5,10)
pt2 = Point(2,5)
dot_product = ( pt1.x * pt2.x) + ( pt1.y * pt2.y)
dot_product
# Field names can also be given as one space-separated string.
Car = namedtuple('Car','Price Mileage Colour Class')
xyz = Car(Price = 100000, Mileage = 30, Colour = 'Cyan', Class = 'Y')
print(xyz)
print(xyz.Class)
#################
# 2. orderdict
#################
# NOTE(review): since Python 3.7 plain dicts also preserve insertion order,
# so the "unordered" example below behaves the same on modern interpreters.
from collections import OrderedDict
od = {} # unordered dictionary
od['a'] = 1
od['c'] = 3
od['d'] = 4
od['b'] = 2
od['e'] = 5
print(od)
od = OrderedDict() # ordered dictionary
od['a'] = 1
od['c'] = 3
od['b'] = 2
od['d'] = 4
od['e'] = 5
print(od)
# Sum quantities per item name, preserving first-seen order.  Each input
# line is "<item name words...> <quantity>"; the first line is the count.
from collections import OrderedDict
d = OrderedDict()
for i in range(int(input())):
    x = input().split()
    # Everything except the last token is the item name.
    if (' '.join(x[0:-1])) in d:
        d[' '.join(x[0:-1])] += int(x[-1])
    else:
        d[' '.join(x[0:-1])] = int(x[-1])
print(d)
#
for i in range(len(d)):
    print("{} {}".format(list(d.keys())[i],list(d.values())[i]))
# Count occurrences of each input word, preserving first-seen order;
# print the number of distinct words, then all counts on one line.
from collections import OrderedDict
d = OrderedDict()
for i in range(int(input())):
    x = input()
    if x in d:
        d[x] += 1
    else:
        d[x] = 1
print(len(d))
print(*(list(d.values())))
#######################################
# 3. counter
# Tally occurrences of words in a list
#######################################
from collections import Counter
# Counter returns 0 for missing keys, so += works without initialisation.
cnt = Counter()
l = ['one','two','three','two','three','four','three']
for i in l:
    print(i)
    cnt[i] += 1
print(cnt)
# +
#####################
# 4. deque
#####################
# Walk through the deque API; the trailing comments show the contents
# after each operation.
from collections import deque
d = deque()
d.append(1)                   # [1]
d.appendleft(2)               # [2,1]
d.clear()                     # []
d.extend('1')                 # ['1']
d.extendleft('1234')          # ['4','3','2','1','1']
d.count('1')
d.pop()                       # ['4','3','2','1']
d.popleft()                   # ['3','2','1']
d.remove('2')                 # ['3','1']
d.reverse()                   # ['1','3']
d.extend([1,2,3,4,5,6,7,8,9]) # extend take iterator # ['1','3',1,2,3,4,5,6,7,8,9]
d.reverse()                   # [9,8,7,6,5,4,3,2,'3','1']
d.rotate(2)                   # rotate toward right 2 indices {move last 2 elements on front}
d
# -
# Apply a sequence of deque commands read from stdin
# ("append x" / "appendleft x" / "pop" / "popleft").
from collections import deque
d = deque()
for i in range(int(input())):
    x = list(input().split())
    if x[0]=='append':
        d.append(int(x[1]))
    elif x[0]=='appendleft':
        d.appendleft(int(x[1]))
    elif x[0]=='pop':
        d.pop()
    elif x[0]=='popleft':
        d.popleft()
# "Piling Up!"-style check: can the cubes be stacked by always taking the
# larger of the two ends?  `k` stays 0, so c[k] is always the front element.
# NOTE(review): this version looks fragile — c[k+1] peeks at the second
# element even when deciding against the back, and the Yes/No printing only
# happens once len(c)==1; the cleaner loop below is the trusted version.
k = 0
from collections import deque
for i in range(int(input())):
    n = []
    l = int(input())
    c = deque(map(int,input().split()))
    for j in range(l-1):
        if (c[k] >= c[-1]) and (c[k] >= c[k+1]):
            n.append(c.popleft())
        elif c[-1] >= c[k] and (c[k] >= c[k+1]):
            n.append(c.pop())
        else:
            print('No')
            break
        if len(c)==1:
            if c[0] >= n[-1]:
                print('No')
                break
            else:
                print('Yes')
                break
# Cleaner equivalent: a stackable sequence must be non-increasing and then
# non-decreasing (a "valley"); scan down, then up, and check we reached the end.
for t in range(int(input())):
    input()  # cube count — unused, len() of the list suffices
    lst = list(map(int, input().split()))
    l = len(lst)
    i = 0
    while i < l - 1 and lst[i] >= lst[i+1]:
        i += 1
    while i < l - 1 and lst[i] <= lst[i+1]:
        i += 1
    print ("Yes" if i == l - 1 else "No")
# +
from collections import Counter, OrderedDict
class OrderedCounter(Counter, OrderedDict):
    """Counter that remembers insertion order (pre-3.7 idiom)."""
    pass
# Print the 3 most common characters (ties broken alphabetically via sorted).
# NOTE(review): a list comprehension used purely for its print side effect —
# a plain for-loop would be the idiomatic form.
[print(*c) for c in OrderedCounter(sorted(input())).most_common(3)]
# -
# MINIONS PROBLEMS
# "The Minion Game": a substring starting at index i occurs in len(s)-i
# substrings, so score each start position by whether it's a vowel (Kevin)
# or consonant (Stuart).
s = input()
vowels = 'AEIOU'
kevsc = 0
stusc = 0
for i in range(len(s)):
    if s[i] in vowels:
        kevsc += (len(s)-i)
    else:
        stusc += (len(s)-i)
if kevsc > stusc:
    print ("Kevin", kevsc)
elif kevsc < stusc:
    print ("Stuart", stusc)
else:
    print ("Draw")
# For each word in group B, print the 1-based positions where it occurs in
# group A (or -1 if absent).  Found entries are masked with '*' so repeated
# occurrences report successive indices.
x=input().split()
a=[]
b=[]
for i in range(int(x[0])):
    a.append(input().strip())
for i in range(int(x[1])):
    b.append(input().strip())
for i in range(len(b)):
    if b[i] in a:
        for j in range(a.count(b[i])):
            print('{} '.format( a.index( b[i] ) + 1), end = '' )
            a[a.index(b[i])] = '*'  # mask so index() finds the next occurrence
        print()
    else:
        print('{}'.format(-1))
# +
# DEfault Dict
# Group 1-based positions of each word in group A, then look up group B
# words; defaultdict(list) removes the need for an existence check.
from collections import defaultdict
d = defaultdict(list)
n, m = list(map(int, input().split()))
for i in range(n):
    d[input()].append(i + 1)
for i in range(m):
    # Missing keys yield an empty list -> empty join -> falsy -> print -1.
    print(' '.join(map(str, d[input()])) or -1)
# +
# "Happiness" score: +1 for elements of the liked list a, -1 for the
# disliked list b.  NOTE(review): `in` on lists is O(n) per lookup; the
# set-based version below is the fast form.
n,m = input().split(); n = int(n); m = int(m)
a = list(map(int,input().split()))
b = list(map(int,input().split()))
x = 0
for i in list(map(int,input().split())):
    if i in a:
        x += 1
    elif i in b:
        x -= 1
print(x)
# -
# Same problem with O(1) set membership: booleans coerce to 1/0, so
# (i in A) - (i in B) is +1, -1 or 0 per element.
n, m = input().split()
sc_ar = input().split()
A = set(input().split())
B = set(input().split())
print (sum([(i in A) - (i in B) for i in sc_ar]))
| 15. Collections.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Machine-Learning-Tokyo/CNN-Architectures/blob/master/Implementations/ResNet/ResNet_implementation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="-D79U3kqGx2d" colab_type="text"
# # Implementation of ResNet
#
# We will use the [tensorflow.keras Functional API](https://www.tensorflow.org/guide/keras/functional) to build ResNet from the original paper: “[Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)” by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
#
# [Video tutorial](https://www.youtube.com/watch?v=oeIMmtUroi0&list=PLaPdEEY26UXyE3UchW0C742xh542yh0yI&index=5)
#
# ---
#
# In the paper we can read:
#
# >**[i]** “We adopt batch normalization (BN) [16] right after each convolution and before activation.”
# >
# >**[ii]** "Donwsampling is performed by conv3_1, conv4_1, and conv5_1 with a stride of 2."
# >
# >**[iii]** "(B) The projection shortcut in Eqn.(2) is used to match dimensions (done by 1×1 convolutions). For both options, when the shortcuts go across feature maps of two sizes, they are performed with a stride of 2"
# >
# >**[iv]** "[...] (B) projection shortcuts are used for increasing dimensions, and other shortcuts are identity;"
# >
# >**[v]** "The three layers are 1×1, 3×3, and 1×1 convolutions, where the 1×1 layers are responsible for reducing and then increasing (restoring) dimensions, leaving the 3×3 layer a bottleneck with smaller input/output dimensions."
# >
# >**[vi]** "50-layer ResNet: We replace each 2-layer block in the 34-layer net with this 3-layer bottleneck block, resulting in a 50-layer ResNet (Table 1). We use option B for increasing
# dimensions."
#
# <br>
#
# We will also make use of the following Table **[vii]**:
#
# <img src=https://github.com/Machine-Learning-Tokyo/DL-workshop-series/raw/master/Part%20I%20-%20Convolution%20Operations/images/ResNet/ResNet.png width="600">
#
# <br>
# <br>
#
# as well the following diagram **[viii]**:
# <img src=https://github.com/Machine-Learning-Tokyo/DL-workshop-series/raw/master/Part%20I%20-%20Convolution%20Operations/images/ResNet/ResNet_block.png width="200">
#
# ---
#
# ## Network architecture
#
# The network starts with a [Conv, BatchNorm, ReLU] block (**[i]**) and continues with a series of **Resnet blocks** (conv*n*.x in **[vii]**) before the final *Avg Pool* and *Fully Connected* layers.
#
# ### Resnet block
#
# The *Resnet block* consists of a repetition of blocks similar to the one depicted in **[viii]**. As one can see the input tensor goes through three Conv-BN-ReLU blocks and the output is added to the input tensor. This type of connection that skips the main body of the block and merges (adds) the input tensor with another one further on is called *skip connection* (right arrow in **[viii]**).
#
# There are two types of skip connections in ResNet: the **Identity** and the **Projection**. In **[viii]** is depicted the **Identity** one. This is used when the input tensor has same shape as the one produced by the last Convolution layer of the block.
#
# However, when the two tensors have different shape, the input tensor must change to get same shape as the other one in order to be able to be added to it. This is done by the **Projection** connection as described in **[iii]** and **[iv]**.
#
# The change in shape happens when we:
# - Change the number of filters and thus of feature maps of the output tensor.
# This happens at the first sub-block of each *ResNet* block since the output tensor has 4 times the number of feature maps than the input tensor.
# - Change the spatial dimensions of the output tensor (downsampling)
# which takes place according to **[ii]**.
#
# #### Identity block
#
# The *Identity block* takes a tensor as an input and passes it through 1 stream of:
# > 1. a 1x1 *Convolution* layer followed by a *Batch Normalization* and a *Rectified Linear Unit (ReLU)* activation layer
# > 2. a 3x3 *Convolution* layer followed by a *Batch Normalization* and a *Rectified Linear Unit (ReLU)* activation layer
# > 3. a 1x1 *Convolution* layer followed by a *Batch Normalization* layer
# >
# > Pay attention at the number of filters (depicted with the letter f at the diagram) which are the same for the first 2 Convolution layer but 4x for the 3rd one.
#
# Then the *output* of this stream is added to the *input* tensor. On the new tensor a *Rectified Linear Unit (ReLU)* activation is applied before returning it.
#
# <br>
#
# #### Projection block
#
# The *Projection block* takes a tensor as an input and passes it through 2 streams.
# - The left stream consists of:
# > 1. a 1x1 *Convolution* layer followed by a *Batch Normalization* and a *Rectified Linear Unit (ReLU)* activation layer
# > 2. a 3x3 *Convolution* layer followed by a *Batch Normalization* and a *Rectified Linear Unit (ReLU)* activation layer
# > 3. a 1x1 *Convolution* layer followed by a *Batch Normalization* layer
# >
# > Pay attention at the number of filters (depicted with the letter f at the diagram) which are the same for the first 2 Convolution layer but 4x for the 3rd one.
#
#
# - The right stream consists of:
# > a 1x1 *Convolution* layer followed by a *Batch Normalization* layer
#
# The outputs of both streams are then added up to a new tensor on which a *Rectified Linear Unit (ReLU)* activation is applied befor returning it.
#
# <br>
#
# As one can see the only difference between the two blocks is the existence of the Convolution-Batch Normalization sub-block at the right stream.
#
# The reason we need this Convolution layer is:
# - To change the number of filters (feature maps) of the tensor after each block.
# - To change the size of the tensor after each block.
#
# In order to change the size (downsampling) we use a stride of 2 after specific blocks as described at **[ii]** at the first 1x1 Convolution layer and the Projection's Convolution layer according to **[iii]** and **[v]**.
#
# ---
#
# ## Workflow
# We will:
# 1. import the neccesary layers
# 2. write a helper function for the Conv-BatchNorm-ReLU block (**[i]**)
# 3. write a helper function for the Identity block
# 4. write a helper function for the Projection block
# 5. write a helper function for the Resnet block (**[ii]**)
# 6. use these helper functions to build the model.
#
# ---
#
# ### 1. Imports
# + id="I4R3bJ_oGvWu" colab_type="code" colab={}
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, \
ReLU, Add, MaxPool2D, GlobalAvgPool2D, Dense
# + [markdown] id="s_OYs62GHy25" colab_type="text"
# ### 2. *Conv-BatchNorm-ReLU block*
# Next, we will build the *Conv-BatchNorm-ReLU block* as a function that will:
# - take as inputs:
# - a tensor (**`x`**)
# - the number of filters (**`filters`**)
# - the kernel size (**`kernel_size`**)
# - the strides (**`strides`**)
# - run:
# - apply a *Convolution layer* followed by a *Batch Normalization* and a *ReLU* activation
# - return the tensor
# + id="zIX-qmoYH5cr" colab_type="code" colab={}
def conv_batchnorm_relu(x, filters, kernel_size, strides):
    """Apply Conv2D ('same' padding) -> BatchNorm -> ReLU to tensor `x`.

    This is the basic building step of the network ([i] in the paper:
    batch normalization right after each convolution, before activation).
    """
    conv = Conv2D(filters=filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same')
    return ReLU()(BatchNormalization()(conv(x)))
# + [markdown] id="3TKlbCK7H9Ej" colab_type="text"
# ### 3. *Identity block*
# Now, we will build the *Identity block* as a function that will:
# - take as inputs:
# - a tensor (**`tensor`**)
# - the number of filters (**`filters`**)
# - run:
# - apply a 1x1 **Conv-BatchNorm-ReLU block** to **`tensor`**
# - apply a 3x3 **Conv-BatchNorm-ReLU block**
# - apply a 1x1 *Convolution layer* with 4 times the filters **`filters`**
# - apply a *Batch normalization*
# - add this tensor with **`tensor`**
# - apply a *ReLU* activation
# - return the tensor
# + id="hAV39Lb_ICOH" colab_type="code" colab={}
def identity_block(tensor, filters):
    """Bottleneck residual block whose shortcut is the identity mapping.

    Main path: 1x1 -> 3x3 -> 1x1 convolutions, the last with 4*filters
    feature maps and BatchNorm but no activation; the input tensor is then
    added back and a final ReLU applied.  Requires the input to already
    have 4*filters channels.
    """
    out = conv_batchnorm_relu(tensor, filters=filters, kernel_size=1, strides=1)
    out = conv_batchnorm_relu(out, filters=filters, kernel_size=3, strides=1)
    # Last conv restores the channel count (4x) and has no ReLU before the add.
    out = BatchNormalization()(
        Conv2D(filters=4 * filters, kernel_size=1, strides=1)(out))
    return ReLU()(Add()([out, tensor]))
# + [markdown] id="7Q38iyJWII9K" colab_type="text"
# ### 4. *Projection block*
# Now, we will build the *Projection block* which is similar to the *Identity* one.
#
# Remember, this time we need the strides because we want to downsample the tensors at specific blocks according to **[ii]**, **[iii]** and **[v]**:
# > “the 1×1 layers are responsible for reducing and then increasing (restoring) dimensions”.
#
# The downsampling at the main stream will take place at the first 1x1 Convolution layer*.
# The downsampling at the right stream will take place at its Convolution layer.
# + id="V1BTC5FuIMuv" colab_type="code" colab={}
def projection_block(tensor, filters, strides):
    """Bottleneck residual block with a projection (1x1 conv) shortcut.

    Used when input and output shapes differ: any downsampling happens at
    the first 1x1 convolution of the main path and at the shortcut
    convolution (both use `strides`), per [iii]/[v] in the paper.
    """
    # Main path: 1x1 (stride `strides`) -> 3x3 -> 1x1 with 4*filters.
    main = conv_batchnorm_relu(tensor, filters=filters, kernel_size=1, strides=strides)
    main = conv_batchnorm_relu(main, filters=filters, kernel_size=3, strides=1)
    main = Conv2D(filters=4 * filters, kernel_size=1, strides=1)(main)
    main = BatchNormalization()(main)
    # Shortcut path: 1x1 conv + BN so channels and spatial size match.
    skip = BatchNormalization()(
        Conv2D(filters=4 * filters, kernel_size=1, strides=strides)(tensor))
    return ReLU()(Add()([main, skip]))
# + [markdown] id="hrmfsTM1IXaS" colab_type="text"
# \**Notice that in some implementations downsampling takes place at the 3x3 layer. This is also know as ResNet 1.5 (https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch).*
#
# ---
#
# ### 5. *Resnet block*
# Now that we defined the *Projection block* and the *Identity block* we can use them to define the **Resnet block**.
#
# Based on the **[vii]** (column *50-layer*) for each block we have a number of repetitions (depicted with *xn* next to the block numbers). The 1st of these blocks will be a *Projection block* and the rest will be *Identity blocks*.
#
# The reason for this is that at the beginning of each block the number of feature maps of the tensor change. Since at the Identity block the input tensor and the output tensor are added, they need to have the same number of feature maps.
#
# Let's build the *Resnet block* as a function that will:
# - take as inputs:
# - a tensor (**`x`**)
# - the number of filters (**`filters`**)
# - the total number of repetitions of internal blocks (**`reps`**)
# - the strides (**`strides`**)
# - run:
# - apply a projection block with strides: **`strides`**
# - for apply an *Identity block* for $r-1$ times (the $-1$ is because the first block was a *Convolution* one)
# - return the tensor
# + id="pTsieulOITMi" colab_type="code" colab={}
def resnet_block(x, filters, reps, strides):
    """One conv_n.x stage: a projection block, then reps-1 identity blocks.

    The first sub-block must be a projection because the channel count
    (and possibly the spatial size) changes at the start of each stage.
    """
    out = projection_block(x, filters=filters, strides=strides)
    for _ in range(reps - 1):
        out = identity_block(out, filters=filters)
    return out
# + [markdown] id="TCOXYcAmIebP" colab_type="text"
# ### 6. Model code
# Now we are ready to build the model:
# + id="wTK6a4AmIawj" colab_type="code" colab={}
# Assemble ResNet-50 per Table 1 of the paper (column "50-layer").
# NOTE(review): `input` shadows the builtin of the same name in this cell.
input = Input(shape=(224, 224, 3))
x = conv_batchnorm_relu(input, filters=64, kernel_size=7, strides=2)    # [3]: 7x7, 64, strides 2
x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)    # [3]: 3x3 max pool, strides 2
x = resnet_block(x, filters=64, reps=3, strides=1)
x = resnet_block(x, filters=128, reps=4, strides=2)   # strides=2 ([2]: conv3_1)
x = resnet_block(x, filters=256, reps=6, strides=2)   # strides=2 ([2]: conv4_1)
x = resnet_block(x, filters=512, reps=3, strides=2)   # strides=2 ([2]: conv5_1)
x = GlobalAvgPool2D()(x)    # [3]: average pool *it is not written any pool size so we use Global
output = Dense(1000, activation='softmax')(x)    # [3]: 1000-d fc, softmax
from tensorflow.keras import Model
model = Model(input, output)
# + [markdown] id="WCnWXgx3JG1d" colab_type="text"
# ## Final code
# + id="1fdNEUqGIjZL" colab_type="code" colab={}
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Conv2D, BatchNormalization, ReLU, Add
from tensorflow.keras.layers import MaxPool2D, GlobalAvgPool2D, Dense
def conv_batchnorm_relu(x, filters, kernel_size, strides):
    """Conv2D (with 'same' padding) -> BatchNorm -> ReLU, applied to x."""
    conv = Conv2D(filters=filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same')
    return ReLU()(BatchNormalization()(conv(x)))
def identity_block(tensor, filters):
    """Bottleneck residual block whose shortcut is the unmodified input."""
    out = conv_batchnorm_relu(tensor, filters=filters, kernel_size=1, strides=1)
    out = conv_batchnorm_relu(out, filters=filters, kernel_size=3, strides=1)
    # The final 1x1 conv expands to 4*filters so the add matches the input width.
    out = Conv2D(filters=4*filters, kernel_size=1, strides=1)(out)
    out = BatchNormalization()(out)
    summed = Add()([out, tensor])
    return ReLU()(summed)
def projection_block(tensor, filters, strides):
    """Bottleneck residual block with a 1x1-conv shortcut (used when the
    tensor's shape changes at the start of a stage)."""
    # main branch
    main = conv_batchnorm_relu(tensor, filters=filters, kernel_size=1, strides=1)
    main = conv_batchnorm_relu(main, filters=filters, kernel_size=3, strides=strides)
    main = Conv2D(filters=4*filters, kernel_size=1, strides=1)(main)  # expand to 4*filters
    main = BatchNormalization()(main)
    # shortcut branch: project the input to 4*filters channels at the same stride
    shortcut = Conv2D(filters=4*filters, kernel_size=1, strides=strides)(tensor)
    shortcut = BatchNormalization()(shortcut)
    return ReLU()(Add()([main, shortcut]))
def resnet_block(x, filters, reps, strides):
    """Stack one projection block plus reps-1 identity blocks."""
    y = projection_block(x, filters=filters, strides=strides)
    remaining = reps - 1  # the projection block above counts as repetition #1
    for _ in range(remaining):
        y = identity_block(y, filters=filters)
    return y
# Assemble ResNet50: stem, four residual stages, global pooling, classifier.
from tensorflow.keras import Model

inputs = Input(shape=(224, 224, 3))  # renamed from `input` to avoid shadowing the builtin
x = conv_batchnorm_relu(inputs, filters=64, kernel_size=7, strides=2)  # [3]: 7x7, 64, strides 2
x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)  # [3]: 3x3 max pool, strides 2
x = resnet_block(x, filters=64, reps=3, strides=1)
x = resnet_block(x, filters=128, reps=4, strides=2)  # s=2 ([2]: conv3_1)
x = resnet_block(x, filters=256, reps=6, strides=2)  # s=2 ([2]: conv4_1)
x = resnet_block(x, filters=512, reps=3, strides=2)  # s=2 ([2]: conv5_1)
x = GlobalAvgPool2D()(x)  # [3]: average pool; no pool size given, so global pooling
output = Dense(1000, activation='softmax')(x)  # [3]: 1000-d fc, softmax
model = Model(inputs, output)
# + [markdown] id="Dndy3rtVJQYw" colab_type="text"
# ## Model diagram
#
# <img src="https://raw.githubusercontent.com/Machine-Learning-Tokyo/CNN-Architectures/master/Implementations/ResNet/ResNet_diagram.svg?sanitize=true">
| Implementations/ResNet/ResNet_implementation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Author : <NAME>
# github link : https://github.com/amirshnll/Cryotherapy
# dataset link : http://archive.ics.uci.edu/ml/datasets/Cryotherapy+Dataset+
# email : <EMAIL>
# -
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import datasets
from pandas.plotting import scatter_matrix
from sklearn.model_selection import train_test_split

# Cryotherapy dataset: predict treatment success from patient/wart features.
# NOTE(review): the UCI Cryotherapy file lists columns as sex, age, Time,
# Number_of_Warts, Type, Area, Result_of_Treatment (target last) and has a
# header row -- confirm this name order and header=None match Cryotherapy.csv.
col_names = ['Result_of_Treatment', 'sex', 'age', 'Time', 'Number_of_Warts', 'Type', 'Area']
cry = pd.read_csv("Cryotherapy.csv", header=None, names=col_names)

# Bug fix (data leakage): the target column must be excluded from the feature
# matrix -- the original dropped only 'sex', leaving Result_of_Treatment among
# the inputs, so the model could simply read the answer. 'sex' is still
# dropped, as in the original feature selection.
inputs = cry.drop(['Result_of_Treatment', 'sex'], axis='columns')
target = cry['Result_of_Treatment']
input_train, input_test, target_train, target_test = train_test_split(inputs, target, test_size=0.3, random_state=1)

from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(hidden_layer_sizes=(7, 6), max_iter=5000)
mlp.fit(input_train, target_train)

from sklearn.metrics import accuracy_score
# accuracy_score is symmetric, but pass (y_true, y_pred) per sklearn convention.
predictions_train = mlp.predict(input_train)
print("accuracy for train data: ", accuracy_score(target_train, predictions_train))
y_pred = mlp.predict(input_test)
print("accuracy for test data: ", accuracy_score(target_test, y_pred))

from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
result1 = classification_report(target_test, y_pred)
print("Classification Report:")
print(result1)
result2 = accuracy_score(target_test, y_pred)
print("Accuracy:", result2)
| mlp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/diascarolina/data-science-bootcamp/blob/main/modulo6/aulas_desafios_modulo6.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="KGDDAk20SmUI"
# # Applied Data Science Bootcamp
# + [markdown] id="7rSkPTS7SqcS"
# # Module 06 - Data Science in Finance
# + [markdown] id="dGJjOLqyTAee"
# Notebook for the lessons and proposed challenges.
# + [markdown] id="5FGiFum0TBRH"
# # Import Libraries
# + id="o0Xe0A-0St-7"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, classification_report, plot_confusion_matrix
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import Pipeline
sns.set()
# + [markdown] id="zGDbYWk9nYEk"
# # Lesson 01
# + id="z64PE0JWS-7c"
# Load the two raw tables straight from the repository ("?raw=true" serves the file).
approved_clients_raw = pd.read_csv('https://github.com/diascarolina/data-science-bootcamp/blob/main/data/approved_clients.csv?raw=true')
client_records_raw = pd.read_csv('https://github.com/diascarolina/data-science-bootcamp/blob/main/data/client_record.csv?raw=true')
# + colab={"base_uri": "https://localhost:8080/", "height": 225} id="3whlZxKvNam3" outputId="98acbdf4-617b-410a-8165-36900328fc9a"
client_records_raw.head()
# + colab={"base_uri": "https://localhost:8080/"} id="bbTKtx1PNnPJ" outputId="e930bbdd-b7b2-451c-d3fc-a5891e5897e4"
client_records_raw.shape
# + colab={"base_uri": "https://localhost:8080/"} id="yxE2F3D-N1O9" outputId="d5729ba4-419a-4404-cca1-856a70dc4dcb"
client_records_raw.id.value_counts()
# + [markdown] id="g8rmYpOiPUvh"
# We can see that we have some repeated IDs.
# + colab={"base_uri": "https://localhost:8080/"} id="Wsrc1jrMON4c" outputId="d3a858d3-452a-40ac-bd98-d80182dab679"
client_records_raw.duplicated().sum()
# + [markdown] id="4D0PbCz-QEnZ"
# This means that we don't have whole rows duplicated.
# + colab={"base_uri": "https://localhost:8080/"} id="7_VScT17P9AI" outputId="3bb9305c-6ff3-4e59-9270-ad9c1cc8b73c"
client_records_raw[client_records_raw.id.duplicated(keep = False)].shape
# + [markdown] id="e1mqEMRdRLbS"
# So we have 94 problematic values (47 repeated ones). Let's save them.
# + id="gyoYiBMxQm7V"
# keep = False flags *every* occurrence of a duplicated id, not only the extras.
repeated_ids = client_records_raw[client_records_raw.id.duplicated(keep = False)].id
# + id="mcK5yfdaSEkm"
# Drop all rows whose id is ambiguous (both copies of each duplicate are removed).
client_records = client_records_raw.copy()
client_records = client_records.drop(repeated_ids.index)
# + colab={"base_uri": "https://localhost:8080/"} id="IRl8_GiqTv3x" outputId="30bf7583-c7b8-46ec-eefb-bb375845bad6"
client_records.shape
# + colab={"base_uri": "https://localhost:8080/"} id="f8nJIL5oS3S_" outputId="561cfca4-e9a4-482e-82c5-83aa6e606dff"
client_records.id.value_counts()
# + [markdown] id="lwwFrBSOTzZe"
# Now we have only one ID per row.
# + [markdown] id="-psn8YECV_B5"
# Do we have null data?
# + colab={"base_uri": "https://localhost:8080/"} id="VgfVWISDTuBd" outputId="038f7fca-aa44-4083-b232-96a571f69d20"
print(f'Number of null values in client_records: {client_records.isnull().sum().sum()}')
# + colab={"base_uri": "https://localhost:8080/"} id="NSKYl7tRT9w_" outputId="ade0ff42-efcf-469a-f2df-04ddbebd23d1"
client_records.info()
# + [markdown] id="TdlXJBJEWObc"
# # Lesson 02
# + id="fIx15gqnXMak"
# Columns treated as numeric features throughout the analysis.
quantitative_variables = ['age', 'children_count', 'annual_income', 'years_working', 'family_size']
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="E7ZnRNlUYBJS" outputId="bfa7f2a2-ef16-49fe-d5df-891a08375d0d"
client_records[quantitative_variables].describe()
# + [markdown] id="Uw7XJtkEYbwf"
# Taking a look at the table above, how can someone have ```years_working``` equal to -1000.7?
# + colab={"base_uri": "https://localhost:8080/"} id="-AjJvnifYJzA" outputId="109ba629-7532-4579-88e2-00ca735abff7"
client_records.query("years_working <= 0")[['years_working', 'income_type']].value_counts()
# + [markdown] id="uMLW_dqQaNV9"
# We see that all ```years_working == -1000.7``` are pensioners.
# + colab={"base_uri": "https://localhost:8080/"} id="G04a8HWxZ9pf" outputId="7ad25c57-99c8-4885-ed4b-9699b387fd14"
client_records.query("income_type == 'pensioner'")['income_type'].value_counts()
# + [markdown] id="h_paiyZkauw4"
# Almost all pensioners have ```years_working == -1000.7```.
# + id="JdqD0eWAbDbJ"
# Replace the -1000.7 sentinel (pensioners) with -1 so it no longer distorts the scale.
client_records['years_working'] = client_records['years_working'].replace(-1000.7, -1)
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="95H98x-cbOwS" outputId="9812b3a4-e67e-4728-b47b-122151392e46"
client_records[quantitative_variables].describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="dTOcIqm4bSfn" outputId="d44325e4-b33c-4548-ad35-7a15f0428c2d"
sns.histplot(data = client_records, x = 'annual_income', bins = 10)
plt.show()
# + [markdown] id="N0VG65kjcitN"
# Let's remove the outliers.
# + colab={"base_uri": "https://localhost:8080/"} id="6SE2l080cU_1" outputId="f678d205-07d2-42d5-fbdc-eaaa30b38660"
# Flag annual incomes farther than 2 standard deviations from the mean.
annual_income_column = client_records['annual_income']
avg_annual_income = annual_income_column.mean()
std_annual_income = annual_income_column.std()
upper_limit = avg_annual_income + (2 * std_annual_income)
lower_limit = avg_annual_income - (2 * std_annual_income)
# Vectorized boolean mask instead of a Python loop over Series.items();
# produces the same list of index labels (NaNs compare False either way).
outlier_mask = (annual_income_column > upper_limit) | (annual_income_column < lower_limit)
outlier_index = annual_income_column.index[outlier_mask].tolist()
print(f'We have {len(outlier_index)} outliers in the annual income column.')
# + colab={"base_uri": "https://localhost:8080/"} id="MfJoVIq6dNa6" outputId="e70353b8-5381-4715-8679-9fae05ce4379"
# Remove the annual-income outliers found above.
client_records = client_records.drop(outlier_index)
client_records.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="KCp2UvjJeyQ_" outputId="112c189e-91e8-46ae-d874-f31b970318cf"
client_records[quantitative_variables].describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="Y-3q8_4yfBib" outputId="6d3df9f8-532c-4adb-dba7-d7a80c6ceaf3"
sns.histplot(data = client_records, x = 'annual_income', bins = 10)
plt.xticks(rotation = 20)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="2hP6quLefJTO" outputId="b5c5e781-b328-4f3d-dcf7-21b64566cbcf"
sns.histplot(data = client_records, x = 'age', bins = 10)
plt.xticks(rotation = 20)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="xJ65n6PmfbCn" outputId="16a0bc2f-2957-4d5a-bfa5-d822ee6abe65"
# years_working <= 0 holds the pensioner sentinel, so exclude it from the plot.
sns.histplot(data = client_records.query("years_working > 0"), x = 'years_working')
plt.xticks(rotation = 20)
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="Z-phygmChu7t" outputId="24a0908c-5f0e-4a83-c60e-13803017d348"
client_records.info()
# + id="RwFIhdPufoB7"
# Columns treated as categorical features (list is refined below).
categorical_variables = ['gender', 'own_car', 'own_property', 'income_type',
                        'education_type', 'marital_status', 'housing_type',
                        'own_cellphone', 'own_workphone', 'own_phone', 'own_email',
                        'occupation_type']
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="Lu-p3fISirAc" outputId="40f8f3d5-24c2-4b7d-9a6a-3d74456029a9"
sns.countplot(data = client_records, x = 'marital_status')
plt.xticks(rotation = 45, ha = 'right')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="roBzhLW_kQs3" outputId="80547372-a20f-4d92-889d-89f910c40542"
sns.countplot(data = client_records, x = 'own_cellphone')
plt.xticks(ha = 'right')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="oS_nmD5-kfOS" outputId="7babcdea-6846-4416-c068-e9e122929d6a"
sns.countplot(data = client_records, x = 'occupation_type')
plt.xticks(rotation = 45, ha = 'right')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="Zsw9h70Akluv" outputId="0990d9af-3776-4455-959f-1394b96e297f"
sns.histplot(data = client_records.query("occupation_type == 'others'"), x = 'annual_income', bins = 10)
plt.xticks(rotation = 45)
plt.show()
# + [markdown] id="meIpxB25m_Q8"
# We have seen that the variable ```own_cellphone``` has only one value, so it is not relevant for our future machine learning model.
#
# Also, we will drop the gender variable in order to avoid a bias.
# + id="xN3WW15KlazX"
client_records_treatment1 = client_records.drop(['gender', 'own_cellphone'], axis = 1)
# + colab={"base_uri": "https://localhost:8080/"} id="1KO0ytCSnqGh" outputId="42229f99-5018-479b-ec31-d2a2e7650dec"
client_records_treatment1.shape
# + [markdown] id="OSg9yh2nn3X4"
# Let's update our categorical variables.
# + id="N5up-uLJntao"
# Keep the list in sync with the columns dropped above.
categorical_variables.remove('gender')
categorical_variables.remove('own_cellphone')
# + [markdown] id="Z-j_8x_SoQw2"
# # Lesson 03
# + [markdown] id="f2x-AqWnyTD2"
# Let's now take a look at the approved clients.
# + id="sI90Yo-wn1ZF"
approved_clients = approved_clients_raw.copy()
# + colab={"base_uri": "https://localhost:8080/", "height": 205} id="j2G-ehKwpw5N" outputId="86ca1d7b-0ffa-4aec-89cd-284d5e1fb07f"
approved_clients.head()
# + colab={"base_uri": "https://localhost:8080/"} id="yaJmlzRmp1mL" outputId="8c6e47b4-e7ea-4565-85b1-f65721fb5ec0"
approved_clients.info()
# + colab={"base_uri": "https://localhost:8080/"} id="PEKX11a_p50z" outputId="e54edbd5-9ec0-41d6-891e-fbe199a7ecca"
approved_clients.id.value_counts().shape
# + colab={"base_uri": "https://localhost:8080/"} id="th_CL6TZyfi-" outputId="e93a13c9-8a39-436b-9964-fbe72c9daf08"
client_records_treatment1.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 643} id="Aq4UtAn0zCi1" outputId="f267077f-17d1-499c-987b-9a8a673908cb"
# example of a client
approved_clients.query("id == 5001712")
# + colab={"base_uri": "https://localhost:8080/", "height": 175} id="b4svxy1LzNas" outputId="0161ab50-31fa-422b-b132-d91c02055a52"
approved_clients.query("id == 5001711")
# + colab={"base_uri": "https://localhost:8080/"} id="7Se9JnpTzcT6" outputId="9128519b-64ad-4f92-8603-c237810c99bc"
approved_clients.status.value_counts().index.to_list()
# + [markdown] id="BFvhAw470Vol"
# ## Creating Auxiliary Variables
# + colab={"base_uri": "https://localhost:8080/"} id="k3xMLXIxzlKK" outputId="bdbb2f76-9d43-4184-c805-cf66a1135e5c"
# month in which the client's account was opened
# ('opening' = smallest month value per id; negative = months in the past)
approved_clients_by_id = approved_clients.copy()
approved_clients_by_id = approved_clients_by_id.groupby('id')
opening_series = approved_clients_by_id.apply(lambda x: min(x['month']))
opening_series.name = 'opening'
opening_series
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="XrUma7vE08Mc" outputId="37030c4a-de77-4f22-c043-d3bdd578e6a1"
approved_clients = approved_clients.merge(opening_series, on = 'id')
approved_clients.head()
# + colab={"base_uri": "https://localhost:8080/"} id="Q8aOYKXf1GX7" outputId="6ae749db-2434-42b9-f401-522e4b2134a7"
# 'final' = most recent month with a record for each client
last_registered_series = approved_clients_by_id.apply(lambda x: max(x['month']))
last_registered_series.name = 'final'
last_registered_series
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="Ys8E2Vba2Mx2" outputId="a824c6f1-7010-4541-ff45-8f4cae6196f3"
approved_clients = approved_clients.merge(last_registered_series, on = 'id')
approved_clients.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="WwvHmdIR2UE4" outputId="0846a03e-c998-490a-dc21-6820be06d68d"
# window of number of opening months
approved_clients['window'] = approved_clients['final'] - approved_clients['opening']
approved_clients.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="TQPBnnUA2jPK" outputId="5a82aa84-d047-4663-cded-f0f7482c09c8"
# month on book (mob): how long the account had been open at each record
approved_clients['mob'] = approved_clients['month'] - approved_clients['opening']
approved_clients.head()
# + colab={"base_uri": "https://localhost:8080/"} id="stX2040m3mXP" outputId="393d4313-19d1-4a22-fd57-e84f854acdae"
approved_clients['status'].unique()
# + id="60rrgqy02_dk"
# Ordinal encoding of payment status: higher value = more overdue.
status_dict = {
    'no_loan': 0,
    'paid_loan': 1,
    '1-29days': 2,
    '30-59days': 3,
    '60-89days': 4,
    '90-119days': 5,
    '120-149days': 6,
    '>150days': 7
}
approved_clients['status_int'] = approved_clients['status'].map(status_dict)
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="Dax1Fqd54K96" outputId="5605c9aa-c216-41bc-e0fe-7727e87997b7"
approved_clients.head()
# + id="ZzGvqJEt5Chj"
# 'bad' = 1 when the record is 60+ days overdue (status_int > 3, see status_dict).
# approved_clients['bad'] = approved_clients.apply(lambda x: x['status_int'] > 3, axis = 1)
approved_clients['bad'] = approved_clients.apply(lambda x: 1 if x['status_int'] > 3 else 0, axis = 1)
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="rlrSraZm50JA" outputId="fee1409c-eede-4743-d799-0221a497f492"
approved_clients
# + [markdown] id="uTIjJP_u65Ac"
# ## Vintage Analysis
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="et3yrgT556Ef" outputId="b03b272a-4de3-41f4-f458-96fba95ad845"
# One row per client with its opening/final/window summary.
approved_clients_unique_id = approved_clients[['id', 'opening', 'final', 'window']].groupby('id').apply(lambda x: x.iloc[0]).reset_index(drop = True)
approved_clients_unique_id
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="nDdJ8W777p_M" outputId="cce1042d-9954-43df-e79b-e1cb8694f65b"
# Cohort sizes: how many clients opened their account in each month.
approved_clients_denominator = approved_clients_unique_id.groupby('opening').apply(lambda x: x['id'].count()).reset_index()
approved_clients_denominator.columns = ['opening', 'number_of_clients']
approved_clients_denominator
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="IVS8kWpO8MW_" outputId="13610cc5-12d1-4a1a-d856-8e9bb56b0292"
vintage = approved_clients.groupby(['opening', 'mob']).apply(lambda x: x['id'].count()).reset_index()
vintage.columns = ['opening', 'mob', 'number_of_clients']
vintage
# + [markdown] id="gqnITEHa87x6"
# To explain the above table, let's look at the index 1886:
# - opening -2 = 2 months ago
# - mob 1 = account open for one month
# - number_of_clients 770
#
# So, two months ago we had 770 clients with accounts open for a period of one month.
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="MgLxTnVT8sKl" outputId="6f83fd88-ede9-4888-b633-1972e1f4f714"
# Replace the per-(opening, mob) count with the cohort size for each opening month.
vintage = pd.merge(vintage[['opening', 'mob']],
                   approved_clients_denominator,
                   on = ['opening'],
                   how = 'left')
vintage
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="xUSSqxyG9t79" outputId="38604cfa-f893-4b66-8cc9-24931408afa4"
vintage['number_of_bad_payers'] = np.nan
# For each opening cohort j, `ls` accumulates the ids that went bad up to each
# mob i, so 'number_of_bad_payers' is a cumulative distinct count along mob.
for j in range(-60, 1):
    ls = []
    for i in range(0, 61):
        due = list(approved_clients.query("bad == 1 and mob == @i and opening == @j")['id'])
        ls.extend(due)
        vintage.loc[(vintage['mob'] == i) & (vintage['opening'] == j), 'number_of_bad_payers'] = len(set(ls))
vintage['bad_payers_rate'] = vintage['number_of_bad_payers'] / vintage['number_of_clients']
vintage
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="23pmj7qi-2W2" outputId="71821a51-3f44-4688-adc0-2c2b8067ef68"
vintage_pivot = vintage.pivot(index = 'opening',
                              columns = 'mob',
                              values = 'bad_payers_rate')
vintage_pivot
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="pcLVCyva_dCQ" outputId="5a7e2c67-c0aa-4ec4-c452-551b59429ea8"
lst = [i for i in range(0, 61)]
# Plot the first 10 opening cohorts: bad rate as a function of months on book.
vintage_pivot[lst].T.iloc[:, :10].plot(legend = True,
                                       grid = True,
                                       title = 'Cumulative Percentage of Bad Clients (late pay > 60 days)',
                                       figsize = (10, 8))
plt.xlabel('MOB')
plt.ylabel('Cumulative Percentage of Bad Clients')
plt.legend(bbox_to_anchor = (1.12, 1), title = 'Opening')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="jfxa8v8QAS4c" outputId="6a406574-9677-43b7-a09c-2ded324408d3"
sns.histplot(data = approved_clients_unique_id,
             x = 'window',
             cumulative = True,
             stat = 'density',
             kde = True)
plt.title('Distribution of Clients by Window')
plt.xlabel('Window')
plt.ylabel('Percentage of Clients')
plt.show()
# + id="7ukjbfmNBG_L"
# status_int threshold for each lateness definition (>= value means that late).
late_interval_dict = {
    'more_than_30_days': 3,
    'more_than_60_days': 4,
    'more_than_90_days': 5,
    'more_than_120_days': 6,
    'more_than_150_days': 7
}
# + id="wdKp4hm-CE4-"
for key, value in late_interval_dict.items():
    approved_clients[f'bad_{key}'] = approved_clients.apply(lambda x: 1 if x['status_int'] >= value else 0, axis = 1)
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="IckBsv6iCq9I" outputId="c680576b-34c8-4a3f-83b1-2b69d21e9f82"
approved_clients
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="dXPX3D9jCrOS" outputId="53424ca3-f683-46fa-c40d-f313aff158a9"
# Cumulative share of all clients that first went bad by each mob, computed
# separately for every lateness definition above.
bad_rate_dict = {}
id_sum = len(set(approved_clients['id']))
for key in late_interval_dict.keys():
    min_bad = approved_clients.query(f'bad_{key} == 1').groupby('id')['mob'].min().reset_index()
    mob_bad_rate = pd.DataFrame({'mob':range(0,61), 'bad_rate': np.nan})
    lst = []
    for i in range(0,61):
        due = min_bad.query('mob == @i')['id'].to_list()
        lst.extend(due)
        mob_bad_rate.loc[mob_bad_rate['mob'] == i, 'bad_rate'] = len(set(lst)) / id_sum
    bad_rate_dict[key] = mob_bad_rate['bad_rate']
bad_rate = pd.DataFrame(bad_rate_dict)
bad_rate
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="_nRTSwEeEaMB" outputId="38073a26-8812-44c1-f747-e7e057291a81"
bad_rate.plot(grid = True,
              title = 'Cumulative Percentage of Bad Clients by Late Window',
              figsize = (10, 6))
plt.xlabel('Mob')
plt.ylabel('Cumulative Percentage of Clients')
plt.show()
# + [markdown] id="FvH0t5B7Fwud"
# ## Defining the Target Variable
# + colab={"base_uri": "https://localhost:8080/"} id="-5OmcfKoFDEg" outputId="72cf1e70-57e3-4d9e-cb45-7193b1e58ff8"
# Keep only clients observed for at least 12 months on book.
len(approved_clients.query('window >= 12').groupby('id').count().index)
# + colab={"base_uri": "https://localhost:8080/"} id="It49tS5BFNLf" outputId="92b2e073-af93-4c0d-9f4e-466f321d70bb"
approved_clients_treatment1 = approved_clients.query('window >= 12').copy()
approved_clients_treatment1.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="9BRNVKQtFfYT" outputId="157c631d-cb5e-4e6a-90ca-b549ee15bf2c"
def verify(registers):
    """Return 1 if any record of this client has a 60+ days late status, else 0."""
    bad_statuses = {'60-89days', '90-119days', '120-149days', '>150days'}
    seen = set(registers['status'].to_list())
    return 1 if seen & bad_statuses else 0
# Per-client target: 1 if the client was ever 60+ days late (see verify above).
client_records_unique_id = pd.DataFrame(approved_clients_treatment1.groupby('id').apply(verify)).reset_index()
client_records_unique_id.columns = ['id', 'bad']
client_records_unique_id.head()
# + colab={"base_uri": "https://localhost:8080/"} id="iCSvk9zGGoRG" outputId="24a6055a-a3bd-44ff-b5f7-5acd2d920972"
client_records_unique_id.shape
# + colab={"base_uri": "https://localhost:8080/"} id="jvH90dX2HVLD" outputId="c5fdb1f4-53d9-4f9a-bdec-f4ca2e2751a0"
client_records_treatment1.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="fozoGPzpHbhh" outputId="0eb00d58-66e6-4be0-c1bc-c01e26b9ca68"
# Join the demographic features with the target label on the client id.
target_client_records = client_records_treatment1.merge(client_records_unique_id, on = 'id')
target_client_records.head()
# + colab={"base_uri": "https://localhost:8080/"} id="q2ZglplFNmGM" outputId="3a9b6e79-2554-4b62-fe57-1fba8dcd8b25"
target_client_records['bad'].value_counts(normalize = True) * 100
# + [markdown] id="PucMmvHvKENe"
# We can see that our data is pretty unbalanced.
# + [markdown] id="c30feWaIJ8Xv"
# # Lesson 04
# + colab={"base_uri": "https://localhost:8080/"} id="WEyZ6dLDN_Sq" outputId="ad2c8cf5-3d1f-44c2-85a2-062563f65591"
# !pip install scikit-learn==0.23.2 imbalanced-learn==0.7.0
# + id="s38FQ7umOGgy"
# Compatibility shim: this imbalanced-learn release imports
# sklearn.externals.six, which newer scikit-learn no longer ships.
import six
import sys
sys.modules['sklearn.externals.six'] = six
# + id="03200EKyJ-X3"
binary_columns = ['own_car', 'own_property', 'own_workphone',
                  'own_phone', 'own_email']
# + id="-9qaZUV0Kg34"
non_binary_columns = ['income_type', 'education_type', 'marital_status',
                      'housing_type', 'occupation_type']
# + id="anXUuTfLLR7D"
# Keep only id, the selected feature groups, and the target.
target_client_records = target_client_records[['id'] + quantitative_variables + binary_columns + non_binary_columns + ['bad']]
# + colab={"base_uri": "https://localhost:8080/", "height": 225} id="ssxw6O_eL9K1" outputId="81db1ce6-4d54-47cc-87d0-5357a6ea5032"
target_client_records.head()
# + id="ckGH4n09L-U0"
# Fixed seed so splits, resampling and models are reproducible.
SEED = 1561651
def run_model(model, data):
    """Train `model` on `data` and report AUC, KS and a confusion matrix.

    `data` must contain an 'id' column (dropped from the features) and a
    'bad' target column. Prints the metrics, shows the confusion-matrix
    plot and the classification report; returns None.
    """
    X = data.drop(['id', 'bad'], axis=1)
    y = data['bad']
    X_train, X_test, y_train, y_test = train_test_split(X, y, stratify = y, random_state = SEED)
    model.fit(X_train, y_train)
    prob_predic = model.predict_proba(X_test)
    auc_score = roc_auc_score(y_test, prob_predic[:,1])
    print(f'AUC score: {auc_score}')
    # KS statistic between the score distributions of actual good (bad == 0)
    # and actual bad (bad == 1) clients. The original compared the two
    # probability columns of the same predictions (p and 1-p), which does not
    # measure how well the score separates the classes; it also called
    # predict_proba twice more unnecessarily.
    y_test_arr = np.asarray(y_test)
    good_scores = prob_predic[y_test_arr == 0, 1]
    bad_scores = prob_predic[y_test_arr == 1, 1]
    kstest = stats.ks_2samp(good_scores, bad_scores)
    print(f"KS: {kstest}")
    CM = plot_confusion_matrix(model,
                               X_test,
                               y_test,
                               values_format = '.0f',
                               display_labels = ['Good', 'Bad'])
    plt.grid(False)
    plt.show(CM)
    prediction = model.predict(X_test)
    print('\nClassification Report')
    print(classification_report(y_test, prediction, zero_division = 0))
# + id="8OTMB-kYNb-Q"
class Transformer(BaseEstimator, TransformerMixin):
    """Sklearn-compatible preprocessor that one-hot encodes the categorical
    columns and min-max scales the quantitative ones, returning a single
    DataFrame with the scaled quantitative columns first."""

    def __init__(self, quantitative_variables, categorical_variables):
        self.quantitative_variables = quantitative_variables
        self.categorical_variables = categorical_variables
        self.encoder = OneHotEncoder()
        self.scaler = MinMaxScaler()

    def fit(self, X, y = None):
        # Learn the categories and the min/max ranges from the training data.
        self.encoder.fit(X[self.categorical_variables])
        self.scaler.fit(X[self.quantitative_variables])
        return self

    def transform(self, X, y = None):
        encoded = self.encoder.transform(X[self.categorical_variables]).toarray()
        scaled = self.scaler.transform(X[self.quantitative_variables])
        cat_frame = pd.DataFrame(data = encoded,
                                 columns = self.encoder.get_feature_names(self.categorical_variables))
        num_frame = pd.DataFrame(data = scaled,
                                 columns = self.quantitative_variables)
        return pd.concat([num_frame, cat_frame], axis = 1)
# + [markdown] id="5tD7gidDRrtQ"
# ## Dummy Classifier
# + colab={"base_uri": "https://localhost:8080/", "height": 521} id="FtREF8tpPXVy" outputId="5a1a1b71-3abf-4431-cb16-c6c9bce237d9"
# Baseline: DummyClassifier that always predicts according to the class prior.
pipeline = Pipeline([('Transformer', Transformer(quantitative_variables, categorical_variables)),
                     ('Model', DummyClassifier(strategy = 'prior'))])
run_model(pipeline, target_client_records)
# + [markdown] id="j5ORyLoRR5dD"
# ## Logistic Regression
# + colab={"base_uri": "https://localhost:8080/", "height": 521} id="no5-JEZfQWhh" outputId="ed708e58-6131-412f-c768-0d2c48a81086"
pipeline = Pipeline([('Transformer', Transformer(quantitative_variables, categorical_variables)),
                     ('Model', LogisticRegression(random_state = SEED, max_iter = 1000))])
run_model(pipeline, target_client_records)
# + [markdown] id="Fp7z1GZcSUQD"
# ## Oversampling and Undersampling
# + colab={"base_uri": "https://localhost:8080/", "height": 521} id="f8kE00I_R_gi" outputId="863668e0-dc29-4582-f20a-a4998be1e371"
# SMOTE oversamples the minority class; inside an imblearn Pipeline the
# resampling is applied only when fitting, not at predict time.
pipeline = Pipeline([('Transformer', Transformer(quantitative_variables, categorical_variables)),
                     ('Sampling', SMOTE(random_state = SEED)),
                     ('Model', LogisticRegression(random_state = SEED, max_iter = 1000))])
run_model(pipeline, target_client_records)
# + colab={"base_uri": "https://localhost:8080/", "height": 521} id="NJW09TpVTaix" outputId="77c90b90-8b41-4b5e-ce40-0a81867c12e9"
pipeline = Pipeline([('Transformer', Transformer(quantitative_variables, categorical_variables)),
                     ('Model', RandomForestClassifier(random_state = SEED))])
run_model(pipeline, target_client_records)
# + colab={"base_uri": "https://localhost:8080/", "height": 521} id="19DO6zAKTQgH" outputId="cf69ee2a-3363-4c33-abd6-60558ec3f287"
pipeline = Pipeline([('Transformer', Transformer(quantitative_variables, categorical_variables)),
                     ('Sampling', SMOTE(random_state = SEED)),
                     ('Model', RandomForestClassifier(random_state = SEED))])
run_model(pipeline, target_client_records)
# + [markdown] id="m1P8onUUUfLy"
# # Lesson 05
# + id="L257aJ2BUgVj"
# Final model: SMOTE + random forest, refit on the full dataset below.
pipeline = Pipeline([('Transformer', Transformer(quantitative_variables, categorical_variables)),
                     ('Sampling', SMOTE(random_state = SEED)),
                     ('Model', RandomForestClassifier(random_state = SEED))])
# + colab={"base_uri": "https://localhost:8080/"} id="eXnrcyfCUtsx" outputId="963a5ae7-8feb-4170-ec3f-6346fbe0371e"
X = target_client_records.drop(['id', 'bad'], axis = 1)
y = target_client_records['bad']
pipeline.fit(X, y)
# + [markdown] id="8ei62MksVJPm"
# ## Saving the Model
# + id="UhsNOo23VCIR"
from joblib import dump
# + colab={"base_uri": "https://localhost:8080/"} id="vAze4gOrVNrx" outputId="948a994e-53cc-40e6-9f73-dda2aad8970b"
# Persist the fitted pipeline plus the feature/category metadata it was trained with.
dump(pipeline, 'model.joblib')
# + id="dCK9UHPoVeKO"
features = X.columns
# + colab={"base_uri": "https://localhost:8080/"} id="igfu2WNIVgPj" outputId="92d07e92-9a10-429b-c997-96435565a611"
dump(features, 'features.joblib')
# + id="TMRozGF-VjXe"
# Unique categories per non-binary column, saved alongside the model.
categories_list = target_client_records[non_binary_columns].apply(lambda x: x.unique(), axis = 0)
# + colab={"base_uri": "https://localhost:8080/"} id="QDL3FI9tV0Xr" outputId="de552897-973a-45dd-eee1-5e14c19d0588"
dump(categories_list, 'categories_list.joblib')
| modulo6/aulas_desafios_modulo6.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.0
# language: julia
# name: julia-1.5
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Introduction to Recurrent Neural Networks
# (c) <NAME>, 2019
# * Objectives: learn about RNNs, the RNN layer, compare with MLP on a tagging task.
# * Prerequisites: [MLP models](40.mlp.ipynb)
# * New functions:
# [RNN](http://denizyuret.github.io/Knet.jl/latest/reference/#Knet.RNN),
# [adam](http://denizyuret.github.io/Knet.jl/latest/reference/#Knet.adam)
#
# ([image
# source](http://colah.github.io/posts/2015-08-Understanding-LSTMs))
#
# In this notebook we will see how to implement a recurrent neural network (RNN) in Knet. In RNNs, connections between units form a directed cycle, which allows them to keep a persistent state over time. This gives them the ability to process sequences of arbitrary length one element at a time, while keeping track of what happened at previous elements. One can view the current state of the RNN as a representation for the sequence processed so far.
#
# We will build a part-of-speech tagger using a large annotated corpus of English. We will represent words with numeric vectors appropriate as inputs to a neural network. These word vectors will be initialized randomly and learned during training just like other model parameters. We will compare three network architectures: (1) an MLP which tags each word independently of its neighbors, (2) a simple RNN that can represent the neighboring words to the left, (3) a bidirectional RNN that can represent both left and right contexts. As can be expected 1 < 2 < 3 in performance. More surprisingly, the three models are very similar to each other: we will see their model diagrams are identical except for the horizontal connections that carry information across the sequence.
# + slideshow={"slide_type": "slide"}
# Setup display width, load packages, import symbols
ENV["COLUMNS"] = 72  # limit printed array/table width in notebook output
using Random: shuffle!
using Base.Iterators: flatten
using IterTools: ncycle, takenth
using Knet: Knet, AutoGrad, param, param0, mat, RNN, relu, Data, adam, progress, nll, zeroone
# + [markdown] slideshow={"slide_type": "slide"}
# ## The Brown Corpus
# To introduce recurrent neural networks (RNNs) we will train a part-of-speech tagger using the [Brown Corpus](https://en.wikipedia.org/wiki/Brown_Corpus). We will train three models — an MLP, a unidirectional RNN, and a bidirectional RNN — and observe significant performance differences.
# + slideshow={"slide_type": "slide"}
include(Knet.dir("data/nltk.jl"))
(data,words,tags) = brown()
println("The Brown Corpus has $(length(data)) sentences, $(sum(length(p[1]) for p in data)) tokens, with a word vocabulary of $(length(words)) and a tag vocabulary of $(length(tags)).")
# + [markdown] slideshow={"slide_type": "slide"}
# `data` is an array of `(w,t)` pairs each representing a sentence, where `w` is a sequence of word ids, and `t` is a sequence of tag ids. `words` and `tags` contain the strings for the ids.
# + slideshow={"slide_type": "fragment"}
println.(summary.((data,words,tags)));
# + [markdown] slideshow={"slide_type": "slide"}
# Here is what the first sentence looks like with ids and with strings:
# + slideshow={"slide_type": "fragment"}
(w,t) = first(data)
display(permutedims(Int[w t]))
display(permutedims([words[w] tags[t]]))
# + [markdown] slideshow={"slide_type": "slide"}
# ## Chain of layers
# + slideshow={"slide_type": "fragment"}
# Let's define a chain of layers
# A Chain holds a tuple of callable layers and applies them in sequence.
struct Chain
    layers
    Chain(layers...) = new(layers)
end
# Forward pass: feed x through each layer in turn; each layer's output is the next layer's input.
(c::Chain)(x) = (for l in c.layers; x = l(x); end; x)
# Calling with (x,y) returns the negative log likelihood loss of the predictions against labels y.
(c::Chain)(x,y) = nll(c(x),y)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Dense layers
# + slideshow={"slide_type": "fragment"}
# Redefine dense layer (See mlp.ipynb):
# Dense layer: weight matrix w, bias vector b, elementwise activation f.
struct Dense; w; b; f; end
# Construct with learnable (o,i) weights and a zero-initialized bias; f defaults to identity.
Dense(i::Int,o::Int,f=identity) = Dense(param(o,i), param0(o), f)
# Forward pass: flatten x to a matrix, apply the affine map, then broadcast the activation.
(d::Dense)(x) = d.f.(d.w * mat(x,dims=1) .+ d.b)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Word Embeddings
# `data` has each sentence tokenized into an array of words and each word mapped to a `UInt16` id. To use these words as inputs to a neural network we further map each word to a Float32 vector. We will keep these vectors in the columns of a size (X,V) matrix where X is the embedding dimension and V is the vocabulary size. The vectors will be initialized randomly, and trained just like any other network parameter. Let's define an embedding layer for this purpose:
# + slideshow={"slide_type": "slide"}
# Embedding layer: w is an (embedsize, vocabsize) matrix; each column is one word vector.
struct Embed; w; end
# Initialize the embedding matrix with learnable random parameters.
Embed(vocabsize::Int,embedsize::Int) = Embed(param(embedsize,vocabsize))
# Look up embeddings by indexing columns; for an id array x of size (B,T) this
# yields an (X,B,T) array of vectors.
(e::Embed)(x) = e.w[:,x]
# + [markdown] slideshow={"slide_type": "slide"}
# This is what the words, word ids and embeddings for a sentence looks like: (note the identical id and embedding for the 2nd and 5th words)
# + slideshow={"slide_type": "slide"}
embedlayer = Embed(length(words),8)
(w,t) = data[52855]
display(permutedims(words[w]))
display(permutedims(Int.(w)))
display(embedlayer(w))
# + [markdown] slideshow={"slide_type": "slide"}
# ## RNN layers
# + slideshow={"slide_type": "slide"}
@doc RNN
# + [markdown] slideshow={"slide_type": "slide"}
# ## The three taggers: MLP, RNN, biRNN
# + [markdown] slideshow={"slide_type": "slide"}
# ## Tagger0 (MLP)
# This is what Tagger0 looks like. Every tag is predicted independently. The prediction of each tag only depends on the corresponding word.
# <img src="https://docs.google.com/drawings/d/e/2PACX-1vTfV4-TB0KwjDbFKpj3rL0tfeApEh9XXaDJ1OF3emNVAmc_-hvgqpEBuA_K0FsNuxymZrv3ztScXxqF/pub?w=378&h=336"/>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Tagger1 (RNN)
# In Tagger1, the RNN layer takes its previous output as an additional input. The prediction of each tag is based on words to the left.
# <img src="https://docs.google.com/drawings/d/e/2PACX-1vTaizzCISuSxihPCjndr7xMVwklsrefi9zn7ZArCvsR8fb5V4DGKtusyIzn3Ujp3QbAJgUz1WSlLvIJ/pub?w=548&h=339"/>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Tagger2 (biRNN)
# In Tagger2 there are two RNNs: the forward RNN reads the sequence from left to right, the backward RNN reads it from right to left. The prediction of each tag is dependent on all the words in the sentence.
# <img src="https://docs.google.com/drawings/d/e/2PACX-1vQawvnCj6odRF2oakF_TgXd8gLxSsfQP8-2ZdBdEIpfgIyPq0Zp_EF6zcFJf6JlGhfiKQvdVyg-Weq2/pub?w=566&h=335"/>
# + slideshow={"slide_type": "slide"}
# The three taggers share the same Chain shape (embedding -> middle layer -> dense output)
# and differ only in the middle layer: Dense, unidirectional RNN, or bidirectional RNN.
Tagger0(vocab,embed,hidden,output)= # MLP Tagger
    Chain(Embed(vocab,embed),Dense(embed,hidden,relu),Dense(hidden,output))
Tagger1(vocab,embed,hidden,output)= # RNN Tagger
    Chain(Embed(vocab,embed),RNN(embed,hidden,rnnType=:relu),Dense(hidden,output))
# bidirectional=true concatenates forward and backward states, hence the 2hidden input size.
Tagger2(vocab,embed,hidden,output)= # biRNN Tagger
    Chain(Embed(vocab,embed),RNN(embed,hidden,rnnType=:relu,bidirectional=true),Dense(2hidden,output));
# + [markdown] slideshow={"slide_type": "slide"}
# ## Sequence Minibatching
# Minibatching is a bit more complicated with sequences compared to simple classification problems, this section can be skipped on a first reading. In addition to the input and minibatch sizes, there is also the time dimension to consider. To keep things simple we will concatenate all sentences into one big sequence, then split this sequence into equal sized chunks. The input to the tagger will be size (B,T) where B is the minibatch size, and T is the chunk size. The input to the RNN layer will be size (X,B,T) where X is the embedding size.
# + slideshow={"slide_type": "slide"}
BATCHSIZE = 64
SEQLENGTH = 32;
# + slideshow={"slide_type": "slide"}
# Split parallel id sequences x,y into (B,T) minibatches: B rows read the big
# sequence at B equally spaced offsets, and each chunk covers T consecutive steps.
function seqbatch(x,y,B,T)
    N = length(x) ÷ B
    # Reshape into B rows of length N; the trailing remainder (< B elements) is dropped.
    x = permutedims(reshape(x[1:N*B],N,B))
    y = permutedims(reshape(y[1:N*B],N,B))
    d = []; for i in 0:T:N-T
        push!(d, (x[:,i+1:i+T], y[:,i+1:i+T]))
    end
    return d
end
# Concatenate the whole corpus into one long word-id sequence and tag-id sequence.
allw = vcat((x->x[1]).(data)...)
allt = vcat((x->x[2]).(data)...)
d = seqbatch(allw, allt, BATCHSIZE, SEQLENGTH);
# + [markdown] slideshow={"slide_type": "slide"}
# This may be a bit more clear if we look at an example minibatch:
# + slideshow={"slide_type": "slide"}
(x,y) = first(d)
words[x]
# + [markdown] slideshow={"slide_type": "slide"}
# ## Embedding a minibatch
# Julia indexing allows us to get the embeddings for this minibatch in one go as an (X,B,T) array where X is the embedding size, B is the minibatch size, and T is the subsequence length.
# + slideshow={"slide_type": "fragment"}
embedlayer = Embed(length(words),128)
summary(embedlayer(x))
# + [markdown] slideshow={"slide_type": "slide"}
# ## Experiments
# + slideshow={"slide_type": "slide"}
# shuffle and split minibatches into train and test portions
shuffle!(d)
dtst = d[1:10]
dtrn = d[11:end]
length.((dtrn,dtst))
# + slideshow={"slide_type": "slide"}
# For running experiments we will use the Adam algorithm which typically converges faster than SGD.
# Train a model (or load cached results from disk / the tutorial server).
#
# Arguments:
# - `file`: JLD2 file used to cache the trained model and results.
# - `maker`: zero-argument function that builds a fresh model.
# - `savemodel`: whether to store the model itself in `file` (results always are).
#
# Returns `(model, results)` where `results` is a 2xN Float32 array holding
# (test nll, test error) sampled every 100 updates over 5 epochs of `dtrn`.
function trainresults(file,maker,savemodel)
    if (print("Train from scratch? "); readline()[1]=='y')
        model = maker()
        # zeroone == 1 - accuracy; `accuracy` itself is not in the `using Knet:`
        # list above, so use the imported `zeroone` for the test error rate.
        results = ((nll(model,data=dtst), zeroone(model,data=dtst))
                   for x in takenth(progress(adam(model,ncycle(dtrn,5))),100))
        results = reshape(collect(Float32,flatten(results)),(2,:))
        Knet.save(file,"model",(savemodel ? model : nothing),"results",results)
        GC.gc(true) # To save gpu memory
    else
        isfile(file) || download("http://people.csail.mit.edu/deniz/models/tutorial/$file",file)
        # Knet.load is the counterpart of Knet.save above ("load143" was a typo).
        model,results = Knet.load(file,"model","results")
    end
    println(minimum(results,dims=2))
    return model,results
end
# + slideshow={"slide_type": "slide"}
VOCABSIZE = length(words)
EMBEDSIZE = 128
HIDDENSIZE = 128
OUTPUTSIZE = length(tags);
# + slideshow={"slide_type": "slide"}
# 2.35e-01 100.00%┣┫ 2780/2780 [00:13/00:13, 216.36i/s] [0.295007; 0.0972656]
t0maker() = Tagger0(VOCABSIZE,EMBEDSIZE,HIDDENSIZE,OUTPUTSIZE)
(t0,r0) = trainresults("tagger134a.jld2",t0maker,false);
# + slideshow={"slide_type": "fragment"}
# 1.49e-01 100.00%┣┫ 2780/2780 [00:19/00:19, 142.58i/s] [0.21358; 0.0616211]
t1maker() = Tagger1(VOCABSIZE,EMBEDSIZE,HIDDENSIZE,OUTPUTSIZE)
(t1,r1) = trainresults("tagger134b.jld2",t1maker,false);
# + slideshow={"slide_type": "fragment"}
# 9.37e-02 100.00%┣┫ 2780/2780 [00:25/00:25, 109.77i/s] [0.156669; 0.044043]
t2maker() = Tagger2(VOCABSIZE,EMBEDSIZE,HIDDENSIZE,OUTPUTSIZE)
(t2,r2) = trainresults("tagger134c.jld2",t2maker,true);
# + slideshow={"slide_type": "slide"}
using Plots; default(fmt=:png,ls=:auto,ymirror=true)
# + slideshow={"slide_type": "slide"}
plot([r0[2,:], r1[2,:], r2[2,:]]; xlabel="x100 updates", ylabel="error",
ylim=(0,0.15), yticks=0:0.01:0.15, labels=["MLP","RNN","biRNN"])
# + slideshow={"slide_type": "slide"}
plot([r0[1,:], r1[1,:], r2[1,:]]; xlabel="x100 updates", ylabel="loss",
ylim=(0,.5), yticks=0:0.1:.5, labels=["MLP","RNN","biRNN"])
# + [markdown] slideshow={"slide_type": "slide"}
# ## Playground
# Below, you can type and tag your own sentences:
# + slideshow={"slide_type": "slide"}
# Build a word-string -> id dictionary; unknown words map to the last vocabulary id.
wdict=Dict{String,UInt16}(); for (i,w) in enumerate(words); wdict[w]=i; end
unk = UInt16(length(words))
wid(w) = get(wdict,w,unk)
# Tag a whitespace-separated sentence with `tagger`; returns a 2xn matrix with
# the words in the first row and the predicted tag strings in the second.
function tag(tagger,s::String)
    w = permutedims(split(s))
    # argmax over the score column for each word picks the most likely tag id.
    t = tags[(x->x[1]).(argmax(Array(tagger(wid.(w))),dims=1))]
    vcat(w,t)
end
# + slideshow={"slide_type": "slide"}
tag(t2,readline())
# -
| tutorial/60.rnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Building A Simple HandWritten Digits Classifier Using MLPs
#
# ### Loading MNIST DataBase
#
# +
import tensorflow as tf
mnist = tf.keras.datasets.mnist
(X_train,y_train),(X_test,y_test)= mnist.load_data()
# -
# # Visualizing Loaded Data
# +
import matplotlib.pyplot as plt
# %matplotlib inline
import matplotlib.cm as cm
import numpy as np
fig=plt.figure(figsize=(20,20))
for i in range(7):
ax=fig.add_subplot(1,7,i+1,xticks=[],yticks=[])
ax.imshow(X_train[i],cmap='gray')
ax.set_title(str(y_train[i]))
# -
# # Rescaling Images
# Scale pixel intensities from [0, 255] to [0, 1] for stable training.
X_train=X_train.astype('float32')/255
X_test=X_test.astype('float32')/255
print("X_train Shape: ", X_train.shape)
print("y_train Shape: ", y_train.shape)
# # Building A MLP Architecture
# Simple MLP: flatten the 28x28 image, one 256-unit ReLU hidden layer with
# dropout for regularization, softmax over the 10 digit classes.
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28,28)),
    tf.keras.layers.Dense(256, activation = 'relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation = 'softmax')
])
# # Compiling Our Model
# sparse_categorical_crossentropy accepts integer labels directly (no one-hot needed).
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# # Train The Model
#
# NOTE(review): the test set doubles as validation data here, so the reported
# "val" metrics are not an unbiased estimate of generalization.
hist = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs = 10)
# # Plot of loss per iteration
import matplotlib.pyplot as plt
plt.plot(hist.history['loss'], label='Loss')
plt.plot(hist.history['val_loss'], label='Val Loss')
plt.legend()
# # Plot of Accuracy per iteration
plt.plot(hist.history['accuracy'], label="Accuracy")
plt.plot(hist.history['val_accuracy'], label="Val Accuracy")
plt.legend()
# # Evaluation on test set
print(model.evaluate(X_test, y_test))
# # Confusion Matrix
from sklearn.metrics import confusion_matrix
import numpy as np
import itertools
# +
def plot_confusion_matrix(cm,classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render a confusion matrix as an annotated matplotlib heatmap.

    Args:
        cm: square confusion-matrix array (e.g. from sklearn's confusion_matrix).
        classes: sequence of tick labels, one per class.
        normalize: if True, convert each row to fractions of its row sum.
        title: figure title.
        cmap: matplotlib colormap for the image.
    """
    if normalize:
        # Row-normalize so each row shows per-true-class fractions.
        cm = cm.astype('float32')/cm.sum(axis=1)[:, np.newaxis]
        print("Normalized Confusion Matrix")
    else:
        print("Confusion Matrix(Without Normalization)")
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Two decimals for normalized fractions, plain integers for raw counts.
    fmt = '.2f' if normalize else 'd'
    # White text on dark cells, black on light cells, for readability.
    thresh = cm.max()/2
    for i,j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                horizontalalignment="center",
                color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
p_test = model.predict(X_test).argmax(axis = 1)
cm = confusion_matrix(y_test, p_test)
plot_confusion_matrix(cm, list(range(10)))
# -
# # Let's See some misclassified examples
misclassified_idx = np.where(p_test != y_test)[0]
i = np.random.choice(misclassified_idx)
plt.imshow(X_test[i], cmap='gray')
plt.title("True label is {} and Predicted label is {}".format(y_test[i], p_test[i]))
i = np.random.choice(misclassified_idx)
plt.imshow(X_test[i], cmap='gray')
plt.title("True label is {} and Predicted label is {}".format(y_test[i], p_test[i]))
i = np.random.choice(misclassified_idx)
plt.imshow(X_test[i], cmap='gray')
plt.title("True label is {} and Predicted label is {}".format(y_test[i], p_test[i]))
i = np.random.choice(misclassified_idx)
plt.imshow(X_test[i], cmap='gray')
plt.title("True label is {} and Predicted label is {}".format(y_test[i], p_test[i]))
| TensorFlow2/4-MNIST_MLPS/.ipynb_checkpoints/MNIST_MLPs-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="QLSzJhTVmYca"
# # Homework for week 4
# + [markdown] id="ZWqkYe-mmYck"
# ## Function 1- string to integer
# + [markdown] id="L2Xeg6ZMmYck"
# #### 1. Convert String Number to Integer
# Create a function that can take something like ```"$5,356"``` or ```"$250,000"``` or even ```"$10.75"``` and convert it into an integer, returning ```5356```, ```250000``` and ```11``` respectively.
# + id="L0FSXNuTmYcl"
## build function here and call it clean_numbers
def clean_numbers(some_string_number):
    """Parse a money-style string like "$5,356" or "$10.75" and round it to an int."""
    # Strip currency symbols and thousands separators in one pass.
    stripped = some_string_number.translate(str.maketrans("", "", "$,"))
    return round(float(stripped))
# + colab={"base_uri": "https://localhost:8080/"} id="sjJQAznHmYcl" outputId="138567be-d8cb-4198-b50c-3eaa4447553d"
## call the function on "$5,356"
## Did you get 5356?
clean_numbers("$5,356")
# + colab={"base_uri": "https://localhost:8080/"} id="1A3ng-rfmYcl" outputId="fa604c4f-c7b3-4a5b-8002-a9519dbf8888"
## call the function on "$10.75"
## Did you get 11?
clean_numbers("$10.75")
# + colab={"base_uri": "https://localhost:8080/"} id="ztZY7KAWmYcl" outputId="a96732be-2333-4808-eec7-85f70c71db14"
## Use map() to run your function on the following list
## save result in a list called updated_list
my_amounts = ["$12.24", "$4,576.33", "$23,629.01"]
updated_list = list(map(clean_numbers, my_amounts))
updated_list
# + [markdown] id="v_OkCtGNmYcm"
# ### 3. What if we encounter a list that has a mix of numbers as strings and integers and floats?
# + id="HOLy-0nzmYcm"
## Run this cell
more_amounts = [960, "$12.24", "$4,576.33", "$23,629.01", 23.656]
# + id="QWevBiuUmYcm"
## tweaked the function here to handle all situations
def clean_numbers(some_string_number):
    """Round a money-like value to the nearest integer.

    Accepts a string such as "$4,576.33" (dollar signs and thousands
    separators are stripped) as well as plain ints and floats.
    """
    value = some_string_number
    if isinstance(value, str):
        # Remove currency symbols / separators before parsing.
        value = value.translate(str.maketrans("", "", "$,"))
    return round(float(value))
# + colab={"base_uri": "https://localhost:8080/"} id="UE56J2n6mYcm" outputId="daccced0-e92b-407a-d06c-2a4483a7c7a4"
## run it on more_amounts using map()
list(map(clean_numbers, more_amounts))
# + id="IffJV1m-t2DC"
| homework/homework-for-week-4-SOLUTIONS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: main2
# language: python
# name: main2
# ---
# +
import jax
import jax.numpy as jnp
import numpy as np # get rid of this eventually
import argparse
from jax import jit
from jax.experimental.ode import odeint
from functools import partial # reduces arguments to function by making some subset implicit
from jax.experimental import stax
from jax.experimental import optimizers
import os, sys, time
sys.path.append('..')
sys.path.append('../hyperopt')
from HyperparameterSearch import extended_mlp
# -
from models import mlp as make_mlp
from utils import wrap_coords
from jax.experimental.ode import odeint
# +
def hamiltonian_eom(hamiltonian, state, conditionals, t=None):
    """Hamilton's equations of motion for `state` = [q; p].

    Positions and conditionals are scaled down by 10 before being fed to the
    Hamiltonian. Returns [dq/dt; dp/dt]; with unit mass, dp/dt is also the
    acceleration. The unused `t` keeps the signature ODE-integrator friendly.
    """
    position, momentum = jnp.split(state, 2)
    position = position / 10.0  # normalize network inputs
    scaled_cond = conditionals / 10.0  # normalize network inputs
    # Hamilton's equations: dq/dt = dH/dp, dp/dt = -dH/dq.
    dH_dq, dH_dp = jax.grad(hamiltonian, argnums=(0, 1))(position, momentum, scaled_cond)
    return jnp.concatenate([dH_dp, -dH_dq])
# replace the hand-written Hamiltonian with a parametric model
def learned_dynamics(params, nn_forward_fn):
    """Wrap a neural network as a scalar function H(q, p, conditionals)."""
    @jit
    def dynamics(q, p, conditionals):
        # The network consumes one flat vector [q; p; conditionals].
        inputs = jnp.concatenate([q, p, conditionals])
        return jnp.squeeze(nn_forward_fn(params, inputs), axis=-1)
    return dynamics
# -
class ObjectView(object):
    """Lightweight wrapper exposing a dict's keys as attributes (args.key style)."""

    def __init__(self, d):
        # Adopt the dict directly as this instance's attribute namespace.
        self.__dict__ = d
# +
@jax.jit
def qdotdot(q, q_t, conditionals):
    """Relativistic kinematics under a constant force g (units with c = 1).

    Returns (dq/dt, d2q/dt2); the acceleration vanishes as |q_t| -> 1.
    """
    g = conditionals
    accel = g * (1 - q_t**2)**(5./2) / (1 + 2 * q_t**2)
    return q_t, accel


@jax.jit
def ofunc(y, t=None):
    """ODE right-hand side over interleaved state [q, q_t, g, q, q_t, g, ...]."""
    pos = y[::3]
    vel = y[1::3]
    force = y[2::3]
    vel, accel = qdotdot(pos, vel, force)
    # g is a constant of motion, so its time derivative is zero.
    return jnp.stack([vel, accel, jnp.zeros_like(force)]).T.ravel()
# -
(jnp.tanh(jax.random.uniform(jax.random.PRNGKey(1), (1000,))*10-5)*0.99999).max()
from matplotlib import pyplot as plt
plt.hist((jnp.tanh(jax.random.normal(jax.random.PRNGKey(1), (100,))*2)*0.99999))
# +
# Data generators for the relativistic constant-force problem.
@partial(jax.jit, static_argnums=(1, 2), backend='cpu')
def gen_data(seed, batch, num):
    """Integrate `batch` trajectories over t in [0, 1] at `num` time points.

    Returns (states, conditionals, targets): flattened (q, q_t) rows, the
    constant g per row, and the true acceleration q_tt per row.
    """
    rng = jax.random.PRNGKey(seed)
    q0 = jax.random.uniform(rng, (batch,), minval=-10, maxval=10)
    qt0 = jax.random.uniform(rng+1, (batch,), minval=-0.99, maxval=0.99)
    g = jax.random.normal(rng+2, (batch,))*10
    # Interleave [q, q_t, g] per trajectory, matching ofunc's layout.
    y0 = jnp.stack([q0, qt0, g]).T.ravel()
    # NOTE(review): jax's odeint documents this keyword as `mxstep`; confirm
    # `mxsteps` is accepted by the jax version pinned for this notebook.
    yt = odeint(ofunc, y0, jnp.linspace(0, 1, num=num), mxsteps=300)
    qall = yt[:, ::3]
    qtall = yt[:, 1::3]
    gall = yt[:, 2::3]
    return jnp.stack([qall, qtall]).reshape(2, -1).T, gall.reshape(1, -1).T, qdotdot(qall, qtall, gall)[1].reshape(1, -1).T
@partial(jax.jit, static_argnums=(1,))
def gen_data_batch(seed, batch):
    """Sample `batch` i.i.d. (state, g, (q_t, q_tt)) examples without integrating."""
    rng = jax.random.PRNGKey(seed)
    q0 = jax.random.uniform(rng, (batch,), minval=-10, maxval=10)
    # NOTE(review): the velocity draw uses a fixed PRNGKey(1) (not rng+1), so
    # qt0 is identical for every seed -- confirm this is intentional.
    qt0 = (jnp.tanh(jax.random.normal(jax.random.PRNGKey(1), (batch,))*2)*0.99999)#jax.random.uniform(rng+1, (batch,), minval=-1, maxval=1)
    g = jax.random.normal(rng+2, (batch,))*10
    return jnp.stack([q0, qt0]).reshape(2, -1).T, g.reshape(1, -1).T, jnp.stack(qdotdot(q0, qt0, g)).T
# -
cstate, cconditionals, ctarget = gen_data_batch(0, 5)
cstate, cconditionals, ctarget
from matplotlib import pyplot as plt
# +
# qdotdot(jnp.array([0]), jnp.array([0.9]), jnp.array([10]))
# +
# 0.29830917716026306 {'act': [4],
# 'batch_size': [27.0], 'dt': [0.09609870774790222],
# 'hidden_dim': [596.0], 'l2reg': [0.24927677946969878],
# 'layers': [4.0], 'lr': [0.005516656601005163],
# 'lr2': [1.897157209816416e-05], 'n_updates': [4.0]}
# -
import pickle as pkl
# +
# loaded = pkl.load(open('./params_for_loss_0.29429444670677185_nupdates=1.pkl', 'rb'))
# -
args = ObjectView({'dataset_size': 200,
'fps': 10,
'samples': 100,
'num_epochs': 80000,
'seed': 0,
'loss': 'l1',
'act': 'softplus',
'hidden_dim': 500,
'output_dim': 1,
'layers': 4,
'n_updates': 1,
'lr': 0.001,
'lr2': 2e-05,
'dt': 0.1,
'model': 'gln',
'batch_size': 68,
'l2reg': 5.7e-07,
})
# args = loaded['args']
rng = jax.random.PRNGKey(args.seed)
from jax.experimental.ode import odeint
best_params = None
best_loss = np.inf
from itertools import product
init_random_params, nn_forward_fn = extended_mlp(args)
rng = jax.random.PRNGKey(0)
_, init_params = init_random_params(rng, (-1, 3))
rng += 1
# This is the output. Now, let's train it.
# Idea: add identity before inverse:
# # Let's train it:
# +
best_small_loss = np.inf
iteration = 0
total_epochs = 100
minibatch_per = 3000
train_losses, test_losses = [], []
lr = 1e-3 #1e-3
final_div_factor=1e4
#OneCycleLR: cosine one-cycle learning-rate schedule over the training fraction.
@jax.jit
def OneCycleLR(pct):
    """Map training progress pct in [0, 1] to a learning rate.

    Reads the globals `lr` (peak rate) and `final_div_factor` (peak/floor ratio).
    """
    #Rush it: compress the cycle into the last 70% of training.
    start = 0.3 #0.2
    pct = pct * (1-start) + start
    high, low = lr, lr/final_div_factor
    # Cosine ramp: near `low` at the ends of the cycle, peaking at `high` mid-cycle.
    scale = 1.0 - (jnp.cos(2 * jnp.pi * pct) + 1)/2
    return low + (high - low)*scale
opt_init, opt_update, get_params = optimizers.adam(
OneCycleLR
)
opt_state = opt_init(init_params)
# opt_state = opt_init(best_params)
# -
plt.plot(OneCycleLR(jnp.linspace(0, 1, num=200)))
plt.yscale('log')
plt.title('lr schedule')
# +
from jax.tree_util import tree_flatten
@jax.jit
def loss(params, cstate, cconditionals, ctarget):
    """Weighted L1 loss between predicted and true (q_t, q_tt).

    Predictions come from the learned Hamiltonian's equations of motion,
    vmapped over the batch. Samples whose velocity is close to the speed of
    light receive a larger weight so the relativistic regime dominates.
    """
    runner = jax.vmap(
        partial(
            hamiltonian_eom,
            learned_dynamics(params, nn_forward_fn)), (0, 0), 0)
    preds = runner(cstate, cconditionals)[:, [0, 1]]
    error = jnp.abs(preds - ctarget)
    #Weight additionally by proximity to c!
    error_weights = (1 + 1/jnp.sqrt(1.0-cstate[:, [1]]**2))
    return jnp.sum(error * error_weights)*len(preds)/jnp.sum(error_weights)
@jax.jit
def update_derivative(i, opt_state, cstate, cconditionals, ctarget):
    """One optimizer step on the mean loss; returns (new_opt_state, pre-update params)."""
    params = get_params(opt_state)
    # Gradient of the per-sample mean loss w.r.t. params (argnum 0).
    param_update = jax.grad(
        lambda *args: loss(*args)/len(cstate),
        0
    )(params, cstate, cconditionals, ctarget)
    params = get_params(opt_state)
    return opt_update(i, param_update, opt_state), params
# -
epoch = 0
cstate, cconditionals, ctarget = gen_data_batch(epoch, 128)
loss(get_params(opt_state), cstate, cconditionals, ctarget)
update_derivative(0, opt_state, cstate, cconditionals, ctarget);
rng = jax.random.PRNGKey(0)
epoch = 0
from tqdm.notebook import tqdm
gen_data_batch(0, 128)[0].shape
cconditionals[:5]
cstate[:5]
ctarget[:5]
best_loss = np.inf
best_params = None
from copy import deepcopy as copy
# Main training loop: fresh random data every epoch, one-cycle LR schedule,
# and a host-side snapshot of the best parameters seen so far.
for epoch in tqdm(range(epoch, total_epochs)):
    epoch_loss = 0.0
    num_samples = 0
    batch = 512
    # Generate all of this epoch's minibatches in one (jit-compiled) call.
    ocstate, occonditionals, octarget = gen_data_batch(epoch, minibatch_per*batch)
    for minibatch in range(minibatch_per):
        # Fraction of total training elapsed; drives the OneCycleLR schedule.
        fraction = (epoch + minibatch/minibatch_per)/total_epochs
        s = np.s_[minibatch*batch:(minibatch+1)*batch]
        cstate, cconditionals, ctarget = ocstate[s], occonditionals[s], octarget[s]
        opt_state, params = update_derivative(fraction, opt_state, cstate, cconditionals, ctarget);
        rng += 10
        cur_loss = loss(params, cstate, cconditionals, ctarget)
        epoch_loss += cur_loss
        num_samples += len(cstate)
    closs = epoch_loss/num_samples
    print('epoch={} lr={} loss={}'.format(
        epoch, OneCycleLR(fraction), closs)
    )
    # Deep-copy the best parameters to host memory (jax arrays -> numpy).
    if closs < best_loss:
        best_loss = closs
        best_params = [[copy(jax.device_get(l2)) for l2 in l1] if len(l1) > 0 else () for l1 in params]
import pickle as pkl
# +
# pkl.dump({'params': best_params, 'description': 'q and g are divided by 10. hidden=500. act=Softplus'},
# open('best_sr_params_hamiltonian.pkl', 'wb'))
# -
opt_state = opt_init(best_params)
cstate, cconditionals, ctarget = gen_data(0, 1, 50)
cstate.shape
plt.plot(cstate[:, 1])
params = get_params(opt_state)
plt.rc('font', family='serif')
# +
fig, ax = plt.subplots(1, 1, figsize=(4*1, 4*1), sharex=True, sharey=True)
ax_idx = [(i, j) for i in range(1) for j in range(1)]
for i in tqdm(range(1)):
ci = ax_idx[i]
cstate, cconditionals, ctarget = gen_data((i+4)*(i+1), 1, 50)
runner = jax.jit(jax.vmap(
partial(
hamiltonian_eom,
learned_dynamics(params, nn_forward_fn)), (0, 0), 0))
@jax.jit
def odefunc_learned(y, t):
return jnp.concatenate((runner(y[None, :2], y[None, [2]])[0], jnp.zeros(1)))
yt_learned = odeint(
odefunc_learned,
jnp.concatenate([cstate[0], cconditionals[0]]),
np.linspace(0, 1, 50),
mxsteps=100)
cax = ax#[ci[0], ci[1]]
cax.plot(cstate[:, 1], label='Truth')
cax.plot(yt_learned[:, 1], label='Learned')
cax.legend()
if ci[1] == 0:
cax.set_ylabel('Velocity of particle/Speed of light')
if ci[0] == 0:
cax.set_xlabel('Time')
cax.set_ylim(-1, 1)
plt.title("Hamiltonian NN - Special Relativity")
plt.tight_layout()
plt.savefig('sr_hnn.png', dpi=150)
# -
| notebooks/SpecialRelativity-HNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
csv_test = pd.read_csv('KMeans_user.csv')
csv_test.shape
csv_test.head(3)
# +
# Find users who rated movies similarly
# -
x_columns = ["Action","Adventure","Animation","Children's","Comedy","Crime","Documentary","Drama","Fantasy","Film-Noir","Horror","Musical","Mystery","Romance","Sci-Fi","Thriller","War","Western"]
y_column = 'user_pk'
# Convert the string values of the y column to numbers with LabelEncoder
y_true_label_encoder = preprocessing.LabelEncoder()
y_true_encoded = y_true_label_encoder.fit_transform(csv_test[y_column])
print(list(csv_test[y_column][:5]),'->',list(y_true_encoded[:5])) #샘플출력
# +
kMeansModels = dict() # k값별 모델 저장할 딕셔너리
kMeansModelPreds = dict() # k값별 모델 예측 결과 저장할 딕셔너리
kMeansModelLabelEncoder = dict() # k값별 라벨인코더 저장할 딕셔너리
sumSquaredDistancesList = list() # 샘플과 클러스터 센터간 거리 제곱의 합 리스트
silhouetteScoreList = list() # Silhouette Coefficient 평균 리스트
# -
ks = [3] # k values to try (an earlier version tested k from 2 to 9)
for k in ks: # iterate over candidate k values
    model = KMeans(n_clusters=k, random_state=0, n_init=100).fit(csv_test[x_columns]) # fit a k-cluster model on the genre columns
# Notebook-style inspection expressions (values are discarded here).
model.cluster_centers_
model.labels_
# Group the first 100 users by the cluster label KMeans assigned them.
result={}
for (idx,user) in enumerate(csv_test[y_column][0:100]):
    if result.get(model.labels_[idx]) :
        result[model.labels_[idx]].append(user)
    else:
        result[model.labels_[idx]]=[user]
print(result)
| similartouser.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,md:myst
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercises
#
# **After** completing the tutorial attempt the following exercises.
#
# **If you are not sure how to do something, have a look at the "How To" section.**
#
# 1. Obtain the following tuples using the `range` command:
# 1. $(0, 1, 2, 3, 4, 5)$
# 2. $(2, 3, 4, 5)$
# 3. $(2, 4, 6, 8)$
# 4. $-1, 2, 5, 8$
# 2. By **both** generating and directly computing obtain the **number of** the following:
# 1. All permutations of $(0, 1, 2, 3, 4, 5)$.
# 2. All permutations of $("A", "B", "C")$.
# 3. Permutations of size 3 of $(0, 1, 2, 3, 4, 5)$.
# 4. Permutations of size 2 of $(0, 1, 2, 3, 4, 5, 6)$.
# 5. Combinations of size 3 of $(0, 1, 2, 3, 4, 5)$.
# 6. Combinations of size 2 of $(0, 1, 2, 3, 4, 5)$.
# 7. Combinations of size 5 of $(0, 1, 2, 3, 4, 5)$.
# 3. A class consists of 3 students from Ashville and 4 from Bewton. A committee of 5 students is chosen at random from the class.
# 1. Find the number of committees that include 2 students from Ashville and 3 from Bewton are chosen.
# 2. In fact 2 students, from Ashville and 3 from Bewton are chosen. In order to watch a video, all 5 committee members sit in a row. In how many different orders can they sit if no two students from Bewton sit next to each other.
# 4. Three letters are selected at random from the 8 letters of the word `COMPUTER`, without regard to order.
# 1. Find the number of possible selections of 3 letters.
# 2. Find the number of selections of 3 letters with the letter `P`.
# 3. Find the number of selections of 3 letters where the 3 letters form the word `TOP`.
| book/tools-for-mathematics/05-combinations-permutations/exercises/.main.md.bcp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# ### Veamos un ejemplo de un posible Sistema-L
# + deletable=true editable=true
import numpy as np
import matplotlib.pyplot as plt
# + [markdown] deletable=true editable=true
# #### Vamos a necesitar un cursor
# + deletable=true editable=true
class Cursor(object):
    """Drawing cursor: a 2-D position plus a heading angle in radians."""

    def __init__(self, x, y, angulo):
        # Store the starting position and orientation in one go.
        self.x, self.y, self.ang = x, y, angulo
# + [markdown] deletable=true editable=true
# #### Y generar una repeteción para manejar las iteraciones de los sistemas-L (puede ser por iteración o recursión)
# + deletable=true editable=true
class SistemaL(object):
    """Simple L-system renderer: recursively expands one grammar rule and
    draws each terminal segment with matplotlib."""

    def __init__(self, gramatica, inicial, distancia, nivel):
        # gramatica: dict mapping a symbol to its replacement string.
        # inicial: axiom (start symbol); must be a key of gramatica.
        # distancia: total length of the initial segment.
        # nivel: recursion depth (number of rewriting iterations).
        self.gramatica = gramatica
        self.inicial = inicial
        self.distancia = distancia
        self.nivel = nivel

    def iniciar(self):
        # Reset the drawing cursor to the origin, heading along +x.
        self.cursor = Cursor(0, 0, 0)
        #plt.plot([self.cursor.x, self.cursor.x + self.distancia],
        #         [self.cursor.y, self.cursor.y])

    def generar(self, nivel, cursor, distancia):
        """Recursively expand the rule for the start symbol and draw it.

        'F' recurses one level deeper with a third of the distance (at level 0
        it draws a segment and advances the cursor); '+' and '-' rotate the
        heading by pi/3. Note: every level re-reads the rule for the start
        symbol, so this supports single-rule grammars only.
        """
        if nivel == 0:
            # Draw one segment from the cursor along its current heading.
            a = (cursor.x, cursor.y)
            b = (cursor.x + distancia * np.cos(cursor.ang),
                 cursor.y + distancia * np.sin(cursor.ang))
            print(a, "-->", b)
            plt.plot([a[0], b[0]], [a[1], b[1]])
            cursor.x = b[0]
            cursor.y = b[1]
        else:
            print("nivel = ", nivel)
            for i in self.gramatica[self.inicial]:
                if i == 'F':
                    self.generar(nivel-1, cursor, distancia/3)
                elif i == '+':
                    cursor.ang = cursor.ang - np.pi/3
                elif i == '-':
                    cursor.ang = cursor.ang + np.pi/3
# + deletable=true editable=true
plt.clf()
miSistema = SistemaL(gramatica={'F': "F-F++F-F"}, inicial='F', distancia=1,
nivel=5)
miSistema.iniciar()
miSistema.generar(miSistema.nivel, miSistema.cursor, miSistema.distancia)
# + deletable=true editable=true
plt.show()
| Sistemas-L.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:docmap_playground]
# language: python
# name: conda-env-docmap_playground-py
# ---
# %load_ext autoreload
# %autoreload 2
# +
from functools import partial
from src.data import DataSource, Dataset, DatasetGraph
from src.data.process_functions import process_20_newsgroups
from src import paths
# +
# Set up a 20 newsgroups dataset
ds_name = '20_newsgroups'
output_ds_name = ds_name
dsrc = DataSource(ds_name)
# -
license = """
Custom Academic License: "You may use this material free of charge for any educational purpose, provided attribution is given in any lectures or publications that make use of this material." As in http://kdd.ics.uci.edu/databases/20newsgroups/20newsgroups.data.html.
"""
metadata = """
The 20 Newsgroups dataset is a collection of approximately 20,000 newsgroup documents, partitioned (nearly) evenly across 20 different newsgroups.
The data is organized into 20 different newsgroups, each corresponding to a different topic. Some of the newsgroups are very closely related to each other (e.g. comp.sys.ibm.pc.hardware / comp.sys.mac.hardware), while others are highly unrelated (e.g misc.forsale / soc.religion.christian).
Here are the categories:
* `alt.atheism`,
* `comp.graphics`,
* `comp.os.ms-windows.misc`,
* `comp.sys.ibm.pc.hardware`,
* `comp.sys.mac.hardware`,
* `comp.windows.x`,
* `misc.forsale`,
* `rec.autos`,
* `rec.motorcycles`,
* `rec.sport.baseball`,
* `rec.sport.hockey`,
* `sci.crypt`,
* `sci.electronics`,
* `sci.med`,
* `sci.space`,
* `soc.religion.christian`,
* `talk.politics.guns`,
* `talk.politics.mideast`,
* `talk.politics.misc`,
* `talk.religion.misc`
The current version is obtained by wrapping `sklearn.datasets.fetch_20newsgroups`, which comes from this [20 newsgroups webpage](http://qwone.com/~jason/20Newsgroups/).
By default we follow the sklearn suggestion to set `remove=('headers', 'footers', 'quotes')` to avoid overfitting.
"""
dsrc.add_metadata(contents=metadata, force=True)
dsrc.add_metadata(contents=license, kind='LICENSE', force=True)
# +
# Attach the processing function to the datasource and register the
# datasource -> dataset edge in the catalog's dataset graph.
process_function = process_20_newsgroups
process_kwargs = {}
dsrc.process_function = partial(process_function, **process_kwargs)
#workflow.add_datasource(dsrc)
dag = DatasetGraph(catalog_path=paths['catalog_path'])
dag.add_source(output_dataset=output_ds_name, datasource_name=ds_name, force=True)
# -
# %%time
ds = Dataset.load(ds_name)
len(ds.data) == 18846
| reference/datasets/20-newsgroups-dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Gradient Tape Basics
#
# In this ungraded lab, you'll get familiar with Tensorflow's built in API called Gradient Tape which helps in performing automatic differentiation.
# ## Imports
# + colab={} colab_type="code" id="uQe_MWjNPQkR"
import tensorflow as tf
# -
# ## Exercise on basics of Gradient Tape
#
# Let's explore how you can use [tf.GradientTape()](https://www.tensorflow.org/api_docs/python/tf/GradientTape) to do automatic differentiation.
# + colab={"base_uri": "https://localhost:8080/", "height": 69} colab_type="code" id="57Vnn9iIPNh9" outputId="543fce75-1adc-447c-b70b-872e8f8c9841"
# Define a 2x2 array of 1's
x = tf.ones((2,2))
with tf.GradientTape() as t:
    # Record the actions performed on tensor x with `watch`
    # (needed because x is a plain tensor, not a tf.Variable)
    t.watch(x)
    # Define y as the sum of the elements in x
    y = tf.reduce_sum(x)
    # Let z be the square of y
    z = tf.square(y)
# Get the derivative of z wrt the original input tensor x.
# z = (sum of x)**2, so dz/dx = 2 * sum(x) = 8.0 for every element.
dz_dx = t.gradient(z, x)
# Print our result
print(dz_dx)
# -
# ### Gradient tape expires after one use, by default
#
# If you want to compute multiple gradients, note that by default, GradientTape is not persistent (`persistent=False`). This means that the GradientTape will expire after you use it to calculate a gradient.
#
# To see this, set up gradient tape as usual and calculate a gradient, so that the gradient tape will be 'expired'.
# +
x = tf.constant(3.0)
# Notice that persistent is False by default
with tf.GradientTape() as t:
    # x is a constant, so it must be watched explicitly to be traced
    t.watch(x)
    # y = x^2
    y = x * x
    # z = y^2
    z = y * y
# Compute dz/dx. 4 * x^3 at x = 3 --> 108.0
dz_dx = t.gradient(z, x)
print(dz_dx)
# -
# #### Gradient tape has expired
#
# See what happens if you try to calculate another gradient after you've already used gradient tape once.
# If you try to compute dy/dx after the gradient tape has expired:
# A non-persistent tape releases its resources after the first .gradient() call,
# so this second call raises a RuntimeError.
try:
    dy_dx = t.gradient(y, x) # 6.0
    print(dy_dx)
except RuntimeError as e:
    print("The error message you get is:")
    print(e)
# ### Make the gradient tape persistent
# To make sure that the gradient tape can be used multiple times, set `persistent=True`
# + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="P12ExatAPqn6" outputId="65d8c2ca-b49d-4b91-d2d5-97153daf5d19"
x = tf.constant(3.0)
# Set persistent=True so that you can reuse the tape
with tf.GradientTape(persistent=True) as t:
    t.watch(x)
    # y = x^2
    y = x * x
    # z = y^2
    z = y * y
# Compute dz/dx. 4 * x^3 at x = 3 --> 108.0
dz_dx = t.gradient(z, x)
print(dz_dx)
# -
# #### Now that it's persistent, you can still reuse this tape!
#
# Try calculating a second gradient on this persistent tape.
# You can still compute dy/dx because of the persistent flag.
dy_dx = t.gradient(y, x) # 6.0
print(dy_dx)
# Great! It still works! Delete the tape variable `t` once you no longer need it.
# Drop the reference to the tape so its recorded operations can be freed.
del t
# ### Nested Gradient tapes
# Now let's try computing a higher order derivative by nesting the `GradientTapes:`
#
# #### Acceptable indentation of the first gradient calculation
# Keep in mind that you'll want to make sure that the first gradient calculation of `dy_dx` should occur at least inside the outer `with` block.
# + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="UxNLeFLlP4qU" outputId="5f96770a-f0a3-47e5-dd8d-3e0b3074deeb"
x = tf.Variable(1.0)
with tf.GradientTape() as tape_2:
    with tf.GradientTape() as tape_1:
        y = x * x * x
    # The first gradient calculation should occur at least
    # within the outer with block
    dy_dx = tape_1.gradient(y, x)
# d2y/dx2 works because dy_dx was computed while tape_2 was still recording
d2y_dx2 = tape_2.gradient(dy_dx, x)
print(dy_dx)
print(d2y_dx2)
# -
# The first gradient calculation can also be inside the inner with block.
# +
x = tf.Variable(1.0)
with tf.GradientTape() as tape_2:
    with tf.GradientTape() as tape_1:
        y = x * x * x
        # The first gradient calculation can also be within the inner with block
        dy_dx = tape_1.gradient(y, x)
d2y_dx2 = tape_2.gradient(dy_dx, x)
print(dy_dx)
print(d2y_dx2)
# -
# #### Where not to indent the first gradient calculation
# If the first gradient calculation is OUTSIDE of the outer `with` block, it won't persist for the second gradient calculation.
# +
x = tf.Variable(1.0)
with tf.GradientTape() as tape_2:
    with tf.GradientTape() as tape_1:
        y = x * x * x
# The first gradient call is outside the outer with block
# so the tape will expire after this
dy_dx = tape_1.gradient(y, x)
# The tape is now expired and the gradient output will be `None`
d2y_dx2 = tape_2.gradient(dy_dx, x)
print(dy_dx)
print(d2y_dx2)
# -
# Notice how the `d2y_dx2` calculation is now `None`. The tape has expired. Also note that this still won't work even if you set persistent=True for both gradient tapes.
# +
x = tf.Variable(1.0)
# Setting persistent=True still won't work
with tf.GradientTape(persistent=True) as tape_2:
    # Setting persistent=True still won't work
    with tf.GradientTape(persistent=True) as tape_1:
        y = x * x * x
# The first gradient call is outside the outer with block
# so the tape will expire after this
dy_dx = tape_1.gradient(y, x)
# the output will be `None`: persistence keeps a tape reusable, but it cannot
# record operations (here, the dy_dx computation) that run after its block exits
d2y_dx2 = tape_2.gradient(dy_dx, x)
print(dy_dx)
print(d2y_dx2)
# -
# ### Proper indentation for the second gradient calculation
#
# The second gradient calculation `d2y_dx2` can be indented as much as the first calculation of `dy_dx` but not more.
# +
# NOTE(review): indentation below was reconstructed from the lab text --
# dy_dx is taken inside the inner tape; d2y_dx2 may sit at any indent up to
# dy_dx's level but not deeper. Confirm against the original notebook.
x = tf.Variable(1.0)
with tf.GradientTape() as tape_2:
    with tf.GradientTape() as tape_1:
        y = x * x * x
        dy_dx = tape_1.gradient(y, x)
        # this is acceptable
        d2y_dx2 = tape_2.gradient(dy_dx, x)
print(dy_dx)
print(d2y_dx2)
# -
# This is also acceptable
# +
x = tf.Variable(1.0)
with tf.GradientTape() as tape_2:
    with tf.GradientTape() as tape_1:
        y = x * x * x
        dy_dx = tape_1.gradient(y, x)
    # this is also acceptable
    d2y_dx2 = tape_2.gradient(dy_dx, x)
print(dy_dx)
print(d2y_dx2)
# -
# This is also acceptable
# +
x = tf.Variable(1.0)
with tf.GradientTape() as tape_2:
    with tf.GradientTape() as tape_1:
        y = x * x * x
        dy_dx = tape_1.gradient(y, x)
# this is also acceptable
d2y_dx2 = tape_2.gradient(dy_dx, x)
print(dy_dx)
print(d2y_dx2)
| Custom and Distributed Training with TensorFlow/Week 1 Differentiation and Gradients/Lab_2_gradient-tape-basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Temperature Prediction for 5 Cities in the USA
# ## <NAME>
# An analysis is being performed here to analyze the variation in temperatures for different cities in the United States to determine any trend across the years and make predictions for the future based on current data using time series analysis on Python.
#
# The data is obtained from [Berkeley Earth](http://berkeleyearth.org/data/) and contains the average monthly temperature for various cities around the world.
#
# All Temperature values are in Celsius.
#import the necessary packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller, acf, pacf
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
from math import sqrt
# %matplotlib inline
#Load the data, and set the 'dt' column as a datetime object
# (parse_dates converts the 'dt' column to pandas Timestamps on read)
data = pd.read_csv('GlobalLandTemperaturesByCity.csv', parse_dates=['dt'])
# The analysis is being performed for 5 cities across different regions of the United States namely:
# 1. Chicago
# 2. New York
# 3. Minneapolis
# 4. Houston
# 5. Los Angeles
#
# These cities are chosen because they represent different climates, geography in the same country.
#
# We are also dividing the data to test the time series analysis on existing data values. In this case, this testing data is chosen as the temperature values beginning January 2012 and after. The training dataset contains the temperature data from January 1800 (for the cities available) to December 2011.
# +
#Creating the testing dataset having values after 2012 for the selected cities to test the predictions made later
test_data = {}
#all data for the respective cities is stored in city_selected
city_selected = ["Chicago", "New York", "Minneapolis", "Houston", "Los Angeles"]
for city in city_selected:
    # Fixed: pd.datetime (an alias of datetime.datetime) was deprecated in
    # pandas 1.0 and removed in 2.0; pd.Timestamp is the supported spelling
    # and compares identically against the parsed 'dt' column.
    test_data[city] = data[(data['Country']=='United States')&(data['dt']>pd.Timestamp(2011,12,1))&(data['City']==city)]
    test_data[city] = test_data[city].reset_index(drop = True)
# +
def City_data(city, country, data):
    '''Return the temperature rows for one city.

    Filters `data` to the given country, to dates in [1800-01-01, 2012-01-01),
    and to the given city, then resets the index so rows number from 0.

    Fixed: pd.datetime was removed in pandas 2.0; pd.Timestamp behaves the
    same in these comparisons and works on both old and new pandas.
    '''
    start = pd.Timestamp(1800, 1, 1)
    end = pd.Timestamp(2012, 1, 1)
    country_data = data[(data['Country'] == country) & (data['dt'] >= start) & (data['dt'] < end)]
    city_data = country_data[country_data['City'] == city]
    return city_data.reset_index(drop=True)
def average_yearly_data(data):
    '''Return a DataFrame of yearly mean temperatures.

    Expects `data` with the date in column 0 and the temperature in column 1,
    sorted by month with exactly 12 rows per year. Years are consumed in
    blocks of 12 rows; the loop stops after the first year >= 2011 has been
    averaged (so 2011 itself is included).
    '''
    out = {'Year': [], 'YearlyTemperature': []}
    row = 0
    year = data.iloc[row, 0].year
    while year < 2011:
        # Re-read the year at the current cursor, then average its 12 months.
        year = data.iloc[row, 0].year
        total = 0
        for _ in range(12):
            total += data.iloc[row, 1]
            row += 1
        out['Year'].append(year)
        out['YearlyTemperature'].append(total / 12)
    return pd.DataFrame(out)
#the average yearly data for these cities is stored in average_yearly_city_data
# Build the per-city 1800-2011 frames and overlay each city's yearly mean
# temperature on a single axes.
city_data = {}
average_yearly_city_data = {}
for city in city_selected:
    city_data[city] = City_data(city, 'United States', data)
    average_yearly_city_data[city] = average_yearly_data(city_data[city])
    plt.plot(average_yearly_city_data[city]['Year'], average_yearly_city_data[city]['YearlyTemperature'], label = city)
plt.legend(bbox_to_anchor=(1.05,1), loc=2)
plt.xlabel('Year')
plt.ylabel('Average Yearly Temperature')
plt.title('Variation in Average Yearly Temperature through the years')
# -
# From the above plot we see that the average temperature each year is varying a lot for each of the selected cities. Also, there seems to be a slight increase in the yearly temperatures post 1900. Just to be sure we will visualize this period specifically.
# Restrict the yearly averages to 1900 onwards and re-plot them.
data_for_year_after_1900 = {}
for city in city_selected:
    condition = average_yearly_city_data[city]['Year']>=1900
    data_for_year_after_1900[city] = average_yearly_city_data[city][condition]
    plt.plot(data_for_year_after_1900[city]['Year'], data_for_year_after_1900[city]['YearlyTemperature'], label=city)
plt.legend(bbox_to_anchor=(1.05,1), loc=2)
plt.xlabel('Year')
plt.ylabel('Average Yearly Temperature')
plt.title('Variation in Average Temperature after 1900')
# Even now looking at the plot no clear trend is observed, the temperature increases and decreases in no particular fashion. However, the increase and decrease in yearly temperature appears to be at the same time (a little different for Los Angeles) for the different cities.
#
# To analyze the data further, we perform a decomposition of the temperature data for these cities in order to visualize the trend, seasonality and the residuals for the temperature data of the respective cities.
# +
# storing the seasonality separately to make the graph
# Decompose each city's monthly series into trend / seasonal / residual parts
# and overlay the trend and residual curves for all cities.
seasonality = {}
fig1 = plt.subplot(2,1,1)
fig2 = plt.subplot(2,1,2)
for city in city_selected:
    city_data[city] = city_data[city].set_index('dt')
    decomposition = seasonal_decompose(city_data[city]['AverageTemperature'])
    trend = decomposition.trend
    seasonality[city] = decomposition.seasonal
    residuals = decomposition.resid
    fig1.plot(trend, label=city)
    fig1.set_title('Trend')
    fig2.plot(residuals, label=city)
    fig2.set_title('Residuals')
plt.tight_layout()
plt.legend(bbox_to_anchor=(1.05,1), loc=2)
plt.xlabel('Year')
# +
#plotting the seasonality values over a year for each of the cities
#Set the x axis to just show the monthly values
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m'))
for city in (seasonality.keys()):
    # last 12 entries = one full seasonal cycle (one year)
    plt.plot(seasonality[city][-12:], label = city)
plt.legend(bbox_to_anchor=(1.05,1), loc=2)
plt.title('Seasonality')
plt.ylabel('Temperature (in Celsius)')
plt.xlabel('Month')
# -
# The following observations are noted from the plots above:
# 1. There is no discernable trend in the data for any of the cities.
# 2. The variation in residuals appear to be slightly increasing with time (more prominent for Minneapolis and Los Angeles) and vary a lot for each of the cities under study.
# 3. The seasonal variation through the year is very high peaking during the month of July and having minimum value in January. Also, the seasonal plot has a normal pattern for each of the cities under study.
#
# To make predictions on the temperature for years to come, we will now check the data for stationarity by performing the augmented Dickey-Fuller Test.
# +
def stationary_stat_city(test_data):
    """Print a readable summary of an adfuller() result tuple.

    Indices used: 0 = test statistic, 1 = p-value, 3 = number of
    observations, 4 = dict of critical values keyed '1%'/'5%'/'10%'.
    """
    print(f'Test Statistic: {test_data[0]}')
    print(f'P value: {test_data[1]}')
    print(f'Number of Observations: {test_data[3]}')
    print(f"Critical Value 1%: {test_data[4]['1%']}")
    print(f"Critical Value 5%: {test_data[4]['5%']}")
    print(f"Critical Value 10%: {test_data[4]['10%']}")
# Run the augmented Dickey-Fuller test on each city's monthly series and
# print the summary statistics via the helper above.
test_output = {}
for city in city_selected:
    test_output[city] = adfuller(city_data[city]['AverageTemperature'])
    print ('Statistics for ' + city)
    stationary_stat_city(test_output[city])
    print()
# -
# It is seen that the test statistic is more negative than the 1% critical value for each of the selected cities. Thus, we can reject the null hypothesis of a unit root being present in the data set, and we can conclude with more than 99% confidence that each series is stationary.
#
# To make future predictions using the ARIMA model, we first need to determine the values of its coefficients which is done through plotting the Autocorrelation and the Partial autocorrelation functions.
# +
# Plot the ACF (400 lags) and PACF for each city to pick the ARIMA orders.
acf_val = {}
pacf_val = {}
fig1 = plt.subplot(2,1,1)
fig1.set_title('Autocorrelation Function')
fig2 = plt.subplot(2,1,2)
fig2.set_title('Partial Autocorrelation Function')
for city in city_selected:
    acf_val[city] = acf(city_data[city]['AverageTemperature'], nlags = 400)
    pacf_val[city] = pacf(city_data[city]['AverageTemperature'])
    fig1.plot(acf_val[city], label=city)
    fig1.axhline(y=0,linestyle='--',color='gray')
    fig2.plot(pacf_val[city], label=city)
    fig2.axhline(y=0,linestyle='--',color='gray')
    # 95% confidence band: +/- 1.96 * std / sqrt(n)
    plt.axhline(y=(-1.96*np.std(city_data[city]['AverageTemperature']))/np.sqrt(len(city_data[city])),linestyle='-.',color='gray')
    plt.axhline(y=(1.96*np.std(city_data[city]['AverageTemperature']))/np.sqrt(len(city_data[city])),linestyle='-.',color='gray')
plt.tight_layout()
plt.legend(bbox_to_anchor=(1.05,1), loc=2)
plt.xlabel('Lag Value')
# -
# We can see from plotting the autocorrelation function that the value of the function actually does decay but this decay is very slow. However, the decay for the Partial Autocorrelation function is much faster. Also, for the autocorrelation function the value peaks at 0 (shown below) and both the functions are similar for each of the cities under study.
# +
# Zoom in on the first 20 lags of the ACF.
acf_val_small = {}
for city in city_selected:
    acf_val_small[city] = acf(city_data[city]['AverageTemperature'], nlags = 20)
    plt.plot(acf_val_small[city], label = city)
    plt.axhline(y=0,linestyle='--',color='gray')
    #Plotting the 95% confidence intervals
    plt.axhline(y=(-1.96*np.std(city_data[city]['AverageTemperature']))/np.sqrt(len(city_data[city])),linestyle='-.',color='gray')
    plt.axhline(y=(1.96*np.std(city_data[city]['AverageTemperature']))/np.sqrt(len(city_data[city])),linestyle='-.',color='gray')
plt.legend(bbox_to_anchor=(1.05,1), loc=2)
plt.title('Autocorrelation Function')
plt.xlabel('Lag Value')
# -
# From the above plots for Autocorrelation function and the Partial Autocorrelation function we conclude that the ARIMA model we will be using will have the following input parameters:
#
# p = 6 (lag value where the PACF first cuts off the lower confidence level while rising)
#
# q = 0 (Lag Value where the ACF is above the upper confidence level the first time)
#
# d = 0 (Order of differencing used to perform the analysis)
#Defining different variables to store the different values which are being calculated.
#Forecasting for time steps in the future we have the data for (entire 2012 and 9 months of 2013) = 21
# NOTE(review): statsmodels.tsa.arima_model.ARIMA (imported above) was removed
# in statsmodels 0.13; newer environments need statsmodels.tsa.arima.model.ARIMA,
# whose forecast() return shape differs -- confirm the target statsmodels version.
model_arima = {}
results_model = {}
forecast_model = {}
for city in city_selected:
    # (removed dead assignment `pred_name = ""` -- the name was never read)
    # AR(6): order (p, d, q) = (6, 0, 0) chosen from the ACF/PACF analysis above.
    model_arima[city] = ARIMA(city_data[city]['AverageTemperature'], order = (6,0,0)).fit()
    forecast_model[city] = model_arima[city].forecast(steps = 21)
    results_model[city] = model_arima[city].fittedvalues
# We will now compare the predictions made by our model with the actual values obtained for the year of 2013 available from the data for each of the cities.
# Plot model forecasts against the held-out 2012-2013 observations,
# one subplot per city.
fig = []
i = 0
for city in city_selected:
    predictions = forecast_model[city][0]  # first element of the forecast result (point forecasts)
    fig.append(plt.subplot(5,1,i+1))
    fig[i].plot(predictions, label = "prediction", linestyle='--')
    fig[i].plot(test_data[city]['AverageTemperature'], label = "actual")
    fig[i].set_title('Comparison for '+ city)
    i += 1
plt.gcf().set_size_inches(6,8)
plt.tight_layout()
plt.legend(bbox_to_anchor=(1.05,1), loc=2)
plt.xlabel('Month')
# We can see that our analysis predicts the variation in the temperature data very well but the exact values are off. To calculate this error which is occurring between the forecasted and the actual values, we will calculate the root mean squared error for the temperature values for each of the cities under study.
mean_sq_error = {}
print ('Root Mean Square Errors:')
for city in city_selected:
    # RMSE between the 21 forecast points and the actual observations.
    mean_sq_error[city] = sqrt(mean_squared_error(test_data[city]['AverageTemperature'], forecast_model[city][0]))
    print (city + ": " + str(mean_sq_error[city]))
# From the graphs and the rms errors calculated for each of the cities, we see that the errors are least for Houston and Los Angeles, while it is the highest for Minneapolis (because of the variation between months 0 & 7, rest of the estimations are actually quite accurate).
#
# From the graphs, we can also see that for each city apart from Los Angeles, the model estimates the temperature to be less than what it actually is between the months 0 & 6.
# # Conclusion:
#
# The analysis performed looks at identifying trends in the temperature data across the years and making predictions for temperature for 5 different cities in the United States. Through the analysis, it can be concluded that:
# 1. No discernable trend was observed in the temperature values for each of the cities during the exploratory phase.
# 2. ARIMA model used for prediction of the temperature values, makes the best predictions for Los Angeles and Houston.
#
# It is to be noted that weather prediction depends highly on local climate and is subject to frequent change depending on the local conditions. Thus, the root mean squared error values achieved by us represent good estimation of the temperature which when combined with certain other weather parameters can yield even better results but is beyond the scope of our current analysis.
| Temperature_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# <hr style="margin-bottom: 40px;">
#
# # Pandas DataFrame exercises
#
# +
# Import the numpy package under the name np
import numpy as np
# Import the pandas package under the name pd
import pandas as pd
# Import the matplotlib package under the name plt
import matplotlib.pyplot as plt
# %matplotlib inline
# Print the pandas version and the configuration
# (only the version string is printed here; pd.show_versions() dumps the full config)
print(pd.__version__)
# -
# 
#
# ## DataFrame creation
# ### Create an empty pandas DataFrame
#
# your code goes here
# + [solution]
# Fixed: an empty DataFrame has no rows and no columns. The previous solution,
# pd.DataFrame(data=[None], index=[None], columns=[None]), actually built a
# 1x1 frame containing None, for which df.empty is False.
pd.DataFrame()
# -
# <img width=400 src="https://cdn.dribbble.com/users/4678/screenshots/1986600/avengers.png"></img>
# 
#
# ### Create a `marvel_df` pandas DataFrame with the given marvel data
#
# Raw rows in [name, sex, first year of appearance] order -- these match the
# column names assigned in a later cell of this notebook.
marvel_data = [
    ['Spider-Man', 'male', 1962],
    ['Captain America', 'male', 1941],
    ['Wolverine', 'male', 1974],
    ['Iron Man', 'male', 1963],
    ['Thor', 'male', 1963],
    ['Thing', 'male', 1961],
    ['<NAME>', 'male', 1961],
    ['Hulk', 'male', 1962],
    ['Beast', 'male', 1963],
    ['Invisible Woman', 'female', 1961],
    ['Storm', 'female', 1975],
    ['Namor', 'male', 1939],
    ['Hawkeye', 'male', 1964],
    ['Daredevil', 'male', 1964],
    ['Doctor Strange', 'male', 1963],
    ['<NAME>', 'male', 1962],
    ['Scarlet Witch', 'female', 1964],
    ['Wasp', 'female', 1963],
    ['Black Widow', 'female', 1964],
    ['Vision', 'male', 1968]
]
# your code goes here
# + [solution]
# Columns default to the integers 0..2 here; names are attached below.
marvel_df = pd.DataFrame(data=marvel_data)
marvel_df
# -
# 
#
# ### Add column names to the `marvel_df`
#
# your code goes here
# + [solution]
# Assign the column labels in positional order.
col_names = ['name', 'sex', 'first_appearance']
marvel_df.columns = col_names
marvel_df
# -
# 
#
# ### Add index names to the `marvel_df` (use the character name as index)
#
# your code goes here
# + [solution]
# Use the character name column as the row index (the column itself remains for now).
marvel_df.index = marvel_df['name']
marvel_df
# -
# 
#
# ### Drop the name column as it's now the index
# your code goes here
# + [solution]
#marvel_df = marvel_df.drop(columns=['name'])
# axis=1 drops a column; the name data is preserved in the index set above.
marvel_df = marvel_df.drop(['name'], axis=1)
marvel_df
# -
# 
#
# ### Drop 'Namor' and '<NAME>' rows
#
# your code goes here
# + [solution]
# axis=0 drops rows by index label.
marvel_df = marvel_df.drop(['Namor', '<NAME>'], axis=0)
marvel_df
# -
# 
#
# ## DataFrame selection, slicing and indexation
# ### Show the first 5 elements on `marvel_df`
#
# your code goes here
# + [solution]
#marvel_df.loc[['Spider-Man', 'Captain America', 'Wolverine', 'Iron Man', 'Thor'], :] # bad!
#marvel_df.loc['Spider-Man': 'Thor', :]
#marvel_df.iloc[0:5, :]
#marvel_df.iloc[0:5,]
# Positional slicing keeps the first five rows.
marvel_df.iloc[:5,]
#marvel_df.head()
# -
# 
#
# ### Show the last 5 elements on `marvel_df`
#
# your code goes here
# + [solution]
#marvel_df.loc[['Hank Pym', 'Scarlet Witch', 'Wasp', 'Black Widow', 'Vision'], :] # bad!
#marvel_df.loc['Hank Pym':'Vision', :]
# A negative slice start selects the last five rows.
marvel_df.iloc[-5:,]
#marvel_df.tail()
# -
# 
#
# ### Show just the sex of the first 5 elements on `marvel_df`
# your code goes here
# + [solution]
#marvel_df.iloc[:5,]['sex'].to_frame()
# to_frame() turns the selected Series back into a one-column DataFrame.
marvel_df.iloc[:5,].sex.to_frame()
#marvel_df.head().sex.to_frame()
# -
# 
#
# ### Show the first_appearance of all middle elements on `marvel_df`
# your code goes here
# + [solution]
# 1:-1 skips the first and last rows (the "middle" elements).
marvel_df.iloc[1:-1,].first_appearance.to_frame()
# -
# 
#
# ### Show the first and last elements on `marvel_df`
#
# your code goes here
# + [solution]
#marvel_df.iloc[[0, -1],][['sex', 'first_appearance']]
# A list of positions selects exactly the first and last rows.
marvel_df.iloc[[0, -1],]
# -
# 
#
# ## DataFrame manipulation and operations
# ### Modify the `first_appearance` of 'Vision' to year 1964
# your code goes here
# + [solution]
# .loc[row_label, column_label] assigns a single cell in place.
marvel_df.loc['Vision', 'first_appearance'] = 1964
marvel_df
# -
# 
#
# ### Add a new column to `marvel_df` called 'years_since' with the years since `first_appearance`
#
# your code goes here
# + [solution]
# Vectorized subtraction; 2018 is the exercise's reference year.
marvel_df['years_since'] = 2018 - marvel_df['first_appearance']
marvel_df
# -
# 
#
# ## DataFrame boolean arrays (also called masks)
# ### Given the `marvel_df` pandas DataFrame, make a mask showing the female characters
#
# your code goes here
# + [solution]
# Boolean Series: True where the row's sex is 'female'.
mask = marvel_df['sex'] == 'female'
mask
# -
# 
#
# ### Given the `marvel_df` pandas DataFrame, get the male characters
#
# your code goes here
# + [solution]
# Indexing with a boolean mask keeps only the True rows.
mask = marvel_df['sex'] == 'male'
marvel_df[mask]
# -
# 
#
# ### Given the `marvel_df` pandas DataFrame, get the characters with `first_appearance` after 1970
#
# your code goes here
# + [solution]
# Elementwise comparison gives the mask of post-1970 first appearances.
mask = marvel_df['first_appearance'] > 1970
marvel_df[mask]
# -
# 
#
# ### Given the `marvel_df` pandas DataFrame, get the female characters with `first_appearance` after 1970
# your code goes here
# + [solution]
# Combine masks with & (bitwise AND); each side must be parenthesized.
mask = (marvel_df['sex'] == 'female') & (marvel_df['first_appearance'] > 1970)
marvel_df[mask]
# -
# 
#
# ## DataFrame summary statistics
# ### Show basic statistics of `marvel_df`
# your code goes here
# + [solution]
# describe() summarizes the numeric columns (count, mean, std, quartiles, ...).
marvel_df.describe()
# -
# 
#
# ### Given the `marvel_df` pandas DataFrame, show the mean value of `first_appearance`
# your code goes here
# + [solution]
#np.mean(marvel_df.first_appearance)
# Series.mean() averages the first_appearance years.
marvel_df.first_appearance.mean()
# -
# 
#
# ### Given the `marvel_df` pandas DataFrame, show the min value of `first_appearance`
#
# your code goes here
# + [solution]
#np.min(marvel_df.first_appearance)
# Series.min() gives the earliest first-appearance year.
marvel_df.first_appearance.min()
# -
# 
#
# ### Given the `marvel_df` pandas DataFrame, get the characters with the min value of `first_appearance`
# your code goes here
# + [solution]
# Rows whose first_appearance equals the column minimum (there may be several).
mask = marvel_df['first_appearance'] == marvel_df.first_appearance.min()
marvel_df[mask]
# -
# 
#
# ## DataFrame basic plottings
# ### Reset index names of `marvel_df`
#
# your code goes here
# + [solution]
# Moves the 'name' index back into a regular column and restores a RangeIndex.
marvel_df = marvel_df.reset_index()
marvel_df
# -
# 
#
# ### Plot the values of `first_appearance`
#
# your code goes here
# + [solution]
#plt.plot(marvel_df.index, marvel_df.first_appearance)
# Series.plot() draws a line chart of the values against the index.
marvel_df.first_appearance.plot()
# -
# 
#
# ### Plot a histogram (plot.hist) with values of `first_appearance`
#
# your code goes here
# + [solution]
# Histogram of first-appearance years (default bin count).
plt.hist(marvel_df.first_appearance)
# -
# 
#
| Pandas/.ipynb_checkpoints/4 - Pandas DataFrames exercises-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# name: python2
# ---
# <h1>lcacoffee</h1>
#
# script that displays coffees sold by hour at lca2015.
# Currently it opens a .json file and converts it into a python dict.
# NOTE(review): exploratory Python 2 notebook code. Indentation below was
# reconstructed (the loop nesting is ambiguous in places) -- verify against
# the original notebook before relying on it.
import json
import os
import pandas
# Read the exported coffee-sales JSON into a Python object.
opcvs = open('/home/wcmckee/Downloads/convertcsv.json', 'r')
opzrd = opcvs.read()
jdunp = json.loads(opzrd)
valia = []
# NOTE(review): pandas.read_json expects a JSON string/path/buffer, not an
# already-parsed object -- this call likely fails or misbehaves; confirm intent.
pandas.read_json(jdunp)
# NOTE(review): list.count(int) counts occurrences of the type object `int`
# itself, which is almost certainly not what was meant here.
jdunp.count(int)
len(jdunp)
# ok if i cycle through jdunp between 0 and 23 i get the results.
#
# cycle through ints but as a string. must add ' '
for numtwn in range(0,24):
    print "'" + str(numtwn) + "'"
for jdr in jdunp:
    print jdr['0']
for numtwn in range(0,24):
    print "'" + str(numtwn) + "'"
for dej in jdunp:
    print dej.values()
    valia.append(dej.values())
dezrand = len(valia)
azlis = []
for vals in valia:
    print vals
    azlis.append(vals)
# I need to filter the - - from the results. I really only need the values that have numbers.
#
# Take number in brackets away from number not in brackets.
# The number in brackets is total amount of coffees sold. The number not in brackets is amount of vouchers used.
# The number that I get when i take away is the coffee sold without vouchers.
#
# New dict that shows only the times that coffee were sold and the amount of coffees that were sold. Maybe that would work.
betra = []
for azl in azlis:
    betra.append(azl)
anoe = []
anez = []
for betr in betra:
    betr.append(anoe)
    for deta in betr:
        #print deta
        if '- -' in deta:
            print deta
        else:
            anez.append(deta)
fdic = []
for resut in anez:
    print resut
    fdic.append(resut)
# How come it is only adding the wednesday data in the results. It needs to have all the datas.
#
# Needs to take the number in brackets away from the number not in brackets.
fdic
| .ipynb_checkpoints/lcacoffee-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# Exercise 6 - Support Vector Machines
# =====
#
# Support vector machines (SVMs) let us predict categories. This exercise will demonstrate a simple support vector machine that can predict a category from a small number of features.
#
# Our problem is that we want to be able to categorise which type of tree an new specimen belongs to. To do this, we will use features of three different types of trees to train an SVM.
#
# __Run the code__ in the cell below.
# Run this code!
# It sets up the graphing configuration.
import warnings
# Silence all warnings for the whole notebook (keeps the course output clean,
# but will also hide genuinely useful sklearn/matplotlib warnings).
warnings.filterwarnings("ignore")
import matplotlib.pyplot as graph
# %matplotlib inline
graph.rcParams['figure.figsize'] = (15,5)
graph.rcParams["font.family"] = 'DejaVu Sans'
graph.rcParams["font.size"] = '12'
graph.rcParams['image.cmap'] = 'rainbow'
# Step 1
# -----
#
# First, we will take a look at the raw data first to see what features we have.
#
# #### Replace `<printDataHere>` with `print(dataset.head())` and then __run the code__.
# +
import pandas as pd
import numpy as np
# Loads the SVM library
from sklearn import svm
# Loads the dataset
dataset = pd.read_csv('Data/trees.csv')
###
# REPLACE <printDataHere> with print(dataset.head()) TO PREVIEW THE DATASET
###
# Preview the first five rows: four features plus the tree_type label.
print(dataset.head())
###
# -
# It looks like we have _four features_ (leaf_width, leaf_length, trunk_girth, trunk_height) and _one label_ (tree_type).
#
# Let's plot it.
#
# __Run the code__ in the cell below.
# +
# Run this code to plot the leaf features
# This extracts the features. drop() deletes the column we state (tree_type), leaving only the features
allFeatures = dataset.drop(['tree_type'], axis = 1)
# This keeps only the column we state (tree_type), leaving only our label
labels = np.array(dataset['tree_type'])
#Plots the graph
X = allFeatures['leaf_width']
Y = allFeatures['leaf_length']
color=labels
graph.scatter(X, Y, c = color)
graph.title('classification plot for leaf features')
graph.xlabel('leaf width')
graph.ylabel('leaf length')
# NOTE(review): no artists here carry a label, so legend() draws nothing
# (and may warn) -- harmless, but it could be removed.
graph.legend()
graph.show()
# -
# __Run the code__ in the cell below to plot the trunk features
# Run this code to plot the trunk features
# Same scatter as above, but using the trunk measurements as the two axes.
graph.scatter(allFeatures['trunk_girth'], allFeatures['trunk_height'], c = labels)
graph.title('Classification plot for trunk features')
graph.xlabel('trunk girth')
graph.ylabel('trunk height')
graph.show()
# Step 2
# -----
#
# Lets make a support vector machine.
#
# The syntax for a support vector machine is as follows:
#
# __`model = svm.SVC().fit(features, labels)`__
#
# Your features set will be called __`train_X`__ and your labels set will be called __`train_Y`__
#
# #### Let's first run the SVM in the cell below using the first two features, the leaf features.
# +
# Sets up the feature and target sets for leaf features
# Feature 1
feature_one = allFeatures['leaf_width'].values
# Feature 2
feature_two = allFeatures['leaf_length'].values
# Features
train_X = np.asarray([feature_one, feature_two]).transpose()
# Labels
train_Y = labels
# Fits the SVM model
###
# REPLACE THE <makeSVM> WITH THE CODE TO MAKE A SVM MODEL AS ABOVE
###
model = svm.SVC().fit(train_X, train_Y)
###
print("Model ready. Now plot it to see the result.")
# -
# #### Let's plot it! Run the cell below to visualise the SVM with our dataset.
# +
# Run this to plots the SVM model
X_min, X_max = train_X[:, 0].min() - 1, train_X[:, 0].max() + 1
Y_min, Y_max = train_X[:, 1].min() - 1, train_X[:, 1].max() + 1
XX, YY = np.meshgrid(np.arange(X_min, X_max, .02), np.arange(Y_min, Y_max, .02))
Z = model.predict(np.c_[XX.ravel(), YY.ravel()]).reshape(XX.shape)
graph.scatter(feature_one, feature_two, c = train_Y, cmap = graph.cm.rainbow, zorder = 10, edgecolor = 'k', s = 40)
graph.contourf(XX, YY, Z, cmap = graph.cm.rainbow, alpha = 1.0)
graph.contour(XX, YY, Z, colors = 'k', linestyles = '--', alpha=0.5)
graph.title('SVM plot for leaf features')
graph.xlabel('leaf width')
graph.ylabel('leaf length')
graph.show()
# -
# The graph shows three colored zones that the SVM has chosen to group the datapoints in. Color, here, means type of tree. As we can see, the zones correspond reasonably well with the actual tree types of our training data. This means that, for its training data, the SVM can predict tree type from the leaf features quite well.
#
#
# Now let's do the same using trunk features.
#
# ### In the cell below replace:
# #### 1. `<addTrunkGirth>` with `'trunk_girth'`
# #### 2. `<addTrunkHeight>` with `'trunk_height'`
# #### Then __run the code__.
# +
# Feature 1
###--- REPLACE THE <addTrunkGirth> BELOW WITH 'trunk_girth' (INCLUDING THE QUOTES) ---###
###
trunk_girth = allFeatures['trunk_girth'].values
###
# Feature 2
###--- REPLACE THE <addTrunkHeight> BELOW WITH 'trunk_height' (INCLUDING THE QUOTES) ---###
trunk_height = allFeatures['trunk_height'].values
###
# Features: stack the two 1-D arrays into an (n_samples, 2) matrix
trunk_features = np.asarray([trunk_girth, trunk_height]).transpose()
# Fits the SVM model
model = svm.SVC().fit(trunk_features, train_Y)
# Plots the SVM decision regions over a dense (girth, height) grid
X_min, X_max = trunk_features[:, 0].min() - 1, trunk_features[:, 0].max() + 1
Y_min, Y_max = trunk_features[:, 1].min() - 1, trunk_features[:, 1].max() + 1
XX, YY = np.meshgrid(np.arange(X_min, X_max, .02), np.arange(Y_min, Y_max, .02))
Z = model.predict(np.c_[XX.ravel(), YY.ravel()]).reshape(XX.shape)
graph.scatter(trunk_girth, trunk_height, c = train_Y, cmap = graph.cm.rainbow, zorder = 10, edgecolor = 'k', s = 40)
graph.contourf(XX, YY, Z, cmap = graph.cm.rainbow, alpha = 1.0)
graph.contour(XX, YY, Z, colors = 'k', linestyles = '--', alpha = 0.5)
# Fixed: this cell plots the trunk features, but the title previously said
# 'leaf features' (copy-paste from the cell above).
graph.title('SVM plot for trunk features')
graph.xlabel('trunk girth')
graph.ylabel('trunk height')
graph.show()
# -
# Conclusion
# -------
#
# And that's it! You've made a simple support vector machine that can predict the type of tree based on the leaf and trunk measurements!
#
# You can go back to the course now and click __'Next Step'__ to move onto how we can test AI models.
| projects/ms-learn-ml-crash-course/06. Support Vector Machines - Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TRASE Widgets for Supply Chain
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# **Read soy data**
# Local path to the TRASE Brazil soy supply-chain export (v2.3).
df = pd.read_csv("/Users/ikersanchez/Vizzuality/PROIEKTUAK/TRASE/work/data/BRAZIL_SOY_2.3/BRAZIL_SOY_2.3.csv")
# Normalise column names to lowercase so later lookups are consistent.
df.columns = [x.lower() for x in df.columns]
# Keep only the columns used by the widgets below.
df = df[['year', 'biome', 'exporter', 'importer', 'country', 'soy_equivalent_tonnes', 'territorial_deforestation_ha', 'zero_deforestation']]
df.head()
def donut_chart(labels, sizes, title, colors):
    """Draw a pie chart with a white centre disc so it reads as a donut."""
    plt.pie(sizes, labels=labels, autopct='%1.1f%%', colors=colors, shadow=True)
    # Overlaying a white circle on the current axes hollows out the pie.
    hole = plt.Circle((0, 0), 0.75, fc='white', linewidth=1.25)
    plt.gcf().gca().add_artist(hole)
    # Equal aspect keeps the donut circular.
    plt.axis('equal')
    plt.title(title)
    plt.show()
def bar_chart_hor(x, y, title, xlabel):
    """Horizontal bar chart: categories in ``x``, bar lengths in ``y``."""
    plt.barh(x, y, color='#ea6869')
    plt.xlabel(xlabel)
    plt.title(title)
    plt.show()
def bar_chart_ver(x, y, title, xlabel, ylabel):
    """Vertical bar chart: categories in ``x``, bar heights in ``y``."""
    plt.bar(x, y, color='#ea6869')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.show()
def stacked_bar_chart_hor(y, values, categories, title, xlabel, ylabel, width, colors):
    """Horizontal stacked bars: one row of ``values`` per category, stacked left to right."""
    plt.figure(figsize=(10, 5))
    past = None  # running left edge for the next segment of each bar
    for i, row in enumerate(values):
        if past is None:
            # First category starts at the axis.
            plt.barh(y, row, width, label=categories[i], color=colors[i])
            past = np.array(row)
        else:
            plt.barh(y, row, width, left=past, label=categories[i], color=colors[i])
            past = np.array(row) + past
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend()
    plt.show()
def stacked_bar_chart_ver(x, values, categories, title, xlabel, ylabel, width, colors):
    """Vertical stacked bars: one row of ``values`` per category, stacked bottom to top."""
    plt.figure(figsize=(10, 5))
    past = None  # running bottom edge for the next segment of each bar
    for i, row in enumerate(values):
        if past is None:
            # First category sits on the axis.
            plt.bar(x, row, width, label=categories[i], color=colors[i])
            past = np.array(row)
        else:
            plt.bar(x, row, width, bottom=past, label=categories[i], color=colors[i])
            past = np.array(row) + past
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend()
    plt.show()
def top_places(df, places='biome', indicator='soy_equivalent_tonnes', top=5):
    """Rank the values of ``places`` by summed ``indicator``.

    Returns (labels, values) with the ``top - 1`` largest places kept by name
    and everything else folded into a final 'Other' bucket.
    """
    # Total the indicator per place; alphabetical insertion order keeps the
    # subsequent (stable) sort deterministic on ties.
    totals = {}
    for place in sorted(df[places].unique()):
        totals[place] = df[df[places] == place][indicator].sum()
    ranked = sorted(totals.items(), key=lambda kv: kv[1], reverse=True)
    labels = [name for name, _ in ranked]
    values = [total for _, total in ranked]
    keep = top - 1
    top_labels = labels[:keep] + ['Other']
    top_values = values[:keep] + [np.array(values[keep:]).sum()]
    return top_labels, top_values
# ## 2017 - Trade Volume
#
# 
# **Global**
year = 2017
tmp = df[df['year'] == year]
# Total tonnage traded in the selected year (the f-string renders the summary).
f"{tmp['soy_equivalent_tonnes'].sum()} t of Soy was produced in Brazil in {year}"
# **Biome / Exporter / Port / Importer / Country**
# NOTE: `filter` shadows the builtin of the same name; kept as-is for the notebook.
filter='biome'
title='Biome'
# top=6 -> 5 named biomes plus an aggregated 'Other' bucket.
top_labels, top_values = top_places(tmp, places=filter, indicator='soy_equivalent_tonnes', top=6)
# Drop the 'Other' bucket and reverse so the largest bar ends up on top.
top_labels = top_labels[:-1][::-1]
top_values = top_values[:-1][::-1]
bar_chart_hor(x=top_labels, y=top_values, title=title, xlabel='t')
# ## 2003-2017 - Trade Volume
#
# 
# **Global**
# Aggregate yearly totals for the full date range of the dataset.
soy_volume = []
years = []
for year in df["year"].unique():
    tmp = df[df["year"] == year]
    soy_volume.append(tmp['soy_equivalent_tonnes'].sum())
    years.append(year)
bar_chart_ver(x=years, y=soy_volume, title='Soy produced in Brazil', xlabel='years', ylabel='t')
# **Biome / Exporter / Port / Importer / Country**
# NOTE: `filter` shadows the builtin of the same name; kept as-is for the notebook.
filter='biome'
title='Biome'
tmp = df
# Top 5 biomes overall; the trailing 'Other' bucket is dropped below.
top_labels, top_values = top_places(tmp, places=filter, indicator='soy_equivalent_tonnes', top=6)
top_labels = top_labels[:-1]
values = []
years = df["year"].unique()
# Build one yearly tonnage series per biome for the stacked chart.
for label in top_labels:
    value_label = []
    for year in years:
        tmp = df[(df["year"] == year) & (df[filter] == label)]
        value_label.append(tmp['soy_equivalent_tonnes'].sum())
    values.append(value_label)
stacked_bar_chart_ver(x=years, values=values, categories=top_labels, title=title, xlabel='years', ylabel='t', width = 0.35,
                      colors = ['#ea6869', '#ffeb8b', '#2d586e', '#007d29', '#b4008a', '#06ff67', '#8339aa', '#f3fb00', '#7193ff'])
# ## 2017 - Trade Volume - Zero Deforestation Commitment (Exporter)
#
# 
# **Global**
year = 2017
tmp = df[df['year'] == year]
# NOTE(review): fillna with axis=1 and inplace=True on a filtered slice triggers
# pandas' SettingWithCopyWarning and may not update tmp as intended — confirm.
tmp.fillna('Unknown', axis=1, inplace= True)
labels = ['Unknown', 'Company commitment', 'None', 'Soy Moratorium']
sizes = []
# NOTE(review): .count() tallies shipment rows rather than summing tonnes —
# verify this matches the chart's intent.
for label in labels:
    sizes.append(tmp[tmp['zero_deforestation'] == label]['soy_equivalent_tonnes'].count())
# Donut chart
donut_chart(labels=labels, sizes=sizes,
            title=f'Zero deforestation commitment of Soy produced in Brazil in {year}',
            colors=['#f2f2f2', '#C2E699', '#e36845', '#1D6837'])
# **Biome / Exporter / Port / Importer / Country**
filter = 'biome'
title = 'Biome'
top_labels, top_values = top_places(tmp, places=filter, indicator='soy_equivalent_tonnes', top=6)
# Reverse, then drop the first entry (the 'Other' bucket).
top_labels = top_labels[::-1][1:]
top_values = top_values[::-1][1:]
values = []
categories = ['Unknown', 'Company commitment', 'None', 'Soy Moratorium']
# One tonnage series per commitment category, across the top biomes.
for cat in categories:
    value_label = []
    for label in top_labels:
        tmp_label = tmp[(tmp['zero_deforestation'] == cat) & (tmp[filter] == label)]
        value_label.append(tmp_label['soy_equivalent_tonnes'].sum())
    values.append(value_label)
stacked_bar_chart_hor(y=top_labels, values=values, categories=categories, title=title, xlabel='t', ylabel='', width = 0.35,
                      colors=['#f2f2f2', '#C2E699', '#e36845', '#1D6837'])
# ## 2003-2017 - Trade Volume - Zero Deforestation Commitment (Exporter)
#
# 
# **Global**
tmp = df
# NOTE(review): tmp is just another reference to df here, so this inplace
# fillna mutates df itself — confirm that is intended.
tmp.fillna('Unknown', axis=1, inplace= True)
values = []
labels = ['Unknown', 'Company commitment', 'None', 'Soy Moratorium']
years = tmp["year"].unique()
# One yearly tonnage series per commitment category.
for label in labels:
    value_label = []
    for year in years:
        tmp_label = tmp[(tmp["year"] == year) & (tmp['zero_deforestation'] == label)]
        value_label.append(tmp_label['soy_equivalent_tonnes'].sum())
    values.append(value_label)
stacked_bar_chart_ver(x=years, values=values, categories=labels, title='Zero deforestation commitment of Soy produced in Brazil', xlabel='years', ylabel='t', width = 0.35,
                      colors = ['#f2f2f2', '#C2E699', '#e36845', '#1D6837'])
# **Biome / Exporter / Port / Importer / Country**
# Restrict to a single biome and repeat the per-year breakdown.
filter_1 = 'biome'
filter_2 = 'AMAZONIA'
tmp_biome = tmp[tmp[filter_1] == filter_2]
values = []
years = tmp_biome["year"].unique()
for label in labels:
    value_label = []
    for year in years:
        tmp_label = tmp_biome[(tmp_biome["year"] == year) & (tmp_biome['zero_deforestation'] == label)]
        value_label.append(tmp_label['soy_equivalent_tonnes'].sum())
    values.append(value_label)
stacked_bar_chart_ver(x=years, values=values, categories=labels, title=f'Zero deforestation commitment of Soy produced in {filter_2}', xlabel='years', ylabel='t', width = 0.35,
                      colors = ['#f2f2f2', '#C2E699', '#e36845', '#1D6837'])
# ## 2017 - Trade Volume - Selection
#
# 
# **Global**
year = 2017
tmp = df[df['year'] == year]
# NOTE(review): fillna with axis=1 and inplace=True on a filtered slice triggers
# pandas' SettingWithCopyWarning and may not update tmp as intended — confirm.
tmp.fillna('Unknown', axis=1, inplace= True)
# Hand-picked destination countries to compare.
labels = ['Netherlands', 'Thailand', 'Spain', 'South Korea', 'Cuba']
sizes = []
# NOTE(review): .count() tallies shipment rows rather than summing tonnes —
# verify this matches the chart's intent.
for label in labels:
    sizes.append(tmp[tmp['country'] == label]['soy_equivalent_tonnes'].count())
# Donut chart
donut_chart(labels=labels, sizes=sizes,
            title=f'Soy exported in {year} by',
            colors=['#ea6869', '#ffeb8b', '#2d586e', '#007d29', '#b4008a'])
# **Biome / Exporter / Port / Importer / Country**
filter = 'biome'
title = 'Biome'
top_labels, top_values = top_places(tmp, places=filter, indicator='soy_equivalent_tonnes', top=6)
# Reverse, then drop the first entry (the 'Other' bucket).
top_labels = top_labels[::-1][1:]
top_values = top_values[::-1][1:]
values = []
categories = ['Netherlands', 'Thailand', 'Spain', 'South Korea', 'Cuba']
# One tonnage series per destination country, across the top biomes.
for cat in categories:
    value_label = []
    for label in top_labels:
        tmp_label = tmp[(tmp['country'] == cat) & (tmp[filter] == label)]
        value_label.append(tmp_label['soy_equivalent_tonnes'].sum())
    values.append(value_label)
stacked_bar_chart_hor(y=top_labels, values=values, categories=categories, title=title, xlabel='t', ylabel='', width = 0.35,
                      colors=['#ea6869', '#ffeb8b', '#2d586e', '#007d29', '#b4008a'])
| Trase/TRASE_widgets_supply_chain.ipynb |
# + [markdown] colab_type="text" id="copyright-notice"
# #### Copyright 2017 Google LLC.
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="copyright-notice2"
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="mPa95uXvcpcn"
# # 使用神经网络对手写数字进行分类
# + [markdown] colab_type="text" id="Fdpn8b90u8Tp"
# 
# + [markdown] colab_type="text" id="c7HLCm66Cs2p"
# **学习目标:**
# * 训练线性模型和神经网络,以对传统 [MNIST](http://yann.lecun.com/exdb/mnist/) 数据集中的手写数字进行分类
# * 比较线性分类模型和神经网络分类模型的效果
# * 可视化神经网络隐藏层的权重
# + [markdown] colab_type="text" id="HSEh-gNdu8T0"
# 我们的目标是将每个输入图片与正确的数字相对应。我们会创建一个包含几个隐藏层的神经网络,并在顶部放置一个归一化指数层,以选出最合适的类别。
# + [markdown] colab_type="text" id="2NMdE1b-7UIH"
# ## 设置
#
# 首先,我们下载数据集、导入 TensorFlow 和其他实用工具,并将数据加载到 *Pandas* `DataFrame`。请注意,此数据是原始 MNIST 训练数据的样本;我们随机选择了 20000 行。
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "test": {"output": "ignore", "timeout": 600}} colab_type="code" id="4LJ4SD8BWHeh"
from __future__ import print_function
import glob
import math
import os
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
# Silence TensorFlow info/warning chatter during training.
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
# Each row: the class label in column 0, then 784 pixel intensities (28x28).
mnist_dataframe = pd.read_csv(
    "https://storage.googleapis.com/mledu-datasets/mnist_train_small.csv",
    sep=",",
    header=None)
# Use just the first 10,000 records for training/validation
mnist_dataframe = mnist_dataframe.head(10000)
# Shuffle rows so the positional train/validation split below is random.
mnist_dataframe = mnist_dataframe.reindex(np.random.permutation(mnist_dataframe.index))
mnist_dataframe.head()
# + [markdown] colab_type="text" id="kg0-25p2mOi0"
# 第一列中包含类别标签。其余列中包含特征值,每个像素对应一个特征值,有 `28×28=784` 个像素值,其中大部分像素值都为零;您也许需要花一分钟时间来确认它们不*全部*为零。
# + [markdown] colab_type="text" id="PQ7vuOwRCsZ1"
# 
# + [markdown] colab_type="text" id="dghlqJPIu8UM"
# 这些样本都是分辨率相对较低、对比度相对较高的手写数字图片。`0-9` 这十个数字中的每个可能出现的数字均由唯一的类别标签表示。因此,这是一个具有 10 个类别的多类别分类问题。
#
# 现在,我们解析一下标签和特征,并查看几个样本。注意 `loc` 的使用,借助 `loc`,我们能够基于原来的位置抽出各列,因为此数据集中没有标题行。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "test": {"output": "ignore", "timeout": 600}} colab_type="code" id="JfFWWvMWDFrR"
def parse_labels_and_features(dataset):
    """Split a raw MNIST frame into labels and scaled pixel features.

    This is a good place to scale or transform the features if needed.

    Args:
      dataset: A Pandas `Dataframe`, containing the label on the first column and
        monochrome pixel values on the remaining columns, in row major order.

    Returns:
      A `tuple` `(labels, features)`:
        labels: A Pandas `Series`.
        features: A Pandas `DataFrame`.
    """
    labels = dataset[0]
    # Columns 1..784 hold the pixels; DataFrame.loc ranges are inclusive at both ends.
    pixel_columns = dataset.loc[:, 1:784]
    # Rescale monochrome intensities from [0, 255] down to [0, 1].
    features = pixel_columns / 255
    return labels, features
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="mFY_-7vZu8UU"
# First 7,500 shuffled rows for training, the remaining 2,500 for validation.
training_targets, training_examples = parse_labels_and_features(mnist_dataframe[:7500])
training_examples.describe()
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="4-Vgg-1zu8Ud"
validation_targets, validation_examples = parse_labels_and_features(mnist_dataframe[7500:10000])
validation_examples.describe()
# + [markdown] colab_type="text" id="wrnAI1v6u8Uh"
# 显示一个随机样本及其对应的标签。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="s-euVJVtu8Ui"
# Pick a random training example and render its 28x28 pixel grid.
rand_example = np.random.choice(training_examples.index)
_, ax = plt.subplots()
ax.matshow(training_examples.loc[rand_example].values.reshape(28, 28))
ax.set_title("Label: %i" % training_targets.loc[rand_example])
ax.grid(False)
# + [markdown] colab_type="text" id="ScmYX7xdZMXE"
# ## 任务 1:为 MNIST 构建线性模型
#
# 首先,我们创建一个基准模型,作为比较对象。`LinearClassifier` 可提供一组 *k* 类一对多分类器,每个类别(共 *k* 个)对应一个分类器。
#
# 您会发现,除了报告准确率和绘制对数损失函数随时间变化情况的曲线图之外,我们还展示了一个[**混淆矩阵**](https://en.wikipedia.org/wiki/Confusion_matrix)。混淆矩阵会显示错误分类为其他类别的类别。哪些数字相互之间容易混淆?
#
# 另请注意,我们会使用 `log_loss` 函数跟踪模型的错误。不应将此函数与用于训练的 `LinearClassifier` 内部损失函数相混淆。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="cpoVC4TSdw5Z"
def construct_feature_columns():
    """Construct the TensorFlow Feature Columns.

    Returns:
      A set of feature columns
    """
    # One numeric column covering all 28x28 = 784 pixel intensities.
    return {tf.feature_column.numeric_column('pixels', shape=784)}
# + [markdown] colab_type="text" id="kMmL89yGeTfz"
# 在本次练习中,我们会对训练和预测使用单独的输入函数,并将这些函数分别嵌套在 `create_training_input_fn()` 和 `create_predict_input_fn()` 中,这样一来,我们就可以调用这些函数,以返回相应的 `_input_fn`,并将其传递到 `.train()` 和 `.predict()` 调用。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="OeS47Bmn5Ms2"
def create_training_input_fn(features, labels, batch_size, num_epochs=None, shuffle=True):
    """A custom input_fn for sending MNIST data to the estimator for training.

    Args:
      features: The training features.
      labels: The training labels.
      batch_size: Batch size to use during training.
      num_epochs: Number of passes over the data; `None` repeats indefinitely.
      shuffle: Whether to shuffle the batched dataset.

    Returns:
      A function that returns batches of training features and labels during
      training.
    """
    def _input_fn(num_epochs=None, shuffle=True):
        # Input pipelines are reset with each call to .train(). To ensure model
        # gets a good sampling of data, even when steps is small, we
        # shuffle all the data before creating the Dataset object
        idx = np.random.permutation(features.index)
        raw_features = {"pixels":features.reindex(idx)}
        raw_targets = np.array(labels[idx])
        ds = Dataset.from_tensor_slices((raw_features,raw_targets)) # warning: 2GB limit
        ds = ds.batch(batch_size).repeat(num_epochs)
        # NOTE(review): shuffle is applied after batch(), so whole batches are
        # shuffled rather than individual examples — confirm this is intended.
        if shuffle:
            ds = ds.shuffle(10000)
        # Return the next batch of data
        feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
        return feature_batch, label_batch
    return _input_fn
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="8zoGWAoohrwS"
def create_predict_input_fn(features, labels, batch_size):
    """A custom input_fn for sending mnist data to the estimator for predictions.

    Args:
      features: The features to base predictions on.
      labels: The labels of the prediction examples.
      batch_size: Batch size used when batching the prediction examples.

    Returns:
      A function that returns features and labels for predictions.
    """
    def _input_fn():
        # Prediction uses the data as-is: no shuffling, no repeat.
        raw_features = {"pixels": features.values}
        raw_targets = np.array(labels)
        ds = Dataset.from_tensor_slices((raw_features, raw_targets)) # warning: 2GB limit
        ds = ds.batch(batch_size)
        # Return the next batch of data
        feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
        return feature_batch, label_batch
    return _input_fn
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="G6DjSLZMu8Um"
def train_linear_classification_model(
        learning_rate,
        steps,
        batch_size,
        training_examples,
        training_targets,
        validation_examples,
        validation_targets):
    """Trains a linear classification model for the MNIST digits dataset.

    In addition to training, this function also prints training progress information,
    a plot of the training and validation loss over time, and a confusion
    matrix.

    Args:
      learning_rate: A `float`, the learning rate to use.
      steps: A non-zero `int`, the total number of training steps. A training step
        consists of a forward and backward pass using a single batch.
      batch_size: A non-zero `int`, the batch size.
      training_examples: A `DataFrame` containing the training features.
      training_targets: A `DataFrame` containing the training labels.
      validation_examples: A `DataFrame` containing the validation features.
      validation_targets: A `DataFrame` containing the validation labels.

    Returns:
      The trained `LinearClassifier` object.
    """
    periods = 10
    steps_per_period = steps / periods

    # Create the input functions.
    predict_training_input_fn = create_predict_input_fn(
        training_examples, training_targets, batch_size)
    predict_validation_input_fn = create_predict_input_fn(
        validation_examples, validation_targets, batch_size)
    training_input_fn = create_training_input_fn(
        training_examples, training_targets, batch_size)

    # Create a LinearClassifier object.
    my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
    # Clip gradient norms to keep updates stable.
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    classifier = tf.estimator.LinearClassifier(
        feature_columns=construct_feature_columns(),
        n_classes=10,
        optimizer=my_optimizer,
        config=tf.estimator.RunConfig(keep_checkpoint_max=1)
    )

    # Train the model, but do so inside a loop so that we can periodically assess
    # loss metrics.
    print("Training model...")
    print("LogLoss error (on validation data):")
    training_errors = []
    validation_errors = []
    for period in range (0, periods):
        # Train the model, starting from the prior state.
        classifier.train(
            input_fn=training_input_fn,
            steps=steps_per_period
        )
        # Take a break and compute probabilities.
        training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
        training_probabilities = np.array([item['probabilities'] for item in training_predictions])
        training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
        training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
        validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
        validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
        validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
        validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
        # Compute training and validation errors.
        # NOTE(review): log_loss is fed one-hot *class ids*, not the predicted
        # probabilities (training_probabilities is computed but unused) — this
        # tracks a clipped 0/1 loss rather than true log loss; confirm intended.
        training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
        validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
        # Occasionally print the current loss.
        print(" period %02d : %0.2f" % (period, validation_log_loss))
        # Add the loss metrics from this period to our list.
        training_errors.append(training_log_loss)
        validation_errors.append(validation_log_loss)
    print("Model training finished.")
    # Remove event files to save disk space.
    _ = map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*')))

    # Calculate final predictions (not probabilities, as above).
    final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
    final_predictions = np.array([item['class_ids'][0] for item in final_predictions])

    accuracy = metrics.accuracy_score(validation_targets, final_predictions)
    print("Final accuracy (on validation data): %0.2f" % accuracy)

    # Output a graph of loss metrics over periods.
    plt.ylabel("LogLoss")
    plt.xlabel("Periods")
    plt.title("LogLoss vs. Periods")
    plt.plot(training_errors, label="training")
    plt.plot(validation_errors, label="validation")
    plt.legend()
    plt.show()

    # Output a plot of the confusion matrix.
    cm = metrics.confusion_matrix(validation_targets, final_predictions)
    # Normalize the confusion matrix by row (i.e by the number of samples
    # in each class)
    cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
    ax = sns.heatmap(cm_normalized, cmap="bone_r")
    ax.set_aspect(1)
    plt.title("Confusion matrix")
    plt.ylabel("True label")
    plt.xlabel("Predicted label")
    plt.show()

    return classifier
# + [markdown] colab_type="text" id="ItHIUyv2u8Ur"
# **花费 5 分钟的时间了解一下使用这种形式的线性模型时,准确率方面表现如何。在本次练习中,为自己设定限制,仅使用批量大小、学习速率和步数这三个超参数进行试验。**
#
# 如果您从上述任何试验中得到的准确率约为 0.9,即可停止试验。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="yaiIhIQqu8Uv"
# Baseline hyperparameters for the student to tune (aim for ~0.9 accuracy).
classifier = train_linear_classification_model(
    learning_rate=0.02,
    steps=100,
    batch_size=10,
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)
# + [markdown] colab_type="text" id="266KQvZoMxMv"
# ### 解决方案
#
# 点击下方即可查看一种可能的解决方案。
# + [markdown] colab_type="text" id="lRWcn24DM3qa"
# 以下是一组使准确率应该约为 0.9 的参数。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="TGlBMrUoM1K_"
# Reference solution: longer training with a larger batch size.
_ = train_linear_classification_model(
    learning_rate=0.03,
    steps=1000,
    batch_size=30,
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)
# + [markdown] colab_type="text" id="mk095OfpPdOx"
# ## 任务 2:使用神经网络替换线性分类器
#
# **使用 [`DNNClassifier`](https://www.tensorflow.org/api_docs/python/tf/contrib/learn/DNNClassifier) 替换上面的 LinearClassifier,并查找可实现 0.95 或更高准确率的参数组合。**
#
# 您可能希望尝试 Dropout 等其他正则化方法。这些额外的正则化方法已记录在 `DNNClassifier` 类的注释中。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="rm8P_Ttwu8U4"
#
# YOUR CODE HERE: Replace the linear classifier with a neural network.
#
# + [markdown] colab_type="text" id="TOfmiSvqu8U9"
# 获得出色的模型后,通过评估我们将在下面加载的测试数据进行仔细检查,确认您没有过拟合验证集。
#
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="evlB5ubzu8VJ"
# Held-out MNIST test set (same format: label in column 0, then 784 pixels).
mnist_test_dataframe = pd.read_csv(
    "https://storage.googleapis.com/mledu-datasets/mnist_test.csv",
    sep=",",
    header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="PDuLd2Hcu8VL"
#
# YOUR CODE HERE: Calculate accuracy on the test set.
#
# + [markdown] colab_type="text" id="6sfw3LH0Oycm"
# ### 解决方案
#
# 点击下方即可查看可能的解决方案。
# + [markdown] colab_type="text" id="XatDGFKEO374"
# 除了神经网络专用配置(例如隐藏单元的超参数)之外,以下代码与原始的 `LinearClassifer` 训练代码几乎完全相同。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="kdNTx8jkPQUx"
def train_nn_classification_model(
        learning_rate,
        steps,
        batch_size,
        hidden_units,
        training_examples,
        training_targets,
        validation_examples,
        validation_targets):
    """Trains a neural network classification model for the MNIST digits dataset.

    In addition to training, this function also prints training progress information,
    a plot of the training and validation loss over time, as well as a confusion
    matrix.

    Args:
      learning_rate: A `float`, the learning rate to use.
      steps: A non-zero `int`, the total number of training steps. A training step
        consists of a forward and backward pass using a single batch.
      batch_size: A non-zero `int`, the batch size.
      hidden_units: A `list` of int values, specifying the number of neurons in each layer.
      training_examples: A `DataFrame` containing the training features.
      training_targets: A `DataFrame` containing the training labels.
      validation_examples: A `DataFrame` containing the validation features.
      validation_targets: A `DataFrame` containing the validation labels.

    Returns:
      The trained `DNNClassifier` object.
    """
    periods = 10
    # Caution: input pipelines are reset with each call to train.
    # If the number of steps is small, your model may never see most of the data.
    # So with multiple `.train` calls like this you may want to control the length
    # of training with num_epochs passed to the input_fn. Or, you can do a really-big shuffle,
    # or since it's in-memory data, shuffle all the data in the `input_fn`.
    steps_per_period = steps / periods

    # Create the input functions.
    # BUG FIX: this block previously appeared twice verbatim; the redundant
    # duplicate has been removed.
    predict_training_input_fn = create_predict_input_fn(
        training_examples, training_targets, batch_size)
    predict_validation_input_fn = create_predict_input_fn(
        validation_examples, validation_targets, batch_size)
    training_input_fn = create_training_input_fn(
        training_examples, training_targets, batch_size)

    # Create feature columns.
    feature_columns = [tf.feature_column.numeric_column('pixels', shape=784)]

    # Create a DNNClassifier object.
    my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
    # Clip gradient norms to keep updates stable.
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    classifier = tf.estimator.DNNClassifier(
        feature_columns=feature_columns,
        n_classes=10,
        hidden_units=hidden_units,
        optimizer=my_optimizer,
        # Consistency fix: use tf.estimator.RunConfig as the linear model above
        # does (tf.contrib.learn.RunConfig is the deprecated equivalent).
        config=tf.estimator.RunConfig(keep_checkpoint_max=1)
    )

    # Train the model, but do so inside a loop so that we can periodically assess
    # loss metrics.
    print("Training model...")
    print("LogLoss error (on validation data):")
    training_errors = []
    validation_errors = []
    for period in range (0, periods):
        # Train the model, starting from the prior state.
        classifier.train(
            input_fn=training_input_fn,
            steps=steps_per_period
        )
        # Take a break and compute probabilities.
        training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
        training_probabilities = np.array([item['probabilities'] for item in training_predictions])
        training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
        training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id,10)
        validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
        validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
        validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
        validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id,10)
        # Compute training and validation errors.
        # NOTE(review): log_loss is fed one-hot class ids, not the predicted
        # probabilities — this tracks a clipped 0/1 loss; kept for parity with
        # the linear version above.
        training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
        validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
        # Occasionally print the current loss.
        print(" period %02d : %0.2f" % (period, validation_log_loss))
        # Add the loss metrics from this period to our list.
        training_errors.append(training_log_loss)
        validation_errors.append(validation_log_loss)
    print("Model training finished.")
    # Remove event files to save disk space.
    _ = map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*')))

    # Calculate final predictions (not probabilities, as above).
    final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
    final_predictions = np.array([item['class_ids'][0] for item in final_predictions])

    accuracy = metrics.accuracy_score(validation_targets, final_predictions)
    print("Final accuracy (on validation data): %0.2f" % accuracy)

    # Output a graph of loss metrics over periods.
    plt.ylabel("LogLoss")
    plt.xlabel("Periods")
    plt.title("LogLoss vs. Periods")
    plt.plot(training_errors, label="training")
    plt.plot(validation_errors, label="validation")
    plt.legend()
    plt.show()

    # Output a plot of the confusion matrix.
    cm = metrics.confusion_matrix(validation_targets, final_predictions)
    # Normalize the confusion matrix by row (i.e by the number of samples
    # in each class)
    cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
    ax = sns.heatmap(cm_normalized, cmap="bone_r")
    ax.set_aspect(1)
    plt.title("Confusion matrix")
    plt.ylabel("True label")
    plt.xlabel("Predicted label")
    plt.show()

    return classifier
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="ZfzsTYGPPU8I"
# Two hidden layers of 100 units each; should reach roughly 0.95 accuracy.
classifier = train_nn_classification_model(
    learning_rate=0.05,
    steps=1000,
    batch_size=30,
    hidden_units=[100, 100],
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)
# + [markdown] colab_type="text" id="qXvrOgtUR-zD"
# 接下来,我们来验证测试集的准确率。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="scQNpDePSFjt"
# Held-out MNIST test set (same format: label in column 0, then 784 pixels).
mnist_test_dataframe = pd.read_csv(
    "https://storage.googleapis.com/mledu-datasets/mnist_test.csv",
    sep=",",
    header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="EVaWpWKvSHmu"
# Score the trained network on the untouched test set.
predict_test_input_fn = create_predict_input_fn(
    test_examples, test_targets, batch_size=100)
test_predictions = classifier.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['class_ids'][0] for item in test_predictions])
accuracy = metrics.accuracy_score(test_targets, test_predictions)
print("Accuracy on test data: %0.2f" % accuracy)
# + [markdown] colab_type="text" id="WX2mQBAEcisO"
# ## 任务 3:可视化第一个隐藏层的权重。
#
# 我们来花几分钟时间看看模型的 `weights_` 属性,以深入探索我们的神经网络,并了解它学到了哪些规律。
#
# 模型的输入层有 `784` 个权重,对应于 `28×28` 像素输入图片。第一个隐藏层将有 `784×N` 个权重,其中 `N` 指的是该层中的节点数。我们可以将这些权重重新变回 `28×28` 像素的图片,具体方法是将 `N` 个 `1×784` 权重数组*变形*为 `N` 个 `28×28` 大小数组。
#
# 运行以下单元格,绘制权重曲线图。请注意,此单元格要求名为 "classifier" 的 `DNNClassifier` 已经过训练。
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "test": {"output": "ignore", "timeout": 600}} colab_type="code" id="eUC0Z8nbafgG"
# List every trained variable, then pull the first hidden layer's kernel
# (one 784-weight column per hidden node).
print(classifier.get_variable_names())
weights0 = classifier.get_variable_value("dnn/hiddenlayer_0/kernel")
print("weights0 shape:", weights0.shape)
num_nodes = weights0.shape[1]
# Lay the per-node weight images out ten per row.
num_rows = int(math.ceil(num_nodes / 10.0))
fig, axes = plt.subplots(num_rows, 10, figsize=(20, 2 * num_rows))
for coef, ax in zip(weights0.T, axes.ravel()):
    # Weights in coef is reshaped from 1x784 to 28x28.
    ax.matshow(coef.reshape(28, 28), cmap=plt.cm.pink)
    ax.set_xticks(())
    ax.set_yticks(())
plt.show()
# + [markdown] colab_type="text" id="kL8MEhNgrx9N"
# 神经网络的第一个隐藏层应该会对一些级别特别低的特征进行建模,因此可视化权重可能只显示一些模糊的区域,也可能只显示数字的某几个部分。此外,您可能还会看到一些基本上是噪点(这些噪点要么不收敛,要么被更高的层忽略)的神经元。
#
# 在迭代不同的次数后停止训练并查看效果,可能会发现有趣的结果。
#
# **分别用 10、100 和 1000 步训练分类器。然后重新运行此可视化。**
#
# 您看到不同级别的收敛之间有哪些直观上的差异?
| ml/cc/exercises/zh-CN/multi-class_classification_of_handwritten_digits.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### We have access to a specific bank loan data. We have data about all loans asked to the bank, whether the bank decided to grant it and, finally, whether the borrower managed to repay it. We also have info about the borrower at the moment she is asking for the loan.
#
# ### You have to come up with a better strategy to grant loans. Specifically you should: Build a model which is better than the bank model. For simplicity, assume that:
#
# ##### If you grant the loan and it doesn’t get repaid, you lose 1
#
# ##### If you grant the loan and it does get repaid, you gain 1
#
# ##### If you don’t grant the loan, you gain 0
#
# #### (1) Using the rules above, compare bank profitability vs your model profitability.
#
# #### (2) Describe the impact of the most important variables on the prediction. Also, focus on the variable “is_employed”, which describes whether the borrower is employed when she asks for the loan. How does this variable impact the model? Explain why.
#
# #### (3) Are there any other variables, not in the data provided, that you’d have liked to include in the model?
# ### Load the package would be used
# +
import pandas as pd
# Wider console output for easier DataFrame inspection.
pd.set_option("display.max_columns", 10)
pd.set_option("display.width", 350)
import numpy as np
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams.update({"figure.autolayout": True})
import seaborn as sns
sns.set(style = "white")
sns.set(style = "whitegrid", color_codes = True)
from sklearn.metrics import confusion_matrix, auc, roc_curve, classification_report
# h2o backs the random-forest model and grid search used later on.
import h2o
from h2o.frame import H2OFrame
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.grid.grid_search import H2OGridSearch
# -
# ### Read in the data set
# Load the borrower attributes and the loan records from disk.
_borrower_csv = "../Datasets//loan/borrower_table.csv"
_loan_csv = "../Datasets//loan/loan_table.csv"
borrower = pd.read_csv(_borrower_csv)
loan = pd.read_csv(_loan_csv)
# ### Look into dataset
# +
# Quick sanity checks on the raw tables: shapes, previews, dtypes, and
# uniqueness of the join key.
print(borrower.shape)
print(loan.shape)
# .head() / .info() are methods -- they must be called; printing the bare
# attribute (as the original did) only shows the bound-method repr.
print(borrower.head())
print(loan.head())
borrower.info()
loan.info()
# loan_id must be unique in both tables for a clean one-to-one merge.
print(len(borrower["loan_id"]) == len(pd.unique(borrower["loan_id"])))
print(len(loan["loan_id"]) == len(pd.unique(loan["loan_id"])))
# -
# ### Data processing
# Inner-join loans with their borrower attributes on the shared key.
dat0 = loan.merge(borrower, on = "loan_id", how = "inner")
len(dat0)
print(list(dat0.columns))
dat0.head()
# Since the project aims to rebuild the loan granted model, a better outcome variable than the flag "loan_granted" is the flag of "loan_repaid". Accordingly, we first subset data with granted loans and use the loan_repaid as our binary outcome.
# .copy() makes an independent frame so the assignment below does not
# write through a slice view (avoids pandas' SettingWithCopyWarning).
dat = dat0[dat0["loan_granted"] == 1].copy()
print(len(dat))
dat["date"] = pd.to_datetime(dat["date"])
# ### Visualization
#
# The twelve near-identical "group, then plot the mean repaid rate" cells
# below are factored into two small helpers; every grp_* frame is still
# bound at module level so interactive inspection keeps working.
def _rate_by(col):
    """Return the mean loan_repaid rate of granted loans grouped by *col*."""
    return dat[[col, "loan_repaid"]].groupby(col).mean().reset_index()
def _plot_rate(grouped, col, label, kind):
    """Draw a bar (kind == "bar") or line chart of repaid rate vs *col*."""
    plt.figure(figsize = [12, 6])
    plot_fn = sns.barplot if kind == "bar" else sns.lineplot
    plot_fn(x = col, y = "loan_repaid", data = grouped, palette = "PuBuGn")
    plt.title("Loan Repaid Rate by " + label, fontsize = 16)
    plt.xlabel(label, fontsize = 12)
    plt.ylabel("Loan Repaid Rate", fontsize = 12)
# #### Should grant by loan purpose
grp_purpose = _rate_by("loan_purpose")
_plot_rate(grp_purpose, "loan_purpose", "Loan Purpose", "bar")
# #### Should grant by if first loan
grp_first_loan = _rate_by("is_first_loan")
_plot_rate(grp_first_loan, "is_first_loan", "If First Loan", "bar")
# #### Should grant by Fully Repaid Previous Loans
grp_fully_repaid = _rate_by("fully_repaid_previous_loans")
_plot_rate(grp_fully_repaid, "fully_repaid_previous_loans", "If Fully Repaid", "bar")
# #### Should grant by Currently Repaying Other Loans
grp_other_loans = _rate_by("currently_repaying_other_loans")
_plot_rate(grp_other_loans, "currently_repaying_other_loans", "If Other Loans", "bar")
# #### Should grant by Total Credit Card Limit
grp_card_limit = _rate_by("total_credit_card_limit")
_plot_rate(grp_card_limit, "total_credit_card_limit", "Total Credit Card Limit", "line")
# #### Should grant by Average Percentage Credit Card Limit Used Last Year
grp_limit_used = _rate_by("avg_percentage_credit_card_limit_used_last_year")
_plot_rate(grp_limit_used, "avg_percentage_credit_card_limit_used_last_year", "Limit Used", "line")
# #### Should grant by Saving Amount
grp_saving = _rate_by("saving_amount")
_plot_rate(grp_saving, "saving_amount", "Saving Amount", "line")
# #### Should grant by Checking Amount
grp_checking = _rate_by("checking_amount")
_plot_rate(grp_checking, "checking_amount", "Checking Amount", "line")
# ### Should grant by If Employed
grp_is_employed = _rate_by("is_employed")
_plot_rate(grp_is_employed, "is_employed", "If Employed", "bar")
# ### Should grant by Yearly Salary
grp_salary = _rate_by("yearly_salary")
_plot_rate(grp_salary, "yearly_salary", "Yearly Salary", "line")
# ### Should grant by Age
grp_age = _rate_by("age")
_plot_rate(grp_age, "age", "Age", "line")
# ### Should grant by Dependent Number
grp_dependent = _rate_by("dependent_number")
_plot_rate(grp_dependent, "dependent_number", "Dependent Number", "line")
# ### Build A Classifier
# +
#### Initialize H2O cluster
h2o.init()
h2o.remove_all()
dat_h2o = H2OFrame(dat)
# Mark the categorical predictors as factors.  The original cell converted
# is_first_loan twice; the loop collapses the duplication.
categorical_cols = ["is_first_loan", "loan_purpose", "fully_repaid_previous_loans",
                    "currently_repaying_other_loans", "is_employed"]
for col in categorical_cols:
    dat_h2o[col] = dat_h2o[col].asfactor()
dat_h2o.summary()
# -
# Stratified 66/34 train/test split on the binary outcome.
index = dat_h2o["loan_repaid"].stratified_split(test_frac = 0.34, seed = 2019)
train_dat = dat_h2o[index == "train"]
test_dat = dat_h2o[index == "test"]
X = ["is_first_loan", "fully_repaid_previous_loans", "currently_repaying_other_loans", "total_credit_card_limit",
     "avg_percentage_credit_card_limit_used_last_year", "saving_amount", "checking_amount", "is_employed",
     "yearly_salary", "age", "dependent_number"]
Y = "loan_repaid"
# Random forest: 100 trees, depth cap 20; mtries=-1 uses H2O's default
# (sqrt of the predictor count for classification).
RF = H2ORandomForestEstimator(balance_classes = False, ntrees = 100, max_depth = 20,
                              mtries = -1, seed = 2019, score_each_iteration = True)
RF.train(x = X, y = Y, training_frame = train_dat)
# +
train_true = train_dat.as_data_frame()["loan_repaid"].values
test_true = test_dat.as_data_frame()["loan_repaid"].values
# Use the positive-class probability column ("p1"), not the hard label in
# "predict": ROC/AUC needs a continuous score to sweep thresholds over,
# and a 0/1 label gives a degenerate two-point curve.  Downstream cells
# threshold these values at 0.5, which works for probabilities as well.
train_pred = RF.predict(train_dat).as_data_frame()["p1"].values
test_pred = RF.predict(test_dat).as_data_frame()["p1"].values
train_fpr, train_tpr, _ = roc_curve(train_true, train_pred)
test_fpr, test_tpr, _ = roc_curve(test_true, test_pred)
train_auc = np.round(auc(train_fpr, train_tpr), 3)
test_auc = np.round(auc(test_fpr, test_tpr), 3)
print(classification_report(y_true=test_true, y_pred = (test_pred > 0.5).astype(int)))
RF.varimp_plot()
# -
RF.partial_plot(data = train_dat, cols = ["is_first_loan", "fully_repaid_previous_loans", "currently_repaying_other_loans", "total_credit_card_limit",
                                          "avg_percentage_credit_card_limit_used_last_year", "saving_amount", "checking_amount", "is_employed",
                                          "yearly_salary", "age", "dependent_number"], plot = True)
# #### (1) Using the rules above, compare bank profitability vs your model profitability.
#### Model profitability: +1 for a granted loan that is repaid, -1 for a
#### granted loan that is not, 0 when the model declines to grant.
# Two fixes vs the original cell:
#   * pair predictions with their own ground truth (train then test) --
#     the stratified split reordered rows, so multiplying against the
#     differently-ordered `dat` frame was misaligned;
#   * apply the stated payoff (2*true - 1) so an unrepaid granted loan
#     costs -1 instead of being silently counted as 0.
true = np.concatenate([train_true, test_true])
prob = np.concatenate([train_pred, test_pred])
cate = prob > 0.5
profit_model = cate * (2 * true - 1)
#### Bank profitability
dat0["flag"] = 0
dat0.loc[dat0.loan_repaid == 1.0, "flag"] = 1
dat0.loc[(dat0.loan_repaid == 0.0) & (dat0.loan_granted == 1), "flag"] = -1
print(profit_model.sum())
print(dat0["flag"].sum())
# #### (2) Describe the impact of the most important variables on the prediction. Also, focus on the variable “is_employed”, which describes whether the borrower is employed when she asks for the loan. How does this variable impact the model? Explain why.
#
# See R solution.
# #### (3) Are there any other variables, not in the data provided, that you’d have liked to include in the model?
#
# See R solution.
| 12.Loan Granting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python3.9
# language: python
# name: python3
# ---
# # Numpy and Pandas versus Python dictionary
#
# We measure the memory size and access times for features of Text-Fabric.
#
# How much space does a loaded feature occupy in RAM?
#
# How fast can we look up the value of a feature for a given node?
#
# It turns out that nothing beats the Python dictionary.
# +
import gzip
from timeit import timeit
import numpy as np
import pandas as pd
from tf.core.data import Data
from tf.core.timestamp import Timestamp
from tf.advanced.helpers import dm
from pack import deepSize
# -
# # Data preparation
#
# We load some features from the BHSA.
#
# They have different sparsity characteristics, as we shall see.
# We also compile this data as Pandas data.
#
# The closest data structure is a
# [Pandas Series](https://pandas.pydata.org/docs/user_guide/dsintro.html).
#
# We also test it with a sparse array as data for the series.
#
# We start with a Series object based on the data of the feature.
# +
TEMP = "_temp"
# Features chosen for their different sparsity characteristics.
FEATURES = """
vs
g_word_utf8
rela
""".strip().split()
# Markdown table headers for the size and timing reports.  Plain string
# literals: the original f-prefixes were unnecessary (no placeholders).
HEAD = """\
feature | length | start | end | NaN | %NaN | python dict | numpy | pandas | pandas-sparse
--- | --- | --- | --- | --- | --- | --- | --- | --- | ---
"""
HEAD_TIME = """\
feature | python dict | numpy | pandas | pandas-sparse
--- | --- | --- | ---
"""
# Values that count as "missing" for the NaN statistics.
NONES = {None, "NA"}
TIMES = 1
# Node numbers probed in the access-time measurements.
KEYS = (100001, 1000001)
T = Timestamp()
# -
# We define a class of feature test objects where we store data in various representations.
#
# We measure memory usage and add methods to measure the access times.
class FeatureTest:
    """Load one Text-Fabric feature and measure its memory footprint and
    lookup time in four representations: python dict (TF's native form),
    a dense numpy string array, a pandas Series, and a pandas sparse
    Series."""
    def __init__(self, feat):
        dataObj = Data(f"{TEMP}/{feat}.tf", T)
        dataObj.load()
        T.indent(level=0)
        data = dataObj.data
        start = min(data)
        end = max(data)
        self.start = start
        self.end = end
        # Nodes in [start, end] whose value is absent or a None-marker.
        self.nan = sum(1 for n in range(start, end + 1) if data.get(n, None) in NONES)
        self.feat = feat
        self.data = data
        self.ln = len(data)
        self.mem = deepSize(data) // (1024 * 1024)
    def adjust(self, totalMax):
        """Record the global maximum node and derive the NaN percentage."""
        self.totalMax = totalMax
        self.nanPerc = self.nan * 100 / totalMax
    def numpy(self):
        """Materialize the feature as a dense numpy array of strings."""
        data = self.data
        totalMax = self.totalMax
        array = [data.get(i, "") for i in range(totalMax + 1)]
        # np.str (a deprecated alias for the builtin) was removed in
        # NumPy 1.24; plain str yields the same unicode dtype.
        dataN = np.array(array, str)
        self.dataN = dataN
        self.memN = deepSize(dataN) // (1024 * 1024)
    def pandas(self):
        """Materialize the feature as dense and sparse pandas Series."""
        data = self.data
        dataP = pd.Series(data, dtype="string")
        self.dataP = dataP
        self.memP = dataP.memory_usage(index=True, deep=True) // (1024 * 1024)
        totalMax = self.totalMax
        # "NA" doubles as the missing marker and the sparse fill value.
        array = [data.get(i, "NA") or "NA" for i in range(totalMax + 1)]
        dataSP = pd.Series(pd.arrays.SparseArray(array, fill_value="NA", dtype="string"))
        self.dataSP = dataSP
        self.memSP = dataSP.memory_usage(index=True, deep=False) // (1024 * 1024)
    def accessTime(self, times):
        """Time dict lookups; the timeit source references the local `data`."""
        data = self.data
        locs = locals()
        self.access = sum(timeit(f"data.get({key}, None)", globals=locs, number=times) for key in KEYS)
    def accessTimeN(self, times):
        """Time numpy indexed access (guarded by a bounds check)."""
        data = self.dataN
        locs = locals()
        self.accessN = sum(
            timeit(f"data[{key}] if {key} < data.size else None", globals=locs, number=times)
            for key in KEYS
        )
    def accessTimeP(self, times):
        """Time pandas Series .get access."""
        dataP = self.dataP
        locs = locals()
        self.accessP = sum(timeit(f"dataP.get({key})", globals=locs, number=times) for key in KEYS)
    def accessTimeSP(self, times):
        """Time sparse pandas Series .get access."""
        dataSP = self.dataSP
        locs = locals()
        self.accessSP = sum(timeit(f"dataSP.get({key})", globals=locs, number=times) for key in KEYS)
    def report(self):
        """Return one markdown row of sizes (column order matches HEAD)."""
        return (
            f"{self.feat} | "
            f"{self.ln} | "
            f"{self.start} | "
            f"{self.end} | "
            f"{self.nan} | "
            f"{self.nanPerc} | "
            f"{self.mem} MB | "
            f"{self.memN} MB | "
            f"{self.memP} MB | "
            f"{self.memSP} MB\n"
        )
    def reportTime(self):
        """Return one markdown row of timings (matches HEAD_TIME)."""
        return (
            f"{self.feat} | "
            f"{self.access} s | "
            f"{self.accessN} s | "
            f"{self.accessP} s | "
            f"{self.accessSP} s\n"
        )
# We collect the feature test objects in a general test object.
class DataTest:
    """Drive FeatureTest over all configured features and render the
    memory/timing comparison tables."""
    def __init__(self):
        registry = {}
        highest = 0
        T.indent(reset=True)
        # Stage 1: load every feature and track the largest node number.
        for name in FEATURES:
            T.info(f"stage1 {name}")
            tester = FeatureTest(name)
            highest = max(highest, tester.end)
            registry[name] = tester
        T.info("done")
        self.features = registry
        # Stage 2: build the dense numpy / pandas representations, all
        # sized to the same global maximum node.
        for name, tester in registry.items():
            T.info(f"stage2 {name}")
            tester.adjust(highest)
            tester.numpy()
            tester.pandas()
        T.info("done")
    def accessTime(self, times):
        """Measure lookup times in every representation, `times` reps each."""
        for name, tester in self.features.items():
            T.info(f"timing {name}")
            tester.accessTime(times)
            tester.accessTimeN(times)
            tester.accessTimeP(times)
            tester.accessTimeSP(times)
    def report(self):
        """Render the memory-size table as markdown."""
        rows = [tester.report() for tester in self.features.values()]
        dm(HEAD + "".join(rows))
    def reportTime(self):
        """Render the access-time table as markdown."""
        rows = [tester.reportTime() for tester in self.features.values()]
        dm(HEAD_TIME + "".join(rows))
    def test(self, feat, nodes):
        """Show the raw values of `feat` at `nodes` in every representation."""
        dt = self.features[feat]
        md = f"""\
tf | numpy | pandas | pandas-sparse
---|---|---|---
"""
        for i in nodes:
            md += f"{dt.data[i]} | {dt.dataN[i]} | {dt.dataP[i]} | {dt.dataSP[i]}\n"
        dm(md)
# We load the features and measure the sizes.
DT = DataTest()
DT.report()
# A few checks whether the data representations give back the right data:
DT.test("vs", range(1, 11))
DT.test("g_word_utf8", range(1, 11))
DT.test("rela", range(427608, 427619))
# Time 10,000 repeated lookups per key per representation, then report.
DT.accessTime(10000)
DT.reportTime()
# # Conclusions
#
# Storage in NumPy is worse than in a Python dict.
# The access time is also worse, in the order of 5 times.
#
# Storage in a Pandas series is slightly better space-wise than in a Python dict.
# However, the access time is 50 times worse.
#
# In a Pandas sparse series, the storage is much smaller, but the access time is 300-3000 times worse.
#
# For Text-Fabric, no performance gains are to be expected when turning to Pandas or Numpy as workhorses
# for storing and accessing features.
| test/pandas/pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bringing contextual word representations into your models
__author__ = "<NAME>"
__version__ = "CS224u, Stanford, Spring 2021"
# ## Contents
#
# 1. [Overview](#Overview)
# 1. [General set-up](#General-set-up)
# 1. [Hugging Face BERT models and tokenizers](#Hugging-Face-BERT-models-and-tokenizers)
# 1. [BERT featurization with Hugging Face](#BERT-featurization-with-Hugging-Face)
# 1. [Simple feed-forward experiment](#Simple-feed-forward-experiment)
# 1. [A feed-forward experiment with the sst module](#A-feed-forward-experiment-with-the-sst-module)
# 1. [An RNN experiment with the sst module](#An-RNN-experiment-with-the-sst-module)
# 1. [BERT fine-tuning with Hugging Face](#BERT-fine-tuning-with-Hugging-Face)
# 1. [HfBertClassifier](#HfBertClassifier)
# 1. [HfBertClassifier experiment](#HfBertClassifier-experiment)
# ## Overview
#
# This notebook provides a basic introduction to using pre-trained [BERT](https://github.com/google-research/bert) representations with the Hugging Face library. It is meant as a practical companion to our lecture on contextual word representations. The goal of this notebook is just to help you use these representations in your own work.
#
# If you haven't already, I encourage you to review the notebook [vsm_04_contextualreps.ipynb](vsm_04_contextualreps.ipynb) before working with this one. That notebook covers the fundamentals of these models; this one dives into the details more quickly.
#
# A number of the experiments in this notebook are resource-intensive. I've included timing information for the expensive steps, to give you a sense for how long things are likely to take. I ran this notebook on a laptop with a single NVIDIA RTX 2080 GPU.
# ## General set-up
#
# The following are requirements that you'll already have met if you've been working in this repository. As you can see, we'll use the [Stanford Sentiment Treebank](sst_01_overview.ipynb) for illustrations, and we'll try out a few different deep learning models.
# +
import os
from sklearn.metrics import classification_report
import torch
import torch.nn as nn
from transformers import BertModel, BertTokenizer
from torch_shallow_neural_classifier import TorchShallowNeuralClassifier
from torch_rnn_classifier import TorchRNNModel
from torch_rnn_classifier import TorchRNNClassifier
from torch_rnn_classifier import TorchRNNClassifierModel
from torch_rnn_classifier import TorchRNNClassifier
import sst
import utils
# -
utils.fix_random_seeds()
SST_HOME = os.path.join("data", "sentiment")
# The `transformers` library does a lot of logging. To avoid ending up with a cluttered notebook, I am changing the logging level. You might want to skip this as you scale up to building production systems, since the logging is very good – it gives you a lot of insights into what the models and code are doing.
import logging
logger = logging.getLogger()
logger.level = logging.ERROR
# ## Hugging Face BERT models and tokenizers
#
# We'll illustrate with the BERT-base cased model:
weights_name = 'bert-base-cased'
# There are lots of other options for pretrained weights. See [this Hugging Face directory](https://huggingface.co/models).
# Next, we specify a tokenizer and a model that match both each other and our choice of pretrained weights:
bert_tokenizer = BertTokenizer.from_pretrained(weights_name)
bert_model = BertModel.from_pretrained(weights_name)
# For modeling (as opposed to creating static representations), we will mostly process examples in batches – generally very small ones, as these models consume _a lot_ of memory. Here's a small batch of texts to use as the starting point for illustrations:
example_texts = [
"Encode sentence 1. [SEP] And sentence 2!",
"Bert knows Snuffleupagus"]
# We will often need to pad (and perhaps truncate) token lists so that we can work with fixed-dimensional tensors: The `batch_encode_plus` has a lot of options for doing this:
example_ids = bert_tokenizer.batch_encode_plus(
example_texts,
add_special_tokens=True,
return_attention_mask=True,
padding='longest')
example_ids.keys()
# The `token_type_ids` is used for multi-text inputs like NLI. The `'input_ids'` field gives the indices for each of the two examples:
example_ids['input_ids']
# Notice that the final two tokens of the second example are pad tokens.
#
# For fine-tuning, we want to avoid attending to padded tokens. The `'attention_mask'` captures the needed mask, which we'll be able to feed directly to the pretrained BERT model:
example_ids['attention_mask']
# Finally, we can run these indices and masks through the pretrained model:
# +
X_example = torch.tensor(example_ids['input_ids'])
X_example_mask = torch.tensor(example_ids['attention_mask'])
# no_grad: we only want the representations here, not gradients.
with torch.no_grad():
    reps = bert_model(X_example, attention_mask=X_example_mask)
# -
# Hugging Face BERT models create a special `pooler_output` representation that is the final representation above the [CLS] extended with a single layer of parameters:
reps.pooler_output.shape
# We have two examples, each represented by a single vector of dimension 768, which is $d_{model}$ for BERT base using the notation from [the original Transformers paper](https://arxiv.org/abs/1706.03762). This is an easy basis for fine-tuning, as we will see.
#
# We can also access the final output for each state:
reps.last_hidden_state.shape
# Here, we have 2 examples, each padded to the length of the longer one (12), and each of those representations has dimension 768. These representations can be used for sequence modeling, or pooled somehow for simple classifiers.
# Those are all the essential ingredients for working with these parameters in Hugging Face. Of course, the library has a lot of other functionality, but the above suffices to featurize and to fine-tune.
# ## BERT featurization with Hugging Face
#
# To start, we'll use the Hugging Face interfaces just to featurize examples to create inputs to a separate model. In this setting, the BERT parameters are frozen.
def bert_phi(text):
    """Featurize `text` with the frozen BERT model.

    Returns a (seq_len, 768) numpy array: the final hidden state for
    every wordpiece (including [CLS]/[SEP]).
    """
    token_ids = bert_tokenizer.encode(text, add_special_tokens=True)
    batch = torch.tensor([token_ids])
    with torch.no_grad():
        outputs = bert_model(batch)
    hidden = outputs.last_hidden_state
    return hidden.squeeze(0).numpy()
# ### Simple feed-forward experiment
#
# For a simple feed-forward experiment, we can get the representation of the `[CLS]` tokens and use them as the inputs to a shallow neural network:
def bert_classifier_phi(text):
    """Reduce the per-token BERT representations of `text` to the single
    [CLS] vector (index 0)."""
    # reps.mean(axis=0) is another good, easy pooling option.
    return bert_phi(text)[0]
# Next we read in the SST train and dev splits:
# +
train = sst.train_reader(SST_HOME)
dev = sst.dev_reader(SST_HOME)
# -
# Split the input/output pairs out into separate lists:
# +
X_str_train = train.sentence.values
y_train = train.label.values
X_str_dev = dev.sentence.values
y_dev = dev.label.values
# -
# In the next step, we featurize all of the examples. These steps are likely to be the slowest in these experiments:
# %time X_train = [bert_classifier_phi(text) for text in X_str_train]
# %time X_dev = [bert_classifier_phi(text) for text in X_str_dev]
# Now that all the examples are featurized, we can fit a model and evaluate it:
model = TorchShallowNeuralClassifier(
early_stopping=True,
hidden_dim=300)
# %time _ = model.fit(X_train, y_train)
preds = model.predict(X_dev)
print(classification_report(y_dev, preds, digits=3))
# ### A feed-forward experiment with the sst module
#
# It is straightforward to conduct experiments like the above using `sst.experiment`, which will enable you to do a wider range of experiments without writing or copy-pasting a lot of code.
def fit_shallow_network(X, y):
    """Fit and return a 300-unit shallow neural classifier with early
    stopping enabled."""
    classifier = TorchShallowNeuralClassifier(
        early_stopping=True,
        hidden_dim=300)
    classifier.fit(X, y)
    return classifier
# %%time
_ = sst.experiment(
sst.train_reader(SST_HOME),
bert_classifier_phi,
fit_shallow_network,
assess_dataframes=sst.dev_reader(SST_HOME),
vectorize=False) # Pass in the BERT reps directly!
# ### An RNN experiment with the sst module
#
# We can also use BERT representations as the input to an RNN. There is just one key change from how we used these models before:
#
# * Previously, we would feed in lists of tokens, and they would be converted to indices into a fixed embedding space. This presumes that all words have the same representation no matter what their context is.
#
# * With BERT, we skip the embedding entirely and just feed in lists of BERT vectors, which means that the same word can be represented in different ways.
#
# `TorchRNNClassifier` supports this via `use_embedding=False`. In turn, you needn't supply a vocabulary:
def fit_rnn(X, y):
    """Fit and return an RNN classifier over precomputed BERT states.

    `use_embedding=False` means the inputs are already vectors, so no
    vocabulary/embedding table is needed.
    """
    rnn = TorchRNNClassifier(
        vocab=[],
        early_stopping=True,
        use_embedding=False)  # Pass in the BERT hidden states directly!
    rnn.fit(X, y)
    return rnn
# %%time
_ = sst.experiment(
sst.train_reader(SST_HOME),
bert_phi,
fit_rnn,
assess_dataframes=sst.dev_reader(SST_HOME),
vectorize=False) # Pass in the BERT hidden states directly!
# ## BERT fine-tuning with Hugging Face
#
# The above experiments are quite successful – BERT gives us a reliable boost compared to other methods we've explored for the SST task. However, we might expect to do even better if we fine-tune the BERT parameters as part of fitting our SST classifier. To do that, we need to incorporate the Hugging Face BERT model into our classifier. This too is quite straightforward.
# ### HfBertClassifier
#
# The most important step is to create an `nn.Module` subclass that has, for its parameters, both the BERT model and parameters for our own classifier. Here we define a very simple fine-tuning set-up in which some layers built on top of the output corresponding to `[CLS]` are used as the basis for the SST classifier:
class HfBertClassifierModel(nn.Module):
    """Pretrained BERT encoder with a single linear classification head
    on top of the pooled [CLS] output."""
    def __init__(self, n_classes, weights_name='bert-base-cased'):
        super().__init__()
        self.n_classes = n_classes
        self.weights_name = weights_name
        self.bert = BertModel.from_pretrained(self.weights_name)
        # Ensure the pretrained weights are updated during training.
        self.bert.train()
        self.hidden_dim = self.bert.embeddings.word_embeddings.embedding_dim
        # The classifier head is the only newly initialized parameter set.
        self.classifier_layer = nn.Linear(self.hidden_dim, self.n_classes)
    def forward(self, indices, mask):
        """Encode `indices` (masking padding) and classify from the
        pooled [CLS] representation."""
        outputs = self.bert(indices, attention_mask=mask)
        pooled = outputs.pooler_output
        return self.classifier_layer(pooled)
# As you can see, `self.bert` does the heavy-lifting: it reads in all the pretrained BERT parameters, and I've specified `self.bert.train()` just to make sure that these parameters can be updated during our training process.
#
# In `forward`, `self.bert` is used to process inputs, and then `pooler_output` is fed into `self.classifier_layer`. Hugging Face has already added a layer on top of the actual output for `[CLS]`, so we can specify the model as
#
# $$
# \begin{align}
# [h_{1}, \ldots, h_{n}] &= \text{BERT}([x_{1}, \ldots, x_{n}]) \\
# h &= \tanh(h_{1}W_{hh} + b_{h}) \\
# y &= \textbf{softmax}(hW_{hy} + b_{y})
# \end{align}$$
#
# for a tokenized input sequence $[x_{1}, \ldots, x_{n}]$.
#
# The Hugging Face documentation somewhat amusingly says, of `pooler_output`,
#
# > This output is usually _not_ a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence.
#
# which is entirely reasonable, but it will require more resources, so we'll do the simpler thing here.
# For the training and prediction interface, we can subclass `TorchShallowNeuralClassifier` so that we don't have to write any of our own data-handling, training, or prediction code. The central changes are using `HfBertClassifierModel` in `build_graph` and processing the data with `batch_encode_plus`.
class HfBertClassifier(TorchShallowNeuralClassifier):
    """Fine-tuning interface: tokenizes raw text with the BERT tokenizer
    and trains HfBertClassifierModel via the inherited optimization loop."""
    def __init__(self, weights_name, *args, **kwargs):
        self.weights_name = weights_name
        self.tokenizer = BertTokenizer.from_pretrained(self.weights_name)
        super().__init__(*args, **kwargs)
        # Expose `weights_name` through get_params/set_params.
        self.params += ['weights_name']
    def build_graph(self):
        """Return the nn.Module that the inherited `fit` will optimize."""
        return HfBertClassifierModel(self.n_classes_, self.weights_name)
    def build_dataset(self, X, y=None):
        """Tokenize X (and index-encode y when given) into a TensorDataset."""
        encoded = self.tokenizer.batch_encode_plus(
            X,
            max_length=None,
            add_special_tokens=True,
            padding='longest',
            return_attention_mask=True)
        indices = torch.tensor(encoded['input_ids'])
        mask = torch.tensor(encoded['attention_mask'])
        if y is None:
            return torch.utils.data.TensorDataset(indices, mask)
        self.classes_ = sorted(set(y))
        self.n_classes_ = len(self.classes_)
        label_index = {label: i for i, label in enumerate(self.classes_)}
        targets = torch.tensor([label_index[label] for label in y])
        return torch.utils.data.TensorDataset(indices, mask, targets)
# ### HfBertClassifier experiment
#
# That's it! Let's see how we do on the SST binary, root-only problem. Because fine-tuning is expensive, we'll conduct a modest hyperparameter search and run the model for just one epoch per setting evaluation, as we did when [assessing NLI models](nli_02_models.ipynb).
def bert_fine_tune_phi(text):
    """Identity featurizer: pass the raw text through unchanged so the
    classifier's own tokenizer/BERT handle it during fine-tuning."""
    return text
def fit_hf_bert_classifier_with_hyperparameter_search(X, y):
    """Grid-search a 1-epoch HfBertClassifier over gradient-accumulation
    steps, learning rate, and hidden size; return the refit best model."""
    base = HfBertClassifier(
        weights_name='bert-base-cased',
        batch_size=8,  # Small batches to avoid memory overload.
        max_iter=1,  # We'll search based on 1 iteration for efficiency.
        n_iter_no_change=5,  # Early-stopping params are for the
        early_stopping=True)  # final evaluation.
    grid = {
        'gradient_accumulation_steps': [1, 4, 8],
        'eta': [0.00005, 0.0001, 0.001],
        'hidden_dim': [100, 200, 300]}
    return utils.fit_classifier_with_hyperparameter_search(
        X, y, base, cv=3, param_grid=grid)
# %%time
bert_classifier_xval = sst.experiment(
sst.train_reader(SST_HOME),
bert_fine_tune_phi,
fit_hf_bert_classifier_with_hyperparameter_search,
assess_dataframes=sst.dev_reader(SST_HOME),
vectorize=False) # Pass in the BERT hidden state directly!
# And now on to the final test-set evaluation, using the best model from above:
optimized_bert_classifier = bert_classifier_xval['model']
# Remove the rest of the experiment results to clear out some memory:
del bert_classifier_xval
def fit_optimized_hf_bert_classifier(X, y):
    """Refit the best model from the hyperparameter search on (X, y),
    raising max_iter so early stopping decides when training halts.
    Mutates and returns the module-level `optimized_bert_classifier`."""
    optimized_bert_classifier.max_iter = 1000
    optimized_bert_classifier.fit(X, y)
    return optimized_bert_classifier
test_df = sst.sentiment_reader(
os.path.join(SST_HOME, "sst3-test-labeled.csv"))
# %%time
_ = sst.experiment(
sst.train_reader(SST_HOME),
bert_fine_tune_phi,
fit_optimized_hf_bert_classifier,
assess_dataframes=test_df,
vectorize=False) # Pass in the BERT hidden state directly!
| finetuning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="gMVdNFk2r3k0" executionInfo={"status": "ok", "timestamp": 1606053640902, "user_tz": 180, "elapsed": 759, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-sherhDJMTPM/AAAAAAAAAAI/AAAAAAAABJ8/Ug8RI1s3zLA/s64/photo.jpg", "userId": "14215347088313866282"}}
# Libraries used throughout this notebook
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_validate
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GroupKFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="OU0J49qxrvBf" executionInfo={"status": "ok", "timestamp": 1606052698619, "user_tz": 180, "elapsed": 1216, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-sherhDJMTPM/AAAAAAAAAAI/AAAAAAAABJ8/Ug8RI1s3zLA/s64/photo.jpg", "userId": "14215347088313866282"}} outputId="84223605-850a-412a-98b0-a5c20c25ec94"
# Load the simulated car-sales dataset from the gist URL; "Unnamed: 0"
# is a leftover index column and is dropped on read.
uri = "https://gist.githubusercontent.com/guilhermesilveira/e99a526b2e7ccc6c3b70f53db43a87d2/raw/1605fc74aa778066bf2e6695e24d53cf65f2f447/machine-learning-carros-simulacao.csv"
dados = pd.read_csv(uri).drop(columns=["Unnamed: 0"], axis=1)
dados.head()
# + colab={"base_uri": "https://localhost:8080/"} id="yvHToiOhufWr" executionInfo={"status": "ok", "timestamp": 1606052698620, "user_tz": 180, "elapsed": 1208, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-sherhDJMTPM/AAAAAAAAAAI/AAAAAAAABJ8/Ug8RI1s3zLA/s64/photo.jpg", "userId": "14215347088313866282"}} outputId="9c25fdfa-adca-4c8e-fff0-6074f1e2dbe1"
# Define the features (x) and target (y), fix the RNG seed, and build a
# stratified 75/25 train/test split.
x = dados[["preco", "idade_do_modelo","km_por_ano"]]
y = dados["vendido"]
SEED = 158020
np.random.seed(SEED)
treino_x, teste_x, treino_y, teste_y = train_test_split(x, y, test_size = 0.25,
                                                        stratify = y)
print("Treinaremos com %d elementos e testaremos com %d elementos" % (len(treino_x), len(teste_x)))
# + colab={"base_uri": "https://localhost:8080/"} id="UTcZseGV6Qw8" executionInfo={"status": "ok", "timestamp": 1606052698621, "user_tz": 180, "elapsed": 1204, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-sherhDJMTPM/AAAAAAAAAAI/AAAAAAAABJ8/Ug8RI1s3zLA/s64/photo.jpg", "userId": "14215347088313866282"}} outputId="597bf781-f113-4a43-f377-8c943181336e"
#gera um dummy stratified para validação dos dados
dummy_stratified = DummyClassifier()
dummy_stratified.fit(treino_x, treino_y)
acuracia = dummy_stratified.score(teste_x, teste_y) * 100
print("A acurácia do dummy stratified foi %.2f%%" % acuracia)
# + colab={"base_uri": "https://localhost:8080/"} id="tqrqrs1r6bAW" executionInfo={"status": "ok", "timestamp": 1606052698622, "user_tz": 180, "elapsed": 1199, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-sherhDJMTPM/AAAAAAAAAAI/AAAAAAAABJ8/Ug8RI1s3zLA/s64/photo.jpg", "userId": "14215347088313866282"}} outputId="99c341a2-64b7-4115-b191-b9d51c19316c"
#cria um modelo de decision tree
modelo = DecisionTreeClassifier(max_depth=2)
modelo.fit(treino_x, treino_y)
previsoes = modelo.predict(teste_x)
acuracia = accuracy_score(teste_y, previsoes) * 100
print("A acurácia foi %.2f%%" % acuracia)
# + id="S7XNCEPQONsV" executionInfo={"status": "ok", "timestamp": 1606052698622, "user_tz": 180, "elapsed": 1193, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-sherhDJMTPM/AAAAAAAAAAI/AAAAAAAABJ8/Ug8RI1s3zLA/s64/photo.jpg", "userId": "14215347088313866282"}}
#função para imprimir os resultados do intervalo e média com desvio padrão
def imprime_resultados(results):
    """Print the cross-validation accuracy as a mean and a +/-2-std interval.

    `results` is the dict returned by sklearn's cross_validate; only the
    'test_score' array is used.
    """
    mean_score = results['test_score'].mean()
    std_score = results['test_score'].std()
    lower = (mean_score - 2 * std_score) * 100
    upper = (mean_score + 2 * std_score) * 100
    print("Accuracy intervalo [%.2f%%, %.2f%%]" % (lower, upper))
    print("Accuracy médio: %.2f%%" % (mean_score * 100))
# + colab={"base_uri": "https://localhost:8080/"} id="f2w2NAqW5SIy" executionInfo={"status": "ok", "timestamp": 1606052698889, "user_tz": 180, "elapsed": 1456, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-sherhDJMTPM/AAAAAAAAAAI/AAAAAAAABJ8/Ug8RI1s3zLA/s64/photo.jpg", "userId": "14215347088313866282"}} outputId="f0317e01-4cc9-4f41-b5c9-42cf52ad64bd"
#aplica a validação cruzada no modelo
cv = KFold(n_splits = 10)
modelo = DecisionTreeClassifier(max_depth=2)
results = cross_validate(modelo, x, y, cv = cv, return_train_score=False)
imprime_resultados(results)
# + colab={"base_uri": "https://localhost:8080/"} id="uJLPuD9xPkbd" executionInfo={"status": "ok", "timestamp": 1606052698890, "user_tz": 180, "elapsed": 1450, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-sherhDJMTPM/AAAAAAAAAAI/AAAAAAAABJ8/Ug8RI1s3zLA/s64/photo.jpg", "userId": "14215347088313866282"}} outputId="61a483a1-7bd9-46f4-a149-a7327b9ad54b"
#aplica a validação cruzada com shuffle no modelo
cv = KFold(n_splits = 10, shuffle = True)
modelo = DecisionTreeClassifier(max_depth=2)
results = cross_validate(modelo, x, y, cv = cv, return_train_score=False)
imprime_resultados(results)
# + colab={"base_uri": "https://localhost:8080/"} id="s9Zr2c_TpE5W" executionInfo={"status": "ok", "timestamp": 1606052699177, "user_tz": 180, "elapsed": 1730, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-sherhDJMTPM/AAAAAAAAAAI/AAAAAAAABJ8/Ug8RI1s3zLA/s64/photo.jpg", "userId": "14215347088313866282"}} outputId="46d9d5c4-af37-4655-8aa0-7234a8dc1d03"
#testando o modelo com o stratified KFold
cv = StratifiedKFold(n_splits = 10, shuffle = True)
modelo = DecisionTreeClassifier(max_depth=2)
results = cross_validate(modelo, x, y, cv = cv, return_train_score=False)
imprime_resultados(results)
# + colab={"base_uri": "https://localhost:8080/"} id="EtIT7jXYwxtI" executionInfo={"status": "ok", "timestamp": 1606052699178, "user_tz": 180, "elapsed": 1725, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-sherhDJMTPM/AAAAAAAAAAI/AAAAAAAABJ8/Ug8RI1s3zLA/s64/photo.jpg", "userId": "14215347088313866282"}} outputId="688dc27d-4179-48d4-98dd-305747822aed"
len(dados)#verificando o tamanho do dataframe
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="LGG8jDzkwWwR" executionInfo={"status": "ok", "timestamp": 1606052699180, "user_tz": 180, "elapsed": 1721, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-sherhDJMTPM/AAAAAAAAAAI/AAAAAAAABJ8/Ug8RI1s3zLA/s64/photo.jpg", "userId": "14215347088313866282"}} outputId="7e425a86-9522-43ae-e442-29ceea81b616"
#cria a coluna 'modelo' com dados aleatórios gerados a partir do método random.randint() da biblioteca Numpy para testar valores desconhecidos pelo algoritmo a partir desses novos modelos
np.random.seed(SEED)
dados['modelo'] = dados.idade_do_modelo + np.random.randint(-2, 3, size=10000)
dados.modelo = dados.modelo + abs(dados.modelo.min()) + 1 #trata o valor mínimo para 0 em vez de -1
dados.head()
# + id="_M_UhjzXPvxy"
# + colab={"base_uri": "https://localhost:8080/"} id="iuBVCXuNzdfC" executionInfo={"status": "ok", "timestamp": 1606052699181, "user_tz": 180, "elapsed": 1716, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-sherhDJMTPM/AAAAAAAAAAI/AAAAAAAABJ8/Ug8RI1s3zLA/s64/photo.jpg", "userId": "14215347088313866282"}} outputId="6620400c-0103-4375-c868-aff2fe961a02"
#testando um modelo com dados gerados aleatórios utilizado o group KFold
cv = GroupKFold(n_splits = 10,)
modelo = DecisionTreeClassifier(max_depth=2)
results = cross_validate(modelo, x, y, cv = cv, groups = dados.modelo, return_train_score=False)
imprime_resultados(results)
# + colab={"base_uri": "https://localhost:8080/"} id="hEytMna_LjwP" executionInfo={"status": "ok", "timestamp": 1606053103065, "user_tz": 180, "elapsed": 2376, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-sherhDJMTPM/AAAAAAAAAAI/AAAAAAAABJ8/Ug8RI1s3zLA/s64/photo.jpg", "userId": "14215347088313866282"}} outputId="fa1d0b07-1640-43f3-bfdd-dd3b09f02aff"
#testando um modelo SVC com os dados escalados utilizado o método StandardScaler
SEED = 301
np.random.seed(SEED)
scaler = StandardScaler()
scaler.fit(treino_x)
treino_x_escalado = scaler.transform(treino_x)
teste_x_escalado = scaler.transform(teste_x)
modelo = SVC()
modelo.fit(treino_x_escalado, treino_y)
previsoes = modelo.predict(teste_x_escalado)
acuracia = accuracy_score(teste_y, previsoes) * 100
print("A acurácia foi de %.2f%%" %acuracia)
# + colab={"base_uri": "https://localhost:8080/"} id="DxeSYqGaNMN8" executionInfo={"status": "ok", "timestamp": 1606053666552, "user_tz": 180, "elapsed": 22768, "user": {"displayName": "<NAME>", "photoUrl": "https://lh5.googleusercontent.com/-sherhDJMTPM/AAAAAAAAAAI/AAAAAAAABJ8/Ug8RI1s3zLA/s64/photo.jpg", "userId": "14215347088313866282"}} outputId="3070636e-fe06-49a8-e1dc-ca1436422d4c"
#adicionando o pipeline do sklearn no modelo anterior
SEED = 301
np.random.seed(SEED)
scaler = StandardScaler()
modelo = SVC()
pipeline = Pipeline([('transformacao', scaler), ('estimador', modelo)])
cv = GroupKFold(n_splits=10)
results = cross_validate(pipeline, x, y, cv = cv, groups = dados.modelo, return_train_score=False)
imprime_resultados(results)
| machine_learning/validacao_de_modelos/validacao_cruzada.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# next time check this video:
# https://www.youtube.com/watch?v=VdLdfF_uuSQ
#
#
# # Data load
# +
#import file as df
from urllib.error import URLError
from urllib.request import urlretrieve
import os
from pandas import read_csv
URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'

######################
# A function that downloads data if it's not available in the specified path;
# it also sets column names.
######################
def getAdultBirthData(filename = 'adult_birth.csv', url=URL, force_download = False):
    """Return the UCI "adult" census dataset as a pandas DataFrame.

    Parameters:
        filename: local path used to cache the CSV.
        url: source URL used when a download is needed.
        force_download: re-download even if `filename` already exists.

    A failed download is only printed, so reading a still-missing file
    raises the usual pandas FileNotFoundError.
    """
    if force_download or not os.path.exists(filename):
        try:
            # Bug fix: honor the `url` parameter (the global URL was used before).
            urlretrieve(url, filename)
        except URLError as e:
            print(e.reason)
    # Bug fixes: read the `filename` the caller asked for (it was hard-coded to
    # 'adult_birth.csv'), and read it as headerless -- adult.data has no header
    # row, so the default header=0 silently consumed the first record.
    df = read_csv(filename, header=None)
    df.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'salary']
    return df
#df = getAdultBirthData(force_download = True)
df = getAdultBirthData()
df.head()
# -
# # Data summary
df.shape
df.dtypes
# describe() applied to the whole data frame will give summary statistics for continuous variables. It can also be applied to a single variable.
df.describe()
df.relationship.describe()
# Learning about grouping and aggregating functions, trying examples from : https://www.shanelynn.ie/summarising-aggregation-and-grouping-data-in-python-pandas/
#non-null observations
df['hours-per-week'].count()
#longest hours
df['hours-per-week'].max()
# How many entries are there for each race?
df['race'].value_counts()
## Number of non-null unique marital-status entries
df['marital-status'].nunique()
#splitting with groupby
df.groupby(['race']).groups.keys()
# Get the mean of the age per race
df.groupby('race')['age'].mean()
# Get the number of dates / entries in each marital-status category
df.groupby('marital-status')['age'].mean()
df.groupby('sex')['age'].mean()
df.groupby('salary')['sex'].count()
df.groupby(['race', 'sex'])['age'].mean()
df.groupby(['race', 'sex'])['education-num'].mean()
df.groupby('salary')['sex'].count()
df.groupby(['salary', 'sex']).agg({'sex':'count'})
df.groupby(['salary', 'sex']).agg({'age':'mean'})
# # Dealing with "?"
# Having moved to the next step I noticed there are a few cells with "?" that we probably want to get rid of before recoding the data. I'm going to remove a row if there is a ? in it. This is a rigid approach that will have to do for now. First I created functions with the small dataset below. Also, I'm going to get rid of any whitespace at this stage; when matching strings this is important, as anything with a whitespace will not be matched.
# +
#check there are no whitespaces
df_obj = df.select_dtypes(['object'])
df[df_obj.columns] = df_obj.apply(lambda x: x.str.strip())
#replace any "?" with NaN
# to check print again: print(df.loc[0,'workclass'])
# lambda anonymous function is equivalent to:
#def strip(x)
# return x.str.strip()
df.loc[df['workclass'].str.contains('///?'), 'workclass'].head(3).tolist()
#containsStringCheck("\?", df)
#we confirm that there are '?'.
#now we can delete rows with "?" with a custom function I tested with a dataframe slice at the bottom
# +
import numpy as np
def replaceStringwithNanAndRemoveRow(inputChar, dfName):
    """Drop every row where an object-dtype column equals `inputChar` exactly.

    Exact matches of `inputChar` are replaced with NaN (mutating `dfName`) and
    the resulting NaN rows are dropped.

    Caveat: dropna() also removes rows that already contained NaN for any
    other reason.

    Returns the cleaned DataFrame.
    """
    for column in dfName:
        if dfName[column].dtype == 'object':
            # Bug fix: regex=False makes the containment test literal, so regex
            # metacharacters ('?', '(', '|', ...) need no escaping -- the old
            # hand-maintained escape list missed '(', ')' and '|' and crashed
            # on them with re.error.
            if dfName[column].str.contains(inputChar, regex=False).any():
                # Direct column assignment instead of chained
                # `dfName[column].replace(..., inplace=True)`, which relies on
                # chained-assignment behavior that newer pandas drops.
                dfName[column] = dfName[column].replace(inputChar, np.nan)
    # careful, this will drop any other rows with NaN
    return dfName.dropna()
df = replaceStringwithNanAndRemoveRow('?', df)
# -
# # Data recoding
# Reduce the number of factors in work class, marital status and country.
#
df['workclass'].value_counts()
# Values have white spaces in front of them, we need to fix that first. First, I select columns of type 'object' and then use the str.strip function.
#
# Now I can recode values with a custom function
# +
def rename_workclass(wc):
    """Collapse the detailed workclass labels into broader buckets.

    Unknown labels pass through unchanged.
    """
    grouped = {
        "Never-worked": "Unemployed",
        "Without-pay": "Unemployed",
        "State-gov": "Gov",
        "Local-gov": "Gov",
        "Federal-gov": "Gov",
        "Self-emp-inc": "Self-emp",
        "Self-emp-not-inc": "Self-emp",
    }
    return grouped.get(wc, wc)
df['workclassR'] = df['workclass'].apply(rename_workclass)
df['workclassR'].value_counts()
df.groupby(['salary', 'workclassR']).agg({'workclassR':'count'})
# +
#A different way to recode with a dictionary
mapper = {'Without-pay': 'Unemployed',
'Never-worked': 'Unemployed',
'State-gov': 'Gov',
'Local-gov': 'Gov',
'Federal-gov': 'Gov',
'Self-emp-inc': 'Self-emp',
'Self-emp-not-inc': 'Self-emp'}
df['workclassR2'] = df['workclass'].map(mapper).fillna(df['workclass'])
print(df['workclassR2'].value_counts())
#remove the additional column at the end
df.drop(['workclassR2'], axis=1)
# -
# Do more recoding, for marital status and country
#
#
# +
mapper_marital_status = {'Married-spouse-absent': 'Married',
'Married-AF-spouse': 'Married',
'Married-civ-spouse': 'Married',
'Divorced': 'Not-Married',
'Separated': 'Not-Married',
'Widowed': 'Not-Married',
'Self-emp-not-inc': 'Self-emp'}
df['marital-statusR'] = df['marital-status'].map(mapper_marital_status).fillna(df['marital-status'])
print(df['marital-statusR'].value_counts())
# +
northAmericaList = ["Canada", "Cuba", "Dominican-Republic", "El-Salvador", "Guatemala",
                    "Haiti", "Honduras", "Jamaica", "Mexico", "Nicaragua",
                    "Outlying-US(Guam-USVI-etc)", "Puerto-Rico", "Trinadad&Tobago",
                    "United-States"]
asiaList = ["Cambodia", "China", "Hong", "India", "Iran", "Japan", "Laos",
            "Philippines", "Taiwan", "Thailand", "Vietnam"]
southAmericaList = ["Columbia", "Ecuador", "Peru"]
europeList = ["England", "France", "Germany", "Greece", "Holand-Netherlands",
              "Hungary", "Ireland", "Italy", "Poland", "Portugal", "Scotland",
              "Yugoslavia"]
otherList = ["South", "?"]

def rename_country(c):
    """Map a native-country value onto its region label.

    Values not found in any region list pass through unchanged.
    """
    region_lookup = [
        (northAmericaList, "north.america"),
        (asiaList, "asia"),
        (southAmericaList, "south.america"),
        (europeList, "europe"),
        (otherList, "other"),
    ]
    for members, label in region_lookup:
        if c in members:
            return label
    return c
df['countryR'] = df['native-country'].apply(rename_country)
df['countryR'].value_counts()
# -
# # Dealing with missing data
df.info() #will tell us about missing values
df.isnull().sum() #how many Nan
# # Save dataframe with recoded variables for future use
#df.to_pickle("C:\Users\karol\python_data\adultRecoded.pkl")
#looks like I can only save to current wd
df.to_pickle("adultRecoded.pkl")
import os
print(os.getcwd())
# There is nothing to do here, no missing data in this dataset
# # Create a small example to test recoding function, then get a slice of a dataframe and test your function there
#
# I've made many errors when trying to recode the data (for example, it took me a while to discover there were white spaces in the data). To debug, I used a minimal example and got the first 5 rows of my df, to test my functions
# +
import pandas as pd
testDict = {'Col1' : pd.Categorical(["a", "b", "c", "d", "e"]),
'Col2' : pd.Categorical(["1", "2", "3", "4", "5"])}
testDF = pd.DataFrame.from_dict(testDict)
testDF
testDF.dtypes
# +
def letter_recode(Col1):
    """Collapse 'a'/'b' into 'ab' and 'c'/'d' into 'cd'; other values pass through."""
    merged = {"a": "ab", "b": "ab", "c": "cd", "d": "cd"}
    return merged.get(Col1, Col1)
testDF['Col3'] = testDF['Col1'].apply(letter_recode)
testDF['Col3'].value_counts()
testDF
# -
df['workclass'].head()
#there are 2 ways for selecting parts of a dataframe, iloc, and loc, iloc is better when it comes to indexing
#df5 = df.loc[0:4,:]
df5 = df.iloc[0:4, :] # to access the column at the nth position
#we need to specify that the slice is not a copy, otherwise we will be getting warnings
df5.is_copy = False
df5
# +
df5_obj = df5.select_dtypes(['object'])
df5[df5_obj.columns] = df5_obj.apply(lambda x: x.str.strip())
#print(df5.loc[0,'workclass'])
df5.relationship[df5.relationship=='Husband'] = '?'
#df7 = replaceStringwithNanAndRemoveRow('\?', df5)
#df5
df5
# colNum = 0
# for column in df5:
# if(df5[column].dtype=='object'):
# response = df5[column].str.contains('\?').any()
# if (response == True):
# df5[column].replace('?', np.nan, inplace=True)
# colNum = colNum+1
# dfWithoutNaN = df5.dropna()
# dfWithoutNaN
# +
df5.dtypes
df5
# +
#a function to test whether a column contains a string
import numpy as np
def containsStringCheck(string, dfName):
    """Print, for each object-dtype column, whether any value matches `string`.

    `string` is passed to Series.str.contains as a regex, so metacharacters
    must be pre-escaped by the caller.
    """
    # Bug fix: enumerate keeps the printed name aligned with the column being
    # tested; the old manual counter could drift out of sync when non-object
    # columns were interleaved, printing the wrong column name.
    for position, column in enumerate(dfName):
        if dfName[column].dtype == 'object':
            response = dfName[column].str.contains(string).any()
            print(dfName.columns[position] + " contains value " + str(response))
#containsStringCheck('White', df5)
#containsStringCheck('\\\?', df5)
#df5['race'].str.contains('White').any()
df5
#df5.marital-status[df5.marital-status=='Husband']
#df5.loc[df5['relationship'].str.contains('Husband'), 'relationship'].head(3).tolist()
#df5
#function to check
def replaceStringwithNanAndRemoveRow(inputChar, dfName):
    """Drop every row where an object-dtype column equals `inputChar` exactly.

    NOTE(review): duplicate of the replaceStringwithNanAndRemoveRow defined
    earlier in this notebook, kept for the df5 walkthrough below.

    Exact matches of `inputChar` are replaced with NaN (mutating `dfName`) and
    the resulting NaN rows are dropped.

    Caveat: dropna() also removes rows that already contained NaN for any
    other reason.

    Returns the cleaned DataFrame.
    """
    for column in dfName:
        if dfName[column].dtype == 'object':
            # Bug fix: regex=False makes the containment test literal, so regex
            # metacharacters ('?', '(', '|', ...) need no escaping -- the old
            # hand-maintained escape list missed '(', ')' and '|' and crashed
            # on them with re.error.
            if dfName[column].str.contains(inputChar, regex=False).any():
                # Direct column assignment instead of chained
                # `dfName[column].replace(..., inplace=True)`, which relies on
                # chained-assignment behavior that newer pandas drops.
                dfName[column] = dfName[column].replace(inputChar, np.nan)
    # careful, this will drop any other rows with NaN
    return dfName.dropna()
df6 = replaceStringwithNanAndRemoveRow('?', df5)
# #check string was removed
# containsStringCheck("White", df6)
# containsStringCheck("\?", df6)
df6
# -
| DataCleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="FJMGVPJ3CvXe"
# ## Tacotron 2 inference code
# Edit the variables **checkpoint_path** and **text** to match yours and run the entire code to generate plots of mel outputs, alignments and audio synthesis from the generated mel-spectrogram using Griffin-Lim.
# + [markdown] id="Va0VYiW2CvXz"
# #### Import libraries and setup matplotlib
# + colab={"base_uri": "https://localhost:8080/"} id="EPdrtn5qDCUL" executionInfo={"status": "ok", "timestamp": 1635678740620, "user_tz": -540, "elapsed": 700, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03208981693383898195"}} outputId="64ca8fed-60aa-434d-cd68-39194376de60"
from google.colab import drive
drive.mount('/content/drive')
## set Path
path = '/content/drive/MyDrive/fusic/emotion_tts/tacotron2'
# %cd {path}
# + colab={"base_uri": "https://localhost:8080/"} id="xFKQhj2LDRBw" executionInfo={"status": "ok", "timestamp": 1635678916200, "user_tz": -540, "elapsed": 103354, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03208981693383898195"}} outputId="83f79120-8b6d-4685-ae6b-c3206320133a"
# ! pip install unidecode
# ! pip install pytorch-revgrad
# ! pip install jaconv pyopenjtalk wandb
# + colab={"base_uri": "https://localhost:8080/", "height": 350} id="YSF_RNfLCvX1" executionInfo={"status": "error", "timestamp": 1635678917595, "user_tz": -540, "elapsed": 1405, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03208981693383898195"}} outputId="ec76d508-8d80-4fbc-df9d-419eed1e80b4"
import matplotlib
# %matplotlib inline
import matplotlib.pylab as plt
import IPython.display as ipd
import sys
sys.path.append('waveglow/')
import numpy as np
import torch
from hparams import create_hparams
from model import Tacotron2
from layers import TacotronSTFT, STFT
from audio_processing import griffin_lim
from train import load_model
from text import text_to_sequence
from denoiser import Denoiser
# + id="1E-UKSpnCvX7"
def plot_data(data, figsize=(16, 4)):
    """Plot each 2-D array in `data` (mel spectrograms / alignments) side by side.

    Parameters:
        data: sequence of 2-D arrays to display with imshow.
        figsize: matplotlib figure size in inches.
    """
    # Bug fix: squeeze=False keeps `axes` 2-D even when len(data) == 1, so the
    # indexing below works for a single plot as well.
    fig, axes = plt.subplots(1, len(data), figsize=figsize, squeeze=False)
    for i in range(len(data)):
        # Bug fix: origin='bottom' is not a valid imshow value in current
        # matplotlib (only 'upper'/'lower'); 'lower' preserves the intended
        # bottom-up orientation of the mel bins.
        axes[0][i].imshow(data[i], aspect='auto', origin='lower',
                          interpolation='none')
# + [markdown] id="hM_P66zrCvX8"
# #### Setup hparams
# + id="0z81QxNNCvX9"
hparams = create_hparams()
hparams.sampling_rate = 22050
# + [markdown] id="4agLyIePCvX-"
# #### Load model from checkpoint
# + id="8y7h3RElCvX_"
checkpoint_path = "tacotron2_statedict.pt"
model = load_model(hparams)
model.load_state_dict(torch.load(checkpoint_path)['state_dict'])
_ = model.cuda().eval().half()
# + [markdown] id="qZqPpM2jCvYB"
# #### Load WaveGlow for mel2audio synthesis and denoiser
# + id="d2_mL7dUCvYC" outputId="2624ea20-f0dd-41c2-fe48-f106e54d80a0"
waveglow_path = 'waveglow_256channels.pt'
waveglow = torch.load(waveglow_path)['model']
waveglow.cuda().eval().half()
for k in waveglow.convinv:
k.float()
denoiser = Denoiser(waveglow)
# + [markdown] id="4y6c96fkCvYE"
# #### Prepare text input
# + id="RUAgmB-UCvYE"
text = "Waveglow is really awesome!"
sequence = np.array(text_to_sequence(text, ['english_cleaners']))[None, :]
sequence = torch.autograd.Variable(
torch.from_numpy(sequence)).cuda().long()
# + [markdown] id="8cNahd0MCvYF"
# #### Decode text input and plot results
# + id="SW8UtUMICvYF" outputId="19edcf6c-a299-492a-bebb-f694951d9c0c"
mel_outputs, mel_outputs_postnet, _, alignments = model.inference(sequence)
plot_data((mel_outputs.float().data.cpu().numpy()[0],
mel_outputs_postnet.float().data.cpu().numpy()[0],
alignments.float().data.cpu().numpy()[0].T))
# + [markdown] id="mDlGeHYECvYG"
# #### Synthesize audio from spectrogram using WaveGlow
# + id="1PR5Xz7NCvYG" outputId="8c7e3254-9257-45eb-d5f5-452a4e385179"
with torch.no_grad():
audio = waveglow.infer(mel_outputs_postnet, sigma=0.666)
ipd.Audio(audio[0].data.cpu().numpy(), rate=hparams.sampling_rate)
# + [markdown] id="21rwiS7DCvYH"
# #### (Optional) Remove WaveGlow bias
# + id="Af5TqgLvCvYH" outputId="1cd14c7f-f7f0-4870-ea9d-7a5fb47739a0"
audio_denoised = denoiser(audio, strength=0.01)[:, 0]
ipd.Audio(audio_denoised.cpu().numpy(), rate=hparams.sampling_rate)
| inference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PySpark
# language: python
# name: pyspark
# ---
# +
evaluation = True
evaluation_verbose = False
OUTPUT_BUCKET_FOLDER = "gs://<GCS_BUCKET_NAME>/outbrain-click-prediction/output/"
DATA_BUCKET_FOLDER = "gs://<GCS_BUCKET_NAME>/outbrain-click-prediction/data/"
# -
from IPython.display import display
from pyspark.sql.types import *
import pyspark.sql.functions as F
from pyspark.ml.linalg import Vectors, SparseVector, VectorUDT
import numpy as np
import scipy.sparse
import math
import datetime
import time
import itertools
import pickle
import random
random.seed(42)
import pandas as pd
# %matplotlib inline
start_time = time.time()
import hashlib
def hashstr(s, nr_bins):
    """Hash a string into a bucket index in the range [1, nr_bins - 1] via md5."""
    digest = hashlib.md5(s.encode('utf8')).hexdigest()
    return int(digest, 16) % (nr_bins - 1) + 1
# ## UDFs
# +
def date_time_to_unix_epoch(date_time):
    """Convert a datetime into a unix epoch in seconds (local-time mktime)."""
    return int(time.mktime(date_time.timetuple()))

def date_time_to_unix_epoch_treated(dt):
    """Null/error-tolerant wrapper: 0 for None or unconvertible values."""
    if dt is None:
        return 0
    try:
        return date_time_to_unix_epoch(dt)
    except Exception as e:
        # Best-effort: log the bad value and fall back to the 0 sentinel.
        print("Error processing dt={}".format(dt), e)
        return 0
# -
# Spark UDFs wrapping the null-tolerant converters above.
# NOTE(review): F / IntegerType / ArrayType / FloatType / StringType come from
# the pyspark imports at the top of this notebook section.
timestamp_null_to_zero_int_udf = F.udf(lambda x: date_time_to_unix_epoch_treated(x), IntegerType())
# Sentinel value used when an integer column is NULL.
INT_DEFAULT_NULL_VALUE = -1
int_null_to_minus_one_udf = F.udf(lambda x: x if x != None else INT_DEFAULT_NULL_VALUE, IntegerType())
# NULL list columns are normalized to empty lists of the matching element type.
int_list_null_to_empty_list_udf = F.udf(lambda x: x if x != None else [], ArrayType(IntegerType()))
float_list_null_to_empty_list_udf = F.udf(lambda x: x if x != None else [], ArrayType(FloatType()))
str_list_null_to_empty_list_udf = F.udf(lambda x: x if x != None else [], ArrayType(StringType()))
def truncate_day_from_timestamp(ts):
    """Reduce a millisecond timestamp to a whole-day index."""
    # Same division chain as before (ms -> s -> h -> days) so the float
    # rounding is bit-for-bit identical.
    seconds = ts / 1000
    hours = seconds / 60 / 60
    days = hours / 24
    return int(days)
# Day index of an event's millisecond timestamp.
truncate_day_from_timestamp_udf = F.udf(lambda ts: truncate_day_from_timestamp(ts), IntegerType())
# The first 2 characters of geo_location are taken as the country and the
# first 5 as country+state -- presumably a "CC>SS>..." layout; TODO confirm
# against the competition's data dictionary.
extract_country_udf = F.udf(lambda geo: geo.strip()[:2] if geo != None else '', StringType())
extract_country_state_udf = F.udf(lambda geo: geo.strip()[:5] if geo != None else '', StringType())
# Length of a list column, 0 for NULL.
list_len_udf = F.udf(lambda x: len(x) if x != None else 0, IntegerType())
def convert_odd_timestamp(timestamp_ms_relative):
    """Recover an absolute datetime from the dataset's relative ms timestamp.

    The competition data stores timestamps as milliseconds relative to a fixed
    epoch offset (TIMESTAMP_DELTA).
    """
    TIMESTAMP_DELTA = 1465876799998
    absolute_ms = int(timestamp_ms_relative) + TIMESTAMP_DELTA
    return datetime.datetime.fromtimestamp(absolute_ms // 1000)
# # Loading Files
# ## Loading UTC/BST for each country and US / CA states (local time)
country_utc_dst_df = pd.read_csv('country_codes_utc_dst_tz_delta.csv', keep_default_na=False)
countries_utc_dst_dict = dict(zip(country_utc_dst_df['country_code'].tolist(), country_utc_dst_df['utc_dst_time_offset_cleaned'].tolist()))
countries_utc_dst_broad = sc.broadcast(countries_utc_dst_dict)
us_states_utc_dst_df = pd.read_csv('us_states_abbrev_bst.csv', keep_default_na=False)
us_states_utc_dst_dict = dict(zip(us_states_utc_dst_df['state_abb'].tolist(), us_states_utc_dst_df['utc_dst_time_offset_cleaned'].tolist()))
us_states_utc_dst_broad = sc.broadcast(us_states_utc_dst_dict)
ca_states_utc_dst_df = pd.read_csv('ca_states_abbrev_bst.csv', keep_default_na=False)
ca_countries_utc_dst_dict = dict(zip(ca_states_utc_dst_df['state_abb'].tolist(), ca_states_utc_dst_df['utc_dst_time_offset_cleaned'].tolist()))
ca_countries_utc_dst_broad = sc.broadcast(ca_countries_utc_dst_dict)
# ## Loading competition csvs
# +
events_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("uuid_event", StringType(), True),
StructField("document_id_event", IntegerType(), True),
StructField("timestamp_event", IntegerType(), True),
StructField("platform_event", IntegerType(), True),
StructField("geo_location_event", StringType(), True)]
)
events_df = spark.read.schema(events_schema).options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "events.csv") \
.withColumn('dummyEvents', F.lit(1)) \
.withColumn('day_event', truncate_day_from_timestamp_udf('timestamp_event')) \
.withColumn('event_country', extract_country_udf('geo_location_event')) \
.withColumn('event_country_state', extract_country_state_udf('geo_location_event')) \
.alias('events')
# +
page_views_schema = StructType(
[StructField("uuid_pv", StringType(), True),
StructField("document_id_pv", IntegerType(), True),
StructField("timestamp_pv", IntegerType(), True),
StructField("platform_pv", IntegerType(), True),
StructField("geo_location_pv", StringType(), True),
StructField("traffic_source_pv", IntegerType(), True)]
)
page_views_df = spark.read.schema(page_views_schema).options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER+"page_views.csv") \
.withColumn('day_pv', truncate_day_from_timestamp_udf('timestamp_pv')) \
.alias('page_views')
page_views_df.createOrReplaceTempView('page_views')
# -
page_views_users_df = spark.sql('''
SELECT uuid_pv, document_id_pv, max(timestamp_pv) as max_timestamp_pv, 1 as dummyPageView
FROM page_views p
GROUP BY uuid_pv, document_id_pv
''').alias('page_views_users')
# +
promoted_content_schema = StructType(
[StructField("ad_id", IntegerType(), True),
StructField("document_id_promo", IntegerType(), True),
StructField("campaign_id", IntegerType(), True),
StructField("advertiser_id", IntegerType(), True)]
)
promoted_content_df = spark.read.schema(promoted_content_schema).options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER+"promoted_content.csv") \
.withColumn('dummyPromotedContent', F.lit(1)).alias('promoted_content').cache()
# +
documents_meta_schema = StructType(
[StructField("document_id_doc", IntegerType(), True),
StructField("source_id", IntegerType(), True),
StructField("publisher_id", IntegerType(), True),
StructField("publish_time", TimestampType(), True)]
)
documents_meta_df = spark.read.schema(documents_meta_schema).options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER+"documents_meta.csv") \
.withColumn('dummyDocumentsMeta', F.lit(1)).alias('documents_meta').cache()
# -
#Joining with Page Views to get traffic_source_pv
events_joined_df = events_df.join(documents_meta_df \
.withColumnRenamed('source_id', 'source_id_doc_event') \
.withColumnRenamed('publisher_id', 'publisher_doc_event') \
.withColumnRenamed('publish_time', 'publish_time_doc_event')
, on=F.col("document_id_event") == F.col("document_id_doc"), how='left') \
.join(page_views_df,
on=[F.col('uuid_event') == F.col('uuid_pv'),
F.col('document_id_event') == F.col('document_id_pv'),
F.col('platform_event') == F.col('platform_pv'),
F.col('geo_location_event') == F.col('geo_location_pv'),
F.col('day_event') == F.col('day_pv')],
how='left') \
.alias('events').cache()
# +
documents_categories_schema = StructType(
[StructField("document_id_cat", IntegerType(), True),
StructField("category_id", IntegerType(), True),
StructField("confidence_level_cat", FloatType(), True)]
)
documents_categories_df = spark.read.schema(documents_categories_schema).options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER+"documents_categories.csv") \
.alias('documents_categories').cache()
documents_categories_grouped_df = documents_categories_df.groupBy('document_id_cat') \
.agg(F.collect_list('category_id').alias('category_id_list'),
F.collect_list('confidence_level_cat').alias('confidence_level_cat_list')) \
.withColumn('dummyDocumentsCategory', F.lit(1)) \
.alias('documents_categories_grouped')
# +
documents_topics_schema = StructType(
[StructField("document_id_top", IntegerType(), True),
StructField("topic_id", IntegerType(), True),
StructField("confidence_level_top", FloatType(), True)]
)
documents_topics_df = spark.read.schema(documents_topics_schema).options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER+"documents_topics.csv") \
.alias('documents_topics').cache()
documents_topics_grouped_df = documents_topics_df.groupBy('document_id_top') \
.agg(F.collect_list('topic_id').alias('topic_id_list'),
F.collect_list('confidence_level_top').alias('confidence_level_top_list')) \
.withColumn('dummyDocumentsTopics', F.lit(1)) \
.alias('documents_topics_grouped')
# +
documents_entities_schema = StructType(
[StructField("document_id_ent", IntegerType(), True),
StructField("entity_id", StringType(), True),
StructField("confidence_level_ent", FloatType(), True)]
)
documents_entities_df = spark.read.schema(documents_entities_schema).options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER+"documents_entities.csv") \
.alias('documents_entities').cache()
documents_entities_grouped_df = documents_entities_df.groupBy('document_id_ent') \
.agg(F.collect_list('entity_id').alias('entity_id_list'),
F.collect_list('confidence_level_ent').alias('confidence_level_ent_list')) \
.withColumn('dummyDocumentsEntities', F.lit(1)) \
.alias('documents_entities_grouped')
# +
clicks_train_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("ad_id", IntegerType(), True),
StructField("clicked", IntegerType(), True)]
)
clicks_train_df = spark.read.schema(clicks_train_schema).options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER+"clicks_train.csv") \
.withColumn('dummyClicksTrain', F.lit(1)).alias('clicks_train')
# -
clicks_train_joined_df = clicks_train_df \
.join(promoted_content_df, on='ad_id', how='left') \
.join(documents_meta_df, on=F.col("promoted_content.document_id_promo") == F.col("documents_meta.document_id_doc"), how='left') \
.join(events_joined_df, on='display_id', how='left')
clicks_train_joined_df.createOrReplaceTempView('clicks_train_joined')
# +
if evaluation:
table_name = 'user_profiles_eval'
else:
table_name = 'user_profiles'
user_profiles_df = spark.read.parquet(OUTPUT_BUCKET_FOLDER+table_name) \
.withColumn('dummyUserProfiles', F.lit(1)).alias('user_profiles')
# -
# # Spliting Train/validation set | Test set
# +
if evaluation:
validation_set_exported_df = spark.read.parquet(OUTPUT_BUCKET_FOLDER+"validation_set.parquet") \
.alias('validation_set')
validation_set_exported_df.select('display_id').distinct().createOrReplaceTempView("validation_display_ids")
validation_set_df = spark.sql('''SELECT * FROM clicks_train_joined t
WHERE EXISTS (SELECT display_id FROM validation_display_ids
WHERE display_id = t.display_id)''').alias('clicks') \
.join(documents_categories_grouped_df, on=F.col("document_id_promo") == F.col("documents_categories_grouped.document_id_cat"), how='left') \
.join(documents_topics_grouped_df, on=F.col("document_id_promo") == F.col("documents_topics_grouped.document_id_top"), how='left') \
.join(documents_entities_grouped_df, on=F.col("document_id_promo") == F.col("documents_entities_grouped.document_id_ent"), how='left') \
.join(documents_categories_grouped_df \
.withColumnRenamed('category_id_list', 'doc_event_category_id_list')
.withColumnRenamed('confidence_level_cat_list', 'doc_event_confidence_level_cat_list') \
.alias('documents_event_categories_grouped'),
on=F.col("document_id_event") == F.col("documents_event_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df \
.withColumnRenamed('topic_id_list', 'doc_event_topic_id_list')
.withColumnRenamed('confidence_level_top_list', 'doc_event_confidence_level_top_list') \
.alias('documents_event_topics_grouped'),
on=F.col("document_id_event") == F.col("documents_event_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df \
.withColumnRenamed('entity_id_list', 'doc_event_entity_id_list')
.withColumnRenamed('confidence_level_ent_list', 'doc_event_confidence_level_ent_list') \
.alias('documents_event_entities_grouped'),
on=F.col("document_id_event") == F.col("documents_event_entities_grouped.document_id_ent"),
how='left') \
.join(page_views_users_df, on=[F.col("clicks.uuid_event") == F.col("page_views_users.uuid_pv"),
F.col("clicks.document_id_promo") == F.col("page_views_users.document_id_pv")],
how='left')
#print("validation_set_df.count() =", validation_set_df.count())
#Added to validation set information about the event and the user for statistics of the error (avg ctr)
validation_set_ground_truth_df = validation_set_df.filter('clicked = 1') \
.join(user_profiles_df, on=[F.col("user_profiles.uuid") == F.col("uuid_event")], how='left') \
.withColumn('user_categories_count', list_len_udf('category_id_list')) \
.withColumn('user_topics_count', list_len_udf('topic_id_list')) \
.withColumn('user_entities_count', list_len_udf('entity_id_list')) \
.select('display_id','ad_id','platform_event', 'day_event', 'timestamp_event',
'geo_location_event', 'event_country', 'event_country_state', 'views',
'user_categories_count', 'user_topics_count', 'user_entities_count') \
.withColumnRenamed('ad_id','ad_id_gt') \
.withColumnRenamed('views','user_views_count') \
.cache()
#print("validation_set_ground_truth_df.count() =", validation_set_ground_truth_df.count())
train_set_df = spark.sql('''SELECT * FROM clicks_train_joined t
WHERE NOT EXISTS (SELECT display_id FROM validation_display_ids
WHERE display_id = t.display_id)''').cache()
print("train_set_df.count() =", train_set_df.count())
#validation_display_ids_df.groupBy("day_event").count().show()
else:
clicks_test_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("ad_id", IntegerType(), True)]
)
clicks_test_df = spark.read.schema(clicks_test_schema).options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "clicks_test.csv") \
.withColumn('dummyClicksTest', F.lit(1)) \
.withColumn('clicked', F.lit(-999)) \
.alias('clicks_test')
test_set_df = clicks_test_df \
.join(promoted_content_df, on='ad_id', how='left') \
.join(documents_meta_df, on=F.col("promoted_content.document_id_promo") == F.col("documents_meta.document_id_doc"), how='left') \
.join(documents_categories_grouped_df, on=F.col("document_id_promo") == F.col("documents_categories_grouped.document_id_cat"), how='left') \
.join(documents_topics_grouped_df, on=F.col("document_id_promo") == F.col("documents_topics_grouped.document_id_top"), how='left') \
.join(documents_entities_grouped_df, on=F.col("document_id_promo") == F.col("documents_entities_grouped.document_id_ent"), how='left') \
.join(events_joined_df, on='display_id', how='left') \
.join(documents_categories_grouped_df \
.withColumnRenamed('category_id_list', 'doc_event_category_id_list')
.withColumnRenamed('confidence_level_cat_list', 'doc_event_confidence_level_cat_list') \
.alias('documents_event_categories_grouped'),
on=F.col("document_id_event") == F.col("documents_event_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df \
.withColumnRenamed('topic_id_list', 'doc_event_topic_id_list')
.withColumnRenamed('confidence_level_top_list', 'doc_event_confidence_level_top_list') \
.alias('documents_event_topics_grouped'),
on=F.col("document_id_event") == F.col("documents_event_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df \
.withColumnRenamed('entity_id_list', 'doc_event_entity_id_list')
.withColumnRenamed('confidence_level_ent_list', 'doc_event_confidence_level_ent_list') \
.alias('documents_event_entities_grouped'),
on=F.col("document_id_event") == F.col("documents_event_entities_grouped.document_id_ent"),
how='left') \
.join(page_views_users_df, on=[F.col("events.uuid_event") == F.col("page_views_users.uuid_pv"),
F.col("promoted_content.document_id_promo") == F.col("page_views_users.document_id_pv")],
how='left')
#print("test_set_df.count() =",test_set_df.count())
train_set_df = clicks_train_joined_df.cache()
print("train_set_df.count() =", train_set_df.count())
# -
# # Training models
def is_null(value):
    """Return True when *value* is None or its string form is empty/whitespace.

    Uses ``is None`` (identity) instead of the original ``== None`` equality
    test, per PEP 8; behavior is unchanged for the scalar values handled here.
    """
    return value is None or len(str(value).strip()) == 0
LESS_SPECIAL_CAT_VALUE = 'less'
def get_category_field_values_counts(field, df, min_threshold=10):
    """Build {value: count} for *field*, keeping non-null values whose count
    reaches *min_threshold*, plus a special 'less' bucket (count -1) that
    represents all rarer values."""
    collected = df.select(field).groupBy(field).count().rdd.map(lambda x: (x[0], x[1])).collect()
    category_counts = {value: count
                       for value, count in collected
                       if not is_null(value) and count >= min_threshold}
    # Sentinel bucket for values below the threshold.
    category_counts[LESS_SPECIAL_CAT_VALUE] = -1
    return category_counts
# ## Building category values counters and indexers
# Cardinality of each categorical field (values seen >= 10 times), used to
# size the feature indexers. The inline "#All non-null categories" comments
# record cardinalities observed on previous runs.
event_country_values_counts = get_category_field_values_counts('event_country', events_df, min_threshold=10)
len(event_country_values_counts)
#All non-null categories: 230
event_country_state_values_counts = get_category_field_values_counts('event_country_state', events_df, min_threshold=10)
len(event_country_state_values_counts)
event_geo_location_values_counts = get_category_field_values_counts('geo_location_event', events_df, min_threshold=10)
len(event_geo_location_values_counts)
#All non-null categories: 2988
doc_entity_id_values_counts = get_category_field_values_counts('entity_id', documents_entities_df, min_threshold=10)
len(doc_entity_id_values_counts)
#All non-null categories: 1326009
# ## Processing average CTR by categories
def get_percentiles(df, field, quantiles_levels=None, max_error_rate=0.0):
    """Return {quantile_level: value} for *field* of Spark DataFrame *df*.

    quantiles_levels defaults to deciles 0.0..1.0. max_error_rate is the
    relative error passed to DataFrame.approxQuantile (0.0 = exact).
    """
    if quantiles_levels is None:  # identity test instead of `== None` (PEP 8)
        quantiles_levels = np.arange(0.0, 1.1, 0.1).tolist()
    quantiles = df.approxQuantile(field, quantiles_levels, max_error_rate)
    return dict(zip(quantiles_levels, quantiles))
#REG = 10
# REG is an additive-smoothing term for the CTR denominator (currently disabled).
REG = 0
# UDF: click-through rate = clicks / (views + REG).
ctr_udf = F.udf(lambda clicks, views: clicks / float(views + REG), FloatType())
# ### Average CTR by ad_id
# +
ad_id_popularity_df = train_set_df.groupby('ad_id').agg(F.sum('clicked').alias('clicks'),
                                                        F.count('*').alias('views')) \
    .withColumn('ctr', ctr_udf('clicks','views'))
# -
# +
#ad_id_popularity_df.count()
# +
#get_percentiles(ad_id_popularity_df, 'clicks')
# +
#get_percentiles(ad_id_popularity_df, 'views')
# -
# Keep ads with more than 5 views; the value tuple (ctr, views, 1, 1) mirrors
# the (ctr, views, distinct_ad_ids, multiplier) layout expected by
# get_popularity() for the other popularity dictionaries.
ad_id_popularity = ad_id_popularity_df.filter('views > 5').select('ad_id', 'ctr', 'views') \
    .rdd.map(lambda x: (x['ad_id'], (x['ctr'], x['views'], 1, 1))).collectAsMap()
ad_id_popularity_broad = sc.broadcast(ad_id_popularity)
list(ad_id_popularity.values())[:3]
len(ad_id_popularity)
# +
#get_ad_id_ctr_udf = F.udf(lambda ad_id: ad_id_popularity[ad_id] if ad_id in ad_id_popularity else -1, FloatType())
# -
# Summary statistics over per-ad CTRs and view counts.
ad_id_avg_ctr = sum(map(lambda x: x[0], ad_id_popularity.values())) / float(len(ad_id_popularity))
ad_id_avg_ctr
ad_id_weighted_avg_ctr = sum(map(lambda x: x[0]*x[1], ad_id_popularity.values())) / float(sum(map(lambda x: x[1], ad_id_popularity.values())))
ad_id_weighted_avg_ctr
ad_id_views_median = np.median(np.array(list(map(lambda x: x[1], ad_id_popularity.values()))))
ad_id_views_median
ad_id_views_mean = sum(map(lambda x: x[1], ad_id_popularity.values())) / float(len(ad_id_popularity))
ad_id_views_mean
# ### Average CTR by document_id (promoted_content)
# +
# Clicks/views/CTR per promoted document, plus how many distinct ads point to it.
document_id_popularity_df = train_set_df.groupby('document_id_promo').agg(F.sum('clicked').alias('clicks'),
                                                                          F.count('*').alias('views'),
                                                                          F.countDistinct('ad_id').alias('distinct_ad_ids')) \
    .withColumn('ctr', ctr_udf('clicks','views'))
document_id_popularity = document_id_popularity_df.filter('views > 5').select('document_id_promo', 'ctr', 'views', 'distinct_ad_ids') \
    .rdd.map(lambda x: (x['document_id_promo'], (x['ctr'], x['views'], x['distinct_ad_ids'], 1))).collectAsMap()
len(document_id_popularity)
# -
document_id_popularity_broad = sc.broadcast(document_id_popularity)
# +
#document_id_popularity_df.count()
# +
#get_percentiles(document_id_popularity_df, 'clicks')
# +
#get_percentiles(document_id_popularity_df, 'views')
# -
# Summary statistics over per-document CTRs and view counts.
document_id_avg_ctr = sum(map(lambda x: x[0], document_id_popularity.values())) / float(len(document_id_popularity))
document_id_avg_ctr
document_id_weighted_avg_ctr = sum(list(map(lambda x: x[0]*x[1], document_id_popularity.values()))) / float(sum(list(map(lambda x: x[1], document_id_popularity.values()))))
document_id_weighted_avg_ctr
document_id_views_median = np.median(np.array(list(map(lambda x: x[1], document_id_popularity.values()))))
document_id_views_median
document_id_views_mean = sum(map(lambda x: x[1], document_id_popularity.values())) / float(len(document_id_popularity))
document_id_views_mean
# ### Average CTR by (doc_event, doc_ad)
# +
# CTR of each (document where the event happened, promoted document) pair.
doc_event_doc_ad_avg_ctr_df = train_set_df.groupBy('document_id_event', 'document_id_promo') \
    .agg(F.sum('clicked').alias('clicks'),
         F.count('*').alias('views'),
         F.countDistinct('ad_id').alias('distinct_ad_ids')) \
    .withColumn('ctr', ctr_udf('clicks','views'))
doc_event_doc_ad_avg_ctr = doc_event_doc_ad_avg_ctr_df.filter('views > 5') \
    .select('document_id_event', 'document_id_promo','ctr', 'views', 'distinct_ad_ids') \
    .rdd.map(lambda x: ((x['document_id_event'], x['document_id_promo']), (x['ctr'], x['views'], x['distinct_ad_ids'], 1))).collectAsMap()
len(doc_event_doc_ad_avg_ctr)
# -
doc_event_doc_ad_avg_ctr_broad = sc.broadcast(doc_event_doc_ad_avg_ctr)
# ### Average CTR by country, source_id
# +
# CTR per (event country, content source) pair.
source_id_by_country_popularity_df = train_set_df.select('clicked', 'source_id', 'event_country', 'ad_id') \
    .groupby('event_country', 'source_id').agg(F.sum('clicked').alias('clicks'),
                                               F.count('*').alias('views'),
                                               F.countDistinct('ad_id').alias('distinct_ad_ids')) \
    .withColumn('ctr', ctr_udf('clicks','views'))
#source_id_popularity = source_id_popularity_df.filter('views > 100 and source_id is not null').select('source_id', 'ctr').rdd.collectAsMap()
source_id_by_country_popularity = source_id_by_country_popularity_df.filter('views > 5 and source_id is not null and event_country <> ""').select('event_country', 'source_id', 'ctr', 'views', 'distinct_ad_ids') \
    .rdd.map(lambda x: ((x['event_country'], x['source_id']), (x['ctr'], x['views'], x['distinct_ad_ids'], 1))).collectAsMap()
len(source_id_by_country_popularity)
# -
source_id_by_country_popularity_broad = sc.broadcast(source_id_by_country_popularity)
# Summary statistics over per-(country, source) CTRs and view counts.
source_id_by_country_avg_ctr = sum(map(lambda x: x[0], source_id_by_country_popularity.values())) / float(len(source_id_by_country_popularity))
source_id_by_country_avg_ctr
source_id_by_country_weighted_avg_ctr = sum(map(lambda x: x[0]*x[1], source_id_by_country_popularity.values())) / float(sum(map(lambda x: x[1], source_id_by_country_popularity.values())))
source_id_by_country_weighted_avg_ctr
source_id_by_country_views_median = np.median(np.array(list(map(lambda x: x[1], source_id_by_country_popularity.values()))))
source_id_by_country_views_median
source_id_by_country_views_mean = sum(map(lambda x: x[1], source_id_by_country_popularity.values())) / float(len(source_id_by_country_popularity))
source_id_by_country_views_mean
# ### Average CTR by source_id
# +
# CTR per content source (site), regardless of country.
source_id_popularity_df = train_set_df.select('clicked', 'source_id', 'ad_id') \
    .groupby('source_id').agg(F.sum('clicked').alias('clicks'),
                              F.count('*').alias('views'),
                              F.countDistinct('ad_id').alias('distinct_ad_ids')) \
    .withColumn('ctr', ctr_udf('clicks','views'))
source_id_popularity = source_id_popularity_df.filter('views > 10 and source_id is not null').select('source_id', 'ctr', 'views', 'distinct_ad_ids') \
    .rdd.map(lambda x: (x['source_id'], (x['ctr'], x['views'], x['distinct_ad_ids'], 1))).collectAsMap()
len(source_id_popularity)
# -
source_id_popularity_broad = sc.broadcast(source_id_popularity)
# +
#source_id_popularity_df.count()
# +
#get_percentiles(source_id_popularity_df, 'clicks')
# +
#get_percentiles(source_id_popularity_df, 'views')
# -
#source_id_popularity = source_id_popularity_df.filter('views > 100 and source_id is not null').select('source_id', 'ctr').rdd.collectAsMap()
# ### Average CTR by publisher_id
# +
# CTR per publisher.
publisher_popularity_df = train_set_df.select('clicked', 'publisher_id', 'ad_id') \
    .groupby('publisher_id').agg(F.sum('clicked').alias('clicks'),
                                 F.count('*').alias('views'),
                                 F.countDistinct('ad_id').alias('distinct_ad_ids')) \
    .withColumn('ctr', ctr_udf('clicks','views'))
publisher_popularity = publisher_popularity_df.filter('views > 10 and publisher_id is not null').select('publisher_id', 'ctr', 'views', 'distinct_ad_ids') \
    .rdd.map(lambda x: (x['publisher_id'], (x['ctr'], x['views'], x['distinct_ad_ids'], 1))).collectAsMap()
len(publisher_popularity)
# -
publisher_popularity_broad = sc.broadcast(publisher_popularity)
# +
#publisher_popularity_df.count()
##863
# +
#get_percentiles(publisher_popularity_df, 'clicks')
# +
#get_percentiles(publisher_popularity_df, 'views')
# +
#publisher_id_popularity = publisher_popularity_df.filter('views > 100 and publisher_id is not null').select('publisher_id', 'ctr').rdd.collectAsMap()
#len(publisher_id_popularity)
##639
# -
# ### Average CTR by advertiser_id
# +
# CTR per advertiser.
advertiser_id_popularity_df = train_set_df.select('clicked', 'advertiser_id', 'ad_id') \
    .groupby('advertiser_id').agg(F.sum('clicked').alias('clicks'),
                                  F.count('*').alias('views'),
                                  F.countDistinct('ad_id').alias('distinct_ad_ids')) \
    .withColumn('ctr', ctr_udf('clicks','views'))
advertiser_id_popularity = advertiser_id_popularity_df.filter('views > 10 and advertiser_id is not null').select('advertiser_id', 'ctr', 'views', 'distinct_ad_ids') \
    .rdd.map(lambda x: (x['advertiser_id'], (x['ctr'], x['views'], x['distinct_ad_ids'], 1))).collectAsMap()
len(advertiser_id_popularity)
# -
advertiser_id_popularity_broad = sc.broadcast(advertiser_id_popularity)
# +
#advertiser_id_popularity_df.count()
##4063
# +
#get_percentiles(advertiser_id_popularity_df, 'clicks')
# +
#get_percentiles(advertiser_id_popularity_df, 'views')
# +
#advertiser_id_popularity = advertiser_id_popularity_df.filter('views > 100 and advertiser_id is not null').select('advertiser_id', 'ctr').rdd.collectAsMap()
#len(advertiser_id_popularity)
##3129
# -
# ### Average CTR by campaign_id
# +
# CTR per campaign.
campaign_id_popularity_df = train_set_df.select('clicked', 'campaign_id', 'ad_id') \
    .groupby('campaign_id').agg(F.sum('clicked').alias('clicks'),
                                F.count('*').alias('views'),
                                F.countDistinct('ad_id').alias('distinct_ad_ids')) \
    .withColumn('ctr', ctr_udf('clicks','views'))
campaign_id_popularity = campaign_id_popularity_df.filter('views > 10 and campaign_id is not null').select('campaign_id', 'ctr', 'views', 'distinct_ad_ids') \
    .rdd.map(lambda x: (x['campaign_id'], (x['ctr'], x['views'], x['distinct_ad_ids'], 1))).collectAsMap()
len(campaign_id_popularity)
# -
campaign_id_popularity_broad = sc.broadcast(campaign_id_popularity)
# +
#campaign_id_popularity_df.count()
##31390
# +
#get_percentiles(campaign_id_popularity_df, 'clicks')
# +
#get_percentiles(campaign_id_popularity_df, 'views')
# +
#campaign_id_popularity = campaign_id_popularity_df.filter('views > 100 and campaign_id is not null').select('campaign_id', 'ctr').rdd.collectAsMap()
#len(campaign_id_popularity)
##16097
# -
# ### Average CTR by category
# +
# CTR per promoted-document category; also keeps the mean category-confidence.
category_id_popularity_df = train_set_df.join(documents_categories_df.alias('cat_local'), on=F.col("document_id_promo") == F.col("cat_local.document_id_cat"), how='inner') \
    .select('clicked', 'category_id', 'confidence_level_cat', 'ad_id') \
    .groupby('category_id').agg(F.sum('clicked').alias('clicks'),
                                F.count('*').alias('views'),
                                F.mean('confidence_level_cat').alias('avg_confidence_level_cat'),
                                F.countDistinct('ad_id').alias('distinct_ad_ids')) \
    .withColumn('ctr', ctr_udf('clicks','views'))
category_id_popularity = category_id_popularity_df.filter('views > 10').select('category_id', 'ctr', 'views', 'avg_confidence_level_cat', 'distinct_ad_ids') \
    .rdd.map(lambda x: (x['category_id'], (x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_cat']))).collectAsMap()
len(category_id_popularity)
# -
category_id_popularity_broad = sc.broadcast(category_id_popularity)
list(category_id_popularity.values())[:10]
np.median(np.array(list(map(lambda x: x[1], category_id_popularity.values()))))
sum(map(lambda x: x[1], category_id_popularity.values())) / float(len(category_id_popularity))
# +
# There seems to be a hierarchy in the categories, judging by the code pattern...
#category_id_popularity
# -
# ### Average CTR by (country, category)
# +
category_id_by_country_popularity_df = train_set_df.join(documents_categories_df.alias('cat_local'), on=F.col("document_id_promo") == F.col("cat_local.document_id_cat"), how='inner') \
    .select('clicked', 'category_id', 'confidence_level_cat', 'event_country', 'ad_id') \
    .groupby('event_country','category_id').agg(F.sum('clicked').alias('clicks'),
                                                F.count('*').alias('views'),
                                                F.mean('confidence_level_cat').alias('avg_confidence_level_cat'),
                                                F.countDistinct('ad_id').alias('distinct_ad_ids')) \
    .withColumn('ctr', ctr_udf('clicks','views'))
category_id_by_country_popularity = category_id_by_country_popularity_df.filter('views > 10 and event_country <> ""').select('event_country', 'category_id', 'ctr', 'views', 'avg_confidence_level_cat', 'distinct_ad_ids') \
    .rdd.map(lambda x: ((x['event_country'], x['category_id']), (x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_cat']))).collectAsMap()
len(category_id_by_country_popularity)
# -
category_id_by_country_popularity_broad = sc.broadcast(category_id_by_country_popularity)
# ### Average CTR by Topic
# CTR per promoted-document topic; keeps the mean topic-confidence as well.
topic_id_popularity_df = train_set_df.join(documents_topics_df.alias('top_local'), on=F.col("document_id_promo") == F.col("top_local.document_id_top"), how='inner') \
    .select('clicked', 'topic_id', 'confidence_level_top', 'ad_id') \
    .groupby('topic_id').agg(F.sum('clicked').alias('clicks'),
                             F.count('*').alias('views'),
                             F.mean('confidence_level_top').alias('avg_confidence_level_top'),
                             F.countDistinct('ad_id').alias('distinct_ad_ids')) \
    .withColumn('ctr', ctr_udf('clicks','views'))
topic_id_popularity = topic_id_popularity_df.filter('views > 10').select('topic_id', 'ctr', 'views', 'avg_confidence_level_top', 'distinct_ad_ids') \
    .rdd.map(lambda x: (x['topic_id'], (x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_top']))).collectAsMap()
len(topic_id_popularity)
topic_id_popularity_broad = sc.broadcast(topic_id_popularity)
sum(map(lambda x: x[1], topic_id_popularity.values())) / float(len(topic_id_popularity))
sum(map(lambda x: x[2]*x[1], topic_id_popularity.values())) / float(len(topic_id_popularity))
# ### Average CTR by (country, topic)
# +
topic_id_by_country_popularity_df = train_set_df.join(documents_topics_df.alias('top_local'), on=F.col("document_id_promo") == F.col("top_local.document_id_top"), how='inner') \
    .select('clicked', 'topic_id', 'confidence_level_top','event_country', 'ad_id') \
    .groupby('event_country','topic_id').agg(F.sum('clicked').alias('clicks'),
                                             F.count('*').alias('views'),
                                             F.mean('confidence_level_top').alias('avg_confidence_level_top'),
                                             F.countDistinct('ad_id').alias('distinct_ad_ids')) \
    .withColumn('ctr', ctr_udf('clicks','views'))
# NOTE: the doubled "id_id" in this name is kept — later feature code looks
# up topic_id_id_by_country_popularity_broad under this exact name.
topic_id_id_by_country_popularity = topic_id_by_country_popularity_df.filter('views > 10 and event_country <> ""').select('event_country', 'topic_id', 'ctr', 'views', 'avg_confidence_level_top', 'distinct_ad_ids') \
    .rdd.map(lambda x: ((x['event_country'], x['topic_id']), (x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_top']))).collectAsMap()
len(topic_id_id_by_country_popularity)
# -
topic_id_id_by_country_popularity_broad = sc.broadcast(topic_id_id_by_country_popularity)
# ### Average CTR by Entity
# +
# CTR per promoted-document entity; keeps the mean entity-confidence as well.
entity_id_popularity_df = train_set_df.join(documents_entities_df.alias('ent_local'), on=F.col("document_id_promo") == F.col("ent_local.document_id_ent"), how='inner') \
    .select('clicked', 'entity_id', 'confidence_level_ent', 'ad_id') \
    .groupby('entity_id').agg(F.sum('clicked').alias('clicks'),
                              F.count('*').alias('views'),
                              F.mean('confidence_level_ent').alias('avg_confidence_level_ent'),
                              F.countDistinct('ad_id').alias('distinct_ad_ids')) \
    .withColumn('ctr', ctr_udf('clicks','views'))
entity_id_popularity = entity_id_popularity_df.filter('views > 5').select('entity_id', 'ctr', 'views', 'avg_confidence_level_ent', 'distinct_ad_ids') \
    .rdd.map(lambda x: (x['entity_id'], (x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_ent']))).collectAsMap()
len(entity_id_popularity)
# -
entity_id_popularity_broad = sc.broadcast(entity_id_popularity)
np.median(np.array(list(map(lambda x: x[1], entity_id_popularity.values()))))
sum(map(lambda x: x[1], entity_id_popularity.values())) / float(len(entity_id_popularity))
# ### Average CTR by (country, entity)
# +
entity_id_by_country_popularity_df = train_set_df.join(documents_entities_df.alias('ent_local'), on=F.col("document_id_promo") == F.col("ent_local.document_id_ent"), how='inner') \
    .select('clicked', 'entity_id', 'event_country', 'confidence_level_ent','ad_id') \
    .groupby('event_country','entity_id').agg(F.sum('clicked').alias('clicks'),
                                              F.count('*').alias('views'),
                                              F.mean('confidence_level_ent').alias('avg_confidence_level_ent'),
                                              F.countDistinct('ad_id').alias('distinct_ad_ids')) \
    .withColumn('ctr', ctr_udf('clicks','views'))
entity_id_by_country_popularity = entity_id_by_country_popularity_df.filter('views > 5 and event_country <> ""').select('event_country', 'entity_id', 'ctr', 'views', 'avg_confidence_level_ent', 'distinct_ad_ids') \
    .rdd.map(lambda x: ((x['event_country'], x['entity_id']), (x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_ent']))).collectAsMap()
len(entity_id_by_country_popularity)
# -
entity_id_by_country_popularity_broad = sc.broadcast(entity_id_by_country_popularity)
# ### Loading # docs by categories, topics, entities
#import cPickle
import _pickle as cPickle
# Suffix distinguishing auxiliary files built for the evaluation split.
df_filenames_suffix = ''
if evaluation:
    df_filenames_suffix = '_eval'
# Load pre-computed document counts per category/topic/entity (built in an
# earlier preprocessing step and pickled to aux_data/).
with open('aux_data/categories_docs_counts'+df_filenames_suffix+'.pickle', 'rb') as input_file:
    categories_docs_counts = cPickle.load(input_file)
len(categories_docs_counts)
with open('aux_data/topics_docs_counts'+df_filenames_suffix+'.pickle', 'rb') as input_file:
    topics_docs_counts = cPickle.load(input_file)
len(topics_docs_counts)
with open('aux_data/entities_docs_counts'+df_filenames_suffix+'.pickle', 'rb') as input_file:
    entities_docs_counts = cPickle.load(input_file)
len(entities_docs_counts)
# Total number of documents; used as the numerator of the IDF terms in the
# cosine-similarity functions below.
documents_total = documents_meta_df.count()
documents_total
# ## Exploring Publish Time
# Median publish time of promoted documents (used below as a default/reference).
publish_times_df = train_set_df.filter('publish_time is not null').select('document_id_promo','publish_time').distinct().select(F.col('publish_time').cast(IntegerType()))
publish_time_percentiles = get_percentiles(publish_times_df, 'publish_time', quantiles_levels=[0.5], max_error_rate=0.001)
publish_time_percentiles
publish_time_median = int(publish_time_percentiles[0.5])
datetime.datetime.utcfromtimestamp(publish_time_median)
# +
def get_days_diff(newer_timestamp, older_timestamp):
    """Difference between two unix timestamps (seconds), expressed in days."""
    sec_diff = newer_timestamp - older_timestamp
    return sec_diff / 60 / 60 / 24


def get_time_decay_factor(timestamp, timestamp_ref=None, alpha=0.001):
    """Exponential time-decay factor in (0, 1]: 1 / (1 + alpha) ** days_elapsed.

    timestamp/timestamp_ref are unix timestamps in seconds; timestamp_ref
    defaults to now. The zero-denominator guard only matters when the power
    underflows for extreme day differences.
    """
    if timestamp_ref is None:  # identity test instead of `== None` (PEP 8)
        timestamp_ref = time.time()
    days_diff = get_days_diff(timestamp_ref, timestamp)
    denominator = math.pow(1 + alpha, days_diff)
    if denominator != 0:
        return 1.0 / denominator
    else:
        return 0.0
# -
def convert_odd_timestamp(timestamp_ms_relative):
    """Convert the dataset's relative millisecond timestamp to a datetime.

    The competition data stores timestamps as milliseconds relative to a
    fixed epoch offset; adding the delta recovers absolute unix time.
    """
    TIMESTAMP_DELTA = 1465876799998
    absolute_ms = int(timestamp_ms_relative) + TIMESTAMP_DELTA
    return datetime.datetime.fromtimestamp(absolute_ms // 1000)
# Decay rate used for all time-decay features.
TIME_DECAY_ALPHA = 0.0005
# +
# Sanity check: print the decay factor at several reference ages.
ref_dates = [
    1476714880, # 7 days
    1474727680, # 30 days
    1469370880, # 90 days
    1461508480, # 180 days
    1445697280, # 1 year
    1414161280 # 2 years
]
for d in ref_dates:
    print(datetime.datetime.utcfromtimestamp(d), get_time_decay_factor(d, alpha=TIME_DECAY_ALPHA))
# -
# ### Get local time
# Default UTC offset (hours) assumed for events: US Eastern with DST.
DEFAULT_TZ_EST = -4.0
def get_local_utc_bst_tz(event_country, event_country_state):
    """Resolve the UTC offset (hours) for an event's country/state.

    Falls back to DEFAULT_TZ_EST when the country is unknown; for US and CA
    a state-level offset from the broadcast lookup tables takes precedence.
    """
    local_tz = DEFAULT_TZ_EST
    if len(event_country) > 0:
        if event_country in countries_utc_dst_broad.value:
            local_tz = countries_utc_dst_broad.value[event_country]
        if len(event_country_state)>2:
            # geo string looks like 'US>CA'; the state code sits at [3:5]
            state = event_country_state[3:5]
            if event_country == 'US':
                if state in us_states_utc_dst_broad.value:
                    local_tz = us_states_utc_dst_broad.value[state]
            elif event_country == 'CA':
                if state in ca_countries_utc_dst_broad.value:
                    local_tz = ca_countries_utc_dst_broad.value[state]
    return float(local_tz)
# +
# Coarse day-period bins for the event hour.
hour_bins_dict = {'EARLY_MORNING': 1,
                  'MORNING': 2,
                  'MIDDAY': 3,
                  'AFTERNOON': 4,
                  'EVENING': 5,
                  'NIGHT': 6}
hour_bins_values = sorted(hour_bins_dict.values())
# -
def get_hour_bin(hour):
    """Map an hour of day (0-23) to one of the six coarse period bins."""
    if 5 <= hour < 8:
        return hour_bins_dict['EARLY_MORNING']
    if 8 <= hour < 11:
        return hour_bins_dict['MORNING']
    if 11 <= hour < 14:
        return hour_bins_dict['MIDDAY']
    if 14 <= hour < 19:
        return hour_bins_dict['AFTERNOON']
    if 19 <= hour < 22:
        return hour_bins_dict['EVENING']
    return hour_bins_dict['NIGHT']
def get_local_datetime(dt, event_country, event_country_state):
    """Shift *dt* (expressed at the DEFAULT_TZ_EST offset) to the event's
    local timezone, resolved via get_local_utc_bst_tz()."""
    offset_hours = get_local_utc_bst_tz(event_country, event_country_state) - DEFAULT_TZ_EST
    return dt + datetime.timedelta(hours=offset_hours)

get_local_datetime(datetime.datetime.now(), 'US', 'US>CA')
def is_weekend(dt):
    """True for Saturday or Sunday (ISO weekday 6 or 7)."""
    return dt.isoweekday() >= 6

is_weekend(datetime.datetime(2016, 6, 14))
# ## Average CTR functions
# Reference instant for the decay features (end of the train period) and the
# default decay factor, computed at the median document publish time.
timestamp_ref = date_time_to_unix_epoch(datetime.datetime(2016, 6, 29, 3, 59, 59))
decay_factor_default = get_time_decay_factor(publish_time_median, timestamp_ref, alpha=TIME_DECAY_ALPHA)
print("decay_factor_default", decay_factor_default)
# +
def get_confidence_sample_size(sample, max_for_reference=100000):
    """Log-scaled confidence in [0, 1] for a sample size, saturating at
    *max_for_reference*.

    NOTE: the reference log uses base 2 while the numerator uses base e,
    which slightly inflates scores; this mismatch is intentional (it scored
    slightly better), so it is preserved.
    """
    if sample >= max_for_reference:
        # Cap large samples to exactly 1.0 (also avoids values above 1).
        return 1.0
    reference = math.log(1 + max_for_reference, 2)
    return math.log(1 + sample) / float(reference)

for i in [0,0.5,1,2,3,4,5,10,20,30,100,200,300,1000,2000,3000,10000,20000,30000, 50000, 90000, 100000, 500000, 900000, 1000000, 2171607]:
    print(i, get_confidence_sample_size(i))
# -
def get_popularity(an_id, a_dict):
    """Look *an_id* up in a popularity dict of
    (ctr, views, distinct_ad_ids, multiplier) tuples.

    Returns (ctr, confidence) where confidence scales with views per
    distinct ad, or (None, None) when the id is unknown.
    """
    entry = a_dict.get(an_id)
    if entry is None:
        return None, None
    ctr, views, distinct_ids, multiplier = entry
    return ctr, get_confidence_sample_size(views / float(distinct_ids)) * multiplier
def get_weighted_avg_popularity_from_list(ids_list, confidence_ids_list, pop_dict):
    """Confidence-weighted average CTR over a list of aspect ids.

    Each id is resolved through get_popularity(); unknown ids are dropped.
    Returns (weighted_avg_ctr, max_confidence) or (None, None) when no id
    is present in *pop_dict*.
    """
    # `is not None` replaces the original `!= None` (PEP 8 identity test).
    pops = list(filter(lambda x: x[0][0] is not None,
                       [(get_popularity(an_id, pop_dict), confidence)
                        for an_id, confidence in zip(ids_list, confidence_ids_list)]))
    if len(pops) > 0:
        # Weight each CTR by (lookup confidence * aspect confidence).
        weighted_avg = sum(map(lambda x: x[0][0]*x[0][1]*x[1], pops)) / float(sum(map(lambda x: x[0][1]*x[1], pops)))
        confidence = max(map(lambda x: x[0][1]*x[1], pops))
        return weighted_avg, confidence
    else:
        return None, None
def get_weighted_avg_country_popularity_from_list(event_country, ids_list, confidence_ids_list, pop_dict):
    """Country-conditioned weighted average CTR over a list of aspect ids.

    Same scheme as get_weighted_avg_popularity_from_list(), but ids are
    looked up with a composite (event_country, id) key.
    """
    # `is not None` replaces the original `!= None` (PEP 8 identity test).
    pops = list(filter(lambda x: x[0][0] is not None,
                       [(get_popularity((event_country, an_id), pop_dict), confidence)
                        for an_id, confidence in zip(ids_list, confidence_ids_list)]))
    if len(pops) > 0:
        weighted_avg = sum(map(lambda x: x[0][0]*x[0][1]*x[1], pops)) / float(sum(map(lambda x: x[0][1]*x[1], pops)))
        confidence = max(map(lambda x: x[0][1]*x[1], pops))
        return weighted_avg, confidence
    else:
        return None, None
def get_popularity_score(event_country, ad_id, document_id, source_id,
                         publisher_id, advertiser_id, campaign_id, document_id_event,
                         category_ids_by_doc, cat_confidence_level_by_doc,
                         topic_ids_by_doc, top_confidence_level_by_doc,
                         entity_ids_by_doc, ent_confidence_level_by_doc,
                         output_detailed_list=False):
    """Compute popularity (average-CTR) features for one (ad, event) pair.

    Probes every broadcast popularity dictionary — ad, promoted document,
    (event doc, ad doc) pair, source, publisher, advertiser, campaign, and
    the promoted document's entities/topics/categories (each optionally
    conditioned on the event country) — collecting every match as a
    (feature_name, avg_ctr, confidence) triple.

    Returns the full triple list when output_detailed_list is True;
    otherwise a (confidence-weighted average CTR, max confidence) pair,
    or (None, None) when no dictionary matched.

    All ``!= None`` / ``== None`` comparisons were replaced by identity
    tests (PEP 8); values involved are floats or None, so behavior is
    unchanged.
    """
    probs = []

    avg_ctr, confidence = get_popularity(ad_id, ad_id_popularity_broad.value)
    if avg_ctr is not None:
        probs.append(('pop_ad_id', avg_ctr, confidence))

    avg_ctr, confidence = get_popularity(document_id, document_id_popularity_broad.value)
    if avg_ctr is not None:
        probs.append(('pop_document_id', avg_ctr, confidence))

    avg_ctr, confidence = get_popularity((document_id_event, document_id), doc_event_doc_ad_avg_ctr_broad.value)
    if avg_ctr is not None:
        probs.append(('pop_doc_event_doc_ad', avg_ctr, confidence))

    if source_id != -1:
        # Prefer the country-conditioned CTR, but also emit the global one.
        avg_ctr = None
        if event_country != '':
            avg_ctr, confidence = get_popularity((event_country, source_id), source_id_by_country_popularity_broad.value)
        if avg_ctr is not None:
            probs.append(('pop_source_id_country', avg_ctr, confidence))
        avg_ctr, confidence = get_popularity(source_id, source_id_popularity_broad.value)
        if avg_ctr is not None:
            probs.append(('pop_source_id', avg_ctr, confidence))

    if publisher_id is not None:
        avg_ctr, confidence = get_popularity(publisher_id, publisher_popularity_broad.value)
        if avg_ctr is not None:
            probs.append(('pop_publisher_id', avg_ctr, confidence))

    if advertiser_id is not None:
        avg_ctr, confidence = get_popularity(advertiser_id, advertiser_id_popularity_broad.value)
        if avg_ctr is not None:
            probs.append(('pop_advertiser_id', avg_ctr, confidence))

    if campaign_id is not None:
        avg_ctr, confidence = get_popularity(campaign_id, campaign_id_popularity_broad.value)
        if avg_ctr is not None:
            # NOTE: 'pop_campain_id' (sic) is kept byte-for-byte; downstream
            # consumers reference this exact feature name.
            probs.append(('pop_campain_id', avg_ctr, confidence))

    if len(entity_ids_by_doc) > 0:
        avg_ctr = None
        if event_country != '':
            avg_ctr, confidence = get_weighted_avg_country_popularity_from_list(
                event_country, entity_ids_by_doc, ent_confidence_level_by_doc,
                entity_id_by_country_popularity_broad.value)
        if avg_ctr is not None:
            probs.append(('pop_entity_id_country', avg_ctr, confidence))
        avg_ctr, confidence = get_weighted_avg_popularity_from_list(
            entity_ids_by_doc, ent_confidence_level_by_doc,
            entity_id_popularity_broad.value)
        if avg_ctr is not None:
            probs.append(('pop_entity_id', avg_ctr, confidence))

    if len(topic_ids_by_doc) > 0:
        avg_ctr = None
        if event_country != '':
            avg_ctr, confidence = get_weighted_avg_country_popularity_from_list(
                event_country, topic_ids_by_doc, top_confidence_level_by_doc,
                topic_id_id_by_country_popularity_broad.value)
        if avg_ctr is not None:
            probs.append(('pop_topic_id_country', avg_ctr, confidence))
        avg_ctr, confidence = get_weighted_avg_popularity_from_list(
            topic_ids_by_doc, top_confidence_level_by_doc,
            topic_id_popularity_broad.value)
        if avg_ctr is not None:
            probs.append(('pop_topic_id', avg_ctr, confidence))

    if len(category_ids_by_doc) > 0:
        avg_ctr = None
        if event_country != '':
            avg_ctr, confidence = get_weighted_avg_country_popularity_from_list(
                event_country, category_ids_by_doc, cat_confidence_level_by_doc,
                category_id_by_country_popularity_broad.value)
        if avg_ctr is not None:
            probs.append(('pop_category_id_country', avg_ctr, confidence))
        avg_ctr, confidence = get_weighted_avg_popularity_from_list(
            category_ids_by_doc, cat_confidence_level_by_doc,
            category_id_popularity_broad.value)
        if avg_ctr is not None:
            probs.append(('pop_category_id', avg_ctr, confidence))

    #print("[get_popularity_score] probs", probs)
    if output_detailed_list:
        return probs
    else:
        if len(probs) > 0:
            #weighted_avg_probs_by_confidence = sum(map(lambda x: x[1] * math.log(1+x[2],2), probs)) / float(sum(map(lambda x: math.log(1+x[2],2), probs)))
            weighted_avg_probs_by_confidence = sum(map(lambda x: x[1] * x[2], probs)) / float(sum(map(lambda x: x[2], probs)))
            confidence = max(map(lambda x: x[2], probs))
            return weighted_avg_probs_by_confidence, confidence
        else:
            return None, None
# ## Content-Based similarity functions
def cosine_similarity_dicts(dict1, dict2):
    """Cosine similarity between two sparse {key: weight} vectors.

    Returns (similarity, number_of_common_keys). Robustness fix: when either
    vector has zero norm (empty dict or all-zero weights) the original code
    raised ZeroDivisionError; we now return 0.0 similarity instead.
    """
    dict1_norm = math.sqrt(sum(v ** 2 for v in dict1.values()))
    dict2_norm = math.sqrt(sum(v ** 2 for v in dict2.values()))

    sum_common_aspects = 0.0
    intersections = 0
    for key in dict1:
        if key in dict2:
            sum_common_aspects += dict1[key] * dict2[key]
            intersections += 1

    denominator = dict1_norm * dict2_norm
    if denominator == 0:
        return 0.0, intersections
    return sum_common_aspects / denominator, intersections
def cosine_similarity_user_docs_aspects(user_aspect_profile, doc_aspect_ids, doc_aspects_confidence, aspect_docs_counts):
    """Cosine similarity between a user's aspect profile and a document's aspects.

    user_aspect_profile: {aspect_id: (tfidf, confidence)}; doc aspects come as
    parallel id/confidence lists. aspect_docs_counts maps aspect_id -> number
    of documents with that aspect (IDF denominator; documents_total is the
    module-level numerator). Returns (similarity, confidence) or (None, None)
    when either side is empty. ``== None`` was replaced by ``is None`` (PEP 8).
    """
    if user_aspect_profile is None or len(user_aspect_profile) == 0 or doc_aspect_ids is None or len(doc_aspect_ids) == 0:
        return None, None

    doc_aspects = dict(zip(doc_aspect_ids, doc_aspects_confidence))
    doc_aspects_tfidf_confid = {}
    for key in doc_aspects:
        tf = 1.0
        # Double log heavily dampens the IDF term.
        idf = math.log(math.log(documents_total / float(aspect_docs_counts[key])))
        confidence = doc_aspects[key]
        doc_aspects_tfidf_confid[key] = tf * idf * confidence

    user_aspects_tfidf_confid = {}
    for key in user_aspect_profile:
        tfidf = user_aspect_profile[key][0]
        confidence = user_aspect_profile[key][1]
        user_aspects_tfidf_confid[key] = tfidf * confidence

    similarity, intersections = cosine_similarity_dicts(doc_aspects_tfidf_confid, user_aspects_tfidf_confid)

    if intersections > 0:
        #P(A intersect B)_intersections = P(A)^intersections * P(B)^intersections
        random_error = math.pow(len(doc_aspects) / float(len(aspect_docs_counts)), intersections) * \
                       math.pow(len(user_aspect_profile) / float(len(aspect_docs_counts)), intersections)
    else:
        #P(A not intersect B) = 1 - P(A intersect B)
        random_error = 1 - ((len(doc_aspects) / float(len(aspect_docs_counts))) *
                            (len(user_aspect_profile) / float(len(aspect_docs_counts))))
    # Confidence is the complement of the chance the overlap is random
    # (duplicated assignment in the original merged into one).
    confidence = 1.0 - random_error
    return similarity, confidence
def cosine_similarity_doc_event_doc_ad_aspects(doc_event_aspect_ids, doc_event_aspects_confidence,
                                               doc_ad_aspect_ids, doc_ad_aspects_confidence,
                                               aspect_docs_counts):
    """Cosine similarity between the landing-page document's aspects and the
    advertised document's aspects, weighted by TF-IDF * confidence.

    Returns (similarity, confidence) or (None, None) when either side is
    empty.  Depends on module globals: documents_total.
    NOTE: the chance-overlap estimate intentionally uses the raw id-list
    lengths (duplicates included), not the deduplicated weight dicts.
    """
    if doc_event_aspect_ids is None or len(doc_event_aspect_ids) == 0 \
            or doc_ad_aspect_ids is None or len(doc_ad_aspect_ids) == 0:
        return None, None

    def _tfidf_confidence_weights(aspect_ids, confidences):
        # tf is constant (1.0): weight = idf * annotation confidence.
        # Duplicate ids keep the last confidence seen, like dict(zip(...)).
        weights = {}
        for aspect_id, aspect_conf in zip(aspect_ids, confidences):
            idf = math.log(math.log(documents_total / float(aspect_docs_counts[aspect_id])))
            weights[aspect_id] = idf * aspect_conf
        return weights

    event_weights = _tfidf_confidence_weights(doc_event_aspect_ids, doc_event_aspects_confidence)
    ad_weights = _tfidf_confidence_weights(doc_ad_aspect_ids, doc_ad_aspects_confidence)

    similarity, n_common = cosine_similarity_dicts(event_weights, ad_weights)

    total_aspects = float(len(aspect_docs_counts))
    p_event = len(doc_event_aspect_ids) / total_aspects
    p_ad = len(doc_ad_aspect_ids) / total_aspects
    if n_common > 0:
        # P(A intersect B)_intersections = P(A)^intersections * P(B)^intersections
        random_error = math.pow(p_event, n_common) * math.pow(p_ad, n_common)
    else:
        # P(A not intersect B) = 1 - P(A intersect B)
        random_error = 1 - (p_event * p_ad)
    return similarity, 1.0 - random_error
def get_user_cb_interest_score(user_views_count, user_categories, user_topics, user_entities,
                               timestamp_event, category_ids_by_doc, cat_confidence_level_by_doc,
                               topic_ids_by_doc, top_confidence_level_by_doc,
                               entity_ids_by_doc, ent_confidence_level_by_doc,
                               output_detailed_list=False):
    """Content-based interest scores between a user profile and an ad document.

    Computes one similarity per aspect type (categories, topics, entities).
    When output_detailed_list is True, returns the list of
    (feature_name, similarity, confidence) tuples; otherwise returns a single
    confidence-weighted average similarity and its mean confidence, or
    (None, None) when no aspect produced a score.
    Depends on module globals: categories_docs_counts, topics_docs_counts,
    entities_docs_counts.
    """
    aspect_inputs = [
        ('user_doc_ad_sim_categories', user_categories,
         category_ids_by_doc, cat_confidence_level_by_doc, categories_docs_counts),
        ('user_doc_ad_sim_topics', user_topics,
         topic_ids_by_doc, top_confidence_level_by_doc, topics_docs_counts),
        ('user_doc_ad_sim_entities', user_entities,
         entity_ids_by_doc, ent_confidence_level_by_doc, entities_docs_counts),
    ]

    sims = []
    for feature_name, profile, doc_ids, doc_confidences, docs_counts in aspect_inputs:
        similarity, sim_confidence = cosine_similarity_user_docs_aspects(
            profile, doc_ids, doc_confidences, docs_counts)
        if similarity is not None:
            sims.append((feature_name, similarity, sim_confidence))

    if output_detailed_list:
        return sims
    if not sims:
        return None, None
    weighted_avg_sim = sum(sim * conf for _, sim, conf in sims) \
        / float(sum(conf for _, _, conf in sims))
    mean_confidence = sum(conf for _, _, conf in sims) / float(len(sims))
    return weighted_avg_sim, mean_confidence
def get_doc_event_doc_ad_cb_similarity_score(doc_event_category_ids, doc_event_cat_confidence_levels,
                                             doc_event_topic_ids, doc_event_top_confidence_levels,
                                             doc_event_entity_ids, doc_event_ent_confidence_levels,
                                             doc_ad_category_ids, doc_ad_cat_confidence_levels,
                                             doc_ad_topic_ids, doc_ad_top_confidence_levels,
                                             doc_ad_entity_ids, doc_ad_ent_confidence_levels,
                                             output_detailed_list=False):
    """Content-based similarity between the landing document and the ad document.

    One similarity per aspect type (categories, topics, entities).  When
    output_detailed_list is True, returns the list of
    (feature_name, similarity, confidence) tuples; otherwise a single
    confidence-weighted average similarity plus its mean confidence, or
    (None, None) when nothing could be scored.
    Depends on module globals: categories_docs_counts, topics_docs_counts,
    entities_docs_counts.
    """
    aspect_inputs = [
        ('doc_event_doc_ad_sim_categories',
         doc_event_category_ids, doc_event_cat_confidence_levels,
         doc_ad_category_ids, doc_ad_cat_confidence_levels, categories_docs_counts),
        ('doc_event_doc_ad_sim_topics',
         doc_event_topic_ids, doc_event_top_confidence_levels,
         doc_ad_topic_ids, doc_ad_top_confidence_levels, topics_docs_counts),
        ('doc_event_doc_ad_sim_entities',
         doc_event_entity_ids, doc_event_ent_confidence_levels,
         doc_ad_entity_ids, doc_ad_ent_confidence_levels, entities_docs_counts),
    ]

    sims = []
    for feature_name, ev_ids, ev_confs, ad_ids, ad_confs, docs_counts in aspect_inputs:
        similarity, sim_confidence = cosine_similarity_doc_event_doc_ad_aspects(
            ev_ids, ev_confs, ad_ids, ad_confs, docs_counts)
        if similarity is not None:
            sims.append((feature_name, similarity, sim_confidence))

    if output_detailed_list:
        return sims
    if not sims:
        return None, None
    weighted_avg_sim = sum(sim * conf for _, sim, conf in sims) \
        / float(sum(conf for _, _, conf in sims))
    mean_confidence = sum(conf for _, _, conf in sims) / float(len(sims))
    return weighted_avg_sim, mean_confidence
# # Feature Vector export
# Boolean features (exported as 0.0/1.0 floats in the vector).
bool_feature_names = ['event_weekend',
                      'user_has_already_viewed_doc']
# Integer-valued count / recency / hour-bin features.
int_feature_names = ['user_views',
                    'ad_views',
                    'doc_views',
                    'doc_event_days_since_published',
                    'doc_event_hour',
                    'doc_ad_days_since_published',
                    ]
# Float features come in triples: the raw score, its confidence ('_conf'),
# and score * confidence ('_conf_multipl') — matching how the score lists
# returned by get_popularity_score / the CB similarity functions are exported.
float_feature_names = [
                'pop_ad_id',
                'pop_ad_id_conf',
                'pop_ad_id_conf_multipl',
                'pop_document_id',
                'pop_document_id_conf',
                'pop_document_id_conf_multipl',
                'pop_publisher_id',
                'pop_publisher_id_conf',
                'pop_publisher_id_conf_multipl',
                'pop_advertiser_id',
                'pop_advertiser_id_conf',
                'pop_advertiser_id_conf_multipl',
                'pop_campain_id',
                'pop_campain_id_conf',
                'pop_campain_id_conf_multipl',
                'pop_doc_event_doc_ad',
                'pop_doc_event_doc_ad_conf',
                'pop_doc_event_doc_ad_conf_multipl',
                'pop_source_id',
                'pop_source_id_conf',
                'pop_source_id_conf_multipl',
                'pop_source_id_country',
                'pop_source_id_country_conf',
                'pop_source_id_country_conf_multipl',
                'pop_entity_id',
                'pop_entity_id_conf',
                'pop_entity_id_conf_multipl',
                'pop_entity_id_country',
                'pop_entity_id_country_conf',
                'pop_entity_id_country_conf_multipl',
                'pop_topic_id',
                'pop_topic_id_conf',
                'pop_topic_id_conf_multipl',
                'pop_topic_id_country',
                'pop_topic_id_country_conf',
                'pop_topic_id_country_conf_multipl',
                'pop_category_id',
                'pop_category_id_conf',
                'pop_category_id_conf_multipl',
                'pop_category_id_country',
                'pop_category_id_country_conf',
                'pop_category_id_country_conf_multipl',
                'user_doc_ad_sim_categories',
                'user_doc_ad_sim_categories_conf',
                'user_doc_ad_sim_categories_conf_multipl',
                'user_doc_ad_sim_topics',
                'user_doc_ad_sim_topics_conf',
                'user_doc_ad_sim_topics_conf_multipl',
                'user_doc_ad_sim_entities',
                'user_doc_ad_sim_entities_conf',
                'user_doc_ad_sim_entities_conf_multipl',
                'doc_event_doc_ad_sim_categories',
                'doc_event_doc_ad_sim_categories_conf',
                'doc_event_doc_ad_sim_categories_conf_multipl',
                'doc_event_doc_ad_sim_topics',
                'doc_event_doc_ad_sim_topics_conf',
                'doc_event_doc_ad_sim_topics_conf_multipl',
                'doc_event_doc_ad_sim_entities',
                'doc_event_doc_ad_sim_entities_conf',
                'doc_event_doc_ad_sim_entities_conf_multipl'
                ]
# Categorical field-name constants, shared by both the OHE and the
# integral (ordinal-coded) feature-vector builders below.
TRAFFIC_SOURCE_FV='traffic_source'
EVENT_HOUR_FV='event_hour'
EVENT_COUNTRY_FV = 'event_country'
EVENT_COUNTRY_STATE_FV = 'event_country_state'
EVENT_GEO_LOCATION_FV = 'event_geo_location'
EVENT_PLATFORM_FV = 'event_platform'
AD_ADVERTISER_FV = 'ad_advertiser'
DOC_AD_SOURCE_ID_FV='doc_ad_source_id'
DOC_AD_PUBLISHER_ID_FV='doc_ad_publisher_id'
DOC_EVENT_SOURCE_ID_FV='doc_event_source_id'
DOC_EVENT_PUBLISHER_ID_FV='doc_event_publisher_id'
DOC_AD_CATEGORY_ID_FV='doc_ad_category_id'
DOC_AD_TOPIC_ID_FV='doc_ad_topic_id'
DOC_AD_ENTITY_ID_FV='doc_ad_entity_id'
DOC_EVENT_CATEGORY_ID_FV='doc_event_category_id'
DOC_EVENT_TOPIC_ID_FV='doc_event_topic_id'
DOC_EVENT_ENTITY_ID_FV='doc_event_entity_id'
# ### Configuring feature vector
# +
# Categorical features stored as integral codes (one column per feature);
# multi-valued aspects (categories/topics/entities) get a fixed number of
# ranked slots filled by set_feature_vector_cat_top_multi_values_integral.
category_feature_names_integral = ['ad_advertiser',
                                 'doc_ad_category_id_1',
                                 'doc_ad_category_id_2',
                                 'doc_ad_category_id_3',
                                 'doc_ad_topic_id_1',
                                 'doc_ad_topic_id_2',
                                 'doc_ad_topic_id_3',
                                 'doc_ad_entity_id_1',
                                 'doc_ad_entity_id_2',
                                 'doc_ad_entity_id_3',
                                 'doc_ad_entity_id_4',
                                 'doc_ad_entity_id_5',
                                 'doc_ad_entity_id_6',
                                 'doc_ad_publisher_id',
                                 'doc_ad_source_id',
                                 'doc_event_category_id_1',
                                 'doc_event_category_id_2',
                                 'doc_event_category_id_3',
                                 'doc_event_topic_id_1',
                                 'doc_event_topic_id_2',
                                 'doc_event_topic_id_3',
                                 'doc_event_entity_id_1',
                                 'doc_event_entity_id_2',
                                 'doc_event_entity_id_3',
                                 'doc_event_entity_id_4',
                                 'doc_event_entity_id_5',
                                 'doc_event_entity_id_6',
                                 'doc_event_publisher_id',
                                 'doc_event_source_id',
                                 'event_country',
                                 'event_country_state',
                                 'event_geo_location',
                                 'event_hour',
                                 'event_platform',
                                 'traffic_source']
# The concatenation order below defines the column index of every feature
# in the exported vector — do not reorder without regenerating downstream data.
feature_vector_labels_integral = bool_feature_names + int_feature_names + float_feature_names + \
                                 category_feature_names_integral
# -
# Reverse lookup: feature name -> position in the exported vector.
feature_vector_labels_integral_dict = dict([(key, idx) for idx, key in enumerate(feature_vector_labels_integral)])
# Persist the column ordering so downstream consumers can map indexes back to names.
with open('feature_vector_labels_integral.txt', 'w') as output:
    output.writelines('\n'.join(feature_vector_labels_integral))
# +
def set_feature_vector_cat_value(field_name, field_value, feature_vector):
    """One-hot-encode *field_value* for *field_name* into *feature_vector*.

    Null values and the '-1' sentinel are ignored.  Values whose OHE label is
    not in feature_vector_labels_dict fall back to the catch-all
    LESS_SPECIAL_CAT_VALUE bucket for that field.
    """
    if is_null(field_value) or str(field_value) == '-1':
        return
    feature_name = get_ohe_feature_name(field_name, field_value)
    if feature_name not in feature_vector_labels_dict:
        # Unpopular category value: use the shared "other" bucket.
        feature_name = get_ohe_feature_name(field_name, LESS_SPECIAL_CAT_VALUE)
    feature_vector[feature_vector_labels_dict[feature_name]] = float(1)
def set_feature_vector_cat_values(field_name, field_values, feature_vector):
    """One-hot-encode every value in *field_values* for *field_name*."""
    for value in field_values:
        set_feature_vector_cat_value(field_name, value, feature_vector)
# -
def get_ad_feature_vector(user_doc_ids_viewed, user_views_count, user_categories, user_topics, user_entities,
                          event_country, event_country_state,
                          ad_id, document_id, source_id, doc_ad_publish_time, timestamp_event, platform_event,
                          geo_location_event,
                          doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
                          traffic_source_pv, advertiser_id, publisher_id,
                          campaign_id, document_id_event,
                          doc_ad_category_ids, doc_ad_cat_confidence_levels,
                          doc_ad_topic_ids, doc_ad_top_confidence_levels,
                          doc_ad_entity_ids, doc_ad_ent_confidence_levels,
                          doc_event_category_ids, doc_event_cat_confidence_levels,
                          doc_event_topic_ids, doc_event_top_confidence_levels,
                          doc_event_entity_ids, doc_event_ent_confidence_levels):
    """Build the one-hot-encoded sparse feature vector for one (event, ad) pair.

    Combines user-history counts, document recency, popularity (CTR) scores,
    user<->ad-document and event-document<->ad-document content-based
    similarities, plus one-hot categorical fields.  Feature positions come
    from feature_vector_labels_dict (defined earlier in the file).

    Returns:
        pyspark.ml.linalg.SparseVector sized by feature_vector_labels_dict.

    Raises:
        Exception: any processing failure is re-raised with the full
        parameter list attached for debugging.
    """
    try:
        # Sparse representation: feature index -> float value.
        feature_vector = {}
        if user_views_count != None:
            feature_vector[feature_vector_labels_dict['user_views']] = float(user_views_count)
        if user_doc_ids_viewed != None:
            feature_vector[feature_vector_labels_dict['user_has_already_viewed_doc']] = float(document_id in user_doc_ids_viewed)
        # View counts come from broadcast popularity tables; element [1] holds
        # the view count (element [0] presumably the CTR — confirm upstream).
        if ad_id in ad_id_popularity_broad.value:
            feature_vector[feature_vector_labels_dict['ad_views']] = float(ad_id_popularity_broad.value[ad_id][1])
        if document_id in document_id_popularity_broad.value:
            feature_vector[feature_vector_labels_dict['doc_views']] = float(document_id_popularity_broad.value[document_id][1])
        # timestamp_event == -1 means "unknown" (see int_null_to_minus_one_udf).
        if timestamp_event > -1:
            dt_timestamp_event = convert_odd_timestamp(timestamp_event)
            if doc_ad_publish_time != None:
                delta_days = (dt_timestamp_event - doc_ad_publish_time).days
                if delta_days >= 0 and delta_days <= 365*10: #10 years
                    feature_vector[feature_vector_labels_dict['doc_ad_days_since_published']] = float(delta_days)
            if doc_event_publish_time != None:
                delta_days = (dt_timestamp_event - doc_event_publish_time).days
                if delta_days >= 0 and delta_days <= 365*10: #10 years
                    feature_vector[feature_vector_labels_dict['doc_event_days_since_published']] = float(delta_days)
            #Local period of the day (hours)
            dt_local_timestamp_event = get_local_datetime(dt_timestamp_event, event_country, event_country_state)
            local_hour_bin = get_hour_bin(dt_local_timestamp_event.hour)
            feature_vector[feature_vector_labels_dict['doc_event_hour']] = float(local_hour_bin) #Hour for Decision Trees
            set_feature_vector_cat_value(EVENT_HOUR_FV, local_hour_bin, feature_vector) #Period of day for FFM
            #Weekend
            weekend = int(is_weekend(dt_local_timestamp_event))
            feature_vector[feature_vector_labels_dict['event_weekend']] = float(weekend)
        conf_field_suffix = '_conf'
        conf_multiplied_field_suffix = '_conf_multipl'
        #Setting Popularity fields
        pop_scores = get_popularity_score(event_country, ad_id, document_id, source_id,
                                          publisher_id, advertiser_id, campaign_id, document_id_event,
                                          doc_ad_category_ids, doc_ad_cat_confidence_levels,
                                          doc_ad_topic_ids, doc_ad_top_confidence_levels,
                                          doc_ad_entity_ids, doc_ad_ent_confidence_levels,
                                          output_detailed_list=True)
        # Each score is (feature_name, value, confidence); the confidence and
        # value*confidence are exported as companion features.
        for score in pop_scores:
            feature_vector[feature_vector_labels_dict[score[0]]] = score[1]
            feature_vector[feature_vector_labels_dict[score[0]+conf_field_suffix]] = score[2]
            feature_vector[feature_vector_labels_dict[score[0]+conf_multiplied_field_suffix]] = score[1] * score[2]
        #Setting User-Doc_ad CB Similarity fields
        user_doc_ad_cb_sim_scores = get_user_cb_interest_score(user_views_count, user_categories, user_topics, user_entities,
                                                               timestamp_event,
                                                               doc_ad_category_ids, doc_ad_cat_confidence_levels,
                                                               doc_ad_topic_ids, doc_ad_top_confidence_levels,
                                                               doc_ad_entity_ids, doc_ad_ent_confidence_levels,
                                                               output_detailed_list=True)
        for score in user_doc_ad_cb_sim_scores:
            feature_vector[feature_vector_labels_dict[score[0]]] = score[1]
            feature_vector[feature_vector_labels_dict[score[0]+conf_field_suffix]] = score[2]
            feature_vector[feature_vector_labels_dict[score[0]+conf_multiplied_field_suffix]] = score[1] * score[2]
        #Setting Doc_event-doc_ad CB Similarity fields
        doc_event_doc_ad_cb_sim_scores = get_doc_event_doc_ad_cb_similarity_score(
                                                            doc_event_category_ids, doc_event_cat_confidence_levels,
                                                            doc_event_topic_ids, doc_event_top_confidence_levels,
                                                            doc_event_entity_ids, doc_event_ent_confidence_levels,
                                                            doc_ad_category_ids, doc_ad_cat_confidence_levels,
                                                            doc_ad_topic_ids, doc_ad_top_confidence_levels,
                                                            doc_ad_entity_ids, doc_ad_ent_confidence_levels,
                                                            output_detailed_list=True)
        for score in doc_event_doc_ad_cb_sim_scores:
            feature_vector[feature_vector_labels_dict[score[0]]] = score[1]
            feature_vector[feature_vector_labels_dict[score[0]+conf_field_suffix]] = score[2]
            feature_vector[feature_vector_labels_dict[score[0]+conf_multiplied_field_suffix]] = score[1] * score[2]
        # One-hot categorical fields.
        set_feature_vector_cat_value(TRAFFIC_SOURCE_FV, traffic_source_pv, feature_vector)
        set_feature_vector_cat_value(EVENT_COUNTRY_FV, event_country, feature_vector)
        set_feature_vector_cat_value(EVENT_COUNTRY_STATE_FV, event_country_state, feature_vector)
        set_feature_vector_cat_value(EVENT_GEO_LOCATION_FV, geo_location_event, feature_vector)
        set_feature_vector_cat_value(EVENT_PLATFORM_FV, platform_event, feature_vector)
        set_feature_vector_cat_value(AD_ADVERTISER_FV, advertiser_id, feature_vector)
        set_feature_vector_cat_value(DOC_AD_SOURCE_ID_FV, source_id, feature_vector)
        set_feature_vector_cat_value(DOC_AD_PUBLISHER_ID_FV, publisher_id, feature_vector)
        set_feature_vector_cat_value(DOC_EVENT_SOURCE_ID_FV, doc_event_source_id, feature_vector)
        set_feature_vector_cat_value(DOC_EVENT_PUBLISHER_ID_FV, doc_event_publisher_id, feature_vector)
        set_feature_vector_cat_values(DOC_AD_CATEGORY_ID_FV, doc_ad_category_ids, feature_vector)
        set_feature_vector_cat_values(DOC_AD_TOPIC_ID_FV, doc_ad_topic_ids, feature_vector)
        set_feature_vector_cat_values(DOC_AD_ENTITY_ID_FV, doc_ad_entity_ids, feature_vector)
        set_feature_vector_cat_values(DOC_EVENT_CATEGORY_ID_FV, doc_event_category_ids, feature_vector)
        set_feature_vector_cat_values(DOC_EVENT_TOPIC_ID_FV, doc_event_topic_ids, feature_vector)
        set_feature_vector_cat_values(DOC_EVENT_ENTITY_ID_FV, doc_event_entity_ids, feature_vector)
        # Creating a dummy column as the last column, because xgboost has a problem
        # if the last column is undefined for all rows, saying that the dimensions
        # of data and feature_names do not match
        #feature_vector[feature_vector_labels_dict[DUMMY_FEATURE_COLUMN]] = float(0)
        # Ensuring that all elements are floats for compatibility with UDF output (ArrayType(FloatType()))
        #feature_vector = list([float(x) for x in feature_vector])
    except Exception as e:
        # Re-raise with the full argument list so the failing row can be reproduced.
        raise Exception("[get_ad_feature_vector] ERROR PROCESSING FEATURE VECTOR! Params: {}" \
                        .format([user_doc_ids_viewed, user_views_count, user_categories, user_topics, user_entities,
                                 event_country, event_country_state,
                                 ad_id, document_id, source_id, doc_ad_publish_time, timestamp_event, platform_event,
                                 geo_location_event,
                                 doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
                                 traffic_source_pv, advertiser_id, publisher_id,
                                 campaign_id, document_id_event,
                                 doc_ad_category_ids, doc_ad_cat_confidence_levels,
                                 doc_ad_topic_ids, doc_ad_top_confidence_levels,
                                 doc_ad_entity_ids, doc_ad_ent_confidence_levels,
                                 doc_event_category_ids, doc_event_cat_confidence_levels,
                                 doc_event_topic_ids, doc_event_top_confidence_levels,
                                 doc_event_entity_ids, doc_event_ent_confidence_levels]),
                        e)
    return SparseVector(len(feature_vector_labels_dict), feature_vector)
# Spark UDF wrapper around get_ad_feature_vector; returns a VectorUDT column
# (SparseVector) so the feature vector can be built row-by-row in a select/withColumn.
get_ad_feature_vector_udf = F.udf(lambda user_doc_ids_viewed, user_views_count, user_categories, user_topics,
                                         user_entities, event_country, event_country_state, ad_id, document_id, source_id,
                                         doc_ad_publish_time, timestamp_event, platform_event,
                                         geo_location_event,
                                         doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
                                         traffic_source_pv, advertiser_id, publisher_id,
                                         campaign_id, document_id_event,
                                         category_ids_by_doc, cat_confidence_level_by_doc,
                                         topic_ids_by_doc, top_confidence_level_by_doc,
                                         entity_ids_by_doc, ent_confidence_level_by_doc,
                                         doc_event_category_id_list, doc_event_confidence_level_cat_list,
                                         doc_event_topic_id_list, doc_event_confidence_level_top,
                                         doc_event_entity_id_list, doc_event_confidence_level_ent: \
        get_ad_feature_vector(user_doc_ids_viewed, user_views_count, user_categories, user_topics, user_entities,
                              event_country, event_country_state,
                              ad_id, document_id, source_id, doc_ad_publish_time, timestamp_event, platform_event,
                              geo_location_event,
                              doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
                              traffic_source_pv, advertiser_id, publisher_id,
                              campaign_id, document_id_event,
                              category_ids_by_doc, cat_confidence_level_by_doc,
                              topic_ids_by_doc, top_confidence_level_by_doc,
                              entity_ids_by_doc, ent_confidence_level_by_doc,
                              doc_event_category_id_list, doc_event_confidence_level_cat_list,
                              doc_event_topic_id_list, doc_event_confidence_level_top,
                              doc_event_entity_id_list, doc_event_confidence_level_ent),
                                  VectorUDT())
# ### Building feature vectors
# +
def set_feature_vector_cat_value_integral(field_name, field_value, feature_vector):
    """Store the ordinal-encoded *field_value* in the fixed column for *field_name*.

    Nulls are skipped.  (The original '-1' sentinel filter is intentionally
    disabled here, unlike the OHE variant.)
    """
    if is_null(field_value):  #and str(field_value) != '-1':
        return
    feature_vector[feature_vector_labels_integral_dict[field_name]] = float(field_value)
def set_feature_vector_cat_top_multi_values_integral(field_name, values, confidences, feature_vector, top=5):
    """Store the *top* most-confident values as '<field_name>_<rank>' columns.

    Values are ranked by descending confidence (stable sort), the -1 sentinel
    is dropped, and each surviving value is written via
    set_feature_vector_cat_value_integral with a 1-based rank suffix.
    """
    ranked = sorted(zip(values, confidences), key=lambda pair: -pair[1])
    top_values = [value for value, _ in ranked if value != -1][:top]
    for rank, field_value in enumerate(top_values, start=1):
        set_feature_vector_cat_value_integral('{}_{}'.format(field_name, rank), field_value, feature_vector)
# -
def get_ad_feature_vector_integral(user_doc_ids_viewed, user_views_count, user_categories, user_topics, user_entities,
                                   event_country, event_country_state,
                                   ad_id, document_id, source_id, doc_ad_publish_time, timestamp_event, platform_event,
                                   geo_location_event,
                                   doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
                                   traffic_source_pv, advertiser_id, publisher_id,
                                   campaign_id, document_id_event,
                                   doc_ad_category_ids, doc_ad_cat_confidence_levels,
                                   doc_ad_topic_ids, doc_ad_top_confidence_levels,
                                   doc_ad_entity_ids, doc_ad_ent_confidence_levels,
                                   doc_event_category_ids, doc_event_cat_confidence_levels,
                                   doc_event_topic_ids, doc_event_top_confidence_levels,
                                   doc_event_entity_ids, doc_event_ent_confidence_levels):
    """Integral (ordinal-coded) variant of get_ad_feature_vector.

    Same numeric/popularity/similarity features, but categorical values are
    written as integral codes into fixed columns (see
    feature_vector_labels_integral) instead of being one-hot encoded; rare
    values fall back to the LESS_SPECIAL_CAT_VALUE code from the *_values_counts
    mappings.  Suited for tree models (xgboost) and FFM preprocessing.

    Returns:
        pyspark.ml.linalg.SparseVector sized by feature_vector_labels_integral_dict.

    Raises:
        Exception: any processing failure is re-raised with the full
        parameter list attached for debugging.
    """
    try:
        # Sparse representation: feature index -> float value.
        feature_vector = {}
        if user_views_count != None:
            feature_vector[feature_vector_labels_integral_dict['user_views']] = float(user_views_count)
        if user_doc_ids_viewed != None:
            feature_vector[feature_vector_labels_integral_dict['user_has_already_viewed_doc']] = float(document_id in user_doc_ids_viewed)
        # View counts from broadcast popularity tables (element [1] = view count).
        if ad_id in ad_id_popularity_broad.value:
            feature_vector[feature_vector_labels_integral_dict['ad_views']] = float(ad_id_popularity_broad.value[ad_id][1])
        if document_id in document_id_popularity_broad.value:
            feature_vector[feature_vector_labels_integral_dict['doc_views']] = float(document_id_popularity_broad.value[document_id][1])
        # timestamp_event == -1 means "unknown" (see int_null_to_minus_one_udf).
        if timestamp_event > -1:
            dt_timestamp_event = convert_odd_timestamp(timestamp_event)
            if doc_ad_publish_time != None:
                delta_days = (dt_timestamp_event - doc_ad_publish_time).days
                if delta_days >= 0 and delta_days <= 365*10: #10 years
                    feature_vector[feature_vector_labels_integral_dict['doc_ad_days_since_published']] = float(delta_days)
            if doc_event_publish_time != None:
                delta_days = (dt_timestamp_event - doc_event_publish_time).days
                if delta_days >= 0 and delta_days <= 365*10: #10 years
                    feature_vector[feature_vector_labels_integral_dict['doc_event_days_since_published']] = float(delta_days)
            #Local period of the day (hours)
            dt_local_timestamp_event = get_local_datetime(dt_timestamp_event, event_country, event_country_state)
            local_hour_bin = get_hour_bin(dt_local_timestamp_event.hour)
            feature_vector[feature_vector_labels_integral_dict['doc_event_hour']] = float(local_hour_bin) #Hour for Decision Trees
            set_feature_vector_cat_value_integral(EVENT_HOUR_FV, local_hour_bin, feature_vector) #Period of day for FFM
            #Weekend
            weekend = int(is_weekend(dt_local_timestamp_event))
            feature_vector[feature_vector_labels_integral_dict['event_weekend']] = float(weekend)
        conf_field_suffix = '_conf'
        conf_multiplied_field_suffix = '_conf_multipl'
        #Setting Popularity fields
        pop_scores = get_popularity_score(event_country, ad_id, document_id, source_id,
                                          publisher_id, advertiser_id, campaign_id, document_id_event,
                                          doc_ad_category_ids, doc_ad_cat_confidence_levels,
                                          doc_ad_topic_ids, doc_ad_top_confidence_levels,
                                          doc_ad_entity_ids, doc_ad_ent_confidence_levels,
                                          output_detailed_list=True)
        # Each score is (feature_name, value, confidence); confidence and
        # value*confidence are exported as companion features.
        for score in pop_scores:
            feature_vector[feature_vector_labels_integral_dict[score[0]]] = score[1]
            feature_vector[feature_vector_labels_integral_dict[score[0]+conf_field_suffix]] = score[2]
            feature_vector[feature_vector_labels_integral_dict[score[0]+conf_multiplied_field_suffix]] = score[1] * score[2]
        #Setting User-Doc_ad CB Similarity fields
        user_doc_ad_cb_sim_scores = get_user_cb_interest_score(user_views_count, user_categories, user_topics, user_entities,
                                                               timestamp_event,
                                                               doc_ad_category_ids, doc_ad_cat_confidence_levels,
                                                               doc_ad_topic_ids, doc_ad_top_confidence_levels,
                                                               doc_ad_entity_ids, doc_ad_ent_confidence_levels,
                                                               output_detailed_list=True)
        for score in user_doc_ad_cb_sim_scores:
            feature_vector[feature_vector_labels_integral_dict[score[0]]] = score[1]
            feature_vector[feature_vector_labels_integral_dict[score[0]+conf_field_suffix]] = score[2]
            feature_vector[feature_vector_labels_integral_dict[score[0]+conf_multiplied_field_suffix]] = score[1] * score[2]
        #Setting Doc_event-doc_ad CB Similarity fields
        doc_event_doc_ad_cb_sim_scores = get_doc_event_doc_ad_cb_similarity_score(
                                                            doc_event_category_ids, doc_event_cat_confidence_levels,
                                                            doc_event_topic_ids, doc_event_top_confidence_levels,
                                                            doc_event_entity_ids, doc_event_ent_confidence_levels,
                                                            doc_ad_category_ids, doc_ad_cat_confidence_levels,
                                                            doc_ad_topic_ids, doc_ad_top_confidence_levels,
                                                            doc_ad_entity_ids, doc_ad_ent_confidence_levels,
                                                            output_detailed_list=True)
        for score in doc_event_doc_ad_cb_sim_scores:
            feature_vector[feature_vector_labels_integral_dict[score[0]]] = score[1]
            feature_vector[feature_vector_labels_integral_dict[score[0]+conf_field_suffix]] = score[2]
            feature_vector[feature_vector_labels_integral_dict[score[0]+conf_multiplied_field_suffix]] = score[1] * score[2]
        #Process code for event_country
        # Rare values map to the shared LESS_SPECIAL_CAT_VALUE bucket code.
        if event_country in event_country_values_counts:
            event_country_code = event_country_values_counts[event_country]
        else:
            event_country_code = event_country_values_counts[LESS_SPECIAL_CAT_VALUE]
        set_feature_vector_cat_value_integral(EVENT_COUNTRY_FV, event_country_code, feature_vector)
        #Process code for event_country_state
        if event_country_state in event_country_state_values_counts:
            event_country_state_code = event_country_state_values_counts[event_country_state]
        else:
            event_country_state_code = event_country_state_values_counts[LESS_SPECIAL_CAT_VALUE]
        set_feature_vector_cat_value_integral(EVENT_COUNTRY_STATE_FV, event_country_state_code, feature_vector)
        #Process code for geo_location_event
        if geo_location_event in event_geo_location_values_counts:
            geo_location_event_code = event_geo_location_values_counts[geo_location_event]
        else:
            geo_location_event_code = event_geo_location_values_counts[LESS_SPECIAL_CAT_VALUE]
        set_feature_vector_cat_value_integral(EVENT_GEO_LOCATION_FV, geo_location_event_code, feature_vector)
        set_feature_vector_cat_value_integral(TRAFFIC_SOURCE_FV, traffic_source_pv, feature_vector)
        set_feature_vector_cat_value_integral(EVENT_PLATFORM_FV, platform_event, feature_vector)
        set_feature_vector_cat_value_integral(AD_ADVERTISER_FV, advertiser_id, feature_vector)
        set_feature_vector_cat_value_integral(DOC_AD_SOURCE_ID_FV, source_id, feature_vector)
        set_feature_vector_cat_value_integral(DOC_AD_PUBLISHER_ID_FV, publisher_id, feature_vector)
        set_feature_vector_cat_value_integral(DOC_EVENT_SOURCE_ID_FV, doc_event_source_id, feature_vector)
        set_feature_vector_cat_value_integral(DOC_EVENT_PUBLISHER_ID_FV, doc_event_publisher_id, feature_vector)
        # Multi-valued aspects: keep only the top-N by confidence (3 for
        # categories/topics, 6 for entities, matching the reserved columns).
        set_feature_vector_cat_top_multi_values_integral(DOC_AD_CATEGORY_ID_FV, doc_ad_category_ids, doc_ad_cat_confidence_levels, feature_vector, top=3)
        set_feature_vector_cat_top_multi_values_integral(DOC_AD_TOPIC_ID_FV, doc_ad_topic_ids, doc_ad_top_confidence_levels, feature_vector, top=3)
        set_feature_vector_cat_top_multi_values_integral(DOC_EVENT_CATEGORY_ID_FV, doc_event_category_ids, doc_event_cat_confidence_levels, feature_vector, top=3)
        set_feature_vector_cat_top_multi_values_integral(DOC_EVENT_TOPIC_ID_FV, doc_event_topic_ids, doc_event_top_confidence_levels, feature_vector, top=3)
        #Process codes for doc_ad_entity_ids
        doc_ad_entity_ids_codes = [doc_entity_id_values_counts[x] if x in doc_entity_id_values_counts
                                   else doc_entity_id_values_counts[LESS_SPECIAL_CAT_VALUE]
                                   for x in doc_ad_entity_ids]
        set_feature_vector_cat_top_multi_values_integral(DOC_AD_ENTITY_ID_FV, doc_ad_entity_ids_codes, doc_ad_ent_confidence_levels, feature_vector, top=6)
        #Process codes for doc_event_entity_ids
        doc_event_entity_ids_codes = [doc_entity_id_values_counts[x] if x in doc_entity_id_values_counts
                                      else doc_entity_id_values_counts[LESS_SPECIAL_CAT_VALUE]
                                      for x in doc_event_entity_ids]
        set_feature_vector_cat_top_multi_values_integral(DOC_EVENT_ENTITY_ID_FV, doc_event_entity_ids_codes, doc_event_ent_confidence_levels, feature_vector, top=6)
        # Creating a dummy column as the last column, because xgboost has a problem
        # if the last column is undefined for all rows, saying that the dimensions
        # of data and feature_names do not match
        #feature_vector[feature_vector_labels_dict[DUMMY_FEATURE_COLUMN]] = float(0)
        # Ensuring that all elements are floats for compatibility with UDF output (ArrayType(FloatType()))
        #feature_vector = list([float(x) for x in feature_vector])
    except Exception as e:
        # Re-raise with the full argument list so the failing row can be reproduced.
        raise Exception("[get_ad_feature_vector_integral] ERROR PROCESSING FEATURE VECTOR! Params: {}" \
                        .format([user_doc_ids_viewed, user_views_count, user_categories, user_topics, user_entities,
                                 event_country, event_country_state,
                                 ad_id, document_id, source_id, doc_ad_publish_time, timestamp_event, platform_event,
                                 geo_location_event,
                                 doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
                                 traffic_source_pv, advertiser_id, publisher_id,
                                 campaign_id, document_id_event,
                                 doc_ad_category_ids, doc_ad_cat_confidence_levels,
                                 doc_ad_topic_ids, doc_ad_top_confidence_levels,
                                 doc_ad_entity_ids, doc_ad_ent_confidence_levels,
                                 doc_event_category_ids, doc_event_cat_confidence_levels,
                                 doc_event_topic_ids, doc_event_top_confidence_levels,
                                 doc_event_entity_ids, doc_event_ent_confidence_levels]),
                        e)
    return SparseVector(len(feature_vector_labels_integral_dict), feature_vector)
# Spark UDF wrapper around get_ad_feature_vector_integral; returns a VectorUDT
# column (SparseVector) with ordinal-coded categorical features.
get_ad_feature_vector_integral_udf = F.udf(lambda user_doc_ids_viewed, user_views_count, user_categories, user_topics,
                                                  user_entities, event_country, event_country_state, ad_id, document_id, source_id,
                                                  doc_ad_publish_time, timestamp_event, platform_event,
                                                  geo_location_event,
                                                  doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
                                                  traffic_source_pv, advertiser_id, publisher_id,
                                                  campaign_id, document_id_event,
                                                  category_ids_by_doc, cat_confidence_level_by_doc,
                                                  topic_ids_by_doc, top_confidence_level_by_doc,
                                                  entity_ids_by_doc, ent_confidence_level_by_doc,
                                                  doc_event_category_id_list, doc_event_confidence_level_cat_list,
                                                  doc_event_topic_id_list, doc_event_confidence_level_top,
                                                  doc_event_entity_id_list, doc_event_confidence_level_ent: \
        get_ad_feature_vector_integral(user_doc_ids_viewed, user_views_count, user_categories, user_topics, user_entities,
                                       event_country, event_country_state,
                                       ad_id, document_id, source_id, doc_ad_publish_time, timestamp_event, platform_event,
                                       geo_location_event,
                                       doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
                                       traffic_source_pv, advertiser_id, publisher_id,
                                       campaign_id, document_id_event,
                                       category_ids_by_doc, cat_confidence_level_by_doc,
                                       topic_ids_by_doc, top_confidence_level_by_doc,
                                       entity_ids_by_doc, ent_confidence_level_by_doc,
                                       doc_event_category_id_list, doc_event_confidence_level_cat_list,
                                       doc_event_topic_id_list, doc_event_confidence_level_top,
                                       doc_event_entity_id_list, doc_event_confidence_level_ent),
                                           VectorUDT())
#StructField("features", VectorUDT()))
#MapType(IntegerType(), FloatType()))
# ## Export Train set feature vectors
# Enrich each train-set impression with everything the feature-vector UDF needs:
#  * the promoted document's categories/topics/entities (left joins on the grouped aspect tables),
#  * the event (landing) document's aspects, renamed with a 'doc_event_' prefix,
#  * the visitor's profile (view count, viewed doc ids, category/topic/entity interests).
# Null list columns are normalized to empty lists and nullable ints to -1 so the
# UDF never receives None where it expects a sequence or a number.
# (Comments cannot be interleaved below: the chain uses backslash continuations.)
train_set_enriched_df = train_set_df \
    .join(documents_categories_grouped_df, on=F.col("document_id_promo") == F.col("documents_categories_grouped.document_id_cat"), how='left') \
    .join(documents_topics_grouped_df, on=F.col("document_id_promo") == F.col("documents_topics_grouped.document_id_top"), how='left') \
    .join(documents_entities_grouped_df, on=F.col("document_id_promo") == F.col("documents_entities_grouped.document_id_ent"), how='left') \
    .join(documents_categories_grouped_df \
          .withColumnRenamed('category_id_list', 'doc_event_category_id_list')
          .withColumnRenamed('confidence_level_cat_list', 'doc_event_confidence_level_cat_list') \
          .alias('documents_event_categories_grouped'),
          on=F.col("document_id_event") == F.col("documents_event_categories_grouped.document_id_cat"),
          how='left') \
    .join(documents_topics_grouped_df \
          .withColumnRenamed('topic_id_list', 'doc_event_topic_id_list')
          .withColumnRenamed('confidence_level_top_list', 'doc_event_confidence_level_top_list') \
          .alias('documents_event_topics_grouped'),
          on=F.col("document_id_event") == F.col("documents_event_topics_grouped.document_id_top"),
          how='left') \
    .join(documents_entities_grouped_df \
          .withColumnRenamed('entity_id_list', 'doc_event_entity_id_list')
          .withColumnRenamed('confidence_level_ent_list', 'doc_event_confidence_level_ent_list') \
          .alias('documents_event_entities_grouped'),
          on=F.col("document_id_event") == F.col("documents_event_entities_grouped.document_id_ent"),
          how='left') \
    .select('display_id','uuid_event','event_country','event_country_state','platform_event',
            'source_id_doc_event', 'publisher_doc_event','publish_time_doc_event',
            'publish_time', 'ad_id','document_id_promo','clicked',
            'geo_location_event', 'advertiser_id', 'publisher_id',
            'campaign_id', 'document_id_event',
            'traffic_source_pv',
            int_list_null_to_empty_list_udf('doc_event_category_id_list').alias('doc_event_category_id_list'),
            float_list_null_to_empty_list_udf('doc_event_confidence_level_cat_list').alias('doc_event_confidence_level_cat_list'),
            int_list_null_to_empty_list_udf('doc_event_topic_id_list').alias('doc_event_topic_id_list'),
            float_list_null_to_empty_list_udf('doc_event_confidence_level_top_list').alias('doc_event_confidence_level_top_list'),
            str_list_null_to_empty_list_udf('doc_event_entity_id_list').alias('doc_event_entity_id_list'),
            float_list_null_to_empty_list_udf('doc_event_confidence_level_ent_list').alias('doc_event_confidence_level_ent_list'),
            int_null_to_minus_one_udf('source_id').alias('source_id'),
            int_null_to_minus_one_udf('timestamp_event').alias('timestamp_event'),
            int_list_null_to_empty_list_udf('category_id_list').alias('category_id_list'),
            float_list_null_to_empty_list_udf('confidence_level_cat_list').alias('confidence_level_cat_list'),
            int_list_null_to_empty_list_udf('topic_id_list').alias('topic_id_list'),
            float_list_null_to_empty_list_udf('confidence_level_top_list').alias('confidence_level_top_list'),
            str_list_null_to_empty_list_udf('entity_id_list').alias('entity_id_list'),
            float_list_null_to_empty_list_udf('confidence_level_ent_list').alias('confidence_level_ent_list')
            ) \
    .join(user_profiles_df, on=[F.col("user_profiles.uuid") == F.col("uuid_event")], how='left') \
    .withColumnRenamed('categories', 'user_categories') \
    .withColumnRenamed('topics', 'user_topics') \
    .withColumnRenamed('entities', 'user_entities') \
    .withColumnRenamed('doc_ids', 'user_doc_ids_viewed') \
    .withColumnRenamed('views', 'user_views_count')
# Attach the model feature vector (built row-by-row by
# get_ad_feature_vector_integral_udf from user, ad, document and event columns)
# and keep only the identifiers + label needed for export.
train_set_feature_vectors_df = train_set_enriched_df \
    .withColumn('feature_vector',
                #get_ad_feature_vector_udf(
                get_ad_feature_vector_integral_udf(
                    'user_doc_ids_viewed',
                    'user_views_count',
                    'user_categories',
                    'user_topics',
                    'user_entities',
                    'event_country',
                    'event_country_state',
                    'ad_id',
                    'document_id_promo',
                    'source_id',
                    'publish_time',
                    'timestamp_event',
                    'platform_event',
                    'geo_location_event',
                    'source_id_doc_event',
                    'publisher_doc_event',
                    'publish_time_doc_event',
                    'traffic_source_pv',
                    'advertiser_id',
                    'publisher_id',
                    'campaign_id',
                    'document_id_event',
                    'category_id_list',
                    'confidence_level_cat_list',
                    'topic_id_list',
                    'confidence_level_top_list',
                    'entity_id_list',
                    'confidence_level_ent_list',
                    'doc_event_category_id_list',
                    'doc_event_confidence_level_cat_list',
                    'doc_event_topic_id_list',
                    'doc_event_confidence_level_top_list',
                    'doc_event_entity_id_list',
                    'doc_event_confidence_level_ent_list')) \
    .select(F.col('uuid_event').alias('uuid'),
            'display_id',
            'ad_id',
            'document_id_event',
            F.col('document_id_promo').alias('document_id'),
            F.col('clicked').alias('label'),
            'feature_vector') #\
    #.orderBy('display_id','ad_id')
# Pick an output folder per mode: the "_eval" variant is a hold-out split
# carved out of the train set for local evaluation.
if evaluation:
    train_feature_vector_gcs_folder_name = 'train_feature_vectors_integral_eval'
else:
    train_feature_vector_gcs_folder_name = 'train_feature_vectors_integral'
# %time train_set_feature_vectors_df.write.parquet(OUTPUT_BUCKET_FOLDER+train_feature_vector_gcs_folder_name, mode='overwrite')
# ## Exporting integral feature vectors to CSV
# Read back the parquet that was just written (take(3) is a sanity peek).
train_feature_vectors_exported_df = spark.read.parquet(OUTPUT_BUCKET_FOLDER+train_feature_vector_gcs_folder_name)
train_feature_vectors_exported_df.take(3)
if evaluation:
    train_feature_vector_integral_csv_folder_name = 'train_feature_vectors_integral_eval.csv'
else:
    train_feature_vector_integral_csv_folder_name = 'train_feature_vectors_integral.csv'
# +
# CSV header: fixed id columns first, then one column per feature slot.
integral_headers = ['label', 'display_id', 'ad_id', 'doc_id', 'doc_event_id', 'is_leak'] + feature_vector_labels_integral
with open(train_feature_vector_integral_csv_folder_name+".header", 'w') as output:
    output.writelines('\n'.join(integral_headers))
# -
def sparse_vector_to_csv_with_nulls_row(additional_column_values, vec, num_columns):
    """Serialize one sample as a CSV row.

    The fixed `additional_column_values` come first, followed by up to
    `num_columns` cells from the sparse vector `vec`: present indices are
    formatted with 5 significant digits, absent ones become empty cells.
    A trailing '.0' before a comma is stripped so integral floats print
    as integers.
    """
    prefix = [str(value) for value in additional_column_values]
    present = set(vec.indices)
    cells = ['{:.5}'.format(vec[i]) if i in present else '' for i in range(vec.size)]
    row = ','.join(prefix + cells[:num_columns])
    # NOTE: this also rewrites any '.0,' occurring inside the prefix columns.
    return row.replace('.0,', ',')
# Convert each row to a CSV line; is_leak is a constant -1 placeholder for the
# train set (the leak flag is only computed for validation/test below).
train_feature_vectors_integral_csv_rdd = train_feature_vectors_exported_df.select(
    'label', 'display_id', 'ad_id', 'document_id', 'document_id_event', 'feature_vector').withColumn('is_leak', F.lit(-1)) \
    .rdd.map(lambda x: sparse_vector_to_csv_with_nulls_row([x['label'], x['display_id'], x['ad_id'], x['document_id'], x['document_id_event'], x['is_leak']],
                                                           x['feature_vector'], len(integral_headers)))
# %time train_feature_vectors_integral_csv_rdd.saveAsTextFile(OUTPUT_BUCKET_FOLDER+train_feature_vector_integral_csv_folder_name)
# # Export Validation/Test set feature vectors
def is_leak(max_timestamp_pv_leak, timestamp_event):
    """Return True when the user saw the promoted page at or after the event.

    A negative `max_timestamp_pv_leak` is the sentinel for "no page view",
    which is never a leak.
    """
    if max_timestamp_pv_leak < 0:
        return False
    return timestamp_event <= max_timestamp_pv_leak
# Spark wrapper: emit the leak flag as 0/1 integers for the CSV export.
is_leak_udf = F.udf(lambda max_timestamp_pv_leak, timestamp_event: int(is_leak(max_timestamp_pv_leak, timestamp_event)), IntegerType())
# +
# Select the split to export: validation (for local eval) or the real test set.
if evaluation:
    data_df = validation_set_df
else:
    data_df = test_set_df
# Same enrichment as the train set: null-safe list columns, -1 sentinels for
# missing scalars, plus max_timestamp_pv (for leak detection), then a left join
# with the per-user profile aggregates (NULL profile columns for users without
# a profile).
test_validation_set_enriched_df = data_df.select('display_id','uuid_event','event_country','event_country_state','platform_event',
                                                 'source_id_doc_event', 'publisher_doc_event','publish_time_doc_event',
                                                 'publish_time',
                                                 'ad_id','document_id_promo','clicked',
                                                 'geo_location_event', 'advertiser_id', 'publisher_id',
                                                 'campaign_id', 'document_id_event',
                                                 'traffic_source_pv',
                                                 int_list_null_to_empty_list_udf('doc_event_category_id_list').alias('doc_event_category_id_list'),
                                                 float_list_null_to_empty_list_udf('doc_event_confidence_level_cat_list').alias('doc_event_confidence_level_cat_list'),
                                                 int_list_null_to_empty_list_udf('doc_event_topic_id_list').alias('doc_event_topic_id_list'),
                                                 float_list_null_to_empty_list_udf('doc_event_confidence_level_top_list').alias('doc_event_confidence_level_top_list'),
                                                 str_list_null_to_empty_list_udf('doc_event_entity_id_list').alias('doc_event_entity_id_list'),
                                                 float_list_null_to_empty_list_udf('doc_event_confidence_level_ent_list').alias('doc_event_confidence_level_ent_list'),
                                                 int_null_to_minus_one_udf('source_id').alias('source_id'),
                                                 int_null_to_minus_one_udf('timestamp_event').alias('timestamp_event'),
                                                 int_list_null_to_empty_list_udf('category_id_list').alias('category_id_list'),
                                                 float_list_null_to_empty_list_udf('confidence_level_cat_list').alias('confidence_level_cat_list'),
                                                 int_list_null_to_empty_list_udf('topic_id_list').alias('topic_id_list'),
                                                 float_list_null_to_empty_list_udf('confidence_level_top_list').alias('confidence_level_top_list'),
                                                 str_list_null_to_empty_list_udf('entity_id_list').alias('entity_id_list'),
                                                 float_list_null_to_empty_list_udf('confidence_level_ent_list').alias('confidence_level_ent_list'),
                                                 int_null_to_minus_one_udf('max_timestamp_pv').alias('max_timestamp_pv_leak')
                                                 ) \
    .join(user_profiles_df, on=[F.col("user_profiles.uuid") == F.col("uuid_event")], how='left') \
    .withColumnRenamed('categories', 'user_categories') \
    .withColumnRenamed('topics', 'user_topics') \
    .withColumnRenamed('entities', 'user_entities') \
    .withColumnRenamed('doc_ids', 'user_doc_ids_viewed') \
    .withColumnRenamed('views', 'user_views_count')
# -
# Build feature vectors for the validation/test split, mirroring the train-set
# pipeline, plus the is_leak flag computed from page-view timestamps.
test_validation_set_feature_vectors_df = test_validation_set_enriched_df \
    .withColumn('feature_vector',
                #get_ad_feature_vector_udf(
                get_ad_feature_vector_integral_udf(
                    'user_doc_ids_viewed',
                    'user_views_count',
                    'user_categories',
                    'user_topics',
                    'user_entities',
                    'event_country',
                    'event_country_state',
                    'ad_id',
                    'document_id_promo',
                    'source_id',
                    'publish_time',
                    'timestamp_event',
                    'platform_event',
                    'geo_location_event',
                    'source_id_doc_event',
                    'publisher_doc_event',
                    'publish_time_doc_event',
                    'traffic_source_pv',
                    'advertiser_id',
                    'publisher_id',
                    'campaign_id',
                    'document_id_event',
                    'category_id_list',
                    'confidence_level_cat_list',
                    'topic_id_list',
                    'confidence_level_top_list',
                    'entity_id_list',
                    'confidence_level_ent_list',
                    'doc_event_category_id_list',
                    'doc_event_confidence_level_cat_list',
                    'doc_event_topic_id_list',
                    'doc_event_confidence_level_top_list',
                    'doc_event_entity_id_list',
                    'doc_event_confidence_level_ent_list')) \
    .select(
        # BUG FIX: this previously selected F.col('uuid'), i.e. the uuid coming
        # from the left-joined user_profiles table, which is NULL for events
        # whose user has no profile. Use the always-present event uuid instead,
        # consistent with the train-set export.
        F.col('uuid_event').alias('uuid'),
        'display_id',
        'ad_id',
        'document_id_event',
        F.col('document_id_promo').alias('document_id'),
        F.col('clicked').alias('label'),
        is_leak_udf('max_timestamp_pv_leak','timestamp_event').alias('is_leak'),
        'feature_vector') #\
    #.orderBy('display_id','ad_id')
# Output folder per mode (validation split when evaluating locally, otherwise
# the competition test set).
if evaluation:
    test_validation_feature_vector_gcs_folder_name = 'validation_feature_vectors_integral'
else:
    test_validation_feature_vector_gcs_folder_name = 'test_feature_vectors_integral'
# %time test_validation_set_feature_vectors_df.write.parquet(OUTPUT_BUCKET_FOLDER+test_validation_feature_vector_gcs_folder_name, mode='overwrite')
# ## Exporting integral feature vectors to CSV
# Read back the parquet just written (take(3) is a sanity peek).
test_validation_feature_vectors_exported_df = spark.read.parquet(OUTPUT_BUCKET_FOLDER+test_validation_feature_vector_gcs_folder_name)
test_validation_feature_vectors_exported_df.take(3)
if evaluation:
    test_validation_feature_vector_integral_csv_folder_name = 'validation_feature_vectors_integral.csv'
else:
    test_validation_feature_vector_integral_csv_folder_name = 'test_feature_vectors_integral.csv'
# +
# Same header layout as the train export: fixed id columns, then feature slots.
integral_headers = ['label', 'display_id', 'ad_id', 'doc_id', 'doc_event_id', 'is_leak'] + feature_vector_labels_integral
with open(test_validation_feature_vector_integral_csv_folder_name+".header", 'w') as output:
    output.writelines('\n'.join(integral_headers))
# -
# Here is_leak is the real computed flag (not the -1 placeholder used for train).
test_validation_feature_vectors_integral_csv_rdd = test_validation_feature_vectors_exported_df.select(
    'label', 'display_id', 'ad_id', 'document_id', 'document_id_event', 'is_leak', 'feature_vector') \
    .rdd.map(lambda x: sparse_vector_to_csv_with_nulls_row([x['label'], x['display_id'], x['ad_id'], x['document_id'], x['document_id_event'], x['is_leak']],
                                                           x['feature_vector'], len(integral_headers)))
# %time test_validation_feature_vectors_integral_csv_rdd.saveAsTextFile(OUTPUT_BUCKET_FOLDER+test_validation_feature_vector_integral_csv_folder_name)
| spark/preprocessing/3-Outbrain-Preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.2
# language: julia
# name: julia-1.4
# ---
# # Conditionals
# #### with the `if` keyword
#
# In Julia, the syntax
# ```julia
# if *condition 1*
# *option 1*
# elseif *condition 2*
# *option 2*
# else
# *option 3*
# end
# ```
#
# allows us to conditionally evaluate one of our options.
#
# For example, we might want to implement the FizzBuzz test: given a number, N, print "Fizz"
# if N is divisible by 3, "Buzz" if N is divisible by 5, and "FizzBuzz" if N is divisible by
# 3 and 5. Otherwise just print the number itself! Enter your choice for `N` here:
N = 1  # try 15, 3, 5 and 7 to exercise every branch
# Test the most specific condition (divisible by both) first, so the weaker
# branches below it are only reached when it fails.
if (N % 3 == 0) && (N % 5 == 0) # `&&` means "AND"; % computes the remainder after division
    println("FizzBuzz")
elseif N % 3 == 0
    println("Fizz")
elseif N % 5 == 0
    println("Buzz")
else
    println(N)
end
# #### with ternary operators
#
# For this last block, we could instead use the ternary operator with the syntax
#
# ```julia
# a ? b : c
# ```
#
# which equates to
#
# ```julia
# if a
# b
# else
# c
# end
# ```
# Now let's say we want to return the larger of two numbers. Give `x` and `y` values here:
x = 1
y = 0
# Using the `if` and `else` keywords, we might write:
# (an `if` block is itself an expression in Julia — it evaluates to the last
# expression of the branch taken, here the larger of the two values)
if x > y
    x
else
    y
end
# and as a ternary operator, the conditional looks like this:
(x > y) ? x : y
# #### with short-circuit evaluation
#
# We've already seen expressions with the syntax
# ```julia
# a && b
# ```
# to return true if both `a` and `b` are true. Of course, if `a` is false, Julia doesn't even need to know the value of `b` in order to determine that the overall result will be false. So Julia doesn't even need to check what `b` is; it can just "short-circuit" and immediately return `false`. The second argument `b` might be a more complicated expression like a function call with a side-effect, in which case it won't even be called:
# `&&` short-circuits: when the left side is false, the right side never runs.
false && (println("hi"); true)
true && (println("hi"); true)
# On the other hand, if `a` is true, Julia knows it can just return the value of `b` as the overall expression. This means that `b` doesn't necessarily need to evaluate to `true` or `false`! `b` could even be an error:
x = 1
(x > 0) && error("x cannot be greater than 0")
# Similarly, check out the `||` operator, which also uses short-circuit evaluation to perform the "or" operation.
true || println("hi")
# and
false || println("hi")
# ### Exercises
#
# #### 5.1
# Write a conditional statement that prints a number if the number is even and the string "odd" if the number is odd.
# #### 5.2
# Rewrite the code from 5.1 using a ternary operator.
# ### Solutions
# #### 5.1
x = 0 # change x to even or odd number
# Solution 5.1: branch on the remainder of division by 2.
if x % 2 == 0
    println(x)
else
    println("odd")
end
# #### 5.2
y = 5 # change y to even or odd number
# Solution 5.2: the same logic as 5.1, condensed into a ternary expression.
(y % 2 == 0) ? println(y) : println("odd")
| Conditionals.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # **AlTar 2 - a toy model with epistemic uncertainties**
#
# # **Step 2 - run AlTar2**
#
# This notebook follows the `step 1` notebook in which the input files for **AlTar2** have been prepared.
#
# As a reminder, we want to incorporate the epistemic uncertainties in our sampling process. To do so, we have prepared the configuration `test1.pfg` to update $\mathbf{C}_\mathrm{p}$ at every step of sampling procedure.
# Here are the input files for our toy model:
# - `test1.gf.h5`
# - `test1d.h5`
# - `test1.Cd.h5`
# - `test1.Cov.h5`
# - `test1.mprior.h5`
# - `test1.KElastic.h5`
#
# The *updated $\mathbf{C}_\mathrm{p}$* option currently only works on GPU. Within this notebook, we only have access to CPU. We will then perform the sampling process with a static $\mathbf{C}_\mathrm{p}$ incorporated as the misfit covariance matrix $\mathbf{C}_\mathrm{\chi}$. In this case, our input files become:
# - `test1.gf.h5`
# - `test1.d.h5`
# - `test1.Cx.h5` (sum of $\mathbf{C}_\mathrm{d}$ and $\mathbf{C}_\mathrm{p}$)
#
# In this example, we will:
# 1. Update the configuration file, for use with CPUs
# 2. Run Altar2
# 3. Analyze the results
#
# ### 1. Update the `test1.pfg` configuration file to `slipmodel.pfg`, for use with CPUs
#
# We modify the configuration file for use with CPUs, and with a static $\mathbf{C}_\mathrm{p}$.
#
# The `slipmodel.pfg` file is:
#
# ;
# ; <NAME>
# ; orthologue
# ; (c) 1998-2019 all rights reserved
# ;
#
# ; the application
# ; the name should be the same as the AlTar application instance (name='linear')
# linear:
# ; the model configuration
# model:
# case = test1
# ; the number of free parameters
# parameters = 20
# ; the number of observations
# observations = 300
#
# ; data observations
# data = test1.d.h5
#
# ; data covariance/uncertainties
# cd = test1.Cx.h5
#
# ; Green's function
# green = test1.gf.h5
#
# ; prior distribution for parameters
# prep = altar.models.seismic.moment
# prep:
# support = (0., 20.) ; slip range
# Mw_mean = 8.08
# Mw_sigma = 0.3
# Mu = [30] ; in GPa
# area = [250000000.0] ; patch area in m2
#
# prior:
# parameters = {linear.model.parameters}
# center = 10.
# sigma = 3.
#
# ; run configuration
# job.tasks = 1 ; number of tasks per host
# job.gpus = 0 ; number of gpus per task
# job.chains = 2**10 ; number of chains per task
#
#
# ; end of file
# ### 2. Run **AlTar**
#
# We follow the same steps as in the tutorial *An introduction to Altar2 framework - from a Linear Model example*. We will not detail the steps, so please refer to this tutorial for details.
# +
import altar
import altar.models.linear

# make a specialized app that uses the linear model by default
class LinearApp(altar.shells.altar, family='altar.applications.linear'):
    """
    A specialized AlTar application that exercises the Linear model
    """
    # user configurable component; defaults to the linear model
    model = altar.models.model(default='linear')

# create an instance; name='linear' makes it pick up the `linear:` section
# of the .pfg configuration file
myapp = LinearApp(name='linear')
# call initialize method to prepare its components
myapp.initialize()
# to sample the posterior with the CATMIP algorithm
myapp.model.posterior(application=myapp)
# +
# to obtain the final step data (the last annealing step of the sampler)
step=myapp.controller.worker.step
print('beta =', step.beta)
print('samples =', step.samples)
print('parameters =', step.parameters)
# to obtain theta matrix dim(samples, parameters)
theta = step.theta
# Posterior mean and standard deviation of each parameter.
mean, sd = theta.mean_sd(axis=0) # axis=0 for averaging over samples
model = myapp.model
data = myapp.data
# -
# ### 3. Analyze the results
#
# #### 3.1 Represent the inferred slip
#
# We first write a function to represent the mean and standard deviations of the inferred slip models.
# +
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns

## Create the colormap
# RGB triplets on a 0-255 scale: a green-to-blue ramp (color1/color12) and an
# orange ramp (color2).
color1 = [(218,240,178),(163,219,184),(96,194,192),(47,163,194),(32,120,180),(36,73,158)]
color12 = [(218,240,178),(163,219,184),(96,194,192),(47,163,194),(32,120,180)]
color2 = [(253,208,162),(253,174,107),(253,141,60),(241,105,19),(217,72,1),(166,54,3)]
# Mirror and concatenate the ramps: orange -> blue -> orange.
colornames = list(reversed(color2)) + color1 + list(reversed(color12)) + color2
## Function to plot slip
def plotSlipBox(moy, std, ns, nd, bounds, **params):
    '''
    Plot every subfault as a gradient between mean - std and mean + std.

    Arguments (5 required):
    --> moy    = mean slip per patch
    --> std    = standard deviation of slip per patch
    --> ns     = number of patches in strike
    --> nd     = number of patches in dip
    --> bounds = (min, max) boundaries for the slip color scale
    '''
    # BUG FIX: `colors` and `cmx` were used below but never imported anywhere
    # in the notebook; import them locally so the function is self-contained.
    from matplotlib import colors
    import matplotlib.cm as cmx

    # define slip steps (used only to set the shared x-limits below)
    x00 = np.linspace(bounds[0], bounds[1], 49)
    plt.figure(figsize=((ns * 5 / 2) / 1.2, ((nd + 1) * 53.5 / 200) / 1.5))
    # colorbar built from the module-level `colornames` ramp (0-255 RGB -> 0-1)
    cmape = colors.LinearSegmentedColormap.from_list('cptslip', [[i / 255 for i in x] for x in colornames])
    cmape.set_over('#a63603')
    cmape.set_under('#a63603')
    cNorm = colors.Normalize(vmin=bounds[0], vmax=bounds[1])
    scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cmape)
    scalarMap.set_array([])
    # Loop over subfaults, one small subplot per (strike, dip) patch.
    # BUG FIX: the original loop bound `range(Np // 2)` used an undefined
    # global `Np`; iterate over the ns * nd patches the caller described.
    col = 1
    row = 0
    for i in range(ns * nd):
        if i % ns == 0:
            # start a new subplot row at the beginning of each strike line
            row = row + 1
            col = 1
        a = plt.subplot2grid((nd + 2, ns + 1), (row, col), colspan=1, rowspan=1)
        col = col + 1
        # plot gradient from mean-std to mean
        # BUG FIX: `boundss` was an undefined name (typo for `bounds`).
        plt.imshow([np.linspace(moy[i] - std[i] - 0.1, moy[i], 30)],
                   cmap=cmape,
                   interpolation='bicubic',
                   extent=[moy[i] - std[i], moy[i], 0, 3],
                   vmin=bounds[0], vmax=bounds[1],
                   alpha=0.75, zorder=3)
        # plot gradient from mean to mean+std
        plt.imshow([np.linspace(moy[i], moy[i] + std[i] + 0.1, 30)],
                   cmap=cmape,
                   interpolation='bicubic',
                   extent=[moy[i], moy[i] + std[i], 0, 3],
                   vmin=bounds[0], vmax=bounds[1],
                   alpha=0.75, zorder=4)
        # plot the mean value as a white-edged box
        colorval = scalarMap.to_rgba(abs(moy[i]))
        rectangle = plt.Rectangle((moy[i] - 0.5, -0.5), 1.0, 4, fc=colorval, ec="white", lw=0.5, zorder=5)
        plt.gca().add_patch(rectangle)
        # plot an horizontal line
        # BUG FIX: `darklavend` and `darkblue` were undefined bare names; use
        # explicit color strings ('#734f96' is a dark-lavender hex).
        plt.axhline(y=1.5, linewidth=0.1, color='#734f96', zorder=2)
        # add a vertical line to represent the target model (10 m of slip)
        plt.axvline(x=10., linewidth=0.2, color='darkblue', alpha=0.5, zorder=2)
        # ticks aspect: hide the frame and all tick marks/labels
        plt.box(False)
        a.tick_params(axis="both", which="both", bottom="off", top="off",
                      labelbottom="off", left="off", right="off", labelleft="off")
        plt.xlim(min(x00) - 0.1, max(x00) + 0.1)
        plt.ylim(-0.5, 3.5)
    plt.tight_layout(pad=0.05)
    plt.show()
    return
# -
# Fault discretization of the toy model: 1 patch along strike, 20 along dip;
# the color scale spans 4-16 m of slip.
ns = 1
nd = 20
bounds = [4.,16.]
plotSlipBox(mean, sd, ns, nd, bounds)
# #### 3.2 Represent the fit of the predictions to the observations
# Forward-predicted surface displacements from the posterior mean slip.
pred = model.green * mean
# NOTE(review): multiplying the slip std through the Green's functions gives a
# rough spread, not the true predictive std — confirm this is intended.
pred_sd = model.green * sd
# +
# Distances perpendicular to the fault, in meters (plotted in km below).
x = np.linspace(-40000, 40000, 100)

fig = plt.figure(figsize=(7, 2))
plt.axvline(x=0, color='lightgray')
# NOTE(review): `data_sd` is not defined anywhere in this notebook — confirm
# where the observation uncertainty comes from before running this cell.
# BUG FIX: `orange` and `blue` were undefined bare names; matplotlib colors
# must be passed as strings.
plt.fill_between(x/1000, data - data_sd, data + data_sd, color='orange', alpha=0.1)
plt.fill_between(x/1000, pred - pred_sd, pred + pred_sd, color='blue', alpha=0.1)
plt.plot(x/1000, data, color='orange', label='Target predictions')
plt.plot(x/1000, pred, '--', color='blue', label='Predictions with Cp')
# Labels were set but never displayed; show them.
plt.legend()
plt.xlim([-35, 35])
plt.xlabel('Distance perpendicular to fault (km)')
plt.ylabel('Surface displacement (m)')
plt.show()
fig.savefig('surface_disp_with_cp.pdf', format='pdf')
# -
| jupyter/intro_cp/toymodel_step2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **[Deep Learning Home Page](https://www.kaggle.com/learn/deep-learning)**
#
# ---
#
# # Intro
# The TV show *Silicon Valley* had an app called "See Food" that promised to identify food.
#
# In this notebook, you will write code using and comparing pre-trained models to choose one as an engine for the See Food app.
#
# You won't go too deep into Keras or TensorFlow details in this particular exercise. Don't worry. You'll go deeper into model development soon. For now, you'll make sure you know how to use pre-trained models.
#
# # Set-Up
# We will run a few steps of environmental set-up before writing your own code. **You don't need to understand the details of this set-up code.** You can just run each code cell until you get to the exercises.
#
# ### 1) Create Image Paths
# This workspace includes image files you will use to test your models. Run the cell below to store a few filepaths to these images in a variable `img_paths`.
# +
import os
from os.path import join

# Sample images bundled with the workspace, two per class.
hot_dog_image_dir = '../input/hot-dog-not-hot-dog/seefood/train/hot_dog'

hot_dog_paths = [join(hot_dog_image_dir, filename) for filename in
                 ['1000288.jpg',
                  '127117.jpg']]

not_hot_dog_image_dir = '../input/hot-dog-not-hot-dog/seefood/train/not_hot_dog'
not_hot_dog_paths = [join(not_hot_dog_image_dir, filename) for filename in
                     ['823536.jpg',
                      '99890.jpg']]

img_paths = hot_dog_paths + not_hot_dog_paths
print("hot_dog_paths->", hot_dog_paths)
print("not_hot_dog_paths->", not_hot_dog_paths)
# BUG FIX: removed premature calls to read_and_prep_images / model.predict /
# is_hot_dog that used to live here — those names are only defined in later
# cells, so running the notebook top-to-bottom raised a NameError. The same
# calls are made (correctly) after the definitions, further below.
# -
# ### 2) Run an Example Model
# Here is the code you saw in the tutorial. It loads data, loads a pre-trained model, and makes predictions. Run this cell too.
# +
from IPython.display import Image, display
from learntools.deep_learning.decode_predictions import decode_predictions
import numpy as np
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.preprocessing.image import load_img, img_to_array

# ResNet50 expects 224x224 RGB inputs.
image_size = 224
def read_and_prep_images(img_paths, img_height=image_size, img_width=image_size):
    """Load the images at `img_paths`, resize them to (img_height, img_width),
    stack them into one array and apply ResNet50's preprocessing."""
    loaded = [load_img(path, target_size=(img_height, img_width)) for path in img_paths]
    batch = np.array([img_to_array(image) for image in loaded])
    return preprocess_input(batch)
# Load ResNet50 with locally stored ImageNet weights (no download needed).
my_model = ResNet50(weights='../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels.h5')
test_data = read_and_prep_images(img_paths)
preds = my_model.predict(test_data)
print(preds)

# decode_predictions maps the 1000-way scores to (id, label, score) triples.
most_likely_labels = decode_predictions(preds, top=3)
print(most_likely_labels)
# -
# ### 3) Visualize Predictions
# Show each image next to its top-3 predicted labels.
for i, img_path in enumerate(img_paths):
    display(Image(img_path))
    print(most_likely_labels[i])
# ### 4) Set Up Code Checking
# As a last step before writing your own code, run the following cell to enable feedback on your code.
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.deep_learning.exercise_3 import *
print("Setup Complete")
# # Exercises
#
# You will write a couple useful functions in the next exercises. Then you will put these functions together to compare the effectiveness of various pretrained models for your hot-dog detection program.
#
# ### Exercise 1
#
# We want to distinguish whether an image is a hot dog or not. But our models classify pictures into 1000 different categories. Write a function that takes the model's predictions (in the same format as `preds` from the set-up code) and returns a list of `True` and `False` values.
#
# Some tips:
# - Work iteratively. Figure out one line at a time outside the function, and print that line's output to make sure it's right. Once you have all the code you need, move it into the function `is_hot_dog`. If you get an error, check that you have copied the right code and haven't left anything out.
# - The raw data we loaded in `img_paths` had two images of hot dogs, followed by two images of other foods. So, if you run your function on `preds`, which represents the output of the model on these images, your function should return `[True, True, False, False]`.
# - You will want to use the `decode_predictions` function that was also used in the code provided above. We provided a line with this in the code cell to get you started.
#
#
#
# +
# Experiment with code outside the function, then move it into the function once you think it is right
# the following lines are given as a hint to get you started
decoded = decode_predictions(preds, top=1)
print(decoded)

labels = [d[0][1] for d in decoded]
print("labels->")
print(labels)
hotdogs = [l == 'hotdog' for l in labels]
print(hotdogs)

def is_hot_dog(preds):
    '''
    inputs:
    preds_array: array of predictions from pre-trained model

    outputs:
    is_hot_dog_list: a list indicating which predictions show hotdog as the most likely label
    '''
    # BUG FIX: the original body read the module-level `decoded` instead of
    # decoding its own `preds` argument, so it always classified the same
    # global batch regardless of the input.
    decoded = decode_predictions(preds, top=1)
    labels = [d[0][1] for d in decoded]
    return [label == 'hotdog' for label in labels]

# Check your answer
q_1.check()
# -
# If you'd like to see a hint or the solution, uncomment the appropriate line below.
#
# **If you did not get a working solution, copy the solution code into your code cell above and run it. You will need this function for the next step.**
# Show the hint and the reference solution for exercise 1.
q_1.hint()
q_1.solution()
# +
print("hot_dog_paths->", hot_dog_paths)
print("not_hot_dog_paths->", not_hot_dog_paths)

# NOTE(review): this re-loads the same ResNet50 weights already held by
# `my_model`; re-instantiating is redundant but harmless.
new_model = ResNet50(weights='../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels.h5')

hotdog_image_data = read_and_prep_images(hot_dog_paths)
preds_for_hotdogs = new_model.predict(hotdog_image_data)
print("preds_for_hotdogs->", is_hot_dog(preds_for_hotdogs))

not_hotdog_image_data = read_and_prep_images(not_hot_dog_paths)
preds_for_not_hotdogs = new_model.predict(not_hotdog_image_data)
print("preds_for_not_hotdogs->", is_hot_dog(preds_for_not_hotdogs))
# -
# ### Exercise 2: Evaluate Model Accuracy
#
# You have a model (called `my_model`). Is it good enough to build your app around?
#
# Find out by writing a function that calculates a model's accuracy (fraction correct). You will try an alternative model in the next step. So we will put this logic in a reusable function that takes data and the model as arguments, and returns the accuracy.
#
# Tips:
#
# - Use the `is_hot_dog` function from above to help write your function
# - To save you some scrolling, here is the code from above where we used a TensorFlow model to make predictions:
#
# ```
# my_model = ResNet50(weights='../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels.h5')
# test_data = read_and_prep_images(img_paths)
# preds = my_model.predict(test_data)
# ```
# +
# def calc_accuracy(model, paths_to_hotdog_images, paths_to_other_images):
# # iterate through images testing prediction against actual
# # if match, bump accurate counter
# # accuracy = accurate counter / total
# my_model = ResNet50(weights='../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels.h5')
# test_data = read_and_prep_images(img_paths)
# preds = my_model.predict(test_data)
# hotdog_count = len(paths_to_hotdog_images)
# not_hotdog_count = len(paths_to_other_images)
# print("total hotdog vs not hotdog = ", hotdog_count, " vs ", not_hotdog_count)
# pred_hotdog_count = 0
# pred_not_hotdog_count = 0
# hotdogs = is_hot_dog(preds)
# print(hotdogs)
# # find total pred hotdog - not_hotdog
# for pred in hotdogs:
# if (pred): pred_hotdog_count = pred_hotdog_count + 1
# else: pred_not_hotdog_count = pred_not_hotdog_count + 1
# print("total pred hotdog vs not hotdog = ", pred_hotdog_count, " vs ", pred_not_hotdog_count)
# total = hotdog_count + not_hotdog_count
# delta = (abs(hotdog_count - pred_hotdog_count)) + (abs(not_hotdog_count - pred_not_hotdog_count))
# accuracy = ((total - delta)/total)
# print("accuracy = ", accuracy)
# return accuracy
# #print(preds)
# # Code to call calc_accuracy. my_model, hot_dog_paths and not_hot_dog_paths were created in the setup code
# my_model_accuracy = calc_accuracy(my_model, hot_dog_paths, not_hot_dog_paths)
# print("Fraction correct in small test set: {}".format(my_model_accuracy))
# # Check your answer
# q_2.check()
# +
def calc_accuracy(model, paths_to_hotdog_images, paths_to_other_images):
    """Return the fraction of images `model` classifies correctly: hot-dog
    images should be flagged True by is_hot_dog, the others False."""
    # Class sizes form the denominator of the accuracy.
    n_hotdogs = len(paths_to_hotdog_images)
    n_others = len(paths_to_other_images)
    print("total hotdog vs not hotdog = ", n_hotdogs, " vs ", n_others)

    hotdog_batch = read_and_prep_images(paths_to_hotdog_images)
    hotdog_preds = model.predict(hotdog_batch)
    print("preds_for_hotdogs->", is_hot_dog(hotdog_preds))
    # True counts as 1, so the sum is the number of correct hot-dog calls.
    correct_hotdogs = sum(is_hot_dog(hotdog_preds))

    other_batch = read_and_prep_images(paths_to_other_images)
    other_preds = model.predict(other_batch)
    print("preds_other_images->", is_hot_dog(other_preds))
    # For the other class, correct means NOT judged a hot dog.
    correct_others = n_others - sum(is_hot_dog(other_preds))

    print("total correct hotdog vs not hotdog = ", correct_hotdogs, " vs ", correct_others)
    n_correct = correct_hotdogs + correct_others
    n_total = n_hotdogs + n_others
    print("total_correct=", n_correct)
    print("total_preds=", n_total)
    return n_correct / n_total
# Code to call calc_accuracy. my_model, hot_dog_paths and not_hot_dog_paths were created in the setup code
print("hot_dog_paths->", hot_dog_paths)
print("not_hot_dog_paths->", not_hot_dog_paths)
my_model_accuracy = calc_accuracy(my_model, hot_dog_paths, not_hot_dog_paths)
print("Fraction correct in small test set: {}".format(my_model_accuracy))

# Check your answer
q_2.check()
# -
# If you'd like a hint or the solution, uncomment the appropriate line below
#q_2.hint()
q_2.solution()
# ### Exercise 3:
# There are other models besides the ResNet model (which we have loaded). For example, an earlier winner of the ImageNet competition is the VGG16 model. Don't worry about the differences between these models yet. We'll come back to that later. For now, just focus on the mechanics of applying these models to a problem.
#
# The code used to load a pretrained ResNet50 model was
#
# ```
# my_model = ResNet50(weights='../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels.h5')
# ```
#
# The weights for the model are stored at `../input/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5`.
#
# In the cell below, create a VGG16 model with the preloaded weights. Then use your `calc_accuracy` function to determine what fraction of images the VGG16 model correctly classifies. Is it better or worse than the pretrained ResNet model?
# +
# import the model
from tensorflow.keras.applications import VGG16

# Same pattern as ResNet50: instantiate with locally stored ImageNet weights.
vgg16_model = VGG16(weights='../input/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5')

# calculate accuracy on small dataset as a test
vgg16_accuracy = calc_accuracy(vgg16_model, hot_dog_paths, not_hot_dog_paths)
print("Fraction correct in small dataset: {}".format(vgg16_accuracy))

# Check your answer
q_3.check()
# -
# Uncomment the appropriate line below if you'd like a hint or the solution
q_3.hint()
#q_3.solution()
# If this model is used for an app that runs on a phone, what factors besides accuracy might you care about? After you've thought about it, keep going below.
# # Keep Going
# You are ready for **[Transfer Learning](https://www.kaggle.com/dansbecker/transfer-learning/)**, which will allow you to apply the same level of power for your custom purposes.
#
# ---
# **[Deep Learning Home Page](https://www.kaggle.com/learn/deep-learning)**
#
#
#
#
#
# *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum) to chat with other Learners.*
| kaggle-tutorials/Exercise_ TensorFlow Programming (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: spatial_audio
# language: python
# name: spatial_audio
# ---
# +
import os
import gc
import time
import numpy as np
import IPython.display as ipd
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from tqdm.notebook import tqdm

# Change the working directory to ../src (dirname strips the trailing slash)
# so the sparl package imports below resolve.
os.chdir(os.path.dirname("../src/"))
# +
import torch
import torch.nn as nn
import torch.optim as optim

# Set seeds for reproducibility
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# -
from sparl.datasets import librispeech_loader
from sparl.models import LRClassifier
from sparl.utils import ConfigObject
# # Hyperparameters
# +
# Quantization depth used by the loader.
N_BITS = 8

# Evaluation configuration: loader settings plus the checkpoint to restore.
params = {
    'loader_params': dict(
        batch_size=128,
        shuffle=False,          # keep a fixed order for reproducible eval
        num_workers=0,
        ls_root='../data/',
        url='test-clean',
        cipic_root='../data/CIPIC_hrtf_database/standard_hrir_database/',
        n_mels=128,
        seconds=0.5,
        n_bits=N_BITS,
    ),
    'model_path': '../models/last_model.pt',
}
# -
# # Data Loaders
loader = librispeech_loader(**params['loader_params'])
# # Model
# Use the GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = LRClassifier()
model.load_state_dict(torch.load(params['model_path']))
model = model.to(device)
# Binary cross-entropy on raw logits (the model does not apply a sigmoid).
m_loss = nn.BCEWithLogitsLoss()
# # Setup Execution
def evaluate(model, loader, loss_fn, threshold=.5):
    """Run `model` over every batch in `loader` and return (mean loss, accuracy).

    Parameters
    ----------
    model : nn.Module
        Binary classifier emitting raw logits.
    loader : iterable of (melspec, target, _) batches.
    loss_fn : callable
        Batch-mean loss on logits, e.g. nn.BCEWithLogitsLoss().
    threshold : float
        Probability cut-off for calling a prediction positive.

    Returns
    -------
    (float, float) : sample-mean held-out loss, accuracy.
    """
    model.eval()
    # Derive the device from the model instead of relying on the notebook-level
    # `device` global, so the function is self-contained.
    model_device = next(model.parameters()).device
    heldout_loss = 0.0
    n_samples = 0
    preds = []
    targets = []
    sigmoid = nn.Sigmoid()
    with torch.no_grad():
        for melspec, target, _ in loader:
            melspec = melspec.to(model_device)
            target = target.to(model_device)
            out = model(melspec)
            # Weight each batch's mean loss by its size so the final average is
            # exact even when the last batch is smaller.
            heldout_loss += loss_fn(out, target).item() * len(target)
            n_samples += len(target)
            preds.append(sigmoid(out).detach().cpu().numpy())
            targets.append(target.detach().cpu().numpy())
    # BUG FIX: the original divided the sample-weighted loss sum by the number
    # of *batches* (len(loader)), which inflates the reported loss by roughly
    # the batch size. Divide by the number of samples instead.
    heldout_loss /= n_samples
    # Compute the accuracy (thresholded probabilities vs. targets).
    preds = np.concatenate(preds, axis=0)
    preds = np.where(preds < threshold, 0, 1)
    targets = np.concatenate(targets, axis=0)
    # Equivalent to sklearn.metrics.accuracy_score without the extra
    # dependency: exact-match per row for 2-D label arrays, element-wise
    # for 1-D.
    correct = (preds == targets)
    if correct.ndim > 1:
        correct = correct.all(axis=1)
    heldout_acc = float(correct.mean())
    return heldout_loss, heldout_acc
# # Evaluate on Librispeech
# Repeat evaluation 10 times and report mean/std of loss and accuracy.
# NOTE(review): with shuffle=False and no stochastic layers at eval time the
# ten runs are presumably identical (std = 0) -- confirm whether this loop is
# actually meant to measure run-to-run variance.
loss_list, acc_list = [], []
for _ in tqdm(range(10)):
    loss, acc = evaluate(model, loader, m_loss)
    loss_list.append(loss)
    acc_list.append(acc)
print("Librispeech loss: mean = {:.2f} | std = {:.2f}".format(np.mean(loss_list), np.std(loss_list)))
print("Librispeech acc: mean = {:.2f} | std = {:.2f}".format(np.mean(acc_list), np.std(acc_list)))
| notebooks/01_librispeech_evaluate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # HIMS database
# * Loop through the database to see what tables we might be interested in
# * Export those as parquet to explore further
# +
import pandas
import pyodbc
import sqlalchemy
import urllib
import os
# Connection settings; credentials come from the environment, never hard-coded.
server = '10.43.20.148'
database = 'HimsLoan'
username = os.environ.get('HIMS_USERNAME')
password = os.environ.get('HIMS_PASSWORD')
driver = '{ODBC Driver 17 for SQL Server}'
# BUG FIX: the original connection string used PORT=1443; SQL Server's default
# port is 1433, so 1443 was almost certainly a typo. Confirm the server is not
# deliberately listening on 1443 before relying on this.
params = urllib.parse.quote_plus(
    'DRIVER=' + driver + ';SERVER=' + server + ';PORT=1433;DATABASE=' + database
    + ';UID=' + username + ';PWD=' + password)
engine = sqlalchemy.create_engine('mssql+pyodbc:///?odbc_connect=%s' % params)
# -
# Tables of interest, to be exported to parquet for offline exploration.
table_list = ['Project', 'LutProjectStatus', 'Loan', 'LoanFund', 'FundingInfo',
              'LutSourceofFunds', 'AssnProjectIDISNo', 'MilestoneDate', 'LutMilestoneName'
              ]
# +
pandas.options.display.max_columns = 999
for table in table_list:
    try:
        df = pandas.read_sql(f'SELECT * FROM "{table}"', engine)
        df.to_parquet(f'../data/HIMS/{table}.parquet')
    except Exception as e:
        # Report failures instead of silently swallowing them (the original
        # bare `pass` hid missing tables and permission errors). The unused
        # COUNT(*) pre-query was dropped: read_sql fails the same way.
        print(f'Skipping {table}: {e}')
# -
# BUG FIX: `lut_tables` was never defined anywhere in this notebook, so this
# cell raised a NameError. Presumably the intent was to also export the "Lut*"
# look-up tables (the complement of `filtered` below) -- confirm with the
# original author.
lut_tables = [t for t in engine.table_names() if t.startswith('Lut')]
for table in lut_tables:
    try:
        df = pandas.read_sql(f'SELECT * FROM "{table}"', engine)
        df.to_parquet(f'../data/HIMS/{table}.parquet')
    except Exception as e:
        # Surface failures rather than silently skipping the table.
        print(f'Skipping {table}: {e}')
"""
# Display head for all the tables
pandas.options.display.max_columns = 999
for table in engine.table_names():
try:
size = engine.execute(f'SELECT COUNT(*) FROM "{table}"').fetchall()[0][0]
display({'text/markdown': f'## {table}:\n ### {size} rows'}, raw=True)
df = pandas.read_sql(f'SELECT TOP 5 * FROM "{table}"', engine)
display(df.head(5))
except Exception as e:
pass
"""
# Candidate fact tables: everything that is not a "Lut*" look-up table and
# not a temp table.
filtered = list(filter(lambda x: x.find("Lut") != 0 and x.lower().find("temp") != 0, engine.table_names()))
filtered
| notebooks/hims.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# Data Shift Exploration - R
# ===
#
# R-lang analysis of data shift.
# Kernel methods (kmmd) and Hotelling's T^2 two-sample test.
library(kernlab)
library(Hotelling)
# First load: sample dataframe with the first column used as row names.
# NOTE(review): this result is immediately overwritten by the read.table()
# call below, which keeps that column as data -- confirm which is intended.
sdf = read.csv("/export/scratch2/levon003/repos/wiki-ores-feedback/data/derived/data-shifts/sdf.csv", sep=",", row.names=1)
sdf_mat <- as.matrix(sdf)
dim(sdf_mat)
sdf = read.table("/export/scratch2/levon003/repos/wiki-ores-feedback/data/derived/data-shifts/sdf.csv",
                 header=TRUE, sep=",")
sdf_mat <- as.matrix(sdf)
dim(sdf_mat)
head(sdf_mat)
# Second sample for the two-sample comparison.
tdf = read.table("/export/scratch2/levon003/repos/wiki-ores-feedback/data/derived/data-shifts/tdf.csv",
                 header=TRUE, sep=",")
tdf_mat <- as.matrix(tdf)
dim(tdf_mat)
# Kernel maximum mean discrepancy test between the two samples (RBF kernel,
# bandwidth chosen automatically, 200 bootstrap replications).
kmmd(sdf_mat, tdf_mat, kernel="rbfdot",kpar="automatic", alpha = 0.7,
     asymptotic = FALSE, replace = TRUE, ntimes = 200, frac = 1)
# Parametric mean-difference test (Hotelling's T^2), no shrinkage, no permutation.
hotelling.test(sdf, tdf, shrinkage=FALSE, perm=FALSE)
| notebook/DataShiftExploration-R.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bombora Topic Interest Datasets
# Explaining Bombora topic interest score datasets.
# ## 0. Surge vs Interest?
#
# As a matter of clarification, *topic surge* as a product is generated from *topic interest* models. In technical discussions, we'll refer to both the product and the models by the latter, providing a conceptually more meaningful mapping. Bombora's user-facing topic interest scores (and the origin of the data you currently have) are currently generated from a 3rd party topic interest model and service. As mentioned previously, we are also are developing an internal topic interest model.
#
#
# ## 1. End User Datasets (Overview)
#
# In general, down the topic interest line, there exists primarily three datasets that are consumed by end users (ordering from raw to aggregate):
#
# 1. **Firehose** *(FH)*: the raw content consumption data, which contains event level resolution. Size is thousands of GBs/week, only a handful of clients actually consume this data. Bombora Data Science team refers to this as the *raw event data*.
#
# 2. **All Domains All Topic** *(ADAT)*: an aggregate topic interest score on keys of interest in the Firehose data. Size is tens of GBs/week. Bombora Data Science team refers to this as the *topic interest score data*.
#
# 3. **Master Surge** *(MS)*: A filtering and transformation of the *ADAT* dataset to consider only those topic keys whose scores meet some *surge* score criteria (explained below). Size is GBs/week. Bombora Data Science team refer to this as *surging topic interest score data*.
#
# While dataset naming convention might be a little confusing, the simple explanation is that the topic interest model ingests *Firehose* data, and outputs both the *ADAT* and *MasterSurge*.
#
# ## 2. End User Dataset (Details)
#
# As you're interested in the aggregate topic interest score, we'll only consider ADAT and MasterSurge. While similar, each has their own schema. To understand better, we consider representative topic interest result files for both *ADAT* and *MasterSurge* that are output from the current topic surge batch jobs for the week starting *2016-07-19*:
#
#
#
# !ls -lh ../../data/topic-interest-score/
# Note the file `Input_AllDomainsAllTopics_20150719-reduced.csv` is tagged with `reduced` to indicate it's not the complete record set. This is due to 2GB file limitations with [git LFS](https://github.com/blog/1986-announcing-git-large-file-storage-lfs). The original compressed `Input_AllDomainsAllTopics_20150719.csv.gz` file weighed in at 2.62 GB.
#
# This file was generated via:
#
# `head -n 166434659 Input_AllDomainsAllTopics_20150719.csv > Input_AllDomainsAllTopics_20150719-reduced.csv`
# To get an idea of record count, count the number of lines in both files:
# !gzip -dc ../../data/topic-interest-score/Output_MasterSurgeFile_20150719.csv.gz | wc -l
# !gzip -dc ../../data/topic-interest-score/Input_AllDomainsAllTopics_20150719-reduced.csv.gz | wc -l
# As we're interested in understanding the data schema we'll consider a smaller (non-statistically significant) sample for both files.
# Build small (non-statistically-significant) sample files from the gzipped
# source data, via shell commands driven from the notebook.
path_to_data = '../../data/topic-interest-score/'
# data_files = !ls {path_to_data}
data_files
n = 10000
#cl_cmd_args = '{cmd} -n {n} ../sample_data/{data_file} >> {data_file_root}-sample.csv'
cl_cmd_args = 'gzip -cd {path_to_data}{data_file} | {cmd} -n {n} >> {data_file_out}'
for data_file in data_files:
    # BUG FIX: the original used data_file.strip('.csv.gz'), which strips
    # *characters* from the set {., c, s, v, g, z} at both ends of the name
    # rather than removing the literal '.csv.gz' suffix -- it only worked here
    # by coincidence. Cut the exact suffix instead.
    suffix = '.csv.gz'
    if data_file.endswith(suffix):
        data_file_out = data_file[:-len(suffix)] + '-sample.csv'
    else:
        data_file_out = data_file + '-sample.csv'
    print('rm -f {data_file_out}'.format(data_file_out=data_file_out))
    # !rm -f {data_file_out}
    print('touch {data_file_out}'.format(data_file_out=data_file_out))
    # !touch {data_file_out}
    final_cl_cmd = cl_cmd_args.format(cmd='head', n=n,
                                      path_to_data=path_to_data,
                                      data_file=data_file,
                                      data_file_out=data_file_out)
    print(final_cl_cmd)
    # !{final_cl_cmd}
# ### ADAT
# The ADAT file contains topic interest scores at both global and metro resolutions: model aggregate values produced at both `(domain, topic)` and `(domain, topic, metro)` keys.
#
# The schema of the data is:
# ```
# Company Name, Domain, Size, Industry, Category, Topic, Composite Score, Bucket Code, Metro Area, Metro Composite Score, Metro Bucket Code, Domain Origin Country
# ```
#
# Note that in the schema above, the:
#
# - `Composite Score` is the topic interest score from the `(domain, topic)` key.
# - `Metro Composite Score` is the topic interest score from the `(domain, topic, metro)` key.
#
#
# Additionally, we note that the topic interest scores in the ADAT file use a denormalized / flattened schema, as shown below
# ! head -n 15 Input_AllDomainsAllTopics_20150719-reduced-sample.csv
# ! tail -n 15 Input_AllDomainsAllTopics_20150719-reduced-sample.csv
# ### Master Surge
# #### Filter
# For end users who only wish to consider the *surging* topics—`(domain, topic)` and `(domain, topic, metro)` keys whose topic interest score meet surge criteria (i.e., when score is > 50)—we filter the ADAT dataset to only consider scores greater than 50.
#
# #### Transform
# In producing this filtered result, instead of leaving the schema intact, the 3rd-party also performs a transformation of the topic interest score(s) representation. The schema is initially the same, like:
#
# ```
# Company Name, Domain, Size, Industry, Category, Topic, Composite Score,
# ```
# however, the metro resolution scores is now collapsed into an array (of sorts), unique to each `(domain, topic)` key. The metro name and score is formatted as `metro name[metro score]`, and each line can contain multiple results, formatted together like:
# ```
# metro_1[metro_1 score]|metro_2[metro_2 score]|metro_3[metro_3 score],
# ```
# and finally, again, ending with the domain origin country, which would collectively look like:
#
# ```
# Company Name, Domain, Size, Industry, Category, Topic, Composite Score, metro_1[metro_1 score]|metro_2[metro_2 score]|metro_3[metro_3 score], Domain Country Origin
# ```
#
# Example output, below:
# ! head -n 15 Output_MasterSurgeFile_20150719-sample.csv
# ! tail -n 15 Output_MasterSurgeFile_20150719-sample.csv
| notebooks/topic-interest-score/topic-interest-result-data-schema.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import torch
import sys
sys.path.extend(['..'])
from mvn import MVNIso as MVN
from distributions import log_prob_banana as log_p
from samplers.mcmc import HMC
from util import ess
import matplotlib.pyplot as plt
import time
# %matplotlib inline
torch.manual_seed(131416327) # chosen by keyboard-mashing
# -
# Make a discrete grid of x, y and compute the $p(x)$ density on it
# Discrete evaluation grid covering the banana density's support.
x, y = torch.linspace(-4, 4, 201), torch.linspace(-2.5, 3.5, 201)
xx, yy = torch.meshgrid(x, y)
# Flatten to a 2 x N matrix of (x, y) points for the log-density call.
xy = torch.stack([xx.flatten(), yy.flatten()], dim=0)
log_p_grid = log_p(xy)
# Normalize over the grid cells so p_grid sums to 1 (discrete approximation).
p_grid = (log_p_grid - torch.logsumexp(log_p_grid.flatten(), dim=0)).exp().reshape(xx.size())
# Do classic MCMC on x
# +
# Classic HMC directly on x-space.
hmc = HMC(log_p, leapfrog_t=2.)
tstart=time.time()
# Tune mass/step-size from 200 random starting points.
hmc.tune(torch.randn(200, 2), target_accept=0.95)
telapse = time.time()-tstart
print(f"Classic HMC sampler tuned in {telapse} seconds")
print(f"\tmass = {hmc.mass}")
print(f"\tdt = {hmc.dt}")
tstart=time.time()
hmc_results = hmc.sample(torch.randn(2), n_samples=1000)
telapse = time.time()-tstart
print(f"Got {hmc_results['samples'].size(0)} samples in {telapse} seconds")
print(f"\taccept = {hmc_results['accept']*100:.2f}%")
# Effective sample size per dimension (autocorrelation-adjusted).
effective_num_samples = ess(hmc_results['samples']).numpy()
print(f"\tess = {effective_num_samples.round()}")
# -
# Trace plot of the two coordinates to eyeball mixing.
plt.figure()
plt.plot(hmc_results['samples'])
plt.show()
# Do classic VI on $q(x;\theta)$
# +
# Classic VI: fit q(x; theta) by Newton's method on KL(q||p).
q_vi = MVN.new_random(d=2)
q_vi.theta.requires_grad_(True)
def _kl_q_p_helper():
    """Return (KL, grad, Hessian) of KL(q_vi||p) w.r.t. q_vi.theta.

    KL is computed up to the (theta-independent) log Z of p: negative entropy
    minus E_q[log p], the expectation taken by quadrature with 5 points.
    """
    kl = -q_vi.entropy() - q_vi.quadrature_ev(log_p, 5)
    # create_graph=True keeps the graph so the gradient can itself be
    # differentiated to form the Hessian, one row per parameter.
    grad_kl = torch.autograd.grad(kl, q_vi.theta, create_graph=True)[0]
    hess = torch.zeros(q_vi.n_params, q_vi.n_params)
    for i in range(q_vi.n_params):
        hess[i, :] = torch.autograd.grad(grad_kl[i], q_vi.theta, retain_graph=True)[0]
    return kl.detach(), grad_kl.detach(), hess
kl_vals = torch.zeros(50)
for i in range(50):
    kl_vals[i], grad, hess = _kl_q_p_helper()
    with torch.no_grad():
        # Newton's method step (in-place so the leaf tensor keeps its identity)
        q_vi.theta.copy_(q_vi.theta - torch.linalg.solve(hess, grad))
q_vi.theta.requires_grad_(False)
plt.figure()
plt.plot(kl_vals)
plt.xlabel('VI step')
plt.ylabel('KL(q||p) + log Z')
# -
# Do our parametric-sampling method
# +
# Proposed method: sample q-parameters theta from
#   psi(theta) ∝ sqrt(det F(theta)) * exp(-lam_kl * KL(q_theta || p)).
q, lam_kl = MVN(d=2), 2.
def log_psi(th):
    """Log-density over theta; NOTE: mutates the shared `q` via set_theta."""
    q.set_theta(th)
    kl_qp = -q.entropy() - q.quadrature_ev(log_p, 5)
    # Half log-det Fisher (Jeffreys-style factor) minus the tempered KL.
    return 0.5*q.log_det_fisher() - lam_kl * kl_qp
stams_hmc = HMC(log_psi, leapfrog_t=2.)
# +
tstart=time.time()
# Tune in theta-space (dimension q.n_params rather than 2).
stams_hmc.tune(torch.randn(200, q.n_params), target_accept=0.95)
telapse = time.time()-tstart
print(f"Stams HMC sampler tuned in {telapse} seconds")
print(f"\tmass = {stams_hmc.mass}")
print(f"\tdt = {stams_hmc.dt}")
tstart=time.time()
stams_hmc_results = stams_hmc.sample(torch.randn(q.n_params), n_samples=1000)
telapse = time.time()-tstart
print(f"Got {stams_hmc_results['samples'].size(0)} samples in {telapse} seconds")
print(f"\taccept = {stams_hmc_results['accept']*100:.2f}%")
effective_num_samples = ess(stams_hmc_results['samples']).numpy()
print(f"\tess = {effective_num_samples.round()}")
# -
# Trace plot of the sampled theta components.
plt.figure()
plt.plot(stams_hmc_results['samples'])
plt.show()
# ## Figure 1: conceptual introduction to sampling, VI, and our intermediate method
# +
levels = 20
fig, ax = plt.subplots(1, 3, figsize=(6.5, 2.5))
# Panel 1: sampling -- thinned HMC draws over the target density.
cont = ax[0].contourf(xx, yy, p_grid, origin='lower', vmin=0., levels=levels)
xl, yl = ax[0].get_xlim(), ax[0].get_ylim()
ax[0].set_title('Sampling')
ax[0].set_xticks([]); ax[0].set_yticks([])
ax[0].set_xlim(xl); ax[0].set_ylim(yl)
ax[0].axis('image')
ax[0].plot(hmc_results['samples'][::10,0], hmc_results['samples'][::10,1], '.r', markersize=2)
# Panel 2: VI -- the single fitted Gaussian's 1-sigma ellipse.
ax[1].contourf(xx, yy, p_grid, origin='lower', vmin=0., levels=levels)
ax[1].set_title('VI')
ax[1].set_xticks([]); ax[1].set_yticks([])
ax[1].set_xlim(xl); ax[1].set_ylim(yl)
ax[1].axis('image')
ax[1].plot(*q_vi.ellipse(nsigma=1.), color='r', linewidth=1)
# Panel 3: proposed method -- one 1-sigma ellipse per (thinned) sampled theta.
ax[2].contourf(xx, yy, p_grid, origin='lower', vmin=0., levels=levels)
ax[2].set_title("Proposed 'best of both'")
ax[2].set_xticks([]); ax[2].set_yticks([])
ax[2].set_xlim(xl); ax[2].set_ylim(yl)
ax[2].axis('image')
for theta in stams_hmc_results['samples'][::50]:
    q = MVN(theta=theta)
    ax[2].plot(*q.ellipse(nsigma=1.), color='r', linewidth=1)
# BUG FIX: tight_layout() must run before savefig(), otherwise the saved SVG
# is written without the final layout adjustments.
fig.tight_layout()
fig.savefig('concept_figure.svg')
# -
cont.cmap
| notebooks/concept_figure.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## List comprehensions: Basic syntax
#
# - Minimal syntax: `[expression for element in iterable]`
# - The square brackets are necessary—without it's a generator expression!
#
# Let's consider a `for` loop that prints out the square root of 0 - 4:
# +
from math import sqrt
# Plain for-loop version: print the square roots of 0-4.
for i in range(5):
    print(sqrt(i))
# -
# Now let's implement this with a `list` comprehension:
# (Pedagogical example: the comprehension is used purely for its side effect,
# so it also builds and discards a list of five None values.)
[print(sqrt(i)) for i in range(5)]
# ## Filtering list comprehensions
#
# - Filtering syntax: `[expression for element in iterable if expression]`
# - This is an alternative (in many cases) to the `continue` statement
#
# Let's consider a `for` loop that skips all odd numbers:
# Loop version: `continue` skips the odd numbers (i%2 is truthy for odd i).
for i in range(5):
    if i%2:
        continue
    print(i)
# Now let's implement this with a `list` comprehension:
# The `if` filter clause replaces the `continue` statement.
[print(i) for i in range(5) if not i%2]
# ## Breaking list comprehensions
#
# - There is no way to `break` a list comprehension
# - Although you can do this with a generator expression, which we will meet later this section!
#
# Let's consider a `for` loop that iteratres over an infinite generator function, `fibonacci()`, until a number that is larger than 10 is encountered:
# +
def fibonacci():
    """Infinite generator of Fibonacci numbers: 1, 1, 2, 3, 5, 8, ..."""
    a, b = 1, 1
    yield a
    yield b
    while True:
        a, b = b, a + b
        yield b
# Loop version: stop once values exceed 10 -- `break` has no comprehension
# analogue.
for i in fibonacci():
    if i > 10:
        break
    print(i)
# -
# There is no way to implement this behavior with a `list` comprehension. The following results in an infinite loop!
# WARNING: intentionally non-terminating (pedagogical): the filter clause
# never stops consuming the underlying infinite generator.
[i for i in fibonacci() if i <= 10]
| Functional_Thinking/Lab/29A-list-comprehensions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="Tia3MP1SJpgj"
# # Springboard Data Science Career Track Unit 4 Challenge - Tier 3 Complete
#
# ## Objectives
# Hey! Great job getting through those challenging DataCamp courses. You're learning a lot in a short span of time.
#
# In this notebook, you're going to apply the skills you've been learning, bridging the gap between the controlled environment of DataCamp and the *slightly* messier work that data scientists do with actual datasets!
#
# Here’s the mystery we’re going to solve: ***which boroughs of London have seen the greatest increase in housing prices, on average, over the last two decades?***
#
#
# A borough is just a fancy word for district. You may be familiar with the five boroughs of New York… well, there are 32 boroughs within Greater London [(here's some info for the curious)](https://en.wikipedia.org/wiki/London_boroughs). Some of them are more desirable areas to live in, and the data will reflect that with a greater rise in housing prices.
#
# ***This is the Tier 3 notebook, which means it's not filled in at all: we'll just give you the skeleton of a project, the brief and the data. It's up to you to play around with it and see what you can find out! Good luck! If you struggle, feel free to look at easier tiers for help; but try to dip in and out of them, as the more independent work you do, the better it is for your learning!***
#
# This challenge will make use of only what you learned in the following DataCamp courses:
# - Prework courses (Introduction to Python for Data Science, Intermediate Python for Data Science)
# - Data Types for Data Science
# - Python Data Science Toolbox (Part One)
# - pandas Foundations
# - Manipulating DataFrames with pandas
# - Merging DataFrames with pandas
#
# Of the tools, techniques and concepts in the above DataCamp courses, this challenge should require the application of the following:
# - **pandas**
# - **data ingestion and inspection** (pandas Foundations, Module One)
# - **exploratory data analysis** (pandas Foundations, Module Two)
# - **tidying and cleaning** (Manipulating DataFrames with pandas, Module Three)
# - **transforming DataFrames** (Manipulating DataFrames with pandas, Module One)
# - **subsetting DataFrames with lists** (Manipulating DataFrames with pandas, Module One)
# - **filtering DataFrames** (Manipulating DataFrames with pandas, Module One)
# - **grouping data** (Manipulating DataFrames with pandas, Module Four)
# - **melting data** (Manipulating DataFrames with pandas, Module Three)
# - **advanced indexing** (Manipulating DataFrames with pandas, Module Four)
# - **matplotlib** (Intermediate Python for Data Science, Module One)
# - **fundamental data types** (Data Types for Data Science, Module One)
# - **dictionaries** (Intermediate Python for Data Science, Module Two)
# - **handling dates and times** (Data Types for Data Science, Module Four)
# - **function definition** (Python Data Science Toolbox - Part One, Module One)
# - **default arguments, variable length, and scope** (Python Data Science Toolbox - Part One, Module Two)
# - **lambda functions and error handling** (Python Data Science Toolbox - Part One, Module Four)
# + [markdown] colab_type="text" id="Ipgd2nV8Jpgl"
# ## The Data Science Pipeline
#
# This is Tier Three, so we'll get you started. But after that, it's all in your hands! When you feel done with your investigations, look back over what you've accomplished, and prepare a quick presentation of your findings for the next mentor meeting.
#
# Data Science is magical. In this case study, you'll get to apply some complex machine learning algorithms. But as [<NAME>](https://www.youtube.com/watch?v=oUs1uvsz0Ok) reminds us, there is no substitute for simply **taking a really, really good look at the data.** Sometimes, this is all we need to answer our question.
#
# Data Science projects generally adhere to the four stages of Data Science Pipeline:
# 1. Sourcing and loading
# 2. Cleaning, transforming, and visualizing
# 3. Modeling
# 4. Evaluating and concluding
#
# + [markdown] colab_type="text" id="zswDqbefJpgm"
# ### 1. Sourcing and Loading
#
# Any Data Science project kicks off by importing ***pandas***. The documentation of this wonderful library can be found [here](https://pandas.pydata.org/). As you've seen, pandas is conveniently connected to the [Numpy](http://www.numpy.org/) and [Matplotlib](https://matplotlib.org/) libraries.
#
# ***Hint:*** This part of the data science pipeline will test those skills you acquired in the pandas Foundations course, Module One.
# + [markdown] colab_type="text" id="aEau5nEvJpgm"
# #### 1.1. Importing Libraries
# + colab={} colab_type="code" id="7Bt_Q_oPJpgn"
# Let's import the pandas, numpy libraries as pd, and np respectively.
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
import numpy as np
# Load the pyplot collection of functions from matplotlib, as plt
import matplotlib.pyplot as plt
# + [markdown] colab_type="text" id="koUrawxsJpgq"
# #### 1.2. Loading the data
# Your data comes from the [London Datastore](https://data.london.gov.uk/): a free, open-source data-sharing portal for London-oriented datasets.
# + colab={} colab_type="code" id="AiLiD4v3Jpgr"
# First, make a variable called url_LondonHousePrices, and assign it the following link, enclosed in quotation-marks as a string:
# https://data.london.gov.uk/download/uk-house-price-index/70ac0766-8902-4eb5-aab5-01951aaed773/UK%20House%20price%20index.xls
url_LondonHousePrices = "https://data.london.gov.uk/download/uk-house-price-index/70ac0766-8902-4eb5-aab5-01951aaed773/UK%20House%20price%20index.xls"
# The dataset we're interested in contains the Average prices of the houses, and is actually on a particular sheet of the Excel file.
# As a result, we need to specify the sheet name in the read_excel() method.
# Put this data into a variable called properties.
# NOTE: this fetches the workbook over the network on every run.
properties = pd.read_excel(url_LondonHousePrices, sheet_name='Average price', index_col= None)
# + [markdown] colab_type="text" id="POukEJXgJpgu"
# ### 2. Cleaning, transforming, and visualizing
# This second stage is arguably the most important part of any Data Science project. The first thing to do is take a proper look at the data. Cleaning forms the majority of this stage, and can be done both before or after Transformation.
#
# The end goal of data cleaning is to have tidy data. When data is tidy:
#
# 1. Each variable has a column.
# 2. Each observation forms a row.
#
# Keep the end goal in mind as you move through this process, every step will take you closer.
#
#
#
# ***Hint:*** This part of the data science pipeline should test those skills you acquired in:
# - Intermediate Python for data science, all modules.
# - pandas Foundations, all modules.
# - Manipulating DataFrames with pandas, all modules.
# - Data Types for Data Science, Module Four.
# - Python Data Science Toolbox - Part One, all modules
# + [markdown] colab_type="text" id="Te0Q548tnzZa"
# **2.1. Exploring your data**
#
# Think about your pandas functions for checking out a dataframe.
# + colab={} colab_type="code" id="Rxirxw_qoAJa"
#Check data structure
properties.head()
# + [markdown] colab_type="text" id="tE9Sqt9-oAta"
# **2.2. Cleaning the data**
#
# You might find you need to transpose your dataframe, check out what its row indexes are, and reset the index. You also might find you need to assign the values of the first row to your column headings . (Hint: recall the .columns feature of DataFrames, as well as the iloc[] method).
#
# Don't be afraid to use StackOverflow for help with this.
# + colab={} colab_type="code" id="cdAu1A3YoH_r"
#Change DataFrame so that rows are observations and columns are variables
properties = properties.transpose()
# -
#Reset index
properties = properties.reset_index()
#Reassign first row as column header (borough names ended up in row 0 after the transpose)
properties.columns = properties.iloc[0]
properties.head()
#Drop the first row, which is now column header
properties = properties.drop(properties.index[0])
properties.head()
#Reset row index
properties = properties.reset_index(drop=True)
properties.head()
# + [markdown] colab_type="text" id="o1uLbJAsoIjK"
# **2.3. Cleaning the data (part 2)**
#
# You might have to **rename** a couple columns. How do you do this? The clue's pretty bold...
# + colab={} colab_type="code" id="GKkmn1AnoVZS"
#Rename columns with non-descriptive names
# (pd.NaT is the literal key of the second unnamed column after the transpose)
properties = properties.rename(columns={'Unnamed: 0': 'London_Borough', pd.NaT: 'ID'})
# -
properties.head()
# + [markdown] colab_type="text" id="jy8BzXHmoWEw"
# **2.4.Transforming the data**
#
# Remember what <NAME> said about tidy data?
#
# You might need to **melt** your DataFrame here.
# + colab={} colab_type="code" id="S2wM0qLuo2Zt"
#Collapse each observation for Month-Year and average value into one column each (wide -> long)
clean_properties = pd.melt(properties, id_vars= ['London_Borough', 'ID'])
# + [markdown] colab_type="text" id="7kIsgAo7o3mf"
# Remember to make sure your column data types are all correct. Average prices, for example, should be floating point numbers...
# + colab={} colab_type="code" id="ZcR4IHbcpOaq"
clean_properties.head()
# -
#Change non-descriptive column names
clean_properties = clean_properties.rename(columns = {0: 'Month', 'value': 'Average_price'})
clean_properties.head()
#Check that each column has the type of data we are looking for. For example, Average_price should all be floats
clean_properties.dtypes
#Change values in Average_price column to floats
clean_properties['Average_price'] = pd.to_numeric(clean_properties['Average_price'])
clean_properties.dtypes
#Check for missing values (count() excludes NaN, so per-column totals reveal gaps)
clean_properties.count()
# + [markdown] colab_type="text" id="knLUXHLypOtw"
# **2.5. Cleaning the data (part 3)**
#
# Do we have an equal number of observations in the ID, Average Price, Month, and London Borough columns? Remember that there are only 32 London Boroughs. How many entries do you have in that column?
#
# Check out the contents of the London Borough column, and if you find null values, get rid of them however you see fit.
# + colab={} colab_type="code" id="BnvTW5a3p0fC"
#Check all unique instances of London_Borough column; notice that some of these are not actually boroughs of London
clean_properties['London_Borough'].unique()
# -
#Inspect unnamed columns, e.g. 'Unnamed: 34'
clean_properties[clean_properties['London_Borough']=='Unnamed: 34'].head()
#Find null values and get rid of them
clean_properties = clean_properties.dropna()
clean_properties.head(48)
clean_properties.count()
#Check for unique borough names again
clean_properties['London_Borough'].unique()
#Remove the boroughs that are not actually London Boroughs. Start with a list of nonBoroughs
nonBoroughs = ['Inner London', 'Outer London',
               'NORTH EAST', 'NORTH WEST', 'YORKS & THE HUMBER',
               'EAST MIDLANDS', 'WEST MIDLANDS',
               'EAST OF ENGLAND', 'LONDON', 'SOUTH EAST',
               'SOUTH WEST', 'England']
#Now remove the rows with nonBoroughs (~ negates the isin() mask)
df = clean_properties[~clean_properties.London_Borough.isin(nonBoroughs)]
df.head()
#Compare row counts before/after the filter
print(df.shape)
print(clean_properties.shape)
# + [markdown] colab_type="text" id="PGEx6mJsp6dG"
# **2.6. Visualizing the data**
#
# To visualize the data, why not subset on a particular London Borough? Maybe do a line plot of Month against Average Price?
# +
# Sanity-check visualization for a single borough.
# Create a new dataframe with information on Camden
camden_prices = df[df['London_Borough'] == 'Camden']
#Plot Camden prices with Month in the x-axis and Average price on the y-axis, call it ax
ax = camden_prices.plot(kind ='line', x = 'Month', y='Average_price')
# Set'Price' as the label for y-axis.
ax.set_ylabel('Price')
#Show plot
plt.show()
# + [markdown] colab_type="text" id="aWTPqSJeqHnC"
# To limit the number of data points you have, you might want to extract the year from every month value your *Month* column.
#
# To this end, you *could* apply a ***lambda function***. Your logic could work as follows:
# 1. look through the `Month` column
# 2. extract the year from each individual value in that column
# 3. store that corresponding year as separate column.
#
# Whether you go ahead with this is up to you. Just so long as you answer our initial brief: which boroughs of London have seen the greatest house price increase, on average, over the past two decades?
# + colab={} colab_type="code" id="e0DF92cyqnu8"
# Extract year from datetime objects and create a new column
# Had to introduce the following to cell [1] to avoid warning messages:
# pd.options.mode.chained_assignment = None # default='warn'
df['Year'] = pd.DatetimeIndex(df['Month']).year
df.tail()
# -
#Now group dataframe by Year and print the mean of average_price/year
# NOTE(review): .mean() here relies on non-numeric columns (e.g. Month) being
# dropped silently; newer pandas versions raise instead -- consider
# .mean(numeric_only=True) when upgrading.
df_year = df.groupby(by=['London_Borough','Year']).mean()
df_year.sample(10)
# Don't want London_Borough to be the index column, so must reset index
df_year = df_year.reset_index()
df_year.head()
# + [markdown] colab_type="text" id="2knuTxAEqoJ4"
# **3. Modeling**
#
# Consider creating a function that will calculate a ratio of house prices, comparing the price of a house in 2018 to the price in 1998.
#
# Consider calling this function create_price_ratio.
#
# You'd want this function to:
# 1. Take a filter of dfg, specifically where this filter constrains the London_Borough, as an argument. For example, one admissible argument should be: dfg[dfg['London_Borough']=='Camden'].
# 2. Get the Average Price for that Borough, for the years 1998 and 2018.
# 4. Calculate the ratio of the Average Price for 1998 divided by the Average Price for 2018.
# 5. Return that ratio.
#
# Once you've written this function, you ultimately want to use it to iterate through all the unique London_Boroughs and work out the ratio capturing the difference of house prices between 1998 and 2018.
#
# Bear in mind: you don't have to write a function like this if you don't want to. If you can solve the brief otherwise, then great!
#
# ***Hint***: This section should test the skills you acquired in:
# - Python Data Science Toolbox - Part One, all modules
# + colab={} colab_type="code" id="cKTyr437UgDa"
# Define the function create_price_ratio
def create_price_ratio(borough):
    """Return the 2018/1998 average-price ratio for one borough.

    Parameters
    ----------
    borough : pd.DataFrame
        A slice of df_year for a single London_Borough, e.g.
        df_year[df_year['London_Borough'] == 'Camden'].

    Returns
    -------
    list of float
        One-element list holding Average_price(2018) / Average_price(1998),
        i.e. the factor by which prices grew over the two decades.
    """
    # .iloc[0] extracts the scalar explicitly; calling float() on a
    # one-element Series is deprecated in recent pandas and would raise
    # if the year mask ever matched more than one row.
    price_1998 = borough.loc[borough['Year'] == 1998, 'Average_price'].iloc[0]
    price_2018 = borough.loc[borough['Year'] == 2018, 'Average_price'].iloc[0]
    return [price_2018 / price_1998]
# -
# Build a borough -> price-ratio mapping by applying create_price_ratio
# to each unique borough's slice of df_year.
ratios_final = {
    borough_name: create_price_ratio(df_year[df_year['London_Borough'] == borough_name])
    for borough_name in df_year['London_Borough'].unique()
}
print(ratios_final)
# Turn dictionary into a DataFrame
df_ratios = pd.DataFrame(ratios_final)
df_ratios.head()
# Restructure DataFrame so that each row == one observation and each column == one variable
df_ratios = df_ratios.transpose()
df_ratios = df_ratios.reset_index()
df_ratios.head()
# Rename columns
df_ratios = df_ratios.rename(columns={'index':'Borough', 0:'2018_increase_factor'})
df_ratios.head()
# Select the top 10 boroughs that have seen the biggest increase in housing prices
top10 = df_ratios.sort_values(by='2018_increase_factor',ascending=False).head(10)
print(top10)
# Plot a bar graph of the top 10 boroughs
ax = top10.plot(kind='bar',x='Borough',y='2018_increase_factor',legend=False)
ax.set_xticklabels(top10.Borough)
ax.set_ylabel('Housing Price Increase Factor (2018 vs. 1998)')
# + [markdown] colab_type="text" id="NzYUI7FxJpgv"
# ### 4. Conclusion
# What can you conclude? Type out your conclusion below.
#
# Look back at your notebook. Think about how you might summarize what you have done, and prepare a quick presentation on it to your mentor at your next meeting.
#
# We hope you enjoyed this practical project. It should have consolidated your data hygiene and pandas skills by looking at a real-world problem involving just the kind of dataset you might encounter as a budding data scientist. Congratulations, and looking forward to seeing you at the next step in the course!
# -
# ### Final Conclusion
# Hackney is the borough with the highest increase (> 6 times) in housing price average in London over the 2 decades from 1998 to 2018. The list of top 10 boroughs experiencing increased housing prices over that time period also includes Waltham Forest (2nd), Southwark (3rd), Lewisham (4th), Westminster and Newham (5th), City of London (7th), Haringey (8th), Kensington & Chelsea (9th), and Lambeth (10th).
| London_Calling_CaseStudy_DSantiagoRamos.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SNSim Examples
import snsim
import numpy as np
import matplotlib.pyplot as plt
# ## Simulate one SNIa by setting the parameters
# ### Init survey file
# +
# Set the cosmology
cosmology = {'name': 'planck18'}
cosmo = snsim.utils.set_cosmo(cosmology)
# Set the sncosmo model
sn_model = snsim.utils.init_sn_model('salt2')
# Set the survey
survey_config = {'survey_file': './survey_file_example.csv',
'sig_psf': 0.0,
'sig_zp': 0.0,
'gain': 1.,
'zp': 26.,
'ra_size': 7.295,
'dec_size': 7.465,
'noise_key': ['skynoise', 'skysigADU']}
survey = snsim.survey_host.SurveyObs(survey_config)
survey.print_config()
# -
# ### Init SN
# +
zcos = 0.05
coords = np.radians([42, 42])
sn_par = {'zcos': zcos,
'z2cmb': 0.0,
'como_dist': cosmo.comoving_distance(zcos).value,
'vpec': 300,
'sim_t0': 58030,
'ra': coords[0],
'dec': coords[1],
'mag_sct': 0.0,
'sncosmo': {'x1': 1, 'c': 0.1}
}
model_par = {'M0': -19.3,
'alpha': 0.14,
'beta': 3.1,
'mod_fcov': False}
SNIa = snsim.astrobj.SNIa(sn_par, sn_model, model_par=model_par)
# +
epochs = survey.epochs_selection(SNIa.coord,
(SNIa.sim_model.mintime(),
SNIa.sim_model.maxtime()))
SNIa.epochs = epochs
SNIa.gen_flux(np.random.default_rng(1200))
# -
# ### Plot the SN
fig, ax = plt.subplots(dpi=100)
time = np.linspace(SNIa.sim_lc['time'].min(), SNIa.sim_lc['time'].max(), 500)
for b, c in zip(SNIa.sim_lc['band'].unique(), ['r', 'b']):
ep = SNIa.sim_lc.query(f"band == '{b}'")
ax.errorbar(ep['time'], ep['flux'], yerr=ep['fluxerr'], fmt='o', c=c, label=b)
ax.plot(time, SNIa.sim_model.bandflux(b, time, zp=26., zpsys='ab'), c=c)
plt.ylabel('Flux [ADU] ZP = 26')
plt.xlabel('time [MJD]')
plt.axhline(0, c='k', ls='--')
plt.legend();
| Notebook_examples/SNSim_one_snia_simulation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Read the DNA string and report the counts of each nucleotide.
# `with` guarantees the file handle is closed even if read() raises,
# replacing the manual open()/close() pair.
with open('files/rosalind_dna.txt', 'r') as file:
    s = file.read()
s = s.upper()
print('%d %d %d %d ' % (s.count('A'), s.count('C'), s.count('G'), s.count('T')))
| Counting DNA Nucleotides/Counting DNA Nucleotides.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python
# language: python
# name: conda-env-python-py
# ---
# <a href="http://cocl.us/pytorch_link_top">
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/Pytochtop.png" width="750" alt="IBM Product " />
# </a>
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/cc-logo-square.png" width="200" alt="cognitiveclass.ai logo" />
# <h1>Logistic Regression</h1>
# <h2>Table of Contents</h2>
# <p>In this lab, we will cover logistic regression using PyTorch.</p>
#
# <ul>
# <li><a href="#Log">Logistic Function</a></li>
# <li><a href="#Seq">Build a Logistic Regression Using nn.Sequential</a></li>
# <li><a href="#Model">Build Custom Modules</a></li>
# </ul>
# <p>Estimated Time Needed: <strong>15 min</strong></p>
#
# <hr>
# <h2>Preparation</h2>
# We'll need the following libraries:
# +
# Import the libraries we need for this lab
import torch.nn as nn
import torch
import matplotlib.pyplot as plt
# -
# Set the random seed:
# +
# Set the random seed
torch.manual_seed(2)
# -
# <!--Empty Space for separating topics-->
# <h2 id="Log">Logistic Function</h2>
# Create a tensor ranging from -100 to 100:
z = torch.arange(-100, 100, 0.1).view(-1, 1)
print("The tensor: ", z)
# Create a sigmoid object:
# +
# Create sigmoid object
sig = nn.Sigmoid()
# -
# Apply the element-wise function Sigmoid with the object:
# +
# Use sigmoid object to calculate the
yhat = sig(z)
# -
# Plot the results:
plt.plot(z.numpy(), yhat.numpy())
plt.xlabel('z')
plt.ylabel('yhat')
# Apply the element-wise Sigmoid from the function module and plot the results:
yhat = torch.sigmoid(z)
plt.plot(z.numpy(), yhat.numpy())
# <!--Empty Space for separating topics-->
# <h2 id="Seq">Build a Logistic Regression with <code>nn.Sequential</code></h2>
# Create a 1x1 tensor where x represents one data sample with one dimension, and 2x1 tensor X represents two data samples of one dimension:
# +
# Create x and X tensor
x = torch.tensor([[1.0]])
X = torch.tensor([[1.0], [100]])
print('x = ', x)
print('X = ', X)
# -
# Create a logistic regression object with the <code>nn.Sequential</code> model with a one-dimensional input:
# +
# Use sequential function to create model
model = nn.Sequential(nn.Linear(1, 1), nn.Sigmoid())
# -
# The object is represented in the following diagram:
# <img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/chapter3/3.1.1_logistic_regression_block_diagram.png" width = 800, align = "center" alt="logistic regression block diagram" />
# In this case, the parameters are randomly initialized. You can view them the following ways:
# +
# Print the parameters
print("list(model.parameters()):\n ", list(model.parameters()))
print("\nmodel.state_dict():\n ", model.state_dict())
# -
# Make a prediction with one sample:
# +
# The prediction for x
yhat = model(x)
print("The prediction: ", yhat)
# -
# Calling the object with tensor <code>X</code> performed the following operation <b>(code values may not be the same as the diagrams value depending on the version of PyTorch) </b>:
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/chapter3/3.1.1_logistic_functio_example%20.png" width="400" alt="Logistic Example" />
# Make a prediction with multiple samples:
# +
# The prediction for X
yhat = model(X)
yhat
# -
# Calling the object performed the following operation:
# Create a 1x2 tensor where x represents one data sample with one dimension, and 2x3 tensor X represents one data sample of two dimensions:
# +
# Create and print samples
x = torch.tensor([[1.0, 1.0]])
X = torch.tensor([[1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])
print('x = ', x)
print('X = ', X)
# -
# Create a logistic regression object with the <code>nn.Sequential</code> model with a two-dimensional input:
# +
# Create new model using nn.sequential()
model = nn.Sequential(nn.Linear(2, 1), nn.Sigmoid())
# -
# The object will apply the Sigmoid function to the output of the linear function as shown in the following diagram:
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/chapter3/3.1.1logistic_output.png" width="800" alt="The structure of nn.sequential"/>
# In this case, the parameters are randomly initialized. You can view them the following ways:
# +
# Print the parameters
print("list(model.parameters()):\n ", list(model.parameters()))
print("\nmodel.state_dict():\n ", model.state_dict())
# -
# Make a prediction with one sample:
# +
# Make the prediction of x
yhat = model(x)
print("The prediction: ", yhat)
# -
# The operation is represented in the following diagram:
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/chapter3/3.3.1.logisticwithouptut.png" width="500" alt="Sequential Example" />
# Make a prediction with multiple samples:
# +
# The prediction of X
yhat = model(X)
print("The prediction: ", yhat)
# -
# The operation is represented in the following diagram:
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/chapter3/3.1.1_logistic_with_outputs2.png" width="800" alt="Sequential Example" />
# <!--Empty Space for separating topics-->
# <h2 id="Model">Build Custom Modules</h2>
# In this section, you will build a custom Module or class. The model or object function is identical to using <code>nn.Sequential</code>.
# Create a logistic regression custom module:
# +
# Create logistic_regression custom class
class logistic_regression(nn.Module):
    """Single-layer logistic regression: a sigmoid applied to a linear map."""

    def __init__(self, n_inputs):
        # One linear layer mapping n_inputs features to a single logit.
        super().__init__()
        self.linear = nn.Linear(n_inputs, 1)

    def forward(self, x):
        # Squash the logit into (0, 1) with the element-wise sigmoid.
        return torch.sigmoid(self.linear(x))
# -
# Create a 1x1 tensor where x represents one data sample with one dimension, and 3x1 tensor where $X$ represents one data sample of one dimension:
# +
# Create x and X tensor
x = torch.tensor([[1.0]])
X = torch.tensor([[-100], [0], [100.0]])
print('x = ', x)
print('X = ', X)
# -
# Create a model to predict one dimension:
# +
# Create logistic regression model
model = logistic_regression(1)
# -
# In this case, the parameters are randomly initialized. You can view them the following ways:
# +
# Print parameters
print("list(model.parameters()):\n ", list(model.parameters()))
print("\nmodel.state_dict():\n ", model.state_dict())
# -
# Make a prediction with one sample:
# +
# Make the prediction of x
yhat = model(x)
print("The prediction result: \n", yhat)
# -
# Make a prediction with multiple samples:
# +
# Make the prediction of X
yhat = model(X)
print("The prediction result: \n", yhat)
# -
# Create a logistic regression object with a function with two inputs:
# +
# Create logistic regression model
model = logistic_regression(2)
# -
# Create a 1x2 tensor where x represents one data sample with one dimension, and 3x2 tensor X represents one data sample of one dimension:
# +
# Create x and X tensor
x = torch.tensor([[1.0, 2.0]])
X = torch.tensor([[100, -100], [0.0, 0.0], [-100, 100]])
print('x = ', x)
print('X = ', X)
# -
# Make a prediction with one sample:
# +
# Make the prediction of x
yhat = model(x)
print("The prediction result: \n", yhat)
# -
# Make a prediction with multiple samples:
# +
# Make the prediction of X
yhat = model(X)
print("The prediction result: \n", yhat)
# -
# <!--Empty Space for separating topics-->
# <h3>Practice</h3>
# Make your own model <code>my_model</code> as applying linear regression first and then logistic regression using <code>nn.Sequential()</code>. Print out your prediction.
# +
# Practice: Make your model and make the prediction
X = torch.tensor([-10.0])
# -
# Double-click <b>here</b> for the solution.
#
# <!--
# my_model = nn.Sequential(nn.Linear(1, 1),nn.Sigmoid())
# yhat = my_model(X)
# print("The prediction: ", yhat)
# -->
# <!--Empty Space for separating topics-->
# <a href="http://cocl.us/pytorch_link_bottom">
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/notebook_bottom%20.png" width="750" alt="PyTorch Bottom" />
# </a>
# <h2>About the Authors:</h2>
#
# <a href="https://www.linkedin.com/in/joseph-s-50398b136/"><NAME></a> has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.
# Other contributors: <a href="https://www.linkedin.com/in/michelleccarey/"><NAME></a>, <a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a"><NAME></a>
# <hr>
# Copyright © 2018 <a href="cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu">cognitiveclass.ai</a>. This notebook and its source code are released under the terms of the <a href="https://bigdatauniversity.com/mit-license/">MIT License</a>.
# Scratch cell: re-create the two-input model and the sigmoid curve from above.
model = nn.Sequential(nn.Linear(2, 1), nn.Sigmoid())
# BUGFIX: the model expects 2 features per sample; the original passed a
# 1-feature tensor [[1.0]], which raises a shape-mismatch RuntimeError.
x = torch.tensor([[1.0, 2.0]])
yhat = model(x)
# Re-plot data: sigmoid evaluated over [-100, 100) in steps of 0.1.
z = torch.arange(-100, 100, 0.1).view(-1, 1)
sig = nn.Sigmoid()
yhat = sig(z)
yhat
| Coursera/IBM Python 01/Course04/5.1logistic_regression_prediction_v2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# %reload_ext autoreload
# %autoreload 2
# +
from __future__ import division
import sys
import os
from glob import glob
from typing import *
sys.path.append('../../')
from fastprogress import progress_bar
from modules.basics import *
from skopt.space import Real, Integer, Categorical
from skopt.utils import use_named_args
from skopt import gp_minimize, Optimizer
from modules.plotting import *
# -
experiment = Experiment.from_json(str(RESULTS_PATH/'17_hyperparam_search_mbp.json'))
for m in ['helios_cuda', 'icarus', 'daedalus', 'morpheus', 'lipml']:
e = Experiment.from_json(str(RESULTS_PATH/f'17_hyperparam_search_{m}.json'))
print(f'Machine {m} with {len(e["scores"])}')
experiment['scores'] += e['scores']
experiment['params'] += e['params']
len(experiment['scores'])
def proc_params(params:Dict[str,Any]) -> List[Any]:
    '''Flatten a hyper-parameter dict into the fixed [depth, do, wd, width, growth_rate] order.'''
    return [params[key] for key in ('depth', 'do', 'wd', 'width', 'growth_rate')]
def proc_results(results:Dict[str,Any], val:bool=True) -> float:
    '''Pick the validation score, or the first test score when val is False.'''
    if val:
        return results['val']
    return results['test'][0]
def get_min(x) -> float:
    """Debug stub: print *x* and return a placeholder score of 0.

    NOTE(review): the ``-> float`` annotation does not match the ``int``
    literal returned; this looks like a leftover development stub — confirm
    whether it is still needed before relying on it.
    """
    print(x)
    return 0
def get_model_builder(params:Dict[str,Union[int,float]]) -> ModelBuilder:
    """Build a classification ModelBuilder from one hyper-parameter sample.

    Expects ``params`` to carry the keys 'depth', 'width', 'growth_rate',
    'do' (dropout) and 'wd' (weight decay). Reads the module-level
    ``train_fy`` fold yielder for the continuous/categorical feature
    definitions.
    """
    # Network body: dense (skip-connected) fully-connected stack with swish
    # activations, sized by the sampled hyper-parameters.
    body = partial(FullyConnected, depth=params['depth'], width=params['width'], act='swish',
                   dense=True, growth_rate=params['growth_rate'], do=params['do'])
    # Embedding for the categorical inputs; sizes inferred from train_fy.
    cat_embedder = CatEmbedder.from_fy(train_fy, emb_szs=[3])
    opt_args = {'opt':'adam', 'eps':1e-8, 'weight_decay':params['wd']}
    return ModelBuilder(objective='classification', cont_feats=train_fy.cont_feats, n_out=1, cat_embedder=cat_embedder,
                        opt_args=opt_args, body=body)
train_fy = HEPAugFoldYielder(DATA_PATH/'train.hdf5', rot_mult=2)
z0 = []
for x in progress_bar(experiment['params']):
m = Model(get_model_builder(x))
z0.append(m.get_param_count())
df = pd.DataFrame({'MVAC': [s['val'] for s in experiment['scores']],
'MAPA': [s['test'][0] for s in experiment['scores']],
'MAPA_unc': [s['test'][1] for s in experiment['scores']],
'Size': z0})
df.sort_values('Size', inplace=True)
df = df[df.Size <= 100000]
def plot_scores_by_complexity(df, savename:Optional[str]=None, settings:PlotSettings=plot_settings) -> None:
    """Plot MVAC and MAPA metrics against model parameter count.

    Parameters
    ----------
    df : pd.DataFrame
        Must contain 'Size', 'MVAC' and 'MAPA' columns.
    savename : str, optional
        If given, the figure is also written to
        ``settings.savepath/<savename><settings.format>``.
    settings : PlotSettings
        Styling options (fonts, colours, figure size, save location).
    """
    with sns.axes_style(settings.style), sns.color_palette(settings.cat_palette) as palette:
        plt.figure(figsize=(settings.w_mid, settings.h_mid))
        # regplot overlays a scatter with a linear-regression fit per metric.
        sns.regplot('Size', 'MVAC', data=df, label='MVAC')
        sns.regplot('Size', 'MAPA', data=df, label='MAPA')
        plt.legend(loc=settings.leg_loc, fontsize=settings.leg_sz)
        plt.xlabel('Number of parameters', fontsize=settings.lbl_sz, color=settings.lbl_col)
        plt.ylabel('Metric', fontsize=settings.lbl_sz, color=settings.lbl_col)
        plt.xticks(fontsize=settings.tk_sz, color=settings.tk_col)
        plt.yticks(fontsize=settings.tk_sz, color=settings.tk_col)
        plt.title(settings.title, fontsize=settings.title_sz, color=settings.title_col, loc=settings.title_loc)
        if savename is not None: plt.savefig(settings.savepath/f'{savename}{settings.format}', bbox_inches='tight')
        plt.show()
plot_scores_by_complexity(df, savename='complexity')
# # Validation
x0 = [proc_params(x) for x in experiment['params']]
y0 = [-proc_results(x) for x in experiment['scores']]
len(x0), len(y0)
arr = np.array(x0)
space = [Integer(arr[:,0].min(), arr[:,0].max(), name='depth'),
Real(arr[:,1].min(), arr[:,1].max(), name='do'),
Real(arr[:,2].min(), arr[:,2].max(), name='wd'),
Integer(arr[:,3].min(), arr[:,3].max(), name='width'),
Real(arr[:,4].min(), arr[:,4].max(), name='growth_rate')]
opt = gp_minimize(lambda x: 0, space, n_random_starts=0, x0=x0, y0=y0, n_calls=0, verbose=1, random_state=0);
_ = plot_evaluations(opt, dimensions=['depth', 'do', 'wd', 'width', 'growth_rate'], settings=plot_settings, savename='arch_opt_2_evals')
_ = plot_objective(opt, dimensions=['depth', 'do', 'wd', 'width', 'growth_rate'], size=3, levels=20, n_points=80,
settings=plot_settings, savename='arch_opt_2_val')
opt.x, opt.fun
# ### Test
x0 = [proc_params(x) for x in experiment['params']]
y0 = [-proc_results(x, False) for x in experiment['scores']]
len(x0), len(y0)
opt = gp_minimize(lambda x: 0, space, n_random_starts=0, x0=x0, y0=y0, n_calls=0, verbose=1, random_state=0);
_ = plot_objective(opt, dimensions=['depth', 'do', 'wd', 'width', 'growth_rate'], size=3, levels=20, n_points=80,
settings=plot_settings, savename='arch_opt_2_test')
opt.x, opt.fun
| notebooks/17_hyperparam_search/Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="UBfRygZTd5K0" colab_type="text"
# **Imports**
# + id="s_z3yt9Vd5Vf" colab_type="code" colab={}
import tensorflow as tf
import numpy as np
import rcwa_utils
import tensor_utils
import solver
import matplotlib.pyplot as plt
# + [markdown] id="qOjIlt6Ud5gE" colab_type="text"
# **Loss Function Definition**
# + id="74fUyYA-d5oq" colab_type="code" colab={}
def loss_func():
    """Optimization objective: negative product of the two reflectances.

    Minimizing this maximizes the reflectance at both simulated incidence
    angles simultaneously. Reads the optimization variables ``var_duty``
    and ``var_length`` from module scope and mutates the global ``params``
    dict (sets ``params['L']``) on every call.
    """
    # Global parameters dictionary (mutated below when params['L'] is set).
    global params
    # Generate permittivity and permeability distributions.
    ER_t, UR_t = solver.generate_cylindrical_nanoposts(var_duty, params)
    # Set the device layer thickness based on the length variable,
    # clamped to the allowed [length_min, length_max] range.
    thickness_coeff = tf.clip_by_value(var_length, clip_value_min = params['length_min'], clip_value_max = params['length_max'])
    thickness_coeff = tf.cast(thickness_coeff, dtype = tf.complex64)
    length_shape = (1, 1, 1, 1, 1, 1)
    substrate_layer = tf.ones(shape = length_shape, dtype = tf.complex64)
    device_layer = thickness_coeff * tf.ones(shape = length_shape, dtype = tf.complex64)
    # params['lam0'] is indexed with six axes here, so it is a 6-D tensor;
    # take the first wavelength as the thickness scale.
    wavelength = params['lam0'][0, 0, 0, 0, 0, 0].numpy()
    # Layer thicknesses in wavelength units: [device, substrate] along axis 3.
    params['L'] = wavelength * tf.concat([device_layer, substrate_layer], axis = 3)
    # Simulate the system.
    outputs = solver.simulate(ER_t, UR_t, params)
    # Maximize the product of the reflectances (negate for a minimizer).
    ref_theta1 = outputs['REF'][0, 0, 0]
    ref_theta2 = outputs['REF'][1, 0, 0]
    return -ref_theta1 * ref_theta2
# + [markdown] id="n3tr9yegd5z6" colab_type="text"
# **Setup and Initialize Variables**
# + id="GVhDWNNPd58S" colab_type="code" colab={}
# Initialize global params dictionary.
params = solver.initialize_params(wavelengths = [632.0, 632.0],
thetas = [0.0, 5.0],
phis = [0.0, 0.0],
pte = [1.0, 1.0],
ptm = [0.0, 0.0])
params['erd'] = 6.76 # Grating layer permittivity.
params['ers'] = 2.25 # Subtrate layer permittivity.
params['PQ'] = [11, 11] # Fourier Harmonics.
# Initialize grating duty cycle variable.
var_shape = (1, params['pixelsX'], params['pixelsY'])
duty_initial = 0.7 * np.ones(shape = var_shape)
var_duty = tf.Variable(duty_initial, dtype = tf.float32)
# Initialize grating thickness variable.
length_initial = 1.0
var_length = tf.Variable(length_initial, dtype = tf.float32)
# + [markdown] id="qMmIcj6Nd6GC" colab_type="text"
# **Optimize**
# + id="27VkA9_Fd6Oe" colab_type="code" colab={}
# Number of optimization iterations.
N = 200
# Define an optimizer and data to be stored.
opt = tf.keras.optimizers.Adam(learning_rate = 0.002)
loss = np.zeros(N + 1)
# Compute initial loss.
loss[0] = loss_func().numpy()
# Optimize.
for i in range(N):
opt.minimize(loss_func, var_list = [var_duty, var_length])
loss[i + 1] = loss_func().numpy()
# + [markdown] id="1WXf-8_zd6Wr" colab_type="text"
# **Display Learning Curve**
# + id="cZ3nUIGXd6eH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 278} outputId="cd281953-7d57-4333-9e9f-82a86a57ded8"
plt.plot(loss)
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.xlim(0, N)
plt.show()
| examples/gratings/reflective_grating_0deg_5deg.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import wget, json, os, math
from pathlib import Path
from string import capwords
from pybtex.database import parse_string
import pybtex.errors
from mpcontribs.client import Client
from bravado.exception import HTTPNotFound
from pymatgen.core import Structure
from pymatgen.ext.matproj import MPRester
from tqdm.notebook import tqdm
from matminer.datasets import load_dataset
from monty.json import MontyEncoder, MontyDecoder
# ### Configuration and Initialization
BENCHMARK_FULL_SET = [
{
"name": "log_kvrh",
"data_file": "matbench_log_kvrh.json.gz",
"target": "log10(K_VRH)",
"clf_pos_label": None,
"unit": None,
"has_structure": True,
}, {
"name": "log_gvrh",
"data_file": "matbench_log_gvrh.json.gz",
"target": "log10(G_VRH)",
"clf_pos_label": None,
"unit": None,
"has_structure": True,
}, {
"name": "dielectric",
"data_file": "matbench_dielectric.json.gz",
"target": "n",
"clf_pos_label": None,
"unit": None,
"has_structure": True,
}, {
"name": "jdft2d",
"data_file": "matbench_jdft2d.json.gz",
"target": "exfoliation_en",
"clf_pos_label": None,
"unit": "meV/atom",
"has_structure": True,
}, {
"name": "mp_gap",
"data_file": "matbench_mp_gap.json.gz",
"target": "gap pbe",
"clf_pos_label": None,
"unit": "eV",
"has_structure": True,
}, {
"name": "mp_is_metal",
"data_file": "matbench_mp_is_metal.json.gz",
"target": "is_metal",
"clf_pos_label": True,
"unit": None,
"has_structure": True,
}, {
"name": "mp_e_form",
"data_file": "matbench_mp_e_form.json.gz",
"target": "e_form",
"clf_pos_label": None,
"unit": "eV/atom",
"has_structure": True,
}, {
"name": "perovskites",
"data_file": "matbench_perovskites.json.gz",
"target": "e_form",
"clf_pos_label": None,
"unit": "eV",
"has_structure": True,
}, {
"name": "glass",
"data_file": "matbench_glass.json.gz",
"target": "gfa",
"clf_pos_label": True,
"unit": None,
"has_structure": False,
}, {
"name": "expt_is_metal",
"data_file": "matbench_expt_is_metal.json.gz",
"target": "is_metal",
"clf_pos_label": True,
"unit": None,
"has_structure": False,
}, {
"name": "expt_gap",
"data_file": "matbench_expt_gap.json.gz",
"target": "gap expt",
"clf_pos_label": None,
"unit": "eV",
"has_structure": False,
}, {
"name": "phonons",
"data_file": "matbench_phonons.json.gz",
"target": "last phdos peak",
"clf_pos_label": None,
"unit": "cm^-1",
"has_structure": True,
}, {
"name": "steels",
"data_file": "matbench_steels.json.gz",
"target": "yield strength",
"clf_pos_label": None,
"unit": "MPa",
"has_structure": False,
}
]
# Map of canonical yet non-mpcontribs-compatible tagret nams to compatible (unicode, no punctuation) target names
target_map = {
"yield strength": "σᵧ",
"log10(K_VRH)": "log₁₀Kᵛʳʰ",
"log10(G_VRH)": "log₁₀Gᵛʳʰ",
"n": "𝑛",
"exfoliation_en": "Eˣ",
"gap pbe": "Eᵍ",
"is_metal": "metallic",
"e_form": "Eᶠ",
"gfa": "glass",
"gap expt": "Eᵍ",
"last phdos peak": "ωᵐᵃˣ",
}
pybtex.errors.set_strict_mode(False)
mprester = MPRester()
client = Client(host='ml-api.materialsproject.org')
# +
datadir = Path('/Users/patrick/gitrepos/mp/mpcontribs-data/')
fn = Path('dataset_metadata.json')
fp = datadir / fn
if not fp.exists():
prefix = "https://raw.githubusercontent.com/hackingmaterials/matminer"
url = f'{prefix}/master/matminer/datasets/{fn}'
wget.download(url)
fn.rename(fp)
metadata = json.load(open(fp, 'r'))
# -
# ### Prepare and create/update Projects
# Create (or update) one MPContribs project per benchmark dataset.
for ds in BENCHMARK_FULL_SET:
    name = "matbench_" + ds["name"]
    # Structure-based datasets are keyed on 'structure', the rest on 'composition'.
    primitive_key = "structure" if ds["has_structure"] else "composition"
    target = ds["target"]
    columns = {
        target_map[target]: metadata[name]["columns"][target],
        primitive_key: metadata[name]["columns"][primitive_key]
    }
    project = {
        'name': name,
        'is_public': True,
        'owner': '<EMAIL>',
        'title': name, # TODO update and set long_title
        'authors': '<NAME>, <NAME>',
        # BUGFIX: the two adjacent string literals are concatenated by Python,
        # so the first needs a trailing space ("...the identifiers", not
        # "...theidentifiers").
        'description': metadata[name]['description'] + \
            " If you are viewing this on MPContribs-ML interactively, please ensure the order of the "
            f"identifiers is sequential (mb-{ds['name']}-0001, mb-{ds['name']}-0002, etc.) before benchmarking.",
        'other': {
            'columns': columns,
            'entries': metadata[name]['num_entries']
        },
        'references': [
            # BUGFIX: was metadata["name"]["url"] (literal key "name"), which
            # raises KeyError; the per-dataset entry is metadata[name].
            {'label': 'RawData', 'url': metadata[name]["url"]}
        ]
    }
    for ref in metadata[name]['bibtex_refs']:
        if name == "matbench_phonons":
            # This dataset ships an unwieldy auto-generated bibtex key; shorten it.
            ref = ref.replace(
                "petretto_dwaraknath_miranda_winston_giantomassi_rignanese_van setten_gonze_persson_hautier_2018",
                "petretto2018"
            )
        bib = parse_string(ref, 'bibtex')
        for key, entry in bib.entries.items():
            key_is_doi = key.startswith('doi:')
            url = 'https://doi.org/' + key.split(':', 1)[-1] if key_is_doi else entry.fields.get('url')
            k = 'Zhuo2018' if key_is_doi else capwords(key.replace('_', ''))
            if k.startswith('C2'):
                k = 'Castelli2012'
            elif k.startswith('Landolt'):
                k = 'LB1997'
            elif k == 'Citrine':
                url = 'https://www.citrination.com'
            # Abbreviate long labels to 8 characters (first 4 + last 4).
            if len(k) > 8:
                k = k[:4] + k[-4:]
            project['references'].append(
                {'label': k, 'url': url}
            )
    try:
        client.projects.get_entry(pk=name, _fields=["name"]).result()
    except HTTPNotFound:
        client.projects.create_entry(project=project).result()
        print(name, "created")
    else:
        # 'name' is the primary key; it must not be part of an update payload.
        project.pop("name")
        client.projects.update_entry(pk=name, project=project).result()
        print(name, "updated")
# ### Prepare Contributions
# +
structure_filename = "/Users/patrick/Downloads/outfile.cif"
for ds in BENCHMARK_FULL_SET:
name = "matbench_" + ds["name"]
fn = datadir / f"{name}.json"
if fn.exists():
continue
target = ds["target"]
unit = f" {ds['unit']}" if ds["unit"] else ""
df = load_dataset(name)
contributions = []
id_prefix = df.shape[0]
id_n_zeros = math.floor(math.log(df.shape[0], 10)) + 1
for i, row in tqdm(enumerate(df.iterrows()), total=df.shape[0]):
entry = row[1]
contrib = {'project': name, 'is_public': True}
if "structure" in entry.index:
s = entry.loc["structure"]
s.to("cif", structure_filename)
s = Structure.from_file(structure_filename)
c = s.composition.get_integer_formula_and_factor()[0]
contrib["structures"] = [s]
else:
c = entry["composition"]
id_number = f"{i+1:0{id_n_zeros}d}"
identifier = f"mb-{ds['name']}-{id_number}"
contrib["identifier"] = identifier
contrib["data"] = {target_map[target]: f"{entry.loc[target]}{unit}"}
contrib["formula"] = c
contributions.append(contrib)
with open(fn, "w") as f:
json.dump(contributions, f, cls=MontyEncoder)
print("saved to", fn)
# -
# ### Submit Contributions
name = "matbench_log_gvrh"
fn = datadir / f"{name}.json"
with open(fn, "r") as f:
contributions = json.load(f, cls=MontyDecoder)
# client.delete_contributions(name)
client.submit_contributions(contributions, ignore_dupes=True)
| mpcontribs-portal/notebooks/ml.materialsproject.org/get_started.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %pip install mysql-connector-python   # one-time install of the MySQL connector
# BUGFIX: the bare `pip install ...` line was not valid Python and broke this
# script cell; use the %pip notebook magic (commented above) instead.
import mysql.connector
# +
import mysql.connector
mydb = mysql.connector.connect(
host="localhost",
user="root",
password="<PASSWORD>!" # Enter DB Password
)
print(mydb)
# +
mycursor = mydb.cursor()
mycursor.execute("SHOW DATABASES") # To list the databases in MySQL
# -
for x in mycursor:
print(x) # Print the databases
# +
import mysql.connector
mydb1 = mysql.connector.connect(
host="localhost",
user="root",
password="<PASSWORD>!",
database="world"
)
# -
print(mydb1)
mycursor = mydb1.cursor()
sql= 'SELECT * FROM world.city where ID=1'
mycursor.execute(sql)
myresult = mycursor.fetchall()
for x in myresult:
print(x) # Basic Select
import mysql.connector as sql # Set sql as alias
import pandas as pd
# +
db_conn = sql.connect( host="localhost",database='world',user='Viky',password='<PASSWORD>!') # Enter DB password
db_cursor = db_conn.cursor()
db_cursor.execute('SELECT * FROM city')
table_rows = db_cursor.fetchall()
df = pd.DataFrame(table_rows)
# Connect to Database
#Export tables from database to a Data Frame in Pandas
# -
df.head(2) # Data from the database
df.columns = ['ID','City_Name','Country_Code','Identifier','Pin Code']
# Rename columns to a meaningul name
df.head(2)
df.info() # Information about columns in the database
df.sort_values(by=['City_Name','Identifier'],ascending = True).head(3)
df.pivot_table(index = 'Country_Code')
sql.version # Print the sql version
| MySQL/Connecting to MySQL_Data_Base.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Simple plotting
# This example reads in a few files and creates xy-graphs with custom colours into the same plot. The custom colour scheme is done with the seaborn package. The first command is needed in Jupyter notebooks.
# %matplotlib inline
# +
import numpy as np
import pandas as pd
import os
from matplotlib import pyplot as plt
import seaborn as sns
sns.set(rc={'figure.figsize':(11.7,8.27)})
sns.set_context('poster', font_scale=2)
sns.set_style('ticks')
# set colours to be used
col = ["#e74c3c", "#3498db", "#2ecc71", "#9b59b6", "#95a5a6", "#34495e", ]
# and print out a sample
sns.palplot(sns.color_palette(col))
# -
# The appearance of the plots is set using "seaborn". It is a useful tool built on top of matplotlib and <i>it provides a high-level interface for drawing attractive and informative statistical graphics</i>. More info, sample gallery and tutorials: https://seaborn.pydata.org/
# to get help on the seaborn library, give
help(sns)
# ### Hands on
# The next cell sets the variable "do" to "charges". It is used further below to select what is plotted and how it's labelled. Once you've done the first plot, change it to "energies" and run the scripts again.
# Select which quantity to analyse: 'charges' or 'energies'
#do = 'charges'
do = 'energies'
# Collect the result files for the chosen quantity, sorted by name.
filename = np.sort([
    entry for entry in os.listdir('./')
    if entry.startswith('psize') and entry.split('_')[2].startswith(do)
])
print(filename)
# Now we loop over all matching files found, set some properties of the plot, plot the data and save it as pdf.
# Plot every matching learning curve on a single log-log figure.
for file_ in filename:
    data = np.genfromtxt(file_)
    # col 0: training-set size (NOTE(review): scaled by 10000 — presumably a
    # fraction converted to absolute size, confirm); col 1: MAE
    plt.plot(data[:, 0] * 10000, data[:, 1], label=file_.split('_')[1])
plt.yscale('log')
plt.xscale('log')
plt.xlabel('Train size (units, logscale)')
# unit is elementary charge 'e' for charges, 'eV' for energies
plt.ylabel('MAE (e{}, logscale)'.format('' if do == 'charges' else 'V'))
#plt.ylabel('Mean average error (e{})'.format('' if do == 'charges' else 'V'))
plt.xlim([1e3, 10000])
#plt.ylim(ymin=0)
plt.legend(loc=(0.5, 0.2), fontsize=24)
plt.savefig(do+'_curve.pdf', format='pdf', bbox_inches='tight')
# ### Hands on
# Now the legend is on top of some of the curves. Can you move it to the right out of the way? Does it make sense to decrease the font? Copy this script from above and edit that so that you have the original to go back to if needed. Hint: the variable to tweak is plt.legend
# ### Hands on
# Now, try changing the colour palette. Open the seaborn web page in a separate tab, open the tutorials and look for the colour palettes. Try to switch to using the palette designed for colour blind people. Hint: copy the second script cell below and edit the palette command. Copy over the plot command and run it as well.
| simple/plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
# This example shows how to do image classification from scratch, starting from JPEG image files on disk, without leveraging pre-trained weights or a pre-made Keras Application model. We demonstrate the workflow on the Kaggle Cats vs Dogs binary classification dataset.
#
# We use the image_dataset_from_directory utility to generate the datasets, and Keras image preprocessing layers for image standardization and data augmentation.
# # Setup
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# -
# # Generate Dataset
# +
image_size = (180, 180)
batch_size = 32
# Build training and validation splits (80/20) from the same directory.
# Using the identical seed for both calls keeps the subsets disjoint and
# the split reproducible across runs.
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    '../input/cat-and-dog/training_set/training_set',
    validation_split=0.2,
    subset="training",
    seed=1337,
    image_size=image_size,
    batch_size=batch_size,
)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    '../input/cat-and-dog/training_set/training_set',
    validation_split=0.2,
    subset="validation",
    seed=1337,
    image_size=image_size,
    batch_size=batch_size,
)
# -
# # Visualize the data
# Here are the first 9 images in the training dataset. As you can see, label 1 is "dog" and label 0 is "cat".
# +
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(int(labels[i]))
plt.axis("off")
# -
#
# # Using image data augmentation
# When you don't have a large image dataset, it's a good practice to artificially introduce sample diversity by applying random yet realistic transformations to the training images, such as random horizontal flipping or small random rotations. This helps expose the model to different aspects of the training data while slowing down overfitting.
data_augmentation = keras.Sequential(
[
layers.experimental.preprocessing.RandomFlip("horizontal"),
layers.experimental.preprocessing.RandomRotation(0.1),
]
)
# Let's visualize what the augmented samples look like, by applying data_augmentation repeatedly to the first image in the dataset:
plt.figure(figsize=(10, 10))
for images, _ in train_ds.take(1):
for i in range(9):
augmented_images = data_augmentation(images)
ax = plt.subplot(3, 3, i + 1)
plt.imshow(augmented_images[0].numpy().astype("uint8"))
plt.axis("off")
# # Two options to preprocess the data
# Option 1: Make it part of the model: With this option, your data augmentation will happen on device, synchronously with the rest of the model execution, meaning that it will benefit from GPU acceleration. Note that data augmentation is inactive at test time, so the input samples will only be augmented during fit(), not when calling evaluate() or predict().If you're training on GPU, this is the better option.
#
# Option 2: apply it to the dataset, so as to obtain a dataset that yields batches of augmented images.With this option, your data augmentation will happen on CPU, asynchronously, and will be buffered before going into the model. If you're training on CPU, this is the better option, since it makes data augmentation asynchronous and non-blocking.
#
# In our case, we'll go with the first option.
#
# # Configure the dataset for performance
# Let's make sure to use buffered prefetching so we can yield data from disk without having I/O becoming blocking:
#
# +
train_ds = train_ds.prefetch(buffer_size=32)
val_ds = val_ds.prefetch(buffer_size=32)
# -
# # Build a model
#
# Note that:
# We start the model with the data_augmentation preprocessor, followed by a Rescaling layer.
# We include a Dropout layer before the final classification layer.
# +
def make_model(input_shape, num_classes):
    """Build a small Xception-style CNN.

    The graph starts with the notebook's `data_augmentation` pipeline and a
    1/255 rescaling, then stacks separable-convolution blocks joined by
    projected residual connections. The head is a single sigmoid unit for
    binary problems, otherwise a softmax over `num_classes`.
    """
    inputs = keras.Input(shape=input_shape)
    # Augmentation + entry block
    net = data_augmentation(inputs)
    net = layers.experimental.preprocessing.Rescaling(1. / 255)(net)
    net = layers.Conv2D(32, 3, strides=2, padding="same")(net)
    net = layers.BatchNormalization()(net)
    net = layers.Activation("relu")(net)
    net = layers.Conv2D(64, 3, padding="same")(net)
    net = layers.BatchNormalization()(net)
    net = layers.Activation("relu")(net)
    skip = net  # residual source for the first downsampling block
    for width in [128, 256, 512, 728]:
        net = layers.Activation("relu")(net)
        net = layers.SeparableConv2D(width, 3, padding="same")(net)
        net = layers.BatchNormalization()(net)
        net = layers.Activation("relu")(net)
        net = layers.SeparableConv2D(width, 3, padding="same")(net)
        net = layers.BatchNormalization()(net)
        net = layers.MaxPooling2D(3, strides=2, padding="same")(net)
        # Project the residual branch to the new shape, then merge
        shortcut = layers.Conv2D(width, 1, strides=2, padding="same")(skip)
        net = layers.add([net, shortcut])
        skip = net  # next block's residual source
    # Exit block: a final separable conv, then global pooling
    net = layers.SeparableConv2D(1024, 3, padding="same")(net)
    net = layers.BatchNormalization()(net)
    net = layers.Activation("relu")(net)
    net = layers.GlobalAveragePooling2D()(net)
    # Classification head: binary -> 1 sigmoid unit; multi-class -> softmax
    units, activation = (1, "sigmoid") if num_classes == 2 else (num_classes, "softmax")
    net = layers.Dropout(0.5)(net)
    outputs = layers.Dense(units, activation=activation)(net)
    return keras.Model(inputs, outputs)
model=make_model(input_shape=image_size+(3,),num_classes=2)
keras.utils.plot_model(model,show_shapes=True)
# -
# # Train the model
# +
epochs = 50
callbacks = [
keras.callbacks.ModelCheckpoint("save_at_{epoch}.h5"),
]
model.compile(
optimizer=keras.optimizers.Adam(1e-3),
loss="binary_crossentropy",
metrics=["accuracy"],
)
model.fit(
train_ds, epochs=epochs, callbacks=callbacks, validation_data=val_ds,
)
# -
# # Run inference on new data
# Note that data augmentation and dropout are inactive at inference time
#
# +
# Load a single image, add a batch axis and run it through the trained model.
img = keras.preprocessing.image.load_img(
    '../input/cat-and-dog/training_set/training_set/dogs/dog.1006.jpg', target_size=image_size
)
img_array = keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0) # Create batch axis
predictions = model.predict(img_array)
# Sigmoid output: `score` is P(dog), since label 1 is "dog" in this dataset
score = predictions[0]
print(
    "This image is %.2f percent cat and %.2f percent dog."
    % (100 * (1 - score), 100 * score)
)
# Threshold the probability at 0.5 for a hard class decision
if predictions[0][0] >=0.5:
    prediction='dog'
else:
    prediction='cat'
print(prediction)
| Dogs vs Cats_Image Processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# BroBeur Tweet
#
# Sends tweets for brobeur
from TwitterFollowBot import TwitterBot
import praw
import random
# Authenticate the Twitter bot and the Reddit client
# (the 'brobeurtweet' site is presumably configured in praw.ini — confirm)
my_bot = TwitterBot()
r = praw.Reddit('brobeurtweet')
# Pick a random game-dev subreddit and turn its name into a hashtag
subredz = ['DevBlogs', 'gamedev', 'gamejams', 'Games', 'gaming']
randsubrepo = random.choice(subredz)
hashthi = ('#' + randsubrepo)
rgvz = r.get_subreddit(randsubrepo)
rgtnew = rgvz.get_new  # NOTE(review): bound method is never called or used
# Tweet the title + URL of a random submission, tagged with the subreddit
ransub = rgvz.get_random_submission()
rantit = ransub.title
randurl = ransub.url
my_bot.send_tweet(rantit + ' ' + randurl + ' ' + hashthi)
# Retweet one post and follow one account from the game-dev hashtags
my_bot.auto_rt("#gamejams", count=1)
my_bot.auto_follow("#gamedev", count=1)
| posts/.ipynb_checkpoints/brobeurtweet-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import math
from mpl_toolkits.mplot3d import Axes3D
from scipy.ndimage.morphology import distance_transform_edt
# #### Gradient Ascent
# \begin{align}
# \mathbf{r}_{i+1}&=\mathbf{r}_i+\eta\Delta \mathbf{r} \\
# \Delta\mathbf{r} &\sim -\frac{\nabla \mathbf{f}}{\|\nabla \mathbf{f}\|}
# \end{align}
# where $\mathbf{f}$ the potential field, $\nabla$ the gradient, $i$ the iteration of the for-loop, $\eta$ the rate of change constant and $\mathbf{r}$ the position.
def mesh(X, Y, Z):
    """Render Z over the X/Y grid as a 3-D surface on the current axes."""
    axes = plt.gca()
    axes.plot_surface(X, Y, Z, rstride=1, cstride=1,
                      cmap='viridis', edgecolor='none')
    axes.set_xlabel('x')
    axes.set_ylabel('y')
    axes.set_zlabel('z')
    axes.view_init(70, -110)
def round2(n):
    """Round half-up to the nearest integer (elementwise for arrays)."""
    shifted = np.floor(n + 0.5)
    return shifted.astype(int)
class PotentialFieldPathDemo:
    """Potential-field path planning on a 400x600 grid: a repulsive
    potential is built around obstacles, an attractive quadratic well at
    `goal`, and a gradient walker is traced from `start`."""
    def __init__(self):
        # Grid dimensions (rows = y, cols = x)
        self.nrows = 400
        self.ncols = 600
        self.d0 = 2    # repulsion cutoff in rescaled-distance units
        self.nu = 800  # repulsive gain
        self.start = np.array([50,350])  # (x, y) start position
        self.goal = np.array([400,50])   # (x, y) goal position
        self.xi = 1/700  # attractive gain
        self.x,self.y=np.meshgrid(np.linspace(1,self.ncols,self.ncols),
                     np.linspace(1,self.nrows,self.nrows))
        self.maxIter = 1000  # gradient-walk step budget
    def generateObstacle(self):
        """Return (obstacle mask, repulsive potential, x grid, y grid)."""
        # False*np.ones gives a float array of zeros used as a boolean mask
        obstacle = False*np.ones((self.nrows,self.ncols))
        # Two rectangular obstacles ...
        obstacle[299:,99:249] = True
        obstacle[149:199, 399:499] = True
        # ... and two circular ones
        t = ((self.x-200)**2+(self.y-50)**2) < 50**2
        obstacle[t] = True
        t = ((self.x-400)**2+(self.y-300)**2)< 100**2
        obstacle[t] = True
        # Euclidean distance of every free cell to the nearest obstacle cell
        d = distance_transform_edt(1-obstacle)
        d2 = d/100 + 1  # rescale; the +1 avoids division by zero on obstacles
        repulsive=self.nu*((1/d2-1/self.d0)**2)
        repulsive[d2>self.d0] = 0  # no repulsion beyond the cutoff d0
        return obstacle,repulsive,self.x,self.y
    def generateAttractive(self):
        """Quadratic attractive potential centred on the goal."""
        attractive=self.xi*((self.x-self.goal[0])**2+
                            (self.y-self.goal[1])**2)
        return attractive,self.x,self.y
    def GradientBasedPlanner(self,f):
        """Follow the normalised negative gradient of potential `f` from
        `start`; return the visited route as an (N, 2) array of (x, y)."""
        gy,gx = np.gradient(-f)
        route = self.start.reshape(-1,2).astype(float);
        rate = 1  # step size
        current = route[0,:]
        # Normalise the gradient field to unit step directions
        G = np.sqrt(gx**2+gy**2); gx /= G; gy /= G
        for i in range(self.maxIter):
            # Nearest grid cell; note arrays are indexed [row, col] = [y, x]
            tmpx = round2(current[1])
            tmpy = round2(current[0])
            current+=rate*np.array([gx[tmpx,tmpy],gy[tmpx,tmpy]])
            if np.sum(current<=0):
                # walked off the grid
                break
            elif np.prod(round2(current)==self.goal):
                print('yes')
                break
            route = np.concatenate((route,
                                np.array(current).reshape(-1,2)))
        # NOTE(review): appends the final position once more after the loop,
        # so the last point appears twice in `route` — confirm intended.
        route = np.concatenate((route,
                                np.array(current).reshape(-1,2)))
        return route
demo = PotentialFieldPathDemo()
obsmap,repulmap,x,y = demo.generateObstacle()
attmap,_,_ = demo.generateAttractive()
f = repulmap+attmap
route = demo.GradientBasedPlanner(f)
plt.figure(figsize=(20,10))
plt.subplot(221,projection='3d'); mesh(x,y,repulmap)
plt.subplot(222,projection='3d'); mesh(x,y,attmap)
plt.subplot(223,projection='3d'); mesh(x,y,f)
plt.subplot(224);
plt.imshow(obsmap)
plt.plot(route[:,0],route[:,1],'-',linewidth=5)
dxdy = route[10,:] - route[0,:]
plt.arrow(route[0,0],route[0,1],dxdy[0],dxdy[1],width=15)
plt.plot(demo.start[0],demo.start[1],
'rp',markersize=15)
plt.plot(demo.goal[0],demo.goal[1],
'r*',markersize=15)
| Robotics/PotentialFieldPlanPath/.ipynb_checkpoints/PotentialFieldPath-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import xml.etree.ElementTree as ET
from lxml import etree
# +
# nml_tree = ET.parse("2018-05-29.nml")
# -
# `etree` was imported with `from lxml import etree`, so it must be referenced
# directly — the bare name `lxml` is unbound here and raised a NameError.
parser = etree.XMLParser(load_dtd=True, dtd_validation=True)
# help(etree.XMLParser)
# Split the .nml file into its concatenated <doc>...</doc> XML documents.
xml_elems_list = []
# The accumulator must exist before the first `+=`; the original referenced
# `curr_file_str` before assignment and raised NameError on the first line.
curr_file_str = ""
with open("2018-05-29.nml") as myfile:
    while True:
        next_line = next(myfile, "EOF")  # sentinel marks end of file
        if next_line == "EOF":
            break
        curr_file_str += next_line
        # Each </doc> closes one document: parse it and reset the buffer.
        if next_line == "</doc>\n":
            xml_elems_list.append(ET.fromstring(curr_file_str))
            curr_file_str = ""
all_text = xml_elems_list[7].find(".//text")
"".join(all_text.itertext())
articles_df = pd.DataFrame(columns=['date', 'headline', 'company', 'article_text'])
test_company = xml_elems_list[15].find(".//djn-company")
"".join(test_company.itertext())
# Pull the date, headline, company code and full text out of every document
# and load them into the DataFrame columns.
dates_list, headlines_list, companies_list, article_texts = [], [], [], []
for doc in xml_elems_list:
    dates_list.append(doc.find("djnml").attrib['docdate'])
    headlines_list.append(doc.find(".//headline").text)
    companies_list.append(doc.find(".//c").text)
    article_texts.append("".join(doc.find(".//text").itertext()))
articles_df['date'] = pd.Series(dates_list)
articles_df['headline'] = pd.Series(headlines_list)
articles_df['company'] = pd.Series(companies_list)
articles_df['article_text'] = pd.Series(article_texts)
articles_df
| nml_parsing/nml_parser.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Betting markets - Monte Carlo simulation of seat outcomes
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Python-set-up" data-toc-modified-id="Python-set-up-1"><span class="toc-item-num">1 </span>Python set-up</a></span></li><li><span><a href="#Get-data" data-toc-modified-id="Get-data-2"><span class="toc-item-num">2 </span>Get data</a></span></li><li><span><a href="#Do-the-Monte-Carlo-(MC)-simulation" data-toc-modified-id="Do-the-Monte-Carlo-(MC)-simulation-3"><span class="toc-item-num">3 </span>Do the Monte Carlo (MC) simulation</a></span></li><li><span><a href="#Compile-results-from-MC-simulation" data-toc-modified-id="Compile-results-from-MC-simulation-4"><span class="toc-item-num">4 </span>Compile results from MC simulation</a></span></li><li><span><a href="#And-plot-..." data-toc-modified-id="And-plot-...-5"><span class="toc-item-num">5 </span>And plot ...</a></span></li></ul></div>
# -
# ## Python set-up
# +
import datetime
import numpy as np
from numpy.random import choice
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.units as munits
import matplotlib.ticker as ticker
from matplotlib.ticker import MaxNLocator
import matplotlib.patheffects as pe
import common
from common import COLOR_COALITION, COLOR_LABOR, COLOR_OTHER, COLOR_GREEN
# matplotlib stuff for date formatting xticklabels
converter = mdates.ConciseDateConverter()
munits.registry[np.datetime64] = converter
munits.registry[datetime.date] = converter
munits.registry[datetime.datetime] = converter
munits.registry[pd.Timestamp] = converter
munits.registry[pd.Period] = converter
plt.style.use('./markgraph.mplstyle')
# -
# ## Get data
FILE = '../historical-data/sportsbet-2022-individual-seats.csv'
df = pd.read_csv(FILE, na_values = 'SUS', parse_dates=['datetime'])
df = df[df['value'].notna()] # ignore missing values
# ## Do the Monte Carlo (MC) simulation
# Key constants ...
IGNORE_ODDS_ABOVE = 20 # dollars - minimise impact of long shot bias
MONTE_CARLO = 500_000 # simulation runs
WIN_THRESH = 75.5 # seats in parliament needed to govern outright
simulations = {}
for seat in df.seat.unique():
# get latest betting market probabilities
seat_probs = (
df[df.seat == seat]
.pivot(index='datetime', columns='variable', values='value')
.sort_index(ascending=True)
.pipe(lambda x: x.iloc[-1]) # last row
# options for managing the favourite-longshot bias (FLB) ...
# NOTE: change method statement at the end of this statement
#.pipe(lambda x: x.where(x <= IGNORE_ODDS_ABOVE, other=np.nan)) # trimmed
#.pipe(lambda x: x * np.sqrt(x)) # x * root(x)
.pipe(lambda x: x * x) # x squared
.dropna()
.pipe(lambda x: 1 / x) # convert odds to probabilities
.pipe(lambda x: x / x.sum()) # standardise so probabilities sum to one
)
#flb_method = f'Fav-Longshot bias management: odds > ${IGNORE_ODDS_ABOVE} removed'
#flb_method = 'Fav-Longshot bias management: odds * sqrt(odds)'
flb_method = 'Fav-Longshot bias management: odds-squared'
#flb_method = 'Fav-Longshot bias management: none'
# fix quirky naming by bookmaker
reindex_map = {
'Liberal': 'Coalition',
}
seat_probs = seat_probs.rename(index=reindex_map)
# simulate election in seat
simulation = choice(a=seat_probs.index, size=MONTE_CARLO,
p=seat_probs.values)
simulations[seat] = simulation
results = pd.DataFrame(simulations).T
results.index.name = 'Seat'
results.columns.name = 'Simulation Run'
results.head()
len(results) # Should be 151
# ## Compile results from MC simulation
# count the seats won by party for each simulation run
# this step takes around 25 seconds if MC=100,000 and 2 minutes if MC=500,000
# One value_counts per simulation run, assembled into a runs-by-parties table
# of integer seat counts (parties absent from a run get 0).
counts = {run: results[run].value_counts() for run in results.columns}
counts_df = pd.DataFrame(counts).T.fillna(0.0).astype(int)
counts_df.index.name = 'Simulation Run'
counts_df.columns.name = 'Party'
counts_df.head()
# Simplified columns (group other parties and independents)
majors = ['Coalition', 'Labor', 'Green']
simplified = counts_df[majors].copy()
simplified['Other'] = (
counts_df[[x for x in counts_df.columns if x not in majors]]
.sum(axis=1, skipna=True)
)
simplified.head()
# hung parliament probability
coalition = ((simplified['Coalition'] > WIN_THRESH).sum() / MONTE_CARLO) * 100 # %
labor = ((simplified['Labor'] > WIN_THRESH).sum() / MONTE_CARLO) * 100 # %
hung = 100 - coalition - labor
parliament = pd.Series([coalition, hung, labor],
index=['Absolute Coalition Majority',
'Hung Parliament',
'Absolute Labor Majority'])
parliament
# Count the distribution of seats won by party group, convert to probabilities
freq = {}
for party in simplified.columns:
freq[party] = simplified[party].value_counts(dropna=True)
win_prob = (pd.DataFrame(freq).fillna(0) / MONTE_CARLO) * 100
win_prob.columns.name = 'Party'
win_prob.index.name = 'Number of Seats Won'
win_prob.head()
# ## And plot ...
# +
# Plot outcome distributions by party
color_map = {
'Coalition': COLOR_COALITION,
'Labor': COLOR_LABOR,
'Other': COLOR_OTHER,
'Green': COLOR_GREEN
}
footer = f'{flb_method}; MC sims = {MONTE_CARLO:,}'
for party in win_prob.columns:
data = win_prob[party]
fig, ax = common.initiate_plot()
ax.bar(data.index, data, color=color_map[party])
ax.axvline(WIN_THRESH, lw=0.5, c='b')
title = f'Sportsbet - Election Outcome from Seat Odds for {party}'
outright = data[data.index > WIN_THRESH].sum()
lfooter = f'{footer}; Probability of outright win = {outright:.1f}%'
common.plot_finalise(ax, title=title, ylabel='Implied Win Probability (%)',
xlabel=f'Number of Seats Won by {party}',
save_suffix=flb_method,
lfooter=lfooter)
# +
# Plot House Outcome Probabilities as a horizontal bar chart.
fig, ax = common.initiate_plot()
colors = [COLOR_COALITION, COLOR_OTHER, COLOR_LABOR, ]
ax.barh(parliament.index, parliament, color=colors)
# Annotate each bar with its probability. Series.items() replaces
# Series.iteritems(), which was removed in pandas 2.0.
for index, value in parliament.items():
    text = ax.text(x=1, y=index, s=f'{value:.1f}%',
                   fontsize=30, ha='left', va='center')
    # white outline keeps the label readable on top of the bar
    text.set_path_effects([pe.withStroke(linewidth=5, foreground='w')])
common.plot_finalise(ax, 'Sportsbet - House Outcome Probabilities',
                     ylabel=None,
                     xlabel='Probability (%)',
                     lfooter=footer,
                     save_suffix=flb_method,
                     )
# -
| notebooks/Betting markets - Monte Carlo simulation of seat outcomes - independent draws.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Case study: Bioinformatics - comparing genomes
#
# **Objectives**
#
# * In this case study, we will compare two DNA sequences: (1) human; vs. (2) bacteria
#
# **Dataset features**
#
# Escherichia coli strain U 5/41 16S ribosomal RNA gene, partial sequence
#
# * Rows: 37
# * Columns: 2
# * File format: fasta
# * Source: https://www.ncbi.nlm.nih.gov/nuccore/M10098.1?report=fasta
#
# Data Science: Visualização de Dados com Python Udemy's course:
#
# 'DNA is a molecule present in all living beings, which is responsible for storing hereditary characteristics. It is composed of nucleotide sequences, which can be of four types: adenine, thymine, cytosine or guanine.
#
# In computational terms, we can represent them with 4 letters: A, T, C or G.
#
# In this case study, we want to assess whether structures with similar functions (we are using ribosomal RNA sequences) from different organisms have differences. For this we will evaluate the number of nucleotide pairs.'
# 
# ### Opening and reading files
# +
# Read each FASTA file as a list of lines, skipping the header line
human = open("human.fasta").readlines()[1:]
bacteria = open("bacteria.fasta").readlines()[1:]
# Join the lines into a single sequence string
human = ''.join(human)
bacteria = ''.join(bacteria)
# Remove the line breaks so the sequence is one contiguous string
human = human.replace("\n","")
bacteria = bacteria.replace("\n","")
# Open the output HTML report for writing
comparison = open("human_bacteria_comparison.html", "w")
print(bacteria)
# -
# ### Creating a dictionary
# +
# Zero-initialised tally with one key per ordered nucleotide pair —
# the 16 two-letter arrangements 'AA', 'AT', ..., 'GG'.
count = {first + second: 0
         for first in ['A', 'T', 'C', 'G']
         for second in ['A', 'T', 'C', 'G']}
count
# -
# ### Counting human and bacteria nucleotide pairs
# +
# Give each organism its own tally. dict(count) makes an independent copy of
# the zeroed template — the original plain assignments (`human_count = count`)
# aliased all three names to ONE shared dict, so both organisms' pairs were
# summed into the same counter.
human_count = dict(count)
bacteria_count = dict(count)
# Count overlapping nucleotide pairs in the human sequence
for k in range(len(human)-1):
    human_count[human[k]+human[k+1]] += 1
print(human_count)
# Count overlapping nucleotide pairs in the bacterial sequence
for k in range(len(bacteria)-1):
    bacteria_count[bacteria[k]+bacteria[k+1]] += 1
print(bacteria_count)
# -
# ### Printing on a HTML page
# +
# Write a 4x4 HTML grid of squares, one per nucleotide pair, shaded darker
# the more frequent the pair is in the human sequence.
i = 1
for k in count:
    # Opacity proportional to this pair's share of the most frequent pair
    transparency = human_count[k]/max(human_count.values())
    comparison.write("<div style='width:100px; border:1px solid #111; color:#fff; height:100px; float:left; background-color:rgba(0, 0, 0, "+str(transparency)+")'>"+k+"</div>\n")
    if i % 4 == 0:
        # Start a new row after every fourth cell
        comparison.write("<div style='clear:both'></div>")
    i += 1
comparison.close()
# -
import matplotlib.pyplot as plt
import numpy as np
# +
''' Example
chess = np.array([[1,0,1,0,1,0,1,0],
[0,1,0,1,0,1,0,1],
[1,0,1,0,1,0,1,0],
[0,1,0,1,0,1,0,1],
[1,0,1,0,1,0,1,0],
[0,1,0,1,0,1,0,1],
[1,0,1,0,1,0,1,0],
[0,1,0,1,0,1,0,1]])
'''
human_transparency = np.array([human_count[k]/max(human_count.values()) for k in count]).reshape(4,4)
print(human_transparency)
plt.figure(figsize=(8,8))
plt.imshow(human_transparency, cmap='gray_r') # gray_r reverse scale
plt.axis(False)
plt.show()
# -
| human_bacteria_genome_comparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# ==========================================================
# Compute point-spread functions (PSFs) for MNE/dSPM/sLORETA
# ==========================================================
#
# PSFs are computed for four labels in the MNE sample data set
# for linear inverse operators (MNE, dSPM, sLORETA).
# PSFs describe the spread of activation from one label
# across the cortical surface.
#
# +
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
from mayavi import mlab
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, point_spread_function
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects/'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_inv_eegmeg = (data_path +
'/MEG/sample/sample_audvis-meg-eeg-oct-6-meg-eeg-inv.fif')
fname_inv_meg = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_label = [data_path + '/MEG/sample/labels/Aud-rh.label',
data_path + '/MEG/sample/labels/Aud-lh.label',
data_path + '/MEG/sample/labels/Vis-rh.label',
data_path + '/MEG/sample/labels/Vis-lh.label']
# read forward solution
forward = mne.read_forward_solution(fname_fwd)
# read inverse operators
inverse_operator_eegmeg = read_inverse_operator(fname_inv_eegmeg)
inverse_operator_meg = read_inverse_operator(fname_inv_meg)
# read label(s)
labels = [mne.read_label(ss) for ss in fname_label]
# regularisation parameter: lambda2 = 1 / SNR^2 with an assumed SNR of 3
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = 'MNE'  # can be 'MNE' or 'sLORETA'
# summarise each label's activity by its first SVD component
mode = 'svd'
n_svd_comp = 1
# Point-spread functions for the combined EEG+MEG and the MEG-only inverses
stc_psf_eegmeg, _ = point_spread_function(
    inverse_operator_eegmeg, forward, method=method, labels=labels,
    lambda2=lambda2, pick_ori='normal', mode=mode, n_svd_comp=n_svd_comp)
stc_psf_meg, _ = point_spread_function(
    inverse_operator_meg, forward, method=method, labels=labels,
    lambda2=lambda2, pick_ori='normal', mode=mode, n_svd_comp=n_svd_comp)
# save for viewing in mne_analyze in order of labels in 'labels'
# last sample is average across PSFs
# stc_psf_eegmeg.save('psf_eegmeg')
# stc_psf_meg.save('psf_meg')
time_label = "EEGMEG %d"
brain_eegmeg = stc_psf_eegmeg.plot(hemi='rh', subjects_dir=subjects_dir,
time_label=time_label,
figure=mlab.figure(size=(500, 500)))
time_label = "MEG %d"
brain_meg = stc_psf_meg.plot(hemi='rh', subjects_dir=subjects_dir,
time_label=time_label,
figure=mlab.figure(size=(500, 500)))
# The PSF is centred around the right auditory cortex label,
# but clearly extends beyond it.
# It also contains "sidelobes" or "ghost sources"
# in middle/superior temporal lobe.
# For the Aud-RH example, MEG and EEGMEG do not seem to differ a lot,
# but the addition of EEG still decreases point-spread to distant areas
# (e.g. to ATL and IFG).
# The chosen labels are quite far apart from each other, so their PSFs
# do not overlap (check in mne_analyze)
| stable/_downloads/6d2e7707c1a288e56b1da50b32d16c03/plot_mne_point_spread_function.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#default_exp data.block
# -
#export
from local.torch_basics import *
from local.test import *
from local.data.core import *
from local.data.load import *
from local.data.external import *
from local.data.transforms import *
from local.notebook.showdoc import *
# # Data block
#
# > High level API to quickly get your data in a `DataBunch`
# ## General API
#export
from inspect import isfunction,ismethod
#export
def _merge_tfms(*tfms):
    "Group the `tfms` in a single list, removing duplicates (from the same class) and instantiating"
    # Group key: the object itself for types, the qualified name for plain
    # functions/methods, otherwise the instance's class — so transforms of
    # the same kind land in the same group.
    # NOTE(review): `groupby` here is presumably fastcore's dict-returning
    # version (its result is used via .items()), not itertools.groupby.
    g = groupby(concat(*tfms), lambda o:
        o if isinstance(o, type) else o.__qualname__ if (isfunction(o) or ismethod(o)) else o.__class__)
    # Keep only the last transform of each group (later ones win), then
    # instantiate any bare classes.
    return L(v[-1] for k,v in g.items()).map(instantiate)
#For example, so not exported
from local.vision.core import *
from local.vision.augment import *
# +
#hide
tfms = _merge_tfms([Categorize, MultiCategorize, Categorize(['dog', 'cat'])], Categorize(['a', 'b']))
#If there are several instantiated versions, the last one is kept.
test_eq(len(tfms), 2)
test_eq(tfms[1].__class__, MultiCategorize)
test_eq(tfms[0].__class__, Categorize)
test_eq(tfms[0].vocab, ['a', 'b'])
tfms = _merge_tfms([PILImage.create, PILImage.show])
#Check methods are properly separated
test_eq(len(tfms), 2)
tfms = _merge_tfms([show_image, set_trace])
#Check functions are properly separated
test_eq(len(tfms), 2)
# -
#export
@docs
@funcs_kwargs
class DataBlock():
    "Generic container to quickly build `DataSource` and `DataBunch`"
    # Overridable hooks; @funcs_kwargs also lets them be passed as kwargs
    # (see `_methods`).
    get_x=get_items=splitter=get_y = None
    dl_type = TfmdDL
    _methods = 'get_items splitter get_y get_x'.split()
    def __init__(self, types=None, dl_type=None, getters=None, n_inp=None, **kwargs):
        # Default to (float, float) input/target types unless the subclass
        # declares `types` or they are passed explicitly.
        types = L(getattr(self,'types',(float,float)) if types is None else types)
        # Per type: its `create` method plus the type's own default type tfms
        self.default_type_tfms = types.map(
            lambda t: L(getattr(t,'create',None)) + L(getattr(t,'default_type_tfms',None)))
        # Merge each type's item/batch tfms with the mandatory ToTensor / Cuda
        self.default_item_tfms = _merge_tfms(ToTensor, *types.attrgot('default_item_tfms', L()))
        self.default_batch_tfms = _merge_tfms(Cuda, *types.attrgot('default_batch_tfms', L()))
        # The last type defining `dl_type` wins; an explicit arg overrides all
        for t in types: self.dl_type = getattr(t, 'dl_type', self.dl_type)
        if dl_type is not None: self.dl_type = dl_type
        # Expose the chosen DataLoader's kwargs on `databunch`'s signature
        self.databunch = delegates(self.dl_type.__init__)(self.databunch)
        self.dbunch_kwargs = {}
        for t in types: self.dbunch_kwargs.update(getattr(t, 'dbunch_kwargs', {}))
        self.n_inp,self.getters = n_inp,L(getters)
        # `getters` replaces get_x/get_y wholesale; they are mutually exclusive
        if getters is not None: assert self.get_x is None and self.get_y is None
        assert not kwargs
    def datasource(self, source, type_tfms=None):
        self.source = source
        items = (self.get_items or noop)(source)
        # A tuple of collections means one collection per type: zip them and
        # label with positional itemgetters; otherwise labellers default to noop.
        if isinstance(items,tuple):
            items = L(items).zip()
            labellers = [itemgetter(i) for i in range_of(self.default_type_tfms)]
        else: labellers = [noop] * len(self.default_type_tfms)
        splits = (self.splitter or noop)(items)
        # get_x/get_y (or an explicit `getters` list) override the defaults
        if self.get_x: labellers[0] = self.get_x
        if self.get_y: labellers[1] = self.get_y
        if self.getters: labellers = self.getters
        if type_tfms is None: type_tfms = [L() for t in self.default_type_tfms]
        # Pipeline per type: labeller first, then merged default + extra tfms
        type_tfms = L([self.default_type_tfms, type_tfms, labellers]).map_zip(
            lambda tt,tfm,l: L(l) + _merge_tfms(tt, tfm))
        return DataSource(items, tfms=type_tfms, splits=splits, dl_type=self.dl_type, n_inp=self.n_inp)
    def databunch(self, source, path='.', type_tfms=None, item_tfms=None, batch_tfms=None, **kwargs):
        dsrc = self.datasource(source, type_tfms=type_tfms)
        # Merge user-supplied tfms with the defaults gathered in __init__
        item_tfms = _merge_tfms(self.default_item_tfms, item_tfms)
        batch_tfms = _merge_tfms(self.default_batch_tfms, batch_tfms)
        kwargs = {**self.dbunch_kwargs, **kwargs}
        return dsrc.databunch(path=path, after_item=item_tfms, after_batch=batch_tfms, **kwargs)
    _docs = dict(datasource="Create a `Datasource` from `source` with `type_tfms`",
                 databunch="Create a `DataBunch` from `source` with `item_tfms` and `batch_tfms`")
# To build a `DataBlock` you need to give the library four things: the types of your input/labels then at least two functions: `get_items` and `splitter`. You may also need to include `get_x` and `get_y` or a more generic list of `getters` that are applied to the results of `get_items`.
#
# Once those are provided, you automatically get a `DataSource` or a `DataBunch`:
show_doc(DataBlock.datasource)
show_doc(DataBlock.databunch)
# You can create a `DataBlock` by passing functions or subclassing. The two following data blocks are the same for instance:
# +
class MNIST(DataBlock):
    # Same data block as the functional version below, declared by subclassing.
    types = PILImageBW, Category

    def get_items(self, source):
        # All image files found below `source`.
        return get_image_files(Path(source))

    def splitter(self, items):
        # Train/valid split keyed on each file's grandparent directory name.
        return GrandparentSplitter()(items)

    def get_y(self, item):
        # The label is the name of the image's parent directory.
        return parent_label(item)
mnist = MNIST()
# -
# The same block, built by passing functions instead of subclassing.
mnist = DataBlock(types = (PILImageBW,Category),
                  get_items = get_image_files,
                  splitter = GrandparentSplitter(),
                  get_y = parent_label)
# Each type comes with default transforms that will be applied
# - at the base level to create items in a tuple (usually input,target) from the base elements (like filenames)
# - at the item level of the datasource
# - at the batch level
#
# They are called respectively type transforms, item transforms, batch transforms. In the case of MNIST, the type transforms are the method to create a `PILImageBW` (for the input) and the `Categorize` transform (for the target), the item transform is `ToTensor` and the batch transforms are `Cuda` and `IntToFloatTensor`. You can add any other transforms by passing them in `DataBlock.datasource` or `DataBlock.databunch`.
# Check the default transforms attached by the (image, category) types.
test_eq(mnist.default_type_tfms, [[PILImageBW.create], [Categorize]])
test_eq(mnist.default_item_tfms.map(type), [ToTensor])
test_eq(mnist.default_batch_tfms.map(type), [Cuda, IntToFloatTensor])
# Smoke test on the tiny MNIST subset: two classes, 28x28 images.
dsrc = MNIST().datasource(untar_data(URLs.MNIST_TINY))
test_eq(dsrc.vocab, ['3', '7'])
x,y = dsrc.train[0]
test_eq(x.size,(28,28))
show_at(dsrc.train, 0, cmap='Greys', figsize=(2,2));
# ## Export -
#hide
# Export the library cells of this notebook to the package source tree.
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
| dev/07_data_block.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # HW 13 - Quantum Mechanics II
#
# ### Name: <NAME>
# ### ID: 201700399
import numpy as np
from numpy.random import randint as rand
import matplotlib.pyplot as plt
import time
from scipy.integrate import odeint
from scipy.linalg import eig
from matplotlib.pyplot import cm
import random
# # Hamiltonian for 1D Problems
#
# $$H\psi_n = E\psi_n = \dfrac{2\psi_n - \psi_{n+1} - \psi_{n-1} + 2{\Delta x}^2 V_n \psi_n }{2{\Delta x}^2}$$
# $$\hat{H_n} =\dfrac{1}{{\Delta x}^2} \big(1+\Delta x^2 V_n \big) $$
# $$\hat{H_{n+1}} =-\dfrac{1}{2{\Delta x}^2}$$
# $$\hat{H_{n-1}} =-\dfrac{1}{2{\Delta x}^2}$$
# # #2 Variational Method
#
# # Lennard-Jones Potential
# +
# Radial grid for the Lennard-Jones problem (starts at 0.7 to avoid the r->0 singularity).
dx = 0.1
x=np.round(np.arange(0.7,5,dx),3)
lx = x.size
V = np.zeros(lx)
epsilon = 10   # LJ well depth
sigma = 1      # LJ length scale
phi0 = np.zeros(lx)
const=3        # NOTE(review): defined but unused below (the literal 3 is used instead)
for i in range(lx):
    # Lennard-Jones potential: 4*eps*((sigma/x)^12 - (sigma/x)^6).
    V[i] = 4*epsilon*(np.power(sigma/x[i],12) - np.power(sigma/x[i], 6))
    # Initial trial wavefunction: constant on [1, 4), zero elsewhere.
    if x[i]>=1 and x[i]<4:
        phi0[i] = 3
plt.figure()
plt.plot(x,V, label='V')
def Hamiltonian(V, n=None, h=None):
    """Finite-difference Hamiltonian matrix for the 1D Schrodinger equation.

    V : potential sampled on the grid.
    n : number of grid points (defaults to the global `lx` for backward compatibility).
    h : grid spacing (defaults to the global `dx`).
    Returns the (n, n) tridiagonal matrix with 1/h^2 + V_i on the diagonal and
    -1/(2 h^2) on the sub/super diagonals.
    """
    if n is None: n = lx
    if h is None: h = dx
    H = np.zeros([n, n])
    for i in range(n):
        H[i, i] = 1/np.power(h, 2) + V[i]
        if n - i - 1: H[i, i+1] = -1/(2*np.power(h, 2))  # superdiagonal (skip last row)
        if i: H[i, i-1] = -1/(2*np.power(h, 2))          # subdiagonal (skip first row)
    return H
H = Hamiltonian(V)
# Normalise the trial wavefunction.
norm = np.dot(phi0,phi0)
phiNorm = phi0/np.sqrt(norm)
plt.plot(x, phiNorm, label=r'$\phi_0$')
# Variational energy <phi|H|phi> of the normalised trial state.
engy = phiNorm.dot(H).dot(phiNorm)
engold = engy
phiNew = np.copy(phiNorm)
phiNorm1 = np.copy(phiNorm)   # best wavefunction found so far
dphiRange = np.arange(-0.01, 0.011, 0.001)   # candidate point-wise perturbations
N = 1000001
factor = 1   # next iteration index at which to print/plot (roughly log-spaced)
for n in range(N):
    # Propose a random single-point perturbation of the current best state.
    phiNew = np.copy(phiNorm1)
    pos = np.where(x==np.random.choice(x))[0][0]
    dphi = np.random.choice(dphiRange)
    #pos= 3
    phiNew[pos] += dphi
    #phiNew[pos] = phiNorm[pos] + 1
    norm = phiNew.dot(phiNew)
    phiNorm = phiNew/np.sqrt(norm)
    engy = phiNorm.dot(H).dot(phiNorm)
    # Greedy acceptance: keep the move only if it does not raise the energy.
    if engy > engold :
        phiNorm = np.copy(phiNorm1)
    else:
        phiNorm1 = np.copy(phiNorm)
        engold = engy
    # Report progress at n = 0, 10, 100, ... (plus once at n = 5000).
    if (not n%factor) or n==5000:
        factor *=10
        print(engold)
        plt.plot(x, phiNorm1, label='n= '+str(n))
plt.ylim(-0.2,0.5)
plt.xlabel('x')
plt.ylabel(r'$\psi$')
plt.suptitle("Figure 10.14: Variational Montecarlo")
plt.title("<NAME>")
plt.grid(alpha=0.2)
plt.legend()
plt.show()
# -
# # #3 Problem 10.9
#
# # One Dimensional Square Well
# +
# Square-well setup: grid on [-l, l].
l=1
dx = 0.1
x=np.round(np.arange(-l,l+dx,dx),3)
lx = x.size
phi0 = np.zeros(lx)
const=3
V = np.zeros(lx)
VOutside = 10000   # large finite value standing in for infinite walls
for i in range(lx):
    # NOTE(review): since x spans exactly [-l, l], this condition only holds at
    # the two endpoints, so the wall potential AND the initial guess phi0 are
    # nonzero at the boundary points only — confirm this is intended.
    if x[i]<=-l or x[i]>=l:
        V[i]=VOutside
        phi0[i] = const
plt.figure()
plt.plot(x,V, label='V')
def Hamiltonian(V, n=None, h=None):
    """Finite-difference Hamiltonian matrix for the 1D Schrodinger equation.

    V : potential sampled on the grid.
    n : number of grid points (defaults to the global `lx` for backward compatibility).
    h : grid spacing (defaults to the global `dx`).
    Returns the (n, n) tridiagonal matrix with 1/h^2 + V_i on the diagonal and
    -1/(2 h^2) on the sub/super diagonals.
    """
    if n is None: n = lx
    if h is None: h = dx
    H = np.zeros([n, n])
    for i in range(n):
        H[i, i] = 1/np.power(h, 2) + V[i]
        if n - i - 1: H[i, i+1] = -1/(2*np.power(h, 2))  # superdiagonal (skip last row)
        if i: H[i, i-1] = -1/(2*np.power(h, 2))          # subdiagonal (skip first row)
    return H
H = Hamiltonian(V)
# Normalise the trial wavefunction.
norm = np.dot(phi0,phi0)
phiNorm = phi0/np.sqrt(norm)
plt.plot(x, phiNorm, label=r'$\phi_0$')
# Variational energy <phi|H|phi> of the normalised trial state.
engy = phiNorm.dot(H).dot(phiNorm)
engold = engy
phiNew = np.copy(phiNorm)
phiNorm1 = np.copy(phiNorm)   # best wavefunction found so far
dphiRange = np.arange(-0.01, 0.011, 0.001)   # candidate point-wise perturbations
N = 1000001
factor = 1   # next iteration index at which to print/plot (roughly log-spaced)
for n in range(N):
    # Propose a random single-point perturbation of the current best state.
    phiNew = np.copy(phiNorm1)
    pos = np.where(x==np.random.choice(x))[0][0]
    dphi = np.random.choice(dphiRange)
    #pos= 3
    phiNew[pos] += dphi
    #phiNew[pos] = phiNorm[pos] + 1
    norm = phiNew.dot(phiNew)
    phiNorm = phiNew/np.sqrt(norm)
    engy = phiNorm.dot(H).dot(phiNorm)
    # Greedy acceptance: keep the move only if it does not raise the energy.
    if engy > engold :
        phiNorm = np.copy(phiNorm1)
    else:
        phiNorm1 = np.copy(phiNorm)
        engold = engy
    # Report progress at n = 0, 10, 100, ... except on the final iteration.
    if (not n%factor) and n!= (N-1):
        factor *=10
        print(engold)
        plt.plot(x, phiNorm1, label='n= '+str(n))
    if n == (N-1):
        # Final iterate drawn with a thicker line.
        plt.plot(x, phiNorm1, label='n= '+str(n),lw = 3)
plt.ylim(-0.2,0.5)
plt.xlabel('x')
plt.ylabel(r'$\psi$')
plt.suptitle("Figure 10.14: Variational Montecarlo")
plt.title("<NAME> Potential")
plt.grid(alpha=0.2)
plt.legend()
plt.show()
# -
# # Hamiltonian for Hydrogen Atom
#
# $$\hat{H_n} = V_n + \dfrac{1}{{r}^2} + \dfrac{1}{{\Delta r}^2} $$
# $$\hat{H_{n+1}} =-\dfrac{1}{2{\Delta r}^2}$$
# $$\hat{H_{n-1}} =-\dfrac{1}{2{\Delta r}^2}$$
#
# # #4 Matrix Method for Hydrogen Atom
# +
# Radial grid for the hydrogen atom, starting at dr to avoid r = 0.
l=10
dr = 0.01
r=np.round(np.arange(dr,l+dr,dr),3)
lr = r.size
def Hamiltonian(V, n=None, h=None):
    """Finite-difference radial Hamiltonian for the hydrogen problem.

    V : potential sampled on the radial grid.
    n : number of grid points (defaults to the global `lr` for backward compatibility).
    h : grid spacing (defaults to the global `dr`).
    Returns the (n, n) tridiagonal matrix with 1/h^2 + V_i on the diagonal and
    -1/(2 h^2) on the sub/super diagonals.
    """
    if n is None: n = lr
    if h is None: h = dr
    # NOTE(review): the markdown above includes a +1/r^2 diagonal term, but the
    # active code omits it (the commented-out variant kept it) — confirm l=0 is intended.
    #H[i,i] = 1/np.power(r[i],2)+1/np.power(dr,2)+ V[i]
    H = np.zeros([n, n])
    for i in range(n):
        H[i, i] = 1/np.power(h, 2) + V[i]
        if n - i - 1: H[i, i+1] = -1/(2*np.power(h, 2))  # superdiagonal (skip last row)
        if i: H[i, i-1] = -1/(2*np.power(h, 2))          # subdiagonal (skip first row)
    return H
# Coulomb potential V(r) = -1/r (atomic units).
V = np.zeros(lr)
for i in range(lr):
    V[i]=-1/r[i]
# Diagonalise and look at the two lowest eigenvalues (eig returns them unsorted).
values, vectors = eig(Hamiltonian(V))
np.sort(values)[:2]
# -
# Exact hydrogen energies E_n = -1/(2 n^2) for n = 1, 2, for comparison.
E_exac = np.array([-(1/(2*np.power(n,2))) for n in range(1,3)])
E_exac
# # Matrix Method for Quantum Harmonic Oscillator
# +
# Grid for the harmonic oscillator, extended one unit past [-l, l] on each side.
l=1
dx = 0.01
x=np.round(np.arange(-l-1,l+1+dx,dx),3)
lx = x.size
def Hamiltonian(V, n=None, h=None):
    """Finite-difference Hamiltonian matrix for the 1D Schrodinger equation.

    V : potential sampled on the grid.
    n : number of grid points (defaults to the global `lx` for backward compatibility).
    h : grid spacing (defaults to the global `dx`).
    Returns the (n, n) tridiagonal matrix with 1/h^2 + V_i on the diagonal and
    -1/(2 h^2) on the sub/super diagonals.
    """
    if n is None: n = lx
    if h is None: h = dx
    H = np.zeros([n, n])
    for i in range(n):
        H[i, i] = 1/np.power(h, 2) + V[i]
        if n - i - 1: H[i, i+1] = -1/(2*np.power(h, 2))  # superdiagonal (skip last row)
        if i: H[i, i-1] = -1/(2*np.power(h, 2))          # subdiagonal (skip first row)
    return H
# Harmonic potential V(x) = x^2 / 2.
V = np.zeros(lx)
for i in range(lx):
    V[i]=np.power(x[i],2)/2
# Diagonalise and look at the two lowest eigenvalues.
values, vectors = eig(Hamiltonian(V))
np.sort(values)[:2]
# -
# Exact oscillator energies E_k = k + 1/2 for k = 0, 1, for comparison.
E_exac = np.array([(k+.5) for k in range(2)])
E_exac
| Project#13_Quantum Mechanics_Matching,Variational,Matrix Methods/Project#13_Part II #2,3,4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "skip"}
# <table>
# <tr align=left><td><img align=left src="./images/CC-BY.png">
# <td>Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) <NAME></td>
# </table>
# + slideshow={"slide_type": "skip"}
from __future__ import print_function
from __future__ import absolute_import
# %matplotlib inline
import numpy
import matplotlib.pyplot as plt
# + [markdown] slideshow={"slide_type": "slide"}
# # Interpolation
#
# There are times when you have estimates for the values of a function for specific inputs. The values of the function may be obtained in a variety of ways either through experiment or through the use of other approximation techniques. Our goal in this chapter is to explore techniques that allow us to determine a new function whose values match the known observations at a set of predetermined input values. We first formally define the term we will use to describe the process.
# + [markdown] slideshow={"slide_type": "subslide"}
# **Definition:** Given a discrete set of values $y_i$ at locations $x_i$, an *interpolant* is a (piece-wise) continuous function $f(x)$ that passes exactly through the data (*i.e.* $f(x_i) = y_i$).
#
# **Example 0** The linear polynomial
#
# $$
# P_1(x) = 2(x-1)+3
# $$
#
# interpolates the coordinates $(1,3)$ and $(3,7)$.
#
# In general a polynomial of degree $N$ can be used to interpolate $N+1$ data points. There are many different kinds of functions to use to interpolate values, but here we focus on polynomials.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Applications
#
# - Data filling
# - Function approximation
# - Fundamental component of other algorithms
# - Root finding (secant method)
# - Optimization, minima/maxima (successive parabolic interpolation)
# - Numerical integration and differentiation
# - The Finite Element Method
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Polynomial Interpolation
#
# **Theorem:** There is a *unique* polynomial of degree $N$, $P_N(x)$, that passes exactly through $N + 1$ values $y_1, y_2, \ldots, y_N, y_{N+1}$ at *distinct* points $x_1, x_2, \ldots, x_N, x_{N+1}$.
#
# Consequence of the number of unknowns in $P_N(x)$.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example 1: 2 Points
#
# Given points are $(x_0, y_0)$ and $(x_1, y_1)$ which will lead to a line:
#
# Define $P_1(x) = p_1 x + p_0$ and use the two points to find $p_0$ and $p_1$:
# + [markdown] slideshow={"slide_type": "subslide"}
# We first note that we have two equations and two unknowns. The two equations can be found by assuming the function $P_1(x)$ interpolates the two data points
# $$
# \begin{align}
# y_0 &= p_1 x_0 + p_0, \\
# y_1 &= p_1 x_1 + p_0.
# \end{align}
# $$
# In this example we will solve the first equation for $p_0$, substitute the result into the second equation, and then solve for $p_1$.
#
# $$y_0 = p_1 x_0 + p_0 \quad \Rightarrow \quad p_0 = y_0 - p_1 x_0$$
#
# + [markdown] slideshow={"slide_type": "subslide"}
# $$\begin{aligned}
# y_1 &= p_1 x_1 + p_0 & \Rightarrow \\
# y_1 &= p_1 x_1 + y_0 - p_1 x_0 & \Rightarrow \\
# p_1 &= \frac{y_1 - y_0}{x_1 - x_0} & \Rightarrow \\
# p_0 &= y_0 - \frac{y_1 - y_0}{x_1 - x_0} x_0 &
# \end{aligned}$$
# + [markdown] slideshow={"slide_type": "subslide"}
# $$P_1(x) = \frac{y_1 - y_0}{x_1 - x_0} x + y_0 - \frac{y_1 - y_0}{x_1 - x_0} x_0 = \frac{y_1 - y_0}{x_1 - x_0} (x - x_0) + y_0$$
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example 2: 3 Points
#
# Given points are $(x_0, y_0)$, $(x_1, y_1)$, and $(x_2, y_2)$ which will lead to quadratic polynomial:
#
# Define $P_2(x) = p_2 x^2 + p_1 x + p_0$ leading to the equations
# + [markdown] slideshow={"slide_type": "subslide"}
# $$y_0 = p_2 x_0^2 + p_1 x_0 + p_0$$
#
# $$y_1 = p_2 x_1^2 + p_1 x_1 + p_0$$
#
# $$y_2 = p_2 x_2^2 + p_1 x_2 + p_0$$
#
# This gets complicated quickly! Note, we have three equations and three unknowns, and the previous system is a linear system of three equations.
# and in general, the problem will reduce to a linear system
# $$
# A(\mathbf{x})\mathbf{p} = \mathbf{y}
# $$
# A more general approach to solving the system will be explored later, but first it is important to determine whether or not the system even has a solution.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Proof - Uniqueness of Polynomial Interpolants
#
# Let
#
# $$\mathcal{P}_N(x) = \sum^N_{n=0} p_n x^n $$
# or
# $$\mathcal{P}_N(x) = p_0 + p_1 x + \cdots + p_{N - 1} x^{N - 1} + p_{N} x^N$$
#
# and require $\mathcal{P}_N(x_i) = y_i$ for $i=0,1,\ldots,N$ and $x_i \neq x_j ~~~ \forall i,j$.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Preliminaries: Monomial Basis
#
# We can think of $\mathcal{P}_N(x) = \sum^N_{n=0} p_n x^n$ as a polynomial, or more fundamentally as a *linear combination* of a set of simpler functions, the monomials
#
# $$1, x, x^2, x^3, \ldots, x^{N-1}, x^N$$
#
# with weights
#
# $$p_0, p_1, p_2, p_3, \ldots, p_{N-1}, \text{and } p_N$$
#
# respectively.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Linear independence of the Monomials
#
# The monomials, form a *linearly independent* set of functions such that no monomial $x^n$ can be written as a linear combination of any other monomial. We can see this graphically, for the first few monomials
# + hide_input=true slideshow={"slide_type": "-"}
# Plot the first four monomials on [-1, 1] to illustrate the basis.
x = numpy.linspace(-1,1,100)
fig = plt.figure(figsize=(8,6))
axes = fig.add_subplot(1,1,1)
for n in range(4):
    axes.plot(x,x**n,label='$x^{}$'.format(n))
axes.set_xlabel('x')
axes.grid()
axes.legend(loc='best')
axes.set_title('The First 4 Monomials')
plt.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# But more fundamentally. A set of functions is linearly independent if the only linear combination that add to form the zero function, e.g.
#
# $$
# P_N(x) = p_0 \cdot 1 + p_1 x + p_2 x^2 + \ldots + p_n x^n = 0
# $$
#
# is if all the coefficients $p_i = 0$, $\forall i=0,\ldots N$
# + [markdown] slideshow={"slide_type": "subslide"}
# **Theorem**: The monomials $x^0,\ldots, x^n$ are linear independent.
#
# **Proof**: consider $P_N(x) = 0$ for all $x$. Since the polynomials (and monomials) are differentiable at least $n$ times, differentiate $n$ times to yield
# $$
# P^{(n)}_N(x) = n!p_n = 0
# $$
# which implies $p_n=0$.
#
# Using this result and differentiating $n-1$ times shows $p_{n-1}=0$, which by induction gives all $p_i = 0$.
#
# Put another way, the only $n$th degree polynomial that is zero everywhere is if all coefficients are zero.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### The Fundamental theorem of algebra
#
# Every $n$th degree polynomial has exactly $n$ complex roots, i.e.
#
# $$
# P_N(x) = (x - a_1)(x - a_2)\ldots(x - a_n)
# $$
# for $a_i\in \mathbb{C}$. Therefore, a non-trivial $n$th order polynomial can only be zero at $n$ points.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Proof - Uniqueness of Polynomial Interpolants
#
# Let
#
# $$\mathcal{P}_N(x) = \sum^N_{n=0} p_n x^n $$
#
# **interpolate** the $N+1$ points $y_i$ at $x_i$.
#
# i.e.
# $$\mathcal{P}_N(x_i) = y_i,\quad \mathrm{for}\quad i=0,1,\ldots,N$$
#
# and $x_i \neq x_j ~~~ \forall i,j$.
# + [markdown] slideshow={"slide_type": "subslide"}
# Assume there exists another polynomial
#
# $$Q_N(x) = \sum^N_{n=0} q_n x^n$$
#
# that passes through the same set of points such that $Q_N(x_i) = y_i$. Now compute $T_N(x) = \mathcal{P}_N(x) - Q_N(x)$:
# + [markdown] slideshow={"slide_type": "subslide"}
# Now, by construction, $T_N(x_i) = 0$ which implies that it is equal to zero at $n+1$ points. However,
#
# $$T_N(x) = \mathcal{P}_N(x) - Q_N(x) = \sum^N_{n=0} p_n x^n - q_n x^n = \sum^N_{n=0} (p_n - q_n) x^n$$
#
# is an $n$th order polynomial which has at most $n$ real roots. The only way to reconcile this is if $T_N(x) = 0$ for all $x$, and therefore $p_n - q_n = 0$ individually and therefore $\mathcal{P}_N(x) = Q_N(x)$.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example 3: Monomial Basis
#
# Consider $\mathcal{P}_3(x) = p_0 + p_1 x + p_2 x^2 + p_3 x^3$ with the four data points $(x_i, y_i), ~~ i = 0,1,2,3$. We have four equations and four unknowns as expected:
#
# $$\mathcal{P}_3(x_0) = p_0 + p_1 x_0 + p_2 x_0^2 + p_3 x_0^3 = y_0$$
#
# $$\mathcal{P}_3(x_1) = p_0 + p_1 x_1 + p_2 x_1^2 + p_3 x_1^3 = y_1$$
#
# $$\mathcal{P}_3(x_2) = p_0 + p_1 x_2 + p_2 x_2^2 + p_3 x_2^3 = y_2$$
#
# $$\mathcal{P}_3(x_3) = p_0 + p_1 x_3 + p_2 x_3^2 + p_3 x_3^3 = y_3$$
# + [markdown] slideshow={"slide_type": "subslide"}
# Lets rewrite these as a matrix equation:
#
# $$\mathbf{x} = \begin{bmatrix} x_0 \\ x_1 \\ x_2 \\ x_3 \end{bmatrix} \quad \mathbf{y} = \begin{bmatrix} y_0 \\ y_1 \\ y_2 \\ y_3 \end{bmatrix} \quad \mathbf{p} = \begin{bmatrix} p_0 \\ p_1 \\ p_2 \\ p_3 \end{bmatrix}$$
#
# When we write the system in matrix/vector form the matrix that arises is called *Vandermonde* matrix:
#
# $$
# V = \begin{bmatrix}
# 1 & x_0 & x_0^2 & x_0^3 \\
# 1 & x_1 & x_1^2 & x_1^3 \\
# 1 & x_2 & x_2^2 & x_2^3 \\
# 1 & x_3 & x_3^2 & x_3^3
# \end{bmatrix}.
# $$
# + [markdown] slideshow={"slide_type": "subslide"}
# We can now write the system of linear equations as $V \mathbf{p} = \mathbf{y}$:
#
# $$\begin{bmatrix}
# 1 & x_0 & x_0^2 & x_0^3 \\
# 1 & x_1 & x_1^2 & x_1^3 \\
# 1 & x_2 & x_2^2 & x_2^3 \\
# 1 & x_3 & x_3^2 & x_3^3
# \end{bmatrix} \begin{bmatrix} p_0 \\ p_1 \\ p_2 \\ p_3 \end{bmatrix} = \begin{bmatrix} y_0 \\ y_1 \\ y_2 \\ y_3 \end{bmatrix}.$$
# + [markdown] slideshow={"slide_type": "fragment"}
# **Note**: the columns of $V$ are simply the monomial functions sampled at the discrete points $x_i$. Because the monomials are linearly independent, so are the columns of $V$
# + [markdown] slideshow={"slide_type": "subslide"}
# $$\begin{bmatrix}
# 1 & x_0 & x_0^2 & x_0^3 \\
# 1 & x_1 & x_1^2 & x_1^3 \\
# 1 & x_2 & x_2^2 & x_2^3 \\
# 1 & x_3 & x_3^2 & x_3^3
# \end{bmatrix} \begin{bmatrix} p_0 \\ p_1 \\ p_2 \\ p_3 \end{bmatrix} = \begin{bmatrix} y_0 \\ y_1 \\ y_2 \\ y_3 \end{bmatrix}$$
#
# - What happens if we have redundant data? Either $(x_i, y_i)$ is repeated or for one $i$ we have two values of $y$.
# - What if we have more points then the order of polynomial we want?
# - How does this relate to solving the above linear system of equations?
# + [markdown] slideshow={"slide_type": "subslide"}
# Vandermonde matrices in general are defined as
#
# $$V = \begin{bmatrix}
# 1 & x_0 & x_0^2 & \cdots & x_0^N \\
# 1 & x_1 & x_1^2 & \cdots & x_1^N \\
# \vdots & \vdots & \vdots & \ddots & \vdots \\
# 1 & x_m & x_m^2 & \cdots & x_m^N \\
# \end{bmatrix}
# $$
#
# where $V$ is a $m \times n$ matrix with points $(x_i, y_i)$ for $i = 0, 1, 2, 3, \ldots m$ and for an order $N$ polynomial $\mathcal{P}_N(x)$.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Finding $p_i$
#
# Finding the coefficients of $\mathcal{P}_N(x)$ can be done by solving the system outlined above. There are functions in `numpy` that can do this for us such as:
# - `numpy.polyfit(x, y, x.shape[0] - 1)`
# - `numpy.vander(x, N=None)` to construct the matrix and use a linear solver routine.
#
# We can also use a different **basis** that might be easier to use.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Basis
#
# **Def:** A basis for a $N$ dimensional vector space is a set of linearly independent vectors that span the space.
#
# The monomials, $1,x,\ldots, x^n$, form the usual basis for the vector space of $n$th degree polynomials $P_N(x)$.
# + [markdown] slideshow={"slide_type": "subslide"}
# **Example** $P_2(x)$ is the space of all quadratic functions. i.e. $P_2(x) = \mathrm{span}< 1,x,x^2>$
#
# $$
# P_2(x) = p_0 + p_1 x + p_2 x^2
# $$
#
# i.e for every vector $\mathbf{p}\in\mathbb{R}^3$, there is a unique quadratic function in $P_2(x)$. (we say $P_2$ is *isomorphic* to $\mathbb{R}^3$ and is a three dimensional function space).
# + [markdown] slideshow={"slide_type": "fragment"}
# **However**, the monomials are not the only basis for $P_N$
# + [markdown] slideshow={"slide_type": "slide"}
# ### Lagrange Basis
#
# Given $N+1$ points $(x_0,y_0), (x_1,y_1), \ldots, (x_{N},y_{N})$ again assuming the $x_i$ are all unique, the interpolating polynomial $\mathcal{P}_N(x)$ can be written as
#
# $$\mathcal{P}_N(x) = \sum^{N}_{i=0} y_i \ell_i(x)$$
#
# where
#
# $$\ell_i(x) = \prod^{N}_{j=0, j \neq i} \frac{x - x_j}{x_i - x_j} = \frac{x - x_0}{x_i - x_0} \frac{x - x_1}{x_i - x_1} \cdots \frac{x - x_{i-1}}{x_i - x_{i-1}}\frac{x - x_{i+1}}{x_i - x_{i+1}} \cdots \frac{x - x_{N}}{x_i - x_{N}}$$
#
# are the **Lagrange Polynomials**
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Lagrange Polynomials
#
# $$\ell_i(x) = \prod^{N}_{j=0, j \neq i} \frac{x - x_j}{x_i - x_j} $$
# + [markdown] slideshow={"slide_type": "fragment"}
# A Key property of the Lagrange polynomials is that
#
# $$
# \ell_i(x_j) = \delta_{ij} = \left\{\begin{matrix}
# 0 & i\neq j \\
# 1 & i=j\\
# \end{matrix}\right.
# $$
# which is why the weights in $P_N(x)$ are simply the $y$ values of the interpolant
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Solving for the coefficients of $P_N(x)$
#
# In general, if
#
# $$
# P_N(x) = \sum_{n=0}^N w_j\phi_j(x)
# $$
# where $\phi_j(x)$ is any basis function for $P_N$ (i.e. monomial, Lagrange, and there are many more). Then finding the unique set of weights for the interpolating polynomial through $N+1$ distinct data points $(x_i, y_i)$, just reduces to solving $N+1$ linear equations $y_i = P_N(x_i)$.
# + [markdown] slideshow={"slide_type": "subslide"}
# For the monomial basis this reduces to the linear system
#
# $$
# V(\mathbf{x})\mathbf{w} = \mathbf{y}
# $$
#
# What is the matrix for the Lagrange Basis?
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Visualizing the Lagrange Polynomials
# + hide_input=false slideshow={"slide_type": "-"}
# ====================================================
# Compute the Lagrange basis (\ell_i(x))
def lagrange_basis(x, data):
    """Evaluate every Lagrange basis polynomial ell_i at the points *x*.

    data is an (N+1, 2) array of interpolation nodes (x_i, y_i); the return
    value is an (N+1, len(x)) array whose row i holds ell_i(x).
    """
    num_nodes = data.shape[0]
    basis = numpy.ones((num_nodes, x.shape[0]))
    for i in range(num_nodes):
        x_i = data[i, 0]
        for j in range(num_nodes):
            if j == i:
                continue  # skip the node's own factor
            basis[i, :] *= (x - data[j, 0]) / (x_i - data[j, 0])
    return basis
# ====================================================
# Calculate full polynomial
def poly_interpolant(x, data):
    """Evaluate the Lagrange-form interpolating polynomial through *data* at *x*."""
    weights = data[:, 1]          # y values are the basis weights
    ell = lagrange_basis(x, data)
    result = numpy.zeros(x.shape[0])
    for i in range(data.shape[0]):
        result += weights[i] * ell[i, :]
    return result
# ====================================================
# + slideshow={"slide_type": "subslide"}
# Four equally spaced nodes with constant value 1 (the basis rows must sum to 1).
x_data = numpy.array([0., 1., 2., 3.])
y_data = numpy.ones(x_data.shape)
data = numpy.array([x_data, y_data]).T
x = numpy.linspace(x_data.min(),x_data.max(),100)
# + hide_input=true slideshow={"slide_type": "-"}
# ====================================================
# Plot each Lagrange basis function for the four nodes above.
fig = plt.figure(figsize=(8, 6))
axes = fig.add_subplot(1, 1, 1)
basis = lagrange_basis(x, data)
for i in range(len(x_data)):
    axes.plot(x, basis[i, :], label="$\ell_{%s}(x)$" % i)
axes.set_title("Lagrange Basis $\ell_i(x)$")
axes.set_xlabel("x")
axes.set_ylabel("$\ell_i(x)$")
axes.grid()
axes.legend(loc='best')
plt.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Linear Independence of the Lagrange Polynomials
#
# Because the weights of each basis function in the Lagrange basis is just the $y$ value at the interpolation points, it is straightforward to show that the Lagrange polynomials are linearly independent. I.e. the statement
#
# $$
# \sum_{n=0}^N w_j\phi_j(x) =0
# $$
# is equivalent to interpolating the zero function, where all the $w_j =0$
# + [markdown] slideshow={"slide_type": "subslide"}
# **Example 0 Revisited** In example 0 above the linear polynomial that interpolates the coordinates $(1,3)$ and $(3,7)$ was simply stated as
# $$
# P_1(x) = 2(x-1)+3.
# $$
# Another way to look at this example is to first note that when we add two linear polynomials
# the result is another linear polynomial. The first polynomial to define interpolates $(1,1)$
# and $(3,0)$,
# $$
# \ell_0(x) = \frac{x-3}{1-3}.
# $$
# The second polynomial to define interpolates $(1,0)$ and $(3,1)$,
# $$
# \ell_1(x) = \frac{x-1}{3-1}.
# $$
#
# A linear combination of these two functions can be defined that will interpolate the points $(1,3)$ and $(3,7)$,
# $$
# P_1(x) = 3\cdot\ell_0(x) + 7\cdot\ell_1(x).
# $$
# The graphs of these functions are shown below.
#
# + hide_input=true slideshow={"slide_type": "subslide"}
# =============================================================
# Plot the two example basis functions in the current example
# (two sample points are enough to draw straight lines exactly).
x = numpy.linspace(1.0, 3.0, 2)
fig = plt.figure(figsize=(8, 6))
axes = fig.add_subplot(1, 1, 1)
axes.set_ylim([0,9])
# ell_0 interpolates (1,1),(3,0); ell_1 interpolates (1,0),(3,1).
axes.plot(x, (x-3)/(-2), color='r', label="$\ell_{%s}(x)$" % 0)
axes.plot(x, (x-1)/(2), color='b', label="$\ell_{%s}(x)$" % 1)
# The combination 3*ell_0 + 7*ell_1 passes through (1,3) and (3,7).
axes.plot(x, 3*(x-3)/(-2) + 7*(x-1)/(2),color='g',label='interpolant')
axes.set_title("Interpolant for (1,3) and (3,7)")
axes.set_xlabel("x")
axes.grid()
plt.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example 4: $N = 1$ Lagrange Polynomial
#
# Given 2 points $(x_0, y_0)$ and $(x_1, y_1)$ the Lagrange form of $\mathcal{P}_N(x)$ is given by
#
# $$\ell_0(x) = \frac{x - x_1}{x_0 - x_1}$$
#
# and
#
# $$\ell_1(x) = \frac{x - x_0}{x_1 - x_0}$$
#
# so that
#
# $$\mathcal{P}_1(x) = \ell_0(x) \cdot y_0 + \ell_1(x) \cdot y_1 = \frac{x - x_1}{x_0 - x_1} \cdot y_0 + \frac{x - x_0}{x_1 - x_0} \cdot y_1$$
#
# One important aspect of Lagrange polynomials to note is that the $\ell_i(x)$ functions are exactly 1 when $x = x_i$ and that every other $\ell_j(x)$ where $j \neq i$ is 0.
# + hide_input=false slideshow={"slide_type": "skip"}
# Two data points: the interpolant is a straight line.
data = numpy.array([[-1.5, -0.5], [0.0, 0.5]])
# + hide_input=true slideshow={"slide_type": "skip"}
# data = numpy.array([[-1.5, -0.5], [0.0, 0.5], [-0.5, 1.0]])
N = data.shape[0] - 1   # polynomial degree
M = data.shape[0]       # number of data points
x = numpy.linspace(-2.0, 2.0, 100)
# Plot individual basis functions
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
basis = lagrange_basis(x, data)
for i in range(N + 1):
    axes.plot(x, basis[i, :], label="$\ell_{%s}(x)$" % i)
axes.grid()
axes.set_title("Lagrange Basis $\ell_i(x)$")
axes.set_xlabel("x")
axes.set_ylabel("$\ell_i(x)$")
axes.legend(loc=8)
# Plot the full interpolating polynomial P_N(x) with the data points overlaid.
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, poly_interpolant(x, data), label="$P_{%s}(x)$" % N)
for point in data:
    axes.plot(point[0], point[1], 'ko')
axes.set_title("$P_N(x)$")
axes.set_xlabel("x")
axes.set_ylabel("$P_N(x)$")
axes.grid()
plt.show()
# + [markdown] hide_input=false slideshow={"slide_type": "subslide"}
# #### Example 5: Interpolate points from $\sin(2\pi x)$
#
# Use equally spaced points to approximate $\sin(2\pi x)$ on the interval $x \in [-1, 1]$. What is the behavior as $N \rightarrow \infty$? Also plot the error between $f(x)$ and the interpolant $P_N(x)$.
# + hide_input=false slideshow={"slide_type": "-"}
num_points = 21
# + hide_input=true slideshow={"slide_type": "-"}
# num_points = 5
# num_points = 6
# num_points = 20
# Sample f(x) = sin(2*pi*x) at equally spaced points on [-1, 1].
data = numpy.empty((num_points, 2))
data[:, 0] = numpy.linspace(-1, 1, num_points)
data[:, 1] = numpy.sin(2.0 * numpy.pi * data[:, 0])
N = data.shape[0] - 1  # Degree of polynomial
M = data.shape[0]
x = numpy.linspace(-1.0, 1.0, 100)
# ====================================================
# Left panel: the individual Lagrange basis functions.
fig = plt.figure(figsize=(16,6))
axes = fig.add_subplot(1, 2, 1)
basis = lagrange_basis(x, data)
for i in range(N + 1):
    axes.plot(x, basis[i, :], label="$\ell_{%s}(x)$" % i)
axes.set_title("Lagrange Basis $\ell_i(x)$")
axes.set_xlabel("x")
axes.set_ylabel("$\ell_i(x)$")
axes.legend(loc=1)
axes.grid()
# Right panel: the interpolant P_N(x) against the true function.
axes = fig.add_subplot(1, 2, 2)
axes.plot(x, poly_interpolant(x, data), label="$P_{%s}(x)$" % N)
axes.plot(x, numpy.sin(2.0 * numpy.pi * x), 'r--', label="True $f(x)$")
for point in data:
    axes.plot(point[0], point[1], 'ko')
axes.set_title("$P_N(x)$")
axes.set_xlabel("x")
axes.set_ylabel("$P_N(x)$")
axes.legend(loc=1)
axes.grid()
plt.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example 6: Runge's Function
#
# Interpolate $f(x) = \frac{1}{1 + 25 x^2}$ using 6 points of your choosing on $x \in [-1, 1]$.
#
# Try it with 11 points.
#
# Keep increasing the number of points and see what happens.
# + slideshow={"slide_type": "subslide"}
def f(x):
    """Runge's function 1/(1 + 25 x^2), the classic example of equispaced-node failure."""
    denominator = 1.0 + 25.0 * x**2
    return 1.0 / denominator
x = numpy.linspace(-1, 1, 100)
# x = numpy.linspace(-2, 2, 100)
num_points = 15
# + hide_input=true slideshow={"slide_type": "-"}
# num_points = 10
# num_points = 20
# Sample Runge's function at equally spaced nodes.
data = numpy.empty((num_points, 2))
data[:, 0] = numpy.linspace(-1, 1, num_points)
data[:, 1] = f(data[:, 0])
N = data.shape[0] - 1   # polynomial degree
# Plot the results
fig = plt.figure(figsize=(8,6))
axes = fig.add_subplot(1, 1, 1)
# Label by the actual degree N (the old label hard-coded "P_6" regardless of num_points).
axes.plot(x, poly_interpolant(x, data), 'b', label="$P_{%s}(x)$" % N)
axes.plot(x, f(x), 'k', label="True $f(x)$")
axes.plot(data[:, 0], data[:, 1], 'ro', label="data")
axes.set_title("Interpolation of Runge's function")
axes.set_xlabel("x")
axes.set_ylabel("y")
axes.legend(loc=1)
axes.grid()
plt.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example 7: Weierstrass "Monster" Function
#
# Defined as
# $$
# f(x) = \sum^\infty_{n=0} a^n \cos(b^n \pi x)
# $$
# such that
# $$
# 0 < a < 1 \quad \text{and} \quad a b > 1 + \frac{3\pi}{2}.
# $$
# This function is continuous everywhere but not differentiable anywhere.
# + hide_input=true slideshow={"slide_type": "fragment"}
def f(x, a=0.9, N=100):
    """Truncated Weierstrass function: sum_{n=0..N} a^n cos(b^n pi x).

    a : amplitude decay, must satisfy 0 < a < 1.
    N : truncation index (terms n = 0..N are summed).
    b is chosen just above the classical bound a*b > 1 + 3*pi/2 so the limiting
    function is continuous everywhere but differentiable nowhere.
    """
    summation = 0.0
    b = (1.0 + 3.0 / 2.0 * numpy.pi) / a + 0.01
    # (Removed a stray debug `print(b)` that fired on every call.)
    for n in range(N + 1):
        summation += a**n * numpy.cos(b**n * numpy.pi * x)
    return summation
x = numpy.linspace(-1, 1, 1000)
# x = numpy.linspace(-2, 2, 100)
num_points = 10
# Sample the (truncated) Weierstrass function at equally spaced nodes.
data = numpy.empty((num_points, 2))
data[:, 0] = numpy.linspace(-1, 1, num_points)
data[:, 1] = f(data[:, 0])
N = data.shape[0] - 1
# Plot the results.  The title and interpolant label previously said
# "Runge's function" / "$P_6(x)$" — copy-paste leftovers from the Runge cell.
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, poly_interpolant(x, data), 'b', label="$P_{%s}(x)$" % N)
axes.plot(x, f(x), 'k', label="True $f(x)$")
axes.plot(data[:, 0], data[:, 1], 'ro', label="data")
axes.set_title("Interpolation of the Weierstrass function")
axes.set_xlabel("x")
axes.set_ylabel("y")
axes.legend(loc=1)
plt.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Rules of Thumb
#
# - Avoid high-order interpolants when possible! Keep increasing the number of points and see what happens.
# - Avoid extrapolation - Increase the range of $x$ in the above example and check how good the approximation is beyond our sampling interval
# + [markdown] slideshow={"slide_type": "slide"}
# ### Error Analysis
#
# **Theorem:** Lagrange Remainder Theorem - Let $f(x) \in C^{N+1}[-1, 1]$, then
# $$
# f(x) = \mathcal{P}_N(x) + R_N(x)
# $$
# where $\mathcal{P}_N(x)$ is the interpolating polynomial and
# $$
# R_N(x) = Q(x) \frac{f^{(N+1)}(c)}{(N+1)!} \quad \text{with} \quad c \in [-1,1]
# $$
# with
# $$
# Q(x) = \prod^N_{i=0} (x - x_i) = (x-x_0)(x-x_1)\cdots(x-x_N) .
# $$
# + [markdown] slideshow={"slide_type": "subslide"}
# A few things to note:
# - For Taylor's theorem note that $Q(x) = (x - x_0)^{N+1}$ and the error only vanishes at $x_0$.
# - For Lagrange's theorem the error vanishes at all $x_i$.
# - To minimize $R_N(x)$ requires minimizing $|Q(x)|$ for $x \in [-1, 1]$.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Minimizing $R_N(x)$
#
# Minimizing the error $R_N(x)$ in Lagrange's theorem is equivalent to minimizing $|Q(x)|$ for $x \in [-1, 1]$.
#
# Minimizing error $\Leftrightarrow$ picking roots of $Q(x)$ or picking the points where the interpolant data is located. How do we this?
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Chebyshev Polynomials
#
# *Chebyshev polynomials* $T_N(x)$ are another basis that can be used for interpolation.
#
# First 5 polynomials
# $$T_0(x) = 1$$
#
# $$T_1(x) = x$$
#
# $$T_2(x) = 2 x^2 - 1$$
#
# $$T_3(x) = 4 x^3 - 3 x$$
#
# $$T_4(x) = 8x^4 - 8x^2 + 1$$
# + hide_input=false slideshow={"slide_type": "subslide"}
def cheb_poly(x, N):
    """Evaluate the degree-*N* Chebyshev polynomial T_N at the array *x*.

    Uses the three-term recurrence T_k = 2 x T_{k-1} - T_{k-2} with
    T_0 = 1 and T_1 = x, keeping only the last two terms.
    """
    previous = numpy.ones(x.shape)
    if N == 0:
        return previous
    current = x.astype(float)  # float copy, matching the original's buffer dtype
    if N == 1:
        return current
    for _ in range(N - 1):
        previous, current = current, 2.0 * x * current - previous
    return current
# + hide_input=true slideshow={"slide_type": "-"}
# Plot T_0 .. T_4 on [-1, 1].
x = numpy.linspace(-1, 1, 100)
fig = plt.figure(figsize=(8,6))
axes = fig.add_subplot(1, 1, 1)
for n in range(5):
    axes.plot(x, cheb_poly(x, n), label="$T_%s$" % n)
axes.set_ylim((-1.1, 1.1))
axes.set_title("Chebyshev Polynomials")
axes.set_xlabel("x")
axes.set_ylabel("$T_N(x)$")
axes.legend(loc='best')
axes.grid()
plt.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# 1. Chebyshev nodes of the 1st kind (roots)
# $$
# x_k = \cos \left (\frac{(2 k - 1) \pi}{2 N} \right ) \quad k = 1, \ldots, N
# $$
# 1. Chebyshev nodes of the 2nd kind (extrema)
# $$
# x_k = \cos \left( \frac{k \pi}{N} \right) \quad k = 0, \ldots, N
# $$
#
#
# + hide_input=false slideshow={"slide_type": "-"}
N = 4
# 2nd-kind nodes (extrema): cos(k*pi/N), k = 0..N;
# 1st-kind nodes (roots):   cos((2k-1)*pi/(2N)), k = 1..N.
x_extrema = numpy.cos(numpy.arange(N + 1) * numpy.pi / N)
x_nodes = numpy.cos((2.0 * numpy.arange(1, N + 1) - 1.0) / (2.0 * N) * numpy.pi)
# + hide_input=true slideshow={"slide_type": "fragment"}
fig = plt.figure()
# fig.set_figwidth(fig.get_figwidth() * 2)
axes = fig.add_subplot(1, 1, 1)
# Plot the node sets on the x-axis (red: extrema, blue: roots).
axes.plot(x_extrema, numpy.zeros(N+1), 'ro')
axes.plot(x_nodes, numpy.zeros(N), 'bo')
# Plot some helpful guide lines
axes.plot((-1.0, -1.0), (-1.1, 1.1), 'k--')
axes.plot((1.0, 1.0), (-1.1, 1.1), 'k--')
axes.plot((-1.0, 1.0), (0.0, 0.0), 'k--')
for i in range(x_extrema.shape[0]):
    # Vertical marker through each extremum and the extremum point on T_N itself.
    axes.plot((x_extrema[i], x_extrema[i]), (-1.1, 1.1), 'r--')
    axes.plot(x_extrema[i], cheb_poly(x_extrema, N)[i], 'ro')
print('Nodes = {}'.format(numpy.sort(x_nodes)))
print('Extrema = {}'.format(numpy.sort(x_extrema)))
#print(numpy.cos(x_extrema))
# Plot the Chebyshev polynomial itself.
x_hat = numpy.linspace(-1, 1, 1000)
axes.plot(x_hat, cheb_poly(x_hat, N), 'k')
axes.set_xlim((-1.1, 1.1))
axes.set_ylim((-1.1, 1.1))
# Labels
axes.set_title("Chebyshev Nodes and Extrema, N={}".format(N), fontsize="20")
axes.set_xlabel("x", fontsize="15")
axes.set_ylabel("$T_{N+1}(x)$", fontsize="15")
plt.show()
# + slideshow={"slide_type": "skip"}
# First-kind Nesting (3 x)
fig = plt.figure()
# fig.set_figwidth(fig.get_figwidth() * 2)
axes = fig.add_subplot(1, 1, 1)
N = 5
factor = 3
# Root (1st-kind) nodes for degrees N and factor*N, to illustrate nesting
x_1 = numpy.cos((2.0 * numpy.arange(1, N + 1) - 1.0) / (2.0 * N) * numpy.pi)
x_2 = numpy.cos((2.0 * numpy.arange(1, factor * N + 1) - 1.0) / (2.0 * factor * N) * numpy.pi)
axes.plot(x_1, numpy.zeros(N), "o", color="r", markerfacecolor="lightgray", markersize="15")
axes.plot(x_2, numpy.zeros(N * factor), 'kx', markersize="10")
# Overlay both Chebyshev polynomials on a fine grid
x_hat = numpy.linspace(-1, 1, 1000)
axes.plot(x_hat, cheb_poly(x_hat, N), 'k')
axes.plot(x_hat, cheb_poly(x_hat, factor * N), 'k')
axes.set_xlim((-1.1, 1.1))
axes.set_ylim((-1.1, 1.1))
axes.set_title("Nesting of 1st and 2nd Kind Chebyshev Polynomials")
axes.set_xlabel("$x$")
axes.set_ylabel("$T_N(x)$")
plt.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Properties of Chebyshev Polynomials
# 1. Defined by a recurrence relation
#
# $$T_k(x) = 2 x T_{k-1}(x) - T_{k-2}(x)$$
# + [markdown] slideshow={"slide_type": "fragment"}
# 2. Leading coefficient of $x^N$ in $T_N(x)$ is $2^{N-1}$ for $N \geq 1$
# + [markdown] slideshow={"slide_type": "fragment"}
# 3. Extreme values:
#
# $$|T_N(x)| \leq 1 \quad \text{for} \quad -1 \leq x \leq 1$$
#
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Properties of Chebyshev Polynomials
# 4. Minimax principle: The polynomial
#
# $$T(x) = \frac{T_{N+1}(x)}{2^N}$$
#
# is a *monic polynomial*, a univariate function with the leading coefficient equal to 1, with the property that
#
# $$
# \max |T(x)| \leq \max |Q(X)| \quad \text{for} \quad x \in [-1, 1], \quad \text{and}
# $$
# $$
# \max |T(x)| = \frac{1}{2^N}
# $$
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# Recall that the remainder term in the Lagrange Remainder Theorem was
# $$
# R_N(x) = Q(x) \frac{f^{(N+1)}(c)}{(N+1)!} \quad \text{with} \quad c \in [-1,1]
# $$
# with
# $$
# Q(x) = \prod^N_{i=0} (x - x_i) = (x-x_0)(x-x_1)\cdots(x-x_N) .
# $$
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Error Analysis Redux
#
# Given that the Chebyshev polynomials are a minimum on the interval $[-1, 1]$ we would like $T(x) = Q(x)$.
#
# Since we only know the roots of $Q(x)$ (the points where the interpolant data is located) we require these points to be the roots of the Chebyshev polynomial $T_{N+1}(x)$ therefore enforcing $T(x) = Q(x)$.
# + [markdown] slideshow={"slide_type": "subslide"}
# The zeros of $T_N(x)$ in the interval $[-1, 1]$ can be shown to satisfy
# $$
# x_k = \cos\left( \frac{(2k - 1) \pi}{2 N} \right ) \quad \text{for} \quad k=1, \ldots, N
# $$
# These nodal points (sampling the function at these points) can be shown to minimize interpolation error.
# + hide_input=true slideshow={"slide_type": "-"}
# Chebyshev roots as equally spaced angles on the unit semicircle
# projected down onto [-1, 1]
x = numpy.linspace(0, numpy.pi, 100)
N = 15
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1, aspect="equal")
axes.plot(numpy.cos(x), numpy.sin(x), 'r--')
axes.plot(numpy.linspace(-1.1, 1.1, 100), numpy.zeros(x.shape), 'r')
for k in range(1, N + 1):
    # Point on the semicircle at angle (2k-1)pi/(2N); its x-coordinate is a node
    location = [numpy.cos((2.0 * k - 1.0) * numpy.pi / (2.0 * N)),
                numpy.sin((2.0 * k - 1.0) * numpy.pi / (2.0 * N))]
    axes.plot(location[0], location[1], 'ko')
    axes.plot(location[0], 0.0, 'ko')
    axes.plot([location[0], location[0]], [0.0, location[1]], 'k--')
axes.set_xlim((-1.1, 1.1))
axes.set_ylim((-0.1, 1.1))
plt.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Summary
#
# 1. Minimizing the error in Lagrange's theorem is equivalent to minimizing
# $$
# |Q(x)| \quad \text{for} \quad x \in [-1, 1].
# $$
# 1. We know Chebyshev polynomials are a minimum on the interval $[-1, 1]$ so we would like to have $T(x) = Q(x)$.
# 1. Since we only know the roots of $Q(x)$ (the points where the interpolant data is located) we require these points to be the roots of the Chebyshev polynomial $T_{N+1}(x)$ therefore enforcing $T(x) = Q(x)$.
# 1. The zeros of $T_N(x)$ in the interval $[-1, 1]$ can be shown to satisfy
# $$
# x_k = \cos\left( \frac{(2k - 1) \pi}{2 N} \right ) \quad \text{for} \quad k=1, \ldots, N
# $$
# These nodal points (sampling the function at these points) can be shown to minimize interpolation error.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Notes
# - The Chebyshev nodes minimize interpolation error for any polynomial basis (due to uniqueness of the interpolating polynomial, any polynomial that interpolates these points are identical regardless of the basis).
# - Chebyshev nodes uniquely define the Chebyshev polynomials.
# - The boundedness properties of Chebyshev polynomials are what lead us to the roots as a minimization, but there are other uses for these orthogonal polynomials.
# - There are two kinds of Chebyshev nodes and therefore two definitions.
# + slideshow={"slide_type": "subslide"}
# Runge's function again
def f(x):
    """Evaluate Runge's function 1 / (1 + 25 x^2) at *x*."""
    denominator = 1.0 + 25.0 * x**2
    return 1.0 / denominator

# Evaluation grid and number of interpolation nodes
x = numpy.linspace(-1, 1, 100)
num_points = 5
# + hide_input=true slideshow={"slide_type": "fragment"}
# ============================================================
# Equidistant nodes
equidistant_data = numpy.empty((num_points, 2))
equidistant_data[:, 0] = numpy.linspace(-1, 1, num_points)
equidistant_data[:, 1] = f(equidistant_data[:, 0])
N = equidistant_data.shape[0] - 1
# NOTE: poly_interpolant is defined earlier in the notebook, not in this cell
P_lagrange = poly_interpolant(x, equidistant_data)
# ============================================================
# Chebyshev nodes
chebyshev_data = numpy.empty((num_points, 2))
chebyshev_data[:, 0] = numpy.cos((2.0 * numpy.arange(1, num_points + 1) - 1.0) * numpy.pi / (2.0 * num_points))
chebyshev_data[:, 1] = f(chebyshev_data[:, 0])
P_cheby1 = poly_interpolant(x, chebyshev_data)
# Fit directly with Chebyshev polynomials
coeff = numpy.polynomial.chebyshev.chebfit(chebyshev_data[:, 0], chebyshev_data[:, 1], N)
P_cheby2 = numpy.polynomial.chebyshev.chebval(x, coeff)
# Check on unique polynomials
#print(numpy.allclose(P_cheby1, P_cheby2))
# calculate 2-norm errors of each interpolant against the true f on the grid
equidistant_err = numpy.linalg.norm(P_lagrange - f(x))
cheb_err = numpy.linalg.norm(P_cheby1 - f(x))
# ============================================================
# Plot the results side by side: equispaced (left) vs Chebyshev (right)
fig = plt.figure(figsize=(16,6))
fig.subplots_adjust(hspace=.5)
axes = fig.add_subplot(1, 2, 1)
axes.plot(x, P_lagrange, 'b', label="$P_%s(x)$" % N)
axes.plot(x, f(x), 'k', label="True $f(x)$")
axes.plot(equidistant_data[:, 0], equidistant_data[:, 1], 'ro', label="data")
axes.set_title("Interpolation at Equispaced Points: err = {}".format(equidistant_err))
axes.set_xlabel("x")
axes.set_ylabel("y")
axes.legend(loc=8)
#print('Equispaced error = {}'.format(numpy.linalg.norm(P_lagrange - f(x))))
axes = fig.add_subplot(1, 2, 2)
axes.plot(x, f(x), 'k', label="True $f(x)$")
axes.plot(x, P_cheby1, 'b', label="$P_%s(x)$" % N)
axes.plot(chebyshev_data[:, 0], chebyshev_data[:, 1], 'ro', label="data")
axes.set_title("Interpolation at Chebyshev Points: err = {}".format(cheb_err))
axes.set_xlabel("x")
axes.set_ylabel("y")
axes.legend(loc=1)
#print('Chebyshev error = {}'.format(numpy.linalg.norm(P_cheby1 - f(x))))
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Piece-Wise Polynomial Interpolation
#
# Given $N$ points, use lower order polynomial interpolation to fit the function in pieces. We can choose the order of the polynomials and the continuity.
#
# - $C^0$: Interpolant is continuous
# - Linear interpolation
# - Quadratic interpolation
# - $C^1$: Interpolation and 1st derivative are continuous
# - Cubic Hermite polynomials (PCHiP)
# - $C^2$: Interpolation, 1st and 2nd derivatives are continuous
# - Cubic splines
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Piece-Wise Linear
#
# Given a segment between points $(x_k, y_k)$ and $(x_{k+1}, y_{k+1})$ define the segment as
#
# $$\mathcal{P}_k(x) = \frac{y_{k+1} - y_k}{x_{k+1} - x_k} (x - x_k) + y_k$$
#
# The final interpolant $\mathcal{P}(x)$ is then defined on $[x_k, x_{k+1}]$ using this function.
# + slideshow={"slide_type": "subslide"}
data = numpy.array([[1.0, 3.0], [2.0, 1.0], [3.5, 4.0], [5.0, 0.0], [6.0, 0.5], [9.0, -2.0], [9.5, -3.0]])
x = numpy.linspace(0.0, 10, 100)
N = data.shape[0] - 1
# Lagrange Basis
# NOTE: poly_interpolant is defined earlier in the notebook, not in this cell
P_lagrange = poly_interpolant(x, data)
# C^0 Piece-wise linear
# P_pw_linear = numpy.interp(x, data[:, 0], data[:, 1])
P_linear = numpy.zeros(x.shape)
# Each term adds the segment's line only on (x_{n-1}, x_n] via the boolean masks
for n in range(1, N + 1):
    P_linear += ((data[n, 1] - data[n - 1, 1]) / (data[n, 0] - data[n - 1, 0]) * (x - data[n - 1, 0])
                 + data[n - 1, 1]) * (x > data[n - 1, 0]) * (x <= data[n, 0])
# + hide_input=true slideshow={"slide_type": "-"}
# Add end points for continuity (extend constant beyond the data range)
P_linear += numpy.ones(x.shape) * data[0, 1] * (x < data[0, 0])
P_linear += numpy.ones(x.shape) * data[-1, 1] * (x >= data[-1, 0])
# Plot
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(data[:,0], data[:,1], 'ko')
axes.plot(x, P_lagrange, 'b--')
axes.plot(x, P_linear, 'r')
axes.set_title("Interpolated Data - $C^0$ Linear")
axes.set_xlabel("x")
axes.set_ylabel("$P_1(x)$")
axes.set_xlim([0.0, 10.0])
axes.set_ylim([-4.0, 15.0])
plt.show()
# + [markdown] hide_input=true slideshow={"slide_type": "subslide"}
# ### Piece-Wise Overlapping Polynomials
#
# In sets of three points $(x_{k+1}, y_{k+1})$, $(x_{k}, y_{k})$, and $(x_{k-1}, y_{k-1})$, find quadratic interpolant and define final interpolant $P(x)$ using the quadratic interpolant $\mathcal{P}_k(x)$ on $[x_{k-1}, x_{k+1}]$.
# + slideshow={"slide_type": "skip"}
data = numpy.array([[1.0, 3.0], [2.0, 1.0], [3.5, 4.0], [5.0, 0.0], [6.0, 0.5], [9.0, -2.0], [9.5, -3.0]])
x = numpy.linspace(0.0, 10, 100)
N = data.shape[0] - 1
# This isn't overlapping, it's more like C_0 P_2
# C^0 Piece-wise quadratic
P_quadratic = numpy.zeros(x.shape)
# Fit a quadratic through each consecutive triple of points (stride 2)
for k in range(1, N + 1, 2):
    p = numpy.polyfit(data[k - 1:k + 2, 0], data[k - 1:k + 2, 1], 2)
    P_quadratic += numpy.polyval(p, x) * (x > data[k - 1, 0]) * (x <= data[k + 1, 0])
# Add end points for continuity (extend constant beyond the data range)
P_quadratic += numpy.ones(x.shape) * data[0, 1] * (x < data[0, 0])
P_quadratic += numpy.ones(x.shape) * data[-1, 1] * (x >= data[-1, 0])
# Plot (P_lagrange is reused from the previous piece-wise linear cell)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(data[:,0], data[:,1], 'ko')
axes.plot(x, P_lagrange, 'b--')
axes.plot(x, P_quadratic, 'r')
axes.set_title("Interpolated Data - $C^0$ Quadratic")
axes.set_xlabel("x")
axes.set_ylabel("$P_3(x)$")
axes.set_xlim([0.0, 10.0])
axes.set_ylim([-4.0, 15.0])
plt.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Piece-Wise $C^1$ Cubic Interpolation
#
# For the previous two cases we had discontinous 1st derivatives! We can make this better by constraining the polynomials to be continuous at the boundaries of the piece-wise intervals.
# + [markdown] slideshow={"slide_type": "subslide"}
# Given a segment between points $(x_k, y_k)$ and $(x_{k+1}, y_{k+1})$ we want to fit a cubic function between the two points.
#
# $$\mathcal{P}_k(x) = p_0 + p_1 x + p_2 x^2 + p_3 x^3$$
#
# $$\mathcal{P}_k(x_k) = y_k, \quad \mathcal{P}_k(x_{k+1}) = y_{k+1}$$
# + [markdown] slideshow={"slide_type": "subslide"}
# Now we have 4 unknowns but only two data points! Constraining the derivative at each interval end will lead to two new equations and therefore we can solve for the interpolant.
#
# $$\frac{\text{d}}{\text{dx}} \mathcal{P}_k(x_k) = d_k, \quad \frac{\text{d}}{\text{dx}} \mathcal{P}_k(x_{k+1}) = d_{k+1}$$
#
# where we need to prescribe the $d_k$s. Since we know the polynomial we can write these 4 equations as
#
# $$\begin{aligned}
# p_0 + p_1 x_k + p_2 x_k^2 + p_3 x_k^3 &= y_k \\
# p_0 + p_1 x_{k+1} + p_2 x_{k+1}^2 + p_3 x_{k+1}^3 &= y_{k+1} \\
# p_1 + 2p_2 x_k + 3 p_3 x_k^2 &= d_k \\
# p_1 + 2 p_2 x_{k+1} + 3 p_3 x_{k+1}^2 &= d_{k+1}
# \end{aligned}$$
# + [markdown] slideshow={"slide_type": "subslide"}
# Rewriting this as a system we get
# $$\begin{bmatrix}
# 1 & x_k & x_k^2 & x_k^3 \\
# 1 & x_{k+1} & x_{k+1}^2 & x_{k+1}^3 \\
# 0 & 1 & 2 x_k & 3 x_k^2 \\
# 0 & 1 & 2 x_{k+1} & 3 x_{k+1}^2
# \end{bmatrix} \begin{bmatrix}
# p_0 \\ p_1 \\ p_2 \\ p_3
# \end{bmatrix} = \begin{bmatrix}
# y_k \\ y_{k+1} \\ d_k \\ d_{k+1}
# \end{bmatrix}$$
# + [markdown] slideshow={"slide_type": "subslide"}
# A common simplification to the problem description re-parameterizes the locations of the points such that $s \in [0, 1]$ and recast the problem with $(0, y_k)$ and $(1, y_{k+1})$. This simplifies the above system to
# $$\begin{bmatrix}
# 1 & 0 & 0 & 0 \\
# 1 & 1 & 1 & 1 \\
# 0 & 1 & 0 & 0 \\
# 0 & 1 & 2 & 3
# \end{bmatrix} \begin{bmatrix}
# p_0 \\ p_1 \\ p_2 \\ p_3
# \end{bmatrix} = \begin{bmatrix}
# y_k \\ y_{k+1} \\ d_k \\ d_{k+1}
# \end{bmatrix}$$
#
# which can be solved to find
#
# $$\begin{aligned}
# \mathcal{P}(s) &= (1-s)^2 (1 + 2s) y_k + s^2 (3 - 2 s) y_{k+1} + s (1 - s)^2 d_k - s^2 (1 - s)d_{k+1}\\
# \mathcal{P}'(s) &= 6s(s-1) y_k + 6s(1-s) y_{k+1} + (s-1)(3s-1) d_k - s(3s-2) d_{k+1}\\
# \mathcal{P}''(s) &= 6 (1-2s)(y_{k+1} - y_k) + (6s - 4) d_k + (6s-2) d_{k+1}
# \end{aligned}$$
#
# Now, how to choose $d_k$?
# + [markdown] slideshow={"slide_type": "subslide"}
# #### PCHIP
#
# Piecewise Cubic Hermite Interpolation Polynomial
#
# - Picks the slope that preserves monotonicity
# - Also tries to preserve the shape of the data
# - Note that in general this interpolant is $\mathcal{P}_k(x) \in C^1$
# + slideshow={"slide_type": "subslide"}
from scipy import interpolate

# Sample data points (x_k, y_k) and a fine evaluation grid
data = numpy.array([[1.0, 3.0], [2.0, 1.0], [3.5, 4.0], [5.0, 0.0],
                    [6.0, 0.5], [9.0, -2.0], [9.5, -3.0]])
x = numpy.linspace(0.0, 10, 100)

# C^1 piece-wise cubic Hermite (PCHIP) interpolant evaluated on the grid
P_pchip = interpolate.pchip_interpolate(data[:, 0], data[:, 1], x)
# + hide_input=true slideshow={"slide_type": "fragment"}
# Plot the PCHIP interpolant against the raw data points
fig = plt.figure(figsize=(8,6))
axes = fig.add_subplot(1, 1, 1)
axes.plot(data[:,0], data[:,1], 'ro')
axes.plot(x, P_pchip, 'r')
axes.set_title("Interpolated Data - $C^1$ Cubic PCHIP")
axes.set_xlabel("x")
axes.set_ylabel("$P_3(x)$")
axes.set_xlim([0.0, 10.0])
axes.set_ylim([-4.0, 15.0])
axes.grid()
plt.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Cubic Splines
#
# Enforces continuity on second derivatives as well:
#
# $$\mathcal{P}''_{k}(x_{k}) = \mathcal{P}''_{k-1}(x_k)$$
# + [markdown] slideshow={"slide_type": "subslide"}
# From our generalization before we know
#
# $$\mathcal{P}''(s) = 6 (1-2s)(y_{k+1} - y_k) + (6s - 4) d_k + (6s-2) d_{k+1}$$
#
# and our constraint now becomes
#
# $$\mathcal{P}''_{k}(0) = \mathcal{P}''_{k-1}(1)$$
#
# $$\mathcal{P}''_{k-1}(1) = 6 (1-2 \cdot 1)(y_{k} - y_{k-1}) + (6\cdot 1 - 4) d_{k-1} + (6\cdot 1-2) d_{k}$$
#
# $$\mathcal{P}''_{k}(0) = 6 (1-2 \cdot 0)(y_{k+1} - y_k) + (6\cdot 0 - 4) d_k + (6\cdot 0-2) d_{k+1}$$
#
# $$-6(y_{k} - y_{k-1}) + 2 d_{k-1} + 4 d_{k} = 6 (y_{k+1} - y_k) - 4 d_k -2 d_{k+1}$$
#
# We now have constraints on choosing the $d_k$ values. Note that we still need to prescribe them at the boundaries of the full interval.
# + [markdown] slideshow={"slide_type": "subslide"}
# This forms a linear set of equations for the $d_k$s based on the $y_k$ values and can be reformulated into a tri-diagonal linear system
#
# $$\begin{bmatrix}
# & \ddots & \ddots & \ddots & & &\\
# & 0 & 2 & 8 & 2 & 0 & & \\
# & & 0 & 2 & 8 & 2 & 0 & & & \\
# & & & 0 & 2 & 8 & 2 & 0 & & \\
# & & & & & \ddots & \ddots & \ddots &
# \end{bmatrix}\begin{bmatrix}
# \vdots \\ d_{k-1} \\ d_{k} \\ d_{k+1} \\ \vdots
# \end{bmatrix} = \begin{bmatrix}
# \vdots \\ 6 (y_{k} - y_{k-2}) \\ 6 (y_{k+1} - y_{k-1}) \\ 6 (y_{k+2} - y_{k}) \\\vdots
# \end{bmatrix}$$
#
# The boundaries are still left unconstrained and we must pick some rule to specify the derivatives there.
# + slideshow={"slide_type": "subslide"}
from scipy import interpolate

# Same sample data and evaluation grid as the PCHIP cell above
data = numpy.array([[1.0, 3.0], [2.0, 1.0], [3.5, 4.0], [5.0, 0.0],
                    [6.0, 0.5], [9.0, -2.0], [9.5, -3.0]])
x = numpy.linspace(0.0, 10, 100)

# C^2 cubic spline; smoothing parameter s=0 forces exact interpolation
# of the data (otherwise UnivariateSpline only approximates it)
P_spline = interpolate.UnivariateSpline(data[:, 0], data[:, 1], s=0)
# + hide_input=true slideshow={"slide_type": "skip"}
# Plot the C^2 spline against the PCHIP interpolant and the data
fig = plt.figure(figsize=(8,6))
axes = fig.add_subplot(1, 1, 1)
axes.plot(data[:,0], data[:,1], 'ro')
# P_spline is a callable UnivariateSpline object; evaluate it on the grid
axes.plot(x, P_spline(x), 'r', label = '$C^2$')
axes.plot(x, P_pchip, 'b--', label = 'Pchip')
axes.set_title("Interpolated Data - $C^2$ Cubic Splines")
axes.set_xlabel("x")
axes.set_ylabel("$P_3(x)$")
axes.set_xlim([0.0, 10.0])
axes.set_ylim([-4.0, 15.0])
axes.grid()
axes.legend(loc='best')
plt.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Let's compare all of these methods
# + hide_input=true slideshow={"slide_type": "skip"}
import scipy.interpolate as interpolate
data = numpy.array([[1.0, 3.0], [2.0, 1.0], [3.5, 4.0], [5.0, 0.0], [6.0, 0.5], [9.0, -2.0], [9.5, -3.0]])
x = numpy.linspace(0.0, 10, 100)
# Lagrange Basis
N = data.shape[0] - 1
# Build the N+1 Lagrange basis polynomials evaluated on the grid
lagrange_basis = numpy.ones((N + 1, x.shape[0]))
for i in range(N + 1):
    for j in range(N + 1):
        if i != j:
            lagrange_basis[i, :] *= (x - data[j, 0]) / (data[i, 0] - data[j, 0])
# Calculate full polynomial as the data-weighted sum of the basis
P_lagrange = numpy.zeros(x.shape[0])
for n in range(N + 1):
    P_lagrange += lagrange_basis[n, :] * data[n, 1]
# C^0 Piece-wise linear
# P_pw_linear = numpy.interp(x, data[:, 0], data[:, 1])
P_linear = numpy.zeros(x.shape)
# Each term adds the segment's line only on (x_{n-1}, x_n] via boolean masks
for n in range(1, N + 1):
    P_linear += ((data[n, 1] - data[n - 1, 1]) / (data[n, 0] - data[n - 1, 0]) * (x - data[n - 1, 0])
                 + data[n - 1, 1]) * (x > data[n - 1, 0]) * (x <= data[n, 0])
# Add end points for continuity
P_linear += numpy.ones(x.shape) * data[0, 1] * (x < data[0, 0])
P_linear += numpy.ones(x.shape) * data[-1, 1] * (x >= data[-1, 0])
# C^0 Piece-wise quadratic (one quadratic per consecutive triple, stride 2)
P_quadratic = numpy.zeros(x.shape)
for k in range(1, N + 1, 2):
    p = numpy.polyfit(data[k - 1:k + 2, 0], data[k - 1:k + 2, 1], 2)
    P_quadratic += numpy.polyval(p, x) * (x > data[k - 1, 0]) * (x <= data[k + 1, 0])
# Add end points for continuity
P_quadratic += numpy.ones(x.shape) * data[0, 1] * (x < data[0, 0])
P_quadratic += numpy.ones(x.shape) * data[-1, 1] * (x >= data[-1, 0])
# C^1 Piece-wise PCHIP
P_pchip = interpolate.pchip_interpolate(data[:, 0], data[:, 1], x)
# C^2 Piece-wise Splines (s=0 forces exact interpolation)
P_spline = interpolate.UnivariateSpline(data[:, 0], data[:, 1], s=0)
# Plot every method on the same axes for comparison
fig = plt.figure(figsize=(8,6))
axes = fig.add_subplot(1, 1, 1)
axes.plot(data[:,0], data[:,1], 'ko', label="Data")
axes.plot(x, P_lagrange, 'y', label="Lagrange")
axes.plot(x, P_linear, 'g', label="PW Linear")
axes.plot(x, P_quadratic, 'r', label="PW Quadratic")
axes.plot(x, P_pchip, 'c', label="PW Cubic - PCHIP")
axes.plot(x, P_spline(x), 'b', label="PW Cubic - Spline")
axes.grid()
axes.set_title("Interpolated Data - Method Comparisons")
axes.set_xlabel("x")
axes.set_ylabel("$P(x)$")
axes.legend(loc='best')
axes.set_xlim([0.0, 10.0])
axes.set_ylim([-4.0, 15.0])
plt.show()
# + [markdown] hide_input=false slideshow={"slide_type": "slide"}
# ## Relationship to Regression
#
# What if we have more data and want a lower degree polynomial but do not want to use a piece-wise defined interpolant?
# Regression techniques are often used to minimize a form of error between the data points $y_i$ at $x_i$ with an approximating function $f(x_i)$. Note that this is NOT interpolation anymore!
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Least-Squares
#
# One way of doing this is to require that we minimize the least-squares error
# $$
# E = \left( \sum^m_{i=1} |y_i - f(x_i)|^2 \right )^{1/2}.
# $$
# where as before we have data $y_i$ at locations $x_i$ and an approximating function $f(x_i)$.
# + [markdown] slideshow={"slide_type": "subslide"}
# From the beginning of our discussion we know we can write the interpolant as a system of linear equations which we can then solve for the coefficients of a monomial basis. If we wanted to fit a line
# $$
# \mathcal{P}_1(x) = p_0 + p_1 x
# $$
# to $N$ data points we would have
# $$
# \begin{bmatrix}
# 1 & x_1 \\
# 1 & x_2 \\
# \vdots & \vdots \\
# 1 & x_N
# \end{bmatrix} \begin{bmatrix}
# p_0 \\ p_1
# \end{bmatrix} = \begin{bmatrix}
# y_1 \\ y_2 \\ \vdots \\ y_N
# \end{bmatrix}
# $$
# or
# $$
# A p = y
# $$
# What's wrong with this system?
# + [markdown] slideshow={"slide_type": "subslide"}
# This leads to the likelihood that there is no solution to the system as
# $$
# A \in \mathbb{R}^{N \times 2}, p \in \mathbb{R}^{2 \times 1}, \text{ and } y \in \mathbb{R}^{N \times 1}.
# $$
#
# Instead we can solve the related least-squares system
# $$
# A^T A p = A^T y
# $$
# whose solution minimizes the least-square error defined before as $E$.
#
# Note: this is not the most stable way to solve least squares problems; in general, using an orthogonalization technique like $QR$ factorization is better numerically.
# + slideshow={"slide_type": "subslide"}
# Linear Least Squares Problem
N = 50
x = numpy.linspace(-1.0, 1.0, N)
# Noisy samples of a line: slope 1 plus uniform noise in [0, 1)
y = x + numpy.random.random((N))
# + slideshow={"slide_type": "-"}
# Vandermonde matrix for P_1(x) = p_0 + p_1 x
A = numpy.ones((x.shape[0], 2))
A[:, 1] = x
# Solve the normal equations A^T A p = A^T y for the coefficients
p = numpy.linalg.solve(A.T @ A, A.T @ y)
#p = numpy.linalg.lstsq(A, y, rcond=None)[0]
f = lambda x: p[0] + p[1] * x
# 2-norm of the residual, the quantity least squares minimizes
E = numpy.linalg.norm(y - f(x), ord=2)
# + hide_input=false slideshow={"slide_type": "fragment"}
# Plot the noisy data and the fitted line, with the residual norm in the title
fig = plt.figure(figsize=(8,6))
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, y, 'ko')
axes.plot(x, f(x), 'r')
axes.set_title("Least Squares Fit to Data, err={}".format(E))
axes.set_xlabel("$x$")
axes.set_ylabel("$f(x)$ and $y_i$")
axes.grid()
plt.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Themes and variations
#
# You can play all sorts of games, whether they are justified by the data or not, for example we can fit the same random data with a function like
#
# $$
# f(x) = p_0 + p_1\tanh(x)
# $$
#
# which is still a linear problem for the coefficients $p_0$ and $p_1$, however the vandermonde matrix now has columns of $\mathbf{1}$ and $\tanh\mathbf{x}$.
# + slideshow={"slide_type": "subslide"}
# Least squares fit with basis {1, tanh(x)} -- still linear in the coefficients
# (reuses the x grid and noisy y samples from the previous cell)
A = numpy.ones((x.shape[0], 2))
A[:, 1] = numpy.tanh(x)
# Solve the normal equations A^T A p = A^T y
p = numpy.linalg.solve(A.T @ A, A.T @ y)
# p = numpy.linalg.lstsq(A, y)[0]
f = lambda x: p[0] + p[1] * numpy.tanh(x)
# 2-norm of the residual for this basis
E = numpy.linalg.norm(y - f(x), ord=2)
# + hide_input=false slideshow={"slide_type": "fragment"}
# Plot the noisy data and the fitted tanh curve, residual norm in the title
fig = plt.figure(figsize=(8,6))
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, y, 'ko')
axes.plot(x, f(x), 'r')
axes.set_title("Least Squares Fit to Data, err = {}".format(E))
axes.set_xlabel("$x$")
axes.set_ylabel("$f(x)$ and $y_i$")
axes.grid()
plt.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Let ye be warned...
#
# 
#
# (Original image can be found at [Curve Fitting](https://xkcd.com/2048/).)
# 06_interpolation.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="liOrDEUwvgB0"
import os
from os import listdir
import pathlib
from shutil import copyfile
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf
import scipy.io.wavfile as wav
import pandas as pd
from zipfile import ZipFile
from tensorflow import keras
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras import layers
from tensorflow.keras import models
from IPython import display
from tensorflow.keras.models import Sequential
# Set seed for experiment reproducibility
seed = 42
tf.random.set_seed(seed)
np.random.seed(seed)
# + colab={"base_uri": "https://localhost:8080/"} id="HTwRFFXUviif" outputId="e854c933-bc32-4341-c7ea-531879617b07"
# Report how many GPUs TensorFlow can see
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
# + [markdown] id="ei7_biOz7pnn"
# # Helper Methods
# + [markdown] id="cc1_V-e_7wtT"
# **Extract content from zip**
# + id="PknbInrY7e2L"
def unzip(file):
    """Extract every member of the zip archive *file* into the working directory."""
    with ZipFile(file) as archive:  # default mode is 'r'
        archive.extractall()
# + [markdown] id="AxI5v1FmvtpG"
# ## Methods to convert the audio files to spectrograms
# + [markdown] id="wV7WG4eRwLd8"
# **Convert an audio file to a spectrogram**
# + id="mMQVChmavlVE"
def wav_to_spectrogram(audio_path, save_path, dimensions=(128, 128), noverlap=16, cmap='gray_r'):
    """Render the WAV file at *audio_path* as a borderless spectrogram image.

    Parameters:
        audio_path: path of the input .wav file.
        save_path: path of the output image file (e.g. a .png).
        dimensions: target (width, height) of the figure in pixels.
        noverlap: overlap between FFT windows, forwarded to specgram.
        cmap: matplotlib colormap name used to render the spectrogram.
    """
    sample_rate, samples = wav.read(audio_path)
    fig = plt.figure()
    # Size the figure so the saved image is roughly *dimensions* pixels
    fig.set_size_inches((dimensions[0]/fig.get_dpi(), dimensions[1]/fig.get_dpi()))
    # Axes that fill the entire figure with every decoration removed
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    # Fix: forward *cmap* -- it was accepted but silently ignored before, so
    # images were rendered with the default colormap instead of grayscale.
    # NOTE(review): Fs=2 is kept as-is even though *sample_rate* is available;
    # with the axes hidden it appears to affect only the axis scaling -- confirm.
    ax.specgram(samples, Fs=2, noverlap=noverlap, cmap=cmap)
    ax.xaxis.set_major_locator(plt.NullLocator())
    ax.yaxis.set_major_locator(plt.NullLocator())
    fig.savefig(save_path, bbox_inches="tight", pad_inches=0)
    # Close the figure to avoid accumulating open figures across many files
    plt.close(fig)
# + [markdown] id="ckx0sAPZwBQl"
# **Convert all audio files in a directory**
# + id="l-1RsRvcvsZg"
def convert_audio_dir_to_sectrogram(audio_dir, spectrogram_dir, dimensions=(128, 128), noverlap=16, cmap='gray_r'):
    """Convert every .wav file in *audio_dir* to a spectrogram PNG in *spectrogram_dir*.

    Every directory entry is printed; non-wav entries are skipped. The
    keyword arguments are forwarded to wav_to_spectrogram.
    """
    for file in listdir(audio_dir):
        # Fix: os.path.join works whether or not the directory arguments
        # carry a trailing '/' (plain concatenation required one).
        audio_path = os.path.join(audio_dir, file)
        spectrogram_path = os.path.join(spectrogram_dir, file.replace('.wav', '.png'))
        print(file)
        # Fix: endswith() instead of a substring test, so names that merely
        # contain '.wav' (e.g. 'a.wav.bak') are not treated as audio files.
        if file.endswith('.wav'):
            wav_to_spectrogram(audio_path,
                               spectrogram_path,
                               dimensions=dimensions,
                               noverlap=noverlap,
                               cmap=cmap)
# + [markdown] id="0qVVndgi8_KH"
# **Create needed folders**
# + id="wotK7vyO8m0U"
def create_needed_folders(path):
    """Create directory *path* (including parents) if it does not already exist."""
    if os.path.exists(path):
        return
    os.makedirs(path)
# + id="7_XwLiz19kql"
# Directories containing the raw audio for each class.
# The trailing '/' is significant in the original pipeline, where paths
# are built by plain string concatenation.
# Single Car Folder
single_car_audio = './single-car/'
# Multiple Car Folder
multiple_car_audio = './multiple-cars/'
# Bike Folder
bike_audio = './bike/'
# City Folder
city_audio = './city/'
# + id="Zw3f8_q_89mh"
# Output directories for the generated spectrogram images (one per class)
# Spectrogram Folder
spectrograms_path = './spectrograms'
# Single Car Spectrogram Folder
single_car_spectrograms = './spectrograms/single-car/'
# Multiple Car Spectrogram Folder
multiple_cars_spectrograms = './spectrograms/multiple-cars/'
# Bike Spectrogram Folder
bike_spectrograms = './spectrograms/bike/'
# City Spectrogram Folder
city_spectrograms = './spectrograms/city/'
# + id="pLQJMceO9Tg1"
# Ensure every class output folder exists before conversion
create_needed_folders(single_car_spectrograms)
create_needed_folders(multiple_cars_spectrograms)
create_needed_folders(bike_spectrograms)
create_needed_folders(city_spectrograms)
# + id="HcXLUwdV07o2"
#people_audio = './people/'
#people_spectrograms = './spectrograms/people/'
#create_needed_folders(people_spectrograms)
# + [markdown] id="4cmY34Kg8DDK"
# # Prepare Audio Data for Training
# + [markdown] id="1edEEvJA8JDF"
# **Extract the audio data**
# + id="kcuhVvdnwAoF"
# Extract each class's audio archive into the working directory
unzip('single-car.zip')
# + id="JVnN4_KhFafY"
unzip('multiple-cars.zip')
# + id="aotAiIW9FaTy"
unzip('bike.zip')
# + id="q6Wjep0j79en"
unzip('city.zip')
# + [markdown] id="lp36rtU4_eO8"
# **Convert all audio files to spectrograms**
# + id="Z4BMxZ698R7J" colab={"base_uri": "https://localhost:8080/"} outputId="719f0007-8944-4e67-af02-f536f95d25cf"
# Single Car
convert_audio_dir_to_sectrogram(single_car_audio, single_car_spectrograms)
# + id="bBmB6D9GFkB1" colab={"base_uri": "https://localhost:8080/"} outputId="a3bfdfe6-52a0-4aab-db31-c901342e0746"
# Multiple Cars
convert_audio_dir_to_sectrogram(multiple_car_audio, multiple_cars_spectrograms)
# + id="0JOgEANtFo_J" colab={"base_uri": "https://localhost:8080/"} outputId="1ce743d8-cd1d-400e-c302-25173da358d2"
# Bike
convert_audio_dir_to_sectrogram(bike_audio, bike_spectrograms)
# + id="fr6wDoeS9v4l" colab={"base_uri": "https://localhost:8080/"} outputId="8dc430d2-676f-49ab-d158-7153d662b0e9"
# City
convert_audio_dir_to_sectrogram(city_audio, city_spectrograms)
# + [markdown] id="kXLm9L01_kGa"
# **Split into training / testing data**
# + id="YQAB-Cri_Sru"
# Folders for a manual train/test split.
# NOTE(review): these directories are created but the datasets below are
# built from spectrograms_path with validation_split -- confirm they are needed.
training_data = './training'
testing_data = './testing'
# + id="YlbyZ4NA_9EI"
create_needed_folders(training_data)
create_needed_folders(testing_data)
# + id="dhOiADfRAHEf"
# Input geometry matching the 128x128 spectrograms produced above
batch_size = 64
img_height = 128
img_width = 128
# + colab={"base_uri": "https://localhost:8080/"} id="ui5QApZXAGZu" outputId="d96391b3-fdf5-4417-89e3-2715b70d1224"
# 80/20 train/validation split over the spectrogram folders; the same seed
# on both calls keeps the two subsets disjoint and consistent
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    spectrograms_path,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
# + colab={"base_uri": "https://localhost:8080/"} id="cgVM1EIGAZqU" outputId="85c8781e-12e1-4801-dd3d-7d255b5f0011"
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    spectrograms_path,
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
# + colab={"base_uri": "https://localhost:8080/"} id="YUSnzNthAagv" outputId="49bfd298-47ce-4831-807b-545c77818bea"
# Class names are inferred from the sub-directory names
class_names = train_ds.class_names
print(class_names)
# + [markdown] id="Z_mAT3_vAkFP"
# # Create the AI Model
# + id="a0fdT3X5Ad-X"
# Cache and prefetch the datasets for input-pipeline throughput
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
# + id="rv7iXO1iAf4O"
# Maps pixel values from [0, 255] down to [0, 1]
normalization_layer = layers.experimental.preprocessing.Rescaling(1./255)
# + id="mCU2qYdYAsac"
# NOTE(review): normalized_ds / image_batch are not used below -- the model's
# first layer rescales the raw datasets itself, so this cell looks vestigial.
normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
image_batch, labels_batch = next(iter(normalized_ds))
# + id="dvvHXCXjAw0a"
num_classes = len(class_names)
# Small CNN classifier; the final Dense layer emits raw logits (softmax is
# applied later, at prediction time)
model = Sequential([
    # Rescaling is built into the model, so raw [0, 255] images can be fed directly
    layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
    layers.Conv2D(64, 3, activation='relu'),
    layers.Conv2D(128, 3, activation='relu'),
    layers.MaxPooling2D(),
    layers.Dropout(0.25),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(num_classes),
])
# + id="_fqq-q67A0Jz"
# from_logits=True because the model's last layer has no softmax
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="Ei-NMHgbA1k3" outputId="705d3fbf-d72e-48ec-91fa-3bf2d8784e87"
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="odsawja3A3Cc" outputId="410fa255-1500-4c3a-cfec-af8b74f7c226"
# Train for a fixed number of epochs, validating after each epoch
epochs=10
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=epochs
)
# + [markdown] id="R6w4Bd8qW-DQ"
# **Save the Model**
# + colab={"base_uri": "https://localhost:8080/"} id="osCqK_GYXCuc" outputId="949119cd-865c-404d-8ad0-3f7309111146"
# !mkdir -p saved_model
# Save in the TensorFlow SavedModel format
model.save('saved_model/model')
# + colab={"base_uri": "https://localhost:8080/"} id="Q34jHdHLXehL" outputId="3996b7c1-07f7-4f5a-eb76-4455ce28d9c0"
# !tar -czvf model.tar.gz saved_model/model/
# + id="vcoYMAGOX6UX"
# !ls -la
# + [markdown] id="-2x36ST6YlWN"
# **Convert To Tensorflow Lite (testing)**
# + id="ybNPAjWVYpmo"
# Convert the model
converter = tf.lite.TFLiteConverter.from_saved_model('./saved_model/model') # path to the SavedModel directory
tflite_model = converter.convert()
# Save the model.
with open('model.tflite', 'wb') as f:
    f.write(tflite_model)
# + [markdown] id="y5gOViNUBGl9"
# # Test the AI Model
# + id="9UVw00YmCkpk"
def get_prediction(file):
    """Print the model's most likely class and confidence for the image at *file*.

    Loads the image at the model's 128x128 input size, wraps it in a batch
    of one, and reports the softmax-top class from the module-level *model*.
    """
    img = keras.preprocessing.image.load_img(file, target_size=(128, 128))
    batch = tf.expand_dims(keras.preprocessing.image.img_to_array(img), 0)
    score = tf.nn.softmax(model.predict(batch)[0])
    print(
        "This image most likely belongs to {} with a {:.2f} percent confidence."
        .format(class_names[np.argmax(score)], 100 * np.max(score))
    )
# + id="hn7-0w7WA46p"
# Convert one sample clip to a spectrogram and classify it with the model
car_eval_audio = './output.wav'
car_eval_spectrogram = './output.png'
wav_to_spectrogram(car_eval_audio, car_eval_spectrogram, dimensions=(128, 128), noverlap=16, cmap='gray_r')
# + id="WBONCQ08CPuX"
get_prediction(car_eval_spectrogram)
# + id="4fpEk4UCzIl3"
# notebook.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="zWmv9Y7Bk2gK"
# # BERT Language Model
#
#
# ## Creating vocab
#
# + id="SVU1jFLikBIH" colab={"base_uri": "https://localhost:8080/"} outputId="b33cb0a6-bb78-4bb7-ae92-899d721b934e"
from google.colab import drive
drive.mount('/content/drive')

import re
import string

# Extract <text>...</text> article bodies from the MediaWiki XML dump and
# write each article as a single blank-line-separated paragraph of plain text.
article_lines = []
article_open = False
# Build the punctuation-deletion table once instead of re-importing `string`
# and rebuilding it for every line of the dump (the original did both inside
# the loop).
punct_table = str.maketrans('', '', string.punctuation)
with open('/content/drive/My Drive/arywiki.txt', mode='w', newline='\n', encoding='utf-8') as ofile:
    with open('/content/drive/My Drive/arywiki.xml', mode='r', newline='\n', encoding='utf-8') as file:
        for line in file:
            if '<text' in line:
                article_open = True
            elif '</text>' in line:
                article_open = False
                # Skip the first captured line (the <text ...> tag itself) and
                # flush the article as one space-joined paragraph.
                for oline in article_lines[1:]:
                    if oline != '\n':
                        ofile.write(str(oline).rstrip() + " ")
                ofile.write("\n\n")
                article_lines = []
            else:
                if article_open:
                    # Blank out Latin letters and digits (the corpus is
                    # Arabic-script Darija), then strip ASCII punctuation.
                    line = re.sub(r'[A-Za-z0-9]', ' ', line)
                    line = line.translate(punct_table)
                    line = line.rstrip()
                    article_lines.append(line)
                    print(line)
# + id="qdkOtAIacqT5" colab={"base_uri": "https://localhost:8080/"} outputId="6ec37cd6-60ba-42dd-818d-5907716c1eff"
# !pip install tokenizers
# + id="Gd9hSlbokO_R"
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.processors import TemplateProcessing
from tokenizers.pre_tokenizers import Whitespace
from tokenizers import normalizers
from tokenizers.normalizers import Lowercase, NFD, StripAccents

# Build a BERT-style WordPiece tokenizer.
# FIX: pass unk_token so that encoding text containing out-of-vocabulary
# tokens maps them to [UNK] instead of raising an error (per the HF
# tokenizers quicktour, WordPiece should be constructed with its unk_token).
bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
# Unicode-normalise (NFD), lowercase and strip accents before tokenising.
bert_tokenizer.normalizer = normalizers.Sequence([NFD(), Lowercase(), StripAccents()])
bert_tokenizer.pre_tokenizer = Whitespace()
# Wrap every encoding in BERT's [CLS] ... [SEP] template. The hard-coded ids
# 1 and 2 match the special-token declaration order used by the
# WordPieceTrainer below ([UNK]=0, [CLS]=1, [SEP]=2).
bert_tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[
        ("[CLS]", 1),
        ("[SEP]", 2),
    ],
)
# + id="RPrNyyFDkjf8"
from tokenizers.trainers import WordPieceTrainer
# Train the WordPiece vocabulary on the cleaned wiki dump. Special tokens get
# the first ids in declaration order ([UNK]=0, [CLS]=1, [SEP]=2, [PAD]=3, [MASK]=4).
trainer = WordPieceTrainer(
    vocab_size=30522, special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
)
files = ['/content/drive/My Drive/arywiki.txt']
bert_tokenizer.train(files, trainer)
# Persist the trained vocab, reload it with an explicit unk_token, then save
# the complete tokenizer (normalizer + pre/post processors) as one JSON file.
model_files = bert_tokenizer.model.save("/content/drive/My Drive/data", "bert-wiki")
bert_tokenizer.model = WordPiece.from_file(*model_files, unk_token="[UNK]")
bert_tokenizer.save("/content/drive/My Drive/data/bert-wiki.json")
# + id="tt0I1EuJkmC0" colab={"base_uri": "https://localhost:8080/"} outputId="e494fe5f-a5fa-4cd7-c86c-b5e30269a497"
### Decoding
# Encode a Darija sentence and show the resulting token ids.
output = bert_tokenizer.encode("نسبة نّاس اللي خدامين في لقطاع لخاص")
print(output.ids)
# + id="_pkO9C79koQc" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="0986ad79-d76d-40f6-c8fd-cc330ebb2922"
# Decode without a dedicated decoder; the WordPiece decoder set in the next
# cell is what merges ## continuation pieces back into whole words.
bert_tokenizer.decode([1, 1899, 1914, 1888, 1966, 1917, 2260, 2239, 2])
# + id="dQBlR6iXkqkS" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="b38850e5-f64c-41a7-fb2e-bc0e246e0771"
from tokenizers import decoders
bert_tokenizer.decoder = decoders.WordPiece()
bert_tokenizer.decode(output.ids)
# + id="cbLr3zrJ4n6b" colab={"base_uri": "https://localhost:8080/"} outputId="789ba473-8e72-4c89-e5da-acc5e03eae50"
bert_tokenizer.get_vocab_size()
# + [markdown] id="LD8xBpe6kxuJ"
# # 2. Train a byte-level BPE tokenizer (RoBERTa-style)
# + id="Wx4qwVARkt85" colab={"base_uri": "https://localhost:8080/"} outputId="75167a1c-7792-429e-afbc-3741bd8e8200"
from pathlib import Path
from tokenizers import ByteLevelBPETokenizer
# (Path is imported but not used in this cell.)
paths = ['/content/drive/My Drive/arywiki.txt']
# Initialize a tokenizer
tokenizer = ByteLevelBPETokenizer()
# Customize training
# Special tokens follow the RoBERTa convention (<s>, <pad>, </s>, <unk>, <mask>).
tokenizer.train(files=paths, vocab_size=52_000, min_frequency=2, special_tokens=[
    "<s>",
    "<pad>",
    "</s>",
    "<unk>",
    "<mask>",
])
# Save files to disk
tokenizer.save_model("/content/drive/My Drive/data/", "darijabert")
# + id="1tE3Dbz4lMwr" colab={"base_uri": "https://localhost:8080/"} outputId="3c0fe49b-14c0-4c07-9353-b8b9be07a0a3"
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing
# Reload the trained BPE from its vocab/merges files.
tokenizer = ByteLevelBPETokenizer(
    '/content/drive/My Drive/data/darijabert-vocab.json',
    '/content/drive/My Drive/data/darijabert-merges.txt',
)
# Wrap every encoding as <s> ... </s>, mirroring RoBERTa's sequence format.
tokenizer._tokenizer.post_processor = BertProcessing(
    ("</s>", tokenizer.token_to_id("</s>")),
    ("<s>", tokenizer.token_to_id("<s>")),
)
# Cap encodings at the model's maximum input length.
tokenizer.enable_truncation(max_length=512)
print(tokenizer.encode("نسبة نّاس اللي خدامين في لقطاع لخاص").tokens)
# + [markdown] id="WQpUC_CDhnWW"
# # 3. Train a language model from scratch
#
#
#
# + id="kD140sFjh0LQ" colab={"base_uri": "https://localhost:8080/"} outputId="db522df1-a56c-4b05-b7dc-beade7e00525"
# Check that we have a GPU
# !nvidia-smi
# + id="VNZZs-r6iKAV" colab={"base_uri": "https://localhost:8080/"} outputId="4d9f117b-273c-43bf-a449-016fd89493c2"
# Check that PyTorch sees it
import torch
torch.cuda.is_available()
# + [markdown] id="u0qQzgrBi1OX"
# ### We'll define the following config for the model
# + id="GJOdUtSheg86" colab={"base_uri": "https://localhost:8080/"} outputId="6b4c5e88-4abe-4e6b-819d-335b9de0a5c4"
# !pip install transformers
# + id="LTXXutqeDzPi"
from transformers import RobertaConfig
# Small RoBERTa: 6 layers, 12 heads. vocab_size matches the 52k BPE trained
# above; 514 positions = 512 usable tokens plus RoBERTa's two reserved
# offset positions (see the RobertaConfig docs).
config = RobertaConfig(
    vocab_size=52_000,
    max_position_embeddings=514,
    num_attention_heads=12,
    num_hidden_layers=6,
    type_vocab_size=1,
)
# + id="4keFBUjQFOD1" colab={"base_uri": "https://localhost:8080/"} outputId="4898456c-4c96-460c-ffe4-83ce5eea8069"
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing
from google.colab import drive
drive.mount('/content/drive')
tokenizer = ByteLevelBPETokenizer(
    '/content/drive/My Drive/data/darijabert-vocab.json',
    '/content/drive/My Drive/data/darijabert-merges.txt',
)
# + id="BzMqR-dzF4Ro"
from transformers import RobertaForMaskedLM
# Fresh, randomly initialised masked-LM model built from the config above.
model = RobertaForMaskedLM(config=config)
# + id="jU6JhBSTKiaM" colab={"base_uri": "https://localhost:8080/"} outputId="6b8a14e2-2104-4c73-fcfe-d8f8598d8f53"
model.num_parameters()
# => 84 million parameters
# + [markdown] id="jBtUHRMliOLM"
# ## Build our training Dataset
#
# + id="GlvP_A-THEEl"
from transformers import PreTrainedTokenizerFast
# Save the tokenizer you trained
tokenizer.save("byte-level-BPE.tokenizer.json")
# Load it using transformers
# PreTrainedTokenizerFast wraps the raw `tokenizers` object so it can be
# consumed by transformers' datasets, collators and Trainer.
tokenizer = PreTrainedTokenizerFast(tokenizer_file="byte-level-BPE.tokenizer.json")
# + id="XCjqDD-KlsiD" colab={"base_uri": "https://localhost:8080/"} outputId="ce2c4109-5841-4834-a056-f7108f198cdf"
# %%time
from transformers import LineByLineTextDataset
# One training example per non-blank line of the corpus, truncated/blocked
# at 128 tokens.
dataset = LineByLineTextDataset(
    tokenizer=tokenizer,
    file_path= '/content/drive/My Drive/arywiki.txt',
    block_size=128,
)
# + id="KmLBox1ilwAc"
from transformers import DataCollatorForLanguageModeling
# Collator that randomly masks 15% of tokens and sets labels for the
# masked-LM objective.
# FIX: mlm must be True here — the model is RobertaForMaskedLM and is later
# evaluated with a fill-mask pipeline. With mlm=False the collator prepares
# causal-LM labels (a copy of the inputs) and mlm_probability is ignored
# entirely (see DataCollatorForLanguageModeling docs).
data_collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer, mlm=True, mlm_probability=0.15
)
# + [markdown] id="Wnx9JO2KlyAE"
# ## Initialize Trainer
# + id="rrdxHbNolyxZ"
from transformers import Trainer, TrainingArguments

training_args = TrainingArguments(
    output_dir='/content/drive/My Drive/DarijaBERT',
    overwrite_output_dir=True,
    num_train_epochs=1,
    # FIX: per_gpu_train_batch_size is deprecated in transformers;
    # per_device_train_batch_size is the supported equivalent.
    per_device_train_batch_size=64,
    save_steps=10_000,
    save_total_limit=2,  # keep only the two most recent checkpoints on Drive
    prediction_loss_only=True,
)

trainer = Trainer(
    model=model,
    args=training_args,
    data_collator=data_collator,  # masked-LM collator defined above
    train_dataset=dataset,
)
# + id="HTulEnjf_Gmz" colab={"base_uri": "https://localhost:8080/"} outputId="8e005427-e316-4d6b-d713-a8cb9e8139e8"
# Sanity-check the LM head (its out_features should equal the vocab size).
print(model.get_output_embeddings())
# + id="yB181OSjl13q" colab={"base_uri": "https://localhost:8080/"} outputId="73f9c782-a90d-486f-d46f-03601c7ea857"
# The reloaded fast tokenizer has no pad/mask token registered; the collator
# needs both. NOTE(review): the BPE vocab was trained with '<pad>'/'<mask>',
# so adding '[PAD]'/'[MASK]' may APPEND new tokens and grow the vocabulary
# beyond the 52k the model was built with — if the cell output shows a
# non-zero "added" count, model.resize_token_embeddings() must be called
# before training. Verify.
tokenizer.add_special_tokens({'pad_token': '[PAD]'})
tokenizer.add_special_tokens({'mask_token': '[MASK]'})
# + [markdown] id="vgBuvrzKl29i"
# ### Start training
# + id="_WSWXcUv6kb7"
import torch
# Free cached GPU memory before the run starts.
torch.cuda.empty_cache()
# + id="egTXzkNYl4lc" colab={"base_uri": "https://localhost:8080/", "height": 408} outputId="ccaed5f3-f038-48aa-d7ca-607d8d8be97e"
# %%time
trainer.train()
# + [markdown] id="Qnw57vJnl7Ub"
# ### Save final model (+ tokenizer + config) to disk
# + id="nFOVCURpl8ZM" colab={"base_uri": "https://localhost:8080/"} outputId="ee28c734-27e7-4a6c-b1d8-108207bab538"
trainer.save_model("DarijaBERT_model")
# + [markdown] id="d0caceCy_p1-"
# ## 4. Check that the LM actually trained
# + id="ltXgXyCbAJLY" colab={"base_uri": "https://localhost:8080/"} outputId="a98d5eec-456a-441f-98e7-bf4191544c34"
from transformers import pipeline
from transformers import PreTrainedTokenizerFast
# Reload the tokenizer, re-register the mask token, and build a fill-mask
# pipeline over the freshly saved checkpoint.
tokenizer = PreTrainedTokenizerFast(tokenizer_file="byte-level-BPE.tokenizer.json")
tokenizer.add_special_tokens({'mask_token': '[MASK]'})
mask_fill = pipeline("fill-mask", model="DarijaBERT_model", tokenizer=tokenizer)
# + id="UIvgZ3S6AO0z" colab={"base_uri": "https://localhost:8080/"} outputId="c23a312d-79df-41a3-bd66-899d9a736ec6"
# Qualitative checks: the model should propose plausible Darija completions.
mask_fill("رسام صانع طباعة{}.".format(tokenizer.mask_token))
# + colab={"base_uri": "https://localhost:8080/"} id="_lM8EF7LbMae" outputId="f80b3486-ed96-4ded-94bc-970d3e46502f"
mask_fill("كايتعتابر من أساطير لفن لمغريبي حيتاش {}.".format(tokenizer.mask_token))
# + [markdown] id="UMjlNV0Zapzg"
#
| Darija_bert.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pvlibs
# language: python
# name: pvlibs
# ---
# ### Import Libraries
# +
## general functions ##
# random number generation
import random
## process monitoring ##
# timing of process components
import time
## data processing ##
# numerical image array handling / manipulation
import numpy as np
# image convolution and analysis
import scipy.ndimage as ndi
## visual display ##
# set qt(5) as rendering framework (display)
# %matplotlib qt
# matplotlib plotting for interactive image display
import matplotlib.pyplot as plt
# display colour / colourmap handling
import matplotlib.colors as colors
#import matplotlib.cm as cmx
# -
# ### Visual Display Framework
# +
### Initialise Interactive Display Figure ###
## Inputs ##
# none
## Outputs ##
# fig - matplotlib.pyplot figure reference
# axs - figure axes reference
def init_display(_ticks = False, _labels = False, _ax_lim = False):
    """Create an interactive matplotlib figure with one dark, minimal axes.

    _ticks / _labels: keep tick marks / tick labels when True (both hidden
    by default for a clean display).
    _ax_lim: half-width of a symmetric axis range centred on zero; any falsy
    value leaves autoscaling in place.
    Returns (figure, axes).
    """
    # interactive mode so subsequent draws update without blocking
    plt.ion()
    figure = plt.figure(figsize=(5, 5))
    axes = figure.add_subplot(111)
    plt.tight_layout()
    # black background for the rendered points
    axes.set_facecolor('k')
    if _ax_lim:
        for set_limit in (axes.set_xlim, axes.set_ylim):
            set_limit(-_ax_lim, _ax_lim)
    # clean format: strip labels and/or tick marks unless requested
    if not _labels:
        axes.set_xticklabels('')
        axes.set_yticklabels('')
    if not _ticks:
        axes.set_xticks([])
        axes.set_yticks([])
    return figure, axes
# -
# ### Plan: Rendering Information from Physics Framework
# +
### combined physics and visual rendering
## generate initial sparse multidimensional vector array
# calculate initial point state (distance, force, potential/kinetic energy)
# cluster points by euclidean distance with thresholds weighted by point total energy
# group/sort by energy density, set tiered calculation frequency
## initialise perspective
# make uniform grids per step group (calculation frequency), coarse for low energy density groups
# number of grid coarse levels determines distance level of detail smoothness
# average/interpolation between grid levels at distance threshold limits
# label sparse points with location in each step grid
# calculate initial grid segment energy densities, parameter averages relevant to force fields (mass)
## initialise perspective transforms (translate, rotate, skew)
# from static perspective, build distance relations from perspective plane intersection and each grid component
# level of detail coarse grid selection, scaled by distance and energy density
### to display each frame, iterate over steps (each group by calculation frequency):
## perform physics calculations on data point subset (energy density grouping) for a given step
# use average point of coarser group grid for finer group step boundary calculations
# complexity of force fields used depends on group energy density
# update point positions and energy, store point index mask of updated data (energy, position)
## build frame for display, case of static perspective, dynamic environment
# take updated points mask and update each grid averages/parameters for grid subset linked to updated points
# only consider each grid total grid point energy change or grid to grid energy transfer, both by thresholds
# current frame uniform intensity decay, constant time (scaled by step time)
# build counts/intensity per pixel over perspective plane from grid vector intersections each step
# intensity/pixel size scaled by energy density and distance (coarse grid selection)
# update frame with new step pixel intensities
## consistent rotate/translate/transform grid for fixed perspective, movement of perspective requires:
# rebuild perspective plane to each grid point distance relations
# reduce pixel intensity decay rate and increase intensity scaling for first few steps to normalise frame shift
# return decay and scale to standard when perspective is static
## perspective plane vector intersection can be single ray from point, inherently grid scaled by energy density
# can be extended into multi-ray projection per point, random direction/scattering
# further incorporate full path/ray tracing with reflection based on grid/point material properties
### interest and display scaling based on any dimension/averaged measure
# physics calculation segmentation can be completely separate from light path rendering and display
# require grid average properties that are relevant to light propagation
# separate segmentation grids for reflection/surface roughness, coarse detail requirements
# each process has tiered grids segmented/clustered based on requisite parameters for process calculations
# vector points contain labels for location within each process grids
### larger generic structure
# physical vector point array with continuous consistent position axes, accurate clustering by energy density
# uniform grids for each process (display rendering), tiered by desired parameters (distance, energy, material)
# fast analysis of system state and properties using process grids to average, direct display filters
# machine learning for coarse grid physics approximations where appropriate, reduce computational load
### coarse grid switching at high energy density gradients, fractal material based detail generation
### colour/alpha channel for pixel intensities, based on desired parameter (distance, energy density)
# adjust the displayed information, both static appearance and temporal behaviour (relative intensity decay)
# enable pseudo-fft filter on display, pixel intensity delta, variable post-frame generation filter
# -
# ### Point Physics Simulation Framework
# +
## Notes:
# physics framework currently written using straight python and flexible data structure, limit lib. deps.
# +
### Data Structure Definition and State Initialisation Functions ###
## Generate Data Storage Object ##
## Details:
# define data storage object structure and set defaults
# includes current node count [integer], node storage object [dict]
## Inputs:
# none
## Outputs:
# data - data structure object [dict]
def gen_data():
    """Create an empty simulation store: a node dict plus a running count."""
    return {'nodes': {}, 'n_nodes': 0}
## Generate Data Node Object ##
## Details:
# define data node object structure and set defaults
# includes node index [integer], params object [dict], rels object [dict]
# call gen_params, gen_rels for node param/rel objects
## Inputs:
# nid - node index identifier [integer]
## Outputs:
# node - data structure node object [dict]
def gen_node(nid):
    """Build one node object: index `nid` plus fresh params and relations."""
    return {
        'nid': nid,
        'params': gen_params(),
        'rels': gen_rels(),
    }
## Generate Data Node Parameters Object ##
## Details:
# define node parameters object structure and set defaults
# includes node mass [float], node position/velocity/acceleration (n-dimensional) [array]
## Inputs:
# none
## Outputs:
# params - data node params object [dict]
def gen_params():
    """Randomised initial node parameters.

    Returns a dict with a scalar 'mass' and 2-D 'pos'/'vel'/'acc' vectors:
    positions in [-10, 10], velocities in [-0.1, 0.1], accelerations in
    [-0.01, 0.01]. The random draws happen in a fixed order (mass, pos,
    vel, acc) so seeded runs are reproducible.
    """
    dims = 2
    params = {'mass': random.randint(1, 100)/1.}
    params['pos'] = [random.randint(-100, 100)/10. for _ in range(dims)]
    params['vel'] = [random.randint(-10, 10)/100. for _ in range(dims)]
    params['acc'] = [random.randint(-1, 1)/100. for _ in range(dims)]
    return params
## Generate Data Node-Node Relations Object ##
## Details:
# define node relations object structure and set defaults
# includes node-node distance and (multiple) force objects [dict]
## Inputs:
# none
## Outputs:
# rels - data node rels object [dict]
def gen_rels():
    """Empty node-to-node relation maps: cached distances and gravity forces.

    (A 'fear' relation existed at one point and remains disabled.)
    """
    return {'dist': {}, 'gravity': {}}
## Generate and Add Data Node Object to Data Storage Object ##
## Details:
# get node index, update current node count
# call gen_node, add generated data node to data storage object
## Inputs:
# data - data storage object
## Outputs:
# none
def add_node(data):
    """Generate a fresh node and store it in `data` under the next index."""
    next_id = data['n_nodes']
    data['nodes'][next_id] = gen_node(next_id)
    data['n_nodes'] = next_id + 1
# +
### Physics and Parameter Calculation Functions ###
## Calculate Node-Node Euclidean Distance ##
## Details:
# calculate inter-node euclidean distance from node position vectors
## Inputs:
# nid1, nid2 - data node indicies
## Outputs:
# dist - node-node distance [float]
def distance(nid1, nid2):
    """Euclidean distance between two nodes' positions (reads global `data`)."""
    pos_a = data['nodes'][nid1]['params']['pos']
    pos_b = data['nodes'][nid2]['params']['pos']
    # update to include dimensional weighting
    return sum((b - a)**2 for a, b in zip(pos_a, pos_b))**.5
## Calculate Node-Node Force: Gravity ##
## Details:
# calculate inter-node force vector, gravity from node-node distance vector
## Inputs:
# nid1, nid2 - data node indicies
## Outputs:
# force - node-node force vector [float], (n-dimensional) array
def gravity(nid1, nid2):
    """Gravity force vector exerted on node `nid1` by node `nid2`.

    Uses the cached pair distance in rels['dist'] (update_distance must have
    run first) and reads the global `data` store. Returns a per-dimension
    force list directed from nid1 towards nid2.

    NOTE(review): the magnitude is multiplied by the raw displacement vector
    (length r), so the effective radial falloff is 1/r rather than 1/r^2.
    """
    node_a = data['nodes'][nid1]
    node_b = data['nodes'][nid2]
    pos_a = node_a['params']['pos']
    pos_b = node_b['params']['pos']
    # displacement from nid1 to nid2, per dimension
    delta = [ b - a for a, b in zip(pos_a, pos_b) ]
    dist = node_a['rels']['dist'][nid2]
    mass_a = node_a['params']['mass']
    mass_b = node_b['params']['mass']
    G = 1.  # gravitational constant in simulation units
    magnitude = G *( (mass_a * mass_b) / dist**2 )
    return [ magnitude*d for d in delta ]
def get_fear(nid1, nid2):
    """Repulsive 'fear' force on node `nid1` from node `nid2` (currently disabled).

    Magnitude grows as k * e^distance and the sign is negative, pushing the
    pair apart. Uses the cached pair distance and reads the global `data`.
    """
    node_a = data['nodes'][nid1]
    node_b = data['nodes'][nid2]
    pos_a = node_a['params']['pos']
    pos_b = node_b['params']['pos']
    delta = [ b - a for a, b in zip(pos_a, pos_b) ]
    dist = node_a['rels']['dist'][nid2]
    k = 0.001
    magnitude = -(k*(np.e**dist))
    return [ d*magnitude for d in delta ]
# +
### Update State Functions ###
# update node-node euclidean distance
def update_distance(nodes):
    """Recompute and cache the pairwise distances for the given node ids."""
    for a in nodes:
        for b in nodes:
            if a < b:
                d = distance(a, b)
                # distance is symmetric: store it under both orderings
                data['nodes'][a]['rels']['dist'][b] = d
                data['nodes'][b]['rels']['dist'][a] = d
# update node-node force: gravity
def update_gravity(nodes):
    """Refresh pairwise gravity forces; the reverse pair gets the negation."""
    for a in nodes:
        for b in nodes:
            if a < b:
                force = gravity(a, b)
                data['nodes'][a]['rels']['gravity'][b] = force
                # Newton's third law: equal and opposite on the other node
                data['nodes'][b]['rels']['gravity'][a] = [ -f for f in force ]
def update_fear(nodes):
    """Refresh pairwise 'fear' forces (anti-symmetric, like gravity).

    FIX: gen_rels() no longer creates a 'fear' sub-dict (it is commented
    out), so the original assignments raised KeyError as soon as this
    updater was re-enabled. setdefault creates the map on first use, which
    is a no-op whenever the key already exists.
    """
    for n in [ (n1, n2) for n1 in nodes for n2 in nodes if n1 < n2 ]:
        fear = get_fear(n[0], n[1])
        data['nodes'][n[0]]['rels'].setdefault('fear', {})[n[1]] = fear
        data['nodes'][n[1]]['rels'].setdefault('fear', {})[n[0]] = [ -f for f in fear ]
# update node acceleration vector from net force vectors
def update_acc(nodes):
    """Recompute each node's acceleration from its cached force vectors.

    Only gravity contributes at present; the disabled 'fear' force would be
    summed into the net force here as well. Reads the global `data`.
    """
    for nid in nodes:
        node = data['nodes'][nid]
        forces = list(node['rels']['gravity'].values())
        # dimensionality taken from the first cached force vector
        # (raises if a node has no forces, matching the original behaviour)
        dims = len(forces[0])
        net_force = [ sum(f[d] for f in forces) for d in range(dims) ]
        # Newton's second law: a = F / m, per dimension
        mass = node['params']['mass']
        node['params']['acc'] = [ component/mass for component in net_force ]
# update node velocity and position vectors from acceleration vector
def update_vel_pos(nodes, t_delta):
    """Advance velocity and position of the given nodes by one timestep.

    Constant-acceleration kinematics: v' = v + a*dt and
    x' = x + v*dt + a*dt^2/2.

    FIX: iterate over the `nodes` argument, as every other updater does,
    instead of ignoring it and always walking range(data['n_nodes']).
    Behaviour is unchanged for the current caller, which passes all nodes.
    """
    for n in nodes:
        pos = data['nodes'][n]['params']['pos']
        vel = data['nodes'][n]['params']['vel']
        acc = data['nodes'][n]['params']['acc']
        n_vel = [ vel[d] + acc[d]*t_delta for d in range(len(acc)) ]
        n_pos = [ pos[d] + vel[d]*t_delta + .5*acc[d]*t_delta**2 for d in range(len(acc)) ]
        # set node object pos/vel vector
        data['nodes'][n]['params']['pos'] = n_pos
        data['nodes'][n]['params']['vel'] = n_vel
# iterate simulation by uniform timestep, calculate net force, update positions
def timestep(nodes, t_delta):
    """Advance the simulation one step of length `t_delta`: refresh distances,
    then forces, then accelerations, then integrate velocity/position —
    strictly in that order, since each stage reads the previous one's cache."""
    update_distance(nodes)
    update_gravity(nodes)
    #update_fear(nodes)
    update_acc(nodes)
    update_vel_pos(nodes, t_delta)
# +
## runnning sim functions
# initialise data storage object, generate N nodes
def init(N = 10):
    """Create and return a fresh data store populated with N random nodes."""
    store = gen_data()
    for _ in range(N):
        add_node(store)
    # (a previous experiment offset every node's z-position here)
    return store
# run simulation over time period, display node positions
def plot_timestep(steps = 100, t_delta = 0.01, Hz = 60):
    """Run the simulation for `steps` timesteps of `t_delta` each, animating
    node positions in a scatter plot refreshed at roughly `Hz` frames/sec.
    Reads the global `data` store; marker colour and size encode node mass."""
    # initialise figure
    fig, axs = init_display(_ax_lim = 100)
    # initialise plot
    x = [ data['nodes'][n]['params']['pos'][0] for n in range(data['n_nodes']) ]
    y = [ data['nodes'][n]['params']['pos'][1] for n in range(data['n_nodes']) ]
    #z = [ data['nodes'][n]['params']['pos'][2] for n in range(data['n_nodes']) ]
    m = [ data['nodes'][n]['params']['mass'] for n in range(data['n_nodes']) ]
    #sca = ax.scatter(x, y, s = [i for i in z], c = m)
    sca = axs.scatter(x, y, c = m, s = m, cmap = 'Reds', edgecolor = None)
    plt.pause(0.5)
    # iterate through time
    for ti in range(steps):
        nodes = list(data['nodes'].keys())
        timestep(nodes, t_delta)
        # only display every nth timestep update
        # NOTE(review): the display-frequency variable `n` is shadowed by the
        # inner loop below; harmless since it is reset each iteration, but
        # worth renaming. `z` and `lbls` feed a disabled 3-D/label variant.
        n = 1
        if ti % n == 0:
            x = []; y = []; z = []; lbls = []
            for n in range(data['n_nodes']):
                pos = data['nodes'][n]['params']['pos']
                x.append(pos[0])
                y.append(pos[1])
                #z.append(pos[2])
            # push the new positions into the existing scatter artist
            sca.set_offsets(np.c_[x,y])
            #sca.set_sizes([i for i in z])
            plt.pause(Hz**-1)
# +
# Module-level driver: build a 100-node system and animate 500 timesteps.
data = init(100)
#sun_params = {'mass': 2000.0, 'pos': [0.0, 0.0], 'vel': [0.0, 0.0], 'acc': [0.0, 0.0]}
#data['nodes'][0]['params'] = sun_params
plot_timestep(500)
# -
| nbks/splitting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # A whirlwind tour of python
#
# The goal of this notebook is to quickly get you refreshed with the basic constructs of python. Knowledge of prior programming experience is assumed. This is by no means a complete guide, just key points you can build around.
#
# + [markdown] slideshow={"slide_type": "-"}
# # 1. Getting started
#
# Working with python is simple. Two main ways use the interpreter, writing your code in a `.py` file and executing it with python as such `python my_prog.py`, or opening the python interpreter and directly interacting with it -
# ```
# [ ~/Desktop ] » python
# Python 2.7.6 (default, Sep 9 2014, 15:04:36)
# [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.39)] on darwin
# Type "help", "copyright", "credits" or "license" for more information.
# ```
# ```python
# >>> print "hello world"
# hello world
# >>>
# ```
# The interpreter mode is great for testing code snippets and playing with language features. Note that given that python is an interpreted language, there is no compiling to be done.
# -
# The simplest program we can start with is the traditional hello world program.
print "hello world"
# # 2. Variables and types
# Variables don't need to be declared with a type (as in most statically typed languages), just assign a value to a variable and start using it.
greeting = "how are you"
print greeting
num = 5
print num+3
# **Note :** If you try to use a variable before assigning it, then the interpreter will throw an error (NameError).
# (Deliberate demo: xyz was never assigned, so this line raises NameError.)
print xyz
# There are various inbuilt types in Python, few key ones are **:** *`int, float, str, None, bool, dict, list, tuple
# Exception, method, function, generator, iterator, etc`*
# `int` and `float` are the general numeral types, and you can perform arithmetic operations on them.
#
# `str` is the string type, `bool` is boolean, `dict, list, tuple` are commonly used built in data structures.
#
# `None` is similar to "null" which we see in other languages. It basically signifies that the variable holds no value as such.
#
# The rest we'll cover later!
# ## 2.1 Booleans
# (Truthy and falsey values are covered just below.)
#
# Booleans can hold two values - True or False. `True` and `False` are python reserved words. General boolean operators apply. Boolean operators are not represented by the traditional characters such as `||, &&, !` rather by the corresponding words `or, and, not`.
# +
t = True
f = False
print t or f
print t and f
print not t
print t == f
# -
# Other than True and False there is also a concept of Truthy and Falsey values.
# Truthy and Falsey are values of other types (like integer and string) that evaluate to True and False. But aren't exactly equivalent. This concept of truthy and falsey is extremely useful (in terms of syntactic sugar) while working with conditionals (if-else statements).
# +
x = 1
y = 0
s = ""
r = "hello"
# any value for which bool(val) == True is a truthy; one for which bool(val) == False is a falsey
print bool(x) # for integers, any non zero is a truthy
print bool(y)
print bool(s)
print bool(r) #for strings, any non-empty string is a truthy
# -
# ## 2.2 Integers and Floats
# Integers and floats are numeric types and usual mathematical operations hold true. In case of integer division, the quotient gets floored. In case of operations involving both integers and floats, the result gets type converted to float.
# random experiments
x = 123
y = 5
z = 3.0
print type(x)
print type(z)
print x+y, x*y, x-y, x/y
print x+z, x/z
# more operations
print x % y # modulo
print y // z # for floored quotient in case of floats
print int(y)
print float(z)
# ## 2.3 Strings
# Strings are quite powerful in python, allowing a variety of operations.
s = "hello"
print str(-10) # gets converted to string as is
print type("abc")
print len(s)
print s.upper()
# +
# string concatenation
s = "Hello"
w = "world!"
result = s + ", "+ w
print result
# can repeat a string by multiplying
print "--*--" * 3
# +
# Indexing a string - it behaves like an array
s = "ABCDEFGH"
print s[0] # 0th index
print s[-1] # last index
print s[-2] # last but one index
# slicing
print s[1:3] # 1st index till 3rd (3rd not included)
print s[:] # whole string
print s[::2] # skip every 2nd element starting from 1st one
print s[-1:-5] # empty string: the start lies after the stop for a positive step
# +
# %-style and str.format templating
template = "hello %s and %s"
names = ("foo", "bar")
print template % names # if there is only one formatter "%s", then names can be a string
#t = "hello {0} and {1}"
#print t.format("one", "two") #this produces identical output as the following
t = "hello {i} and {j}"
print t.format(i="one", j="two")
# -
# ## 2.4 Lists
# Python's inbuilt list is a dynamic array (not a linked list). It is a heterogeneous sequence capable of holding any kind of object, including more lists, and supports rich operations. In terms of indexing and slicing it is quite similar to strings.
a = [1,2,3]
print a
print type(a)
print len(a)
# +
b = [3,6,2,1,0]
b.sort()
print b
c = ["1", 2, 3.0, True]
print c
print c.sort() # no return value, hence prints None
print c # shouldn't sort heterogeneous lists :(
# +
a = [0,1,2,3,4,5,6]
print a[0]
print a[::-1] # reverses it - slicing is similar to strings
# print a[100] - Error! (IndexError)
print list("hello") # each individual character becomes a list element
# -
print [1,2, 3] + [4,5] # list concatenation
print [1,2,3] * 2 # again, similar to strings
# print [1,2,3] * [4,5] -- Error!
# lists are mutable
a = [1,2,3]
b = a
a.append(4)
print a
print b # b references the same object as a, so it shows the appended value too
# ## 2.5 Tuples
# Tuples are similar to lists, except for the fact that they are immutable. Conventionally they are used for homogeneous data (eg, to represent a point in coordinate space)
a = (1,2,3)
print a
print a[1]
print a[:]
b = a
# a[1] = 4 - Error! can't assign. tuples are immutable.
a = a + (4,) # builds a NEW tuple; the original (still referenced by b) is untouched
print a, b # b remains unchanged
# ## 2.6 Dicts
# Dictionaries in python are similar to hashmaps. They store key value pairs. Dicts can store heterogeneous values, the keys can be any type as far as it is "hashable". eg, Integers and Strings are hashable, lists are not. Unlike lists and tuples, there is no sense of order in a dict.
# +
# way to declare a dictionary
a = {"1" : "one", 2 : "two", "three" : 3.0}
print a["1"], a[2]
# to get list of keys and values respectively
print a.keys()
print a.values()
# print a[5] -- Error! Key doesn't exist (KeyError)
print a.get(5) #safe way to do it - returns None when the key is missing
print a.get(5, "five") # default values
# +
# to update the dict
b = {"4" : 4, "5" : "five"}
c = a
a.update(b)
print a # a is updated with b's value
print c # dicts are mutable, c is also updated
# to update a value
a['4'] = 'four'
# to delete a value (pop removes the key and returns its value)
a.pop(2)
a.pop('three')
print a
print len(a) # to get the size
# -
# # 3. Loops
# There are two kinds of looping constructs in python. `for` and `while`
#
# The `for loop` expects an iterable to "loop over". Each time in the loop, the loop variable picks the next value from the iterable object. Lists, dicts, tuples and strings are examples of iterable objects. An object that has an `__iter__` and `next()` method is said to be iterable.
#
# range and xrange are two other builtin functions that are iterable.
# for loops
for i in range(3):
    print "number", i
# while loops
count = 0
while count < 3:
    print "number", count
    count += 1
# +
print "looping through list"
for i in [1,2,3]:
    print i,
print "\nlooping through tuple"
for i in (1,2,3):
    print i,
print "\nlooping through dict"
for i in {1:1, 2:4, 3:9}:
    print i, # iterating a dict yields its keys
# +
print "continue statement"
for i in range(2):
    print "cont."
    continue
    print "oops" # never printed
print "break statement"
for i in range(10):
    print "brk"
    break # loop exited
# +
# here the loop ends normally, so the else clause runs
for i in range(2):
    print "ok"
else:
    print "loop exited normally"
print "-" * 10
# here the loop is left via break, so the else clause is skipped
for i in range(2):
    print "ok"
    break
else:
    print "the one with break" # NOT printed: for-else only runs when the loop ends without break
# -
# -------
#
# Do check out different ways to use `range`, read up about `itertools`, different ways of iterating through dicts and writing your own iterator.
# # 4. Conditionals
# Python's conditional constructs include `if`, `else` and `elif`. Unlike C like languages, it does not support switch case.
#
a = 4
b = 0
if a:
    print "a : ", a
if b:
    print "b : ", b # not printed as 0 is a falsey value
if a:
    print "x"
if b:
    print "y"
else: #this links to closest if at the same indentation level
    print "z"
if a:
    print "x"
    if b: # nested if, does not affect outer else
        print "y"
else:
    print "z"
if b:
    print "b"
elif 3>4:
    # evaluates to false
    print "c"
elif "hi":
    # "hi" is a truthy (non-empty string), so this branch runs
    print "d"
else:
    print "e"
# If statements can also be used to assign values to variables, this is somewhat similar to ternary operators in C like languages.
# +
a = True
# of the format "var = val_1 if cond else val_2"
b = 5 if a else 6
print b
# due to operator precedence and short-circuiting the following statement is equivalent to the "if else" one
# (but unlike the conditional expression, it misbehaves when the "true" value is itself falsey)
b = a and '1' or '2'
print b
# -
# # 5. Functions
# Topics: functions as objects, passing them around, multiple params, default values (beware mutable ones), deconstructing tuple return values.
# +
def foo(a,b):
    '''This is a doc string, used for documentation'''
    return a+b
print foo(1,2)
# +
def bar(func, a,b):
    return func(a,b)
# functions can be passed around like any other objects
print bar(foo, 1,2)
# -
# **Functions are objects!**
print foo, type(foo)
print "Function name is", foo.func_name
print "DOC STRING :", foo.__doc__
# +
# functions can take multiple parameters - both named and unnamed
def demo(fixed, *args, **kwargs):
    print fixed
    print args # comes in as a tuple
    print kwargs # comes in as a dict
demo(1, 2, 3, 4, five=5, six=6)
# note the order
# - first necessary params should be
# - second all unnamed parameters should follow
# - finally the key value pairs
# -
# lists and dicts can be deconstructed on the fly
demo(1, *[2,3,4], **{"five" : 5, "six" : 6})
# +
# function parameters can have default values
def foo(a, b=5):
    print a+b
foo(1) # takes default value of b
foo(1,2) # overrides value of b
foo(a=2, b=2)
#note the order while dealing with default values
# as follows - def foo(a, b=5, *args, **kwargs)
# +
# careful while assigning default values that are mutable in nature
def foo(a,l=[1,2]):
    l.append(a)
    print l
foo(3)
foo(4, [1,1,1])
foo(5) # old value of 'l' is reused!!
# This happens as the default value (in this case [1,2])
# is bound once, when the function is defined - not on every call!
# +
# multiple return values
def foo(a,b):
    return a+b, a*b
add, prod = foo(2,3)
print add, prod
# python functions cannot return multiple values, though it seems like that
# what it's actually returning is a tuple, which gets deconstructed on the LHS
print foo(2,3), type(foo(2,3))
# -
# # 6. Class
# Classes serve the same purpose in python as it does in any other language - it provides user defined abstraction. It gives you a way to create custom objects which are a type of your class.
# +
class Point(object):
    '''A simple 2D point holding x and y coordinates.'''
    def __init__(self, x, y):
        #self refers to the object itself
        # now define the instance variables
        self.x = x
        self.y = y
    def print_x(self):
        print self.x
    def print_y(self):
        print self.y
p = Point(2,3)
p.print_x()
p.print_y()
# +
class Point3D(Point):
    '''A 3D point: inherits x/y handling from Point and adds z.'''
    def __init__(self, x, y, z):
        # if child class does not have a init, directly goes to parent.
        # If child has init does not automatically go to parent's __init__
        self.z = z
        super(Point3D, self).__init__(x, y)
    def print_z(self):
        print "This is z :", self.z
    def print_y(self):
        # overrides the base-class implementation
        print "This is y:", self.y
pp = Point3D(1,2,3)
pp.print_x() # goes to base class's print_x
pp.print_y() # override
pp.print_z()
# +
class test(object):
    '''Demonstrates class attributes, @staticmethod and @classmethod.'''
    # class attribute, shared by the class, its instances, and subclasses
    fox = 10
    @staticmethod
    def s_met():
        # static methods receive neither the instance nor the class
        return test.fox
    @classmethod
    def c_met(cls):
        # cls passed is the object's cls, not necessarily this class
        # so in case a obj of child class calls this, cls will be the child class
        print cls
class test2(test):
    pass
t = test()
print t.s_met()
t.c_met()
t2 = test2()
t2.c_met()
print t.fox, t2.fox, test.fox, test2.fox # all resolve to same variable
# -
# # 8. Exceptions
# Exceptions are quite similar to what you would expect in any other language. general syntax :
# ```py
# try:
# # try block
# except XyzException as e:
# # exception handling
# finally:
# #     # do something
# ```
#
# Most of the builtin exceptions are named of the form XyzError, where Xyz is the error type. All exceptions inherit from the BaseException class. User defined exceptions should inherit from the Exception class (which in turn inherits from BaseException).
# +
def foo():
    '''Demonstrates a basic try/except/finally flow.'''
    try:
        print "trying"
        print 0/0  # raises ZeroDivisionError
        print "oops..."  # never reached
    except ZeroDivisionError as e:
        # we could have omitted "ZeroDivisionError" for a catch-all case, but that's bad practice
        print e, type(e)
        print e.message  # NOTE: e.message is Python 2 only; removed in Python 3
    finally:
        print "Finally block"
foo()
# +
a = []
# In a try except block, an exception can be caught only once.
# if an exception is thrown within an except block,
# finally immediately executes and the new exception is propagated!
def foo():
    '''Shows that an exception raised inside an except block is NOT caught
    by the remaining except clauses of the same try statement.'''
    try:
        print "trying"
        print a[1]  # IndexError: the list is empty
    except IndexError as e:
        print e
        print 0/0  # new ZeroDivisionError escapes; the clause below does NOT catch it
    except ZeroDivisionError as e:
        print e
    finally:
        print "||| finally |||"
foo()
# +
# a try except else block is also possible
# like how finally executes irrespective of whether except has been executed or not,
# else executes only if an except block has not been executed
def foo():
    '''Demonstrates try/except/else/finally on the success path.'''
    try:
        print "trying"
        print 1/1
    except ZeroDivisionError as e:
        print e
    else:
        print "everything went fine"
    finally:
        print "finally"
foo()
# -
# -----------
# **Note** that KeyboardInterrupt (thrown when crtl+C) inherits directly from BaseException (and not Exception) so as to not be caught by mistake by a catch all exception clause.
| Python/pytutorial-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1><center>Introductory Data Analysis Workflow</center></h1>
#
# 
# https://xkcd.com/2054
# # An example machine learning notebook
#
# * Original Notebook by [<NAME>](http://www.randalolson.com/)
# * Supported by [<NAME>](http://www.epistasis.org/)
# * [University of Pennsylvania Institute for Bioinformatics](http://upibi.org/)
# * Adapted for LU Py-Sem 2018 by [<NAME>](<EMAIL>)
# **You can also [execute the code in this notebook on Binder](https://mybinder.org/v2/gh/ValRCS/RigaComm_DataAnalysis/master) - no local installation required.**
# text 17.04.2019
import datetime
# record when the notebook was last executed
print(datetime.datetime.now())
print('hello')
# ## Table of contents
#
# 1. [Introduction](#Introduction)
#
# 2. [License](#License)
#
# 3. [Required libraries](#Required-libraries)
#
# 4. [The problem domain](#The-problem-domain)
#
# 5. [Step 1: Answering the question](#Step-1:-Answering-the-question)
#
# 6. [Step 2: Checking the data](#Step-2:-Checking-the-data)
#
# 7. [Step 3: Tidying the data](#Step-3:-Tidying-the-data)
#
# - [Bonus: Testing our data](#Bonus:-Testing-our-data)
#
# 8. [Step 4: Exploratory analysis](#Step-4:-Exploratory-analysis)
#
# 9. [Step 5: Classification](#Step-5:-Classification)
#
# - [Cross-validation](#Cross-validation)
#
# - [Parameter tuning](#Parameter-tuning)
#
# 10. [Step 6: Reproducibility](#Step-6:-Reproducibility)
#
# 11. [Conclusions](#Conclusions)
#
# 12. [Further reading](#Further-reading)
#
# 13. [Acknowledgements](#Acknowledgements)
# ## Introduction
#
# [[ go back to the top ]](#Table-of-contents)
#
# In the time it took you to read this sentence, terabytes of data have been collectively generated across the world — more data than any of us could ever hope to process, much less make sense of, on the machines we're using to read this notebook.
#
# In response to this massive influx of data, the field of Data Science has come to the forefront in the past decade. Cobbled together by people from a diverse array of fields — statistics, physics, computer science, design, and many more — the field of Data Science represents our collective desire to understand and harness the abundance of data around us to build a better world.
#
# In this notebook, I'm going to go over a basic Python data analysis pipeline from start to finish to show you what a typical data science workflow looks like.
#
# In addition to providing code examples, I also hope to imbue in you a sense of good practices so you can be a more effective — and more collaborative — data scientist.
#
# I will be following along with the data analysis checklist from [The Elements of Data Analytic Style](https://leanpub.com/datastyle), which I strongly recommend reading as a free and quick guidebook to performing outstanding data analysis.
#
# **This notebook is intended to be a public resource. As such, if you see any glaring inaccuracies or if a critical topic is missing, please feel free to point it out or (preferably) submit a pull request to improve the notebook.**
# ## License
#
# [[ go back to the top ]](#Table-of-contents)
#
# Please see the [repository README file](https://github.com/rhiever/Data-Analysis-and-Machine-Learning-Projects#license) for the licenses and usage terms for the instructional material and code in this notebook. In general, I have licensed this material so that it is as widely usable and shareable as possible.
# ## Required libraries
#
# [[ go back to the top ]](#Table-of-contents)
#
# If you don't have Python on your computer, you can use the [Anaconda Python distribution](http://continuum.io/downloads) to install most of the Python packages you need. Anaconda provides a simple double-click installer for your convenience.
#
# This notebook uses several Python packages that come standard with the Anaconda Python distribution. The primary libraries that we'll be using are:
#
# * **NumPy**: Provides a fast numerical array structure and helper functions.
# * **pandas**: Provides a DataFrame structure to store data in memory and work with it easily and efficiently.
# * **scikit-learn**: The essential Machine Learning package in Python.
# * **matplotlib**: Basic plotting library in Python; most other Python plotting libraries are built on top of it.
# * **Seaborn**: Advanced statistical plotting library.
# * **watermark**: A Jupyter Notebook extension for printing timestamps, version numbers, and hardware information.
#
# **Note:** I will not be providing support for people trying to run this notebook outside of the Anaconda Python distribution.
# ## The problem domain
#
# [[ go back to the top ]](#Table-of-contents)
#
# For the purposes of this exercise, let's pretend we're working for a startup that just got funded to create a smartphone app that automatically identifies species of flowers from pictures taken on the smartphone. We're working with a moderately-sized team of data scientists and will be building part of the data analysis pipeline for this app.
#
# We've been tasked by our company's Head of Data Science to create a demo machine learning model that takes four measurements from the flowers (sepal length, sepal width, petal length, and petal width) and identifies the species based on those measurements alone.
#
# <img src="img/petal_sepal.jpg" />
#
# We've been given a [data set](https://github.com/ValRCS/RCS_Data_Analysis_Python/blob/master/data/iris-data.csv) from our field researchers to develop the demo, which only includes measurements for three types of *Iris* flowers:
#
# ### *Iris setosa*
#
# <img src="img/iris_setosa.jpg" />
#
# ### *Iris versicolor*
# <img src="img/iris_versicolor.jpg" />
#
# ### *Iris virginica*
# <img src="img/iris_virginica.jpg" />
#
# The four measurements we're using currently come from hand-measurements by the field researchers, but they will be automatically measured by an image processing model in the future.
#
# **Note:** The data set we're working with is the famous [*Iris* data set](https://archive.ics.uci.edu/ml/datasets/Iris) — included with this notebook — which I have modified slightly for demonstration purposes.
# ## Step 1: Answering the question
#
# [[ go back to the top ]](#Table-of-contents)
#
# The first step to any data analysis project is to define the question or problem we're looking to solve, and to define a measure (or set of measures) for our success at solving that task. The data analysis checklist has us answer a handful of questions to accomplish that, so let's work through those questions.
#
# >Did you specify the type of data analytic question (e.g. exploration, association causality) before touching the data?
#
# We're trying to classify the species (i.e., class) of the flower based on four measurements that we're provided: sepal length, sepal width, petal length, and petal width.
#
# Petal - ziedlapiņa, sepal - arī ziedlapiņa
#
# 
#
# >Did you define the metric for success before beginning?
#
# Let's do that now. Since we're performing classification, we can use [accuracy](https://en.wikipedia.org/wiki/Accuracy_and_precision) — the fraction of correctly classified flowers — to quantify how well our model is performing. Our company's Head of Data has told us that we should achieve at least 90% accuracy.
#
# >Did you understand the context for the question and the scientific or business application?
#
# We're building part of a data analysis pipeline for a smartphone app that will be able to classify the species of flowers from pictures taken on the smartphone. In the future, this pipeline will be connected to another pipeline that automatically measures from pictures the traits we're using to perform this classification.
#
# >Did you record the experimental design?
#
# Our company's Head of Data has told us that the field researchers are hand-measuring 50 randomly-sampled flowers of each species using a standardized methodology. The field researchers take pictures of each flower they sample from pre-defined angles so the measurements and species can be confirmed by the other field researchers at a later point. At the end of each day, the data is compiled and stored on a private company GitHub repository.
#
# >Did you consider whether the question could be answered with the available data?
#
# The data set we currently have is only for three types of *Iris* flowers. The model built off of this data set will only work for those *Iris* flowers, so we will need more data to create a general flower classifier.
#
# <hr />
#
# Notice that we've spent a fair amount of time working on the problem without writing a line of code or even looking at the data.
#
# **Thinking about and documenting the problem we're working on is an important step to performing effective data analysis that often goes overlooked.** Don't skip it.
# ## Step 2: Checking the data
#
# [[ go back to the top ]](#Table-of-contents)
#
# The next step is to look at the data we're working with. Even curated data sets from the government can have errors in them, and it's vital that we spot these errors before investing too much time in our analysis.
#
# Generally, we're looking to answer the following questions:
#
# * Is there anything wrong with the data?
# * Are there any quirks with the data?
# * Do I need to fix or remove any of the data?
#
# Let's start by reading the data into a pandas DataFrame.
import pandas as pd
# +
# Load the raw iris measurements; the path is relative to the notebook folder.
iris_data = pd.read_csv('../data/iris-data.csv')
# -
#lets take a look at the first 5 rows
iris_data.head()
iris_data.tail()
# +
# Resources for loading data from nonlocal sources
# Pandas Can generally handle most common formats
# https://pandas.pydata.org/pandas-docs/stable/io.html
# SQL https://stackoverflow.com/questions/39149243/how-do-i-connect-to-a-sql-server-database-with-python
# NoSQL MongoDB https://realpython.com/introduction-to-mongodb-and-python/
# Apache Hadoop: https://dzone.com/articles/how-to-get-hadoop-data-into-a-python-model
# Apache Spark: https://www.datacamp.com/community/tutorials/apache-spark-python
# Data Scraping / Crawling libraries : https://elitedatascience.com/python-web-scraping-libraries Big Topic in itself
# Most data resources have some form of Python API / Library
# -
iris_data.head()
# We're in luck! The data seems to be in a usable format.
#
# The first row in the data file defines the column headers, and the headers are descriptive enough for us to understand what each column represents. The headers even give us the units that the measurements were recorded in, just in case we needed to know at a later point in the project.
#
# Each row following the first row represents an entry for a flower: four measurements and one class, which tells us the species of the flower.
#
# **One of the first things we should look for is missing data.** Thankfully, the field researchers already told us that they put a 'NA' into the spreadsheet when they were missing a measurement.
#
# We can tell pandas to automatically identify missing values if it knows our missing value marker.
iris_data.shape  # (number of rows, number of columns)
iris_data.info()
iris_data.describe()
# with na_values we can pass what cells to mark as na
# (re-read the file so the listed markers are parsed as missing values)
iris_data = pd.read_csv('../data/iris-data.csv', na_values=['NA', 'N/A'])
# Voilà! Now pandas knows to treat rows with 'NA' as missing values.
# Next, it's always a good idea to look at the distribution of our data — especially the outliers.
#
# Let's start by printing out some summary statistics about the data set.
iris_data.describe()
# We can see several useful values from this table. For example, we see that five `petal_width_cm` entries are missing.
#
# If you ask me, though, tables like this are rarely useful unless we know that our data should fall in a particular range. It's usually better to visualize the data in some way. Visualization makes outliers and errors immediately stand out, whereas they might go unnoticed in a large table of numbers.
#
# Since we know we're going to be plotting in this section, let's set up the notebook so we can plot inside of it.
# +
# This line tells the notebook to show plots inside of the notebook
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sb
# -
# Next, let's create a **scatterplot matrix**. Scatterplot matrices plot the distribution of each column along the diagonal, and then plot a scatterplot matrix for the combination of each variable. They make for an efficient tool to look for errors in our data.
#
# We can even have the plotting package color each entry by its class to look for trends within the classes.
sb.pairplot(iris_data, hue='class')
# We have to temporarily drop the rows with 'NA' values
# because the Seaborn plotting function does not know
# what to do with them
sb.pairplot(iris_data.dropna(), hue='class')
# From the scatterplot matrix, we can already see some issues with the data set:
#
# 1. There are five classes when there should only be three, meaning there were some coding errors.
#
# 2. There are some clear outliers in the measurements that may be erroneous: one `sepal_width_cm` entry for `Iris-setosa` falls well outside its normal range, and several `sepal_length_cm` entries for `Iris-versicolor` are near-zero for some reason.
#
# 3. We had to drop those rows with missing values.
#
# In all of these cases, we need to figure out what to do with the erroneous data. Which takes us to the next step...
# ## Step 3: Tidying the data
#
# ### GIGO principle
#
# [[ go back to the top ]](#Table-of-contents)
#
# Now that we've identified several errors in the data set, we need to fix them before we proceed with the analysis.
#
# Let's walk through the issues one-by-one.
#
# >There are five classes when there should only be three, meaning there were some coding errors.
#
# After talking with the field researchers, it sounds like one of them forgot to add `Iris-` before their `Iris-versicolor` entries. The other extraneous class, `Iris-setossa`, was simply a typo that they forgot to fix.
#
# Let's use the DataFrame to fix these errors.
iris_data['class'].unique()
len(iris_data['class'].unique())  # 5 distinct labels before cleaning (should be 3)
# Copy and Replace
# in df.loc[rows, thencolumns]
# rows where class is the mistyped 'versicolor' get the proper 'Iris-' prefix
iris_data.loc[iris_data['class'] == 'versicolor', 'class'] = 'Iris-versicolor'
iris_data['class'].unique()
# +
# So we take a row where a specific column('class' here) matches our bad values
# and change them to good values
iris_data.loc[iris_data['class'] == 'Iris-setossa', 'class'] = 'Iris-setosa'
iris_data['class'].unique()
# -
iris_data.tail()
iris_data[98:103]  # peek at the rows around the ones that were just relabeled
iris_data['class'].unique()
# Much better! Now we only have three class types. Imagine how embarrassing it would've been to create a model that used the wrong classes.
#
# >There are some clear outliers in the measurements that may be erroneous: one `sepal_width_cm` entry for `Iris-setosa` falls well outside its normal range, and several `sepal_length_cm` entries for `Iris-versicolor` are near-zero for some reason.
#
# Fixing outliers can be tricky business. It's rarely clear whether the outlier was caused by measurement error, recording the data in improper units, or if the outlier is a real anomaly. For that reason, we should be judicious when working with outliers: if we decide to exclude any data, we need to make sure to document what data we excluded and provide solid reasoning for excluding that data. (i.e., "This data didn't fit my hypothesis" will not stand peer review.)
#
# In the case of the one anomalous entry for `Iris-setosa`, let's say our field researchers know that it's impossible for `Iris-setosa` to have a sepal width below 2.5 cm. Clearly this entry was made in error, and we're better off just scrapping the entry than spending hours finding out what happened.
# here we see all flowers with sepal_width_cm under 2.5 cm
iris_data.loc[(iris_data['sepal_width_cm'] < 2.5)]
## for multiple filters we use & for AND , and use | for OR
smallpetals = iris_data.loc[(iris_data['sepal_width_cm'] < 2.5) & (iris_data['class'] == 'Iris-setosa') ]
smallpetals
iris_data.loc[iris_data['class'] == 'Iris-setosa', 'sepal_width_cm'].hist()
len(iris_data)
# +
# This line drops any 'Iris-setosa' rows with a sepal width less than 2.5 cm
# Let's go over this command in class
# (a row is KEPT when it is not setosa OR its sepal width is >= 2.5)
iris_data = iris_data.loc[(iris_data['class'] != 'Iris-setosa') | (iris_data['sepal_width_cm'] >= 2.5)]
iris_data.loc[iris_data['class'] == 'Iris-setosa', 'sepal_width_cm'].hist()
# -
len(iris_data)
# Excellent! Now all of our `Iris-setosa` rows have a sepal width greater than 2.5.
#
# The next data issue to address is the several near-zero sepal lengths for the `Iris-versicolor` rows. Let's take a look at those rows.
iris_data.loc[(iris_data['class'] == 'Iris-versicolor') &
              (iris_data['sepal_length_cm'] < 1.0)]
# How about that? All of these near-zero `sepal_length_cm` entries seem to be off by two orders of magnitude, as if they had been recorded in meters instead of centimeters.
#
# After some brief correspondence with the field researchers, we find that one of them forgot to convert those measurements to centimeters. Let's do that for them.
# FIX: the conversion promised above (and assumed by the later cells and the
# `>= 2.5` assert) was missing -- scale the near-zero entries, which were
# recorded in meters, by 100 to convert them to centimeters.
iris_data.loc[(iris_data['class'] == 'Iris-versicolor') &
              (iris_data['sepal_length_cm'] < 1.0),
              'sepal_length_cm'] *= 100.0
iris_data.loc[iris_data['class'] == 'Iris-versicolor', 'sepal_length_cm'].hist()
iris_data.loc[(iris_data['class'] == 'Iris-versicolor') &
              (iris_data['sepal_length_cm'] < 1.0)]  # should now select no rows
iris_data['sepal_length_cm'].hist()
# Phew! Good thing we fixed those outliers. They could've really thrown our analysis off.
#
# >We had to drop those rows with missing values.
#
# Let's take a look at the rows with missing values:
iris_data.notnull()  # boolean mask: True where a value is present
iris_data.loc[(iris_data['sepal_length_cm'].isnull()) |
              (iris_data['sepal_width_cm'].isnull()) |
              (iris_data['petal_length_cm'].isnull()) |
              (iris_data['petal_width_cm'].isnull())]
# It's not ideal that we had to drop those rows, especially considering they're all `Iris-setosa` entries. Since it seems like the missing data is systematic — all of the missing values are in the same column for the same *Iris* type — this error could potentially bias our analysis.
#
# One way to deal with missing data is **mean imputation**: If we know that the values for a measurement fall in a certain range, we can fill in empty values with the average of that measurement.
#
# Let's see if we can do that here.
iris_data.loc[iris_data['class'] == 'Iris-setosa', 'petal_width_cm'].hist()
# Most of the petal widths for `Iris-setosa` fall within the 0.2-0.3 range, so let's fill in these entries with the average measured petal width.
iris_setosa_avg = iris_data.loc[iris_data['class'] == 'Iris-setosa', 'petal_width_cm'].mean()
iris_setosa_avg
type(iris_setosa_avg)
round(iris_setosa_avg, 2)
# for our purposes 4 digit accuracy is sufficient, add why here :)
iris_setosa_avg = round(iris_setosa_avg, 4)
# NOTE(review): the next line recomputes the unrounded mean only to print it;
# the rounded value is what is actually used for imputation below.
average_petal_width = iris_data.loc[iris_data['class'] == 'Iris-setosa', 'petal_width_cm'].mean()
print(average_petal_width)
average_petal_width = iris_setosa_avg  # switch to the rounded value
# +
# we find iris-setosa rows where petal_width_cm is missing
# and fill them in with the class average -- mean imputation
iris_data.loc[(iris_data['class'] == 'Iris-setosa') &
              (iris_data['petal_width_cm'].isnull()),
              'petal_width_cm'] = average_petal_width
# +
# we find all iris-setosa with the average
iris_data.loc[(iris_data['class'] == 'Iris-setosa') &
              (iris_data['petal_width_cm'] == average_petal_width)]
# -
# re-check for missing measurements: this selection should now be empty
iris_data.loc[(iris_data['sepal_length_cm'].isnull()) |
              (iris_data['sepal_width_cm'].isnull()) |
              (iris_data['petal_length_cm'].isnull()) |
              (iris_data['petal_width_cm'].isnull())]
# if we want to drop rows with missing data
# and save them into a new dataframe
dfwithoutmissingvalues = iris_data.dropna()
len(dfwithoutmissingvalues)
# Great! Now we've recovered those rows and no longer have missing data in our data set.
#
# **Note:** If you don't feel comfortable imputing your data, you can drop all rows with missing data with the `dropna()` call:
#
# iris_data.dropna(inplace=True)
#
# After all this hard work, we don't want to repeat this process every time we work with the data set. Let's save the tidied data file *as a separate file* and work directly with that data file from now on.
import json
iris_data.to_json('../data/iris-clean.json')
# to bypass pandas missing json formatter we can format the data ourselves
df_json_pretty = json.dumps(json.loads(iris_data.to_json()), indent=4)
type(df_json_pretty)
df_json_pretty[:100]
with open('data.json', 'w', encoding='utf-8') as f:
    f.write(df_json_pretty)
# for saving in the same folder
iris_data.to_csv('iris-data-clean.csv', index=False)
# NOTE(review): the CSV above is written to the CURRENT folder, but the next
# line reads from '../data/' -- this only works if a copy already exists there.
iris_data_clean = pd.read_csv('../data/iris-data-clean.csv')
iris_data_clean.head()
# Now, let's take a look at the scatterplot matrix now that we've tidied the data.
myplot = sb.pairplot(iris_data_clean, hue='class')
myplot.savefig('irises.png')  # persist the figure next to the notebook
import scipy.stats as stats
# NOTE(review): this re-reads the RAW csv, so iris_data is the dirty data again
# from here on (relevant to the asserts further down).
iris_data = pd.read_csv('../data/iris-data.csv')
iris_data.columns.unique()
stats.entropy(iris_data_clean['sepal_length_cm'])
iris_data.columns[:-1]  # all measurement columns (the last column is 'class')
# we go through list of column names except last one and get entropy
# for data (without missing values) in each column
for col in iris_data.columns[:-1]:
    print("Entropy for: ", col, stats.entropy(iris_data[col].dropna()))
# Of course, I purposely inserted numerous errors into this data set to demonstrate some of the many possible scenarios you may face while tidying your data.
#
# The general takeaways here should be:
#
# * Make sure your data is encoded properly
#
# * Make sure your data falls within the expected range, and use domain knowledge whenever possible to define that expected range
#
# * Deal with missing data in one way or another: replace it if you can or drop it
#
# * Never tidy your data manually because that is not easily reproducible
#
# * Use code as a record of how you tidied your data
#
# * Plot everything you can about the data at this stage of the analysis so you can *visually* confirm everything looks correct
# ## Bonus: Testing our data
#
# [[ go back to the top ]](#Table-of-contents)
#
# At SciPy 2015, I was exposed to a great idea: We should test our data. Just how we use unit tests to verify our expectations from code, we can similarly set up unit tests to verify our expectations about a data set.
#
# We can quickly test our data using `assert` statements: We assert that something must be true, and if it is, then nothing happens and the notebook continues running. However, if our assertion is wrong, then the notebook stops running and brings it to our attention. For example,
#
# ```Python
# assert 1 == 2
# ```
#
# will raise an `AssertionError` and stop execution of the notebook because the assertion failed.
#
# Let's test a few things that we know about our data set now.
assert 1 == 3  # deliberately fails: demonstrates that a failed assert halts the notebook
# We know that we should only have three classes
assert len(iris_data_clean['class'].unique()) == 3
# NOTE(review): iris_data was re-read from the raw csv above, so this assert
# is expected to fail on the dirty data (it has five class labels).
assert len(iris_data['class'].unique()) == 3
# We know that sepal lengths for 'Iris-versicolor' should never be below 2.5 cm
assert iris_data_clean.loc[iris_data_clean['class'] == 'Iris-versicolor', 'sepal_length_cm'].min() >= 2.5
# We know that our data set should have no missing measurements
assert len(iris_data_clean.loc[(iris_data_clean['sepal_length_cm'].isnull()) |
                               (iris_data_clean['sepal_width_cm'].isnull()) |
                               (iris_data_clean['petal_length_cm'].isnull()) |
                               (iris_data_clean['petal_width_cm'].isnull())]) == 0
# We know that our data set should have no missing measurements
assert len(iris_data.loc[(iris_data['sepal_length_cm'].isnull()) |
                         (iris_data['sepal_width_cm'].isnull()) |
                         (iris_data['petal_length_cm'].isnull()) |
                         (iris_data['petal_width_cm'].isnull())]) == 0
# And so on. If any of these expectations are violated, then our analysis immediately stops and we have to return to the tidying stage.
# ### Data Cleanup & Wrangling > 80% time spent in Data Science
# ## Step 4: Exploratory analysis
#
# [[ go back to the top ]](#Table-of-contents)
#
# Now after spending entirely too much time tidying our data, we can start analyzing it!
#
# Exploratory analysis is the step where we start delving deeper into the data set beyond the outliers and errors. We'll be looking to answer questions such as:
#
# * How is my data distributed?
#
# * Are there any correlations in my data?
#
# * Are there any confounding factors that explain these correlations?
#
# This is the stage where we plot all the data in as many ways as possible. Create many charts, but don't bother making them pretty — these charts are for internal use.
#
# Let's return to that scatterplot matrix that we used earlier.
# NOTE(review): the bare ';' lines below suppressed the plot object's repr in
# the original notebook cells; in a plain Python script a lone ';' is a
# SyntaxError -- consider appending it to the previous line instead.
sb.pairplot(iris_data_clean)
;
# Our data is normally distributed for the most part, which is great news if we plan on using any modeling methods that assume the data is normally distributed.
#
# There's something strange going on with the petal measurements. Maybe it's something to do with the different `Iris` types. Let's color code the data by the class again to see if that clears things up.
sb.pairplot(iris_data_clean, hue='class')
;
# Sure enough, the strange distribution of the petal measurements exist because of the different species. This is actually great news for our classification task since it means that the petal measurements will make it easy to distinguish between `Iris-setosa` and the other `Iris` types.
#
# Distinguishing `Iris-versicolor` and `Iris-virginica` will prove more difficult given how much their measurements overlap.
#
# There are also correlations between petal length and petal width, as well as sepal length and sepal width. The field biologists assure us that this is to be expected: Longer flower petals also tend to be wider, and the same applies for sepals.
#
# We can also make [**violin plots**](https://en.wikipedia.org/wiki/Violin_plot) of the data to compare the measurement distributions of the classes. Violin plots contain the same information as [box plots](https://en.wikipedia.org/wiki/Box_plot), but also scales the box according to the density of the data.
# +
plt.figure(figsize=(10, 10))
# one violin plot per measurement column, laid out on a 2x2 grid
for column_index, column in enumerate(iris_data_clean.columns):
    if column == 'class':
        continue  # 'class' is the categorical target, not a measurement
    plt.subplot(2, 2, column_index + 1)
    sb.violinplot(x='class', y=column, data=iris_data_clean)
# -
# Enough flirting with the data. Let's get to modeling.
# ## Step 5: Classification
#
# [[ go back to the top ]](#Table-of-contents)
#
# Wow, all this work and we *still* haven't modeled the data!
#
# As tiresome as it can be, tidying and exploring our data is a vital component to any data analysis. If we had jumped straight to the modeling step, we would have created a faulty classification model.
#
# Remember: **Bad data leads to bad models.** Always check your data first.
#
# <hr />
#
# Assured that our data is now as clean as we can make it — and armed with some cursory knowledge of the distributions and relationships in our data set — it's time to make the next big step in our analysis: Splitting the data into training and testing sets.
#
# A **training set** is a random subset of the data that we use to train our models.
#
# A **testing set** is a random subset of the data (mutually exclusive from the training set) that we use to validate our models on unforeseen data.
#
# Especially in sparse data sets like ours, it's easy for models to **overfit** the data: The model will learn the training set so well that it won't be able to handle most of the cases it's never seen before. This is why it's important for us to build the model with the training set, but score it with the testing set.
#
# Note that once we split the data into a training and testing set, we should treat the testing set like it no longer exists: We cannot use any information from the testing set to build our model or else we're cheating.
#
# Let's set up our data first.
# +
# iris_data_clean = pd.read_csv('../data/iris-data-clean.csv')
# We're using all four measurements as inputs
# Note that scikit-learn expects each entry to be a list of values, e.g.,
# [ [val1, val2, val3],
# [val1, val2, val3],
# ... ]
# such that our input data set is represented as a list of lists
# We can extract the data in this format from pandas like this:
# usually called X
all_inputs = iris_data_clean[['sepal_length_cm', 'sepal_width_cm',
'petal_length_cm', 'petal_width_cm']].values
# Similarly, we can extract the class labels
# answers/label often called little y
all_labels = iris_data_clean['class'].values
# Make sure that you don't mix up the order of the entries
# all_inputs[5] inputs should correspond to the class in all_labels[5]
# Here's what a subset of our inputs looks like:
all_inputs[:5]
# -
type(all_inputs)
all_labels[:5]
type(all_labels)
# Now our data is ready to be split.
all_inputs[:3]
iris_data_clean.head(3)
all_labels[:3]
from sklearn.model_selection import train_test_split
# Here we split our data into training and testing data
# you can read more on split function at
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
# random_state=1 makes this particular split reproducible
(training_inputs,
 testing_inputs,
 training_classes,
 testing_classes) = train_test_split(all_inputs, all_labels, test_size=0.25, random_state=1)
len(all_inputs)
len(training_inputs)
# sanity check: the split is 75% train / 25% test of the 149 clean rows
0.75*149
149*0.25
len(testing_inputs)
# peek at both halves to confirm inputs and labels stayed aligned
training_inputs[:5]
testing_inputs[:5]
testing_classes[:5]
training_classes[:5]
# With our data split, we can start fitting models to our data. Our company's Head of Data is all about decision tree classifiers, so let's start with one of those.
#
# Decision tree classifiers are incredibly simple in theory. In their simplest form, decision tree classifiers ask a series of Yes/No questions about the data — each time getting closer to finding out the class of each entry — until they either classify the data set perfectly or simply can't differentiate a set of entries. Think of it like a game of [Twenty Questions](https://en.wikipedia.org/wiki/Twenty_Questions), except the computer is *much*, *much* better at it.
#
# Here's an example decision tree classifier:
#
# <img src="img/iris_dtc.png" />
#
# Notice how the classifier asks Yes/No questions about the data — whether a certain feature is <= 1.75, for example — so it can differentiate the records. This is the essence of every decision tree.
#
# The nice part about decision tree classifiers is that they are **scale-invariant**, i.e., the scale of the features does not affect their performance, unlike many Machine Learning models. In other words, it doesn't matter if our features range from 0 to 1 or 0 to 1,000; decision tree classifiers will work with them just the same.
#
# There are several [parameters](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html) that we can tune for decision tree classifiers, but for now let's use a basic decision tree classifier.
# +
from sklearn.tree import DecisionTreeClassifier

# Create the classifier
decision_tree_classifier = DecisionTreeClassifier()

# Train the classifier on the training set
decision_tree_classifier.fit(training_inputs, training_classes)
# here we have a working classifier after the fit

# Validate the classifier on the testing set using classification accuracy
decision_tree_classifier.score(testing_inputs, testing_classes)
# -
# one miss out of the 38 test rows corresponds to this accuracy
1-1/38
# training accuracy — usually higher than test accuracy; a gap hints at overfitting
decision_tree_classifier.score(training_inputs, training_classes)
150*0.25
len(testing_inputs)
# How the accuracy score comes about: 37 of the 38 test rows were classified correctly
37/38
# Let's try a different model: SVM - Support Vector Machines
from sklearn import svm

# Fit a support-vector classifier and score it on the held-out test set.
# (The original notebook contained this exact cell twice in a row, refitting
# an identical model to identical data; the duplicate has been removed.)
svm_classifier = svm.SVC(gamma = 'scale')
svm_classifier.fit(training_inputs, training_classes)
svm_classifier.score(testing_inputs, testing_classes)
# Heck yeah! Our model achieves 97% classification accuracy without much effort.
#
# However, there's a catch: Depending on how our training and testing set was sampled, our model can achieve anywhere from 80% to 100% accuracy:
import matplotlib.pyplot as plt
# +
# here we randomly split data 1000 times in different training and test sets
model_accuracies = []

for repetition in range(1000):
    (training_inputs,
     testing_inputs,
     training_classes,
     testing_classes) = train_test_split(all_inputs, all_labels, test_size=0.25)
    # notice how we do not specify a seed so 1000 times we perform a random split
    decision_tree_classifier = DecisionTreeClassifier()
    decision_tree_classifier.fit(training_inputs, training_classes)
    classifier_accuracy = decision_tree_classifier.score(testing_inputs, testing_classes)
    model_accuracies.append(classifier_accuracy)

plt.hist(model_accuracies)
;
# -
plt.hist(model_accuracies, bins=10)
max(model_accuracies)
min(model_accuracies)
# worst observed run: 9 misses out of 38 test rows
1-9/38
from collections import Counter
# count how often each distinct accuracy value occurred across the 1000 runs
acc_count = Counter(model_accuracies)
acc_count
# each miss in the 38-row test set moves accuracy by 1/38 (~2.6 percentage points)
1/38
100/38
# It's obviously a problem that our model performs quite differently depending on the subset of the data it's trained on. This phenomenon is known as **overfitting**: The model is learning to classify the training set so well that it doesn't generalize and perform well on data it hasn't seen before.
#
# ### Cross-validation
#
# [[ go back to the top ]](#Table-of-contents)
#
# This problem is the main reason that most data scientists perform ***k*-fold cross-validation** on their models: Split the original data set into *k* subsets, use one of the subsets as the testing set, and the rest of the subsets are used as the training set. This process is then repeated *k* times such that each subset is used as the testing set exactly once.
#
# 10-fold cross-validation is the most common choice, so let's use that here. Performing 10-fold cross-validation on our data set looks something like this:
#
# (each square is an entry in our data set)
# peek at the cleaned data before illustrating the cross-validation folds
iris_data_clean.head(15)
iris_data_clean.tail()
# +
# new text
# +
import numpy as np
from sklearn.model_selection import StratifiedKFold

def plot_cv(cv, features, labels):
    """Visualize a cross-validation scheme.

    Draws one image row per fold; dark cells mark the rows that fold
    holds out for testing, so each column should be dark exactly once.
    """
    fold_masks = []
    for _train_idx, test_idx in cv.split(features, labels):
        held_out = np.zeros(len(labels), dtype=bool)
        held_out[test_idx] = 1
        fold_masks.append(held_out)

    plt.figure(figsize=(15, 15))
    plt.imshow(fold_masks, interpolation='none', cmap='gray_r')
    plt.ylabel('Fold')
    plt.xlabel('Row #')

plot_cv(StratifiedKFold(n_splits=10), all_inputs, all_labels)
# -
# You'll notice that we used **Stratified *k*-fold cross-validation** in the code above. Stratified *k*-fold keeps the class proportions the same across all of the folds, which is vital for maintaining a representative subset of our data set. (e.g., so we don't have 100% `Iris setosa` entries in one of the folds.)
#
# We can perform 10-fold cross-validation on our model with the following code:
from sklearn.model_selection import cross_val_score
# +
from sklearn.model_selection import cross_val_score

decision_tree_classifier = DecisionTreeClassifier()

# cross_val_score returns a list of the scores, which we can visualize
# to get a reasonable estimate of our classifier's performance
cv_scores = cross_val_score(decision_tree_classifier, all_inputs, all_labels, cv=10)
plt.hist(cv_scores)
plt.title('Average score: {}'.format(np.mean(cv_scores)))
;
# -
cv_scores
# with 10 folds of ~15 rows, one miss in a fold costs 1/15 of that fold's accuracy
1-1/15
len(all_inputs.T[1])
import scipy.stats as stats
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.entropy.html
# https://en.wikipedia.org/wiki/Entropy_(information_theory)
# NOTE: stats.entropy normalizes its input to a probability distribution first
print("Entropy for: ", stats.entropy(all_inputs.T[1]))
# below we iterate over every column of the inputs and print the entropy
# of the values in each column
def printEntropy(npdata):
    """Print the Shannon entropy of each column of a 2-D NumPy array.

    Iterates the transposed array so each iteration sees one column;
    scipy.stats.entropy normalizes the column to a distribution first.
    """
    for idx, column in enumerate(npdata.T):
        print("Entropy for column:", idx, stats.entropy(column))
printEntropy(all_inputs)  # per-column entropy of the four iris measurements
# Now we have a much more consistent rating of our classifier's general classification accuracy.
#
# ### Parameter tuning
#
# [[ go back to the top ]](#Table-of-contents)
#
# Every Machine Learning model comes with a variety of parameters to tune, and these parameters can be vitally important to the performance of our classifier. For example, if we severely limit the depth of our decision tree classifier:
# +
# A depth-1 tree can ask only a single yes/no question, which cripples accuracy
decision_tree_classifier = DecisionTreeClassifier(max_depth=1)
cv_scores = cross_val_score(decision_tree_classifier, all_inputs, all_labels, cv=10)
plt.hist(cv_scores)
plt.title('Average score: {}'.format(np.mean(cv_scores)))
;
# -
# the classification accuracy falls tremendously.
#
# Therefore, we need to find a systematic method to discover the best parameters for our model and data set.
#
# The most common method for model parameter tuning is **Grid Search**. The idea behind Grid Search is simple: explore a range of parameters and find the best-performing parameter combination. Focus your search on the best range of parameters, then repeat this process several times until the best parameters are discovered.
#
# Let's tune our decision tree classifier. We'll stick to only two parameters for now, but it's possible to simultaneously explore dozens of parameters if we want.
# +
# prepare to grid and to fit
from sklearn.model_selection import GridSearchCV

decision_tree_classifier = DecisionTreeClassifier()
# the parameters will depend on the model we use above
parameter_grid = {'max_depth': [1, 2, 3, 4, 5, 6, 7],
                  'max_features': [1, 2, 3, 4]}
# stratified folds keep class proportions equal in every fold
cross_validation = StratifiedKFold(n_splits=10)
grid_search = GridSearchCV(decision_tree_classifier,
                           param_grid=parameter_grid,
                           cv=cross_validation)
# +
# here the grid search will loop through all parameter combinations and fit the model to cross validated splits
grid_search.fit(all_inputs, all_labels)
print('Best score: {}'.format(grid_search.best_score_))
print('Best parameters: {}'.format(grid_search.best_params_))
# -
# Now let's visualize the grid search to see how the parameters interact.
# inspect the fitted GridSearchCV object and its per-combination mean scores
type(grid_search)
grid_search.estimator
grid_search.param_grid
type(grid_search.param_grid)
grid_search.cv
grid_search.cv_results_['mean_test_score']
cv_res = grid_search.cv_results_['mean_test_score']
cv_res.shape
import seaborn as sb
# +
grid_visualization = grid_search.cv_results_['mean_test_score']
# NOTE(review): the reshape to (7, 4) relies on the grid having exactly
# 7 max_depth values x 4 max_features values in that order — it must be
# updated if parameter_grid above changes
grid_visualization.shape = (7, 4)
sb.heatmap(grid_visualization, cmap='Oranges', annot=True)
plt.xticks(np.arange(4) + 0.5, grid_search.param_grid['max_features'])
plt.yticks(np.arange(7) + 0.5, grid_search.param_grid['max_depth'])
plt.xlabel('max_features')
plt.ylabel('max_depth')
plt.savefig("grid_heatmap.png")
;
# -
# Now we have a better sense of the parameter space: We know that we need a `max_depth` of at least 2 to allow the decision tree to make more than a one-off decision.
#
# `max_features` doesn't really seem to make a big difference here as long as we have 2 of them, which makes sense since our data set has only 4 features and is relatively easy to classify. (Remember, one of our data set's classes was easily separable from the rest based on a single feature.)
#
# Let's go ahead and use a broad grid search to find the best settings for a handful of parameters.
# +
# broader search: also tune the split criterion and splitter strategy
decision_tree_classifier = DecisionTreeClassifier()
parameter_grid = {'criterion': ['gini', 'entropy'],
                  'splitter': ['best', 'random'],
                  'max_depth': [1, 2, 3, 4, 5],
                  'max_features': [1, 2, 3, 4]}
cross_validation = StratifiedKFold(n_splits=10)
grid_search = GridSearchCV(decision_tree_classifier,
                           param_grid=parameter_grid,
                           cv=cross_validation)
grid_search.fit(all_inputs, all_labels)
print('Best score: {}'.format(grid_search.best_score_))
print('Best parameters: {}'.format(grid_search.best_params_))
# -
# roughly how many of the 149 rows the best CV score corresponds to
149*grid_search.best_score_
143/149
145/149
# Now we can take the best classifier from the Grid Search and use that:
# we pick the best one and save for now in a different variable
decision_tree_classifier = grid_search.best_estimator_
decision_tree_classifier
# We can even visualize the decision tree with [GraphViz](http://www.graphviz.org/) to see how it's making the classifications:
# +
import sklearn.tree as tree

# Export the tuned tree to GraphViz .dot format for visualization.
# The original `from sklearn.externals.six import StringIO` was removed:
# the sklearn.externals.six shim was dropped in scikit-learn 0.23, and
# StringIO was never used in this cell anyway.
with open('iris_dtc.dot', 'w') as out_file:
    out_file = tree.export_graphviz(decision_tree_classifier, out_file=out_file)
# -
# <img src="img/iris_dtc.png" />
# (This classifier may look familiar from earlier in the notebook.)
#
# Alright! We finally have our demo classifier. Let's create some visuals of its performance so we have something to show our company's Head of Data.
decision_tree_classifier
# +
# box + strip plot of the tuned tree's 10-fold CV accuracies
dt_scores = cross_val_score(decision_tree_classifier, all_inputs, all_labels, cv=10)
sb.boxplot(dt_scores)
sb.stripplot(dt_scores, jitter=True, color='orange')
;
# -
# Hmmm... that's a little boring by itself though. How about we compare another classifier to see how they perform?
#
# We already know from previous projects that Random Forest classifiers usually work better than individual decision trees. A common problem that decision trees face is that they're prone to overfitting: They complexify to the point that they classify the training set near-perfectly, but fail to generalize to data they have not seen before.
#
# **Random Forest classifiers** work around that limitation by creating a whole bunch of decision trees (hence "forest") — each trained on random subsets of training samples (drawn with replacement) and features (drawn without replacement) — and have the decision trees work together to make a more accurate classification.
#
# Let that be a lesson for us: **Even in Machine Learning, we get better results when we work together!**
#
# Let's see if a Random Forest classifier works better here.
#
# The great part about scikit-learn is that the training, testing, parameter tuning, etc. process is the same for all models, so we only need to plug in the new classifier.
from sklearn.ensemble import RandomForestClassifier
# +
from sklearn.ensemble import RandomForestClassifier

# same grid-search recipe as for the decision tree, now tuning a random forest
random_forest_classifier = RandomForestClassifier()
parameter_grid = {'n_estimators': [10, 25, 50, 100],
                  'criterion': ['gini', 'entropy'],
                  'max_features': [1, 2, 3, 4]}
cross_validation = StratifiedKFold(n_splits=10)
grid_search = GridSearchCV(random_forest_classifier,
                           param_grid=parameter_grid,
                           cv=cross_validation)
grid_search.fit(all_inputs, all_labels)
print('Best score: {}'.format(grid_search.best_score_))
print('Best parameters: {}'.format(grid_search.best_params_))
grid_search.best_estimator_
# -
# Now we can compare their performance:
# +
random_forest_classifier = grid_search.best_estimator_
rf_df = pd.DataFrame({'accuracy': cross_val_score(random_forest_classifier, all_inputs, all_labels, cv=10),
'classifier': ['Random Forest'] * 10})
dt_df = pd.DataFrame({'accuracy': cross_val_score(decision_tree_classifier, all_inputs, all_labels, cv=10),
'classifier': ['Decision Tree'] * 10})
both_df = rf_df.append(dt_df)
both_df.head()
# -
both_df
# +
sb.boxplot(x='classifier', y='accuracy', data=both_df)
sb.stripplot(x='classifier', y='accuracy', data=both_df, jitter=True, color='orange')
;
# -
# How about that? They both seem to perform about the same on this data set. This is probably because of the limitations of our data set: We have only 4 features to make the classification, and Random Forest classifiers excel when there's hundreds of possible features to look at. In other words, there wasn't much room for improvement with this data set.
# ## Step 6: Reproducibility
#
# [[ go back to the top ]](#Table-of-contents)
#
# Ensuring that our work is reproducible is the last and — arguably — most important step in any analysis. **As a rule, we shouldn't place much weight on a discovery that can't be reproduced**. As such, if our analysis isn't reproducible, we might as well not have done it.
#
# Notebooks like this one go a long way toward making our work reproducible. Since we documented every step as we moved along, we have a written record of what we did and why we did it — both in text and code.
#
# Beyond recording what we did, we should also document what software and hardware we used to perform our analysis. This typically goes at the top of our notebooks so our readers know what tools to use.
#
# [<NAME>](http://sebastianraschka.com/) created a handy [notebook tool](https://github.com/rasbt/watermark) for this:
# !pip install watermark
# %load_ext watermark
# NOTE(review): pd.show_versions() prints its report to stdout and returns
# None, so myversions ends up holding None rather than the version info.
myversions = pd.show_versions()
myversions
# %watermark -a 'RCS_12' -nmv --packages numpy,pandas,sklearn,matplotlib,seaborn
# Finally, let's extract the core of our work from Steps 1-5 and turn it into a single pipeline.
# +
# %matplotlib inline
# Self-contained pipeline: load clean data, validate it, cross-validate the
# tuned random forest, and show sample predictions.
import pandas as pd
import seaborn as sb

from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, cross_val_score

# We can jump directly to working with the clean data because we saved our cleaned data set
iris_data_clean = pd.read_csv('../data/iris-data-clean.csv')

# Testing our data: Our analysis will stop here if any of these assertions are wrong

# We know that we should only have three classes
assert len(iris_data_clean['class'].unique()) == 3

# We know that sepal lengths for 'Iris-versicolor' should never be below 2.5 cm
assert iris_data_clean.loc[iris_data_clean['class'] == 'Iris-versicolor', 'sepal_length_cm'].min() >= 2.5

# We know that our data set should have no missing measurements
assert len(iris_data_clean.loc[(iris_data_clean['sepal_length_cm'].isnull()) |
                               (iris_data_clean['sepal_width_cm'].isnull()) |
                               (iris_data_clean['petal_length_cm'].isnull()) |
                               (iris_data_clean['petal_width_cm'].isnull())]) == 0

# get inputs and labels in NumPY (out of Pandas dataframe)
all_inputs = iris_data_clean[['sepal_length_cm', 'sepal_width_cm',
                              'petal_length_cm', 'petal_width_cm']].values

all_labels = iris_data_clean['class'].values

# This is the classifier that came out of Grid Search
random_forest_classifier = RandomForestClassifier(criterion='gini', max_features=3, n_estimators=50)

# All that's left to do now is plot the cross-validation scores
rf_classifier_scores = cross_val_score(random_forest_classifier, all_inputs, all_labels, cv=10)
sb.boxplot(rf_classifier_scores)
sb.stripplot(rf_classifier_scores, jitter=True, color='black')

# ...and show some of the predictions from the classifier
(training_inputs,
 testing_inputs,
 training_classes,
 testing_classes) = train_test_split(all_inputs, all_labels, test_size=0.25)

random_forest_classifier.fit(training_inputs, training_classes)

for input_features, prediction, actual in zip(testing_inputs[:10],
                                              random_forest_classifier.predict(testing_inputs[:10]),
                                              testing_classes[:10]):
    print('{}\t-->\t{}\t(Actual: {})'.format(input_features, prediction, actual))
# -
len(testing_inputs)
# Print every test-set prediction, flagging the rows the model got wrong
for input_features, prediction, actual in zip(testing_inputs,
                                              random_forest_classifier.predict(testing_inputs),
                                              testing_classes):
    if (prediction == actual):
        print('{}\t-->\t{}\t(Actual: {})'.format(input_features, prediction, actual))
    else:
        print('!!!!!MISMATCH***{}\t-->\t{}\t(Actual: {})'.format(input_features, prediction, actual))
# NOTE(review): findMismatches is only defined in a later cell below — this
# cell raises NameError on a fresh top-to-bottom run; run the defining cell
# first, or move the definition above this call.
mismatches = findMismatches(all_inputs, all_labels, random_forest_classifier)
mismatches
# overall accuracy on the full (train + test) data set
random_forest_classifier.score(all_inputs, all_labels)
def findMismatches(inputs, answers, classifier):
    """Return the rows the classifier gets wrong.

    Each element of the returned list is [features, expected_label,
    predicted_label] for one misclassified row; an empty list means the
    classifier matched every provided answer.
    """
    predictions = classifier.predict(inputs)
    return [
        [features, expected, predicted]
        for features, expected, predicted in zip(inputs, answers, predictions)
        if expected != predicted
    ]
# (scratch cell) quick reminder of plain list iteration
numbers = [1,2,5,6,6,6]
for number in numbers:
    print(number)
# 146 correct out of 149 rows matches the score printed above
146/149
# +
# %matplotlib inline
import pandas as pd
import seaborn as sb
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, cross_val_score
def processData(filename):
    """Run the full iris pipeline on the CSV at `filename`.

    Loads and sanity-checks the cleaned data, cross-validates the tuned
    random forest, plots the fold scores, fits on a fresh 75/25 split and
    prints ten sample predictions, then returns the array of 10 CV
    accuracy scores.
    """
    # We can jump directly to working with the clean data because we saved our cleaned data set
    iris_data_clean = pd.read_csv(filename)

    # Testing our data: Our analysis will stop here if any of these assertions are wrong

    # We know that we should only have three classes
    assert len(iris_data_clean['class'].unique()) == 3

    # We know that sepal lengths for 'Iris-versicolor' should never be below 2.5 cm
    assert iris_data_clean.loc[iris_data_clean['class'] == 'Iris-versicolor', 'sepal_length_cm'].min() >= 2.5

    # We know that our data set should have no missing measurements
    assert len(iris_data_clean.loc[(iris_data_clean['sepal_length_cm'].isnull()) |
                                   (iris_data_clean['sepal_width_cm'].isnull()) |
                                   (iris_data_clean['petal_length_cm'].isnull()) |
                                   (iris_data_clean['petal_width_cm'].isnull())]) == 0

    all_inputs = iris_data_clean[['sepal_length_cm', 'sepal_width_cm',
                                  'petal_length_cm', 'petal_width_cm']].values

    all_labels = iris_data_clean['class'].values

    # This is the classifier that came out of Grid Search
    random_forest_classifier = RandomForestClassifier(criterion='gini', max_features=3, n_estimators=50)

    # All that's left to do now is plot the cross-validation scores
    rf_classifier_scores = cross_val_score(random_forest_classifier, all_inputs, all_labels, cv=10)
    sb.boxplot(rf_classifier_scores)
    sb.stripplot(rf_classifier_scores, jitter=True, color='black')

    # ...and show some of the predictions from the classifier
    (training_inputs,
     testing_inputs,
     training_classes,
     testing_classes) = train_test_split(all_inputs, all_labels, test_size=0.25)

    random_forest_classifier.fit(training_inputs, training_classes)

    for input_features, prediction, actual in zip(testing_inputs[:10],
                                                  random_forest_classifier.predict(testing_inputs[:10]),
                                                  testing_classes[:10]):
        print('{}\t-->\t{}\t(Actual: {})'.format(input_features, prediction, actual))
    return rf_classifier_scores
# -
# run the whole pipeline end to end and inspect the returned CV scores
myscores = processData('../data/iris-data-clean.csv')
type(myscores)
myscores.max()
myscores[:5]
# There we have it: We have a complete and reproducible Machine Learning pipeline to demo to our company's Head of Data. We've met the success criteria that we set from the beginning (>90% accuracy), and our pipeline is flexible enough to handle new inputs or flowers when that data set is ready. Not bad for our first week on the job!
# ## Conclusions
#
# [[ go back to the top ]](#Table-of-contents)
#
# I hope you found this example notebook useful for your own work and learned at least one new trick by reading through it.
#
#
# * [Submit an issue](https://github.com/ValRCS/LU-pysem/issues) on GitHub
#
# * Fork the [notebook repository](https://github.com/ValRCS/LU-pysem), make the fix/addition yourself, then send over a pull request
# ## Further reading
#
# [[ go back to the top ]](#Table-of-contents)
#
# This notebook covers a broad variety of topics but skips over many of the specifics. If you're looking to dive deeper into a particular topic, here's some recommended reading.
#
# **Data Science**: <NAME> compiled a [list of free books](http://www.wzchen.com/data-science-books/) for newcomers to Data Science, ranging from the basics of R & Python to Machine Learning to interviews and advice from prominent data scientists.
#
# **Machine Learning**: /r/MachineLearning has a useful [Wiki page](https://www.reddit.com/r/MachineLearning/wiki/index) containing links to online courses, books, data sets, etc. for Machine Learning. There's also a [curated list](https://github.com/josephmisiti/awesome-machine-learning) of Machine Learning frameworks, libraries, and software sorted by language.
#
# **Unit testing**: Dive Into Python 3 has a [great walkthrough](http://www.diveintopython3.net/unit-testing.html) of unit testing in Python, how it works, and how it should be used
#
# **pandas** has [several tutorials](http://pandas.pydata.org/pandas-docs/stable/tutorials.html) covering its myriad features.
#
# **scikit-learn** has a [bunch of tutorials](http://scikit-learn.org/stable/tutorial/index.html) for those looking to learn Machine Learning in Python. <NAME>'s [scikit-learn workshop materials](https://github.com/amueller/scipy_2015_sklearn_tutorial) are top-notch and freely available.
#
# **matplotlib** has many [books, videos, and tutorials](http://matplotlib.org/resources/index.html) to teach plotting in Python.
#
# **Seaborn** has a [basic tutorial](http://stanford.edu/~mwaskom/software/seaborn/tutorial.html) covering most of the statistical plotting features.
# ## Acknowledgements
#
# [[ go back to the top ]](#Table-of-contents)
#
# Many thanks to [<NAME>](http://amueller.github.io/) for some of his [examples](https://github.com/amueller/scipy_2015_sklearn_tutorial) in the Machine Learning section. I drew inspiration from several of his excellent examples.
#
# The photo of a flower with annotations of the petal and sepal was taken by [<NAME>](https://commons.wikimedia.org/wiki/File:Petal-sepal.jpg).
#
# The photos of the various *Iris* flower types were taken by [<NAME>](http://www.signa.org/index.pl?Display+Iris-setosa+2) and [<NAME>](http://www.signa.org/index.pl?Display+Iris-virginica+3).
# ## Further questions?
#
# Feel free to contact [<NAME>ulespurens]
# (email:<EMAIL>)
| Machine_Learning_intro_scikit-learn/Irises Data Analysis Workflow_classwork_2019_12.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <NAME> Manufacturing ML Project
# Importing necessary Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
# NOTE(review): both files should live in the same project directory; the
# test.csv path below looks garbled ("Mer<NAME> Manufacturing") compared to
# the train path — verify it before running.
train = pd.read_csv(r"C:\D-Drive\Data Science\Machine Learning\Projects\Mercedes-Benz Greener Manufacturing\train.csv")
test = pd.read_csv(r"C:\D-Drive\Data Science\Machine Learning\Projects\Mer<NAME> Manufacturing\test.csv")
train.head()
test.head()
# # EDA of datasets
print("Type of Test data",type(test))
print("Type of Train data",type(train))
test.columns
train.columns
test.shape
# It means the data set has 4209 rows and 377 columns
train.shape
# NOTE(review): isna().any() is per-column, so this sums the number of
# COLUMNS containing any null — not the total count of null cells
print("Sum of NULL values present in train dataset:",train.isna().any().sum())
print("Sum of NULL values present in test dataset:",test.isna().any().sum())
# Visualizing the y distribution of the dataset, as y is the output variable
plt.figure(figsize=(6,6))
# sns.distplot was deprecated in seaborn 0.11 and removed in 0.14;
# histplot with the same bins reproduces the kde=False count histogram.
sns.histplot(train.y.values, bins=50)
plt.xlabel('y value', fontsize=12)
plt.show()
# count the unique values in every column (binary features have exactly 2)
train_UV=pd.DataFrame(train.nunique(),columns=['Unique'])
train_UV
# columns that have more than 2 unique values (i.e. non-binary features)
print('Train Features with unique values greater than 2\n',train_UV[train_UV.Unique >2].unstack())
# checking the same in the test dataset
test_UV=pd.DataFrame(test.nunique(),columns=['Unique'])
test_UV
print('Test Features with unique values greater than 2\n',test_UV[test_UV.Unique >2].unstack())
# checking the column-wise variance of the dataset
# numeric_only=True skips the object-dtype columns (X0-X8): older pandas
# silently dropped them, while pandas >= 2.0 raises a TypeError without it.
train_var=pd.DataFrame(train.var(axis=0, numeric_only=True),columns=['Variance'])
train_var
# function to list the zero-variance (constant) columns of a DataFrame
def features_zero_var(df):
    """Return a one-column DataFrame (column 'Variance', indexed by feature
    name) listing the columns of `df` whose variance is exactly zero —
    constant columns that carry no predictive signal.

    numeric_only=True keeps this working on frames that also contain
    object-dtype columns: older pandas skipped them silently, while
    pandas >= 2.0 raises a TypeError without the flag.
    """
    df_var = pd.DataFrame(df.var(axis=0, numeric_only=True), columns=['Variance'])
    return df_var[df_var.Variance == 0]
features_zero_var(train)
# drop the zero-variance columns (constant noise) plus the ID column
train_new= train.drop(columns=train_var[train_var.Variance==0].index)
train_new.drop('ID', axis='columns', inplace=True)
# As so many column have zero variance we will delete it, as it produces noise in the dataset
train_new
train_new.shape
# checking the variance in the test dataset
# NOTE(review): like train.var above, this needs numeric_only=True under
# pandas >= 2.0 because test still contains object-dtype columns
test_var=pd.DataFrame(test.var(axis=0),columns=['Variance'])
test_var
features_zero_var(test)
# NOTE(review): this column list is hard-coded; safer to reuse
# test_var[test_var.Variance==0].index as was done for train above
test_new= test.drop(columns=['ID','X11', 'X93', 'X107','X233', 'X235', 'X268', 'X289', 'X290', 'X293','X297','X330','X347'])
test_new.shape
## Checking categorical (object-dtype) columns in the train DataFrame
train_new.describe(include=object)
#As the categorical data is not numerical so we need to do label encoding
from sklearn.preprocessing import LabelEncoder
def label_encoder(df):
    """Label-encode `df` in place.

    Every object-dtype column is replaced with its integer label encoding;
    a fresh fit is performed per column, so codes follow each column's own
    sorted unique values. Columns already numeric are left untouched.
    """
    encoder = LabelEncoder()
    for column in df.select_dtypes(include='object').columns:
        df[column] = encoder.fit_transform(df[column])
label_encoder(train_new)
train_new.head()
## Checking categorical columns in the TEST DataFrame (the original comment
## said "train", but this line inspects test_new, which is still unencoded here)
test_new.describe(include=object)
# # Visualization the Categorical variable w.r.t y(outcome variable)
# The original notebook repeated one near-identical cell per categorical
# feature (X0..X6, X8), differing only in the feature name and the seaborn
# plot kind. The shared logic is factored into a helper; the calls below
# reproduce the same plots in the same order.
def _plot_y_distribution(var, plot_func):
    """Plot the target y against categorical feature `var` using the given
    seaborn categorical plot function (stripplot/boxplot/violinplot)."""
    colu_order = np.sort(train_new[var].unique()).tolist()
    plt.figure(figsize=(12,6))
    plot_func(x=var, y="y", data=train_new, order=colu_order)
    plt.xlabel(var, fontsize=12)
    plt.ylabel("y", fontsize=12)
    plt.title("Distribution of y variable with "+var, fontsize=15)
    plt.show()

_plot_y_distribution("X0", sns.stripplot)
_plot_y_distribution("X1", sns.boxplot)
_plot_y_distribution("X2", sns.stripplot)
_plot_y_distribution("X3", sns.violinplot)
_plot_y_distribution("X4", sns.violinplot)
_plot_y_distribution("X5", sns.stripplot)
_plot_y_distribution("X6", sns.boxplot)
_plot_y_distribution("X8", sns.boxplot)
# # As the categorical variables are in string , so we have to do label encoding
from sklearn.preprocessing import LabelEncoder
# NOTE(review): train_new was already label-encoded earlier, so this second
# call finds no object columns in it and is effectively a no-op for train_new
label_encoder(train_new)
train_new.head()
label_encoder(test_new)
test_new.head()
# separate features (X) from the target column y
x_train = train_new.drop('y',axis=1)
y_train = train_new['y'].values
# # Dimensionality Reduction using PCA
from sklearn.decomposition import PCA
# project the encoded feature matrix down to 10 principal components
pca = PCA(n_components= 10)
pca.fit(x_train)
pca_x_train= pca.transform(x_train)
pca_x_train.shape
x_test = test_new
# apply the projection fitted on train to the test features
pca_x_test = pca.transform(x_test)
pca_x_test.shape
from sklearn.model_selection import train_test_split
# NOTE(review): x_train/y_train are re-bound here to the 75% split of the
# PCA-transformed data — the pre-split arrays are no longer reachable
# under those names after this line
x_train,x_test,y_train,y_test= train_test_split(pca_x_train,y_train,test_size=0.25,random_state=100)
# # Modeling and testing the data using XGBOOST
import xgboost as xgb
from sklearn.metrics import r2_score
# Building the final feature set
# DMatrix is XGBoost's optimized internal data container
f_train = xgb.DMatrix(x_train, label = y_train)
f_valid = xgb.DMatrix(x_test, label = y_test)
f_test = xgb.DMatrix(pca_x_test)
# Setting the parameters for XGB
params = {}
# 'reg:linear' was deprecated in XGBoost 0.83 and later removed;
# 'reg:squarederror' is the same squared-error regression objective.
params['objective'] = 'reg:squarederror'
params['eta'] = 0.02        # learning rate per boosting round
params['max_depth'] = 4     # cap tree depth to limit overfitting
# +
# Predicting the score
# Creating a function for the same
def scorer(m, w):
labels = w.get_label()
return 'r2', r2_score(labels, m)
final_set = [(f_train, 'train'), (f_valid, 'pred')]
P = xgb.train(params, f_train, 1000, final_set, early_stopping_rounds=50, feval=scorer, maximize=True, verbose_eval=10)
# -
# Predicting on test set
p_test = P.predict(f_test)
p_test
Predicted_Data = pd.DataFrame()
Predicted_Data['y'] = p_test
Predicted_Data.head()
| Mercedes-Benz Greener Manufacturing project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="eSA4DnL3itZG"
# _Lambda School Data Science, Classification 1_
#
# This sprint, your project is about water pumps in Tanzania. Can you predict which water pumps are faulty?
#
# # Logistic Regression, One-Hot Encoding
#
# #### Objectives
# - begin with baselines for classification
# - use classification metric: accuracy
# - do train/validate/test split
# - use scikit-learn for logistic regression
# - submit to predictive modeling competitions
# + [markdown] colab_type="text" id="mCi5ZScvoFb6"
# ## Get ready
# + [markdown] colab_type="text" id="eYW8zY1Zn_h-"
# ### Get started on Kaggle
# 1. [Sign up for a Kaggle account](https://www.kaggle.com/), if you don’t already have one.
# 2. Go to our Kaggle InClass competition website. You will be given the URL in Slack.
# https://www.kaggle.com/t/eccea810e6844cbf8f382db8b62e2640
# 3. Go to the Rules page. Accept the rules of the competition.
# + [markdown] colab_type="text" id="dFozuC-7jFiw"
# ### Install [category_encoders](http://contrib.scikit-learn.org/categorical-encoding/) (version 2+)
# - Local Anaconda: `conda install -c conda-forge category_encoders`
# - Google Colab: `pip install category_encoders`
# -
# ### Install [pandas-profiling](https://github.com/pandas-profiling/pandas-profiling) (version 2+)
# - `pip install --upgrade pandas-profiling`
# + [markdown] colab_type="text" id="3_1DyzxZje5X"
# ## Get data
#
# ### Option 1. Github
#
# Get data from our [GitHub repo](https://github.com/LambdaSchool/DS-Unit-2-Linear-Models/tree/master/data/tanzania).
# - [train_features.csv](https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Linear-Models/master/data/tanzania/train_features.csv)
# - [train_labels.csv](https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Linear-Models/master/data/tanzania/train_labels.csv)
# - [test_features.csv](https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Linear-Models/master/data/tanzania/test_features.csv)
# - [sample_submission.csv](https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Linear-Models/master/data/tanzania/sample_submission.csv)
#
#
# ### Option 2. Kaggle web UI
#
# Go to our Kaggle InClass competition webpage. Go to the Data page. After you have accepted the rules of the competition, use the download buttons to download the data.
#
#
# ### Option 3. Kaggle API
#
# 1. [Follow these instructions](https://github.com/Kaggle/kaggle-api#api-credentials) to create a Kaggle “API Token” and download your `kaggle.json` file.
#
# 2. Put `kaggle.json` in the correct location.
#
# - If you're using Anaconda, put the file in the directory specified in the [instructions](https://github.com/Kaggle/kaggle-api#api-credentials).
#
# - If you're using Google Colab, upload the file to your Google Drive, and run this cell:
#
# ```
# from google.colab import drive
# drive.mount('/content/drive')
# # %env KAGGLE_CONFIG_DIR=/content/drive/My Drive/
# ```
#
# 3. Install the Kaggle API package.
# ```
# pip install kaggle
# ```
#
# 4. After you have accepted the rules of the competition, use the Kaggle API package to get the data.
# ```
# kaggle competitions download -c COMPETITION-NAME
# ```
#
# + [markdown] colab_type="text" id="Ph7_ka3DrjzA"
# ## Read data
# - `train_features.csv` : the training set features
# - `train_labels.csv` : the training set labels
# - `test_features.csv` : the test set features
# - `sample_submission.csv` : a sample submission file in the correct format
# + colab={} colab_type="code" id="nhiIa4x_pEPD"
import pandas as pd

# LOCAL works if the course repo is cloned alongside this notebook;
# WEB always works with an internet connection.
LOCAL = '../data/tanzania/'
WEB = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Linear-Models/master/data/tanzania/'

def _fetch(name):
    """Read one of the competition CSVs straight from GitHub."""
    return pd.read_csv(WEB + name + '.csv')

train_features = _fetch('train_features')
train_labels = _fetch('train_labels')
test_features = _fetch('test_features')
sample_submission = _fetch('sample_submission')

# Sanity-check the expected row/column counts.
assert train_features.shape == (59400, 40)
assert train_labels.shape == (59400, 2)
assert test_features.shape == (14358, 40)
assert sample_submission.shape == (14358, 2)
# + [markdown] colab_type="text" id="ScT5oOhCraOO"
# ### Features
#
# Your goal is to predict the operating condition of a waterpoint for each record in the dataset. You are provided the following set of information about the waterpoints:
#
# - `amount_tsh` : Total static head (amount water available to waterpoint)
# - `date_recorded` : The date the row was entered
# - `funder` : Who funded the well
# - `gps_height` : Altitude of the well
# - `installer` : Organization that installed the well
# - `longitude` : GPS coordinate
# - `latitude` : GPS coordinate
# - `wpt_name` : Name of the waterpoint if there is one
# - `num_private` :
# - `basin` : Geographic water basin
# - `subvillage` : Geographic location
# - `region` : Geographic location
# - `region_code` : Geographic location (coded)
# - `district_code` : Geographic location (coded)
# - `lga` : Geographic location
# - `ward` : Geographic location
# - `population` : Population around the well
# - `public_meeting` : True/False
# - `recorded_by` : Group entering this row of data
# - `scheme_management` : Who operates the waterpoint
# - `scheme_name` : Who operates the waterpoint
# - `permit` : If the waterpoint is permitted
# - `construction_year` : Year the waterpoint was constructed
# - `extraction_type` : The kind of extraction the waterpoint uses
# - `extraction_type_group` : The kind of extraction the waterpoint uses
# - `extraction_type_class` : The kind of extraction the waterpoint uses
# - `management` : How the waterpoint is managed
# - `management_group` : How the waterpoint is managed
# - `payment` : What the water costs
# - `payment_type` : What the water costs
# - `water_quality` : The quality of the water
# - `quality_group` : The quality of the water
# - `quantity` : The quantity of water
# - `quantity_group` : The quantity of water
# - `source` : The source of the water
# - `source_type` : The source of the water
# - `source_class` : The source of the water
# - `waterpoint_type` : The kind of waterpoint
# - `waterpoint_type_group` : The kind of waterpoint
#
# ### Labels
#
# There are three possible values:
#
# - `functional` : the waterpoint is operational and there are no repairs needed
# - `functional needs repair` : the waterpoint is operational, but needs repairs
# - `non functional` : the waterpoint is not operational
# + [markdown] colab_type="text" id="LIWeFmyWswtB"
# ## Why doesn't Kaggle give you labels for the test set?
#
# #### Rachel Thomas, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/)
#
# > One great thing about Kaggle competitions is that they force you to think about validation sets more rigorously (in order to do well). For those who are new to Kaggle, it is a platform that hosts machine learning competitions. Kaggle typically breaks the data into two sets you can download:
#
# > 1. a **training set**, which includes the _independent variables_, as well as the _dependent variable_ (what you are trying to predict).
#
# > 2. a **test set**, which just has the _independent variables_. You will make predictions for the test set, which you can submit to Kaggle and get back a score of how well you did.
#
# > This is the basic idea needed to get started with machine learning, but to do well, there is a bit more complexity to understand. You will want to create your own training and validation sets (by splitting the Kaggle “training” data). You will just use your smaller training set (a subset of Kaggle’s training data) for building your model, and you can evaluate it on your validation set (also a subset of Kaggle’s training data) before you submit to Kaggle.
#
# > The most important reason for this is that Kaggle has split the test data into two sets: for the public and private leaderboards. The score you see on the public leaderboard is just for a subset of your predictions (and you don’t know which subset!). How your predictions fare on the private leaderboard won’t be revealed until the end of the competition. The reason this is important is that you could end up overfitting to the public leaderboard and you wouldn’t realize it until the very end when you did poorly on the private leaderboard. Using a good validation set can prevent this. You can check if your validation set is any good by seeing if your model has similar scores on it to compared with on the Kaggle test set. ...
#
# > Understanding these distinctions is not just useful for Kaggle. In any predictive machine learning project, you want your model to be able to perform well on new data.
# + [markdown] colab_type="text" id="LVZMzBqwvTdD"
# ## Why hold out an independent test set?
#
# #### Owen Zhang, [Winning Data Science Competitions](https://www.slideshare.net/OwenZhang2/tips-for-data-science-competitions)
#
# > There are many ways to overfit. Beware of "multiple comparison fallacy." There is a cost in "peeking at the answer."
#
# > Good validation is _more important_ than good models. Simple training/validation split is _not_ enough. When you looked at your validation result for the Nth time, you are training models on it.
#
# > If possible, have "holdout" dataset that you do not touch at all during model build process. This includes feature extraction, etc.
#
# > What if holdout result is bad? Be brave and scrap the project.
#
# #### Hastie, Tibshirani, and Friedman, [The Elements of Statistical Learning](http://statweb.stanford.edu/~tibs/ElemStatLearn/), Chapter 7: Model Assessment and Selection
#
# > If we are in a data-rich situation, the best approach is to randomly divide the dataset into three parts: a training set, a validation set, and a test set. The training set is used to fit the models; the validation set is used to estimate prediction error for model selection; the test set is used for assessment of the generalization error of the final chosen model. Ideally, the test set should be kept in a "vault," and be brought out only at the end of the data analysis. Suppose instead that we use the test-set repeatedly, choosing the model with the smallest test-set error. Then the test set error of the final chosen model will underestimate the true test error, sometimes substantially.
#
# #### Andreas C. Müller and Sarah Guido, [Introduction to Machine Learning with Python](https://books.google.com/books?id=1-4lDQAAQBAJ&pg=PA270)
#
# > The distinction between the training set, validation set, and test set is fundamentally important to applying machine learning methods in practice. Any choices made based on the test set accuracy "leak" information from the test set into the model. Therefore, it is important to keep a separate test set, which is only used for the final evaluation. It is good practice to do all exploratory analysis and model selection using the combination of a training and a validation set, and reserve the test set for a final evaluation - this is even true for exploratory visualization. Strictly speaking, evaluating more than one model on the test set and choosing the better of the two will result in an overly optimistic estimate of how accurate the model is.
#
# #### Hadley Wickham, [R for Data Science](https://r4ds.had.co.nz/model-intro.html#hypothesis-generation-vs.hypothesis-confirmation)
#
# > There is a pair of ideas that you must understand in order to do inference correctly:
#
# > 1. Each observation can either be used for exploration or confirmation, not both.
#
# > 2. You can use an observation as many times as you like for exploration, but you can only use it once for confirmation. As soon as you use an observation twice, you’ve switched from confirmation to exploration.
#
# > This is necessary because to confirm a hypothesis you must use data independent of the data that you used to generate the hypothesis. Otherwise you will be over optimistic. There is absolutely nothing wrong with exploration, but you should never sell an exploratory analysis as a confirmatory analysis because it is fundamentally misleading.
#
# > If you are serious about doing an confirmatory analysis, one approach is to split your data into three pieces before you begin the analysis.
# + [markdown] colab_type="text" id="tG74jmbKrsj-"
# ## Begin with baselines for classification
# + [markdown] colab_type="text" id="QKCDx07WxXZj"
# ### Get majority class baseline
#
# [Will Koehrsen](https://twitter.com/koehrsen_will/status/1088863527778111488)
#
# > A baseline for classification can be the most common class in the training dataset.
#
# [*Data Science for Business*](https://books.google.com/books?id=4ZctAAAAQBAJ&pg=PT276), Chapter 7.3: Evaluation, Baseline Performance, and Implications for Investments in Data
#
# > For classification tasks, one good baseline is the _majority classifier_, a naive classifier that always chooses the majority class of the training dataset (see Note: Base rate in Holdout Data and Fitting Graphs). This may seem like advice so obvious it can be passed over quickly, but it is worth spending an extra moment here. There are many cases where smart, analytical people have been tripped up in skipping over this basic comparison. For example, an analyst may see a classification accuracy of 94% from her classifier and conclude that it is doing fairly well—when in fact only 6% of the instances are positive. So, the simple majority prediction classifier also would have an accuracy of 94%.
# + [markdown] colab_type="text" id="nRnL7Bw12YZo"
# #### Determine majority class
# + colab={} colab_type="code" id="6D6UZ1XJxTpj"
# + [markdown] colab_type="text" id="Hl8qcAgp2bKC"
# #### What if we guessed the majority class for every prediction?
# + colab={} colab_type="code" id="sNhv3xPc2GHl"
# + [markdown] colab_type="text" id="2WWkumm3rwdb"
# ## Use classification metric: accuracy
#
# #### [_Classification metrics are different from regression metrics!_](https://scikit-learn.org/stable/modules/model_evaluation.html)
# - Don't use _regression_ metrics to evaluate _classification_ tasks.
# - Don't use _classification_ metrics to evaluate _regression_ tasks.
#
# [Accuracy](https://scikit-learn.org/stable/modules/model_evaluation.html#accuracy-score) is a common metric for classification. Accuracy is the ["proportion of correct classifications"](https://en.wikipedia.org/wiki/Confusion_matrix): the number of correct predictions divided by the total number of predictions.
# + [markdown] colab_type="text" id="p7TYYqJT28f1"
# #### What is the baseline accuracy if we guessed the majority class for every prediction?
# + colab={} colab_type="code" id="IhhM1vAd2s0b"
# + [markdown] colab_type="text" id="Y2OLlsMar1c3"
# ## Do train/validate/test split
# + [markdown] colab_type="text" id="Pq01q_kp3QKd"
# #### Rachel Thomas, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/)
#
# > You will want to create your own training and validation sets (by splitting the Kaggle “training” data). You will just use your smaller training set (a subset of Kaggle’s training data) for building your model, and you can evaluate it on your validation set (also a subset of Kaggle’s training data) before you submit to Kaggle.
#
# #### Sebastian Raschka, [Model Evaluation](https://sebastianraschka.com/blog/2018/model-evaluation-selection-part4.html)
#
# > Since “a picture is worth a thousand words,” I want to conclude with a figure (shown below) that summarizes my personal recommendations ...
#
# <img src="https://sebastianraschka.com/images/blog/2018/model-evaluation-selection-part4/model-eval-conclusions.jpg" width="600">
#
# + [markdown] colab_type="text" id="M1tGjw9_4u0r"
#
# Usually, we want to do **"Model selection (hyperparameter optimization) _and_ performance estimation."**
#
# Therefore, we use **"3-way holdout method (train/validation/test split)"** or we use **"cross-validation with independent test set."**
# + [markdown] colab_type="text" id="1JkSL6K14ry6"
# #### We have two options for where we choose to split:
# - Time
# - Random
#
# To split on time, we can use pandas.
#
# To split randomly, we can use the [**`sklearn.model_selection.train_test_split`**](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) function.
# + colab={} colab_type="code" id="86bG7yPe5aXI"
# + [markdown] colab_type="text" id="kOdIbMMCr4Nc"
# ## Use scikit-learn for logistic regression
# - [sklearn.linear_model.LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html)
# - Wikipedia, [Logistic regression](https://en.wikipedia.org/wiki/Logistic_regression)
# + [markdown] colab_type="text" id="RIiTQPQ_8bDX"
# ### Begin with baselines: fast, first models
#
# #### Drop non-numeric features
# + colab={} colab_type="code" id="OEUujvzH7pBO"
# + [markdown] colab_type="text" id="5cVaFgL_8lZl"
# #### Drop nulls if necessary
# + colab={} colab_type="code" id="FAkDFto77qec"
# + [markdown] colab_type="text" id="xMJL579p8tSM"
# #### Fit Logistic Regresson on train data
# + colab={} colab_type="code" id="2pEyqCGy7-kZ"
# + [markdown] colab_type="text" id="WyIUh-th9Bnw"
# #### Evaluate on validation data
# + colab={} colab_type="code" id="Um_q4k9-8zvp"
# + [markdown] colab_type="text" id="jgYwtN7D9ewk"
# #### What predictions does a Logistic Regression return?
# + colab={} colab_type="code" id="-X9KwbEl9VJu"
# + [markdown] colab_type="text" id="CkE2lbblr7Fn"
# ## Do one-hot encoding of categorical features
# + [markdown] colab_type="text" id="y1AuoNR-BO-N"
# ### Check "cardinality" of categorical features
#
# [Cardinality](https://simple.wikipedia.org/wiki/Cardinality) means the number of unique values that a feature has:
# > In mathematics, the cardinality of a set means the number of its elements. For example, the set A = {2, 4, 6} contains 3 elements, and therefore A has a cardinality of 3.
#
# One-hot encoding adds a dimension for each unique value of each categorical feature. So, it may not be a good choice for "high cardinality" categoricals that have dozens, hundreds, or thousands of unique values.
# + colab={} colab_type="code" id="hLbD2DLmAm1g"
# + [markdown] colab_type="text" id="MbV7HjibCYV5"
# ### Explore `quantity` feature
# + colab={} colab_type="code" id="iOZ3QQoFBhoS"
# + [markdown] colab_type="text" id="XC_oqFnwCcYP"
# ### Encode `quantity` feature
# + colab={} colab_type="code" id="-hQlbyrijS-5"
# # !pip install category_encoders
# + colab={} colab_type="code" id="LMiGjpy3ChIz"
import category_encoders as ce
# + colab={} colab_type="code" id="UYR3qImzCTga"
# + [markdown] colab_type="text" id="6kCA47KPr9PE"
# ## Do one-hot encoding & Scale features,
# within a complete model fitting workflow.
#
# ### Why and how to scale features before fitting linear models
#
# Scikit-Learn User Guide, [Preprocessing data](https://scikit-learn.org/stable/modules/preprocessing.html)
# > Standardization of datasets is a common requirement for many machine learning estimators implemented in scikit-learn; they might behave badly if the individual features do not more or less look like standard normally distributed data: Gaussian with zero mean and unit variance.
#
# > The `preprocessing` module further provides a utility class `StandardScaler` that implements the `Transformer` API to compute the mean and standard deviation on a training set. The scaler instance can then be used on new data to transform it the same way it did on the training set.
#
# ### How to use encoders and scalers in scikit-learn
# - Use the **`fit_transform`** method on the **train** set
# - Use the **`transform`** method on the **validation** set
#
# + colab={} colab_type="code" id="yTkS24UwHJHa"
# + [markdown] colab_type="text" id="Chix-W9-LTEX"
# ### Compare original features, encoded features, & scaled features
# + colab={} colab_type="code" id="YhJ3PHTAKzFx"
# + [markdown] colab_type="text" id="ZfVECpN7J6gb"
# ### Get & plot coefficients
# + colab={} colab_type="code" id="9nHkKk5XKwVm"
# + [markdown] colab_type="text" id="ZhUzucgPr_he"
# ## Submit to predictive modeling competition
#
#
# ### Write submission CSV file
#
# The format for the submission file is simply the row id and the predicted label (for an example, see `sample_submission.csv` on the data download page).
#
# For example, if you just predicted that all the waterpoints were functional you would have the following predictions:
#
# <pre>id,status_group
# 50785,functional
# 51630,functional
# 17168,functional
# 45559,functional
# 49871,functional
# </pre>
#
# Your code to generate a submission file may look like this:
# <pre># estimator is your scikit-learn estimator, which you've fit on X_train
#
# # X_test is your pandas dataframe or numpy array,
# # with the same number of rows, in the same order, as test_features.csv,
# # and the same number of columns, in the same order, as X_train
#
# y_pred = estimator.predict(X_test)
#
#
# # Makes a dataframe with two columns, id and status_group,
# # and writes to a csv file, without the index
#
# sample_submission = pd.read_csv('sample_submission.csv')
# submission = sample_submission.copy()
# submission['status_group'] = y_pred
# submission.to_csv('your-submission-filename.csv', index=False)
# </pre>
# + colab={} colab_type="code" id="yRitgZ_ULx6K"
# + [markdown] colab_type="text" id="PpG9knom1FN7"
# ### Send submission CSV file to Kaggle
#
# #### Option 1. Kaggle web UI
#
# Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file.
#
#
# #### Option 2. Kaggle API
#
# Use the Kaggle API to upload your CSV file.
# + [markdown] colab_type="text" id="sszvYWKYsDwY"
# # Assignment
# - Learn about the mathematics of Logistic Regression by watching <NAME>'s [video #1](https://www.youtube.com/watch?v=pREaWFli-5I) (12 minutes) & [video #2](https://www.youtube.com/watch?v=bDQgVt4hFgY) (9 minutes).
# - Start a clean notebook.
# - Do train/validate/test split with the Tanzania Waterpumps data.
# - Begin to explore and clean the data. For ideas, refer to [The Quartz guide to bad data](https://github.com/Quartz/bad-data-guide), a "reference to problems seen in real-world data along with suggestions on how to resolve them." One of the issues is ["Zeros replace missing values."](https://github.com/Quartz/bad-data-guide#zeros-replace-missing-values)
# - Select different numeric and categorical features.
# - Do one-hot encoding. (Remember it may not work with high cardinality categoricals.)
# - Scale features.
# - Use scikit-learn for logistic regression.
# - Get your validation accuracy score.
# - Get and plot your coefficients.
# - Submit your predictions to our Kaggle competition.
# - Commit your notebook to your fork of the GitHub repo.
#
# ## Stretch Goals
# - Begin to visualize the data.
# - Try different [scikit-learn scalers](https://scikit-learn.org/stable/modules/preprocessing.html)
# - Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html):
#
# > Pipeline can be used to chain multiple estimators into one. This is useful as there is often a fixed sequence of steps in processing the data, for example feature selection, normalization and classification. Pipeline serves multiple purposes here:
#
# > - **Convenience and encapsulation.** You only have to call fit and predict once on your data to fit a whole sequence of estimators.
# > - **Joint parameter selection.** You can grid search over parameters of all estimators in the pipeline at once.
# > - **Safety.** Pipelines help avoid leaking statistics from your test data into the trained model in cross-validation, by ensuring that the same samples are used to train the transformers and predictors.
#
| module4-logistic-regression/logistic_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from Bio import Entrez

# NCBI requires a contact e-mail with every Entrez request.
Entrez.email = '<EMAIL>'

# Search the Gene database for IDs matching the query, most relevant first.
handle = Entrez.esearch(db='gene',
                        term='fumarase homo sapiens',
                        sort='relevance',
                        idtype='acc')

fastafile = ''
for gene_id in Entrez.read(handle)['IdList']:
    fetch_handle = Entrez.efetch(db='gene', id=gene_id,
                                 rettype='fasta', retmode='text')
    record = fetch_handle.read()
    print(record)
    fastafile = record  # overwritten each pass: only the LAST record is kept

print(fastafile)
# +
from Bio import SeqIO  # kept for later parsing; unused in this cell

# Print the fetched record line by line. Iterating a str directly yields
# single characters (one per printed line), so split into lines first.
for line in fastafile.splitlines():
    print(line)
# +
#Enzymes
#GLycolysis:(1) triosephosphate isomerase, (2) phosphoglycerate kinase,
#(3)Phosphoenolpyruvate carboxylase, (4) Pyruvate kinase (also in pentose phosphate)
#Pentose phosphate:(1) Glucose-6-phosphate dehydrogenase (also in glycolysis),
#(2) Gluconolactonase, (3) 6-phosphogluconate dehydrogenase
#(4) transaldolase
#TCA cyle:(3)Malate dehydrogenase (also in glycolysis), (2) Fumarase ,
# (4)Citrate synthase (1)Succinate dehydrogenase
# +
# Enzyme Function
# Glycolysis: (1) triosephosphate isomerase
# catalyzes the reversible interconversion of the triose
# phosphate isomers dihydroxyacetone phosphate and D-glyceraldehyde 3-phosphate
# EC 5.3.1.1
# (2) phosphoglycerate kinase
# catalyzes the reversible transfer of a phosphate group from 1,3-bisphosphoglycerate to ADP producing 3-phosphoglycerate and ATP
# EC 2.7.2.3
# (3) Phosphoenolpyruvate carboxylase
# an enzyme in the family of carboxy-lyases found in plants and some bacteria that catalyzes the addition of bicarbonate (HCO3−) to phosphoenolpyruvate (PEP) to form the four-carbon compound oxaloacetate and inorganic phosphate
# EC 4.1.1.31
# (4) Pyruvate kinase (also in pentose phosphate)
# It catalyzes the transfer of a phosphate group from phosphoenolpyruvate to adenosine diphosphate, yielding one molecule of pyruvate and one molecule of ATP.
# EC 2.7.1.40
# ------------------
# Pentose phosphate:
# (1) Glucose-6-phosphate dehydrogenase (also in glycolysis)
# D-glucose 6-phosphate + NADP⁺ ⇌ 6-phospho-D-glucono-1,5-lactone + NADPH + H⁺
# EC 1.1.1.49
# (2) Gluconolactonase
# This enzyme belongs to the family of hydrolases, specifically those acting on carboxylic ester bonds.
# EC 3.1.1.17
# (3) 6-phosphogluconate dehydrogenase
# It forms ribulose 5-phosphate from 6-phosphogluconate. It is an oxidative carboxylase that catalyses the decarboxylating reduction of 6-phosphogluconate into ribulose 5-phosphate in the presence of NADP
# EC 1.1.1.44
# (4) transaldolase
# transaldolase: catalyzes the following reaction: sedoheptulose 7-phosphate + glyceraldehyde 3-phosphate ⇌ erythrose 4-phosphate + fructose 6-phosphate
# EC 2.2.1.2
# ------------------
# TCA Cycle:
# (1) Succinate dehydrogenase
# This enzyme catalyzes the oxidation of succinate to fumarate with the reduction of ubiquinone to ubiquinol
# EC 1.3.5.1
# (2) Fumarase
# It catalyzes the reversible hydration/dehydration of fumarate to malate
# EC 4.2.1.2
# (3) Malate dehydrogenase (also in glycolysis)
# It reversibly catalyzes the oxidation of malate to oxaloacetate using the reduction of NAD+ to NADH.
# EC 1.1.1.37
# (4) Citrate synthase
# It catalyzes the condensation reaction of the two-carbon acetate residue from acetyl coenzyme A and a molecule of four-carbon oxaloacetate to form the six-carbon citrate
# EC 2.3.3.1
# ### TCA cycle - explanation from Wikipedia
# +
# Gene description
#Humans
#GLycolysis:(1) triosephosphate isomerase,
# This gene encodes an enzyme, consisting of two identical proteins, which catalyzes the isomerization of glyceraldehydes 3-phosphate (G3P) and dihydroxy-acetone phosphate (DHAP) in glycolysis and gluconeogenesis. Mutations in this gene are associated with triosephosphate isomerase deficiency. Pseudogenes have been identified on chromosomes 1, 4, 6 and 7
#(2) phosphoglycerate kinase
#The protein encoded by this gene is a glycolytic enzyme that catalyzes the conversion of 1,3-diphosphoglycerate to 3-phosphoglycerate. The encoded protein may also act as a cofactor for polymerase alpha. Additionally, this protein is secreted by tumor cells where it participates in angiogenesis by functioning to reduce disulfide bonds in the serine protease, plasmin, which consequently leads to the release of the tumor blood vessel inhibitor angiostatin. The encoded protein has been identified as a moonlighting protein based on its ability to perform mechanistically distinct functions.
#(3)Phosphoenolpyruvate carboxylase
#This gene is a main control point for the regulation of gluconeogenesis. The cytosolic enzyme encoded by this gene, along with GTP, catalyzes the formation of phosphoenolpyruvate from oxaloacetate, with the release of carbon dioxide and GDP. The expression of this gene can be regulated by insulin, glucocorticoids, glucagon, cAMP, and diet.
#(4) Pyruvate kinase (also in pentose phosphate)
# This gene encodes a protein involved in glycolysis. The encoded protein is a pyruvate kinase that catalyzes the transfer of a phosphoryl group from phosphoenolpyruvate to ADP, generating ATP and pyruvate. This protein has been shown to interact with thyroid hormone and may mediate cellular metabolic effects induced by thyroid hormones. This protein has been found to bind Opa protein, a bacterial outer membrane protein involved in gonococcal adherence to and invasion of human cells, suggesting a role of this protein in bacterial pathogenesis.
#Pentose phosphate:
#(1) Glucose-6-phosphate dehydrogenase (also in glycolysis)
# This gene encodes glucose-6-phosphate dehydrogenase. This protein is a cytosolic enzyme encoded by a housekeeping X-linked gene whose main function is to produce NADPH, a key electron donor in the defense against oxidizing agents and in reductive biosynthetic reactions. G6PD is remarkable for its genetic diversity. Many variants of G6PD, mostly produced from missense mutations, have been described with wide ranging levels of enzyme activity and associated clinical symptoms.
#(2) Gluconolactonase
#protein encoded by this gene is a highly conserved, calcium-binding protein, that is preferentially expressed in the liver and kidney. It may have an important role in calcium homeostasis. Studies in rat indicate that this protein may also play a role in aging, as it shows age-associated down-regulation. This gene is part of a gene cluster on chromosome Xp11.3-Xp11.23.
#(3) 6-phosphogluconate dehydrogenase
# This gene, which encodes a member of the serine/threonine kinase family, regulates cell polarity and functions as a tumor suppressor. Mutations in this gene have been associated with Peutz-Jeghers syndrome, an autosomal dominant disorder characterized by the growth of polyps in the gastrointestinal tract, pigmented macules on the skin and mouth, and other neoplasms
#(4) transaldolase
# Transaldolase 1 is a key enzyme of the nonoxidative pentose phosphate pathway providing ribose-5-phosphate for nucleic acid synthesis and NADPH for lipid biosynthesis. This pathway can also maintain glutathione at a reduced state and thus protect sulfhydryl groups and cellular integrity from oxygen radicals. The functional gene of transaldolase 1 is located on chromosome 11 and a pseudogene is identified on chromosome 1 but there are conflicting map locations. The second and third exon of this gene were developed by insertion of a retrotransposable element. This gene is thought to be involved in multiple sclerosis
#TCA Cycle:
# (1) Succinate dehydrogenase
# des1
# (2) Fumarase
# (3) Malate dehydrogenase (also in glycolysis)
# (4) Citrate synthase
#------------------#
#E coli
# GLycolysis:
# (1)triosephosphate isomerase
#Binds TrxA
#(2) phosphoglycerate kinase
#Phosphoglycerate kinase is one of the proteins induced by anaerobiosis.
#(3)Phosphoenolpyruvate carboxylase
#Mutant has reduced growth rate with little acetate excreation, decrease glucose consumption and a decreased carbon dioxide evolution rate.
#(4) Pyruvate kinase (also in pentose phosphate)
#Pyruvate kinase I and pyruvate kinase II differ in physical and chemical properties as well as in their kinetic behavior
#Pentose phosphate:
#(1) Glucose-6-phosphate dehydrogenase (also in glycolysis),
# ATP-regulated binding and release of polypeptide substrates. [More information is available at EcoGene: EG10241]. Hsc56 exhibits specificity toward Hsc62, as Hsc56 does not activate DnaK or Hsc66 ATPase activity
#(2) Gluconolactonase — may be an issue: did not show in the database
#
#(3) 6-phosphogluconate dehydrogenase
# A null mutation in the gnd gene encoding 6-phosphogluconate dehydrogenase does not affect the growth rate significantly. [More information is available at EcoCyc: EG10411].
#(4) transaldolase
#Transaldolase is an enzyme of the pentose phosphate pathway, where it catalyzes the reversible interconversion of glyceraldehyde-3-phosphate and sedoheptulose-7-phosphate to fructose-6-phosphate and erythrose-4-phosphate
#TCA Cycle:
#(1) Succinate dehydrogenase
#(2) Fumarase
#(3) Malate dehydrogenase (also in glycolysis)
#(4) Citrate synthase
#------------------#
#Drosophila
#Glycolysis:
#(1) Triosephosphate isomerase
#(2) Phosphoglycerate kinase
#(3) Phosphoenolpyruvate carboxylase
#(4) Pyruvate kinase (also in pentose phosphate)
#Pentose phosphate:
#(1) Glucose-6-phosphate dehydrogenase (also in glycolysis),
#(2) Gluconolactonase — may be an issue: did not show in the database
#(3) 6-phosphogluconate dehydrogenase
#(4) Transaldolase
#TCA Cycle:
#(1) Succinate dehydrogenase
#(2) Fumarase
#(3) Malate dehydrogenase (also in glycolysis)
#(4) Citrate synthase
| lab04/parsing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Initialization
# + init_cell=true pycharm={"is_executing": false}
# %matplotlib inline
# %config InlineBackend.figure_format = 'svg'
import scqubits as scq
import scqubits.utils.plotting as plot
import numpy as np
# -
# # Fluxonium qubit
# $H_\text{fl}=-4E_\text{C}\partial_\phi^2-E_\text{J}\cos(\phi-\varphi_\text{ext}) +\frac{1}{2}E_L\phi^2$

# Instantiate a fluxonium qubit; `cutoff` is the Hilbert-space truncation used
# for diagonalization, `flux` the external flux (here the half-flux sweet spot).
fluxonium = scq.Fluxonium(
    EJ=8.9,
    EC=2.5,
    EL=0.5,
    cutoff = 110,
    flux = 0.5
)

# NOTE(review): this replaces the instance configured above with one built via
# the interactive `create()` widget — confirm this is intentional when running
# the notebook top-to-bottom as a script.
fluxonium = scq.Fluxonium.create()

print(fluxonium)

# Lowest eigenenergies of the current qubit parameters.
fluxonium.eigenvals()

# Energy spectrum vs. external flux over one symmetric period.
flux_list = np.linspace(-0.5, 0.5, 151)
fluxonium.plot_evals_vs_paramvals('flux', flux_list, evals_count=8);
fluxonium.plot_evals_vs_paramvals('flux', flux_list, evals_count=9, subtract_ground=True);

# Wavefunctions away from the sweet spot.
fluxonium.flux = 0.25
fluxonium.plot_wavefunction(esys=None, which=[0, 1, 2, 3, 5, 6], mode='real');
fluxonium.plot_wavefunction(esys=None, which=[0, 1, 2, 3], mode='real');

# ### Matrix elements
phimat = fluxonium.matrixelement_table('phi_operator', evals_count=10)
plot.matrix(phimat);
fluxonium.plot_matrixelements('n_operator', evals_count=16);
| examples/demo_fluxonium.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import itertools
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn.init as init
from torch import nn
from torch import distributions
from torch.distributions import MultivariateNormal, Uniform, TransformedDistribution, SigmoidTransform
from torch.nn.parameter import Parameter
from nflib.flows import (
AffineConstantFlow, ActNorm, AffineHalfFlow,
SlowMAF, MAF, IAF, Invertible1x1Conv,
NormalizingFlow, NormalizingFlowModel,
)
from nflib.spline_flows import NSF_AR, NSF_CL
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2
# +
# Lightweight datasets
import pickle
from sklearn import datasets
class DatasetSIGGRAPH:
    """2-D point cloud of the SIGGRAPH logo, loaded from ``siggraph.pkl``.

    Pickle file from Eric Jang's normalizing-flows tutorial:
    https://blog.evjang.com/2018/01/nf2.html
    https://github.com/ericjang/normalizing-flows-tutorial/blob/master/siggraph.pkl
    """

    def __init__(self):
        with open('siggraph.pkl', 'rb') as fh:
            points = np.array(pickle.load(fh), dtype=np.float32)
        points -= np.mean(points, axis=0)  # center the cloud at the origin
        self.XY = torch.from_numpy(points)

    def sample(self, n):
        """Return ``n`` points drawn uniformly with replacement from the cloud."""
        idx = np.random.randint(self.XY.shape[0], size=n)
        return self.XY[idx]
class DatasetMoons:
    """The classic two-interleaved-half-moons toy dataset (sklearn-backed)."""

    def sample(self, n):
        """Return ``n`` noisy moon points as a float32 tensor of shape (n, 2)."""
        points, _ = datasets.make_moons(n_samples=n, noise=0.05)
        return torch.from_numpy(points.astype(np.float32))
class DatasetMixture:
    """Mixture of four isotropic Gaussians (std 0.5) centered at
    (0,-2), (0,0), (2,2) and (-2,2)."""

    def sample(self, n):
        """Draw ``n`` points, ``n // 4`` per component. ``n`` must be divisible by 4."""
        assert n % 4 == 0
        quarter = n // 4
        centers = [(0, -2), (0, 0), (2, 2), (-2, 2)]
        # Draw each component in the same order as before so RNG consumption matches.
        parts = [np.random.randn(quarter, 2) * 0.5 + np.array(c) for c in centers]
        stacked = np.concatenate(parts, axis=0).astype(np.float32)
        return torch.from_numpy(stacked)
# Choose the toy 2-D dataset to train on and preview one batch.
d = DatasetMoons()
#d = DatasetMixture()
#d = DatasetSIGGRAPH()

x = d.sample(128)
plt.figure(figsize=(4,4))
plt.scatter(x[:,0], x[:,1], s=5, alpha=0.5)
plt.axis('equal');
# +
# construct a model
#prior = MultivariateNormal(torch.zeros(2), torch.eye(2))
# Logistic prior: inverse-sigmoid of Uniform(0,1).
prior = TransformedDistribution(Uniform(torch.zeros(2), torch.ones(2)), SigmoidTransform().inv) # Logistic distribution

# --- alternative flow stacks, kept commented for experimentation ---
# RealNVP
# flows = [AffineHalfFlow(dim=2, parity=i%2) for i in range(9)]

# NICE
# flows = [AffineHalfFlow(dim=2, parity=i%2, scale=False) for i in range(4)]
# flows.append(AffineConstantFlow(dim=2, shift=False))

# SlowMAF (MAF, but without any parameter sharing for each dimension's scale/shift)
# flows = [SlowMAF(dim=2, parity=i%2) for i in range(4)]

# MAF (with MADE net, so we get very fast density estimation)
# flows = [MAF(dim=2, parity=i%2) for i in range(4)]

# IAF (with MADE net, so we get very fast sampling)
# flows = [IAF(dim=2, parity=i%2) for i in range(3)]

# insert ActNorms to any of the flows above
# norms = [ActNorm(dim=2) for _ in flows]
# flows = list(itertools.chain(*zip(norms, flows)))

# Glow paper
# flows = [Invertible1x1Conv(dim=2) for i in range(3)]
# norms = [ActNorm(dim=2) for _ in flows]
# couplings = [AffineHalfFlow(dim=2, parity=i%2, nh=32) for i in range(len(flows))]
# flows = list(itertools.chain(*zip(norms, flows, couplings))) # append a coupling layer after each 1x1

# Neural splines, coupling — the active configuration:
# three repetitions of (ActNorm -> Invertible1x1Conv -> NSF coupling).
nfs_flow = NSF_CL if True else NSF_AR
flows = [nfs_flow(dim=2, K=8, B=3, hidden_dim=16) for _ in range(3)]
convs = [Invertible1x1Conv(dim=2) for _ in flows]
norms = [ActNorm(dim=2) for _ in flows]
flows = list(itertools.chain(*zip(norms, convs, flows)))

# construct the model
model = NormalizingFlowModel(prior, flows)
# -
# optimizer
optimizer = optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-5) # todo tune WD
print("number of params: ", sum(p.numel() for p in model.parameters()))

# Maximum-likelihood training: minimize summed negative log-likelihood of batches.
model.train()
for k in range(1000):
    x = d.sample(128)
    zs, prior_logprob, log_det = model(x)
    logprob = prior_logprob + log_det  # change-of-variables log-density
    loss = -torch.sum(logprob) # NLL
    model.zero_grad()
    loss.backward()
    optimizer.step()
    if k % 100 == 0:
        print(loss.item())
# +
model.eval()

# Forward direction: map a data batch into latent space and compare with prior samples.
x = d.sample(128)
zs, prior_logprob, log_det = model(x)
z = zs[-1]
x = x.detach().numpy()
z = z.detach().numpy()
# NOTE(review): `sample([128, 2])` on a 2-D prior may yield shape (128, 2, 2)
# before `.squeeze()` — confirm the intended sample shape here.
p = model.prior.sample([128, 2]).squeeze()
plt.figure(figsize=(10,5))
plt.subplot(121)
plt.scatter(p[:,0], p[:,1], c='g', s=5)
plt.scatter(z[:,0], z[:,1], c='r', s=5)
plt.scatter(x[:,0], x[:,1], c='b', s=5)
plt.legend(['prior', 'x->z', 'data'])
plt.axis('scaled')
plt.title('x -> z')

# Inverse direction: draw latents from the prior and map them back to data space.
zs = model.sample(128*8)
z = zs[-1]
z = z.detach().numpy()
plt.subplot(122)
plt.scatter(x[:,0], x[:,1], c='b', s=5, alpha=0.5)
plt.scatter(z[:,0], z[:,1], c='r', s=5, alpha=0.5)
plt.legend(['data', 'z->x'])
plt.axis('scaled')
plt.title('z -> x')
# +
# Visualize the step-wise flow in the full net
from matplotlib import collections as mc

# plot the coordinate warp
ng = 20
xx, yy = np.linspace(-3, 3, ng), np.linspace(-3, 3, ng)
xv, yv = np.meshgrid(xx, yy)
xy = np.stack([xv, yv], axis=-1)
in_circle = np.sqrt((xy**2).sum(axis=2)) <= 3 # seems appropriate since we use radial distributions as priors
xy = xy.reshape((ng*ng, 2))
xy = torch.from_numpy(xy.astype(np.float32))

# Push the regular grid through the inverse flow; `zs` holds the grid after each layer.
zs, log_det = model.backward(xy)
backward_flow_names = [type(f).__name__ for f in model.flow.flows[::-1]]
nz = len(zs)
for i in range(nz - 1):
    z0 = zs[i].detach().numpy()
    z1 = zs[i+1].detach().numpy()

    # plot how the samples travel at this stage
    figs, axs = plt.subplots(1, 2, figsize=(6, 3))
    #plt.figure(figsize=(20,10))
    axs[0].scatter(z0[:,0], z0[:, 1], c='r', s=3)
    axs[0].scatter(z1[:,0], z1[:, 1], c='b', s=3)
    axs[0].quiver(z0[:,0], z0[:,1], z1[:,0] - z0[:,0], z1[:,1] - z0[:,1], units='xy', scale=1, alpha=0.5)
    axs[0].axis([-3, 3, -3, 3])
    axs[0].set_title("layer %d -> %d (%s)" % (i, i+1, backward_flow_names[i]))

    # Render the warped grid as line segments, keeping only segments inside the circle.
    q = z1.reshape((ng, ng, 2))
    # y coords
    p1 = np.reshape(q[1:,:,:], (ng**2-ng,2))
    p2 = np.reshape(q[:-1,:,:], (ng**2-ng,2))
    inc = np.reshape(in_circle[1:,:] | in_circle[:-1,:], (ng**2-ng,))
    p1, p2 = p1[inc], p2[inc]
    lcy = mc.LineCollection(zip(p1, p2), linewidths=1, alpha=0.5, color='k')
    # x coords
    p1 = np.reshape(q[:,1:,:], (ng**2-ng,2))
    p2 = np.reshape(q[:,:-1,:], (ng**2-ng,2))
    inc = np.reshape(in_circle[:,1:] | in_circle[:,:-1], (ng**2-ng,))
    p1, p2 = p1[inc], p2[inc]
    lcx = mc.LineCollection(zip(p1, p2), linewidths=1, alpha=0.5, color='k')
    # draw the lines
    axs[1].add_collection(lcy)
    axs[1].add_collection(lcx)
    axs[1].axis([-3, 3, -3, 3])
    axs[1].set_title("grid warp at the end of %d" % (i+1,))

    # draw the data too
    plt.scatter(x[:,0], x[:,1], c='r', s=5, alpha=0.5)
# +
# train and render
# code duplication because it's very late at night now and i'm tired
import matplotlib.gridspec as gridspec

# Same evaluation grid as in the previous cell.
ng = 20
xx, yy = np.linspace(-3, 3, ng), np.linspace(-3, 3, ng)
xv, yv = np.meshgrid(xx, yy)
xy = np.stack([xv, yv], axis=-1)
in_circle = np.sqrt((xy**2).sum(axis=2)) <= 3
xy = xy.reshape((ng*ng, 2))
xy = torch.from_numpy(xy.astype(np.float32))
xval = d.sample(128*5)

model.train()
for k in range(500):
    # sample
    x = d.sample(128)

    # train a bit
    zs, prior_logprob, log_det = model(x)
    logprob = prior_logprob + log_det
    loss = -torch.sum(logprob) # NLL
    model.zero_grad()
    loss.backward()
    optimizer.step()

    if k % 10 == 0:
        # vis
        zs, log_det = model.backward(xy)
        backward_flow_names = [type(f).__name__ for f in model.flow.flows[::-1]]
        nz = len(zs)
        i = nz - 1 - 1
        z0 = zs[i].detach().numpy()
        z1 = zs[i+1].detach().numpy()

        # plot how the samples travel at this stage
        ss = 0.1
        fig = plt.figure(figsize=(10, 5))
        outer = gridspec.GridSpec(1, 2, wspace=ss, hspace=ss)
        inner1 = gridspec.GridSpecFromSubplotSpec(3, 3, subplot_spec=outer[0], wspace=ss, hspace=ss)
        inner2 = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=outer[1], wspace=ss, hspace=ss)
        backward_flow_names = [type(f).__name__ for f in model.flow.flows[::-1]]
        nz = len(zs)
        # Left 3x3 panel: per-layer displacement of the grid points.
        for i in range(min(nz-1, 9)):
            ax = plt.Subplot(fig, inner1[i])
            z0 = zs[i].detach().numpy()
            z1 = zs[i+1].detach().numpy()
            ax.scatter(z0[:,0], z0[:, 1], c='r', s=1, alpha=0.5)
            ax.scatter(z1[:,0], z1[:, 1], c='b', s=1, alpha=0.5)
            ax.quiver(z0[:,0], z0[:,1], z1[:,0] - z0[:,0], z1[:,1] - z0[:,1], units='xy', scale=1, alpha=0.5)
            ax.axis([-3, 3, -3, 3])
            ax.set_yticklabels([])
            ax.set_xticklabels([])
            #ax.set_title("layer %d -> %d (%s)" % (i, i+1, backward_flow_names[i]))
            fig.add_subplot(ax)
        # Right panel: final warped grid plus held-out data points.
        ax = plt.Subplot(fig, inner2[0])
        q = z1.reshape((ng, ng, 2))
        # y coords
        p1 = np.reshape(q[1:,:,:], (ng**2-ng,2))
        p2 = np.reshape(q[:-1,:,:], (ng**2-ng,2))
        inc = np.reshape(in_circle[1:,:] | in_circle[:-1,:], (ng**2-ng,))
        p1, p2 = p1[inc], p2[inc]
        lcy = mc.LineCollection(zip(p1, p2), linewidths=1, alpha=0.5, color='k')
        # x coords
        p1 = np.reshape(q[:,1:,:], (ng**2-ng,2))
        p2 = np.reshape(q[:,:-1,:], (ng**2-ng,2))
        inc = np.reshape(in_circle[:,1:] | in_circle[:,:-1], (ng**2-ng,))
        p1, p2 = p1[inc], p2[inc]
        lcx = mc.LineCollection(zip(p1, p2), linewidths=1, alpha=0.5, color='k')
        # draw the lines
        ax.add_collection(lcy)
        ax.add_collection(lcx)
        ax.axis([-3, 3, -3, 3])
        ax.set_yticklabels([])
        ax.set_xticklabels([])
        #ax.set_title("grid warp at the end of %d" % (i+1,))
        fig.add_subplot(ax)
        # draw the data too
        plt.scatter(xval[:,0], xval[:,1], c='r', s=5, alpha=0.5)
        # NOTE(review): this `break` stops the training loop after the first
        # visualization pass — confirm it isn't a leftover debugging statement.
        break
    #fname = 'out/step_%04d.png' % (k,)
    #plt.savefig(fname, dpi=200)
    #print("saved", fname, 'loss', loss.item())
    #plt.close(fig)
# -
| seminars/seminar-7-nf/nflib/nflib1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="DB7J6qSNl-ds"
# # FinBERTを用いたセンチメント分析
#
# このノートブックでは、[FinBERT](https://github.com/ProsusAI/finBERT)を使って、株のセンチメントを分類する方法を紹介します。
# + [markdown] id="3WBR5otnMF5c"
# ## 準備
# + [markdown] id="E6bA0ABvMHLV"
# ### パッケージのインストール
# + id="nwnTlzcTMJEJ" colab={"base_uri": "https://localhost:8080/"} outputId="41c46c53-d33c-4da3-b514-e67303e42452"
# !pip install -q pandas==1.1.5 transformers==4.10.2 scikit-learn==0.23.2 datasets==1.12.1
# + [markdown] id="CWs27-6-MKwV"
# ### インポート
# + id="382hdqnZMNUB"
import numpy as np
import pandas as pd
from datasets import load_metric
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from transformers import BertTokenizerFast
from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments
# + [markdown] id="Nc4tB3-CMQy1"
# ### データのアップロード
#
# 今回のデータセットは[stochtwits](https://api.stocktwits.com/developers/docs)から作成できます。1時間あたり200リクエストまでのポリシーがあるので尊重してください。このAPIを使って作成したデータセット(`FinBERT_Data.csv`)が本章の`Data`フォルダの中にあるので、アップロードしましょう。
# + colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 78} id="i0WrPn3tMY9G" outputId="a4420c0b-dcfb-462a-c3a6-df65943e5dcd"
# Open the Colab file-picker and read the uploaded file(s) into memory.
from google.colab import files
uploaded = files.upload()
# + [markdown] id="5usVr6H3Ma8U"
# ### データの読み込み
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="GPgG_mlXMc-8" outputId="6c370df1-815d-4b24-bfcf-aa18c38577fc"
# Load the StockTwits-derived dataset (one message + sentiment label per row).
df = pd.read_csv("FinBERT_Data.csv")
df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 136} id="WEVAUCGoMlZ1" outputId="2644f50e-4ffc-45da-e9ef-a68c055d8090"
# Inspect message counts per ticker symbol and per sentiment label.
display(df["symbol"].value_counts())
df["sentiment"].value_counts()
# + [markdown] id="KUH_MyWxMrAz"
# ## 前処理
#
# `LabelEncoder`を使って、ラベルの文字列を数字に変換します。
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="NO7V_zosMsYA" outputId="761fc764-6693-4ae5-8161-7a34a37cbbc3"
# Map the sentiment strings to integer class ids in place.
le = LabelEncoder()
df["sentiment"] = le.fit_transform(df["sentiment"])
df.head()
# + [markdown] id="ziSx6bH-MzgR"
# データを3つに分割します。
# + id="WA3H-JhiMy9w"
# 80/10/10 train/valid/test split: hold out 20%, then halve it below.
x_train, x_test, y_train, y_test = train_test_split(
    list(df["message"].values),
    list(df["sentiment"].values),
    test_size=0.2,
    random_state=2021
)
x_valid, x_test, y_valid, y_test = train_test_split(
    x_test,
    y_test,
    test_size=0.5,
    random_state=2021
)
# + [markdown] id="pqe6YIwjM-rh"
# `BERTTokenizerFast.from_pretrained`メソッドでトークナイザーをインスタンス化します。
# + colab={"base_uri": "https://localhost:8080/", "height": 145, "referenced_widgets": ["48d851396b11478fa14bd6d480ecab49", "423854f4045e4ff085ddd5735b3495ad", "ac133b406d134d049624e23f68ea859f", "f1b0da2375594a4f955b542ede4f1958", "f69ffb4414894e09a62a3c15d3d56d4f", "f6a5f1ed73654c37a2687a462fcdf2b5", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "e3a0efbbd68f488db0675f7f98a0cbf9", "<KEY>", "dc5e3ddafd4a4aa59bda9db1a4b9ba40", "c311fb8084b847f7ada76ee48484c05d", "a1eafa1a95ec4443bdb18944b985f631", "<KEY>", "<KEY>", "a5ab5d40e9724e1a8254d62304b3a20e", "<KEY>", "<KEY>", "20707d18b0784c60b6581e42e4e845d5", "<KEY>", "2d5bf0817ad44f0eb5d7b71e4687e09c", "7adaac490bee4912a1cd3b54b1a0e869", "581a6d8ecbdf4bc5adeae5f3828f72bb", "a504fbdf14de4239b950a13907c70a72", "6acafe1b5b9841ab873047146d436ab9", "<KEY>", "<KEY>", "<KEY>", "498fcca0982b40619b3e8e4cdb120339", "f3da8e2ecb974173960e4675ca5518a1", "<KEY>", "<KEY>", "340b567d57f94c1996a2a71fe6fabb79", "cbf22d112df245789ed0ca80d9de446e", "<KEY>", "<KEY>", "<KEY>", "78571932b5734eed945807e3d8dc3527", "<KEY>", "<KEY>"]} id="fqjcSG1DM-2s" outputId="644adb84-0f84-432a-efbe-7c77142afba8"
# Pre-trained FinBERT tokenizer from the Hugging Face hub.
model_name = "ProsusAI/finbert"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# + [markdown] id="E3gP56h1NGS7"
# トークナイザーにテキストを与えて、エンコーディングしましょう。
# + id="0exgvfpbjDpM"
# Tokenize each split; pad/truncate to a common length within each call.
train_encodings = tokenizer(x_train, truncation=True, padding=True)
val_encodings = tokenizer(x_valid, truncation=True, padding=True)
test_encodings = tokenizer(x_test, truncation=True, padding=True)
# + [markdown] id="XobDRdOVNI7s"
# ラベルとエンコーディングした入力を与えて、データセットを作成します。
# + id="8jgDpbePWBql"
import torch
class StockDataset(torch.utils.data.Dataset):
    """Pairs tokenizer encodings with integer labels for the HF Trainer."""

    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        """Return one example as a dict of encoding fields plus its label."""
        sample = dict((name, seq[idx]) for name, seq in self.encodings.items())
        sample['labels'] = self.labels[idx]
        return sample

    def __len__(self):
        return len(self.labels)
# Wrap the encodings + labels as torch Datasets for the Trainer.
train_dataset = StockDataset(train_encodings, y_train)
val_dataset = StockDataset(val_encodings, y_valid)
test_dataset = StockDataset(test_encodings, y_test)
# + [markdown] id="m2bGe8FImiAB"
# ## モデルの学習
#
# Datasetsライブラリの`load_metric`関数を使って、評価用の関数を用意しましょう。今回は単純に正解率を用意します。`compute_metrics`関数にlogitsを予測へ変換させて、それを`metric`の`compute`メソッドに与えるだけです。
# + colab={"base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": ["5517d59bfe7240f895218a74df6e51da", "28d540cb91a44caa90517ac6e8442b81", "22309f2a88994b30acda42e5c9ce3a9e", "819067fb58ec4bfb95f3c7c5aa753fd9", "19133aca650b449c82f08a285b3a9137", "05aa5a353a8141da86afcab0208a92ea", "791808ba69624660b3e56a9bbfd172a7", "b8a567202a50479ea410b6d10a9edd2d", "9e9c9f7982ef4385bb9e3830faed98c8", "9ed9bca1adea4de7addd172ebc8dda88", "de82b26c1f9e4a7581246a39b60ac113"]} id="uDFk8iA9bJuF" outputId="9a7f84d6-d36e-4303-9c57-29461bcb34b4"
# Accuracy metric. NOTE: `datasets.load_metric` is deprecated upstream in favor of `evaluate.load`.
metric = load_metric("accuracy")
def compute_metrics(eval_pred):
    """Turn Trainer eval output (logits, gold labels) into an accuracy dict via `metric`."""
    logits, gold = eval_pred
    hard_preds = np.argmax(logits, axis=-1)
    return metric.compute(predictions=hard_preds, references=gold)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["1d8bdbe4ff204b139deebf0556792392", "21661de8fce74739b94522531bbdb988", "efbe2ca26f1b4626acb28d364c12a049", "22fc66222c33407ab9c409c8b9cdf778", "8f6f8db343d04db18e7500734105c1a9", "72e39da57e994f44850e4dba836cd6ff", "69b73e46b113411d9e3f3fb365b25058", "79a62b122e354d05a9a799fafda64a47", "<KEY>", "3c4f7b9707214fd3864c2c0168499a51", "a2378ade869b4c3e81f61f97eb96773c"]} id="jqbEi8h5WY7e" outputId="4c22c85c-08ee-441c-e49c-8b631579c11c"
training_args = TrainingArguments(
    output_dir='./results', # output directory
    num_train_epochs=10, # total number of training epochs
    per_device_train_batch_size=16, # batch size per device during training
    per_device_eval_batch_size=64, # batch size for evaluation
    weight_decay=0.01, # strength of weight decay
    evaluation_strategy="steps",
    logging_dir='./logs',
    logging_steps=100,
    eval_steps=100,
    save_steps=100,
    load_best_model_at_end=True, # reload the best checkpoint when training finishes
)

# Re-size the classification head to 2 labels; `ignore_mismatched_sizes=True`
# lets loading proceed when the checkpoint's head shape differs — presumably
# the pretrained FinBERT head has a different number of classes (verify).
model = AutoModelForSequenceClassification.from_pretrained(
    "ProsusAI/finbert",
    num_labels=2,
    ignore_mismatched_sizes=True
)

trainer = Trainer(
    model=model, # the instantiated 🤗 Transformers model to be trained
    args=training_args, # training arguments, defined above
    train_dataset=train_dataset, # training dataset
    eval_dataset=val_dataset, # evaluation dataset
    compute_metrics=compute_metrics,
)

trainer.train()
# + id="rLab-awebYUQ" colab={"base_uri": "https://localhost:8080/", "height": 190} outputId="258fd5a3-8523-4e53-b6b5-a92eeabc1f04"
# Final evaluation on the held-out test split.
trainer.evaluate(test_dataset)
# + [markdown] id="78yvfEVHoncI"
# 正解率はまずまずといったところです。前処理やハイパーパラメータチューニングをすれば、もう少し良くなるでしょう。
# + [markdown] id="KChm_2UlBMnH"
# ## 参考資料
#
# - [Fine-tuning a pretrained model](https://colab.research.google.com/github/huggingface/notebooks/blob/master/transformers_doc/training.ipynb)
# + id="e7pITuhdBUIH"
| ch10/03_FinBERT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + id="ar4O_1tszb8L"
from google.colab import drive
drive.mount('/content/drive/')
# + id="5mmqSh3szedt"
# ! pip3 install transformers
# ! pip3 install jsonlines
# + id="a0Yf9E5vzfsU"
# %cd /content/drive/My Drive/Sentence_pair_modeling/Models/BERTs/
# + [markdown] id="ZmNbCBlj7Y4e"
# ## **LCQMC**
# + [markdown] id="Cvz-Ub1Z8L-G"
# ### LCQMC train-validate-test
# + id="gqWL6hvozgVy"
from run_Bert_model import model_train_validate_test
import pandas as pd
from utils import Metric
import os

# LCQMC: tab-separated sentence pairs with binary match labels.
lcqmc_path = "/content/drive/My Drive/Sentence_pair_modeling/LCQMC/"
train_df = pd.read_csv(os.path.join(lcqmc_path, "data/train.tsv"),sep='\t',header=None, names=['s1','s2','label'])
dev_df = pd.read_csv(os.path.join(lcqmc_path, "data/dev.tsv"),sep='\t',header=None, names=['s1','s2','label'])
test_df = pd.read_csv(os.path.join(lcqmc_path, "data/test.tsv"),sep='\t',header=None, names=['s1','s2','label'])

# Fine-tune BERT on LCQMC, then score the saved test predictions.
target_dir = os.path.join(lcqmc_path, "output/Bert/")
model_train_validate_test(train_df, dev_df, test_df, target_dir,
                          max_seq_len=64,
                          num_labels=2,
                          epochs=3,
                          batch_size=32,
                          lr=2e-05,
                          patience=3,
                          max_grad_norm=10.0,
                          if_save_model=True,
                          checkpoint=None)
test_result = pd.read_csv(os.path.join(target_dir, 'test_prediction.csv'))
Metric(test_df.label, test_result.prediction)
# + [markdown] id="qh8q-g9TbS7v"
# ### LCQMC infer by other pretrained models
# + id="x5vdDPy9bUlg"
from run_Bert_model import model_load_test
import pandas as pd
from utils import Metric
import os

# Run the full LCQMC data through a model fine-tuned on another corpus (BQ).
train_df = pd.read_csv(os.path.join(lcqmc_path, "data/train.tsv"),sep='\t',header=None, names=['s1','s2','label'])
dev_df = pd.read_csv(os.path.join(lcqmc_path, "data/dev.tsv"),sep='\t',header=None, names=['s1','s2','label'])
test_df = pd.read_csv(os.path.join(lcqmc_path, "data/test.tsv"),sep='\t',header=None, names=['s1','s2','label'])
data = pd.concat([train_df,dev_df,test_df]).reset_index(drop=True)

# NOTE(review): `bq_path` is only defined in the later "BQ Corpus" cell —
# running this cell before that one raises NameError; confirm the intended
# cell execution order.
target_dir = os.path.join(bq_path, "output/Bert") # load pretrained model
test_prediction_dir = os.path.join(bq_path, "output/Infer_LCQMC") # where to save the infer result
test_prediction_name = 'Bert_test_prediction.csv' # the infer result name
model_load_test(test_df = data,
                target_dir = target_dir,
                test_prediction_dir = test_prediction_dir,
                test_prediction_name = test_prediction_name)
test_result = pd.read_csv(os.path.join(test_prediction_dir, test_prediction_name))
Metric(data.label, test_result.prediction)
# + [markdown] id="RckVzwQh9kbk"
# # **XiAn**
# + [markdown] id="Q0-mv6D9j2ti"
# ## XiAn train-validate-test
# + [markdown] id="LmSwRidCb8it"
# ## XiAn infer by other pretrained models
# + id="WNSyPnnf9oiL"
from run_Bert_model import model_train_validate_test
import pandas as pd
from utils import Metric
import os

# XiAn STS: same TSV layout as LCQMC.
xian_path = "/content/drive/My Drive/Sentence_pair_modeling/XiAn_STS/"
train_df = pd.read_csv(os.path.join(xian_path, "data/train.tsv"),sep='\t',header=None, names=['s1','s2','label'])
dev_df = pd.read_csv(os.path.join(xian_path, "data/dev.tsv"),sep='\t',header=None, names=['s1','s2','label'])
test_df = pd.read_csv(os.path.join(xian_path, "data/test.tsv"),sep='\t',header=None, names=['s1','s2','label'])

# Fine-tune BERT on XiAn and score the saved test predictions.
target_dir = os.path.join(xian_path, "output/Bert/")
model_train_validate_test(train_df, dev_df, test_df, target_dir,
                          max_seq_len=64,
                          num_labels=2,
                          epochs=3,
                          batch_size=32,
                          lr=2e-05,
                          patience=1,
                          max_grad_norm=10.0,
                          if_save_model=True,
                          checkpoint=None)
test_result = pd.read_csv(os.path.join(target_dir, 'test_prediction.csv'))
Metric(test_df.label, test_result.prediction)
# + id="OoXViPd7b96i"
from run_Bert_model import model_load_test
import pandas as pd
from utils import Metric
import os

# Infer on all of XiAn with the LCQMC-fine-tuned model (cross-corpus transfer).
# NOTE(review): relies on `lcqmc_path` defined in an earlier cell.
train_df = pd.read_csv(os.path.join(xian_path, "data/train.tsv"),sep='\t',header=None, names=['s1','s2','label'])
dev_df = pd.read_csv(os.path.join(xian_path, "data/dev.tsv"),sep='\t',header=None, names=['s1','s2','label'])
test_df = pd.read_csv(os.path.join(xian_path, "data/test.tsv"),sep='\t',header=None, names=['s1','s2','label'])
data = pd.concat([train_df,dev_df,test_df]).reset_index(drop=True)
target_dir = os.path.join(lcqmc_path, "output/Bert") # load pretrained model
test_prediction_dir = os.path.join(lcqmc_path, "output/Infer_XiAn") # where to save the infer result
test_prediction_name = 'Bert_test_prediction.csv' # the infer result name
model_load_test(test_df = data,
                target_dir = target_dir,
                test_prediction_dir = test_prediction_dir,
                test_prediction_name = test_prediction_name)
test_result = pd.read_csv(os.path.join(test_prediction_dir, test_prediction_name))
Metric(data.label, test_result.prediction)
# + [markdown] id="w00K8BBxeS-B"
# # **BQ Corpus**
# + [markdown] id="wb46lMAg7inN"
# ## BQ train-validate-test
# + id="bGSXS-S75IQD"
from run_Bert_model import model_train_validate_test
import pandas as pd
from utils import Metric, json2df
import os

# BQ Corpus ships as JSON; `json2df` converts each split to a DataFrame.
bq_path = "/content/drive/My Drive/Sentence_pair_modeling/BQ Corpus/"
train_df = json2df(os.path.join(bq_path, "data/train.json"))
dev_df = json2df(os.path.join(bq_path, "data/dev.json"))
test_df = json2df(os.path.join(bq_path, "data/test.json"))

# Fine-tune BERT on BQ and score the saved test predictions.
target_dir = os.path.join(bq_path, "output/Bert/")
model_train_validate_test(train_df, dev_df, test_df, target_dir,
                          max_seq_len=64,
                          num_labels=2,
                          epochs=3,
                          batch_size=32,
                          lr=2e-05,
                          patience=1,
                          max_grad_norm=10.0,
                          if_save_model=True,
                          checkpoint=None)
test_result = pd.read_csv(os.path.join(target_dir, 'test_prediction.csv'))
Metric(test_df.label, test_result.prediction)
# + [markdown] id="Ux6gzBc77mEE"
# ## BQ infer by other pretrained models
# + id="SNNdmsA0evF8"
from run_Bert_model import model_load_test
import pandas as pd
from utils import Metric, json2df
import os

# Infer on all of BQ with the LCQMC-fine-tuned model (cross-corpus transfer).
# NOTE(review): relies on `lcqmc_path` defined in an earlier cell.
bq_path = "/content/drive/My Drive/Sentence_pair_modeling/BQ Corpus/"
train_df = json2df(os.path.join(bq_path, "data/train.json"))
dev_df = json2df(os.path.join(bq_path, "data/dev.json"))
test_df = json2df(os.path.join(bq_path, "data/test.json"))
data = pd.concat([train_df,dev_df,test_df]).reset_index(drop=True)
target_dir = os.path.join(lcqmc_path, "output/Bert") # load pretrained model
test_prediction_dir = os.path.join(lcqmc_path, "output/Infer_BQ") # where to save the infer result
test_prediction_name = 'Bert_test_prediction.csv' # the infer result name
model_load_test(test_df = data,
                target_dir = target_dir,
                test_prediction_dir = test_prediction_dir,
                test_prediction_name = test_prediction_name)
test_result = pd.read_csv(os.path.join(test_prediction_dir, test_prediction_name))
Metric(data.label, test_result.prediction)
# + [markdown] id="OJKB8BYheaFA"
# # OCNLI
# + id="uwFt97RAz8MW"
from run_Bert_model import model_train_validate_test
import pandas as pd
from utils import Metric
import os

# OCNLI: three-way NLI labels, with an extra `genre` column.
ocnli_path = "/content/drive/My Drive/Sentence_pair_modeling/OCNLI/"
train_df = pd.read_csv(os.path.join(ocnli_path, "data/train.csv"),header=None, names=['s1','s2','label','genre'])
dev_df = pd.read_csv(os.path.join(ocnli_path, "data/dev.csv"),header=None, names=['s1','s2','label','genre'])
test_df = pd.read_csv(os.path.join(ocnli_path, "data/test.csv"),header=None, names=['s1','s2','label','genre'])

target_dir = os.path.join(ocnli_path, "output/Bert/")
# NOTE(review): lr=2e-02 is three orders of magnitude larger than the 2e-05 /
# 3e-05 used in every other cell — almost certainly a typo; confirm.
model_train_validate_test(train_df, dev_df, test_df, target_dir ,
                          max_seq_len=64,
                          num_labels=3,
                          epochs=3,
                          batch_size=32,
                          lr=2e-02,
                          patience=1,
                          max_grad_norm=10.0,
                          if_save_model=True,
                          checkpoint=None)
test_result = pd.read_csv(os.path.join(target_dir, 'test_prediction.csv'))
Metric(test_df.label, test_result.prediction)
# torch.cuda.empty_cache()  # release cached CUDA memory
# + [markdown] id="<KEY>"
# # CMNLI
# + id="hV1ECI2hISZB"
from run_Bert_model import model_train_validate_test
import pandas as pd
from utils import Metric
import os

# CMNLI: three-way NLI labels, larger batch and slightly higher lr.
cmnli_path = "/content/drive/My Drive/Sentence_pair_modeling/CMNLI/"
train_df = pd.read_csv(os.path.join(cmnli_path, "data/train1.csv"),header=None, names=['s1','s2','label'])
dev_df = pd.read_csv(os.path.join(cmnli_path, "data/dev.csv"),header=None, names=['s1','s2','label'])
test_df = pd.read_csv(os.path.join(cmnli_path, "data/test.csv"),header=None, names=['s1','s2','label'])

target_dir = os.path.join(cmnli_path, "output/Bert/")
model_train_validate_test(train_df, dev_df, test_df, target_dir,
                          max_seq_len=64,
                          num_labels=3,
                          epochs=3,
                          batch_size=64,
                          lr=3e-05,
                          patience=1,
                          max_grad_norm=10.0,
                          if_save_model=True,
                          checkpoint=None)
test_result = pd.read_csv(os.path.join(target_dir, 'test_prediction.csv'))
Metric(test_df.label, test_result.prediction)
| Models/BERTs/run_bert.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# SeqToSeq Fingerprint
# --------------------
#
# In this example, we will use a `SeqToSeq` model to generate fingerprints for classifying molecules. This is based on the following paper, although some of the implementation details are different: Xu et al., "Seq2seq Fingerprint: An Unsupervised Deep Molecular Embedding for Drug Discovery" (https://doi.org/10.1145/3107411.3107424).
#
# Many types of models require their inputs to have a fixed shape. Since molecules can vary widely in the numbers of atoms and bonds they contain, this makes it hard to apply those models to them. We need a way of generating a fixed length "fingerprint" for each molecule. Various ways of doing this have been designed, such as Extended-Connectivity Fingerprints (ECFPs). But in this example, instead of designing a fingerprint by hand, we will let a `SeqToSeq` model learn its own method of creating fingerprints.
#
# A `SeqToSeq` model performs sequence to sequence translation. For example, they are often used to translate text from one language to another. It consists of two parts called the "encoder" and "decoder". The encoder is a stack of recurrent layers. The input sequence is fed into it, one token at a time, and it generates a fixed length vector called the "embedding vector". The decoder is another stack of recurrent layers that performs the inverse operation: it takes the embedding vector as input, and generates the output sequence. By training it on appropriately chosen input/output pairs, you can create a model that performs many sorts of transformations.
#
# In this case, we will use SMILES strings describing molecules as the input sequences. We will train the model as an autoencoder, so it tries to make the output sequences identical to the input sequences. For that to work, the encoder must create embedding vectors that contain all information from the original sequence. That's exactly what we want in a fingerprint, so perhaps those embedding vectors will then be useful as a way to represent molecules in other models!
#
# Let's start by loading the data. We will use the MUV dataset. It includes 74,501 molecules in the training set, and 9313 molecules in the validation set, so it gives us plenty of SMILES strings to work with.
import deepchem as dc

# Load the MUV dataset; the dataset ids are the molecules' SMILES strings.
tasks, datasets, transformers = dc.molnet.load_muv()
train_dataset, valid_dataset, test_dataset = datasets
train_smiles = train_dataset.ids
valid_smiles = valid_dataset.ids
# We need to define the "alphabet" for our `SeqToSeq` model, the list of all tokens that can appear in sequences. (It's also possible for input and output sequences to have different alphabets, but since we're training it as an autoencoder, they're identical in this case.) Make a list of every character that appears in any training sequence.
# Alphabet for SeqToSeq: every character that appears in any training SMILES.
tokens = set()
for s in train_smiles:
    tokens = tokens.union(set(c for c in s))
tokens = sorted(list(tokens))
# Create the model and define the optimization method to use. In this case, learning works much better if we gradually decrease the learning rate. We use an `ExponentialDecay` to multiply the learning rate by 0.9 after each epoch.
# NOTE(review): `deepchem.models.tensorgraph.optimizers` is a legacy module
# path; recent DeepChem releases expose Adam/ExponentialDecay under
# `deepchem.models.optimizers` — confirm against the pinned deepchem version.
from deepchem.models.tensorgraph.optimizers import Adam, ExponentialDecay
# The longest training SMILES bounds the sequence length the model must handle.
max_length = max(len(s) for s in train_smiles)
# Autoencoder setup: input and output alphabets are the same token list.
model = dc.models.SeqToSeq(tokens,
                           tokens,
                           max_length,
                           encoder_layers=2,
                           decoder_layers=2,
                           embedding_dimension=256,
                           model_dir='fingerprint')
# Decay the learning rate by a factor of 0.9 once per epoch
# (i.e. every `batches_per_epoch` optimizer steps).
batches_per_epoch = len(train_smiles)/model.batch_size
model.set_optimizer(Adam(learning_rate=ExponentialDecay(0.004, 0.9, batches_per_epoch)))
# Let's train it! The input to `fit_sequences()` is a generator that produces input/output pairs. On a good GPU, this should take a few hours or less.
# +
def generate_sequences(epochs, sequences=None):
    """Yield (input, output) pairs for autoencoder-style sequence training.

    Each string is paired with itself, and the whole corpus is repeated
    `epochs` times.

    Args:
        epochs: number of passes over the corpus.
        sequences: iterable of strings to cycle over; defaults to the
            module-level `train_smiles` corpus (backward compatible).

    Yields:
        (s, s) tuples, one per sequence per epoch.
    """
    if sequences is None:
        sequences = train_smiles
    for _ in range(epochs):
        for s in sequences:
            yield (s, s)
model.fit_sequences(generate_sequences(40))
# -
# Let's see how well it works as an autoencoder. We'll run the first 500 molecules from the validation set through it, and see how many of them are exactly reproduced.
# Autoencoder sanity check: decode the first 500 validation SMILES and count
# how many round-trip exactly (the predicted token list joins back to the input).
sample = valid_smiles[:500]
predicted = model.predict_from_sequences(sample)
count = sum(1 for original, decoded in zip(sample, predicted)
            if ''.join(decoded) == original)
print('reproduced', count, 'of 500 validation SMILES strings')
# Now we'll try using the encoder as a way to generate molecular fingerprints. We compute the embedding vectors for all molecules in the training and validation datasets, and create new datasets that have those as their feature vectors. The amount of data is small enough that we can just store everything in memory.
# +
# Encode every training/validation SMILES into its fixed-length embedding and
# wrap the embeddings as NumpyDatasets, reusing the original labels/weights/ids.
train_embeddings = model.predict_embeddings(train_smiles)
train_embeddings_dataset = dc.data.NumpyDataset(train_embeddings,
                                                train_dataset.y,
                                                train_dataset.w,
                                                train_dataset.ids)
valid_embeddings = model.predict_embeddings(valid_smiles)
valid_embeddings_dataset = dc.data.NumpyDataset(valid_embeddings,
                                                valid_dataset.y,
                                                valid_dataset.w,
                                                valid_dataset.ids)
# -
# For classification, we'll use a simple fully connected network with one hidden layer.
# n_features=256 must match the SeqToSeq embedding_dimension chosen above.
classifier = dc.models.MultiTaskClassifier(n_tasks=len(tasks),
                                           n_features=256,
                                           layer_sizes=[512])
classifier.fit(train_embeddings_dataset, nb_epoch=10)
# Find out how well it worked. Compute the ROC AUC for the training and validation datasets.
import numpy as np
# Mean ROC AUC over the MUV tasks; transformers undo any label transforms.
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean, mode="classification")
train_score = classifier.evaluate(train_embeddings_dataset, [metric], transformers)
valid_score = classifier.evaluate(valid_embeddings_dataset, [metric], transformers)
print('Training set ROC AUC:', train_score)
print('Validation set ROC AUC:', valid_score)
| examples/notebooks/seqtoseq_fingerprint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="c708b7e1020677625d97ad8082cf4a79b9d78800"
# Here I am using sklearn library to implement all the major algorithms from Regression part of Supervised Learning. Will use Matplotlib to draw the outcomes or model predictions.
# + [markdown] _uuid="b09786c8d26169fade8a2b1b1fb6be0492bf1b2b"
# Global Imports
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.linear_model import LinearRegression
# + [markdown] _uuid="e9964675d469a41b959ea1ee45326904ca97611f"
# <h1>1. Simple Linear Regression</h1>
# + [markdown] _uuid="cb8a329f10825ecb4c047b705c92d72beb4b706a"
# Algorithm specific imports
# + _uuid="6ac535ec463cc43295d57cf352f7fcc498106bc4"
from sklearn.linear_model import LinearRegression
# + [markdown] _uuid="d7c2b5c97b1d9a9aa53589492f25e3c5aec66fd4"
# Extracting training and test dataset
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
#dataset = pd.read_csv('../input/salary-data/Salary_Data.csv')
# Semicolon-separated export with Latin-1 encoding.
dataset = pd.read_csv("Companies.csv", sep=';', encoding = "ISO-8859-1") # , index_col = 0
# Normalise European decimal commas ("1,5") to dots so numeric columns parse as floats.
cols = dataset.iloc[:,4:].columns
for col in cols:
    dataset[col] = dataset[col].astype(str).str.replace(",", ".").astype(float)
# Features: columns 4..51; target(s): column 52 onward.
X = dataset.iloc[:,4:52].values
y = dataset.iloc[:,52:].values
# We are going to keep 20% of the dataset in test dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/5, random_state=0)
# + [markdown] _uuid="bcb2a0a57c15a43df74f81a8e4edbfb8621fd2d9"
# Training model and making prediction
# + _uuid="4d1fbe0ee22a4347d7cfb130ef01fb3dacf7c33d"
# Fit ordinary least squares on the training split and predict on the test split.
linear_regressor = LinearRegression()
linear_regressor.fit(X_train, y_train)
#np.array(X_train.reshape(-1, 1)), y_train.reshape(-1, 1)
#X_test.reshape(-1, 1)
y_predict = linear_regressor.predict(X_test)
# -
X_train[:,1:4].shape
# + _uuid="f2c7a2114d599ac481374e36390f454ac1dd8b89"
# Plot points and fit line for training data
# NOTE(review): X_train[:,1:4] selects THREE feature columns while y_train has
# one target per row, and the axis labels ("Experience"/"Salary") look inherited
# from a different dataset — verify the intended single feature column before
# trusting this plot.
plt.scatter(X_train[:,1:4], y_train, color='teal', edgecolors='black', label='Training-set observation points')
plt.plot(X_train[:,1:4], linear_regressor.predict(X_train), color='grey', label='Fit Regression Line')
plt.title('Salary vs Experience')
plt.xlabel('Experience (in years)')
plt.ylabel('Salary (in USD)')
# plot scatter points and line for test data
plt.scatter(X_test[:,1:4], y_test, color='red', edgecolors='black', label='Test-set observation points')
plt.legend()
plt.show()
# + [markdown] _uuid="f3cddc6e5b947a6033c584ab5531509e8bbd77ad"
#
# <h1>2. Multivariate Linear Regression</h1>
# + [markdown] _uuid="85e4828457dc131cbaded191c31c5d7bc9727064"
# Import libraries
# + _uuid="2475338e5c31e89d35fbb1828286a60e8077bee2"
# We already have imported LinearRegression in above algorithm, going to use the same
import statsmodels.formula.api as sm
# + [markdown] _uuid="af693cc6520093540b5c425ad092acb5abdf382c"
# Extracting training and test dataset
# + _uuid="b98570c2ceff3936f3c88aa667ef42730f51a60a"
# #dataset = pd.read_csv('../input/m-50-startups/50_Startups.csv')
# #X = dataset.iloc[:,:-1].values
# #y = dataset.iloc[:,-1].values
# # Handle categorical variable - State column
# labelencoder_X = LabelEncoder()
# X[:,3] = labelencoder_X.fit_transform(X[:, 3])
# hotonencoder_X = OneHotEncoder(categorical_features = [3])
# X = hotonencoder_X.fit_transform(X).toarray()
# # Avoiding the dummy trap
# X = X[:, 1:]
# # We are going to keep 20% of the dataset in test dataset
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/5, random_state=0)
# + [markdown] _uuid="126bcac242105b50048c6460ae62ae182397384a"
# Algorithm execution
# + _uuid="5ae45ec7d5d12d82e61c319003ccb0ecb311bf83"
# Fit ordinary least squares on all features at once.
multiple_linear_regressor = LinearRegression()
multiple_linear_regressor.fit(X_train, y_train)
# y_pred contains all the values predicted by the trained model
y_pred = multiple_linear_regressor.predict(X_test)
# + [markdown] _uuid="4a371d695e78cd2039aa1ea31acc52310e8410b0"
# * <h2> Backward Elimination for model optimization</h2>
# + [markdown] _uuid="79904bba0fda15a3ad84204d0b592bbda0ad8548"
# Our current model is not optimal, since we don't know the impact of every predictor (or feature) on the target variable. So we are going to use the Backward Elimination technique to make it optimal by removing insignificant predictors.
# + _uuid="4ba570b50b7c4094b961a353e3515e98493e08ec"
# Prepend an intercept column of ones (X0) so statsmodels' OLS fits a constant
# term. Use the actual row count rather than a hard-coded sample size, so this
# works for any dataset loaded above.
X = np.append(arr=np.ones((X.shape[0], 1)).astype(int), values=X, axis=1)
# + _uuid="525c8558f6df3d7b261e3ca498734a26cf410f92"
# Starting Backward Elimination steps
# NOTE(review): the column indices below assume a 6-column design matrix (the
# classic 50_Startups layout), but X currently comes from Companies.csv with
# far more features — confirm which dataset this section is meant to run on.
# Step 1 - Taking all the predictors (features) in X_opt. X_opt will hold only
# the currently-retained predictors; we keep shrinking it.
X_opt = X[:, [0, 1, 2, 3, 4, 5]]
# Step 2 - fitting the full model with all predictors
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
# Step 3 - inspect the summary and find the predictor with p-value above 0.05
regressor_OLS.summary()
# Step 4 - removing the predictor [In this case column with index 2]
X_opt = X[:, [0, 1, 3, 4, 5]]
# Step 5 - Re-fit the model with removed predictor
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
### Repeat steps 3, 4, 5 until every remaining predictor has p-value < 0.05
# Removed 2nd index column and done fitting.
# Step 3 - inspect the summary and find the predictor with p-value above 0.05
regressor_OLS.summary()
# Step 4 - removing the predictor [In this case column with index 1]
X_opt = X[:, [0, 3, 4, 5]]
# Step 5 - Re-fit the model with removed predictor
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
# Removed 1st index column and done fitting.
# Step 3 - inspect the summary and find the predictor with p-value above 0.05
regressor_OLS.summary()
# Step 4 - removing the predictor [In this case column with index 4]
X_opt = X[:, [0, 3, 5]]
# Step 5 - Re-fit the model with removed predictor
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
# Removed 4th index column and done fitting.
# Step 3 - inspect the summary and find the predictor with p-value above 0.05
regressor_OLS.summary()
# Step 4 - removing the predictor [In this case column with index 5]
X_opt = X[:, [0, 3]]
# Step 5 - Re-fit the model with removed predictor
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
# Removed 5th index column and done fitting.
regressor_OLS.summary()
# FINISHED
# FINISHED
# + [markdown] _uuid="2bb5bb24257308099af3856c024a5481b059818b"
# Now all predictors are below 0.05 significant level and our model is optimized as per Backward Elimination
# + [markdown] _uuid="47316eb8921980f9bb122851fd02894ca17d4487"
# <h1>3. Polynomial Regression</h1>
# + [markdown] _uuid="f76b43600873cf71f016469fab0d9413b07c352f"
# Import algorithm specific modules
# + _uuid="69ebea208f6c8c4edd21c5190cad2f1928a37d3c"
from sklearn.preprocessing import PolynomialFeatures
# + _uuid="8a696cd80a5e4ee1be6070815dfab0af3adb856f"
dataset = pd.read_csv('../input/polynomial-position-salary-data/Position_Salaries.csv')
# iloc[:,1:2] (not iloc[:,1]) keeps X as a 2-D column matrix, as sklearn expects.
X = dataset.iloc[:,1:2].values
y = dataset.iloc[:,-1].values
# We don't have enough records in our dataset, so we skip splitting X, y into train and test sets
# + [markdown] _uuid="6baa326e4940d1a61eb4d1d49b5ecfca7450f0b6"
# From here, we simply generate the design matrix with columns X^0, X^1 and X^2
# + _uuid="a953382cb685f5197635957b1b8f7b05957b84e0"
poly_reg = PolynomialFeatures(degree = 2)
X_poly = poly_reg.fit_transform(X)
# + _uuid="fd9fe11e11442d4005c6d4284d5058e40c48eafb"
# linear regression model (fit on raw X, for comparison)
linear_reg_model = LinearRegression()
linear_reg_model.fit(X, y)
# polynomial regression model (fit on the expanded polynomial features)
poly_reg_model = LinearRegression()
poly_reg_model.fit(X_poly, y)
# + [markdown] _uuid="a340365b36547baf1d92c8a47605914872e6ca7a"
# We have trained both models linear and polynomial. Now we are going to compare the plots of both models
#
# + _uuid="2a9f5ebcf6abb3b1b3bd14606d3d125c76b65324"
# Compare the straight-line fit with the degree-2 polynomial fit on one chart.
# Legend labels fixed: the linear model draws a line, the polynomial model a
# curve (the originals were swapped and "Polynomial" was misspelled).
plt.scatter(X, y, color='red', label='Actual observation points')
plt.plot(X, linear_reg_model.predict(X), label='Linear regressor fit line')
plt.plot(X, poly_reg_model.predict(poly_reg.fit_transform(X)), label='Polynomial regressor fit curve')
plt.title('Truth or bluff (Linear Regression)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.legend()
plt.show()
# + [markdown] _uuid="c503d40858b4757ad066fe4daa40fab0a3537678"
# <h1>4. Support Vector Regression (SVR)</h1>
# + [markdown] _uuid="f5e2dbfc36f0ce278219bac898ba766d4df8b317"
# Import model specific libraries
# + _uuid="e4482e63e16e04e192a2d001224cbad0215cb651"
from sklearn.svm import SVR
# + _uuid="c56cac57440ff82c7f77a052aaaad56e3b48729e"
dataset = pd.read_csv('../input/polynomial-position-salary-data/Position_Salaries.csv')
X = dataset.iloc[:,1:2].values
y = dataset.iloc[:,-1].values
# + [markdown] _uuid="778dad88db623b1be710ab10a85e162e1cefa346"
# Performing feature scaling, since it is not a built-in feature of the SVR class in sklearn
# + _uuid="1ff45a8de538a40aa2cd97d50d3ab465b4275381"
# Separate scalers for features and target so each can be inverted independently.
scale_X = StandardScaler()
scale_y = StandardScaler()
X = scale_X.fit_transform(X)
# reshape(-1, 1): StandardScaler requires a 2-D column, so y becomes (n, 1).
y = scale_y.fit_transform(y.reshape(-1, 1))
# + [markdown] _uuid="718b1e0730771b117192eee0895c26b2d3f2b88d"
# Algorithm execution
# + _uuid="626e01e5a3e57359fbb447a515b9efdf39e926ec"
# RBF-kernel support vector regressor on the scaled data. `fit` expects a 1-D
# target, so flatten the (n, 1) column produced by StandardScaler — otherwise
# sklearn emits a DataConversionWarning (or errors in strict versions).
svr_regressor = SVR(kernel='rbf', gamma='auto')
svr_regressor.fit(X, y.ravel())
# + [markdown] _uuid="55966707cd13b2ca6178ffe807015f5c87f6a047"
# Visualizing the SVR predictions
# + _uuid="1fd1ae05d36d9ae02c81dd90cd564a7087db1978"
# Visualise the SVR fit over the (scaled) feature range.
plt.scatter(X, y, color='red', label='Actual observation points')
plt.plot(X, svr_regressor.predict(X), label='SVR regressor')
plt.title('Truth or bluff (SVR Regression)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.legend()
plt.show()
# + [markdown] _uuid="5cf9c3ea710e6288036f5e9d71a7222b44ac62a1"
# Predicting salary for someone who has 6.5 years of experience.
# + _uuid="c68ea22d7d6dc36e7428c51b5fa598a00c669bf4"
scale_y.inverse_transform(svr_regressor.predict(scale_X.transform(np.array([[6.5]]))))
# + [markdown] _uuid="5acda1d15c065c3a8e5fb5ebfdd66011c76afc27"
# <h1>5. Decision Tree - Regression</h1>
# + [markdown] _uuid="4ccd8fe73781c54215766ecb32531c8b19292ac0"
# Algorithm specific imports
# + _uuid="0e97230a83909f1d85ca4fcf8c348db4e8ef0372"
from sklearn.tree import DecisionTreeRegressor
# + [markdown] _uuid="e2f3ebfdf45e27ed05c9271f7e532a5978cb6859"
# Read dataset and extract feature and target variable from that.
# + _uuid="5cb51b5e93b8718ec81f1ca238e5b54013cc2f07"
dataset = pd.read_csv('../input/polynomial-position-salary-data/Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, -1].values
# + [markdown] _uuid="06279b7454c8796d05cb9b4dbeea73769b7621ef"
# No need to perform feature scaling. Since it will get taken care by the library itself.
# + [markdown] _uuid="7c19a60f93d2a1fbcd25ad61d5e8187ec9e0048f"
# Creating regressor
# + _uuid="49fe1152437a63da95305f0a5211b01237d43551"
# Fixed random_state makes the tree (and its tie-breaking) reproducible.
tree_regressor = DecisionTreeRegressor(random_state = 0)
tree_regressor.fit(X, y)
# + [markdown] _uuid="c8e8bbb05a20a7521e8512811f1e0f87a7d59797"
# Visualizing the graph
# + _uuid="97cb731fe91bf004a20df10a05bdf8b3431efef2"
# Dense grid (step 0.01) so the tree's piecewise-constant prediction renders
# as visible steps rather than straight segments between data points.
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape(len(X_grid), 1)
plt.scatter(X, y, color='red', label='Actual observation points')
plt.plot(X_grid, tree_regressor.predict(X_grid), label='Tree regressor')
plt.title('Truth or bluff (Tree Regression)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.legend()
plt.show()
# + [markdown] _uuid="06f6a5b158ba091eefbed1718a7b5ff9de2b2390"
# Predicting salary for someone who has 6.5 years of experience.
# + _uuid="7e7a6e3e3d11b71718f20443991bdc8ec4ac55c1"
tree_regressor.predict([[6.5]])
# + [markdown] _uuid="44f68eed0aac9163e9b4d7e211370e83aa1fdf75"
# Here we are not getting a good prediction, maybe because of the 2D plane (or single feature). It should give better predictions on a higher-dimensional feature space.
# + [markdown] _uuid="8bbff81837a185916225282a71a1fafb81cb1e73"
# <h1>6. Random Forest Regression</h1>
# + [markdown] _uuid="e6c0ff62a46af9b0083e22168a07fd52e941efdc"
# Algorithm specific imports
# + _uuid="971122fab95855f6103c14bf647d56bd2ec37e4a"
from sklearn.ensemble import RandomForestRegressor
# + _uuid="f689705eb6bc542cf2db9278e563e510975d552b"
dataset = pd.read_csv('../input/polynomial-position-salary-data/Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, -1].values
# + [markdown] _uuid="97db86896e3af6dfa32c52d6a146eee02f11c8de"
# Regressor creation
# + _uuid="13aa60e252dae42ec0cb0d5b66f77baa98eecb28"
# Ensemble of 300 trees; fixed random_state for reproducible bootstraps.
forest_regressor = RandomForestRegressor(n_estimators = 300, random_state = 0)
forest_regressor.fit(X, y)
# + [markdown] _uuid="9997a329698cd332ae107141aab17d569d06f7a0"
# Plot and visualize graph
# + _uuid="4e5984714310ab6ef6f5f676cc6c24ef23d6d566"
# Dense grid so the forest's piecewise-constant prediction renders as steps.
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape(len(X_grid), 1)
plt.scatter(X, y, color='red', label='Actual observation points')
plt.plot(X_grid, forest_regressor.predict(X_grid), label='Random Forest regressor')
plt.title('Truth or bluff (Random Forest Regression)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.legend()
plt.show()
# + [markdown] _uuid="b2f9451915fdfc3fbf9c8fb83cc87994289545df"
# Predict the salary for a 6.5-year-experienced employee; we use the same value of x so that we can compare the predicted value with the Polynomial and Decision Tree predictions
# + _uuid="25562de259e14729c2986968f2241a3d6c20b36e"
forest_regressor.predict([[6.5]])
# + [markdown] _uuid="3ca89b2476ea1fe27a7ae93483cfc05de6725fea"
# This prediction is much better than the ones obtained from other regression algorithms
# + _uuid="044ad644fc9319c32afa62851342d279febc8166"
# Will keep refining these implementations and visualizations.
| Code/machine learning/temp/regression-algorithms-using-scikit-learn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
## DeepExplain - Keras (TF backend) example
### MNIST with CNN
# +
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile, sys, os
sys.path.insert(0, os.path.abspath('..'))
import keras
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
# Import DeepExplain
from deepexplain.tensorflow import DeepExplain
# +
# Build and train a network.
batch_size = 128
num_classes = 10
epochs = 3  # short demo run; this is enough for the attribution example below
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Reshape to whichever single-channel layout the active Keras backend expects.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# Rescale from [0, 1] to [-1, 1] so the inputs are zero-centred.
x_train = (x_train - 0.5) * 2
x_test = (x_test - 0.5) * 2
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# ^ IMPORTANT: notice that the final softmax must be in its own layer
# if we want to target pre-softmax units
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# -
with DeepExplain(session=K.get_session()) as de:  # <-- init DeepExplain context
    # Need to reconstruct the graph in DeepExplain context, using the same weights.
    # With Keras this is very easy:
    # 1. Get the input tensor to the original model
    input_tensor = model.layers[0].input
    # 2. We now target the output of the last dense layer (pre-softmax)
    # To do so, create a new model sharing the same layers until the last dense (index -2)
    fModel = Model(inputs=input_tensor, outputs = model.layers[-2].output)
    target_tensor = fModel(input_tensor)
    # Explain the first 10 test digits; multiplying by the one-hot labels `ys`
    # selects each sample's true-class logit as the attribution target.
    xs = x_test[0:10]
    ys = y_test[0:10]
    attributions = de.explain('grad*input', target_tensor * ys, input_tensor, xs)
    #attributions = de.explain('saliency', target_tensor * ys, input_tensor, xs)
    #attributions = de.explain('intgrad', target_tensor * ys, input_tensor, xs)
    #attributions = de.explain('deeplift', target_tensor * ys, input_tensor, xs)
    #attributions = de.explain('elrp', target_tensor * ys, input_tensor, xs)
    #attributions = de.explain('occlusion', target_tensor * ys, input_tensor, xs)
# +
# Plot attributions
from utils import plot, plt
# %matplotlib inline
# Each sample occupies two adjacent axes (original digit, then its attribution
# map), so 4 columns hold 2 samples per row and n_rows = len(attributions)/2.
n_cols = 4
n_rows = int(len(attributions) / 2)
fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(3*n_cols, 3*n_rows))
for i, a in enumerate(attributions):
    row, col = divmod(i, 2)
    plot(xs[i].reshape(28, 28), cmap='Greys', axis=axes[row, col*2]).set_title('Original')
    plot(a.reshape(28,28), xi = xs[i], axis=axes[row,col*2+1]).set_title('Attributions')
| examples/mint_cnn_keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Internship project on Heart disease prediction
# ## By -
# ## <NAME>
# ## 170310007051
# # Importing necessary libraries
#
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# # Loading the dataset
data = pd.read_csv("heart.csv")
data
data.info()
# Class balance of the target and sex distribution.
sns.countplot(x="target", data = data, palette="magma")
sns.countplot(x="sex", data = data, palette="bwr")
# Disease frequency per age.
pd.crosstab(data.age,data.target).plot(kind="bar",figsize=(17,6))
sns.countplot(x="fbs", data = data, palette="bwr")
# Max heart rate (thalach) vs age, split by target class.
plt.scatter(data.age[data.target==0],data.thalach[data.target==0])
plt.scatter(data.age[data.target==1],data.thalach[data.target==1])
plt.legend(["0","1"])
plt.xlabel("Age")
plt.ylabel("thalach")
plt.show()
# Cholesterol vs age, split by target class.
plt.scatter(data.age[data.target==0],data.chol[data.target==0])
plt.scatter(data.age[data.target==1],data.chol[data.target==1])
plt.legend(["0","1"])
plt.xlabel("Age")
plt.ylabel("chol")
plt.show()
# # Scaling the data
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
# Standardise the continuous columns.
# NOTE(review): the scaler is fit on the FULL dataset before the train/test
# split below, which leaks test-set statistics into training; fit on X_train
# only to be strict.
data[["age","trestbps","chol","thalach","oldpeak"]] = sc.fit_transform(data[["age","trestbps","chol","thalach","oldpeak"]])
data
# Mark the categorical columns as object dtype so get_dummies one-hot encodes them.
data[["sex","cp","fbs","restecg","exang","slope","ca","thal"]] = data[["sex","cp","fbs","restecg","exang","slope","ca","thal"]].astype(object)
# # Creating dummy variables
#
data=pd.get_dummies(data)
X = data.copy()
y = data["target"]
X.drop(columns="target",axis=1,inplace=True)
X = X.values
y=y.values
X
y
# # Splitting data into train and test
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.15,random_state=1)
X_train.shape
from sklearn.metrics import classification_report
# # Applying KNN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train,y_train)
pred = knn.predict(X_test)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
cm = confusion_matrix(y_test,pred)
cm
ac=accuracy_score(y_test, pred)
ac
# classification_report takes (y_true, y_pred) — the original calls had the
# arguments reversed, which swaps the reported precision and recall.
print(classification_report(y_test, pred))
# # Applying Decision Tree
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier()
dt.fit(X_train,y_train)
pred1 = dt.predict(X_test)
cm1=confusion_matrix(y_test,pred1)
cm1
ac1=accuracy_score(y_test, pred1)
ac1
print(classification_report(y_test, pred1))
# # Applying Support Vector Machine
from sklearn.svm import SVC
svc = SVC()
svc.fit(X_train,y_train)
pred2 = svc.predict(X_test)
cm2=confusion_matrix(y_test,pred2)
cm2
ac2=accuracy_score(y_test, pred2)
ac2
print(classification_report(y_test, pred2))
# # Applying Random Forest
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier()
rfc.fit(X_train,y_train)
pred3 = rfc.predict(X_test)
cm3=confusion_matrix(y_test,pred3)
cm3
ac3=accuracy_score(y_test, pred3)
ac3
print(classification_report(y_test, pred3))
# # Applying Naive Bayes
from sklearn.naive_bayes import GaussianNB
nb = GaussianNB()
nb.fit(X_train,y_train)
pred4 = nb.predict(X_test)
cm4=confusion_matrix(y_test,pred4)
cm4
ac4=accuracy_score(y_test, pred4)
ac4
print(classification_report(y_test, pred4))
# # Accuracy
# +
print("Accuracy of models")
# Bars follow the order the models were trained above:
# ac=KNN, ac1=Decision Tree, ac2=SVC, ac3=Random Forest, ac4=Naive Bayes.
plt.bar(0,ac)
plt.bar(1,ac1)
plt.bar(2,ac2)
plt.bar(3,ac3)
plt.bar(4,ac4)
# Tick labels fixed to match the bar order (RFC and SVC were swapped).
plt.xticks([0,1,2,3,4], ['KNN','DTC','SVC','RFC','NB'])
plt.show()
# -
# +
plt.figure(figsize=(24,12))
plt.suptitle("Confusion Matrices",fontsize=24)
plt.subplots_adjust(wspace = 0.4, hspace= 0.4)
# Matrices in training order: cm=KNN, cm1=Decision Tree, cm2=SVC,
# cm3=Random Forest, cm4=Naive Bayes. Titles fixed to match their matrices
# (the SVM and Random Forest titles were swapped in the original).
plt.subplot(2,3,1)
plt.title("K Nearest Neighbors Confusion Matrix")
sns.heatmap(cm,annot=True,cmap="Blues",fmt="d",cbar=False, annot_kws={"size": 24})
plt.subplot(2,3,2)
plt.title("Decision Tree Classifier Confusion Matrix")
sns.heatmap(cm1,annot=True,cmap="Blues",fmt="d",cbar=False, annot_kws={"size": 24})
plt.subplot(2,3,3)
plt.title("Support Vector Machine Confusion Matrix")
sns.heatmap(cm2,annot=True,cmap="Blues",fmt="d",cbar=False, annot_kws={"size": 24})
plt.subplot(2,3,4)
plt.title("Random forest classifier Confusion Matrix")
sns.heatmap(cm3,annot=True,cmap="Blues",fmt="d",cbar=False, annot_kws={"size": 24})
plt.subplot(2,3,5)
plt.title("Naive Bayes Confusion Matrix")
sns.heatmap(cm4,annot=True,cmap="Blues",fmt="d",cbar=False, annot_kws={"size": 24})
plt.show()
# -
| Internship.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# import all packages here
import pandas as pd
import math
import altair as alt
import os
# -
# ### DATA WRANGLING
# Set working directory
# NOTE(review): absolute, machine-specific path — consider a relative path or
# a configurable base directory for portability.
os.chdir("/Volumes/UBC/Block5/551/Project/crypocurrency_db")
# +
# Read each coin's price history and add its Name column.
def _load_price(coin):
    """Read raw_data/<coin>_price.csv and tag every row with the coin name."""
    df = pd.read_csv("raw_data/{}_price.csv".format(coin))
    df['Name'] = coin
    return df

# One module-level DataFrame per coin, as before.
bitcoin_cash_price = _load_price('bitcoin_cash')
bitcoin_price = _load_price('bitcoin')
bitconnect_price = _load_price('bitconnect')
dash_price = _load_price('dash')
ethereum_classic_price = _load_price('ethereum_classic')
ethereum_price = _load_price('ethereum')
iota_price = _load_price('iota')
litecoin_price = _load_price('litecoin')
monero_price = _load_price('monero')
nem_price = _load_price('nem')
neo_price = _load_price('neo')
numeraire_price = _load_price('numeraire')
omisego_price = _load_price('omisego')
qtum_price = _load_price('qtum')
ripple_price = _load_price('ripple')
stratis_price = _load_price('stratis')
waves_price = _load_price('waves')
# -
| python/.ipynb_checkpoints/Draft1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # De-identification of patient notes with Recurrent neural nets
# In this notebook I explore the paper on De-identification of patient notes with Recurrent neural nets written by <NAME>, <NAME>, <NAME> and <NAME>.
# The paper covers the use case of obfuscating the sensitive information in the patient medical records so that they can be used for research. This method aims to replace the traditional way of manual de-identification which costs a lot and is error prone.
# ## Architecture
# <img src="http://aryancodify.tech/wp-content/uploads/2018/07/architecture.png" />
# The researchers present a LSTM based three layered architecture with character-enhanced token embedding layer that maps each token into a vector representation.
#
# Second Layer is the Label prediction layer that outputs a sequence of vectors containing the probability score for each token for a label.
#
# The third layer called the sequence optimization layer outputs the most likely sequence of predicted labels based on the sequence of probability vectors from the previous layer. To make sure the tokens are predicted in correct sequence the last layer also makes use of the transition probabilities T[i,j] between two subsequent labels.
#
# **The precision and F-1 score of the used LSTM method is superior to the previously used method with an advantage that the ANN approach can learn the features from token embeddings itself using composition.**
| Post2_DeIdentification.ipynb |